Diffstat (limited to 'nixos')
-rw-r--r--nixos/doc/manual/administration/imperative-containers.section.md2
-rw-r--r--nixos/doc/manual/administration/nixos-state.section.md28
-rw-r--r--nixos/doc/manual/administration/running.md1
-rw-r--r--nixos/doc/manual/administration/system-state.chapter.md17
-rw-r--r--nixos/doc/manual/administration/systemd-state.section.md52
-rw-r--r--nixos/doc/manual/administration/zfs-state.section.md16
-rw-r--r--nixos/doc/manual/configuration/gpu-accel.chapter.md8
-rw-r--r--nixos/doc/manual/configuration/luks-file-systems.section.md42
-rw-r--r--nixos/doc/manual/development/option-types.section.md7
-rw-r--r--nixos/doc/manual/development/unit-handling.section.md39
-rw-r--r--nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md2
-rw-r--r--nixos/doc/manual/installation/changing-config.chapter.md8
-rw-r--r--nixos/doc/manual/installation/installing-usb.section.md2
-rw-r--r--nixos/doc/manual/release-notes/release-notes.md1
-rw-r--r--nixos/doc/manual/release-notes/rl-2111.section.md2
-rw-r--r--nixos/doc/manual/release-notes/rl-2311.section.md92
-rw-r--r--nixos/doc/manual/release-notes/rl-2405.section.md197
-rw-r--r--nixos/lib/eval-config.nix1
-rw-r--r--nixos/lib/make-disk-image.nix9
-rw-r--r--nixos/lib/make-options-doc/default.nix2
-rw-r--r--nixos/lib/make-single-disk-zfs-image.nix5
-rw-r--r--nixos/lib/make-squashfs.nix3
-rw-r--r--nixos/lib/systemd-lib.nix68
-rw-r--r--nixos/lib/test-driver/default.nix6
-rw-r--r--nixos/lib/test-driver/test_driver/machine.py32
-rw-r--r--nixos/maintainers/option-usages.nix6
-rwxr-xr-xnixos/maintainers/scripts/ec2/create-amis.sh54
-rw-r--r--nixos/maintainers/scripts/openstack/openstack-image-zfs.nix8
-rw-r--r--nixos/modules/config/iproute2.nix5
-rw-r--r--nixos/modules/config/krb5/default.nix369
-rw-r--r--nixos/modules/config/ldap.nix101
-rw-r--r--nixos/modules/config/ldso.nix58
-rw-r--r--nixos/modules/config/mysql.nix57
-rw-r--r--nixos/modules/config/nix-channel.nix1
-rw-r--r--nixos/modules/config/nix.nix8
-rw-r--r--nixos/modules/config/no-x-libs.nix6
-rw-r--r--nixos/modules/config/pulseaudio.nix22
-rw-r--r--nixos/modules/config/shells-environment.nix3
-rw-r--r--nixos/modules/config/stub-ld.nix56
-rw-r--r--nixos/modules/config/swap.nix3
-rw-r--r--nixos/modules/config/sysctl.nix30
-rw-r--r--nixos/modules/config/users-groups.nix2
-rw-r--r--nixos/modules/hardware/all-firmware.nix5
-rw-r--r--nixos/modules/hardware/keyboard/qmk.nix1
-rw-r--r--nixos/modules/hardware/usb-storage.nix2
-rw-r--r--nixos/modules/hardware/video/amdgpu-pro.nix7
-rw-r--r--nixos/modules/hardware/video/nvidia.nix72
-rw-r--r--nixos/modules/hardware/video/webcam/ipu6.nix16
-rw-r--r--nixos/modules/i18n/input-method/fcitx5.nix13
-rw-r--r--nixos/modules/image/repart-image.nix110
-rw-r--r--nixos/modules/image/repart.nix134
-rw-r--r--nixos/modules/installer/cd-dvd/installation-cd-minimal.nix4
-rw-r--r--nixos/modules/installer/cd-dvd/iso-image.nix3
-rw-r--r--nixos/modules/installer/tools/tools.nix3
-rw-r--r--nixos/modules/misc/documentation.nix8
-rw-r--r--nixos/modules/misc/ids.nix4
-rw-r--r--nixos/modules/misc/mandoc.nix161
-rw-r--r--nixos/modules/misc/version.nix34
-rw-r--r--nixos/modules/module-list.nix29
-rw-r--r--nixos/modules/profiles/installation-device.nix2
-rw-r--r--nixos/modules/profiles/minimal.nix2
-rw-r--r--nixos/modules/programs/atop.nix5
-rw-r--r--nixos/modules/programs/direnv.nix9
-rw-r--r--nixos/modules/programs/firefox.nix1
-rw-r--r--nixos/modules/programs/firejail.nix2
-rw-r--r--nixos/modules/programs/gamemode.nix2
-rw-r--r--nixos/modules/programs/gpaste.nix2
-rw-r--r--nixos/modules/programs/hyprland.nix11
-rw-r--r--nixos/modules/programs/mininet.nix35
-rw-r--r--nixos/modules/programs/mosh.nix24
-rw-r--r--nixos/modules/programs/nix-ld.nix2
-rw-r--r--nixos/modules/programs/partition-manager.nix2
-rw-r--r--nixos/modules/programs/screen.nix38
-rw-r--r--nixos/modules/programs/singularity.nix7
-rw-r--r--nixos/modules/programs/ssh.nix20
-rw-r--r--nixos/modules/programs/starship.nix57
-rw-r--r--nixos/modules/programs/tsm-client.nix299
-rw-r--r--nixos/modules/programs/wayland/labwc.nix25
-rw-r--r--nixos/modules/programs/wayland/river.nix3
-rw-r--r--nixos/modules/programs/wayland/sway.nix43
-rw-r--r--nixos/modules/programs/winbox.nix24
-rw-r--r--nixos/modules/security/acme/default.md26
-rw-r--r--nixos/modules/security/acme/default.nix4
-rw-r--r--nixos/modules/security/apparmor.nix3
-rw-r--r--nixos/modules/security/auditd.nix4
-rw-r--r--nixos/modules/security/duosec.nix6
-rw-r--r--nixos/modules/security/ipa.nix50
-rw-r--r--nixos/modules/security/krb5/default.nix90
-rw-r--r--nixos/modules/security/krb5/krb5-conf-format.nix88
-rw-r--r--nixos/modules/security/pam.nix66
-rw-r--r--nixos/modules/security/sudo-rs.nix2
-rw-r--r--nixos/modules/security/sudo.nix2
-rw-r--r--nixos/modules/security/wrappers/default.nix4
-rw-r--r--nixos/modules/security/wrappers/wrapper.nix4
-rw-r--r--nixos/modules/services/admin/pgadmin.nix26
-rw-r--r--nixos/modules/services/audio/gmediarender.nix1
-rw-r--r--nixos/modules/services/audio/jmusicbot.nix1
-rw-r--r--nixos/modules/services/audio/mopidy.nix2
-rw-r--r--nixos/modules/services/audio/mympd.nix129
-rw-r--r--nixos/modules/services/audio/spotifyd.nix1
-rw-r--r--nixos/modules/services/audio/wyoming/faster-whisper.nix1
-rw-r--r--nixos/modules/services/audio/wyoming/piper.nix1
-rw-r--r--nixos/modules/services/audio/ympd.nix1
-rw-r--r--nixos/modules/services/backup/borgbackup.nix56
-rw-r--r--nixos/modules/services/backup/btrbk.nix54
-rw-r--r--nixos/modules/services/backup/postgresql-backup.nix5
-rw-r--r--nixos/modules/services/backup/restic.nix3
-rw-r--r--nixos/modules/services/backup/snapraid.nix (renamed from nixos/modules/tasks/snapraid.nix)9
-rw-r--r--nixos/modules/services/backup/tsm.nix11
-rw-r--r--nixos/modules/services/cluster/kubernetes/flannel.nix9
-rw-r--r--nixos/modules/services/cluster/kubernetes/pki.nix2
-rw-r--r--nixos/modules/services/cluster/spark/default.nix16
-rw-r--r--nixos/modules/services/continuous-integration/buildbot/master.nix3
-rw-r--r--nixos/modules/services/continuous-integration/buildbot/worker.nix6
-rw-r--r--nixos/modules/services/continuous-integration/buildkite-agents.nix8
-rw-r--r--nixos/modules/services/continuous-integration/gitea-actions-runner.nix1
-rw-r--r--nixos/modules/services/continuous-integration/github-runner/options.nix1
-rw-r--r--nixos/modules/services/continuous-integration/hydra/default.nix1
-rw-r--r--nixos/modules/services/continuous-integration/jenkins/default.nix1
-rw-r--r--nixos/modules/services/databases/aerospike.nix13
-rw-r--r--nixos/modules/services/databases/firebird.nix2
-rw-r--r--nixos/modules/services/databases/influxdb.nix1
-rw-r--r--nixos/modules/services/databases/lldap.nix1
-rw-r--r--nixos/modules/services/databases/openldap.nix1
-rw-r--r--nixos/modules/services/databases/postgresql.md2
-rw-r--r--nixos/modules/services/databases/postgresql.nix13
-rw-r--r--nixos/modules/services/desktops/ayatana-indicators.nix58
-rw-r--r--nixos/modules/services/desktops/flatpak.nix1
-rw-r--r--nixos/modules/services/desktops/geoclue2.nix2
-rw-r--r--nixos/modules/services/desktops/pipewire/pipewire.nix182
-rw-r--r--nixos/modules/services/development/livebook.md13
-rw-r--r--nixos/modules/services/development/livebook.nix15
-rw-r--r--nixos/modules/services/development/nixseparatedebuginfod.nix105
-rw-r--r--nixos/modules/services/development/zammad.nix55
-rw-r--r--nixos/modules/services/display-managers/greetd.nix12
-rw-r--r--nixos/modules/services/editors/emacs.nix21
-rw-r--r--nixos/modules/services/games/teeworlds.nix2
-rw-r--r--nixos/modules/services/hardware/acpid.nix1
-rw-r--r--nixos/modules/services/hardware/kanata.nix8
-rw-r--r--nixos/modules/services/hardware/keyd.nix6
-rw-r--r--nixos/modules/services/hardware/pcscd.nix9
-rw-r--r--nixos/modules/services/hardware/power-profiles-daemon.nix19
-rw-r--r--nixos/modules/services/hardware/sane.nix9
-rw-r--r--nixos/modules/services/hardware/thermald.nix7
-rw-r--r--nixos/modules/services/hardware/thinkfan.nix2
-rw-r--r--nixos/modules/services/hardware/udev.nix6
-rw-r--r--nixos/modules/services/hardware/vdr.nix95
-rw-r--r--nixos/modules/services/home-automation/evcc.nix1
-rw-r--r--nixos/modules/services/home-automation/home-assistant.nix20
-rw-r--r--nixos/modules/services/logging/journaldriver.nix1
-rw-r--r--nixos/modules/services/logging/logcheck.nix14
-rw-r--r--nixos/modules/services/logging/vector.nix6
-rw-r--r--nixos/modules/services/mail/dovecot.nix187
-rw-r--r--nixos/modules/services/mail/listmonk.nix5
-rw-r--r--nixos/modules/services/mail/nullmailer.nix8
-rw-r--r--nixos/modules/services/mail/postfix.nix15
-rw-r--r--nixos/modules/services/mail/roundcube.nix44
-rw-r--r--nixos/modules/services/mail/rspamd-trainer.nix76
-rw-r--r--nixos/modules/services/mail/sympa.nix2
-rw-r--r--nixos/modules/services/matrix/appservice-irc.nix2
-rw-r--r--nixos/modules/services/matrix/matrix-sliding-sync.nix9
-rw-r--r--nixos/modules/services/matrix/maubot.nix2
-rw-r--r--nixos/modules/services/matrix/synapse.md8
-rw-r--r--nixos/modules/services/matrix/synapse.nix16
-rw-r--r--nixos/modules/services/misc/amazon-ssm-agent.nix1
-rw-r--r--nixos/modules/services/misc/ankisyncd.nix6
-rw-r--r--nixos/modules/services/misc/bcg.nix2
-rw-r--r--nixos/modules/services/misc/domoticz.nix1
-rw-r--r--nixos/modules/services/misc/etesync-dav.nix1
-rw-r--r--nixos/modules/services/misc/gitea.nix15
-rw-r--r--nixos/modules/services/misc/guix/default.nix406
-rw-r--r--nixos/modules/services/misc/llama-cpp.nix111
-rw-r--r--nixos/modules/services/misc/mediatomb.nix1
-rw-r--r--nixos/modules/services/misc/metabase.nix1
-rw-r--r--nixos/modules/services/misc/moonraker.nix8
-rw-r--r--nixos/modules/services/misc/nitter.nix23
-rw-r--r--nixos/modules/services/misc/nix-ssh-serve.nix4
-rw-r--r--nixos/modules/services/misc/ntfy-sh.nix6
-rw-r--r--nixos/modules/services/misc/ollama.nix50
-rw-r--r--nixos/modules/services/misc/paperless.nix48
-rw-r--r--nixos/modules/services/misc/portunus.nix10
-rw-r--r--nixos/modules/services/misc/preload.nix2
-rw-r--r--nixos/modules/services/misc/redmine.nix7
-rw-r--r--nixos/modules/services/misc/tandoor-recipes.nix2
-rw-r--r--nixos/modules/services/misc/taskserver/helper-tool.py34
-rw-r--r--nixos/modules/services/misc/tuxclocker.nix71
-rw-r--r--nixos/modules/services/monitoring/grafana.nix4
-rw-r--r--nixos/modules/services/monitoring/mackerel-agent.nix1
-rw-r--r--nixos/modules/services/monitoring/netdata.nix11
-rw-r--r--nixos/modules/services/monitoring/prometheus/alertmanager.nix1
-rw-r--r--nixos/modules/services/monitoring/prometheus/default.nix27
-rw-r--r--nixos/modules/services/monitoring/prometheus/exporters.nix1
-rw-r--r--nixos/modules/services/monitoring/prometheus/exporters/kea.nix8
-rw-r--r--nixos/modules/services/monitoring/prometheus/exporters/mongodb.nix10
-rw-r--r--nixos/modules/services/monitoring/prometheus/exporters/nginx.nix4
-rw-r--r--nixos/modules/services/monitoring/prometheus/exporters/ping.nix48
-rw-r--r--nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix22
-rw-r--r--nixos/modules/services/monitoring/prometheus/exporters/snmp.nix47
-rw-r--r--nixos/modules/services/monitoring/snmpd.nix83
-rw-r--r--nixos/modules/services/monitoring/teamviewer.nix1
-rw-r--r--nixos/modules/services/monitoring/telegraf.nix1
-rw-r--r--nixos/modules/services/monitoring/thanos.nix3
-rw-r--r--nixos/modules/services/monitoring/ups.nix422
-rw-r--r--nixos/modules/services/monitoring/watchdogd.nix131
-rw-r--r--nixos/modules/services/network-filesystems/drbd.nix4
-rw-r--r--nixos/modules/services/network-filesystems/eris-server.nix56
-rw-r--r--nixos/modules/services/network-filesystems/kubo.nix60
-rw-r--r--nixos/modules/services/network-filesystems/openafs/client.nix1
-rw-r--r--nixos/modules/services/network-filesystems/samba.nix2
-rw-r--r--nixos/modules/services/networking/avahi-daemon.nix40
-rw-r--r--nixos/modules/services/networking/bitcoind.nix1
-rw-r--r--nixos/modules/services/networking/dante.nix1
-rw-r--r--nixos/modules/services/networking/ddclient.nix4
-rw-r--r--nixos/modules/services/networking/dhcpcd.nix2
-rw-r--r--nixos/modules/services/networking/dnsmasq.md68
-rw-r--r--nixos/modules/services/networking/dnsmasq.nix2
-rw-r--r--nixos/modules/services/networking/ejabberd.nix6
-rw-r--r--nixos/modules/services/networking/ergo.nix1
-rw-r--r--nixos/modules/services/networking/expressvpn.nix1
-rw-r--r--nixos/modules/services/networking/firewall-iptables.nix3
-rw-r--r--nixos/modules/services/networking/frp.nix22
-rw-r--r--nixos/modules/services/networking/gns3-server.md31
-rw-r--r--nixos/modules/services/networking/gns3-server.nix263
-rw-r--r--nixos/modules/services/networking/harmonia.nix7
-rw-r--r--nixos/modules/services/networking/headscale.nix1
-rw-r--r--nixos/modules/services/networking/ircd-hybrid/default.nix3
-rw-r--r--nixos/modules/services/networking/ivpn.nix2
-rw-r--r--nixos/modules/services/networking/iwd.nix2
-rw-r--r--nixos/modules/services/networking/jigasi.nix237
-rw-r--r--nixos/modules/services/networking/kea.nix29
-rw-r--r--nixos/modules/services/networking/keepalived/default.nix21
-rw-r--r--nixos/modules/services/networking/miniupnpd.nix53
-rw-r--r--nixos/modules/services/networking/mosquitto.nix1
-rw-r--r--nixos/modules/services/networking/mullvad-vpn.nix2
-rw-r--r--nixos/modules/services/networking/nbd.nix1
-rw-r--r--nixos/modules/services/networking/nebula.nix2
-rw-r--r--nixos/modules/services/networking/networkmanager.nix5
-rw-r--r--nixos/modules/services/networking/ntp/ntpd-rs.nix89
-rw-r--r--nixos/modules/services/networking/ocserv.nix1
-rw-r--r--nixos/modules/services/networking/pleroma.nix1
-rw-r--r--nixos/modules/services/networking/quicktun.nix146
-rw-r--r--nixos/modules/services/networking/rosenpass.nix1
-rw-r--r--nixos/modules/services/networking/rxe.nix2
-rw-r--r--nixos/modules/services/networking/soju.nix1
-rw-r--r--nixos/modules/services/networking/ssh/sshd.nix12
-rw-r--r--nixos/modules/services/networking/strongswan-swanctl/module.nix1
-rw-r--r--nixos/modules/services/networking/strongswan.nix1
-rw-r--r--nixos/modules/services/networking/syncplay.nix1
-rw-r--r--nixos/modules/services/networking/syncthing.nix11
-rw-r--r--nixos/modules/services/networking/tailscale.nix4
-rw-r--r--nixos/modules/services/networking/teamspeak3.nix42
-rw-r--r--nixos/modules/services/networking/tinyproxy.nix2
-rw-r--r--nixos/modules/services/networking/vdirsyncer.nix8
-rw-r--r--nixos/modules/services/networking/wasabibackend.nix1
-rw-r--r--nixos/modules/services/networking/wpa_supplicant.nix4
-rw-r--r--nixos/modules/services/networking/xrdp.nix181
-rw-r--r--nixos/modules/services/networking/yggdrasil.nix28
-rw-r--r--nixos/modules/services/networking/zerotierone.nix27
-rw-r--r--nixos/modules/services/networking/znc/default.nix1
-rw-r--r--nixos/modules/services/printing/cupsd.nix5
-rw-r--r--nixos/modules/services/search/hound.nix77
-rw-r--r--nixos/modules/services/security/bitwarden-directory-connector-cli.nix323
-rw-r--r--nixos/modules/services/security/certmgr.nix1
-rw-r--r--nixos/modules/services/security/clamav.nix54
-rw-r--r--nixos/modules/services/security/munge.nix14
-rw-r--r--nixos/modules/services/security/oauth2_proxy.nix1
-rw-r--r--nixos/modules/services/security/shibboleth-sp.nix33
-rw-r--r--nixos/modules/services/security/tor.nix2
-rw-r--r--nixos/modules/services/security/vaultwarden/backup.sh4
-rw-r--r--nixos/modules/services/security/vaultwarden/default.nix8
-rw-r--r--nixos/modules/services/system/cachix-agent/default.nix1
-rw-r--r--nixos/modules/services/system/cachix-watch-store.nix13
-rw-r--r--nixos/modules/services/system/cloud-init.nix5
-rw-r--r--nixos/modules/services/system/dbus.nix5
-rw-r--r--nixos/modules/services/system/kerberos/default.nix2
-rw-r--r--nixos/modules/services/system/kerberos/heimdal.nix8
-rw-r--r--nixos/modules/services/system/kerberos/mit.nix2
-rw-r--r--nixos/modules/services/system/zram-generator.nix2
-rw-r--r--nixos/modules/services/torrent/transmission.nix37
-rw-r--r--nixos/modules/services/video/frigate.nix1
-rw-r--r--nixos/modules/services/video/go2rtc/default.nix1
-rw-r--r--nixos/modules/services/web-apps/akkoma.nix2
-rw-r--r--nixos/modules/services/web-apps/alps.nix1
-rw-r--r--nixos/modules/services/web-apps/c2fmzq-server.nix9
-rw-r--r--nixos/modules/services/web-apps/code-server.nix260
-rw-r--r--nixos/modules/services/web-apps/dokuwiki.nix73
-rw-r--r--nixos/modules/services/web-apps/freshrss.nix1
-rw-r--r--nixos/modules/services/web-apps/healthchecks.nix1
-rw-r--r--nixos/modules/services/web-apps/invidious.nix313
-rw-r--r--nixos/modules/services/web-apps/invoiceplane.nix40
-rw-r--r--nixos/modules/services/web-apps/jitsi-meet.nix38
-rw-r--r--nixos/modules/services/web-apps/keycloak.nix14
-rw-r--r--nixos/modules/services/web-apps/mastodon.nix23
-rw-r--r--nixos/modules/services/web-apps/mattermost.nix2
-rw-r--r--nixos/modules/services/web-apps/miniflux.nix13
-rw-r--r--nixos/modules/services/web-apps/mobilizon.nix2
-rw-r--r--nixos/modules/services/web-apps/netbox.nix3
-rw-r--r--nixos/modules/services/web-apps/nextcloud.md4
-rw-r--r--nixos/modules/services/web-apps/nextcloud.nix571
-rw-r--r--nixos/modules/services/web-apps/node-red.nix2
-rw-r--r--nixos/modules/services/web-apps/openvscode-server.nix1
-rw-r--r--nixos/modules/services/web-apps/outline.nix31
-rw-r--r--nixos/modules/services/web-apps/peering-manager.nix1
-rw-r--r--nixos/modules/services/web-apps/suwayomi-server.md108
-rw-r--r--nixos/modules/services/web-apps/suwayomi-server.nix260
-rw-r--r--nixos/modules/services/web-apps/windmill.nix177
-rw-r--r--nixos/modules/services/web-apps/wordpress.nix12
-rw-r--r--nixos/modules/services/web-servers/agate.nix1
-rw-r--r--nixos/modules/services/web-servers/caddy/default.nix9
-rw-r--r--nixos/modules/services/web-servers/mighttpd2.nix1
-rw-r--r--nixos/modules/services/web-servers/minio.nix1
-rw-r--r--nixos/modules/services/web-servers/nginx/default.nix61
-rw-r--r--nixos/modules/services/web-servers/nginx/location-options.nix2
-rw-r--r--nixos/modules/services/web-servers/nginx/tailscale-auth.nix158
-rw-r--r--nixos/modules/services/web-servers/nginx/vhost-options.nix32
-rw-r--r--nixos/modules/services/web-servers/traefik.nix1
-rw-r--r--nixos/modules/services/x11/desktop-managers/cinnamon.nix30
-rw-r--r--nixos/modules/services/x11/desktop-managers/gnome.nix1
-rw-r--r--nixos/modules/services/x11/desktop-managers/plasma5.nix40
-rw-r--r--nixos/modules/services/x11/display-managers/default.nix2
-rw-r--r--nixos/modules/services/x11/display-managers/sddm.nix4
-rw-r--r--nixos/modules/services/x11/hardware/libinput.nix6
-rw-r--r--nixos/modules/services/x11/xserver.nix4
-rw-r--r--nixos/modules/system/activation/bootspec.nix2
-rwxr-xr-xnixos/modules/system/activation/switch-to-configuration.pl12
-rw-r--r--nixos/modules/system/boot/binfmt.nix2
-rw-r--r--nixos/modules/system/boot/clevis.md51
-rw-r--r--nixos/modules/system/boot/clevis.nix107
-rw-r--r--nixos/modules/system/boot/grow-partition.nix2
-rw-r--r--nixos/modules/system/boot/initrd-ssh.nix6
-rw-r--r--nixos/modules/system/boot/loader/grub/grub.nix12
-rw-r--r--nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py94
-rw-r--r--nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix6
-rw-r--r--nixos/modules/system/boot/luksroot.nix50
-rw-r--r--nixos/modules/system/boot/networkd.nix5
-rw-r--r--nixos/modules/system/boot/resolved.nix33
-rw-r--r--nixos/modules/system/boot/systemd.nix40
-rw-r--r--nixos/modules/system/boot/systemd/initrd-secrets.nix3
-rw-r--r--nixos/modules/system/boot/systemd/initrd.nix7
-rw-r--r--nixos/modules/system/boot/systemd/journald-gateway.nix135
-rw-r--r--nixos/modules/system/boot/systemd/journald-remote.nix163
-rw-r--r--nixos/modules/system/boot/systemd/journald-upload.nix111
-rw-r--r--nixos/modules/system/boot/systemd/journald.nix21
-rw-r--r--nixos/modules/system/boot/systemd/oomd.nix26
-rw-r--r--nixos/modules/system/boot/systemd/repart.nix3
-rw-r--r--nixos/modules/system/boot/systemd/sysupdate.nix2
-rw-r--r--nixos/modules/system/boot/systemd/tmpfiles.nix35
-rw-r--r--nixos/modules/system/boot/timesyncd.nix7
-rw-r--r--nixos/modules/tasks/filesystems.nix3
-rw-r--r--nixos/modules/tasks/filesystems/bcachefs.nix26
-rw-r--r--nixos/modules/tasks/filesystems/zfs.nix51
-rw-r--r--nixos/modules/tasks/network-interfaces-scripted.nix5
-rw-r--r--nixos/modules/tasks/network-interfaces.nix16
-rw-r--r--nixos/modules/tasks/trackpoint.nix11
-rw-r--r--nixos/modules/virtualisation/amazon-image.nix1
-rw-r--r--nixos/modules/virtualisation/azure-agent.nix28
-rw-r--r--nixos/modules/virtualisation/incus.nix13
-rw-r--r--nixos/modules/virtualisation/libvirtd.nix17
-rw-r--r--nixos/modules/virtualisation/lxc-container.nix4
-rw-r--r--nixos/modules/virtualisation/lxc.nix26
-rw-r--r--nixos/modules/virtualisation/lxcfs.nix12
-rw-r--r--nixos/modules/virtualisation/lxd-agent.nix18
-rw-r--r--nixos/modules/virtualisation/lxd-virtual-machine.nix4
-rw-r--r--nixos/modules/virtualisation/lxd.nix32
-rw-r--r--nixos/modules/virtualisation/oci-containers.nix1
-rw-r--r--nixos/modules/virtualisation/podman/default.nix33
-rw-r--r--nixos/modules/virtualisation/qemu-vm.nix2
-rw-r--r--nixos/modules/virtualisation/vmware-host.nix61
-rw-r--r--nixos/modules/virtualisation/waydroid.nix2
-rw-r--r--nixos/release-combined.nix8
-rw-r--r--nixos/tests/3proxy.nix1
-rw-r--r--nixos/tests/acme.nix1
-rw-r--r--nixos/tests/adguardhome.nix1
-rw-r--r--nixos/tests/all-terminfo.nix6
-rw-r--r--nixos/tests/all-tests.nix37
-rw-r--r--nixos/tests/anbox.nix2
-rw-r--r--nixos/tests/angie-api.nix148
-rw-r--r--nixos/tests/appliance-repart-image.nix12
-rw-r--r--nixos/tests/auth-mysql.nix3
-rw-r--r--nixos/tests/avahi.nix2
-rw-r--r--nixos/tests/ayatana-indicators.nix89
-rw-r--r--nixos/tests/babeld.nix4
-rw-r--r--nixos/tests/bittorrent.nix4
-rw-r--r--nixos/tests/bootspec.nix31
-rw-r--r--nixos/tests/btrbk-section-order.nix7
-rw-r--r--nixos/tests/btrbk.nix1
-rw-r--r--nixos/tests/buildbot.nix4
-rw-r--r--nixos/tests/c2fmzq.nix7
-rw-r--r--nixos/tests/caddy.nix15
-rw-r--r--nixos/tests/ceph-single-node.nix21
-rw-r--r--nixos/tests/cinnamon-wayland.nix77
-rw-r--r--nixos/tests/cinnamon.nix6
-rw-r--r--nixos/tests/cloud-init.nix1
-rw-r--r--nixos/tests/code-server.nix22
-rw-r--r--nixos/tests/containers-custom-pkgs.nix2
-rw-r--r--nixos/tests/containers-imperative.nix1
-rw-r--r--nixos/tests/corerad.nix2
-rw-r--r--nixos/tests/curl-impersonate.nix2
-rw-r--r--nixos/tests/dhparams.nix2
-rw-r--r--nixos/tests/docker-tools.nix2
-rw-r--r--nixos/tests/drawterm.nix58
-rw-r--r--nixos/tests/elk.nix6
-rw-r--r--nixos/tests/eris-server.nix2
-rw-r--r--nixos/tests/ferm.nix2
-rw-r--r--nixos/tests/frp.nix25
-rw-r--r--nixos/tests/frr.nix2
-rw-r--r--nixos/tests/ft2-clone.nix4
-rw-r--r--nixos/tests/gitdaemon.nix3
-rw-r--r--nixos/tests/gitlab.nix2
-rw-r--r--nixos/tests/gns3-server.nix55
-rw-r--r--nixos/tests/google-oslogin/default.nix3
-rw-r--r--nixos/tests/guix/basic.nix42
-rw-r--r--nixos/tests/guix/default.nix8
-rw-r--r--nixos/tests/guix/publish.nix96
-rw-r--r--nixos/tests/guix/scripts/add-existing-files-to-store.scm52
-rw-r--r--nixos/tests/guix/scripts/create-file-to-store.scm8
-rw-r--r--nixos/tests/haproxy.nix109
-rw-r--r--nixos/tests/harmonia.nix3
-rw-r--r--nixos/tests/home-assistant.nix4
-rw-r--r--nixos/tests/hostname.nix1
-rw-r--r--nixos/tests/incron.nix6
-rw-r--r--nixos/tests/incus/container.nix8
-rw-r--r--nixos/tests/incus/default.nix5
-rw-r--r--nixos/tests/incus/lxd-to-incus.nix112
-rw-r--r--nixos/tests/incus/preseed.nix4
-rw-r--r--nixos/tests/incus/socket-activated.nix4
-rw-r--r--nixos/tests/incus/virtual-machine.nix7
-rw-r--r--nixos/tests/initrd-network-openvpn/default.nix15
-rw-r--r--nixos/tests/input-remapper.nix5
-rw-r--r--nixos/tests/installed-tests/flatpak.nix1
-rw-r--r--nixos/tests/installer-systemd-stage-1.nix5
-rw-r--r--nixos/tests/installer.nix407
-rw-r--r--nixos/tests/invidious.nix120
-rw-r--r--nixos/tests/invoiceplane.nix104
-rw-r--r--nixos/tests/iscsi-root.nix4
-rw-r--r--nixos/tests/kanidm.nix1
-rw-r--r--nixos/tests/kerberos/heimdal.nix23
-rw-r--r--nixos/tests/kerberos/mit.nix23
-rw-r--r--nixos/tests/kernel-generic.nix1
-rw-r--r--nixos/tests/krb5/default.nix1
-rw-r--r--nixos/tests/krb5/deprecated-config.nix50
-rw-r--r--nixos/tests/krb5/example-config.nix140
-rw-r--r--nixos/tests/kubo/default.nix4
-rw-r--r--nixos/tests/kubo/kubo.nix7
-rw-r--r--nixos/tests/lemmy.nix2
-rw-r--r--nixos/tests/livebook-service.nix4
-rw-r--r--nixos/tests/lvm2/systemd-stage-1.nix4
-rw-r--r--nixos/tests/lvm2/thinpool.nix2
-rw-r--r--nixos/tests/lvm2/vdo.nix4
-rw-r--r--nixos/tests/lxd/container.nix4
-rw-r--r--nixos/tests/lxd/nftables.nix6
-rw-r--r--nixos/tests/lxd/preseed.nix2
-rw-r--r--nixos/tests/lxd/ui.nix4
-rw-r--r--nixos/tests/lxd/virtual-machine.nix4
-rw-r--r--nixos/tests/mate.nix27
-rw-r--r--nixos/tests/miriway.nix2
-rw-r--r--nixos/tests/mobilizon.nix4
-rw-r--r--nixos/tests/mongodb.nix2
-rw-r--r--nixos/tests/munin.nix2
-rw-r--r--nixos/tests/musescore.nix7
-rw-r--r--nixos/tests/mympd.nix27
-rw-r--r--nixos/tests/mysql/mariadb-galera.nix4
-rw-r--r--nixos/tests/mysql/mysql-backup.nix3
-rw-r--r--nixos/tests/mysql/mysql-replication.nix4
-rw-r--r--nixos/tests/mysql/mysql.nix4
-rw-r--r--nixos/tests/networking.nix3
-rw-r--r--nixos/tests/nextcloud/basic.nix10
-rw-r--r--nixos/tests/nextcloud/default.nix2
-rw-r--r--nixos/tests/nextcloud/with-postgresql-and-redis.nix2
-rw-r--r--nixos/tests/nfs/kerberos.nix23
-rw-r--r--nixos/tests/nginx-etag-compression.nix45
-rw-r--r--nixos/tests/nginx-http3.nix182
-rw-r--r--nixos/tests/nginx-moreheaders.nix37
-rw-r--r--nixos/tests/nginx-redirectcode.nix25
-rw-r--r--nixos/tests/nginx-variants.nix10
-rw-r--r--nixos/tests/nitter.nix21
-rw-r--r--nixos/tests/nixos-rebuild-specialisations.nix2
-rw-r--r--nixos/tests/nixos-rebuild-target-host.nix136
-rw-r--r--nixos/tests/nixseparatedebuginfod.nix80
-rw-r--r--nixos/tests/npmrc.nix22
-rw-r--r--nixos/tests/ntfy-sh-migration.nix77
-rw-r--r--nixos/tests/ntpd-rs.nix51
-rw-r--r--nixos/tests/oci-containers.nix2
-rw-r--r--nixos/tests/opensmtpd-rspamd.nix1
-rw-r--r--nixos/tests/opensmtpd.nix1
-rw-r--r--nixos/tests/openssh.nix28
-rw-r--r--nixos/tests/opentabletdriver.nix4
-rw-r--r--nixos/tests/os-prober.nix2
-rw-r--r--nixos/tests/owncast.nix2
-rw-r--r--nixos/tests/pam/pam-file-contents.nix2
-rw-r--r--nixos/tests/pantheon.nix34
-rw-r--r--nixos/tests/paperless.nix2
-rw-r--r--nixos/tests/pgadmin4.nix61
-rw-r--r--nixos/tests/pgjwt.nix2
-rw-r--r--nixos/tests/podman/default.nix2
-rw-r--r--nixos/tests/postgis.nix7
-rw-r--r--nixos/tests/prometheus-exporters.nix59
-rw-r--r--nixos/tests/promscale.nix2
-rw-r--r--nixos/tests/prowlarr.nix4
-rw-r--r--nixos/tests/qemu-vm-restrictnetwork.nix2
-rw-r--r--nixos/tests/quicktun.nix18
-rw-r--r--nixos/tests/rspamd-trainer.nix155
-rw-r--r--nixos/tests/rss2email.nix1
-rw-r--r--nixos/tests/slimserver.nix4
-rw-r--r--nixos/tests/snmpd.nix23
-rw-r--r--nixos/tests/sogo.nix2
-rw-r--r--nixos/tests/spark/default.nix66
-rw-r--r--nixos/tests/ssh-agent-auth.nix55
-rw-r--r--nixos/tests/ssh-audit.nix1
-rw-r--r--nixos/tests/stub-ld.nix73
-rw-r--r--nixos/tests/stunnel.nix4
-rw-r--r--nixos/tests/suwayomi-server.nix46
-rw-r--r--nixos/tests/sway.nix2
-rw-r--r--nixos/tests/sysinit-reactivation.nix107
-rw-r--r--nixos/tests/systemd-boot.nix2
-rw-r--r--nixos/tests/systemd-initrd-networkd.nix3
-rw-r--r--nixos/tests/systemd-journal-gateway.nix90
-rw-r--r--nixos/tests/systemd-journal-upload.nix101
-rw-r--r--nixos/tests/systemd-journal.nix8
-rw-r--r--nixos/tests/systemd-networkd-dhcpserver.nix3
-rw-r--r--nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix8
-rw-r--r--nixos/tests/systemd-nspawn.nix1
-rw-r--r--nixos/tests/systemd-timesyncd-nscd-dnssec.nix61
-rw-r--r--nixos/tests/systemtap.nix50
-rw-r--r--nixos/tests/tayga.nix2
-rw-r--r--nixos/tests/telegraf.nix1
-rw-r--r--nixos/tests/teleport.nix3
-rw-r--r--nixos/tests/terminal-emulators.nix7
-rw-r--r--nixos/tests/timescaledb.nix2
-rw-r--r--nixos/tests/tomcat.nix2
-rw-r--r--nixos/tests/trafficserver.nix1
-rw-r--r--nixos/tests/tsja.nix2
-rw-r--r--nixos/tests/tsm-client-gui.nix6
-rw-r--r--nixos/tests/typesense.nix3
-rw-r--r--nixos/tests/ulogd/ulogd.py1
-rw-r--r--nixos/tests/unbound.nix16
-rw-r--r--nixos/tests/upnp.nix7
-rw-r--r--nixos/tests/uptermd.nix1
-rw-r--r--nixos/tests/varnish.nix6
-rw-r--r--nixos/tests/watchdogd.nix22
-rw-r--r--nixos/tests/xrdp-with-audio-pulseaudio.nix97
-rw-r--r--nixos/tests/zammad.nix9
-rw-r--r--nixos/tests/zfs.nix3
-rw-r--r--nixos/tests/zrepl.nix1
544 files changed, 12743 insertions, 3239 deletions
diff --git a/nixos/doc/manual/administration/imperative-containers.section.md b/nixos/doc/manual/administration/imperative-containers.section.md
index f45991780c4b9..852305ad81486 100644
--- a/nixos/doc/manual/administration/imperative-containers.section.md
+++ b/nixos/doc/manual/administration/imperative-containers.section.md
@@ -77,7 +77,7 @@ Linux foo 3.4.82 #1-NixOS SMP Thu Mar 20 14:44:05 UTC 2014 x86_64 GNU/Linux
 
 There are several ways to change the configuration of the container.
 First, on the host, you can edit
-`/var/lib/container/name/etc/nixos/configuration.nix`, and run
+`/var/lib/nixos-containers/foo/etc/nixos/configuration.nix`, and run
 
 ```ShellSession
 # nixos-container update foo
diff --git a/nixos/doc/manual/administration/nixos-state.section.md b/nixos/doc/manual/administration/nixos-state.section.md
new file mode 100644
index 0000000000000..9819d613198c3
--- /dev/null
+++ b/nixos/doc/manual/administration/nixos-state.section.md
@@ -0,0 +1,28 @@
+# NixOS {#sec-nixos-state}
+
+## `/nix` {#sec-state-nix}
+
+NixOS needs the entirety of `/nix` to be persistent, as it includes:
+- `/nix/store`, which contains all the system's executables, libraries, and supporting data;
+- `/nix/var/nix`, which contains:
+  - the Nix daemon's database;
+  - roots whose transitive closure is preserved when garbage-collecting the Nix store;
+  - system-wide and per-user profiles.
+
+## `/boot` {#sec-state-boot}
+
+`/boot` should also be persistent, as it contains:
+- the kernel and initrd which the bootloader loads,
+- the bootloader's configuration, including the kernel's command-line which
+  determines the store path to use as system environment.
+
+
+## Users and groups {#sec-state-users}
+
+- `/var/lib/nixos` should persist: it holds state needed to generate stable
+  uids and gids for declaratively-managed users and groups, etc.
+- `users.mutableUsers` should be false (see the sketch after this list), *or*
+  the following files under `/etc` should all persist:
+  - {manpage}`passwd(5)` and {manpage}`group(5)`,
+  - {manpage}`shadow(5)` and {manpage}`gshadow(5)`,
+  - {manpage}`subuid(5)` and {manpage}`subgid(5)`.
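+
+For example, a minimal sketch of a configuration with immutable,
+declaratively-managed users (the user name and password file path are only
+illustrative):
+
+```nix
+{
+  users.mutableUsers = false;
+  users.users.alice = {
+    isNormalUser = true;
+    extraGroups = [ "wheel" ];
+    # With mutableUsers = false, passwords must be provided declaratively,
+    # e.g. via hashedPassword or hashedPasswordFile (example path below).
+    hashedPasswordFile = "/var/lib/secrets/alice.passwd";
+  };
+}
+```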
diff --git a/nixos/doc/manual/administration/running.md b/nixos/doc/manual/administration/running.md
index 48e8c7c6668b7..83412d9b7af58 100644
--- a/nixos/doc/manual/administration/running.md
+++ b/nixos/doc/manual/administration/running.md
@@ -8,6 +8,7 @@ rebooting.chapter.md
 user-sessions.chapter.md
 control-groups.chapter.md
 logging.chapter.md
+system-state.chapter.md
 cleaning-store.chapter.md
 containers.chapter.md
 troubleshooting.chapter.md
diff --git a/nixos/doc/manual/administration/system-state.chapter.md b/nixos/doc/manual/administration/system-state.chapter.md
new file mode 100644
index 0000000000000..6840cc3902578
--- /dev/null
+++ b/nixos/doc/manual/administration/system-state.chapter.md
@@ -0,0 +1,17 @@
+# Necessary system state {#ch-system-state}
+
+Normally — on systems with a persistent `rootfs` — system services can persist state to
+the filesystem without administrator intervention.
+
+However, it is possible and not uncommon to create [impermanent systems], whose
+`rootfs` is either a `tmpfs` or reset during boot. While NixOS itself supports
+this kind of configuration, special care needs to be taken.
+
+[impermanent systems]: https://nixos.wiki/wiki/Impermanence
+
+
+```{=include=} sections
+nixos-state.section.md
+systemd-state.section.md
+zfs-state.section.md
+```
diff --git a/nixos/doc/manual/administration/systemd-state.section.md b/nixos/doc/manual/administration/systemd-state.section.md
new file mode 100644
index 0000000000000..84f074871a655
--- /dev/null
+++ b/nixos/doc/manual/administration/systemd-state.section.md
@@ -0,0 +1,52 @@
+# systemd {#sec-systemd-state}
+
+## `machine-id(5)` {#sec-machine-id}
+
+`systemd` uses a per-machine identifier — {manpage}`machine-id(5)` — which must be
+unique and persistent; otherwise, the system journal may fail to list earlier
+boots, etc.
+
+`systemd` generates a random `machine-id(5)` during boot if it does not already exist,
+and persists it in `/etc/machine-id`.  As such, it suffices to make that file persistent.
+
+Alternatively, it is possible to generate a `machine-id(5)` ahead of time. While
+the specification allows *any* hex-encoded 128-bit value, systemd itself uses
+[UUIDv4], *i.e.* random UUIDs, so it is preferable to do the same, in case some
+software assumes `machine-id(5)` to be a UUIDv4. Such an identifier can be
+generated with `uuidgen -r | tr -d -` (`tr` removes the dashes).
+
+Such a `machine-id(5)` can be set by writing it to `/etc/machine-id` or through
+the kernel's command-line, though NixOS' systemd maintainers [discourage] the
+latter approach.
+
+[UUIDv4]: https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)
+[discourage]: https://github.com/NixOS/nixpkgs/pull/268995
+
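+For instance, a fresh identifier can be generated and written in place (a
+sketch; on an impermanent system, `/etc/machine-id` must then be made
+persistent as described above):
+
+```ShellSession
+# uuidgen -r | tr -d - > /etc/machine-id
+```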
+
+## `/var/lib/systemd` {#sec-var-systemd}
+
+Moreover, `systemd` expects its state directory — `/var/lib/systemd` — to persist, for:
+- {manpage}`systemd-random-seed(8)`, which loads a 256-bit “seed” into the kernel's RNG
+  at boot time, and saves a fresh one during shutdown;
+- {manpage}`systemd.timer(5)` units with `Persistent=yes`, which are run after boot if
+  they would have triggered while the system was shut down (see the sketch after
+  this list);
+- {manpage}`systemd-coredump(8)`, which stores core dumps there by default
+  (see {manpage}`coredump.conf(5)`);
+- {manpage}`systemd-timesyncd(8)`;
+- {manpage}`systemd-backlight(8)` and {manpage}`systemd-rfkill(8)`, which persist
+  hardware-related state;
+- possibly other things; this list is not meant to be exhaustive.
+
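+As a sketch, a NixOS-defined timer relying on this state might look like the
+following (the unit name is hypothetical, and a matching
+`systemd.services.my-backup` unit is assumed to exist):
+
+```nix
+{
+  systemd.timers.my-backup = {
+    wantedBy = [ "timers.target" ];
+    timerConfig = {
+      OnCalendar = "daily";
+      # With Persistent=true, the last trigger time is recorded under
+      # /var/lib/systemd/timers, and missed activations are run after boot.
+      Persistent = true;
+    };
+  };
+}
+```
+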
+In any case, making `/var/lib/systemd` persistent is recommended.
+
+
+## `/var/log/journal/{machine-id}` {#sec-var-journal}
+
+Lastly, {manpage}`systemd-journald(8)` writes the system's journal in binary
+form to `/var/log/journal/{machine-id}`; if (locally) persisting the entire log
+is desired, it is recommended to make all of `/var/log/journal` persistent.
+
+If not, one can set `Storage=volatile` in {manpage}`journald.conf(5)`
+([`services.journald.storage = "volatile";`](#opt-services.journald.storage)),
+which disables journal persistence and causes it to be written to
+`/run/log/journal`.
diff --git a/nixos/doc/manual/administration/zfs-state.section.md b/nixos/doc/manual/administration/zfs-state.section.md
new file mode 100644
index 0000000000000..11ad5badea7ed
--- /dev/null
+++ b/nixos/doc/manual/administration/zfs-state.section.md
@@ -0,0 +1,16 @@
+# ZFS {#sec-zfs-state}
+
+When using ZFS, `/etc/zfs/zpool.cache` should be persistent (or a symlink to a persistent
+location) as it is the default value for the `cachefile` [property](man:zpoolprops(7)).
+
+This cachefile is used on system startup to discover ZFS pools. Pools holding
+the `rootfs` and/or early-boot datasets such as `/nix` are imported earlier in
+the boot process anyway, and can therefore be set to `cachefile=none`.
+
+In principle, if there are no other pools attached to the system, `zpool.cache`
+does not need to be persisted; it is however *strongly recommended* to persist
+it, in case additional pools are added later on, temporarily or permanently:
+
+While mishandling the cachefile does not lead to data loss by itself, it may
+cause zpools not to be imported during boot, and services may then write to a
+location where a dataset was expected to be mounted.
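+
+As a sketch, the `cachefile` property can be inspected or adjusted per pool
+(the pool names `rpool` and `tank` are only placeholders):
+
+```ShellSession
+# zpool get cachefile rpool
+# zpool set cachefile=none rpool
+# zpool set cachefile=/etc/zfs/zpool.cache tank
+```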
diff --git a/nixos/doc/manual/configuration/gpu-accel.chapter.md b/nixos/doc/manual/configuration/gpu-accel.chapter.md
index dfccdf291b736..aa63aec61669b 100644
--- a/nixos/doc/manual/configuration/gpu-accel.chapter.md
+++ b/nixos/doc/manual/configuration/gpu-accel.chapter.md
@@ -65,12 +65,10 @@ hardware.opengl.extraPackages = [
 [Intel Gen8 and later
 GPUs](https://en.wikipedia.org/wiki/List_of_Intel_graphics_processing_units#Gen8)
 are supported by the Intel NEO OpenCL runtime that is provided by the
-intel-compute-runtime package. For Gen7 GPUs, the deprecated Beignet
-runtime can be used, which is provided by the beignet package. The
-proprietary Intel OpenCL runtime, in the intel-ocl package, is an
-alternative for Gen7 GPUs.
+intel-compute-runtime package. The proprietary Intel OpenCL runtime, in
+the intel-ocl package, is an alternative for Gen7 GPUs.
 
-The intel-compute-runtime, beignet, or intel-ocl package can be added to
+The intel-compute-runtime or intel-ocl package can be added to
 [](#opt-hardware.opengl.extraPackages)
 to enable OpenCL support. For example, for Gen8 and later GPUs, the following
 configuration can be used:
diff --git a/nixos/doc/manual/configuration/luks-file-systems.section.md b/nixos/doc/manual/configuration/luks-file-systems.section.md
index b5d0407d16595..7615b95aef422 100644
--- a/nixos/doc/manual/configuration/luks-file-systems.section.md
+++ b/nixos/doc/manual/configuration/luks-file-systems.section.md
@@ -42,8 +42,12 @@ boot.loader.grub.enableCryptodisk = true;
 
 ## FIDO2 {#sec-luks-file-systems-fido2}
 
-NixOS also supports unlocking your LUKS-Encrypted file system using a
-FIDO2 compatible token. In the following example, we will create a new
+NixOS also supports unlocking your LUKS-encrypted file system using a
+FIDO2-compatible token.
+
+### Without systemd in initrd {#sec-luks-file-systems-fido2-legacy}
+
+In the following example, we will create a new
 FIDO2 credential and add it as a new key to our existing device
 `/dev/sda2`:
 
@@ -75,3 +79,37 @@ as [Trezor](https://trezor.io/).
 ```nix
 boot.initrd.luks.devices."/dev/sda2".fido2.passwordLess = true;
 ```
+
+### systemd Stage 1 {#sec-luks-file-systems-fido2-systemd}
+
+If systemd stage 1 is enabled, it handles unlocking of LUKS-encrypted volumes
+during boot. The following example enables systemd stage 1 and adds support for
+unlocking the existing LUKS2 volume `root` using any enrolled FIDO2-compatible
+tokens.
+
+```nix
+boot.initrd = {
+  luks.devices.root = {
+    crypttabExtraOpts = [ "fido2-device=auto" ];
+    device = "/dev/sda2";
+  };
+  systemd.enable = true;
+};
+```
+
+All tokens that should be used for unlocking the LUKS2-encrypted volume must
+first be enrolled using [systemd-cryptenroll](https://www.freedesktop.org/software/systemd/man/systemd-cryptenroll.html).
+In the following example, a new key slot for the first discovered token is
+added to the LUKS volume.
+
+```ShellSession
+# systemd-cryptenroll --fido2-device=auto /dev/sda2
+```
+
+Existing key slots are left intact, unless `--wipe-slot=` is specified. It is
+recommended to also add a recovery key, which should be stored in a secure
+physical location and can be entered wherever a password would be entered.
+
+```ShellSession
+# systemd-cryptenroll --recovery-key /dev/sda2
+```
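+
+To check which key slots and FIDO2 tokens are currently enrolled, the LUKS2
+header can be inspected (a sketch, using the same device as in the examples
+above):
+
+```ShellSession
+# cryptsetup luksDump /dev/sda2
+```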
diff --git a/nixos/doc/manual/development/option-types.section.md b/nixos/doc/manual/development/option-types.section.md
index 2ad3d6c4f9495..f9c7ac80018e4 100644
--- a/nixos/doc/manual/development/option-types.section.md
+++ b/nixos/doc/manual/development/option-types.section.md
@@ -13,6 +13,13 @@ merging is handled.
 `types.bool`
 
 :   A boolean, its values can be `true` or `false`.
+    All definitions must have the same value once priorities are applied; an error is thrown in case of a conflict.
+
+`types.boolByOr`
+
+:   A boolean, its values can be `true` or `false`.
+    The result is `true` if _any_ of multiple definitions is `true`.
+    In other words, definitions are merged with the logical _OR_ operator.
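+
+    A minimal sketch of this merging behaviour, using a hypothetical option
+    name:
+
+    ```nix
+    { lib, ... }: {
+      options.example.enable = lib.mkOption {
+        type = lib.types.boolByOr;
+        default = false;
+      };
+      # If one module sets `example.enable = true;` and another sets it to
+      # `false`, the merged value is `true`.
+    }
+    ```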
 
 `types.path`
 
diff --git a/nixos/doc/manual/development/unit-handling.section.md b/nixos/doc/manual/development/unit-handling.section.md
index 32d44dbfff054..d5ba6a9529d01 100644
--- a/nixos/doc/manual/development/unit-handling.section.md
+++ b/nixos/doc/manual/development/unit-handling.section.md
@@ -63,3 +63,42 @@ checks:
     is **restart**ed with the others. If it is set, both the service and the
     socket are **stop**ped and the socket is **start**ed, leaving socket
     activation to start the service when it's needed.
+
+## Sysinit reactivation {#sec-sysinit-reactivation}
+
+[`sysinit.target`](https://www.freedesktop.org/software/systemd/man/latest/systemd.special.html#sysinit.target)
+is a systemd target that encodes system initialization (i.e. early startup). A
+few units that need to run very early in the bootup process are ordered to
+finish before this target is reached. Probably the most notable one of these is
+`systemd-tmpfiles-setup.service`. We will refer to these units as "sysinit
+units".
+
+"Normal" systemd units, by default, are ordered AFTER `sysinit.target`. In
+other words, these "normal" units expect all services ordered before
+`sysinit.target` to have finished without explicitly declaring this dependency
+relationship for each individual unit. See the [systemd
+bootup documentation](https://www.freedesktop.org/software/systemd/man/latest/bootup.html)
+for more details on the bootup process.
+
+When restarting both a unit ordered before `sysinit.target` and one ordered
+after it, this presents a problem: because the two do not explicitly declare
+their dependency relations, they would be started at the same time.
+
+To solve this, NixOS has an artificial `sysinit-reactivation.target` which
+allows you to ensure that services ordered before `sysinit.target` are
+restarted correctly. This applies both to the ordering among these sysinit
+units and to ensuring that sysinit units are restarted before "normal"
+units.
+
+To make an existing sysinit service restart correctly during system switch, you
+have to declare:
+
+```nix
+systemd.services.my-sysinit = {
+  requiredBy = [ "sysinit-reactivation.target" ];
+  before = [ "sysinit-reactivation.target" ];
+  restartTriggers = [ config.environment.etc."my-sysinit.d".source ];
+};
+```
+
+You need to configure appropriate `restartTriggers` specific to your service.
diff --git a/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md b/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md
index ccadb819e061d..5d17a9c98514c 100644
--- a/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md
+++ b/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md
@@ -37,7 +37,7 @@ of actions is always the same:
 - Forget about the failed state of units (`systemctl reset-failed`)
 - Reload systemd (`systemctl daemon-reload`)
 - Reload systemd user instances (`systemctl --user daemon-reload`)
-- Set up tmpfiles (`systemd-tmpfiles --create`)
+- Reactivate sysinit (`systemctl restart sysinit-reactivation.target`)
 - Reload units (`systemctl reload`)
 - Restart units (`systemctl restart`)
 - Start units (`systemctl start`)
diff --git a/nixos/doc/manual/installation/changing-config.chapter.md b/nixos/doc/manual/installation/changing-config.chapter.md
index 12abf90b718fd..9e56b15a880f6 100644
--- a/nixos/doc/manual/installation/changing-config.chapter.md
+++ b/nixos/doc/manual/installation/changing-config.chapter.md
@@ -55,6 +55,14 @@ which causes the new configuration (and previous ones created using
 This can be useful to separate test configurations from "stable"
 configurations.
 
+A REPL, or read-eval-print loop, is also available. You can inspect your configuration and use the Nix language with
+
+```ShellSession
+# nixos-rebuild repl
+```
+
+Your configuration is loaded into the `config` variable. Use tab for autocompletion, and use the `:r` command to reload the configuration files. See `:?` or [`nix repl` in the Nix manual](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-repl.html) to learn more.
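+
+For instance, a short session might look like this (the output shown is only
+illustrative and depends on your configuration):
+
+```ShellSession
+nix-repl> config.networking.hostName
+"mymachine"
+
+nix-repl> :r
+```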
+
 Finally, you can do
 
 ```ShellSession
diff --git a/nixos/doc/manual/installation/installing-usb.section.md b/nixos/doc/manual/installation/installing-usb.section.md
index adfe22ea2f00e..3b9e2f492f04d 100644
--- a/nixos/doc/manual/installation/installing-usb.section.md
+++ b/nixos/doc/manual/installation/installing-usb.section.md
@@ -35,7 +35,7 @@ select the image, select the USB flash drive and click "Write".
 4. Then use the `dd` utility to write the image to the USB flash drive.
 
   ```ShellSession
-  sudo dd if=<path-to-image> of=/dev/sdX bs=4M conv=fsync
+  sudo dd bs=4M conv=fsync oflag=direct status=progress if=<path-to-image> of=/dev/sdX
   ```
 
 ## Creating bootable USB flash drive from a Terminal on macOS {#sec-booting-from-usb-macos}
diff --git a/nixos/doc/manual/release-notes/release-notes.md b/nixos/doc/manual/release-notes/release-notes.md
index 3f926fb21a5ca..0514a1b0044af 100644
--- a/nixos/doc/manual/release-notes/release-notes.md
+++ b/nixos/doc/manual/release-notes/release-notes.md
@@ -3,6 +3,7 @@
 This section lists the release notes for each stable version of NixOS and current unstable revision.
 
 ```{=include=} sections
+rl-2405.section.md
 rl-2311.section.md
 rl-2305.section.md
 rl-2211.section.md
diff --git a/nixos/doc/manual/release-notes/rl-2111.section.md b/nixos/doc/manual/release-notes/rl-2111.section.md
index 400eb1062d9a7..8edf4fd35e4fb 100644
--- a/nixos/doc/manual/release-notes/rl-2111.section.md
+++ b/nixos/doc/manual/release-notes/rl-2111.section.md
@@ -100,7 +100,7 @@ In addition to numerous new and upgraded packages, this release has the followin
 - [opensnitch](https://github.com/evilsocket/opensnitch), an application firewall. Available as [services.opensnitch](#opt-services.opensnitch.enable).
 
 - [snapraid](https://www.snapraid.it/), a backup program for disk arrays.
-  Available as [snapraid](#opt-snapraid.enable).
+  Available as [snapraid](#opt-services.snapraid.enable).
 
 - [Hockeypuck](https://github.com/hockeypuck/hockeypuck), a OpenPGP Key Server. Available as [services.hockeypuck](#opt-services.hockeypuck.enable).
 
diff --git a/nixos/doc/manual/release-notes/rl-2311.section.md b/nixos/doc/manual/release-notes/rl-2311.section.md
index 760c58d5050e3..1aef1828908f8 100644
--- a/nixos/doc/manual/release-notes/rl-2311.section.md
+++ b/nixos/doc/manual/release-notes/rl-2311.section.md
@@ -20,7 +20,7 @@ Make sure to also check the many updates in the [Nixpkgs library](#sec-release-2
   - [Breaking Changes](#sec-release-23.11-nixos-breaking-changes)
   - [New Services](#sec-release-23.11-nixos-new-services)
   - [Other Notable Changes](#sec-release-23.11-nixos-notable-changes)
-- [Nixpkgs Library Changes](#sec-release-23.11-nixpkgs-lib)
+- [Nixpkgs Library](#sec-release-23.11-nixpkgs-lib)
   - [Breaking Changes](#sec-release-23.11-lib-breaking)
   - [Additions and Improvements](#sec-release-23.11-lib-additions-improvements)
   - [Deprecations](#sec-release-23.11-lib-deprecations)
@@ -71,7 +71,9 @@ Make sure to also check the many updates in the [Nixpkgs library](#sec-release-2
 - `services.mastodon` doesn't support providing a TCP port to its `streaming`
   component anymore, as upstream implemented parallelization by running
   multiple instances instead of running multiple processes in one instance.
-  Please create a PR if you are interested in this feature.
+  Please create a PR if you are interested in this feature.\
+  Due to this, the desired number of such instances,
+  {option}`services.mastodon.streamingProcesses`, now needs to be declared explicitly.
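+
+  A minimal sketch of the new setting (the value shown is only an example):
+
+  ```nix
+  services.mastodon.streamingProcesses = 3;
+  ```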
 
 - The `services.hostapd` module was rewritten to support `passwordFile` like
   options, WPA3-SAE, and management of multiple interfaces. This breaks
@@ -1311,18 +1313,26 @@ Make sure to also check the many updates in the [Nixpkgs library](#sec-release-2
 - When using [split parity files](https://www.snapraid.it/manual#7.1) in `snapraid`,
   the snapraid-sync systemd service will no longer fail to run.
 
+- `wpa_supplicant`'s configuration file cannot be read by non-root users, and
+  secrets (such as Pre-Shared Keys) can safely be passed via
+  `networking.wireless.environmentFile`.
+
+  Previously, when `userControlled.enable` was set (it is off by default), the
+  configuration file could be read by users who are in both `wheel` and
+  `userControlled.group` (which defaults to `wheel`).
+
+
 ## Nixpkgs Library {#sec-release-23.11-nixpkgs-lib}
 
 ### Breaking Changes {#sec-release-23.11-lib-breaking}
 
-- [`lib.lists.foldl'`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.foldl-prime)
+- [`lib.lists.foldl'`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.foldl-prime)
   now always evaluates the initial accumulator argument first. If you depend on
   the lazier behavior, consider using
-  [`lib.lists.foldl`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.foldl)
+  [`lib.lists.foldl`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.foldl)
   or
   [`builtins.foldl'`](https://nixos.org/manual/nix/stable/language/builtins.html#builtins-foldl')
   instead.
-- [`lib.attrsets.foldlAttrs`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.attrsets.foldlAttrs)
+- [`lib.attrsets.foldlAttrs`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.attrsets.foldlAttrs)
   now always evaluates the initial accumulator argument first.
 - Now that the internal NixOS transition to Markdown documentation is complete,
   `lib.options.literalDocBook` has been removed after deprecation in 22.11.
@@ -1330,7 +1340,7 @@ Make sure to also check the many updates in the [Nixpkgs library](#sec-release-2
 
 ### Additions and Improvements {#sec-release-23.11-lib-additions-improvements}
 
-- [`lib.fileset`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-fileset):
+- [`lib.fileset`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-fileset):
   A new sub-library to select local files to use for sources, designed to be
   easy and safe to use.
 
@@ -1339,7 +1349,7 @@ Make sure to also check the many updates in the [Nixpkgs library](#sec-release-2
   post](https://www.tweag.io/blog/2023-11-28-file-sets/) or [the
   tutorial](https://nix.dev/tutorials/file-sets).
 
-- [`lib.gvariant`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-gvariant):
+- [`lib.gvariant`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-gvariant):
   A partial and basic implementation of GVariant formatted strings. See
   [GVariant Format
   Strings](https://docs.gtk.org/glib/gvariant-format-strings.html) for details.
@@ -1349,58 +1359,58 @@ Make sure to also check the many updates in the [Nixpkgs library](#sec-release-2
   change in backwards incompatible ways without prior notice.
   :::
 
-- [`lib.asserts`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-asserts):
+- [`lib.asserts`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-asserts):
   New function:
-  [`assertEachOneOf`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.asserts.assertEachOneOf).
-- [`lib.attrsets`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-attrsets):
+  [`assertEachOneOf`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.asserts.assertEachOneOf).
+- [`lib.attrsets`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-attrsets):
   New function:
-  [`attrsToList`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.attrsets.attrsToList).
-- [`lib.customisation`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-customisation):
+  [`attrsToList`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.attrsets.attrsToList).
+- [`lib.customisation`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-customisation):
   New function:
-  [`makeScopeWithSplicing'`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.customisation.makeScopeWithSplicing-prime).
-- [`lib.fixedPoints`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-fixedPoints):
+  [`makeScopeWithSplicing'`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.customisation.makeScopeWithSplicing-prime).
+- [`lib.fixedPoints`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-fixedPoints):
   Documentation improvements for
-  [`lib.fixedPoints.fix`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.fixedPoints.fix).
+  [`lib.fixedPoints.fix`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.fixedPoints.fix).
 - `lib.generators`: New functions:
-  [`mkDconfKeyValue`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.generators.mkDconfKeyValue),
-  [`toDconfINI`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.generators.toDconfINI).
+  [`mkDconfKeyValue`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.generators.mkDconfKeyValue),
+  [`toDconfINI`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.generators.toDconfINI).
 
   `lib.generators.toKeyValue` now supports the `indent` attribute in its first
   argument.
-- [`lib.lists`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-lists):
+- [`lib.lists`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-lists):
   New functions:
-  [`findFirstIndex`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.findFirstIndex),
-  [`hasPrefix`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.hasPrefix),
-  [`removePrefix`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.removePrefix),
-  [`commonPrefix`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.commonPrefix),
-  [`allUnique`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.allUnique).
+  [`findFirstIndex`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.findFirstIndex),
+  [`hasPrefix`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.hasPrefix),
+  [`removePrefix`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.removePrefix),
+  [`commonPrefix`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.commonPrefix),
+  [`allUnique`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.allUnique).
 
   Documentation improvements for
-  [`lib.lists.foldl'`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.lists.foldl-prime).
-- [`lib.meta`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-meta):
+  [`lib.lists.foldl'`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.foldl-prime).
+- [`lib.meta`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-meta):
   Documentation of functions now gets rendered
-- [`lib.path`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-path):
+- [`lib.path`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-path):
   New functions:
-  [`hasPrefix`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.path.hasPrefix),
-  [`removePrefix`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.path.removePrefix),
-  [`splitRoot`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.path.splitRoot),
-  [`subpath.components`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.path.subpath.components).
-- [`lib.strings`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-strings):
+  [`hasPrefix`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.path.hasPrefix),
+  [`removePrefix`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.path.removePrefix),
+  [`splitRoot`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.path.splitRoot),
+  [`subpath.components`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.path.subpath.components).
+- [`lib.strings`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-strings):
   New functions:
-  [`replicate`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.strings.replicate),
-  [`cmakeOptionType`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.strings.cmakeOptionType),
-  [`cmakeBool`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.strings.cmakeBool),
-  [`cmakeFeature`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.strings.cmakeFeature).
-- [`lib.trivial`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-trivial):
+  [`replicate`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.strings.replicate),
+  [`cmakeOptionType`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.strings.cmakeOptionType),
+  [`cmakeBool`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.strings.cmakeBool),
+  [`cmakeFeature`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.strings.cmakeFeature).
+- [`lib.trivial`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-trivial):
   New function:
-  [`mirrorFunctionArgs`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.trivial.mirrorFunctionArgs).
+  [`mirrorFunctionArgs`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.trivial.mirrorFunctionArgs).
 - `lib.systems`: New function:
-  [`equals`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.systems.equals).
-- [`lib.options`](https://nixos.org/manual/nixpkgs/unstable#sec-functions-library-options):
+  [`equals`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.systems.equals).
+- [`lib.options`](https://nixos.org/manual/nixpkgs/stable#sec-functions-library-options):
   Improved documentation for
-  [`mkPackageOption`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.options.mkPackageOption).
+  [`mkPackageOption`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.options.mkPackageOption).
 
-  [`mkPackageOption`](https://nixos.org/manual/nixpkgs/unstable#function-library-lib.options.mkPackageOption).
+  [`mkPackageOption`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.options.mkPackageOption)
   now also supports the `pkgsText` attribute.
 
 Module system:
diff --git a/nixos/doc/manual/release-notes/rl-2405.section.md b/nixos/doc/manual/release-notes/rl-2405.section.md
index 749ebc5cb13b6..f4434fd6b94ca 100644
--- a/nixos/doc/manual/release-notes/rl-2405.section.md
+++ b/nixos/doc/manual/release-notes/rl-2405.section.md
@@ -8,34 +8,225 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
 
-- Create the first release note entry in this section!
+- The `screen` module has been cleaned up and now requires you to set `programs.screen.enable` in order to populate `screenrc` and add the program to the environment.
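+
+  For example, a minimal configuration (the `screenrc` contents below are only illustrative):
+
+  ```nix
+  programs.screen = {
+    enable = true;
+    screenrc = ''
+      defscrollback 10000
+    '';
+  };
+  ```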
+
+- `linuxPackages_testing_bcachefs` has been superseded by `linuxPackages_testing` and is therefore no longer available.
+
+- NixOS now installs a stub ELF loader that prints an informative error message when users attempt to run binaries not made for NixOS.
+   - This can be disabled through the `environment.stub-ld.enable` option.
+   - If you use `programs.nix-ld.enable`, no changes are needed. The stub will be disabled automatically.
+
+- Julia environments can now be built with arbitrary packages from the ecosystem using the `.withPackages` function. For example: `julia.withPackages ["Plots"]`.
 
 ## New Services {#sec-release-24.05-new-services}
 
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
 
+- [Guix](https://guix.gnu.org), a functional package manager inspired by Nix. Available as [services.guix](#opt-services.guix.enable).
+
 - [maubot](https://github.com/maubot/maubot), a plugin-based Matrix bot framework. Available as [services.maubot](#opt-services.maubot.enable).
 
+- systemd's gateway, upload, and remote services, which provide ways of sending journals across the network. Enable using [services.journald.gateway](#opt-services.journald.gateway.enable), [services.journald.upload](#opt-services.journald.upload.enable), and [services.journald.remote](#opt-services.journald.remote.enable).
+
+- [GNS3](https://www.gns3.com/), a network software emulator. Available as [services.gns3-server](#opt-services.gns3-server.enable).
+
+- [rspamd-trainer](https://gitlab.com/onlime/rspamd-trainer), a script, triggered by a helper, which reads mails from a specific mail inbox and feeds them into rspamd for spam/ham training.
+
+- [ollama](https://ollama.ai), a server for running large language models locally.
+
 - [Anki Sync Server](https://docs.ankiweb.net/sync-server.html), the official sync server built into recent versions of Anki. Available as [services.anki-sync-server](#opt-services.anki-sync-server.enable).
+  The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been marked as deprecated and will be dropped after 24.05 due to lack of maintenance of the anki-sync-server software.
+
+- [Suwayomi Server](https://github.com/Suwayomi/Suwayomi-Server), a free and open source manga reader server that runs extensions built for [Tachiyomi](https://tachiyomi.org). Available as [services.suwayomi-server](#opt-services.suwayomi-server.enable).
+
+- [ping_exporter](https://github.com/czerwonk/ping_exporter), a Prometheus exporter for ICMP echo requests. Available as [services.prometheus.exporters.ping](#opt-services.prometheus.exporters.ping.enable).
+
+- [Clevis](https://github.com/latchset/clevis), a pluggable framework for automated decryption, used to unlock encrypted devices in initrd. Available as [boot.initrd.clevis.enable](#opt-boot.initrd.clevis.enable).
+
+- [TuxClocker](https://github.com/Lurkki14/tuxclocker), a hardware control and monitoring program. Available as [programs.tuxclocker](#opt-programs.tuxclocker.enable).
 
 ## Backward Incompatibilities {#sec-release-24.05-incompatibilities}
 
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
 
-- `mkosi` was updated to v19. Parts of the user interface have changed. Consult the
-  [release notes](https://github.com/systemd/mkosi/releases/tag/v19) for a list of changes.
+- `himalaya` was updated to v1.0.0-beta, which introduces breaking changes. Check out the [release note](https://github.com/soywod/himalaya/releases/tag/v1.0.0-beta) for details.
+
+- The `power.ups` module now generates `upsd.conf`, `upsd.users` and `upsmon.conf` automatically from a set of new configuration options. This breaks compatibility with existing `power.ups` setups where these files were created manually. Back up these files before upgrading NixOS.
+
+- `k9s` was updated to v0.31. There have been various breaking changes in the config file format,
+  check out the changelog of [v0.29](https://github.com/derailed/k9s/releases/tag/v0.29.0),
+  [v0.30](https://github.com/derailed/k9s/releases/tag/v0.30.0) and
+  [v0.31](https://github.com/derailed/k9s/releases/tag/v0.31.0) for details. It is recommended
+  to back up your current configuration and let k9s recreate the new base configuration.
+
+- `idris2` was updated to v0.7.0. This version introduces breaking changes. Check out the [changelog](https://github.com/idris-lang/Idris2/blob/v0.7.0/CHANGELOG.md#v070) for details.
+
+- `nitter` requires a `guest_accounts.jsonl` to be provided as a path or loaded into the default location at `/var/lib/nitter/guest_accounts.jsonl`. See [Guest Account Branch Deployment](https://github.com/zedeus/nitter/wiki/Guest-Account-Branch-Deployment) for details.
+
+- Invidious has changed its default database username from `kemal` to `invidious`. Setups involving an externally provisioned database (i.e. `services.invidious.database.createLocally == false`) should adjust their configuration accordingly. The old `kemal` user will not be removed automatically, even when the database is provisioned automatically. See [PR #265857](https://github.com/NixOS/nixpkgs/pull/265857) for details.
+
+- The `services.paperless.extraConfig` setting has been removed and replaced by the freeform option `services.paperless.settings`.
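+
+  An illustrative sketch (the setting shown is a standard Paperless environment variable, used here only as an example):
+
+  ```nix
+  services.paperless.settings = {
+    PAPERLESS_OCR_LANGUAGE = "deu+eng";
+  };
+  ```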
+
+- The legacy and long deprecated systemd target `network-interfaces.target` has been removed. Use `network.target` instead.
+
+- `services.frp.settings` now generates the frp configuration file in TOML format as [recommended by upstream](https://github.com/fatedier/frp#configuration-files), instead of the legacy INI format. This has also introduced other changes in the configuration file structure and options.
+  - The `settings.common` section in the configuration is no longer valid and all the options from inside it now go directly under `settings`.
+  - The `_` separating words in option names has been removed, so the options are now in camelCase. For example, `server_addr` becomes `serverAddr`, `server_port` becomes `serverPort`, etc.
+  - Proxies are now defined with a new option `settings.proxies` which takes a list of proxies.
+  - Consult the [upstream documentation](https://github.com/fatedier/frp#example-usage) for more details on the changes; an illustrative before/after sketch is shown below.
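+
+  For illustration, a hypothetical setup (addresses and ports are placeholders) would change roughly like this:
+
+  ```nix
+  # before (INI-style options)
+  services.frp.settings.common = {
+    server_addr = "203.0.113.1";
+    server_port = 7000;
+  };
+
+  # after (TOML-style options)
+  services.frp.settings = {
+    serverAddr = "203.0.113.1";
+    serverPort = 7000;
+    proxies = [
+      { name = "ssh"; type = "tcp"; localIP = "127.0.0.1"; localPort = 22; remotePort = 6000; }
+    ];
+  };
+  ```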
+
+- `mkosi` was updated to v20. Parts of the user interface have changed. Consult the
+  release notes of [v19](https://github.com/systemd/mkosi/releases/tag/v19) and
+  [v20](https://github.com/systemd/mkosi/releases/tag/v20) for a list of changes.
+
+- `services.nginx` will no longer advertise HTTP/3 availability automatically. This must now be manually added, preferably to each location block.
+  Example:
+
+  ```nix
+    locations."/".extraConfig = ''
+      add_header Alt-Svc 'h3=":$server_port"; ma=86400';
+    '';
+    locations."^~ /assets/".extraConfig = ''
+      add_header Alt-Svc 'h3=":$server_port"; ma=86400';
+    '';
+
+  ```
+- The `kanata` package has been updated to v1.5.0, which includes [breaking changes](https://github.com/jtroo/kanata/releases/tag/v1.5.0).
+
+- The `craftos-pc` package has been updated to v2.8, which includes [breaking changes](https://github.com/MCJack123/craftos2/releases/tag/v2.8).
+  - Files are now handled in binary mode; this could break programs with embedded UTF-8 characters.
+  - The ROM was updated to match ComputerCraft version v1.109.2.
+  - The bundled Lua was updated to Lua v5.2, which includes breaking changes. See the [Lua manual](https://www.lua.org/manual/5.2/manual.html#8) for more information.
+  - The WebSocket API [was rewritten](https://github.com/MCJack123/craftos2/issues/337), which introduced breaking changes.
+
+- The latest available version of Nextcloud is v28 (available as `pkgs.nextcloud28`). The installation logic is as follows:
+  - If [`services.nextcloud.package`](#opt-services.nextcloud.package) is specified explicitly, this package will be installed (**recommended**)
+  - If [`system.stateVersion`](#opt-system.stateVersion) is >=24.05, `pkgs.nextcloud28` will be installed by default.
+  - If [`system.stateVersion`](#opt-system.stateVersion) is >=23.11, `pkgs.nextcloud27` will be installed by default.
+  - Please note that a direct upgrade from v26 (or older) to v28 is not possible, since Nextcloud prohibits skipping major versions while upgrading. Please upgrade to `nextcloud27` (or earlier) first, e.g. by declaring [`services.nextcloud.package = pkgs.nextcloud27;`](#opt-services.nextcloud.package).
+
+- The vendored third-party libraries have been mostly removed from `cudaPackages.nsight_systems`, which we now only ship for `cudaPackages_11_8` and later due to outdated dependencies. Users comfortable with the vendored dependencies may use `overrideAttrs` to adjust the `postPatch` phase and `meta.broken` accordingly. Alternatively, one could package the deprecated `boost170` locally, as required for `cudaPackages_11_4.nsight_systems`.
+
+- The `cudaPackages` package scope has been updated to `cudaPackages_12`.
+
+- `services.resolved.fallbackDns` can now be used to disable the upstream fallback servers entirely by setting it to an empty list. To keep the previous behaviour of using the upstream defaults, set it to `null` (the new default) instead.
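+
+  For example:
+
+  ```nix
+  # Disable the compiled-in fallback DNS servers entirely:
+  services.resolved.fallbackDns = [ ];
+
+  # Or keep the upstream defaults (this is the new default):
+  # services.resolved.fallbackDns = null;
+  ```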
+
+- `services.avahi.nssmdns` got split into `services.avahi.nssmdns4` and `services.avahi.nssmdns6` which enable the mDNS NSS switch for IPv4 and IPv6 respectively.
+  Since most mDNS responders only register IPv4 addresses, most users want to keep the IPv6 support disabled to avoid long timeouts.
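+
+  For example:
+
+  ```nix
+  services.avahi.nssmdns4 = true;
+  # Usually best left disabled to avoid long timeouts:
+  services.avahi.nssmdns6 = false;
+  ```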
+
+- `multi-user.target` no longer depends on `network-online.target`.
+  This may break services that previously relied on that assumption.
+  This was changed for consistency with other distributions as well as to improve boot times.
+
+  We have added a warning for services that are
+  `after = [ "network-online.target" ]` but do not depend on it (e.g. via `wants`); see the sketch below for a correct declaration.
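+
+  A unit that genuinely needs the network to be online should declare both, for example (the service name is illustrative):
+
+  ```nix
+  systemd.services.my-online-service = {
+    wants = [ "network-online.target" ];
+    after = [ "network-online.target" ];
+  };
+  ```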
+
+- `networking.iproute2.enable` no longer sets `environment.etc."iproute2/rt_tables".text`.
+
+  Setting `environment.etc."iproute2/{CONFIG_FILE_NAME}".text` now overrides the whole configuration file instead of appending to the upstream configuration file.
+
+  `CONFIG_FILE_NAME` includes `bpf_pinning`, `ematch_map`, `group`, `nl_protos`, `rt_dsfield`, `rt_protos`, `rt_realms`, `rt_scopes`, and `rt_tables`.
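+
+  For example, extra routing tables can still be added via the drop-in file written by the module (table number and name are placeholders):
+
+  ```nix
+  networking.iproute2 = {
+    enable = true;
+    # Ends up in /etc/iproute2/rt_tables.d/nixos.conf:
+    rttablesExtraConfig = ''
+      100 customtable
+    '';
+  };
+  ```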
+
+- The executable file names for `firefox-devedition`, `firefox-beta`, and `firefox-esr` now match their package names, which is consistent with the `firefox-*-bin` packages. The desktop entries have also been updated so that you can have multiple editions of Firefox in your app launcher.
+
+- `switch-to-configuration` no longer calls `systemd-tmpfiles` directly.
+  Instead, the new artificial `sysinit-reactivation.target` is introduced, which
+  allows restarting multiple services that are ordered before `sysinit.target`
+  while respecting the ordering between those services.
+
+- The behavior of the `systemd.oomd` module has changed as follows:
+
+  - Raise ManagedOOMMemoryPressureLimit from 50% to 80%. This should make systemd-oomd kill things less often, and fix issues like [this](https://pagure.io/fedora-workstation/issue/358).
+    Reference: [commit](https://src.fedoraproject.org/rpms/systemd/c/806c95e1c70af18f81d499b24cd7acfa4c36ffd6?branch=806c95e1c70af18f81d499b24cd7acfa4c36ffd6)
+
+  - Remove the swap policy. This helps prevent killing processes when the user's swap is small.
+
+  - Expand the memory pressure policy to system.slice, user-.slice, and all user-owned slices. Reference: [commit](https://src.fedoraproject.org/rpms/systemd/c/7665e1796f915dedbf8e014f0a78f4f576d609bb)
+
+  - `systemd.oomd.enableUserServices` is renamed to `systemd.oomd.enableUserSlices`.
+
+- `security.pam.enableSSHAgentAuth` now requires `services.openssh.authorizedKeysFiles` to be non-empty,
+  which is the case when `services.openssh.enable` is true. Previously, `pam_ssh_agent_auth` silently failed to work.
+
+- The configuration format for `services.prometheus.exporters.snmp` changed with release 0.23.0.
+  The module now includes an optional config check, enabled by default, to make the change obvious before any deployment.
+  More information about the configuration syntax change is available in the [upstream repository](https://github.com/prometheus/snmp_exporter/blob/b75fc6b839ee3f3ccbee68bee55f1ae99555084a/auth-split-migration.md).
+
+- [watchdogd](https://troglobit.com/projects/watchdogd/), a system and process supervisor using watchdog timers. Available as [services.watchdogd](#opt-services.watchdogd.enable).
 
 ## Other Notable Changes {#sec-release-24.05-notable-changes}
 
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
 
+- `addDriverRunpath` has been added to facilitate the deprecation of the old `addOpenGLRunpath` setup hook. This change reflects that the hook is no longer specific to OpenGL and now covers hardware acceleration in general.
+
+- Cinnamon has been updated to 6.0. Please be aware that the [Wayland session](https://blog.linuxmint.com/?p=4591) is still experimental in this release.
+
+- `services.postgresql.extraPlugins` changed its type from a list of packages to one that additionally accepts a function returning such a list.
+  For example, a config line like ``services.postgresql.extraPlugins = with pkgs.postgresql_11.pkgs; [ postgis ];`` should be changed to ``services.postgresql.extraPlugins = ps: with ps; [ postgis ];``.
+
 - Programs written in [Nim](https://nim-lang.org/) are built with libraries selected by lockfiles.
   The `nimPackages` and `nim2Packages` sets have been removed.
   See https://nixos.org/manual/nixpkgs/unstable#nim for more information.
 
+- [Portunus](https://github.com/majewsky/portunus) has been updated to major version 2.
+  This version of Portunus supports strong password hashes, but the legacy hash SHA-256 is also still supported to ensure a smooth migration of existing user accounts.
+  After upgrading, follow the instructions on the [upstream release notes](https://github.com/majewsky/portunus/releases/tag/v2.0.0) to upgrade all user accounts to strong password hashes.
+  Support for weak password hashes will be removed in NixOS 24.11.
+
+- `libass` now uses the native CoreText backend on Darwin, which may fix subtitle rendering issues with `mpv`, `ffmpeg`, etc.
+
+- [Lilypond](https://lilypond.org/index.html) and [Denemo](https://www.denemo.org) are now compiled with Guile 3.0.
+
+- The following options of the Nextcloud module were moved into [`services.nextcloud.extraOptions`](#opt-services.nextcloud.extraOptions) and renamed to match their names in Nextcloud's `config.php` (see the example after this list):
+  - `logLevel` -> [`loglevel`](#opt-services.nextcloud.extraOptions.loglevel),
+  - `logType` -> [`log_type`](#opt-services.nextcloud.extraOptions.log_type),
+  - `defaultPhoneRegion` -> [`default_phone_region`](#opt-services.nextcloud.extraOptions.default_phone_region),
+  - `overwriteProtocol` -> [`overwriteprotocol`](#opt-services.nextcloud.extraOptions.overwriteprotocol),
+  - `skeletonDirectory` -> [`skeletondirectory`](#opt-services.nextcloud.extraOptions.skeletondirectory),
+  - `globalProfiles` -> [`profile.enabled`](#opt-services.nextcloud.extraOptions._profile.enabled_),
+  - `extraTrustedDomains` -> [`trusted_domains`](#opt-services.nextcloud.extraOptions.trusted_domains) and
+  - `trustedProxies` -> [`trusted_proxies`](#opt-services.nextcloud.extraOptions.trusted_proxies).
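+
+  For example (the values are illustrative):
+
+  ```nix
+  services.nextcloud.extraOptions = {
+    loglevel = 2;
+    default_phone_region = "DE";
+  };
+  ```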
+
+- The option `services.nextcloud.config.dbport` of the Nextcloud module was removed to match upstream.
+  The port can be specified in [`services.nextcloud.config.dbhost`](#opt-services.nextcloud.config.dbhost).
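+
+  For example, the port can now be given as part of the host string (host and port are illustrative):
+
+  ```nix
+  services.nextcloud.config.dbhost = "db.example.internal:5433";
+  ```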
+
 - The Yama LSM is now enabled by default in the kernel, which prevents ptracing
   non-child processes. This means you will not be able to attach gdb to an
   existing process, but will need to start that process from gdb (so it is a
   child). Or you can set `boot.kernel.sysctl."kernel.yama.ptrace_scope"` to 0.
 
+- [Nginx virtual hosts](#opt-services.nginx.virtualHosts) using `forceSSL` or
+  `globalRedirect` can now have redirect codes other than 301 through
+  `redirectCode`.
+
+- The source of the `mockgen` package has changed to the [go.uber.org/mock](https://github.com/uber-go/mock) fork because [the original repository is no longer maintained](https://github.com/golang/mock#gomock).
+
+- `security.pam.enableSSHAgentAuth` was renamed to `security.pam.sshAgentAuth.enable` and an `authorizedKeysFiles`
+  option was added, to control which `authorized_keys` files are trusted.  It defaults to the previous behaviour,
+  **which is insecure**: see [#31611](https://github.com/NixOS/nixpkgs/issues/31611).
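+
+  A hardened configuration might look like the following sketch (the key file path is illustrative and must be root-controlled):
+
+  ```nix
+  security.pam.sshAgentAuth = {
+    enable = true;
+    authorizedKeysFiles = [ "/etc/ssh/authorized_keys.d/%u" ];
+  };
+  ```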
+
+- [](#opt-boot.kernel.sysctl._net.core.wmem_max_) changed from a string to an integer due to the addition of a custom merge function that takes the highest value defined, avoiding conflicts between two services trying to set that value, just as [](#opt-boot.kernel.sysctl._net.core.rmem_max_) did since 22.11.
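+
+  For example:
+
+  ```nix
+  boot.kernel.sysctl."net.core.wmem_max" = 8388608; # now an integer, no longer a string
+  ```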
+
+- `services.zfs.zed.enableMail` now uses the global `sendmail` wrapper defined by an email module
+  (such as msmtp or Postfix). It no longer requires using a special ZFS build with email support.
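+
+  A minimal sketch of the new setup, assuming msmtp provides the `sendmail` wrapper (ZED's mail settings such as the recipient address are configured separately):
+
+  ```nix
+  programs.msmtp.enable = true;
+  services.zfs.zed.enableMail = true;
+  ```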
+
+- `nextcloud-setup.service` no longer changes the group of each file & directory inside `/var/lib/nextcloud/{config,data,store-apps}` if one of these directories has the wrong owner group. This was part of transitioning the group used for `/var/lib/nextcloud`, but isn't necessary anymore.
+
+- The `krb5` module has been rewritten and moved to `security.krb5`; all options except `security.krb5.enable` and `security.krb5.package` now live under `security.krb5.settings`. A migration sketch is shown below.
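+
+  A migration sketch (realm and host names are placeholders):
+
+  ```nix
+  # before
+  krb5 = {
+    enable = true;
+    libdefaults.default_realm = "EXAMPLE.COM";
+    realms."EXAMPLE.COM".kdc = [ "kerberos.example.com" ];
+  };
+
+  # after
+  security.krb5 = {
+    enable = true;
+    settings = {
+      libdefaults.default_realm = "EXAMPLE.COM";
+      realms."EXAMPLE.COM".kdc = [ "kerberos.example.com" ];
+    };
+  };
+  ```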
+
+- The Gitea 1.21 upgrade has several breaking changes, including:
+  - Custom themes and other assets that were previously stored in `custom/public/*` now belong in `custom/public/assets/*`.
+  - New instances of Gitea using MySQL now ignore the `[database].CHARSET` config option and always use the `utf8mb4` charset; existing instances should migrate via the `gitea doctor convert` CLI command.
+
 - The `hardware.pulseaudio` module now sets permission of pulse user home directory to 755 when running in "systemWide" mode. It fixes [issue 114399](https://github.com/NixOS/nixpkgs/issues/114399).
+
+- The `btrbk` module now automatically selects and provides the required compression
+  program depending on the configured `stream_compress` option. Since this
+  replaces the need for the `extraPackages` option, that option will be
+  deprecated in future releases.
+
+- The `mpich` package expression now requires `withPm` to be a list, e.g. `"hydra:gforker"` becomes `[ "hydra" "gforker" ]`.
+
+- QtMultimedia has changed its default backend to `QT_MEDIA_BACKEND=ffmpeg` (previously `gstreamer` on Linux or `darwin` on macOS).
+  The previous native backends remain available but are now minimally maintained. Refer to [upstream documentation](https://doc.qt.io/qt-6/qtmultimedia-index.html#ffmpeg-as-the-default-backend) for further details about each platform.
diff --git a/nixos/lib/eval-config.nix b/nixos/lib/eval-config.nix
index da099f86aa2ce..8bab3752073ff 100644
--- a/nixos/lib/eval-config.nix
+++ b/nixos/lib/eval-config.nix
@@ -110,6 +110,7 @@ let
   withExtraAttrs = configuration: configuration // {
     inherit extraArgs;
     inherit (configuration._module.args) pkgs;
+    inherit lib;
     extendModules = args: withExtraAttrs (configuration.extendModules args);
   };
 in
diff --git a/nixos/lib/make-disk-image.nix b/nixos/lib/make-disk-image.nix
index e5d82f4de7c9d..1a33abd01ea18 100644
--- a/nixos/lib/make-disk-image.nix
+++ b/nixos/lib/make-disk-image.nix
@@ -522,11 +522,16 @@ let format' = format; in let
     chmod 0644 $efiVars
   '';
 
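+  # Register the generated image as a Hydra build product so it is listed in the web UI.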
+  createHydraBuildProducts = ''
+    mkdir -p $out/nix-support
+    echo "file ${format}-image $out/${filename}" >> $out/nix-support/hydra-build-products
+  '';
+
   buildImage = pkgs.vmTools.runInLinuxVM (
     pkgs.runCommand name {
       preVM = prepareImage + lib.optionalString touchEFIVars createEFIVars;
       buildInputs = with pkgs; [ util-linux e2fsprogs dosfstools ];
-      postVM = moveOrConvertImage + postVM;
+      postVM = moveOrConvertImage + createHydraBuildProducts + postVM;
       QEMU_OPTS =
         concatStringsSep " " (lib.optional useEFIBoot "-drive if=pflash,format=raw,unit=0,readonly=on,file=${efiFirmware}"
         ++ lib.optionals touchEFIVars [
@@ -616,5 +621,5 @@ let format' = format; in let
 in
   if onlyNixStore then
     pkgs.runCommand name {}
-      (prepareImage + moveOrConvertImage + postVM)
+      (prepareImage + moveOrConvertImage + createHydraBuildProducts + postVM)
   else buildImage
diff --git a/nixos/lib/make-options-doc/default.nix b/nixos/lib/make-options-doc/default.nix
index 99515b5b8276e..284934a7608ef 100644
--- a/nixos/lib/make-options-doc/default.nix
+++ b/nixos/lib/make-options-doc/default.nix
@@ -120,7 +120,7 @@ in rec {
     { meta.description = "List of NixOS options in JSON format";
       nativeBuildInputs = [
         pkgs.brotli
-        pkgs.python3Minimal
+        pkgs.python3
       ];
       options = builtins.toFile "options.json"
         (builtins.unsafeDiscardStringContext (builtins.toJSON optionsNix));
diff --git a/nixos/lib/make-single-disk-zfs-image.nix b/nixos/lib/make-single-disk-zfs-image.nix
index a3564f9a8b68e..585fa93b7fa0f 100644
--- a/nixos/lib/make-single-disk-zfs-image.nix
+++ b/nixos/lib/make-single-disk-zfs-image.nix
@@ -21,6 +21,9 @@
 , # size of the FAT partition, in megabytes.
   bootSize ? 1024
 
+, # memory allocated for virtualized build instance, in megabytes
+  memSize ? 1024
+
 , # The size of the root partition, in megabytes.
   rootSize ? 2048
 
@@ -230,7 +233,7 @@ let
   ).runInLinuxVM (
     pkgs.runCommand name
       {
-        memSize = 1024;
+        inherit memSize;
         QEMU_OPTS = "-drive file=$rootDiskImage,if=virtio,cache=unsafe,werror=report";
         preVM = ''
           PATH=$PATH:${pkgs.qemu_kvm}/bin
diff --git a/nixos/lib/make-squashfs.nix b/nixos/lib/make-squashfs.nix
index 4b6b567399484..f28e2c6715805 100644
--- a/nixos/lib/make-squashfs.nix
+++ b/nixos/lib/make-squashfs.nix
@@ -14,6 +14,7 @@
 
 let
   pseudoFilesArgs = lib.concatMapStrings (f: ''-p "${f}" '') pseudoFiles;
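+  # Use -no-compression when no compressor is requested, otherwise pass the chosen one.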
+  compFlag = if comp == null then "-no-compression" else "-comp ${comp}";
 in
 stdenv.mkDerivation {
   name = "${fileName}.img";
@@ -39,7 +40,7 @@ stdenv.mkDerivation {
 
       # Generate the squashfs image.
       mksquashfs nix-path-registration $(cat $closureInfo/store-paths) $out ${pseudoFilesArgs} \
-        -no-hardlinks ${lib.optionalString noStrip "-no-strip"} -keep-as-directory -all-root -b 1048576 -comp ${comp} \
+        -no-hardlinks ${lib.optionalString noStrip "-no-strip"} -keep-as-directory -all-root -b 1048576 ${compFlag} \
         -processors $NIX_BUILD_CORES
     '';
 }
diff --git a/nixos/lib/systemd-lib.nix b/nixos/lib/systemd-lib.nix
index 820ccbcbf72a1..347ee73039364 100644
--- a/nixos/lib/systemd-lib.nix
+++ b/nixos/lib/systemd-lib.nix
@@ -360,9 +360,13 @@ in rec {
     };
   };
 
-  commonUnitText = def: ''
+  commonUnitText = def: lines: ''
       [Unit]
       ${attrsToSection def.unitConfig}
+    '' + lines + lib.optionalString (def.wantedBy != [ ]) ''
+
+      [Install]
+      WantedBy=${concatStringsSep " " def.wantedBy}
     '';
 
   targetToUnit = name: def:
@@ -376,7 +380,7 @@ in rec {
 
   serviceToUnit = name: def:
     { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
-      text = commonUnitText def + ''
+      text = commonUnitText def (''
         [Service]
       '' + (let env = cfg.globalEnvironment // def.environment;
         in concatMapStrings (n:
@@ -392,63 +396,57 @@ in rec {
       '' else "")
        + optionalString (def ? stopIfChanged && !def.stopIfChanged) ''
          X-StopIfChanged=false
-      '' + attrsToSection def.serviceConfig;
+      '' + attrsToSection def.serviceConfig);
     };
 
   socketToUnit = name: def:
     { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
-      text = commonUnitText def +
-        ''
-          [Socket]
-          ${attrsToSection def.socketConfig}
-          ${concatStringsSep "\n" (map (s: "ListenStream=${s}") def.listenStreams)}
-          ${concatStringsSep "\n" (map (s: "ListenDatagram=${s}") def.listenDatagrams)}
-        '';
+      text = commonUnitText def ''
+        [Socket]
+        ${attrsToSection def.socketConfig}
+        ${concatStringsSep "\n" (map (s: "ListenStream=${s}") def.listenStreams)}
+        ${concatStringsSep "\n" (map (s: "ListenDatagram=${s}") def.listenDatagrams)}
+      '';
     };
 
   timerToUnit = name: def:
     { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
-      text = commonUnitText def +
-        ''
-          [Timer]
-          ${attrsToSection def.timerConfig}
-        '';
+      text = commonUnitText def ''
+        [Timer]
+        ${attrsToSection def.timerConfig}
+      '';
     };
 
   pathToUnit = name: def:
     { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
-      text = commonUnitText def +
-        ''
-          [Path]
-          ${attrsToSection def.pathConfig}
-        '';
+      text = commonUnitText def ''
+        [Path]
+        ${attrsToSection def.pathConfig}
+      '';
     };
 
   mountToUnit = name: def:
     { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
-      text = commonUnitText def +
-        ''
-          [Mount]
-          ${attrsToSection def.mountConfig}
-        '';
+      text = commonUnitText def ''
+        [Mount]
+        ${attrsToSection def.mountConfig}
+      '';
     };
 
   automountToUnit = name: def:
     { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
-      text = commonUnitText def +
-        ''
-          [Automount]
-          ${attrsToSection def.automountConfig}
-        '';
+      text = commonUnitText def ''
+        [Automount]
+        ${attrsToSection def.automountConfig}
+      '';
     };
 
   sliceToUnit = name: def:
     { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
-      text = commonUnitText def +
-        ''
-          [Slice]
-          ${attrsToSection def.sliceConfig}
-        '';
+      text = commonUnitText def ''
+        [Slice]
+        ${attrsToSection def.sliceConfig}
+      '';
     };
 
   # Create a directory that contains systemd definition files from an attrset
diff --git a/nixos/lib/test-driver/default.nix b/nixos/lib/test-driver/default.nix
index 09d80deb85467..1acdaacc4e658 100644
--- a/nixos/lib/test-driver/default.nix
+++ b/nixos/lib/test-driver/default.nix
@@ -18,7 +18,7 @@ python3Packages.buildPythonApplication {
   pname = "nixos-test-driver";
   version = "1.1";
   src = ./.;
-  format = "pyproject";
+  pyproject = true;
 
   propagatedBuildInputs = [
     coreutils
@@ -32,6 +32,10 @@ python3Packages.buildPythonApplication {
     ++ (lib.optionals enableOCR [ imagemagick_light tesseract4 ])
     ++ extraPythonPackages python3Packages;
 
+  nativeBuildInputs = [
+    python3Packages.setuptools
+  ];
+
   passthru.tests = {
     inherit (nixosTests.nixos-test-driver) driver-timeout;
   };
diff --git a/nixos/lib/test-driver/test_driver/machine.py b/nixos/lib/test-driver/test_driver/machine.py
index f430321bb6076..da60b669fa27e 100644
--- a/nixos/lib/test-driver/test_driver/machine.py
+++ b/nixos/lib/test-driver/test_driver/machine.py
@@ -447,8 +447,7 @@ class Machine:
         """
 
         def check_active(_: Any) -> bool:
-            info = self.get_unit_info(unit, user)
-            state = info["ActiveState"]
+            state = self.get_unit_property(unit, "ActiveState", user)
             if state == "failed":
                 raise Exception(f'unit "{unit}" reached state "{state}"')
 
@@ -491,6 +490,35 @@ class Machine:
             if line_pattern.match(line)
         )
 
+    def get_unit_property(
+        self,
+        unit: str,
+        property: str,
+        user: Optional[str] = None,
+    ) -> str:
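+        """
+        Retrieve a single property of a systemd unit via
+        `systemctl show --property`, optionally for the given user session.
+        """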
+        status, lines = self.systemctl(
+            f'--no-pager show "{unit}" --property="{property}"',
+            user,
+        )
+        if status != 0:
+            raise Exception(
+                f'retrieving systemctl property "{property}" for unit "{unit}"'
+                + ("" if user is None else f' under user "{user}"')
+                + f" failed with exit code {status}"
+            )
+
+        invalid_output_message = (
+            f'systemctl show --property "{property}" "{unit}"'
+            f" produced invalid output: {lines}"
+        )
+
+        line_pattern = re.compile(r"^([^=]+)=(.*)$")
+        match = line_pattern.match(lines)
+        assert match is not None, invalid_output_message
+
+        assert match[1] == property, invalid_output_message
+        return match[2]
+
     def systemctl(self, q: str, user: Optional[str] = None) -> Tuple[int, str]:
         """
         Runs `systemctl` commands with optional support for
diff --git a/nixos/maintainers/option-usages.nix b/nixos/maintainers/option-usages.nix
index 11247666ecda9..e9bafa21a58ae 100644
--- a/nixos/maintainers/option-usages.nix
+++ b/nixos/maintainers/option-usages.nix
@@ -9,17 +9,17 @@
 
 # This file is made to be used as follow:
 #
-#   $ nix-instantiate ./option-usage.nix --argstr testOption service.xserver.enable -A txtContent --eval
+#   $ nix-instantiate ./option-usages.nix --argstr testOption service.xserver.enable -A txtContent --eval
 #
 # or
 #
-#   $ nix-build ./option-usage.nix --argstr testOption service.xserver.enable -A txt -o service.xserver.enable._txt
+#   $ nix-build ./option-usages.nix --argstr testOption service.xserver.enable -A txt -o service.xserver.enable._txt
 #
 # Other targets exists such as `dotContent`, `dot`, and `pdf`.  If you are
 # looking for the option usage of multiple options, you can provide a list
 # as argument.
 #
-#   $ nix-build ./option-usage.nix --arg testOptions \
+#   $ nix-build ./option-usages.nix --arg testOptions \
 #      '["boot.loader.gummiboot.enable" "boot.loader.gummiboot.timeout"]' \
 #      -A txt -o gummiboot.list
 #
diff --git a/nixos/maintainers/scripts/ec2/create-amis.sh b/nixos/maintainers/scripts/ec2/create-amis.sh
index 0c1656efaf1ca..d182c5c2a4794 100755
--- a/nixos/maintainers/scripts/ec2/create-amis.sh
+++ b/nixos/maintainers/scripts/ec2/create-amis.sh
@@ -27,31 +27,37 @@ var ${bucket:=nixos-amis}
 var ${service_role_name:=vmimport}
 
 # Output of the command:
-# > aws ec2 describe-regions --all-regions --query "Regions[].{Name:RegionName}" --output text | sort
+# $ nix-shell -I nixpkgs=. -p awscli --run 'aws ec2 describe-regions --region us-east-1 --all-regions --query "Regions[].{Name:RegionName}" --output text | sort | sed -e s/^/\ \ /'
 var ${regions:=
-         af-south-1
-         ap-east-1
-         ap-northeast-1
-         ap-northeast-2
-         ap-northeast-3
-         ap-south-1
-         ap-southeast-1
-         ap-southeast-2
-         ap-southeast-3
-         ca-central-1
-         eu-central-1
-         eu-north-1
-         eu-south-1
-         eu-west-1
-         eu-west-2
-         eu-west-3
-         me-south-1
-         sa-east-1
-         us-east-1
-         us-east-2
-         us-west-1
-         us-west-2
-     }
+  af-south-1
+  ap-east-1
+  ap-northeast-1
+  ap-northeast-2
+  ap-northeast-3
+  ap-south-1
+  ap-south-2
+  ap-southeast-1
+  ap-southeast-2
+  ap-southeast-3
+  ap-southeast-4
+  ca-central-1
+  eu-central-1
+  eu-central-2
+  eu-north-1
+  eu-south-1
+  eu-south-2
+  eu-west-1
+  eu-west-2
+  eu-west-3
+  il-central-1
+  me-central-1
+  me-south-1
+  sa-east-1
+  us-east-1
+  us-east-2
+  us-west-1
+  us-west-2
+}
 
 regions=($regions)
 
diff --git a/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix b/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix
index 936dcee12949e..60f0535854dd5 100644
--- a/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix
+++ b/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix
@@ -20,6 +20,12 @@ in
       default = "nixos-openstack-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
     };
 
+    ramMB = mkOption {
+      type = types.int;
+      default = 1024;
+      description = lib.mdDoc "RAM allocation for build VM";
+    };
+
     sizeMB = mkOption {
       type = types.int;
       default = 8192;
@@ -64,7 +70,7 @@ in
       includeChannel = copyChannel;
 
       bootSize = 1000;
-
+      memSize = cfg.ramMB;
       rootSize = cfg.sizeMB;
       rootPoolProperties = {
         ashift = 12;
diff --git a/nixos/modules/config/iproute2.nix b/nixos/modules/config/iproute2.nix
index 78bd07d680e20..0cde57b759be3 100644
--- a/nixos/modules/config/iproute2.nix
+++ b/nixos/modules/config/iproute2.nix
@@ -18,10 +18,9 @@ in
   };
 
   config = mkIf cfg.enable {
-    environment.etc."iproute2/rt_tables" = {
+    environment.etc."iproute2/rt_tables.d/nixos.conf" = {
       mode = "0644";
-      text = (fileContents "${pkgs.iproute2}/lib/iproute2/rt_tables")
-        + (optionalString (cfg.rttablesExtraConfig != "") "\n\n${cfg.rttablesExtraConfig}");
+      text = cfg.rttablesExtraConfig;
     };
   };
 }
diff --git a/nixos/modules/config/krb5/default.nix b/nixos/modules/config/krb5/default.nix
deleted file mode 100644
index df7a3f48236f0..0000000000000
--- a/nixos/modules/config/krb5/default.nix
+++ /dev/null
@@ -1,369 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let
-
-  cfg = config.krb5;
-
-  # This is to provide support for old configuration options (as much as is
-  # reasonable). This can be removed after 18.03 was released.
-  defaultConfig = {
-    libdefaults = optionalAttrs (cfg.defaultRealm != null)
-      { default_realm = cfg.defaultRealm; };
-
-    realms = optionalAttrs (lib.all (value: value != null) [
-      cfg.defaultRealm cfg.kdc cfg.kerberosAdminServer
-    ]) {
-      ${cfg.defaultRealm} = {
-        kdc = cfg.kdc;
-        admin_server = cfg.kerberosAdminServer;
-      };
-    };
-
-    domain_realm = optionalAttrs (lib.all (value: value != null) [
-      cfg.domainRealm cfg.defaultRealm
-    ]) {
-      ".${cfg.domainRealm}" = cfg.defaultRealm;
-      ${cfg.domainRealm} = cfg.defaultRealm;
-    };
-  };
-
-  mergedConfig = (recursiveUpdate defaultConfig {
-    inherit (config.krb5)
-      kerberos libdefaults realms domain_realm capaths appdefaults plugins
-      extraConfig config;
-  });
-
-  filterEmbeddedMetadata = value: if isAttrs value then
-    (filterAttrs
-      (attrName: attrValue: attrName != "_module" && attrValue != null)
-        value)
-    else value;
-
-  indent = "  ";
-
-  mkRelation = name: value:
-    if (isList value) then
-      concatMapStringsSep "\n" (mkRelation name) value
-    else "${name} = ${mkVal value}";
-
-  mkVal = value:
-    if (value == true) then "true"
-    else if (value == false) then "false"
-    else if (isInt value) then (toString value)
-    else if (isAttrs value) then
-      let configLines = concatLists
-        (map (splitString "\n")
-          (mapAttrsToList mkRelation value));
-      in
-      (concatStringsSep "\n${indent}"
-        ([ "{" ] ++ configLines))
-      + "\n}"
-    else value;
-
-  mkMappedAttrsOrString = value: concatMapStringsSep "\n"
-    (line: if builtins.stringLength line > 0
-      then "${indent}${line}"
-      else line)
-    (splitString "\n"
-      (if isAttrs value then
-        concatStringsSep "\n"
-            (mapAttrsToList mkRelation value)
-        else value));
-
-in {
-
-  ###### interface
-
-  options = {
-    krb5 = {
-      enable = mkEnableOption (lib.mdDoc "building krb5.conf, configuration file for Kerberos V");
-
-      kerberos = mkOption {
-        type = types.package;
-        default = pkgs.krb5;
-        defaultText = literalExpression "pkgs.krb5";
-        example = literalExpression "pkgs.heimdal";
-        description = lib.mdDoc ''
-          The Kerberos implementation that will be present in
-          `environment.systemPackages` after enabling this
-          service.
-        '';
-      };
-
-      libdefaults = mkOption {
-        type = with types; either attrs lines;
-        default = {};
-        apply = attrs: filterEmbeddedMetadata attrs;
-        example = literalExpression ''
-          {
-            default_realm = "ATHENA.MIT.EDU";
-          };
-        '';
-        description = lib.mdDoc ''
-          Settings used by the Kerberos V5 library.
-        '';
-      };
-
-      realms = mkOption {
-        type = with types; either attrs lines;
-        default = {};
-        example = literalExpression ''
-          {
-            "ATHENA.MIT.EDU" = {
-              admin_server = "athena.mit.edu";
-              kdc = [
-                "athena01.mit.edu"
-                "athena02.mit.edu"
-              ];
-            };
-          };
-        '';
-        apply = attrs: filterEmbeddedMetadata attrs;
-        description = lib.mdDoc "Realm-specific contact information and settings.";
-      };
-
-      domain_realm = mkOption {
-        type = with types; either attrs lines;
-        default = {};
-        example = literalExpression ''
-          {
-            "example.com" = "EXAMPLE.COM";
-            ".example.com" = "EXAMPLE.COM";
-          };
-        '';
-        apply = attrs: filterEmbeddedMetadata attrs;
-        description = lib.mdDoc ''
-          Map of server hostnames to Kerberos realms.
-        '';
-      };
-
-      capaths = mkOption {
-        type = with types; either attrs lines;
-        default = {};
-        example = literalExpression ''
-          {
-            "ATHENA.MIT.EDU" = {
-              "EXAMPLE.COM" = ".";
-            };
-            "EXAMPLE.COM" = {
-              "ATHENA.MIT.EDU" = ".";
-            };
-          };
-        '';
-        apply = attrs: filterEmbeddedMetadata attrs;
-        description = lib.mdDoc ''
-          Authentication paths for non-hierarchical cross-realm authentication.
-        '';
-      };
-
-      appdefaults = mkOption {
-        type = with types; either attrs lines;
-        default = {};
-        example = literalExpression ''
-          {
-            pam = {
-              debug = false;
-              ticket_lifetime = 36000;
-              renew_lifetime = 36000;
-              max_timeout = 30;
-              timeout_shift = 2;
-              initial_timeout = 1;
-            };
-          };
-        '';
-        apply = attrs: filterEmbeddedMetadata attrs;
-        description = lib.mdDoc ''
-          Settings used by some Kerberos V5 applications.
-        '';
-      };
-
-      plugins = mkOption {
-        type = with types; either attrs lines;
-        default = {};
-        example = literalExpression ''
-          {
-            ccselect = {
-              disable = "k5identity";
-            };
-          };
-        '';
-        apply = attrs: filterEmbeddedMetadata attrs;
-        description = lib.mdDoc ''
-          Controls plugin module registration.
-        '';
-      };
-
-      extraConfig = mkOption {
-        type = with types; nullOr lines;
-        default = null;
-        example = ''
-          [logging]
-            kdc          = SYSLOG:NOTICE
-            admin_server = SYSLOG:NOTICE
-            default      = SYSLOG:NOTICE
-        '';
-        description = lib.mdDoc ''
-          These lines go to the end of `krb5.conf` verbatim.
-          `krb5.conf` may include any of the relations that are
-          valid for `kdc.conf` (see `man kdc.conf`),
-          but it is not a recommended practice.
-        '';
-      };
-
-      config = mkOption {
-        type = with types; nullOr lines;
-        default = null;
-        example = ''
-          [libdefaults]
-            default_realm = EXAMPLE.COM
-
-          [realms]
-            EXAMPLE.COM = {
-              admin_server = kerberos.example.com
-              kdc = kerberos.example.com
-              default_principal_flags = +preauth
-            }
-
-          [domain_realm]
-            example.com  = EXAMPLE.COM
-            .example.com = EXAMPLE.COM
-
-          [logging]
-            kdc          = SYSLOG:NOTICE
-            admin_server = SYSLOG:NOTICE
-            default      = SYSLOG:NOTICE
-        '';
-        description = lib.mdDoc ''
-          Verbatim `krb5.conf` configuration.  Note that this
-          is mutually exclusive with configuration via
-          `libdefaults`, `realms`,
-          `domain_realm`, `capaths`,
-          `appdefaults`, `plugins` and
-          `extraConfig` configuration options.  Consult
-          `man krb5.conf` for documentation.
-        '';
-      };
-
-      defaultRealm = mkOption {
-        type = with types; nullOr str;
-        default = null;
-        example = "ATHENA.MIT.EDU";
-        description = lib.mdDoc ''
-          DEPRECATED, please use
-          `krb5.libdefaults.default_realm`.
-        '';
-      };
-
-      domainRealm = mkOption {
-        type = with types; nullOr str;
-        default = null;
-        example = "athena.mit.edu";
-        description = lib.mdDoc ''
-          DEPRECATED, please create a map of server hostnames to Kerberos realms
-          in `krb5.domain_realm`.
-        '';
-      };
-
-      kdc = mkOption {
-        type = with types; nullOr str;
-        default = null;
-        example = "kerberos.mit.edu";
-        description = lib.mdDoc ''
-          DEPRECATED, please pass a `kdc` attribute to a realm
-          in `krb5.realms`.
-        '';
-      };
-
-      kerberosAdminServer = mkOption {
-        type = with types; nullOr str;
-        default = null;
-        example = "kerberos.mit.edu";
-        description = lib.mdDoc ''
-          DEPRECATED, please pass an `admin_server` attribute
-          to a realm in `krb5.realms`.
-        '';
-      };
-    };
-  };
-
-  ###### implementation
-
-  config = mkIf cfg.enable {
-
-    environment.systemPackages = [ cfg.kerberos ];
-
-    environment.etc."krb5.conf".text = if isString cfg.config
-      then cfg.config
-      else (''
-        [libdefaults]
-        ${mkMappedAttrsOrString mergedConfig.libdefaults}
-
-        [realms]
-        ${mkMappedAttrsOrString mergedConfig.realms}
-
-        [domain_realm]
-        ${mkMappedAttrsOrString mergedConfig.domain_realm}
-
-        [capaths]
-        ${mkMappedAttrsOrString mergedConfig.capaths}
-
-        [appdefaults]
-        ${mkMappedAttrsOrString mergedConfig.appdefaults}
-
-        [plugins]
-        ${mkMappedAttrsOrString mergedConfig.plugins}
-      '' + optionalString (mergedConfig.extraConfig != null)
-          ("\n" + mergedConfig.extraConfig));
-
-    warnings = flatten [
-      (optional (cfg.defaultRealm != null) ''
-        The option krb5.defaultRealm is deprecated, please use
-        krb5.libdefaults.default_realm.
-      '')
-      (optional (cfg.domainRealm != null) ''
-        The option krb5.domainRealm is deprecated, please use krb5.domain_realm.
-      '')
-      (optional (cfg.kdc != null) ''
-        The option krb5.kdc is deprecated, please pass a kdc attribute to a
-        realm in krb5.realms.
-      '')
-      (optional (cfg.kerberosAdminServer != null) ''
-        The option krb5.kerberosAdminServer is deprecated, please pass an
-        admin_server attribute to a realm in krb5.realms.
-      '')
-    ];
-
-    assertions = [
-      { assertion = !((builtins.any (value: value != null) [
-            cfg.defaultRealm cfg.domainRealm cfg.kdc cfg.kerberosAdminServer
-          ]) && ((builtins.any (value: value != {}) [
-              cfg.libdefaults cfg.realms cfg.domain_realm cfg.capaths
-              cfg.appdefaults cfg.plugins
-            ]) || (builtins.any (value: value != null) [
-              cfg.config cfg.extraConfig
-            ])));
-        message = ''
-          Configuration of krb5.conf by deprecated options is mutually exclusive
-          with configuration by section.  Please migrate your config using the
-          attributes suggested in the warnings.
-        '';
-      }
-      { assertion = !(cfg.config != null
-          && ((builtins.any (value: value != {}) [
-              cfg.libdefaults cfg.realms cfg.domain_realm cfg.capaths
-              cfg.appdefaults cfg.plugins
-            ]) || (builtins.any (value: value != null) [
-              cfg.extraConfig cfg.defaultRealm cfg.domainRealm cfg.kdc
-              cfg.kerberosAdminServer
-            ])));
-        message = ''
-          Configuration of krb5.conf using krb.config is mutually exclusive with
-          configuration by section.  If you want to mix the two, you can pass
-          lines to any configuration section or lines to krb5.extraConfig.
-        '';
-      }
-    ];
-  };
-}
diff --git a/nixos/modules/config/ldap.nix b/nixos/modules/config/ldap.nix
index d2f01fb87d32d..e374e4a7a27e9 100644
--- a/nixos/modules/config/ldap.nix
+++ b/nixos/modules/config/ldap.nix
@@ -226,18 +226,6 @@ in
       "ldap.conf" = ldapConfig;
     };
 
-    system.activationScripts = mkIf (!cfg.daemon.enable) {
-      ldap = stringAfter [ "etc" "groups" "users" ] ''
-        if test -f "${cfg.bind.passwordFile}" ; then
-          umask 0077
-          conf="$(mktemp)"
-          printf 'bindpw %s\n' "$(cat ${cfg.bind.passwordFile})" |
-          cat ${ldapConfig.source} - >"$conf"
-          mv -fT "$conf" /etc/ldap.conf
-        fi
-      '';
-    };
-
     system.nssModules = mkIf cfg.nsswitch (singleton (
       if cfg.daemon.enable then nss_pam_ldapd else nss_ldap
     ));
@@ -258,42 +246,63 @@ in
       };
     };
 
-    systemd.services = mkIf cfg.daemon.enable {
-      nslcd = {
-        wantedBy = [ "multi-user.target" ];
-
-        preStart = ''
-          umask 0077
-          conf="$(mktemp)"
-          {
-            cat ${nslcdConfig}
-            test -z '${cfg.bind.distinguishedName}' -o ! -f '${cfg.bind.passwordFile}' ||
-            printf 'bindpw %s\n' "$(cat '${cfg.bind.passwordFile}')"
-            test -z '${cfg.daemon.rootpwmoddn}' -o ! -f '${cfg.daemon.rootpwmodpwFile}' ||
-            printf 'rootpwmodpw %s\n' "$(cat '${cfg.daemon.rootpwmodpwFile}')"
-          } >"$conf"
-          mv -fT "$conf" /run/nslcd/nslcd.conf
-        '';
-
-        restartTriggers = [
-          nslcdConfig
-          cfg.bind.passwordFile
-          cfg.daemon.rootpwmodpwFile
-        ];
-
-        serviceConfig = {
-          ExecStart = "${nslcdWrapped}/bin/nslcd";
-          Type = "forking";
-          Restart = "always";
-          User = "nslcd";
-          Group = "nslcd";
-          RuntimeDirectory = [ "nslcd" ];
-          PIDFile = "/run/nslcd/nslcd.pid";
-          AmbientCapabilities = "CAP_SYS_RESOURCE";
+    systemd.services = mkMerge [
+      (mkIf (!cfg.daemon.enable) {
+        ldap-password = {
+          wantedBy = [ "sysinit.target" ];
+          before = [ "sysinit.target" "shutdown.target" ];
+          conflicts = [ "shutdown.target" ];
+          unitConfig.DefaultDependencies = false;
+          serviceConfig.Type = "oneshot";
+          serviceConfig.RemainAfterExit = true;
+          script = ''
+            if test -f "${cfg.bind.passwordFile}" ; then
+              umask 0077
+              conf="$(mktemp)"
+              printf 'bindpw %s\n' "$(cat ${cfg.bind.passwordFile})" |
+              cat ${ldapConfig.source} - >"$conf"
+              mv -fT "$conf" /etc/ldap.conf
+            fi
+          '';
         };
-      };
+      })
+
+      (mkIf cfg.daemon.enable {
+        nslcd = {
+          wantedBy = [ "multi-user.target" ];
+
+          preStart = ''
+            umask 0077
+            conf="$(mktemp)"
+            {
+              cat ${nslcdConfig}
+              test -z '${cfg.bind.distinguishedName}' -o ! -f '${cfg.bind.passwordFile}' ||
+              printf 'bindpw %s\n' "$(cat '${cfg.bind.passwordFile}')"
+              test -z '${cfg.daemon.rootpwmoddn}' -o ! -f '${cfg.daemon.rootpwmodpwFile}' ||
+              printf 'rootpwmodpw %s\n' "$(cat '${cfg.daemon.rootpwmodpwFile}')"
+            } >"$conf"
+            mv -fT "$conf" /run/nslcd/nslcd.conf
+          '';
 
-    };
+          restartTriggers = [
+            nslcdConfig
+            cfg.bind.passwordFile
+            cfg.daemon.rootpwmodpwFile
+          ];
+
+          serviceConfig = {
+            ExecStart = "${nslcdWrapped}/bin/nslcd";
+            Type = "forking";
+            Restart = "always";
+            User = "nslcd";
+            Group = "nslcd";
+            RuntimeDirectory = [ "nslcd" ];
+            PIDFile = "/run/nslcd/nslcd.pid";
+            AmbientCapabilities = "CAP_SYS_RESOURCE";
+          };
+        };
+      })
+    ];
 
   };
 
diff --git a/nixos/modules/config/ldso.nix b/nixos/modules/config/ldso.nix
new file mode 100644
index 0000000000000..72ae3958d8869
--- /dev/null
+++ b/nixos/modules/config/ldso.nix
@@ -0,0 +1,58 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) last splitString mkOption types mdDoc optionals;
+
+  libDir = pkgs.stdenv.hostPlatform.libDir;
+  ldsoBasename = builtins.unsafeDiscardStringContext (last (splitString "/" pkgs.stdenv.cc.bintools.dynamicLinker));
+
+  pkgs32 = pkgs.pkgsi686Linux;
+  libDir32 = pkgs32.stdenv.hostPlatform.libDir;
+  ldsoBasename32 = builtins.unsafeDiscardStringContext (last (splitString "/" pkgs32.stdenv.cc.bintools.dynamicLinker));
+in {
+  options = {
+    environment.ldso = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = mdDoc ''
+        The executable to link into the normal FHS location of the ELF loader.
+      '';
+    };
+
+    environment.ldso32 = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = mdDoc ''
+        The executable to link into the normal FHS location of the 32-bit ELF loader.
+
+        This currently only works on x86_64 architectures.
+      '';
+    };
+  };
+
+  config = {
+    assertions = [
+      { assertion = isNull config.environment.ldso32 || pkgs.stdenv.isx86_64;
+        message = "Option environment.ldso32 currently only works on x86_64.";
+      }
+    ];
+
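+    # Manage the FHS loader path via tmpfiles: remove the link when no loader is
+    # configured, otherwise (re)create a symlink pointing at the configured one.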
+    systemd.tmpfiles.rules = (
+      if isNull config.environment.ldso then [
+        "r /${libDir}/${ldsoBasename} - - - - -"
+      ] else [
+        "d /${libDir} 0755 root root - -"
+        "L+ /${libDir}/${ldsoBasename} - - - - ${config.environment.ldso}"
+      ]
+    ) ++ optionals pkgs.stdenv.isx86_64 (
+      if isNull config.environment.ldso32 then [
+        "r /${libDir32}/${ldsoBasename32} - - - - -"
+      ] else [
+        "d /${libDir32} 0755 root root - -"
+        "L+ /${libDir32}/${ldsoBasename32} - - - - ${config.environment.ldso32}"
+      ]
+    );
+  };
+
+  meta.maintainers = with lib.maintainers; [ tejing ];
+}
diff --git a/nixos/modules/config/mysql.nix b/nixos/modules/config/mysql.nix
index 95c9ba76663ea..4f72d22c4f0ec 100644
--- a/nixos/modules/config/mysql.nix
+++ b/nixos/modules/config/mysql.nix
@@ -6,6 +6,8 @@ let
   cfg = config.users.mysql;
 in
 {
+  meta.maintainers = [ maintainers.netali ];
+
   options = {
     users.mysql = {
       enable = mkEnableOption (lib.mdDoc "Authentication against a MySQL/MariaDB database");
@@ -358,7 +360,7 @@ in
       user = "root";
       group = "root";
       mode = "0600";
-      # password will be added from password file in activation script
+      # password will be added from password file in systemd oneshot
       text = ''
         users.host=${cfg.host}
         users.db_user=${cfg.user}
@@ -423,34 +425,45 @@ in
       mode = "0600";
       user = config.services.nscd.user;
       group = config.services.nscd.group;
-      # password will be added from password file in activation script
+      # password will be added from password file in systemd oneshot
       text = ''
         username ${cfg.user}
       '';
     };
 
-    # preStart script to append the password from the password file
-    # to the configuration files. It also fixes the owner of the
-    # libnss-mysql-root.cfg because it is changed to root after the
-    # password is appended.
-    systemd.services.mysql.preStart = ''
-      if [[ -r ${cfg.passwordFile} ]]; then
-        org_umask=$(umask)
-        umask 0077
+    systemd.services.mysql-auth-pw-init = {
+      description = "Adds the mysql password to the mysql auth config files";
+
+      before = [ "nscd.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = "root";
+        Group = "root";
+      };
 
-        conf_nss="$(mktemp)"
-        cp /etc/libnss-mysql-root.cfg $conf_nss
-        printf 'password %s\n' "$(cat ${cfg.passwordFile})" >> $conf_nss
-        mv -fT "$conf_nss" /etc/libnss-mysql-root.cfg
-        chown ${config.services.nscd.user}:${config.services.nscd.group} /etc/libnss-mysql-root.cfg
+      restartTriggers = [
+        config.environment.etc."security/pam_mysql.conf".source
+        config.environment.etc."libnss-mysql.cfg".source
+        config.environment.etc."libnss-mysql-root.cfg".source
+      ];
 
-        conf_pam="$(mktemp)"
-        cp /etc/security/pam_mysql.conf $conf_pam
-        printf 'users.db_passwd=%s\n' "$(cat ${cfg.passwordFile})" >> $conf_pam
-        mv -fT "$conf_pam" /etc/security/pam_mysql.conf
+      script = ''
+        if [[ -r ${cfg.passwordFile} ]]; then
+          umask 0077
+          conf_nss="$(mktemp)"
+          cp /etc/libnss-mysql-root.cfg $conf_nss
+          printf 'password %s\n' "$(cat ${cfg.passwordFile})" >> $conf_nss
+          mv -fT "$conf_nss" /etc/libnss-mysql-root.cfg
+          chown ${config.services.nscd.user}:${config.services.nscd.group} /etc/libnss-mysql-root.cfg
 
-        umask $org_umask
-      fi
-    '';
+          conf_pam="$(mktemp)"
+          cp /etc/security/pam_mysql.conf $conf_pam
+          printf 'users.db_passwd=%s\n' "$(cat ${cfg.passwordFile})" >> $conf_pam
+          mv -fT "$conf_pam" /etc/security/pam_mysql.conf
+        fi
+      '';
+    };
   };
 }
diff --git a/nixos/modules/config/nix-channel.nix b/nixos/modules/config/nix-channel.nix
index a7ca7a5c74a40..dd97cb730ae41 100644
--- a/nixos/modules/config/nix-channel.nix
+++ b/nixos/modules/config/nix-channel.nix
@@ -12,7 +12,6 @@ let
     mkDefault
     mkIf
     mkOption
-    stringAfter
     types
     ;
 
diff --git a/nixos/modules/config/nix.nix b/nixos/modules/config/nix.nix
index cee4f54db0cb5..2769d8b25ef6f 100644
--- a/nixos/modules/config/nix.nix
+++ b/nixos/modules/config/nix.nix
@@ -109,13 +109,17 @@ let
         if pkgs.stdenv.hostPlatform != pkgs.stdenv.buildPlatform then ''
           echo "Ignoring validation for cross-compilation"
         ''
-        else ''
+        else
+        let
+          showCommand = if isNixAtLeast "2.20pre" then "config show" else "show-config";
+        in
+        ''
           echo "Validating generated nix.conf"
           ln -s $out ./nix.conf
           set -e
           set +o pipefail
           NIX_CONF_DIR=$PWD \
-            ${cfg.package}/bin/nix show-config ${optionalString (isNixAtLeast "2.3pre") "--no-net"} \
+            ${cfg.package}/bin/nix ${showCommand} ${optionalString (isNixAtLeast "2.3pre") "--no-net"} \
               ${optionalString (isNixAtLeast "2.4pre") "--option experimental-features nix-command"} \
             |& sed -e 's/^warning:/error:/' \
             | (! grep '${if cfg.checkAllErrors then "^error:" else "^error: unknown setting"}')
diff --git a/nixos/modules/config/no-x-libs.nix b/nixos/modules/config/no-x-libs.nix
index b2eb46f273b14..4727e5b85ef22 100644
--- a/nixos/modules/config/no-x-libs.nix
+++ b/nixos/modules/config/no-x-libs.nix
@@ -34,6 +34,8 @@ with lib;
       ffmpeg_5 = super.ffmpeg_5.override { ffmpegVariant = "headless"; };
       # dep of graphviz, libXpm is optional for Xpm support
       gd = super.gd.override { withXorg = false; };
+      ghostscript = super.ghostscript.override { cupsSupport = false; x11Support = false; };
+      gjs = super.gjs.overrideAttrs { doCheck = false; installTests = false; }; # avoid test dependency on gtk3
       gobject-introspection = super.gobject-introspection.override { x11Support = false; };
       gpsd = super.gpsd.override { guiSupport = false; };
       graphviz = super.graphviz-nox;
@@ -44,6 +46,7 @@ with lib;
       };
       imagemagick = super.imagemagick.override { libX11Support = false; libXtSupport = false; };
       imagemagickBig = super.imagemagickBig.override { libX11Support = false; libXtSupport = false; };
+      intel-vaapi-driver = super.intel-vaapi-driver.override { enableGui = false; };
       libdevil = super.libdevil-nox;
       libextractor = super.libextractor.override { gtkSupport = false; };
       libva = super.libva-minimal;
@@ -51,6 +54,7 @@ with lib;
       mc = super.mc.override { x11Support = false; };
       mpv-unwrapped = super.mpv-unwrapped.override { sdl2Support = false; x11Support = false; waylandSupport = false; };
       msmtp = super.msmtp.override { withKeyring = false; };
+      mupdf = super.mupdf.override { enableGL = false; enableX11 = false; };
       neofetch = super.neofetch.override { x11Support = false; };
       networkmanager-fortisslvpn = super.networkmanager-fortisslvpn.override { withGnome = false; };
       networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; };
@@ -71,7 +75,7 @@ with lib;
       qemu = super.qemu.override { gtkSupport = false; spiceSupport = false; sdlSupport = false; };
       qrencode = super.qrencode.overrideAttrs (_: { doCheck = false; });
       qt5 = super.qt5.overrideScope (const (super': {
-        qtbase = super'.qtbase.override { withGtk3 = false; };
+        qtbase = super'.qtbase.override { withGtk3 = false; withQttranslation = false; };
       }));
       stoken = super.stoken.override { withGTK3 = false; };
       # translateManpages -> perlPackages.po4a -> texlive-combined-basic -> texlive-core-big -> libX11
diff --git a/nixos/modules/config/pulseaudio.nix b/nixos/modules/config/pulseaudio.nix
index b10edeb75604d..662959bf0071b 100644
--- a/nixos/modules/config/pulseaudio.nix
+++ b/nixos/modules/config/pulseaudio.nix
@@ -8,8 +8,6 @@ let
   cfg = config.hardware.pulseaudio;
   alsaCfg = config.sound;
 
-  systemWide = cfg.enable && cfg.systemWide;
-  nonSystemWide = cfg.enable && !cfg.systemWide;
   hasZeroconf = let z = cfg.zeroconf; in z.publish.enable || z.discovery.enable;
 
   overriddenPackage = cfg.package.override
@@ -217,16 +215,10 @@ in {
   };
 
 
-  config = mkMerge [
+  config = lib.mkIf cfg.enable (mkMerge [
     {
-      environment.etc = {
-        "pulse/client.conf".source = clientConf;
-      };
-
-      hardware.pulseaudio.configFile = mkDefault "${getBin overriddenPackage}/etc/pulse/default.pa";
-    }
+      environment.etc."pulse/client.conf".source = clientConf;
 
-    (mkIf cfg.enable {
       environment.systemPackages = [ overriddenPackage ];
 
       sound.enable = true;
@@ -242,6 +234,8 @@ in {
         "libao.conf".source = writeText "libao.conf" "default_driver=pulse";
       };
 
+      hardware.pulseaudio.configFile = mkDefault "${getBin overriddenPackage}/etc/pulse/default.pa";
+
       # Disable flat volumes to enable relative ones
       hardware.pulseaudio.daemon.config.flat-volumes = mkDefault "no";
 
@@ -255,7 +249,7 @@ in {
 
       # PulseAudio is packaged with udev rules to handle various audio device quirks
       services.udev.packages = [ overriddenPackage ];
-    })
+    }
 
     (mkIf (cfg.extraModules != []) {
       hardware.pulseaudio.daemon.config.dl-search-path = let
@@ -277,7 +271,7 @@ in {
       services.avahi.publish.userServices = true;
     })
 
-    (mkIf nonSystemWide {
+    (mkIf (!cfg.systemWide) {
       environment.etc = {
         "pulse/default.pa".source = myConfigFile;
       };
@@ -297,7 +291,7 @@ in {
       };
     })
 
-    (mkIf systemWide {
+    (mkIf cfg.systemWide {
       users.users.pulse = {
         # For some reason, PulseAudio wants UID == GID.
         uid = assert uid == gid; uid;
@@ -328,6 +322,6 @@ in {
 
       environment.variables.PULSE_COOKIE = "${stateDir}/.config/pulse/cookie";
     })
-  ];
+  ]);
 
 }
diff --git a/nixos/modules/config/shells-environment.nix b/nixos/modules/config/shells-environment.nix
index bc6583442edf2..a8476bd2aaedd 100644
--- a/nixos/modules/config/shells-environment.nix
+++ b/nixos/modules/config/shells-environment.nix
@@ -214,7 +214,8 @@ in
       ''
         # Create the required /bin/sh symlink; otherwise lots of things
         # (notably the system() function) won't work.
-        mkdir -m 0755 -p /bin
+        mkdir -p /bin
+        chmod 0755 /bin
         ln -sfn "${cfg.binsh}" /bin/.sh.tmp
         mv /bin/.sh.tmp /bin/sh # atomically replace /bin/sh
       '';
diff --git a/nixos/modules/config/stub-ld.nix b/nixos/modules/config/stub-ld.nix
new file mode 100644
index 0000000000000..14c07466d0611
--- /dev/null
+++ b/nixos/modules/config/stub-ld.nix
@@ -0,0 +1,56 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) optionalString mkOption types mdDoc mkIf mkDefault;
+
+  cfg = config.environment.stub-ld;
+
+  message = ''
+    NixOS cannot run dynamically linked executables intended for generic
+    Linux environments out of the box. For more information, see:
+    https://nix.dev/permalink/stub-ld
+  '';
+
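+  # Builds a small static binary that prints the message above to stderr and exits with status 127.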
+  stub-ld-for = pkgsArg: messageArg: pkgsArg.pkgsStatic.runCommandCC "stub-ld" {
+    nativeBuildInputs = [ pkgsArg.unixtools.xxd ];
+    inherit messageArg;
+  } ''
+    printf "%s" "$messageArg" | xxd -i -n message >main.c
+    cat <<EOF >>main.c
+    #include <stdio.h>
+    int main(int argc, char * argv[]) {
+      fprintf(stderr, "Could not start dynamically linked executable: %s\n", argv[0]);
+      fwrite(message, sizeof(unsigned char), message_len, stderr);
+      return 127; // matches behavior of bash and zsh without a loader. fish uses 139
+    }
+    EOF
+    $CC -Os main.c -o $out
+  '';
+
+  pkgs32 = pkgs.pkgsi686Linux;
+
+  stub-ld = stub-ld-for pkgs message;
+  stub-ld32 = stub-ld-for pkgs32 message;
+in {
+  options = {
+    environment.stub-ld = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        example = false;
+        description = mdDoc ''
+          Install a stub ELF loader to print an informative error message
+          in the event that a user attempts to run an ELF binary not
+          compiled for NixOS.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.ldso = mkDefault stub-ld;
+    environment.ldso32 = mkIf pkgs.stdenv.isx86_64 (mkDefault stub-ld32);
+  };
+
+  meta.maintainers = with lib.maintainers; [ tejing ];
+}
diff --git a/nixos/modules/config/swap.nix b/nixos/modules/config/swap.nix
index 8989a64082643..21046d6f1697b 100644
--- a/nixos/modules/config/swap.nix
+++ b/nixos/modules/config/swap.nix
@@ -258,7 +258,8 @@ in
             # avoid this race condition.
             after = [ "systemd-modules-load.service" ];
             wantedBy = [ "${realDevice'}.swap" ];
-            before = [ "${realDevice'}.swap" ];
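+            # Also order before and conflict with shutdown.target so the unit is stopped cleanly at shutdown.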
+            before = [ "${realDevice'}.swap" "shutdown.target" ];
+            conflicts = [ "shutdown.target" ];
             path = [ pkgs.util-linux pkgs.e2fsprogs ]
               ++ optional sw.randomEncryption.enable pkgs.cryptsetup;
 
diff --git a/nixos/modules/config/sysctl.nix b/nixos/modules/config/sysctl.nix
index 452c050b6dda9..bedba984a3c23 100644
--- a/nixos/modules/config/sysctl.nix
+++ b/nixos/modules/config/sysctl.nix
@@ -21,18 +21,28 @@ in
   options = {
 
     boot.kernel.sysctl = mkOption {
-      type = types.submodule {
+      type = let
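+        # In case of conflicting definitions, the merge picks the highest value given.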
+        highestValueType = types.ints.unsigned // {
+          merge = loc: defs:
+            foldl
+              (a: b: if b.value == null then null else lib.max a b.value)
+              0
+              (filterOverrides defs);
+        };
+      in types.submodule {
         freeformType = types.attrsOf sysctlOption;
-        options."net.core.rmem_max" = mkOption {
-          type = types.nullOr types.ints.unsigned // {
-            merge = loc: defs:
-              foldl
-                (a: b: if b.value == null then null else lib.max a b.value)
-                0
-                (filterOverrides defs);
+        options = {
+          "net.core.rmem_max" = mkOption {
+            type = types.nullOr highestValueType;
+            default = null;
+            description = lib.mdDoc "The maximum receive socket buffer size in bytes. In case of conflicting values, the highest will be used.";
+          };
+
+          "net.core.wmem_max" = mkOption {
+            type = types.nullOr highestValueType;
+            default = null;
+            description = lib.mdDoc "The maximum send socket buffer size in bytes. In case of conflicting values, the highest will be used.";
           };
-          default = null;
-          description = lib.mdDoc "The maximum socket receive buffer size. In case of conflicting values, the highest will be used.";
         };
       };
       default = {};
diff --git a/nixos/modules/config/users-groups.nix b/nixos/modules/config/users-groups.nix
index 39aac9fb821bd..2aed620eb154c 100644
--- a/nixos/modules/config/users-groups.nix
+++ b/nixos/modules/config/users-groups.nix
@@ -475,7 +475,7 @@ let
   sdInitrdUidsAreUnique = idsAreUnique (filterAttrs (n: u: u.uid != null) config.boot.initrd.systemd.users) "uid";
   sdInitrdGidsAreUnique = idsAreUnique (filterAttrs (n: g: g.gid != null) config.boot.initrd.systemd.groups) "gid";
   groupNames = lib.mapAttrsToList (n: g: g.name) cfg.groups;
-  usersWithoutExistingGroup = lib.filterAttrs (n: u: !lib.elem u.group groupNames) cfg.users;
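+  # Users whose group is left empty are ignored here; only users referencing an undefined group are reported.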
+  usersWithoutExistingGroup = lib.filterAttrs (n: u: u.group != "" && !lib.elem u.group groupNames) cfg.users;
 
   spec = pkgs.writeText "users-groups.json" (builtins.toJSON {
     inherit (cfg) mutableUsers;
diff --git a/nixos/modules/hardware/all-firmware.nix b/nixos/modules/hardware/all-firmware.nix
index 6f58e848b38ae..a97c8c418c865 100644
--- a/nixos/modules/hardware/all-firmware.nix
+++ b/nixos/modules/hardware/all-firmware.nix
@@ -48,10 +48,7 @@ in {
         alsa-firmware
         sof-firmware
         libreelec-dvb-firmware
-      ] ++ optional pkgs.stdenv.hostPlatform.isAarch raspberrypiWirelessFirmware
-        ++ optionals (versionOlder config.boot.kernelPackages.kernel.version "4.13") [
-        rtl8723bs-firmware
-      ];
+      ] ++ optional pkgs.stdenv.hostPlatform.isAarch raspberrypiWirelessFirmware;
     })
     (mkIf cfg.enableAllFirmware {
       assertions = [{
diff --git a/nixos/modules/hardware/keyboard/qmk.nix b/nixos/modules/hardware/keyboard/qmk.nix
index df3bcaeccd2ec..d95d36dedb44e 100644
--- a/nixos/modules/hardware/keyboard/qmk.nix
+++ b/nixos/modules/hardware/keyboard/qmk.nix
@@ -12,5 +12,6 @@ in
 
   config = mkIf cfg.enable {
     services.udev.packages = [ pkgs.qmk-udev-rules ];
+    users.groups.plugdev = {};
   };
 }
diff --git a/nixos/modules/hardware/usb-storage.nix b/nixos/modules/hardware/usb-storage.nix
index 9c1b7a125fd18..3cb2c60d7ccd5 100644
--- a/nixos/modules/hardware/usb-storage.nix
+++ b/nixos/modules/hardware/usb-storage.nix
@@ -14,7 +14,7 @@ with lib;
 
   config = mkIf config.hardware.usbStorage.manageStartStop {
     services.udev.extraRules = ''
-      ACTION=="add|change", SUBSYSTEM=="scsi_disk", DRIVERS=="usb-storage", ATTR{manage_start_stop}="1"
+      ACTION=="add|change", SUBSYSTEM=="scsi_disk", DRIVERS=="usb-storage", ATTR{manage_system_start_stop}="1"
     '';
   };
 }
diff --git a/nixos/modules/hardware/video/amdgpu-pro.nix b/nixos/modules/hardware/video/amdgpu-pro.nix
index 605aa6ef8b88a..2a86280eec8cb 100644
--- a/nixos/modules/hardware/video/amdgpu-pro.nix
+++ b/nixos/modules/hardware/video/amdgpu-pro.nix
@@ -39,9 +39,10 @@ in
 
     hardware.firmware = [ package.fw ];
 
-    system.activationScripts.setup-amdgpu-pro = ''
-      ln -sfn ${package}/opt/amdgpu{,-pro} /run
-    '';
+    systemd.tmpfiles.settings.amdgpu-pro = {
+      "/run/amdgpu"."L+".argument = "${package}/opt/amdgpu";
+      "/run/amdgpu-pro"."L+".argument = "${package}/opt/amdgpu-pro";
+    };
 
     system.requiredKernelConfig = with config.lib.kernelConfig; [
       (isYes "DEVICE_PRIVATE")
diff --git a/nixos/modules/hardware/video/nvidia.nix b/nixos/modules/hardware/video/nvidia.nix
index c76883b656d40..3b983f768f91a 100644
--- a/nixos/modules/hardware/video/nvidia.nix
+++ b/nixos/modules/hardware/video/nvidia.nix
@@ -47,7 +47,8 @@ in {
           TRUNK_LINK_FAILURE_MODE=0;
           NVSWITCH_FAILURE_MODE=0;
           ABORT_CUDA_JOBS_ON_FM_EXIT=1;
-          TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
+          TOPOLOGY_FILE_PATH="${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
+          DATABASE_PATH="${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
         };
         defaultText = lib.literalExpression ''
         {
@@ -69,7 +70,8 @@ in {
           TRUNK_LINK_FAILURE_MODE=0;
           NVSWITCH_FAILURE_MODE=0;
           ABORT_CUDA_JOBS_ON_FM_EXIT=1;
-          TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
+          TOPOLOGY_FILE_PATH="''${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
+          DATABASE_PATH="''${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
         }
         '';
         description = lib.mdDoc ''
@@ -584,24 +586,50 @@ in {
         boot.extraModulePackages = [
           nvidia_x11.bin
         ];
-        systemd.services.nvidia-fabricmanager = {
-          enable = true;
-          description = "Start NVIDIA NVLink Management";
-          wantedBy = [ "multi-user.target" ];
-          unitConfig.After = [ "network-online.target" ];
-          unitConfig.Requires = [ "network-online.target" ];
-          serviceConfig = {
-            Type = "forking";
-            TimeoutStartSec = 240;
-            ExecStart = let
-              nv-fab-conf = settingsFormat.generate "fabricmanager.conf" cfg.datacenter.settings;
-              in
-                nvidia_x11.fabricmanager + "/bin/nv-fabricmanager -c " + nv-fab-conf;
-            LimitCORE="infinity";
-          };
-        };
-        environment.systemPackages =
-          lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager;
-      })
-    ]);
+
+        systemd = {
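+          # When the Docker NVIDIA runtime is enabled, link the unwrapped nvidia-persistenced binary where nvidia-docker expects it.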
+          tmpfiles.rules =
+            lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
+            "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
+
+          services = lib.mkMerge [
+            ({
+              nvidia-fabricmanager = {
+                enable = true;
+                description = "Start NVIDIA NVLink Management";
+                wantedBy = [ "multi-user.target" ];
+                unitConfig.After = [ "network-online.target" ];
+                unitConfig.Requires = [ "network-online.target" ];
+                serviceConfig = {
+                  Type = "forking";
+                  TimeoutStartSec = 240;
+                  ExecStart = let
+                    nv-fab-conf = settingsFormat.generate "fabricmanager.conf" cfg.datacenter.settings;
+                    in
+                      "${lib.getExe nvidia_x11.fabricmanager} -c ${nv-fab-conf}";
+                  LimitCORE="infinity";
+                };
+              };
+            })
+            (lib.mkIf cfg.nvidiaPersistenced {
+              "nvidia-persistenced" = {
+                description = "NVIDIA Persistence Daemon";
+                wantedBy = ["multi-user.target"];
+                serviceConfig = {
+                  Type = "forking";
+                  Restart = "always";
+                  PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
+                  ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
+                  ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
+                };
+              };
+            })
+          ];
+        };
+
+        environment.systemPackages =
+          lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager
+          ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced;
+      })
+    ]);
 }
diff --git a/nixos/modules/hardware/video/webcam/ipu6.nix b/nixos/modules/hardware/video/webcam/ipu6.nix
index fce78cda34c71..c2dbdc217bd60 100644
--- a/nixos/modules/hardware/video/webcam/ipu6.nix
+++ b/nixos/modules/hardware/video/webcam/ipu6.nix
@@ -13,11 +13,12 @@ in
     enable = mkEnableOption (lib.mdDoc "support for Intel IPU6/MIPI cameras");
 
     platform = mkOption {
-      type = types.enum [ "ipu6" "ipu6ep" ];
+      type = types.enum [ "ipu6" "ipu6ep" "ipu6epmtl" ];
       description = lib.mdDoc ''
         Choose the version for your hardware platform.
 
-        Use `ipu6` for Tiger Lake and `ipu6ep` for Alder Lake respectively.
+        Use `ipu6` for Tiger Lake, `ipu6ep` for Alder Lake or Raptor Lake,
+        and `ipu6epmtl` for Meteor Lake.
       '';
     };
 
@@ -29,9 +30,7 @@ in
       ipu6-drivers
     ];
 
-    hardware.firmware = with pkgs; [ ]
-      ++ optional (cfg.platform == "ipu6") ipu6-camera-bin
-      ++ optional (cfg.platform == "ipu6ep") ipu6ep-camera-bin;
+    hardware.firmware = [ pkgs.ipu6-camera-bins ];
 
     services.udev.extraRules = ''
       SUBSYSTEM=="intel-ipu6-psys", MODE="0660", GROUP="video"
@@ -44,14 +43,13 @@ in
 
       extraPackages = with pkgs.gst_all_1; [ ]
         ++ optional (cfg.platform == "ipu6") icamerasrc-ipu6
-        ++ optional (cfg.platform == "ipu6ep") icamerasrc-ipu6ep;
+        ++ optional (cfg.platform == "ipu6ep") icamerasrc-ipu6ep
+        ++ optional (cfg.platform == "ipu6epmtl") icamerasrc-ipu6epmtl;
 
       input = {
         pipeline = "icamerasrc";
-        format = mkIf (cfg.platform == "ipu6ep") (mkDefault "NV12");
+        format = mkIf (cfg.platform != "ipu6") (mkDefault "NV12");
       };
     };
-
   };
-
 }
diff --git a/nixos/modules/i18n/input-method/fcitx5.nix b/nixos/modules/i18n/input-method/fcitx5.nix
index 3d52c08888eae..530727f3f2928 100644
--- a/nixos/modules/i18n/input-method/fcitx5.nix
+++ b/nixos/modules/i18n/input-method/fcitx5.nix
@@ -19,6 +19,14 @@ in
           Enabled Fcitx5 addons.
         '';
       };
+      waylandFrontend = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Use the Wayland input method frontend.
+          See [Using Fcitx 5 on Wayland](https://fcitx-im.org/wiki/Using_Fcitx_5_on_Wayland).
+        '';
+      };
       quickPhrase = mkOption {
         type = with types; attrsOf str;
         default = { };
@@ -118,10 +126,11 @@ in
       ];
 
     environment.variables = {
-      GTK_IM_MODULE = "fcitx";
-      QT_IM_MODULE = "fcitx";
       XMODIFIERS = "@im=fcitx";
       QT_PLUGIN_PATH = [ "${fcitx5Package}/${pkgs.qt6.qtbase.qtPluginPrefix}" ];
+    } // lib.optionalAttrs (!cfg.waylandFrontend) {
+      GTK_IM_MODULE = "fcitx";
+      QT_IM_MODULE = "fcitx";
     } // lib.optionalAttrs cfg.ignoreUserConfig {
       SKIP_FCITX_USER_PATH = "1";
     };
diff --git a/nixos/modules/image/repart-image.nix b/nixos/modules/image/repart-image.nix
new file mode 100644
index 0000000000000..a12b4fb14fb16
--- /dev/null
+++ b/nixos/modules/image/repart-image.nix
@@ -0,0 +1,110 @@
+# This is an expression meant to be called from `./repart.nix`, it is NOT a
+# NixOS module that can be imported.
+
+{ lib
+, runCommand
+, python3
+, black
+, ruff
+, mypy
+, systemd
+, fakeroot
+, util-linux
+
+  # filesystem tools
+, dosfstools
+, mtools
+, e2fsprogs
+, squashfsTools
+, erofs-utils
+, btrfs-progs
+, xfsprogs
+
+  # compression tools
+, zstd
+, xz
+
+  # arguments
+, imageFileBasename
+, compression
+, fileSystems
+, partitions
+, split
+, seed
+, definitionsDirectory
+}:
+
+let
+  amendRepartDefinitions = runCommand "amend-repart-definitions.py"
+    {
+      # TODO: ruff does not splice properly in nativeBuildInputs
+      depsBuildBuild = [ ruff ];
+      nativeBuildInputs = [ python3 black mypy ];
+    } ''
+    install ${./amend-repart-definitions.py} $out
+    patchShebangs --build $out
+
+    black --check --diff $out
+    ruff --line-length 88 $out
+    mypy --strict $out
+  '';
+
+  fileSystemToolMapping = {
+    "vfat" = [ dosfstools mtools ];
+    "ext4" = [ e2fsprogs.bin ];
+    "squashfs" = [ squashfsTools ];
+    "erofs" = [ erofs-utils ];
+    "btrfs" = [ btrfs-progs ];
+    "xfs" = [ xfsprogs ];
+  };
+
+  fileSystemTools = builtins.concatMap (f: fileSystemToolMapping."${f}") fileSystems;
+
+  compressionPkg = {
+    "zstd" = zstd;
+    "xz" = xz;
+  }."${compression.algorithm}";
+
+  compressionCommand = {
+    "zstd" = "zstd --no-progress --threads=0 -${toString compression.level}";
+    "xz" = "xz --keep --verbose --threads=0 -${toString compression.level}";
+  }."${compression.algorithm}";
+in
+
+runCommand imageFileBasename
+{
+  nativeBuildInputs = [
+    systemd
+    fakeroot
+    util-linux
+    compressionPkg
+  ] ++ fileSystemTools;
+} ''
+  amendedRepartDefinitions=$(${amendRepartDefinitions} ${partitions} ${definitionsDirectory})
+
+  mkdir -p $out
+  cd $out
+
+  echo "Building image with systemd-repart..."
+  unshare --map-root-user fakeroot systemd-repart \
+    --dry-run=no \
+    --empty=create \
+    --size=auto \
+    --seed="${seed}" \
+    --definitions="$amendedRepartDefinitions" \
+    --split="${lib.boolToString split}" \
+    --json=pretty \
+    ${imageFileBasename}.raw \
+    | tee repart-output.json
+
+  # Compression is implemented in the same derivation, as opposed to a
+  # separate derivation, to save disk space: disk images are already very
+  # space intensive, so we want to let users mitigate this.
+  if ${lib.boolToString compression.enable}; then
+    for f in ${imageFileBasename}*; do
+      echo "Compressing $f with ${compression.algorithm}..."
+      # Keep the original file when compressing and only delete it afterwards
+      ${compressionCommand} $f && rm $f
+    done
+  fi
+''
diff --git a/nixos/modules/image/repart.nix b/nixos/modules/image/repart.nix
index 41e6110885b85..ed584d9bf997b 100644
--- a/nixos/modules/image/repart.nix
+++ b/nixos/modules/image/repart.nix
@@ -66,7 +66,53 @@ in
 
     name = lib.mkOption {
       type = lib.types.str;
-      description = lib.mdDoc "The name of the image.";
+      description = lib.mdDoc ''
+        Name of the image.
+
+        If this option is unset but config.system.image.id is set,
+        config.system.image.id is used as the default value.
+      '';
+    };
+
+    version = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = config.system.image.version;
+      defaultText = lib.literalExpression "config.system.image.version";
+      description = lib.mdDoc "Version of the image";
+    };
+
+    imageFileBasename = lib.mkOption {
+      type = lib.types.str;
+      readOnly = true;
+      description = lib.mdDoc ''
+        Basename of the image filename without any extension (e.g. `image_1`).
+      '';
+    };
+
+    imageFile = lib.mkOption {
+      type = lib.types.str;
+      readOnly = true;
+      description = lib.mdDoc ''
+        Filename of the image including all extensions (e.g. `image_1.raw` or
+        `image_1.raw.zst`).
+      '';
+    };
+
+    compression = {
+      enable = lib.mkEnableOption (lib.mdDoc "Image compression");
+
+      algorithm = lib.mkOption {
+        type = lib.types.enum [ "zstd" "xz" ];
+        default = "zstd";
+        description = lib.mdDoc "Compression algorithm";
+      };
+
+      level = lib.mkOption {
+        type = lib.types.int;
+        description = lib.mdDoc ''
+          Compression level. The available range depends on the algorithm used.
+        '';
+      };
     };
 
     seed = lib.mkOption {
@@ -90,8 +136,10 @@ in
     };
 
     package = lib.mkPackageOption pkgs "systemd-repart" {
-      default = "systemd";
-      example = "pkgs.systemdMinimal.override { withCryptsetup = true; }";
+      # We use buildPackages so that repart images are built with the build
+      # platform's systemd, allowing for cross-compiled systems to work.
+      default = [ "buildPackages" "systemd" ];
+      example = "pkgs.buildPackages.systemdMinimal.override { withCryptsetup = true; }";
     };
 
     partitions = lib.mkOption {
@@ -129,24 +177,38 @@ in
 
   config = {
 
-    system.build.image =
+    image.repart =
       let
-        fileSystemToolMapping = with pkgs; {
-          "vfat" = [ dosfstools mtools ];
-          "ext4" = [ e2fsprogs.bin ];
-          "squashfs" = [ squashfsTools ];
-          "erofs" = [ erofs-utils ];
-          "btrfs" = [ btrfs-progs ];
-          "xfs" = [ xfsprogs ];
+        version = config.image.repart.version;
+        versionInfix = if version != null then "_${version}" else "";
+        compressionSuffix = lib.optionalString cfg.compression.enable
+          {
+            "zstd" = ".zst";
+            "xz" = ".xz";
+          }."${cfg.compression.algorithm}";
+      in
+      {
+        name = lib.mkIf (config.system.image.id != null) (lib.mkOptionDefault config.system.image.id);
+        imageFileBasename = cfg.name + versionInfix;
+        imageFile = cfg.imageFileBasename + ".raw" + compressionSuffix;
+
+        compression = {
+          # Generally default to slightly faster than default compression
+          # levels, under the assumption that most builds are done during
+          # development and that release builds will customize the level.
+          level = lib.mkOptionDefault {
+            "zstd" = 3;
+            "xz" = 3;
+          }."${cfg.compression.algorithm}";
         };
+      };
 
+    system.build.image =
+      let
         fileSystems = lib.filter
           (f: f != null)
           (lib.mapAttrsToList (_n: v: v.repartConfig.Format or null) cfg.partitions);
 
-        fileSystemTools = builtins.concatMap (f: fileSystemToolMapping."${f}") fileSystems;
-
-
         makeClosure = paths: pkgs.closureInfo { rootPaths = paths; };
 
         # Add the closure of the provided Nix store paths to cfg.partitions so
@@ -157,23 +219,8 @@ in
             { closure = "${makeClosure partitionConfig.storePaths}/store-paths"; }
         );
 
-
         finalPartitions = lib.mapAttrs addClosure cfg.partitions;
 
-
-        amendRepartDefinitions = pkgs.runCommand "amend-repart-definitions.py"
-          {
-            nativeBuildInputs = with pkgs; [ black ruff mypy ];
-            buildInputs = [ pkgs.python3 ];
-          } ''
-          install ${./amend-repart-definitions.py} $out
-          patchShebangs --host $out
-
-          black --check --diff $out
-          ruff --line-length 88 $out
-          mypy --strict $out
-        '';
-
         format = pkgs.formats.ini { };
 
         definitionsDirectory = utils.systemdUtils.lib.definitions
@@ -183,30 +230,11 @@ in
 
         partitions = pkgs.writeText "partitions.json" (builtins.toJSON finalPartitions);
       in
-      pkgs.runCommand cfg.name
-        {
-          nativeBuildInputs = [
-            cfg.package
-            pkgs.fakeroot
-            pkgs.util-linux
-          ] ++ fileSystemTools;
-        } ''
-        amendedRepartDefinitions=$(${amendRepartDefinitions} ${partitions} ${definitionsDirectory})
-
-        mkdir -p $out
-        cd $out
-
-        unshare --map-root-user fakeroot systemd-repart \
-          --dry-run=no \
-          --empty=create \
-          --size=auto \
-          --seed="${cfg.seed}" \
-          --definitions="$amendedRepartDefinitions" \
-          --split="${lib.boolToString cfg.split}" \
-          --json=pretty \
-          image.raw \
-          | tee repart-output.json
-      '';
+      pkgs.callPackage ./repart-image.nix {
+        systemd = cfg.package;
+        inherit (cfg) imageFileBasename compression split seed;
+        inherit fileSystems definitionsDirectory partitions;
+      };
 
     meta.maintainers = with lib.maintainers; [ nikstur ];
 
diff --git a/nixos/modules/installer/cd-dvd/installation-cd-minimal.nix b/nixos/modules/installer/cd-dvd/installation-cd-minimal.nix
index 29afdd4710917..1932f90d4c360 100644
--- a/nixos/modules/installer/cd-dvd/installation-cd-minimal.nix
+++ b/nixos/modules/installer/cd-dvd/installation-cd-minimal.nix
@@ -18,7 +18,7 @@
   # not including it may cause annoying cache misses in the case of the NixOS manual.
   documentation.doc.enable = lib.mkOverride 500 true;
 
-  fonts.fontconfig.enable = lib.mkForce false;
+  fonts.fontconfig.enable = lib.mkOverride 500 false;
 
-  isoImage.edition = lib.mkForce "minimal";
+  isoImage.edition = lib.mkOverride 500 "minimal";
 }
diff --git a/nixos/modules/installer/cd-dvd/iso-image.nix b/nixos/modules/installer/cd-dvd/iso-image.nix
index 0b5135c088eaf..6adb94e09aff3 100644
--- a/nixos/modules/installer/cd-dvd/iso-image.nix
+++ b/nixos/modules/installer/cd-dvd/iso-image.nix
@@ -512,9 +512,10 @@ in
                 + lib.optionalString isAarch "-Xbcj arm"
                 + lib.optionalString (isPower && is32bit && isBigEndian) "-Xbcj powerpc"
                 + lib.optionalString (isSparc) "-Xbcj sparc";
-      type = lib.types.str;
+      type = lib.types.nullOr lib.types.str;
       description = lib.mdDoc ''
         Compression settings to use for the squashfs nix store.
+        `null` disables compression.
       '';
       example = "zstd -Xcompression-level 6";
     };
diff --git a/nixos/modules/installer/tools/tools.nix b/nixos/modules/installer/tools/tools.nix
index 9ccc76a82c95a..a7d11370d445e 100644
--- a/nixos/modules/installer/tools/tools.nix
+++ b/nixos/modules/installer/tools/tools.nix
@@ -231,7 +231,8 @@ in
         # even if you've upgraded your system to a new NixOS release.
         #
         # This value does NOT affect the Nixpkgs version your packages and OS are pulled from,
-        # so changing it will NOT upgrade your system.
+        # so changing it will NOT upgrade your system - see https://nixos.org/manual/nixos/stable/#sec-upgrading for how
+        # to actually do that.
         #
         # This value being lower than the current NixOS release does NOT mean your system is
         # out of date, out of support, or vulnerable.
diff --git a/nixos/modules/misc/documentation.nix b/nixos/modules/misc/documentation.nix
index 46462c5abd435..f3e698468e642 100644
--- a/nixos/modules/misc/documentation.nix
+++ b/nixos/modules/misc/documentation.nix
@@ -77,7 +77,11 @@ let
           libPath = filter (pkgs.path + "/lib");
           pkgsLibPath = filter (pkgs.path + "/pkgs/pkgs-lib");
           nixosPath = filter (pkgs.path + "/nixos");
-          modules = map (p: ''"${removePrefix "${modulesPath}/" (toString p)}"'') docModules.lazy;
+          modules =
+            "[ "
+            + concatMapStringsSep " " (p: ''"${removePrefix "${modulesPath}/" (toString p)}"'') docModules.lazy
+            + " ]";
+          passAsFile = [ "modules" ];
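+          # The module list is passed via a file (see `import $modulesPath` below) rather than on the command line.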
         } ''
           export NIX_STORE_DIR=$TMPDIR/store
           export NIX_STATE_DIR=$TMPDIR/state
@@ -87,7 +91,7 @@ let
             --argstr libPath "$libPath" \
             --argstr pkgsLibPath "$pkgsLibPath" \
             --argstr nixosPath "$nixosPath" \
-            --arg modules "[ $modules ]" \
+            --arg modules "import $modulesPath" \
             --argstr stateVersion "${options.system.stateVersion.default}" \
             --argstr release "${config.system.nixos.release}" \
             $nixosPath/lib/eval-cacheable-options.nix > $out \
diff --git a/nixos/modules/misc/ids.nix b/nixos/modules/misc/ids.nix
index 18928a6bf21bb..5af7284ac71af 100644
--- a/nixos/modules/misc/ids.nix
+++ b/nixos/modules/misc/ids.nix
@@ -288,7 +288,7 @@ in
       telegraf = 256;
       gitlab-runner = 257;
       postgrey = 258;
-      hound = 259;
+      # hound = 259; # unused, removed 2023-11-21
       leaps = 260;
       ipfs  = 261;
       # stanchion = 262; # unused, removed 2020-10-14
@@ -599,7 +599,7 @@ in
       #telegraf = 256; # unused
       gitlab-runner = 257;
       postgrey = 258;
-      hound = 259;
+      # hound = 259; # unused, removed 2023-11-21
       leaps = 260;
       ipfs = 261;
       # stanchion = 262; # unused, removed 2020-10-14
diff --git a/nixos/modules/misc/mandoc.nix b/nixos/modules/misc/mandoc.nix
index 9bcef5b1a09bd..73646a60aabb2 100644
--- a/nixos/modules/misc/mandoc.nix
+++ b/nixos/modules/misc/mandoc.nix
@@ -5,25 +5,39 @@ let
 
   cfg = config.documentation.man.mandoc;
 
-in {
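+  # Translates the settings.output attribute set into man.conf(5) "output" directives.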
+  toMandocOutput = output: (
+    lib.mapAttrsToList
+      (
+        name: value:
+          if lib.isString value || lib.isPath value then "output ${name} ${value}"
+          else if lib.isInt value then "output ${name} ${builtins.toString value}"
+          else if lib.isBool value then lib.optionalString value "output ${name}"
+          else if value == null then ""
+          else throw "Unrecognized value type ${builtins.typeOf value} of key ${name} in mandoc output settings"
+      )
+      output
+  );
+in
+{
   meta.maintainers = [ lib.maintainers.sternenseemann ];
 
   options = {
     documentation.man.mandoc = {
-      enable = lib.mkEnableOption (lib.mdDoc "mandoc as the default man page viewer");
+      enable = lib.mkEnableOption "mandoc as the default man page viewer";
 
       manPath = lib.mkOption {
         type = with lib.types; listOf str;
         default = [ "share/man" ];
         example = lib.literalExpression "[ \"share/man\" \"share/man/fr\" ]";
-        description = lib.mdDoc ''
-          Change the manpath, i. e. the directories where
-          {manpage}`man(1)`
+        description = ''
+          Change the paths included in the MANPATH environment variable,
+          i. e. the directories where {manpage}`man(1)`
           looks for section-specific directories of man pages.
           You only need to change this setting if you want extra man pages
           (e. g. in non-english languages). All values must be strings that
           are a valid path from the target prefix (without including it).
-          The first value given takes priority.
+          The first value given takes priority. Note that this will not
+          add manpath directives to {manpage}`man.conf(5)`.
         '';
       };
 
@@ -31,11 +45,122 @@ in {
         type = lib.types.package;
         default = pkgs.mandoc;
         defaultText = lib.literalExpression "pkgs.mandoc";
-        description = lib.mdDoc ''
+        description = ''
           The `mandoc` derivation to use. Useful to override
           configuration options used for the package.
         '';
       };
+
+      settings = lib.mkOption {
+        description = "Configuration for {manpage}`man.conf(5)`";
+        default = { };
+        type = lib.types.submodule {
+          options = {
+            manpath = lib.mkOption {
+              type = with lib.types; listOf str;
+              default = [ ];
+              example = lib.literalExpression "[ \"/run/current-system/sw/share/man\" ]";
+              description = ''
+                Override the default search path for {manpage}`man(1)`,
+                {manpage}`apropos(1)`, and {manpage}`makewhatis(8)`. Multiple
+                paths may be given; their order determines the manual page
+                search order. Using this option directly is not recommended;
+                prefer {option}`documentation.man.mandoc.manPath`. If you do
+                need to specify the manpath this way, set
+                {option}`documentation.man.mandoc.manPath` to an empty list (`[]`).
+              '';
+            };
+            output.fragment = lib.mkEnableOption ''
+              Omit the <!DOCTYPE> declaration and the <html>, <head>, and <body>
+              elements and only emit the subtree below the <body> element in HTML
+              output of {manpage}`mandoc(1)`. The style argument will be ignored.
+              This is useful when embedding manual content within existing documents.
+            '';
+            output.includes = lib.mkOption {
+              type = with lib.types; nullOr str;
+              default = null;
+              example = lib.literalExpression "../src/%I.html";
+              description = ''
+                A string of relative path used as a template for the output path of
+                linked header files (usually via the In macro) in HTML output.
+                Instances of `%I` are replaced with the include filename. The
+                default is not to present a hyperlink.
+              '';
+            };
+            output.indent = lib.mkOption {
+              type = with lib.types; nullOr int;
+              default = null;
+              description = ''
+                Number of blank characters at the left margin for normal text,
+                default of `5` for {manpage}`mdoc(7)` and `7` for
+                {manpage}`man(7)`. Increasing this is not recommended; it may
+                result in degraded formatting, for example overfull lines or ugly
+                line breaks. When output is to a pager on a terminal that is less
+                than 66 columns wide, the default is reduced to three columns.
+              '';
+            };
+            output.man = lib.mkOption {
+              type = with lib.types; nullOr str;
+              default = null;
+              example = lib.literalExpression "../html%S/%N.%S.html";
+              description = ''
+                A template for linked manuals (usually via the Xr macro) in HTML
+                output. Instances of `%N` and `%S` are replaced with the linked
+                manual's name and section, respectively. If no section is included,
+                section 1 is assumed. The default is not to present a hyperlink.
+                If two formats are given and a file %N.%S exists in the current
+                directory, the first format is used; otherwise, the second format is used.
+              '';
+            };
+            output.paper = lib.mkOption {
+              type = with lib.types; nullOr str;
+              default = null;
+              description = ''
+                This option is for generating PostScript and PDF output. The paper
+                size name may be one of `a3`, `a4`, `a5`, `legal`, or `letter`.
+                You may also manually specify dimensions as `NNxNN`, width by
+                height in millimetres. If an unknown value is encountered, letter
+                is used. Output pages default to letter sized and are rendered in
+                the Times font family, 11-point. Margins are calculated as 1/9 the
+                page length and width. Line-height is 1.4m.
+              '';
+            };
+            output.style = lib.mkOption {
+              type = with lib.types; nullOr path;
+              default = null;
+              description = ''
+                Path to the file used for an external style-sheet. This must be a
+                valid absolute or relative URI.
+              '';
+            };
+            output.toc = lib.mkEnableOption ''
+              In HTML output of {manpage}`mandoc(1)`, if an input file contains
+              at least two non-standard sections, print a table of contents near
+              the beginning of the output.
+            '';
+            output.width = lib.mkOption {
+              type = with lib.types; nullOr int;
+              default = null;
+              description = ''
+                The ASCII and UTF-8 output width; the default is `78`. When output
+                is to a pager on a terminal that is less than 79 columns wide, the
+                default is reduced to one less than the terminal width. In any case,
+                lines that are output in literal mode are never wrapped and may
+                exceed the output width.
+              '';
+            };
+          };
+        };
+      };
+
+      extraConfig = lib.mkOption {
+        type = lib.types.lines;
+        default = "";
+        description = ''
+          Extra configuration to write to {manpage}`man.conf(5)`.
+        '';
+      };
     };
   };
 
@@ -43,21 +168,29 @@ in {
     environment = {
       systemPackages = [ cfg.package ];
 
-      # tell mandoc about man pages
-      etc."man.conf".text = lib.concatMapStrings (path: ''
-        manpath /run/current-system/sw/${path}
-      '') cfg.manPath;
+      etc."man.conf".text = lib.concatStringsSep "\n" (
+        (map (path: "manpath ${path}") cfg.settings.manpath)
+        ++ (toMandocOutput cfg.settings.output)
+        ++ [ cfg.extraConfig ]
+      );
 
       # create mandoc.db for whatis(1), apropos(1) and man(1) -k
       # TODO(@sternenseemman): fix symlinked directories not getting indexed,
       # see: https://inbox.vuxu.org/mandoc-tech/20210906171231.GF83680@athene.usta.de/T/#e85f773c1781e3fef85562b2794f9cad7b2909a3c
       extraSetup = lib.mkIf config.documentation.man.generateCaches ''
-        ${makewhatis} -T utf8 ${
+        for man_path in ${
           lib.concatMapStringsSep " " (path:
             "$out/" + lib.escapeShellArg path
-          ) cfg.manPath
-        }
+            ) cfg.manPath} ${lib.concatMapStringsSep " " (path:
+            lib.escapeShellArg path) cfg.settings.manpath
+          }
+        do
+          [[ -d "$man_path" ]] && ${makewhatis} -T utf8 $man_path
+        done
       '';
+
+      # tell mandoc the paths containing man pages
+      profileRelativeSessionVariables."MANPATH" = map (path: if builtins.substring 0 1 path != "/" then "/${path}" else path) cfg.manPath;
     };
   };
 }
diff --git a/nixos/modules/misc/version.nix b/nixos/modules/misc/version.nix
index 45dbf45b3ae70..c929c3b37285b 100644
--- a/nixos/modules/misc/version.nix
+++ b/nixos/modules/misc/version.nix
@@ -28,6 +28,8 @@ let
     DOCUMENTATION_URL = lib.optionalString (cfg.distroId == "nixos") "https://nixos.org/learn.html";
     SUPPORT_URL = lib.optionalString (cfg.distroId == "nixos") "https://nixos.org/community.html";
     BUG_REPORT_URL = lib.optionalString (cfg.distroId == "nixos") "https://github.com/NixOS/nixpkgs/issues";
+    IMAGE_ID = lib.optionalString (config.system.image.id != null) config.system.image.id;
+    IMAGE_VERSION = lib.optionalString (config.system.image.version != null) config.system.image.version;
   } // lib.optionalAttrs (cfg.variant_id != null) {
     VARIANT_ID = cfg.variant_id;
   };
@@ -110,6 +112,38 @@ in
       example = "installer";
     };
 
+    image = {
+
+      id = lib.mkOption {
+        type = types.nullOr (types.strMatching "^[a-z0-9._-]+$");
+        default = null;
+        description = lib.mdDoc ''
+          Image identifier.
+
+          This corresponds to the IMAGE_ID field in os-release. See the
+          upstream docs for more details on valid characters for this field:
+          https://www.freedesktop.org/software/systemd/man/latest/os-release.html#IMAGE_ID=
+
+          You would only want to set this option if you're building NixOS appliance images.
+        '';
+      };
+
+      version = lib.mkOption {
+        type = types.nullOr (types.strMatching "^[a-z0-9._-]+$");
+        default = null;
+        description = lib.mdDoc ''
+          Image version.
+
+          This corresponds to the IMAGE_VERSION field in os-release. See the
+          upstream docs for more details on valid characters for this field:
+          https://www.freedesktop.org/software/systemd/man/latest/os-release.html#IMAGE_VERSION=
+
+          You would only want to set this option if you're building NixOS appliance images.
+        '';
+      };
+
+    };
+
     stateVersion = mkOption {
       type = types.str;
       # TODO Remove this and drop the default of the option so people are forced to set it.
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 7f708f6e57c70..00e6240f531d5 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -10,8 +10,8 @@
   ./config/gtk/gtk-icon-cache.nix
   ./config/i18n.nix
   ./config/iproute2.nix
-  ./config/krb5/default.nix
   ./config/ldap.nix
+  ./config/ldso.nix
   ./config/locale.nix
   ./config/malloc.nix
   ./config/mysql.nix
@@ -28,6 +28,7 @@
   ./config/resolvconf.nix
   ./config/shells-environment.nix
   ./config/stevenblack.nix
+  ./config/stub-ld.nix
   ./config/swap.nix
   ./config/sysctl.nix
   ./config/system-environment.nix
@@ -271,11 +272,13 @@
   ./programs/virt-manager.nix
   ./programs/wavemon.nix
   ./programs/wayland/cardboard.nix
+  ./programs/wayland/labwc.nix
   ./programs/wayland/river.nix
   ./programs/wayland/sway.nix
   ./programs/wayland/waybar.nix
   ./programs/wayland/wayfire.nix
   ./programs/weylus.nix
+  ./programs/winbox.nix
   ./programs/wireshark.nix
   ./programs/xastir.nix
   ./programs/wshowkeys.nix
@@ -305,6 +308,7 @@
   ./security/duosec.nix
   ./security/google_oslogin.nix
   ./security/ipa.nix
+  ./security/krb5
   ./security/lock-kernel-modules.nix
   ./security/misc.nix
   ./security/oath.nix
@@ -341,6 +345,7 @@
   ./services/audio/mopidy.nix
   ./services/audio/mpd.nix
   ./services/audio/mpdscribble.nix
+  ./services/audio/mympd.nix
   ./services/audio/navidrome.nix
   ./services/audio/networkaudiod.nix
   ./services/audio/roon-bridge.nix
@@ -364,6 +369,7 @@
   ./services/backup/mysql-backup.nix
   ./services/backup/postgresql-backup.nix
   ./services/backup/postgresql-wal-receiver.nix
+  ./services/backup/snapraid.nix
   ./services/backup/restic-rest-server.nix
   ./services/backup/restic.nix
   ./services/backup/rsnapshot.nix
@@ -442,6 +448,7 @@
   ./services/databases/surrealdb.nix
   ./services/databases/victoriametrics.nix
   ./services/desktops/accountsservice.nix
+  ./services/desktops/ayatana-indicators.nix
   ./services/desktops/bamf.nix
   ./services/desktops/blueman.nix
   ./services/desktops/cpupower-gui.nix
@@ -491,6 +498,7 @@
   ./services/development/jupyterhub/default.nix
   ./services/development/livebook.nix
   ./services/development/lorri.nix
+  ./services/development/nixseparatedebuginfod.nix
   ./services/development/rstudio-server/default.nix
   ./services/development/zammad.nix
   ./services/display-managers/greetd.nix
@@ -614,6 +622,7 @@
   ./services/mail/public-inbox.nix
   ./services/mail/roundcube.nix
   ./services/mail/rspamd.nix
+  ./services/mail/rspamd-trainer.nix
   ./services/mail/rss2email.nix
   ./services/mail/schleuder.nix
   ./services/mail/spamassassin.nix
@@ -683,6 +692,7 @@
   ./services/misc/gollum.nix
   ./services/misc/gpsd.nix
   ./services/misc/greenclip.nix
+  ./services/misc/guix
   ./services/misc/headphones.nix
   ./services/misc/heisenbridge.nix
   ./services/misc/homepage-dashboard.nix
@@ -715,6 +725,7 @@
   ./services/misc/nzbget.nix
   ./services/misc/nzbhydra2.nix
   ./services/misc/octoprint.nix
+  ./services/misc/ollama.nix
   ./services/misc/ombi.nix
   ./services/misc/osrm.nix
   ./services/misc/owncast.nix
@@ -764,6 +775,7 @@
   ./services/misc/tautulli.nix
   ./services/misc/tiddlywiki.nix
   ./services/misc/tp-auto-kbbl.nix
+  ./services/misc/tuxclocker.nix
   ./services/misc/tzupdate.nix
   ./services/misc/uhub.nix
   ./services/misc/weechat.nix
@@ -822,6 +834,7 @@
   ./services/monitoring/riemann.nix
   ./services/monitoring/scollector.nix
   ./services/monitoring/smartd.nix
+  ./services/monitoring/snmpd.nix
   ./services/monitoring/statsd.nix
   ./services/monitoring/sysstat.nix
   ./services/monitoring/teamviewer.nix
@@ -836,6 +849,7 @@
   ./services/monitoring/vmagent.nix
   ./services/monitoring/vmalert.nix
   ./services/monitoring/vnstat.nix
+  ./services/monitoring/watchdogd.nix
   ./services/monitoring/zabbix-agent.nix
   ./services/monitoring/zabbix-proxy.nix
   ./services/monitoring/zabbix-server.nix
@@ -939,6 +953,7 @@
   ./services/networking/ghostunnel.nix
   ./services/networking/git-daemon.nix
   ./services/networking/globalprotect-vpn.nix
+  ./services/networking/gns3-server.nix
   ./services/networking/gnunet.nix
   ./services/networking/go-autoconfig.nix
   ./services/networking/go-neb.nix
@@ -969,6 +984,7 @@
   ./services/networking/iwd.nix
   ./services/networking/jibri/default.nix
   ./services/networking/jicofo.nix
+  ./services/networking/jigasi.nix
   ./services/networking/jitsi-videobridge.nix
   ./services/networking/jool.nix
   ./services/networking/kea.nix
@@ -1029,6 +1045,7 @@
   ./services/networking/ntopng.nix
   ./services/networking/ntp/chrony.nix
   ./services/networking/ntp/ntpd.nix
+  ./services/networking/ntp/ntpd-rs.nix
   ./services/networking/ntp/openntpd.nix
   ./services/networking/nullidentdmod.nix
   ./services/networking/nylon.nix
@@ -1163,6 +1180,7 @@
   ./services/search/typesense.nix
   ./services/security/aesmd.nix
   ./services/security/authelia.nix
+  ./services/security/bitwarden-directory-connector-cli.nix
   ./services/security/certmgr.nix
   ./services/security/cfssl.nix
   ./services/security/clamav.nix
@@ -1251,6 +1269,7 @@
   ./services/web-apps/changedetection-io.nix
   ./services/web-apps/chatgpt-retrieval-plugin.nix
   ./services/web-apps/cloudlog.nix
+  ./services/web-apps/code-server.nix
   ./services/web-apps/convos.nix
   ./services/web-apps/dex.nix
   ./services/web-apps/discourse.nix
@@ -1321,6 +1340,7 @@
   ./services/web-apps/restya-board.nix
   ./services/web-apps/rimgo.nix
   ./services/web-apps/sftpgo.nix
+  ./services/web-apps/suwayomi-server.nix
   ./services/web-apps/rss-bridge.nix
   ./services/web-apps/selfoss.nix
   ./services/web-apps/shiori.nix
@@ -1332,6 +1352,7 @@
   ./services/web-apps/vikunja.nix
   ./services/web-apps/whitebophir.nix
   ./services/web-apps/wiki-js.nix
+  ./services/web-apps/windmill.nix
   ./services/web-apps/wordpress.nix
   ./services/web-apps/writefreely.nix
   ./services/web-apps/youtrack.nix
@@ -1357,6 +1378,7 @@
   ./services/web-servers/molly-brown.nix
   ./services/web-servers/nginx/default.nix
   ./services/web-servers/nginx/gitweb.nix
+  ./services/web-servers/nginx/tailscale-auth.nix
   ./services/web-servers/phpfpm/default.nix
   ./services/web-servers/pomerium.nix
   ./services/web-servers/rustus.nix
@@ -1423,6 +1445,7 @@
   ./system/activation/bootspec.nix
   ./system/activation/top-level.nix
   ./system/boot/binfmt.nix
+  ./system/boot/clevis.nix
   ./system/boot/emergency-mode.nix
   ./system/boot/grow-partition.nix
   ./system/boot/initrd-network.nix
@@ -1456,6 +1479,9 @@
   ./system/boot/systemd/initrd-secrets.nix
   ./system/boot/systemd/initrd.nix
   ./system/boot/systemd/journald.nix
+  ./system/boot/systemd/journald-gateway.nix
+  ./system/boot/systemd/journald-remote.nix
+  ./system/boot/systemd/journald-upload.nix
   ./system/boot/systemd/logind.nix
   ./system/boot/systemd/nspawn.nix
   ./system/boot/systemd/oomd.nix
@@ -1501,7 +1527,6 @@
   ./tasks/network-interfaces.nix
   ./tasks/powertop.nix
   ./tasks/scsi-link-power-management.nix
-  ./tasks/snapraid.nix
   ./tasks/stratis.nix
   ./tasks/swraid.nix
   ./tasks/trackpoint.nix
diff --git a/nixos/modules/profiles/installation-device.nix b/nixos/modules/profiles/installation-device.nix
index 52750cd472dad..58f07b050b5c4 100644
--- a/nixos/modules/profiles/installation-device.nix
+++ b/nixos/modules/profiles/installation-device.nix
@@ -105,6 +105,8 @@ with lib;
       ];
 
     boot.swraid.enable = true;
+    # remove warning about unset mail
+    boot.swraid.mdadmConf = "PROGRAM ${pkgs.coreutils}/bin/true";
 
     # Show all debug messages from the kernel but don't log refused packets
     # because we have the firewall enabled. This makes installs from the
diff --git a/nixos/modules/profiles/minimal.nix b/nixos/modules/profiles/minimal.nix
index 75f355b4a002b..b76740f7cc587 100644
--- a/nixos/modules/profiles/minimal.nix
+++ b/nixos/modules/profiles/minimal.nix
@@ -21,6 +21,8 @@ with lib;
   # Perl is a default package.
   environment.defaultPackages = mkDefault [ ];
 
+  environment.stub-ld.enable = false;
+
   # The lessopen package pulls in Perl.
   programs.less.lessopen = mkDefault null;
 
diff --git a/nixos/modules/programs/atop.nix b/nixos/modules/programs/atop.nix
index 7d9491d1fc1f3..003cfdbfc8fad 100644
--- a/nixos/modules/programs/atop.nix
+++ b/nixos/modules/programs/atop.nix
@@ -137,6 +137,7 @@ in
                 atop.preStart = ''
                   set -e -u
                   shopt -s nullglob
+                  rm -f "$LOGPATH"/atop_*.new
                   for logfile in "$LOGPATH"/atop_*
                   do
                     ${atop}/bin/atopconvert "$logfile" "$logfile".new
@@ -144,9 +145,9 @@ in
                     # false positives for atop-rotate.service
                     if ! ${pkgs.diffutils}/bin/cmp -s "$logfile" "$logfile".new
                     then
-                      ${pkgs.coreutils}/bin/mv -v -f "$logfile".new "$logfile"
+                      mv -v -f "$logfile".new "$logfile"
                     else
-                      ${pkgs.coreutils}/bin/rm -f "$logfile".new
+                      rm -f "$logfile".new
                     fi
                   done
                 '';
diff --git a/nixos/modules/programs/direnv.nix b/nixos/modules/programs/direnv.nix
index 1aa62ea54d2c5..fdc646eb4b164 100644
--- a/nixos/modules/programs/direnv.nix
+++ b/nixos/modules/programs/direnv.nix
@@ -49,7 +49,14 @@ in {
           default = true;
         };
 
-      package = lib.mkPackageOption pkgs "nix-direnv" {};
+      package = lib.mkOption {
+        default = pkgs.nix-direnv.override { nix = config.nix.package; };
+        defaultText = "pkgs.nix-direnv";
+        type = lib.types.package;
+        description = lib.mdDoc ''
+          The nix-direnv package to use.
+        '';
+      };
     };
   };
 
diff --git a/nixos/modules/programs/firefox.nix b/nixos/modules/programs/firefox.nix
index 1edf935d1649e..29c567783e27a 100644
--- a/nixos/modules/programs/firefox.nix
+++ b/nixos/modules/programs/firefox.nix
@@ -284,6 +284,7 @@ in
 
     # Preferences are converted into a policy
     programs.firefox.policies = {
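+      # The package is managed by Nix, so disable Firefox's built-in updater via policy.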
+      DisableAppUpdate = true;
       Preferences = (mapAttrs
         (_: value: { Value = value; Status = cfg.preferencesStatus; })
         cfg.preferences);
diff --git a/nixos/modules/programs/firejail.nix b/nixos/modules/programs/firejail.nix
index 6f79c13d94b44..046c31ce64f6b 100644
--- a/nixos/modules/programs/firejail.nix
+++ b/nixos/modules/programs/firejail.nix
@@ -53,7 +53,7 @@ in {
           desktop = mkOption {
             type = types.nullOr types.path;
             default = null;
-            description = lib.mkDoc ".desktop file to modify. Only necessary if it uses the absolute path to the executable.";
+            description = lib.mdDoc ".desktop file to modify. Only necessary if it uses the absolute path to the executable.";
             example = literalExpression ''"''${pkgs.firefox}/share/applications/firefox.desktop"'';
           };
           profile = mkOption {
diff --git a/nixos/modules/programs/gamemode.nix b/nixos/modules/programs/gamemode.nix
index c43e2c2296f5a..344f392852e2a 100644
--- a/nixos/modules/programs/gamemode.nix
+++ b/nixos/modules/programs/gamemode.nix
@@ -18,7 +18,7 @@ in
 
       settings = mkOption {
         type = settingsFormat.type;
-        default = {};
+        default = { };
         description = lib.mdDoc ''
           System-wide configuration for GameMode (/etc/gamemode.ini).
           See gamemoded(8) man page for available settings.
diff --git a/nixos/modules/programs/gpaste.nix b/nixos/modules/programs/gpaste.nix
index 074b4d59a365a..37172c9583a37 100644
--- a/nixos/modules/programs/gpaste.nix
+++ b/nixos/modules/programs/gpaste.nix
@@ -32,5 +32,7 @@ with lib;
     systemd.packages = [ pkgs.gnome.gpaste ];
     # gnome-control-center crashes in Keyboard Shortcuts pane without the GSettings schemas.
     services.xserver.desktopManager.gnome.sessionPath = [ pkgs.gnome.gpaste ];
+    # gpaste-reloaded applet doesn't work without the typelib
+    services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gnome.gpaste ];
   };
 }
diff --git a/nixos/modules/programs/hyprland.nix b/nixos/modules/programs/hyprland.nix
index 166c6cbc5c183..9061ce5da83a8 100644
--- a/nixos/modules/programs/hyprland.nix
+++ b/nixos/modules/programs/hyprland.nix
@@ -30,7 +30,6 @@ in
       readOnly = true;
       default = cfg.package.override {
         enableXWayland = cfg.xwayland.enable;
-        enableNvidiaPatches = cfg.enableNvidiaPatches;
       };
       defaultText = literalExpression
         "`programs.hyprland.package` with applied configuration";
@@ -42,8 +41,6 @@ in
     portalPackage = mkPackageOption pkgs "xdg-desktop-portal-hyprland" { };
 
     xwayland.enable = mkEnableOption (mdDoc "XWayland") // { default = true; };
-
-    enableNvidiaPatches = mkEnableOption (mdDoc "patching wlroots for better Nvidia support");
   };
 
   config = mkIf cfg.enable {
@@ -73,9 +70,13 @@ in
       [ "programs" "hyprland" "xwayland" "hidpi" ]
       "XWayland patches are deprecated. Refer to https://wiki.hyprland.org/Configuring/XWayland"
     )
-    (mkRenamedOptionModule
-      [ "programs" "hyprland" "nvidiaPatches" ]
+    (mkRemovedOptionModule
       [ "programs" "hyprland" "enableNvidiaPatches" ]
+      "Nvidia patches are no longer needed"
+    )
+    (mkRemovedOptionModule
+      [ "programs" "hyprland" "nvidiaPatches" ]
+      "Nvidia patches are no longer needed"
     )
   ];
 }
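For users tracking this change, dropping the removed options from an existing configuration is enough; a minimal sketch:

```nix
{ ... }: {
  programs.hyprland = {
    enable = true;
    xwayland.enable = true;
    # enableNvidiaPatches / nvidiaPatches were removed and now raise an error
    # stating that the Nvidia patches are no longer needed; simply delete the
    # line, there is no replacement.
  };
}
```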
diff --git a/nixos/modules/programs/mininet.nix b/nixos/modules/programs/mininet.nix
index 01ffd811e70e2..3568736854d8e 100644
--- a/nixos/modules/programs/mininet.nix
+++ b/nixos/modules/programs/mininet.nix
@@ -6,39 +6,6 @@ with lib;
 
 let
   cfg = config.programs.mininet;
-
-  telnet = pkgs.runCommand "inetutils-telnet"
-    { }
-    ''
-      mkdir -p $out/bin
-      ln -s ${pkgs.inetutils}/bin/telnet $out/bin
-    '';
-
-  generatedPath = with pkgs; makeSearchPath "bin" [
-    iperf
-    ethtool
-    iproute2
-    socat
-    # mn errors out without a telnet binary
-    # pkgs.inetutils brings an undesired ifconfig into PATH see #43105
-    nettools
-    telnet
-  ];
-
-  pyEnv = pkgs.python3.withPackages (ps: [ ps.mininet-python ]);
-
-  mnexecWrapped = pkgs.runCommand "mnexec-wrapper"
-    { nativeBuildInputs = [ pkgs.makeWrapper pkgs.python3Packages.wrapPython ]; }
-    ''
-      makeWrapper ${pkgs.mininet}/bin/mnexec \
-        $out/bin/mnexec \
-        --prefix PATH : "${generatedPath}"
-
-      makeWrapper ${pyEnv}/bin/mn \
-        $out/bin/mn \
-        --prefix PYTHONPATH : "${pyEnv}/${pyEnv.sitePackages}" \
-        --prefix PATH : "${generatedPath}"
-    '';
 in
 {
   options.programs.mininet.enable = mkEnableOption (lib.mdDoc "Mininet");
@@ -47,6 +14,6 @@ in
 
     virtualisation.vswitch.enable = true;
 
-    environment.systemPackages = [ mnexecWrapped ];
+    environment.systemPackages = [ pkgs.mininet ];
   };
 }
diff --git a/nixos/modules/programs/mosh.nix b/nixos/modules/programs/mosh.nix
index 9e56e1731d7cc..593246ab6dcd1 100644
--- a/nixos/modules/programs/mosh.nix
+++ b/nixos/modules/programs/mosh.nix
@@ -1,7 +1,5 @@
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
 
   cfg  = config.programs.mosh;
@@ -9,28 +7,26 @@ let
 in
 {
   options.programs.mosh = {
-    enable = mkOption {
-      description = lib.mdDoc ''
-        Whether to enable mosh. Note, this will open ports in your firewall!
-      '';
-      default = false;
-      type = lib.types.bool;
+    enable = lib.mkEnableOption "mosh";
+    openFirewall = lib.mkEnableOption "" // {
+      description = "Whether to automatically open the necessary ports in the firewall.";
+      default = true;
     };
-    withUtempter = mkOption {
+    withUtempter = lib.mkEnableOption "" // {
       description = lib.mdDoc ''
         Whether to enable libutempter for mosh.
+
         This is required so that mosh can write to /var/run/utmp (which can be queried with `who` to display currently connected user sessions).
        Note, this will add a setgid wrapper for the group utmp!
       '';
       default = true;
-      type = lib.types.bool;
     };
   };
 
-  config = mkIf cfg.enable {
-    environment.systemPackages = with pkgs; [ mosh ];
-    networking.firewall.allowedUDPPortRanges = [ { from = 60000; to = 61000; } ];
-    security.wrappers = mkIf cfg.withUtempter {
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.mosh ];
+    networking.firewall.allowedUDPPortRanges = lib.optional cfg.openFirewall { from = 60000; to = 61000; };
+    security.wrappers = lib.mkIf cfg.withUtempter {
       utempter = {
         source = "${pkgs.libutempter}/lib/utempter/utempter";
         owner = "root";
diff --git a/nixos/modules/programs/nix-ld.nix b/nixos/modules/programs/nix-ld.nix
index e3a9bb16410c9..6f36ce33640cd 100644
--- a/nixos/modules/programs/nix-ld.nix
+++ b/nixos/modules/programs/nix-ld.nix
@@ -47,7 +47,7 @@ in
   };
 
   config = lib.mkIf config.programs.nix-ld.enable {
-    systemd.tmpfiles.packages = [ cfg.package ];
+    environment.ldso = "${cfg.package}/libexec/nix-ld";
 
     environment.systemPackages = [ nix-ld-libraries ];
 
diff --git a/nixos/modules/programs/partition-manager.nix b/nixos/modules/programs/partition-manager.nix
index c18598b7c25d6..cf0491ff028fc 100644
--- a/nixos/modules/programs/partition-manager.nix
+++ b/nixos/modules/programs/partition-manager.nix
@@ -14,6 +14,6 @@ with lib;
   config = mkIf config.programs.partition-manager.enable {
     services.dbus.packages = [ pkgs.libsForQt5.kpmcore ];
     # `kpmcore` need to be installed to pull in polkit actions.
-    environment.systemPackages = [ pkgs.libsForQt5.kpmcore pkgs.partition-manager ];
+    environment.systemPackages = [ pkgs.libsForQt5.kpmcore pkgs.libsForQt5.partitionmanager ];
   };
 }
diff --git a/nixos/modules/programs/screen.nix b/nixos/modules/programs/screen.nix
index 68de9e52d7be3..41bfb5d7809af 100644
--- a/nixos/modules/programs/screen.nix
+++ b/nixos/modules/programs/screen.nix
@@ -1,33 +1,41 @@
 { config, lib, pkgs, ... }:
 
 let
-  inherit (lib) mkOption mkIf types;
   cfg = config.programs.screen;
 in
 
 {
-  ###### interface
-
   options = {
     programs.screen = {
+      enable = lib.mkEnableOption (lib.mdDoc "screen, a basic terminal multiplexer");
+
+      package = lib.mkPackageOptionMD pkgs "screen" { };
 
-      screenrc = mkOption {
-        default = "";
-        description = lib.mdDoc ''
-          The contents of /etc/screenrc file.
+      screenrc = lib.mkOption {
+        type = with lib.types; nullOr lines;
+        example = ''
+          defscrollback 10000
+          startup_message off
         '';
-        type = types.lines;
+        description = lib.mdDoc "The contents of {file}`/etc/screenrc` file";
       };
     };
   };
 
-  ###### implementation
-
-  config = mkIf (cfg.screenrc != "") {
-    environment.etc.screenrc.text = cfg.screenrc;
-
-    environment.systemPackages = [ pkgs.screen ];
+  config = {
+    # TODO: Added in 24.05, remove before 24.11
+    assertions = [
+      {
+        assertion = cfg.screenrc != null -> cfg.enable;
+        message = "`programs.screen.screenrc` has been configured, but `programs.screen.enable` is not true";
+      }
+    ];
+  } // lib.mkIf cfg.enable {
+    environment.etc.screenrc = {
+      enable = cfg.screenrc != null;
+      text = cfg.screenrc;
+    };
+    environment.systemPackages = [ cfg.package ];
     security.pam.services.screen = {};
   };
-
 }
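With the new explicit `enable` switch, setting `screenrc` alone is no longer enough and now trips the assertion above; a minimal sketch:

```nix
{ ... }: {
  programs.screen = {
    enable = true;          # required; screenrc without enable is now an error
    screenrc = ''
      defscrollback 10000
      startup_message off
    '';
  };
}
```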
diff --git a/nixos/modules/programs/singularity.nix b/nixos/modules/programs/singularity.nix
index 9fd37e1793a7f..7f285ab05537a 100644
--- a/nixos/modules/programs/singularity.nix
+++ b/nixos/modules/programs/singularity.nix
@@ -61,7 +61,12 @@ in
     };
     enableSuid = mkOption {
       type = types.bool;
-      default = true;
+      # SingularityCE requires SETUID for most things. Apptainer prefers user
+      # namespaces, e.g. `apptainer exec --nv` would fail if built
+      # `--with-suid`:
+      # > `FATAL: nvidia-container-cli not allowed in setuid mode`
+      default = cfg.package.projectName != "apptainer";
+      defaultText = literalExpression ''config.programs.singularity.package.projectName != "apptainer"'';
       example = false;
       description = mdDoc ''
         Whether to enable the SUID support of Singularity/Apptainer.
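A sketch of the case the new default targets (the `enable` and `package` options are assumed from the surrounding module, outside this hunk):

```nix
{ pkgs, ... }: {
  programs.singularity = {
    enable = true;
    package = pkgs.apptainer;
    # enableSuid now defaults to false for Apptainer, so commands such as
    # `apptainer exec --nv` work through user namespaces instead of the
    # setuid starter.
  };
}
```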
diff --git a/nixos/modules/programs/ssh.nix b/nixos/modules/programs/ssh.nix
index 18eb3f938f3d8..c39a3c8d509be 100644
--- a/nixos/modules/programs/ssh.nix
+++ b/nixos/modules/programs/ssh.nix
@@ -8,14 +8,12 @@ let
 
   cfg  = config.programs.ssh;
 
-  askPassword = cfg.askPassword;
-
   askPasswordWrapper = pkgs.writeScript "ssh-askpass-wrapper"
     ''
       #! ${pkgs.runtimeShell} -e
       export DISPLAY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^DISPLAY=\(.*\)/\1/; t; d')"
       export WAYLAND_DISPLAY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^WAYLAND_DISPLAY=\(.*\)/\1/; t; d')"
-      exec ${askPassword} "$@"
+      exec ${cfg.askPassword} "$@"
     '';
 
   knownHosts = attrValues cfg.knownHosts;
@@ -52,10 +50,11 @@ in
       };
 
       forwardX11 = mkOption {
-        type = types.bool;
+        type = with lib.types; nullOr bool;
         default = false;
         description = lib.mdDoc ''
           Whether to request X11 forwarding on outgoing connections by default.
+          If set to null, the option is not set at all.
           This is useful for running graphical programs on the remote machine and have them display to your local X11 server.
           Historically, this value has depended on the value used by the local sshd daemon, but there really isn't a relation between the two.
           Note: there are some security risks to forwarding an X11 connection.
@@ -274,10 +273,10 @@ in
   config = {
 
     programs.ssh.setXAuthLocation =
-      mkDefault (config.services.xserver.enable || config.programs.ssh.forwardX11 || config.services.openssh.settings.X11Forwarding);
+      mkDefault (config.services.xserver.enable || config.programs.ssh.forwardX11 == true || config.services.openssh.settings.X11Forwarding);
 
     assertions =
-      [ { assertion = cfg.forwardX11 -> cfg.setXAuthLocation;
+      [ { assertion = cfg.forwardX11 == true -> cfg.setXAuthLocation;
           message = "cannot enable X11 forwarding without setting XAuth location";
         }
       ] ++ flip mapAttrsToList cfg.knownHosts (name: data: {
@@ -298,11 +297,8 @@ in
         AddressFamily ${if config.networking.enableIPv6 then "any" else "inet"}
         GlobalKnownHostsFile ${concatStringsSep " " knownHostsFiles}
 
-        ${optionalString cfg.setXAuthLocation ''
-          XAuthLocation ${pkgs.xorg.xauth}/bin/xauth
-        ''}
-
-        ForwardX11 ${if cfg.forwardX11 then "yes" else "no"}
+        ${optionalString cfg.setXAuthLocation "XAuthLocation ${pkgs.xorg.xauth}/bin/xauth"}
+        ${lib.optionalString (cfg.forwardX11 != null) "ForwardX11 ${if cfg.forwardX11 then "yes" else "no"}"}
 
         ${optionalString (cfg.pubkeyAcceptedKeyTypes != []) "PubkeyAcceptedKeyTypes ${concatStringsSep "," cfg.pubkeyAcceptedKeyTypes}"}
         ${optionalString (cfg.hostKeyAlgorithms != []) "HostKeyAlgorithms ${concatStringsSep "," cfg.hostKeyAlgorithms}"}
@@ -344,7 +340,7 @@ in
         fi
       '';
 
-    environment.variables.SSH_ASKPASS = optionalString cfg.enableAskPassword askPassword;
+    environment.variables.SSH_ASKPASS = optionalString cfg.enableAskPassword cfg.askPassword;
 
   };
 }
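A sketch of the new tri-state behaviour of `forwardX11`:

```nix
{ ... }: {
  # false (default): emit "ForwardX11 no"; true: emit "ForwardX11 yes";
  # null: emit no ForwardX11 line at all and leave the decision to
  # per-host or per-user ssh configuration.
  programs.ssh.forwardX11 = null;
}
```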
diff --git a/nixos/modules/programs/starship.nix b/nixos/modules/programs/starship.nix
index 9dca39da5edc0..34f6f0882c617 100644
--- a/nixos/modules/programs/starship.nix
+++ b/nixos/modules/programs/starship.nix
@@ -1,13 +1,21 @@
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
   cfg = config.programs.starship;
 
   settingsFormat = pkgs.formats.toml { };
 
-  settingsFile = settingsFormat.generate "starship.toml" cfg.settings;
+  userSettingsFile = settingsFormat.generate "starship.toml" cfg.settings;
+
+  settingsFile = if cfg.presets == [] then userSettingsFile else pkgs.runCommand "starship.toml"
+    {
+      nativeBuildInputs = [ pkgs.yq ];
+    } ''
+    tomlq -s -t 'reduce .[] as $item ({}; . * $item)' \
+      ${lib.concatStringsSep " " (map (f: "${pkgs.starship}/share/starship/presets/${f}.toml") cfg.presets)} \
+      ${userSettingsFile} \
+      > $out
+  '';
 
   initOption =
     if cfg.interactiveOnly then
@@ -18,19 +26,28 @@ let
 in
 {
   options.programs.starship = {
-    enable = mkEnableOption (lib.mdDoc "the Starship shell prompt");
+    enable = lib.mkEnableOption (lib.mdDoc "the Starship shell prompt");
 
-    interactiveOnly = mkOption {
+    interactiveOnly = lib.mkOption {
       default = true;
       example = false;
-      type = types.bool;
+      type = lib.types.bool;
       description = lib.mdDoc ''
         Whether to enable starship only when the shell is interactive.
         Some plugins require this to be set to false to function correctly.
       '';
     };
 
-    settings = mkOption {
+    presets = lib.mkOption {
+      default = [ ];
+      example = [ "nerd-font-symbols" ];
+      type = with lib.types; listOf str;
+      description = lib.mdDoc ''
+        Preset files to be merged with the settings, in order.
+      '';
+    };
+
+    settings = lib.mkOption {
       inherit (settingsFormat) type;
       default = { };
       description = lib.mdDoc ''
@@ -41,24 +58,42 @@ in
     };
   };
 
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
     programs.bash.${initOption} = ''
       if [[ $TERM != "dumb" ]]; then
-        export STARSHIP_CONFIG=${settingsFile}
+        # don't set STARSHIP_CONFIG automatically if there's a user-specified
+        # config file.  starship appears to use a hardcoded config location
+        # rather than one inside an XDG folder:
+        # https://github.com/starship/starship/blob/686bda1706e5b409129e6694639477a0f8a3f01b/src/configure.rs#L651
+        if [[ ! -f "$HOME/.config/starship.toml" ]]; then
+          export STARSHIP_CONFIG=${settingsFile}
+        fi
         eval "$(${pkgs.starship}/bin/starship init bash)"
       fi
     '';
 
     programs.fish.${initOption} = ''
       if test "$TERM" != "dumb"
-        set -x STARSHIP_CONFIG ${settingsFile}
+        # don't set STARSHIP_CONFIG automatically if there's a user-specified
+        # config file.  starship appears to use a hardcoded config location
+        # rather than one inside an XDG folder:
+        # https://github.com/starship/starship/blob/686bda1706e5b409129e6694639477a0f8a3f01b/src/configure.rs#L651
+        if not test -f "$HOME/.config/starship.toml";
+          set -x STARSHIP_CONFIG ${settingsFile}
+        end
         eval (${pkgs.starship}/bin/starship init fish)
       end
     '';
 
     programs.zsh.${initOption} = ''
       if [[ $TERM != "dumb" ]]; then
-        export STARSHIP_CONFIG=${settingsFile}
+        # don't set STARSHIP_CONFIG automatically if there's a user-specified
+        # config file.  starship appears to use a hardcoded config location
+        # rather than one inside an XDG folder:
+        # https://github.com/starship/starship/blob/686bda1706e5b409129e6694639477a0f8a3f01b/src/configure.rs#L651
+        if [[ ! -f "$HOME/.config/starship.toml" ]]; then
+          export STARSHIP_CONFIG=${settingsFile}
+        fi
         eval "$(${pkgs.starship}/bin/starship init zsh)"
       fi
     '';
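A sketch combining the new `presets` option with explicit settings (the `add_newline` key is only an illustrative starship setting, not taken from the diff):

```nix
{ ... }: {
  programs.starship = {
    enable = true;
    presets = [ "nerd-font-symbols" ];   # preset files merged into the settings, in order
    settings.add_newline = false;        # user settings are merged last
  };
}
```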
diff --git a/nixos/modules/programs/tsm-client.nix b/nixos/modules/programs/tsm-client.nix
index 6cb225d102de2..45d436221ee38 100644
--- a/nixos/modules/programs/tsm-client.nix
+++ b/nixos/modules/programs/tsm-client.nix
@@ -1,193 +1,144 @@
-{ config, lib, pkgs, ... }:
+{ config, lib, options, pkgs, ... }:  # XXX migration code for freeform settings: `options` can be removed in 2025
+let optionsGlobal = options; in
 
 let
 
-  inherit (builtins) length map;
-  inherit (lib.attrsets) attrNames filterAttrs hasAttr mapAttrs mapAttrsToList optionalAttrs;
+  inherit (lib.attrsets) attrNames attrValues mapAttrsToList removeAttrs;
+  inherit (lib.lists) all allUnique concatLists elem isList map;
   inherit (lib.modules) mkDefault mkIf;
-  inherit (lib.options) literalExpression mkEnableOption mkOption mkPackageOption;
-  inherit (lib.strings) concatLines optionalString toLower;
-  inherit (lib.types) addCheck attrsOf lines nonEmptyStr nullOr package path port str strMatching submodule;
+  inherit (lib.options) mkEnableOption mkOption mkPackageOption;
+  inherit (lib.strings) concatLines match optionalString toLower;
+  inherit (lib.trivial) isInt;
+  inherit (lib.types) addCheck attrsOf coercedTo either enum int lines listOf nonEmptyStr nullOr oneOf path port singleLineStr strMatching submodule;
 
-  # Checks if given list of strings contains unique
-  # elements when compared without considering case.
-  # Type: checkIUnique :: [string] -> bool
-  # Example: checkIUnique ["foo" "Foo"] => false
-  checkIUnique = lst:
-    let
-      lenUniq = l: length (lib.lists.unique l);
-    in
-      lenUniq lst == lenUniq (map toLower lst);
+  scalarType =
+    # see the option's description below for the
+    # handling/transformation of each possible type
+    oneOf [ (enum [ true null ]) int path singleLineStr ];
 
   # TSM rejects servername strings longer than 64 chars.
-  servernameType = strMatching ".{1,64}";
+  servernameType = strMatching "[^[:space:]]{1,64}";
 
   serverOptions = { name, config, ... }: {
-    options.name = mkOption {
+    freeformType = attrsOf (either scalarType (listOf scalarType));
+    # Client system-options file directives are explained here:
+    # https://www.ibm.com/docs/en/storage-protect/8.1.20?topic=commands-processing-options
+    options.servername = mkOption {
       type = servernameType;
+      default = name;
       example = "mainTsmServer";
       description = lib.mdDoc ''
         Local name of the IBM TSM server,
-        must be uncapitalized and no longer than 64 chars.
-        The value will be used for the
-        `server`
-        directive in {file}`dsm.sys`.
+        must not contain space or more than 64 chars.
       '';
     };
-    options.server = mkOption {
+    options.tcpserveraddress = mkOption {
       type = nonEmptyStr;
       example = "tsmserver.company.com";
       description = lib.mdDoc ''
         Host/domain name or IP address of the IBM TSM server.
-        The value will be used for the
-        `tcpserveraddress`
-        directive in {file}`dsm.sys`.
       '';
     };
-    options.port = mkOption {
+    options.tcpport = mkOption {
       type = addCheck port (p: p<=32767);
       default = 1500;  # official default
       description = lib.mdDoc ''
         TCP port of the IBM TSM server.
-        The value will be used for the
-        `tcpport`
-        directive in {file}`dsm.sys`.
         TSM does not support ports above 32767.
       '';
     };
-    options.node = mkOption {
+    options.nodename = mkOption {
       type = nonEmptyStr;
       example = "MY-TSM-NODE";
       description = lib.mdDoc ''
         Target node name on the IBM TSM server.
-        The value will be used for the
-        `nodename`
-        directive in {file}`dsm.sys`.
       '';
     };
     options.genPasswd = mkEnableOption (lib.mdDoc ''
       automatic client password generation.
-      This option influences the
-      `passwordaccess`
-      directive in {file}`dsm.sys`.
+      This option does *not* cause a line in
+      {file}`dsm.sys` by itself, but generates a
+      corresponding `passwordaccess` directive.
       The password will be stored in the directory
-      given by the option {option}`passwdDir`.
+      given by the option {option}`passworddir`.
       *Caution*:
       If this option is enabled and the server forces
       to renew the password (e.g. on first connection),
       a random password will be generated and stored
     '');
-    options.passwdDir = mkOption {
-      type = path;
+    options.passwordaccess = mkOption {
+      type = enum [ "generate" "prompt" ];
+      visible = false;
+    };
+    options.passworddir = mkOption {
+      type = nullOr path;
+      default = null;
       example = "/home/alice/tsm-password";
       description = lib.mdDoc ''
         Directory that holds the TSM
         node's password information.
-        The value will be used for the
-        `passworddir`
-        directive in {file}`dsm.sys`.
       '';
     };
-    options.includeExclude = mkOption {
-      type = lines;
-      default = "";
+    options.inclexcl = mkOption {
+      type = coercedTo lines
+        (pkgs.writeText "inclexcl.dsm.sys")
+        (nullOr path);
+      default = null;
       example = ''
         exclude.dir     /nix/store
         include.encrypt /home/.../*
       '';
       description = lib.mdDoc ''
-        `include.*` and
-        `exclude.*` directives to be
-        used when sending files to the IBM TSM server.
-        The lines will be written into a file that the
-        `inclexcl`
-        directive in {file}`dsm.sys` points to.
-      '';
-    };
-    options.extraConfig = mkOption {
-      # TSM option keys are case insensitive;
-      # we have to ensure there are no keys that
-      # differ only by upper and lower case.
-      type = addCheck
-        (attrsOf (nullOr str))
-        (attrs: checkIUnique (attrNames attrs));
-      default = {};
-      example.compression = "yes";
-      example.passwordaccess = null;
-      description = lib.mdDoc ''
-        Additional key-value pairs for the server stanza.
-        Values must be strings, or `null`
-        for the key not to be used in the stanza
-        (e.g. to overrule values generated by other options).
-      '';
-    };
-    options.text = mkOption {
-      type = lines;
-      example = literalExpression
-        ''lib.modules.mkAfter "compression no"'';
-      description = lib.mdDoc ''
-        Additional text lines for the server stanza.
-        This option can be used if certion configuration keys
-        must be used multiple times or ordered in a certain way
-        as the {option}`extraConfig` option can't
-        control the order of lines in the resulting stanza.
-        Note that the `server`
-        line at the beginning of the stanza is
-        not part of this option's value.
+        Text lines with `include.*` and `exclude.*` directives
+        to be used when sending files to the IBM TSM server,
+        or an absolute path pointing to a file with such lines.
       '';
     };
-    options.stanza = mkOption {
-      type = str;
-      internal = true;
-      visible = false;
-      description = lib.mdDoc "Server stanza text generated from the options.";
-    };
-    config.name = mkDefault name;
-    # Client system-options file directives are explained here:
-    # https://www.ibm.com/docs/en/spectrum-protect/8.1.13?topic=commands-processing-options
-    config.extraConfig =
-      mapAttrs (lib.trivial.const mkDefault) (
-        {
-          commmethod = "v6tcpip";  # uses v4 or v6, based on dns lookup result
-          tcpserveraddress = config.server;
-          tcpport = builtins.toString config.port;
-          nodename = config.node;
-          passwordaccess = if config.genPasswd then "generate" else "prompt";
-          passworddir = ''"${config.passwdDir}"'';
-        } // optionalAttrs (config.includeExclude!="") {
-          inclexcl = ''"${pkgs.writeText "inclexcl.dsm.sys" config.includeExclude}"'';
-        }
-      );
-    config.text =
-      let
-        attrset = filterAttrs (k: v: v!=null) config.extraConfig;
-        mkLine = k: v: k + optionalString (v!="") "  ${v}";
-        lines = mapAttrsToList mkLine attrset;
-      in
-        concatLines lines;
-    config.stanza = ''
-      server  ${config.name}
-      ${config.text}
-    '';
+    config.commmethod = mkDefault "v6tcpip";  # uses v4 or v6, based on dns lookup result
+    config.passwordaccess = if config.genPasswd then "generate" else "prompt";
+    # XXX migration code for freeform settings, these can be removed in 2025:
+    options.warnings = optionsGlobal.warnings;
+    options.assertions = optionsGlobal.assertions;
+    imports = let inherit (lib.modules) mkRemovedOptionModule mkRenamedOptionModule; in [
+      (mkRemovedOptionModule [ "extraConfig" ] "Please just add options directly to the server attribute set, cf. the description of `programs.tsmClient.servers`.")
+      (mkRemovedOptionModule [ "text" ] "Please just add options directly to the server attribute set, cf. the description of `programs.tsmClient.servers`.")
+      (mkRenamedOptionModule [ "name" ] [ "servername" ])
+      (mkRenamedOptionModule [ "server" ] [ "tcpserveraddress" ])
+      (mkRenamedOptionModule [ "port" ] [ "tcpport" ])
+      (mkRenamedOptionModule [ "node" ] [ "nodename" ])
+      (mkRenamedOptionModule [ "passwdDir" ] [ "passworddir" ])
+      (mkRenamedOptionModule [ "includeExclude" ] [ "inclexcl" ])
+    ];
   };
 
   options.programs.tsmClient = {
     enable = mkEnableOption (lib.mdDoc ''
-      IBM Spectrum Protect (Tivoli Storage Manager, TSM)
+      IBM Storage Protect (Tivoli Storage Manager, TSM)
       client command line applications with a
       client system-options file "dsm.sys"
     '');
     servers = mkOption {
-      type = attrsOf (submodule [ serverOptions ]);
+      type = attrsOf (submodule serverOptions);
       default = {};
       example.mainTsmServer = {
-        server = "tsmserver.company.com";
-        node = "MY-TSM-NODE";
-        extraConfig.compression = "yes";
+        tcpserveraddress = "tsmserver.company.com";
+        nodename = "MY-TSM-NODE";
+        compression = "yes";
       };
       description = lib.mdDoc ''
         Server definitions ("stanzas")
         for the client system-options file.
+        The name of each entry will be used for
+        the internal `servername` by default.
+        Each attribute will be transformed into a line
+        with a key-value pair within the server's stanza.
+        Integers as values will be
+        canonically turned into strings.
+        The boolean value `true` will be turned
+        into a line with just the attribute's name.
+        The value `null` will not generate a line.
+        A list value generates an entry for
+        each element, according to the rules above.
       '';
     };
     defaultServername = mkOption {
@@ -222,45 +173,107 @@ let
         to add paths to the client system-options file.
       '';
     };
-    wrappedPackage = mkOption {
-      type = package;
-      readOnly = true;
-      description = lib.mdDoc ''
-        The TSM client derivation, wrapped with the path
-        to the client system-options file "dsm.sys".
-        This option is to provide the effective derivation
+    wrappedPackage = mkPackageOption pkgs "tsm-client" {
+      default = null;
+      extraDescription = ''
+        This option is to provide the effective derivation,
+        wrapped with the path to the
+        client system-options file "dsm.sys".
+        It should not be changed, but exists
         for other modules that want to call TSM executables.
       '';
-    };
+    } // { readOnly = true; };
   };
 
   cfg = config.programs.tsmClient;
+  servernames = map (s: s.servername) (attrValues cfg.servers);
 
-  assertions = [
-    {
-      assertion = checkIUnique (mapAttrsToList (k: v: v.name) cfg.servers);
+  assertions =
+    [
+      {
+        assertion = allUnique (map toLower servernames);
+        message = ''
+          TSM server names
+          (option `programs.tsmClient.servers`)
+          contain a duplicate name
+          (note that server names are case insensitive).
+        '';
+      }
+      {
+        assertion = (cfg.defaultServername!=null)->(elem cfg.defaultServername servernames);
+        message = ''
+          TSM default server name
+          `programs.tsmClient.defaultServername="${cfg.defaultServername}"`
+          not found in server names in
+          `programs.tsmClient.servers`.
+        '';
+      }
+    ] ++ (mapAttrsToList (name: serverCfg: {
+      assertion = all (key: null != match "[^[:space:]]+" key) (attrNames serverCfg);
       message = ''
-        TSM servernames contain duplicate name
-        (note that case doesn't matter!)
+        TSM server setting names in
+        `programs.tsmClient.servers.${name}.*`
+        contain spaces, but that's not allowed.
+      '';
+    }) cfg.servers) ++ (mapAttrsToList (name: serverCfg: {
+      assertion = allUnique (map toLower (attrNames serverCfg));
+      message = ''
+        TSM server setting names in
+        `programs.tsmClient.servers.${name}.*`
+        contain duplicate names
+        (note that setting names are case insensitive).
+      '';
+    }) cfg.servers)
+    # XXX migration code for freeform settings, this can be removed in 2025:
+    ++ (enrichMigrationInfos "assertions" (addText: { assertion, message }: { inherit assertion; message = addText message; }));
+
+  makeDsmSysLines = key: value:
+    # Turn a key-value pair from the server options attrset
+    # into zero (value==null), one (scalar value) or
+    # more (value is list) configuration stanza lines.
+    if isList value then map (makeDsmSysLines key) value else  # recurse into list
+    if value == null then [ ] else  # skip `null` value
+    [ ("  ${key}${
+      if value == true then "" else  # just output key if value is `true`
+      if isInt value then "  ${builtins.toString value}" else
+      if path.check value then "  \"${value}\"" else  # enclose path in ".."
+      if singleLineStr.check value then "  ${value}" else
+      throw "assertion failed: cannot convert type"  # should never happen
+    }") ];
+
+  makeDsmSysStanza = {servername, ... }@serverCfg:
+    let
+      # drop special values that should not go into server config block
+      attrs = removeAttrs serverCfg [ "servername" "genPasswd"
+        # XXX migration code for freeform settings, these can be removed in 2025:
+        "assertions" "warnings"
+        "extraConfig" "text"
+        "name" "server" "port" "node" "passwdDir" "includeExclude"
+      ];
+    in
+      ''
+        servername  ${servername}
+        ${concatLines (concatLists (mapAttrsToList makeDsmSysLines attrs))}
       '';
-    }
-    {
-      assertion = (cfg.defaultServername!=null)->(hasAttr cfg.defaultServername cfg.servers);
-      message = "TSM defaultServername not found in list of servers";
-    }
-  ];
 
   dsmSysText = ''
-    ****  IBM Spectrum Protect (Tivoli Storage Manager)
+    ****  IBM Storage Protect (Tivoli Storage Manager)
     ****  client system-options file "dsm.sys".
     ****  Do not edit!
     ****  This file is generated by NixOS configuration.
 
     ${optionalString (cfg.defaultServername!=null) "defaultserver  ${cfg.defaultServername}"}
 
-    ${concatLines (mapAttrsToList (k: v: v.stanza) cfg.servers)}
+    ${concatLines (map makeDsmSysStanza (attrValues cfg.servers))}
   '';
 
+  # XXX migration code for freeform settings, this can be removed in 2025:
+  enrichMigrationInfos = what: how: concatLists (
+    mapAttrsToList
+    (name: serverCfg: map (how (text: "In `programs.tsmClient.servers.${name}`: ${text}")) serverCfg."${what}")
+    cfg.servers
+  );
+
 in
 
 {
@@ -275,6 +288,8 @@ in
       dsmSysApi = dsmSysCli;
     };
     environment.systemPackages = [ cfg.wrappedPackage ];
+    # XXX migration code for freeform settings, this can be removed in 2025:
+    warnings = enrichMigrationInfos "warnings" (addText: addText);
   };
 
   meta.maintainers = [ lib.maintainers.yarny ];
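A sketch of the new freeform server settings, mirroring the example and migration notes above:

```nix
{ ... }: {
  programs.tsmClient = {
    enable = true;
    defaultServername = "mainTsmServer";
    servers.mainTsmServer = {
      tcpserveraddress = "tsmserver.company.com";   # was `server`
      nodename = "MY-TSM-NODE";                     # was `node`
      compression = "yes";                          # freeform key/value pair
      genPasswd = true;                             # emits "passwordaccess  generate"
    };
  };
}
```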
diff --git a/nixos/modules/programs/wayland/labwc.nix b/nixos/modules/programs/wayland/labwc.nix
new file mode 100644
index 0000000000000..d0806c3aa5d0e
--- /dev/null
+++ b/nixos/modules/programs/wayland/labwc.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.labwc;
+in
+{
+  meta.maintainers = with lib.maintainers; [ AndersonTorres ];
+
+  options.programs.labwc = {
+    enable = lib.mkEnableOption (lib.mdDoc "labwc");
+    package = lib.mkPackageOption pkgs "labwc" { };
+  };
+
+  config = lib.mkIf cfg.enable (lib.mkMerge [
+    {
+      environment.systemPackages = [ cfg.package ];
+
+      xdg.portal.config.wlroots.default = lib.mkDefault [ "wlr" "gtk" ];
+
+      # To make a labwc session available for certain DMs like SDDM
+      services.xserver.displayManager.sessionPackages = [ cfg.package ];
+    }
+    (import ./wayland-session.nix { inherit lib pkgs; })
+  ]);
+}
diff --git a/nixos/modules/programs/wayland/river.nix b/nixos/modules/programs/wayland/river.nix
index ec59bd50a0150..995129b9710ae 100644
--- a/nixos/modules/programs/wayland/river.nix
+++ b/nixos/modules/programs/wayland/river.nix
@@ -48,6 +48,9 @@ in {
 
         # To make a river session available if a display manager like SDDM is enabled:
         services.xserver.displayManager.sessionPackages = optionals (cfg.package != null) [ cfg.package ];
+
+        # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1050913
+        xdg.portal.config.river.default = mkDefault [ "wlr" "gtk" ];
       }
       (import ./wayland-session.nix { inherit lib pkgs; })
     ]);
diff --git a/nixos/modules/programs/wayland/sway.nix b/nixos/modules/programs/wayland/sway.nix
index f96c833856dbb..57ee629b28810 100644
--- a/nixos/modules/programs/wayland/sway.nix
+++ b/nixos/modules/programs/wayland/sway.nix
@@ -26,13 +26,28 @@ let
     };
   };
 
-  defaultSwayPackage = pkgs.sway.override {
-    extraSessionCommands = cfg.extraSessionCommands;
-    extraOptions = cfg.extraOptions;
-    withBaseWrapper = cfg.wrapperFeatures.base;
-    withGtkWrapper = cfg.wrapperFeatures.gtk;
-    isNixOS = true;
-  };
+  genFinalPackage = pkg:
+    let
+      expectedArgs = lib.naturalSort [
+        "extraSessionCommands"
+        "extraOptions"
+        "withBaseWrapper"
+        "withGtkWrapper"
+        "isNixOS"
+      ];
+      existedArgs = with lib;
+        naturalSort
+        (intersectLists expectedArgs (attrNames (functionArgs pkg.override)));
+    in if existedArgs != expectedArgs then
+      pkg
+    else
+      pkg.override {
+        extraSessionCommands = cfg.extraSessionCommands;
+        extraOptions = cfg.extraOptions;
+        withBaseWrapper = cfg.wrapperFeatures.base;
+        withGtkWrapper = cfg.wrapperFeatures.gtk;
+        isNixOS = true;
+      };
 in {
   options.programs.sway = {
     enable = mkEnableOption (lib.mdDoc ''
@@ -44,14 +59,16 @@ in {
 
     package = mkOption {
       type = with types; nullOr package;
-      default = defaultSwayPackage;
+      default = pkgs.sway;
+      apply = p: if p == null then null else genFinalPackage p;
       defaultText = literalExpression "pkgs.sway";
       description = lib.mdDoc ''
-        Sway package to use. Will override the options
-        'wrapperFeatures', 'extraSessionCommands', and 'extraOptions'.
-        Set to `null` to not add any Sway package to your
-        path. This should be done if you want to use the Home Manager Sway
-        module to install Sway.
+        Sway package to use. If the package does not contain the override arguments
+        `extraSessionCommands`, `extraOptions`, `withBaseWrapper`, `withGtkWrapper`,
+        `isNixOS`, then the module options {option}`wrapperFeatures`,
+        {option}`extraSessionCommands` and {option}`extraOptions` will have no effect.
+        Set to `null` to not add any Sway package to your path. This should be done if
+        you want to use the Home Manager Sway module to install Sway.
       '';
     };
 
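A sketch of how the wrapper options now interact with a custom `package` (any package whose `.override` lacks the listed arguments is used as-is):

```nix
{ pkgs, ... }: {
  programs.sway = {
    enable = true;
    wrapperFeatures.gtk = true;   # applied through the module's `apply` wrapper
    package = pkgs.sway;          # default; a fork whose .override does not
                                  # accept the expected arguments is left
                                  # unwrapped, so these options have no effect
  };
}
```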
diff --git a/nixos/modules/programs/winbox.nix b/nixos/modules/programs/winbox.nix
new file mode 100644
index 0000000000000..1337f57839b02
--- /dev/null
+++ b/nixos/modules/programs/winbox.nix
@@ -0,0 +1,24 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg  = config.programs.winbox;
+in
+{
+  options.programs.winbox = {
+    enable = lib.mkEnableOption ("MikroTik Winbox");
+    package = lib.mkPackageOption pkgs "winbox" { };
+
+    openFirewall = lib.mkOption {
+      description = ''
+        Whether to open ports for the MikroTik Neighbor Discovery protocol. Required for Winbox neighbor discovery.
+      '';
+      default = false;
+      type = lib.types.bool;
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    networking.firewall.allowedUDPPorts = lib.optionals cfg.openFirewall [ 5678 ];
+  };
+}
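A minimal sketch of the new winbox module:

```nix
{ ... }: {
  programs.winbox = {
    enable = true;
    openFirewall = true;   # opens UDP 5678 for MikroTik Neighbor Discovery
  };
}
```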
diff --git a/nixos/modules/security/acme/default.md b/nixos/modules/security/acme/default.md
index 31548ad181a73..38fbfbf0caece 100644
--- a/nixos/modules/security/acme/default.md
+++ b/nixos/modules/security/acme/default.md
@@ -45,7 +45,7 @@ placeholder certificates in place of the real ACME certs. The placeholder
 certs are overwritten when the ACME certs arrive. For
 `foo.example.com` the config would look like this:
 
-```
+```nix
 security.acme.acceptTerms = true;
 security.acme.defaults.email = "admin+acme@example.com";
 services.nginx = {
@@ -72,7 +72,7 @@ services.nginx = {
       };
     };
   };
-}
+};
 ```
 
 ## Using ACME certificates in Apache/httpd {#module-security-acme-httpd}
@@ -88,7 +88,7 @@ This example uses a vhost called `certs.example.com`, with
 the intent that you will generate certs for all your vhosts and redirect
 everyone to HTTPS.
 
-```
+```nix
 security.acme.acceptTerms = true;
 security.acme.defaults.email = "admin+acme@example.com";
 
@@ -111,7 +111,7 @@ services.nginx = {
       };
     };
   };
-}
+};
 # Alternative config for Apache
 users.users.wwwrun.extraGroups = [ "acme" ];
 services.httpd = {
@@ -131,12 +131,12 @@ services.httpd = {
       '';
     };
   };
-}
+};
 ```
 
 Now you need to configure ACME to generate a certificate.
 
-```
+```nix
 security.acme.certs."foo.example.com" = {
   webroot = "/var/lib/acme/.challenges";
   email = "foo@example.com";
@@ -167,7 +167,7 @@ see the [lego docs](https://go-acme.github.io/lego/dns/)
 for provider/server specific configuration values. For the sake of these
 docs, we will provide a fully self-hosted example using bind.
 
-```
+```nix
 services.bind = {
   enable = true;
   extraConfig = ''
@@ -181,7 +181,7 @@ services.bind = {
       extraConfig = "allow-update { key rfc2136key.example.com.; };";
     }
   ];
-}
+};
 
 # Now we can configure ACME
 security.acme.acceptTerms = true;
@@ -199,7 +199,7 @@ The {file}`dnskeys.conf` and {file}`certs.secret`
 must be kept secure and thus you should not keep their contents in your
 Nix config. Instead, generate them one time with a systemd service:
 
-```
+```nix
 systemd.services.dns-rfc2136-conf = {
   requiredBy = ["acme-example.com.service" "bind.service"];
   before = ["acme-example.com.service" "bind.service"];
@@ -250,7 +250,7 @@ first, however instead of setting the options for one certificate
 you will set them as defaults
 (e.g. [](#opt-security.acme.defaults.dnsProvider)).
 
-```
+```nix
 # Configure ACME appropriately
 security.acme.acceptTerms = true;
 security.acme.defaults.email = "admin+acme@example.com";
@@ -271,7 +271,7 @@ services.nginx = {
       acmeRoot = null;
     };
   };
-}
+};
 ```
 
 And that's it! Next time your configuration is rebuilt, or when
@@ -287,7 +287,7 @@ There is no way to change the user the ACME module uses (it will always be
 Below is an example configuration for OpenSMTPD, but this pattern
 can be applied to any service.
 
-```
+```nix
 # Configure ACME however you like (DNS or HTTP validation), adding
 # the following configuration for the relevant certificate.
 # Note: You cannot use `systemctl reload` here as that would mean
@@ -340,7 +340,7 @@ to be regenerated. In this scenario lego will produce the error `JWS verificatio
 The solution is to simply delete the associated accounts file and
 re-run the affected service(s).
 
-```
+```shell
 # Find the accounts folder for the certificate
 systemctl cat acme-example.com.service | grep -Po 'accounts/[^:]*'
 export accountdir="$(!!)"
diff --git a/nixos/modules/security/acme/default.nix b/nixos/modules/security/acme/default.nix
index 7cc302969fb6d..40d9c487996b5 100644
--- a/nixos/modules/security/acme/default.nix
+++ b/nixos/modules/security/acme/default.nix
@@ -897,10 +897,10 @@ in {
         certs = attrValues cfg.certs;
       in [
         {
-          assertion = cfg.email != null || all (certOpts: certOpts.email != null) certs;
+          assertion = cfg.defaults.email != null || all (certOpts: certOpts.email != null) certs;
           message = ''
             You must define `security.acme.certs.<name>.email` or
-            `security.acme.email` to register with the CA. Note that using
+            `security.acme.defaults.email` to register with the CA. Note that using
             many different addresses for certs may trigger account rate limits.
           '';
         }
diff --git a/nixos/modules/security/apparmor.nix b/nixos/modules/security/apparmor.nix
index 24b48338ed772..ea1af6c6e2f29 100644
--- a/nixos/modules/security/apparmor.nix
+++ b/nixos/modules/security/apparmor.nix
@@ -164,7 +164,8 @@ in
         "local-fs.target"
         "systemd-journald-audit.socket"
       ];
-      before = [ "sysinit.target" ];
+      before = [ "sysinit.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
       wantedBy = [ "multi-user.target" ];
       unitConfig = {
         Description="Load AppArmor policies";
diff --git a/nixos/modules/security/auditd.nix b/nixos/modules/security/auditd.nix
index db4b2701ee2e9..253ee1d4dd0e5 100644
--- a/nixos/modules/security/auditd.nix
+++ b/nixos/modules/security/auditd.nix
@@ -13,6 +13,8 @@ with lib;
     systemd.services.auditd = {
       description = "Linux Audit daemon";
       wantedBy = [ "basic.target" ];
+      before = [ "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
 
       unitConfig = {
         ConditionVirtualization = "!container";
@@ -23,7 +25,7 @@ with lib;
       path = [ pkgs.audit ];
 
       serviceConfig = {
-        ExecStartPre="${pkgs.coreutils}/bin/mkdir -p /var/log/audit";
+        ExecStartPre = "${pkgs.coreutils}/bin/mkdir -p /var/log/audit";
         ExecStart = "${pkgs.audit}/bin/auditd -l -n -s nochange";
       };
     };
diff --git a/nixos/modules/security/duosec.nix b/nixos/modules/security/duosec.nix
index 2a855a77e3a39..ef76bfeb6d66a 100644
--- a/nixos/modules/security/duosec.nix
+++ b/nixos/modules/security/duosec.nix
@@ -195,7 +195,8 @@ in
 
     systemd.services.login-duo = lib.mkIf cfg.ssh.enable {
       wantedBy = [ "sysinit.target" ];
-      before = [ "sysinit.target" ];
+      before = [ "sysinit.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
       unitConfig.DefaultDependencies = false;
       script = ''
         if test -f "${cfg.secretKeyFile}"; then
@@ -216,7 +217,8 @@ in
 
     systemd.services.pam-duo = lib.mkIf cfg.ssh.enable {
       wantedBy = [ "sysinit.target" ];
-      before = [ "sysinit.target" ];
+      before = [ "sysinit.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
       unitConfig.DefaultDependencies = false;
       script = ''
         if test -f "${cfg.secretKeyFile}"; then
diff --git a/nixos/modules/security/ipa.nix b/nixos/modules/security/ipa.nix
index 69a670cd5e4a3..3bf8b11f86261 100644
--- a/nixos/modules/security/ipa.nix
+++ b/nixos/modules/security/ipa.nix
@@ -117,8 +117,8 @@ in {
   config = mkIf cfg.enable {
     assertions = [
       {
-        assertion = !config.krb5.enable;
-        message = "krb5 must be disabled through `krb5.enable` for FreeIPA integration to work.";
+        assertion = !config.security.krb5.enable;
+        message = "krb5 must be disabled through `security.krb5.enable` for FreeIPA integration to work.";
       }
       {
         assertion = !config.users.ldap.enable;
@@ -181,25 +181,33 @@ in {
       '';
     };
 
-    system.activationScripts.ipa = stringAfter ["etc"] ''
-      # libcurl requires a hard copy of the certificate
-      if ! ${pkgs.diffutils}/bin/diff ${cfg.certificate} /etc/ipa/ca.crt > /dev/null 2>&1; then
-        rm -f /etc/ipa/ca.crt
-        cp ${cfg.certificate} /etc/ipa/ca.crt
-      fi
-
-      if [ ! -f /etc/krb5.keytab ]; then
-        cat <<EOF
-
-          In order to complete FreeIPA integration, please join the domain by completing the following steps:
-          1. Authenticate as an IPA user authorized to join new hosts, e.g. kinit admin@${cfg.realm}
-          2. Join the domain and obtain the keytab file: ipa-join
-          3. Install the keytab file: sudo install -m 600 krb5.keytab /etc/
-          4. Restart sssd systemd service: sudo systemctl restart sssd
-
-      EOF
-      fi
-    '';
+    systemd.services."ipa-activation" = {
+      wantedBy = [ "sysinit.target" ];
+      before = [ "sysinit.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
+      unitConfig.DefaultDependencies = false;
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      script = ''
+        # libcurl requires a hard copy of the certificate
+        if ! ${pkgs.diffutils}/bin/diff ${cfg.certificate} /etc/ipa/ca.crt > /dev/null 2>&1; then
+          rm -f /etc/ipa/ca.crt
+          cp ${cfg.certificate} /etc/ipa/ca.crt
+        fi
+
+        if [ ! -f /etc/krb5.keytab ]; then
+          cat <<EOF
+
+            In order to complete FreeIPA integration, please join the domain by completing the following steps:
+            1. Authenticate as an IPA user authorized to join new hosts, e.g. kinit admin@${cfg.realm}
+            2. Join the domain and obtain the keytab file: ipa-join
+            3. Install the keytab file: sudo install -m 600 krb5.keytab /etc/
+            4. Restart sssd systemd service: sudo systemctl restart sssd
+
+        EOF
+        fi
+      '';
+    };
 
     services.sssd.config = ''
       [domain/${cfg.domain}]
diff --git a/nixos/modules/security/krb5/default.nix b/nixos/modules/security/krb5/default.nix
new file mode 100644
index 0000000000000..5921982f954ca
--- /dev/null
+++ b/nixos/modules/security/krb5/default.nix
@@ -0,0 +1,90 @@
+{ config, lib, pkgs, ... }:
+let
+  inherit (lib) mdDoc mkIf mkOption mkPackageOption mkRemovedOptionModule;
+  inherit (lib.types) bool;
+
+  mkRemovedOptionModule' = name: reason: mkRemovedOptionModule ["krb5" name] reason;
+  mkRemovedOptionModuleCfg = name: mkRemovedOptionModule' name ''
+    The option `krb5.${name}' has been removed. Use
+    `security.krb5.settings.${name}' for structured configuration.
+  '';
+
+  cfg = config.security.krb5;
+  format = import ./krb5-conf-format.nix { inherit pkgs lib; } { };
+in {
+  imports = [
+    (mkRemovedOptionModuleCfg "libdefaults")
+    (mkRemovedOptionModuleCfg "realms")
+    (mkRemovedOptionModuleCfg "domain_realm")
+    (mkRemovedOptionModuleCfg "capaths")
+    (mkRemovedOptionModuleCfg "appdefaults")
+    (mkRemovedOptionModuleCfg "plugins")
+    (mkRemovedOptionModuleCfg "config")
+    (mkRemovedOptionModuleCfg "extraConfig")
+    (mkRemovedOptionModule' "kerberos" ''
+      The option `krb5.kerberos' has been moved to `security.krb5.package'.
+    '')
+  ];
+
+  options = {
+    security.krb5 = {
+      enable = mkOption {
+        default = false;
+        description = mdDoc "Enable and configure Kerberos utilities";
+        type = bool;
+      };
+
+      package = mkPackageOption pkgs "krb5" {
+        example = "heimdal";
+      };
+
+      settings = mkOption {
+        default = { };
+        type = format.type;
+        description = mdDoc ''
+          Structured contents of the {file}`krb5.conf` file. See
+          {manpage}`krb5.conf(5)` for details about configuration.
+        '';
+        example = {
+          include = [ "/run/secrets/secret-krb5.conf" ];
+          includedir = [ "/run/secrets/secret-krb5.conf.d" ];
+
+          libdefaults = {
+            default_realm = "ATHENA.MIT.EDU";
+          };
+
+          realms = {
+            "ATHENA.MIT.EDU" = {
+              admin_server = "athena.mit.edu";
+              kdc = [
+                "athena01.mit.edu"
+                "athena02.mit.edu"
+              ];
+            };
+          };
+
+          domain_realm = {
+            "mit.edu" = "ATHENA.MIT.EDU";
+          };
+
+          logging = {
+            kdc = "SYSLOG:NOTICE";
+            admin_server = "SYSLOG:NOTICE";
+            default = "SYSLOG:NOTICE";
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment = {
+      systemPackages = [ cfg.package ];
+      etc."krb5.conf".source = format.generate "krb5.conf" cfg.settings;
+    };
+  };
+
+  meta.maintainers = builtins.attrValues {
+    inherit (lib.maintainers) dblsaiko h7x4;
+  };
+}
diff --git a/nixos/modules/security/krb5/krb5-conf-format.nix b/nixos/modules/security/krb5/krb5-conf-format.nix
new file mode 100644
index 0000000000000..d01e47a40be05
--- /dev/null
+++ b/nixos/modules/security/krb5/krb5-conf-format.nix
@@ -0,0 +1,88 @@
+{ pkgs, lib, ... }:
+
+# Based on
+# - https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
+# - https://manpages.debian.org/unstable/heimdal-docs/krb5.conf.5heimdal.en.html
+
+let
+  inherit (lib) boolToString concatMapStringsSep concatStringsSep filter
+    isAttrs isBool isList mapAttrsToList mdDoc mkOption singleton splitString;
+  inherit (lib.types) attrsOf bool coercedTo either int listOf oneOf path
+    str submodule;
+in
+{ }: {
+  type = let
+    section = attrsOf relation;
+    relation = either (attrsOf value) value;
+    value = either (listOf atom) atom;
+    atom = oneOf [int str bool];
+  in submodule {
+    freeformType = attrsOf section;
+    options = {
+      include = mkOption {
+        default = [ ];
+        description = mdDoc ''
+          Files to include in the Kerberos configuration.
+        '';
+        type = coercedTo path singleton (listOf path);
+      };
+      includedir = mkOption {
+        default = [ ];
+        description = mdDoc ''
+          Directories containing files to include in the Kerberos configuration.
+        '';
+        type = coercedTo path singleton (listOf path);
+      };
+      module = mkOption {
+        default = [ ];
+        description = mdDoc ''
+          Modules to obtain Kerberos configuration from.
+        '';
+        type = coercedTo path singleton (listOf path);
+      };
+    };
+  };
+
+  generate = let
+    indent = str: concatMapStringsSep "\n" (line: "  " + line) (splitString "\n" str);
+
+    formatToplevel = args @ {
+      include ? [ ],
+      includedir ? [ ],
+      module ? [ ],
+      ...
+    }: let
+      sections = removeAttrs args [ "include" "includedir" "module" ];
+    in concatStringsSep "\n" (filter (x: x != "") [
+      (concatStringsSep "\n" (mapAttrsToList formatSection sections))
+      (concatMapStringsSep "\n" (m: "module ${m}") module)
+      (concatMapStringsSep "\n" (i: "include ${i}") include)
+      (concatMapStringsSep "\n" (i: "includedir ${i}") includedir)
+    ]);
+
+    formatSection = name: section: ''
+      [${name}]
+      ${indent (concatStringsSep "\n" (mapAttrsToList formatRelation section))}
+    '';
+
+    formatRelation = name: relation:
+      if isAttrs relation
+      then ''
+        ${name} = {
+        ${indent (concatStringsSep "\n" (mapAttrsToList formatValue relation))}
+        }''
+      else formatValue name relation;
+
+    formatValue = name: value:
+      if isList value
+      then concatMapStringsSep "\n" (formatAtom name) value
+      else formatAtom name value;
+
+    formatAtom = name: atom: let
+      v = if isBool atom then boolToString atom else toString atom;
+    in "${name} = ${v}";
+  in
+    name: value: pkgs.writeText name ''
+      ${formatToplevel value}
+    '';
+}
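For illustration, a sketch of how the new format renders a small settings value (assuming the `security.krb5.settings` option from the module above; the include path is hypothetical and the rendered text is approximate):

```nix
{
  security.krb5 = {
    enable = true;
    settings = {
      include = [ "/etc/krb5.conf.d/extra.conf" ];
      libdefaults.default_realm = "ATHENA.MIT.EDU";
      realms."ATHENA.MIT.EDU".kdc = [ "athena01.mit.edu" "athena02.mit.edu" ];
    };
  };
  # Rendered /etc/krb5.conf (approximately): sections first, include lines last.
  #   [libdefaults]
  #     default_realm = ATHENA.MIT.EDU
  #
  #   [realms]
  #     ATHENA.MIT.EDU = {
  #       kdc = athena01.mit.edu
  #       kdc = athena02.mit.edu
  #     }
  #
  #   include /etc/krb5.conf.d/extra.conf
}
```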
diff --git a/nixos/modules/security/pam.nix b/nixos/modules/security/pam.nix
index c99615d5a6362..111be7057afc0 100644
--- a/nixos/modules/security/pam.nix
+++ b/nixos/modules/security/pam.nix
@@ -654,8 +654,8 @@ let
           { name = "mysql"; enable = cfg.mysqlAuth; control = "sufficient"; modulePath = "${pkgs.pam_mysql}/lib/security/pam_mysql.so"; settings = {
             config_file = "/etc/security/pam_mysql.conf";
           }; }
-          { name = "ssh_agent_auth"; enable = config.security.pam.enableSSHAgentAuth && cfg.sshAgentAuth; control = "sufficient"; modulePath = "${pkgs.pam_ssh_agent_auth}/libexec/pam_ssh_agent_auth.so"; settings = {
-            file = lib.concatStringsSep ":" config.services.openssh.authorizedKeysFiles;
+          { name = "ssh_agent_auth"; enable = config.security.pam.sshAgentAuth.enable && cfg.sshAgentAuth; control = "sufficient"; modulePath = "${pkgs.pam_ssh_agent_auth}/libexec/pam_ssh_agent_auth.so"; settings = {
+            file = lib.concatStringsSep ":" config.security.pam.sshAgentAuth.authorizedKeysFiles;
           }; }
           (let p11 = config.security.pam.p11; in { name = "p11"; enable = cfg.p11Auth; control = p11.control; modulePath = "${pkgs.pam_p11}/lib/security/pam_p11.so"; args = [
             "${pkgs.opensc}/lib/opensc-pkcs11.so"
@@ -943,7 +943,7 @@ let
       value.source = pkgs.writeText "${name}.pam" service.text;
     };
 
-  optionalSudoConfigForSSHAgentAuth = optionalString config.security.pam.enableSSHAgentAuth ''
+  optionalSudoConfigForSSHAgentAuth = optionalString config.security.pam.sshAgentAuth.enable ''
     # Keep SSH_AUTH_SOCK so that pam_ssh_agent_auth.so can do its magic.
     Defaults env_keep+=SSH_AUTH_SOCK
   '';
@@ -956,6 +956,7 @@ in
 
   imports = [
     (mkRenamedOptionModule [ "security" "pam" "enableU2F" ] [ "security" "pam" "u2f" "enable" ])
+    (mkRenamedOptionModule [ "security" "pam" "enableSSHAgentAuth" ] [ "security" "pam" "sshAgentAuth" "enable" ])
   ];
 
   ###### interface
@@ -1025,16 +1026,34 @@ in
       '';
     };
 
-    security.pam.enableSSHAgentAuth = mkOption {
-      type = types.bool;
-      default = false;
-      description =
-        lib.mdDoc ''
-          Enable sudo logins if the user's SSH agent provides a key
-          present in {file}`~/.ssh/authorized_keys`.
-          This allows machines to exclusively use SSH keys instead of
-          passwords.
+    security.pam.sshAgentAuth = {
+      enable = mkEnableOption ''
+        authenticating using a signature performed by the ssh-agent.
+        This allows using SSH keys exclusively, instead of passwords, for instance on remote machines
+      '';
+
+      authorizedKeysFiles = mkOption {
+        type = with types; listOf str;
+        description = ''
+          A list of paths to files in OpenSSH's `authorized_keys` format, containing
+          the keys that will be trusted by the `pam_ssh_agent_auth` module.
+
+          The following patterns are expanded when interpreting the path:
+          - `%f` and `%H` respectively expand to the fully-qualified and short hostname;
+          - `%u` expands to the username;
+          - `~` or `%h` expands to the user's home directory.
+
+          ::: {.note}
+          Specifying user-writeable files here results in an insecure configuration: a malicious process
+          can then edit such an authorized_keys file and bypass the ssh-agent-based authentication.
+
+          See [issue #31611](https://github.com/NixOS/nixpkgs/issues/31611)
+          :::
         '';
+        example = [ "/etc/ssh/authorized_keys.d/%u" ];
+        default = config.services.openssh.authorizedKeysFiles;
+        defaultText = literalExpression "config.services.openssh.authorizedKeysFiles";
+      };
     };
 
     security.pam.enableOTPW = mkEnableOption (lib.mdDoc "the OTPW (one-time password) PAM module");
@@ -1067,8 +1086,8 @@ in
 
     security.pam.krb5 = {
       enable = mkOption {
-        default = config.krb5.enable;
-        defaultText = literalExpression "config.krb5.enable";
+        default = config.security.krb5.enable;
+        defaultText = literalExpression "config.security.krb5.enable";
         type = types.bool;
         description = lib.mdDoc ''
           Enables Kerberos PAM modules (`pam-krb5`,
@@ -1076,7 +1095,7 @@ in
 
           If set, users can authenticate with their Kerberos password.
           This requires a valid Kerberos configuration
-          (`config.krb5.enable` should be set to
+          (`config.security.krb5.enable` should be set to
           `true`).
 
           Note that the Kerberos PAM modules are not necessary when using SSS
@@ -1456,8 +1475,25 @@ in
           `security.pam.zfs.enable` requires enabling ZFS (`boot.zfs.enabled` or `boot.zfs.enableUnstable`).
         '';
       }
+      {
+        assertion = with config.security.pam.sshAgentAuth; enable -> authorizedKeysFiles != [];
+        message = ''
+          `security.pam.sshAgentAuth.enable` requires `services.openssh.authorizedKeysFiles` to be a non-empty list.
+          Did you forget to set `services.openssh.enable`?
+        '';
+      }
     ];
 
+    warnings = optional
+      (with lib; with config.security.pam.sshAgentAuth;
+        enable && any (s: hasPrefix "%h" s || hasPrefix "~" s) authorizedKeysFiles)
+      ''config.security.pam.sshAgentAuth.authorizedKeysFiles contains files in the user's home directory.
+
+        Specifying user-writeable files there results in an insecure configuration:
+        a malicious process can then edit such an authorized_keys file and bypass the ssh-agent-based authentication.
+        See https://github.com/NixOS/nixpkgs/issues/31611
+      '';
+
     environment.systemPackages =
       # Include the PAM modules in the system path mostly for the manpages.
       [ pkgs.pam ]
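
For illustration, a minimal configuration exercising the renamed option and the new `authorizedKeysFiles` list could look like the following sketch (the key-file path is the option's own example; the rest is hypothetical):

```nix
{
  security.pam.sshAgentAuth = {
    # Replaces the old security.pam.enableSSHAgentAuth.
    enable = true;
    # Root-owned files only; per-user paths under $HOME would trigger
    # the new warning about user-writeable key files.
    authorizedKeysFiles = [ "/etc/ssh/authorized_keys.d/%u" ];
  };
}
```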
diff --git a/nixos/modules/security/sudo-rs.nix b/nixos/modules/security/sudo-rs.nix
index f991675827efb..b4376562c34d7 100644
--- a/nixos/modules/security/sudo-rs.nix
+++ b/nixos/modules/security/sudo-rs.nix
@@ -6,8 +6,6 @@ let
 
   cfg = config.security.sudo-rs;
 
-  inherit (config.security.pam) enableSSHAgentAuth;
-
   toUserString = user: if (isInt user) then "#${toString user}" else "${user}";
   toGroupString = group: if (isInt group) then "%#${toString group}" else "%${group}";
 
diff --git a/nixos/modules/security/sudo.nix b/nixos/modules/security/sudo.nix
index 3dd5d2e525d91..6aa9445eab65e 100644
--- a/nixos/modules/security/sudo.nix
+++ b/nixos/modules/security/sudo.nix
@@ -6,8 +6,6 @@ let
 
   cfg = config.security.sudo;
 
-  inherit (config.security.pam) enableSSHAgentAuth;
-
   toUserString = user: if (isInt user) then "#${toString user}" else "${user}";
   toGroupString = group: if (isInt group) then "%#${toString group}" else "%${group}";
 
diff --git a/nixos/modules/security/wrappers/default.nix b/nixos/modules/security/wrappers/default.nix
index 250f9775be14d..a298686b34e97 100644
--- a/nixos/modules/security/wrappers/default.nix
+++ b/nixos/modules/security/wrappers/default.nix
@@ -278,7 +278,9 @@ in
     systemd.services.suid-sgid-wrappers = {
       description = "Create SUID/SGID Wrappers";
       wantedBy = [ "sysinit.target" ];
-      before = [ "sysinit.target" ];
+      before = [ "sysinit.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
+      after = [ "systemd-sysusers.service" ];
       unitConfig.DefaultDependencies = false;
       unitConfig.RequiresMountsFor = [ "/nix/store" "/run/wrappers" ];
       serviceConfig.Type = "oneshot";
diff --git a/nixos/modules/security/wrappers/wrapper.nix b/nixos/modules/security/wrappers/wrapper.nix
index 27d46c630af54..ca4b27bff1801 100644
--- a/nixos/modules/security/wrappers/wrapper.nix
+++ b/nixos/modules/security/wrappers/wrapper.nix
@@ -1,8 +1,8 @@
 { stdenv, unsecvars, linuxHeaders, sourceProg, debug ? false }:
 # For testing:
-# $ nix-build -E 'with import <nixpkgs> {}; pkgs.callPackage ./wrapper.nix { parentWrapperDir = "/run/wrappers"; debug = true; }'
+# $ nix-build -E 'with import <nixpkgs> {}; pkgs.callPackage ./wrapper.nix { sourceProg = "${pkgs.hello}/bin/hello"; debug = true; }'
 stdenv.mkDerivation {
-  name = "security-wrapper";
+  name = "security-wrapper-${baseNameOf sourceProg}";
   buildInputs = [ linuxHeaders ];
   dontUnpack = true;
   CFLAGS = [
diff --git a/nixos/modules/services/admin/pgadmin.nix b/nixos/modules/services/admin/pgadmin.nix
index 390c80d1a2d42..20b6b6670d9cc 100644
--- a/nixos/modules/services/admin/pgadmin.nix
+++ b/nixos/modules/services/admin/pgadmin.nix
@@ -3,7 +3,6 @@
 with lib;
 
 let
-  pkg = pkgs.pgadmin4;
   cfg = config.services.pgadmin;
 
   _base = with types; [ int bool str ];
@@ -36,6 +35,8 @@ in
       default = 5050;
     };
 
+    package = mkPackageOptionMD pkgs "pgadmin4" { };
+
     initialEmail = mkOption {
       description = lib.mdDoc "Initial email for the pgAdmin account";
       type = types.str;
@@ -43,12 +44,19 @@ in
 
     initialPasswordFile = mkOption {
       description = lib.mdDoc ''
-        Initial password file for the pgAdmin account.
+        Initial password file for the pgAdmin account. The minimum length is 6 characters by default;
+        see `services.pgadmin.minimumPasswordLength`.
         NOTE: Should be string not a store path, to prevent the password from being world readable
       '';
       type = types.path;
     };
 
+    minimumPasswordLength = mkOption {
+      description = lib.mdDoc "Minimum length of the password";
+      type = types.int;
+      default = 6;
+    };
+
     emailServer = {
       enable = mkOption {
         description = lib.mdDoc ''
@@ -115,7 +123,9 @@ in
 
     services.pgadmin.settings = {
       DEFAULT_SERVER_PORT = cfg.port;
+      PASSWORD_LENGTH_MIN = cfg.minimumPasswordLength;
       SERVER_MODE = true;
+      UPGRADE_CHECK_ENABLED = false;
     } // (optionalAttrs cfg.openFirewall {
       DEFAULT_SERVER = mkDefault "::";
     }) // (optionalAttrs cfg.emailServer.enable {
@@ -139,6 +149,14 @@ in
 
       preStart = ''
         # NOTE: this is idempotent (aka running it twice has no effect)
+        # Check here for password length to prevent pgadmin from starting
+        # and presenting a hard-to-find error message
+        # see https://github.com/NixOS/nixpkgs/issues/270624
+        PW_LENGTH=$(wc -m < ${escapeShellArg cfg.initialPasswordFile})
+        if [ $PW_LENGTH -lt ${toString cfg.minimumPasswordLength} ]; then
+            echo "Password must be at least ${toString cfg.minimumPasswordLength} characters long"
+            exit 1
+        fi
         (
           # Email address:
           echo ${escapeShellArg cfg.initialEmail}
@@ -150,7 +168,7 @@ in
           echo "$PW"
           # Retype password:
           echo "$PW"
-        ) | ${pkg}/bin/pgadmin4-setup
+        ) | ${cfg.package}/bin/pgadmin4-cli setup-db
       '';
 
       restartTriggers = [
@@ -162,7 +180,7 @@ in
         DynamicUser = true;
         LogsDirectory = "pgadmin";
         StateDirectory = "pgadmin";
-        ExecStart = "${pkg}/bin/pgadmin4";
+        ExecStart = "${cfg.package}/bin/pgadmin4";
       };
     };
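
The new `package` and `minimumPasswordLength` options added above could be used roughly as follows (values and paths are illustrative, not part of this change):

```nix
{ pkgs, ... }:
{
  services.pgadmin = {
    enable = true;
    package = pkgs.pgadmin4;           # now overridable
    minimumPasswordLength = 12;        # checked in preStart before pgAdmin boots
    initialEmail = "admin@example.org";
    # A plain file outside the Nix store, so the password is not world-readable.
    initialPasswordFile = "/run/secrets/pgadmin-password";
  };
}
```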
 
diff --git a/nixos/modules/services/audio/gmediarender.nix b/nixos/modules/services/audio/gmediarender.nix
index 545f2b1a2b60d..a4cb89098db7a 100644
--- a/nixos/modules/services/audio/gmediarender.nix
+++ b/nixos/modules/services/audio/gmediarender.nix
@@ -64,6 +64,7 @@ in
   config = mkIf cfg.enable {
     systemd = {
       services.gmediarender = {
+        wants = [ "network-online.target" ];
         after = [ "network-online.target" ];
         wantedBy = [ "multi-user.target" ];
         description = "gmediarender server daemon";
diff --git a/nixos/modules/services/audio/jmusicbot.nix b/nixos/modules/services/audio/jmusicbot.nix
index fd1d4da192843..e7803677d0fd9 100644
--- a/nixos/modules/services/audio/jmusicbot.nix
+++ b/nixos/modules/services/audio/jmusicbot.nix
@@ -26,6 +26,7 @@ in
   config = mkIf cfg.enable {
     systemd.services.jmusicbot = {
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       description = "Discord music bot that's easy to set up and run yourself!";
       serviceConfig = mkMerge [{
diff --git a/nixos/modules/services/audio/mopidy.nix b/nixos/modules/services/audio/mopidy.nix
index 40e8679f53d74..9d8e67b0ea478 100644
--- a/nixos/modules/services/audio/mopidy.nix
+++ b/nixos/modules/services/audio/mopidy.nix
@@ -76,7 +76,7 @@ in {
 
     systemd.services.mopidy = {
       wantedBy = [ "multi-user.target" ];
-      after = [ "network.target" "sound.target" ];
+      after = [ "network-online.target" "sound.target" ];
       description = "mopidy music player daemon";
       serviceConfig = {
         ExecStart = "${mopidyEnv}/bin/mopidy --config ${concatStringsSep ":" ([mopidyConf] ++ cfg.extraConfigFiles)}";
diff --git a/nixos/modules/services/audio/mympd.nix b/nixos/modules/services/audio/mympd.nix
new file mode 100644
index 0000000000000..f1c7197085d7d
--- /dev/null
+++ b/nixos/modules/services/audio/mympd.nix
@@ -0,0 +1,129 @@
+{ pkgs, config, lib, ... }:
+
+let
+  cfg = config.services.mympd;
+in {
+  options = {
+
+    services.mympd = {
+
+      enable = lib.mkEnableOption (lib.mdDoc "MyMPD server");
+
+      package = lib.mkPackageOption pkgs "mympd" {};
+
+      openFirewall = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open the TCP ports used by myMPD (HTTP and, if enabled, SSL) in the firewall.
+        '';
+      };
+
+      extraGroups = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        default = [ ];
+        example = [ "music" ];
+        description = lib.mdDoc ''
+          Additional groups for the systemd service.
+        '';
+      };
+
+      settings = lib.mkOption {
+        type = lib.types.submodule {
+          freeformType = with lib.types; attrsOf (nullOr (oneOf [ str bool int ]));
+          options = {
+            http_port = lib.mkOption {
+              type = lib.types.port;
+              description = lib.mdDoc ''
+                The HTTP port where myMPD's web interface will be available.
+
+                The HTTPS/SSL port can be configured via the freeform `settings` option.
+              '';
+              example = 8080;
+            };
+
+            ssl = lib.mkOption {
+              type = lib.types.bool;
+              description = lib.mdDoc ''
+                Whether to enable listening on the SSL port.
+
+                Refer to <https://jcorporation.github.io/myMPD/configuration/configuration-files#ssl-options>
+                for more information.
+              '';
+              default = false;
+            };
+          };
+        };
+        description = lib.mdDoc ''
+          Manages the configuration files declaratively. For all the configuration
+          options, see <https://jcorporation.github.io/myMPD/configuration/configuration-files>.
+
+          Each key represents the "File" column from the upstream configuration table, and the
+          value is the content of that file.
+        '';
+      };
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.mympd = {
+      # upstream service config: https://github.com/jcorporation/myMPD/blob/master/contrib/initscripts/mympd.service.in
+      after = [ "mpd.service" ];
+      wantedBy = [ "multi-user.target" ];
+      preStart = with lib; ''
+        config_dir="/var/lib/mympd/config"
+        mkdir -p "$config_dir"
+
+        ${pipe cfg.settings [
+          (mapAttrsToList (name: value: ''
+            echo -n "${if isBool value then boolToString value else toString value}" > "$config_dir/${name}"
+            ''))
+          (concatStringsSep "\n")
+        ]}
+      '';
+      unitConfig = {
+        Description = "myMPD server daemon";
+        Documentation = "man:mympd(1)";
+      };
+      serviceConfig = {
+        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+        CapabilityBoundingSet = "CAP_NET_BIND_SERVICE";
+        DynamicUser = true;
+        ExecStart = lib.getExe cfg.package;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        RestrictRealtime = true;
+        StateDirectory = "mympd";
+        CacheDirectory = "mympd";
+        RestrictAddressFamilies = "AF_INET AF_INET6 AF_NETLINK AF_UNIX";
+        RestrictNamespaces = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = "@system-service";
+        SupplementaryGroups = cfg.extraGroups;
+      };
+    };
+
+    networking.firewall = lib.mkMerge [
+      (lib.mkIf cfg.openFirewall {
+        allowedTCPPorts = [ cfg.settings.http_port ];
+      })
+      (lib.mkIf (cfg.openFirewall && cfg.settings.ssl && cfg.settings.ssl_port != null) {
+        allowedTCPPorts = [ cfg.settings.ssl_port ];
+      })
+    ];
+
+  };
+
+  meta.maintainers = [ lib.maintainers.eliandoran ];
+
+}
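
As a sketch, the new module could be enabled like this (port and group are hypothetical; an `ssl_port` would go through the freeform `settings` if TLS were wanted):

```nix
{
  services.mympd = {
    enable = true;
    openFirewall = true;         # opens settings.http_port (and ssl_port when SSL is enabled)
    extraGroups = [ "music" ];   # e.g. to reach the music library
    settings = {
      http_port = 8080;
      ssl = false;
    };
  };
}
```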
diff --git a/nixos/modules/services/audio/spotifyd.nix b/nixos/modules/services/audio/spotifyd.nix
index 975be5a87cba9..1194b6f200d70 100644
--- a/nixos/modules/services/audio/spotifyd.nix
+++ b/nixos/modules/services/audio/spotifyd.nix
@@ -50,6 +50,7 @@ in
 
     systemd.services.spotifyd = {
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" "sound.target" ];
       description = "spotifyd, a Spotify playing daemon";
       environment.SHELL = "/bin/sh";
diff --git a/nixos/modules/services/audio/wyoming/faster-whisper.nix b/nixos/modules/services/audio/wyoming/faster-whisper.nix
index eda409f1f8004..dd7f62744cd02 100644
--- a/nixos/modules/services/audio/wyoming/faster-whisper.nix
+++ b/nixos/modules/services/audio/wyoming/faster-whisper.nix
@@ -121,6 +121,7 @@ in
   in mkIf (cfg.servers != {}) {
     systemd.services = mapAttrs' (server: options:
       nameValuePair "wyoming-faster-whisper-${server}" {
+        inherit (options) enable;
         description = "Wyoming faster-whisper server instance ${server}";
         after = [
           "network-online.target"
diff --git a/nixos/modules/services/audio/wyoming/piper.nix b/nixos/modules/services/audio/wyoming/piper.nix
index 698828aa6cbaf..2828fdf078921 100644
--- a/nixos/modules/services/audio/wyoming/piper.nix
+++ b/nixos/modules/services/audio/wyoming/piper.nix
@@ -116,6 +116,7 @@ in
   in mkIf (cfg.servers != {}) {
     systemd.services = mapAttrs' (server: options:
       nameValuePair "wyoming-piper-${server}" {
+        inherit (options) enable;
         description = "Wyoming Piper server instance ${server}";
         after = [
           "network-online.target"
diff --git a/nixos/modules/services/audio/ympd.nix b/nixos/modules/services/audio/ympd.nix
index b74cc3f9c0b41..6e8d22dab3c80 100644
--- a/nixos/modules/services/audio/ympd.nix
+++ b/nixos/modules/services/audio/ympd.nix
@@ -50,6 +50,7 @@ in {
       description = "Standalone MPD Web GUI written in C";
 
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
 
       serviceConfig = {
diff --git a/nixos/modules/services/backup/borgbackup.nix b/nixos/modules/services/backup/borgbackup.nix
index 039a5f227ac41..6f4455d3be605 100644
--- a/nixos/modules/services/backup/borgbackup.nix
+++ b/nixos/modules/services/backup/borgbackup.nix
@@ -143,20 +143,15 @@ let
   };
 
   # Paths listed in ReadWritePaths must exist before service is started
-  mkActivationScript = name: cfg:
+  mkTmpfiles = name: cfg:
     let
-      install = "install -o ${cfg.user} -g ${cfg.group}";
-    in
-      nameValuePair "borgbackup-job-${name}" (stringAfter [ "users" ] (''
-        # Ensure that the home directory already exists
-        # We can't assert createHome == true because that's not the case for root
-        cd "${config.users.users.${cfg.user}.home}"
-        # Create each directory separately to prevent root owned parent dirs
-        ${install} -d .config .config/borg
-        ${install} -d .cache .cache/borg
-      '' + optionalString (isLocalPath cfg.repo && !cfg.removableDevice) ''
-        ${install} -d ${escapeShellArg cfg.repo}
-      ''));
+      settings = { inherit (cfg) user group; };
+    in lib.nameValuePair "borgbackup-job-${name}" ({
+      "${config.users.users."${cfg.user}".home}/.config/borg".d = settings;
+      "${config.users.users."${cfg.user}".home}/.cache/borg".d = settings;
+    } // optionalAttrs (isLocalPath cfg.repo && !cfg.removableDevice) {
+      "${cfg.repo}".d = settings;
+    });
 
   mkPassAssertion = name: cfg: {
     assertion = with cfg.encryption;
@@ -602,53 +597,56 @@ in {
           };
 
           extraArgs = mkOption {
-            type = types.str;
+            type = with types; coercedTo (listOf str) escapeShellArgs str;
             description = lib.mdDoc ''
               Additional arguments for all {command}`borg` calls the
               service has. Handle with care.
             '';
-            default = "";
-            example = "--remote-path=/path/to/borg";
+            default = [ ];
+            example = [ "--remote-path=/path/to/borg" ];
           };
 
           extraInitArgs = mkOption {
-            type = types.str;
+            type = with types; coercedTo (listOf str) escapeShellArgs str;
             description = lib.mdDoc ''
               Additional arguments for {command}`borg init`.
               Can also be set at runtime using `$extraInitArgs`.
             '';
-            default = "";
-            example = "--append-only";
+            default = [ ];
+            example = [ "--append-only" ];
           };
 
           extraCreateArgs = mkOption {
-            type = types.str;
+            type = with types; coercedTo (listOf str) escapeShellArgs str;
             description = lib.mdDoc ''
               Additional arguments for {command}`borg create`.
               Can also be set at runtime using `$extraCreateArgs`.
             '';
-            default = "";
-            example = "--stats --checkpoint-interval 600";
+            default = [ ];
+            example = [
+              "--stats"
+              "--checkpoint-interval 600"
+            ];
           };
 
           extraPruneArgs = mkOption {
-            type = types.str;
+            type = with types; coercedTo (listOf str) escapeShellArgs str;
             description = lib.mdDoc ''
               Additional arguments for {command}`borg prune`.
               Can also be set at runtime using `$extraPruneArgs`.
             '';
-            default = "";
-            example = "--save-space";
+            default = [ ];
+            example = [ "--save-space" ];
           };
 
           extraCompactArgs = mkOption {
-            type = types.str;
+            type = with types; coercedTo (listOf str) escapeShellArgs str;
             description = lib.mdDoc ''
               Additional arguments for {command}`borg compact`.
               Can also be set at runtime using `$extraCompactArgs`.
             '';
-            default = "";
-            example = "--cleanup-commits";
+            default = [ ];
+            example = [ "--cleanup-commits" ];
           };
         };
       }
@@ -757,7 +755,7 @@ in {
         ++ mapAttrsToList mkSourceAssertions jobs
         ++ mapAttrsToList mkRemovableDeviceAssertions jobs;
 
-      system.activationScripts = mapAttrs' mkActivationScript jobs;
+      systemd.tmpfiles.settings = mapAttrs' mkTmpfiles jobs;
 
       systemd.services =
         # A job named "foo" is mapped to systemd.services.borgbackup-job-foo
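
With the `coercedTo` change above, the extra-argument options accept lists that are shell-escaped automatically, while plain strings keep working. A hypothetical job using the list form:

```nix
{
  services.borgbackup.jobs.documents = {
    paths = [ "/home/alice/Documents" ];   # illustrative paths
    repo = "/var/backup/documents";
    encryption.mode = "none";
    startAt = "daily";
    extraArgs = [ "--remote-path=/path/to/borg" ];
    # Each list element becomes exactly one shell-escaped argument.
    extraCreateArgs = [ "--stats" "--checkpoint-interval" "600" ];
  };
}
```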
diff --git a/nixos/modules/services/backup/btrbk.nix b/nixos/modules/services/backup/btrbk.nix
index 1e90ef54d33f9..364b77b6a21c1 100644
--- a/nixos/modules/services/backup/btrbk.nix
+++ b/nixos/modules/services/backup/btrbk.nix
@@ -6,14 +6,17 @@ let
     concatMapStringsSep
     concatStringsSep
     filterAttrs
+    flatten
+    getAttr
     isAttrs
     literalExpression
     mapAttrs'
     mapAttrsToList
     mkIf
     mkOption
+    optional
     optionalString
-    sort
+    sortOn
     types
     ;
 
@@ -37,7 +40,7 @@ let
   genConfig = set:
     let
       pairs = mapAttrsToList (name: value: { inherit name value; }) set;
-      sortedPairs = sort (a: b: prioOf a < prioOf b) pairs;
+      sortedPairs = sortOn prioOf pairs;
     in
       concatMap genPair sortedPairs;
   genSection = sec: secName: value:
@@ -84,6 +87,18 @@ let
     '';
   };
 
+  streamCompressMap = {
+    gzip = pkgs.gzip;
+    pigz = pkgs.pigz;
+    bzip2 = pkgs.bzip2;
+    pbzip2 = pkgs.pbzip2;
+    bzip3 = pkgs.bzip3;
+    xz = pkgs.xz;
+    lzo = pkgs.lzo;
+    lz4 = pkgs.lz4;
+    zstd = pkgs.zstd;
+  };
+
   cfg = config.services.btrbk;
   sshEnabled = cfg.sshAccess != [ ];
   serviceEnabled = cfg.instances != { };
@@ -94,7 +109,14 @@ in
   options = {
     services.btrbk = {
       extraPackages = mkOption {
-        description = lib.mdDoc "Extra packages for btrbk, like compression utilities for `stream_compress`";
+        description = lib.mdDoc ''
+          Extra packages for btrbk, like compression utilities for `stream_compress`.
+
+          **Note**: This option will be deprecated in a future release.
+          The compression program required by the configured `stream_compress`
+          method in `services.btrbk.instances.<name>.settings` is now provided
+          to btrbk automatically.
+        '';
         type = types.listOf types.package;
         default = [ ];
         example = literalExpression "[ pkgs.xz ]";
@@ -124,7 +146,19 @@ in
                   '';
                 };
                 settings = mkOption {
-                  type = let t = types.attrsOf (types.either types.str (t // { description = "instances of this type recursively"; })); in t;
+                  type = types.submodule {
+                    freeformType = let t = types.attrsOf (types.either types.str (t // { description = "instances of this type recursively"; })); in t;
+                    options = {
+                      stream_compress = mkOption {
+                        description = lib.mdDoc ''
+                          Compress the btrfs send stream before transferring it from/to remote locations using a
+                          compression command.
+                        '';
+                        type = types.enum ["gzip" "pigz" "bzip2" "pbzip2" "bzip3" "xz" "lzo" "lz4" "zstd" "no"];
+                        default = "no";
+                      };
+                    };
+                  };
                   default = { };
                   example = {
                     snapshot_preserve_min = "2d";
@@ -169,6 +203,11 @@ in
 
   };
   config = mkIf (sshEnabled || serviceEnabled) {
+
+    warnings = optional (cfg.extraPackages != []) ''
+      The extraPackages option will be deprecated in a future release. The program required for compression is now selected automatically based on the services.btrbk.instances.<name>.settings.stream_compress option.
+    '';
+
     environment.systemPackages = [ pkgs.btrbk ] ++ cfg.extraPackages;
 
     security.sudo.extraRules = mkIf (sudo_doas == "sudo") [ sudoRule ];
@@ -232,12 +271,15 @@ in
       cfg.instances;
     systemd.services = mapAttrs'
       (
-        name: _: {
+        name: instance: {
           name = "btrbk-${name}";
           value = {
             description = "Takes BTRFS snapshots and maintains retention policies.";
             unitConfig.Documentation = "man:btrbk(1)";
-            path = [ "/run/wrappers" ] ++ cfg.extraPackages;
+            path = [ "/run/wrappers" ]
+              ++ cfg.extraPackages
+              ++ optional (instance.settings.stream_compress != "no")
+                (getAttr instance.settings.stream_compress streamCompressMap);
             serviceConfig = {
               User = "btrbk";
               Group = "btrbk";
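
To illustrate the new `stream_compress` setting, an instance might be configured as below (volume layout and target are hypothetical); the matching compressor, here `zstd`, is added to the unit's `path` automatically:

```nix
{
  services.btrbk.instances.remote = {
    onCalendar = "daily";
    settings = {
      stream_compress = "zstd";   # no services.btrbk.extraPackages needed
      volume."/mnt/data" = {
        subvolume = "home";
        target = "ssh://backup.example.org/srv/btrbk";
      };
    };
  };
}
```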
diff --git a/nixos/modules/services/backup/postgresql-backup.nix b/nixos/modules/services/backup/postgresql-backup.nix
index d3c6f3104fc54..82067d8ade34d 100644
--- a/nixos/modules/services/backup/postgresql-backup.nix
+++ b/nixos/modules/services/backup/postgresql-backup.nix
@@ -17,8 +17,8 @@ let
 
       compressCmd = getAttr cfg.compression {
         "none" = "cat";
-        "gzip" = "${pkgs.gzip}/bin/gzip -c -${toString cfg.compressionLevel}";
-        "zstd" = "${pkgs.zstd}/bin/zstd -c -${toString cfg.compressionLevel}";
+        "gzip" = "${pkgs.gzip}/bin/gzip -c -${toString cfg.compressionLevel} --rsyncable";
+        "zstd" = "${pkgs.zstd}/bin/zstd -c -${toString cfg.compressionLevel} --rsyncable";
       };
 
       mkSqlPath = prefix: suffix: "${cfg.location}/${db}${prefix}.sql${suffix}";
@@ -178,4 +178,5 @@ in {
     })
   ];
 
+  meta.maintainers = with lib.maintainers; [ Scrumplex ];
 }
diff --git a/nixos/modules/services/backup/restic.nix b/nixos/modules/services/backup/restic.nix
index e3eb504e0adfc..b222dd952d159 100644
--- a/nixos/modules/services/backup/restic.nix
+++ b/nixos/modules/services/backup/restic.nix
@@ -384,10 +384,11 @@ in
       ${lib.optionalString (backup.environmentFile != null) "source ${backup.environmentFile}"}
       # set same environment variables as the systemd service
       ${lib.pipe config.systemd.services."restic-backups-${name}".environment [
-        (lib.filterAttrs (_: v: v != null))
+        (lib.filterAttrs (n: v: v != null && n != "PATH"))
         (lib.mapAttrsToList (n: v: "${n}=${v}"))
         (lib.concatStringsSep "\n")
       ]}
+      PATH=${config.systemd.services."restic-backups-${name}".environment.PATH}:$PATH
 
       exec ${resticCmd} $@
     '') (lib.filterAttrs (_: v: v.createWrapper) config.services.restic.backups);
diff --git a/nixos/modules/tasks/snapraid.nix b/nixos/modules/services/backup/snapraid.nix
index 9570c6b76123b..c9b2550e80e81 100644
--- a/nixos/modules/tasks/snapraid.nix
+++ b/nixos/modules/services/backup/snapraid.nix
@@ -2,10 +2,15 @@
 
 with lib;
 
-let cfg = config.snapraid;
+let cfg = config.services.snapraid;
 in
 {
-  options.snapraid = with types; {
+  imports = [
+    # Should have never been on the top-level.
+    (mkRenamedOptionModule [ "snapraid" ] [ "services" "snapraid" ])
+  ];
+
+  options.services.snapraid = with types; {
     enable = mkEnableOption (lib.mdDoc "SnapRAID");
     dataDisks = mkOption {
       default = { };
diff --git a/nixos/modules/services/backup/tsm.nix b/nixos/modules/services/backup/tsm.nix
index c4de0b16d47d2..6798b18b3af73 100644
--- a/nixos/modules/services/backup/tsm.nix
+++ b/nixos/modules/services/backup/tsm.nix
@@ -3,6 +3,7 @@
 let
 
   inherit (lib.attrsets) hasAttr;
+  inherit (lib.meta) getExe';
   inherit (lib.modules) mkDefault mkIf;
   inherit (lib.options) mkEnableOption mkOption;
   inherit (lib.types) nonEmptyStr nullOr;
@@ -10,7 +11,7 @@ let
   options.services.tsmBackup = {
     enable = mkEnableOption (lib.mdDoc ''
       automatic backups with the
-      IBM Spectrum Protect (Tivoli Storage Manager, TSM) client.
+      IBM Storage Protect (Tivoli Storage Manager, TSM) client.
       This also enables
       {option}`programs.tsmClient.enable`
     '');
@@ -78,10 +79,10 @@ in
   config = mkIf cfg.enable {
     inherit assertions;
     programs.tsmClient.enable = true;
-    programs.tsmClient.servers.${cfg.servername}.passwdDir =
+    programs.tsmClient.servers.${cfg.servername}.passworddir =
       mkDefault "/var/lib/tsm-backup/password";
     systemd.services.tsm-backup = {
-      description = "IBM Spectrum Protect (Tivoli Storage Manager) Backup";
+      description = "IBM Storage Protect (Tivoli Storage Manager) Backup";
       # DSM_LOG needs a trailing slash to have it treated as a directory.
       # `/var/log` would be littered with TSM log files otherwise.
       environment.DSM_LOG = "/var/log/tsm-backup/";
@@ -89,12 +90,12 @@ in
       environment.HOME = "/var/lib/tsm-backup";
       serviceConfig = {
         # for exit status description see
-        # https://www.ibm.com/docs/en/spectrum-protect/8.1.13?topic=clients-client-return-codes
+        # https://www.ibm.com/docs/en/storage-protect/8.1.20?topic=clients-client-return-codes
         SuccessExitStatus = "4 8";
         # The `-se` option must come after the command.
         # The `-optfile` option suppresses a `dsm.opt`-not-found warning.
         ExecStart =
-          "${cfgPrg.wrappedPackage}/bin/dsmc ${cfg.command} -se='${cfg.servername}' -optfile=/dev/null";
+          "${getExe' cfgPrg.wrappedPackage "dsmc"} ${cfg.command} -se='${cfg.servername}' -optfile=/dev/null";
         LogsDirectory = "tsm-backup";
         StateDirectory = "tsm-backup";
         StateDirectoryMode = "0750";
diff --git a/nixos/modules/services/cluster/kubernetes/flannel.nix b/nixos/modules/services/cluster/kubernetes/flannel.nix
index 11c5adc6a8859..dca8996df0831 100644
--- a/nixos/modules/services/cluster/kubernetes/flannel.nix
+++ b/nixos/modules/services/cluster/kubernetes/flannel.nix
@@ -13,6 +13,13 @@ in
   ###### interface
   options.services.kubernetes.flannel = {
     enable = mkEnableOption (lib.mdDoc "flannel networking");
+
+    openFirewallPorts = mkOption {
+      description = lib.mdDoc ''
+        Whether to open the Flannel UDP ports in the firewall on all interfaces.'';
+      type = types.bool;
+      default = true;
+    };
   };
 
   ###### implementation
@@ -38,7 +45,7 @@ in
     };
 
     networking = {
-      firewall.allowedUDPPorts = [
+      firewall.allowedUDPPorts = mkIf cfg.openFirewallPorts [
         8285  # flannel udp
         8472  # flannel vxlan
       ];
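
A sketch of opting out of the automatic firewall rules with the new option:

```nix
{
  services.kubernetes.flannel.enable = true;
  # Keep UDP ports 8285/8472 closed and manage firewall rules elsewhere.
  services.kubernetes.flannel.openFirewallPorts = false;
}
```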
diff --git a/nixos/modules/services/cluster/kubernetes/pki.nix b/nixos/modules/services/cluster/kubernetes/pki.nix
index 38682701ea151..35151ebd6bd7b 100644
--- a/nixos/modules/services/cluster/kubernetes/pki.nix
+++ b/nixos/modules/services/cluster/kubernetes/pki.nix
@@ -212,7 +212,7 @@ in
 
     services.certmgr = {
       enable = true;
-      package = pkgs.certmgr-selfsigned;
+      package = pkgs.certmgr;
       svcManager = "command";
       specs =
         let
diff --git a/nixos/modules/services/cluster/spark/default.nix b/nixos/modules/services/cluster/spark/default.nix
index 2e3914a734bea..b3e1ac399ae9f 100644
--- a/nixos/modules/services/cluster/spark/default.nix
+++ b/nixos/modules/services/cluster/spark/default.nix
@@ -69,8 +69,8 @@ with lib;
       confDir = mkOption {
         type = types.path;
         description = lib.mdDoc "Spark configuration directory. Spark will use the configuration files (spark-defaults.conf, spark-env.sh, log4j.properties, etc) from this directory.";
-        default = "${cfg.package}/lib/${cfg.package.untarDir}/conf";
-        defaultText = literalExpression ''"''${package}/lib/''${package.untarDir}/conf"'';
+        default = "${cfg.package}/conf";
+        defaultText = literalExpression ''"''${package}/conf"'';
       };
       logDir = mkOption {
         type = types.path;
@@ -111,9 +111,9 @@ with lib;
             Type = "forking";
             User = "spark";
             Group = "spark";
-            WorkingDirectory = "${cfg.package}/lib/${cfg.package.untarDir}";
-            ExecStart = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/start-master.sh";
-            ExecStop  = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/stop-master.sh";
+            WorkingDirectory = "${cfg.package}/";
+            ExecStart = "${cfg.package}/sbin/start-master.sh";
+            ExecStop  = "${cfg.package}/sbin/stop-master.sh";
             TimeoutSec = 300;
             StartLimitBurst=10;
             Restart = "always";
@@ -134,9 +134,9 @@ with lib;
           serviceConfig = {
             Type = "forking";
             User = "spark";
-            WorkingDirectory = "${cfg.package}/lib/${cfg.package.untarDir}";
-            ExecStart = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/start-worker.sh spark://${cfg.worker.master}";
-            ExecStop  = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/stop-worker.sh";
+            WorkingDirectory = "${cfg.package}/";
+            ExecStart = "${cfg.package}/sbin/start-worker.sh spark://${cfg.worker.master}";
+            ExecStop  = "${cfg.package}/sbin/stop-worker.sh";
             TimeoutSec = 300;
             StartLimitBurst=10;
             Restart = "always";
diff --git a/nixos/modules/services/continuous-integration/buildbot/master.nix b/nixos/modules/services/continuous-integration/buildbot/master.nix
index 56abeda3a5cdd..c86cb81e5df47 100644
--- a/nixos/modules/services/continuous-integration/buildbot/master.nix
+++ b/nixos/modules/services/continuous-integration/buildbot/master.nix
@@ -268,6 +268,7 @@ in {
     systemd.services.buildbot-master = {
       description = "Buildbot Continuous Integration Server.";
       after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       path = cfg.packages ++ cfg.pythonPackages python.pkgs;
       environment.PYTHONPATH = "${python.withPackages (self: cfg.pythonPackages self ++ [ package ])}/${python.sitePackages}";
@@ -305,5 +306,5 @@ in {
     '')
   ];
 
-  meta.maintainers = with lib.maintainers; [ mic92 lopsided98 ];
+  meta.maintainers = lib.teams.buildbot.members;
 }
diff --git a/nixos/modules/services/continuous-integration/buildbot/worker.nix b/nixos/modules/services/continuous-integration/buildbot/worker.nix
index b906788209b19..9c7b2bdd06e02 100644
--- a/nixos/modules/services/continuous-integration/buildbot/worker.nix
+++ b/nixos/modules/services/continuous-integration/buildbot/worker.nix
@@ -128,9 +128,7 @@ in {
         '';
       };
 
-      package = mkPackageOption pkgs "python3Packages.buildbot-worker" {
-        example = "python2Packages.buildbot-worker";
-      };
+      package = mkPackageOption pkgs "buildbot-worker" { };
 
       packages = mkOption {
         default = with pkgs; [ git ];
@@ -190,6 +188,6 @@ in {
     };
   };
 
-  meta.maintainers = with lib.maintainers; [ ];
+  meta.maintainers = lib.teams.buildbot.members;
 
 }
diff --git a/nixos/modules/services/continuous-integration/buildkite-agents.nix b/nixos/modules/services/continuous-integration/buildkite-agents.nix
index a35ca4168074f..2e488f83d4c3b 100644
--- a/nixos/modules/services/continuous-integration/buildkite-agents.nix
+++ b/nixos/modules/services/continuous-integration/buildkite-agents.nix
@@ -35,6 +35,12 @@ let
         type = lib.types.str;
       };
 
+      extraGroups = lib.mkOption {
+        default = [ "keys" ];
+        description = lib.mdDoc "Groups the user for this buildkite agent should belong to";
+        type = lib.types.listOf lib.types.str;
+      };
+
       runtimePackages = lib.mkOption {
         default = [ pkgs.bash pkgs.gnutar pkgs.gzip pkgs.git pkgs.nix ];
         defaultText = lib.literalExpression "[ pkgs.bash pkgs.gnutar pkgs.gzip pkgs.git pkgs.nix ]";
@@ -150,7 +156,7 @@ in
       home = cfg.dataDir;
       createHome = true;
       description = "Buildkite agent user";
-      extraGroups = [ "keys" ];
+      extraGroups = cfg.extraGroups;
       isSystemUser = true;
       group = "buildkite-agent-${name}";
     };
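
The new per-agent `extraGroups` option can extend (or drop) the previously hard-coded `keys` group; for example, hypothetically granting an agent access to the Docker socket:

```nix
{
  services.buildkite-agents.builder = {
    tokenPath = "/run/keys/buildkite-token";   # illustrative
    extraGroups = [ "keys" "docker" ];
  };
}
```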
diff --git a/nixos/modules/services/continuous-integration/gitea-actions-runner.nix b/nixos/modules/services/continuous-integration/gitea-actions-runner.nix
index 3f2be9464849f..06f0da3451a6c 100644
--- a/nixos/modules/services/continuous-integration/gitea-actions-runner.nix
+++ b/nixos/modules/services/continuous-integration/gitea-actions-runner.nix
@@ -188,6 +188,7 @@ in
         nameValuePair "gitea-runner-${escapeSystemdPath name}" {
           inherit (instance) enable;
           description = "Gitea Actions Runner";
+          wants = [ "network-online.target" ];
           after = [
             "network-online.target"
           ] ++ optionals (wantsDocker) [
diff --git a/nixos/modules/services/continuous-integration/github-runner/options.nix b/nixos/modules/services/continuous-integration/github-runner/options.nix
index 2335826e8b665..b9b1ea05e9672 100644
--- a/nixos/modules/services/continuous-integration/github-runner/options.nix
+++ b/nixos/modules/services/continuous-integration/github-runner/options.nix
@@ -153,6 +153,7 @@ with lib;
     type = types.attrs;
     description = lib.mdDoc ''
       Modify the systemd service. Can be used to, e.g., adjust the sandboxing options.
+      See {manpage}`systemd.exec(5)` for more options.
     '';
     example = {
       ProtectHome = false;
diff --git a/nixos/modules/services/continuous-integration/hydra/default.nix b/nixos/modules/services/continuous-integration/hydra/default.nix
index 46b03bba37be7..54bbe69703f95 100644
--- a/nixos/modules/services/continuous-integration/hydra/default.nix
+++ b/nixos/modules/services/continuous-integration/hydra/default.nix
@@ -393,6 +393,7 @@ in
     systemd.services.hydra-evaluator =
       { wantedBy = [ "multi-user.target" ];
         requires = [ "hydra-init.service" ];
+        wants = [ "network-online.target" ];
         after = [ "hydra-init.service" "network.target" "network-online.target" ];
         path = with pkgs; [ hydra-package nettools jq ];
         restartTriggers = [ hydraConf ];
diff --git a/nixos/modules/services/continuous-integration/jenkins/default.nix b/nixos/modules/services/continuous-integration/jenkins/default.nix
index e96743784e047..d69cf4587aaba 100644
--- a/nixos/modules/services/continuous-integration/jenkins/default.nix
+++ b/nixos/modules/services/continuous-integration/jenkins/default.nix
@@ -236,6 +236,7 @@ in {
 
       serviceConfig = {
         User = cfg.user;
+        StateDirectory = mkIf (hasPrefix "/var/lib/jenkins" cfg.home) "jenkins";
       };
     };
   };
diff --git a/nixos/modules/services/databases/aerospike.nix b/nixos/modules/services/databases/aerospike.nix
index 373c8f4bffb0d..4923c0f00ddb4 100644
--- a/nixos/modules/services/databases/aerospike.nix
+++ b/nixos/modules/services/databases/aerospike.nix
@@ -108,6 +108,11 @@ in
     };
     users.groups.aerospike.gid = config.ids.gids.aerospike;
 
+    boot.kernel.sysctl = {
+      "net.core.rmem_max" = mkDefault 15728640;
+      "net.core.wmem_max" = mkDefault 5242880;
+    };
+
     systemd.services.aerospike = rec {
       description = "Aerospike server";
 
@@ -131,14 +136,6 @@ in
           echo "kernel.shmmax too low, setting to 1GB"
           ${pkgs.procps}/bin/sysctl -w kernel.shmmax=1073741824
         fi
-        if [ $(echo "$(cat /proc/sys/net/core/rmem_max) < 15728640" | ${pkgs.bc}/bin/bc) == "1" ]; then
-          echo "increasing socket buffer limit (/proc/sys/net/core/rmem_max): $(cat /proc/sys/net/core/rmem_max) -> 15728640"
-          echo 15728640 > /proc/sys/net/core/rmem_max
-        fi
-        if [ $(echo "$(cat /proc/sys/net/core/wmem_max) <  5242880" | ${pkgs.bc}/bin/bc) == "1"  ]; then
-          echo "increasing socket buffer limit (/proc/sys/net/core/wmem_max): $(cat /proc/sys/net/core/wmem_max) -> 5242880"
-          echo  5242880 > /proc/sys/net/core/wmem_max
-        fi
         install -d -m0700 -o ${serviceConfig.User} -g ${serviceConfig.Group} "${cfg.workDir}"
         install -d -m0700 -o ${serviceConfig.User} -g ${serviceConfig.Group} "${cfg.workDir}/smd"
         install -d -m0700 -o ${serviceConfig.User} -g ${serviceConfig.Group} "${cfg.workDir}/udf"
diff --git a/nixos/modules/services/databases/firebird.nix b/nixos/modules/services/databases/firebird.nix
index 36c12eaaf5f15..431233ce5ed41 100644
--- a/nixos/modules/services/databases/firebird.nix
+++ b/nixos/modules/services/databases/firebird.nix
@@ -143,7 +143,7 @@ in
       # ConnectionTimeout = 180
 
       #RemoteServiceName = gds_db
-      RemoteServicePort = ${cfg.port}
+      RemoteServicePort = ${toString cfg.port}
 
       # randomly choose port for server Event Notification
       #RemoteAuxPort = 0
diff --git a/nixos/modules/services/databases/influxdb.nix b/nixos/modules/services/databases/influxdb.nix
index 34b4139e7c580..adb212ab08d0d 100644
--- a/nixos/modules/services/databases/influxdb.nix
+++ b/nixos/modules/services/databases/influxdb.nix
@@ -161,6 +161,7 @@ in
         ExecStart = ''${cfg.package}/bin/influxd -config "${configFile}"'';
         User = cfg.user;
         Group = cfg.group;
+        Restart = "on-failure";
       };
       postStart =
         let
diff --git a/nixos/modules/services/databases/lldap.nix b/nixos/modules/services/databases/lldap.nix
index d1574c98fe67f..e821da8e58aa3 100644
--- a/nixos/modules/services/databases/lldap.nix
+++ b/nixos/modules/services/databases/lldap.nix
@@ -104,6 +104,7 @@ in
   config = lib.mkIf cfg.enable {
     systemd.services.lldap = {
       description = "Lightweight LDAP server (lldap)";
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
diff --git a/nixos/modules/services/databases/openldap.nix b/nixos/modules/services/databases/openldap.nix
index a7a0909f55e1b..df36e37976a44 100644
--- a/nixos/modules/services/databases/openldap.nix
+++ b/nixos/modules/services/databases/openldap.nix
@@ -294,6 +294,7 @@ in {
         "man:slapd-mdb"
       ];
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       serviceConfig = {
         User = cfg.user;
diff --git a/nixos/modules/services/databases/postgresql.md b/nixos/modules/services/databases/postgresql.md
index e5e0b7efec29a..7d141f12b5dea 100644
--- a/nixos/modules/services/databases/postgresql.md
+++ b/nixos/modules/services/databases/postgresql.md
@@ -258,7 +258,7 @@ postgresql_15.pkgs.pg_partman        postgresql_15.pkgs.pgroonga
 To add plugins via NixOS configuration, set `services.postgresql.extraPlugins`:
 ```
 services.postgresql.package = pkgs.postgresql_12;
-services.postgresql.extraPlugins = with pkgs.postgresql_12.pkgs; [
+services.postgresql.extraPlugins = ps: with ps; [
   pg_repack
   postgis
 ];
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index 690f2d85a4c9a..ed5915735730b 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -18,7 +18,7 @@ let
     in
     if cfg.extraPlugins == []
       then base
-      else base.withPackages (_: cfg.extraPlugins);
+      else base.withPackages cfg.extraPlugins;
 
   toStr = value:
     if true == value then "yes"
@@ -391,12 +391,11 @@ in
       };
 
       extraPlugins = mkOption {
-        type = types.listOf types.path;
-        default = [];
-        example = literalExpression "with pkgs.postgresql_15.pkgs; [ postgis pg_repack ]";
+        type = with types; coercedTo (listOf path) (path: _ignorePg: path) (functionTo (listOf path));
+        default = _: [];
+        example = literalExpression "ps: with ps; [ postgis pg_repack ]";
         description = lib.mdDoc ''
-          List of PostgreSQL plugins. PostgreSQL version for each plugin should
-          match version for `services.postgresql.package` value.
+          List of PostgreSQL plugins.
         '';
       };
 
@@ -405,7 +404,7 @@ in
         default = {};
         description = lib.mdDoc ''
           PostgreSQL configuration. Refer to
-          <https://www.postgresql.org/docs/15/config-setting.html#CONFIG-SETTING-CONFIGURATION-FILE>
+          <https://www.postgresql.org/docs/current/config-setting.html#CONFIG-SETTING-CONFIGURATION-FILE>
           for an overview of `postgresql.conf`.
 
           ::: {.note}
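
After this change `extraPlugins` takes a function over the plugin set of the configured package (the old list form is still coerced), for example:

```nix
{ pkgs, ... }:
{
  services.postgresql = {
    enable = true;
    package = pkgs.postgresql_15;
    # Plugins are now taken from the package's own plugin set, so their
    # versions can no longer diverge from services.postgresql.package.
    extraPlugins = ps: with ps; [ postgis pg_repack ];
  };
}
```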
diff --git a/nixos/modules/services/desktops/ayatana-indicators.nix b/nixos/modules/services/desktops/ayatana-indicators.nix
new file mode 100644
index 0000000000000..abc687bbd43dd
--- /dev/null
+++ b/nixos/modules/services/desktops/ayatana-indicators.nix
@@ -0,0 +1,58 @@
+{ config
+, pkgs
+, lib
+, ...
+}:
+
+let
+  cfg = config.services.ayatana-indicators;
+in
+{
+  options.services.ayatana-indicators = {
+    enable = lib.mkEnableOption (lib.mdDoc ''
+      Ayatana Indicators, a continuation of Canonical's Application Indicators
+    '');
+
+    packages = lib.mkOption {
+      type = lib.types.listOf lib.types.package;
+      default = [ ];
+      example = lib.literalExpression "with pkgs; [ ayatana-indicator-messages ]";
+      description = lib.mdDoc ''
+        List of packages containing Ayatana Indicator services
+        that should be brought up by the systemd "ayatana-indicators" user target.
+
+        Packages specified here must have passthru.ayatana-indicators set correctly.
+
+        If, how, and where these indicators are displayed depends on your desktop environment.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment = {
+      systemPackages = cfg.packages;
+
+      pathsToLink = [
+        "/share/ayatana"
+      ];
+    };
+
+    # libayatana-common's ayatana-indicators.target with explicit Wants & Before to bring up requested indicator services
+    systemd.user.targets."ayatana-indicators" =
+      let
+        indicatorServices = lib.lists.flatten
+          (map
+            (pkg:
+              (map (ind: "${ind}.service") pkg.passthru.ayatana-indicators))
+            cfg.packages);
+      in
+      {
+        description = "Target representing the lifecycle of the Ayatana Indicators. Each indicator should be bound to it in its individual service file";
+        partOf = [ "graphical-session.target" ];
+        wants = indicatorServices;
+        before = indicatorServices;
+      };
+  };
+
+  meta.maintainers = with lib.maintainers; [ OPNA2608 ];
+}
diff --git a/nixos/modules/services/desktops/flatpak.nix b/nixos/modules/services/desktops/flatpak.nix
index d99faf381e019..4c26e6874023a 100644
--- a/nixos/modules/services/desktops/flatpak.nix
+++ b/nixos/modules/services/desktops/flatpak.nix
@@ -35,6 +35,7 @@ in {
     services.dbus.packages = [ pkgs.flatpak ];
 
     systemd.packages = [ pkgs.flatpak ];
+    systemd.tmpfiles.packages = [ pkgs.flatpak ];
 
     environment.profiles = [
       "$HOME/.local/share/flatpak/exports"
diff --git a/nixos/modules/services/desktops/geoclue2.nix b/nixos/modules/services/desktops/geoclue2.nix
index b04f46c26a568..2a68bb0b55f3a 100644
--- a/nixos/modules/services/desktops/geoclue2.nix
+++ b/nixos/modules/services/desktops/geoclue2.nix
@@ -200,6 +200,7 @@ in
     };
 
     systemd.services.geoclue = {
+      wants = lib.optionals cfg.enableWifi [ "network-online.target" ];
       after = lib.optionals cfg.enableWifi [ "network-online.target" ];
       # restart geoclue service when the configuration changes
       restartTriggers = [
@@ -217,6 +218,7 @@ in
         # we can't be part of a system service, and the agent should
         # be okay with the main service coming and going
         wantedBy = [ "default.target" ];
+        wants = lib.optionals cfg.enableWifi [ "network-online.target" ];
         after = lib.optionals cfg.enableWifi [ "network-online.target" ];
         unitConfig.ConditionUser = "!@system";
         serviceConfig = {
diff --git a/nixos/modules/services/desktops/pipewire/pipewire.nix b/nixos/modules/services/desktops/pipewire/pipewire.nix
index 04ac415c177cb..da409030b3a35 100644
--- a/nixos/modules/services/desktops/pipewire/pipewire.nix
+++ b/nixos/modules/services/desktops/pipewire/pipewire.nix
@@ -4,6 +4,8 @@
 with lib;
 
 let
+  json = pkgs.formats.json {};
+  mapToFiles = location: config: concatMapAttrs (name: value: { "pipewire/${location}.conf.d/${name}.conf".source = json.generate "${name}" value;}) config;
   cfg = config.services.pipewire;
   enable32BitAlsaPlugins = cfg.alsa.support32Bit
                            && pkgs.stdenv.isx86_64
@@ -72,15 +74,140 @@ in {
           https://github.com/PipeWire/pipewire/blob/master/NEWS
         '';
       };
+
+      extraConfig = {
+        pipewire = mkOption {
+          type = lib.types.attrsOf json.type;
+          default = {};
+          example = {
+            "10-clock-rate" = {
+              "context.properties" = {
+                "default.clock.rate" = 44100;
+              };
+            };
+            "11-no-upmixing" = {
+              "stream.properties" = {
+                "channelmix.upmix" = false;
+              };
+            };
+          };
+          description = lib.mdDoc ''
+            Additional configuration for the PipeWire server.
+
+            Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/pipewire.conf.d`.
+
+            See `man pipewire.conf` for details, and [the PipeWire wiki][wiki] for examples.
+
+            See also:
+            - [PipeWire wiki - virtual devices][wiki-virtual-device] for creating virtual devices or remapping channels
+            - [PipeWire wiki - filter-chain][wiki-filter-chain] for creating more complex processing pipelines
+            - [PipeWire wiki - network][wiki-network] for streaming audio over a network
+
+            [wiki]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Config-PipeWire
+            [wiki-virtual-device]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Virtual-Devices
+            [wiki-filter-chain]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Filter-Chain
+            [wiki-network]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Network
+          '';
+        };
+        client = mkOption {
+          type = lib.types.attrsOf json.type;
+          default = {};
+          example = {
+            "10-no-resample" = {
+              "stream.properties" = {
+                "resample.disable" = true;
+              };
+            };
+          };
+          description = lib.mdDoc ''
+            Additional configuration for the PipeWire client library, used by most applications.
+
+            Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/client.conf.d`.
+
+            See the [PipeWire wiki][wiki] for examples.
+
+            [wiki]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Config-client
+          '';
+        };
+        client-rt = mkOption {
+          type = lib.types.attrsOf json.type;
+          default = {};
+          example = {
+            "10-alsa-linear-volume" = {
+              "alsa.properties" = {
+                "alsa.volume-method" = "linear";
+              };
+            };
+          };
+          description = lib.mdDoc ''
+            Additional configuration for the PipeWire client library, used by real-time applications and legacy ALSA clients.
+
+            Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/client-rt.conf.d`.
+
+            See the [PipeWire wiki][wiki] for examples of general configuration, and [PipeWire wiki - ALSA][wiki-alsa] for ALSA clients.
+
+            [wiki]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Config-client
+            [wiki-alsa]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Config-ALSA
+          '';
+        };
+        jack = mkOption {
+          type = lib.types.attrsOf json.type;
+          default = {};
+          example = {
+            "20-hide-midi" = {
+              "jack.properties" = {
+                "jack.show-midi" = false;
+              };
+            };
+          };
+          description = lib.mdDoc ''
+            Additional configuration for the PipeWire JACK server and client library.
+
+            Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/jack.conf.d`.
+
+            See the [PipeWire wiki][wiki] for examples.
+
+            [wiki]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Config-JACK
+          '';
+        };
+        pipewire-pulse = mkOption {
+          type = lib.types.attrsOf json.type;
+          default = {};
+          example = {
+            "15-force-s16-info" = {
+              "pulse.rules" = [{
+                matches = [
+                  { "application.process.binary" = "my-broken-app"; }
+                ];
+                actions = {
+                  quirks = [ "force-s16-info" ];
+                };
+              }];
+            };
+          };
+          description = lib.mdDoc ''
+            Additional configuration for the PipeWire PulseAudio server.
+
+            Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/pipewire-pulse.conf.d`.
+
+            See `man pipewire-pulse.conf` for details, and [the PipeWire wiki][wiki] for examples.
+
+            See also:
+            - [PipeWire wiki - PulseAudio tricks guide][wiki-tricks] for more examples.
+
+            [wiki]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Config-PulseAudio
+            [wiki-tricks]: https://gitlab.freedesktop.org/pipewire/pipewire/-/wikis/Guide-PulseAudio-Tricks
+          '';
+        };
+      };
     };
   };
 
   imports = [
     (lib.mkRemovedOptionModule ["services" "pipewire" "config"] ''
-      Overriding default Pipewire configuration through NixOS options never worked correctly and is no longer supported.
-      Please create drop-in files in /etc/pipewire/pipewire.conf.d/ to make the desired setting changes instead.
+      Overriding default PipeWire configuration through NixOS options never worked correctly and is no longer supported.
+      Please create drop-in configuration files via `services.pipewire.extraConfig` instead.
     '')
-
     (lib.mkRemovedOptionModule ["services" "pipewire" "media-session"] ''
       pipewire-media-session is no longer supported upstream and has been removed.
       Please switch to `services.pipewire.wireplumber` instead.
@@ -133,26 +260,35 @@ in {
     services.udev.packages = [ cfg.package ];
 
     # If any paths are updated here they must also be updated in the package test.
-    environment.etc."alsa/conf.d/49-pipewire-modules.conf" = mkIf cfg.alsa.enable {
-      text = ''
-        pcm_type.pipewire {
-          libs.native = ${cfg.package}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;
-          ${optionalString enable32BitAlsaPlugins
-            "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;"}
-        }
-        ctl_type.pipewire {
-          libs.native = ${cfg.package}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;
-          ${optionalString enable32BitAlsaPlugins
-            "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;"}
-        }
-      '';
-    };
-    environment.etc."alsa/conf.d/50-pipewire.conf" = mkIf cfg.alsa.enable {
-      source = "${cfg.package}/share/alsa/alsa.conf.d/50-pipewire.conf";
-    };
-    environment.etc."alsa/conf.d/99-pipewire-default.conf" = mkIf cfg.alsa.enable {
-      source = "${cfg.package}/share/alsa/alsa.conf.d/99-pipewire-default.conf";
-    };
+    environment.etc = {
+      "alsa/conf.d/49-pipewire-modules.conf" = mkIf cfg.alsa.enable {
+        text = ''
+          pcm_type.pipewire {
+            libs.native = ${cfg.package}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;
+            ${optionalString enable32BitAlsaPlugins
+              "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;"}
+          }
+          ctl_type.pipewire {
+            libs.native = ${cfg.package}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;
+            ${optionalString enable32BitAlsaPlugins
+              "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;"}
+          }
+        '';
+      };
+
+      "alsa/conf.d/50-pipewire.conf" = mkIf cfg.alsa.enable {
+        source = "${cfg.package}/share/alsa/alsa.conf.d/50-pipewire.conf";
+      };
+
+      "alsa/conf.d/99-pipewire-default.conf" = mkIf cfg.alsa.enable {
+        source = "${cfg.package}/share/alsa/alsa.conf.d/99-pipewire-default.conf";
+      };
+    }
+    // mapToFiles "pipewire" cfg.extraConfig.pipewire
+    // mapToFiles "client" cfg.extraConfig.client
+    // mapToFiles "client-rt" cfg.extraConfig.client-rt
+    // mapToFiles "jack" cfg.extraConfig.jack
+    // mapToFiles "pipewire-pulse" cfg.extraConfig.pipewire-pulse;
 
     environment.sessionVariables.LD_LIBRARY_PATH =
       lib.mkIf cfg.jack.enable [ "${cfg.package.jack}/lib" ];
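
For illustration, a drop-in created through the new `extraConfig` options (property values are hypothetical) ends up as `/etc/pipewire/pipewire.conf.d/92-low-latency.conf`:

```nix
{
  services.pipewire = {
    enable = true;
    extraConfig.pipewire."92-low-latency" = {
      "context.properties" = {
        "default.clock.rate" = 48000;
        "default.clock.quantum" = 32;
      };
    };
  };
}
```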
diff --git a/nixos/modules/services/development/livebook.md b/nixos/modules/services/development/livebook.md
index 73ddc57f6179a..5012e977a4f7f 100644
--- a/nixos/modules/services/development/livebook.md
+++ b/nixos/modules/services/development/livebook.md
@@ -18,7 +18,7 @@ which runs the server.
     port = 20123;
     # See note below about security
     environmentFile = pkgs.writeText "livebook.env" ''
-      LIVEBOOK_PASSWORD = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
+      LIVEBOOK_PASSWORD = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
     '';
   };
 }
@@ -37,3 +37,14 @@ A better approach would be to put the password in some secure
 user-readable location and set `environmentFile = /home/user/secure/livebook.env`.
 
 :::
+
+### Extra dependencies {#module-services-livebook-extra-dependencies}
+
+By default, the Livebook service runs with a minimal set of
+dependencies, but some features require additional packages. For
+example, the machine learning Kinos require `gcc` and `gnumake`. To
+add these, use `extraPackages`:
+
+```
+services.livebook.extraPackages = with pkgs; [ gcc gnumake ];
+```
diff --git a/nixos/modules/services/development/livebook.nix b/nixos/modules/services/development/livebook.nix
index 3991a4125ec39..75729ff28efaf 100644
--- a/nixos/modules/services/development/livebook.nix
+++ b/nixos/modules/services/development/livebook.nix
@@ -12,6 +12,8 @@ in
     # future, this can be changed to a system service.
     enableUserService = mkEnableOption "a user service for Livebook";
 
+    package = mkPackageOption pkgs "livebook" { };
+
     environmentFile = mkOption {
       type = types.path;
       description = lib.mdDoc ''
@@ -63,6 +65,15 @@ in
         }
       '';
     };
+
+    extraPackages = mkOption {
+      type = with types; listOf package;
+      default = [ ];
+      description = lib.mdDoc ''
+        Extra packages to make available to the Livebook service.
+      '';
+      example = literalExpression "with pkgs; [ gcc gnumake ]";
+    };
   };
 
   config = mkIf cfg.enableUserService {
@@ -79,9 +90,9 @@ in
               sname = cfg.erlang_node_short_name;
             } // cfg.options);
           in
-          "${pkgs.livebook}/bin/livebook server ${args}";
+            "${cfg.package}/bin/livebook server ${args}";
       };
-      path = [ pkgs.bash ];
+      path = [ pkgs.bash ] ++ cfg.extraPackages;
       wantedBy = [ "default.target" ];
     };
   };
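
Putting the two Livebook hunks above together, the new `package` and `extraPackages` options can be set like this (a minimal sketch; the environment file path is illustrative):

```
services.livebook = {
  enableUserService = true;
  environmentFile = "/var/lib/secrets/livebook.env";
  # Pin or replace the Livebook package and expose extra tools to the service.
  package = pkgs.livebook;
  extraPackages = with pkgs; [ gcc gnumake ];
};
```
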
diff --git a/nixos/modules/services/development/nixseparatedebuginfod.nix b/nixos/modules/services/development/nixseparatedebuginfod.nix
new file mode 100644
index 0000000000000..daf85153d339f
--- /dev/null
+++ b/nixos/modules/services/development/nixseparatedebuginfod.nix
@@ -0,0 +1,105 @@
+{ pkgs, lib, config, ... }:
+let
+  cfg = config.services.nixseparatedebuginfod;
+  url = "127.0.0.1:${toString cfg.port}";
+in
+{
+  options = {
+    services.nixseparatedebuginfod = {
+      enable = lib.mkEnableOption "nixseparatedebuginfod, a debuginfod server providing source and debuginfo for nix packages";
+      port = lib.mkOption {
+        description = "Port to listen on.";
+        default = 1949;
+        type = lib.types.port;
+      };
+      nixPackage = lib.mkOption {
+        type = lib.types.package;
+        default = pkgs.nix;
+        defaultText = lib.literalExpression "pkgs.nix";
+        description = ''
+          The version of nix that nixseparatedebuginfod should use as a client for the nix daemon. It is strongly advised to use nix version >= 2.18, otherwise some debug info may go missing.
+        '';
+      };
+      allowOldNix = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = ''
+          Do not fail evaluation when {option}`services.nixseparatedebuginfod.nixPackage` is older than nix 2.18.
+        '';
+      };
+    };
+  };
+  config = lib.mkIf cfg.enable {
+    assertions = [ {
+      assertion = cfg.allowOldNix || (lib.versionAtLeast cfg.nixPackage.version "2.18");
+      message = "nixseparatedebuginfod works better when `services.nixseparatedebuginfod.nixPackage` is set to nix >= 2.18 (instead of ${cfg.nixPackage.name}). Set `services.nixseparatedebuginfod.allowOldNix` to bypass.";
+    } ];
+
+    systemd.services.nixseparatedebuginfod = {
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "nix-daemon.service" ];
+      after = [ "nix-daemon.service" ];
+      path = [ cfg.nixPackage ];
+      serviceConfig = {
+        ExecStart = [ "${pkgs.nixseparatedebuginfod}/bin/nixseparatedebuginfod -l ${url}" ];
+        Restart = "on-failure";
+        CacheDirectory = "nixseparatedebuginfod";
+        # nix does not like DynamicUsers in allowed-users
+        User = "nixseparatedebuginfod";
+        Group = "nixseparatedebuginfod";
+
+        # hardening
+        # Filesystem stuff
+        ProtectSystem = "strict"; # Prevent writing to most of /
+        ProtectHome = true; # Prevent accessing /home and /root
+        PrivateTmp = true; # Give an own directory under /tmp
+        PrivateDevices = true; # Deny access to most of /dev
+        ProtectKernelTunables = true; # Protect some parts of /sys
+        ProtectControlGroups = true; # Remount cgroups read-only
+        RestrictSUIDSGID = true; # Prevent creating SETUID/SETGID files
+        PrivateMounts = true; # Give an own mount namespace
+        RemoveIPC = true;
+        UMask = "0077";
+
+        # Capabilities
+        CapabilityBoundingSet = ""; # Allow no capabilities at all
+        NoNewPrivileges = true; # Disallow getting more capabilities. This is also implied by other options.
+
+        # Kernel stuff
+        ProtectKernelModules = true; # Prevent loading of kernel modules
+        SystemCallArchitectures = "native"; # Usually no need to disable this
+        ProtectKernelLogs = true; # Prevent access to kernel logs
+        ProtectClock = true; # Prevent setting the RTC
+
+        # Networking
+        RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
+
+        # Misc
+        LockPersonality = true; # Prevent change of the personality
+        ProtectHostname = true; # Give an own UTS namespace
+        RestrictRealtime = true; # Prevent switching to RT scheduling
+        MemoryDenyWriteExecute = true; # Maybe disable this for interpreters like python
+        RestrictNamespaces = true;
+      };
+    };
+
+    users.users.nixseparatedebuginfod = {
+      isSystemUser = true;
+      group = "nixseparatedebuginfod";
+    };
+
+    users.groups.nixseparatedebuginfod = { };
+
+    nix.settings.extra-allowed-users = [ "nixseparatedebuginfod" ];
+
+    environment.variables.DEBUGINFOD_URLS = "http://${url}";
+
+    environment.systemPackages = [
+      # valgrind support requires debuginfod-find on PATH
+      (lib.getBin pkgs.elfutils)
+    ];
+
+    environment.etc."gdb/gdbinit.d/nixseparatedebuginfod.gdb".text = "set debuginfod enabled on";
+
+  };
+}
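
For reference, a minimal configuration enabling the new module could look as follows (only `enable` is required; `port` and `nixPackage` repeat the defaults defined above):

```
services.nixseparatedebuginfod = {
  enable = true;
  # Defaults, shown for clarity; the server only listens on 127.0.0.1.
  port = 1949;
  nixPackage = pkgs.nix;
};
```
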
diff --git a/nixos/modules/services/development/zammad.nix b/nixos/modules/services/development/zammad.nix
index 87aceddd6635c..c084d6541ad38 100644
--- a/nixos/modules/services/development/zammad.nix
+++ b/nixos/modules/services/development/zammad.nix
@@ -21,6 +21,7 @@ let
     NODE_ENV = "production";
     RAILS_SERVE_STATIC_FILES = "true";
     RAILS_LOG_TO_STDOUT = "true";
+    REDIS_URL = "redis://${cfg.redis.host}:${toString cfg.redis.port}";
   };
   databaseConfig = settingsFormat.generate "database.yml" cfg.database.settings;
 in
@@ -65,6 +66,36 @@ in
         description = lib.mdDoc "Websocket service port.";
       };
 
+      redis = {
+        createLocally = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Whether to create a local redis automatically.";
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "zammad";
+          description = lib.mdDoc ''
+            Name of the redis server. Only used if `createLocally` is set to true.
+          '';
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc ''
+            Redis server address.
+          '';
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 6379;
+          description = lib.mdDoc "Port of the redis server.";
+        };
+      };
+
       database = {
         type = mkOption {
           type = types.enum [ "PostgreSQL" "MySQL" ];
@@ -206,6 +237,10 @@ in
         assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
         message = "a password cannot be specified if services.zammad.database.createLocally is set to true";
       }
+      {
+        assertion = cfg.redis.createLocally -> cfg.redis.host == "localhost";
+        message = "the redis host must be localhost if services.zammad.redis.createLocally is set to true";
+      }
     ];
 
     services.mysql = optionalAttrs (cfg.database.createLocally && cfg.database.type == "MySQL") {
@@ -231,6 +266,13 @@ in
       ];
     };
 
+    services.redis = optionalAttrs cfg.redis.createLocally {
+      servers."${cfg.redis.name}" = {
+        enable = true;
+        port = cfg.redis.port;
+      };
+    };
+
     systemd.services.zammad-web = {
       inherit environment;
       serviceConfig = serviceConfig // {
@@ -240,6 +282,8 @@ in
       after = [
         "network.target"
         "postgresql.service"
+      ] ++ optionals cfg.redis.createLocally [
+        "redis-${cfg.redis.name}.service"
       ];
       requires = [
         "postgresql.service"
@@ -303,16 +347,15 @@ in
       script = "./script/websocket-server.rb -b ${cfg.host} -p ${toString cfg.websocketPort} start";
     };
 
-    systemd.services.zammad-scheduler = {
-      inherit environment;
-      serviceConfig = serviceConfig // { Type = "forking"; };
+    systemd.services.zammad-worker = {
+      inherit serviceConfig environment;
       after = [ "zammad-web.service" ];
       requires = [ "zammad-web.service" ];
-      description = "Zammad scheduler";
+      description = "Zammad background worker";
       wantedBy = [ "multi-user.target" ];
-      script = "./script/scheduler.rb start";
+      script = "./script/background-worker.rb start";
     };
   };
 
-  meta.maintainers = with lib.maintainers; [ garbas taeer ];
+  meta.maintainers = with lib.maintainers; [ taeer netali ];
 }
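
A hedged example of the new Redis options, assuming the module's existing `enable` switch is used as before (the values shown are the defaults):

```
services.zammad = {
  enable = true;
  redis = {
    # With createLocally = true the module provisions
    # services.redis.servers.zammad and orders zammad-web after it.
    createLocally = true;
    name = "zammad";
    port = 6379;
  };
};
```
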
diff --git a/nixos/modules/services/display-managers/greetd.nix b/nixos/modules/services/display-managers/greetd.nix
index 779e141ca24bd..2212f97a9ffe2 100644
--- a/nixos/modules/services/display-managers/greetd.nix
+++ b/nixos/modules/services/display-managers/greetd.nix
@@ -4,7 +4,7 @@ with lib;
 let
   cfg = config.services.greetd;
   tty = "tty${toString cfg.vt}";
-  settingsFormat = pkgs.formats.toml {};
+  settingsFormat = pkgs.formats.toml { };
 in
 {
   options.services.greetd = {
@@ -27,7 +27,7 @@ in
       '';
     };
 
-    vt = mkOption  {
+    vt = mkOption {
       type = types.int;
       default = 1;
       description = lib.mdDoc ''
@@ -97,12 +97,18 @@ in
 
     systemd.defaultUnit = "graphical.target";
 
+    # Create directories potentially required by supported greeters
+    # See https://github.com/NixOS/nixpkgs/issues/248323
+    systemd.tmpfiles.rules = [
+      "d '/var/cache/tuigreet' - greeter greeter - -"
+    ];
+
     users.users.greeter = {
       isSystemUser = true;
       group = "greeter";
     };
 
-    users.groups.greeter = {};
+    users.groups.greeter = { };
   };
 
   meta.maintainers = with maintainers; [ queezle ];
diff --git a/nixos/modules/services/editors/emacs.nix b/nixos/modules/services/editors/emacs.nix
index 6f45be6640bc6..ff6fd85d8a9b7 100644
--- a/nixos/modules/services/editors/emacs.nix
+++ b/nixos/modules/services/editors/emacs.nix
@@ -15,25 +15,6 @@ let
     fi
   '';
 
-  desktopApplicationFile = pkgs.writeTextFile {
-    name = "emacsclient.desktop";
-    destination = "/share/applications/emacsclient.desktop";
-    text = ''
-      [Desktop Entry]
-      Name=Emacsclient
-      GenericName=Text Editor
-      Comment=Edit text
-      MimeType=text/english;text/plain;text/x-makefile;text/x-c++hdr;text/x-c++src;text/x-chdr;text/x-csrc;text/x-java;text/x-moc;text/x-pascal;text/x-tcl;text/x-tex;application/x-shellscript;text/x-c;text/x-c++;
-      Exec=emacseditor %F
-      Icon=emacs
-      Type=Application
-      Terminal=false
-      Categories=Development;TextEditor;
-      StartupWMClass=Emacs
-      Keywords=Text;Editor;
-    '';
-  };
-
 in
 {
 
@@ -102,7 +83,7 @@ in
       wantedBy = if cfg.startWithGraphical then [ "graphical-session.target" ] else [ "default.target" ];
     };
 
-    environment.systemPackages = [ cfg.package editorScript desktopApplicationFile ];
+    environment.systemPackages = [ cfg.package editorScript ];
 
     environment.variables.EDITOR = mkIf cfg.defaultEditor (mkOverride 900 "emacseditor");
   };
diff --git a/nixos/modules/services/games/teeworlds.nix b/nixos/modules/services/games/teeworlds.nix
index ffef440330c4e..bd0df1ffca578 100644
--- a/nixos/modules/services/games/teeworlds.nix
+++ b/nixos/modules/services/games/teeworlds.nix
@@ -100,7 +100,7 @@ in
 
       serviceConfig = {
         DynamicUser = true;
-        ExecStart = "${pkgs.teeworlds}/bin/teeworlds_srv -f ${teeworldsConf}";
+        ExecStart = "${pkgs.teeworlds-server}/bin/teeworlds_srv -f ${teeworldsConf}";
 
         # Hardening
         CapabilityBoundingSet = false;
diff --git a/nixos/modules/services/hardware/acpid.nix b/nixos/modules/services/hardware/acpid.nix
index 821f4ef205fc5..6021aad09f450 100644
--- a/nixos/modules/services/hardware/acpid.nix
+++ b/nixos/modules/services/hardware/acpid.nix
@@ -135,6 +135,7 @@ in
       wantedBy = [ "multi-user.target" ];
 
       serviceConfig = {
+        PrivateNetwork = true;
         ExecStart = escapeShellArgs
           ([ "${pkgs.acpid}/bin/acpid"
              "--foreground"
diff --git a/nixos/modules/services/hardware/kanata.nix b/nixos/modules/services/hardware/kanata.nix
index 0b77bfbc33b3f..05e76d8432154 100644
--- a/nixos/modules/services/hardware/kanata.nix
+++ b/nixos/modules/services/hardware/kanata.nix
@@ -78,7 +78,13 @@ let
   mkName = name: "kanata-${name}";
 
   mkDevices = devices:
-    optionalString ((length devices) > 0) "linux-dev ${concatStringsSep ":" devices}";
+    let
+      devicesString = pipe devices [
+        (map (device: "\"" + device + "\""))
+        (concatStringsSep " ")
+      ];
+    in
+    optionalString ((length devices) > 0) "linux-dev (${devicesString})";
 
   mkConfig = name: keyboard: pkgs.writeText "${mkName name}-config.kdb" ''
     (defcfg
diff --git a/nixos/modules/services/hardware/keyd.nix b/nixos/modules/services/hardware/keyd.nix
index 724e9b9568478..77297401a51c7 100644
--- a/nixos/modules/services/hardware/keyd.nix
+++ b/nixos/modules/services/hardware/keyd.nix
@@ -143,7 +143,7 @@ in
         RuntimeDirectory = "keyd";
 
         # Hardening
-        CapabilityBoundingSet = "";
+        CapabilityBoundingSet = [ "CAP_SYS_NICE" ];
         DeviceAllow = [
           "char-input rw"
           "/dev/uinput rw"
@@ -152,7 +152,7 @@ in
         PrivateNetwork = true;
         ProtectHome = true;
         ProtectHostname = true;
-        PrivateUsers = true;
+        PrivateUsers = false;
         PrivateMounts = true;
         PrivateTmp = true;
         RestrictNamespaces = true;
@@ -165,9 +165,9 @@ in
         LockPersonality = true;
         ProtectProc = "invisible";
         SystemCallFilter = [
+          "nice"
           "@system-service"
           "~@privileged"
-          "~@resources"
         ];
         RestrictAddressFamilies = [ "AF_UNIX" ];
         RestrictSUIDSGID = true;
diff --git a/nixos/modules/services/hardware/pcscd.nix b/nixos/modules/services/hardware/pcscd.nix
index a9e4998efe37a..85accd8335f78 100644
--- a/nixos/modules/services/hardware/pcscd.nix
+++ b/nixos/modules/services/hardware/pcscd.nix
@@ -16,9 +16,6 @@ let
 
 in
 {
-
-  ###### interface
-
   options.services.pcscd = {
     enable = mkEnableOption (lib.mdDoc "PCSC-Lite daemon");
 
@@ -46,13 +43,10 @@ in
     };
   };
 
-  ###### implementation
-
   config = mkIf config.services.pcscd.enable {
-
     environment.etc."reader.conf".source = cfgFile;
 
-    environment.systemPackages = [ package ];
+    environment.systemPackages = [ package.out ];
     systemd.packages = [ (getBin package) ];
 
     services.pcscd.plugins = [ pkgs.ccid ];
@@ -61,7 +55,6 @@ in
 
     systemd.services.pcscd = {
       environment.PCSCLITE_HP_DROPDIR = pluginEnv;
-      restartTriggers = [ "/etc/reader.conf" ];
 
       # If the cfgFile is empty and not specified (in which case the default
       # /etc/reader.conf is assumed), pcscd will happily start going through the
diff --git a/nixos/modules/services/hardware/power-profiles-daemon.nix b/nixos/modules/services/hardware/power-profiles-daemon.nix
index 101da01b4a712..1d84bf8ac937c 100644
--- a/nixos/modules/services/hardware/power-profiles-daemon.nix
+++ b/nixos/modules/services/hardware/power-profiles-daemon.nix
@@ -1,10 +1,7 @@
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
   cfg = config.services.power-profiles-daemon;
-  package = pkgs.power-profiles-daemon;
 in
 
 {
@@ -15,8 +12,8 @@ in
 
     services.power-profiles-daemon = {
 
-      enable = mkOption {
-        type = types.bool;
+      enable = lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc ''
           Whether to enable power-profiles-daemon, a DBus daemon that allows
@@ -24,6 +21,8 @@ in
         '';
       };
 
+      package = lib.mkPackageOption pkgs "power-profiles-daemon" { };
+
     };
 
   };
@@ -31,7 +30,7 @@ in
 
   ###### implementation
 
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
 
     assertions = [
       { assertion = !config.services.tlp.enable;
@@ -42,13 +41,13 @@ in
       }
     ];
 
-    environment.systemPackages = [ package ];
+    environment.systemPackages = [ cfg.package ];
 
-    services.dbus.packages = [ package ];
+    services.dbus.packages = [ cfg.package ];
 
-    services.udev.packages = [ package ];
+    services.udev.packages = [ cfg.package ];
 
-    systemd.packages = [ package ];
+    systemd.packages = [ cfg.package ];
 
   };
 
diff --git a/nixos/modules/services/hardware/sane.nix b/nixos/modules/services/hardware/sane.nix
index 8408844c4f943..8f64afe60734c 100644
--- a/nixos/modules/services/hardware/sane.nix
+++ b/nixos/modules/services/hardware/sane.nix
@@ -4,7 +4,7 @@ with lib;
 
 let
 
-  pkg = pkgs.sane-backends.override {
+  pkg = config.hardware.sane.backends-package.override {
     scanSnapDriversUnfree = config.hardware.sane.drivers.scanSnap.enable;
     scanSnapDriversPackage = config.hardware.sane.drivers.scanSnap.package;
   };
@@ -57,6 +57,13 @@ in
       '';
     };
 
+    hardware.sane.backends-package = mkOption {
+      type = types.package;
+      default = pkgs.sane-backends;
+      defaultText = literalExpression "pkgs.sane-backends";
+      description = lib.mdDoc "Backends driver package to use.";
+    };
+
     hardware.sane.snapshot = mkOption {
       type = types.bool;
       default = false;
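
The new `hardware.sane.backends-package` option makes the backends package swappable; a minimal sketch (the value shown is simply the default, substitute your own build if needed):

```
hardware.sane = {
  enable = true;
  # Replace with a patched or pinned build if required; this is the default.
  backends-package = pkgs.sane-backends;
};
```
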
diff --git a/nixos/modules/services/hardware/thermald.nix b/nixos/modules/services/hardware/thermald.nix
index 7ae602823cd65..a4839f326cc45 100644
--- a/nixos/modules/services/hardware/thermald.nix
+++ b/nixos/modules/services/hardware/thermald.nix
@@ -19,6 +19,12 @@ in
         '';
       };
 
+      ignoreCpuidCheck = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to ignore the cpuid check to allow running on unsupported platforms.";
+      };
+
       configFile = mkOption {
         type = types.nullOr types.path;
         default = null;
@@ -42,6 +48,7 @@ in
           ${cfg.package}/sbin/thermald \
             --no-daemon \
             ${optionalString cfg.debug "--loglevel=debug"} \
+            ${optionalString cfg.ignoreCpuidCheck "--ignore-cpuid-check"} \
             ${optionalString (cfg.configFile != null) "--config-file ${cfg.configFile}"} \
             --dbus-enable \
             --adaptive
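
The new flag maps directly onto thermald's `--ignore-cpuid-check` switch; a minimal sketch:

```
services.thermald = {
  enable = true;
  # Let thermald start on CPUs it does not officially support.
  ignoreCpuidCheck = true;
};
```
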
diff --git a/nixos/modules/services/hardware/thinkfan.nix b/nixos/modules/services/hardware/thinkfan.nix
index 8fa7b456f20e2..cca35f492b8e3 100644
--- a/nixos/modules/services/hardware/thinkfan.nix
+++ b/nixos/modules/services/hardware/thinkfan.nix
@@ -217,6 +217,8 @@ in {
 
     systemd.services = {
       thinkfan.environment.THINKFAN_ARGS = escapeShellArgs ([ "-c" configFile ] ++ cfg.extraArgs);
+      thinkfan.serviceConfig.Restart = "on-failure";
+      thinkfan.serviceConfig.RestartSec = "30s";
 
       # must be added manually, see issue #81138
       thinkfan.wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/services/hardware/udev.nix b/nixos/modules/services/hardware/udev.nix
index 08ca7a0d247d0..670b9087f1107 100644
--- a/nixos/modules/services/hardware/udev.nix
+++ b/nixos/modules/services/hardware/udev.nix
@@ -112,7 +112,8 @@ let
       echo "OK"
 
       filesToFixup="$(for i in "$out"/*; do
-        grep -l '\B\(/usr\)\?/s\?bin' "$i" || :
+        # list all files referring to (/usr)/bin paths, but allow references to /bin/sh.
+        grep -P -l '\B(?!\/bin\/sh\b)(\/usr)?\/bin(?:\/.*)?' "$i" || :
       done)"
 
       if [ -n "$filesToFixup" ]; then
@@ -222,6 +223,9 @@ in
         description = lib.mdDoc ''
           Packages added to the {env}`PATH` environment variable when
           executing programs from Udev rules.
+
+          coreutils, gnu{sed,grep}, util-linux and config.systemd.package are
+          automatically included.
         '';
       };
 
diff --git a/nixos/modules/services/hardware/vdr.nix b/nixos/modules/services/hardware/vdr.nix
index afa64fa16c4a6..689d83f7eedcd 100644
--- a/nixos/modules/services/hardware/vdr.nix
+++ b/nixos/modules/services/hardware/vdr.nix
@@ -1,18 +1,15 @@
 { config, lib, pkgs, ... }:
-
-with lib;
-
 let
   cfg = config.services.vdr;
-  libDir = "/var/lib/vdr";
-in {
-
-  ###### interface
 
+  inherit (lib)
+    mkEnableOption mkPackageOption mkOption types mkIf optional mdDoc;
+in
+{
   options = {
 
     services.vdr = {
-      enable = mkEnableOption (lib.mdDoc "VDR. Please put config into ${libDir}");
+      enable = mkEnableOption (mdDoc "Start VDR");
 
       package = mkPackageOption pkgs "vdr" {
         example = "wrapVdr.override { plugins = with pkgs.vdrPlugins; [ hello ]; }";
@@ -21,58 +18,84 @@ in {
       videoDir = mkOption {
         type = types.path;
         default = "/srv/vdr/video";
-        description = lib.mdDoc "Recording directory";
+        description = mdDoc "Recording directory";
       };
 
       extraArguments = mkOption {
         type = types.listOf types.str;
-        default = [];
-        description = lib.mdDoc "Additional command line arguments to pass to VDR.";
+        default = [ ];
+        description = mdDoc "Additional command line arguments to pass to VDR.";
+      };
+
+      enableLirc = mkEnableOption (mdDoc "LIRC");
+
+      user = mkOption {
+        type = types.str;
+        default = "vdr";
+        description = mdDoc ''
+          User under which the VDR service runs.
+        '';
       };
 
-      enableLirc = mkEnableOption (lib.mdDoc "LIRC");
+      group = mkOption {
+        type = types.str;
+        default = "vdr";
+        description = mdDoc ''
+          Group under which the VDR service runs.
+        '';
+      };
     };
+
   };
 
-  ###### implementation
+  config = mkIf cfg.enable {
 
-  config = mkIf cfg.enable (mkMerge [{
     systemd.tmpfiles.rules = [
-      "d ${cfg.videoDir} 0755 vdr vdr -"
-      "Z ${cfg.videoDir} - vdr vdr -"
+      "d ${cfg.videoDir} 0755 ${cfg.user} ${cfg.group} -"
+      "Z ${cfg.videoDir} - ${cfg.user} ${cfg.group} -"
     ];
 
     systemd.services.vdr = {
       description = "VDR";
       wantedBy = [ "multi-user.target" ];
+      wants = optional cfg.enableLirc "lircd.service";
+      after = [ "network.target" ]
+        ++ optional cfg.enableLirc "lircd.service";
       serviceConfig = {
-        ExecStart = ''
-          ${cfg.package}/bin/vdr \
-            --video="${cfg.videoDir}" \
-            --config="${libDir}" \
-            ${escapeShellArgs cfg.extraArguments}
-        '';
-        User = "vdr";
+        ExecStart =
+          let
+            args = [
+              "--video=${cfg.videoDir}"
+            ]
+            ++ optional cfg.enableLirc "--lirc=${config.passthru.lirc.socket}"
+            ++ cfg.extraArguments;
+          in
+          "${cfg.package}/bin/vdr ${lib.escapeShellArgs args}";
+        User = cfg.user;
+        Group = cfg.group;
         CacheDirectory = "vdr";
         StateDirectory = "vdr";
+        RuntimeDirectory = "vdr";
         Restart = "on-failure";
       };
     };
 
-    users.users.vdr = {
-      group = "vdr";
-      home = libDir;
-      isSystemUser = true;
+    environment.systemPackages = [ cfg.package ];
+
+    users.users = mkIf (cfg.user == "vdr") {
+      vdr = {
+        inherit (cfg) group;
+        home = "/run/vdr";
+        isSystemUser = true;
+        extraGroups = [
+          "video"
+          "audio"
+        ]
+        ++ optional cfg.enableLirc "lirc";
+      };
     };
 
-    users.groups.vdr = {};
-  }
+    users.groups = mkIf (cfg.group == "vdr") { vdr = { }; };
 
-  (mkIf cfg.enableLirc {
-    services.lirc.enable = true;
-    users.users.vdr.extraGroups = [ "lirc" ];
-    services.vdr.extraArguments = [
-      "--lirc=${config.passthru.lirc.socket}"
-    ];
-  })]);
+  };
 }
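
With the rewrite above, running VDR under a custom account and with LIRC support is a matter of setting the new options; a hedged sketch (the `media` account is illustrative):

```
services.vdr = {
  enable = true;
  videoDir = "/srv/vdr/video";
  enableLirc = true;
  # Illustrative custom account; it must already exist, since the module
  # only creates a user/group when both are left at the default "vdr".
  user = "media";
  group = "media";
};
```
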
diff --git a/nixos/modules/services/home-automation/evcc.nix b/nixos/modules/services/home-automation/evcc.nix
index d0ce3fb4a1ce6..f360f525b04b9 100644
--- a/nixos/modules/services/home-automation/evcc.nix
+++ b/nixos/modules/services/home-automation/evcc.nix
@@ -41,6 +41,7 @@ in
 
   config = mkIf cfg.enable {
     systemd.services.evcc = {
+      wants = [ "network-online.target" ];
       after = [
         "network-online.target"
         "mosquitto.target"
diff --git a/nixos/modules/services/home-automation/home-assistant.nix b/nixos/modules/services/home-automation/home-assistant.nix
index 54fd3e17292f6..a01628968966e 100644
--- a/nixos/modules/services/home-automation/home-assistant.nix
+++ b/nixos/modules/services/home-automation/home-assistant.nix
@@ -11,14 +11,12 @@ let
   # options shown in settings.
   # We post-process the result to add support for YAML functions, like secrets or includes, see e.g.
   # https://www.home-assistant.io/docs/configuration/secrets/
-  filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [ null ])) cfg.config or {};
+  filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [ null ])) (lib.recursiveUpdate customLovelaceModulesResources (cfg.config or {}));
   configFile = pkgs.runCommandLocal "configuration.yaml" { } ''
     cp ${format.generate "configuration.yaml" filteredConfig} $out
     sed -i -e "s/'\!\([a-z_]\+\) \(.*\)'/\!\1 \2/;s/^\!\!/\!/;" $out
   '';
-  lovelaceConfig = if (cfg.lovelaceConfig == null) then {}
-    else (lib.recursiveUpdate customLovelaceModulesResources cfg.lovelaceConfig);
-  lovelaceConfigFile = format.generate "ui-lovelace.yaml" lovelaceConfig;
+  lovelaceConfigFile = format.generate "ui-lovelace.yaml" cfg.lovelaceConfig;
 
   # Components advertised by the home-assistant package
   availableComponents = cfg.package.availableComponents;
@@ -77,7 +75,7 @@ let
   # Create parts of the lovelace config that reference lovelave modules as resources
   customLovelaceModulesResources = {
     lovelace.resources = map (card: {
-      url = "/local/nixos-lovelace-modules/${card.entrypoint or card.pname}.js?${card.version}";
+      url = "/local/nixos-lovelace-modules/${card.entrypoint or (card.pname + ".js")}?${card.version}";
       type = "module";
     }) cfg.customLovelaceModules;
   };
@@ -159,7 +157,7 @@ in {
       default = [];
       example = literalExpression ''
         with pkgs.home-assistant-custom-components; [
-          prometheus-sensor
+          prometheus_sensor
         ];
       '';
       description = lib.mdDoc ''
@@ -437,6 +435,7 @@ in {
 
     systemd.services.home-assistant = {
       description = "Home Assistant";
+      wants = [ "network-online.target" ];
       after = [
         "network-online.target"
 
@@ -455,10 +454,10 @@ in {
           ln -s /etc/home-assistant/configuration.yaml "${cfg.configDir}/configuration.yaml"
         '';
         copyLovelaceConfig = if cfg.lovelaceConfigWritable then ''
+          rm -f "${cfg.configDir}/ui-lovelace.yaml"
           cp --no-preserve=mode ${lovelaceConfigFile} "${cfg.configDir}/ui-lovelace.yaml"
         '' else ''
-          rm -f "${cfg.configDir}/ui-lovelace.yaml"
-          ln -s /etc/home-assistant/ui-lovelace.yaml "${cfg.configDir}/ui-lovelace.yaml"
+          ln -fs /etc/home-assistant/ui-lovelace.yaml "${cfg.configDir}/ui-lovelace.yaml"
         '';
         copyCustomLovelaceModules = if cfg.customLovelaceModules != [] then ''
           mkdir -p "${cfg.configDir}/www"
@@ -470,8 +469,8 @@ in {
           mkdir -p "${cfg.configDir}/custom_components"
 
           # remove components symlinked in from below the /nix/store
-          components="$(find "${cfg.configDir}/custom_components" -maxdepth 1 -type l)"
-          for component in "$components"; do
+          readarray -d "" components < <(find "${cfg.configDir}/custom_components" -maxdepth 1 -type l -print0)
+          for component in "''${components[@]}"; do
             if [[ "$(readlink "$component")" =~ ^${escapeShellArg builtins.storeDir} ]]; then
               rm "$component"
             fi
@@ -525,7 +524,6 @@ in {
           "bluetooth_tracker"
           "bthome"
           "default_config"
-          "eq3btsmart"
           "eufylife_ble"
           "esphome"
           "fjaraskupan"
diff --git a/nixos/modules/services/logging/journaldriver.nix b/nixos/modules/services/logging/journaldriver.nix
index 59eedff90d60e..4d21464018aac 100644
--- a/nixos/modules/services/logging/journaldriver.nix
+++ b/nixos/modules/services/logging/journaldriver.nix
@@ -84,6 +84,7 @@ in {
     systemd.services.journaldriver = {
       description = "Stackdriver Logging journal forwarder";
       script      = "${pkgs.journaldriver}/bin/journaldriver";
+      wants       = [ "network-online.target" ];
       after       = [ "network-online.target" ];
       wantedBy    = [ "multi-user.target" ];
 
diff --git a/nixos/modules/services/logging/logcheck.nix b/nixos/modules/services/logging/logcheck.nix
index 8a277cea6e461..5d87fc87d4161 100644
--- a/nixos/modules/services/logging/logcheck.nix
+++ b/nixos/modules/services/logging/logcheck.nix
@@ -220,10 +220,16 @@ in
       logcheck = {};
     };
 
-    system.activationScripts.logcheck = ''
-      mkdir -m 700 -p /var/{lib,lock}/logcheck
-      chown ${cfg.user} /var/{lib,lock}/logcheck
-    '';
+    systemd.tmpfiles.settings.logcheck = {
+      "/var/lib/logcheck".d = {
+        mode = "700";
+        inherit (cfg) user;
+      };
+      "/var/lock/logcheck".d = {
+        mode = "700";
+        inherit (cfg) user;
+      };
+    };
 
     services.cron.systemCronJobs =
         let withTime = name: {timeArgs, ...}: timeArgs != null;
diff --git a/nixos/modules/services/logging/vector.nix b/nixos/modules/services/logging/vector.nix
index 48f9eeb4ce8f0..9ccf8a4fa0610 100644
--- a/nixos/modules/services/logging/vector.nix
+++ b/nixos/modules/services/logging/vector.nix
@@ -51,13 +51,17 @@ in
         {
           ExecStart = "${getExe cfg.package} --config ${validateConfig conf}";
           DynamicUser = true;
-          Restart = "no";
+          Restart = "always";
           StateDirectory = "vector";
           ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
           AmbientCapabilities = "CAP_NET_BIND_SERVICE";
           # This group is required for accessing journald.
           SupplementaryGroups = mkIf cfg.journaldAccess "systemd-journal";
         };
+      unitConfig = {
+        StartLimitIntervalSec = 10;
+        StartLimitBurst = 5;
+      };
     };
   };
 }
diff --git a/nixos/modules/services/mail/dovecot.nix b/nixos/modules/services/mail/dovecot.nix
index abbb2f32e6ccc..79c8fec752521 100644
--- a/nixos/modules/services/mail/dovecot.nix
+++ b/nixos/modules/services/mail/dovecot.nix
@@ -1,8 +1,11 @@
 { options, config, lib, pkgs, ... }:
 
-with lib;
-
 let
+  inherit (lib) any attrValues concatMapStringsSep concatStrings
+    concatStringsSep flatten imap1 isList literalExpression mapAttrsToList
+    mkEnableOption mkIf mkOption mkRemovedOptionModule optional optionalAttrs
+    optionalString singleton types;
+
   cfg = config.services.dovecot2;
   dovecotPkg = pkgs.dovecot;
 
@@ -113,6 +116,37 @@ let
       ''
     )
 
+    ''
+      plugin {
+        sieve_plugins = ${concatStringsSep " " cfg.sieve.plugins}
+    ''
+    (optionalString (cfg.sieve.extensions != []) ''sieve_extensions = ${concatMapStringsSep " " (el: "+${el}") cfg.sieve.extensions}'')
+    (optionalString (cfg.sieve.globalExtensions != []) ''sieve_global_extensions = ${concatMapStringsSep " " (el: "+${el}") cfg.sieve.globalExtensions}'')
+
+    (optionalString (cfg.imapsieve.mailbox != []) ''
+      ${
+        concatStringsSep "\n" (flatten (imap1 (
+            idx: el:
+              singleton "imapsieve_mailbox${toString idx}_name = ${el.name}"
+              ++ optional (el.from != null) "imapsieve_mailbox${toString idx}_from = ${el.from}"
+              ++ optional (el.causes != null) "imapsieve_mailbox${toString idx}_causes = ${el.causes}"
+              ++ optional (el.before != null) "imapsieve_mailbox${toString idx}_before = file:${stateDir}/imapsieve/before/${baseNameOf el.before}"
+              ++ optional (el.after != null) "imapsieve_mailbox${toString idx}_after = file:${stateDir}/imapsieve/after/${baseNameOf el.after}"
+          )
+          cfg.imapsieve.mailbox))
+      }
+    '')
+    (optionalString (cfg.sieve.pipeBins != []) ''
+        sieve_pipe_bin_dir = ${pkgs.linkFarm "sieve-pipe-bins" (map (el: {
+          name = builtins.unsafeDiscardStringContext (baseNameOf el);
+          path = el;
+        })
+        cfg.sieve.pipeBins)}
+    '')
+    ''
+      }
+    ''
+
     cfg.extraConfig
   ];
 
@@ -343,6 +377,104 @@ in
       description = lib.mdDoc "Quota limit for the user in bytes. Supports suffixes b, k, M, G, T and %.";
     };
 
+    imapsieve.mailbox = mkOption {
+      default = [];
+      description = "Configure Sieve filtering rules on IMAP actions";
+      type = types.listOf (types.submodule ({ config, ... }: {
+        options = {
+          name = mkOption {
+            description = ''
+              This setting configures the name of a mailbox for which administrator scripts are configured.
+
+              The settings defined hereafter with matching sequence numbers apply to the mailbox named by this setting.
+
+              This setting supports wildcards with a syntax compatible with the IMAP LIST command, meaning that this setting can apply to multiple or even all ("*") mailboxes.
+            '';
+            example = "Junk";
+            type = types.str;
+          };
+
+          from = mkOption {
+            default = null;
+            description = ''
+              Only execute the administrator Sieve scripts for the mailbox configured with services.dovecot2.imapsieve.mailbox.<name>.name when the message originates from the indicated mailbox.
+
+              This setting supports wildcards with a syntax compatible with the IMAP LIST command, meaning that this setting can apply to multiple or even all ("*") mailboxes.
+            '';
+            example = "*";
+            type = types.nullOr types.str;
+          };
+
+          causes = mkOption {
+            default = null;
+            description = ''
+              Only execute the administrator Sieve scripts for the mailbox configured with services.dovecot2.imapsieve.mailbox.<name>.name when one of the listed IMAPSIEVE causes apply.
+
+              This has no effect on the user script, which is always executed no matter the cause.
+            '';
+            example = "COPY";
+            type = types.nullOr (types.enum [ "APPEND" "COPY" "FLAG" ]);
+          };
+
+          before = mkOption {
+            default = null;
+            description = ''
+              When an IMAP event of interest occurs, this Sieve script is executed before any user script.
+
+              This setting specifies the location of a single Sieve script. The semantics of this setting are similar to sieve_before: the specified scripts form a sequence together with the user script in which the next script is only executed when an (implicit) keep action is executed.
+            '';
+            example = literalExpression "./report-spam.sieve";
+            type = types.nullOr types.path;
+          };
+
+          after = mkOption {
+            default = null;
+            description = ''
+              When an IMAP event of interest occurs, this Sieve script is executed after any user script.
+
+              This setting specifies the location of a single Sieve script. The semantics of this setting are similar to sieve_after: the specified scripts form a sequence together with the user script in which the next script is only executed when an (implicit) keep action is executed.
+            '';
+            example = literalExpression "./report-spam.sieve";
+            type = types.nullOr types.path;
+          };
+        };
+      }));
+    };
+
+    sieve = {
+      plugins = mkOption {
+        default = [];
+        example = [ "sieve_extprograms" ];
+        description = "Sieve plugins to load";
+        type = types.listOf types.str;
+      };
+
+      extensions = mkOption {
+        default = [];
+        description = "Sieve extensions for use in user scripts";
+        example = [ "notify" "imapflags" "vnd.dovecot.filter" ];
+        type = types.listOf types.str;
+      };
+
+      globalExtensions = mkOption {
+        default = [];
+        example = [ "vnd.dovecot.environment" ];
+        description = "Sieve extensions for use in global scripts";
+        type = types.listOf types.str;
+      };
+
+      pipeBins = mkOption {
+        default = [];
+        example = literalExpression ''
+          map lib.getExe [
+            (pkgs.writeShellScriptBin "learn-ham.sh" "exec ''${pkgs.rspamd}/bin/rspamc learn_ham")
+            (pkgs.writeShellScriptBin "learn-spam.sh" "exec ''${pkgs.rspamd}/bin/rspamc learn_spam")
+          ]
+        '';
+        description = "Programs available for use by the vnd.dovecot.pipe extension";
+        type = types.listOf types.path;
+      };
+    };
   };
 
 
@@ -353,14 +485,23 @@ in
       enable = true;
       params.dovecot2 = {};
     };
-    services.dovecot2.protocols =
-      optional cfg.enableImap "imap"
-      ++ optional cfg.enablePop3 "pop3"
-      ++ optional cfg.enableLmtp "lmtp";
-
-    services.dovecot2.mailPlugins = mkIf cfg.enableQuota {
-      globally.enable = [ "quota" ];
-      perProtocol.imap.enable = [ "imap_quota" ];
+
+    services.dovecot2 = {
+      protocols =
+        optional cfg.enableImap "imap"
+        ++ optional cfg.enablePop3 "pop3"
+        ++ optional cfg.enableLmtp "lmtp";
+
+      mailPlugins = mkIf cfg.enableQuota {
+        globally.enable = [ "quota" ];
+        perProtocol.imap.enable = [ "imap_quota" ];
+      };
+
+      sieve.plugins =
+        optional (cfg.imapsieve.mailbox != []) "sieve_imapsieve"
+        ++ optional (cfg.sieve.pipeBins != []) "sieve_extprograms";
+
+      sieve.globalExtensions = optional (cfg.sieve.pipeBins != []) "vnd.dovecot.pipe";
     };
 
     users.users = {
@@ -415,7 +556,7 @@ in
       # (should be 0) so that the compiled sieve script is newer than
       # the source file and Dovecot won't try to compile it.
       preStart = ''
-        rm -rf ${stateDir}/sieve
+        rm -rf ${stateDir}/sieve ${stateDir}/imapsieve
       '' + optionalString (cfg.sieveScripts != {}) ''
         mkdir -p ${stateDir}/sieve
         ${concatStringsSep "\n" (
@@ -432,6 +573,29 @@ in
         ) cfg.sieveScripts
       )}
         chown -R '${cfg.mailUser}:${cfg.mailGroup}' '${stateDir}/sieve'
+      ''
+      + optionalString (cfg.imapsieve.mailbox != []) ''
+        mkdir -p ${stateDir}/imapsieve/{before,after}
+
+        ${
+          concatMapStringsSep "\n"
+            (el:
+              optionalString (el.before != null) ''
+                cp -p ${el.before} ${stateDir}/imapsieve/before/${baseNameOf el.before}
+                ${pkgs.dovecot_pigeonhole}/bin/sievec '${stateDir}/imapsieve/before/${baseNameOf el.before}'
+              ''
+              + optionalString (el.after != null) ''
+                cp -p ${el.after} ${stateDir}/imapsieve/after/${baseNameOf el.after}
+                ${pkgs.dovecot_pigeonhole}/bin/sievec '${stateDir}/imapsieve/after/${baseNameOf el.after}'
+              ''
+            )
+            cfg.imapsieve.mailbox
+        }
+
+        ${
+          optionalString (cfg.mailUser != null && cfg.mailGroup != null)
+            "chown -R '${cfg.mailUser}:${cfg.mailGroup}' '${stateDir}/imapsieve'"
+        }
       '';
     };
 
@@ -459,4 +623,5 @@ in
 
   };
 
+  meta.maintainers = [ lib.maintainers.dblsaiko ];
 }
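
A hedged example wiring the new Sieve options into spam training, modelled on the option examples above (`report-spam.sieve` is a hypothetical local script, and enabling the `imap_sieve` mail plugin is an assumption about the surrounding Dovecot setup):

```
services.dovecot2 = {
  enable = true;
  # Assumption: the imap_sieve mail plugin is needed for IMAPSieve to run.
  mailPlugins.perProtocol.imap.enable = [ "imap_sieve" ];
  imapsieve.mailbox = [{
    name = "Junk";
    causes = "COPY";
    before = ./report-spam.sieve; # hypothetical local script
  }];
  sieve.pipeBins = map lib.getExe [
    (pkgs.writeShellScriptBin "learn-spam.sh" "exec ${pkgs.rspamd}/bin/rspamc learn_spam")
  ];
};
```
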
diff --git a/nixos/modules/services/mail/listmonk.nix b/nixos/modules/services/mail/listmonk.nix
index be2f9680ca5ac..945eb436c1f23 100644
--- a/nixos/modules/services/mail/listmonk.nix
+++ b/nixos/modules/services/mail/listmonk.nix
@@ -201,13 +201,12 @@ in {
         DynamicUser = true;
         NoNewPrivileges = true;
         CapabilityBoundingSet = "";
-        SystemCallArchitecture = "native";
+        SystemCallArchitectures = "native";
         SystemCallFilter = [ "@system-service" "~@privileged" ];
-        ProtectDevices = true;
+        PrivateDevices = true;
         ProtectControlGroups = true;
         ProtectKernelTunables = true;
         ProtectHome = true;
-        DeviceAllow = false;
         RestrictNamespaces = true;
         RestrictRealtime = true;
         UMask = "0027";
diff --git a/nixos/modules/services/mail/nullmailer.nix b/nixos/modules/services/mail/nullmailer.nix
index f6befe246b12a..4fd0026dbe4eb 100644
--- a/nixos/modules/services/mail/nullmailer.nix
+++ b/nixos/modules/services/mail/nullmailer.nix
@@ -120,7 +120,7 @@ with lib;
         };
 
         maxpause = mkOption {
-          type = types.nullOr types.str;
+          type = with types; nullOr (oneOf [ str int ]);
           default = null;
           description = lib.mdDoc ''
              The maximum time to pause between successive queue runs, in seconds.
@@ -138,7 +138,7 @@ with lib;
         };
 
         pausetime = mkOption {
-          type = types.nullOr types.str;
+          type = with types; nullOr (oneOf [ str int ]);
           default = null;
           description = lib.mdDoc ''
             The minimum time to pause between successive queue runs when there
@@ -168,7 +168,7 @@ with lib;
         };
 
         sendtimeout = mkOption {
-          type = types.nullOr types.str;
+          type = with types; nullOr (oneOf [ str int ]);
           default = null;
           description = lib.mdDoc ''
             The  time to wait for a remote module listed above to complete sending
@@ -194,7 +194,7 @@ with lib;
     environment = {
       systemPackages = [ pkgs.nullmailer ];
       etc = let
-        validAttrs = filterAttrs (name: value: value != null) cfg.config;
+        validAttrs = lib.mapAttrs (_: toString) (filterAttrs (_: value: value != null) cfg.config);
       in
         (foldl' (as: name: as // { "nullmailer/${name}".text = validAttrs.${name}; }) {} (attrNames validAttrs))
           // optionalAttrs (cfg.remotesFile != null) { "nullmailer/remotes".source = cfg.remotesFile; };
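
With the type change above, the queue-timing settings may now be given as plain integers (seconds) instead of strings; a minimal sketch:

```
services.nullmailer = {
  enable = true;
  config = {
    # Accepted as integers now that the options are `nullOr (oneOf [ str int ])`.
    maxpause = 86400;
    pausetime = 60;
    sendtimeout = 3600;
  };
};
```
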
diff --git a/nixos/modules/services/mail/postfix.nix b/nixos/modules/services/mail/postfix.nix
index 23c47aaca7e23..209e066a19ef8 100644
--- a/nixos/modules/services/mail/postfix.nix
+++ b/nixos/modules/services/mail/postfix.nix
@@ -747,7 +747,7 @@ in
 
             ${concatStringsSep "\n" (mapAttrsToList (to: from: ''
               ln -sf ${from} /var/lib/postfix/conf/${to}
-              ${pkgs.postfix}/bin/postalias /var/lib/postfix/conf/${to}
+              ${pkgs.postfix}/bin/postalias -o -p /var/lib/postfix/conf/${to}
             '') cfg.aliasFiles)}
             ${concatStringsSep "\n" (mapAttrsToList (to: from: ''
               ln -sf ${from} /var/lib/postfix/conf/${to}
@@ -779,6 +779,19 @@ in
             ExecStart = "${pkgs.postfix}/bin/postfix start";
             ExecStop = "${pkgs.postfix}/bin/postfix stop";
             ExecReload = "${pkgs.postfix}/bin/postfix reload";
+
+            # Hardening
+            PrivateTmp = true;
+            PrivateDevices = true;
+            ProtectSystem = "full";
+            CapabilityBoundingSet = [ "~CAP_NET_ADMIN CAP_SYS_ADMIN CAP_SYS_BOOT CAP_SYS_MODULE" ];
+            MemoryDenyWriteExecute = true;
+            ProtectKernelModules = true;
+            ProtectKernelTunables = true;
+            ProtectControlGroups = true;
+            RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_NETLINK" "AF_UNIX" ];
+            RestrictNamespaces = true;
+            RestrictRealtime = true;
           };
         };
 
diff --git a/nixos/modules/services/mail/roundcube.nix b/nixos/modules/services/mail/roundcube.nix
index c35ece8362f67..3f1a695ab91ae 100644
--- a/nixos/modules/services/mail/roundcube.nix
+++ b/nixos/modules/services/mail/roundcube.nix
@@ -102,6 +102,12 @@ in
       apply = configuredMaxAttachmentSize: "${toString (configuredMaxAttachmentSize * 1.3)}M";
     };
 
+    configureNginx = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = lib.mdDoc "Configure nginx as a reverse proxy for roundcube.";
+    };
+
     extraConfig = mkOption {
       type = types.lines;
       default = "";
@@ -142,26 +148,39 @@ in
       ${cfg.extraConfig}
     '';
 
-    services.nginx = {
+    services.nginx = lib.mkIf cfg.configureNginx {
       enable = true;
       virtualHosts = {
         ${cfg.hostName} = {
           forceSSL = mkDefault true;
           enableACME = mkDefault true;
+          root = cfg.package;
           locations."/" = {
-            root = cfg.package;
             index = "index.php";
+            priority = 1100;
             extraConfig = ''
-              location ~* \.php(/|$) {
-                fastcgi_split_path_info ^(.+\.php)(/.+)$;
-                fastcgi_pass unix:${fpm.socket};
-
-                fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
-                fastcgi_param PATH_INFO       $fastcgi_path_info;
-
-                include ${config.services.nginx.package}/conf/fastcgi_params;
-                include ${pkgs.nginx}/conf/fastcgi.conf;
-              }
+              add_header Cache-Control 'public, max-age=604800, must-revalidate';
+            '';
+          };
+          locations."~ ^/(SQL|bin|config|logs|temp|vendor)/" = {
+            priority = 3110;
+            extraConfig = ''
+              return 404;
+            '';
+          };
+          locations."~ ^/(CHANGELOG.md|INSTALL|LICENSE|README.md|SECURITY.md|UPGRADING|composer.json|composer.lock)" = {
+            priority = 3120;
+            extraConfig = ''
+              return 404;
+            '';
+          };
+          locations."~* \\.php(/|$)" = {
+            priority = 3130;
+            extraConfig = ''
+              fastcgi_pass unix:${fpm.socket};
+              fastcgi_param PATH_INFO $fastcgi_path_info;
+              fastcgi_split_path_info ^(.+\.php)(/.+)$;
+              include ${config.services.nginx.package}/conf/fastcgi.conf;
             '';
           };
         };
@@ -231,6 +250,7 @@ in
         path = [ config.services.postgresql.package ];
       })
       {
+        wants = [ "network-online.target" ];
         after = [ "network-online.target" ];
         wantedBy = [ "multi-user.target" ];
         script = let
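
The new `configureNginx` flag makes the built-in nginx virtual host optional, for example when Roundcube is served through a different reverse proxy; a hedged sketch:

```
services.roundcube = {
  enable = true;
  hostName = "mail.example.org";
  # Skip the module's nginx vhost and terminate HTTP elsewhere.
  configureNginx = false;
};
```
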
diff --git a/nixos/modules/services/mail/rspamd-trainer.nix b/nixos/modules/services/mail/rspamd-trainer.nix
new file mode 100644
index 0000000000000..bb78ddf9dd471
--- /dev/null
+++ b/nixos/modules/services/mail/rspamd-trainer.nix
@@ -0,0 +1,76 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.rspamd-trainer;
+  format = pkgs.formats.toml { };
+
+in {
+  options.services.rspamd-trainer = {
+
+    enable = mkEnableOption (mdDoc "Spam/ham trainer for rspamd");
+
+    settings = mkOption {
+      default = { };
+      description = mdDoc ''
+        IMAP authentication configuration for rspamd-trainer. For supplying
+        the IMAP password, use the `secrets` option.
+      '';
+      type = types.submodule {
+        freeformType = format.type;
+      };
+      example = literalExpression ''
+        {
+          HOST = "localhost";
+          USERNAME = "spam@example.com";
+          INBOXPREFIX = "INBOX/";
+        }
+      '';
+    };
+
+    secrets = lib.mkOption {
+      type = with types; listOf path;
+      description = lib.mdDoc ''
+        A list of files containing the various secrets. Should be in the
+        format expected by systemd's `EnvironmentFile` directive. For the
+        IMAP account password use `PASSWORD = mypassword`.
+      '';
+      default = [ ];
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd = {
+      services.rspamd-trainer = {
+        description = "Spam/ham trainer for rspamd";
+        serviceConfig = {
+          ExecStart = "${pkgs.rspamd-trainer}/bin/rspamd-trainer";
+          WorkingDirectory = "/var/lib/rspamd-trainer";
+          StateDirectory = [ "rspamd-trainer/log" ];
+          Type = "oneshot";
+          DynamicUser = true;
+          EnvironmentFile = [
+            ( format.generate "rspamd-trainer-env" cfg.settings )
+            cfg.secrets
+          ];
+        };
+      };
+      timers."rspamd-trainer" = {
+        wantedBy = [ "timers.target" ];
+        timerConfig = {
+          OnBootSec = "10m";
+          OnUnitActiveSec = "10m";
+          Unit = "rspamd-trainer.service";
+        };
+      };
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ onny ];
+
+}
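
A minimal hedged configuration for the new rspamd-trainer module, keeping the IMAP password out of the Nix store via `secrets` (the secret path is illustrative):

```
services.rspamd-trainer = {
  enable = true;
  settings = {
    HOST = "localhost";
    USERNAME = "spam@example.com";
    INBOXPREFIX = "INBOX/";
  };
  # Environment-file style secret, e.g. a line `PASSWORD = mypassword`;
  # the path below is illustrative.
  secrets = [ "/run/secrets/rspamd-trainer" ];
};
```
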
diff --git a/nixos/modules/services/mail/sympa.nix b/nixos/modules/services/mail/sympa.nix
index 04ae46f66eeaf..13fc8656a2b5a 100644
--- a/nixos/modules/services/mail/sympa.nix
+++ b/nixos/modules/services/mail/sympa.nix
@@ -435,7 +435,7 @@ in
 
       wantedBy = [ "multi-user.target" ];
       after = [ "network-online.target" ];
-      wants = sympaSubServices;
+      wants = sympaSubServices ++ [ "network-online.target" ];
       before = sympaSubServices;
       serviceConfig = sympaServiceConfig "sympa_msg";
 
diff --git a/nixos/modules/services/matrix/appservice-irc.nix b/nixos/modules/services/matrix/appservice-irc.nix
index d153ffc2ace87..c79cd799b4d0e 100644
--- a/nixos/modules/services/matrix/appservice-irc.nix
+++ b/nixos/modules/services/matrix/appservice-irc.nix
@@ -214,7 +214,7 @@ in {
         RestrictRealtime = true;
         PrivateMounts = true;
         SystemCallFilter = [
-          "@system-service @pkey"
+          "@system-service @pkey @chown"
           "~@privileged @resources"
         ];
         SystemCallArchitectures = "native";
diff --git a/nixos/modules/services/matrix/matrix-sliding-sync.nix b/nixos/modules/services/matrix/matrix-sliding-sync.nix
index 295be0c6bf167..8b22cd7dba802 100644
--- a/nixos/modules/services/matrix/matrix-sliding-sync.nix
+++ b/nixos/modules/services/matrix/matrix-sliding-sync.nix
@@ -1,10 +1,14 @@
 { config, lib, pkgs, ... }:
 
 let
-  cfg = config.services.matrix-synapse.sliding-sync;
+  cfg = config.services.matrix-sliding-sync;
 in
 {
-  options.services.matrix-synapse.sliding-sync = {
+  imports = [
+    (lib.mkRenamedOptionModule [ "services" "matrix-synapse" "sliding-sync" ] [ "services" "matrix-sliding-sync" ])
+  ];
+
+  options.services.matrix-sliding-sync = {
     enable = lib.mkEnableOption (lib.mdDoc "sliding sync");
 
     package = lib.mkPackageOption pkgs "matrix-sliding-sync" { };
@@ -83,6 +87,7 @@ in
     systemd.services.matrix-sliding-sync = rec {
       after =
         lib.optional cfg.createDatabase "postgresql.service"
+        ++ lib.optional config.services.dendrite.enable "dendrite.service"
         ++ lib.optional config.services.matrix-synapse.enable config.services.matrix-synapse.serviceUnit;
       wants = after;
       wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/services/matrix/maubot.nix b/nixos/modules/services/matrix/maubot.nix
index 7d392c22983b4..bc96ca03b1fc7 100644
--- a/nixos/modules/services/matrix/maubot.nix
+++ b/nixos/modules/services/matrix/maubot.nix
@@ -42,7 +42,7 @@ let
       database = lib.last (lib.splitString "/" noSchema);
     };
 
-  postgresDBs = [
+  postgresDBs = builtins.filter isPostgresql [
     cfg.settings.database
     cfg.settings.crypto_database
     cfg.settings.plugin_databases.postgres
diff --git a/nixos/modules/services/matrix/synapse.md b/nixos/modules/services/matrix/synapse.md
index 58be24204fcfe..f270be8c8d781 100644
--- a/nixos/modules/services/matrix/synapse.md
+++ b/nixos/modules/services/matrix/synapse.md
@@ -16,13 +16,13 @@ around Matrix.
 
 ## Synapse Homeserver {#module-services-matrix-synapse}
 
-[Synapse](https://github.com/matrix-org/synapse) is
+[Synapse](https://github.com/element-hq/synapse) is
 the reference homeserver implementation of Matrix from the core development
 team at matrix.org. The following configuration example will set up a
 synapse server for the `example.org` domain, served from
 the host `myhostname.example.org`. For more information,
 please refer to the
-[installation instructions of Synapse](https://matrix-org.github.io/synapse/latest/setup/installation.html) .
+[installation instructions of Synapse](https://element-hq.github.io/synapse/latest/setup/installation.html) .
 ```
 { pkgs, lib, config, ... }:
 let
@@ -70,7 +70,7 @@ in {
         # the domain (i.e. example.org from @foo:example.org) and the federation port
         # is 8448.
         # Further reference can be found in the docs about delegation under
-        # https://matrix-org.github.io/synapse/latest/delegate.html
+        # https://element-hq.github.io/synapse/latest/delegate.html
         locations."= /.well-known/matrix/server".extraConfig = mkWellKnown serverConfig;
         # This is usually needed for homeserver discovery (from e.g. other Matrix clients).
         # Further reference can be found in the upstream docs at
@@ -169,7 +169,7 @@ in an additional file like this:
 ::: {.note}
 It's also possible to user alternative authentication mechanism such as
 [LDAP (via `matrix-synapse-ldap3`)](https://github.com/matrix-org/matrix-synapse-ldap3)
-or [OpenID](https://matrix-org.github.io/synapse/latest/openid.html).
+or [OpenID](https://element-hq.github.io/synapse/latest/openid.html).
 :::
 
 ## Element (formerly known as Riot) Web Client {#module-services-matrix-element-web}
diff --git a/nixos/modules/services/matrix/synapse.nix b/nixos/modules/services/matrix/synapse.nix
index 9cc769c2d0db7..4c1c396eac056 100644
--- a/nixos/modules/services/matrix/synapse.nix
+++ b/nixos/modules/services/matrix/synapse.nix
@@ -446,7 +446,7 @@ in {
         default = { };
         description = mdDoc ''
           The primary synapse configuration. See the
-          [sample configuration](https://github.com/matrix-org/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/docs/sample_config.yaml)
+          [sample configuration](https://github.com/element-hq/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/docs/sample_config.yaml)
           for possible values.
 
           Secrets should be passed in by using the `extraConfigFiles` option.
@@ -749,7 +749,7 @@ in {
                     by the module, but in practice it broke on runtime and as a result, no URL
                     preview worked anywhere if this was set.
 
-                    See https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#url_preview_url_blacklist
+                    See https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#url_preview_url_blacklist
                     on how to configure it properly.
                   ''))
                   (types.attrsOf types.str));
@@ -873,7 +873,7 @@ in {
                 Redis configuration for synapse.
 
                 See the
-                [upstream documentation](https://github.com/matrix-org/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/usage/configuration/config_documentation.md#redis)
+                [upstream documentation](https://github.com/element-hq/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/docs/usage/configuration/config_documentation.md#redis)
                 for available options.
               '';
             };
@@ -886,7 +886,7 @@ in {
         description = lib.mdDoc ''
           Options for configuring workers. Worker support will be enabled if at least one worker is configured here.
 
-          See the [worker documention](https://matrix-org.github.io/synapse/latest/workers.html#worker-configuration)
+          See the [worker documentation](https://element-hq.github.io/synapse/latest/workers.html#worker-configuration)
           for possible options for each worker. Worker-specific options overriding the shared homeserver configuration can be
           specified here for each worker.
 
@@ -900,9 +900,9 @@ in {
             using [`services.matrix-synapse.configureRedisLocally`](#opt-services.matrix-synapse.configureRedisLocally).
 
             Workers also require a proper reverse proxy setup to direct incoming requests to the appropriate process. See
-            the [reverse proxy documentation](https://matrix-org.github.io/synapse/latest/reverse_proxy.html) for a
+            the [reverse proxy documentation](https://element-hq.github.io/synapse/latest/reverse_proxy.html) for a
             general reverse proxying setup and
-            the [worker documentation](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications)
+            the [worker documentation](https://element-hq.github.io/synapse/latest/workers.html#available-worker-applications)
             for the available endpoints per worker application.
           :::
         '';
@@ -932,7 +932,7 @@ in {
                 The file for log configuration.
 
                 See the [python documentation](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema)
-                for the schema and the [upstream repository](https://github.com/matrix-org/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/docs/sample_log_config.yaml)
+                for the schema and the [upstream repository](https://github.com/element-hq/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/docs/sample_log_config.yaml)
                 for an example.
               '';
             };
@@ -1056,6 +1056,7 @@ in {
 
     systemd.targets.matrix-synapse = lib.mkIf hasWorkers {
       description = "Synapse Matrix parent target";
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ] ++ optional hasLocalPostgresDB "postgresql.service";
       wantedBy = [ "multi-user.target" ];
     };
@@ -1071,6 +1072,7 @@ in {
             requires = optional hasLocalPostgresDB "postgresql.service";
           }
           else {
+            wants = [ "network-online.target" ];
             after = [ "network-online.target" ] ++ optional hasLocalPostgresDB "postgresql.service";
             requires = optional hasLocalPostgresDB "postgresql.service";
             wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/services/misc/amazon-ssm-agent.nix b/nixos/modules/services/misc/amazon-ssm-agent.nix
index 20b836abe164f..89a1c07665106 100644
--- a/nixos/modules/services/misc/amazon-ssm-agent.nix
+++ b/nixos/modules/services/misc/amazon-ssm-agent.nix
@@ -41,6 +41,7 @@ in {
     # See https://github.com/aws/amazon-ssm-agent/blob/mainline/packaging/linux/amazon-ssm-agent.service
     systemd.services.amazon-ssm-agent = {
       inherit (cfg.package.meta) description;
+      wants    = [ "network-online.target" ];
       after    = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
 
diff --git a/nixos/modules/services/misc/ankisyncd.nix b/nixos/modules/services/misc/ankisyncd.nix
index e4de46e19a8fc..f5acfbb0ee969 100644
--- a/nixos/modules/services/misc/ankisyncd.nix
+++ b/nixos/modules/services/misc/ankisyncd.nix
@@ -46,6 +46,12 @@ in
     };
 
     config = mkIf cfg.enable {
+      warnings = [
+        ''
+        `services.ankisyncd` has been replaced by `services.anki-sync-server` and will be removed after
+        24.05 because anki-sync-server(-rs and python) are not maintained.
+        ''
+      ];
       networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
 
       systemd.services.ankisyncd = {
diff --git a/nixos/modules/services/misc/bcg.nix b/nixos/modules/services/misc/bcg.nix
index 9da4a879cdd00..ad0b9c871342f 100644
--- a/nixos/modules/services/misc/bcg.nix
+++ b/nixos/modules/services/misc/bcg.nix
@@ -154,7 +154,7 @@ in
     in {
       description = "BigClown Gateway";
       wantedBy = [ "multi-user.target" ];
-      wants = mkIf config.services.mosquitto.enable [ "mosquitto.service" ];
+      wants = [ "network-online.target" ] ++ lib.optional config.services.mosquitto.enable "mosquitto.service";
       after = [ "network-online.target" ];
       preStart = ''
         umask 077
diff --git a/nixos/modules/services/misc/domoticz.nix b/nixos/modules/services/misc/domoticz.nix
index fd9fcf0b78eb5..315092f933514 100644
--- a/nixos/modules/services/misc/domoticz.nix
+++ b/nixos/modules/services/misc/domoticz.nix
@@ -35,6 +35,7 @@ in {
     systemd.services."domoticz" = {
       description = pkgDesc;
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       serviceConfig = {
         DynamicUser = true;
diff --git a/nixos/modules/services/misc/etesync-dav.nix b/nixos/modules/services/misc/etesync-dav.nix
index 9d99d548d95b0..ae2b5ad043433 100644
--- a/nixos/modules/services/misc/etesync-dav.nix
+++ b/nixos/modules/services/misc/etesync-dav.nix
@@ -59,6 +59,7 @@ in
 
       systemd.services.etesync-dav = {
         description = "etesync-dav - A CalDAV and CardDAV adapter for EteSync";
+        wants = [ "network-online.target" ];
         after = [ "network-online.target" ];
         wantedBy = [ "multi-user.target" ];
         path = [ pkgs.etesync-dav ];
diff --git a/nixos/modules/services/misc/gitea.nix b/nixos/modules/services/misc/gitea.nix
index f4305bea2ad76..d0135b2ba7acd 100644
--- a/nixos/modules/services/misc/gitea.nix
+++ b/nixos/modules/services/misc/gitea.nix
@@ -234,6 +234,13 @@ in
         description = lib.mdDoc "Path to the git repositories.";
       };
 
+      camoHmacKeyFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/var/lib/secrets/gitea/camoHmacKey";
+        description = lib.mdDoc "Path to a file containing the camo HMAC key.";
+      };
+
       mailerPasswordFile = mkOption {
         type = types.nullOr types.str;
         default = null;
@@ -429,6 +436,10 @@ in
         LFS_JWT_SECRET = "#lfsjwtsecret#";
       };
 
+      camo = mkIf (cfg.camoHmacKeyFile != null) {
+        HMAC_KEY = "#hmackey#";
+      };
+
       session = {
         COOKIE_NAME = lib.mkDefault "session";
       };
@@ -570,6 +581,10 @@ in
               ${replaceSecretBin} '#lfsjwtsecret#' '${lfsJwtSecret}' '${runConfig}'
             ''}
 
+            ${lib.optionalString (cfg.camoHmacKeyFile != null) ''
+              ${replaceSecretBin} '#hmackey#' '${cfg.camoHmacKeyFile}' '${runConfig}'
+            ''}
+
             ${lib.optionalString (cfg.mailerPasswordFile != null) ''
               ${replaceSecretBin} '#mailerpass#' '${cfg.mailerPasswordFile}' '${runConfig}'
             ''}
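
The new `camoHmacKeyFile` option follows the same pattern as Gitea's other secret-file options: the key is substituted into the runtime configuration at service start instead of being embedded in the Nix store. A minimal sketch (the secret path is illustrative):

```
{
  services.gitea = {
    enable = true;
    # The HMAC key is substituted into the generated app.ini at
    # service start, so it never ends up in the world-readable store.
    camoHmacKeyFile = "/var/lib/secrets/gitea/camoHmacKey";
  };
}
```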
diff --git a/nixos/modules/services/misc/guix/default.nix b/nixos/modules/services/misc/guix/default.nix
new file mode 100644
index 0000000000000..7174ff36b7090
--- /dev/null
+++ b/nixos/modules/services/misc/guix/default.nix
@@ -0,0 +1,406 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.guix;
+
+  package = cfg.package.override { inherit (cfg) stateDir storeDir; };
+
+  guixBuildUser = id: {
+    name = "guixbuilder${toString id}";
+    group = cfg.group;
+    extraGroups = [ cfg.group ];
+    createHome = false;
+    description = "Guix build user ${toString id}";
+    isSystemUser = true;
+  };
+
+  guixBuildUsers = numberOfUsers:
+    builtins.listToAttrs (map
+      (user: {
+        name = user.name;
+        value = user;
+      })
+      (builtins.genList guixBuildUser numberOfUsers));
+
+  # A set of Guix user profiles to be linked at activation. All of these should
+  # be default profiles managed by the Guix CLI; the profiles are located in
+  # `${cfg.stateDir}/profiles/per-user/$USER/$PROFILE`.
+  guixUserProfiles = {
+    # The default Guix profile managed by `guix pull`. Note that this should be
+    # the profile with the highest precedence in `PATH` so that users get their
+    # updated versions of the `guix` CLI.
+    "current-guix" = "\${XDG_CONFIG_HOME}/guix/current";
+
+    # The default Guix home profile. This profile contains more than just
+    # exports, such as an activation script at `$GUIX_HOME_PROFILE/activate`.
+    "guix-home" = "$HOME/.guix-home/profile";
+
+    # The default Guix profile similar to $HOME/.nix-profile from Nix.
+    "guix-profile" = "$HOME/.guix-profile";
+  };
+
+  # All of the Guix profiles to be used.
+  guixProfiles = lib.attrValues guixUserProfiles;
+
+  serviceEnv = {
+    GUIX_LOCPATH = "${cfg.stateDir}/guix/profiles/per-user/root/guix-profile/lib/locale";
+    LC_ALL = "C.UTF-8";
+  };
+in
+{
+  meta.maintainers = with lib.maintainers; [ foo-dogsquared ];
+
+  options.services.guix = with lib; {
+    enable = mkEnableOption "Guix build daemon service";
+
+    group = mkOption {
+      type = types.str;
+      default = "guixbuild";
+      example = "guixbuild";
+      description = ''
+        The group of the Guix build user pool.
+      '';
+    };
+
+    nrBuildUsers = mkOption {
+      type = types.ints.unsigned;
+      description = ''
+        Number of Guix build users to be used in the build pool.
+      '';
+      default = 10;
+      example = 20;
+    };
+
+    extraArgs = mkOption {
+      type = with types; listOf str;
+      default = [ ];
+      example = [ "--max-jobs=4" "--debug" ];
+      description = ''
+        Extra flags to pass to the Guix daemon service.
+      '';
+    };
+
+    package = mkPackageOption pkgs "guix" {
+      extraDescription = ''
+        It should contain {command}`guix-daemon` and {command}`guix`
+        executables.
+      '';
+    };
+
+    storeDir = mkOption {
+      type = types.path;
+      default = "/gnu/store";
+      description = ''
+        The store directory that the Guix daemon serves from. Note that Guix
+        cannot take advantage of substitutes if this is set to anything other
+        than {file}`/gnu/store`, since most cached builds assume that
+        location.
+
+        ::: {.warning}
+        This will also recompile all packages because the normal cache no
+        longer applies.
+        :::
+      '';
+    };
+
+    stateDir = mkOption {
+      type = types.path;
+      default = "/var";
+      description = ''
+        The state directory where the Guix service stores its data, such as
+        user-specific profiles, caches, and state files.
+
+        ::: {.warning}
+        Changing it to something other than the default will rebuild the
+        package.
+        :::
+      '';
+      example = "/gnu/var";
+    };
+
+    publish = {
+      enable = mkEnableOption "substitute server for your Guix store directory";
+
+      generateKeyPair = mkOption {
+        type = types.bool;
+        description = ''
+          Whether to generate signing keys in {file}`/etc/guix` which are
+          required to initialize a substitute server. Otherwise,
+          `--public-key=$FILE` and `--private-key=$FILE` can be passed in
+          {option}`services.guix.publish.extraArgs`.
+        '';
+        default = true;
+        example = false;
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8181;
+        example = 8200;
+        description = ''
+          Port of the substitute server to listen on.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "guix-publish";
+        description = ''
+          Name of the user to switch to once the server is up.
+        '';
+      };
+
+      extraArgs = mkOption {
+        type = with types; listOf str;
+        description = ''
+          Extra flags to pass to the substitute server.
+        '';
+        default = [];
+        example = [
+          "--compression=zstd:6"
+          "--discover=no"
+        ];
+      };
+    };
+
+    gc = {
+      enable = mkEnableOption "automatic garbage collection service for Guix";
+
+      extraArgs = mkOption {
+        type = with types; listOf str;
+        default = [ ];
+        description = ''
+          List of arguments to be passed to {command}`guix gc`.
+
+          When given no options, it will try to collect all garbage, which is
+          often inconvenient, so it is recommended to pass [some
+          options](https://guix.gnu.org/en/manual/en/html_node/Invoking-guix-gc.html).
+        '';
+        example = [
+          "--delete-generations=1m"
+          "--free-space=10G"
+          "--optimize"
+        ];
+      };
+
+      dates = lib.mkOption {
+        type = types.str;
+        default = "03:15";
+        example = "weekly";
+        description = ''
+          How often the garbage collection occurs. This takes the time format
+          from {manpage}`systemd.time(7)`.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable (lib.mkMerge [
+    {
+      environment.systemPackages = [ package ];
+
+      users.users = guixBuildUsers cfg.nrBuildUsers;
+      users.groups.${cfg.group} = { };
+
+      # Guix uses Avahi (through guile-avahi) both for discovering and
+      # advertising substitute servers on the local network.
+      services.avahi.enable = lib.mkDefault true;
+      services.avahi.publish.enable = lib.mkDefault true;
+      services.avahi.publish.userServices = lib.mkDefault true;
+
+      # It's similar to Nix daemon so there's no question whether or not this
+      # should be sandboxed.
+      systemd.services.guix-daemon = {
+        environment = serviceEnv;
+        script = ''
+          ${lib.getExe' package "guix-daemon"} \
+            --build-users-group=${cfg.group} \
+            ${lib.escapeShellArgs cfg.extraArgs}
+        '';
+        serviceConfig = {
+          OOMPolicy = "continue";
+          RemainAfterExit = "yes";
+          Restart = "always";
+          TasksMax = 8192;
+        };
+        unitConfig.RequiresMountsFor = [
+          cfg.storeDir
+          cfg.stateDir
+        ];
+        wantedBy = [ "multi-user.target" ];
+      };
+
+      # This is based on the Nix daemon socket unit from the upstream Nix package.
+      # Guix build daemon has support for systemd-style socket activation.
+      systemd.sockets.guix-daemon = {
+        description = "Guix daemon socket";
+        before = [ "multi-user.target" ];
+        listenStreams = [ "${cfg.stateDir}/guix/daemon-socket/socket" ];
+        unitConfig.RequiresMountsFor = [ cfg.storeDir cfg.stateDir ];
+        wantedBy = [ "sockets.target" ];
+      };
+
+      systemd.mounts = [{
+        description = "Guix read-only store directory";
+        before = [ "guix-daemon.service" ];
+        what = cfg.storeDir;
+        where = cfg.storeDir;
+        type = "none";
+        options = "bind,ro";
+
+        unitConfig.DefaultDependencies = false;
+        wantedBy = [ "guix-daemon.service" ];
+      }];
+
+      # Authorize the official server keys so that transferring files from one
+      # store to another is easier, the usual case being substitutes from the
+      # official Guix CI instance.
+      system.activationScripts.guix-authorize-keys = ''
+        for official_server_keys in ${package}/share/guix/*.pub; do
+          ${lib.getExe' package "guix"} archive --authorize < $official_server_keys
+        done
+      '';
+
+      # Link the usual Guix profiles to the home directory. This is useful in
+      # ephemeral setups where only certain parts of the filesystem are
+      # persistent (e.g., an "Erase my darlings"-style setup).
+      system.userActivationScripts.guix-activate-user-profiles.text = let
+        guixProfile = profile: "${cfg.stateDir}/guix/profiles/per-user/\${USER}/${profile}";
+        linkProfile = profile: location: let
+          userProfile = guixProfile profile;
+        in ''
+          [ -d "${userProfile}" ] && ln -sfn "${userProfile}" "${location}"
+        '';
+        linkProfileToPath = acc: profile: location: let
+          in acc + (linkProfile profile location);
+
+        # This should contain export-only Guix user profiles. The rest of it is
+        # handled manually in the activation script.
+        guixUserProfiles' = lib.attrsets.removeAttrs guixUserProfiles [ "guix-home" ];
+
+        linkExportsScript = lib.foldlAttrs linkProfileToPath "" guixUserProfiles';
+      in ''
+        # Don't export this please! It is only expected to be used for this
+        # activation script and nothing else.
+        XDG_CONFIG_HOME=''${XDG_CONFIG_HOME:-$HOME/.config}
+
+        # Linking the usual Guix profiles into the home directory.
+        ${linkExportsScript}
+
+        # Activate all of the default Guix non-exports profiles manually.
+        ${linkProfile "guix-home" "$HOME/.guix-home"}
+        [ -L "$HOME/.guix-home" ] && "$HOME/.guix-home/activate"
+      '';
+
+      # GUIX_LOCPATH is basically LOCPATH but for the Guix libc, which is in turn
+      # used by virtually every Guix-built package. This is so that Guix-installed
+      # applications don't use incompatible locale data from the host
+      # system.
+      environment.sessionVariables.GUIX_LOCPATH = lib.makeSearchPath "lib/locale" guixProfiles;
+
+      # What Guix profiles export is very similar to Nix profiles, so it is
+      # acceptable to list them here. Also, users are more likely to want the
+      # packages they explicitly installed through Guix, so those profiles come
+      # first.
+      environment.profiles = lib.mkBefore guixProfiles;
+    }
+
+    (lib.mkIf cfg.publish.enable {
+      systemd.services.guix-publish = {
+        description = "Guix remote store";
+        environment = serviceEnv;
+
+        # Mounts are already required by the daemon service, so there's no
+        # need to add RequiresMountsFor= or something similar here.
+        requires = [ "guix-daemon.service" ];
+        after = [ "guix-daemon.service" ];
+        partOf = [ "guix-daemon.service" ];
+
+        preStart = lib.mkIf cfg.publish.generateKeyPair ''
+          # Generate the keypair if it's missing.
+          [ -f "/etc/guix/signing-key.sec" ] && [ -f "/etc/guix/signing-key.pub" ] || \
+            ${lib.getExe' package "guix"} archive --generate-key || {
+              rm /etc/guix/signing-key.*;
+              ${lib.getExe' package "guix"} archive --generate-key;
+            }
+        '';
+        script = ''
+          ${lib.getExe' package "guix"} publish \
+            --user=${cfg.publish.user} --port=${builtins.toString cfg.publish.port} \
+            ${lib.escapeShellArgs cfg.publish.extraArgs}
+        '';
+
+        serviceConfig = {
+          Restart = "always";
+          RestartSec = 10;
+
+          ProtectClock = true;
+          ProtectHostname = true;
+          ProtectKernelTunables = true;
+          ProtectKernelModules = true;
+          ProtectControlGroups = true;
+          SystemCallFilter = [
+            "@system-service"
+            "@debug"
+            "@setuid"
+          ];
+
+          RestrictNamespaces = true;
+          RestrictAddressFamilies = [
+            "AF_UNIX"
+            "AF_INET"
+            "AF_INET6"
+          ];
+
+          # The permissions could be set here, but the directory is assumed to
+          # be managed by the Guix daemon service, which has already set it up.
+          ConfigurationDirectory = "guix";
+
+          AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+          CapabilityBoundingSet = [
+            "CAP_NET_BIND_SERVICE"
+            "CAP_SETUID"
+            "CAP_SETGID"
+          ];
+        };
+        wantedBy = [ "multi-user.target" ];
+      };
+
+      users.users.guix-publish = lib.mkIf (cfg.publish.user == "guix-publish") {
+        description = "Guix publish user";
+        group = config.users.groups.guix-publish.name;
+        isSystemUser = true;
+      };
+      users.groups.guix-publish = {};
+    })
+
+    (lib.mkIf cfg.gc.enable {
+      # This service runs as root so that garbage from all users can be
+      # collected.
+      systemd.services.guix-gc = {
+        description = "Guix garbage collection";
+        startAt = cfg.gc.dates;
+        script = ''
+          ${lib.getExe' package "guix"} gc ${lib.escapeShellArgs cfg.gc.extraArgs}
+        '';
+
+        serviceConfig = {
+          Type = "oneshot";
+
+          PrivateDevices = true;
+          PrivateNetworks = true;
+          ProtectControlGroups = true;
+          ProtectHostname = true;
+          ProtectKernelTunables = true;
+          SystemCallFilter = [
+            "@default"
+            "@file-system"
+            "@basic-io"
+            "@system-service"
+          ];
+        };
+      };
+
+      systemd.timers.guix-gc.timerConfig.Persistent = true;
+    })
+  ]);
+}
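
A minimal sketch of how the new Guix module might be used; the values are illustrative and only options defined above are assumed:

```
{
  services.guix = {
    enable = true;
    # Size of the build-user pool; the module defaults to 10.
    nrBuildUsers = 16;
    # Serve the local store as a substitute server (port 8181 by default).
    publish.enable = true;
    # Garbage-collect on the default schedule (daily at 03:15),
    # keeping at least 10 GiB of free space.
    gc = {
      enable = true;
      extraArgs = [ "--free-space=10G" ];
    };
  };
}
```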
diff --git a/nixos/modules/services/misc/llama-cpp.nix b/nixos/modules/services/misc/llama-cpp.nix
new file mode 100644
index 0000000000000..4d76456fb2fd5
--- /dev/null
+++ b/nixos/modules/services/misc/llama-cpp.nix
@@ -0,0 +1,111 @@
+{ config, lib, pkgs, utils, ... }:
+
+let
+  cfg = config.services.llama-cpp;
+in {
+
+  options = {
+
+    services.llama-cpp = {
+      enable = lib.mkEnableOption "LLaMA C++ server";
+
+      package = lib.mkPackageOption pkgs "llama-cpp" { };
+
+      model = lib.mkOption {
+        type = lib.types.path;
+        example = "/models/mistral-instruct-7b/ggml-model-q4_0.gguf";
+        description = "Model path.";
+      };
+
+      extraFlags = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        description = "Extra flags passed to llama-cpp-server.";
+        example = ["-c" "4096" "-ngl" "32" "--numa"];
+        default = [];
+      };
+
+      host = lib.mkOption {
+        type = lib.types.str;
+        default = "127.0.0.1";
+        example = "0.0.0.0";
+        description = "IP address the LLaMA C++ server listens on.";
+      };
+
+      port = lib.mkOption {
+        type = lib.types.port;
+        default = 8080;
+        description = "Listen port for LLaMA C++ server.";
+      };
+
+      openFirewall = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = "Open ports in the firewall for LLaMA C++ server.";
+      };
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    systemd.services.llama-cpp = {
+      description = "LLaMA C++ server";
+      after = ["network.target"];
+      wantedBy = ["multi-user.target"];
+
+      serviceConfig = {
+        Type = "idle";
+        KillSignal = "SIGINT";
+        ExecStart = "${cfg.package}/bin/llama-cpp-server --log-disable --host ${cfg.host} --port ${builtins.toString cfg.port} -m ${cfg.model} ${utils.escapeSystemdExecArgs cfg.extraFlags}";
+        Restart = "on-failure";
+        RestartSec = 300;
+
+        # for GPU acceleration
+        PrivateDevices = false;
+
+        # hardening
+        DynamicUser = true;
+        CapabilityBoundingSet = "";
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          "AF_UNIX"
+        ];
+        NoNewPrivileges = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        MemoryDenyWriteExecute = true;
+        LockPersonality = true;
+        RemoveIPC = true;
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+          "~@resources"
+        ];
+        SystemCallErrorNumber = "EPERM";
+        ProtectProc = "invisible";
+        ProtectHostname = true;
+        ProcSubset = "pid";
+      };
+    };
+
+    networking.firewall = lib.mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ newam ];
+}
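
A minimal sketch of enabling the new llama-cpp module; the model path and flags are illustrative:

```
{
  services.llama-cpp = {
    enable = true;
    # Path to a local GGUF model (illustrative).
    model = "/models/mistral-instruct-7b/ggml-model-q4_0.gguf";
    host = "0.0.0.0";
    port = 8080;
    # Context size, GPU offload layers, etc. are passed through verbatim.
    extraFlags = [ "-c" "4096" "-ngl" "32" ];
    openFirewall = true;
  };
}
```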
diff --git a/nixos/modules/services/misc/mediatomb.nix b/nixos/modules/services/misc/mediatomb.nix
index d421d74c53ad7..03235e9a12655 100644
--- a/nixos/modules/services/misc/mediatomb.nix
+++ b/nixos/modules/services/misc/mediatomb.nix
@@ -357,6 +357,7 @@ in {
       description = "${cfg.serverName} media Server";
       # Gerbera might fail if the network interface is not available on startup
       # https://github.com/gerbera/gerbera/issues/1324
+      wants = [ "network-online.target" ];
       after = [ "network.target" "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig.ExecStart = "${binaryCommand} --port ${toString cfg.port} ${interfaceFlag} ${configFlag} --home ${cfg.dataDir}";
diff --git a/nixos/modules/services/misc/metabase.nix b/nixos/modules/services/misc/metabase.nix
index 883fa0b959116..5fc18e27eaae4 100644
--- a/nixos/modules/services/misc/metabase.nix
+++ b/nixos/modules/services/misc/metabase.nix
@@ -77,6 +77,7 @@ in {
     systemd.services.metabase = {
       description = "Metabase server";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       environment = {
         MB_PLUGINS_DIR = "${dataDir}/plugins";
diff --git a/nixos/modules/services/misc/moonraker.nix b/nixos/modules/services/misc/moonraker.nix
index 0ee7e898cf761..4e419aafa990b 100644
--- a/nixos/modules/services/misc/moonraker.nix
+++ b/nixos/modules/services/misc/moonraker.nix
@@ -103,7 +103,7 @@ in {
 
   config = mkIf cfg.enable {
     warnings = []
-      ++ optional (cfg.settings ? update_manager)
+      ++ optional (cfg.settings.update_manager.enable_system_updates or false)
         ''Enabling update_manager is not supported on NixOS and will lead to non-removable warnings in some clients.''
       ++ optional (cfg.configDir != null)
         ''
@@ -186,6 +186,12 @@ in {
       };
     };
 
+    # set this to false, otherwise we'll get a warning indicating that `/etc/klipper.cfg`
+    # is not located in the moonraker config directory.
+    services.moonraker.settings = lib.mkIf (!config.services.klipper.mutableConfig) {
+      file_manager.check_klipper_config_path = false;
+    };
+
     security.polkit.extraConfig = lib.optionalString cfg.allowSystemControl ''
       // nixos/moonraker: Allow Moonraker to perform system-level operations
       //
diff --git a/nixos/modules/services/misc/nitter.nix b/nixos/modules/services/misc/nitter.nix
index c2c462d46bb5b..d2cf7c0de2b77 100644
--- a/nixos/modules/services/misc/nitter.nix
+++ b/nixos/modules/services/misc/nitter.nix
@@ -304,6 +304,23 @@ in
         '';
       };
 
+      guestAccounts = mkOption {
+        type = types.path;
+        default = "/var/lib/nitter/guest_accounts.jsonl";
+        description = lib.mdDoc ''
+          Path to the guest accounts file.
+
+          This file contains a list of guest accounts that can be used to
+          access the instance without logging in. The file is in JSONL format,
+          where each line is a JSON object with the following fields:
+
+          `{"oauth_token":"some_token","oauth_token_secret":"some_secret_key"}`
+
+          See https://github.com/zedeus/nitter/wiki/Guest-Account-Branch-Deployment
+          for more information on guest accounts and how to generate them.
+        '';
+      };
+
       redisCreateLocally = mkOption {
         type = types.bool;
         default = true;
@@ -333,8 +350,12 @@ in
         after = [ "network-online.target" ];
         serviceConfig = {
           DynamicUser = true;
+          LoadCredential="guestAccountsFile:${cfg.guestAccounts}";
           StateDirectory = "nitter";
-          Environment = [ "NITTER_CONF_FILE=/var/lib/nitter/nitter.conf" ];
+          Environment = [
+            "NITTER_CONF_FILE=/var/lib/nitter/nitter.conf"
+            "NITTER_ACCOUNTS_FILE=%d/guestAccountsFile"
+          ];
           # Some parts of Nitter expect `public` folder in working directory,
           # see https://github.com/zedeus/nitter/issues/414
           WorkingDirectory = "${cfg.package}/share/nitter";
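
With the guest-account branch, a deployment might point `guestAccounts` at a file kept outside the Nix store; a sketch under that assumption:

```
{
  services.nitter = {
    enable = true;
    # JSONL file with guest-account tokens; it is handed to the service
    # via LoadCredential, so it can live outside the Nix store.
    guestAccounts = "/var/lib/nitter-secrets/guest_accounts.jsonl";
  };
}
```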
diff --git a/nixos/modules/services/misc/nix-ssh-serve.nix b/nixos/modules/services/misc/nix-ssh-serve.nix
index b656692ca01cd..cf9d6339c69b7 100644
--- a/nixos/modules/services/misc/nix-ssh-serve.nix
+++ b/nixos/modules/services/misc/nix-ssh-serve.nix
@@ -1,4 +1,4 @@
-{ config, lib, ... }:
+{ config, lib, pkgs, ... }:
 
 with lib;
 let cfg = config.nix.sshServe;
@@ -46,7 +46,7 @@ in {
       description = "Nix SSH store user";
       isSystemUser = true;
       group = "nix-ssh";
-      useDefaultShell = true;
+      shell = pkgs.bashInteractive;
     };
     users.groups.nix-ssh = {};
 
diff --git a/nixos/modules/services/misc/ntfy-sh.nix b/nixos/modules/services/misc/ntfy-sh.nix
index 98134e94eeede..b8b0772401156 100644
--- a/nixos/modules/services/misc/ntfy-sh.nix
+++ b/nixos/modules/services/misc/ntfy-sh.nix
@@ -79,12 +79,6 @@ in
         cache-file = mkDefault "/var/lib/ntfy-sh/cache-file.db";
       };
 
-      systemd.tmpfiles.rules = [
-        "f ${cfg.settings.auth-file} 0600 ${cfg.user} ${cfg.group} - -"
-        "d ${cfg.settings.attachment-cache-dir} 0700 ${cfg.user} ${cfg.group} - -"
-        "f ${cfg.settings.cache-file} 0600 ${cfg.user} ${cfg.group} - -"
-      ];
-
       systemd.services.ntfy-sh = {
         description = "Push notifications server";
 
diff --git a/nixos/modules/services/misc/ollama.nix b/nixos/modules/services/misc/ollama.nix
new file mode 100644
index 0000000000000..d9359d2b5cd44
--- /dev/null
+++ b/nixos/modules/services/misc/ollama.nix
@@ -0,0 +1,50 @@
+{ config, lib, pkgs, ... }: let
+
+  cfg = config.services.ollama;
+
+in {
+
+  options = {
+    services.ollama = {
+      enable = lib.mkEnableOption (
+        lib.mdDoc "Server for local large language models"
+      );
+      listenAddress = lib.mkOption {
+        type = lib.types.str;
+        default = "127.0.0.1:11434";
+        description = lib.mdDoc ''
+          Specifies the bind address on which the ollama server HTTP interface listens.
+        '';
+      };
+      package = lib.mkPackageOption pkgs "ollama" { };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    systemd = {
+      services.ollama = {
+        wantedBy = [ "multi-user.target" ];
+        description = "Server for local large language models";
+        after = [ "network.target" ];
+        environment = {
+          HOME = "%S/ollama";
+          OLLAMA_MODELS = "%S/ollama/models";
+          OLLAMA_HOST = cfg.listenAddress;
+        };
+        serviceConfig = {
+          ExecStart = "${lib.getExe cfg.package} serve";
+          WorkingDirectory = "/var/lib/ollama";
+          StateDirectory = [ "ollama" ];
+          DynamicUser = true;
+        };
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ onny ];
+
+}
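
A minimal sketch of the new ollama module; exposing it on all interfaces is only an example:

```
{
  services.ollama = {
    enable = true;
    # Listen on all interfaces instead of the default 127.0.0.1:11434.
    listenAddress = "0.0.0.0:11434";
  };
}
```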
diff --git a/nixos/modules/services/misc/paperless.nix b/nixos/modules/services/misc/paperless.nix
index b3bc7d89009db..ca34a327dbdfa 100644
--- a/nixos/modules/services/misc/paperless.nix
+++ b/nixos/modules/services/misc/paperless.nix
@@ -10,7 +10,7 @@ let
   defaultFont = "${pkgs.liberation_ttf}/share/fonts/truetype/LiberationSerif-Regular.ttf";
 
   # Don't start a redis instance if the user sets a custom redis connection
-  enableRedis = !hasAttr "PAPERLESS_REDIS" cfg.extraConfig;
+  enableRedis = !(cfg.settings ? PAPERLESS_REDIS);
   redisServer = config.services.redis.servers.paperless;
 
   env = {
@@ -24,9 +24,11 @@ let
     PAPERLESS_TIME_ZONE = config.time.timeZone;
   } // optionalAttrs enableRedis {
     PAPERLESS_REDIS = "unix://${redisServer.unixSocket}";
-  } // (
-    lib.mapAttrs (_: toString) cfg.extraConfig
-  );
+  } // (lib.mapAttrs (_: s:
+    if (lib.isAttrs s || lib.isList s) then builtins.toJSON s
+    else if lib.isBool s then lib.boolToString s
+    else toString s
+  ) cfg.settings);
 
   manage = pkgs.writeShellScript "manage" ''
     set -o allexport # Export the following env vars
@@ -82,6 +84,7 @@ in
 
   imports = [
     (mkRenamedOptionModule [ "services" "paperless-ng" ] [ "services" "paperless" ])
+    (mkRenamedOptionModule [ "services" "paperless" "extraConfig" ] [ "services" "paperless" "settings" ])
   ];
 
   options.services.paperless = {
@@ -160,32 +163,30 @@ in
       description = lib.mdDoc "Web interface port.";
     };
 
-    # FIXME this should become an RFC42-style settings attr
-    extraConfig = mkOption {
-      type = types.attrs;
+    settings = mkOption {
+      type = lib.types.submodule {
+        freeformType = with lib.types; attrsOf (let
+          typeList = [ bool float int str path package ];
+        in oneOf (typeList ++ [ (listOf (oneOf typeList)) (attrsOf (oneOf typeList)) ]));
+      };
       default = { };
       description = lib.mdDoc ''
         Extra paperless config options.
 
-        See [the documentation](https://docs.paperless-ngx.com/configuration/)
-        for available options.
+        See [the documentation](https://docs.paperless-ngx.com/configuration/) for available options.
 
-        Note that some options such as `PAPERLESS_CONSUMER_IGNORE_PATTERN` expect JSON values. Use `builtins.toJSON` to ensure proper quoting.
+        Note that some settings such as `PAPERLESS_CONSUMER_IGNORE_PATTERN` expect JSON values.
+        Settings declared as lists or attrsets will automatically be serialised into JSON strings for your convenience.
       '';
-      example = literalExpression ''
-        {
-          PAPERLESS_OCR_LANGUAGE = "deu+eng";
-
-          PAPERLESS_DBHOST = "/run/postgresql";
-
-          PAPERLESS_CONSUMER_IGNORE_PATTERN = builtins.toJSON [ ".DS_STORE/*" "desktop.ini" ];
-
-          PAPERLESS_OCR_USER_ARGS = builtins.toJSON {
-            optimize = 1;
-            pdfa_image_compression = "lossless";
-          };
+      example = {
+        PAPERLESS_OCR_LANGUAGE = "deu+eng";
+        PAPERLESS_DBHOST = "/run/postgresql";
+        PAPERLESS_CONSUMER_IGNORE_PATTERN = [ ".DS_STORE/*" "desktop.ini" ];
+        PAPERLESS_OCR_USER_ARGS = {
+          optimize = 1;
+          pdfa_image_compression = "lossless";
         };
-      '';
+      };
     };
 
     user = mkOption {
@@ -296,6 +297,7 @@ in
       wantedBy = [ "paperless-scheduler.service" ];
       before = [ "paperless-scheduler.service" ];
       after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
       serviceConfig = defaultServiceConfig // {
         User = cfg.user;
         Type = "oneshot";
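
With the RFC42-style `settings` option, lists and attribute sets no longer need manual `builtins.toJSON`; a sketch based on the example shown above:

```
{
  services.paperless = {
    enable = true;
    settings = {
      PAPERLESS_OCR_LANGUAGE = "deu+eng";
      # Lists and attribute sets are serialised to JSON strings
      # automatically, so builtins.toJSON is no longer needed.
      PAPERLESS_CONSUMER_IGNORE_PATTERN = [ ".DS_STORE/*" "desktop.ini" ];
      PAPERLESS_OCR_USER_ARGS = {
        optimize = 1;
        pdfa_image_compression = "lossless";
      };
    };
  };
}
```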
diff --git a/nixos/modules/services/misc/portunus.nix b/nixos/modules/services/misc/portunus.nix
index 3299b6404c2b5..47af24f024cdf 100644
--- a/nixos/modules/services/misc/portunus.nix
+++ b/nixos/modules/services/misc/portunus.nix
@@ -102,7 +102,9 @@ in
     ldap = {
       package = mkOption {
         type = types.package;
-        # needs openldap built with a libxcrypt that support crypt sha256 until https://github.com/majewsky/portunus/issues/2 is solved
+        # needs openldap built with a libxcrypt that supports crypt sha256 until users have had time to migrate to newer hashes
+        # Ref: <https://github.com/majewsky/portunus/issues/2>
+        # TODO: remove in NixOS 24.11 (cf. same note on pkgs/servers/portunus/default.nix)
         default = pkgs.openldap.override { libxcrypt = pkgs.libxcrypt-legacy; };
         defaultText = lib.literalExpression "pkgs.openldap.override { libxcrypt = pkgs.libxcrypt-legacy; }";
         description = lib.mdDoc "The OpenLDAP package to use.";
@@ -228,7 +230,10 @@ in
         description = "Self-contained authentication service";
         wantedBy = [ "multi-user.target" ];
         after = [ "network.target" ];
-        serviceConfig.ExecStart = "${cfg.package.out}/bin/portunus-orchestrator";
+        serviceConfig = {
+          ExecStart = "${cfg.package}/bin/portunus-orchestrator";
+          Restart = "on-failure";
+        };
         environment = {
           PORTUNUS_LDAP_SUFFIX = cfg.ldap.suffix;
           PORTUNUS_SERVER_BINARY = "${cfg.package}/bin/portunus-server";
@@ -247,6 +252,7 @@ in
             acmeDirectory = config.security.acme.certs."${cfg.domain}".directory;
           in
           {
+            PORTUNUS_SERVER_HTTP_SECURE = "true";
             PORTUNUS_SLAPD_TLS_CA_CERTIFICATE = "/etc/ssl/certs/ca-certificates.crt";
             PORTUNUS_SLAPD_TLS_CERTIFICATE = "${acmeDirectory}/cert.pem";
             PORTUNUS_SLAPD_TLS_DOMAIN_NAME = cfg.domain;
diff --git a/nixos/modules/services/misc/preload.nix b/nixos/modules/services/misc/preload.nix
index 19b2531087dd6..d26e2c3d383e8 100644
--- a/nixos/modules/services/misc/preload.nix
+++ b/nixos/modules/services/misc/preload.nix
@@ -19,7 +19,7 @@ in {
 
       serviceConfig = {
         EnvironmentFile = "${cfg.package}/etc/conf.d/preload";
-        ExecStart = "${getExe cfg.package} --foreground $PRELOAD_OPTS";
+        ExecStart = "${getExe cfg.package} -l '' --foreground $PRELOAD_OPTS";
         Type = "simple";
         # Only preload data during CPU idle time
         IOSchedulingClass = 3;
diff --git a/nixos/modules/services/misc/redmine.nix b/nixos/modules/services/misc/redmine.nix
index e3941d2e29de4..c1209e34a92b5 100644
--- a/nixos/modules/services/misc/redmine.nix
+++ b/nixos/modules/services/misc/redmine.nix
@@ -53,7 +53,7 @@ in
       enable = mkEnableOption (lib.mdDoc "Redmine");
 
       package = mkPackageOption pkgs "redmine" {
-        example = "redmine.override { ruby = pkgs.ruby_2_7; }";
+        example = "redmine.override { ruby = pkgs.ruby_3_2; }";
       };
 
       user = mkOption {
@@ -264,9 +264,12 @@ in
       { assertion = cfg.database.passwordFile != null || cfg.database.socket != null;
         message = "one of services.redmine.database.socket or services.redmine.database.passwordFile must be set";
       }
-      { assertion = cfg.database.createLocally -> cfg.database.user == cfg.user && cfg.database.user == cfg.database.name;
+      { assertion = cfg.database.createLocally -> cfg.database.user == cfg.user;
         message = "services.redmine.database.user must be set to ${cfg.user} if services.redmine.database.createLocally is set true";
       }
+      { assertion = pgsqlLocal -> cfg.database.user == cfg.database.name;
+        message = "services.redmine.database.user and services.redmine.database.name must be the same when using a local postgresql database";
+      }
       { assertion = cfg.database.createLocally -> cfg.database.socket != null;
         message = "services.redmine.database.socket must be set if services.redmine.database.createLocally is set to true";
       }
diff --git a/nixos/modules/services/misc/tandoor-recipes.nix b/nixos/modules/services/misc/tandoor-recipes.nix
index 2d7d29b2e7172..6c51a9bb85550 100644
--- a/nixos/modules/services/misc/tandoor-recipes.nix
+++ b/nixos/modules/services/misc/tandoor-recipes.nix
@@ -12,7 +12,7 @@ let
     DEBUG_TOOLBAR = "0";
     MEDIA_ROOT = "/var/lib/tandoor-recipes";
   } // optionalAttrs (config.time.timeZone != null) {
-    TIMEZONE = config.time.timeZone;
+    TZ = config.time.timeZone;
   } // (
     lib.mapAttrs (_: toString) cfg.extraConfig
   );
diff --git a/nixos/modules/services/misc/taskserver/helper-tool.py b/nixos/modules/services/misc/taskserver/helper-tool.py
index fec05728b2b6b..b1eebb07686b2 100644
--- a/nixos/modules/services/misc/taskserver/helper-tool.py
+++ b/nixos/modules/services/misc/taskserver/helper-tool.py
@@ -61,6 +61,10 @@ def run_as_taskd_user():
     os.setuid(uid)
 
 
+def run_as_taskd_group():
+    gid = grp.getgrnam(TASKD_GROUP).gr_gid
+    os.setgid(gid)
+
 def taskd_cmd(cmd, *args, **kwargs):
     """
     Invoke taskd with the specified command with the privileges of the 'taskd'
@@ -90,7 +94,7 @@ def certtool_cmd(*args, **kwargs):
     """
     return subprocess.check_output(
         [CERTTOOL_COMMAND] + list(args),
-        preexec_fn=lambda: os.umask(0o077),
+        preexec_fn=run_as_taskd_group,
         stderr=subprocess.STDOUT,
         **kwargs
     )
@@ -156,17 +160,33 @@ def generate_key(org, user):
         sys.stderr.write(msg.format(user))
         return
 
-    basedir = os.path.join(TASKD_DATA_DIR, "keys", org, user)
-    if os.path.exists(basedir):
+    keysdir = os.path.join(TASKD_DATA_DIR, "keys" )
+    orgdir  = os.path.join(keysdir       , org    )
+    userdir = os.path.join(orgdir        , user   )
+    if os.path.exists(userdir):
         raise OSError("Keyfile directory for {} already exists.".format(user))
 
-    privkey = os.path.join(basedir, "private.key")
-    pubcert = os.path.join(basedir, "public.cert")
+    privkey = os.path.join(userdir, "private.key")
+    pubcert = os.path.join(userdir, "public.cert")
 
     try:
-        os.makedirs(basedir, mode=0o700)
+        # We change the permissions and the ownership of the base directories
+        # so that cfg.group and cfg.user can read the directories' contents.
+        # See also: https://bugs.python.org/issue42367
+        for bd in [keysdir, orgdir, userdir]:
+            # Allow cfg.group, but not others, to read the contents of this directory
+            os.makedirs(bd, exist_ok=True)
+            # not using mode= argument to makedirs intentionally - forcing the
+            # permissions we want
+            os.chmod(bd, mode=0o750)
+            os.chown(
+                bd,
+                uid=pwd.getpwnam(TASKD_USER).pw_uid,
+                gid=grp.getgrnam(TASKD_GROUP).gr_gid,
+            )
 
         certtool_cmd("-p", "--bits", CERT_BITS, "--outfile", privkey)
+        os.chmod(privkey, 0o640)
 
         template_data = [
             "organization = {0}".format(org),
@@ -187,7 +207,7 @@ def generate_key(org, user):
                 "--outfile", pubcert
             )
     except:
-        rmtree(basedir)
+        rmtree(userdir)
         raise
 
 
diff --git a/nixos/modules/services/misc/tuxclocker.nix b/nixos/modules/services/misc/tuxclocker.nix
new file mode 100644
index 0000000000000..5969f75b8e30d
--- /dev/null
+++ b/nixos/modules/services/misc/tuxclocker.nix
@@ -0,0 +1,71 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.tuxclocker;
+in
+{
+  options.programs.tuxclocker = {
+    enable = mkEnableOption (lib.mdDoc ''
+      TuxClocker, a hardware control and monitoring program
+    '');
+
+    enableAMD = mkEnableOption (lib.mdDoc ''
+      AMD GPU controls.
+      Sets the `amdgpu.ppfeaturemask` kernel parameter to 0xfffd7fff to enable all TuxClocker controls
+    '');
+
+    enabledNVIDIADevices = mkOption {
+      type = types.listOf types.int;
+      default = [ ];
+      example = [ 0 1 ];
+      description = lib.mdDoc ''
+        Enable NVIDIA GPU controls for a device by index.
+        Sets the `Coolbits` Xorg option to enable all TuxClocker controls.
+      '';
+    };
+
+    useUnfree = mkOption {
+      type = types.bool;
+      default = false;
+      example = true;
+      description = lib.mdDoc ''
+        Whether to use components requiring unfree dependencies.
+        Disabling this allows you to get everything from the binary cache.
+      '';
+    };
+  };
+
+  config = let
+      package = if cfg.useUnfree then pkgs.tuxclocker else pkgs.tuxclocker-without-unfree;
+    in
+      mkIf cfg.enable {
+        environment.systemPackages = [
+          package
+        ];
+
+        services.dbus.packages = [
+          package
+        ];
+
+        # MSR is used for some features
+        boot.kernelModules = [ "msr" ];
+
+        # https://download.nvidia.com/XFree86/Linux-x86_64/430.14/README/xconfigoptions.html#Coolbits
+        services.xserver.config = let
+          configSection = (i: ''
+            Section "Device"
+              Driver "nvidia"
+              Option "Coolbits" "31"
+              Identifier "Device-nvidia[${toString i}]"
+            EndSection
+          '');
+        in
+          concatStrings (map configSection cfg.enabledNVIDIADevices);
+
+        # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/gpu/drm/amd/include/amd_shared.h#n207
+        # Enable everything modifiable in TuxClocker
+        boot.kernelParams = mkIf cfg.enableAMD [ "amdgpu.ppfeaturemask=0xfffd7fff" ];
+      };
+}
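
A minimal sketch of the new TuxClocker module (note the options live under `programs.tuxclocker` even though the file sits in `services/misc`):

```
{
  programs.tuxclocker = {
    enable = true;
    # Sets amdgpu.ppfeaturemask to unlock AMD controls.
    enableAMD = true;
    # Sets Coolbits for the NVIDIA GPU at index 0.
    enabledNVIDIADevices = [ 0 ];
  };
}
```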
diff --git a/nixos/modules/services/monitoring/grafana.nix b/nixos/modules/services/monitoring/grafana.nix
index 62c50490ee99a..5ac010bf81ee8 100644
--- a/nixos/modules/services/monitoring/grafana.nix
+++ b/nixos/modules/services/monitoring/grafana.nix
@@ -74,7 +74,7 @@ let
     fi
   '';
   provisionConfDir = pkgs.runCommand "grafana-provisioning" { nativeBuildInputs = [ pkgs.xorg.lndir ]; } ''
-    mkdir -p $out/{datasources,dashboards,notifiers,alerting}
+    mkdir -p $out/{alerting,datasources,dashboards,notifiers,plugins}
     ${ln { src = datasourceFileOrDir;    dir = "datasources"; filename = "datasource"; }}
     ${ln { src = dashboardFileOrDir;     dir = "dashboards";  filename = "dashboard"; }}
     ${ln { src = notifierFileOrDir;      dir = "notifiers";   filename = "notifier"; }}
@@ -1831,7 +1831,7 @@ in
         set -o errexit -o pipefail -o nounset -o errtrace
         shopt -s inherit_errexit
 
-        exec ${cfg.package}/bin/grafana-server -homepath ${cfg.dataDir} -config ${configFile}
+        exec ${cfg.package}/bin/grafana server -homepath ${cfg.dataDir} -config ${configFile}
       '';
       serviceConfig = {
         WorkingDirectory = cfg.dataDir;
diff --git a/nixos/modules/services/monitoring/mackerel-agent.nix b/nixos/modules/services/monitoring/mackerel-agent.nix
index 62a7858500f24..5915634ed26fe 100644
--- a/nixos/modules/services/monitoring/mackerel-agent.nix
+++ b/nixos/modules/services/monitoring/mackerel-agent.nix
@@ -84,6 +84,7 @@ in {
     # upstream service file in https://git.io/JUt4Q
     systemd.services.mackerel-agent = {
       description = "mackerel.io agent";
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" "nss-lookup.target" ];
       wantedBy = [ "multi-user.target" ];
       environment = {
diff --git a/nixos/modules/services/monitoring/netdata.nix b/nixos/modules/services/monitoring/netdata.nix
index 78b12537e27fd..5cf3c096397cb 100644
--- a/nixos/modules/services/monitoring/netdata.nix
+++ b/nixos/modules/services/monitoring/netdata.nix
@@ -198,6 +198,7 @@ in {
         }
       ];
 
+    services.netdata.configDir.".opt-out-from-anonymous-statistics" = mkIf (!cfg.enableAnalyticsReporting) (pkgs.writeText ".opt-out-from-anonymous-statistics" "");
     environment.etc."netdata/netdata.conf".source = configFile;
     environment.etc."netdata/conf.d".source = configDirectory;
 
@@ -205,7 +206,15 @@ in {
       description = "Real time performance monitoring";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
-      path = (with pkgs; [ curl gawk iproute2 which procps bash ])
+      path = (with pkgs; [
+          curl
+          gawk
+          iproute2
+          which
+          procps
+          bash
+          util-linux # provides logger command; required for syslog health alarms
+      ])
         ++ lib.optional cfg.python.enable (pkgs.python3.withPackages cfg.python.extraPackages)
         ++ lib.optional config.virtualisation.libvirtd.enable (config.virtualisation.libvirtd.package);
       environment = {
diff --git a/nixos/modules/services/monitoring/prometheus/alertmanager.nix b/nixos/modules/services/monitoring/prometheus/alertmanager.nix
index 4fd630015f35a..bb426d8b7beb0 100644
--- a/nixos/modules/services/monitoring/prometheus/alertmanager.nix
+++ b/nixos/modules/services/monitoring/prometheus/alertmanager.nix
@@ -174,6 +174,7 @@ in {
 
       systemd.services.alertmanager = {
         wantedBy = [ "multi-user.target" ];
+        wants    = [ "network-online.target" ];
         after    = [ "network-online.target" ];
         preStart = ''
            ${lib.getBin pkgs.envsubst}/bin/envsubst -o "/tmp/alert-manager-substituted.yaml" \
diff --git a/nixos/modules/services/monitoring/prometheus/default.nix b/nixos/modules/services/monitoring/prometheus/default.nix
index 90ea56658b02d..b4ac8e21451af 100644
--- a/nixos/modules/services/monitoring/prometheus/default.nix
+++ b/nixos/modules/services/monitoring/prometheus/default.nix
@@ -41,12 +41,12 @@ let
   # This becomes the main config file for Prometheus
   promConfig = {
     global = filterValidPrometheus cfg.globalConfig;
-    rule_files = map (promtoolCheck "check rules" "rules") (cfg.ruleFiles ++ [
-      (pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg.rules))
-    ]);
     scrape_configs = filterValidPrometheus cfg.scrapeConfigs;
     remote_write = filterValidPrometheus cfg.remoteWrite;
     remote_read = filterValidPrometheus cfg.remoteRead;
+    rule_files = optionals (!(cfg.enableAgentMode)) (map (promtoolCheck "check rules" "rules") (cfg.ruleFiles ++ [
+      (pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg.rules))
+    ]));
     alerting = {
       inherit (cfg) alertmanagers;
     };
@@ -62,15 +62,20 @@ let
     promtoolCheck "check config ${lib.optionalString (cfg.checkConfig == "syntax-only") "--syntax-only"}" "prometheus.yml" yml;
 
   cmdlineArgs = cfg.extraFlags ++ [
-    "--storage.tsdb.path=${workingDir}/data/"
     "--config.file=${
       if cfg.enableReload
       then "/etc/prometheus/prometheus.yaml"
       else prometheusYml
     }"
     "--web.listen-address=${cfg.listenAddress}:${builtins.toString cfg.port}"
-    "--alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity}"
-  ] ++ optional (cfg.webExternalUrl != null) "--web.external-url=${cfg.webExternalUrl}"
+  ] ++ (
+    if (cfg.enableAgentMode) then [
+      "--enable-feature=agent"
+    ] else [
+       "--alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity }"
+       "--storage.tsdb.path=${workingDir}/data/"
+    ])
+    ++ optional (cfg.webExternalUrl != null) "--web.external-url=${cfg.webExternalUrl}"
     ++ optional (cfg.retentionTime != null) "--storage.tsdb.retention.time=${cfg.retentionTime}"
     ++ optional (cfg.webConfigFile != null) "--web.config.file=${cfg.webConfigFile}";
 
@@ -1430,6 +1435,10 @@ let
       remote_timeout = mkOpt types.str ''
         Timeout for requests to the remote write endpoint.
       '';
+      headers = mkOpt (types.attrsOf types.str) ''
+        Custom HTTP headers to be sent along with each remote write request.
+        Be aware that headers that are set by Prometheus itself can't be overwritten.
+      '';
       write_relabel_configs = mkOpt (types.listOf promTypes.relabel_config) ''
         List of remote write relabel configurations.
       '';
@@ -1525,6 +1534,10 @@ let
       remote_timeout = mkOpt types.str ''
         Timeout for requests to the remote read endpoint.
       '';
+      headers = mkOpt (types.attrsOf types.str) ''
+        Custom HTTP headers to be sent along with each remote read request.
+        Be aware that headers that are set by Prometheus itself can't be overwritten.
+      '';
       read_recent = mkOpt types.bool ''
         Whether reads should be made for queries for time ranges that
         the local storage should have complete data for.
@@ -1612,6 +1625,8 @@ in
       '';
     };
 
+    enableAgentMode = mkEnableOption (lib.mdDoc "agent mode");
+
     configText = mkOption {
       type = types.nullOr types.lines;
       default = null;
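
A sketch of how the new agent mode might be combined with the new `headers` option for remote write; the URL and header name are illustrative:

```
{
  services.prometheus = {
    enable = true;
    # Agent mode disables rule evaluation and local TSDB storage;
    # samples are forwarded via remote write instead.
    enableAgentMode = true;
    remoteWrite = [{
      url = "https://metrics.example.org/api/v1/write";
      # Custom per-request HTTP headers (headers set by Prometheus
      # itself cannot be overridden).
      headers = { "X-Scope-OrgID" = "tenant-a"; };
    }];
  };
}
```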
diff --git a/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixos/modules/services/monitoring/prometheus/exporters.nix
index 39abd293b2d18..35db8a7376b11 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -64,6 +64,7 @@ let
     "pgbouncer"
     "php-fpm"
     "pihole"
+    "ping"
     "postfix"
     "postgres"
     "process"
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/kea.nix b/nixos/modules/services/monitoring/prometheus/exporters/kea.nix
index 8b1cd47d0a409..3abb6ff6bdf8b 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/kea.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/kea.nix
@@ -15,8 +15,8 @@ in {
       type = types.listOf types.str;
       example = literalExpression ''
         [
-          "/run/kea-dhcp4/kea-dhcp4.socket"
-          "/run/kea-dhcp6/kea-dhcp6.socket"
+          "/run/kea/kea-dhcp4.socket"
+          "/run/kea/kea-dhcp6.socket"
         ]
       '';
       description = lib.mdDoc ''
@@ -31,13 +31,15 @@ in {
     ];
     serviceConfig = {
       User = "kea";
+      DynamicUser = true;
       ExecStart = ''
         ${pkgs.prometheus-kea-exporter}/bin/kea-exporter \
           --address ${cfg.listenAddress} \
           --port ${toString cfg.port} \
           ${concatStringsSep " " cfg.controlSocketPaths}
       '';
-      SupplementaryGroups = [ "kea" ];
+      RuntimeDirectory = "kea";
+      RuntimeDirectoryPreserve = true;
       RestrictAddressFamilies = [
         # Need AF_UNIX to collect data
         "AF_UNIX"
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/mongodb.nix b/nixos/modules/services/monitoring/prometheus/exporters/mongodb.nix
index db5c4d15be662..b36a09c609206 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/mongodb.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/mongodb.nix
@@ -55,12 +55,12 @@ in
       RuntimeDirectory = "prometheus-mongodb-exporter";
       ExecStart = ''
         ${getExe pkgs.prometheus-mongodb-exporter} \
-          --mongodb.uri=${cfg.uri}
+          --mongodb.uri="${cfg.uri}" \
           ${if cfg.collectAll then "--collect-all" else concatMapStringsSep " " (x: "--collect.${x}") cfg.collector} \
-          --collector.collstats=${concatStringsSep "," cfg.collStats} \
-          --collector.indexstats=${concatStringsSep "," cfg.indexStats} \
-          --web.listen-address=${cfg.listenAddress}:${toString cfg.port} \
-          --web.telemetry-path=${cfg.telemetryPath} \
+          ${optionalString (length cfg.collStats > 0) "--mongodb.collstats-colls=${concatStringsSep "," cfg.collStats}"} \
+          ${optionalString (length cfg.indexStats > 0) "--mongodb.indexstats-colls=${concatStringsSep "," cfg.indexStats}"} \
+          --web.listen-address="${cfg.listenAddress}:${toString cfg.port}" \
+          --web.telemetry-path="${cfg.telemetryPath}" \
           ${escapeShellArgs cfg.extraFlags}
       '';
     };
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix b/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix
index 3158e71f0468b..88dc79fc2503f 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix
@@ -43,14 +43,14 @@ in
     };
   };
   serviceOpts = mkMerge ([{
+    environment.CONST_LABELS = concatStringsSep "," cfg.constLabels;
     serviceConfig = {
       ExecStart = ''
         ${pkgs.prometheus-nginx-exporter}/bin/nginx-prometheus-exporter \
           --nginx.scrape-uri='${cfg.scrapeUri}' \
-          --nginx.ssl-verify=${boolToString cfg.sslVerify} \
+          --${lib.optionalString (!cfg.sslVerify) "no-"}nginx.ssl-verify \
           --web.listen-address=${cfg.listenAddress}:${toString cfg.port} \
           --web.telemetry-path=${cfg.telemetryPath} \
-          --prometheus.const-labels=${concatStringsSep "," cfg.constLabels} \
           ${concatStringsSep " \\\n  " cfg.extraFlags}
       '';
     };
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/ping.nix b/nixos/modules/services/monitoring/prometheus/exporters/ping.nix
new file mode 100644
index 0000000000000..af78b6bef6258
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/ping.nix
@@ -0,0 +1,48 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.ping;
+
+  settingsFormat = pkgs.formats.yaml {};
+  configFile = settingsFormat.generate "config.yml" cfg.settings;
+in
+{
+  port = 9427;
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = ''
+        Path under which to expose metrics.
+      '';
+    };
+
+    settings = mkOption {
+      type = settingsFormat.type;
+      default = {};
+
+      description = lib.mdDoc ''
+        Configuration for ping_exporter, see
+        <https://github.com/czerwonk/ping_exporter>
+        for supported values.
+      '';
+    };
+  };
+
+  serviceOpts = {
+    serviceConfig = {
+      # ping-exporter needs `CAP_NET_RAW` to run as non root https://github.com/czerwonk/ping_exporter#running-as-non-root-user
+      CapabilityBoundingSet = [ "CAP_NET_RAW" ];
+      AmbientCapabilities = [ "CAP_NET_RAW" ];
+      ExecStart = ''
+        ${pkgs.prometheus-ping-exporter}/bin/ping_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          --config.path="${configFile}" \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
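
A minimal sketch of enabling the new exporter; the `targets` key is the upstream ping_exporter configuration key (see the linked README for the full schema) and the hosts are placeholders:

```nix
{
  services.prometheus.exporters.ping = {
    enable = true;
    settings = {
      # Passed verbatim to ping_exporter as config.yml.
      targets = [ "example.org" "192.0.2.1" ];
    };
  };
}
```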
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix b/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix
index 4112774940139..b9ab305f7c082 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix
@@ -19,7 +19,11 @@ in
           };
           apiKeyFile = mkOption {
             type = types.str;
-            description = "File containing the API key.";
+            description = ''
+              The path to a file containing the API key.
+              The file is securely passed to the service by leveraging systemd credentials.
+              No special permissions need to be set on this file.
+            '';
             example = "/run/secrets/sabnzbd_apikey";
           };
         };
@@ -30,18 +34,24 @@ in
   serviceOpts =
     let
       servers = lib.zipAttrs cfg.servers;
-      apiKeys = lib.concatStringsSep "," (builtins.map (file: "$(cat ${file})") servers.apiKeyFile);
+      credentials = lib.imap0 (i: v: { name = "apikey-${toString i}"; path = v; }) servers.apiKeyFile;
     in
     {
+      serviceConfig.LoadCredential = builtins.map ({ name, path }: "${name}:${path}") credentials;
+
       environment = {
         METRICS_PORT = toString cfg.port;
         METRICS_ADDR = cfg.listenAddress;
         SABNZBD_BASEURLS = lib.concatStringsSep "," servers.baseUrl;
       };
 
-      script = ''
-        export SABNZBD_APIKEYS="${apiKeys}"
-        exec ${lib.getExe pkgs.prometheus-sabnzbd-exporter}
-      '';
+      script =
+        let
+          apiKeys = lib.concatStringsSep "," (builtins.map (cred: "$(< $CREDENTIALS_DIRECTORY/${cred.name})") credentials);
+        in
+        ''
+          export SABNZBD_APIKEYS="${apiKeys}"
+          exec ${lib.getExe pkgs.prometheus-sabnzbd-exporter}
+        '';
     };
 }
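
A minimal sketch of a configuration using the credential-based API key handling introduced above; the base URL and file path are placeholders:

```nix
{
  services.prometheus.exporters.sabnzbd = {
    enable = true;
    servers = [
      {
        baseUrl = "http://localhost:8080";
        # Read at service start via systemd LoadCredential, never copied to the store.
        apiKeyFile = "/run/secrets/sabnzbd_apikey";
      }
    ];
  };
}
```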
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix b/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
index edc6e4b5022a5..840ce493ee812 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
@@ -4,6 +4,25 @@ with lib;
 
 let
   cfg = config.services.prometheus.exporters.snmp;
+
+  # This ensures that we can deal with string paths, path types and
+  # store-path strings with context.
+  coerceConfigFile = file:
+    if (builtins.isPath file) || (lib.isStorePath file) then
+      file
+    else
+      (lib.warn ''
+        ${logPrefix}: configuration file "${file}" is being copied to the nix-store.
+        If you would like to avoid that, please set enableConfigCheck to false.
+        '' /. + file);
+
+  checkConfig = file:
+    pkgs.runCommandLocal "checked-snmp-exporter-config.yml" {
+      nativeBuildInputs = [ pkgs.buildPackages.prometheus-snmp-exporter ];
+    } ''
+      ln -s ${coerceConfigFile file} $out
+      snmp_exporter --dry-run --config.file $out
+    '';
 in
 {
   port = 9116;
@@ -24,15 +43,23 @@ in
         Snmp exporter configuration as nix attribute set. Mutually exclusive with 'configurationPath' option.
       '';
       example = {
-        "default" = {
-          "version" = 2;
-          "auth" = {
-            "community" = "public";
-          };
+        auths.public_v2 = {
+          community = "public";
+          version = 2;
         };
       };
     };
 
+    enableConfigCheck = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to run a correctness check for the configuration file. This depends
+        on the configuration file residing in the nix-store. Paths passed as string will
+        be copied to the store.
+      '';
+    };
+
     logFormat = mkOption {
       type = types.enum ["logfmt" "json"];
       default = "logfmt";
@@ -50,9 +77,13 @@ in
     };
   };
   serviceOpts = let
-    configFile = if cfg.configurationPath != null
-                 then cfg.configurationPath
-                 else "${pkgs.writeText "snmp-exporter-conf.yml" (builtins.toJSON cfg.configuration)}";
+    uncheckedConfigFile = if cfg.configurationPath != null
+                          then cfg.configurationPath
+                          else "${pkgs.writeText "snmp-exporter-conf.yml" (builtins.toJSON cfg.configuration)}";
+    configFile = if cfg.enableConfigCheck then
+      checkConfig uncheckedConfigFile
+    else
+      uncheckedConfigFile;
     in {
     serviceConfig = {
       ExecStart = ''
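
A minimal sketch showing the updated `configuration` example together with the new `enableConfigCheck` switch:

```nix
{
  services.prometheus.exporters.snmp = {
    enable = true;
    configuration = {
      auths.public_v2 = {
        community = "public";
        version = 2;
      };
    };
    # Set to false if the configuration must not be copied into the Nix store.
    # enableConfigCheck = false;
  };
}
```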
diff --git a/nixos/modules/services/monitoring/snmpd.nix b/nixos/modules/services/monitoring/snmpd.nix
new file mode 100644
index 0000000000000..f2d3953e6a620
--- /dev/null
+++ b/nixos/modules/services/monitoring/snmpd.nix
@@ -0,0 +1,83 @@
+{ pkgs, config, lib, ... }:
+
+let
+  cfg = config.services.snmpd;
+  configFile = if cfg.configText != "" then
+    pkgs.writeText "snmpd.cfg" ''
+      ${cfg.configText}
+    '' else null;
+in {
+  options.services.snmpd = {
+    enable = lib.mkEnableOption "snmpd";
+
+    package = lib.mkPackageOption pkgs "net-snmp" {};
+
+    listenAddress = lib.mkOption {
+      type = lib.types.str;
+      default = "0.0.0.0";
+      description = lib.mdDoc ''
+        The address to listen on for SNMP and AgentX messages.
+      '';
+      example = "127.0.0.1";
+    };
+
+    port = lib.mkOption {
+      type = lib.types.port;
+      default = 161;
+      description = lib.mdDoc ''
+        The port to listen on for SNMP and AgentX messages.
+      '';
+    };
+
+    openFirewall = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Open port in firewall for snmpd.
+      '';
+    };
+
+    configText = lib.mkOption {
+      type = lib.types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        The contents of the snmpd.conf. If the {option}`configFile` option
+        is set, this value will be ignored.
+
+        Note that the contents of this option will be added to the Nix
+        store as world-readable plain text; {option}`configFile` can be used together
+        with a secret management tool to protect sensitive data.
+      '';
+    };
+
+    configFile = lib.mkOption {
+      type = lib.types.path;
+      default = configFile;
+      defaultText = lib.literalMD "The value of {option}`configText`.";
+      description = lib.mdDoc ''
+        Path to the snmpd.conf file. By default, if {option}`configText` is set,
+        a config file will be automatically generated.
+      '';
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services."snmpd" = {
+      description = "Simple Network Management Protocol (SNMP) daemon.";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${lib.getExe' cfg.package "snmpd"} -f -Lo -c ${cfg.configFile} ${cfg.listenAddress}:${toString cfg.port}";
+      };
+    };
+
+    networking.firewall.allowedUDPPorts = lib.mkIf cfg.openFirewall [
+      cfg.port
+    ];
+  };
+
+  meta.maintainers = [ lib.maintainers.eliandoran ];
+
+}
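
A minimal sketch of the new module; the `rocommunity` line is an ordinary snmpd.conf directive and only serves as an example:

```nix
{
  services.snmpd = {
    enable = true;
    listenAddress = "127.0.0.1";
    configText = ''
      rocommunity public 127.0.0.1
    '';
  };
}
```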
diff --git a/nixos/modules/services/monitoring/teamviewer.nix b/nixos/modules/services/monitoring/teamviewer.nix
index 9b1278317943d..7c45247aa6d5a 100644
--- a/nixos/modules/services/monitoring/teamviewer.nix
+++ b/nixos/modules/services/monitoring/teamviewer.nix
@@ -30,6 +30,7 @@ in
       description = "TeamViewer remote control daemon";
 
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" "network.target" "dbus.service" ];
       requires = [ "dbus.service" ];
       preStart = "mkdir -pv /var/lib/teamviewer /var/log/teamviewer";
diff --git a/nixos/modules/services/monitoring/telegraf.nix b/nixos/modules/services/monitoring/telegraf.nix
index ee28ee03adf33..3bab8aba7bd60 100644
--- a/nixos/modules/services/monitoring/telegraf.nix
+++ b/nixos/modules/services/monitoring/telegraf.nix
@@ -59,6 +59,7 @@ in {
     in {
       description = "Telegraf Agent";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       path = lib.optional (config.services.telegraf.extraConfig.inputs ? procstat) pkgs.procps;
       serviceConfig = {
diff --git a/nixos/modules/services/monitoring/thanos.nix b/nixos/modules/services/monitoring/thanos.nix
index 5baa0d8446e54..02502816ef5d7 100644
--- a/nixos/modules/services/monitoring/thanos.nix
+++ b/nixos/modules/services/monitoring/thanos.nix
@@ -394,9 +394,8 @@ let
         Maximum number of queries processed concurrently by query node.
       '';
 
-      query.replica-labels = mkAttrsParam "query.replica-label" ''
+      query.replica-labels = mkListParam "query.replica-label" ''
         Labels to treat as a replica indicator along which data is
-
         deduplicated.
 
         Still you will be able to query without deduplication using
diff --git a/nixos/modules/services/monitoring/ups.nix b/nixos/modules/services/monitoring/ups.nix
index efef2d777acd8..63afb5deb5bd4 100644
--- a/nixos/modules/services/monitoring/ups.nix
+++ b/nixos/modules/services/monitoring/ups.nix
@@ -6,9 +6,83 @@ with lib;
 
 let
   cfg = config.power.ups;
-in
+  defaultPort = 3493;
+
+  nutFormat = {
+
+    type = with lib.types; let
+
+      singleAtom = nullOr (oneOf [
+        bool
+        int
+        float
+        str
+      ]) // {
+        description = "atom (null, bool, int, float or string)";
+      };
+
+      in attrsOf (oneOf [
+        singleAtom
+        (listOf (nonEmptyListOf singleAtom))
+      ]);
+
+    generate = name: value:
+      let
+        normalizedValue =
+          lib.mapAttrs (key: val:
+            if lib.isList val
+            then forEach val (elem: if lib.isList elem then elem else [elem])
+            else
+              if val == null
+              then []
+              else [[val]]
+          ) value;
+
+        mkValueString = concatMapStringsSep " " (v:
+          let str = generators.mkValueStringDefault {} v;
+          in
+            # Quote the value if it has spaces and isn't already quoted.
+            if (hasInfix " " str) && !(hasPrefix "\"" str && hasSuffix "\"" str)
+            then "\"${str}\""
+            else str
+        );
+
+      in pkgs.writeText name (lib.generators.toKeyValue {
+        mkKeyValue = generators.mkKeyValueDefault { inherit mkValueString; } " ";
+        listsAsDuplicateKeys = true;
+      } normalizedValue);
+
+  };
+
+  installSecrets = source: target: secrets:
+    pkgs.writeShellScript "installSecrets.sh" ''
+      install -m0600 -D ${source} "${target}"
+      ${concatLines (forEach secrets (name: ''
+        ${pkgs.replace-secret}/bin/replace-secret \
+          '@${name}@' \
+          "$CREDENTIALS_DIRECTORY/${name}" \
+          "${target}"
+      ''))}
+      chmod u-w "${target}"
+    '';
+
+  upsmonConf = nutFormat.generate "upsmon.conf" cfg.upsmon.settings;
+
+  upsdUsers = pkgs.writeText "upsd.users" (let
+    # This looks like INI, but it's not quite because the
+    # 'upsmon' option lacks a '='. See: man upsd.users
+    userConfig = name: user: concatStringsSep "\n      " (concatLists [
+      [
+        "[${name}]"
+        "password = \"@upsdusers_password_${name}@\""
+      ]
+      (optional (user.upsmon != null) "upsmon ${user.upsmon}")
+      (forEach user.actions (action: "actions = ${action}"))
+      (forEach user.instcmds (instcmd: "instcmds = ${instcmd}"))
+    ]);
+  in concatStringsSep "\n\n" (mapAttrsToList userConfig cfg.users));
+
 
-let
   upsOptions = {name, config, ...}:
   {
     options = {
@@ -95,6 +169,213 @@ let
     };
   };
 
+  listenOptions = {
+    options = {
+      address = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Address of the interface for `upsd` to listen on.
+          See `man upsd.conf` for details.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = defaultPort;
+        description = lib.mdDoc ''
+          TCP port for `upsd` to listen on.
+          See `man upsd.conf` for details.
+        '';
+      };
+    };
+  };
+
+  upsdOptions = {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        defaultText = literalMD "`true` if `mode` is one of `standalone`, `netserver`";
+        description = mdDoc "Whether to enable `upsd`.";
+      };
+
+      listen = mkOption {
+        type = with types; listOf (submodule listenOptions);
+        default = [];
+        example = [
+          {
+            address = "192.168.50.1";
+          }
+          {
+            address = "::1";
+            port = 5923;
+          }
+        ];
+        description = lib.mdDoc ''
+          Addresses for `upsd` to listen on.
+          See `man upsd.conf` for details.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Additional lines to add to `upsd.conf`.
+        '';
+      };
+    };
+
+    config = {
+      enable = mkDefault (elem cfg.mode [ "standalone" "netserver" ]);
+    };
+  };
+
+
+  monitorOptions = { name, config, ... }: {
+    options = {
+      system = mkOption {
+        type = types.str;
+        default = name;
+        description = lib.mdDoc ''
+          Identifier of the UPS to monitor, in this form: `<upsname>[@<hostname>[:<port>]]`
+          See `upsmon.conf` for details.
+        '';
+      };
+
+      powerValue = mkOption {
+        type = types.int;
+        default = 1;
+        description = lib.mdDoc ''
+          Number of power supplies that the UPS feeds on this system.
+          See `upsmon.conf` for details.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Username from `upsd.users` for accessing this UPS.
+          See `upsmon.conf` for details.
+        '';
+      };
+
+      passwordFile = mkOption {
+        type = types.str;
+        defaultText = literalMD "power.ups.users.\${user}.passwordFile";
+        description = lib.mdDoc ''
+          The full path to a file containing the password from
+          `upsd.users` for accessing this UPS. The password file
+          is read on service start.
+          See `upsmon.conf` for details.
+        '';
+      };
+
+      type = mkOption {
+        type = types.str;
+        default = "master";
+        description = lib.mdDoc ''
+          The relationship with `upsd`.
+          See `upsmon.conf` for details.
+        '';
+      };
+    };
+
+    config = {
+      passwordFile = mkDefault cfg.users.${config.user}.passwordFile;
+    };
+  };
+
+  upsmonOptions = {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        defaultText = literalMD "`true` if `mode` is one of `standalone`, `netserver`, `netclient`";
+        description = mdDoc "Whether to enable `upsmon`.";
+      };
+
+      monitor = mkOption {
+        type = with types; attrsOf (submodule monitorOptions);
+        default = {};
+        description = lib.mdDoc ''
+          Set of UPS to monitor. See `man upsmon.conf` for details.
+        '';
+      };
+
+      settings = mkOption {
+        type = nutFormat.type;
+        default = {};
+        defaultText = literalMD ''
+          {
+            MINSUPPLIES = 1;
+            RUN_AS_USER = "root";
+            NOTIFYCMD = "''${pkgs.nut}/bin/upssched";
+            SHUTDOWNCMD = "''${pkgs.systemd}/bin/shutdown now";
+          }
+        '';
+        description = mdDoc "Additional settings to add to `upsmon.conf`.";
+        example = literalMD ''
+          {
+            MINSUPPLIES = 2;
+            NOTIFYFLAG = [
+              [ "ONLINE" "SYSLOG+EXEC" ]
+              [ "ONBATT" "SYSLOG+EXEC" ]
+            ];
+          }
+        '';
+      };
+    };
+
+    config = {
+      enable = mkDefault (elem cfg.mode [ "standalone" "netserver" "netclient" ]);
+      settings = {
+        RUN_AS_USER = "root"; # TODO: replace 'root' by another username.
+        MINSUPPLIES = mkDefault 1;
+        NOTIFYCMD = mkDefault "${pkgs.nut}/bin/upssched";
+        SHUTDOWNCMD = mkDefault "${pkgs.systemd}/bin/shutdown now";
+        MONITOR = flip mapAttrsToList cfg.upsmon.monitor (name: monitor: with monitor; [ system powerValue user "\"@upsmon_password_${name}@\"" type ]);
+      };
+    };
+  };
+
+  userOptions = {
+    options = {
+      passwordFile = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The full path to a file that contains the user's (clear text)
+          password. The password file is read on service start.
+        '';
+      };
+
+      actions = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = lib.mdDoc ''
+          Allow the user to do certain things with upsd.
+          See `man upsd.users` for details.
+        '';
+      };
+
+      instcmds = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = lib.mdDoc ''
+          Let the user initiate specific instant commands. Use "ALL" to grant all commands automatically. For the full list of what your UPS supports, use "upscmd -l".
+          See `man upsd.users` for details.
+        '';
+      };
+
+      upsmon = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Add the necessary actions for a upsmon process to work.
+          See `man upsd.users` for details.
+        '';
+      };
+    };
+  };
+
 in
 
 
@@ -103,19 +384,14 @@ in
     # powerManagement.powerDownCommands
 
     power.ups = {
-      enable = mkOption {
-        default = false;
-        type = with types; bool;
-        description = lib.mdDoc ''
-          Enables support for Power Devices, such as Uninterruptible Power
-          Supplies, Power Distribution Units and Solar Controllers.
-        '';
-      };
+      enable = mkEnableOption (lib.mdDoc ''
+        support for Power Devices, such as Uninterruptible Power
+        Supplies, Power Distribution Units and Solar Controllers
+      '');
 
-      # This option is not used yet.
       mode = mkOption {
         default = "standalone";
-        type = types.str;
+        type = types.enum [ "none" "standalone" "netserver" "netclient" ];
         description = lib.mdDoc ''
           The MODE determines which part of the NUT is to be started, and
           which configuration files must be modified.
@@ -148,6 +424,13 @@ in
         '';
       };
 
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for `upsd`.
+        '';
+      };
 
       maxStartDelay = mkOption {
         default = 45;
@@ -161,6 +444,22 @@ in
         '';
       };
 
+      upsmon = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Options for the `upsmon.conf` configuration file.
+        '';
+        type = types.submodule upsmonOptions;
+      };
+
+      upsd = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Options for the `upsd.conf` configuration file.
+        '';
+        type = types.submodule upsdOptions;
+      };
+
       ups = mkOption {
         default = {};
         # see nut/etc/ups.conf.sample
@@ -172,46 +471,95 @@ in
         type = with types; attrsOf (submodule upsOptions);
       };
 
+      users = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Users that can access upsd. See `man upsd.users`.
+        '';
+        type = with types; attrsOf (submodule userOptions);
+      };
+
     };
   };
 
   config = mkIf cfg.enable {
 
+    assertions = [
+      (let
+        totalPowerValue = foldl' add 0 (map (monitor: monitor.powerValue) (attrValues cfg.upsmon.monitor));
+        minSupplies = cfg.upsmon.settings.MINSUPPLIES;
+      in mkIf cfg.upsmon.enable {
+        assertion = totalPowerValue >= minSupplies;
+        message = ''
+          `power.ups.upsmon`: Total configured power value (${toString totalPowerValue}) must be at least MINSUPPLIES (${toString minSupplies}).
+        '';
+      })
+    ];
+
     environment.systemPackages = [ pkgs.nut ];
 
-    systemd.services.upsmon = {
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts =
+        if cfg.upsd.listen == []
+        then [ defaultPort ]
+        else unique (forEach cfg.upsd.listen (listen: listen.port));
+    };
+
+    systemd.services.upsmon = let
+      secrets = mapAttrsToList (name: monitor: "upsmon_password_${name}") cfg.upsmon.monitor;
+      createUpsmonConf = installSecrets upsmonConf "/run/nut/upsmon.conf" secrets;
+    in {
+      enable = cfg.upsmon.enable;
       description = "Uninterruptible Power Supplies (Monitor)";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
-      serviceConfig.Type = "forking";
-      script = "${pkgs.nut}/sbin/upsmon";
-      environment.NUT_CONFPATH = "/etc/nut/";
-      environment.NUT_STATEPATH = "/var/lib/nut/";
+      serviceConfig = {
+        Type = "forking";
+        ExecStartPre = "${createUpsmonConf}";
+        ExecStart = "${pkgs.nut}/sbin/upsmon";
+        ExecReload = "${pkgs.nut}/sbin/upsmon -c reload";
+        LoadCredential = mapAttrsToList (name: monitor: "upsmon_password_${name}:${monitor.passwordFile}") cfg.upsmon.monitor;
+      };
+      environment.NUT_CONFPATH = "/etc/nut";
+      environment.NUT_STATEPATH = "/var/lib/nut";
     };
 
-    systemd.services.upsd = {
+    systemd.services.upsd = let
+      secrets = mapAttrsToList (name: user: "upsdusers_password_${name}") cfg.users;
+      createUpsdUsers = installSecrets upsdUsers "/run/nut/upsd.users" secrets;
+    in {
+      enable = cfg.upsd.enable;
       description = "Uninterruptible Power Supplies (Daemon)";
       after = [ "network.target" "upsmon.service" ];
       wantedBy = [ "multi-user.target" ];
-      serviceConfig.Type = "forking";
-      # TODO: replace 'root' by another username.
-      script = "${pkgs.nut}/sbin/upsd -u root";
-      environment.NUT_CONFPATH = "/etc/nut/";
-      environment.NUT_STATEPATH = "/var/lib/nut/";
+      serviceConfig = {
+        Type = "forking";
+        ExecStartPre = "${createUpsdUsers}";
+        # TODO: replace 'root' by another username.
+        ExecStart = "${pkgs.nut}/sbin/upsd -u root";
+        ExecReload = "${pkgs.nut}/sbin/upsd -c reload";
+        LoadCredential = mapAttrsToList (name: user: "upsdusers_password_${name}:${user.passwordFile}") cfg.users;
+      };
+      environment.NUT_CONFPATH = "/etc/nut";
+      environment.NUT_STATEPATH = "/var/lib/nut";
+      restartTriggers = [
+        config.environment.etc."nut/upsd.conf".source
+      ];
     };
 
     systemd.services.upsdrv = {
+      enable = cfg.upsd.enable;
       description = "Uninterruptible Power Supplies (Register all UPS)";
       after = [ "upsd.service" ];
       wantedBy = [ "multi-user.target" ];
-      # TODO: replace 'root' by another username.
-      script = "${pkgs.nut}/bin/upsdrvctl -u root start";
       serviceConfig = {
         Type = "oneshot";
         RemainAfterExit = true;
+        # TODO: replace 'root' by another username.
+        ExecStart = "${pkgs.nut}/bin/upsdrvctl -u root start";
       };
-      environment.NUT_CONFPATH = "/etc/nut/";
-      environment.NUT_STATEPATH = "/var/lib/nut/";
+      environment.NUT_CONFPATH = "/etc/nut";
+      environment.NUT_STATEPATH = "/var/lib/nut";
     };
 
     environment.etc = {
@@ -223,26 +571,26 @@ in
         ''
           maxstartdelay = ${toString cfg.maxStartDelay}
 
-          ${flip concatStringsSep (forEach (attrValues cfg.ups) (ups: ups.summary)) "
-
-          "}
+          ${concatStringsSep "\n\n" (forEach (attrValues cfg.ups) (ups: ups.summary))}
+        '';
+      "nut/upsd.conf".source = pkgs.writeText "upsd.conf"
+        ''
+          ${concatStringsSep "\n" (forEach cfg.upsd.listen (listen: "LISTEN ${listen.address} ${toString listen.port}"))}
+          ${cfg.upsd.extraConfig}
         '';
       "nut/upssched.conf".source = cfg.schedulerRules;
-      # These file are containing private information and thus should not
-      # be stored inside the Nix store.
-      /*
-      "nut/upsd.conf".source = "";
-      "nut/upsd.users".source = "";
-      "nut/upsmon.conf".source = "";
-      */
+      "nut/upsd.users".source = "/run/nut/upsd.users";
+      "nut/upsmon.conf".source = "/run/nut/upsmon.conf";
     };
 
     power.ups.schedulerRules = mkDefault "${pkgs.nut}/etc/upssched.conf.sample";
 
     systemd.tmpfiles.rules = [
       "d /var/state/ups -"
+      "d /var/lib/nut 700"
     ];
 
+    services.udev.packages = [ pkgs.nut ];
 
 /*
     users.users.nut =
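
A minimal sketch of a standalone setup using the new `users` and `upsmon.monitor` options; the `driver`/`port` values rely on the pre-existing `power.ups.ups.<name>` options not shown in this hunk, and all names and paths are placeholders:

```nix
{
  power.ups = {
    enable = true;
    mode = "standalone";
    ups.apc = {
      driver = "usbhid-ups";  # assumed pre-existing option of power.ups.ups.<name>
      port = "auto";
    };
    users.upsmon = {
      passwordFile = "/run/secrets/upsmon-password";
      upsmon = "master";
    };
    # monitor.<name>.system defaults to the attribute name,
    # passwordFile defaults to the referenced user's passwordFile.
    upsmon.monitor.apc.user = "upsmon";
  };
}
```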
diff --git a/nixos/modules/services/monitoring/watchdogd.nix b/nixos/modules/services/monitoring/watchdogd.nix
new file mode 100644
index 0000000000000..e8d104651c6ad
--- /dev/null
+++ b/nixos/modules/services/monitoring/watchdogd.nix
@@ -0,0 +1,131 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.watchdogd;
+
+  mkPluginOpts = plugin: defWarn: defCrit: {
+    enabled = mkEnableOption "watchdogd plugin ${plugin}";
+    interval = mkOption {
+      type = types.ints.unsigned;
+      default = 300;
+      description = ''
+        Number of seconds between polls.
+      '';
+    };
+    logmark = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to log current stats every poll interval.
+      '';
+    };
+    warning = mkOption {
+      type = types.numbers.nonnegative;
+      default = defWarn;
+      description = ''
+        The high watermark level. Alert sent to log.
+      '';
+    };
+    critical = mkOption {
+      type = types.numbers.nonnegative;
+      default = defCrit;
+      description = ''
+        The critical watermark level. Alert sent to log, followed by reboot or script action.
+      '';
+    };
+  };
+in {
+  options.services.watchdogd = {
+    enable = mkEnableOption "watchdogd, an advanced system & process supervisor";
+    package = mkPackageOption pkgs "watchdogd" { };
+
+    settings = mkOption {
+      type = with types; submodule {
+        freeformType = let
+          valueType = oneOf [
+            bool
+            int
+            float
+            str
+          ];
+        in attrsOf (either valueType (attrsOf valueType));
+
+        options = {
+          timeout = mkOption {
+            type = types.ints.unsigned;
+            default = 15;
+            description = ''
+              The WDT timeout before reset.
+            '';
+          };
+          interval = mkOption {
+            type = types.ints.unsigned;
+            default = 5;
+            description = ''
+              The kick interval, i.e. how often {manpage}`watchdogd(8)` should reset the WDT timer.
+            '';
+          };
+
+          safe-exit = mkOption {
+            type = types.bool;
+            default = true;
+            description = ''
+              With {var}`safe-exit` enabled, the daemon will ask the driver to disable the WDT before exiting.
+              However, some WDT drivers (or hardware) may not support this.
+            '';
+          };
+
+          filenr = mkPluginOpts "filenr" 0.9 1.0;
+
+          loadavg = mkPluginOpts "loadavg" 1.0 2.0;
+
+          meminfo = mkPluginOpts "meminfo" 0.9 0.95;
+        };
+      };
+      default = { };
+      description = ''
+        Configuration to put in {file}`watchdogd.conf`.
+        See {manpage}`watchdogd.conf(5)` for more details.
+      '';
+    };
+  };
+
+  config = let
+    toConfig = attrs: concatStringsSep "\n" (mapAttrsToList toValue attrs);
+
+    toValue = name: value:
+      if isAttrs value
+        then pipe value [
+          (mapAttrsToList toValue)
+          (map (s: "  ${s}"))
+          (concatStringsSep "\n")
+          (s: "${name} {\n${s}\n}")
+        ]
+      else if isBool value
+        then "${name} = ${boolToString value}"
+      else if any (f: f value) [isString isInt isFloat]
+        then "${name} = ${toString value}"
+      else throw ''
+        Found invalid type in `services.watchdogd.settings`: '${typeOf value}'
+      '';
+
+    watchdogdConf = pkgs.writeText "watchdogd.conf" (toConfig cfg.settings);
+  in mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services.watchdogd = {
+      documentation = [
+        "man:watchdogd(8)"
+        "man:watchdogd.conf(5)"
+      ];
+      wantedBy = [ "multi-user.target" ];
+      description = "Advanced system & process supervisor";
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${cfg.package}/bin/watchdogd -n -f ${watchdogdConf}";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ vifino ];
+}
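
A minimal sketch of the settings format rendered by `toConfig` above; the numbers are placeholders:

```nix
{
  services.watchdogd = {
    enable = true;
    settings = {
      timeout = 30;
      loadavg = {
        enabled = true;
        warning = 1.5;
        critical = 3.0;
      };
    };
  };
}
```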
diff --git a/nixos/modules/services/network-filesystems/drbd.nix b/nixos/modules/services/network-filesystems/drbd.nix
index e74ed391d48e3..79a1b768b4615 100644
--- a/nixos/modules/services/network-filesystems/drbd.nix
+++ b/nixos/modules/services/network-filesystems/drbd.nix
@@ -55,8 +55,8 @@ let cfg = config.services.drbd; in
       wants = [ "systemd-udev.settle.service" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
-        ExecStart = "${pkgs.drbd}/sbin/drbdadm up all";
-        ExecStop = "${pkgs.drbd}/sbin/drbdadm down all";
+        ExecStart = "${pkgs.drbd}/bin/drbdadm up all";
+        ExecStop = "${pkgs.drbd}/bin/drbdadm down all";
       };
     };
   };
diff --git a/nixos/modules/services/network-filesystems/eris-server.nix b/nixos/modules/services/network-filesystems/eris-server.nix
index 66eccfac408c4..104676a52c61f 100644
--- a/nixos/modules/services/network-filesystems/eris-server.nix
+++ b/nixos/modules/services/network-filesystems/eris-server.nix
@@ -3,6 +3,7 @@
 let
   cfg = config.services.eris-server;
   stateDirectoryPath = "\${STATE_DIRECTORY}";
+  nullOrStr = with lib.types; nullOr str;
 in {
 
   options.services.eris-server = {
@@ -26,7 +27,7 @@ in {
     };
 
     listenCoap = lib.mkOption {
-      type = lib.types.str;
+      type = nullOrStr;
       default = ":5683";
       example = "[::1]:5683";
       description = ''
@@ -39,8 +40,8 @@ in {
     };
 
     listenHttp = lib.mkOption {
-      type = lib.types.str;
-      default = "";
+      type = nullOrStr;
+      default = null;
       example = "[::1]:8080";
       description = "Server HTTP listen address. Do not listen by default.";
     };
@@ -58,8 +59,8 @@ in {
     };
 
     mountpoint = lib.mkOption {
-      type = lib.types.str;
-      default = "";
+      type = nullOrStr;
+      default = null;
       example = "/eris";
       description = ''
         Mountpoint for FUSE namespace that exposes "urn:eris:…" files.
@@ -69,33 +70,44 @@ in {
   };
 
   config = lib.mkIf cfg.enable {
+    assertions = [{
+      assertion = lib.strings.versionAtLeast cfg.package.version "20231219";
+      message =
+        "Version of `config.services.eris-server.package` is incompatible with this module";
+    }];
+
     systemd.services.eris-server = let
-      cmd =
-        "${cfg.package}/bin/eris-go server --coap '${cfg.listenCoap}' --http '${cfg.listenHttp}' ${
-          lib.optionalString cfg.decode "--decode "
-        }${
-          lib.optionalString (cfg.mountpoint != "")
-          ''--mountpoint "${cfg.mountpoint}" ''
-        }${lib.strings.escapeShellArgs cfg.backends}";
+      cmd = "${cfg.package}/bin/eris-go server"
+        + (lib.optionalString (cfg.listenCoap != null)
+          " --coap '${cfg.listenCoap}'")
+        + (lib.optionalString (cfg.listenHttp != null)
+          " --http '${cfg.listenHttp}'")
+        + (lib.optionalString cfg.decode " --decode")
+        + (lib.optionalString (cfg.mountpoint != null)
+          " --mountpoint '${cfg.mountpoint}'");
     in {
       description = "ERIS block server";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
-      script = lib.mkIf (cfg.mountpoint != "") ''
+      environment.ERIS_STORE_URL = toString cfg.backends;
+      script = lib.mkIf (cfg.mountpoint != null) ''
         export PATH=${config.security.wrapperDir}:$PATH
         ${cmd}
       '';
       serviceConfig = let
-        umounter = lib.mkIf (cfg.mountpoint != "")
+        umounter = lib.mkIf (cfg.mountpoint != null)
           "-${config.security.wrapperDir}/fusermount -uz ${cfg.mountpoint}";
-      in {
-        ExecStartPre = umounter;
-        ExecStart = lib.mkIf (cfg.mountpoint == "") cmd;
-        ExecStopPost = umounter;
-        Restart = "always";
-        RestartSec = 20;
-        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
-      };
+      in if (cfg.mountpoint == null) then {
+        ExecStart = cmd;
+      } else
+        {
+          ExecStartPre = umounter;
+          ExecStopPost = umounter;
+        } // {
+          Restart = "always";
+          RestartSec = 20;
+          AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+        };
     };
   };
 
diff --git a/nixos/modules/services/network-filesystems/kubo.nix b/nixos/modules/services/network-filesystems/kubo.nix
index fbf9b32a2b25a..10162c1633e7b 100644
--- a/nixos/modules/services/network-filesystems/kubo.nix
+++ b/nixos/modules/services/network-filesystems/kubo.nix
@@ -52,7 +52,7 @@ let
 
   multiaddrsToListenStreams = addrIn:
     let
-      addrs = if builtins.typeOf addrIn == "list"
+      addrs = if builtins.isList addrIn
       then addrIn else [ addrIn ];
       unfilteredResult = map multiaddrToListenStream addrs;
     in
@@ -60,7 +60,7 @@ let
 
   multiaddrsToListenDatagrams = addrIn:
     let
-      addrs = if builtins.typeOf addrIn == "list"
+      addrs = if builtins.isList addrIn
       then addrIn else [ addrIn ];
       unfilteredResult = map multiaddrToListenDatagram addrs;
     in
@@ -99,7 +99,12 @@ in
 
     services.kubo = {
 
-      enable = mkEnableOption (lib.mdDoc "Interplanetary File System (WARNING: may cause severe network degradation)");
+      enable = mkEnableOption (lib.mdDoc ''
+        the Interplanetary File System (WARNING: may cause severe network degradation).
+        NOTE: after enabling this option and rebuilding your system, you need to log out
+        and back in for the `IPFS_PATH` environment variable to be present in your shell.
+        Until you do that, the CLI tools won't be able to talk to the daemon by default.
+      '');
 
       package = mkPackageOption pkgs "kubo" { };
 
@@ -147,18 +152,6 @@ in
         description = lib.mdDoc "Whether Kubo should try to run the fs-repo-migration at startup.";
       };
 
-      ipfsMountDir = mkOption {
-        type = types.str;
-        default = "/ipfs";
-        description = lib.mdDoc "Where to mount the IPFS namespace to";
-      };
-
-      ipnsMountDir = mkOption {
-        type = types.str;
-        default = "/ipns";
-        description = lib.mdDoc "Where to mount the IPNS namespace to";
-      };
-
       enableGC = mkOption {
         type = types.bool;
         default = false;
@@ -205,6 +198,18 @@ in
               ];
               description = lib.mdDoc "Where Kubo listens for incoming p2p connections";
             };
+
+            Mounts.IPFS = mkOption {
+              type = types.str;
+              default = "/ipfs";
+              description = lib.mdDoc "Where to mount the IPFS namespace to";
+            };
+
+            Mounts.IPNS = mkOption {
+              type = types.str;
+              default = "/ipns";
+              description = lib.mdDoc "Where to mount the IPNS namespace to";
+            };
           };
         };
         description = lib.mdDoc ''
@@ -274,16 +279,17 @@ in
       {
         assertion = !((lib.versionAtLeast cfg.package.version "0.21") && (builtins.hasAttr "Experimental" cfg.settings) && (builtins.hasAttr "AcceleratedDHTClient" cfg.settings.Experimental));
         message = ''
-    The `services.kubo.settings.Experimental.AcceleratedDHTClient` option was renamed to `services.kubo.settings.Routing.AcceleratedDHTClient` in Kubo 0.21.
-  '';
+          The `services.kubo.settings.Experimental.AcceleratedDHTClient` option was renamed to `services.kubo.settings.Routing.AcceleratedDHTClient` in Kubo 0.21.
+        '';
       }
     ];
 
     environment.systemPackages = [ cfg.package ];
     environment.variables.IPFS_PATH = fakeKuboRepo;
 
-    # https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size
+    # https://github.com/quic-go/quic-go/wiki/UDP-Buffer-Sizes
     boot.kernel.sysctl."net.core.rmem_max" = mkDefault 2500000;
+    boot.kernel.sysctl."net.core.wmem_max" = mkDefault 2500000;
 
     programs.fuse = mkIf cfg.autoMount {
       userAllowOther = true;
@@ -309,8 +315,8 @@ in
     systemd.tmpfiles.rules = [
       "d '${cfg.dataDir}' - ${cfg.user} ${cfg.group} - -"
     ] ++ optionals cfg.autoMount [
-      "d '${cfg.ipfsMountDir}' - ${cfg.user} ${cfg.group} - -"
-      "d '${cfg.ipnsMountDir}' - ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.settings.Mounts.IPFS}' - ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.settings.Mounts.IPNS}' - ${cfg.user} ${cfg.group} - -"
     ];
 
     # The hardened systemd unit breaks the fuse-mount function according to documentation in the unit file itself
@@ -320,8 +326,6 @@ in
 
     services.kubo.settings = mkIf cfg.autoMount {
       Mounts.FuseAllowOther = lib.mkDefault true;
-      Mounts.IPFS = lib.mkDefault cfg.ipfsMountDir;
-      Mounts.IPNS = lib.mkDefault cfg.ipnsMountDir;
     };
 
     systemd.services.ipfs = {
@@ -352,8 +356,8 @@ in
           ipfs --offline config replace -
       '';
       postStop = mkIf cfg.autoMount ''
-        # After an unclean shutdown the fuse mounts at cfg.ipnsMountDir and cfg.ipfsMountDir are locked
-        umount --quiet '${cfg.ipnsMountDir}' '${cfg.ipfsMountDir}' || true
+        # After an unclean shutdown the fuse mounts at cfg.settings.Mounts.IPFS and cfg.settings.Mounts.IPNS are locked
+        umount --quiet '${cfg.settings.Mounts.IPFS}' '${cfg.settings.Mounts.IPNS}' || true
       '';
       serviceConfig = {
         ExecStart = [ "" "${cfg.package}/bin/ipfs daemon ${kuboFlags}" ];
@@ -361,6 +365,8 @@ in
         Group = cfg.group;
         StateDirectory = "";
         ReadWritePaths = optionals (!cfg.autoMount) [ "" cfg.dataDir ];
+        # Make sure the socket units are started before ipfs.service
+        Sockets = [ "ipfs-gateway.socket" "ipfs-api.socket" ];
       } // optionalAttrs (cfg.serviceFdlimit != null) { LimitNOFILE = cfg.serviceFdlimit; };
     } // optionalAttrs (!cfg.startWhenNeeded) {
       wantedBy = [ "default.target" ];
@@ -403,8 +409,8 @@ in
     (mkRenamedOptionModule [ "services" "ipfs" "defaultMode" ] [ "services" "kubo" "defaultMode" ])
     (mkRenamedOptionModule [ "services" "ipfs" "autoMount" ] [ "services" "kubo" "autoMount" ])
     (mkRenamedOptionModule [ "services" "ipfs" "autoMigrate" ] [ "services" "kubo" "autoMigrate" ])
-    (mkRenamedOptionModule [ "services" "ipfs" "ipfsMountDir" ] [ "services" "kubo" "ipfsMountDir" ])
-    (mkRenamedOptionModule [ "services" "ipfs" "ipnsMountDir" ] [ "services" "kubo" "ipnsMountDir" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "ipfsMountDir" ] [ "services" "kubo" "settings" "Mounts" "IPFS" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "ipnsMountDir" ] [ "services" "kubo" "settings" "Mounts" "IPNS" ])
     (mkRenamedOptionModule [ "services" "ipfs" "gatewayAddress" ] [ "services" "kubo" "settings" "Addresses" "Gateway" ])
     (mkRenamedOptionModule [ "services" "ipfs" "apiAddress" ] [ "services" "kubo" "settings" "Addresses" "API" ])
     (mkRenamedOptionModule [ "services" "ipfs" "swarmAddress" ] [ "services" "kubo" "settings" "Addresses" "Swarm" ])
@@ -419,5 +425,7 @@ in
     (mkRenamedOptionModule [ "services" "kubo" "gatewayAddress" ] [ "services" "kubo" "settings" "Addresses" "Gateway" ])
     (mkRenamedOptionModule [ "services" "kubo" "apiAddress" ] [ "services" "kubo" "settings" "Addresses" "API" ])
     (mkRenamedOptionModule [ "services" "kubo" "swarmAddress" ] [ "services" "kubo" "settings" "Addresses" "Swarm" ])
+    (mkRenamedOptionModule [ "services" "kubo" "ipfsMountDir" ] [ "services" "kubo" "settings" "Mounts" "IPFS" ])
+    (mkRenamedOptionModule [ "services" "kubo" "ipnsMountDir" ] [ "services" "kubo" "settings" "Mounts" "IPNS" ])
   ];
 }
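
A minimal sketch of the mount directories after the rename; the old `ipfsMountDir`/`ipnsMountDir` options keep working through the `mkRenamedOptionModule` entries above:

```nix
{
  services.kubo = {
    enable = true;
    autoMount = true;
    settings.Mounts = {
      IPFS = "/ipfs";  # formerly services.kubo.ipfsMountDir
      IPNS = "/ipns";  # formerly services.kubo.ipnsMountDir
    };
  };
}
```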
diff --git a/nixos/modules/services/network-filesystems/openafs/client.nix b/nixos/modules/services/network-filesystems/openafs/client.nix
index bb0fee087e62e..02c3482ec657b 100644
--- a/nixos/modules/services/network-filesystems/openafs/client.nix
+++ b/nixos/modules/services/network-filesystems/openafs/client.nix
@@ -215,6 +215,7 @@ in
     systemd.services.afsd = {
       description = "AFS client";
       wantedBy = [ "multi-user.target" ];
+      wants = lib.optional (!cfg.startDisconnected) "network-online.target";
       after = singleton (if cfg.startDisconnected then  "network.target" else "network-online.target");
       serviceConfig = { RemainAfterExit = true; };
       restartIfChanged = false;
diff --git a/nixos/modules/services/network-filesystems/samba.nix b/nixos/modules/services/network-filesystems/samba.nix
index 5d02eac8e9f1a..ef368ddbeefd5 100644
--- a/nixos/modules/services/network-filesystems/samba.nix
+++ b/nixos/modules/services/network-filesystems/samba.nix
@@ -154,7 +154,7 @@ in
       };
 
       securityType = mkOption {
-        type = types.str;
+        type = types.enum [ "auto" "user" "domain" "ads" ];
         default = "user";
         description = lib.mdDoc "Samba security type";
       };
diff --git a/nixos/modules/services/networking/avahi-daemon.nix b/nixos/modules/services/networking/avahi-daemon.nix
index de51843ba6f9c..782681018116c 100644
--- a/nixos/modules/services/networking/avahi-daemon.nix
+++ b/nixos/modules/services/networking/avahi-daemon.nix
@@ -42,6 +42,7 @@ in
 {
   imports = [
     (lib.mkRenamedOptionModule [ "services" "avahi" "interfaces" ] [ "services" "avahi" "allowInterfaces" ])
+    (lib.mkRenamedOptionModule [ "services" "avahi" "nssmdns" ] [ "services" "avahi" "nssmdns4" ])
   ];
 
   options.services.avahi = {
@@ -93,8 +94,7 @@ in
 
     ipv6 = mkOption {
       type = types.bool;
-      default = config.networking.enableIPv6;
-      defaultText = literalExpression "config.networking.enableIPv6";
+      default = false;
       description = lib.mdDoc "Whether to use IPv6.";
     };
 
@@ -218,13 +218,28 @@ in
       };
     };
 
-    nssmdns = mkOption {
+    nssmdns4 = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable the mDNS NSS (Name Service Switch) plug-in for IPv4.
+        Enabling it allows applications to resolve names in the `.local`
+        domain by transparently querying the Avahi daemon.
+      '';
+    };
+
+    nssmdns6 = mkOption {
       type = types.bool;
       default = false;
       description = lib.mdDoc ''
-        Whether to enable the mDNS NSS (Name Service Switch) plug-in.
+        Whether to enable the mDNS NSS (Name Service Switch) plug-in for IPv6.
         Enabling it allows applications to resolve names in the `.local`
         domain by transparently querying the Avahi daemon.
+
+        ::: {.note}
+        Because most mDNS responders only register local IPv4 addresses, most users will want
+        to leave this option disabled to avoid long timeouts when applications first try to resolve the non-existent IPv6 address.
+        :::
       '';
     };
 
@@ -256,10 +271,19 @@ in
 
     users.groups.avahi = { };
 
-    system.nssModules = optional cfg.nssmdns pkgs.nssmdns;
-    system.nssDatabases.hosts = optionals cfg.nssmdns (mkMerge [
-      (mkBefore [ "mdns_minimal [NOTFOUND=return]" ]) # before resolve
-      (mkAfter [ "mdns" ]) # after dns
+    system.nssModules = optional (cfg.nssmdns4 || cfg.nssmdns6) pkgs.nssmdns;
+    system.nssDatabases.hosts = let
+      mdns = if (cfg.nssmdns4 && cfg.nssmdns6) then
+        "mdns"
+      else if (!cfg.nssmdns4 && cfg.nssmdns6) then
+        "mdns6"
+      else if (cfg.nssmdns4 && !cfg.nssmdns6) then
+        "mdns4"
+      else
+        "";
+    in optionals (cfg.nssmdns4 || cfg.nssmdns6) (mkMerge [
+      (mkBefore [ "${mdns}_minimal [NOTFOUND=return]" ]) # before resolve
+      (mkAfter [ "${mdns}" ]) # after dns
     ]);
 
     environment.systemPackages = [ cfg.package ];
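
A minimal sketch of the split NSS options; as the note above explains, `nssmdns6` is usually best left disabled:

```nix
{
  services.avahi = {
    enable = true;
    nssmdns4 = true;    # adds mdns4_minimal/mdns4 to the hosts NSS database
    # nssmdns6 = true;  # usually omitted to avoid IPv6 resolution timeouts
  };
}
```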
diff --git a/nixos/modules/services/networking/bitcoind.nix b/nixos/modules/services/networking/bitcoind.nix
index 4512e666ba5ba..59722e31c62ab 100644
--- a/nixos/modules/services/networking/bitcoind.nix
+++ b/nixos/modules/services/networking/bitcoind.nix
@@ -198,6 +198,7 @@ in
         '';
       in {
         description = "Bitcoin daemon";
+        wants = [ "network-online.target" ];
         after = [ "network-online.target" ];
         wantedBy = [ "multi-user.target" ];
         serviceConfig = {
diff --git a/nixos/modules/services/networking/dante.nix b/nixos/modules/services/networking/dante.nix
index 605f2d74f8275..f0d1d6305c54d 100644
--- a/nixos/modules/services/networking/dante.nix
+++ b/nixos/modules/services/networking/dante.nix
@@ -47,6 +47,7 @@ in
 
     systemd.services.dante = {
       description   = "Dante SOCKS v4 and v5 compatible proxy server";
+      wants         = [ "network-online.target" ];
       after         = [ "network-online.target" ];
       wantedBy      = [ "multi-user.target" ];
 
diff --git a/nixos/modules/services/networking/ddclient.nix b/nixos/modules/services/networking/ddclient.nix
index 8f4fb0bc78d4e..18f205b8d99ef 100644
--- a/nixos/modules/services/networking/ddclient.nix
+++ b/nixos/modules/services/networking/ddclient.nix
@@ -126,7 +126,7 @@ with lib;
         default = "dyndns2";
         type = str;
         description = lib.mdDoc ''
-          Protocol to use with dynamic DNS provider (see https://sourceforge.net/p/ddclient/wiki/protocols).
+          Protocol to use with dynamic DNS provider (see https://ddclient.net/protocols.html).
         '';
       };
 
@@ -217,7 +217,7 @@ with lib;
         inherit RuntimeDirectory;
         inherit StateDirectory;
         Type = "oneshot";
-        ExecStartPre = "!${pkgs.writeShellScript "ddclient-prestart" preStart}";
+        ExecStartPre = [ "!${pkgs.writeShellScript "ddclient-prestart" preStart}" ];
         ExecStart = "${lib.getExe cfg.package} -file /run/${RuntimeDirectory}/ddclient.conf";
       };
     };
diff --git a/nixos/modules/services/networking/dhcpcd.nix b/nixos/modules/services/networking/dhcpcd.nix
index 8b6d3fc55f3e4..2b59352ac616b 100644
--- a/nixos/modules/services/networking/dhcpcd.nix
+++ b/nixos/modules/services/networking/dhcpcd.nix
@@ -98,7 +98,7 @@ let
           # anything ever again ("couldn't resolve ..., giving up on
           # it"), so we silently lose time synchronisation. This also
           # applies to openntpd.
-          /run/current-system/systemd/bin/systemctl try-reload-or-restart ntpd.service openntpd.service chronyd.service || true
+          /run/current-system/systemd/bin/systemctl try-reload-or-restart ntpd.service openntpd.service chronyd.service ntpd-rs.service || true
       fi
 
       ${cfg.runHook}
diff --git a/nixos/modules/services/networking/dnsmasq.md b/nixos/modules/services/networking/dnsmasq.md
new file mode 100644
index 0000000000000..6fc9178b1c0d5
--- /dev/null
+++ b/nixos/modules/services/networking/dnsmasq.md
@@ -0,0 +1,68 @@
+# Dnsmasq {#module-services-networking-dnsmasq}
+
+Dnsmasq is an integrated DNS, DHCP and TFTP server for small networks.
+
+## Configuration {#module-services-networking-dnsmasq-configuration}
+
+### An authoritative DHCP and DNS server on a home network {#module-services-networking-dnsmasq-configuration-home}
+
+On a home network, you can use Dnsmasq as a DHCP and DNS server. New devices on
+your network will be configured by Dnsmasq, and instructed to use it as the DNS
+server by default. This allows you to rely on your own server to perform DNS
+queries and caching, with DNSSEC enabled.
+
+The following example assumes that
+
+- you have disabled your router's integrated DHCP server, if it has one
+- your router's address is set in  [](#opt-networking.defaultGateway.address)
+- your system's Ethernet interface is `eth0`
+- you have configured the address(es) to forward DNS queries in [](#opt-networking.nameservers)
+
+```nix
+{
+  services.dnsmasq = {
+    enable = true;
+    settings = {
+      interface = "eth0";
+      bind-interfaces = true; # Only bind to the specified interface
+      dhcp-authoritative = true; # Should be set when dnsmasq is definitely the only DHCP server on a network
+
+      server = config.networking.nameservers; # Upstream dns servers to which requests should be forwarded
+
+      dhcp-host = [
+        # Give the current system a fixed address of 192.168.0.254
+        "dc:a6:32:0b:ea:b9,192.168.0.254,${config.networking.hostName},infinite"
+      ];
+
+      dhcp-option = [
+        # Address of the gateway, i.e. your router
+        "option:router,${config.networking.defaultGateway.address}"
+      ];
+
+      dhcp-range = [
+        # Range of IPv4 addresses to give out
+        # <range start>,<range end>,<lease time>
+        "192.168.0.10,192.168.0.253,24h"
+        # Enable stateless IPv6 allocation
+        "::f,::ff,constructor:eth0,ra-stateless"
+      ];
+
+      dhcp-rapid-commit = true; # Faster DHCP negotiation for IPv6
+      local-service = true; # Accept DNS queries only from hosts whose address is on a local subnet
+      log-queries = true; # Log results of all DNS queries
+      bogus-priv = true; # Don't forward requests for the local address ranges (192.168.x.x etc) to upstream nameservers
+      domain-needed = true; # Don't forward requests without dots or domain parts to upstream nameservers
+
+      dnssec = true; # Enable DNSSEC
+      # DNSSEC trust anchor. Source: https://data.iana.org/root-anchors/root-anchors.xml
+      trust-anchor = ".,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC683457104237C7F8EC8D";
+    };
+  };
+}
+```
+
+## References {#module-services-networking-dnsmasq-references}
+
+- Upstream website: <https://dnsmasq.org>
+- Manpage: <https://dnsmasq.org/docs/dnsmasq-man.html>
+- FAQ: <https://dnsmasq.org/docs/FAQ>
diff --git a/nixos/modules/services/networking/dnsmasq.nix b/nixos/modules/services/networking/dnsmasq.nix
index 8d1ca36c38ed2..d01a1b6707a53 100644
--- a/nixos/modules/services/networking/dnsmasq.nix
+++ b/nixos/modules/services/networking/dnsmasq.nix
@@ -181,4 +181,6 @@ in
         restartTriggers = [ config.environment.etc.hosts.source ];
     };
   };
+
+  meta.doc = ./dnsmasq.md;
 }
diff --git a/nixos/modules/services/networking/ejabberd.nix b/nixos/modules/services/networking/ejabberd.nix
index b10a3d9f21df6..78af256f9c81b 100644
--- a/nixos/modules/services/networking/ejabberd.nix
+++ b/nixos/modules/services/networking/ejabberd.nix
@@ -120,6 +120,12 @@ in {
         if [ -z "$(ls -A '${cfg.spoolDir}')" ]; then
           touch "${cfg.spoolDir}/.firstRun"
         fi
+
+        if ! test -e ${cfg.spoolDir}/.erlang.cookie; then
+          touch ${cfg.spoolDir}/.erlang.cookie
+          chmod 600 ${cfg.spoolDir}/.erlang.cookie
+          dd if=/dev/random bs=16 count=1 | base64 > ${cfg.spoolDir}/.erlang.cookie
+        fi
       '';
 
       postStart = ''
diff --git a/nixos/modules/services/networking/ergo.nix b/nixos/modules/services/networking/ergo.nix
index 033d4d9caf8a8..1bee0f43f988a 100644
--- a/nixos/modules/services/networking/ergo.nix
+++ b/nixos/modules/services/networking/ergo.nix
@@ -114,6 +114,7 @@ in {
     systemd.services.ergo = {
       description = "ergo server";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       serviceConfig = {
         User = cfg.user;
diff --git a/nixos/modules/services/networking/expressvpn.nix b/nixos/modules/services/networking/expressvpn.nix
index 30de6987d31fe..05c24d8bccffc 100644
--- a/nixos/modules/services/networking/expressvpn.nix
+++ b/nixos/modules/services/networking/expressvpn.nix
@@ -21,6 +21,7 @@ with lib;
         RestartSec = 5;
       };
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network.target" "network-online.target" ];
     };
   };
diff --git a/nixos/modules/services/networking/firewall-iptables.nix b/nixos/modules/services/networking/firewall-iptables.nix
index e900868387203..2d11517700086 100644
--- a/nixos/modules/services/networking/firewall-iptables.nix
+++ b/nixos/modules/services/networking/firewall-iptables.nix
@@ -308,8 +308,9 @@ in
       description = "Firewall";
       wantedBy = [ "sysinit.target" ];
       wants = [ "network-pre.target" ];
-      before = [ "network-pre.target" ];
       after = [ "systemd-modules-load.service" ];
+      before = [ "network-pre.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
 
       path = [ cfg.package ] ++ cfg.extraPackages;
 
diff --git a/nixos/modules/services/networking/frp.nix b/nixos/modules/services/networking/frp.nix
index 218d532c12daf..eb022308bc29f 100644
--- a/nixos/modules/services/networking/frp.nix
+++ b/nixos/modules/services/networking/frp.nix
@@ -4,8 +4,8 @@ with lib;
 
 let
   cfg = config.services.frp;
-  settingsFormat = pkgs.formats.ini { };
-  configFile = settingsFormat.generate "frp.ini" cfg.settings;
+  settingsFormat = pkgs.formats.toml { };
+  configFile = settingsFormat.generate "frp.toml" cfg.settings;
   isClient = (cfg.role == "client");
   isServer = (cfg.role == "server");
 in
@@ -31,17 +31,13 @@ in
         default = { };
         description = mdDoc ''
           Frp configuration, for configuration options
-          see the example of [client](https://github.com/fatedier/frp/blob/dev/conf/frpc_legacy_full.ini)
-          or [server](https://github.com/fatedier/frp/blob/dev/conf/frps_legacy_full.ini) on github.
-        '';
-        example = literalExpression ''
-          {
-            common = {
-              server_addr = "x.x.x.x";
-              server_port = 7000;
-            };
-          }
+          see the [client](https://github.com/fatedier/frp/blob/dev/conf/frpc_full_example.toml)
+          or [server](https://github.com/fatedier/frp/blob/dev/conf/frps_full_example.toml) example on GitHub.
         '';
+        example = {
+          serverAddr = "x.x.x.x";
+          serverPort = 7000;
+        };
       };
     };
   };
@@ -62,7 +58,7 @@ in
             Type = "simple";
             Restart = "on-failure";
             RestartSec = 15;
-            ExecStart = "${cfg.package}/bin/${executableFile} -c ${configFile}";
+            ExecStart = "${cfg.package}/bin/${executableFile} --strict_config -c ${configFile}";
             StateDirectoryMode = optionalString isServer "0700";
             DynamicUser = true;
             # Hardening
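
A minimal client sketch matching the new TOML-based settings format; the server address is a placeholder:

```nix
{
  services.frp = {
    enable = true;
    role = "client";
    settings = {
      serverAddr = "x.x.x.x";
      serverPort = 7000;
    };
  };
}
```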
diff --git a/nixos/modules/services/networking/gns3-server.md b/nixos/modules/services/networking/gns3-server.md
new file mode 100644
index 0000000000000..9320d914fbd3a
--- /dev/null
+++ b/nixos/modules/services/networking/gns3-server.md
@@ -0,0 +1,31 @@
+# GNS3 Server {#module-services-gns3-server}
+
+[GNS3](https://www.gns3.com/), a network software emulator.
+
+## Basic Usage {#module-services-gns3-server-basic-usage}
+
+A minimal configuration looks like this:
+
+```nix
+{
+  services.gns3-server = {
+    enable = true;
+
+    auth = {
+      enable = true;
+      user = "gns3";
+      passwordFile = "/var/lib/secrets/gns3_password";
+    };
+
+    ssl = {
+      enable = true;
+      certFile = "/var/lib/gns3/ssl/cert.pem";
+      keyFile = "/var/lib/gns3/ssl/key.pem";
+    };
+
+    dynamips.enable = true;
+    ubridge.enable = true;
+    vpcs.enable = true;
+  };
+}
+```
diff --git a/nixos/modules/services/networking/gns3-server.nix b/nixos/modules/services/networking/gns3-server.nix
new file mode 100644
index 0000000000000..25583765de672
--- /dev/null
+++ b/nixos/modules/services/networking/gns3-server.nix
@@ -0,0 +1,263 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.gns3-server;
+
+  settingsFormat = pkgs.formats.ini { };
+  configFile = settingsFormat.generate "gns3-server.conf" cfg.settings;
+
+in {
+  meta = {
+    doc = ./gns3-server.md;
+    maintainers = [ lib.maintainers.anthonyroussel ];
+  };
+
+  options = {
+    services.gns3-server = {
+      enable = lib.mkEnableOption (lib.mdDoc "GNS3 Server daemon");
+
+      package = lib.mkPackageOptionMD pkgs "gns3-server" { };
+
+      auth = {
+        enable = lib.mkEnableOption (lib.mdDoc "password based HTTP authentication to access the GNS3 Server");
+
+        user = lib.mkOption {
+          type = lib.types.nullOr lib.types.str;
+          default = null;
+          example = "gns3";
+          description = lib.mdDoc ''Username used to access the GNS3 Server.'';
+        };
+
+        passwordFile = lib.mkOption {
+          type = lib.types.nullOr lib.types.path;
+          default = null;
+          example = "/run/secrets/gns3-server-password";
+          description = lib.mdDoc ''
+            A file containing the password to access the GNS3 Server.
+
+            ::: {.warning}
+            This should be a string, not a nix path, since nix paths
+            are copied into the world-readable nix store.
+            :::
+          '';
+        };
+      };
+
+      settings = lib.mkOption {
+        type = lib.types.submodule { freeformType = settingsFormat.type; };
+        default = {};
+        example = { host = "127.0.0.1"; port = 3080; };
+        description = lib.mdDoc ''
+          The global options in the `config` file, in INI format.
+
+          Refer to <https://docs.gns3.com/docs/using-gns3/administration/gns3-server-configuration-file/>
+          for all available options.
+        '';
+      };
+
+      log = {
+        file = lib.mkOption {
+          type = lib.types.nullOr lib.types.path;
+          default = "/var/log/gns3/server.log";
+          description = lib.mdDoc ''Path of the file GNS3 Server should log to.'';
+        };
+
+        debug = lib.mkEnableOption (lib.mdDoc "debug logging");
+      };
+
+      ssl = {
+        enable = lib.mkEnableOption (lib.mdDoc "SSL encryption");
+
+        certFile = lib.mkOption {
+          type = lib.types.nullOr lib.types.path;
+          default = null;
+          example = "/var/lib/gns3/ssl/server.pem";
+          description = lib.mdDoc ''
+            Path to the SSL certificate file. This certificate will
+            be offered to, and may be verified by, clients.
+          '';
+        };
+
+        keyFile = lib.mkOption {
+          type = lib.types.nullOr lib.types.path;
+          default = null;
+          example = "/var/lib/gns3/ssl/server.key";
+          description = lib.mdDoc "Private key file for the certificate.";
+        };
+      };
+
+      dynamips = {
+        enable = lib.mkEnableOption (lib.mdDoc "Dynamips support");
+        package = lib.mkPackageOptionMD pkgs "dynamips" { };
+      };
+
+      ubridge = {
+        enable = lib.mkEnableOption (lib.mdDoc "uBridge support");
+        package = lib.mkPackageOptionMD pkgs "ubridge" { };
+      };
+
+      vpcs = {
+        enable = lib.mkEnableOption (lib.mdDoc "VPCS support");
+        package = lib.mkPackageOptionMD pkgs "vpcs" { };
+      };
+    };
+  };
+
+  config = let
+    flags = {
+      enableDocker = config.virtualisation.docker.enable;
+      enableLibvirtd = config.virtualisation.libvirtd.enable;
+    };
+
+  in lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.ssl.enable -> cfg.ssl.certFile != null;
+        message = "Please provide a certificate to use for SSL encryption.";
+      }
+      {
+        assertion = cfg.ssl.enable -> cfg.ssl.keyFile != null;
+        message = "Please provide a private key to use for SSL encryption.";
+      }
+      {
+        assertion = cfg.auth.enable -> cfg.auth.user != null;
+        message = "Please provide a username to use for HTTP authentication.";
+      }
+      {
+        assertion = cfg.auth.enable -> cfg.auth.passwordFile != null;
+        message = "Please provide a password file to use for HTTP authentication.";
+      }
+    ];
+
+    users.groups.ubridge = lib.mkIf cfg.ubridge.enable { };
+
+    security.wrappers.ubridge = lib.mkIf cfg.ubridge.enable {
+      capabilities = "cap_net_raw,cap_net_admin=eip";
+      group = "ubridge";
+      owner = "root";
+      permissions = "u=rwx,g=rx,o=r";
+      source = lib.getExe cfg.ubridge.package;
+    };
+
+    services.gns3-server.settings = lib.mkMerge [
+      {
+        Server = {
+          appliances_path = lib.mkDefault "/var/lib/gns3/appliances";
+          configs_path = lib.mkDefault "/var/lib/gns3/configs";
+          images_path = lib.mkDefault "/var/lib/gns3/images";
+          projects_path = lib.mkDefault "/var/lib/gns3/projects";
+          symbols_path = lib.mkDefault "/var/lib/gns3/symbols";
+        };
+      }
+      (lib.mkIf (cfg.ubridge.enable) {
+        Server.ubridge_path = lib.mkDefault (lib.getExe cfg.ubridge.package);
+      })
+      (lib.mkIf (cfg.auth.enable) {
+        Server = {
+          auth = lib.mkDefault (lib.boolToString cfg.auth.enable);
+          user = lib.mkDefault cfg.auth.user;
+          password = lib.mkDefault "@AUTH_PASSWORD@";
+        };
+      })
+      (lib.mkIf (cfg.vpcs.enable) {
+        VPCS.vpcs_path = lib.mkDefault (lib.getExe cfg.vpcs.package);
+      })
+      (lib.mkIf (cfg.dynamips.enable) {
+        Dynamips.dynamips_path = lib.mkDefault (lib.getExe cfg.dynamips.package);
+      })
+    ];
+
+    systemd.services.gns3-server = let
+      commandArgs = lib.cli.toGNUCommandLineShell { } {
+        config = "/etc/gns3/gns3_server.conf";
+        pid = "/run/gns3/server.pid";
+        log = cfg.log.file;
+        ssl = cfg.ssl.enable;
+        # These are implicitly not set if `null`
+        certfile = cfg.ssl.certFile;
+        certkey = cfg.ssl.keyFile;
+      };
+    in
+    {
+      description = "GNS3 Server";
+
+      after = [ "network.target" "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
+
+      # configFile cannot be stored in RuntimeDirectory, because GNS3
+      # uses the `--config` base path to store supplementary configuration files at runtime.
+      #
+      preStart = ''
+        install -m660 ${configFile} /etc/gns3/gns3_server.conf
+
+        ${lib.optionalString cfg.auth.enable ''
+          ${pkgs.replace-secret}/bin/replace-secret \
+            '@AUTH_PASSWORD@' \
+            "''${CREDENTIALS_DIRECTORY}/AUTH_PASSWORD" \
+            /etc/gns3/gns3_server.conf
+        ''}
+      '';
+
+      path = lib.optional flags.enableLibvirtd pkgs.qemu;
+
+      reloadTriggers = [ configFile ];
+
+      serviceConfig = {
+        ConfigurationDirectory = "gns3";
+        ConfigurationDirectoryMode = "0750";
+        DynamicUser = true;
+        Environment = "HOME=%S/gns3";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        ExecStart = "${lib.getExe cfg.package} ${commandArgs}";
+        Group = "gns3";
+        LimitNOFILE = 16384;
+        LoadCredential = lib.mkIf cfg.auth.enable [ "AUTH_PASSWORD:${cfg.auth.passwordFile}" ];
+        LogsDirectory = "gns3";
+        LogsDirectoryMode = "0750";
+        PIDFile = "/run/gns3/server.pid";
+        Restart = "on-failure";
+        RestartSec = 5;
+        RuntimeDirectory = "gns3";
+        StateDirectory = "gns3";
+        StateDirectoryMode = "0750";
+        SupplementaryGroups = lib.optional flags.enableDocker "docker"
+          ++ lib.optional flags.enableLibvirtd "libvirtd"
+          ++ lib.optional cfg.ubridge.enable "ubridge";
+        User = "gns3";
+        WorkingDirectory = "%S/gns3";
+
+        # Hardening
+        DeviceAllow = lib.optional flags.enableLibvirtd "/dev/kvm";
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        # Don't restrict ProcSubset because python3Packages.psutil requires read access to /proc/stat
+        # ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          "AF_NETLINK"
+          "AF_UNIX"
+          "AF_PACKET"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        UMask = "0077";
+      };
+    };
+  };
+}
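
Beyond the minimal example in gns3-server.md above, the freeform `settings` option can override or extend the generated INI file. A hedged sketch, assuming the same `Server` section layout the module itself writes:

```nix
{
  services.gns3-server = {
    enable = true;
    # Freeform INI settings; merged with the defaults the module sets
    # (paths under /var/lib/gns3, the ubridge/dynamips/vpcs binaries, ...).
    settings.Server = {
      host = "127.0.0.1";
      port = 3080;
    };
  };
}
```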
diff --git a/nixos/modules/services/networking/harmonia.nix b/nixos/modules/services/networking/harmonia.nix
index beaa7d00b6ce8..b384ac9261376 100644
--- a/nixos/modules/services/networking/harmonia.nix
+++ b/nixos/modules/services/networking/harmonia.nix
@@ -29,6 +29,11 @@ in
 
   config = lib.mkIf cfg.enable {
     nix.settings.extra-allowed-users = [ "harmonia" ];
+    users.users.harmonia = {
+      isSystemUser = true;
+      group = "harmonia";
+    };
+    users.groups.harmonia = { };
 
     systemd.services.harmonia = {
       description = "harmonia binary cache service";
@@ -50,7 +55,7 @@ in
         ExecStart = lib.getExe cfg.package;
         User = "harmonia";
         Group = "harmonia";
-        DynamicUser = true;
+        Restart = "on-failure";
         PrivateUsers = true;
         DeviceAllow = [ "" ];
         UMask = "0066";
diff --git a/nixos/modules/services/networking/headscale.nix b/nixos/modules/services/networking/headscale.nix
index 4224a0578cc30..95b5fcf6ebde8 100644
--- a/nixos/modules/services/networking/headscale.nix
+++ b/nixos/modules/services/networking/headscale.nix
@@ -460,6 +460,7 @@ in {
 
     systemd.services.headscale = {
       description = "headscale coordination server for Tailscale";
+      wants = [ "network-online.target" ];
       after = ["network-online.target"];
       wantedBy = ["multi-user.target"];
       restartTriggers = [configFile];
diff --git a/nixos/modules/services/networking/ircd-hybrid/default.nix b/nixos/modules/services/networking/ircd-hybrid/default.nix
index 554b0f7bb8b44..64a34cc52d25a 100644
--- a/nixos/modules/services/networking/ircd-hybrid/default.nix
+++ b/nixos/modules/services/networking/ircd-hybrid/default.nix
@@ -125,7 +125,8 @@ in
 
     systemd.services.ircd-hybrid = {
       description = "IRCD Hybrid server";
-      after = [ "started networking" ];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       script = "${ircdService}/bin/control start";
     };
diff --git a/nixos/modules/services/networking/ivpn.nix b/nixos/modules/services/networking/ivpn.nix
index 6df630c1f1947..6c9ae599e670f 100644
--- a/nixos/modules/services/networking/ivpn.nix
+++ b/nixos/modules/services/networking/ivpn.nix
@@ -27,7 +27,7 @@ with lib;
     systemd.services.ivpn-service = {
       description = "iVPN daemon";
       wantedBy = [ "multi-user.target" ];
-      wants = [ "network.target" ];
+      wants = [ "network.target" "network-online.target" ];
       after = [
         "network-online.target"
         "NetworkManager.service"
diff --git a/nixos/modules/services/networking/iwd.nix b/nixos/modules/services/networking/iwd.nix
index b74f5d0bec9b8..d46c1a69a6197 100644
--- a/nixos/modules/services/networking/iwd.nix
+++ b/nixos/modules/services/networking/iwd.nix
@@ -64,8 +64,10 @@ in
     };
 
     systemd.services.iwd = {
+      path = [ config.networking.resolvconf.package ];
       wantedBy = [ "multi-user.target" ];
       restartTriggers = [ configFile ];
+      serviceConfig.ReadWritePaths = "-/etc/resolv.conf";
     };
   };
 
diff --git a/nixos/modules/services/networking/jigasi.nix b/nixos/modules/services/networking/jigasi.nix
new file mode 100644
index 0000000000000..e701689031b14
--- /dev/null
+++ b/nixos/modules/services/networking/jigasi.nix
@@ -0,0 +1,237 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.jigasi;
+  homeDirName = "jigasi-home";
+  stateDir = "/tmp";
+  sipCommunicatorPropertiesFile = "${stateDir}/${homeDirName}/sip-communicator.properties";
+  sipCommunicatorPropertiesFileUnsubstituted = "${pkgs.jigasi}/etc/jitsi/jigasi/sip-communicator.properties";
+in
+{
+  options.services.jigasi = with types; {
+    enable = mkEnableOption "Jitsi Gateway to SIP - component of Jitsi Meet";
+
+    xmppHost = mkOption {
+      type = str;
+      example = "localhost";
+      description = ''
+        Hostname of the XMPP server to connect to.
+      '';
+    };
+
+    xmppDomain = mkOption {
+      type = nullOr str;
+      example = "meet.example.org";
+      description = ''
+        Domain name of the XMPP server to which to connect as a component.
+
+        If null, <option>xmppHost</option> is used.
+      '';
+    };
+
+    componentPasswordFile = mkOption {
+      type = str;
+      example = "/run/keys/jigasi-component";
+      description = ''
+        Path to file containing component secret.
+      '';
+    };
+
+    userName = mkOption {
+      type = str;
+      default = "callcontrol";
+      description = ''
+        User part of the JID for XMPP user connection.
+      '';
+    };
+
+    userDomain = mkOption {
+      type = str;
+      example = "internal.meet.example.org";
+      description = ''
+        Domain part of the JID for XMPP user connection.
+      '';
+    };
+
+    userPasswordFile = mkOption {
+      type = str;
+      example = "/run/keys/jigasi-user";
+      description = ''
+        Path to file containing password for XMPP user connection.
+      '';
+    };
+
+    bridgeMuc = mkOption {
+      type = str;
+      example = "jigasibrewery@internal.meet.example.org";
+      description = ''
+        JID of the internal MUC used to communicate with Videobridges.
+      '';
+    };
+
+    defaultJvbRoomName = mkOption {
+      type = str;
+      default = "";
+      example = "siptest";
+      description = ''
+        Name of the default JVB room that will be joined if no special header is included in SIP invite.
+      '';
+    };
+
+    environmentFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = ''
+        File containing environment variables to be passed to the jigasi service,
+        in which secret tokens can be specified securely by defining values for
+        <literal>JIGASI_SIPUSER</literal>,
+        <literal>JIGASI_SIPPWD</literal>,
+        <literal>JIGASI_SIPSERVER</literal> and
+        <literal>JIGASI_SIPPORT</literal>.
+      '';
+    };
+
+    config = mkOption {
+      type = attrsOf str;
+      default = { };
+      example = literalExpression ''
+        {
+          "org.jitsi.jigasi.auth.URL" = "XMPP:jitsi-meet.example.com";
+        }
+      '';
+      description = ''
+        Contents of the <filename>sip-communicator.properties</filename> configuration file for jigasi.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.jicofo.config = {
+      "org.jitsi.jicofo.jigasi.BREWERY" = "${cfg.bridgeMuc}";
+    };
+
+    services.jigasi.config = mapAttrs (_: v: mkDefault v) {
+      "org.jitsi.jigasi.BRIDGE_MUC" = cfg.bridgeMuc;
+    };
+
+    users.groups.jitsi-meet = {};
+
+    systemd.services.jigasi = let
+      jigasiProps = {
+        "-Dnet.java.sip.communicator.SC_HOME_DIR_LOCATION" = "${stateDir}";
+        "-Dnet.java.sip.communicator.SC_HOME_DIR_NAME" = "${homeDirName}";
+        "-Djava.util.logging.config.file" = "${pkgs.jigasi}/etc/jitsi/jigasi/logging.properties";
+      };
+    in
+    {
+      description = "Jitsi Gateway to SIP";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      preStart = ''
+        [ -f "${sipCommunicatorPropertiesFile}" ] && rm -f "${sipCommunicatorPropertiesFile}"
+        mkdir -p "$(dirname ${sipCommunicatorPropertiesFile})"
+        temp="${sipCommunicatorPropertiesFile}.unsubstituted"
+
+        export DOMAIN_BASE="${cfg.xmppDomain}"
+        export JIGASI_XMPP_PASSWORD=$(cat "${cfg.userPasswordFile}")
+        export JIGASI_DEFAULT_JVB_ROOM_NAME="${cfg.defaultJvbRoomName}"
+
+        # encode the credentials to base64
+        export JIGASI_SIPPWD=$(echo -n "$JIGASI_SIPPWD" | base64 -w 0)
+        export JIGASI_XMPP_PASSWORD_BASE64=$(cat "${cfg.userPasswordFile}" | base64 -w 0)
+
+        cp "${sipCommunicatorPropertiesFileUnsubstituted}" "$temp"
+        chmod 644 "$temp"
+        cat <<EOF >>"$temp"
+        net.java.sip.communicator.impl.protocol.sip.acc1403273890647.SERVER_PORT=$JIGASI_SIPPORT
+        net.java.sip.communicator.impl.protocol.sip.acc1403273890647.PREFERRED_TRANSPORT=udp
+        EOF
+        chmod 444 "$temp"
+
+        # Replace <<$VAR_NAME>> from example config to $VAR_NAME for environment substitution
+        sed -i -E \
+          's/<<([^>]+)>>/\$\1/g' \
+          "$temp"
+
+        sed -i \
+          's|\(net\.java\.sip\.communicator\.impl\.protocol\.jabber\.acc-xmpp-1\.PASSWORD=\).*|\1\$JIGASI_XMPP_PASSWORD_BASE64|g' \
+          "$temp"
+
+        sed -i \
+          's|\(#\)\(org.jitsi.jigasi.DEFAULT_JVB_ROOM_NAME=\).*|\2\$JIGASI_DEFAULT_JVB_ROOM_NAME|g' \
+          "$temp"
+
+        ${pkgs.envsubst}/bin/envsubst \
+          -o "${sipCommunicatorPropertiesFile}" \
+          -i "$temp"
+
+        # Set the brewery room name
+        sed -i \
+          's|\(net\.java\.sip\.communicator\.impl\.protocol\.jabber\.acc-xmpp-1\.BREWERY=\).*|\1${cfg.bridgeMuc}|g' \
+          "${sipCommunicatorPropertiesFile}"
+        sed -i \
+          's|\(org\.jitsi\.jigasi\.ALLOWED_JID=\).*|\1${cfg.bridgeMuc}|g' \
+          "${sipCommunicatorPropertiesFile}"
+
+
+        # Disable certificate verification for self-signed certificates
+        sed -i \
+          's|\(# \)\(net.java.sip.communicator.service.gui.ALWAYS_TRUST_MODE_ENABLED=true\)|\2|g' \
+          "${sipCommunicatorPropertiesFile}"
+      '';
+
+      restartTriggers = [
+        config.environment.etc."jitsi/jigasi/sip-communicator.properties".source
+      ];
+      environment.JAVA_SYS_PROPS = concatStringsSep " " (mapAttrsToList (k: v: "${k}=${toString v}") jigasiProps);
+
+      script = ''
+        ${pkgs.jigasi}/bin/jigasi \
+          --host="${cfg.xmppHost}" \
+          --domain="${if cfg.xmppDomain == null then cfg.xmppHost else cfg.xmppDomain}" \
+          --secret="$(cat ${cfg.componentPasswordFile})" \
+          --user_name="${cfg.userName}" \
+          --user_domain="${cfg.userDomain}" \
+          --user_password="$(cat ${cfg.userPasswordFile})" \
+          --configdir="${stateDir}" \
+          --configdirname="${homeDirName}"
+      '';
+
+      serviceConfig = {
+        Type = "exec";
+
+        DynamicUser = true;
+        User = "jigasi";
+        Group = "jitsi-meet";
+
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectHostname = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        StateDirectory = baseNameOf stateDir;
+        EnvironmentFile = cfg.environmentFile;
+      };
+    };
+
+    environment.etc."jitsi/jigasi/sip-communicator.properties".source =
+      mkDefault "${sipCommunicatorPropertiesFile}";
+    environment.etc."jitsi/jigasi/logging.properties".source =
+      mkDefault "${stateDir}/logging.properties-journal";
+  };
+
+  meta.maintainers = lib.teams.jitsi.members;
+}
diff --git a/nixos/modules/services/networking/kea.nix b/nixos/modules/services/networking/kea.nix
index 2f922a026a3a9..656ddd41fd12b 100644
--- a/nixos/modules/services/networking/kea.nix
+++ b/nixos/modules/services/networking/kea.nix
@@ -254,6 +254,8 @@ in
       DynamicUser = true;
       User = "kea";
       ConfigurationDirectory = "kea";
+      RuntimeDirectory = "kea";
+      RuntimeDirectoryPreserve = true;
       StateDirectory = "kea";
       UMask = "0077";
     };
@@ -288,8 +290,8 @@ in
       ];
 
       environment = {
-        KEA_PIDFILE_DIR = "/run/kea-ctrl-agent";
-        KEA_LOCKFILE_DIR = "/run/kea-ctrl-agent";
+        KEA_PIDFILE_DIR = "/run/kea";
+        KEA_LOCKFILE_DIR = "/run/kea";
       };
 
       restartTriggers = [
@@ -300,7 +302,6 @@ in
         ExecStart = "${package}/bin/kea-ctrl-agent -c /etc/kea/ctrl-agent.conf ${lib.escapeShellArgs cfg.ctrl-agent.extraArgs}";
         KillMode = "process";
         Restart = "on-failure";
-        RuntimeDirectory = "kea-ctrl-agent";
       } // commonServiceConfig;
     };
   })
@@ -324,13 +325,16 @@ in
         "network-online.target"
         "time-sync.target"
       ];
+      wants = [
+        "network-online.target"
+      ];
       wantedBy = [
         "multi-user.target"
       ];
 
       environment = {
-        KEA_PIDFILE_DIR = "/run/kea-dhcp4";
-        KEA_LOCKFILE_DIR = "/run/kea-dhcp4";
+        KEA_PIDFILE_DIR = "/run/kea";
+        KEA_LOCKFILE_DIR = "/run/kea";
       };
 
       restartTriggers = [
@@ -348,7 +352,6 @@ in
           "CAP_NET_BIND_SERVICE"
           "CAP_NET_RAW"
         ];
-        RuntimeDirectory = "kea-dhcp4";
       } // commonServiceConfig;
     };
   })
@@ -372,13 +375,16 @@ in
         "network-online.target"
         "time-sync.target"
       ];
+      wants = [
+        "network-online.target"
+      ];
       wantedBy = [
         "multi-user.target"
       ];
 
       environment = {
-        KEA_PIDFILE_DIR = "/run/kea-dhcp6";
-        KEA_LOCKFILE_DIR = "/run/kea-dhcp6";
+        KEA_PIDFILE_DIR = "/run/kea";
+        KEA_LOCKFILE_DIR = "/run/kea";
       };
 
       restartTriggers = [
@@ -394,7 +400,6 @@ in
         CapabilityBoundingSet = [
           "CAP_NET_BIND_SERVICE"
         ];
-        RuntimeDirectory = "kea-dhcp6";
       } // commonServiceConfig;
     };
   })
@@ -414,6 +419,7 @@ in
         "https://kea.readthedocs.io/en/kea-${package.version}/arm/ddns.html"
       ];
 
+      wants = [ "network-online.target" ];
       after = [
         "network-online.target"
         "time-sync.target"
@@ -423,8 +429,8 @@ in
       ];
 
       environment = {
-        KEA_PIDFILE_DIR = "/run/kea-dhcp-ddns";
-        KEA_LOCKFILE_DIR = "/run/kea-dhcp-ddns";
+        KEA_PIDFILE_DIR = "/run/kea";
+        KEA_LOCKFILE_DIR = "/run/kea";
       };
 
       restartTriggers = [
@@ -439,7 +445,6 @@ in
         CapabilityBoundingSet = [
           "CAP_NET_BIND_SERVICE"
         ];
-        RuntimeDirectory = "kea-dhcp-ddns";
       } // commonServiceConfig;
     };
   })
diff --git a/nixos/modules/services/networking/keepalived/default.nix b/nixos/modules/services/networking/keepalived/default.nix
index 29fbea5545c36..429a47c3962c6 100644
--- a/nixos/modules/services/networking/keepalived/default.nix
+++ b/nixos/modules/services/networking/keepalived/default.nix
@@ -150,6 +150,14 @@ in
         '';
       };
 
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to automatically allow VRRP and AH packets in the firewall.
+        '';
+      };
+
       enableScriptSecurity = mkOption {
         type = types.bool;
         default = false;
@@ -282,6 +290,19 @@ in
 
     assertions = flatten (map vrrpInstanceAssertions vrrpInstances);
 
+    networking.firewall = lib.mkIf cfg.openFirewall {
+      extraCommands = ''
+        # Allow VRRP and AH packets
+        ip46tables -A nixos-fw -p vrrp -m comment --comment "services.keepalived.openFirewall" -j ACCEPT
+        ip46tables -A nixos-fw -p ah -m comment --comment "services.keepalived.openFirewall" -j ACCEPT
+      '';
+
+      extraStopCommands = ''
+        ip46tables -D nixos-fw -p vrrp -m comment --comment "services.keepalived.openFirewall" -j ACCEPT
+        ip46tables -D nixos-fw -p ah -m comment --comment "services.keepalived.openFirewall" -j ACCEPT
+      '';
+    };
+
     systemd.timers.keepalived-boot-delay = {
       description = "Keepalive Daemon delay to avoid instant transition to MASTER state";
       after = [ "network.target" "network-online.target" "syslog.target" ];
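
A short sketch of the new `openFirewall` option; the VRRP instance definition itself is elided since it is unchanged by this patch:

```nix
{
  services.keepalived = {
    enable = true;
    # Adds ACCEPT rules for VRRP and AH packets to nixos-fw, and removes
    # them again via extraStopCommands when the firewall is stopped.
    openFirewall = true;
  };
}
```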
diff --git a/nixos/modules/services/networking/miniupnpd.nix b/nixos/modules/services/networking/miniupnpd.nix
index 64aacaf350404..116298dc6b1db 100644
--- a/nixos/modules/services/networking/miniupnpd.nix
+++ b/nixos/modules/services/networking/miniupnpd.nix
@@ -13,8 +13,17 @@ let
       listening_ip=${range}
     '') cfg.internalIPs}
 
+    ${lib.optionalString (firewall == "nftables") ''
+      upnp_table_name=miniupnpd
+      upnp_nat_table_name=miniupnpd
+    ''}
+
     ${cfg.appendConfig}
   '';
+  firewall = if config.networking.nftables.enable then "nftables" else "iptables";
+  miniupnpd = pkgs.miniupnpd.override { inherit firewall; };
+  firewallScripts = lib.optionals (firewall == "iptables")
+    ([ "iptables"] ++ lib.optional (config.networking.enableIPv6) "ip6tables");
 in
 {
   options = {
@@ -57,20 +66,50 @@ in
   };
 
   config = mkIf cfg.enable {
-    networking.firewall.extraCommands = ''
-      ${pkgs.bash}/bin/bash -x ${pkgs.miniupnpd}/etc/miniupnpd/iptables_init.sh -i ${cfg.externalInterface}
-    '';
+    networking.firewall.extraCommands = lib.mkIf (firewallScripts != []) (builtins.concatStringsSep "\n" (map (fw: ''
+      EXTIF=${cfg.externalInterface} ${pkgs.bash}/bin/bash -x ${miniupnpd}/etc/miniupnpd/${fw}_init.sh
+    '') firewallScripts));
+
+    networking.firewall.extraStopCommands = lib.mkIf (firewallScripts != []) (builtins.concatStringsSep "\n" (map (fw: ''
+      EXTIF=${cfg.externalInterface} ${pkgs.bash}/bin/bash -x ${miniupnpd}/etc/miniupnpd/${fw}_removeall.sh
+    '') firewallScripts));
 
-    networking.firewall.extraStopCommands = ''
-      ${pkgs.bash}/bin/bash -x ${pkgs.miniupnpd}/etc/miniupnpd/iptables_removeall.sh -i ${cfg.externalInterface}
-    '';
+    networking.nftables = lib.mkIf (firewall == "nftables") {
+      # see nft_init in ${miniupnpd-nftables}/etc/miniupnpd
+      tables.miniupnpd = {
+        family = "inet";
+        # The following is omitted because the firewall itself is expected to be responsible for it.
+        #
+        # chain forward {
+        #   type filter hook forward priority filter; policy drop;
+        #   jump miniupnpd
+        # }
+        #
+        # Otherwise, it quickly gets ugly with (potentially) two forward chains with "policy drop".
+        # This means the chain "miniupnpd" never actually gets triggered and is simply there to satisfy
+        # miniupnpd. If you're doing it yourself (without networking.firewall), the easiest way to get
+        # it to work is adding a rule "ct status dnat accept" - this is what networking.firewall does.
+        # If you don't want to simply accept forwarding for all "ct status dnat" packets, override
+        # upnp_table_name with whatever your table is, create a chain "miniupnpd" in your table and
+        # jump into it from your forward chain.
+        content = ''
+          chain miniupnpd {}
+          chain prerouting_miniupnpd {
+            type nat hook prerouting priority dstnat; policy accept;
+          }
+          chain postrouting_miniupnpd {
+            type nat hook postrouting priority srcnat; policy accept;
+          }
+        '';
+      };
+    };
 
     systemd.services.miniupnpd = {
       description = "MiniUPnP daemon";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
-        ExecStart = "${pkgs.miniupnpd}/bin/miniupnpd -f ${configFile}";
+        ExecStart = "${miniupnpd}/bin/miniupnpd -f ${configFile}";
         PIDFile = "/run/miniupnpd.pid";
         Type = "forking";
       };
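
The comment above notes that the module never jumps into its own `miniupnpd` chain; when `networking.firewall` is not used, forwarding for the installed mappings has to be allowed by hand. A minimal sketch of the "ct status dnat accept" approach mentioned there (the table name `my-filter` is illustrative, not part of this patch):

```nix
{
  networking.nftables.enable = true;
  networking.nftables.tables.my-filter = {
    family = "inet";
    content = ''
      chain forward {
        type filter hook forward priority filter; policy drop;
        # accept forwarding for connections that were DNAT'ed,
        # i.e. the port mappings miniupnpd installs
        ct status dnat accept
      }
    '';
  };
}
```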
diff --git a/nixos/modules/services/networking/mosquitto.nix b/nixos/modules/services/networking/mosquitto.nix
index f2b158b989427..ad9eefb422525 100644
--- a/nixos/modules/services/networking/mosquitto.nix
+++ b/nixos/modules/services/networking/mosquitto.nix
@@ -596,6 +596,7 @@ in
     systemd.services.mosquitto = {
       description = "Mosquitto MQTT Broker Daemon";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       serviceConfig = {
         Type = "notify";
diff --git a/nixos/modules/services/networking/mullvad-vpn.nix b/nixos/modules/services/networking/mullvad-vpn.nix
index 446c71f40764d..5da4ca1d1d803 100644
--- a/nixos/modules/services/networking/mullvad-vpn.nix
+++ b/nixos/modules/services/networking/mullvad-vpn.nix
@@ -53,7 +53,7 @@ with lib;
     systemd.services.mullvad-daemon = {
       description = "Mullvad VPN daemon";
       wantedBy = [ "multi-user.target" ];
-      wants = [ "network.target" ];
+      wants = [ "network.target" "network-online.target" ];
       after = [
         "network-online.target"
         "NetworkManager.service"
diff --git a/nixos/modules/services/networking/nbd.nix b/nixos/modules/services/networking/nbd.nix
index 454380aa3154c..b4bf7ede84632 100644
--- a/nixos/modules/services/networking/nbd.nix
+++ b/nixos/modules/services/networking/nbd.nix
@@ -117,6 +117,7 @@ in
     boot.kernelModules = [ "nbd" ];
 
     systemd.services.nbd-server = {
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       before = [ "multi-user.target" ];
       wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/services/networking/nebula.nix b/nixos/modules/services/networking/nebula.nix
index b9ebbfbd9a297..e13876172dac6 100644
--- a/nixos/modules/services/networking/nebula.nix
+++ b/nixos/modules/services/networking/nebula.nix
@@ -196,7 +196,7 @@ in
             before = [ "sshd.service" ];
             wantedBy = [ "multi-user.target" ];
             serviceConfig = {
-              Type = "simple";
+              Type = "notify";
               Restart = "always";
               ExecStart = "${netCfg.package}/bin/nebula -config ${configFile}";
               UMask = "0027";
diff --git a/nixos/modules/services/networking/networkmanager.nix b/nixos/modules/services/networking/networkmanager.nix
index d32712c8243d7..c96439cf2641a 100644
--- a/nixos/modules/services/networking/networkmanager.nix
+++ b/nixos/modules/services/networking/networkmanager.nix
@@ -565,7 +565,10 @@ in
       wantedBy = [ "network-online.target" ];
     };
 
-    systemd.services.ModemManager.aliases = [ "dbus-org.freedesktop.ModemManager1.service" ];
+    systemd.services.ModemManager = {
+      aliases = [ "dbus-org.freedesktop.ModemManager1.service" ];
+      path = lib.optionals (cfg.fccUnlockScripts != []) [ pkgs.libqmi pkgs.libmbim ];
+    };
 
     systemd.services.NetworkManager-dispatcher = {
       wantedBy = [ "network.target" ];
diff --git a/nixos/modules/services/networking/ntp/ntpd-rs.nix b/nixos/modules/services/networking/ntp/ntpd-rs.nix
new file mode 100644
index 0000000000000..4643ac146ddb9
--- /dev/null
+++ b/nixos/modules/services/networking/ntp/ntpd-rs.nix
@@ -0,0 +1,89 @@
+{ lib, config, pkgs, ... }:
+
+let
+  cfg = config.services.ntpd-rs;
+  format = pkgs.formats.toml { };
+  configFile = format.generate "ntpd-rs.toml" cfg.settings;
+in
+{
+  options.services.ntpd-rs = {
+    enable = lib.mkEnableOption "Network Time Service (ntpd-rs)";
+    metrics.enable = lib.mkEnableOption "ntpd-rs Prometheus Metrics Exporter";
+
+    package = lib.mkPackageOption pkgs "ntpd-rs" { };
+
+    useNetworkingTimeServers = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to add the time servers from {var}`networking.timeServers` as sources in the generated config.
+      '';
+    };
+
+    settings = lib.mkOption {
+      type = lib.types.submodule {
+        freeformType = format.type;
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Settings to write to {file}`ntp.toml`.
+
+        See <https://docs.ntpd-rs.pendulum-project.org/man/ntp.toml.5>
+        for more information about available options.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = !config.services.timesyncd.enable;
+        message = ''
+          `ntpd-rs` is not compatible with `services.timesyncd`. Please disable one of them.
+        '';
+      }
+    ];
+
+    environment.systemPackages = [ cfg.package ];
+    systemd.packages = [ cfg.package ];
+
+    services.timesyncd.enable = false;
+    systemd.services.systemd-timedated.environment = {
+      SYSTEMD_TIMEDATED_NTP_SERVICES = "ntpd-rs.service";
+    };
+
+    services.ntpd-rs.settings = {
+      observability = {
+        observation-path = lib.mkDefault "/var/run/ntpd-rs/observe";
+      };
+      source = lib.mkIf cfg.useNetworkingTimeServers (map
+        (ts: {
+          mode = "server";
+          address = ts;
+        })
+        config.networking.timeServers);
+    };
+
+    systemd.services.ntpd-rs = {
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = "";
+        Group = "";
+        DynamicUser = true;
+        ExecStart = [ "" "${lib.makeBinPath [ cfg.package ]}/ntp-daemon --config=${configFile}" ];
+      };
+    };
+
+    systemd.services.ntpd-rs-metrics = lib.mkIf cfg.metrics.enable {
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = "";
+        Group = "";
+        DynamicUser = true;
+        ExecStart = [ "" "${lib.makeBinPath [ cfg.package ]}/ntp-metrics-exporter --config=${configFile}" ];
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ fpletz ];
+}
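
A usage sketch for the new module: `metrics.enable` starts the separate exporter unit, and extra keys go through the freeform `settings`. The `log-level` key is taken from the upstream ntp.toml documentation and is illustrative here:

```nix
{
  services.ntpd-rs = {
    enable = true;
    metrics.enable = true;
    # Freeform TOML written to ntpd-rs.toml; sources from
    # networking.timeServers are added automatically unless
    # useNetworkingTimeServers = false.
    settings.observability.log-level = "info";
  };
}
```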
diff --git a/nixos/modules/services/networking/ocserv.nix b/nixos/modules/services/networking/ocserv.nix
index 9548fd92dbda3..3c61d56b893e9 100644
--- a/nixos/modules/services/networking/ocserv.nix
+++ b/nixos/modules/services/networking/ocserv.nix
@@ -85,6 +85,7 @@ in
     systemd.services.ocserv = {
       description = "OpenConnect SSL VPN server";
       documentation = [ "man:ocserv(8)" ];
+      wants = [ "network-online.target" ];
       after = [ "dbus.service" "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
 
diff --git a/nixos/modules/services/networking/pleroma.nix b/nixos/modules/services/networking/pleroma.nix
index db0a61b834699..8470f5e9cbc0c 100644
--- a/nixos/modules/services/networking/pleroma.nix
+++ b/nixos/modules/services/networking/pleroma.nix
@@ -92,6 +92,7 @@ in {
 
     systemd.services.pleroma = {
       description = "Pleroma social network";
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" "postgresql.service" ];
       wantedBy = [ "multi-user.target" ];
       restartTriggers = [ config.environment.etc."/pleroma/config.exs".source ];
diff --git a/nixos/modules/services/networking/quicktun.nix b/nixos/modules/services/networking/quicktun.nix
index 7aed972adc882..2d44659f20804 100644
--- a/nixos/modules/services/networking/quicktun.nix
+++ b/nixos/modules/services/networking/quicktun.nix
@@ -1,94 +1,153 @@
-{ config, pkgs, lib, ... }:
+{ options, config, pkgs, lib, ... }:
 
 let
+  inherit (lib) mkOption mdDoc types mkIf;
 
+  opt = options.services.quicktun;
   cfg = config.services.quicktun;
-
 in
-
-with lib;
-
 {
   options = {
-
     services.quicktun = mkOption {
       default = { };
-      description = lib.mdDoc "QuickTun tunnels";
-      type = types.attrsOf (types.submodule {
+      description = mdDoc ''
+        QuickTun tunnels.
+
+        See <http://wiki.ucis.nl/QuickTun> for more information about available options.
+      '';
+      type = types.attrsOf (types.submodule ({ name, ... }: let
+        qtcfg = cfg.${name};
+      in {
         options = {
           tunMode = mkOption {
-            type = types.int;
-            default = 0;
-            example = 1;
-            description = lib.mdDoc "";
+            type = with types; coercedTo bool (b: if b then 1 else 0) (ints.between 0 1);
+            default = false;
+            example = true;
+            description = mdDoc "Whether to operate in tun (IP) or tap (Ethernet) mode.";
           };
 
           remoteAddress = mkOption {
             type = types.str;
+            default = "0.0.0.0";
             example = "tunnel.example.com";
-            description = lib.mdDoc "";
+            description = mdDoc ''
+              IP address or hostname of the remote end (use `0.0.0.0` for a floating/dynamic remote endpoint).
+            '';
           };
 
           localAddress = mkOption {
-            type = types.str;
+            type = with types; nullOr str;
+            default = null;
             example = "0.0.0.0";
-            description = lib.mdDoc "";
+            description = mdDoc "IP address or hostname of the local end.";
           };
 
           localPort = mkOption {
-            type = types.int;
+            type = types.port;
             default = 2998;
-            description = lib.mdDoc "";
+            description = mdDoc "Local UDP port.";
           };
 
           remotePort = mkOption {
-            type = types.int;
-            default = 2998;
-            description = lib.mdDoc "";
+            type = types.port;
+            default = qtcfg.localPort;
+            defaultText = lib.literalExpression "config.services.quicktun.<name>.localPort";
+            description = mdDoc " remote UDP port";
           };
 
           remoteFloat = mkOption {
-            type = types.int;
-            default = 0;
-            description = lib.mdDoc "";
+            type = with types; coercedTo bool (b: if b then 1 else 0) (ints.between 0 1);
+            default = false;
+            example = true;
+            description = mdDoc ''
+              Whether to allow the remote address and port to change when properly encrypted packets are received.
+            '';
           };
 
           protocol = mkOption {
-            type = types.str;
+            type = types.enum [ "raw" "nacl0" "nacltai" "salty" ];
             default = "nacltai";
-            description = lib.mdDoc "";
+            description = mdDoc "Which protocol to use.";
           };
 
           privateKey = mkOption {
-            type = types.str;
-            description = lib.mdDoc "";
+            type = with types; nullOr str;
+            default = null;
+            description = mdDoc ''
+              Local secret key in hexadecimal form.
+
+              ::: {.warning}
+              This option is deprecated. Please use {var}`services.quicktun.<name>.privateKeyFile` instead.
+              :::
+
+              ::: {.note}
+              Not needed when {var}`services.quicktun.<name>.protocol` is set to `raw`.
+              :::
+            '';
+          };
+
+          privateKeyFile = mkOption {
+            type = with types; nullOr path;
+            # This is a hack to deprecate `privateKey` without using `mkChangedModuleOption`
+            default = if qtcfg.privateKey == null then null else pkgs.writeText "quicktun-key-${name}" qtcfg.privateKey;
+            defaultText = "null";
+            description = mdDoc ''
+              Path to file containing local secret key in binary or hexadecimal form.
+
+              ::: {.note}
+              Not needed when {var}`services.quicktun.<name>.protocol` is set to `raw`.
+              :::
+            '';
           };
 
           publicKey = mkOption {
-            type = types.str;
-            description = lib.mdDoc "";
+            type = with types; nullOr str;
+            default = null;
+            description = mdDoc ''
+              Remote public key in hexadecimal form.
+
+              ::: {.note}
+              Not needed when {var}`services.quicktun.<name>.protocol` is set to `raw`.
+              :::
+            '';
           };
 
           timeWindow = mkOption {
-            type = types.int;
+            type = types.ints.unsigned;
             default = 5;
-            description = lib.mdDoc "";
+            description = mdDoc ''
+              Allowed time window in seconds for the first received packet (a positive number allows packets from history).
+            '';
           };
 
           upScript = mkOption {
-            type = types.lines;
-            default = "";
-            description = lib.mdDoc "";
+            type = with types; nullOr lines;
+            default = null;
+            description = mdDoc ''
+              Run specified command or script after the tunnel device has been opened.
+            '';
           };
         };
-      });
+      }));
     };
-
   };
 
-  config = mkIf (cfg != []) {
-    systemd.services = foldr (a: b: a // b) {} (
-      mapAttrsToList (name: qtcfg: {
+  config = {
+    warnings = lib.pipe cfg [
+      (lib.mapAttrsToList (name: value: if value.privateKey != null then name else null))
+      (builtins.filter (n: n != null))
+      (map (n: "  - services.quicktun.${n}.privateKey"))
+      (services: lib.optional (services != [ ]) ''
+        `services.quicktun.<name>.privateKey` is deprecated.
+        Please use `services.quicktun.<name>.privateKeyFile` instead.
+
+        Offending options:
+        ${lib.concatStringsSep "\n" services}
+      '')
+    ];
+
+    systemd.services = lib.mkMerge (
+      lib.mapAttrsToList (name: qtcfg: {
         "quicktun-${name}" = {
           wantedBy = [ "multi-user.target" ];
           after = [ "network.target" ];
@@ -96,14 +155,14 @@ with lib;
             INTERFACE = name;
             TUN_MODE = toString qtcfg.tunMode;
             REMOTE_ADDRESS = qtcfg.remoteAddress;
-            LOCAL_ADDRESS = qtcfg.localAddress;
+            LOCAL_ADDRESS = mkIf (qtcfg.localAddress != null) (qtcfg.localAddress);
             LOCAL_PORT = toString qtcfg.localPort;
             REMOTE_PORT = toString qtcfg.remotePort;
             REMOTE_FLOAT = toString qtcfg.remoteFloat;
-            PRIVATE_KEY = qtcfg.privateKey;
-            PUBLIC_KEY = qtcfg.publicKey;
+            PRIVATE_KEY_FILE = mkIf (qtcfg.privateKeyFile != null) qtcfg.privateKeyFile;
+            PUBLIC_KEY = mkIf (qtcfg.publicKey != null) qtcfg.publicKey;
             TIME_WINDOW = toString qtcfg.timeWindow;
-            TUN_UP_SCRIPT = pkgs.writeScript "quicktun-${name}-up.sh" qtcfg.upScript;
+            TUN_UP_SCRIPT = mkIf (qtcfg.upScript != null) (pkgs.writeScript "quicktun-${name}-up.sh" qtcfg.upScript);
             SUID = "nobody";
           };
           serviceConfig = {
@@ -114,5 +173,4 @@ with lib;
       }) cfg
     );
   };
-
 }
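
With the rewrite, a tunnel definition might look like the following sketch (the interface name, hostname and key material are illustrative): `privateKeyFile` replaces the deprecated `privateKey`, and `tunMode`/`remoteFloat` now accept booleans:

```nix
{
  services.quicktun."tun0" = {
    protocol = "nacltai";
    remoteAddress = "tunnel.example.com";
    tunMode = true;             # tun (IP) rather than tap (Ethernet)
    remoteFloat = true;         # follow the peer if its address changes
    privateKeyFile = "/run/keys/quicktun-tun0.key";
    publicKey = "0123...abcd";  # hex-encoded key of the remote end (placeholder)
  };
}
```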
diff --git a/nixos/modules/services/networking/rosenpass.nix b/nixos/modules/services/networking/rosenpass.nix
index d2a264b83d677..487cb6f601429 100644
--- a/nixos/modules/services/networking/rosenpass.nix
+++ b/nixos/modules/services/networking/rosenpass.nix
@@ -208,6 +208,7 @@ in
       in
       rec {
         wantedBy = [ "multi-user.target" ];
+        wants = [ "network-online.target" ];
         after = [ "network-online.target" ];
         path = [ cfg.package pkgs.wireguard-tools ];
 
diff --git a/nixos/modules/services/networking/rxe.nix b/nixos/modules/services/networking/rxe.nix
index 7dbb4823b4bcd..07437ed71195b 100644
--- a/nixos/modules/services/networking/rxe.nix
+++ b/nixos/modules/services/networking/rxe.nix
@@ -33,7 +33,7 @@ in {
 
       wantedBy = [ "multi-user.target" ];
       after = [ "systemd-modules-load.service" "network-online.target" ];
-      wants = [ "network-pre.target" ];
+      wants = [ "network-pre.target" "network-online.target" ];
 
       serviceConfig = {
         Type = "oneshot";
diff --git a/nixos/modules/services/networking/soju.nix b/nixos/modules/services/networking/soju.nix
index 7f0ac3e3b8e69..d69ec08ca13a0 100644
--- a/nixos/modules/services/networking/soju.nix
+++ b/nixos/modules/services/networking/soju.nix
@@ -110,6 +110,7 @@ in
     systemd.services.soju = {
       description = "soju IRC bouncer";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       serviceConfig = {
         DynamicUser = true;
diff --git a/nixos/modules/services/networking/ssh/sshd.nix b/nixos/modules/services/networking/ssh/sshd.nix
index f54ce59174387..aca8343b7d597 100644
--- a/nixos/modules/services/networking/ssh/sshd.nix
+++ b/nixos/modules/services/networking/ssh/sshd.nix
@@ -600,7 +600,11 @@ in
           { description = "SSH Socket";
             wantedBy = [ "sockets.target" ];
             socketConfig.ListenStream = if cfg.listenAddresses != [] then
-              map (l: "${l.addr}:${toString (if l.port != null then l.port else 22)}") cfg.listenAddresses
+              concatMap
+                ({ addr, port }:
+                  if port != null then [ "${addr}:${toString port}" ]
+                  else map (p: "${addr}:${toString p}") cfg.ports)
+                cfg.listenAddresses
             else
               cfg.ports;
             socketConfig.Accept = true;
@@ -674,7 +678,11 @@ in
           (lport: "sshd -G -T -C lport=${toString lport} -f ${sshconf} > /dev/null")
           cfg.ports}
         ${concatMapStringsSep "\n"
-          (la: "sshd -G -T -C ${escapeShellArg "laddr=${la.addr},lport=${toString la.port}"} -f ${sshconf} > /dev/null")
+          (la:
+            concatMapStringsSep "\n"
+              (port: "sshd -G -T -C ${escapeShellArg "laddr=${la.addr},lport=${toString port}"} -f ${sshconf} > /dev/null")
+              (if la.port != null then [ la.port ] else cfg.ports)
+          )
           cfg.listenAddresses}
         touch $out
       '')
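
Sketched as a configuration (addresses are illustrative), the effect of the listenAddresses change is that an entry without an explicit `port` now listens on every entry of `services.openssh.ports` instead of only the default:

```nix
{
  services.openssh = {
    enable = true;
    ports = [ 22 2222 ];
    listenAddresses = [
      { addr = "192.0.2.1"; }               # listens on 22 and 2222
      { addr = "192.0.2.2"; port = 2022; }  # an explicit port still wins
    ];
  };
}
```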
diff --git a/nixos/modules/services/networking/strongswan-swanctl/module.nix b/nixos/modules/services/networking/strongswan-swanctl/module.nix
index c8832ed4defb6..a988509239558 100644
--- a/nixos/modules/services/networking/strongswan-swanctl/module.nix
+++ b/nixos/modules/services/networking/strongswan-swanctl/module.nix
@@ -55,6 +55,7 @@ in  {
     systemd.services.strongswan-swanctl = {
       description = "strongSwan IPsec IKEv1/IKEv2 daemon using swanctl";
       wantedBy = [ "multi-user.target" ];
+      wants    = [ "network-online.target" ];
       after    = [ "network-online.target" ];
       path     = with pkgs; [ kmod iproute2 iptables util-linux ];
       environment = {
diff --git a/nixos/modules/services/networking/strongswan.nix b/nixos/modules/services/networking/strongswan.nix
index e58526814d1ad..dcf04d2a1917c 100644
--- a/nixos/modules/services/networking/strongswan.nix
+++ b/nixos/modules/services/networking/strongswan.nix
@@ -153,6 +153,7 @@ in
       description = "strongSwan IPSec Service";
       wantedBy = [ "multi-user.target" ];
       path = with pkgs; [ kmod iproute2 iptables util-linux ]; # XXX Linux
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       environment = {
         STRONGSWAN_CONF = strongswanConf { inherit setup connections ca secretsFile managePlugins enabledPlugins; };
diff --git a/nixos/modules/services/networking/syncplay.nix b/nixos/modules/services/networking/syncplay.nix
index 0a66d93bf153a..151259b6d4ad2 100644
--- a/nixos/modules/services/networking/syncplay.nix
+++ b/nixos/modules/services/networking/syncplay.nix
@@ -107,6 +107,7 @@ in
     systemd.services.syncplay = {
       description = "Syncplay Service";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
 
       serviceConfig = {
diff --git a/nixos/modules/services/networking/syncthing.nix b/nixos/modules/services/networking/syncthing.nix
index 99d4d9eeffcc6..e0425792431e6 100644
--- a/nixos/modules/services/networking/syncthing.nix
+++ b/nixos/modules/services/networking/syncthing.nix
@@ -559,6 +559,15 @@ in {
         '';
       };
 
+      databaseDir = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          The directory containing the database and logs.
+        '';
+        default = cfg.configDir;
+        defaultText = literalExpression "config.${opt.configDir}";
+      };
+
       extraFlags = mkOption {
         type = types.listOf types.str;
         default = [];
@@ -660,7 +669,7 @@ in {
               -no-browser \
               -gui-address=${if isUnixGui then "unix://" else ""}${cfg.guiAddress} \
               -config=${cfg.configDir} \
-              -data=${cfg.dataDir} \
+              -data=${cfg.databaseDir} \
               ${escapeShellArgs cfg.extraFlags}
           '';
           MemoryDenyWriteExecute = true;
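
A sketch of the new `databaseDir` option, which lets the index/database live apart from the configuration (the paths are illustrative; it defaults to `configDir`):

```nix
{
  services.syncthing = {
    enable = true;
    configDir = "/var/lib/syncthing/.config/syncthing";
    # Passed to syncthing as -data=...; keeps the potentially large
    # index on a separate volume.
    databaseDir = "/var/cache/syncthing";
  };
}
```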
diff --git a/nixos/modules/services/networking/tailscale.nix b/nixos/modules/services/networking/tailscale.nix
index 3822df81063d9..1070e4e252967 100644
--- a/nixos/modules/services/networking/tailscale.nix
+++ b/nixos/modules/services/networking/tailscale.nix
@@ -100,8 +100,8 @@ in {
     };
 
     systemd.services.tailscaled-autoconnect = mkIf (cfg.authKeyFile != null) {
-      after = ["tailscale.service"];
-      wants = ["tailscale.service"];
+      after = ["tailscaled.service"];
+      wants = ["tailscaled.service"];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
         Type = "oneshot";
diff --git a/nixos/modules/services/networking/teamspeak3.nix b/nixos/modules/services/networking/teamspeak3.nix
index f09ef1a959ed4..ff41539a6d9b7 100644
--- a/nixos/modules/services/networking/teamspeak3.nix
+++ b/nixos/modules/services/networking/teamspeak3.nix
@@ -50,7 +50,7 @@ in
       };
 
       defaultVoicePort = mkOption {
-        type = types.int;
+        type = types.port;
         default = 9987;
         description = lib.mdDoc ''
           Default UDP port for clients to connect to virtual servers - used for first virtual server, subsequent ones will open on incrementing port numbers by default.
@@ -67,7 +67,7 @@ in
       };
 
       fileTransferPort = mkOption {
-        type = types.int;
+        type = types.port;
         default = 30033;
         description = lib.mdDoc ''
           TCP port opened for file transfers.
@@ -84,10 +84,26 @@ in
       };
 
       queryPort = mkOption {
-        type = types.int;
+        type = types.port;
         default = 10011;
         description = lib.mdDoc ''
-          TCP port opened for ServerQuery connections.
+          TCP port opened for ServerQuery connections using the raw telnet protocol.
+        '';
+      };
+
+      querySshPort = mkOption {
+        type = types.port;
+        default = 10022;
+        description = lib.mdDoc ''
+          TCP port opened for ServerQuery connections using the SSH protocol.
+        '';
+      };
+
+      queryHttpPort = mkOption {
+        type = types.port;
+        default = 10080;
+        description = lib.mdDoc ''
+          TCP port opened for ServerQuery connections using the HTTP protocol.
         '';
       };
 
@@ -128,7 +144,9 @@ in
     ];
 
     networking.firewall = mkIf cfg.openFirewall {
-      allowedTCPPorts = [ cfg.fileTransferPort ] ++ optionals (cfg.openFirewallServerQuery) [ cfg.queryPort (cfg.queryPort + 11) ];
+      allowedTCPPorts = [ cfg.fileTransferPort ] ++ (map (port:
+        mkIf cfg.openFirewallServerQuery port
+      ) [cfg.queryPort cfg.querySshPort cfg.queryHttpPort]);
       # subsequent vServers will use the incremented voice port, let's just open the next 10
       allowedUDPPortRanges = [ { from = cfg.defaultVoicePort; to = cfg.defaultVoicePort + 10; } ];
     };
@@ -141,13 +159,19 @@ in
       serviceConfig = {
         ExecStart = ''
           ${ts3}/bin/ts3server \
-            dbsqlpath=${ts3}/lib/teamspeak/sql/ logpath=${cfg.logPath} \
-            ${optionalString (cfg.voiceIP != null) "voice_ip=${cfg.voiceIP}"} \
+            dbsqlpath=${ts3}/lib/teamspeak/sql/ \
+            logpath=${cfg.logPath} \
+            license_accepted=1 \
             default_voice_port=${toString cfg.defaultVoicePort} \
-            ${optionalString (cfg.fileTransferIP != null) "filetransfer_ip=${cfg.fileTransferIP}"} \
             filetransfer_port=${toString cfg.fileTransferPort} \
+            query_port=${toString cfg.queryPort} \
+            query_ssh_port=${toString cfg.querySshPort} \
+            query_http_port=${toString cfg.queryHttpPort} \
+            ${optionalString (cfg.voiceIP != null) "voice_ip=${cfg.voiceIP}"} \
+            ${optionalString (cfg.fileTransferIP != null) "filetransfer_ip=${cfg.fileTransferIP}"} \
             ${optionalString (cfg.queryIP != null) "query_ip=${cfg.queryIP}"} \
-            query_port=${toString cfg.queryPort} license_accepted=1
+            ${optionalString (cfg.queryIP != null) "query_ssh_ip=${cfg.queryIP}"} \
+            ${optionalString (cfg.queryIP != null) "query_http_ip=${cfg.queryIP}"} \
         '';
         WorkingDirectory = cfg.dataDir;
         User = user;
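
A sketch of the new ServerQuery ports together with the firewall options touched by this patch (the port values shown are the defaults):

```nix
{
  services.teamspeak3 = {
    enable = true;
    openFirewall = true;
    openFirewallServerQuery = true;  # also opens the three query ports below
    queryPort = 10011;       # raw telnet ServerQuery
    querySshPort = 10022;    # SSH ServerQuery
    queryHttpPort = 10080;   # HTTP ServerQuery
  };
}
```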
diff --git a/nixos/modules/services/networking/tinyproxy.nix b/nixos/modules/services/networking/tinyproxy.nix
index 42d45c460c2e7..8ff12b52f10ca 100644
--- a/nixos/modules/services/networking/tinyproxy.nix
+++ b/nixos/modules/services/networking/tinyproxy.nix
@@ -85,7 +85,7 @@ in
         User = "tinyproxy";
         Group = "tinyproxy";
         Type = "simple";
-        ExecStart = "${getExe pkgs.tinyproxy} -d -c ${configFile}";
+        ExecStart = "${getExe cfg.package} -d -c ${configFile}";
         ExecReload = "${pkgs.coreutils}/bin/kill -SIGHUP $MAINPID";
         KillSignal = "SIGINT";
         TimeoutStopSec = "30s";
diff --git a/nixos/modules/services/networking/vdirsyncer.nix b/nixos/modules/services/networking/vdirsyncer.nix
index 6a069943434da..165dc70f0876a 100644
--- a/nixos/modules/services/networking/vdirsyncer.nix
+++ b/nixos/modules/services/networking/vdirsyncer.nix
@@ -20,9 +20,11 @@ let
     else
       pkgs.writeText "vdirsyncer-${name}.conf" (toIniJson (
         {
-          general = cfg'.config.general // (lib.optionalAttrs (cfg'.config.statusPath == null) {
-            status_path = "/var/lib/vdirsyncer/${name}";
-          });
+          general = cfg'.config.general // {
+            status_path = if cfg'.config.statusPath == null
+                          then "/var/lib/vdirsyncer/${name}"
+                          else cfg'.config.statusPath;
+          };
         } // (
           mapAttrs' (name: nameValuePair "pair ${name}") cfg'.config.pairs
         ) // (
diff --git a/nixos/modules/services/networking/wasabibackend.nix b/nixos/modules/services/networking/wasabibackend.nix
index 938145b35ee88..e3a48afd2a2c5 100644
--- a/nixos/modules/services/networking/wasabibackend.nix
+++ b/nixos/modules/services/networking/wasabibackend.nix
@@ -119,6 +119,7 @@ in {
     systemd.services.wasabibackend = {
       description = "wasabibackend server";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       environment = {
         DOTNET_PRINT_TELEMETRY_MESSAGE = "false";
diff --git a/nixos/modules/services/networking/wpa_supplicant.nix b/nixos/modules/services/networking/wpa_supplicant.nix
index 90d9c68433cf4..4586550ed75e7 100644
--- a/nixos/modules/services/networking/wpa_supplicant.nix
+++ b/nixos/modules/services/networking/wpa_supplicant.nix
@@ -107,6 +107,10 @@ let
       stopIfChanged = false;
 
       path = [ package ];
+      # if `userControl.enable`, the supplicant automatically changes the permissions
+      #  and owning group of the runtime dir; setting `umask` ensures the generated
+      #  config file isn't readable (except to root);  see nixpkgs#267693
+      serviceConfig.UMask = "066";
       serviceConfig.RuntimeDirectory = "wpa_supplicant";
       serviceConfig.RuntimeDirectoryMode = "700";
       serviceConfig.EnvironmentFile = mkIf (cfg.environmentFile != null)
diff --git a/nixos/modules/services/networking/xrdp.nix b/nixos/modules/services/networking/xrdp.nix
index 3b674840b936b..7e6634cd239aa 100644
--- a/nixos/modules/services/networking/xrdp.nix
+++ b/nixos/modules/services/networking/xrdp.nix
@@ -4,14 +4,17 @@ with lib;
 
 let
   cfg = config.services.xrdp;
+
   confDir = pkgs.runCommand "xrdp.conf" { preferLocalBuild = true; } ''
-    mkdir $out
+    mkdir -p $out
 
-    cp ${cfg.package}/etc/xrdp/{km-*,xrdp,sesman,xrdp_keyboard}.ini $out
+    cp -r ${cfg.package}/etc/xrdp/* $out
+    chmod -R +w $out
 
     cat > $out/startwm.sh <<EOF
     #!/bin/sh
     . /etc/profile
+    ${lib.optionalString cfg.audio.enable "${cfg.audio.package}/libexec/pulsaudio-xrdp-module/pulseaudio_xrdp_init"}
     ${cfg.defaultWindowManager}
     EOF
     chmod +x $out/startwm.sh
@@ -25,13 +28,17 @@ let
 
     substituteInPlace $out/sesman.ini \
       --replace LogFile=xrdp-sesman.log LogFile=/dev/null \
-      --replace EnableSyslog=1 EnableSyslog=0
+      --replace EnableSyslog=1 EnableSyslog=0 \
+      --replace startwm.sh $out/startwm.sh \
+      --replace reconnectwm.sh $out/reconnectwm.sh \
 
     # Ensure that clipboard works for non-ASCII characters
     sed -i -e '/.*SessionVariables.*/ a\
     LANG=${config.i18n.defaultLocale}\
     LOCALE_ARCHIVE=${config.i18n.glibcLocales}/lib/locale/locale-archive
     ' $out/sesman.ini
+
+    ${cfg.extraConfDirCommands}
   '';
 in
 {
@@ -44,7 +51,12 @@ in
 
       enable = mkEnableOption (lib.mdDoc "xrdp, the Remote Desktop Protocol server");
 
-      package = mkPackageOption pkgs "xrdp" { };
+      package = mkPackageOptionMD pkgs "xrdp" { };
+
+      audio = {
+        enable = mkEnableOption (lib.mdDoc "audio support for xrdp sessions. So far it only works with PulseAudio sessions on the server side. No PipeWire support yet");
+        package = mkPackageOptionMD pkgs "pulseaudio-module-xrdp" {};
+      };
 
       port = mkOption {
         type = types.port;
@@ -93,86 +105,117 @@ in
       confDir = mkOption {
         type = types.path;
         default = confDir;
-        defaultText = literalMD "generated from configuration";
-        description = lib.mdDoc "The location of the config files for xrdp.";
+        internal = true;
+        description = lib.mdDoc ''
+          Configuration directory of xrdp and sesman.
+
+          Changes to this must be made through extraConfDirCommands.
+        '';
+        readOnly = true;
+      };
+
+      extraConfDirCommands = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Extra commands to run on the default confDir derivation.
+        '';
+        example = ''
+          substituteInPlace $out/sesman.ini \
+            --replace LogLevel=INFO LogLevel=DEBUG \
+            --replace LogFile=/dev/null LogFile=/var/log/xrdp.log
+        '';
       };
     };
   };
 
-
   ###### implementation
 
-  config = mkIf cfg.enable {
+  config = lib.mkMerge [
+    (mkIf cfg.audio.enable {
+      environment.systemPackages = [ cfg.audio.package ];  # needed for autostart
 
-    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
+      hardware.pulseaudio.extraModules = [ cfg.audio.package ];
+    })
 
-    # xrdp can run X11 program even if "services.xserver.enable = false"
-    xdg = {
-      autostart.enable = true;
-      menus.enable = true;
-      mime.enable = true;
-      icons.enable = true;
-    };
+    (mkIf cfg.enable {
 
-    fonts.enableDefaultPackages = mkDefault true;
-
-    systemd = {
-      services.xrdp = {
-        wantedBy = [ "multi-user.target" ];
-        after = [ "network.target" ];
-        description = "xrdp daemon";
-        requires = [ "xrdp-sesman.service" ];
-        preStart = ''
-          # prepare directory for unix sockets (the sockets will be owned by loggedinuser:xrdp)
-          mkdir -p /tmp/.xrdp || true
-          chown xrdp:xrdp /tmp/.xrdp
-          chmod 3777 /tmp/.xrdp
-
-          # generate a self-signed certificate
-          if [ ! -s ${cfg.sslCert} -o ! -s ${cfg.sslKey} ]; then
-            mkdir -p $(dirname ${cfg.sslCert}) || true
-            mkdir -p $(dirname ${cfg.sslKey}) || true
-            ${pkgs.openssl.bin}/bin/openssl req -x509 -newkey rsa:2048 -sha256 -nodes -days 365 \
-              -subj /C=US/ST=CA/L=Sunnyvale/O=xrdp/CN=www.xrdp.org \
-              -config ${cfg.package}/share/xrdp/openssl.conf \
-              -keyout ${cfg.sslKey} -out ${cfg.sslCert}
-            chown root:xrdp ${cfg.sslKey} ${cfg.sslCert}
-            chmod 440 ${cfg.sslKey} ${cfg.sslCert}
-          fi
-          if [ ! -s /run/xrdp/rsakeys.ini ]; then
-            mkdir -p /run/xrdp
-            ${cfg.package}/bin/xrdp-keygen xrdp /run/xrdp/rsakeys.ini
-          fi
-        '';
-        serviceConfig = {
-          User = "xrdp";
-          Group = "xrdp";
-          PermissionsStartOnly = true;
-          ExecStart = "${cfg.package}/bin/xrdp --nodaemon --port ${toString cfg.port} --config ${cfg.confDir}/xrdp.ini";
-        };
+      networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
+
+      # xrdp can run X11 program even if "services.xserver.enable = false"
+      xdg = {
+        autostart.enable = true;
+        menus.enable = true;
+        mime.enable = true;
+        icons.enable = true;
       };
 
-      services.xrdp-sesman = {
-        wantedBy = [ "multi-user.target" ];
-        after = [ "network.target" ];
-        description = "xrdp session manager";
-        restartIfChanged = false; # do not restart on "nixos-rebuild switch". like "display-manager", it can have many interactive programs as children
-        serviceConfig = {
-          ExecStart = "${cfg.package}/bin/xrdp-sesman --nodaemon --config ${cfg.confDir}/sesman.ini";
-          ExecStop  = "${pkgs.coreutils}/bin/kill -INT $MAINPID";
+      fonts.enableDefaultPackages = mkDefault true;
+
+      environment.etc."xrdp".source = "${confDir}/*";
+
+      systemd = {
+        services.xrdp = {
+          wantedBy = [ "multi-user.target" ];
+          after = [ "network.target" ];
+          description = "xrdp daemon";
+          requires = [ "xrdp-sesman.service" ];
+          preStart = ''
+            # prepare directory for unix sockets (the sockets will be owned by loggedinuser:xrdp)
+            mkdir -p /tmp/.xrdp || true
+            chown xrdp:xrdp /tmp/.xrdp
+            chmod 3777 /tmp/.xrdp
+
+            # generate a self-signed certificate
+            if [ ! -s ${cfg.sslCert} -o ! -s ${cfg.sslKey} ]; then
+              mkdir -p $(dirname ${cfg.sslCert}) || true
+              mkdir -p $(dirname ${cfg.sslKey}) || true
+              ${lib.getExe pkgs.openssl} req -x509 -newkey rsa:2048 -sha256 -nodes -days 365 \
+                -subj /C=US/ST=CA/L=Sunnyvale/O=xrdp/CN=www.xrdp.org \
+                -config ${cfg.package}/share/xrdp/openssl.conf \
+                -keyout ${cfg.sslKey} -out ${cfg.sslCert}
+              chown root:xrdp ${cfg.sslKey} ${cfg.sslCert}
+              chmod 440 ${cfg.sslKey} ${cfg.sslCert}
+            fi
+            if [ ! -s /run/xrdp/rsakeys.ini ]; then
+              mkdir -p /run/xrdp
+              ${cfg.package}/bin/xrdp-keygen xrdp /run/xrdp/rsakeys.ini
+            fi
+          '';
+          serviceConfig = {
+            User = "xrdp";
+            Group = "xrdp";
+            PermissionsStartOnly = true;
+            ExecStart = "${pkgs.xrdp}/bin/xrdp --nodaemon --port ${toString cfg.port} --config ${confDir}/xrdp.ini";
+          };
+        };
+
+        services.xrdp-sesman = {
+          wantedBy = [ "multi-user.target" ];
+          after = [ "network.target" ];
+          description = "xrdp session manager";
+          restartIfChanged = false; # do not restart on "nixos-rebuild switch". like "display-manager", it can have many interactive programs as children
+          serviceConfig = {
+            ExecStart = "${pkgs.xrdp}/bin/xrdp-sesman --nodaemon --config ${confDir}/sesman.ini";
+            ExecStop  = "${pkgs.coreutils}/bin/kill -INT $MAINPID";
+          };
         };
+
       };
 
-    };
+      users.users.xrdp = {
+        description   = "xrdp daemon user";
+        isSystemUser  = true;
+        group         = "xrdp";
+      };
+      users.groups.xrdp = {};
 
-    users.users.xrdp = {
-      description   = "xrdp daemon user";
-      isSystemUser  = true;
-      group         = "xrdp";
-    };
-    users.groups.xrdp = {};
+      security.pam.services.xrdp-sesman = {
+        allowNullPassword = true;
+        startSession = true;
+      };
 
-    security.pam.services.xrdp-sesman = { allowNullPassword = true; startSession = true; };
-  };
+    })
+  ];
 
 }
diff --git a/nixos/modules/services/networking/yggdrasil.nix b/nixos/modules/services/networking/yggdrasil.nix
index 514753687d699..9173e7eb3457b 100644
--- a/nixos/modules/services/networking/yggdrasil.nix
+++ b/nixos/modules/services/networking/yggdrasil.nix
@@ -137,16 +137,24 @@ in
         message = "networking.enableIPv6 must be true for yggdrasil to work";
       }];
 
-      system.activationScripts.yggdrasil = mkIf cfg.persistentKeys ''
-        if [ ! -e ${keysPath} ]
-        then
-          mkdir --mode=700 -p ${builtins.dirOf keysPath}
-          ${binYggdrasil} -genconf -json \
-            | ${pkgs.jq}/bin/jq \
-                'to_entries|map(select(.key|endswith("Key")))|from_entries' \
-            > ${keysPath}
-        fi
-      '';
+      # This needs to be a separate service. The yggdrasil service fails if
+      # this is put into its preStart.
+      systemd.services.yggdrasil-persistent-keys = lib.mkIf cfg.persistentKeys {
+        wantedBy = [ "multi-user.target" ];
+        before = [ "yggdrasil.service" ];
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+        script = ''
+          if [ ! -e ${keysPath} ]
+          then
+            mkdir --mode=700 -p ${builtins.dirOf keysPath}
+            ${binYggdrasil} -genconf -json \
+              | ${pkgs.jq}/bin/jq \
+                  'to_entries|map(select(.key|endswith("Key")))|from_entries' \
+              > ${keysPath}
+          fi
+        '';
+      };
 
       systemd.services.yggdrasil = {
         description = "Yggdrasil Network Service";
diff --git a/nixos/modules/services/networking/zerotierone.nix b/nixos/modules/services/networking/zerotierone.nix
index 994e01d4980ea..60615d553041b 100644
--- a/nixos/modules/services/networking/zerotierone.nix
+++ b/nixos/modules/services/networking/zerotierone.nix
@@ -4,6 +4,8 @@ with lib;
 
 let
   cfg = config.services.zerotierone;
+  localConfFile = pkgs.writeText "zt-local.conf" (builtins.toJSON cfg.localConf);
+  localConfFilePath = "/var/lib/zerotier-one/local.conf";
 in
 {
   options.services.zerotierone.enable = mkEnableOption (lib.mdDoc "ZeroTierOne");
@@ -29,6 +31,19 @@ in
 
   options.services.zerotierone.package = mkPackageOption pkgs "zerotierone" { };
 
+  options.services.zerotierone.localConf = mkOption {
+    default = null;
+    description = mdDoc ''
+      Optional configuration to be written to ZeroTier's JSON-based local.conf.
+      If set, the configuration will be symlinked to `/var/lib/zerotier-one/local.conf` when the service starts.
+      To understand the configuration format, refer to https://docs.zerotier.com/config/#local-configuration-options.
+    '';
+    example = {
+      settings.allowTcpFallbackRelay = false;
+    };
+    type = types.nullOr types.attrs;
+  };
+
   config = mkIf cfg.enable {
     systemd.services.zerotierone = {
       description = "ZeroTierOne";
@@ -45,7 +60,17 @@ in
         chown -R root:root /var/lib/zerotier-one
       '' + (concatMapStrings (netId: ''
         touch "/var/lib/zerotier-one/networks.d/${netId}.conf"
-      '') cfg.joinNetworks);
+      '') cfg.joinNetworks) + optionalString (cfg.localConf != null) ''
+        if [ -L "${localConfFilePath}" ]
+        then
+          rm ${localConfFilePath}
+        elif [ -f "${localConfFilePath}" ]
+        then
+          mv ${localConfFilePath} ${localConfFilePath}.bak
+        fi
+        ln -s ${localConfFile} ${localConfFilePath}
+      '';
+
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/zerotier-one -p${toString cfg.port}";
         Restart = "always";
diff --git a/nixos/modules/services/networking/znc/default.nix b/nixos/modules/services/networking/znc/default.nix
index d3ba4a524197d..e15233293cf25 100644
--- a/nixos/modules/services/networking/znc/default.nix
+++ b/nixos/modules/services/networking/znc/default.nix
@@ -243,6 +243,7 @@ in
     systemd.services.znc = {
       description = "ZNC Server";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       serviceConfig = {
         User = cfg.user;
diff --git a/nixos/modules/services/printing/cupsd.nix b/nixos/modules/services/printing/cupsd.nix
index 3a2744303474f..1f044384a5b83 100644
--- a/nixos/modules/services/printing/cupsd.nix
+++ b/nixos/modules/services/printing/cupsd.nix
@@ -4,9 +4,10 @@ with lib;
 
 let
 
-  inherit (pkgs) cups cups-pk-helper cups-filters xdg-utils;
+  inherit (pkgs) cups-pk-helper cups-filters xdg-utils;
 
   cfg = config.services.printing;
+  cups = cfg.package;
 
   avahiEnabled = config.services.avahi.enable;
   polkitEnabled = config.security.polkit.enable;
@@ -140,6 +141,8 @@ in
         '';
       };
 
+      package = lib.mkPackageOption pkgs "cups" {};
+
       stateless = mkOption {
         type = types.bool;
         default = false;
diff --git a/nixos/modules/services/search/hound.nix b/nixos/modules/services/search/hound.nix
index 539a322b431f6..d238b26a226b5 100644
--- a/nixos/modules/services/search/hound.nix
+++ b/nixos/modules/services/search/hound.nix
@@ -3,6 +3,12 @@ with lib;
 let
   cfg = config.services.hound;
 in {
+  imports = [
+    (lib.mkRemovedOptionModule [ "services" "hound" "extraGroups" ] "Use users.users.hound.extraGroups instead")
+  ];
+
+  meta.maintainers = with maintainers; [ SuperSandro2000 ];
+
   options = {
     services.hound = {
       enable = mkOption {
@@ -13,6 +19,8 @@ in {
         '';
       };
 
+      package = mkPackageOptionMD pkgs "hound" { };
+
       user = mkOption {
         default = "hound";
         type = types.str;
@@ -29,27 +37,15 @@ in {
         '';
       };
 
-      extraGroups = mkOption {
-        type = types.listOf types.str;
-        default = [ ];
-        example = [ "dialout" ];
-        description = lib.mdDoc ''
-          List of extra groups that the "hound" user should be a part of.
-        '';
-      };
-
       home = mkOption {
         default = "/var/lib/hound";
         type = types.path;
         description = lib.mdDoc ''
-          The path to use as hound's $HOME. If the default user
-          "hound" is configured then this is the home of the "hound"
-          user.
+          The path to use as hound's $HOME.
+          If the default user "hound" is configured then this is the home of the "hound" user.
         '';
       };
 
-      package = mkPackageOption pkgs "hound" { };
-
       config = mkOption {
         type = types.str;
         description = lib.mdDoc ''
@@ -57,63 +53,62 @@ in {
           should be an absolute path to a writable location on disk.
         '';
         example = literalExpression ''
-          '''
-            {
-              "max-concurrent-indexers" : 2,
-              "dbpath" : "''${services.hound.home}/data",
-              "repos" : {
-                  "nixpkgs": {
-                    "url" : "https://www.github.com/NixOS/nixpkgs.git"
-                  }
-              }
+          {
+            "max-concurrent-indexers" : 2,
+            "repos" : {
+                "nixpkgs": {
+                  "url" : "https://www.github.com/NixOS/nixpkgs.git"
+                }
             }
-          '''
+          }
         '';
       };
 
       listen = mkOption {
         type = types.str;
         default = "0.0.0.0:6080";
-        example = "127.0.0.1:6080 or just :6080";
+        example = ":6080";
         description = lib.mdDoc ''
-          Listen on this IP:port / :port
+          Listen on this [IP]:port
         '';
       };
     };
   };
 
   config = mkIf cfg.enable {
-    users.groups = optionalAttrs (cfg.group == "hound") {
-      hound.gid = config.ids.gids.hound;
+    users.groups = lib.mkIf (cfg.group == "hound") {
+      hound = { };
     };
 
-    users.users = optionalAttrs (cfg.user == "hound") {
+    users.users = lib.mkIf (cfg.user == "hound") {
       hound = {
-        description = "hound code search";
+        description = "Hound code search";
         createHome = true;
-        home = cfg.home;
-        group = cfg.group;
-        extraGroups = cfg.extraGroups;
-        uid = config.ids.uids.hound;
+        isSystemUser = true;
+        inherit (cfg) home group;
       };
     };
 
-    systemd.services.hound = {
+    systemd.services.hound = let
+      configFile = pkgs.writeTextFile {
+        name = "hound.json";
+        text = cfg.config;
+        checkPhase = ''
+          # check if the supplied text is valid json
+          ${lib.getExe pkgs.jq} . $target > /dev/null
+        '';
+      };
+    in {
       description = "Hound Code Search";
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
-
       serviceConfig = {
         User = cfg.user;
         Group = cfg.group;
         WorkingDirectory = cfg.home;
         ExecStartPre = "${pkgs.git}/bin/git config --global --replace-all http.sslCAinfo /etc/ssl/certs/ca-certificates.crt";
-        ExecStart = "${cfg.package}/bin/houndd" +
-                    " -addr ${cfg.listen}" +
-                    " -conf ${pkgs.writeText "hound.json" cfg.config}";
-
+        ExecStart = "${cfg.package}/bin/houndd -addr ${cfg.listen} -conf ${configFile}";
       };
     };
   };
-
 }
diff --git a/nixos/modules/services/security/bitwarden-directory-connector-cli.nix b/nixos/modules/services/security/bitwarden-directory-connector-cli.nix
new file mode 100644
index 0000000000000..18c02e22fd7e6
--- /dev/null
+++ b/nixos/modules/services/security/bitwarden-directory-connector-cli.nix
@@ -0,0 +1,323 @@
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
+with lib; let
+  cfg = config.services.bitwarden-directory-connector-cli;
+in {
+  options.services.bitwarden-directory-connector-cli = {
+    enable = mkEnableOption "Bitwarden Directory Connector";
+
+    package = mkPackageOption pkgs "bitwarden-directory-connector-cli" {};
+
+    domain = mkOption {
+      type = types.str;
+      description = lib.mdDoc "The domain the Bitwarden/Vaultwarden is accessible on.";
+      example = "https://vaultwarden.example.com";
+    };
+
+    user = mkOption {
+      type = types.str;
+      description = lib.mdDoc "User to run the program.";
+      default = "bwdc";
+    };
+
+    interval = mkOption {
+      type = types.str;
+      default = "*:0,15,30,45";
+      description = lib.mdDoc "The interval when to run the connector. This uses systemd's OnCalendar syntax.";
+    };
+
+    ldap = mkOption {
+      description = lib.mdDoc ''
+        Options to configure the LDAP connection.
+        If you used the desktop application to test the configuration, you can find the settings by searching for `ldap` in `~/.config/Bitwarden\ Directory\ Connector/data.json`.
+      '';
+      default = {};
+      type = types.submodule ({
+        config,
+        options,
+        ...
+      }: {
+        freeformType = types.attrsOf (pkgs.formats.json {}).type;
+
+        config.finalJSON = builtins.toJSON (removeAttrs config (filter (x: x == "finalJSON" || ! options.${x}.isDefined or false) (attrNames options)));
+
+        options = {
+          finalJSON = mkOption {
+            type = (pkgs.formats.json {}).type;
+            internal = true;
+            readOnly = true;
+            visible = false;
+          };
+
+          ssl = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc "Whether to use TLS.";
+          };
+          startTls = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc "Whether to use STARTTLS.";
+          };
+
+          hostname = mkOption {
+            type = types.str;
+            description = lib.mdDoc "The host the LDAP is accessible on.";
+            example = "ldap.example.com";
+          };
+
+          port = mkOption {
+            type = types.port;
+            default = 389;
+            description = lib.mdDoc "Port LDAP is accessible on.";
+          };
+
+          ad = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc "Whether the LDAP Server is an Active Directory.";
+          };
+
+          pagedSearch = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc "Whether the LDAP server paginates search results.";
+          };
+
+          rootPath = mkOption {
+            type = types.str;
+            description = lib.mdDoc "Root path for LDAP.";
+            example = "dc=example,dc=com";
+          };
+
+          username = mkOption {
+            type = types.str;
+            description = lib.mdDoc "The user to authenticate as.";
+            example = "cn=admin,dc=example,dc=com";
+          };
+        };
+      });
+    };
+
+    sync = mkOption {
+      description = lib.mdDoc ''
+        Options to configure what gets synced.
+        If you used the desktop application to test the configuration, you can find the settings by searching for `sync` in `~/.config/Bitwarden\ Directory\ Connector/data.json`.
+      '';
+      default = {};
+      type = types.submodule ({
+        config,
+        options,
+        ...
+      }: {
+        freeformType = types.attrsOf (pkgs.formats.json {}).type;
+
+        config.finalJSON = builtins.toJSON (removeAttrs config (filter (x: x == "finalJSON" || ! options.${x}.isDefined or false) (attrNames options)));
+
+        options = {
+          finalJSON = mkOption {
+            type = (pkgs.formats.json {}).type;
+            internal = true;
+            readOnly = true;
+            visible = false;
+          };
+
+          removeDisabled = mkOption {
+            type = types.bool;
+            default = true;
+            description = lib.mdDoc "Remove users from bitwarden groups if no longer in the ldap group.";
+          };
+
+          overwriteExisting = mkOption {
+            type = types.bool;
+            default = false;
+            description =
+              lib.mdDoc "Remove and re-add users/groups, See https://bitwarden.com/help/user-group-filters/#overwriting-syncs for more details.";
+          };
+
+          largeImport = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc "Enable if you are syncing more than 2000 users/groups.";
+          };
+
+          memberAttribute = mkOption {
+            type = types.str;
+            description = lib.mdDoc "Attribute that lists members in a LDAP group.";
+            example = "uniqueMember";
+          };
+
+          creationDateAttribute = mkOption {
+            type = types.str;
+            description = lib.mdDoc "Attribute that lists a user's creation date.";
+            example = "whenCreated";
+          };
+
+          useEmailPrefixSuffix = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc "If a user has no email address, combine a username prefix with a suffix value to form an email.";
+          };
+          emailPrefixAttribute = mkOption {
+            type = types.str;
+            description = lib.mdDoc "The attribute that contains the users username.";
+            example = "accountName";
+          };
+          emailSuffix = mkOption {
+            type = types.str;
+            description = lib.mdDoc "Suffix for the email, normally @example.com.";
+            example = "@example.com";
+          };
+
+          users = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc "Sync users.";
+          };
+          userPath = mkOption {
+            type = types.str;
+            description = lib.mdDoc "User directory, relative to root.";
+            default = "ou=users";
+          };
+          userObjectClass = mkOption {
+            type = types.str;
+            description = lib.mdDoc "Class that users must have.";
+            default = "inetOrgPerson";
+          };
+          userEmailAttribute = mkOption {
+            type = types.str;
+            description = lib.mdDoc "Attribute for a users email.";
+            default = "mail";
+          };
+          userFilter = mkOption {
+            type = types.str;
+            description = lib.mdDoc "LDAP filter for users.";
+            example = "(memberOf=cn=sales,ou=groups,dc=example,dc=com)";
+            default = "";
+          };
+
+          groups = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc "Whether to sync ldap groups into BitWarden.";
+          };
+          groupPath = mkOption {
+            type = types.str;
+            description = lib.mdDoc "Group directory, relative to root.";
+            default = "ou=groups";
+          };
+          groupObjectClass = mkOption {
+            type = types.str;
+            description = lib.mdDoc "A class that groups will have.";
+            default = "groupOfNames";
+          };
+          groupNameAttribute = mkOption {
+            type = types.str;
+            description = lib.mdDoc "Attribute for a name of group.";
+            default = "cn";
+          };
+          groupFilter = mkOption {
+            type = types.str;
+            description = lib.mdDoc "LDAP filter for groups.";
+            example = "(cn=sales)";
+            default = "";
+          };
+        };
+      });
+    };
+
+    secrets = {
+      ldap = mkOption {
+        type = types.str;
+        description = "Path to file that contains LDAP password for user in {option}`ldap.username";
+      };
+
+      bitwarden = {
+        client_path_id = mkOption {
+          type = types.str;
+          description = "Path to file that contains Client ID.";
+        };
+        client_path_secret = mkOption {
+          type = types.str;
+          description = "Path to file that contains Client Secret.";
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups."${cfg.user}" = {};
+    users.users."${cfg.user}" = {
+      isSystemUser = true;
+      group = cfg.user;
+    };
+
+    systemd = {
+      timers.bitwarden-directory-connector-cli = {
+        description = "Sync timer for Bitwarden Directory Connector";
+        wantedBy = ["timers.target"];
+        after = ["network-online.target"];
+        timerConfig = {
+          OnCalendar = cfg.interval;
+          Unit = "bitwarden-directory-connector-cli.service";
+          Persistent = true;
+        };
+      };
+
+      services.bitwarden-directory-connector-cli = {
+        description = "Main process for Bitwarden Directory Connector";
+        path = [pkgs.jq];
+
+        environment = {
+          BITWARDENCLI_CONNECTOR_APPDATA_DIR = "/tmp";
+          BITWARDENCLI_CONNECTOR_PLAINTEXT_SECRETS = "true";
+        };
+
+        # preStart is a unit-level option (it runs as ExecStartPre), not a serviceConfig key
+        preStart = ''
+            set -eo pipefail
+
+            # create the config file
+            ${lib.getExe cfg.package} data-file
+            touch /tmp/data.json.tmp
+            chmod 600 /tmp/data.json{,.tmp}
+
+            ${lib.getExe cfg.package} config server ${cfg.domain}
+
+            # now login to set credentials
+            export BW_CLIENTID="$(< ${escapeShellArg cfg.secrets.bitwarden.client_path_id})"
+            export BW_CLIENTSECRET="$(< ${escapeShellArg cfg.secrets.bitwarden.client_path_secret})"
+            ${lib.getExe cfg.package} login
+
+            jq '.authenticatedAccounts[0] as $account
+              | .[$account].directoryConfigurations.ldap |= $ldap_data
+              | .[$account].directorySettings.organizationId |= $orgID
+              | .[$account].directorySettings.sync |= $sync_data' \
+              --argjson ldap_data ${escapeShellArg cfg.ldap.finalJSON} \
+              --arg orgID "''${BW_CLIENTID//organization.}" \
+              --argjson sync_data ${escapeShellArg cfg.sync.finalJSON} \
+              /tmp/data.json \
+              > /tmp/data.json.tmp
+
+            mv -f /tmp/data.json.tmp /tmp/data.json
+
+            # final config
+            ${lib.getExe cfg.package} config directory 0
+            ${lib.getExe cfg.package} config ldap.password --secretfile ${cfg.secrets.ldap}
+        '';
+        serviceConfig = {
+          Type = "oneshot";
+          User = cfg.user;
+          PrivateTmp = true;
+          ExecStart = "${lib.getExe cfg.package} sync";
+        };
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [Silver-Golden];
+}
diff --git a/nixos/modules/services/security/certmgr.nix b/nixos/modules/services/security/certmgr.nix
index db80e943973dc..02cb7afe87bad 100644
--- a/nixos/modules/services/security/certmgr.nix
+++ b/nixos/modules/services/security/certmgr.nix
@@ -182,6 +182,7 @@ in
     systemd.services.certmgr = {
       description = "certmgr";
       path = mkIf (cfg.svcManager == "command") [ pkgs.bash ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       inherit preStart;
diff --git a/nixos/modules/services/security/clamav.nix b/nixos/modules/services/security/clamav.nix
index 72a195d3a04ed..4480c0cae60c9 100644
--- a/nixos/modules/services/security/clamav.nix
+++ b/nixos/modules/services/security/clamav.nix
@@ -3,7 +3,6 @@ with lib;
 let
   clamavUser = "clamav";
   stateDir = "/var/lib/clamav";
-  runDir = "/run/clamav";
   clamavGroup = clamavUser;
   cfg = config.services.clamav;
   pkg = pkgs.clamav;
@@ -99,6 +98,29 @@ in
           '';
         };
       };
+
+      scanner = {
+        enable = mkEnableOption (lib.mdDoc "ClamAV scanner");
+
+        interval = mkOption {
+          type = types.str;
+          default = "*-*-* 04:00:00";
+          description = lib.mdDoc ''
+            How often clamdscan is invoked. See systemd.time(7) for more
+            information about the format.
+            By default this runs using at most 10 cores; be sure to run it at a time of low traffic.
+          '';
+        };
+
+        scanDirectories = mkOption {
+          type = with types; listOf str;
+          default = [ "/home" "/var/lib" "/tmp" "/etc" "/var/tmp" ];
+          description = lib.mdDoc ''
+            List of directories to scan.
+            The default covers the locations on NixOS that typically hold user or mutable data. Feel free to contribute a PR to add to the default if you see something missing.
+          '';
+        };
+      };
     };
   };
 
@@ -117,9 +139,8 @@ in
 
     services.clamav.daemon.settings = {
       DatabaseDirectory = stateDir;
-      LocalSocket = "${runDir}/clamd.ctl";
-      PidFile = "${runDir}/clamd.pid";
-      TemporaryDirectory = "/tmp";
+      LocalSocket = "/run/clamav/clamd.ctl";
+      PidFile = "/run/clamav/clamd.pid";
       User = "clamav";
       Foreground = true;
     };
@@ -175,6 +196,7 @@ in
     systemd.services.clamav-freshclam = mkIf cfg.updater.enable {
       description = "ClamAV virus database updater (freshclam)";
       restartTriggers = [ freshclamConfigFile ];
+      requires = [ "network-online.target" ];
       after = [ "network-online.target" ];
 
       serviceConfig = {
@@ -182,7 +204,6 @@ in
         ExecStart = "${pkg}/bin/freshclam";
         SuccessExitStatus = "1"; # if databases are up to date
         StateDirectory = "clamav";
-        RuntimeDirectory = "clamav";
         User = clamavUser;
         Group = clamavGroup;
         PrivateTmp = "yes";
@@ -204,7 +225,6 @@ in
       serviceConfig = {
         Type = "oneshot";
         StateDirectory = "clamav";
-        RuntimeDirectory = "clamav";
         User = clamavUser;
         Group = clamavGroup;
         PrivateTmp = "yes";
@@ -224,18 +244,38 @@ in
     systemd.services.clamav-fangfrisch = mkIf cfg.fangfrisch.enable {
       description = "ClamAV virus database updater (fangfrisch)";
       restartTriggers = [ fangfrischConfigFile ];
+      requires = [ "network-online.target" ];
       after = [ "network-online.target" "clamav-fangfrisch-init.service" ];
 
       serviceConfig = {
         Type = "oneshot";
         ExecStart = "${pkgs.fangfrisch}/bin/fangfrisch --conf ${fangfrischConfigFile} refresh";
         StateDirectory = "clamav";
-        RuntimeDirectory = "clamav";
         User = clamavUser;
         Group = clamavGroup;
         PrivateTmp = "yes";
         PrivateDevices = "yes";
       };
     };
+
+    systemd.timers.clamdscan = mkIf cfg.scanner.enable {
+      description = "Timer for ClamAV virus scanner";
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnCalendar = cfg.scanner.interval;
+        Unit = "clamdscan.service";
+      };
+    };
+
+    systemd.services.clamdscan = mkIf cfg.scanner.enable {
+      description = "ClamAV virus scanner";
+      after = optionals cfg.updater.enable [ "clamav-freshclam.service" ];
+      wants = optionals cfg.updater.enable [ "clamav-freshclam.service" ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${pkg}/bin/clamdscan --multiscan --fdpass --infected --allmatch ${lib.concatStringsSep " " cfg.scanner.scanDirectories}";
+      };
+    };
   };
 }
diff --git a/nixos/modules/services/security/munge.nix b/nixos/modules/services/security/munge.nix
index 4d6fe33f697b8..9d306c205f946 100644
--- a/nixos/modules/services/security/munge.nix
+++ b/nixos/modules/services/security/munge.nix
@@ -45,19 +45,25 @@ in
 
     systemd.services.munged = {
       wantedBy = [ "multi-user.target" ];
-      after = [ "network.target" ];
+      wants = [
+        "network-online.target"
+        "time-sync.target"
+      ];
+      after = [
+        "network-online.target"
+        "time-sync.target"
+      ];
 
       path = [ pkgs.munge pkgs.coreutils ];
 
       serviceConfig = {
         ExecStartPre = "+${pkgs.coreutils}/bin/chmod 0400 ${cfg.password}";
-        ExecStart = "${pkgs.munge}/bin/munged --syslog --key-file ${cfg.password}";
-        PIDFile = "/run/munge/munged.pid";
-        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        ExecStart = "${pkgs.munge}/bin/munged --foreground --key-file ${cfg.password}";
         User = "munge";
         Group = "munge";
         StateDirectory = "munge";
         StateDirectoryMode = "0711";
+        Restart = "on-failure";
         RuntimeDirectory = "munge";
       };
 
diff --git a/nixos/modules/services/security/oauth2_proxy.nix b/nixos/modules/services/security/oauth2_proxy.nix
index 78916c907279a..d1dc37d549d2d 100644
--- a/nixos/modules/services/security/oauth2_proxy.nix
+++ b/nixos/modules/services/security/oauth2_proxy.nix
@@ -572,6 +572,7 @@ in
       description = "OAuth2 Proxy";
       path = [ cfg.package ];
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
 
       serviceConfig = {
diff --git a/nixos/modules/services/security/shibboleth-sp.nix b/nixos/modules/services/security/shibboleth-sp.nix
index e7897c3324cf6..975de1efa2f2a 100644
--- a/nixos/modules/services/security/shibboleth-sp.nix
+++ b/nixos/modules/services/security/shibboleth-sp.nix
@@ -1,44 +1,43 @@
-{pkgs, config, lib, ...}:
+{ config, lib, pkgs, ... }:
 
-with lib;
 let
   cfg = config.services.shibboleth-sp;
 in {
   options = {
     services.shibboleth-sp = {
-      enable = mkOption {
-        type = types.bool;
+      enable = lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc "Whether to enable the shibboleth service";
       };
 
-      configFile = mkOption {
-        type = types.path;
-        example = literalExpression ''"''${pkgs.shibboleth-sp}/etc/shibboleth/shibboleth2.xml"'';
+      configFile = lib.mkOption {
+        type = lib.types.path;
+        example = lib.literalExpression ''"''${pkgs.shibboleth-sp}/etc/shibboleth/shibboleth2.xml"'';
         description = lib.mdDoc "Path to shibboleth config file";
       };
 
-      fastcgi.enable = mkOption {
-        type = types.bool;
+      fastcgi.enable = lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc "Whether to include the shibauthorizer and shibresponder FastCGI processes";
       };
 
-      fastcgi.shibAuthorizerPort = mkOption {
-        type = types.int;
+      fastcgi.shibAuthorizerPort = lib.mkOption {
+        type = lib.types.int;
         default = 9100;
         description = lib.mdDoc "Port for shibauthorizer FastCGI process to bind to";
       };
 
-      fastcgi.shibResponderPort = mkOption {
-        type = types.int;
+      fastcgi.shibResponderPort = lib.mkOption {
+        type = lib.types.int;
         default = 9101;
         description = lib.mdDoc "Port for shibauthorizer FastCGI process to bind to";
       };
     };
   };
 
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
     systemd.services.shibboleth-sp = {
       description = "Provides SSO and federation for web applications";
       after       = lib.optionals cfg.fastcgi.enable [ "shibresponder.service" "shibauthorizer.service" ];
@@ -48,7 +47,7 @@ in {
       };
     };
 
-    systemd.services.shibresponder = mkIf cfg.fastcgi.enable {
+    systemd.services.shibresponder = lib.mkIf cfg.fastcgi.enable {
       description = "Provides SSO through Shibboleth via FastCGI";
       after       = [ "network.target" ];
       wantedBy    = [ "multi-user.target" ];
@@ -59,7 +58,7 @@ in {
       };
     };
 
-    systemd.services.shibauthorizer = mkIf cfg.fastcgi.enable {
+    systemd.services.shibauthorizer = lib.mkIf cfg.fastcgi.enable {
       description = "Provides SSO through Shibboleth via FastCGI";
       after       = [ "network.target" ];
       wantedBy    = [ "multi-user.target" ];
@@ -71,5 +70,5 @@ in {
     };
   };
 
-  meta.maintainers = with lib.maintainers; [ jammerful ];
+  meta.maintainers = with lib.maintainers; [ ];
 }
diff --git a/nixos/modules/services/security/tor.nix b/nixos/modules/services/security/tor.nix
index 4ff941251c99b..dea20dec1ab47 100644
--- a/nixos/modules/services/security/tor.nix
+++ b/nixos/modules/services/security/tor.nix
@@ -854,7 +854,7 @@ in
           BridgeRelay = true;
           ExtORPort.port = mkDefault "auto";
           ServerTransportPlugin.transports = mkDefault ["obfs4"];
-          ServerTransportPlugin.exec = mkDefault "${pkgs.obfs4}/bin/obfs4proxy managed";
+          ServerTransportPlugin.exec = mkDefault "${lib.getExe pkgs.obfs4} managed";
         } // optionalAttrs (cfg.relay.role == "private-bridge") {
           ExtraInfoStatistics = false;
           PublishServerDescriptor = false;
diff --git a/nixos/modules/services/security/vaultwarden/backup.sh b/nixos/modules/services/security/vaultwarden/backup.sh
index 2a3de0ab1deeb..7668da5bc88f3 100644
--- a/nixos/modules/services/security/vaultwarden/backup.sh
+++ b/nixos/modules/services/security/vaultwarden/backup.sh
@@ -1,8 +1,8 @@
 #!/usr/bin/env bash
 
 # Based on: https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault
-if ! mkdir -p "$BACKUP_FOLDER"; then
-  echo "Could not create backup folder '$BACKUP_FOLDER'" >&2
+if [ ! -d "$BACKUP_FOLDER" ]; then
+  echo "Backup folder '$BACKUP_FOLDER' does not exist" >&2
   exit 1
 fi
 
diff --git a/nixos/modules/services/security/vaultwarden/default.nix b/nixos/modules/services/security/vaultwarden/default.nix
index 14bbfa95a9ca2..470db735bf649 100644
--- a/nixos/modules/services/security/vaultwarden/default.nix
+++ b/nixos/modules/services/security/vaultwarden/default.nix
@@ -55,6 +55,7 @@ in {
       description = lib.mdDoc ''
         The directory under which vaultwarden will backup its persistent data.
       '';
+      example = "/var/backup/vaultwarden";
     };
 
     config = mkOption {
@@ -230,6 +231,13 @@ in {
       };
       wantedBy = [ "multi-user.target" ];
     };
+
+    systemd.tmpfiles.settings = mkIf (cfg.backupDir != null) {
+      "10-vaultwarden".${cfg.backupDir}.d = {
+        inherit user group;
+        mode = "0770";
+      };
+    };
   };
 
   # uses attributes of the linked package
diff --git a/nixos/modules/services/system/cachix-agent/default.nix b/nixos/modules/services/system/cachix-agent/default.nix
index 196d3291d5555..f8020fe970f1b 100644
--- a/nixos/modules/services/system/cachix-agent/default.nix
+++ b/nixos/modules/services/system/cachix-agent/default.nix
@@ -49,6 +49,7 @@ in {
   config = mkIf cfg.enable {
     systemd.services.cachix-agent = {
       description = "Cachix Deploy Agent";
+      wants = [ "network-online.target" ];
       after = ["network-online.target"];
       path = [ config.nix.package ];
       wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/services/system/cachix-watch-store.nix b/nixos/modules/services/system/cachix-watch-store.nix
index 992a59cbc075b..d48af29465aa5 100644
--- a/nixos/modules/services/system/cachix-watch-store.nix
+++ b/nixos/modules/services/system/cachix-watch-store.nix
@@ -23,6 +23,14 @@ in
       '';
     };
 
+    signingKeyFile = mkOption {
+      type = types.nullOr types.path;
+      description = lib.mdDoc ''
+        Optional file containing a self-managed signing key to sign uploaded store paths.
+      '';
+      default = null;
+    };
+
     compressionLevel = mkOption {
       type = types.nullOr types.int;
       description = lib.mdDoc "The compression level for ZSTD compression (between 0 and 16)";
@@ -53,6 +61,7 @@ in
   config = mkIf cfg.enable {
     systemd.services.cachix-watch-store-agent = {
       description = "Cachix watch store Agent";
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       path = [ config.nix.package ];
       wantedBy = [ "multi-user.target" ];
@@ -69,7 +78,8 @@ in
         DynamicUser = true;
         LoadCredential = [
           "cachix-token:${toString cfg.cachixTokenFile}"
-        ];
+        ]
+        ++ lib.optional (cfg.signingKeyFile != null) "signing-key:${toString cfg.signingKeyFile}";
       };
       script =
         let
@@ -80,6 +90,7 @@ in
         in
         ''
           export CACHIX_AUTH_TOKEN="$(<"$CREDENTIALS_DIRECTORY/cachix-token")"
+          ${lib.optionalString (cfg.signingKeyFile != null) ''export CACHIX_SIGNING_KEY="$(<"$CREDENTIALS_DIRECTORY/signing-key")"''}
           ${lib.escapeShellArgs command}
         '';
     };
diff --git a/nixos/modules/services/system/cloud-init.nix b/nixos/modules/services/system/cloud-init.nix
index d782bb1a36668..00ae77be4271c 100644
--- a/nixos/modules/services/system/cloud-init.nix
+++ b/nixos/modules/services/system/cloud-init.nix
@@ -164,7 +164,10 @@ in
     systemd.services.cloud-init-local = {
       description = "Initial cloud-init job (pre-networking)";
       wantedBy = [ "multi-user.target" ];
-      before = [ "systemd-networkd.service" ];
+      # In certain environments (AWS for example), cloud-init-local will
+      # first configure an IP through DHCP, and later delete it.
+      # This can cause race conditions with anything else trying to set IP through DHCP.
+      before = [ "systemd-networkd.service" "dhcpcd.service" ];
       path = path;
       serviceConfig = {
         Type = "oneshot";
diff --git a/nixos/modules/services/system/dbus.nix b/nixos/modules/services/system/dbus.nix
index 8d5b25e617625..b47ebc92f93a8 100644
--- a/nixos/modules/services/system/dbus.nix
+++ b/nixos/modules/services/system/dbus.nix
@@ -184,6 +184,11 @@ in
         aliases = [
           "dbus.service"
         ];
+        unitConfig = {
+          # We get errors when reloading the dbus-broker service
+          # if /tmp got remounted after this service started
+          RequiresMountsFor = [ "/tmp" ];
+        };
         # Don't restart dbus. Bad things tend to happen if we do.
         reloadIfChanged = true;
         restartTriggers = [
diff --git a/nixos/modules/services/system/kerberos/default.nix b/nixos/modules/services/system/kerberos/default.nix
index 4ed48e463741a..486d4b49c195a 100644
--- a/nixos/modules/services/system/kerberos/default.nix
+++ b/nixos/modules/services/system/kerberos/default.nix
@@ -3,7 +3,7 @@
 let
   inherit (lib) mkOption mkIf types length attrNames;
   cfg = config.services.kerberos_server;
-  kerberos = config.krb5.kerberos;
+  kerberos = config.security.krb5.package;
 
   aclEntry = {
     options = {
diff --git a/nixos/modules/services/system/kerberos/heimdal.nix b/nixos/modules/services/system/kerberos/heimdal.nix
index 837c59caa5620..ecafc92766704 100644
--- a/nixos/modules/services/system/kerberos/heimdal.nix
+++ b/nixos/modules/services/system/kerberos/heimdal.nix
@@ -4,7 +4,7 @@ let
   inherit (lib) mkIf concatStringsSep concatMapStrings toList mapAttrs
     mapAttrsToList;
   cfg = config.services.kerberos_server;
-  kerberos = config.krb5.kerberos;
+  kerberos = config.security.krb5.package;
   stateDir = "/var/heimdal";
   aclFiles = mapAttrs
     (name: {acl, ...}: pkgs.writeText "${name}.acl" (concatMapStrings ((
@@ -35,7 +35,7 @@ in
         mkdir -m 0755 -p ${stateDir}
       '';
       serviceConfig.ExecStart =
-        "${kerberos}/libexec/heimdal/kadmind --config-file=/etc/heimdal-kdc/kdc.conf";
+        "${kerberos}/libexec/kadmind --config-file=/etc/heimdal-kdc/kdc.conf";
       restartTriggers = [ kdcConfFile ];
     };
 
@@ -46,7 +46,7 @@ in
         mkdir -m 0755 -p ${stateDir}
       '';
       serviceConfig.ExecStart =
-        "${kerberos}/libexec/heimdal/kdc --config-file=/etc/heimdal-kdc/kdc.conf";
+        "${kerberos}/libexec/kdc --config-file=/etc/heimdal-kdc/kdc.conf";
       restartTriggers = [ kdcConfFile ];
     };
 
@@ -56,7 +56,7 @@ in
       preStart = ''
         mkdir -m 0755 -p ${stateDir}
       '';
-      serviceConfig.ExecStart = "${kerberos}/libexec/heimdal/kpasswdd";
+      serviceConfig.ExecStart = "${kerberos}/libexec/kpasswdd";
       restartTriggers = [ kdcConfFile ];
     };
 
diff --git a/nixos/modules/services/system/kerberos/mit.nix b/nixos/modules/services/system/kerberos/mit.nix
index 112000140453f..a654bd1fe7e1b 100644
--- a/nixos/modules/services/system/kerberos/mit.nix
+++ b/nixos/modules/services/system/kerberos/mit.nix
@@ -4,7 +4,7 @@ let
   inherit (lib) mkIf concatStrings concatStringsSep concatMapStrings toList
     mapAttrs mapAttrsToList;
   cfg = config.services.kerberos_server;
-  kerberos = config.krb5.kerberos;
+  kerberos = config.security.krb5.package;
   stateDir = "/var/lib/krb5kdc";
   PIDFile = "/run/kdc.pid";
   aclMap = {
diff --git a/nixos/modules/services/system/zram-generator.nix b/nixos/modules/services/system/zram-generator.nix
index 10b9992375cc1..429531e5743d8 100644
--- a/nixos/modules/services/system/zram-generator.nix
+++ b/nixos/modules/services/system/zram-generator.nix
@@ -27,7 +27,7 @@ in
 
   config = lib.mkIf cfg.enable {
     system.requiredKernelConfig = with config.lib.kernelConfig; [
-      (isModule "ZRAM")
+      (isEnabled "ZRAM")
     ];
 
     systemd.packages = [ cfg.package ];
diff --git a/nixos/modules/services/torrent/transmission.nix b/nixos/modules/services/torrent/transmission.nix
index 0cd24fb03a7bd..5dd02eb331633 100644
--- a/nixos/modules/services/torrent/transmission.nix
+++ b/nixos/modules/services/torrent/transmission.nix
@@ -251,6 +251,20 @@ in
           For instance, SSH sessions may time out more easily.
         '';
       };
+
+      webHome = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "pkgs.flood-for-transmission";
+        description = lib.mdDoc ''
+          If not `null`, sets the value of the `TRANSMISSION_WEB_HOME`
+          environment variable used by the service. Useful for overriding
+          the web interface files without having to override the transmission
+          package and rebuild it locally. Use this if you want to use an
+          alternative web interface, such as
+          `pkgs.flood-for-transmission`.
+        '';
+      };
     };
   };
 
@@ -280,6 +294,7 @@ in
       requires = optional apparmor.enable "apparmor.service";
       wantedBy = [ "multi-user.target" ];
       environment.CURL_CA_BUNDLE = etc."ssl/certs/ca-certificates.crt".source;
+      environment.TRANSMISSION_WEB_HOME = lib.mkIf (cfg.webHome != null) cfg.webHome;
 
       serviceConfig = {
         # Use "+" because credentialsFile may not be accessible to User= or Group=.
@@ -314,6 +329,9 @@ in
         BindPaths =
           [ "${cfg.home}/${settingsDir}"
             cfg.settings.download-dir
+            # Transmission may need to read in the host's /run (e.g. /run/systemd/resolve)
+            # or write in its private /run (e.g. /run/host).
+            "/run"
           ] ++
           optional cfg.settings.incomplete-dir-enabled
             cfg.settings.incomplete-dir ++
@@ -324,7 +342,6 @@ in
           # an AppArmor profile is provided to get a confinement based upon paths and rights.
           builtins.storeDir
           "/etc"
-          "/run"
           ] ++
           optional (cfg.settings.script-torrent-done-enabled &&
                     cfg.settings.script-torrent-done-filename != null)
@@ -333,10 +350,10 @@ in
             cfg.settings.watch-dir;
         StateDirectory = [
           "transmission"
-          "transmission/.config/transmission-daemon"
-          "transmission/.incomplete"
-          "transmission/Downloads"
-          "transmission/watch-dir"
+          "transmission/${settingsDir}"
+          "transmission/${incompleteDir}"
+          "transmission/${downloadsDir}"
+          "transmission/${watchDir}"
         ];
         StateDirectoryMode = mkDefault 750;
         # The following options are only for optimizing:
@@ -349,10 +366,10 @@ in
         MemoryDenyWriteExecute = true;
         NoNewPrivileges = true;
         PrivateDevices = true;
-        PrivateMounts = true;
+        PrivateMounts = mkDefault true;
         PrivateNetwork = mkDefault false;
         PrivateTmp = true;
-        PrivateUsers = true;
+        PrivateUsers = mkDefault true;
         ProtectClock = true;
         ProtectControlGroups = true;
         # ProtectHome=true would not allow BindPaths= to work across /home,
@@ -434,7 +451,7 @@ in
       # at least up to the values hardcoded here:
       (mkIf cfg.settings.utp-enabled {
         "net.core.rmem_max" = mkDefault 4194304; # 4MB
-        "net.core.wmem_max" = mkDefault "1048576"; # 1MB
+        "net.core.wmem_max" = mkDefault 1048576; # 1MB
       })
       (mkIf cfg.performanceNetParameters {
         # Increase the number of available source (local) TCP and UDP ports to 49151.
@@ -490,6 +507,10 @@ in
         # https://gitlab.com/apparmor/apparmor/-/wikis/AppArmorStacking#seccomp-and-no_new_privs
         px ${cfg.settings.script-torrent-done-filename} -> &@{dirs},
       ''}
+
+      ${optionalString (cfg.webHome != null) ''
+        r ${cfg.webHome}/**,
+      ''}
     '';
   };
 
diff --git a/nixos/modules/services/video/frigate.nix b/nixos/modules/services/video/frigate.nix
index 146e968780c38..b7945282ba09b 100644
--- a/nixos/modules/services/video/frigate.nix
+++ b/nixos/modules/services/video/frigate.nix
@@ -353,6 +353,7 @@ in
       ];
       serviceConfig = {
         ExecStart = "${cfg.package.python.interpreter} -m frigate";
+        Restart = "on-failure";
 
         User = "frigate";
         Group = "frigate";
diff --git a/nixos/modules/services/video/go2rtc/default.nix b/nixos/modules/services/video/go2rtc/default.nix
index 13851fa0306f6..9dddbb60baa80 100644
--- a/nixos/modules/services/video/go2rtc/default.nix
+++ b/nixos/modules/services/video/go2rtc/default.nix
@@ -94,6 +94,7 @@ in
 
   config = lib.mkIf cfg.enable {
     systemd.services.go2rtc = {
+      wants = [ "network-online.target" ];
       after = [
         "network-online.target"
       ];
diff --git a/nixos/modules/services/web-apps/akkoma.nix b/nixos/modules/services/web-apps/akkoma.nix
index 8980556ab0142..4cd9e26643787 100644
--- a/nixos/modules/services/web-apps/akkoma.nix
+++ b/nixos/modules/services/web-apps/akkoma.nix
@@ -974,7 +974,7 @@ in {
       # This service depends on network-online.target and is sequenced after
       # it because it requires access to the Internet to function properly.
       bindsTo = [ "akkoma-config.service" ];
-      wants = [ "network-online.service" ];
+      wants = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       after = [
         "akkoma-config.target"
diff --git a/nixos/modules/services/web-apps/alps.nix b/nixos/modules/services/web-apps/alps.nix
index 05fb676102df4..81c6b8ad30b5f 100644
--- a/nixos/modules/services/web-apps/alps.nix
+++ b/nixos/modules/services/web-apps/alps.nix
@@ -94,6 +94,7 @@ in {
       description = "alps is a simple and extensible webmail.";
       documentation = [ "https://git.sr.ht/~migadu/alps" ];
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network.target" "network-online.target" ];
 
       serviceConfig = {
diff --git a/nixos/modules/services/web-apps/c2fmzq-server.nix b/nixos/modules/services/web-apps/c2fmzq-server.nix
index 2749c2a5a87aa..dee131182de16 100644
--- a/nixos/modules/services/web-apps/c2fmzq-server.nix
+++ b/nixos/modules/services/web-apps/c2fmzq-server.nix
@@ -6,8 +6,12 @@ let
   cfg = config.services.c2fmzq-server;
 
   argsFormat = {
-    type = with lib.types; nullOr (oneOf [ bool int str ]);
-    generate = lib.cli.toGNUCommandLineShell { };
+    type = with lib.types; attrsOf (nullOr (oneOf [ bool int str ]));
+    generate = lib.cli.toGNUCommandLineShell {
+      mkBool = k: v: [
+        "--${k}=${if v then "true" else "false"}"
+      ];
+    };
   };
 in {
   options.services.c2fmzq-server = {
@@ -76,6 +80,7 @@ in {
       description = "c2FmZQ-server";
       documentation = [ "https://github.com/c2FmZQ/c2FmZQ/blob/main/README.md" ];
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network.target" "network-online.target" ];
 
       serviceConfig = {
diff --git a/nixos/modules/services/web-apps/code-server.nix b/nixos/modules/services/web-apps/code-server.nix
new file mode 100644
index 0000000000000..d087deb7848d0
--- /dev/null
+++ b/nixos/modules/services/web-apps/code-server.nix
@@ -0,0 +1,260 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.code-server;
+  defaultUser = "code-server";
+  defaultGroup = defaultUser;
+in {
+  options = {
+    services.code-server = {
+      enable = lib.mkEnableOption (lib.mdDoc "code-server");
+
+      package = lib.mkPackageOptionMD pkgs "code-server" {
+        example = ''
+          pkgs.vscode-with-extensions.override {
+            vscode = pkgs.code-server;
+            vscodeExtensions = with pkgs.vscode-extensions; [
+              bbenoist.nix
+              dracula-theme.theme-dracula
+            ];
+          }
+        '';
+      };
+
+      extraPackages = lib.mkOption {
+        default = [ ];
+        description = lib.mdDoc ''
+          Additional packages to add to the code-server {env}`PATH`.
+        '';
+        example = lib.literalExpression "[ pkgs.go ]";
+        type = lib.types.listOf lib.types.package;
+      };
+
+      extraEnvironment = lib.mkOption {
+        type = lib.types.attrsOf lib.types.str;
+        description = lib.mdDoc ''
+          Additional environment variables to pass to code-server.
+        '';
+        default = { };
+        example = { PKG_CONFIG_PATH = "/run/current-system/sw/lib/pkgconfig"; };
+      };
+
+      extraArguments = lib.mkOption {
+        default = [ ];
+        description = lib.mdDoc ''
+          Additional arguments to pass to code-server.
+        '';
+        example = lib.literalExpression ''[ "--log=info" ]'';
+        type = lib.types.listOf lib.types.str;
+      };
+
+      host = lib.mkOption {
+        default = "localhost";
+        description = lib.mdDoc ''
+          The host name or IP address the server should listen to.
+        '';
+        type = lib.types.str;
+      };
+
+      port = lib.mkOption {
+        default = 4444;
+        description = lib.mdDoc ''
+          The port the server should listen to.
+        '';
+        type = lib.types.port;
+      };
+
+      auth = lib.mkOption {
+        default = "password";
+        description = lib.mdDoc ''
+          The type of authentication to use.
+        '';
+        type = lib.types.enum [ "none" "password" ];
+      };
+
+      hashedPassword = lib.mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Create the password with: `echo -n 'thisismypassword' | npx argon2-cli -e`.
+        '';
+        type = lib.types.str;
+      };
+
+      user = lib.mkOption {
+        default = defaultUser;
+        example = "yourUser";
+        description = lib.mdDoc ''
+          The user to run code-server as.
+          By default, a user named `${defaultUser}` will be created.
+        '';
+        type = lib.types.str;
+      };
+
+      group = lib.mkOption {
+        default = defaultGroup;
+        example = "yourGroup";
+        description = lib.mdDoc ''
+          The group to run code-server under.
+          By default, a group named `${defaultGroup}` will be created.
+        '';
+        type = lib.types.str;
+      };
+
+      extraGroups = lib.mkOption {
+        default = [ ];
+        description = lib.mdDoc ''
+          A list of additional groups for the `${defaultUser}` user.
+        '';
+        example = [ "docker" ];
+        type = lib.types.listOf lib.types.str;
+      };
+
+      socket = lib.mkOption {
+        default = null;
+        example = "/run/code-server/socket";
+        description = lib.mdDoc ''
+          Path to a socket (bind-addr will be ignored).
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+      socketMode = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          File mode of the socket.
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+      userDataDir = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          Path to the user data directory.
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+      extensionsDir = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          Path to the extensions directory.
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+      proxyDomain = lib.mkOption {
+        default = null;
+        example = "code-server.lan";
+        description = lib.mdDoc ''
+          Domain used for proxying ports.
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+      disableTelemetry = lib.mkOption {
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Disable telemetry.
+        '';
+        type = lib.types.bool;
+      };
+
+      disableUpdateCheck = lib.mkOption {
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Disable update check.
+          Without this flag, code-server checks against the latest GitHub release every 6 hours and
+          then notifies you once a week that a new release is available.
+        '';
+        type = lib.types.bool;
+      };
+
+      disableFileDownloads = lib.mkOption {
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Disable file downloads from Code.
+        '';
+        type = lib.types.bool;
+      };
+
+      disableWorkspaceTrust = lib.mkOption {
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Disable Workspace Trust feature.
+        '';
+        type = lib.types.bool;
+      };
+
+      disableGettingStartedOverride = lib.mkOption {
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Disable the coder/coder override in the Help: Getting Started page.
+        '';
+        type = lib.types.bool;
+      };
+
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.code-server = {
+      description = "Code server";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      path = cfg.extraPackages;
+      environment = {
+        HASHED_PASSWORD = cfg.hashedPassword;
+      } // cfg.extraEnvironment;
+      serviceConfig = {
+        ExecStart = ''
+          ${lib.getExe cfg.package} \
+            --auth=${cfg.auth} \
+            --bind-addr=${cfg.host}:${toString cfg.port} \
+          '' + lib.optionalString (cfg.socket != null) ''
+            --socket=${cfg.socket} \
+          '' + lib.optionalString (cfg.userDataDir != null) ''
+            --user-data-dir=${cfg.userDataDir} \
+          '' + lib.optionalString (cfg.extensionsDir != null) ''
+            --extensions-dir=${cfg.extensionsDir} \
+          '' + lib.optionalString (cfg.disableTelemetry == true) ''
+            --disable-telemetry \
+          '' + lib.optionalString (cfg.disableUpdateCheck == true) ''
+            --disable-update-check \
+          '' + lib.optionalString (cfg.disableFileDownloads == true) ''
+            --disable-file-downloads \
+          '' + lib.optionalString (cfg.disableWorkspaceTrust == true) ''
+            --disable-workspace-trust \
+          '' + lib.optionalString (cfg.disableGettingStartedOverride == true) ''
+            --disable-getting-started-override \
+          '' + lib.escapeShellArgs cfg.extraArguments;
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        RuntimeDirectory = cfg.user;
+        User = cfg.user;
+        Group = cfg.group;
+        Restart = "on-failure";
+      };
+    };
+
+    users.users."${cfg.user}" = lib.mkMerge [
+      (lib.mkIf (cfg.user == defaultUser) {
+        isNormalUser = true;
+        description = "code-server user";
+        inherit (cfg) group;
+      })
+      {
+        packages = cfg.extraPackages;
+        inherit (cfg) extraGroups;
+      }
+    ];
+
+    users.groups."${defaultGroup}" = lib.mkIf (cfg.group == defaultGroup) { };
+  };
+
+  meta.maintainers = [ lib.maintainers.stackshadow ];
+}
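
For readers of this new module, a minimal usage sketch assuming the options introduced above; the host, hash, and extra package are placeholders, not values from this change:

    # illustrative NixOS configuration fragment
    { pkgs, ... }:
    {
      services.code-server = {
        enable = true;
        host = "0.0.0.0";                  # listen on all interfaces instead of the localhost default
        port = 4444;                       # the module default, shown explicitly
        auth = "password";
        # placeholder hash; generate with: echo -n 'thisismypassword' | npx argon2-cli -e
        hashedPassword = "$argon2i$v=19$m=4096,t=3,p=1$placeholder";
        extraPackages = [ pkgs.git ];      # extra tools on code-server's PATH
        disableTelemetry = true;
      };
    }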
diff --git a/nixos/modules/services/web-apps/dokuwiki.nix b/nixos/modules/services/web-apps/dokuwiki.nix
index 1df1cbf9f0e18..256ab3229ea6c 100644
--- a/nixos/modules/services/web-apps/dokuwiki.nix
+++ b/nixos/modules/services/web-apps/dokuwiki.nix
@@ -122,62 +122,8 @@ let
     };
   };
 
-  # The current implementations of `doRename`,  `mkRenamedOptionModule` do not provide the full options path when used with submodules.
-  # They would only show `settings.useacl' instead of `services.dokuwiki.sites."site1.local".settings.useacl'
-  # The partial re-implementation of these functions is done to help users in debugging by showing the full path.
-  mkRenamed = from: to: { config, options, name, ... }: let
-    pathPrefix = [ "services" "dokuwiki" "sites" name ];
-    fromPath = pathPrefix  ++ from;
-    fromOpt = getAttrFromPath from options;
-    toOp = getAttrsFromPath to config;
-    toPath = pathPrefix ++ to;
-  in {
-    options = setAttrByPath from (mkOption {
-      visible = false;
-      description = lib.mdDoc "Alias of {option}${showOption toPath}";
-      apply = x: builtins.trace "Obsolete option `${showOption fromPath}' is used. It was renamed to ${showOption toPath}" toOp;
-    });
-    config = mkMerge [
-      {
-        warnings = optional fromOpt.isDefined
-          "The option `${showOption fromPath}' defined in ${showFiles fromOpt.files} has been renamed to `${showOption toPath}'.";
-      }
-      (lib.modules.mkAliasAndWrapDefsWithPriority (setAttrByPath to) fromOpt)
-    ];
-  };
-
   siteOpts = { options, config, lib, name, ... }:
     {
-      imports = [
-        (mkRenamed [ "aclUse" ] [ "settings" "useacl" ])
-        (mkRenamed [ "superUser" ] [ "settings" "superuser" ])
-        (mkRenamed [ "disableActions" ] [ "settings"  "disableactions" ])
-        ({ config, options, ... }: let
-          showPath = suffix: lib.options.showOption ([ "services" "dokuwiki" "sites" name ] ++ suffix);
-          replaceExtraConfig = "Please use `${showPath ["settings"]}' to pass structured settings instead.";
-          ecOpt = options.extraConfig;
-          ecPath = showPath [ "extraConfig" ];
-        in {
-          options.extraConfig = mkOption {
-            visible = false;
-            apply = x: throw "The option ${ecPath} can no longer be used since it's been removed.\n${replaceExtraConfig}";
-          };
-          config.assertions = [
-            {
-              assertion = !ecOpt.isDefined;
-              message = "The option definition `${ecPath}' in ${showFiles ecOpt.files} no longer has any effect; please remove it.\n${replaceExtraConfig}";
-            }
-            {
-              assertion = config.mergedConfig.useacl -> (config.acl != null || config.aclFile != null);
-              message = "Either ${showPath [ "acl" ]} or ${showPath [ "aclFile" ]} is mandatory if ${showPath [ "settings" "useacl" ]} is true";
-            }
-            {
-              assertion = config.usersFile != null -> config.mergedConfig.useacl != false;
-              message = "${showPath [ "settings" "useacl" ]} is required when ${showPath [ "usersFile" ]} is set (Currently defined as `${config.usersFile}' in ${showFiles options.usersFile.files}).";
-            }
-          ];
-        })
-      ];
 
       options = {
         enable = mkEnableOption (lib.mdDoc "DokuWiki web application");
@@ -392,21 +338,6 @@ let
           '';
         };
 
-      # Required for the mkRenamedOptionModule
-      # TODO: Remove me once https://github.com/NixOS/nixpkgs/issues/96006 is fixed
-      # or we don't have any more notes about the removal of extraConfig, ...
-      warnings = mkOption {
-        type = types.listOf types.unspecified;
-        default = [ ];
-        visible = false;
-        internal = true;
-      };
-      assertions = mkOption {
-        type = types.listOf types.unspecified;
-        default = [ ];
-        visible = false;
-        internal = true;
-      };
     };
   };
 in
@@ -440,10 +371,6 @@ in
   # implementation
   config = mkIf (eachSite != {}) (mkMerge [{
 
-    warnings = flatten (mapAttrsToList (_: cfg: cfg.warnings) eachSite);
-
-    assertions = flatten (mapAttrsToList (_: cfg: cfg.assertions) eachSite);
-
     services.phpfpm.pools = mapAttrs' (hostName: cfg: (
       nameValuePair "dokuwiki-${hostName}" {
         inherit user;
diff --git a/nixos/modules/services/web-apps/freshrss.nix b/nixos/modules/services/web-apps/freshrss.nix
index 9683730bbe1f8..c8399143c37ba 100644
--- a/nixos/modules/services/web-apps/freshrss.nix
+++ b/nixos/modules/services/web-apps/freshrss.nix
@@ -294,7 +294,6 @@ in
       systemd.services.freshrss-updater = {
         description = "FreshRSS feed updater";
         after = [ "freshrss-config.service" ];
-        wantedBy = [ "multi-user.target" ];
         startAt = "*:0/5";
         environment = {
           DATA_PATH = cfg.dataDir;
diff --git a/nixos/modules/services/web-apps/healthchecks.nix b/nixos/modules/services/web-apps/healthchecks.nix
index e5e425a29d54c..1d439f162313b 100644
--- a/nixos/modules/services/web-apps/healthchecks.nix
+++ b/nixos/modules/services/web-apps/healthchecks.nix
@@ -176,6 +176,7 @@ in
     systemd.targets.healthchecks = {
       description = "Target for all Healthchecks services";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network.target" "network-online.target" ];
     };
 
diff --git a/nixos/modules/services/web-apps/invidious.nix b/nixos/modules/services/web-apps/invidious.nix
index cfba3c8a29708..359aaabfe673a 100644
--- a/nixos/modules/services/web-apps/invidious.nix
+++ b/nixos/modules/services/web-apps/invidious.nix
@@ -10,82 +10,115 @@ let
   generatedHmacKeyFile = "/var/lib/invidious/hmac_key";
   generateHmac = cfg.hmacKeyFile == null;
 
-  serviceConfig = {
-    systemd.services.invidious = {
-      description = "Invidious (An alternative YouTube front-end)";
-      wants = [ "network-online.target" ];
-      after = [ "network-online.target" ];
-      wantedBy = [ "multi-user.target" ];
-
-      preStart = lib.optionalString generateHmac ''
-        if [[ ! -e "${generatedHmacKeyFile}" ]]; then
-          ${pkgs.pwgen}/bin/pwgen 20 1 > "${generatedHmacKeyFile}"
-          chmod 0600 "${generatedHmacKeyFile}"
-        fi
-      '';
-
-      script = ''
-        configParts=()
-      ''
-      # autogenerated hmac_key
-      + lib.optionalString generateHmac ''
-        configParts+=("$(${pkgs.jq}/bin/jq -R '{"hmac_key":.}' <"${generatedHmacKeyFile}")")
-      ''
-      # generated settings file
-      + ''
-        configParts+=("$(< ${lib.escapeShellArg settingsFile})")
-      ''
-      # optional database password file
-      + lib.optionalString (cfg.database.host != null) ''
-        configParts+=("$(${pkgs.jq}/bin/jq -R '{"db":{"password":.}}' ${lib.escapeShellArg cfg.database.passwordFile})")
-      ''
-      # optional extra settings file
-      + lib.optionalString (cfg.extraSettingsFile != null) ''
-        configParts+=("$(< ${lib.escapeShellArg cfg.extraSettingsFile})")
-      ''
-      # explicitly specified hmac key file
-      + lib.optionalString (cfg.hmacKeyFile != null) ''
-        configParts+=("$(< ${lib.escapeShellArg cfg.hmacKeyFile})")
-      ''
-      # merge all parts into a single configuration with later elements overriding previous elements
-      + ''
-        export INVIDIOUS_CONFIG="$(${pkgs.jq}/bin/jq -s 'reduce .[] as $item ({}; . * $item)' <<<"''${configParts[*]}")"
-        exec ${cfg.package}/bin/invidious
-      '';
-
-      serviceConfig = {
-        RestartSec = "2s";
-        DynamicUser = true;
-        StateDirectory = "invidious";
-        StateDirectoryMode = "0750";
-
-        CapabilityBoundingSet = "";
-        PrivateDevices = true;
-        PrivateUsers = true;
-        ProtectHome = true;
-        ProtectKernelLogs = true;
-        ProtectProc = "invisible";
-        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
-        RestrictNamespaces = true;
-        SystemCallArchitectures = "native";
-        SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
-
-        # Because of various issues Invidious must be restarted often, at least once a day, ideally
-        # every hour.
-        # This option enables the automatic restarting of the Invidious instance.
-        Restart = lib.mkDefault "always";
-        RuntimeMaxSec = lib.mkDefault "1h";
-      };
+  commonInvidiousServiceConfig = {
+    description = "Invidious (An alternative YouTube front-end)";
+    wants = [ "network-online.target" ];
+    after = [ "network-online.target" ] ++ lib.optional cfg.database.createLocally "postgresql.service";
+    requires = lib.optional cfg.database.createLocally "postgresql.service";
+    wantedBy = [ "multi-user.target" ];
+
+    serviceConfig = {
+      RestartSec = "2s";
+      DynamicUser = true;
+      User = lib.mkIf (cfg.database.createLocally || cfg.serviceScale > 1) "invidious";
+      StateDirectory = "invidious";
+      StateDirectoryMode = "0750";
+
+      CapabilityBoundingSet = "";
+      PrivateDevices = true;
+      PrivateUsers = true;
+      ProtectHome = true;
+      ProtectKernelLogs = true;
+      ProtectProc = "invisible";
+      RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+      RestrictNamespaces = true;
+      SystemCallArchitectures = "native";
+      SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
+
+      # Because of various issues Invidious must be restarted often, at least once a day, ideally
+      # every hour.
+      # This option enables the automatic restarting of the Invidious instance.
+      # To ensure multiple instances of Invidious are not restarted at the exact same time, a
+      # randomized extra offset of up to 5 minutes is added.
+      Restart = lib.mkDefault "always";
+      RuntimeMaxSec = lib.mkDefault "1h";
+      RuntimeRandomizedExtraSec = lib.mkDefault "5min";
     };
+  };
+  mkInvidiousService = scaleIndex:
+    lib.foldl' lib.recursiveUpdate commonInvidiousServiceConfig [
+      # only generate the hmac file in the first service
+      (lib.optionalAttrs (scaleIndex == 0) {
+        preStart = lib.optionalString generateHmac ''
+          if [[ ! -e "${generatedHmacKeyFile}" ]]; then
+            ${pkgs.pwgen}/bin/pwgen 20 1 > "${generatedHmacKeyFile}"
+            chmod 0600 "${generatedHmacKeyFile}"
+          fi
+        '';
+      })
+      # configure the secondary services to run after the first service
+      (lib.optionalAttrs (scaleIndex > 0) {
+        after = commonInvidiousServiceConfig.after ++ [ "invidious.service" ];
+        wants = commonInvidiousServiceConfig.wants ++ [ "invidious.service" ];
+      })
+      {
+        script = ''
+          configParts=()
+        ''
+        # autogenerated hmac_key
+        + lib.optionalString generateHmac ''
+          configParts+=("$(${pkgs.jq}/bin/jq -R '{"hmac_key":.}' <"${generatedHmacKeyFile}")")
+        ''
+        # generated settings file
+        + ''
+          configParts+=("$(< ${lib.escapeShellArg settingsFile})")
+        ''
+        # optional database password file
+        + lib.optionalString (cfg.database.host != null) ''
+          configParts+=("$(${pkgs.jq}/bin/jq -R '{"db":{"password":.}}' ${lib.escapeShellArg cfg.database.passwordFile})")
+        ''
+        # optional extra settings file
+        + lib.optionalString (cfg.extraSettingsFile != null) ''
+          configParts+=("$(< ${lib.escapeShellArg cfg.extraSettingsFile})")
+        ''
+        # explicitly specified hmac key file
+        + lib.optionalString (cfg.hmacKeyFile != null) ''
+          configParts+=("$(< ${lib.escapeShellArg cfg.hmacKeyFile})")
+        ''
+        # configure threads for secondary instances
+        + lib.optionalString (scaleIndex > 0) ''
+          configParts+=('{"channel_threads":0, "feed_threads":0}')
+        ''
+        # configure different ports for the instances
+        + ''
+          configParts+=('{"port":${toString (cfg.port + scaleIndex)}}')
+        ''
+        # merge all parts into a single configuration with later elements overriding previous elements
+        + ''
+          export INVIDIOUS_CONFIG="$(${pkgs.jq}/bin/jq -s 'reduce .[] as $item ({}; . * $item)' <<<"''${configParts[*]}")"
+          exec ${cfg.package}/bin/invidious
+        '';
+      }
+    ];
 
-    services.invidious.settings = {
-      inherit (cfg) port;
+  serviceConfig = {
+    systemd.services = builtins.listToAttrs (builtins.genList
+      (scaleIndex: {
+        name = "invidious" + lib.optionalString (scaleIndex > 0) "-${builtins.toString scaleIndex}";
+        value = mkInvidiousService scaleIndex;
+      })
+      cfg.serviceScale);
 
+    services.invidious.settings = {
       # Automatically initialises and migrates the database if necessary
       check_tables = true;
 
       db = {
-        user = lib.mkDefault "kemal";
+        user = lib.mkDefault (
+          if (lib.versionAtLeast config.system.stateVersion "24.05")
+          then "invidious"
+          else "kemal"
+        );
         dbname = lib.mkDefault "invidious";
         port = cfg.database.port;
         # Blank for unix sockets, see
@@ -94,67 +127,74 @@ let
         # Not needed because peer authentication is enabled
         password = lib.mkIf (cfg.database.host == null) "";
       };
+
+      host_binding = cfg.address;
     } // (lib.optionalAttrs (cfg.domain != null) {
       inherit (cfg) domain;
     });
 
-    assertions = [{
-      assertion = cfg.database.host != null -> cfg.database.passwordFile != null;
-      message = "If database host isn't null, database password needs to be set";
-    }];
+    assertions = [
+      {
+        assertion = cfg.database.host != null -> cfg.database.passwordFile != null;
+        message = "If database host isn't null, database password needs to be set";
+      }
+      {
+        assertion = cfg.serviceScale >= 1;
+        message = "Service can't be scaled below one instance";
+      }
+    ];
   };
 
   # Settings necessary for running with an automatically managed local database
   localDatabaseConfig = lib.mkIf cfg.database.createLocally {
+    assertions = [
+      {
+        assertion = cfg.settings.db.user == cfg.settings.db.dbname;
+        message = ''
+          For local automatic database provisioning (services.invidious.database.createLocally == true)
+          to work, the username used to connect to PostgreSQL must match the database name, that is,
+          services.invidious.settings.db.user must match services.invidious.settings.db.dbname.
+          This is the default since NixOS 24.05. For older systems, it is normally safe to manually set
+          the user to "invidious" as the new user will be created with permissions
+          for the existing database. `REASSIGN OWNED BY kemal TO invidious;` may also be needed; it can be
+          run as `sudo -u postgres env psql --user=postgres --dbname=invidious -c 'reassign OWNED BY kemal to invidious;'`.
+        '';
+      }
+    ];
     # Default to using the local database if we create it
     services.invidious.database.host = lib.mkDefault null;
 
-
-    # TODO(raitobezarius to maintainers of invidious): I strongly advise to clean up the kemal specific
-    # thing for 24.05 and use `ensureDBOwnership`.
-    # See https://github.com/NixOS/nixpkgs/issues/216989
-    systemd.services.postgresql.postStart = lib.mkAfter ''
-      $PSQL -tAc 'ALTER DATABASE "${cfg.settings.db.dbname}" OWNER TO "${cfg.settings.db.user}";'
-    '';
     services.postgresql = {
       enable = true;
-      ensureUsers = lib.singleton { name = cfg.settings.db.user; ensureDBOwnership = false; };
+      ensureUsers = lib.singleton { name = cfg.settings.db.user; ensureDBOwnership = true; };
       ensureDatabases = lib.singleton cfg.settings.db.dbname;
-      # This is only needed because the unix user invidious isn't the same as
-      # the database user. This tells postgres to map one to the other.
-      identMap = ''
-        invidious invidious ${cfg.settings.db.user}
-      '';
-      # And this specifically enables peer authentication for only this
-      # database, which allows passwordless authentication over the postgres
-      # unix socket for the user map given above.
-      authentication = ''
-        local ${cfg.settings.db.dbname} ${cfg.settings.db.user} peer map=invidious
-      '';
     };
+  };
+
+  ytproxyConfig = lib.mkIf cfg.http3-ytproxy.enable {
+    systemd.services.http3-ytproxy = {
+      description = "HTTP3 ytproxy for Invidious";
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
 
-    systemd.services.invidious-db-clean = {
-      description = "Invidious database cleanup";
-      documentation = [ "https://docs.invidious.io/Database-Information-and-Maintenance.md" ];
-      startAt = lib.mkDefault "weekly";
-      path = [ config.services.postgresql.package ];
-      after = [ "postgresql.service" ];
       script = ''
-        psql ${cfg.settings.db.dbname} ${cfg.settings.db.user} -c "DELETE FROM nonces * WHERE expire < current_timestamp"
-        psql ${cfg.settings.db.dbname} ${cfg.settings.db.user} -c "TRUNCATE TABLE videos"
+        mkdir -p socket
+        exec ${lib.getExe cfg.http3-ytproxy.package};
       '';
+
       serviceConfig = {
+        RestartSec = "2s";
         DynamicUser = true;
-        User = "invidious";
+        User = lib.mkIf cfg.nginx.enable config.services.nginx.user;
+        RuntimeDirectory = "http3-ytproxy";
+        WorkingDirectory = "/run/http3-ytproxy";
       };
     };
 
-    systemd.services.invidious = {
-      requires = [ "postgresql.service" ];
-      after = [ "postgresql.service" ];
-
-      serviceConfig = {
-        User = "invidious";
+    services.nginx.virtualHosts.${cfg.domain} = lib.mkIf cfg.nginx.enable {
+      locations."~ (^/videoplayback|^/vi/|^/ggpht/|^/sb/)" = {
+        proxyPass = "http://unix:/run/http3-ytproxy/socket/http-proxy.sock";
       };
     };
   };
@@ -165,14 +205,28 @@ let
       external_port = 80;
     };
 
-    services.nginx = {
+    services.nginx = let
+      ip = if cfg.address == "0.0.0.0" then "127.0.0.1" else cfg.address;
+    in
+    {
       enable = true;
       virtualHosts.${cfg.domain} = {
-        locations."/".proxyPass = "http://127.0.0.1:${toString cfg.port}";
+        locations."/".proxyPass =
+          if cfg.serviceScale == 1 then
+            "http://${ip}:${toString cfg.port}"
+          else "http://upstream-invidious";
 
         enableACME = lib.mkDefault true;
         forceSSL = lib.mkDefault true;
       };
+      upstreams = lib.mkIf (cfg.serviceScale > 1) {
+        "upstream-invidious".servers = builtins.listToAttrs (builtins.genList
+          (scaleIndex: {
+            name = "${ip}:${toString (cfg.port + scaleIndex)}";
+            value = { };
+          })
+          cfg.serviceScale);
+      };
     };
 
     assertions = [{
@@ -220,6 +274,20 @@ in
       '';
     };
 
+    serviceScale = lib.mkOption {
+      type = types.int;
+      default = 1;
+      description = lib.mdDoc ''
+        How many Invidious instances to run.
+
+        See https://docs.invidious.io/improve-public-instance/#2-multiple-invidious-processes for more details
+        on how this is intended to work. All instances beyond the first one have the options `channel_threads`
+        and `feed_threads` set to 0 to avoid conflicts with multiple instances refreshing subscriptions. Instances
+        will be configured to bind to consecutive ports starting with {option}`services.invidious.port` for the
+        first instance.
+      '';
+    };
+
     # This needs to be outside of settings to avoid infinite recursion
     # (determining if nginx should be enabled and therefore the settings
     # modified).
@@ -233,6 +301,16 @@ in
       '';
     };
 
+    address = lib.mkOption {
+      type = types.str;
+      # default from https://github.com/iv-org/invidious/blob/master/config/config.example.yml
+      default = if cfg.nginx.enable then "127.0.0.1" else "0.0.0.0";
+      defaultText = lib.literalExpression ''if config.services.invidious.nginx.enable then "127.0.0.1" else "0.0.0.0"'';
+      description = lib.mdDoc ''
+        The IP address Invidious should bind to.
+      '';
+    };
+
     port = lib.mkOption {
       type = types.port;
       # Default from https://docs.invidious.io/Configuration.md
@@ -298,11 +376,28 @@ in
        which can also be used to disable ACME and TLS.
       '';
     };
+
+    http3-ytproxy = {
+      enable = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable http3-ytproxy for faster loading of images and video playback.
+
+          If {option}`services.invidious.nginx.enable` is used, nginx will be configured automatically. If not, you
+          need to configure a reverse proxy yourself according to
+          https://docs.invidious.io/improve-public-instance/#3-speed-up-video-playback-with-http3-ytproxy.
+        '';
+      };
+
+      package = lib.mkPackageOptionMD pkgs "http3-ytproxy" { };
+    };
   };
 
   config = lib.mkIf cfg.enable (lib.mkMerge [
     serviceConfig
     localDatabaseConfig
     nginxConfig
+    ytproxyConfig
   ]);
 }
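
A hedged sketch of how the new scaling and proxy options combine on a single host; the domain and instance count are illustrative:

    {
      services.invidious = {
        enable = true;
        domain = "invidious.example.org";   # illustrative
        nginx.enable = true;                # nginx balances across all instances via the generated upstream
        serviceScale = 3;                   # units invidious, invidious-1, invidious-2 on port, port+1, port+2
        http3-ytproxy.enable = true;        # adds the /videoplayback, /vi/, /ggpht/, /sb/ proxy locations
        database.createLocally = true;      # db user defaults to "invidious" on stateVersion >= 24.05
      };
    }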
diff --git a/nixos/modules/services/web-apps/invoiceplane.nix b/nixos/modules/services/web-apps/invoiceplane.nix
index 429520470a0d4..618bd848ebcb2 100644
--- a/nixos/modules/services/web-apps/invoiceplane.nix
+++ b/nixos/modules/services/web-apps/invoiceplane.nix
@@ -252,11 +252,11 @@ in
         };
 
         options.webserver = mkOption {
-          type = types.enum [ "caddy" ];
+          type = types.enum [ "caddy" "nginx" ];
           default = "caddy";
+          example = "nginx";
           description = lib.mdDoc ''
-            Which webserver to use for virtual host management. Currently only
-            caddy is supported.
+            Which webserver to use for virtual host management.
           '';
         };
       };
@@ -390,5 +390,39 @@ in
     };
   })
 
+  (mkIf (cfg.webserver == "nginx") {
+    services.nginx = {
+      enable = true;
+      virtualHosts = mapAttrs' (hostName: cfg: (
+        nameValuePair hostName {
+          root = pkg hostName cfg;
+          extraConfig = ''
+            index index.php index.html index.htm;
+
+            if (!-e $request_filename){
+              rewrite ^(.*)$ /index.php break;
+            }
+          '';
+
+          locations = {
+            "/setup".extraConfig = ''
+              rewrite ^(.*)$ http://${hostName}/ redirect;
+            '';
+
+            "~ .php$" = {
+              extraConfig = ''
+                fastcgi_split_path_info ^(.+\.php)(/.+)$;
+                fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+                fastcgi_pass unix:${config.services.phpfpm.pools."invoiceplane-${hostName}".socket};
+                include ${config.services.nginx.package}/conf/fastcgi_params;
+                include ${config.services.nginx.package}/conf/fastcgi.conf;
+              '';
+            };
+          };
+        }
+      )) eachSite;
+    };
+  })
+
   ]);
 }
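
Since the webserver enum now accepts nginx, a minimal sketch of opting into it (the per-site configuration is unchanged and omitted here):

    {
      services.invoiceplane.webserver = "nginx";   # "caddy" remains the default
    }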
diff --git a/nixos/modules/services/web-apps/jitsi-meet.nix b/nixos/modules/services/web-apps/jitsi-meet.nix
index 0c0eb66e65b7c..c4505534d635e 100644
--- a/nixos/modules/services/web-apps/jitsi-meet.nix
+++ b/nixos/modules/services/web-apps/jitsi-meet.nix
@@ -35,6 +35,7 @@ let
       domain = cfg.hostName;
       muc = "conference.${cfg.hostName}";
       focus = "focus.${cfg.hostName}";
+      jigasi = "jigasi.${cfg.hostName}";
     };
     bosh = "//${cfg.hostName}/http-bind";
     websocket = "wss://${cfg.hostName}/xmpp-websocket";
@@ -145,6 +146,16 @@ in
       '';
     };
 
+    jigasi.enable = mkOption {
+      type = bool;
+      default = false;
+      description = ''
+        Whether to enable jigasi instance and configure it to connect to Prosody.
+
+        Additional configuration is possible with <option>services.jigasi</option>.
+      '';
+    };
+
     nginx.enable = mkOption {
       type = bool;
       default = true;
@@ -224,7 +235,7 @@ in
           roomDefaultPublicJids = true;
           extraConfig = ''
             storage = "memory"
-            admins = { "focus@auth.${cfg.hostName}", "jvb@auth.${cfg.hostName}" }
+            admins = { "focus@auth.${cfg.hostName}", "jvb@auth.${cfg.hostName}", "jigasi@auth.${cfg.hostName}" }
           '';
           #-- muc_room_cache_size = 1000
         }
@@ -263,6 +274,9 @@ in
           Component "focus.${cfg.hostName}" "client_proxy"
             target_address = "focus@auth.${cfg.hostName}"
 
+          Component "jigasi.${cfg.hostName}" "client_proxy"
+            target_address = "jigasi@auth.${cfg.hostName}"
+
           Component "speakerstats.${cfg.hostName}" "speakerstats_component"
             muc_component = "conference.${cfg.hostName}"
 
@@ -356,7 +370,10 @@ in
         ${config.services.prosody.package}/bin/prosodyctl mod_roster_command subscribe focus.${cfg.hostName} focus@auth.${cfg.hostName}
         ${config.services.prosody.package}/bin/prosodyctl register jibri auth.${cfg.hostName} "$(cat /var/lib/jitsi-meet/jibri-auth-secret)"
         ${config.services.prosody.package}/bin/prosodyctl register recorder recorder.${cfg.hostName} "$(cat /var/lib/jitsi-meet/jibri-recorder-secret)"
+      '' + optionalString cfg.jigasi.enable ''
+        ${config.services.prosody.package}/bin/prosodyctl register jigasi auth.${cfg.hostName} "$(cat /var/lib/jitsi-meet/jigasi-user-secret)"
       '';
+
       serviceConfig = {
         EnvironmentFile = [ "/var/lib/jitsi-meet/secrets-env" ];
         SupplementaryGroups = [ "jitsi-meet" ];
@@ -371,13 +388,13 @@ in
 
     systemd.services.jitsi-meet-init-secrets = {
       wantedBy = [ "multi-user.target" ];
-      before = [ "jicofo.service" "jitsi-videobridge2.service" ] ++ (optional cfg.prosody.enable "prosody.service");
+      before = [ "jicofo.service" "jitsi-videobridge2.service" ] ++ (optional cfg.prosody.enable "prosody.service") ++ (optional cfg.jigasi.enable "jigasi.service");
       serviceConfig = {
         Type = "oneshot";
       };
 
       script = let
-        secrets = [ "jicofo-component-secret" "jicofo-user-secret" "jibri-auth-secret" "jibri-recorder-secret" ] ++ (optional (cfg.videobridge.passwordFile == null) "videobridge-secret");
+        secrets = [ "jicofo-component-secret" "jicofo-user-secret" "jibri-auth-secret" "jibri-recorder-secret" ] ++ (optionals cfg.jigasi.enable [ "jigasi-user-secret" "jigasi-component-secret" ]) ++ (optional (cfg.videobridge.passwordFile == null) "videobridge-secret");
       in
       ''
         cd /var/lib/jitsi-meet
@@ -391,6 +408,7 @@ in
 
         # for easy access in prosody
         echo "JICOFO_COMPONENT_SECRET=$(cat jicofo-component-secret)" > secrets-env
+        echo "JIGASI_COMPONENT_SECRET=$(cat jigasi-component-secret)" >> secrets-env
         chown root:jitsi-meet secrets-env
         chmod 640 secrets-env
       ''
@@ -592,6 +610,20 @@ in
         stripFromRoomDomain = "conference.";
       };
     };
+
+    services.jigasi = mkIf cfg.jigasi.enable {
+      enable = true;
+      xmppHost = "localhost";
+      xmppDomain = cfg.hostName;
+      userDomain = "auth.${cfg.hostName}";
+      userName = "jigasi";
+      userPasswordFile = "/var/lib/jitsi-meet/jigasi-user-secret";
+      componentPasswordFile = "/var/lib/jitsi-meet/jigasi-component-secret";
+      bridgeMuc = "jigasibrewery@internal.${cfg.hostName}";
+      config = {
+        "org.jitsi.jigasi.ALWAYS_TRUST_MODE_ENABLED" = "true";
+      };
+    };
   };
 
   meta.doc = ./jitsi-meet.md;
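
A sketch of turning on the new jigasi integration, assuming an otherwise standard jitsi-meet setup; the host name is illustrative, and further SIP settings would go through services.jigasi:

    {
      services.jitsi-meet = {
        enable = true;
        hostName = "meet.example.org";   # illustrative
        jigasi.enable = true;            # registers a jigasi account with the local Prosody
      };
    }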
diff --git a/nixos/modules/services/web-apps/keycloak.nix b/nixos/modules/services/web-apps/keycloak.nix
index 5d44bdee64a73..6d2948913b194 100644
--- a/nixos/modules/services/web-apps/keycloak.nix
+++ b/nixos/modules/services/web-apps/keycloak.nix
@@ -25,7 +25,6 @@ let
     maintainers
     catAttrs
     collect
-    splitString
     hasPrefix
     ;
 
@@ -329,7 +328,8 @@ in
             };
 
             hostname = mkOption {
-              type = str;
+              type = nullOr str;
+              default = null;
               example = "keycloak.example.com";
               description = lib.mdDoc ''
                 The hostname part of the public URL used as base for
@@ -451,7 +451,7 @@ in
 
       keycloakConfig = lib.generators.toKeyValue {
         mkKeyValue = lib.flip lib.generators.mkKeyValueDefault "=" {
-          mkValueString = v: with builtins;
+          mkValueString = v:
             if isInt v then toString v
             else if isString v then v
             else if true == v then "true"
@@ -480,6 +480,14 @@ in
             assertion = createLocalPostgreSQL -> config.services.postgresql.settings.standard_conforming_strings or true;
             message = "Setting up a local PostgreSQL db for Keycloak requires `standard_conforming_strings` turned on to work reliably";
           }
+          {
+            assertion = cfg.settings.hostname != null || cfg.settings.hostname-url or null != null;
+            message = "Setting the Keycloak hostname is required, see `services.keycloak.settings.hostname`";
+          }
+          {
+            assertion = !(cfg.settings.hostname != null && cfg.settings.hostname-url or null != null);
+            message = "`services.keycloak.settings.hostname` and `services.keycloak.settings.hostname-url` are mutually exclusive";
+          }
         ];
 
         environment.systemPackages = [ keycloakBuild ];
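
With the new assertions, a configuration has to set exactly one of hostname or hostname-url; a minimal sketch (domain illustrative):

    {
      services.keycloak.settings = {
        hostname = "keycloak.example.com";
        # hostname-url = "https://keycloak.example.com";   # mutually exclusive with hostname
      };
    }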
diff --git a/nixos/modules/services/web-apps/mastodon.nix b/nixos/modules/services/web-apps/mastodon.nix
index 8686506b1c282..538e728fcc72f 100644
--- a/nixos/modules/services/web-apps/mastodon.nix
+++ b/nixos/modules/services/web-apps/mastodon.nix
@@ -136,7 +136,7 @@ let
         # System Call Filtering
         SystemCallFilter = [ ("~" + lib.concatStringsSep " " systemCallsList) "@chown" "pipe" "pipe2" ];
       } // cfgService;
-      path = with pkgs; [ file imagemagick ffmpeg ];
+      path = with pkgs; [ ffmpeg-headless file imagemagick ];
     })
   ) cfg.sidekiqProcesses;
 
@@ -229,7 +229,7 @@ in {
       streamingProcesses = lib.mkOption {
         description = lib.mdDoc ''
           Number of processes used by the mastodon-streaming service.
-          Recommended is the amount of your CPU cores minus one.
+          Please define this explicitly; the recommended value is the number of your CPU cores minus one.
         '';
         type = lib.types.ints.positive;
         example = 3;
@@ -711,31 +711,28 @@ in {
     systemd.services.mastodon-init-db = lib.mkIf cfg.automaticMigrations {
       script = lib.optionalString (!databaseActuallyCreateLocally) ''
         umask 077
-
-        export PGPASSFILE
-        PGPASSFILE=$(mktemp)
-        cat > $PGPASSFILE <<EOF
-        ${cfg.database.host}:${toString cfg.database.port}:${cfg.database.name}:${cfg.database.user}:$(cat ${cfg.database.passwordFile})
-        EOF
-
+        export PGPASSWORD="$(cat '${cfg.database.passwordFile}')"
       '' + ''
-        if [ `psql ${cfg.database.name} -c \
+        if [ `psql -c \
                 "select count(*) from pg_class c \
                 join pg_namespace s on s.oid = c.relnamespace \
                 where s.nspname not in ('pg_catalog', 'pg_toast', 'information_schema') \
                 and s.nspname not like 'pg_temp%';" | sed -n 3p` -eq 0 ]; then
+          echo "Seeding database"
           SAFETY_ASSURED=1 rails db:schema:load
           rails db:seed
         else
+          echo "Migrating database (this might be a noop)"
           rails db:migrate
         fi
       '' +  lib.optionalString (!databaseActuallyCreateLocally) ''
-        rm $PGPASSFILE
-        unset PGPASSFILE
+        unset PGPASSWORD
       '';
       path = [ cfg.package pkgs.postgresql ];
       environment = env // lib.optionalAttrs (!databaseActuallyCreateLocally) {
         PGHOST = cfg.database.host;
+        PGPORT = toString cfg.database.port;
+        PGDATABASE = cfg.database.name;
         PGUSER = cfg.database.user;
       };
       serviceConfig = {
@@ -776,7 +773,7 @@ in {
         # System Call Filtering
         SystemCallFilter = [ ("~" + lib.concatStringsSep " " systemCallsList) "@chown" "pipe" "pipe2" ];
       } // cfgService;
-      path = with pkgs; [ file imagemagick ffmpeg ];
+      path = with pkgs; [ ffmpeg-headless file imagemagick ];
     };
 
     systemd.services.mastodon-media-auto-remove = lib.mkIf cfg.mediaAutoRemove.enable {
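
Because streamingProcesses now has to be set explicitly, a sketch assuming a four-core host:

    {
      services.mastodon.streamingProcesses = 3;   # CPU cores minus one, per the option description
    }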
diff --git a/nixos/modules/services/web-apps/mattermost.nix b/nixos/modules/services/web-apps/mattermost.nix
index f19465eeb59a5..5035594323749 100644
--- a/nixos/modules/services/web-apps/mattermost.nix
+++ b/nixos/modules/services/web-apps/mattermost.nix
@@ -102,7 +102,7 @@ in
     services.mattermost = {
       enable = mkEnableOption (lib.mdDoc "Mattermost chat server");
 
-      package = mkPackageOption pkgs "mattermostl" { };
+      package = mkPackageOption pkgs "mattermost" { };
 
       statePath = mkOption {
         type = types.str;
diff --git a/nixos/modules/services/web-apps/miniflux.nix b/nixos/modules/services/web-apps/miniflux.nix
index a500008fc7925..1a5b7d0c24e9b 100644
--- a/nixos/modules/services/web-apps/miniflux.nix
+++ b/nixos/modules/services/web-apps/miniflux.nix
@@ -21,10 +21,10 @@ in
       package = mkPackageOption pkgs "miniflux" { };
 
       config = mkOption {
-        type = types.attrsOf types.str;
+        type = with types; attrsOf (oneOf [ str int ]);
         example = literalExpression ''
           {
-            CLEANUP_FREQUENCY = "48";
+            CLEANUP_FREQUENCY = 48;
             LISTEN_ADDR = "localhost:8080";
           }
         '';
@@ -51,12 +51,11 @@ in
   };
 
   config = mkIf cfg.enable {
-
     services.miniflux.config =  {
       LISTEN_ADDR = mkDefault defaultAddress;
       DATABASE_URL = "user=miniflux host=/run/postgresql dbname=miniflux";
-      RUN_MIGRATIONS = "1";
-      CREATE_ADMIN = "1";
+      RUN_MIGRATIONS = 1;
+      CREATE_ADMIN = 1;
     };
 
     services.postgresql = {
@@ -90,7 +89,7 @@ in
         User = "miniflux";
         DynamicUser = true;
         RuntimeDirectory = "miniflux";
-        RuntimeDirectoryMode = "0700";
+        RuntimeDirectoryMode = "0750";
         EnvironmentFile = cfg.adminCredentialsFile;
         # Hardening
         CapabilityBoundingSet = [ "" ];
@@ -117,7 +116,7 @@ in
         UMask = "0077";
       };
 
-      environment = cfg.config;
+      environment = lib.mapAttrs (_: toString) cfg.config;
     };
     environment.systemPackages = [ cfg.package ];
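
With the config option widened to accept integers, a sketch of the new form; the credentials path is an assumption, not part of this change:

    {
      services.miniflux = {
        enable = true;
        adminCredentialsFile = "/run/secrets/miniflux-admin";   # assumed path
        config = {
          CLEANUP_FREQUENCY = 48;           # int now accepted; mapped through toString for the unit environment
          LISTEN_ADDR = "localhost:8080";
        };
      };
    }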
 
diff --git a/nixos/modules/services/web-apps/mobilizon.nix b/nixos/modules/services/web-apps/mobilizon.nix
index 0a530bff92325..bdb08f6131496 100644
--- a/nixos/modules/services/web-apps/mobilizon.nix
+++ b/nixos/modules/services/web-apps/mobilizon.nix
@@ -384,7 +384,7 @@ in
           ensureDBOwnership = false;
         }
       ];
-      extraPlugins = with postgresql.pkgs; [ postgis ];
+      extraPlugins = ps: with ps; [ postgis ];
     };
 
     # Nginx config taken from support/nginx/mobilizon-release.conf
diff --git a/nixos/modules/services/web-apps/netbox.nix b/nixos/modules/services/web-apps/netbox.nix
index 3b9434e3d3456..72ec578146a76 100644
--- a/nixos/modules/services/web-apps/netbox.nix
+++ b/nixos/modules/services/web-apps/netbox.nix
@@ -267,6 +267,7 @@ in {
     systemd.targets.netbox = {
       description = "Target for all NetBox services";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" "redis-netbox.service" ];
     };
 
@@ -317,7 +318,7 @@ in {
 
         serviceConfig = defaultServiceConfig // {
           ExecStart = ''
-            ${pkgs.python3Packages.gunicorn}/bin/gunicorn netbox.wsgi \
+            ${pkg.gunicorn}/bin/gunicorn netbox.wsgi \
               --bind ${cfg.listenAddress}:${toString cfg.port} \
               --pythonpath ${pkg}/opt/netbox/netbox
           '';
diff --git a/nixos/modules/services/web-apps/nextcloud.md b/nixos/modules/services/web-apps/nextcloud.md
index ecc7f380592ae..ce8f96a6a3896 100644
--- a/nixos/modules/services/web-apps/nextcloud.md
+++ b/nixos/modules/services/web-apps/nextcloud.md
@@ -5,7 +5,7 @@ self-hostable cloud platform. The server setup can be automated using
 [services.nextcloud](#opt-services.nextcloud.enable). A
 desktop client is packaged at `pkgs.nextcloud-client`.
 
-The current default by NixOS is `nextcloud27` which is also the latest
+The current default in NixOS is `nextcloud28`, which is also the latest
 major version available.
 
 ## Basic usage {#module-services-nextcloud-basic-usage}
@@ -51,7 +51,7 @@ to ensure that changes can be applied by changing the module's options.
 In case the application serves multiple domains (those are checked with
 [`$_SERVER['HTTP_HOST']`](https://www.php.net/manual/en/reserved.variables.server.php))
 they need to be added to
-[`services.nextcloud.config.extraTrustedDomains`](#opt-services.nextcloud.config.extraTrustedDomains).
+[`services.nextcloud.extraOptions.trusted_domains`](#opt-services.nextcloud.extraOptions.trusted_domains).
 
 Auto updates for Nextcloud apps can be enabled using
 [`services.nextcloud.autoUpdateApps`](#opt-services.nextcloud.autoUpdateApps.enable).
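
A hedged sketch of the renamed trusted-domains option from the paragraph above; the extra domains are illustrative:

    {
      services.nextcloud.extraOptions.trusted_domains = [
        "cloud.example.org"
        "cloud.example.net"
      ];
    }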
diff --git a/nixos/modules/services/web-apps/nextcloud.nix b/nixos/modules/services/web-apps/nextcloud.nix
index 6c50ea3c81ef7..0b19265942c03 100644
--- a/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixos/modules/services/web-apps/nextcloud.nix
@@ -9,6 +9,7 @@ let
   jsonFormat = pkgs.formats.json {};
 
   defaultPHPSettings = {
+    output_buffering = "0";
     short_open_tag = "Off";
     expose_php = "Off";
     error_reporting = "E_ALL & ~E_DEPRECATED & ~E_STRICT";
@@ -23,11 +24,49 @@ let
     catch_workers_output = "yes";
   };
 
+  appStores = {
+    # default apps bundled with pkgs.nextcloudXX, e.g. files, contacts
+    apps = {
+      enabled = true;
+      writable = false;
+    };
+    # apps installed via cfg.extraApps
+    nix-apps = {
+      enabled = cfg.extraApps != { };
+      linkTarget = pkgs.linkFarm "nix-apps"
+        (mapAttrsToList (name: path: { inherit name path; }) cfg.extraApps);
+      writable = false;
+    };
+    # apps installed via the app store.
+    store-apps = {
+      enabled = cfg.appstoreEnable == null || cfg.appstoreEnable;
+      linkTarget = "${cfg.home}/store-apps";
+      writable = true;
+    };
+  };
+
+  webroot = pkgs.runCommand
+    "${cfg.package.name or "nextcloud"}-with-apps"
+    { }
+    ''
+      mkdir $out
+      ln -sfv "${cfg.package}"/* "$out"
+      ${concatStrings
+        (mapAttrsToList (name: store: optionalString (store.enabled && store?linkTarget) ''
+          if [ -e "$out"/${name} ]; then
+            echo "Didn't expect ${name} already in $out!"
+            exit 1
+          fi
+          ln -sfTv ${store.linkTarget} "$out"/${name}
+        '') appStores)}
+    '';
+
   inherit (cfg) datadir;
 
   phpPackage = cfg.phpPackage.buildEnv {
     extensions = { enabled, all }:
       (with all; enabled
+        ++ [ bz2 intl sodium ] # recommended
         ++ optional cfg.enableImagemagick imagick
         # Optionally enabled depending on caching settings
         ++ optional cfg.caching.apcu apcu
@@ -44,7 +83,7 @@ let
 
   occ = pkgs.writeScriptBin "nextcloud-occ" ''
     #! ${pkgs.runtimeShell}
-    cd ${cfg.package}
+    cd ${webroot}
     sudo=exec
     if [[ "$USER" != nextcloud ]]; then
       sudo='exec /run/wrappers/bin/sudo -u nextcloud --preserve-env=NEXTCLOUD_CONFIG_DIR --preserve-env=OC_PASS'
@@ -60,37 +99,127 @@ let
   mysqlLocal = cfg.database.createLocally && cfg.config.dbtype == "mysql";
   pgsqlLocal = cfg.database.createLocally && cfg.config.dbtype == "pgsql";
 
+  nextcloudGreaterOrEqualThan = versionAtLeast cfg.package.version;
+  nextcloudOlderThan = versionOlder cfg.package.version;
+
   # https://github.com/nextcloud/documentation/pull/11179
-  ocmProviderIsNotAStaticDirAnymore = versionAtLeast cfg.package.version "27.1.2";
+  ocmProviderIsNotAStaticDirAnymore = nextcloudGreaterOrEqualThan "27.1.2"
+    || (nextcloudOlderThan "27.0.0" && nextcloudGreaterOrEqualThan "26.0.8");
+
+  overrideConfig = let
+    c = cfg.config;
+    requiresReadSecretFunction = c.dbpassFile != null || c.objectstore.s3.enable;
+    objectstoreConfig = let s3 = c.objectstore.s3; in optionalString s3.enable ''
+      'objectstore' => [
+        'class' => '\\OC\\Files\\ObjectStore\\S3',
+        'arguments' => [
+          'bucket' => '${s3.bucket}',
+          'autocreate' => ${boolToString s3.autocreate},
+          'key' => '${s3.key}',
+          'secret' => nix_read_secret('${s3.secretFile}'),
+          ${optionalString (s3.hostname != null) "'hostname' => '${s3.hostname}',"}
+          ${optionalString (s3.port != null) "'port' => ${toString s3.port},"}
+          'use_ssl' => ${boolToString s3.useSsl},
+          ${optionalString (s3.region != null) "'region' => '${s3.region}',"}
+          'use_path_style' => ${boolToString s3.usePathStyle},
+          ${optionalString (s3.sseCKeyFile != null) "'sse_c_key' => nix_read_secret('${s3.sseCKeyFile}'),"}
+        ],
+      ]
+    '';
+    showAppStoreSetting = cfg.appstoreEnable != null || cfg.extraApps != {};
+    renderedAppStoreSetting =
+      let
+        x = cfg.appstoreEnable;
+      in
+        if x == null then "false"
+        else boolToString x;
+    mkAppStoreConfig = name: { enabled, writable, ... }: optionalString enabled ''
+      [ 'path' => '${webroot}/${name}', 'url' => '/${name}', 'writable' => ${boolToString writable} ],
+    '';
+  in pkgs.writeText "nextcloud-config.php" ''
+    <?php
+    ${optionalString requiresReadSecretFunction ''
+      function nix_read_secret($file) {
+        if (!file_exists($file)) {
+          throw new \RuntimeException(sprintf(
+            "Cannot start Nextcloud, secret file %s set by NixOS doesn't seem to "
+            . "exist! Please make sure that the file exists and has appropriate "
+            . "permissions for user & group 'nextcloud'!",
+            $file
+          ));
+        }
+        return trim(file_get_contents($file));
+      }''}
+    function nix_decode_json_file($file, $error) {
+      if (!file_exists($file)) {
+        throw new \RuntimeException(sprintf($error, $file));
+      }
+      $decoded = json_decode(file_get_contents($file), true);
+
+      if (json_last_error() !== JSON_ERROR_NONE) {
+        throw new \RuntimeException(sprintf("Cannot decode %s, because: %s", $file, json_last_error_msg()));
+      }
 
+      return $decoded;
+    }
+    $CONFIG = [
+      'apps_paths' => [
+        ${concatStrings (mapAttrsToList mkAppStoreConfig appStores)}
+      ],
+      ${optionalString (showAppStoreSetting) "'appstoreenabled' => ${renderedAppStoreSetting},"}
+      ${optionalString cfg.caching.apcu "'memcache.local' => '\\OC\\Memcache\\APCu',"}
+      ${optionalString (c.dbname != null) "'dbname' => '${c.dbname}',"}
+      ${optionalString (c.dbhost != null) "'dbhost' => '${c.dbhost}',"}
+      ${optionalString (c.dbuser != null) "'dbuser' => '${c.dbuser}',"}
+      ${optionalString (c.dbtableprefix != null) "'dbtableprefix' => '${toString c.dbtableprefix}',"}
+      ${optionalString (c.dbpassFile != null) ''
+          'dbpassword' => nix_read_secret(
+            "${c.dbpassFile}"
+          ),
+        ''
+      }
+      'dbtype' => '${c.dbtype}',
+      ${objectstoreConfig}
+    ];
+
+    $CONFIG = array_replace_recursive($CONFIG, nix_decode_json_file(
+      "${jsonFormat.generate "nextcloud-extraOptions.json" cfg.extraOptions}",
+      "impossible: this should never happen (decoding generated extraOptions file %s failed)"
+    ));
+
+    ${optionalString (cfg.secretFile != null) ''
+      $CONFIG = array_replace_recursive($CONFIG, nix_decode_json_file(
+        "${cfg.secretFile}",
+        "Cannot start Nextcloud, secrets file %s set by NixOS doesn't exist!"
+      ));
+    ''}
+  '';
 in {
 
   imports = [
-    (mkRemovedOptionModule [ "services" "nextcloud" "config" "adminpass" ] ''
-      Please use `services.nextcloud.config.adminpassFile' instead!
-    '')
-    (mkRemovedOptionModule [ "services" "nextcloud" "config" "dbpass" ] ''
-      Please use `services.nextcloud.config.dbpassFile' instead!
-    '')
-    (mkRemovedOptionModule [ "services" "nextcloud" "nginx" "enable" ] ''
-      The nextcloud module supports `nginx` as reverse-proxy by default and doesn't
-      support other reverse-proxies officially.
-
-      However it's possible to use an alternative reverse-proxy by
-
-        * disabling nginx
-        * setting `listen.owner` & `listen.group` in the phpfpm-pool to a different value
-
-      Further details about this can be found in the `Nextcloud`-section of the NixOS-manual
-      (which can be opened e.g. by running `nixos-help`).
-    '')
     (mkRemovedOptionModule [ "services" "nextcloud" "enableBrokenCiphersForSSE" ] ''
       This option has no effect since there's no supported Nextcloud version packaged here
       using OpenSSL for RC4 SSE.
     '')
-    (mkRemovedOptionModule [ "services" "nextcloud" "disableImagemagick" ] ''
-      Use services.nextcloud.enableImagemagick instead.
+    (mkRemovedOptionModule [ "services" "nextcloud" "config" "dbport" ] ''
+      Add port to services.nextcloud.config.dbhost instead.
     '')
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "logLevel" ] [ "services" "nextcloud" "extraOptions" "loglevel" ])
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "logType" ] [ "services" "nextcloud" "extraOptions" "log_type" ])
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "config" "defaultPhoneRegion" ] [ "services" "nextcloud" "extraOptions" "default_phone_region" ])
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "config" "overwriteProtocol" ] [ "services" "nextcloud" "extraOptions" "overwriteprotocol" ])
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "skeletonDirectory" ] [ "services" "nextcloud" "extraOptions" "skeletondirectory" ])
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "globalProfiles" ] [ "services" "nextcloud" "extraOptions" "profile.enabled" ])
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "config" "extraTrustedDomains" ] [ "services" "nextcloud" "extraOptions" "trusted_domains" ])
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "config" "trustedProxies" ] [ "services" "nextcloud" "extraOptions" "trusted_proxies" ])
   ];
 
   options.services.nextcloud = {
@@ -154,32 +283,6 @@ in {
         Set this to false to disable the installation of apps from the global appstore. App management is always enabled regardless of this setting.
       '';
     };
-    logLevel = mkOption {
-      type = types.ints.between 0 4;
-      default = 2;
-      description = lib.mdDoc ''
-        Log level value between 0 (DEBUG) and 4 (FATAL).
-
-        - 0 (debug): Log all activity.
-
-        - 1 (info): Log activity such as user logins and file activities, plus warnings, errors, and fatal errors.
-
-        - 2 (warn): Log successful operations, as well as warnings of potential problems, errors and fatal errors.
-
-        - 3 (error): Log failed operations and fatal errors.
-
-        - 4 (fatal): Log only fatal errors that cause the server to stop.
-      '';
-    };
-    logType = mkOption {
-      type = types.enum [ "errorlog" "file" "syslog" "systemd" ];
-      default = "syslog";
-      description = lib.mdDoc ''
-        Logging backend to use.
-        systemd requires the php-systemd package to be added to services.nextcloud.phpExtraExtensions.
-        See the [nextcloud documentation](https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/logging_configuration.html) for details.
-      '';
-    };
     https = mkOption {
       type = types.bool;
       default = false;
@@ -188,7 +291,7 @@ in {
     package = mkOption {
       type = types.package;
       description = lib.mdDoc "Which package to use for the Nextcloud instance.";
-      relatedPackages = [ "nextcloud26" "nextcloud27" ];
+      relatedPackages = [ "nextcloud26" "nextcloud27" "nextcloud28" ];
     };
     phpPackage = mkPackageOption pkgs "php" {
       example = "php82";
@@ -203,16 +306,6 @@ in {
       '';
     };
 
-    skeletonDirectory = mkOption {
-      default = "";
-      type = types.str;
-      description = lib.mdDoc ''
-        The directory where the skeleton files are located. These files will be
-        copied to the data directory of new users. Leave empty to not copy any
-        skeleton files.
-      '';
-    };
-
     webfinger = mkOption {
       type = types.bool;
       default = false;
@@ -238,7 +331,7 @@ in {
     };
 
     phpOptions = mkOption {
-      type = types.attrsOf types.str;
+      type = with types; attrsOf (oneOf [ str int ]);
       defaultText = literalExpression (generators.toPretty { } defaultPHPSettings);
       description = lib.mdDoc ''
         Options for PHP's php.ini file for nextcloud.
@@ -312,7 +405,6 @@ in {
 
     };
 
-
     config = {
       dbtype = mkOption {
         type = types.enum [ "sqlite" "pgsql" "mysql" ];
@@ -343,18 +435,14 @@ in {
           else if mysqlLocal then "localhost:/run/mysqld/mysqld.sock"
           else "localhost";
         defaultText = "localhost";
+        example = "localhost:5000";
         description = lib.mdDoc ''
-          Database host or socket path.
+          Database host (and optionally port) or socket path.
           If [](#opt-services.nextcloud.database.createLocally) is true and
           [](#opt-services.nextcloud.config.dbtype) is either `pgsql` or `mysql`,
           defaults to the correct Unix socket instead.
         '';
       };
-      dbport = mkOption {
-        type = with types; nullOr (either int str);
-        default = null;
-        description = lib.mdDoc "Database port.";
-      };
       dbtableprefix = mkOption {
         type = types.nullOr types.str;
         default = null;
@@ -377,53 +465,6 @@ in {
           setup of Nextcloud by the systemd service `nextcloud-setup.service`.
         '';
       };
-
-      extraTrustedDomains = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        description = lib.mdDoc ''
-          Trusted domains from which the Nextcloud installation will be
-          accessible.  You don't need to add
-          `services.nextcloud.hostname` here.
-        '';
-      };
-
-      trustedProxies = mkOption {
-        type = types.listOf types.str;
-        default = [];
-        description = lib.mdDoc ''
-          Trusted proxies to provide if the Nextcloud installation is being
-          proxied to secure against, e.g. spoofing.
-        '';
-      };
-
-      overwriteProtocol = mkOption {
-        type = types.nullOr (types.enum [ "http" "https" ]);
-        default = null;
-        example = "https";
-
-        description = lib.mdDoc ''
-          Force Nextcloud to always use HTTP or HTTPS i.e. for link generation.
-          Nextcloud uses the currently used protocol by default, but when
-          behind a reverse-proxy, it may use `http` for everything although
-          Nextcloud may be served via HTTPS.
-        '';
-      };
-
-      defaultPhoneRegion = mkOption {
-        default = null;
-        type = types.nullOr types.str;
-        example = "DE";
-        description = lib.mdDoc ''
-          An [ISO 3166-1](https://www.iso.org/iso-3166-country-codes.html)
-          country code which replaces automatic phone-number detection
-          without a country code.
-
-          As an example, with `DE` set as the default phone region,
-          the `+49` prefix can be omitted for phone numbers.
-        '';
-      };
-
       objectstore = {
         s3 = {
           enable = mkEnableOption (lib.mdDoc ''
@@ -606,30 +647,109 @@ in {
         The nextcloud-occ program preconfigured to target this Nextcloud instance.
       '';
     };
-    globalProfiles = mkEnableOption (lib.mdDoc "global profiles") // {
-      description = lib.mdDoc ''
-        Makes user-profiles globally available under `nextcloud.tld/u/user.name`.
-        Even though it's enabled by default in Nextcloud, it must be explicitly enabled
-        here because it has the side-effect that personal information is even accessible to
-        unauthenticated users by default.
-
-        By default, the following properties are set to “Show to everyone”
-        if this flag is enabled:
-        - About
-        - Full name
-        - Headline
-        - Organisation
-        - Profile picture
-        - Role
-        - Twitter
-        - Website
-
-        Only has an effect in Nextcloud 23 and later.
-      '';
-    };
 
     extraOptions = mkOption {
-      type = jsonFormat.type;
+      type = types.submodule {
+        freeformType = jsonFormat.type;
+        options = {
+
+          loglevel = mkOption {
+            type = types.ints.between 0 4;
+            default = 2;
+            description = lib.mdDoc ''
+              Log level value between 0 (DEBUG) and 4 (FATAL).
+
+              - 0 (debug): Log all activity.
+
+              - 1 (info): Log activity such as user logins and file activities, plus warnings, errors, and fatal errors.
+
+              - 2 (warn): Log successful operations, as well as warnings of potential problems, errors and fatal errors.
+
+              - 3 (error): Log failed operations and fatal errors.
+
+              - 4 (fatal): Log only fatal errors that cause the server to stop.
+            '';
+          };
+          log_type = mkOption {
+            type = types.enum [ "errorlog" "file" "syslog" "systemd" ];
+            default = "syslog";
+            description = lib.mdDoc ''
+              Logging backend to use.
+              `systemd` requires the `php-systemd` package to be added to `services.nextcloud.phpExtraExtensions`.
+              See the [nextcloud documentation](https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/logging_configuration.html) for details.
+            '';
+          };
+          skeletondirectory = mkOption {
+            default = "";
+            type = types.str;
+            description = lib.mdDoc ''
+              The directory where the skeleton files are located. These files will be
+              copied to the data directory of new users. Leave empty to not copy any
+              skeleton files.
+            '';
+          };
+          trusted_domains = mkOption {
+            type = types.listOf types.str;
+            default = [];
+            description = lib.mdDoc ''
+              Trusted domains from which the Nextcloud installation will be
+              accessible. You don't need to add
+              `services.nextcloud.hostName` here.
+            '';
+          };
+          trusted_proxies = mkOption {
+            type = types.listOf types.str;
+            default = [];
+            description = lib.mdDoc ''
+              Trusted proxies to provide if the Nextcloud installation is being
+              proxied, to secure against e.g. spoofing.
+            '';
+          };
+          overwriteprotocol = mkOption {
+            type = types.enum [ "" "http" "https" ];
+            default = "";
+            example = "https";
+            description = lib.mdDoc ''
+              Force Nextcloud to always use HTTP or HTTPS i.e. for link generation.
+              Nextcloud uses the currently used protocol by default, but when
+              behind a reverse-proxy, it may use `http` for everything although
+              Nextcloud may be served via HTTPS.
+            '';
+          };
+          default_phone_region = mkOption {
+            default = "";
+            type = types.str;
+            example = "DE";
+            description = lib.mdDoc ''
+              An [ISO 3166-1](https://www.iso.org/iso-3166-country-codes.html)
+              country code which replaces automatic phone-number detection
+              without a country code.
+
+              As an example, with `DE` set as the default phone region,
+              the `+49` prefix can be omitted for phone numbers.
+            '';
+          };
+          "profile.enabled" = mkEnableOption (lib.mdDoc "global profiles") // {
+            description = lib.mdDoc ''
+              Makes user-profiles globally available under `nextcloud.tld/u/user.name`.
+              Even though it's enabled by default in Nextcloud, it must be explicitly enabled
+              here because it has the side-effect that personal information is even accessible to
+              unauthenticated users by default.
+
+              By default, the following properties are set to “Show to everyone”
+              if this flag is enabled:
+              - About
+              - Full name
+              - Headline
+              - Organisation
+              - Profile picture
+              - Role
+              - Twitter
+              - Website
+              Only has an effect in Nextcloud 23 and later.
+            '';
+          };
+        };
+      };
       default = {};
       description = lib.mdDoc ''
         Extra options which should be appended to Nextcloud's config.php file.
@@ -677,7 +797,7 @@ in {
 
   config = mkIf cfg.enable (mkMerge [
     { warnings = let
-        latest = 27;
+        latest = 28;
         upgradeWarning = major: nixos:
           ''
             A legacy Nextcloud install (from before NixOS ${nixos}) may be installed.
@@ -698,7 +818,8 @@ in {
         '')
         ++ (optional (versionOlder cfg.package.version "25") (upgradeWarning 24 "22.11"))
         ++ (optional (versionOlder cfg.package.version "26") (upgradeWarning 25 "23.05"))
-        ++ (optional (versionOlder cfg.package.version "27") (upgradeWarning 26 "23.11"));
+        ++ (optional (versionOlder cfg.package.version "27") (upgradeWarning 26 "23.11"))
+        ++ (optional (versionOlder cfg.package.version "28") (upgradeWarning 27 "24.05"));
 
       services.nextcloud.package = with pkgs;
         mkDefault (
@@ -708,15 +829,13 @@ in {
               nextcloud defined in an overlay, please set `services.nextcloud.package` to
               `pkgs.nextcloud`.
             ''
-          else if versionOlder stateVersion "22.11" then nextcloud24
           else if versionOlder stateVersion "23.05" then nextcloud25
           else if versionOlder stateVersion "23.11" then nextcloud26
-          else nextcloud27
+          else if versionOlder stateVersion "24.05" then nextcloud27
+          else nextcloud28
         );
 
-      services.nextcloud.phpPackage =
-        if versionOlder cfg.package.version "26" then pkgs.php81
-        else pkgs.php82;
+      services.nextcloud.phpPackage = pkgs.php82;
 
       services.nextcloud.phpOptions = mkMerge [
         (mapAttrs (const mkOptionDefault) defaultPHPSettings)
@@ -758,116 +877,23 @@ in {
         timerConfig.Unit = "nextcloud-cron.service";
       };
 
-      systemd.tmpfiles.rules = ["d ${cfg.home} 0750 nextcloud nextcloud"];
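+      # Create the Nextcloud state directories with the right ownership and declaratively
+      # link the generated override.config.php into the config directory.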
+      systemd.tmpfiles.rules = map (dir: "d ${dir} 0750 nextcloud nextcloud - -") [
+        "${cfg.home}"
+        "${datadir}/config"
+        "${datadir}/data"
+        "${cfg.home}/store-apps"
+      ] ++ [
+        "L+ ${datadir}/config/override.config.php - - - - ${overrideConfig}"
+      ];
 
       systemd.services = {
         # When upgrading the Nextcloud package, Nextcloud can report errors such as
         # "The files of the app [all apps in /var/lib/nextcloud/apps] were not replaced correctly"
         # Restarting phpfpm on Nextcloud package update fixes these issues (but this is a workaround).
-        phpfpm-nextcloud.restartTriggers = [ cfg.package ];
+        phpfpm-nextcloud.restartTriggers = [ webroot overrideConfig ];
 
         nextcloud-setup = let
           c = cfg.config;
-          writePhpArray = a: "[${concatMapStringsSep "," (val: ''"${toString val}"'') a}]";
-          requiresReadSecretFunction = c.dbpassFile != null || c.objectstore.s3.enable;
-          objectstoreConfig = let s3 = c.objectstore.s3; in optionalString s3.enable ''
-            'objectstore' => [
-              'class' => '\\OC\\Files\\ObjectStore\\S3',
-              'arguments' => [
-                'bucket' => '${s3.bucket}',
-                'autocreate' => ${boolToString s3.autocreate},
-                'key' => '${s3.key}',
-                'secret' => nix_read_secret('${s3.secretFile}'),
-                ${optionalString (s3.hostname != null) "'hostname' => '${s3.hostname}',"}
-                ${optionalString (s3.port != null) "'port' => ${toString s3.port},"}
-                'use_ssl' => ${boolToString s3.useSsl},
-                ${optionalString (s3.region != null) "'region' => '${s3.region}',"}
-                'use_path_style' => ${boolToString s3.usePathStyle},
-                ${optionalString (s3.sseCKeyFile != null) "'sse_c_key' => nix_read_secret('${s3.sseCKeyFile}'),"}
-              ],
-            ]
-          '';
-
-          showAppStoreSetting = cfg.appstoreEnable != null || cfg.extraApps != {};
-          renderedAppStoreSetting =
-            let
-              x = cfg.appstoreEnable;
-            in
-              if x == null then "false"
-              else boolToString x;
-
-          nextcloudGreaterOrEqualThan = req: versionAtLeast cfg.package.version req;
-
-          overrideConfig = pkgs.writeText "nextcloud-config.php" ''
-            <?php
-            ${optionalString requiresReadSecretFunction ''
-              function nix_read_secret($file) {
-                if (!file_exists($file)) {
-                  throw new \RuntimeException(sprintf(
-                    "Cannot start Nextcloud, secret file %s set by NixOS doesn't seem to "
-                    . "exist! Please make sure that the file exists and has appropriate "
-                    . "permissions for user & group 'nextcloud'!",
-                    $file
-                  ));
-                }
-                return trim(file_get_contents($file));
-              }''}
-            function nix_decode_json_file($file, $error) {
-              if (!file_exists($file)) {
-                throw new \RuntimeException(sprintf($error, $file));
-              }
-              $decoded = json_decode(file_get_contents($file), true);
-
-              if (json_last_error() !== JSON_ERROR_NONE) {
-                throw new \RuntimeException(sprintf("Cannot decode %s, because: %s", $file, json_last_error_msg()));
-              }
-
-              return $decoded;
-            }
-            $CONFIG = [
-              'apps_paths' => [
-                ${optionalString (cfg.extraApps != { }) "[ 'path' => '${cfg.home}/nix-apps', 'url' => '/nix-apps', 'writable' => false ],"}
-                [ 'path' => '${cfg.home}/apps', 'url' => '/apps', 'writable' => false ],
-                [ 'path' => '${cfg.home}/store-apps', 'url' => '/store-apps', 'writable' => true ],
-              ],
-              ${optionalString (showAppStoreSetting) "'appstoreenabled' => ${renderedAppStoreSetting},"}
-              'datadirectory' => '${datadir}/data',
-              'skeletondirectory' => '${cfg.skeletonDirectory}',
-              ${optionalString cfg.caching.apcu "'memcache.local' => '\\OC\\Memcache\\APCu',"}
-              'log_type' => '${cfg.logType}',
-              'loglevel' => '${builtins.toString cfg.logLevel}',
-              ${optionalString (c.overwriteProtocol != null) "'overwriteprotocol' => '${c.overwriteProtocol}',"}
-              ${optionalString (c.dbname != null) "'dbname' => '${c.dbname}',"}
-              ${optionalString (c.dbhost != null) "'dbhost' => '${c.dbhost}',"}
-              ${optionalString (c.dbport != null) "'dbport' => '${toString c.dbport}',"}
-              ${optionalString (c.dbuser != null) "'dbuser' => '${c.dbuser}',"}
-              ${optionalString (c.dbtableprefix != null) "'dbtableprefix' => '${toString c.dbtableprefix}',"}
-              ${optionalString (c.dbpassFile != null) ''
-                  'dbpassword' => nix_read_secret(
-                    "${c.dbpassFile}"
-                  ),
-                ''
-              }
-              'dbtype' => '${c.dbtype}',
-              'trusted_domains' => ${writePhpArray ([ cfg.hostName ] ++ c.extraTrustedDomains)},
-              'trusted_proxies' => ${writePhpArray (c.trustedProxies)},
-              ${optionalString (c.defaultPhoneRegion != null) "'default_phone_region' => '${c.defaultPhoneRegion}',"}
-              ${optionalString (nextcloudGreaterOrEqualThan "23") "'profile.enabled' => ${boolToString cfg.globalProfiles},"}
-              ${objectstoreConfig}
-            ];
-
-            $CONFIG = array_replace_recursive($CONFIG, nix_decode_json_file(
-              "${jsonFormat.generate "nextcloud-extraOptions.json" cfg.extraOptions}",
-              "impossible: this should never happen (decoding generated extraOptions file %s failed)"
-            ));
-
-            ${optionalString (cfg.secretFile != null) ''
-              $CONFIG = array_replace_recursive($CONFIG, nix_decode_json_file(
-                "${cfg.secretFile}",
-                "Cannot start Nextcloud, secrets file %s set by NixOS doesn't exist!"
-              ));
-            ''}
-          '';
           occInstallCmd = let
             mkExport = { arg, value }: "export ${arg}=${value}";
             dbpass = {
@@ -888,7 +914,6 @@ in {
               # will be omitted.
               ${if c.dbname != null then "--database-name" else null} = ''"${c.dbname}"'';
               ${if c.dbhost != null then "--database-host" else null} = ''"${c.dbhost}"'';
-              ${if c.dbport != null then "--database-port" else null} = ''"${toString c.dbport}"'';
               ${if c.dbuser != null then "--database-user" else null} = ''"${c.dbuser}"'';
               "--database-pass" = "\"\$${dbpass.arg}\"";
               "--admin-user" = ''"${c.adminuser}"'';
@@ -905,7 +930,7 @@ in {
             (i: v: ''
               ${occ}/bin/nextcloud-occ config:system:set trusted_domains \
                 ${toString i} --value="${toString v}"
-            '') ([ cfg.hostName ] ++ cfg.config.extraTrustedDomains));
+            '') ([ cfg.hostName ] ++ cfg.extraOptions.trusted_domains));
 
         in {
           wantedBy = [ "multi-user.target" ];
@@ -913,6 +938,7 @@ in {
           after = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
           requires = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
           path = [ occ ];
+          restartTriggers = [ overrideConfig ];
           script = ''
             ${optionalString (c.dbpassFile != null) ''
               if [ ! -r "${c.dbpassFile}" ]; then
@@ -933,25 +959,12 @@ in {
               exit 1
             fi
 
-            ln -sf ${cfg.package}/apps ${cfg.home}/
-
-            # Install extra apps
-            ln -sfT \
-              ${pkgs.linkFarm "nix-apps"
-                (mapAttrsToList (name: path: { inherit name path; }) cfg.extraApps)} \
-              ${cfg.home}/nix-apps
-
-            # create nextcloud directories.
-            # if the directories exist already with wrong permissions, we fix that
-            for dir in ${datadir}/config ${datadir}/data ${cfg.home}/store-apps ${cfg.home}/nix-apps; do
-              if [ ! -e $dir ]; then
-                install -o nextcloud -g nextcloud -d $dir
-              elif [ $(stat -c "%G" $dir) != "nextcloud" ]; then
-                chgrp -R nextcloud $dir
+            ${concatMapStrings (name: ''
+              if [ -d "${cfg.home}"/${name} ]; then
+                echo "Cleaning up ${name}; these are now bundled in the webroot store-path!"
+                rm -r "${cfg.home}"/${name}
               fi
-            done
-
-            ln -sf ${overrideConfig} ${datadir}/config/override.config.php
+            '') [ "nix-apps" "apps" ]}
 
             # Do not install if already installed
             if [[ ! -e ${datadir}/config/config.php ]]; then
@@ -980,7 +993,7 @@ in {
           environment.NEXTCLOUD_CONFIG_DIR = "${datadir}/config";
           serviceConfig.Type = "oneshot";
           serviceConfig.User = "nextcloud";
-          serviceConfig.ExecStart = "${phpPackage}/bin/php -f ${cfg.package}/cron.php";
+          serviceConfig.ExecStart = "${phpPackage}/bin/php -f ${webroot}/cron.php";
         };
         nextcloud-update-plugins = mkIf cfg.autoUpdateApps.enable {
           after = [ "nextcloud-setup.service" ];
@@ -1041,22 +1054,25 @@ in {
         user = "nextcloud";
       };
 
-      services.nextcloud = lib.mkIf cfg.configureRedis {
-        caching.redis = true;
-        extraOptions = {
+      services.nextcloud = {
+        caching.redis = lib.mkIf cfg.configureRedis true;
+        extraOptions = mkMerge [({
+          datadirectory = lib.mkDefault "${datadir}/data";
+          trusted_domains = [ cfg.hostName ];
+        }) (lib.mkIf cfg.configureRedis {
           "memcache.distributed" = ''\OC\Memcache\Redis'';
           "memcache.locking" = ''\OC\Memcache\Redis'';
           redis = {
             host = config.services.redis.servers.nextcloud.unixSocket;
             port = 0;
           };
-        };
+        })];
       };
 
       services.nginx.enable = mkDefault true;
 
       services.nginx.virtualHosts.${cfg.hostName} = {
-        root = cfg.package;
+        root = webroot;
         locations = {
           "= /robots.txt" = {
             priority = 100;
@@ -1073,14 +1089,6 @@ in {
               }
             '';
           };
-          "~ ^/store-apps" = {
-            priority = 201;
-            extraConfig = "root ${cfg.home};";
-          };
-          "~ ^/nix-apps" = {
-            priority = 201;
-            extraConfig = "root ${cfg.home};";
-          };
           "^~ /.well-known" = {
             priority = 210;
             extraConfig = ''
@@ -1129,10 +1137,13 @@ in {
               fastcgi_read_timeout ${builtins.toString cfg.fastcgiTimeout}s;
             '';
           };
-          "~ \\.(?:css|js|mjs|svg|gif|png|jpg|jpeg|ico|wasm|tflite|map|html|ttf|bcmap|mp4|webm)$".extraConfig = ''
+          "~ \\.(?:css|js|mjs|svg|gif|png|jpg|jpeg|ico|wasm|tflite|map|html|ttf|bcmap|mp4|webm|ogg|flac)$".extraConfig = ''
             try_files $uri /index.php$request_uri;
             expires 6M;
             access_log off;
+            location ~ \.mjs$ {
+              default_type text/javascript;
+            }
             location ~ \.wasm$ {
               default_type application/wasm;
             }
diff --git a/nixos/modules/services/web-apps/node-red.nix b/nixos/modules/services/web-apps/node-red.nix
index d775042fed164..82f89783d778a 100644
--- a/nixos/modules/services/web-apps/node-red.nix
+++ b/nixos/modules/services/web-apps/node-red.nix
@@ -19,7 +19,7 @@ in
   options.services.node-red = {
     enable = mkEnableOption (lib.mdDoc "the Node-RED service");
 
-    package = mkPackageOption pkgs "node-red" { };
+    package = mkPackageOption pkgs [ "nodePackages" "node-red" ] { };
 
     openFirewall = mkOption {
       type = types.bool;
diff --git a/nixos/modules/services/web-apps/openvscode-server.nix b/nixos/modules/services/web-apps/openvscode-server.nix
index 76a19dccae165..81b9d1f3b4c8c 100644
--- a/nixos/modules/services/web-apps/openvscode-server.nix
+++ b/nixos/modules/services/web-apps/openvscode-server.nix
@@ -159,6 +159,7 @@ in
     systemd.services.openvscode-server = {
       description = "OpenVSCode server";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       path = cfg.extraPackages;
       environment = cfg.extraEnvironment;
diff --git a/nixos/modules/services/web-apps/outline.nix b/nixos/modules/services/web-apps/outline.nix
index d97b45d624187..702755dfa2ab8 100644
--- a/nixos/modules/services/web-apps/outline.nix
+++ b/nixos/modules/services/web-apps/outline.nix
@@ -586,6 +586,37 @@ in
       ensureDatabases = [ "outline" ];
     };
 
+    # Outline is unable to create the uuid-ossp extension when using postgresql 12; in later versions this
+    # extension can be created without superuser permissions. This service therefore creates the extension
+    # before outline starts when postgresql 12 is used on the host.
+    #
+    # Can be removed after postgresql 12 is dropped from nixos.
+    systemd.services.outline-postgresql =
+      let
+        pgsql = config.services.postgresql;
+      in
+        lib.mkIf (cfg.databaseUrl == "local" && pgsql.package == pkgs.postgresql_12) {
+          after = [ "postgresql.service" ];
+          bindsTo = [ "postgresql.service" ];
+          wantedBy = [ "outline.service" ];
+          partOf = [ "outline.service" ];
+          path = [
+            pgsql.package
+          ];
+          script = ''
+            set -o errexit -o pipefail -o nounset -o errtrace
+            shopt -s inherit_errexit
+
+            psql outline -tAc 'CREATE EXTENSION IF NOT EXISTS "uuid-ossp"'
+          '';
+
+          serviceConfig = {
+            User = pgsql.superUser;
+            Type = "oneshot";
+            RemainAfterExit = true;
+          };
+        };
+
     services.redis.servers.outline = lib.mkIf (cfg.redisUrl == "local") {
       enable = true;
       user = config.services.outline.user;
diff --git a/nixos/modules/services/web-apps/peering-manager.nix b/nixos/modules/services/web-apps/peering-manager.nix
index d6f6077268d46..0382ce7174738 100644
--- a/nixos/modules/services/web-apps/peering-manager.nix
+++ b/nixos/modules/services/web-apps/peering-manager.nix
@@ -196,6 +196,7 @@ in {
     systemd.targets.peering-manager = {
       description = "Target for all Peering Manager services";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" "redis-peering-manager.service" ];
     };
 
diff --git a/nixos/modules/services/web-apps/suwayomi-server.md b/nixos/modules/services/web-apps/suwayomi-server.md
new file mode 100644
index 0000000000000..ff1e06c8a53ae
--- /dev/null
+++ b/nixos/modules/services/web-apps/suwayomi-server.md
@@ -0,0 +1,108 @@
+# Suwayomi-Server {#module-services-suwayomi-server}
+
+A free and open source manga reader server that runs extensions built for Tachiyomi.
+
+## Basic usage {#module-services-suwayomi-server-basic-usage}
+
+By default, the module runs the Suwayomi-Server backend and web UI:
+
+```nix
+{ ... }:
+
+{
+  services.suwayomi-server = {
+    enable = true;
+  };
+}
+```
+
+It runs in the systemd service named `suwayomi-server` in the data directory `/var/lib/suwayomi-server`.
+
+You can change the default parameters, for example:
+```nix
+{ ... }:
+
+{
+  services.suwayomi-server = {
+    enable = true;
+
+    dataDir = "/var/lib/suwayomi"; # Default is "/var/lib/suwayomi-server"
+    openFirewall = true;
+
+    settings = {
+      server.port = 4567;
+    };
+  };
+}
+```
+
+If you want to create a desktop icon, you can activate the system tray option:
+
+```nix
+{ ... }:
+
+{
+  services.suwayomi-server = {
+    enable = true;
+
+    dataDir = "/var/lib/suwayomi"; # Default is "/var/lib/suwayomi-server"
+    openFirewall = true;
+
+    settings = {
+      server.port = 4567;
+      server.systemTrayEnabled = true;
+    };
+  };
+}
+```
+
+## Basic authentication {#module-services-suwayomi-server-basic-auth}
+
+You can configure basic authentication for the web interface with:
+
+```nix
+{ ... }:
+
+{
+  services.suwayomi-server = {
+    enable = true;
+
+    openFirewall = true;
+
+    settings = {
+      server.port = 4567;
+      server = {
+        basicAuthEnabled = true;
+        basicAuthUsername = "username";
+
+        # NOTE: this is not a real upstream option
+        basicAuthPasswordFile = ./path/to/the/password/file;
+      };
+    };
+  };
+}
+```
+
+## Extra configuration {#module-services-suwayomi-server-extra-config}
+
+Not all of Suwayomi-Server's configuration options are available directly in this module, but you can set any other option through `settings`:
+
+```nix
+{ ... }:
+
+{
+  services.suwayomi-server = {
+    enable = true;
+
+    openFirewall = true;
+
+    settings = {
+      server = {
+        port = 4567;
+        autoDownloadNewChapters = false;
+        maxSourcesInParallel = 6;
+      };
+    };
+  };
+}
+```
diff --git a/nixos/modules/services/web-apps/suwayomi-server.nix b/nixos/modules/services/web-apps/suwayomi-server.nix
new file mode 100644
index 0000000000000..c4c1540edbee5
--- /dev/null
+++ b/nixos/modules/services/web-apps/suwayomi-server.nix
@@ -0,0 +1,260 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.suwayomi-server;
+  inherit (lib) mkOption mdDoc mkEnableOption mkIf types;
+in
+{
+  options = {
+    services.suwayomi-server = {
+      enable = mkEnableOption (mdDoc "Suwayomi, a free and open source manga reader server that runs extensions built for Tachiyomi");
+
+      package = lib.mkPackageOptionMD pkgs "suwayomi-server" { };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/suwayomi-server";
+        example = "/var/data/mangas";
+        description = mdDoc ''
+          The path to the data directory in which Suwayomi-Server will download scans.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "suwayomi";
+        example = "root";
+        description = mdDoc ''
+          User account under which Suwayomi-Server runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "suwayomi";
+        example = "medias";
+        description = mdDoc ''
+          Group under which Suwayomi-Server runs.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Whether to open the firewall for the port in {option}`services.suwayomi-server.settings.server.port`.
+        '';
+      };
+
+      settings = mkOption {
+        type = types.submodule {
+          freeformType =
+            let
+              recursiveAttrsType = with types; attrsOf (nullOr (oneOf [
+                str
+                path
+                int
+                float
+                bool
+                (listOf str)
+                (recursiveAttrsType // { description = "instances of this type recursively"; })
+              ]));
+            in
+            recursiveAttrsType;
+          options = {
+            server = {
+              ip = mkOption {
+                type = types.str;
+                default = "0.0.0.0";
+                example = "127.0.0.1";
+                description = mdDoc ''
+                  The IP address that Suwayomi will bind to.
+                '';
+              };
+
+              port = mkOption {
+                type = types.port;
+                default = 8080;
+                example = 4567;
+                description = mdDoc ''
+                  The port that Suwayomi will listen on.
+                '';
+              };
+
+              basicAuthEnabled = mkEnableOption (mdDoc ''
+                basic access authentication for Suwayomi-Server.
+                Enabling this option is useful when hosting on a public network or the Internet.
+              '');
+
+              basicAuthUsername = mkOption {
+                type = types.nullOr types.str;
+                default = null;
+                description = mdDoc ''
+                  The username value that you have to provide when authenticating.
+                '';
+              };
+
+              # NOTE: this is not a real upstream option
+              basicAuthPasswordFile = mkOption {
+                type = types.nullOr types.path;
+                default = null;
+                example = "/var/secrets/suwayomi-server-password";
+                description = mdDoc ''
+                  The password file containing the value that you have to provide when authenticating.
+                '';
+              };
+
+              downloadAsCbz = mkOption {
+                type = types.bool;
+                default = false;
+                description = mdDoc ''
+                  Download chapters as `.cbz` files.
+                '';
+              };
+
+              localSourcePath = mkOption {
+                type = types.path;
+                default = cfg.dataDir;
+                defaultText = lib.literalExpression "suwayomi-server.dataDir";
+                example = "/var/data/local_mangas";
+                description = mdDoc ''
+                  Path to the local source folder.
+                '';
+              };
+
+              systemTrayEnabled = mkOption {
+                type = types.bool;
+                default = false;
+                description = mdDoc ''
+                  Whether to enable a system tray icon, if possible.
+                '';
+              };
+            };
+          };
+        };
+        description = mdDoc ''
+          Configuration to write to {file}`server.conf`.
+          See <https://github.com/Suwayomi/Suwayomi-Server/wiki/Configuring-Suwayomi-Server> for more information.
+        '';
+        default = { };
+        example = {
+          server.socksProxyEnabled = true;
+          server.socksProxyHost = "yourproxyhost.com";
+          server.socksProxyPort = "8080";
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [{
+      assertion = with cfg.settings.server; basicAuthEnabled -> (basicAuthUsername != null && basicAuthPasswordFile != null);
+      message = ''
+        [suwayomi-server]: the username and the password file cannot be null when the basic auth is enabled
+      '';
+    }];
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.settings.server.port ];
+
+    users.groups = mkIf (cfg.group == "suwayomi") {
+      suwayomi = { };
+    };
+
+    users.users = mkIf (cfg.user == "suwayomi") {
+      suwayomi = {
+        group = cfg.group;
+        # Need to set the user home because the package writes to ~/.local/share/Tachidesk
+        home = cfg.dataDir;
+        description = "Suwayomi Daemon user";
+        isSystemUser = true;
+      };
+    };
+
+    systemd.tmpfiles.settings."10-suwayomi-server" = {
+      "${cfg.dataDir}/.local/share/Tachidesk".d = {
+        mode = "0700";
+        inherit (cfg) user group;
+      };
+    };
+
+    systemd.services.suwayomi-server =
+      let
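+        # Flatten a nested attribute set into dotted keys, e.g.
+        # { server = { port = 8080; }; } becomes { "server.port" = 8080; }.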
+        flattenConfig = prefix: config:
+          lib.foldl'
+            lib.mergeAttrs
+            { }
+            (lib.attrValues
+              (lib.mapAttrs
+                (k: v:
+                  if !(lib.isAttrs v)
+                  then { "${prefix}${k}" = v; }
+                  else flattenConfig "${prefix}${k}." v
+                )
+                config
+              )
+            );
+
+        # HOCON is a JSON superset that Suwayomi-Server uses for configuration.
+        toHOCON = attr:
+          let
+            attrType = builtins.typeOf attr;
+          in
+          if builtins.elem attrType [ "string" "path" "int" "float" ]
+          then ''"${toString attr}"''
+          else if attrType == "bool"
+          then lib.boolToString attr
+          else if attrType == "list"
+          then "[\n${lib.concatMapStringsSep ",\n" toHOCON attr}\n]"
+          else # attrs, lambda, null
+            throw ''
+              [suwayomi-server]: invalid config value type '${attrType}'.
+            '';
+
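+        # Render the settings to server.conf: swap the password-file option for an
+        # environment-variable placeholder, flatten to dotted keys, drop nulls and
+        # emit one "key = value" line per setting.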
+        configFile = pkgs.writeText "server.conf" (lib.pipe cfg.settings [
+          (settings: lib.recursiveUpdate settings {
+            server.basicAuthPasswordFile = null;
+            server.basicAuthPassword =
+              if settings.server.basicAuthEnabled
+              then "$TACHIDESK_SERVER_BASIC_AUTH_PASSWORD"
+              else null;
+          })
+          (flattenConfig "")
+          (lib.filterAttrs (_: x: x != null))
+          (lib.mapAttrsToList (name: value: ''${name} = ${toHOCON value}''))
+          lib.concatLines
+        ]);
+
+      in
+      {
+        description = "A free and open source manga reader server that runs extensions built for Tachiyomi.";
+
+        wantedBy = [ "multi-user.target" ];
+        wants = [ "network-online.target" ];
+        after = [ "network-online.target" ];
+
+        script = ''
+          ${lib.optionalString cfg.settings.server.basicAuthEnabled ''
+            export TACHIDESK_SERVER_BASIC_AUTH_PASSWORD="$(<${cfg.settings.server.basicAuthPasswordFile})"
+          ''}
+          ${lib.getExe pkgs.envsubst} -i ${configFile} -o ${cfg.dataDir}/.local/share/Tachidesk/server.conf
+          ${lib.getExe cfg.package} -Dsuwayomi.tachidesk.config.server.rootDir=${cfg.dataDir}
+        '';
+
+        serviceConfig = {
+          User = cfg.user;
+          Group = cfg.group;
+
+          Type = "simple";
+          Restart = "on-failure";
+
+          StateDirectory = mkIf (cfg.dataDir == "/var/lib/suwayomi-server") "suwayomi-server";
+        };
+      };
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ ratcornu ];
+    doc = ./suwayomi-server.md;
+  };
+}
diff --git a/nixos/modules/services/web-apps/windmill.nix b/nixos/modules/services/web-apps/windmill.nix
new file mode 100644
index 0000000000000..8e940dabdc1f8
--- /dev/null
+++ b/nixos/modules/services/web-apps/windmill.nix
@@ -0,0 +1,177 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.windmill;
+in
+{
+  options.services.windmill = {
+    enable = lib.mkEnableOption (lib.mdDoc "windmill service");
+
+    serverPort = lib.mkOption {
+      type = lib.types.port;
+      default = 8001;
+      description = lib.mdDoc "Port the windmill server listens on.";
+    };
+
+    lspPort = lib.mkOption {
+      type = lib.types.port;
+      default = 3001;
+      description = lib.mdDoc "Port the windmill lsp listens on.";
+    };
+
+    database = {
+      name = lib.mkOption {
+        type = lib.types.str;
+        # the simplest database setup is to have the database named like the user.
+        default = "windmill";
+        description = lib.mdDoc "Database name.";
+      };
+
+      user = lib.mkOption {
+        type = lib.types.str;
+        # the simplest database setup is to have the database user named like the database.
+        default = "windmill";
+        description = lib.mdDoc "Database user.";
+      };
+
+      urlPath = lib.mkOption {
+        type = lib.types.path;
+        description = lib.mdDoc ''
+          Path to the file containing the database URL that windmill should connect to. This is not deduced from the database user and name, as it might contain a secret.
+        '';
+        example = "config.age.secrets.DATABASE_URL_FILE.path";
+      };
+      createLocally = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to create a local database automatically.";
+      };
+    };
+
+    baseUrl = lib.mkOption {
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        The base url that windmill will be served on.
+      '';
+      example = "https://windmill.example.com";
+    };
+
+    logLevel = lib.mkOption {
+      type = lib.types.enum [ "error" "warn" "info" "debug" "trace" ];
+      default = "info";
+      description = lib.mdDoc "Log level";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    services.postgresql = lib.optionalAttrs (cfg.database.createLocally) {
+      enable = lib.mkDefault true;
+
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensureDBOwnership = true;
+        }
+      ];
+
+   };
+
+   systemd.services =
+    let
+      serviceConfig = {
+        DynamicUser = true;
+        # using the same user to simplify db connection
+        User = cfg.database.user;
+        ExecStart = "${pkgs.windmill}/bin/windmill";
+
+        Restart = "always";
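+        # systemd exposes this credential under the unit's credentials directory (%d),
+        # which is where the DATABASE_URL_FILE environment variable below points.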
+        LoadCredential = [
+          "DATABASE_URL_FILE:${cfg.database.urlPath}"
+        ];
+      };
+    in
+    {
+
+    # coming from https://github.com/windmill-labs/windmill/blob/main/init-db-as-superuser.sql
+    # modified to not grant privileges on all tables
+    # create role windmill_user and windmill_admin only if they don't exist
+    postgresql.postStart = lib.mkIf cfg.database.createLocally (lib.mkAfter ''
+      $PSQL -tA <<"EOF"
+DO $$
+BEGIN
+    IF NOT EXISTS (
+        SELECT FROM pg_catalog.pg_roles
+        WHERE rolname = 'windmill_user'
+    ) THEN
+        CREATE ROLE windmill_user;
+        GRANT ALL PRIVILEGES ON DATABASE ${cfg.database.name} TO windmill_user;
+    ELSE
+      RAISE NOTICE 'Role "windmill_user" already exists. Skipping.';
+    END IF;
+    IF NOT EXISTS (
+        SELECT FROM pg_catalog.pg_roles
+        WHERE rolname = 'windmill_admin'
+    ) THEN
+      CREATE ROLE windmill_admin WITH BYPASSRLS;
+      GRANT windmill_user TO windmill_admin;
+    ELSE
+      RAISE NOTICE 'Role "windmill_admin" already exists. Skipping.';
+    END IF;
+    GRANT windmill_admin TO windmill;
+END
+$$;
+EOF
+    '');
+
+     windmill-server = {
+        description = "Windmill server";
+        after = [ "network.target" ] ++ lib.optional cfg.database.createLocally "postgresql.service";
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = serviceConfig // { StateDirectory = "windmill";};
+
+        environment = {
+          DATABASE_URL_FILE = "%d/DATABASE_URL_FILE";
+          PORT = builtins.toString cfg.serverPort;
+          WM_BASE_URL = cfg.baseUrl;
+          RUST_LOG = cfg.logLevel;
+          MODE = "server";
+        };
+      };
+
+     windmill-worker = {
+        description = "Windmill worker";
+        after = [ "network.target" ] ++ lib.optional cfg.database.createLocally "postgresql.service";
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = serviceConfig // { StateDirectory = "windmill-worker";};
+
+        environment = {
+          DATABASE_URL_FILE = "%d/DATABASE_URL_FILE";
+          WM_BASE_URL = cfg.baseUrl;
+          RUST_LOG = cfg.logLevel;
+          MODE = "worker";
+          WORKER_GROUP = "default";
+          KEEP_JOB_DIR = "false";
+        };
+      };
+
+     windmill-worker-native = {
+        description = "Windmill worker native";
+        after = [ "network.target" ] ++ lib.optional cfg.database.createLocally "postgresql.service";
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = serviceConfig // { StateDirectory = "windmill-worker-native";};
+
+        environment = {
+          DATABASE_URL_FILE = "%d/DATABASE_URL_FILE";
+          WM_BASE_URL = cfg.baseUrl;
+          RUST_LOG = cfg.logLevel;
+          MODE = "worker";
+          WORKER_GROUP = "native";
+        };
+      };
+    };
+  };
+}
diff --git a/nixos/modules/services/web-apps/wordpress.nix b/nixos/modules/services/web-apps/wordpress.nix
index 03d5634854a37..2f7306309d694 100644
--- a/nixos/modules/services/web-apps/wordpress.nix
+++ b/nixos/modules/services/web-apps/wordpress.nix
@@ -34,7 +34,7 @@ let
       # copy additional plugin(s), theme(s) and language(s)
       ${concatStringsSep "\n" (mapAttrsToList (name: theme: "cp -r ${theme} $out/share/wordpress/wp-content/themes/${name}") cfg.themes)}
       ${concatStringsSep "\n" (mapAttrsToList (name: plugin: "cp -r ${plugin} $out/share/wordpress/wp-content/plugins/${name}") cfg.plugins)}
-      ${concatMapStringsSep "\n" (language: "cp -r ${language}/* $out/share/wordpress/wp-content/languages/") cfg.languages}
+      ${concatMapStringsSep "\n" (language: "cp -r ${language} $out/share/wordpress/wp-content/languages/") cfg.languages}
     '';
   };
 
@@ -174,22 +174,22 @@ let
             List of path(s) to respective language(s) which are copied from the 'languages' directory.
           '';
           example = literalExpression ''
-            [(
+            [
               # Let's package the German language.
               # For other languages try to replace language and country code in the download URL with your desired one.
               # Reference https://translate.wordpress.org for available translations and
               # codes.
-              language-de = pkgs.stdenv.mkDerivation {
+              (pkgs.stdenv.mkDerivation {
                 name = "language-de";
                 src = pkgs.fetchurl {
                   url = "https://de.wordpress.org/wordpress-''${pkgs.wordpress.version}-de_DE.tar.gz";
                   # Name is required to invalidate the hash when wordpress is updated
-                  name = "wordpress-''${pkgs.wordpress.version}-language-de"
+                  name = "wordpress-''${pkgs.wordpress.version}-language-de";
                   sha256 = "sha256-dlas0rXTSV4JAl8f/UyMbig57yURRYRhTMtJwF9g8h0=";
                 };
                 installPhase = "mkdir -p $out; cp -r ./wp-content/languages/* $out/";
-              };
-            )];
+              })
+            ];
           '';
         };
 
diff --git a/nixos/modules/services/web-servers/agate.nix b/nixos/modules/services/web-servers/agate.nix
index dce425035ff72..e03174c87945b 100644
--- a/nixos/modules/services/web-servers/agate.nix
+++ b/nixos/modules/services/web-servers/agate.nix
@@ -71,6 +71,7 @@ in
     systemd.services.agate = {
       description = "Agate";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network.target" "network-online.target" ];
 
       script =
diff --git a/nixos/modules/services/web-servers/caddy/default.nix b/nixos/modules/services/web-servers/caddy/default.nix
index 497aa9ba956e0..95dc219d108cc 100644
--- a/nixos/modules/services/web-servers/caddy/default.nix
+++ b/nixos/modules/services/web-servers/caddy/default.nix
@@ -147,7 +147,7 @@ in
       default = configFile;
       defaultText = "A Caddyfile automatically generated by values from services.caddy.*";
       example = literalExpression ''
-        pkgs.writeTextDir "Caddyfile" '''
+        pkgs.writeText "Caddyfile" '''
           example.com
 
           root * /var/www/wordpress
@@ -164,9 +164,9 @@ in
     };
 
     adapter = mkOption {
-      default = if (builtins.baseNameOf cfg.configFile) == "Caddyfile" then "caddyfile" else null;
+      default = if ((cfg.configFile != configFile) || (builtins.baseNameOf cfg.configFile) == "Caddyfile") then "caddyfile" else null;
       defaultText = literalExpression ''
-        if (builtins.baseNameOf cfg.configFile) == "Caddyfile" then "caddyfile" else null
+        if ((cfg.configFile != configFile) || (builtins.baseNameOf cfg.configFile) == "Caddyfile") then "caddyfile" else null
       '';
       example = literalExpression "nginx";
       type = with types; nullOr str;
@@ -342,8 +342,9 @@ in
       }
     '';
 
-    # https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size
+    # https://github.com/quic-go/quic-go/wiki/UDP-Buffer-Sizes
     boot.kernel.sysctl."net.core.rmem_max" = mkDefault 2500000;
+    boot.kernel.sysctl."net.core.wmem_max" = mkDefault 2500000;
 
     systemd.packages = [ cfg.package ];
     systemd.services.caddy = {
diff --git a/nixos/modules/services/web-servers/mighttpd2.nix b/nixos/modules/services/web-servers/mighttpd2.nix
index bdd6d8b62aa36..bb75dc4f2ff47 100644
--- a/nixos/modules/services/web-servers/mighttpd2.nix
+++ b/nixos/modules/services/web-servers/mighttpd2.nix
@@ -101,6 +101,7 @@ in {
       ];
     systemd.services.mighttpd2 = {
       description = "Mighttpd2 web server";
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
diff --git a/nixos/modules/services/web-servers/minio.nix b/nixos/modules/services/web-servers/minio.nix
index 6431db250476b..be6946657e23d 100644
--- a/nixos/modules/services/web-servers/minio.nix
+++ b/nixos/modules/services/web-servers/minio.nix
@@ -98,6 +98,7 @@ in
 
       services.minio = {
         description = "Minio Object Storage";
+        wants = [ "network-online.target" ];
         after = [ "network-online.target" ];
         wantedBy = [ "multi-user.target" ];
         serviceConfig = {
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index cf70dc3259456..6799de6c7d96c 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -334,8 +334,8 @@ let
           + optionalString vhost.default "default_server "
           + optionalString vhost.reuseport "reuseport "
           + optionalString (extraParameters != []) (concatStringsSep " "
-            (let inCompatibleParameters = [ "ssl" "proxy_protocol" "http2" ];
-                isCompatibleParameter = param: !(any (p: p == param) inCompatibleParameters);
+            (let inCompatibleParameters = [ "accept_filter" "backlog" "deferred" "fastopen" "http2" "proxy_protocol" "so_keepalive" "ssl" ];
+                isCompatibleParameter = param: !(any (p: lib.hasPrefix p param) inCompatibleParameters);
             in filter isCompatibleParameter extraParameters))
           + ";"))
           + "
@@ -352,10 +352,11 @@ let
 
         # The acme-challenge location doesn't need to be added if we are not using any automated
         # certificate provisioning and can also be omitted when we use a certificate obtained via a DNS-01 challenge
-        acmeLocation = optionalString (vhost.enableACME || (vhost.useACMEHost != null && config.security.acme.certs.${vhost.useACMEHost}.dnsProvider == null)) ''
+        acmeLocation = optionalString (vhost.enableACME || (vhost.useACMEHost != null && config.security.acme.certs.${vhost.useACMEHost}.dnsProvider == null))
           # Rule for legitimate ACME Challenge requests (like /.well-known/acme-challenge/xxxxxxxxx)
           # We use ^~ here, so that we don't check any regexes (which could
           # otherwise easily override this intended match accidentally).
+        ''
           location ^~ /.well-known/acme-challenge/ {
             ${optionalString (vhost.acmeFallbackHost != null) "try_files $uri @acme-fallback;"}
             ${optionalString (vhost.acmeRoot != null) "root ${vhost.acmeRoot};"}
@@ -375,10 +376,11 @@ let
             ${concatMapStringsSep "\n" listenString redirectListen}
 
             server_name ${vhost.serverName} ${concatStringsSep " " vhost.serverAliases};
-            ${acmeLocation}
+
             location / {
-              return 301 https://$host$request_uri;
+              return ${toString vhost.redirectCode} https://$host$request_uri;
             }
+            ${acmeLocation}
           }
         ''}
 
@@ -392,13 +394,6 @@ let
             http3 ${if vhost.http3 then "on" else "off"};
             http3_hq ${if vhost.http3_hq then "on" else "off"};
           ''}
-          ${acmeLocation}
-          ${optionalString (vhost.root != null) "root ${vhost.root};"}
-          ${optionalString (vhost.globalRedirect != null) ''
-            location / {
-              return 301 http${optionalString hasSSL "s"}://${vhost.globalRedirect}$request_uri;
-            }
-          ''}
           ${optionalString hasSSL ''
             ssl_certificate ${vhost.sslCertificate};
             ssl_certificate_key ${vhost.sslCertificateKey};
@@ -413,14 +408,16 @@ let
             ssl_conf_command Options KTLS;
           ''}
 
-          ${optionalString (hasSSL && vhost.quic && vhost.http3)
-            # Advertise that HTTP/3 is available
-          ''
-            add_header Alt-Svc 'h3=":$server_port"; ma=86400';
-          ''}
-
           ${mkBasicAuth vhostName vhost}
 
+          ${optionalString (vhost.root != null) "root ${vhost.root};"}
+
+          ${optionalString (vhost.globalRedirect != null) ''
+            location / {
+              return ${toString vhost.redirectCode} http${optionalString hasSSL "s"}://${vhost.globalRedirect}$request_uri;
+            }
+          ''}
+          ${acmeLocation}
           ${mkLocations vhost.locations}
 
           ${vhost.extraConfig}
@@ -449,7 +446,7 @@ let
       ${optionalString (config.tryFiles != null) "try_files ${config.tryFiles};"}
       ${optionalString (config.root != null) "root ${config.root};"}
       ${optionalString (config.alias != null) "alias ${config.alias};"}
-      ${optionalString (config.return != null) "return ${config.return};"}
+      ${optionalString (config.return != null) "return ${toString config.return};"}
       ${config.extraConfig}
       ${optionalString (config.proxyPass != null && config.recommendedProxySettings) "include ${recommendedProxyConfig};"}
       ${mkBasicAuth "sublocation" config}
@@ -472,7 +469,7 @@ let
 
   mkCertOwnershipAssertion = import ../../../security/acme/mk-cert-ownership-assertion.nix;
 
-  oldHTTP2 = versionOlder cfg.package.version "1.25.1";
+  oldHTTP2 = (versionOlder cfg.package.version "1.25.1" && !(cfg.package.pname == "angie" || cfg.package.pname == "angieQuic"));
 in
 
 {
@@ -649,6 +646,8 @@ in
           Nginx package to use. This defaults to the stable version. Note
           that the nginx team recommends using the mainline version, which is
           available in nixpkgs as `nginxMainline`.
+          Supported Nginx forks include `angie`, `openresty` and `tengine`.
+          For HTTP/3 support use `nginxQuic` or `angieQuic`.
         '';
       };
 
@@ -1128,14 +1127,6 @@ in
       }
 
       {
-        assertion = any (host: host.kTLS) (attrValues virtualHosts) -> versionAtLeast cfg.package.version "1.21.4";
-        message = ''
-          services.nginx.virtualHosts.<name>.kTLS requires nginx version
-          1.21.4 or above; see the documentation for services.nginx.package.
-        '';
-      }
-
-      {
         assertion = all (host: !(host.enableACME && host.useACMEHost != null)) (attrValues virtualHosts);
         message = ''
           Options services.nginx.service.virtualHosts.<name>.enableACME and
@@ -1144,18 +1135,20 @@ in
       }
 
       {
-        assertion = cfg.package.pname != "nginxQuic" -> !(cfg.enableQuicBPF);
+        assertion = cfg.package.pname != "nginxQuic" && cfg.package.pname != "angieQuic" -> !(cfg.enableQuicBPF);
         message = ''
           services.nginx.enableQuicBPF requires using nginxQuic package,
-          which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`.
+          which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;` or
+          `services.nginx.package = pkgs.angieQuic;`.
         '';
       }
 
       {
-        assertion = cfg.package.pname != "nginxQuic" -> all (host: !host.quic) (attrValues virtualHosts);
+        assertion = cfg.package.pname != "nginxQuic" && cfg.package.pname != "angieQuic" -> all (host: !host.quic) (attrValues virtualHosts);
         message = ''
-          services.nginx.service.virtualHosts.<name>.quic requires using nginxQuic package,
-          which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`.
+          services.nginx.service.virtualHosts.<name>.quic requires using nginxQuic or angie packages,
+          which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;` or
+          `services.nginx.package = pkgs.angieQuic;`.
         '';
       }
 
@@ -1341,6 +1334,8 @@ in
       nginx.gid = config.ids.gids.nginx;
     };
 
+    boot.kernelModules = optional (versionAtLeast config.boot.kernelPackages.kernel.version "4.17") "tls";
+
     # do not delete the default temp directories created upon nginx startup
     systemd.tmpfiles.rules = [
       "X /tmp/systemd-private-%b-nginx.service-*/tmp/nginx_*"
diff --git a/nixos/modules/services/web-servers/nginx/location-options.nix b/nixos/modules/services/web-servers/nginx/location-options.nix
index 2728852058ea7..2138e551fd434 100644
--- a/nixos/modules/services/web-servers/nginx/location-options.nix
+++ b/nixos/modules/services/web-servers/nginx/location-options.nix
@@ -93,7 +93,7 @@ with lib;
     };
 
     return = mkOption {
-      type = types.nullOr types.str;
+      type = with types; nullOr (oneOf [ str int ]);
       default = null;
       example = "301 http://example.com$request_uri";
       description = lib.mdDoc ''
diff --git a/nixos/modules/services/web-servers/nginx/tailscale-auth.nix b/nixos/modules/services/web-servers/nginx/tailscale-auth.nix
new file mode 100644
index 0000000000000..a2e4d4a30be5c
--- /dev/null
+++ b/nixos/modules/services/web-servers/nginx/tailscale-auth.nix
@@ -0,0 +1,158 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.nginx.tailscaleAuth;
+in
+{
+  options.services.nginx.tailscaleAuth = {
+    enable = mkEnableOption (lib.mdDoc "tailscale.nginx-auth, to authenticate nginx users via Tailscale");
+
+    package = lib.mkPackageOptionMD pkgs "tailscale-nginx-auth" {};
+
+    user = mkOption {
+      type = types.str;
+      default = "tailscale-nginx-auth";
+      description = lib.mdDoc "User which runs tailscale-nginx-auth";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "tailscale-nginx-auth";
+      description = lib.mdDoc "Group which runs tailscale-nginx-auth";
+    };
+
+    expectedTailnet = mkOption {
+      default = "";
+      type = types.nullOr types.str;
+      example = "tailnet012345.ts.net";
+      description = lib.mdDoc ''
+        If you want to prevent node sharing from allowing users to access services
+        across tailnets, declare your expected tailnet's domain here.
+      '';
+    };
+
+    socketPath = mkOption {
+      default = "/run/tailscale-nginx-auth/tailscale-nginx-auth.sock";
+      type = types.path;
+      description = lib.mdDoc ''
+        Path of the socket listening to nginx authorization requests.
+      '';
+    };
+
+    virtualHosts = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        A list of nginx virtual hosts to put behind tailscale.nginx-auth
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.tailscale.enable = true;
+    services.nginx.enable = true;
+
+    users.users.${cfg.user} = {
+      isSystemUser = true;
+      inherit (cfg) group;
+    };
+    users.groups.${cfg.group} = { };
+    users.users.${config.services.nginx.user}.extraGroups = [ cfg.group ];
+    systemd.sockets.tailscale-nginx-auth = {
+      description = "Tailscale NGINX Authentication socket";
+      partOf = [ "tailscale-nginx-auth.service" ];
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ cfg.socketPath ];
+      socketConfig = {
+        SocketMode = "0660";
+        SocketUser = cfg.user;
+        SocketGroup = cfg.group;
+      };
+    };
+
+
+    systemd.services.tailscale-nginx-auth = {
+      description = "Tailscale NGINX Authentication service";
+      after = [ "nginx.service" ];
+      wants = [ "nginx.service" ];
+      requires = [ "tailscale-nginx-auth.socket" ];
+
+      serviceConfig = {
+        ExecStart = "${lib.getExe cfg.package}";
+        RuntimeDirectory = "tailscale-nginx-auth";
+        User = cfg.user;
+        Group = cfg.group;
+
+        BindPaths = [ "/run/tailscale/tailscaled.sock" ];
+
+        CapabilityBoundingSet = "";
+        DeviceAllow = "";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        RestrictNamespaces = true;
+        RestrictAddressFamilies = [ "AF_UNIX" ];
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+        SystemCallFilter = [
+          "@system-service"
+          "~@cpu-emulation" "~@debug" "~@keyring" "~@memlock" "~@obsolete" "~@privileged" "~@setuid"
+        ];
+      };
+    };
+
+    services.nginx.virtualHosts = genAttrs
+      cfg.virtualHosts
+      (vhost: {
+        locations."/auth" = {
+          extraConfig = ''
+            internal;
+
+            proxy_pass http://unix:${cfg.socketPath};
+            proxy_pass_request_body off;
+
+            # Upstream uses $http_host here, but we are using gixy to check nginx configurations
+            # gixy wants us to use $host: https://github.com/yandex/gixy/blob/master/docs/en/plugins/hostspoofing.md
+            proxy_set_header Host $host;
+            proxy_set_header Remote-Addr $remote_addr;
+            proxy_set_header Remote-Port $remote_port;
+            proxy_set_header Original-URI $request_uri;
+            proxy_set_header X-Scheme                $scheme;
+            proxy_set_header X-Auth-Request-Redirect $scheme://$host$request_uri;
+          '';
+        };
+        locations."/".extraConfig = ''
+          auth_request /auth;
+          auth_request_set $auth_user $upstream_http_tailscale_user;
+          auth_request_set $auth_name $upstream_http_tailscale_name;
+          auth_request_set $auth_login $upstream_http_tailscale_login;
+          auth_request_set $auth_tailnet $upstream_http_tailscale_tailnet;
+          auth_request_set $auth_profile_picture $upstream_http_tailscale_profile_picture;
+
+          proxy_set_header X-Webauth-User "$auth_user";
+          proxy_set_header X-Webauth-Name "$auth_name";
+          proxy_set_header X-Webauth-Login "$auth_login";
+          proxy_set_header X-Webauth-Tailnet "$auth_tailnet";
+          proxy_set_header X-Webauth-Profile-Picture "$auth_profile_picture";
+
+          ${lib.optionalString (cfg.expectedTailnet != "") ''proxy_set_header Expected-Tailnet "${cfg.expectedTailnet}";''}
+        '';
+      });
+  };
+
+  meta.maintainers = with maintainers; [ phaer ];
+
+}
diff --git a/nixos/modules/services/web-servers/nginx/vhost-options.nix b/nixos/modules/services/web-servers/nginx/vhost-options.nix
index 9db4c8e23025b..ea98439d3823d 100644
--- a/nixos/modules/services/web-servers/nginx/vhost-options.nix
+++ b/nixos/modules/services/web-servers/nginx/vhost-options.nix
@@ -162,10 +162,11 @@ with lib;
       type = types.bool;
       default = false;
       description = lib.mdDoc ''
-        Whether to add a separate nginx server block that permanently redirects (301)
-        all plain HTTP traffic to HTTPS. This will set defaults for
-        `listen` to listen on all interfaces on the respective default
-        ports (80, 443), where the non-SSL listens are used for the redirect vhosts.
+        Whether to add a separate nginx server block that redirects (defaults
+        to 301, configurable with `redirectCode`) all plain HTTP traffic to
+        HTTPS. This will set defaults for `listen` to listen on all interfaces
+        on the respective default ports (80, 443), where the non-SSL listens
+        are used for the redirect vhosts.
       '';
     };
 
@@ -234,9 +235,9 @@ with lib;
         which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`
         and activate the QUIC transport protocol
         `services.nginx.virtualHosts.<name>.quic = true;`.
-        Note that HTTP/3 support is experimental and
-        *not* yet recommended for production.
+        Note that HTTP/3 support is experimental and *not* yet recommended for production.
         Read more at https://quic.nginx.org/
+        HTTP/3 availability must be manually advertised, preferably in each location block.
       '';
     };
 
@@ -249,8 +250,7 @@ with lib;
         which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`
         and activate the QUIC transport protocol
         `services.nginx.virtualHosts.<name>.quic = true;`.
-        Note that special application protocol support is experimental and
-        *not* yet recommended for production.
+        Note that special application protocol support is experimental and *not* yet recommended for production.
         Read more at https://quic.nginx.org/
       '';
     };
@@ -307,8 +307,20 @@ with lib;
       default = null;
       example = "newserver.example.org";
       description = lib.mdDoc ''
-        If set, all requests for this host are redirected permanently to
-        the given hostname.
+        If set, all requests for this host are redirected (defaults to 301,
+        configurable with `redirectCode`) to the given hostname.
+      '';
+    };
+
+    redirectCode = mkOption {
+      type = types.ints.between 300 399;
+      default = 301;
+      example = 308;
+      description = lib.mdDoc ''
+        HTTP status code used by `globalRedirect` and `forceSSL`. Possible use
+        cases include temporary (302, 307) redirects, keeping the request
+        method and body (307, 308), or explicitly resetting the method to GET (303).
+        See <https://developer.mozilla.org/en-US/docs/Web/HTTP/Redirections>.
       '';
     };
 
diff --git a/nixos/modules/services/web-servers/traefik.nix b/nixos/modules/services/web-servers/traefik.nix
index cc2c680b33424..fc9eb504ebf81 100644
--- a/nixos/modules/services/web-servers/traefik.nix
+++ b/nixos/modules/services/web-servers/traefik.nix
@@ -144,6 +144,7 @@ in {
 
     systemd.services.traefik = {
       description = "Traefik web server";
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       startLimitIntervalSec = 86400;
diff --git a/nixos/modules/services/x11/desktop-managers/cinnamon.nix b/nixos/modules/services/x11/desktop-managers/cinnamon.nix
index a882bb140d219..f5a6c05865c47 100644
--- a/nixos/modules/services/x11/desktop-managers/cinnamon.nix
+++ b/nixos/modules/services/x11/desktop-managers/cinnamon.nix
@@ -79,20 +79,19 @@ in
           package = mkDefault pkgs.cinnamon.mint-cursor-themes;
         };
       };
-      services.xserver.displayManager.sessionCommands = ''
-        if test "$XDG_CURRENT_DESKTOP" = "Cinnamon"; then
-            true
-            ${concatMapStrings (p: ''
-              if [ -d "${p}/share/gsettings-schemas/${p.name}" ]; then
-                export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}${p}/share/gsettings-schemas/${p.name}
-              fi
-
-              if [ -d "${p}/lib/girepository-1.0" ]; then
-                export GI_TYPELIB_PATH=$GI_TYPELIB_PATH''${GI_TYPELIB_PATH:+:}${p}/lib/girepository-1.0
-                export LD_LIBRARY_PATH=$LD_LIBRARY_PATH''${LD_LIBRARY_PATH:+:}${p}/lib
-              fi
-            '') cfg.sessionPath}
-        fi
+
+      # We also have to take care of users running Cinnamon on Wayland via GDM
+      environment.extraInit = ''
+        ${concatMapStrings (p: ''
+          if [ -d "${p}/share/gsettings-schemas/${p.name}" ]; then
+            export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}${p}/share/gsettings-schemas/${p.name}
+          fi
+
+          if [ -d "${p}/lib/girepository-1.0" ]; then
+            export GI_TYPELIB_PATH=$GI_TYPELIB_PATH''${GI_TYPELIB_PATH:+:}${p}/lib/girepository-1.0
+            export LD_LIBRARY_PATH=$LD_LIBRARY_PATH''${LD_LIBRARY_PATH:+:}${p}/lib
+          fi
+        '') cfg.sessionPath}
       '';
 
       # Default services
@@ -200,8 +199,7 @@ in
         })
       ];
 
-      # https://salsa.debian.org/cinnamon-team/cinnamon/-/commit/f87c64f8d35ba406eb11ad442989a0716f6620cf#
-      xdg.portal.config.x-cinnamon.default = mkDefault [ "xapp" "gtk" ];
+      xdg.portal.configPackages = mkDefault [ pkgs.cinnamon.cinnamon-common ];
 
       # Override GSettings schemas
       environment.sessionVariables.NIX_GSETTINGS_OVERRIDES_DIR = "${nixos-gsettings-overrides}/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas";
diff --git a/nixos/modules/services/x11/desktop-managers/gnome.nix b/nixos/modules/services/x11/desktop-managers/gnome.nix
index 20eca7746447b..2cf9bc2eac37e 100644
--- a/nixos/modules/services/x11/desktop-managers/gnome.nix
+++ b/nixos/modules/services/x11/desktop-managers/gnome.nix
@@ -449,7 +449,6 @@ in
             gnome-color-manager
             gnome-control-center
             gnome-shell-extensions
-            gnome-themes-extra
             pkgs.gnome-tour # GNOME Shell detects the .desktop file on first log-in.
             pkgs.gnome-user-docs
             pkgs.orca
diff --git a/nixos/modules/services/x11/desktop-managers/plasma5.nix b/nixos/modules/services/x11/desktop-managers/plasma5.nix
index 9cc7c4381620f..fc9de2500ba46 100644
--- a/nixos/modules/services/x11/desktop-managers/plasma5.nix
+++ b/nixos/modules/services/x11/desktop-managers/plasma5.nix
@@ -26,10 +26,8 @@ let
         emptyValue.value = {};
       };
 
-  libsForQt5 = pkgs.plasma5Packages;
-  inherit (libsForQt5) kdeGear kdeFrameworks plasma5;
   inherit (lib)
-    getBin optionalAttrs optionalString literalExpression
+    getBin optionalAttrs literalExpression
     mkRemovedOptionModule mkRenamedOptionModule
     mkDefault mkIf mkMerge mkOption mkPackageOption types;
 
@@ -65,7 +63,7 @@ let
     # recognize that software that has been removed.
     rm -fv $HOME/.cache/ksycoca*
 
-    ${libsForQt5.kservice}/bin/kbuildsycoca5
+    ${pkgs.plasma5Packages.kservice}/bin/kbuildsycoca5
   '';
 
   set_XDG_CONFIG_HOME = ''
@@ -176,20 +174,19 @@ in
           owner = "root";
           group = "root";
           capabilities = "cap_sys_nice+ep";
-          source = "${getBin plasma5.kwin}/bin/kwin_wayland";
+          source = "${getBin pkgs.plasma5Packages.kwin}/bin/kwin_wayland";
         };
       } // optionalAttrs (!cfg.runUsingSystemd) {
         start_kdeinit = {
           setuid = true;
           owner = "root";
           group = "root";
-          source = "${getBin libsForQt5.kinit}/libexec/kf5/start_kdeinit";
+          source = "${getBin pkgs.plasma5Packages.kinit}/libexec/kf5/start_kdeinit";
         };
       };
 
       environment.systemPackages =
-        with libsForQt5;
-        with plasma5; with kdeGear; with kdeFrameworks;
+        with pkgs.plasma5Packages;
         let
           requiredPackages = [
             frameworkintegration
@@ -284,8 +281,8 @@ in
         ++ utils.removePackagesByName optionalPackages config.environment.plasma5.excludePackages
 
         # Phonon audio backend
-        ++ lib.optional (cfg.phononBackend == "gstreamer") libsForQt5.phonon-backend-gstreamer
-        ++ lib.optional (cfg.phononBackend == "vlc") libsForQt5.phonon-backend-vlc
+        ++ lib.optional (cfg.phononBackend == "gstreamer") pkgs.plasma5Packages.phonon-backend-gstreamer
+        ++ lib.optional (cfg.phononBackend == "vlc") pkgs.plasma5Packages.phonon-backend-vlc
 
         # Optional hardware support features
         ++ lib.optionals config.hardware.bluetooth.enable [ bluedevil bluez-qt pkgs.openobex pkgs.obexftp ]
@@ -295,13 +292,13 @@ in
         ++ lib.optional config.powerManagement.enable powerdevil
         ++ lib.optional config.services.colord.enable pkgs.colord-kde
         ++ lib.optional config.services.hardware.bolt.enable pkgs.plasma5Packages.plasma-thunderbolt
-        ++ lib.optionals config.services.samba.enable [ kdenetwork-filesharing pkgs.samba ]
+        ++ lib.optional config.services.samba.enable kdenetwork-filesharing
         ++ lib.optional config.services.xserver.wacom.enable pkgs.wacomtablet
         ++ lib.optional config.services.flatpak.enable flatpak-kcm;
 
       # Extra services for D-Bus activation
       services.dbus.packages = [
-        plasma5.kactivitymanagerd
+        pkgs.plasma5Packages.kactivitymanagerd
       ];
 
       environment.pathsToLink = [
@@ -334,10 +331,11 @@ in
         serif = [ "Noto Serif" ];
       };
 
-      programs.ssh.askPassword = mkDefault "${plasma5.ksshaskpass.out}/bin/ksshaskpass";
+      programs.ssh.askPassword = mkDefault "${pkgs.plasma5Packages.ksshaskpass.out}/bin/ksshaskpass";
 
       # Enable helpful DBus services.
       services.accounts-daemon.enable = true;
+      programs.dconf.enable = true;
       # when changing an account picture the accounts-daemon reads a temporary file containing the image which systemsettings5 may place under /tmp
       systemd.services.accounts-daemon.serviceConfig.PrivateTmp = false;
       services.power-profiles-daemon.enable = mkDefault true;
@@ -371,8 +369,8 @@ in
       };
 
       xdg.portal.enable = true;
-      xdg.portal.extraPortals = [ plasma5.xdg-desktop-portal-kde ];
-      xdg.portal.configPackages = mkDefault [ plasma5.xdg-desktop-portal-kde ];
+      xdg.portal.extraPortals = [ pkgs.plasma5Packages.xdg-desktop-portal-kde ];
+      xdg.portal.configPackages = mkDefault [ pkgs.plasma5Packages.xdg-desktop-portal-kde ];
       # xdg-desktop-portal-kde expects PipeWire to be running.
       # This does not, by default, replace PulseAudio.
       services.pipewire.enable = mkDefault true;
@@ -403,15 +401,14 @@ in
         ''
       ];
 
-      services.xserver.displayManager.sessionPackages = [ pkgs.libsForQt5.plasma5.plasma-workspace ];
+      services.xserver.displayManager.sessionPackages = [ pkgs.plasma5Packages.plasma-workspace ];
       # Default to be `plasma` (X11) instead of `plasmawayland`, since plasma wayland currently has
       # many tiny bugs.
       # See: https://github.com/NixOS/nixpkgs/issues/143272
       services.xserver.displayManager.defaultSession = mkDefault "plasma";
 
       environment.systemPackages =
-        with libsForQt5;
-        with plasma5; with kdeGear; with kdeFrameworks;
+        with pkgs.plasma5Packages;
         let
           requiredPackages = [
             ksystemstats
@@ -447,7 +444,7 @@ in
           script = ''
             ${set_XDG_CONFIG_HOME}
 
-            ${kdeFrameworks.kconfig}/bin/kwriteconfig5 \
+            ${pkgs.plasma5Packages.kconfig}/bin/kwriteconfig5 \
               --file startkderc --group General --key systemdBoot ${lib.boolToString cfg.runUsingSystemd}
           '';
         };
@@ -475,8 +472,7 @@ in
       ];
 
       environment.systemPackages =
-        with libsForQt5;
-        with plasma5; with kdeApplications; with kdeFrameworks;
+        with pkgs.plasma5Packages;
         [
           # Basic packages without which Plasma Mobile fails to work properly.
           plasma-mobile
@@ -535,7 +531,7 @@ in
         };
       };
 
-      services.xserver.displayManager.sessionPackages = [ pkgs.libsForQt5.plasma5.plasma-mobile ];
+      services.xserver.displayManager.sessionPackages = [ pkgs.plasma5Packages.plasma-mobile ];
     })
 
     # Plasma Bigscreen
diff --git a/nixos/modules/services/x11/display-managers/default.nix b/nixos/modules/services/x11/display-managers/default.nix
index 16a7ff1a4bd5e..3e2d5780a5cb1 100644
--- a/nixos/modules/services/x11/display-managers/default.nix
+++ b/nixos/modules/services/x11/display-managers/default.nix
@@ -514,7 +514,7 @@ in
 
     # Make xsessions and wayland sessions available in XDG_DATA_DIRS
     # as some programs have behavior that depends on them being present
-    environment.sessionVariables.XDG_DATA_DIRS = [
+    environment.sessionVariables.XDG_DATA_DIRS = lib.mkIf (cfg.displayManager.sessionPackages != [ ]) [
       "${cfg.displayManager.sessionData.desktops}/share"
     ];
   };
diff --git a/nixos/modules/services/x11/display-managers/sddm.nix b/nixos/modules/services/x11/display-managers/sddm.nix
index 6ca7a4425f892..0576619cc8d28 100644
--- a/nixos/modules/services/x11/display-managers/sddm.nix
+++ b/nixos/modules/services/x11/display-managers/sddm.nix
@@ -7,7 +7,7 @@ let
   cfg = dmcfg.sddm;
   xEnv = config.systemd.services.display-manager.environment;
 
-  sddm = pkgs.libsForQt5.sddm;
+  sddm = cfg.package;
 
   iniFmt = pkgs.formats.ini { };
 
@@ -108,6 +108,8 @@ in
         '';
       };
 
+      package = mkPackageOption pkgs [ "plasma5Packages" "sddm" ] {};
+
       enableHidpi = mkOption {
         type = types.bool;
         default = true;
diff --git a/nixos/modules/services/x11/hardware/libinput.nix b/nixos/modules/services/x11/hardware/libinput.nix
index d2a5b5895e0aa..0ea21eb1dce3a 100644
--- a/nixos/modules/services/x11/hardware/libinput.nix
+++ b/nixos/modules/services/x11/hardware/libinput.nix
@@ -130,9 +130,9 @@ let cfg = config.services.xserver.libinput;
         default = true;
         description =
           lib.mdDoc ''
-            Disables horizontal scrolling. When disabled, this driver will discard any horizontal scroll
-            events from libinput. Note that this does not disable horizontal scrolling, it merely
-            discards the horizontal axis from any scroll events.
+            Enables or disables horizontal scrolling. When disabled, this driver will discard any
+            horizontal scroll events from libinput. Note that this does not disable horizontal
+            scrolling in libinput itself; it merely discards the horizontal axis from any scroll events.
           '';
       };
 
diff --git a/nixos/modules/services/x11/xserver.nix b/nixos/modules/services/x11/xserver.nix
index 4a8f2f61caaf4..36f25d5547cae 100644
--- a/nixos/modules/services/x11/xserver.nix
+++ b/nixos/modules/services/x11/xserver.nix
@@ -804,14 +804,14 @@ in
       ];
 
     system.checks = singleton (pkgs.runCommand "xkb-validated" {
-      inherit (cfg.xkb) model layout variant options;
+      inherit (cfg.xkb) dir model layout variant options;
       nativeBuildInputs = with pkgs.buildPackages; [ xkbvalidate ];
       preferLocalBuild = true;
     } ''
       ${optionalString (config.environment.sessionVariables ? XKB_CONFIG_ROOT)
         "export XKB_CONFIG_ROOT=${config.environment.sessionVariables.XKB_CONFIG_ROOT}"
       }
-      xkbvalidate "$model" "$layout" "$variant" "$options"
+      XKB_CONFIG_ROOT="$dir" xkbvalidate "$model" "$layout" "$variant" "$options"
       touch "$out"
     '');
 
diff --git a/nixos/modules/system/activation/bootspec.nix b/nixos/modules/system/activation/bootspec.nix
index 98c234bc340d0..2ed6964b2a6a6 100644
--- a/nixos/modules/system/activation/bootspec.nix
+++ b/nixos/modules/system/activation/bootspec.nix
@@ -11,6 +11,7 @@
 let
   cfg = config.boot.bootspec;
   children = lib.mapAttrs (childName: childConfig: childConfig.configuration.system.build.toplevel) config.specialisation;
+  hasAtLeastOneInitrdSecret = lib.length (lib.attrNames config.boot.initrd.secrets) > 0;
   schemas = {
     v1 = rec {
       filename = "boot.json";
@@ -27,6 +28,7 @@ let
               label = "${config.system.nixos.distroName} ${config.system.nixos.codeName} ${config.system.nixos.label} (Linux ${config.boot.kernelPackages.kernel.modDirVersion})";
             } // lib.optionalAttrs config.boot.initrd.enable {
               initrd = "${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}";
+            } // lib.optionalAttrs hasAtLeastOneInitrdSecret {
               initrdSecrets = "${config.system.build.initialRamdiskSecretAppender}/bin/append-initrd-secrets";
             };
           }));
diff --git a/nixos/modules/system/activation/switch-to-configuration.pl b/nixos/modules/system/activation/switch-to-configuration.pl
index e2f66a287bc4f..ba45231465fb4 100755
--- a/nixos/modules/system/activation/switch-to-configuration.pl
+++ b/nixos/modules/system/activation/switch-to-configuration.pl
@@ -889,9 +889,15 @@ while (my $f = <$list_active_users>) {
 
 close($list_active_users) || die("Unable to close the file handle to loginctl");
 
-# Set the new tmpfiles
-print STDERR "setting up tmpfiles\n";
-system("$new_systemd/bin/systemd-tmpfiles", "--create", "--remove", "--exclude-prefix=/dev") == 0 or $res = 3;
+# Restart sysinit-reactivation.target.
+# This target only exists to restart services ordered before sysinit.target. We
+# cannot use X-StopOnReconfiguration to restart sysinit.target because then ALL
+# services of the system would be restarted since all normal services have a
+# default dependency on sysinit.target. sysinit-reactivation.target ensures
+# that services ordered BEFORE sysinit.target get re-started in the correct
+# order. Ordering between these services is respected.
+print STDERR "restarting sysinit-reactivation.target\n";
+system("$new_systemd/bin/systemctl", "restart", "sysinit-reactivation.target") == 0 or $res = 4;
 
 # Before reloading we need to ensure that the units are still active. They may have been
 # deactivated because one of their requirements got stopped. If they are inactive
diff --git a/nixos/modules/system/boot/binfmt.nix b/nixos/modules/system/boot/binfmt.nix
index d16152ab9dec5..08e3dce708447 100644
--- a/nixos/modules/system/boot/binfmt.nix
+++ b/nixos/modules/system/boot/binfmt.nix
@@ -1,6 +1,6 @@
 { config, lib, pkgs, ... }:
 let
-  inherit (lib) mkOption mkDefault types optionalString stringAfter;
+  inherit (lib) mkOption mkDefault types optionalString;
 
   cfg = config.boot.binfmt;
 
diff --git a/nixos/modules/system/boot/clevis.md b/nixos/modules/system/boot/clevis.md
new file mode 100644
index 0000000000000..91eb728a919ea
--- /dev/null
+++ b/nixos/modules/system/boot/clevis.md
@@ -0,0 +1,51 @@
+# Clevis {#module-boot-clevis}
+
+[Clevis](https://github.com/latchset/clevis)
+is a framework for automated decryption of resources.
+Clevis allows for secure unattended disk decryption during boot, using decryption policies that must be satisfied for the data to be decrypted.
+
+
+## Create a JWE file containing your secret {#module-boot-clevis-create-secret}
+
+The first step is to embed your secret in a [JWE](https://en.wikipedia.org/wiki/JSON_Web_Encryption) file.
+JWE files have to be created through the clevis command line. Three types of policies are supported:
+
+1) TPM policies
+
+Secrets are pinned against the presence of a TPM2 device, for example:
+```
+echo hi | clevis encrypt tpm2 '{}' > hi.jwe
+```
+2) Tang policies
+
+Secrets are pinned against the presence of a Tang server, for example:
+```
+echo hi | clevis encrypt tang '{"url": "http://tang.local"}' > hi.jwe
+```
+
+3) Shamir Secret Sharing
+
+Using Shamir's Secret Sharing ([sss](https://en.wikipedia.org/wiki/Shamir%27s_secret_sharing)), secrets are pinned using a combination of the two preceding policies. For example:
+```
+echo hi | clevis encrypt sss \
+'{"t": 2, "pins": {"tpm2": {"pcr_ids": "0"}, "tang": {"url": "http://tang.local"}}}' \
+> hi.jwe
+```
+
+For more complete documentation on how to generate a secret with clevis, see the [clevis documentation](https://github.com/latchset/clevis).
+
+
+## Activate unattended decryption of a resource at boot {#module-boot-clevis-activate}
+
+In order to activate unattended decryption of a resource at boot, enable the `clevis` module:
+
+```
+boot.initrd.clevis.enable = true;
+```
+
+Then, specify the device you want to decrypt using a given clevis secret. Clevis will automatically try to decrypt the device at boot and will fall back to interactive unlocking if the decryption policy is not fulfilled.
+```
+boot.initrd.clevis.devices."/dev/nvme0n1p1".secretFile = ./nvme0n1p1.jwe;
+```
+
+Only `bcachefs`, `zfs` and `luks` encrypted devices are supported at this time.
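+
+Putting it together, the following is a sketch of a full configuration using a Tang-pinned secret with a systemd-based initrd. The LUKS device name, secret file, and network settings below are illustrative and assume a LUKS device already declared as `boot.initrd.luks.devices."cryptroot"`:
+
+```
+boot.initrd.systemd.enable = true;
+
+# A Tang pin requires networking in the initrd, for example:
+boot.initrd.systemd.network.enable = true;
+boot.initrd.systemd.network.networks."10-lan" = {
+  matchConfig.Name = "en*";
+  networkConfig.DHCP = "yes";
+};
+
+boot.initrd.clevis.enable = true;
+boot.initrd.clevis.useTang = true;
+boot.initrd.clevis.devices."cryptroot".secretFile = ./cryptroot.jwe;
+```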
diff --git a/nixos/modules/system/boot/clevis.nix b/nixos/modules/system/boot/clevis.nix
new file mode 100644
index 0000000000000..0c72590f93851
--- /dev/null
+++ b/nixos/modules/system/boot/clevis.nix
@@ -0,0 +1,107 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.boot.initrd.clevis;
+  systemd = config.boot.initrd.systemd;
+  supportedFs = [ "zfs" "bcachefs" ];
+in
+{
+  meta.maintainers = with maintainers; [ julienmalka camillemndn ];
+  meta.doc = ./clevis.md;
+
+  options = {
+    boot.initrd.clevis.enable = mkEnableOption (lib.mdDoc "Clevis in initrd");
+
+
+    boot.initrd.clevis.package = mkOption {
+      type = types.package;
+      default = pkgs.clevis;
+      defaultText = "pkgs.clevis";
+      description = lib.mdDoc "Clevis package";
+    };
+
+    boot.initrd.clevis.devices = mkOption {
+      description = "Encrypted devices that need to be unlocked at boot using Clevis";
+      default = { };
+      type = types.attrsOf (types.submodule ({
+        options.secretFile = mkOption {
+          description = lib.mdDoc "Clevis JWE file used to decrypt the device at boot, in concert with the chosen pin (one of TPM2, Tang server, or SSS).";
+          type = types.path;
+        };
+      }));
+    };
+
+    boot.initrd.clevis.useTang = mkOption {
+      description = "Whether the Clevis JWE file used to decrypt the devices uses a Tang server as a pin.";
+      default = false;
+      type = types.bool;
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    # Implementations of clevis unlocking for the supported filesystems are located directly in the respective modules.
+
+
+    assertions = (attrValues (mapAttrs
+      (device: _: {
+        assertion = (any (fs: fs.device == device && (elem fs.fsType supportedFs)) config.system.build.fileSystems) || (hasAttr device config.boot.initrd.luks.devices);
+        message = ''
+          No filesystem or LUKS device with the name ${device} is declared in your configuration.'';
+      })
+      cfg.devices));
+
+
+    warnings =
+      if cfg.useTang && !config.boot.initrd.network.enable && !config.boot.initrd.systemd.network.enable
+      then [ "In order to use a Tang pinned secret you must configure networking in initrd" ]
+      else [ ];
+
+    boot.initrd = {
+      extraUtilsCommands = mkIf (!systemd.enable) ''
+        copy_bin_and_libs ${pkgs.jose}/bin/jose
+        copy_bin_and_libs ${pkgs.curl}/bin/curl
+        copy_bin_and_libs ${pkgs.bash}/bin/bash
+
+        copy_bin_and_libs ${pkgs.tpm2-tools}/bin/.tpm2-wrapped
+        mv $out/bin/{.tpm2-wrapped,tpm2}
+        cp {${pkgs.tpm2-tss},$out}/lib/libtss2-tcti-device.so.0
+
+        copy_bin_and_libs ${cfg.package}/bin/.clevis-wrapped
+        mv $out/bin/{.clevis-wrapped,clevis}
+
+        for BIN in ${cfg.package}/bin/clevis-decrypt*; do
+          copy_bin_and_libs $BIN
+        done
+
+        for BIN in $out/bin/clevis{,-decrypt{,-null,-tang,-tpm2}}; do
+          sed -i $BIN -e 's,${pkgs.bash},,' -e 's,${pkgs.coreutils},,'
+        done
+
+        sed -i $out/bin/clevis-decrypt-tpm2 -e 's,tpm2_,tpm2 ,'
+      '';
+
+      secrets = lib.mapAttrs' (name: value: nameValuePair "/etc/clevis/${name}.jwe" value.secretFile) cfg.devices;
+
+      systemd = {
+        extraBin = mkIf systemd.enable {
+          clevis = "${cfg.package}/bin/clevis";
+          curl = "${pkgs.curl}/bin/curl";
+        };
+
+        storePaths = mkIf systemd.enable [
+          cfg.package
+          "${pkgs.jose}/bin/jose"
+          "${pkgs.curl}/bin/curl"
+          "${pkgs.tpm2-tools}/bin/tpm2_createprimary"
+          "${pkgs.tpm2-tools}/bin/tpm2_flushcontext"
+          "${pkgs.tpm2-tools}/bin/tpm2_load"
+          "${pkgs.tpm2-tools}/bin/tpm2_unseal"
+        ];
+      };
+    };
+  };
+}
diff --git a/nixos/modules/system/boot/grow-partition.nix b/nixos/modules/system/boot/grow-partition.nix
index 897602f9826ab..8a0fc3a03dac4 100644
--- a/nixos/modules/system/boot/grow-partition.nix
+++ b/nixos/modules/system/boot/grow-partition.nix
@@ -25,7 +25,7 @@ with lib;
     systemd.services.growpart = {
       wantedBy = [ "-.mount" ];
       after = [ "-.mount" ];
-      before = [ "systemd-growfs-root.service" ];
+      before = [ "systemd-growfs-root.service" "shutdown.target" ];
       conflicts = [ "shutdown.target" ];
       unitConfig.DefaultDependencies = false;
       serviceConfig = {
diff --git a/nixos/modules/system/boot/initrd-ssh.nix b/nixos/modules/system/boot/initrd-ssh.nix
index a8cd2e8f05fcc..61e61f32bc5ee 100644
--- a/nixos/modules/system/boot/initrd-ssh.nix
+++ b/nixos/modules/system/boot/initrd-ssh.nix
@@ -243,8 +243,10 @@ in
 
       services.sshd = {
         description = "SSH Daemon";
-        wantedBy = ["initrd.target"];
-        after = ["network.target" "initrd-nixos-copy-secrets.service"];
+        wantedBy = [ "initrd.target" ];
+        after = [ "network.target" "initrd-nixos-copy-secrets.service" ];
+        before = [ "shutdown.target" ];
+        conflicts = [ "shutdown.target" ];
 
         # Keys from Nix store are world-readable, which sshd doesn't
         # like. If this were a real nix store and not the initrd, we
diff --git a/nixos/modules/system/boot/loader/grub/grub.nix b/nixos/modules/system/boot/loader/grub/grub.nix
index 7097e1d83dca9..0556c875241a1 100644
--- a/nixos/modules/system/boot/loader/grub/grub.nix
+++ b/nixos/modules/system/boot/loader/grub/grub.nix
@@ -36,7 +36,7 @@ let
     # Package set of targeted architecture
     if cfg.forcei686 then pkgs.pkgsi686Linux else pkgs;
 
-  realGrub = if cfg.zfsSupport then grubPkgs.grub2.override { zfsSupport = true; }
+  realGrub = if cfg.zfsSupport then grubPkgs.grub2.override { zfsSupport = true; zfs = cfg.zfsPackage; }
     else grubPkgs.grub2;
 
   grub =
@@ -614,6 +614,16 @@ in
         '';
       };
 
+      zfsPackage = mkOption {
+        type = types.package;
+        internal = true;
+        default = pkgs.zfs;
+        defaultText = literalExpression "pkgs.zfs";
+        description = lib.mdDoc ''
+          Which ZFS package to use if `config.boot.loader.grub.zfsSupport` is true.
+        '';
+      };
+
       efiSupport = mkOption {
         default = false;
         type = types.bool;
diff --git a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
index e2e7ffe59dcd2..055afe95df60b 100644
--- a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
+++ b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
@@ -15,19 +15,31 @@ import json
 from typing import NamedTuple, Dict, List
 from dataclasses import dataclass
 
+# These values will be replaced with actual values during the package build
+EFI_SYS_MOUNT_POINT = "@efiSysMountPoint@"
+TIMEOUT = "@timeout@"
+EDITOR = bool("@editor@")
+CONSOLE_MODE = "@consoleMode@"
+BOOTSPEC_TOOLS = "@bootspecTools@"
+DISTRO_NAME = "@distroName@"
+NIX = "@nix@"
+SYSTEMD = "@systemd@"
+CONFIGURATION_LIMIT = int("@configurationLimit@")
+CAN_TOUCH_EFI_VARIABLES = "@canTouchEfiVariables@"
+GRACEFUL = "@graceful@"
+COPY_EXTRA_FILES = "@copyExtraFiles@"
 
 @dataclass
 class BootSpec:
     init: str
     initrd: str
-    initrdSecrets: str
     kernel: str
     kernelParams: List[str]
     label: str
     system: str
     toplevel: str
     specialisations: Dict[str, "BootSpec"]
-
+    initrdSecrets: str | None = None
 
 
 libc = ctypes.CDLL("libc.so.6")
@@ -75,16 +87,16 @@ def generation_conf_filename(profile: str | None, generation: int, specialisatio
 
 
 def write_loader_conf(profile: str | None, generation: int, specialisation: str | None) -> None:
-    with open("@efiSysMountPoint@/loader/loader.conf.tmp", 'w') as f:
-        if "@timeout@" != "":
-            f.write("timeout @timeout@\n")
+    with open(f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf.tmp", 'w') as f:
+        if TIMEOUT != "":
+            f.write(f"timeout {TIMEOUT}\n")
         f.write("default %s\n" % generation_conf_filename(profile, generation, specialisation))
-        if not @editor@:
+        if not EDITOR:
             f.write("editor 0\n")
-        f.write("console-mode @consoleMode@\n")
+        f.write(f"console-mode {CONSOLE_MODE}\n")
         f.flush()
         os.fsync(f.fileno())
-    os.rename("@efiSysMountPoint@/loader/loader.conf.tmp", "@efiSysMountPoint@/loader/loader.conf")
+    os.rename(f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf.tmp", f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf")
 
 
 def get_bootspec(profile: str | None, generation: int) -> BootSpec:
@@ -95,7 +107,7 @@ def get_bootspec(profile: str | None, generation: int) -> BootSpec:
         bootspec_json = json.load(boot_json_f)
     else:
         boot_json_str = subprocess.check_output([
-        "@bootspecTools@/bin/synthesize",
+        f"{BOOTSPEC_TOOLS}/bin/synthesize",
         "--version",
         "1",
         system_directory,
@@ -116,7 +128,7 @@ def copy_from_file(file: str, dry_run: bool = False) -> str:
     store_dir = os.path.basename(os.path.dirname(store_file_path))
     efi_file_path = "/efi/nixos/%s-%s.efi" % (store_dir, suffix)
     if not dry_run:
-        copy_if_not_exists(store_file_path, "@efiSysMountPoint@%s" % (efi_file_path))
+        copy_if_not_exists(store_file_path, f"{EFI_SYS_MOUNT_POINT}%s" % (efi_file_path))
     return efi_file_path
 
 def write_entry(profile: str | None, generation: int, specialisation: str | None,
@@ -126,14 +138,14 @@ def write_entry(profile: str | None, generation: int, specialisation: str | None
     kernel = copy_from_file(bootspec.kernel)
     initrd = copy_from_file(bootspec.initrd)
 
-    title = "@distroName@{profile}{specialisation}".format(
+    title = "{name}{profile}{specialisation}".format(
+        name=DISTRO_NAME,
         profile=" [" + profile + "]" if profile else "",
         specialisation=" (%s)" % specialisation if specialisation else "")
 
     try:
-        subprocess.check_call([bootspec.initrdSecrets, "@efiSysMountPoint@%s" % (initrd)])
-    except FileNotFoundError:
-        pass
+        if bootspec.initrdSecrets is not None:
+            subprocess.check_call([bootspec.initrdSecrets, f"{EFI_SYS_MOUNT_POINT}%s" % (initrd)])
     except subprocess.CalledProcessError:
         if current:
             print("failed to create initrd secrets!", file=sys.stderr)
@@ -143,7 +155,7 @@ def write_entry(profile: str | None, generation: int, specialisation: str | None
                   f'for "{title} - Configuration {generation}", an older generation', file=sys.stderr)
             print("note: this is normal after having removed "
                   "or renamed a file in `boot.initrd.secrets`", file=sys.stderr)
-    entry_file = "@efiSysMountPoint@/loader/entries/%s" % (
+    entry_file = f"{EFI_SYS_MOUNT_POINT}/loader/entries/%s" % (
         generation_conf_filename(profile, generation, specialisation))
     tmp_path = "%s.tmp" % (entry_file)
     kernel_params = "init=%s " % bootspec.init
@@ -168,7 +180,7 @@ def write_entry(profile: str | None, generation: int, specialisation: str | None
 
 def get_generations(profile: str | None = None) -> list[SystemIdentifier]:
     gen_list = subprocess.check_output([
-        "@nix@/bin/nix-env",
+        f"{NIX}/bin/nix-env",
         "--list-generations",
         "-p",
         "/nix/var/nix/profiles/%s" % ("system-profiles/" + profile if profile else "system"),
@@ -177,7 +189,7 @@ def get_generations(profile: str | None = None) -> list[SystemIdentifier]:
     gen_lines = gen_list.split('\n')
     gen_lines.pop()
 
-    configurationLimit = @configurationLimit@
+    configurationLimit = CONFIGURATION_LIMIT
     configurations = [
         SystemIdentifier(
             profile=profile,
@@ -190,14 +202,14 @@ def get_generations(profile: str | None = None) -> list[SystemIdentifier]:
 
 
 def remove_old_entries(gens: list[SystemIdentifier]) -> None:
-    rex_profile = re.compile(r"^@efiSysMountPoint@/loader/entries/nixos-(.*)-generation-.*\.conf$")
-    rex_generation = re.compile(r"^@efiSysMountPoint@/loader/entries/nixos.*-generation-([0-9]+)(-specialisation-.*)?\.conf$")
+    rex_profile = re.compile(r"^" + re.escape(EFI_SYS_MOUNT_POINT) + r"/loader/entries/nixos-(.*)-generation-.*\.conf$")
+    rex_generation = re.compile(r"^" + re.escape(EFI_SYS_MOUNT_POINT) + r"/loader/entries/nixos.*-generation-([0-9]+)(-specialisation-.*)?\.conf$")
     known_paths = []
     for gen in gens:
         bootspec = get_bootspec(gen.profile, gen.generation)
         known_paths.append(copy_from_file(bootspec.kernel, True))
         known_paths.append(copy_from_file(bootspec.initrd, True))
-    for path in glob.iglob("@efiSysMountPoint@/loader/entries/nixos*-generation-[1-9]*.conf"):
+    for path in glob.iglob(f"{EFI_SYS_MOUNT_POINT}/loader/entries/nixos*-generation-[1-9]*.conf"):
         if rex_profile.match(path):
             prof = rex_profile.sub(r"\1", path)
         else:
@@ -208,7 +220,7 @@ def remove_old_entries(gens: list[SystemIdentifier]) -> None:
             continue
         if not (prof, gen_number, None) in gens:
             os.unlink(path)
-    for path in glob.iglob("@efiSysMountPoint@/efi/nixos/*"):
+    for path in glob.iglob(f"{EFI_SYS_MOUNT_POINT}/efi/nixos/*"):
         if not path in known_paths and not os.path.isdir(path):
             os.unlink(path)
 
@@ -231,7 +243,7 @@ def install_bootloader(args: argparse.Namespace) -> None:
         # Since systemd version 232 a machine ID is required and it might not
         # be there on newly installed systems, so let's generate one so that
         # bootctl can find it and we can also pass it to write_entry() later.
-        cmd = ["@systemd@/bin/systemd-machine-id-setup", "--print"]
+        cmd = [f"{SYSTEMD}/bin/systemd-machine-id-setup", "--print"]
         machine_id = subprocess.run(
           cmd, text=True, check=True, stdout=subprocess.PIPE
         ).stdout.rstrip()
@@ -243,22 +255,22 @@ def install_bootloader(args: argparse.Namespace) -> None:
     # flags to pass to bootctl install/update
     bootctl_flags = []
 
-    if "@canTouchEfiVariables@" != "1":
+    if CAN_TOUCH_EFI_VARIABLES != "1":
         bootctl_flags.append("--no-variables")
 
-    if "@graceful@" == "1":
+    if GRACEFUL == "1":
         bootctl_flags.append("--graceful")
 
     if os.getenv("NIXOS_INSTALL_BOOTLOADER") == "1":
         # bootctl uses fopen() with modes "wxe" and fails if the file exists.
-        if os.path.exists("@efiSysMountPoint@/loader/loader.conf"):
-            os.unlink("@efiSysMountPoint@/loader/loader.conf")
+        if os.path.exists(f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf"):
+            os.unlink(f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf")
 
-        subprocess.check_call(["@systemd@/bin/bootctl", "--esp-path=@efiSysMountPoint@"] + bootctl_flags + ["install"])
+        subprocess.check_call([f"{SYSTEMD}/bin/bootctl", f"--esp-path={EFI_SYS_MOUNT_POINT}"] + bootctl_flags + ["install"])
     else:
         # Update bootloader to latest if needed
-        available_out = subprocess.check_output(["@systemd@/bin/bootctl", "--version"], universal_newlines=True).split()[2]
-        installed_out = subprocess.check_output(["@systemd@/bin/bootctl", "--esp-path=@efiSysMountPoint@", "status"], universal_newlines=True)
+        available_out = subprocess.check_output([f"{SYSTEMD}/bin/bootctl", "--version"], universal_newlines=True).split()[2]
+        installed_out = subprocess.check_output([f"{SYSTEMD}/bin/bootctl", f"--esp-path={EFI_SYS_MOUNT_POINT}", "status"], universal_newlines=True)
 
         # See status_binaries() in systemd bootctl.c for code which generates this
         installed_match = re.search(r"^\W+File:.*/EFI/(?:BOOT|systemd)/.*\.efi \(systemd-boot ([\d.]+[^)]*)\)$",
@@ -277,10 +289,10 @@ def install_bootloader(args: argparse.Namespace) -> None:
 
         if installed_version < available_version:
             print("updating systemd-boot from %s to %s" % (installed_version, available_version))
-            subprocess.check_call(["@systemd@/bin/bootctl", "--esp-path=@efiSysMountPoint@"] + bootctl_flags + ["update"])
+            subprocess.check_call([f"{SYSTEMD}/bin/bootctl", f"--esp-path={EFI_SYS_MOUNT_POINT}"] + bootctl_flags + ["update"])
 
-    os.makedirs("@efiSysMountPoint@/efi/nixos", exist_ok=True)
-    os.makedirs("@efiSysMountPoint@/loader/entries", exist_ok=True)
+    os.makedirs(f"{EFI_SYS_MOUNT_POINT}/efi/nixos", exist_ok=True)
+    os.makedirs(f"{EFI_SYS_MOUNT_POINT}/loader/entries", exist_ok=True)
 
     gens = get_generations()
     for profile in get_profiles():
@@ -303,9 +315,9 @@ def install_bootloader(args: argparse.Namespace) -> None:
             else:
                 raise e
 
-    for root, _, files in os.walk('@efiSysMountPoint@/efi/nixos/.extra-files', topdown=False):
-        relative_root = root.removeprefix("@efiSysMountPoint@/efi/nixos/.extra-files").removeprefix("/")
-        actual_root = os.path.join("@efiSysMountPoint@", relative_root)
+    for root, _, files in os.walk(f"{EFI_SYS_MOUNT_POINT}/efi/nixos/.extra-files", topdown=False):
+        relative_root = root.removeprefix(f"{EFI_SYS_MOUNT_POINT}/efi/nixos/.extra-files").removeprefix("/")
+        actual_root = os.path.join(f"{EFI_SYS_MOUNT_POINT}", relative_root)
 
         for file in files:
             actual_file = os.path.join(actual_root, file)
@@ -318,14 +330,14 @@ def install_bootloader(args: argparse.Namespace) -> None:
             os.rmdir(actual_root)
         os.rmdir(root)
 
-    os.makedirs("@efiSysMountPoint@/efi/nixos/.extra-files", exist_ok=True)
+    os.makedirs(f"{EFI_SYS_MOUNT_POINT}/efi/nixos/.extra-files", exist_ok=True)
 
-    subprocess.check_call("@copyExtraFiles@")
+    subprocess.check_call(COPY_EXTRA_FILES)
 
 
 def main() -> None:
-    parser = argparse.ArgumentParser(description='Update @distroName@-related systemd-boot files')
-    parser.add_argument('default_config', metavar='DEFAULT-CONFIG', help='The default @distroName@ config to boot')
+    parser = argparse.ArgumentParser(description=f"Update {DISTRO_NAME}-related systemd-boot files")
+    parser.add_argument('default_config', metavar='DEFAULT-CONFIG', help=f"The default {DISTRO_NAME} config to boot")
     args = parser.parse_args()
 
     try:
@@ -335,9 +347,9 @@ def main() -> None:
         # it can leave the system in an unbootable state, when a crash/outage
         # happens shortly after an update. To decrease the likelihood of this
         # event sync the efi filesystem after each update.
-        rc = libc.syncfs(os.open("@efiSysMountPoint@", os.O_RDONLY))
+        rc = libc.syncfs(os.open(f"{EFI_SYS_MOUNT_POINT}", os.O_RDONLY))
         if rc != 0:
-            print("could not sync @efiSysMountPoint@: {}".format(os.strerror(rc)), file=sys.stderr)
+            print(f"could not sync {EFI_SYS_MOUNT_POINT}: {os.strerror(rc)}", file=sys.stderr)
 
 
 if __name__ == '__main__':
diff --git a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix
index 9d55c21077d13..3b140726c2d6a 100644
--- a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix
+++ b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix
@@ -81,7 +81,11 @@ in {
 
       type = types.bool;
 
-      description = lib.mdDoc "Whether to enable the systemd-boot (formerly gummiboot) EFI boot manager";
+      description = lib.mdDoc ''
+        Whether to enable the systemd-boot (formerly gummiboot) EFI boot manager.
+        For more information about systemd-boot, see
+        https://www.freedesktop.org/wiki/Software/systemd/systemd-boot/
+      '';
     };
 
     editor = mkOption {
diff --git a/nixos/modules/system/boot/luksroot.nix b/nixos/modules/system/boot/luksroot.nix
index ca560d63f3bdc..221e90b6f38fb 100644
--- a/nixos/modules/system/boot/luksroot.nix
+++ b/nixos/modules/system/boot/luksroot.nix
@@ -1,9 +1,11 @@
-{ config, options, lib, pkgs, ... }:
+{ config, options, lib, utils, pkgs, ... }:
 
 with lib;
 
 let
   luks = config.boot.initrd.luks;
+  clevis = config.boot.initrd.clevis;
+  systemd = config.boot.initrd.systemd;
   kernelPackages = config.boot.kernelPackages;
   defaultPrio = (mkOptionDefault {}).priority;
 
@@ -511,7 +513,7 @@ let
   postLVM = filterAttrs (n: v: !v.preLVM) luks.devices;
 
 
-  stage1Crypttab = pkgs.writeText "initrd-crypttab" (lib.concatStringsSep "\n" (lib.mapAttrsToList (n: v: let
+  stage1Crypttab = pkgs.writeText "initrd-crypttab" (lib.concatLines (lib.mapAttrsToList (n: v: let
     opts = v.crypttabExtraOpts
       ++ optional v.allowDiscards "discard"
       ++ optionals v.bypassWorkqueues [ "no-read-workqueue" "no-write-workqueue" ]
@@ -594,7 +596,7 @@ in
       '';
 
       type = with types; attrsOf (submodule (
-        { name, ... }: { options = {
+        { config, name, ... }: { options = {
 
           name = mkOption {
             visible = false;
@@ -894,6 +896,19 @@ in
             '';
           };
         };
+
+        config = mkIf (clevis.enable && (hasAttr name clevis.devices)) {
+          preOpenCommands = mkIf (!systemd.enable) ''
+            mkdir -p /clevis-${name}
+            mount -t ramfs none /clevis-${name}
+            clevis decrypt < /etc/clevis/${name}.jwe > /clevis-${name}/decrypted
+          '';
+          keyFile = "/clevis-${name}/decrypted";
+          fallbackToPassword = !systemd.enable;
+          postOpenCommands = mkIf (!systemd.enable) ''
+            umount /clevis-${name}
+          '';
+        };
       }));
     };
 
@@ -1081,6 +1096,35 @@ in
     boot.initrd.preLVMCommands = mkIf (!config.boot.initrd.systemd.enable) (commonFunctions + preCommands + concatStrings (mapAttrsToList openCommand preLVM) + postCommands);
     boot.initrd.postDeviceCommands = mkIf (!config.boot.initrd.systemd.enable) (commonFunctions + preCommands + concatStrings (mapAttrsToList openCommand postLVM) + postCommands);
 
+    boot.initrd.systemd.services = let devicesWithClevis = filterAttrs (device: _: (hasAttr device clevis.devices)) luks.devices; in
+      mkIf (clevis.enable && systemd.enable) (
+        (mapAttrs'
+          (name: _: nameValuePair "cryptsetup-clevis-${name}" {
+            wantedBy = [ "systemd-cryptsetup@${utils.escapeSystemdPath name}.service" ];
+            before = [
+              "systemd-cryptsetup@${utils.escapeSystemdPath name}.service"
+              "initrd-switch-root.target"
+              "shutdown.target"
+            ];
+            wants = [ "systemd-udev-settle.service" ] ++ optional clevis.useTang "network-online.target";
+            after = [ "systemd-modules-load.service" "systemd-udev-settle.service" ] ++ optional clevis.useTang "network-online.target";
+            script = ''
+              mkdir -p /clevis-${name}
+              mount -t ramfs none /clevis-${name}
+              umask 277
+              clevis decrypt < /etc/clevis/${name}.jwe > /clevis-${name}/decrypted
+            '';
+            conflicts = [ "initrd-switch-root.target" "shutdown.target" ];
+            unitConfig.DefaultDependencies = "no";
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+              ExecStop = "${config.boot.initrd.systemd.package.util-linux}/bin/umount /clevis-${name}";
+            };
+          })
+          devicesWithClevis)
+      );
+
     environment.systemPackages = [ pkgs.cryptsetup ];
   };
 }
diff --git a/nixos/modules/system/boot/networkd.nix b/nixos/modules/system/boot/networkd.nix
index 33261021480f1..f236a4c005ad6 100644
--- a/nixos/modules/system/boot/networkd.nix
+++ b/nixos/modules/system/boot/networkd.nix
@@ -597,6 +597,8 @@ let
           "DHCP"
           "DHCPServer"
           "LinkLocalAddressing"
+          "IPv6LinkLocalAddressGenerationMode"
+          "IPv6StableSecretAddress"
           "IPv4LLRoute"
           "DefaultRouteOnDevice"
           "LLMNR"
@@ -648,6 +650,7 @@ let
         (assertValueOneOf "DHCP" ["yes" "no" "ipv4" "ipv6"])
         (assertValueOneOf "DHCPServer" boolValues)
         (assertValueOneOf "LinkLocalAddressing" ["yes" "no" "ipv4" "ipv6" "fallback" "ipv4-fallback"])
+        (assertValueOneOf "IPv6LinkLocalAddressGenerationMode" ["eui64" "none" "stable-privacy" "random"])
         (assertValueOneOf "IPv4LLRoute" boolValues)
         (assertValueOneOf "DefaultRouteOnDevice" boolValues)
         (assertValueOneOf "LLMNR" (boolValues ++ ["resolve"]))
@@ -1612,7 +1615,7 @@ let
         description = lib.mdDoc ''
           Each attribute in this set specifies an option in the
           `[WireGuardPeer]` section of the unit.  See
-          {manpage}`systemd.network(5)` for details.
+          {manpage}`systemd.netdev(5)` for details.
         '';
       };
     };
diff --git a/nixos/modules/system/boot/resolved.nix b/nixos/modules/system/boot/resolved.nix
index b898a63179624..c42c88163c564 100644
--- a/nixos/modules/system/boot/resolved.nix
+++ b/nixos/modules/system/boot/resolved.nix
@@ -23,12 +23,13 @@ in
     };
 
     services.resolved.fallbackDns = mkOption {
-      default = [ ];
+      default = null;
       example = [ "8.8.8.8" "2001:4860:4860::8844" ];
-      type = types.listOf types.str;
+      type = types.nullOr (types.listOf types.str);
       description = lib.mdDoc ''
         A list of IPv4 and IPv6 addresses to use as the fallback DNS servers.
-        If this option is empty, a compiled-in list of DNS servers is used instead.
+        If this option is null, a compiled-in list of DNS servers is used instead.
+        Setting this option to an empty list overrides the compiled-in list with an empty one, disabling fallback DNS entirely.
       '';
     };
 
@@ -94,6 +95,29 @@ in
       '';
     };
 
+    services.resolved.dnsovertls = mkOption {
+      default = "false";
+      example = "true";
+      type = types.enum [ "true" "opportunistic" "false" ];
+      description = lib.mdDoc ''
+        If set to
+        - `"true"`:
+            all DNS lookups will be encrypted. This requires
+            that the DNS server supports DNS-over-TLS and
+            has a valid certificate. If the hostname was specified
+            via the `address#hostname` format in {option}`services.resolved.domains`
+            then the specified hostname is used to validate its certificate.
+        - `"opportunistic"`:
+            all DNS lookups will attempt to be encrypted, but will fall back
+            to unencrypted requests if the server does not support DNS-over-TLS.
+            Note that this mode allows a malicious party to conduct a
+            downgrade attack by imitating the DNS server and pretending to not
+            support encryption.
+        - `"false"`:
+            all DNS lookups are done unencrypted.
+      '';
+    };
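+
+    # A hedged usage sketch combining the options above:
+    #
+    #   services.resolved = {
+    #     enable = true;
+    #     dnsovertls = "opportunistic";
+    #     fallbackDns = [ ];  # empty list: disable fallback DNS entirely
+    #   };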
+
     services.resolved.extraConfig = mkOption {
       default = "";
       type = types.lines;
@@ -134,12 +158,13 @@ in
         [Resolve]
         ${optionalString (config.networking.nameservers != [])
           "DNS=${concatStringsSep " " config.networking.nameservers}"}
-        ${optionalString (cfg.fallbackDns != [])
+        ${optionalString (cfg.fallbackDns != null)
           "FallbackDNS=${concatStringsSep " " cfg.fallbackDns}"}
         ${optionalString (cfg.domains != [])
           "Domains=${concatStringsSep " " cfg.domains}"}
         LLMNR=${cfg.llmnr}
         DNSSEC=${cfg.dnssec}
+        DNSOverTLS=${cfg.dnsovertls}
         ${config.services.resolved.extraConfig}
       '';
 
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index 87333999313e4..331ca5103ba61 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -451,6 +451,38 @@ in
         cfg.services
     );
 
+    assertions = let
+      mkOneAssert = typeStr: name: def: {
+        assertion = lib.elem "network-online.target" def.after -> lib.elem "network-online.target" (def.wants ++ def.requires ++ def.bindsTo);
+        message = "${name}.${typeStr} is ordered after 'network-online.target' but doesn't depend on it";
+      };
+      mkAsserts = typeStr: lib.mapAttrsToList (mkOneAssert typeStr);
+      mkMountAsserts = typeStr: map (m: mkOneAssert typeStr m.what m);
+    in mkMerge [
+      (concatLists (
+        mapAttrsToList
+          (name: service:
+            map (message: {
+              assertion = false;
+              inherit message;
+            }) (concatLists [
+              (optional ((builtins.elem "network-interfaces.target" service.after) || (builtins.elem "network-interfaces.target" service.wants))
+                "Service '${name}.service' is using the deprecated target network-interfaces.target, which no longer exists. Using network.target is recommended instead."
+              )
+            ])
+          )
+          cfg.services
+      ))
+      (mkAsserts "target" cfg.targets)
+      (mkAsserts "service" cfg.services)
+      (mkAsserts "socket" cfg.sockets)
+      (mkAsserts "timer" cfg.timers)
+      (mkAsserts "path" cfg.paths)
+      (mkMountAsserts "mount" cfg.mounts)
+      (mkMountAsserts "automount" cfg.automounts)
+      (mkAsserts "slice" cfg.slices)
+    ];
+
     system.build.units = cfg.units;
 
     system.nssModules = [ cfg.package.out ];
@@ -554,6 +586,13 @@ in
         unitConfig.X-StopOnReconfiguration = true;
       };
 
+    # This target only exists so that services ordered before sysinit.target
+    # are restarted in the correct order, notably BEFORE the other services,
+    # when switching configurations.
+    systemd.targets.sysinit-reactivation = {
+      description = "Reactivate sysinit units";
+    };
+
     systemd.units =
          mapAttrs' (n: v: nameValuePair "${n}.path"    (pathToUnit    n v)) cfg.paths
       // mapAttrs' (n: v: nameValuePair "${n}.service" (serviceToUnit n v)) cfg.services
@@ -619,7 +658,6 @@ in
     systemd.services.systemd-udev-settle.restartIfChanged = false; # Causes long delays in nixos-rebuild
     systemd.targets.local-fs.unitConfig.X-StopOnReconfiguration = true;
     systemd.targets.remote-fs.unitConfig.X-StopOnReconfiguration = true;
-    systemd.targets.network-online.wantedBy = [ "multi-user.target" ];
     systemd.services.systemd-importd.environment = proxy_env;
     systemd.services.systemd-pstore.wantedBy = [ "sysinit.target" ]; # see #81138
 
diff --git a/nixos/modules/system/boot/systemd/initrd-secrets.nix b/nixos/modules/system/boot/systemd/initrd-secrets.nix
index 7b59c0cbe7b84..d375238aa146e 100644
--- a/nixos/modules/system/boot/systemd/initrd-secrets.nix
+++ b/nixos/modules/system/boot/systemd/initrd-secrets.nix
@@ -11,7 +11,8 @@
       description = "Copy secrets into place";
       # Run as early as possible
       wantedBy = [ "sysinit.target" ];
-      before = [ "cryptsetup-pre.target" ];
+      before = [ "cryptsetup-pre.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
       unitConfig.DefaultDependencies = false;
 
       # We write the secrets to /.initrd-secrets and move them because this allows
diff --git a/nixos/modules/system/boot/systemd/initrd.nix b/nixos/modules/system/boot/systemd/initrd.nix
index 0e7d59b32075b..4ae07944afc3c 100644
--- a/nixos/modules/system/boot/systemd/initrd.nix
+++ b/nixos/modules/system/boot/systemd/initrd.nix
@@ -90,8 +90,6 @@ let
 
   fileSystems = filter utils.fsNeededForBoot config.system.build.fileSystems;
 
-  needMakefs = lib.any (fs: fs.autoFormat) fileSystems;
-
   kernel-name = config.boot.kernelPackages.kernel.name or "kernel";
   modulesTree = config.system.modulesTree.override { name = kernel-name + "-modules"; };
   firmware = config.hardware.firmware;
@@ -398,8 +396,7 @@ in {
           ManagerEnvironment=${lib.concatStringsSep " " (lib.mapAttrsToList (n: v: "${n}=${lib.escapeShellArg v}") cfg.managerEnvironment)}
         '';
 
-        "/lib/modules".source = "${modulesClosure}/lib/modules";
-        "/lib/firmware".source = "${modulesClosure}/lib/firmware";
+        "/lib".source = "${modulesClosure}/lib";
 
         "/etc/modules-load.d/nixos.conf".text = concatStringsSep "\n" config.boot.initrd.kernelModules;
 
@@ -430,7 +427,7 @@ in {
         "${cfg.package}/lib/systemd/systemd-fsck"
         "${cfg.package}/lib/systemd/systemd-hibernate-resume"
         "${cfg.package}/lib/systemd/systemd-journald"
-        (lib.mkIf needMakefs "${cfg.package}/lib/systemd/systemd-makefs")
+        "${cfg.package}/lib/systemd/systemd-makefs"
         "${cfg.package}/lib/systemd/systemd-modules-load"
         "${cfg.package}/lib/systemd/systemd-remount-fs"
         "${cfg.package}/lib/systemd/systemd-shutdown"
diff --git a/nixos/modules/system/boot/systemd/journald-gateway.nix b/nixos/modules/system/boot/systemd/journald-gateway.nix
new file mode 100644
index 0000000000000..854965282344d
--- /dev/null
+++ b/nixos/modules/system/boot/systemd/journald-gateway.nix
@@ -0,0 +1,135 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.journald.gateway;
+
+  cliArgs = lib.cli.toGNUCommandLineShell { } {
+    # If either of these are null / false, they are not passed in the command-line
+    inherit (cfg) cert key trust system user merge;
+  };
+in
+{
+  meta.maintainers = [ lib.maintainers.raitobezarius ];
+  options.services.journald.gateway = {
+    enable = lib.mkEnableOption "the HTTP gateway to the journal";
+
+    port = lib.mkOption {
+      default = 19531;
+      type = lib.types.port;
+      description = ''
+        The port to listen to.
+      '';
+    };
+
+    cert = lib.mkOption {
+      default = null;
+      type = with lib.types; nullOr str;
+      description = lib.mdDoc ''
+        The path to a file or `AF_UNIX` stream socket to read the server
+        certificate from.
+
+        The certificate must be in PEM format. This option switches
+        `systemd-journal-gatewayd` into HTTPS mode and must be used together
+        with {option}`services.journald.gateway.key`.
+      '';
+    };
+
+    key = lib.mkOption {
+      default = null;
+      type = with lib.types; nullOr str;
+      description = lib.mdDoc ''
+        The path to a file or `AF_UNIX` stream socket from which to read the
+        secret server key corresponding to the certificate specified with
+        {option}`services.journald.gateway.cert`.
+
+        The key must be in PEM format.
+
+        This key should not be world-readable, and must be readable by the
+        `systemd-journal-gateway` user.
+      '';
+    };
+
+    trust = lib.mkOption {
+      default = null;
+      type = with lib.types; nullOr str;
+      description = lib.mdDoc ''
+        Specify the path to a file or `AF_UNIX` stream socket to read a CA
+        certificate from.
+
+        The certificate must be in PEM format.
+
+        Setting this option enforces client certificate checking.
+      '';
+    };
+
+    system = lib.mkOption {
+      default = true;
+      type = lib.types.bool;
+      description = lib.mdDoc ''
+        Serve entries from system services and the kernel.
+
+        This has the same meaning as `--system` for {manpage}`journalctl(1)`.
+      '';
+    };
+
+    user = lib.mkOption {
+      default = true;
+      type = lib.types.bool;
+      description = lib.mdDoc ''
+        Serve entries from services for the current user.
+
+        This has the same meaning as `--user` for {manpage}`journalctl(1)`.
+      '';
+    };
+
+    merge = lib.mkOption {
+      default = false;
+      type = lib.types.bool;
+      description = lib.mdDoc ''
+        Serve entries interleaved from all available journals, including other
+        machines.
+
+        This has the same meaning as the `--merge` option for
+        {manpage}`journalctl(1)`.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        # This prevents the odd case where disabling both "system" and "user"
+        # actually enables both, because the CLI flags are not present.
+        assertion = cfg.system || cfg.user;
+        message = ''
+          systemd-journal-gatewayd must serve at least one of the "system" or
+          "user" journals.
+        '';
+      }
+    ];
+
+    systemd.additionalUpstreamSystemUnits = [
+      "systemd-journal-gatewayd.socket"
+      "systemd-journal-gatewayd.service"
+    ];
+
+    users.users.systemd-journal-gateway.uid = config.ids.uids.systemd-journal-gateway;
+    users.users.systemd-journal-gateway.group = "systemd-journal-gateway";
+    users.groups.systemd-journal-gateway.gid = config.ids.gids.systemd-journal-gateway;
+
+    systemd.services.systemd-journal-gatewayd.serviceConfig.ExecStart = [
+        # Clear the default command line
+        ""
+        "${pkgs.systemd}/lib/systemd/systemd-journal-gatewayd ${cliArgs}"
+    ];
+
+    systemd.sockets.systemd-journal-gatewayd = {
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [
+        # Clear the default port
+        ""
+        (toString cfg.port)
+      ];
+    };
+  };
+}
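
A minimal usage sketch for the new services.journald.gateway module; the
certificate and key paths are placeholders and must point outside the
world-readable Nix store:

    services.journald.gateway = {
      enable = true;
      # Omit cert/key to serve plain HTTP on the default port 19531.
      cert = "/var/lib/journal-gateway/cert.pem";
      key = "/var/lib/journal-gateway/key.pem";
    };
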
diff --git a/nixos/modules/system/boot/systemd/journald-remote.nix b/nixos/modules/system/boot/systemd/journald-remote.nix
new file mode 100644
index 0000000000000..57a0a133e1c6d
--- /dev/null
+++ b/nixos/modules/system/boot/systemd/journald-remote.nix
@@ -0,0 +1,163 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.journald.remote;
+  format = pkgs.formats.systemd;
+
+  cliArgs = lib.cli.toGNUCommandLineShell { } {
+    inherit (cfg) output;
+    # "-3" specifies the file descriptor from the .socket unit.
+    "listen-${cfg.listen}" = "-3";
+  };
+in
+{
+  meta.maintainers = [ lib.maintainers.raitobezarius ];
+  options.services.journald.remote = {
+    enable = lib.mkEnableOption "receiving systemd journals from the network";
+
+    listen = lib.mkOption {
+      default = "https";
+      type = lib.types.enum [ "https" "http" ];
+      description = lib.mdDoc ''
+        Which protocol to listen to.
+      '';
+    };
+
+    output = lib.mkOption {
+      default = "/var/log/journal/remote/";
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        The location of the output journal.
+
+        If the output points to a directory rather than a file, journal files
+        will be created underneath it. Files will be called
+        {file}`remote-hostname.journal`, where the `hostname` part is the
+        escaped hostname of the source endpoint of the connection, or the
+        numerical address if the hostname cannot be determined.
+      '';
+    };
+
+    port = lib.mkOption {
+      default = 19532;
+      type = lib.types.port;
+      description = ''
+        The port to listen on.
+
+        Note that this option is used only if
+        {option}`services.journald.remote.listen` is configured to be either
+        "https" or "http".
+      '';
+    };
+
+    settings = lib.mkOption {
+      default = { };
+
+      description = lib.mdDoc ''
+        Configuration in the journal-remote configuration file. See
+        {manpage}`journal-remote.conf(5)` for available options.
+      '';
+
+      type = lib.types.submodule {
+        freeformType = format.type;
+
+        options.Remote = {
+          Seal = lib.mkOption {
+            default = false;
+            example = true;
+            type = lib.types.bool;
+            description = ''
+              Periodically sign the data in the journal using Forward Secure
+              Sealing.
+            '';
+          };
+
+          SplitMode = lib.mkOption {
+            default = "host";
+            example = "none";
+            type = lib.types.enum [ "host" "none" ];
+            description = lib.mdDoc ''
+              With "host", a separate output file is used, based on the
+              hostname of the other endpoint of a connection. With "none", only
+              one output journal file is used.
+            '';
+          };
+
+          ServerKeyFile = lib.mkOption {
+            default = "/etc/ssl/private/journal-remote.pem";
+            type = lib.types.str;
+            description = lib.mdDoc ''
+              A path to an SSL secret key file in PEM format.
+
+              Note that for security reasons, `systemd-journal-remote` will
+              refuse files from the world-readable `/nix/store`. This file
+              should be readable by the user running `systemd-journal-remote`.
+
+              This option can be used with `listen = "https"`. If the path
+              refers to an `AF_UNIX` stream socket in the file system a
+              connection is made to it and the key read from it.
+            '';
+          };
+
+          ServerCertificateFile = lib.mkOption {
+            default = "/etc/ssl/certs/journal-remote.pem";
+            type = lib.types.str;
+            description = lib.mdDoc ''
+              A path to an SSL certificate file in PEM format.
+
+              This option can be used with `listen = "https"`. If the path
+              refers to an `AF_UNIX` stream socket in the file system a
+              connection is made to it and the certificate read from it.
+            '';
+          };
+
+          TrustedCertificateFile = lib.mkOption {
+            default = "/etc/ssl/ca/trusted.pem";
+            type = lib.types.str;
+            description = lib.mdDoc ''
+              A path to an SSL CA certificate file in PEM format, or `all`.
+
+              If `all` is set, then client certificate checking will be
+              disabled.
+
+              This option can be used with `listen = "https"`. If the path
+              refers to an `AF_UNIX` stream socket in the file system a
+              connection is made to it and the certificate read from it.
+            '';
+          };
+        };
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.additionalUpstreamSystemUnits = [
+      "systemd-journal-remote.service"
+      "systemd-journal-remote.socket"
+    ];
+
+    systemd.services.systemd-journal-remote.serviceConfig.ExecStart = [
+      # Clear the default command line
+      ""
+      "${pkgs.systemd}/lib/systemd/systemd-journal-remote ${cliArgs}"
+    ];
+
+    systemd.sockets.systemd-journal-remote = {
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [
+        # Clear the default port
+        ""
+        (toString cfg.port)
+      ];
+    };
+
+    # User and group used by systemd-journal-remote.service
+    users.groups.systemd-journal-remote = { };
+    users.users.systemd-journal-remote = {
+      isSystemUser = true;
+      group = "systemd-journal-remote";
+    };
+
+    environment.etc."systemd/journal-remote.conf".source =
+      format.generate "journal-remote.conf" cfg.settings;
+  };
+}
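
A sketch of receiving journals over plain HTTP with the new
services.journald.remote module; apart from `enable` and `listen`, the values
mirror the module defaults and are shown only for clarity:

    services.journald.remote = {
      enable = true;
      listen = "http";   # the default is "https"
      port = 19532;
      settings.Remote.SplitMode = "host";
    };
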
diff --git a/nixos/modules/system/boot/systemd/journald-upload.nix b/nixos/modules/system/boot/systemd/journald-upload.nix
new file mode 100644
index 0000000000000..6421e5fa486f9
--- /dev/null
+++ b/nixos/modules/system/boot/systemd/journald-upload.nix
@@ -0,0 +1,111 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.journald.upload;
+  format = pkgs.formats.systemd;
+in
+{
+  meta.maintainers = [ lib.maintainers.raitobezarius ];
+  options.services.journald.upload = {
+    enable = lib.mkEnableOption "uploading the systemd journal to a remote server";
+
+    settings = lib.mkOption {
+      default = { };
+
+      description = lib.mdDoc ''
+        Configuration for journal-upload. See {manpage}`journal-upload.conf(5)`
+        for available options.
+      '';
+
+      type = lib.types.submodule {
+        freeformType = format.type;
+
+        options.Upload = {
+          URL = lib.mkOption {
+            type = lib.types.str;
+            example = "https://192.168.1.1";
+            description = ''
+              The URL to upload the journal entries to.
+
+              See the description of `--url=` option in
+              {manpage}`systemd-journal-upload(8)` for the description of
+              possible values.
+            '';
+          };
+
+          ServerKeyFile = lib.mkOption {
+            type = with lib.types; nullOr str;
+            example = lib.literalExpression "./server-key.pem";
+            # Since systemd-journal-upload uses a DynamicUser, permissions must
+            # be done using groups
+            description = ''
+              SSL key in PEM format.
+
+              Contrary to what the name suggests, this option configures the
+              client private key sent to the remote journal server.
+
+              This key should not be world-readable, and must be readable by
+              the `systemd-journal` group.
+            '';
+            default = null;
+          };
+
+          ServerCertificateFile = lib.mkOption {
+            type = with lib.types; nullOr str;
+            example = lib.literalExpression "./server-ca.pem";
+            description = ''
+              SSL CA certificate in PEM format.
+
+              Contrary to what the name suggests, this option configures the
+              client certificate sent to the remote journal server.
+            '';
+            default = null;
+          };
+
+          TrustedCertificateFile = lib.mkOption {
+            type = with lib.types; nullOr str;
+            example = lib.literalExpression "./ca";
+            description = ''
+              SSL CA certificate.
+
+              This certificate will be used to check the remote journal HTTPS
+              server certificate.
+            '';
+            default = null;
+          };
+
+          NetworkTimeoutSec = lib.mkOption {
+            type = with lib.types; nullOr str;
+            example = "1s";
+            description = ''
+              When network connectivity to the server is lost, this option
+              configures the time to wait for connectivity to be restored.
+
+              If the server is not reachable over the network for the
+              configured time, `systemd-journal-upload` exits. Takes a value in
+              seconds (or in other time units if suffixed with "ms", "min",
+              "h", etc). For details, see {manpage}`systemd.time(5)`.
+            '';
+            default = null;
+          };
+        };
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.additionalUpstreamSystemUnits = [ "systemd-journal-upload.service" ];
+
+    systemd.services."systemd-journal-upload" = {
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Restart = "always";
+        # To prevent flooding the server in case the server is struggling
+        RestartSec = "3sec";
+      };
+    };
+
+    environment.etc."systemd/journal-upload.conf".source =
+      format.generate "journal-upload.conf" cfg.settings;
+  };
+}
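
A sketch of pushing the local journal to a collector with the new
services.journald.upload module; the URL and PEM paths are placeholders, and
the TLS files must live outside /nix/store and be readable by the
systemd-journal group:

    services.journald.upload = {
      enable = true;
      settings.Upload = {
        URL = "https://logs.example.org:19532";
        ServerKeyFile = "/var/lib/journal-upload/client-key.pem";
        ServerCertificateFile = "/var/lib/journal-upload/client-cert.pem";
        TrustedCertificateFile = "/var/lib/journal-upload/ca.pem";
      };
    };
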
diff --git a/nixos/modules/system/boot/systemd/journald.nix b/nixos/modules/system/boot/systemd/journald.nix
index 7e62a4c9bfedf..9a8e7d5926036 100644
--- a/nixos/modules/system/boot/systemd/journald.nix
+++ b/nixos/modules/system/boot/systemd/journald.nix
@@ -5,6 +5,10 @@ with lib;
 let
   cfg = config.services.journald;
 in {
+  imports = [
+    (mkRenamedOptionModule [ "services" "journald" "enableHttpGateway" ] [ "services" "journald" "gateway" "enable" ])
+  ];
+
   options = {
     services.journald.console = mkOption {
       default = "";
@@ -71,14 +75,6 @@ in {
       '';
     };
 
-    services.journald.enableHttpGateway = mkOption {
-      default = false;
-      type = types.bool;
-      description = lib.mdDoc ''
-        Whether to enable the HTTP gateway to the journal.
-      '';
-    };
-
     services.journald.forwardToSyslog = mkOption {
       default = config.services.rsyslogd.enable || config.services.syslog-ng.enable;
       defaultText = literalExpression "services.rsyslogd.enable || services.syslog-ng.enable";
@@ -101,9 +97,6 @@ in {
       ] ++ (optional (!config.boot.isContainer) "systemd-journald-audit.socket") ++ [
       "systemd-journald-dev-log.socket"
       "syslog.socket"
-      ] ++ optionals cfg.enableHttpGateway [
-      "systemd-journal-gatewayd.socket"
-      "systemd-journal-gatewayd.service"
       ];
 
     environment.etc = {
@@ -124,12 +117,6 @@ in {
     };
 
     users.groups.systemd-journal.gid = config.ids.gids.systemd-journal;
-    users.users.systemd-journal-gateway.uid = config.ids.uids.systemd-journal-gateway;
-    users.users.systemd-journal-gateway.group = "systemd-journal-gateway";
-    users.groups.systemd-journal-gateway.gid = config.ids.gids.systemd-journal-gateway;
-
-    systemd.sockets.systemd-journal-gatewayd.wantedBy =
-      optional cfg.enableHttpGateway "sockets.target";
 
     systemd.services.systemd-journal-flush.restartIfChanged = false;
     systemd.services.systemd-journald.restartTriggers = [ config.environment.etc."systemd/journald.conf".source ];
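
The rename module above keeps existing configurations working; the old and new
spellings are equivalent:

    # before
    services.journald.enableHttpGateway = true;
    # after
    services.journald.gateway.enable = true;
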
diff --git a/nixos/modules/system/boot/systemd/oomd.nix b/nixos/modules/system/boot/systemd/oomd.nix
index fad755e278c77..000b18c01609a 100644
--- a/nixos/modules/system/boot/systemd/oomd.nix
+++ b/nixos/modules/system/boot/systemd/oomd.nix
@@ -3,14 +3,18 @@
   cfg = config.systemd.oomd;
 
 in {
+  imports = [
+    (lib.mkRenamedOptionModule [ "systemd" "oomd" "enableUserServices" ] [ "systemd" "oomd" "enableUserSlices" ])
+  ];
+
   options.systemd.oomd = {
     enable = lib.mkEnableOption (lib.mdDoc "the `systemd-oomd` OOM killer") // { default = true; };
 
     # Fedora enables the first and third option by default. See the 10-oomd-* files here:
-    # https://src.fedoraproject.org/rpms/systemd/tree/acb90c49c42276b06375a66c73673ac351025597
+    # https://src.fedoraproject.org/rpms/systemd/tree/806c95e1c70af18f81d499b24cd7acfa4c36ffd6
     enableRootSlice = lib.mkEnableOption (lib.mdDoc "oomd on the root slice (`-.slice`)");
     enableSystemSlice = lib.mkEnableOption (lib.mdDoc "oomd on the system slice (`system.slice`)");
-    enableUserServices = lib.mkEnableOption (lib.mdDoc "oomd on all user services (`user@.service`)");
+    enableUserSlices = lib.mkEnableOption (lib.mdDoc "oomd on all user slices (`user@.slice`) and all user-owned slices");
 
     extraConfig = lib.mkOption {
       type = with lib.types; attrsOf (oneOf [ str int bool ]);
@@ -44,14 +48,24 @@ in {
     users.groups.systemd-oom = { };
 
     systemd.slices."-".sliceConfig = lib.mkIf cfg.enableRootSlice {
-      ManagedOOMSwap = "kill";
+      ManagedOOMMemoryPressure = "kill";
+      ManagedOOMMemoryPressureLimit = "80%";
     };
     systemd.slices."system".sliceConfig = lib.mkIf cfg.enableSystemSlice {
-      ManagedOOMSwap = "kill";
+      ManagedOOMMemoryPressure = "kill";
+      ManagedOOMMemoryPressureLimit = "80%";
     };
-    systemd.services."user@".serviceConfig = lib.mkIf cfg.enableUserServices {
+    systemd.slices."user-".sliceConfig = lib.mkIf cfg.enableUserSlices {
       ManagedOOMMemoryPressure = "kill";
-      ManagedOOMMemoryPressureLimit = "50%";
+      ManagedOOMMemoryPressureLimit = "80%";
+    };
+    systemd.user.units."slice" = lib.mkIf cfg.enableUserSlices {
+      text = ''
+        [Slice]
+        ManagedOOMMemoryPressure=kill
+        ManagedOOMMemoryPressureLimit=80%
+      '';
+      overrideStrategy = "asDropin";
     };
   };
 }
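
A sketch of the renamed oomd option; enableUserServices is rewritten to
enableUserSlices, which now applies pressure-based killing to user@.slice and
user-owned slices:

    systemd.oomd = {
      enable = true;             # already the default
      enableRootSlice = true;
      enableUserSlices = true;   # formerly enableUserServices
    };
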
diff --git a/nixos/modules/system/boot/systemd/repart.nix b/nixos/modules/system/boot/systemd/repart.nix
index 5ac2ace56ba02..3be744acd0b3b 100644
--- a/nixos/modules/system/boot/systemd/repart.nix
+++ b/nixos/modules/system/boot/systemd/repart.nix
@@ -83,6 +83,9 @@ in
       }
     ];
 
+    # systemd-repart uses loopback devices for partition creation
+    boot.initrd.availableKernelModules = lib.optional initrdCfg.enable "loop";
+
     boot.initrd.systemd = lib.mkIf initrdCfg.enable {
       additionalUpstreamUnits = [
         "systemd-repart.service"
diff --git a/nixos/modules/system/boot/systemd/sysupdate.nix b/nixos/modules/system/boot/systemd/sysupdate.nix
index b1914a9c4e767..cab35ddf270cb 100644
--- a/nixos/modules/system/boot/systemd/sysupdate.nix
+++ b/nixos/modules/system/boot/systemd/sysupdate.nix
@@ -71,7 +71,7 @@ in
       type = with lib.types; attrsOf format.type;
       default = { };
       example = {
-        "10-uki.conf" = {
+        "10-uki" = {
           Transfer = {
             ProtectVersion = "%A";
           };
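
After this change the example transfer name drops the `.conf` suffix; a
minimal sketch mirroring it, assuming the option lives at
systemd.sysupdate.transfers:

    systemd.sysupdate.transfers."10-uki" = {
      Transfer.ProtectVersion = "%A";
    };
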
diff --git a/nixos/modules/system/boot/systemd/tmpfiles.nix b/nixos/modules/system/boot/systemd/tmpfiles.nix
index 183e2033ecb01..dae23eddd1e2b 100644
--- a/nixos/modules/system/boot/systemd/tmpfiles.nix
+++ b/nixos/modules/system/boot/systemd/tmpfiles.nix
@@ -150,6 +150,41 @@ in
       "systemd-tmpfiles-setup.service"
     ];
 
+    # Allow systemd-tmpfiles to be restarted by switch-to-configuration. This
+    # service is not pulled into the normal boot process. It only exists for
+    # switch-to-configuration.
+    #
+    # This needs to be a separate unit because it does not execute
+    # systemd-tmpfiles with `--boot` as that is supposed to only be executed
+    # once at boot time.
+    #
+    # Keep this aligned with the upstream `systemd-tmpfiles-setup.service` unit.
+    systemd.services."systemd-tmpfiles-resetup" = {
+      description = "Re-setup tmpfiles on a system that is already running.";
+
+      requiredBy = [ "sysinit-reactivation.target" ];
+      after = [ "local-fs.target" "systemd-sysusers.service" "systemd-journald.service" ];
+      before = [ "sysinit-reactivation.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
+      restartTriggers = [ config.environment.etc."tmpfiles.d".source ];
+
+      unitConfig.DefaultDependencies = false;
+
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStart = "systemd-tmpfiles --create --remove --exclude-prefix=/dev";
+        SuccessExitStatus = "DATAERR CANTCREAT";
+        ImportCredential = [
+          "tmpfiles.*"
+          "loging.motd"
+          "login.issue"
+          "network.hosts"
+          "ssh.authorized_keys.root"
+        ];
+      };
+    };
+
     environment.etc = {
       "tmpfiles.d".source = (pkgs.symlinkJoin {
         name = "tmpfiles.d";
diff --git a/nixos/modules/system/boot/timesyncd.nix b/nixos/modules/system/boot/timesyncd.nix
index 7487cf97fe531..2666e4cd6b284 100644
--- a/nixos/modules/system/boot/timesyncd.nix
+++ b/nixos/modules/system/boot/timesyncd.nix
@@ -46,6 +46,13 @@ with lib;
       wantedBy = [ "sysinit.target" ];
       aliases = [ "dbus-org.freedesktop.timesync1.service" ];
       restartTriggers = [ config.environment.etc."systemd/timesyncd.conf".source ];
+      # systemd-timesyncd disables DNSSEC validation in the nss-resolve module by setting SYSTEMD_NSS_RESOLVE_VALIDATE to 0 in the unit file.
+      # This is required in order to solve the chicken-and-egg problem when DNSSEC validation needs the correct time to work, but to set the
+      # correct time, we need to connect to an NTP server, which usually requires resolving its hostname.
+      # In order for nss-resolve to be able to read this environment variable we patch systemd-timesyncd to disable NSCD and use NSS modules directly.
+      # This means that systemd-timesyncd needs to have NSS modules path in LD_LIBRARY_PATH. When systemd-resolved is disabled we still need to set
+      # NSS module path so that systemd-timesyncd keeps using other NSS modules that are configured in the system.
+      environment.LD_LIBRARY_PATH = config.system.nssModules.path;
 
       preStart = (
         # Ensure that we have some stored time to prevent
diff --git a/nixos/modules/tasks/filesystems.nix b/nixos/modules/tasks/filesystems.nix
index 91e30aa4c0af9..1378a0090c1df 100644
--- a/nixos/modules/tasks/filesystems.nix
+++ b/nixos/modules/tasks/filesystems.nix
@@ -406,7 +406,8 @@ in
             ConditionVirtualization = "!container";
             DefaultDependencies = false; # needed to prevent a cycle
           };
-          before = [ "systemd-pstore.service" ];
+          before = [ "systemd-pstore.service" "shutdown.target" ];
+          conflicts = [ "shutdown.target" ];
           wantedBy = [ "systemd-pstore.service" ];
         };
       };
diff --git a/nixos/modules/tasks/filesystems/bcachefs.nix b/nixos/modules/tasks/filesystems/bcachefs.nix
index f28fd5cde9c19..fdb149a3d9a17 100644
--- a/nixos/modules/tasks/filesystems/bcachefs.nix
+++ b/nixos/modules/tasks/filesystems/bcachefs.nix
@@ -57,7 +57,15 @@ let
   # bcachefs does not support mounting devices with colons in the path, ergo we don't (see #49671)
   firstDevice = fs: lib.head (lib.splitString ":" fs.device);
 
-  openCommand = name: fs: ''
+  openCommand = name: fs: if config.boot.initrd.clevis.enable && (lib.hasAttr (firstDevice fs) config.boot.initrd.clevis.devices) then ''
+    if clevis decrypt < /etc/clevis/${firstDevice fs}.jwe | bcachefs unlock ${firstDevice fs}
+    then
+      printf "unlocked ${name} using clevis\n"
+    else
+      printf "falling back to interactive unlocking...\n"
+      tryUnlock ${name} ${firstDevice fs}
+    fi
+  '' else ''
     tryUnlock ${name} ${firstDevice fs}
   '';
 
@@ -70,9 +78,10 @@ let
     value = {
       description = "Unlock bcachefs for ${fs.mountPoint}";
       requiredBy = [ mountUnit ];
-      before = [ mountUnit ];
-      bindsTo = [ deviceUnit ];
       after = [ deviceUnit ];
+      before = [ mountUnit "shutdown.target" ];
+      bindsTo = [ deviceUnit ];
+      conflicts = [ "shutdown.target" ];
       unitConfig.DefaultDependencies = false;
       serviceConfig = {
         Type = "oneshot";
@@ -114,15 +123,8 @@ in
       inherit assertions;
       # needed for systemd-remount-fs
       system.fsPackages = [ pkgs.bcachefs-tools ];
-
-      # FIXME: Replace this with `linuxPackages_testing` after NixOS 23.11 is released
-      # FIXME: Replace this with `linuxPackages_latest` when 6.7 is released, remove this line when the LTS version is at least 6.7
-      boot.kernelPackages = lib.mkDefault (
-        # FIXME: Remove warning after NixOS 23.11 is released
-        lib.warn "Please upgrade to Linux 6.7-rc1 or later: 'linuxPackages_testing_bcachefs' is deprecated. Use 'boot.kernelPackages = pkgs.linuxPackages_testing;' to silence this warning"
-        pkgs.linuxPackages_testing_bcachefs
-      );
-
+      # FIXME: Remove this line when the default kernel has bcachefs
+      boot.kernelPackages = lib.mkDefault pkgs.linuxPackages_latest;
       systemd.services = lib.mapAttrs' (mkUnits "") (lib.filterAttrs (n: fs: (fs.fsType == "bcachefs") && (!utils.fsNeededForBoot fs)) config.fileSystems);
     }
 
diff --git a/nixos/modules/tasks/filesystems/zfs.nix b/nixos/modules/tasks/filesystems/zfs.nix
index 72bc79f31b68a..b289d2151eb79 100644
--- a/nixos/modules/tasks/filesystems/zfs.nix
+++ b/nixos/modules/tasks/filesystems/zfs.nix
@@ -17,6 +17,9 @@ let
   cfgZED = config.services.zfs.zed;
 
   selectModulePackage = package: config.boot.kernelPackages.${package.kernelModuleAttribute};
+  clevisDatasets = map (e: e.device) (filter (e: e.device != null && (hasAttr e.device config.boot.initrd.clevis.devices) && e.fsType == "zfs" && (fsNeededForBoot e)) config.system.build.fileSystems);
+
+
   inInitrd = any (fs: fs == "zfs") config.boot.initrd.supportedFilesystems;
   inSystem = any (fs: fs == "zfs") config.boot.supportedFilesystems;
 
@@ -68,7 +71,7 @@ let
     done
     poolReady() {
       pool="$1"
-      state="$("${zpoolCmd}" import 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
+      state="$("${zpoolCmd}" import -d "${cfgZfs.devNodes}" 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
       if [[ "$state" = "ONLINE" ]]; then
         return 0
       else
@@ -105,12 +108,12 @@ let
 
   getKeyLocations = pool: if isBool cfgZfs.requestEncryptionCredentials then {
     hasKeys = cfgZfs.requestEncryptionCredentials;
-    command = "${cfgZfs.package}/sbin/zfs list -rHo name,keylocation,keystatus ${pool}";
+    command = "${cfgZfs.package}/sbin/zfs list -rHo name,keylocation,keystatus -t volume,filesystem ${pool}";
   } else let
     keys = filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials;
   in {
     hasKeys = keys != [];
-    command = "${cfgZfs.package}/sbin/zfs list -Ho name,keylocation,keystatus ${toString keys}";
+    command = "${cfgZfs.package}/sbin/zfs list -Ho name,keylocation,keystatus -t volume,filesystem ${toString keys}";
   };
 
   createImportService = { pool, systemd, force, prefix ? "" }:
@@ -120,14 +123,15 @@ let
       # but don't *require* it, because mounts shouldn't be killed if it's stopped.
       # In the future, hopefully someone will complete this:
       # https://github.com/zfsonlinux/zfs/pull/4943
-      wants = [ "systemd-udev-settle.service" ];
+      wants = [ "systemd-udev-settle.service" ] ++ optional (config.boot.initrd.clevis.useTang) "network-online.target";
       after = [
         "systemd-udev-settle.service"
         "systemd-modules-load.service"
         "systemd-ask-password-console.service"
-      ];
+      ] ++ optional (config.boot.initrd.clevis.useTang) "network-online.target";
       requiredBy = getPoolMounts prefix pool ++ [ "zfs-import.target" ];
-      before = getPoolMounts prefix pool ++ [ "zfs-import.target" ];
+      before = getPoolMounts prefix pool ++ [ "shutdown.target" "zfs-import.target" ];
+      conflicts = [ "shutdown.target" ];
       unitConfig = {
         DefaultDependencies = "no";
       };
@@ -154,6 +158,9 @@ let
           poolImported "${pool}" || poolImport "${pool}"  # Try one last time, e.g. to import a degraded pool.
         fi
         if poolImported "${pool}"; then
+        ${optionalString config.boot.initrd.clevis.enable (concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem} || true ") (filter (p: (elemAt (splitString "/" p) 0) == pool) clevisDatasets))}
+
+
           ${optionalString keyLocations.hasKeys ''
             ${keyLocations.command} | while IFS=$'\t' read ds kl ks; do
               {
@@ -502,9 +509,15 @@ in
     };
 
     services.zfs.zed = {
-      enableMail = mkEnableOption (lib.mdDoc "ZED's ability to send emails") // {
-        default = cfgZfs.package.enableMail;
-        defaultText = literalExpression "config.${optZfs.package}.enableMail";
+      enableMail = mkOption {
+        type = types.bool;
+        default = config.services.mail.sendmailSetuidWrapper != null;
+        defaultText = literalExpression ''
+          config.services.mail.sendmailSetuidWrapper != null
+        '';
+        description = mdDoc ''
+          Whether to enable ZED's ability to send emails.
+        '';
       };
 
       settings = mkOption {
@@ -545,14 +558,6 @@ in
           message = "The kernel module and the userspace tooling versions are not matching, this is an unsupported usecase.";
         }
         {
-          assertion = cfgZED.enableMail -> cfgZfs.package.enableMail;
-          message = ''
-            To allow ZED to send emails, ZFS needs to be configured to enable
-            this. To do so, one must override the `zfs` package and set
-            `enableMail` to true.
-          '';
-        }
-        {
           assertion = config.networking.hostId != null;
           message = "ZFS requires networking.hostId to be set";
         }
@@ -623,6 +628,9 @@ in
               fi
               poolImported "${pool}" || poolImport "${pool}"  # Try one last time, e.g. to import a degraded pool.
             fi
+
+            ${optionalString config.boot.initrd.clevis.enable (concatMapStringsSep "\n" (elem: "clevis decrypt < /etc/clevis/${elem}.jwe | zfs load-key ${elem}") (filter (p: (elemAt (splitString "/" p) 0) == pool) clevisDatasets))}
+
             ${if isBool cfgZfs.requestEncryptionCredentials
               then optionalString cfgZfs.requestEncryptionCredentials ''
                 zfs load-key -a
@@ -659,10 +667,17 @@ in
       # TODO FIXME See https://github.com/NixOS/nixpkgs/pull/99386#issuecomment-798813567. To not break people's bootloader and as probably not everybody would read release notes that thoroughly add inSystem.
       boot.loader.grub = mkIf (inInitrd || inSystem) {
         zfsSupport = true;
+        zfsPackage = cfgZfs.package;
       };
 
       services.zfs.zed.settings = {
-        ZED_EMAIL_PROG = mkIf cfgZED.enableMail (mkDefault "${pkgs.mailutils}/bin/mail");
+        ZED_EMAIL_PROG = mkIf cfgZED.enableMail (mkDefault (
+          config.security.wrapperDir + "/" +
+          config.services.mail.sendmailSetuidWrapper.program
+        ));
+        # subject in header for sendmail
+        ZED_EMAIL_OPTS = mkIf cfgZED.enableMail (mkDefault "@ADDRESS@");
+
         PATH = lib.makeBinPath [
           cfgZfs.package
           pkgs.coreutils
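
With the enableMail change above, ZED mail now keys off the system-wide
sendmail wrapper instead of a ZFS package override. One way to provide that
wrapper is msmtp; the host name and addresses below are placeholders:

    programs.msmtp = {
      enable = true;          # provides the sendmail setuid wrapper
      accounts.default = {
        host = "smtp.example.org";
        from = "zed@example.org";
      };
    };
    services.zfs.zed.settings.ZED_EMAIL_ADDR = [ "root" ];
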
diff --git a/nixos/modules/tasks/network-interfaces-scripted.nix b/nixos/modules/tasks/network-interfaces-scripted.nix
index e1ac7f24cb320..2f2d282fbefb4 100644
--- a/nixos/modules/tasks/network-interfaces-scripted.nix
+++ b/nixos/modules/tasks/network-interfaces-scripted.nix
@@ -70,7 +70,8 @@ let
         deviceDependency = dev:
           # Use systemd service if we manage device creation, else
           # trust udev when not in a container
-          if (hasAttr dev (filterAttrs (k: v: v.virtual) cfg.interfaces)) ||
+          if (dev == null || dev == "lo") then []
+          else if (hasAttr dev (filterAttrs (k: v: v.virtual) cfg.interfaces)) ||
              (hasAttr dev cfg.bridges) ||
              (hasAttr dev cfg.bonds) ||
              (hasAttr dev cfg.macvlans) ||
@@ -78,7 +79,7 @@ let
              (hasAttr dev cfg.vlans) ||
              (hasAttr dev cfg.vswitches)
           then [ "${dev}-netdev.service" ]
-          else optional (dev != null && dev != "lo" && !config.boot.isContainer) (subsystemDevice dev);
+          else optional (!config.boot.isContainer) (subsystemDevice dev);
 
         hasDefaultGatewaySet = (cfg.defaultGateway != null && cfg.defaultGateway.address != "")
                             || (cfg.enableIPv6 && cfg.defaultGateway6 != null && cfg.defaultGateway6.address != "");
diff --git a/nixos/modules/tasks/network-interfaces.nix b/nixos/modules/tasks/network-interfaces.nix
index 298add13437a0..ca0b219b3c93d 100644
--- a/nixos/modules/tasks/network-interfaces.nix
+++ b/nixos/modules/tasks/network-interfaces.nix
@@ -1396,6 +1396,8 @@ in
       "net.ipv4.conf.all.forwarding" = mkDefault (any (i: i.proxyARP) interfaces);
       "net.ipv6.conf.all.disable_ipv6" = mkDefault (!cfg.enableIPv6);
       "net.ipv6.conf.default.disable_ipv6" = mkDefault (!cfg.enableIPv6);
+      # allow all users to do ICMP echo requests (ping)
+      "net.ipv4.ping_group_range" = mkDefault "0 2147483647";
       # networkmanager falls back to "/proc/sys/net/ipv6/conf/default/use_tempaddr"
       "net.ipv6.conf.default.use_tempaddr" = tempaddrValues.${cfg.tempAddresses}.sysctl;
     } // listToAttrs (forEach interfaces
@@ -1408,9 +1410,11 @@ in
 
     systemd.services.domainname = lib.mkIf (cfg.domain != null) {
       wantedBy = [ "sysinit.target" ];
-      before = [ "sysinit.target" ];
+      before = [ "sysinit.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
       unitConfig.DefaultDependencies = false;
       serviceConfig.ExecStart = ''${pkgs.nettools}/bin/domainname "${cfg.domain}"'';
+      serviceConfig.Type = "oneshot";
     };
 
     environment.etc.hostid = mkIf (cfg.hostId != null) { source = hostidFile; };
@@ -1445,16 +1449,6 @@ in
       listToAttrs
     ];
 
-    # The network-interfaces target is kept for backwards compatibility.
-    # New modules must NOT use it.
-    systemd.targets.network-interfaces =
-      { description = "All Network Interfaces (deprecated)";
-        wantedBy = [ "network.target" ];
-        before = [ "network.target" ];
-        after = [ "network-pre.target" ];
-        unitConfig.X-StopOnReconfiguration = true;
-      };
-
     systemd.services = {
       network-local-commands = {
         description = "Extra networking commands.";
diff --git a/nixos/modules/tasks/trackpoint.nix b/nixos/modules/tasks/trackpoint.nix
index d197a0feb337c..b3f6f32eaa473 100644
--- a/nixos/modules/tasks/trackpoint.nix
+++ b/nixos/modules/tasks/trackpoint.nix
@@ -80,10 +80,17 @@ with lib;
         ACTION=="add|change", SUBSYSTEM=="input", ATTR{name}=="${cfg.device}", ATTR{device/speed}="${toString cfg.speed}", ATTR{device/sensitivity}="${toString cfg.sensitivity}"
       '';
 
-      system.activationScripts.trackpoint =
-        ''
+      systemd.services.trackpoint = {
+        wantedBy = [ "sysinit.target" ] ;
+        before = [ "sysinit.target" "shutdown.target" ];
+        conflicts = [ "shutdown.target" ];
+        unitConfig.DefaultDependencies = false;
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+        serviceConfig.ExecStart = ''
           ${config.systemd.package}/bin/udevadm trigger --attr-match=name="${cfg.device}"
         '';
+      };
     })
 
     (mkIf (cfg.emulateWheel) {
diff --git a/nixos/modules/virtualisation/amazon-image.nix b/nixos/modules/virtualisation/amazon-image.nix
index aa44f26426970..f0d9b95f81f6b 100644
--- a/nixos/modules/virtualisation/amazon-image.nix
+++ b/nixos/modules/virtualisation/amazon-image.nix
@@ -71,6 +71,7 @@ in
 
     systemd.services.fetch-ec2-metadata = {
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = ["network-online.target"];
       path = [ pkgs.curl ];
       script = builtins.readFile ./ec2-metadata-fetcher.sh;
diff --git a/nixos/modules/virtualisation/azure-agent.nix b/nixos/modules/virtualisation/azure-agent.nix
index e712fac17a462..ac4cd752615da 100644
--- a/nixos/modules/virtualisation/azure-agent.nix
+++ b/nixos/modules/virtualisation/azure-agent.nix
@@ -202,6 +202,13 @@ in
 
     services.udev.packages = [ pkgs.waagent ];
 
+    # Provide waagent-shipped udev rules in initrd too.
+    boot.initrd.services.udev.packages = [ pkgs.waagent ];
+    # udev rules shell out to chmod, cut and readlink, which are all
+    # provided by pkgs.coreutils, which is in services.udev.path, but not
+    # boot.initrd.services.udev.binPackages.
+    boot.initrd.services.udev.binPackages = [ pkgs.coreutils ];
+
     networking.dhcpcd.persistent = true;
 
     services.logrotate = {
@@ -245,6 +252,27 @@ in
         pkgs.e2fsprogs
         pkgs.bash
 
+        pkgs.findutils
+        pkgs.gnugrep
+        pkgs.gnused
+        pkgs.iproute2
+        pkgs.iptables
+
+        # for hostname
+        pkgs.nettools
+
+        pkgs.openssh
+        pkgs.openssl
+        pkgs.parted
+
+        # for pidof
+        pkgs.procps
+
+        # for useradd, usermod
+        pkgs.shadow
+
+        pkgs.util-linux # for (u)mount, fdisk, sfdisk, mkswap
+
         # waagent's Microsoft.OSTCExtensions.VMAccessForLinux needs Python 3
         pkgs.python39
 
diff --git a/nixos/modules/virtualisation/incus.nix b/nixos/modules/virtualisation/incus.nix
index 47a5e462262d4..ea4cb916aa08d 100644
--- a/nixos/modules/virtualisation/incus.nix
+++ b/nixos/modules/virtualisation/incus.nix
@@ -5,7 +5,9 @@ let
   preseedFormat = pkgs.formats.yaml { };
 in
 {
-  meta.maintainers = [ lib.maintainers.adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   options = {
     virtualisation.incus = {
@@ -148,10 +150,12 @@ in
       after = [
         "network-online.target"
         "lxcfs.service"
-      ] ++ (lib.optional cfg.socketActivation "incus.socket");
+        "incus.socket"
+      ];
       requires = [
         "lxcfs.service"
-      ] ++ (lib.optional cfg.socketActivation "incus.socket");
+        "incus.socket"
+      ];
       wants = [
         "network-online.target"
       ];
@@ -181,7 +185,7 @@ in
       };
     };
 
-    systemd.sockets.incus = lib.mkIf cfg.socketActivation {
+    systemd.sockets.incus = {
       description = "Incus UNIX socket";
       wantedBy = [ "sockets.target" ];
 
@@ -189,7 +193,6 @@ in
         ListenStream = "/var/lib/incus/unix.socket";
         SocketMode = "0660";
         SocketGroup = "incus-admin";
-        Service = "incus.service";
       };
     };
 
diff --git a/nixos/modules/virtualisation/libvirtd.nix b/nixos/modules/virtualisation/libvirtd.nix
index e195ff937d68e..217242a8fbd22 100644
--- a/nixos/modules/virtualisation/libvirtd.nix
+++ b/nixos/modules/virtualisation/libvirtd.nix
@@ -116,6 +116,15 @@ let
           QEMU's swtpm options.
         '';
       };
+
+      vhostUserPackages = mkOption {
+        type = types.listOf types.package;
+        default = [ ];
+        example = lib.literalExpression "[ pkgs.virtiofsd ]";
+        description = lib.mdDoc ''
+          Packages containing out-of-tree vhost-user drivers.
+        '';
+      };
     };
   };
 
@@ -502,6 +511,14 @@ in
     # https://libvirt.org/daemons.html#monolithic-systemd-integration
     systemd.sockets.libvirtd.wantedBy = [ "sockets.target" ];
 
+    systemd.tmpfiles.rules = let
+      vhostUserCollection = pkgs.buildEnv {
+        name = "vhost-user";
+        paths = cfg.qemu.vhostUserPackages;
+        pathsToLink = [ "/share/qemu/vhost-user" ];
+      };
+    in [ "L+ /var/lib/qemu/vhost-user - - - - ${vhostUserCollection}/share/qemu/vhost-user" ];
+
     security.polkit = {
       enable = true;
       extraConfig = ''
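
A sketch of the new vhostUserPackages option, which links out-of-tree
vhost-user driver descriptions into /var/lib/qemu/vhost-user via the tmpfiles
rule above:

    virtualisation.libvirtd = {
      enable = true;
      qemu.vhostUserPackages = [ pkgs.virtiofsd ];
    };
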
diff --git a/nixos/modules/virtualisation/lxc-container.nix b/nixos/modules/virtualisation/lxc-container.nix
index 4db4df02fe8c9..8d3a480e6dc8c 100644
--- a/nixos/modules/virtualisation/lxc-container.nix
+++ b/nixos/modules/virtualisation/lxc-container.nix
@@ -1,7 +1,9 @@
 { lib, config, pkgs, ... }:
 
 {
-  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   imports = [
     ./lxc-instance-common.nix
diff --git a/nixos/modules/virtualisation/lxc.nix b/nixos/modules/virtualisation/lxc.nix
index 5bd64a5f9a565..3febb4b4f2483 100644
--- a/nixos/modules/virtualisation/lxc.nix
+++ b/nixos/modules/virtualisation/lxc.nix
@@ -2,21 +2,19 @@
 
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
-
   cfg = config.virtualisation.lxc;
-
 in
 
 {
-  ###### interface
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   options.virtualisation.lxc = {
     enable =
-      mkOption {
-        type = types.bool;
+      lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description =
           lib.mdDoc ''
@@ -27,8 +25,8 @@ in
       };
 
     systemConfig =
-      mkOption {
-        type = types.lines;
+      lib.mkOption {
+        type = lib.types.lines;
         default = "";
         description =
           lib.mdDoc ''
@@ -38,8 +36,8 @@ in
       };
 
     defaultConfig =
-      mkOption {
-        type = types.lines;
+      lib.mkOption {
+        type = lib.types.lines;
         default = "";
         description =
           lib.mdDoc ''
@@ -49,8 +47,8 @@ in
       };
 
     usernetConfig =
-      mkOption {
-        type = types.lines;
+      lib.mkOption {
+        type = lib.types.lines;
         default = "";
         description =
           lib.mdDoc ''
@@ -62,7 +60,7 @@ in
 
   ###### implementation
 
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
     environment.systemPackages = [ pkgs.lxc ];
     environment.etc."lxc/lxc.conf".text = cfg.systemConfig;
     environment.etc."lxc/lxc-usernet".text = cfg.usernetConfig;
diff --git a/nixos/modules/virtualisation/lxcfs.nix b/nixos/modules/virtualisation/lxcfs.nix
index fb0ba49f73044..b2eaec774a65c 100644
--- a/nixos/modules/virtualisation/lxcfs.nix
+++ b/nixos/modules/virtualisation/lxcfs.nix
@@ -2,18 +2,18 @@
 
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
   cfg = config.virtualisation.lxc.lxcfs;
 in {
-  meta.maintainers = [ maintainers.mic92 ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   ###### interface
   options.virtualisation.lxc.lxcfs = {
     enable =
-      mkOption {
-        type = types.bool;
+      lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc ''
           This enables LXCFS, a FUSE filesystem for LXC.
@@ -27,7 +27,7 @@ in {
   };
 
   ###### implementation
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
     systemd.services.lxcfs = {
       description = "FUSE filesystem for LXC";
       wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/virtualisation/lxd-agent.nix b/nixos/modules/virtualisation/lxd-agent.nix
index 5bcc86e3bcbe9..8a2a1530eeb79 100644
--- a/nixos/modules/virtualisation/lxd-agent.nix
+++ b/nixos/modules/virtualisation/lxd-agent.nix
@@ -45,7 +45,9 @@ let
     chown -R root:root "$PREFIX"
   '';
 in {
-  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   options = {
     virtualisation.lxd.agent.enable = lib.mkEnableOption (lib.mdDoc "Enable LXD agent");
@@ -56,18 +58,28 @@ in {
     systemd.services.lxd-agent = {
       enable = true;
       wantedBy = [ "multi-user.target" ];
-      path = [ pkgs.kmod pkgs.util-linux ];
+      before = [ "shutdown.target" ] ++ lib.optionals config.services.cloud-init.enable [
+        "cloud-init.target" "cloud-init.service" "cloud-init-local.service"
+      ];
+      conflicts = [ "shutdown.target" ];
+      path = [
+        pkgs.kmod
+        pkgs.util-linux
+
+        # allow `lxc exec` to find system binaries
+        "/run/current-system/sw"
+      ];
 
       preStart = preStartScript;
 
       # avoid killing nixos-rebuild switch when executed through lxc exec
+      restartIfChanged = false;
       stopIfChanged = false;
 
       unitConfig = {
         Description = "LXD - agent";
         Documentation = "https://documentation.ubuntu.com/lxd/en/latest";
         ConditionPathExists = "/dev/virtio-ports/org.linuxcontainers.lxd";
-        Before = lib.optionals config.services.cloud-init.enable [ "cloud-init.target" "cloud-init.service" "cloud-init-local.service" ];
         DefaultDependencies = "no";
         StartLimitInterval = "60";
         StartLimitBurst = "10";
diff --git a/nixos/modules/virtualisation/lxd-virtual-machine.nix b/nixos/modules/virtualisation/lxd-virtual-machine.nix
index ba729465ec2f8..92434cb9babf4 100644
--- a/nixos/modules/virtualisation/lxd-virtual-machine.nix
+++ b/nixos/modules/virtualisation/lxd-virtual-machine.nix
@@ -6,6 +6,10 @@ let
     then "ttyS0"
     else "ttyAMA0"; # aarch64
 in {
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
+
   imports = [
     ./lxc-instance-common.nix
 
diff --git a/nixos/modules/virtualisation/lxd.nix b/nixos/modules/virtualisation/lxd.nix
index 6f628c4a6e328..e0d61b1754949 100644
--- a/nixos/modules/virtualisation/lxd.nix
+++ b/nixos/modules/virtualisation/lxd.nix
@@ -6,12 +6,14 @@ let
   cfg = config.virtualisation.lxd;
   preseedFormat = pkgs.formats.yaml {};
 in {
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
+
   imports = [
     (lib.mkRemovedOptionModule [ "virtualisation" "lxd" "zfsPackage" ] "Override zfs in an overlay instead to override it globally")
   ];
 
-  ###### interface
-
   options = {
     virtualisation.lxd = {
       enable = lib.mkOption {
@@ -31,21 +33,11 @@ in {
         '';
       };
 
-      package = lib.mkOption {
-        type = lib.types.package;
-        default = pkgs.lxd;
-        defaultText = lib.literalExpression "pkgs.lxd";
-        description = lib.mdDoc ''
-          The LXD package to use.
-        '';
-      };
+      package = lib.mkPackageOption pkgs "lxd" { };
 
-      lxcPackage = lib.mkOption {
-        type = lib.types.package;
-        default = pkgs.lxc;
-        defaultText = lib.literalExpression "pkgs.lxc";
-        description = lib.mdDoc ''
-          The LXC package to use with LXD (required for AppArmor profiles).
+      lxcPackage = lib.mkPackageOption pkgs "lxc" {
+        extraDescription = ''
+          Required for AppArmor profiles.
         '';
       };
 
@@ -147,7 +139,7 @@ in {
       ui = {
         enable = lib.mkEnableOption (lib.mdDoc "(experimental) LXD UI");
 
-        package = lib.mkPackageOption pkgs.lxd-unwrapped "ui" { };
+        package = lib.mkPackageOption pkgs [ "lxd-unwrapped" "ui" ] { };
       };
     };
   };
@@ -222,16 +214,14 @@ in {
         LimitNPROC = "infinity";
         TasksMax = "infinity";
 
-        Restart = "on-failure";
-        TimeoutStartSec = "${cfg.startTimeout}s";
-        TimeoutStopSec = "30s";
-
         # By default, `lxd` loads configuration files from hard-coded
         # `/usr/share/lxc/config` - since this is a no-go for us, we have to
         # explicitly tell it where the actual configuration files are
         Environment = lib.mkIf (config.virtualisation.lxc.lxcfs.enable)
           "LXD_LXC_TEMPLATE_CONFIG=${pkgs.lxcfs}/share/lxc/config";
       };
+
+      unitConfig.ConditionPathExists = "!/var/lib/incus/.migrated-from-lxd";
     };
 
     systemd.services.lxd-preseed = lib.mkIf (cfg.preseed != null) {
diff --git a/nixos/modules/virtualisation/oci-containers.nix b/nixos/modules/virtualisation/oci-containers.nix
index a4a40346f093b..07ed08ab2f84d 100644
--- a/nixos/modules/virtualisation/oci-containers.nix
+++ b/nixos/modules/virtualisation/oci-containers.nix
@@ -267,6 +267,7 @@ let
     };
   in {
     wantedBy = [] ++ optional (container.autoStart) "multi-user.target";
+    wants = lib.optional (container.imageFile == null)  "network-online.target";
     after = lib.optionals (cfg.backend == "docker") [ "docker.service" "docker.socket" ]
             # if imageFile is not set, the service needs the network to download the image from the registry
             ++ lib.optionals (container.imageFile == null) [ "network-online.target" ]
diff --git a/nixos/modules/virtualisation/podman/default.nix b/nixos/modules/virtualisation/podman/default.nix
index ec0b713e58b38..47382f9beab00 100644
--- a/nixos/modules/virtualisation/podman/default.nix
+++ b/nixos/modules/virtualisation/podman/default.nix
@@ -150,26 +150,33 @@ in
 
   };
 
-  config = lib.mkIf cfg.enable
-    {
+  config =
+    let
+      networkConfig = ({
+        dns_enabled = false;
+        driver = "bridge";
+        id = "0000000000000000000000000000000000000000000000000000000000000000";
+        internal = false;
+        ipam_options = { driver = "host-local"; };
+        ipv6_enabled = false;
+        name = "podman";
+        network_interface = "podman0";
+        subnets = [{ gateway = "10.88.0.1"; subnet = "10.88.0.0/16"; }];
+      } // cfg.defaultNetwork.settings);
+      inherit (networkConfig) dns_enabled network_interface;
+    in
+    lib.mkIf cfg.enable {
       environment.systemPackages = [ cfg.package ]
         ++ lib.optional cfg.dockerCompat dockerCompat;
 
       # https://github.com/containers/podman/blob/097cc6eb6dd8e598c0e8676d21267b4edb11e144/docs/tutorials/basic_networking.md#default-network
       environment.etc."containers/networks/podman.json" = lib.mkIf (cfg.defaultNetwork.settings != { }) {
-        source = json.generate "podman.json" ({
-          dns_enabled = false;
-          driver = "bridge";
-          id = "0000000000000000000000000000000000000000000000000000000000000000";
-          internal = false;
-          ipam_options = { driver = "host-local"; };
-          ipv6_enabled = false;
-          name = "podman";
-          network_interface = "podman0";
-          subnets = [{ gateway = "10.88.0.1"; subnet = "10.88.0.0/16"; }];
-        } // cfg.defaultNetwork.settings);
+        source = json.generate "podman.json" networkConfig;
       };
 
+      # containers cannot reach aardvark-dns otherwise
+      networking.firewall.interfaces.${network_interface}.allowedUDPPorts = lib.mkIf dns_enabled [ 53 ];
+
       virtualisation.containers = {
         enable = true; # Enable common /etc/containers configuration
         containersConf.settings = {
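
With the refactor above, enabling DNS in the default podman network also opens
UDP port 53 on the podman0 bridge automatically; a sketch:

    virtualisation.podman = {
      enable = true;
      defaultNetwork.settings.dns_enabled = true;
    };
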
diff --git a/nixos/modules/virtualisation/qemu-vm.nix b/nixos/modules/virtualisation/qemu-vm.nix
index d92fd48a6103c..3d7f3ccb62f84 100644
--- a/nixos/modules/virtualisation/qemu-vm.nix
+++ b/nixos/modules/virtualisation/qemu-vm.nix
@@ -1256,6 +1256,8 @@ in
         unitConfig.RequiresMountsFor = "/sysroot/nix/.ro-store";
       }];
       services.rw-store = {
+        before = [ "shutdown.target" ];
+        conflicts = [ "shutdown.target" ];
         unitConfig = {
           DefaultDependencies = false;
           RequiresMountsFor = "/sysroot/nix/.rw-store";
diff --git a/nixos/modules/virtualisation/vmware-host.nix b/nixos/modules/virtualisation/vmware-host.nix
index 1eaa896fe0965..094114623a424 100644
--- a/nixos/modules/virtualisation/vmware-host.nix
+++ b/nixos/modules/virtualisation/vmware-host.nix
@@ -85,34 +85,43 @@ in
       };
     };
 
-    ###### wrappers activation script
+    # Services
 
-    system.activationScripts.vmwareWrappers =
-      lib.stringAfter [ "specialfs" "users" ]
-        ''
-          mkdir -p "${parentWrapperDir}"
-          chmod 755 "${parentWrapperDir}"
-          # We want to place the tmpdirs for the wrappers to the parent dir.
-          wrapperDir=$(mktemp --directory --tmpdir="${parentWrapperDir}" wrappers.XXXXXXXXXX)
-          chmod a+rx "$wrapperDir"
-          ${lib.concatStringsSep "\n" (vmwareWrappers)}
-          if [ -L ${wrapperDir} ]; then
-            # Atomically replace the symlink
-            # See https://axialcorps.com/2013/07/03/atomically-replacing-files-and-directories/
-            old=$(readlink -f ${wrapperDir})
-            if [ -e "${wrapperDir}-tmp" ]; then
-              rm --force --recursive "${wrapperDir}-tmp"
-            fi
-            ln --symbolic --force --no-dereference "$wrapperDir" "${wrapperDir}-tmp"
-            mv --no-target-directory "${wrapperDir}-tmp" "${wrapperDir}"
-            rm --force --recursive "$old"
-          else
-            # For initial setup
-            ln --symbolic "$wrapperDir" "${wrapperDir}"
+    systemd.services."vmware-wrappers" = {
+      description = "Create VMVare Wrappers";
+      wantedBy = [ "multi-user.target" ];
+      before = [
+        "vmware-authdlauncher.service"
+        "vmware-networks-configuration.service"
+        "vmware-networks.service"
+        "vmware-usbarbitrator.service"
+      ];
+      after = [ "systemd-sysusers.service" ];
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      script = ''
+        mkdir -p "${parentWrapperDir}"
+        chmod 755 "${parentWrapperDir}"
+        # We want to place the tmpdirs for the wrappers to the parent dir.
+        wrapperDir=$(mktemp --directory --tmpdir="${parentWrapperDir}" wrappers.XXXXXXXXXX)
+        chmod a+rx "$wrapperDir"
+        ${lib.concatStringsSep "\n" (vmwareWrappers)}
+        if [ -L ${wrapperDir} ]; then
+          # Atomically replace the symlink
+          # See https://axialcorps.com/2013/07/03/atomically-replacing-files-and-directories/
+          old=$(readlink -f ${wrapperDir})
+          if [ -e "${wrapperDir}-tmp" ]; then
+            rm --force --recursive "${wrapperDir}-tmp"
           fi
-        '';
-
-    # Services
+          ln --symbolic --force --no-dereference "$wrapperDir" "${wrapperDir}-tmp"
+          mv --no-target-directory "${wrapperDir}-tmp" "${wrapperDir}"
+          rm --force --recursive "$old"
+        else
+          # For initial setup
+          ln --symbolic "$wrapperDir" "${wrapperDir}"
+        fi
+      '';
+    };
 
     systemd.services."vmware-authdlauncher" = {
       description = "VMware Authentication Daemon";
diff --git a/nixos/modules/virtualisation/waydroid.nix b/nixos/modules/virtualisation/waydroid.nix
index b0e85b685083b..1f466c780cf22 100644
--- a/nixos/modules/virtualisation/waydroid.nix
+++ b/nixos/modules/virtualisation/waydroid.nix
@@ -32,7 +32,7 @@ in
     system.requiredKernelConfig = [
       (kCfg.isEnabled "ANDROID_BINDER_IPC")
       (kCfg.isEnabled "ANDROID_BINDERFS")
-      (kCfg.isEnabled "ASHMEM") # FIXME Needs memfd support instead on Linux 5.18 and waydroid 1.2.1
+      (kCfg.isEnabled "MEMFD_CREATE")
     ];
 
     /* NOTE: we always enable this flag even if CONFIG_PSI_DEFAULT_DISABLED is not on
diff --git a/nixos/release-combined.nix b/nixos/release-combined.nix
index 9b4b92be6f3ac..7700441b1d6be 100644
--- a/nixos/release-combined.nix
+++ b/nixos/release-combined.nix
@@ -5,7 +5,7 @@
 { nixpkgs ? { outPath = (import ../lib).cleanSource ./..; revCount = 56789; shortRev = "gfedcba"; }
 , stableBranch ? false
 , supportedSystems ? [ "aarch64-linux" "x86_64-linux" ]
-, limitedSupportedSystems ? [ "i686-linux" ]
+, limitedSupportedSystems ? [ ]
 }:
 
 let
@@ -90,6 +90,7 @@ in rec {
         (onSystems ["x86_64-linux"] "nixos.tests.installer.btrfsSubvols")
         (onSystems ["x86_64-linux"] "nixos.tests.installer.luksroot")
         (onSystems ["x86_64-linux"] "nixos.tests.installer.lvm")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.separateBootZfs")
         (onSystems ["x86_64-linux"] "nixos.tests.installer.separateBootFat")
         (onSystems ["x86_64-linux"] "nixos.tests.installer.separateBoot")
         (onSystems ["x86_64-linux"] "nixos.tests.installer.simpleLabels")
@@ -167,10 +168,11 @@ in rec {
         (onFullSupported "nixos.tests.xfce")
         (onFullSupported "nixpkgs.emacs")
         (onFullSupported "nixpkgs.jdk")
+        (onSystems ["x86_64-linux"] "nixpkgs.mesa_i686") # i686 sanity check + useful
         ["nixpkgs.tarball"]
 
-        # Ensure that nixpkgs-check-by-name is available in all release channels and nixos-unstable,
-        # so that a pre-built version can be used in CI for PR's on the corresponding development branches.
+        # Ensure that nixpkgs-check-by-name is available in nixos-unstable,
+        # so that a pre-built version can be used in CI for PRs
         # See ../pkgs/test/nixpkgs-check-by-name/README.md
         (onSystems ["x86_64-linux"] "nixpkgs.tests.nixpkgs-check-by-name")
       ];
diff --git a/nixos/tests/3proxy.nix b/nixos/tests/3proxy.nix
index 83d39de018a39..b80b4e166d481 100644
--- a/nixos/tests/3proxy.nix
+++ b/nixos/tests/3proxy.nix
@@ -134,6 +134,7 @@
   testScript = ''
     start_all()
 
+    peer0.systemctl("start network-online.target")
     peer0.wait_for_unit("network-online.target")
 
     peer1.wait_for_unit("3proxy.service")
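The explicit start of network-online.target before the wait recurs throughout the test changes below (acme, adguardhome, bittorrent, buildbot, corerad, curl-impersonate, ferm, gitdaemon, and others): the target is only reached if something requests it, so the tests now request it themselves instead of relying on it being pulled in during boot. The recurring pattern, written out for a generic test node:

    # Request the target, then wait for it; the wait alone could hang if
    # nothing else pulled network-online.target into the transaction.
    machine.systemctl("start network-online.target")
    machine.wait_for_unit("network-online.target")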
diff --git a/nixos/tests/acme.nix b/nixos/tests/acme.nix
index e5f2d4c7934a1..272782dc2f621 100644
--- a/nixos/tests/acme.nix
+++ b/nixos/tests/acme.nix
@@ -522,6 +522,7 @@ in {
           'curl --data \'{"host": "${caDomain}", "addresses": ["${nodes.acme.networking.primaryIPAddress}"]}\' http://${dnsServerIP nodes}:8055/add-a'
       )
 
+      acme.systemctl("start network-online.target")
       acme.wait_for_unit("network-online.target")
       acme.wait_for_unit("pebble.service")
 
diff --git a/nixos/tests/adguardhome.nix b/nixos/tests/adguardhome.nix
index a6f790b83f5fc..80613ce825340 100644
--- a/nixos/tests/adguardhome.nix
+++ b/nixos/tests/adguardhome.nix
@@ -126,6 +126,7 @@
 
     with subtest("Testing successful DHCP start"):
         dhcpConf.wait_for_unit("adguardhome.service")
+        client.systemctl("start network-online.target")
         client.wait_for_unit("network-online.target")
         # Test IP assignment via DHCP
         dhcpConf.wait_until_succeeds("ping -c 5 10.0.10.100")
diff --git a/nixos/tests/all-terminfo.nix b/nixos/tests/all-terminfo.nix
index dd47c66ee1c1e..2f5e56f09f26b 100644
--- a/nixos/tests/all-terminfo.nix
+++ b/nixos/tests/all-terminfo.nix
@@ -10,7 +10,11 @@ import ./make-test-python.nix ({ pkgs, ... }: rec {
         let
           o = builtins.tryEval drv;
         in
-        o.success && lib.isDerivation o.value && o.value ? outputs && builtins.elem "terminfo" o.value.outputs;
+        o.success &&
+        lib.isDerivation o.value &&
+        o.value ? outputs &&
+        builtins.elem "terminfo" o.value.outputs &&
+        !o.value.meta.broken;
       terminfos = lib.filterAttrs infoFilter pkgs;
       excludedTerminfos = lib.filterAttrs (_: drv: !(builtins.elem drv.terminfo config.environment.systemPackages)) terminfos;
       includedOuts = lib.filterAttrs (_: drv: builtins.elem drv.out config.environment.systemPackages) terminfos;
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 1ed0f760c9a21..9e27969190f75 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -120,6 +120,7 @@ in {
   amazon-ssm-agent = handleTest ./amazon-ssm-agent.nix {};
   amd-sev = runTest ./amd-sev.nix;
   anbox = runTest ./anbox.nix;
+  angie-api = handleTest ./angie-api.nix {};
   anki-sync-server = handleTest ./anki-sync-server.nix {};
   anuko-time-tracker = handleTest ./anuko-time-tracker.nix {};
   apcupsd = handleTest ./apcupsd.nix {};
@@ -135,6 +136,7 @@ in {
   authelia = handleTest ./authelia.nix {};
   avahi = handleTest ./avahi.nix {};
   avahi-with-resolved = handleTest ./avahi.nix { networkd = true; };
+  ayatana-indicators = handleTest ./ayatana-indicators.nix {};
   babeld = handleTest ./babeld.nix {};
   bazarr = handleTest ./bazarr.nix {};
   bcachefs = handleTestOn ["x86_64-linux" "aarch64-linux"] ./bcachefs.nix {};
@@ -187,6 +189,7 @@ in {
   chrony = handleTestOn ["aarch64-linux" "x86_64-linux"] ./chrony.nix {};
   chrony-ptp = handleTestOn ["aarch64-linux" "x86_64-linux"] ./chrony-ptp.nix {};
   cinnamon = handleTest ./cinnamon.nix {};
+  cinnamon-wayland = handleTest ./cinnamon-wayland.nix {};
   cjdns = handleTest ./cjdns.nix {};
   clickhouse = handleTest ./clickhouse.nix {};
   cloud-init = handleTest ./cloud-init.nix {};
@@ -195,6 +198,7 @@ in {
   cntr = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cntr.nix {};
   cockpit = handleTest ./cockpit.nix {};
   cockroachdb = handleTestOn ["x86_64-linux"] ./cockroachdb.nix {};
+  code-server = handleTest ./code-server.nix {};
   coder = handleTest ./coder.nix {};
   collectd = handleTest ./collectd.nix {};
   connman = handleTest ./connman.nix {};
@@ -253,6 +257,7 @@ in {
   dolibarr = handleTest ./dolibarr.nix {};
   domination = handleTest ./domination.nix {};
   dovecot = handleTest ./dovecot.nix {};
+  drawterm = discoverTests (import ./drawterm.nix);
   drbd = handleTest ./drbd.nix {};
   dublin-traceroute = handleTest ./dublin-traceroute.nix {};
   earlyoom = handleTestOn ["x86_64-linux"] ./earlyoom.nix {};
@@ -313,6 +318,7 @@ in {
   freetube = discoverTests (import ./freetube.nix);
   freshrss-sqlite = handleTest ./freshrss-sqlite.nix {};
   freshrss-pgsql = handleTest ./freshrss-pgsql.nix {};
+  freshrss-http-auth = handleTest ./freshrss-http-auth.nix {};
   frigate = handleTest ./frigate.nix {};
   frp = handleTest ./frp.nix {};
   frr = handleTest ./frr.nix {};
@@ -338,6 +344,7 @@ in {
   gnome-extensions = handleTest ./gnome-extensions.nix {};
   gnome-flashback = handleTest ./gnome-flashback.nix {};
   gnome-xorg = handleTest ./gnome-xorg.nix {};
+  gns3-server = handleTest ./gns3-server.nix {};
   gnupg = handleTest ./gnupg.nix {};
   go-neb = handleTest ./go-neb.nix {};
   gobgpd = handleTest ./gobgpd.nix {};
@@ -357,6 +364,7 @@ in {
   grow-partition = runTest ./grow-partition.nix;
   grub = handleTest ./grub.nix {};
   guacamole-server = handleTest ./guacamole-server.nix {};
+  guix = handleTest ./guix {};
   gvisor = handleTest ./gvisor.nix {};
   hadoop = import ./hadoop { inherit handleTestOn; package=pkgs.hadoop; };
   hadoop_3_2 = import ./hadoop { inherit handleTestOn; package=pkgs.hadoop_3_2; };
@@ -403,7 +411,7 @@ in {
   incus = pkgs.recurseIntoAttrs (handleTest ./incus { inherit handleTestOn; });
   influxdb = handleTest ./influxdb.nix {};
   influxdb2 = handleTest ./influxdb2.nix {};
-  initrd-network-openvpn = handleTest ./initrd-network-openvpn {};
+  initrd-network-openvpn = handleTestOn [ "x86_64-linux" "i686-linux" ] ./initrd-network-openvpn {};
   initrd-network-ssh = handleTest ./initrd-network-ssh {};
   initrd-luks-empty-passphrase = handleTest ./initrd-luks-empty-passphrase.nix {};
   initrdNetwork = handleTest ./initrd-network.nix {};
@@ -538,6 +546,7 @@ in {
   munin = handleTest ./munin.nix {};
   mutableUsers = handleTest ./mutable-users.nix {};
   mxisd = handleTest ./mxisd.nix {};
+  mympd = handleTest ./mympd.nix {};
   mysql = handleTest ./mysql/mysql.nix {};
   mysql-autobackup = handleTest ./mysql/mysql-autobackup.nix {};
   mysql-backup = handleTest ./mysql/mysql-backup.nix {};
@@ -574,12 +583,15 @@ in {
   nginx = handleTest ./nginx.nix {};
   nginx-auth = handleTest ./nginx-auth.nix {};
   nginx-etag = handleTest ./nginx-etag.nix {};
+  nginx-etag-compression = handleTest ./nginx-etag-compression.nix {};
   nginx-globalredirect = handleTest ./nginx-globalredirect.nix {};
   nginx-http3 = handleTest ./nginx-http3.nix {};
   nginx-modsecurity = handleTest ./nginx-modsecurity.nix {};
+  nginx-moreheaders = handleTest ./nginx-moreheaders.nix {};
   nginx-njs = handleTest ./nginx-njs.nix {};
   nginx-proxyprotocol = handleTest ./nginx-proxyprotocol {};
   nginx-pubhtml = handleTest ./nginx-pubhtml.nix {};
+  nginx-redirectcode = handleTest ./nginx-redirectcode.nix {};
   nginx-sso = handleTest ./nginx-sso.nix {};
   nginx-status-page = handleTest ./nginx-status-page.nix {};
   nginx-tmpdir = handleTest ./nginx-tmpdir.nix {};
@@ -594,7 +606,9 @@ in {
   nixos-generate-config = handleTest ./nixos-generate-config.nix {};
   nixos-rebuild-install-bootloader = handleTestOn ["x86_64-linux"] ./nixos-rebuild-install-bootloader.nix {};
   nixos-rebuild-specialisations = handleTestOn ["x86_64-linux"] ./nixos-rebuild-specialisations.nix {};
+  nixos-rebuild-target-host = handleTest ./nixos-rebuild-target-host.nix {};
   nixpkgs = pkgs.callPackage ../modules/misc/nixpkgs/test.nix { inherit evalMinimalConfig; };
+  nixseparatedebuginfod = handleTest ./nixseparatedebuginfod.nix {};
   node-red = handleTest ./node-red.nix {};
   nomad = handleTest ./nomad.nix {};
   non-default-filesystems = handleTest ./non-default-filesystems.nix {};
@@ -602,9 +616,12 @@ in {
   noto-fonts = handleTest ./noto-fonts.nix {};
   noto-fonts-cjk-qt-default-weight = handleTest ./noto-fonts-cjk-qt-default-weight.nix {};
   novacomd = handleTestOn ["x86_64-linux"] ./novacomd.nix {};
+  npmrc = handleTest ./npmrc.nix {};
   nscd = handleTest ./nscd.nix {};
   nsd = handleTest ./nsd.nix {};
   ntfy-sh = handleTest ./ntfy-sh.nix {};
+  ntfy-sh-migration = handleTest ./ntfy-sh-migration.nix {};
+  ntpd-rs = handleTest ./ntpd-rs.nix {};
   nzbget = handleTest ./nzbget.nix {};
   nzbhydra2 = handleTest ./nzbhydra2.nix {};
   oh-my-zsh = handleTest ./oh-my-zsh.nix {};
@@ -713,6 +730,7 @@ in {
   qgis-ltr = handleTest ./qgis.nix { qgisPackage = pkgs.qgis-ltr; };
   qownnotes = handleTest ./qownnotes.nix {};
   quake3 = handleTest ./quake3.nix {};
+  quicktun = handleTest ./quicktun.nix {};
   quorum = handleTest ./quorum.nix {};
   rabbitmq = handleTest ./rabbitmq.nix {};
   radarr = handleTest ./radarr.nix {};
@@ -731,6 +749,7 @@ in {
   rosenpass = handleTest ./rosenpass.nix {};
   rshim = handleTest ./rshim.nix {};
   rspamd = handleTest ./rspamd.nix {};
+  rspamd-trainer = handleTest ./rspamd-trainer.nix {};
   rss2email = handleTest ./rss2email.nix {};
   rstudio-server = handleTest ./rstudio-server.nix {};
   rsyncd = handleTest ./rsyncd.nix {};
@@ -759,6 +778,7 @@ in {
   sing-box = handleTest ./sing-box.nix {};
   slimserver = handleTest ./slimserver.nix {};
   slurm = handleTest ./slurm.nix {};
+  snmpd = handleTest ./snmpd.nix {};
   smokeping = handleTest ./smokeping.nix {};
   snapcast = handleTest ./snapcast.nix {};
   snapper = handleTest ./snapper.nix {};
@@ -774,6 +794,7 @@ in {
   spark = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./spark {};
   sqlite3-to-mysql = handleTest ./sqlite3-to-mysql.nix {};
   sslh = handleTest ./sslh.nix {};
+  ssh-agent-auth = handleTest ./ssh-agent-auth.nix {};
   ssh-audit = handleTest ./ssh-audit.nix {};
   sssd = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./sssd.nix {};
   sssd-ldap = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./sssd-ldap.nix {};
@@ -784,9 +805,11 @@ in {
   step-ca = handleTestOn ["x86_64-linux"] ./step-ca.nix {};
   stratis = handleTest ./stratis {};
   strongswan-swanctl = handleTest ./strongswan-swanctl.nix {};
+  stub-ld = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./stub-ld.nix {};
   stunnel = handleTest ./stunnel.nix {};
   sudo = handleTest ./sudo.nix {};
   sudo-rs = handleTest ./sudo-rs.nix {};
+  suwayomi-server = handleTest ./suwayomi-server.nix {};
   swap-file-btrfs = handleTest ./swap-file-btrfs.nix {};
   swap-partition = handleTest ./swap-partition.nix {};
   swap-random-encryption = handleTest ./swap-random-encryption.nix {};
@@ -798,6 +821,7 @@ in {
   syncthing-init = handleTest ./syncthing-init.nix {};
   syncthing-many-devices = handleTest ./syncthing-many-devices.nix {};
   syncthing-relay = handleTest ./syncthing-relay.nix {};
+  sysinit-reactivation = runTest ./sysinit-reactivation.nix;
   systemd = handleTest ./systemd.nix {};
   systemd-analyze = handleTest ./systemd-analyze.nix {};
   systemd-binfmt = handleTestOn ["x86_64-linux"] ./systemd-binfmt.nix {};
@@ -823,9 +847,11 @@ in {
   systemd-initrd-vconsole = handleTest ./systemd-initrd-vconsole.nix {};
   systemd-initrd-networkd = handleTest ./systemd-initrd-networkd.nix {};
   systemd-initrd-networkd-ssh = handleTest ./systemd-initrd-networkd-ssh.nix {};
-  systemd-initrd-networkd-openvpn = handleTest ./initrd-network-openvpn { systemdStage1 = true; };
+  systemd-initrd-networkd-openvpn = handleTestOn [ "x86_64-linux" "i686-linux" ] ./initrd-network-openvpn { systemdStage1 = true; };
   systemd-initrd-vlan = handleTest ./systemd-initrd-vlan.nix {};
   systemd-journal = handleTest ./systemd-journal.nix {};
+  systemd-journal-gateway = handleTest ./systemd-journal-gateway.nix {};
+  systemd-journal-upload = handleTest ./systemd-journal-upload.nix {};
   systemd-machinectl = handleTest ./systemd-machinectl.nix {};
   systemd-networkd = handleTest ./systemd-networkd.nix {};
   systemd-networkd-dhcpserver = handleTest ./systemd-networkd-dhcpserver.nix {};
@@ -841,10 +867,12 @@ in {
   systemd-shutdown = handleTest ./systemd-shutdown.nix {};
   systemd-sysupdate = runTest ./systemd-sysupdate.nix;
   systemd-timesyncd = handleTest ./systemd-timesyncd.nix {};
+  systemd-timesyncd-nscd-dnssec = handleTest ./systemd-timesyncd-nscd-dnssec.nix {};
   systemd-user-tmpfiles-rules = handleTest ./systemd-user-tmpfiles-rules.nix {};
   systemd-misc = handleTest ./systemd-misc.nix {};
   systemd-userdbd = handleTest ./systemd-userdbd.nix {};
   systemd-homed = handleTest ./systemd-homed.nix {};
+  systemtap = handleTest ./systemtap.nix {};
   tandoor-recipes = handleTest ./tandoor-recipes.nix {};
   tang = handleTest ./tang.nix {};
   taskserver = handleTest ./taskserver.nix {};
@@ -889,7 +917,8 @@ in {
   unbound = handleTest ./unbound.nix {};
   unifi = handleTest ./unifi.nix {};
   unit-php = handleTest ./web-servers/unit-php.nix {};
-  upnp = handleTest ./upnp.nix {};
+  upnp.iptables = handleTest ./upnp.nix { useNftables = false; };
+  upnp.nftables = handleTest ./upnp.nix { useNftables = true; };
   uptermd = handleTest ./uptermd.nix {};
   uptime-kuma = handleTest ./uptime-kuma.nix {};
   usbguard = handleTest ./usbguard.nix {};
@@ -915,6 +944,7 @@ in {
   vsftpd = handleTest ./vsftpd.nix {};
   warzone2100 = handleTest ./warzone2100.nix {};
   wasabibackend = handleTest ./wasabibackend.nix {};
+  watchdogd = handleTest ./watchdogd.nix {};
   webhook = runTest ./webhook.nix;
   wiki-js = handleTest ./wiki-js.nix {};
   wine = handleTest ./wine.nix {};
@@ -932,6 +962,7 @@ in {
   xmonad-xdg-autostart = handleTest ./xmonad-xdg-autostart.nix {};
   xpadneo = handleTest ./xpadneo.nix {};
   xrdp = handleTest ./xrdp.nix {};
+  xrdp-with-audio-pulseaudio = handleTest ./xrdp-with-audio-pulseaudio.nix {};
   xscreensaver = handleTest ./xscreensaver.nix {};
   xss-lock = handleTest ./xss-lock.nix {};
   xterm = handleTest ./xterm.nix {};
diff --git a/nixos/tests/anbox.nix b/nixos/tests/anbox.nix
index dfd6c13d93181..a00116536db7e 100644
--- a/nixos/tests/anbox.nix
+++ b/nixos/tests/anbox.nix
@@ -15,7 +15,7 @@
     test-support.displayManager.auto.user = "alice";
 
     virtualisation.anbox.enable = true;
-    boot.kernelPackages = pkgs.linuxPackages_5_15;
+    boot.kernelPackages = pkgs.linuxKernel.packages.linux_5_15;
     virtualisation.memorySize = 2500;
   };
 
diff --git a/nixos/tests/angie-api.nix b/nixos/tests/angie-api.nix
new file mode 100644
index 0000000000000..4c8d6b54247b1
--- /dev/null
+++ b/nixos/tests/angie-api.nix
@@ -0,0 +1,148 @@
+import ./make-test-python.nix ({lib, pkgs, ...}:
+let
+  hosts = ''
+    192.168.2.101 example.com
+    192.168.2.101 api.example.com
+    192.168.2.101 backend.example.com
+  '';
+
+in
+{
+  name = "angie-api";
+  meta.maintainers = with pkgs.lib.maintainers; [ izorkin ];
+
+  nodes = {
+    server = { pkgs, ... }: {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.101"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+        firewall.allowedTCPPorts = [ 80 ];
+      };
+
+      services.nginx = {
+        enable = true;
+        package = pkgs.angie;
+
+        upstreams = {
+          "backend-http" = {
+            servers = { "backend.example.com:8080" = { fail_timeout = "0"; }; };
+            extraConfig = ''
+              zone upstream 256k;
+            '';
+          };
+          "backend-socket" = {
+            servers = { "unix:/run/example.sock" = { fail_timeout = "0"; }; };
+            extraConfig = ''
+              zone upstream 256k;
+            '';
+          };
+        };
+
+        virtualHosts."api.example.com" = {
+          locations."/console/" = {
+            extraConfig = ''
+              api /status/;
+
+              allow 192.168.2.201;
+              deny all;
+            '';
+          };
+        };
+
+        virtualHosts."example.com" = {
+          locations."/test/" = {
+            root = lib.mkForce (pkgs.runCommandLocal "testdir" {} ''
+              mkdir -p "$out/test"
+              cat > "$out/test/index.html" <<EOF
+              <html><body>Hello World!</body></html>
+              EOF
+            '');
+            extraConfig = ''
+              status_zone test_zone;
+
+              allow 192.168.2.201;
+              deny all;
+            '';
+          };
+          locations."/test/locked/" = {
+            extraConfig = ''
+              status_zone test_zone;
+
+              deny all;
+            '';
+          };
+          locations."/test/error/" = {
+            extraConfig = ''
+              status_zone test_zone;
+
+              allow all;
+            '';
+          };
+          locations."/upstream-http/" = {
+            proxyPass = "http://backend-http";
+          };
+          locations."/upstream-socket/" = {
+            proxyPass = "http://backend-socket";
+          };
+        };
+      };
+    };
+
+    client = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.201"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("nginx")
+    server.wait_for_open_port(80)
+
+    # Check Angie version
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.angie.version' | grep '${pkgs.angie.version}'")
+
+    # Check access
+    client.succeed("curl --verbose --head http://api.example.com/console/ | grep 'HTTP/1.1 200'")
+    server.succeed("curl --verbose --head http://api.example.com/console/ | grep 'HTTP/1.1 403 Forbidden'")
+
+    # Check responses and requests
+    client.succeed("curl --verbose http://example.com/test/")
+    client.succeed("curl --verbose http://example.com/test/locked/")
+    client.succeed("curl --verbose http://example.com/test/locked/")
+    client.succeed("curl --verbose http://example.com/test/error/")
+    client.succeed("curl --verbose http://example.com/test/error/")
+    client.succeed("curl --verbose http://example.com/test/error/")
+    server.succeed("curl --verbose http://example.com/test/")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.location_zones.test_zone.responses.\"200\"' | grep '1'")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.location_zones.test_zone.responses.\"403\"' | grep '3'")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.location_zones.test_zone.responses.\"404\"' | grep '3'")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.location_zones.test_zone.requests.total' | grep '7'")
+
+    # Check upstreams
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.upstreams.\"backend-http\".peers.\"192.168.2.101:8080\".state' | grep 'up'")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.upstreams.\"backend-http\".peers.\"192.168.2.101:8080\".health.fails' | grep '0'")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.upstreams.\"backend-socket\".peers.\"unix:/run/example.sock\".state' | grep 'up'")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.upstreams.\"backend-socket\".peers.\"unix:/run/example.sock\".health.fails' | grep '0'")
+    client.succeed("curl --verbose http://example.com/upstream-http/")
+    client.succeed("curl --verbose http://example.com/upstream-socket/")
+    client.succeed("curl --verbose http://example.com/upstream-socket/")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.upstreams.\"backend-http\".peers.\"192.168.2.101:8080\".health.fails' | grep '1'")
+    client.succeed("curl --verbose http://api.example.com/console/ | jq -e '.http.upstreams.\"backend-socket\".peers.\"unix:/run/example.sock\".health.fails' | grep '2'")
+
+    server.shutdown()
+    client.shutdown()
+  '';
+})
diff --git a/nixos/tests/appliance-repart-image.nix b/nixos/tests/appliance-repart-image.nix
index 3f256db846214..1c4495baba131 100644
--- a/nixos/tests/appliance-repart-image.nix
+++ b/nixos/tests/appliance-repart-image.nix
@@ -8,6 +8,9 @@
 let
   rootPartitionLabel = "root";
 
+  imageId = "nixos-appliance";
+  imageVersion = "1-rc1";
+
   bootLoaderConfigPath = "/loader/entries/nixos.conf";
   kernelPath = "/EFI/nixos/kernel.efi";
   initrdPath = "/EFI/nixos/initrd.efi";
@@ -29,6 +32,9 @@ in
     # TODO(raitobezarius): revisit this when #244907 lands
     boot.loader.grub.enable = false;
 
+    system.image.id = imageId;
+    system.image.version = imageVersion;
+
     virtualisation.fileSystems = lib.mkForce {
       "/" = {
         device = "/dev/disk/by-partlabel/${rootPartitionLabel}";
@@ -99,7 +105,7 @@ in
       "-f",
       "qcow2",
       "-b",
-      "${nodes.machine.system.build.image}/image.raw",
+      "${nodes.machine.system.build.image}/${nodes.machine.image.repart.imageFile}",
       "-F",
       "raw",
       tmp_disk_image.name,
@@ -108,6 +114,10 @@ in
     # Set NIX_DISK_IMAGE so that the qemu script finds the right disk image.
     os.environ['NIX_DISK_IMAGE'] = tmp_disk_image.name
 
+    os_release = machine.succeed("cat /etc/os-release")
+    assert 'IMAGE_ID="${imageId}"' in os_release
+    assert 'IMAGE_VERSION="${imageVersion}"' in os_release
+
     bootctl_status = machine.succeed("bootctl status")
     assert "${bootLoaderConfigPath}" in bootctl_status
     assert "${kernelPath}" in bootctl_status
diff --git a/nixos/tests/auth-mysql.nix b/nixos/tests/auth-mysql.nix
index 0ed4b050a69a4..77a69eb1cd581 100644
--- a/nixos/tests/auth-mysql.nix
+++ b/nixos/tests/auth-mysql.nix
@@ -84,7 +84,7 @@ in
           getpwuid = ''
             SELECT name, 'x', uid, gid, name, CONCAT('/home/', name), "/run/current-system/sw/bin/bash" \
             FROM users \
-            WHERE id=%1$u \
+            WHERE uid=%1$u \
             LIMIT 1
           '';
           getspnam = ''
@@ -140,6 +140,7 @@ in
 
     machine.wait_for_unit("multi-user.target")
     machine.wait_for_unit("mysql.service")
+    machine.wait_until_succeeds("cat /etc/security/pam_mysql.conf | grep users.db_passwd")
     machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
 
     with subtest("Local login"):
diff --git a/nixos/tests/avahi.nix b/nixos/tests/avahi.nix
index c53a95903291c..d8f4d13340fbc 100644
--- a/nixos/tests/avahi.nix
+++ b/nixos/tests/avahi.nix
@@ -16,7 +16,7 @@ import ./make-test-python.nix {
     cfg = { ... }: {
       services.avahi = {
         enable = true;
-        nssmdns = true;
+        nssmdns4 = true;
         publish.addresses = true;
         publish.domain = true;
         publish.enable = true;
diff --git a/nixos/tests/ayatana-indicators.nix b/nixos/tests/ayatana-indicators.nix
new file mode 100644
index 0000000000000..2111a4a65b920
--- /dev/null
+++ b/nixos/tests/ayatana-indicators.nix
@@ -0,0 +1,89 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: let
+  user = "alice";
+in {
+  name = "ayatana-indicators";
+
+  meta = {
+    maintainers = lib.teams.lomiri.members;
+  };
+
+  nodes.machine = { config, ... }: {
+    imports = [
+      ./common/auto.nix
+      ./common/user-account.nix
+    ];
+
+    test-support.displayManager.auto = {
+      enable = true;
+      inherit user;
+    };
+
+    services.xserver = {
+      enable = true;
+      desktopManager.mate.enable = true;
+      displayManager.defaultSession = lib.mkForce "mate";
+    };
+
+    services.ayatana-indicators = {
+      enable = true;
+      packages = with pkgs; [
+        ayatana-indicator-messages
+      ] ++ (with pkgs.lomiri; [
+        lomiri-indicator-network
+      ]);
+    };
+
+    # Setup needed by some indicators
+
+    services.accounts-daemon.enable = true; # messages
+
+    # Lomiri-ish setup for Lomiri indicators
+    # TODO move into a Lomiri module, once the package set is far enough for the DE to start
+
+    networking.networkmanager.enable = true; # lomiri-network-indicator
+    # TODO potentially urfkill for lomiri-network-indicator?
+  };
+
+  # TODO: the session indicator starts up in a semi-broken state, but works fine after a restart. Maybe it is being started before the graphical session is truly up and ready?
+  testScript = { nodes, ... }: let
+    runCommandOverServiceList = list: command:
+      lib.strings.concatMapStringsSep "\n" command list;
+
+    runCommandOverAyatanaIndicators = runCommandOverServiceList
+      (builtins.filter
+        (service: !(lib.strings.hasPrefix "lomiri" service || lib.strings.hasPrefix "telephony-service" service))
+        nodes.machine.systemd.user.targets."ayatana-indicators".wants);
+
+    runCommandOverAllIndicators = runCommandOverServiceList
+      nodes.machine.systemd.user.targets."ayatana-indicators".wants;
+  in ''
+    start_all()
+    machine.wait_for_x()
+
+    # Desktop environment should reach graphical-session.target
+    machine.wait_for_unit("graphical-session.target", "${user}")
+
+    # MATE relies on XDG autostart to bring up the indicators.
+    # It's not clear *when* XDG autostart fires them up, and waiting for pgrep to succeed seems to misbehave.
+    machine.sleep(10)
+
+    # Now check if all indicators were brought up successfully, and kill them for later
+  '' + (runCommandOverAyatanaIndicators (service: let serviceExec = builtins.replaceStrings [ "." ] [ "-" ] service; in ''
+    machine.succeed("pgrep -f ${serviceExec}")
+    machine.succeed("pkill -f ${serviceExec}")
+  '')) + ''
+
+    # The Ayatana target is the preferred way of starting up indicators in a systemd session; the graphical session is responsible for starting it if it supports them.
+    # MATE currently doesn't do this, so start it manually for checking (https://github.com/mate-desktop/mate-indicator-applet/issues/63)
+    machine.systemctl("start ayatana-indicators.target", "${user}")
+    machine.wait_for_unit("ayatana-indicators.target", "${user}")
+
+    # Let all indicator services finish their startup, including any post-launch crash & restart cycles, so we can properly check for failures.
+    # It's not clear whether there's a better way of awaiting this without the potential for false positives.
+    machine.sleep(10)
+
+    # Now check if all indicator services were brought up successfully
+  '' + runCommandOverAllIndicators (service: ''
+    machine.wait_for_unit("${service}", "${user}")
+  '');
+})
diff --git a/nixos/tests/babeld.nix b/nixos/tests/babeld.nix
index d4df6f86d089d..e497aa5b64e15 100644
--- a/nixos/tests/babeld.nix
+++ b/nixos/tests/babeld.nix
@@ -120,10 +120,6 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
     ''
       start_all()
 
-      client.wait_for_unit("network-online.target")
-      local_router.wait_for_unit("network-online.target")
-      remote_router.wait_for_unit("network-online.target")
-
       local_router.wait_for_unit("babeld.service")
       remote_router.wait_for_unit("babeld.service")
 
diff --git a/nixos/tests/bittorrent.nix b/nixos/tests/bittorrent.nix
index 4a73fea6a09d0..473b05d4c98e8 100644
--- a/nixos/tests/bittorrent.nix
+++ b/nixos/tests/bittorrent.nix
@@ -115,6 +115,7 @@ in
       start_all()
 
       # Wait for network and miniupnpd.
+      router.systemctl("start network-online.target")
       router.wait_for_unit("network-online.target")
       router.wait_for_unit("miniupnpd")
 
@@ -129,6 +130,7 @@ in
       tracker.succeed("chmod 644 /tmp/test.torrent")
 
       # Start the tracker.  !!! use a less crappy tracker
+      tracker.systemctl("start network-online.target")
       tracker.wait_for_unit("network-online.target")
       tracker.wait_for_unit("opentracker.service")
       tracker.wait_for_open_port(6969)
@@ -140,6 +142,7 @@ in
 
       # Now we should be able to download from the client behind the NAT.
       tracker.wait_for_unit("httpd")
+      client1.systemctl("start network-online.target")
       client1.wait_for_unit("network-online.target")
       client1.succeed("transmission-remote --add http://${externalTrackerAddress}/test.torrent >&2 &")
       client1.wait_for_file("${download-dir}/test.tar.bz2")
@@ -152,6 +155,7 @@ in
 
       # Now download from the second client.  This can only succeed if
       # the first client created a NAT hole in the router.
+      client2.systemctl("start network-online.target")
       client2.wait_for_unit("network-online.target")
       client2.succeed(
           "transmission-remote --add http://${externalTrackerAddress}/test.torrent --no-portmap --no-dht >&2 &"
diff --git a/nixos/tests/bootspec.nix b/nixos/tests/bootspec.nix
index 9295500422a92..14928b2206251 100644
--- a/nixos/tests/bootspec.nix
+++ b/nixos/tests/bootspec.nix
@@ -112,10 +112,39 @@ in
 
       bootspec = json.loads(machine.succeed("jq -r '.\"org.nixos.bootspec.v1\"' /run/current-system/boot.json"))
 
-      assert all(key in bootspec for key in ('initrd', 'initrdSecrets')), "Bootspec should contain initrd or initrdSecrets field when initrd is enabled"
+      assert 'initrd' in bootspec, "Bootspec should contain initrd field when initrd is enabled"
+      assert 'initrdSecrets' not in bootspec, "Bootspec should not contain initrdSecrets when there's no initrdSecrets"
     '';
   };
 
+  # Check that initrd secrets create corresponding entries in bootspec.
+  initrd-secrets = makeTest {
+    name = "bootspec-with-initrd-secrets";
+    meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+
+    nodes.machine = {
+      imports = [ standard ];
+      environment.systemPackages = [ pkgs.jq ];
+      # initrd is probably enabled already, but we want to make it explicit here.
+      boot.initrd.enable = true;
+      boot.initrd.secrets."/some/example" = pkgs.writeText "example-secret" "test";
+    };
+
+    testScript = ''
+      import json
+
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("test -e /run/current-system/boot.json")
+
+      bootspec = json.loads(machine.succeed("jq -r '.\"org.nixos.bootspec.v1\"' /run/current-system/boot.json"))
+
+      assert 'initrdSecrets' in bootspec, "Bootspec should contain an 'initrdSecrets' field given there's an initrd secret"
+    '';
+  };
+
+
   # Check that specialisations create corresponding entries in bootspec.
   specialisation = makeTest {
     name = "bootspec-with-specialisation";
diff --git a/nixos/tests/btrbk-section-order.nix b/nixos/tests/btrbk-section-order.nix
index 20f1afcf80ec7..6082de947f66f 100644
--- a/nixos/tests/btrbk-section-order.nix
+++ b/nixos/tests/btrbk-section-order.nix
@@ -29,10 +29,12 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
   };
 
   testScript = ''
+    import difflib
     machine.wait_for_unit("basic.target")
-    got = machine.succeed("cat /etc/btrbk/local.conf")
+    got = machine.succeed("cat /etc/btrbk/local.conf").strip()
     expect = """
     backend btrfs-progs-sudo
+    stream_compress no
     timestamp_format long
     target ssh://global-target/
      ssh_user root
@@ -46,6 +48,9 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
        ssh_user root
     """.strip()
     print(got)
+    if got != expect:
+      diff = difflib.unified_diff(expect.splitlines(keepends=True), got.splitlines(keepends=True), fromfile="expected", tofile="got")
+      print("".join(diff))
     assert got == expect
   '';
 })
diff --git a/nixos/tests/btrbk.nix b/nixos/tests/btrbk.nix
index 5261321dfa2c5..403c9595530d8 100644
--- a/nixos/tests/btrbk.nix
+++ b/nixos/tests/btrbk.nix
@@ -27,7 +27,6 @@ import ./make-test-python.nix ({ pkgs, ... }:
         # don't do it with real ssh keys.
         environment.etc."btrbk_key".text = privateKey;
         services.btrbk = {
-          extraPackages = [ pkgs.lz4 ];
           instances = {
             remote = {
               onCalendar = "minutely";
diff --git a/nixos/tests/buildbot.nix b/nixos/tests/buildbot.nix
index dbf68aba9467f..149d73bba09c5 100644
--- a/nixos/tests/buildbot.nix
+++ b/nixos/tests/buildbot.nix
@@ -71,6 +71,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     gitrepo.wait_for_unit("multi-user.target")
 
     with subtest("Repo is accessible via git daemon"):
+        bbmaster.systemctl("start network-online.target")
         bbmaster.wait_for_unit("network-online.target")
         bbmaster.succeed("rm -rfv /tmp/fakerepo")
         bbmaster.succeed("git clone git://gitrepo/fakerepo /tmp/fakerepo")
@@ -78,6 +79,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     with subtest("Master service and worker successfully connect"):
         bbmaster.wait_for_unit("buildbot-master.service")
         bbmaster.wait_until_succeeds("curl --fail -s --head http://bbmaster:8010")
+        bbworker.systemctl("start network-online.target")
         bbworker.wait_for_unit("network-online.target")
         bbworker.succeed("nc -z bbmaster 8010")
         bbworker.succeed("nc -z bbmaster 9989")
@@ -104,5 +106,5 @@ import ./make-test-python.nix ({ pkgs, ... }: {
         bbworker.fail("nc -z bbmaster 8011")
   '';
 
-  meta.maintainers = with pkgs.lib.maintainers; [ ];
+  meta.maintainers = pkgs.lib.teams.buildbot.members;
 })
diff --git a/nixos/tests/c2fmzq.nix b/nixos/tests/c2fmzq.nix
index d8ec816c7d29c..0dd89f6881dd9 100644
--- a/nixos/tests/c2fmzq.nix
+++ b/nixos/tests/c2fmzq.nix
@@ -9,6 +9,10 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
       passphraseFile = builtins.toFile "pwfile" "hunter2"; # don't do this on real deployments
       settings = {
         verbose = 3; # debug
+        # make sure multiple freeform options evaluate
+        allow-new-accounts = true;
+        auto-approve-new-accounts = true;
+        licenses = false;
       };
     };
     environment = {
@@ -71,5 +75,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     with subtest("Test that PWA is served"):
         msg = machine.succeed("curl -sSfL http://localhost:8080")
         assert "c2FmZQ" in msg, f"Could not find 'c2FmZQ' in the output:\n{msg}"
+
+    with subtest("A setting with false value is properly passed"):
+        machine.succeed("systemctl show -p ExecStart --value c2fmzq-server.service | grep -F -- '--licenses=false'");
   '';
 })
diff --git a/nixos/tests/caddy.nix b/nixos/tests/caddy.nix
index 5a0d3539394b6..41d8e57de4686 100644
--- a/nixos/tests/caddy.nix
+++ b/nixos/tests/caddy.nix
@@ -48,11 +48,19 @@ import ./make-test-python.nix ({ pkgs, ... }: {
           };
         };
       };
+      specialisation.explicit-config-file.configuration = {
+        services.caddy.configFile = pkgs.writeText "Caddyfile" ''
+        localhost:80
+
+        respond "hello world"
+        '';
+      };
     };
   };
 
   testScript = { nodes, ... }:
     let
+      explicitConfigFile = "${nodes.webserver.system.build.toplevel}/specialisation/explicit-config-file";
       justReloadSystem = "${nodes.webserver.system.build.toplevel}/specialisation/config-reload";
       multipleConfigs = "${nodes.webserver.system.build.toplevel}/specialisation/multiple-configs";
       rfc42Config = "${nodes.webserver.system.build.toplevel}/specialisation/rfc42";
@@ -84,5 +92,12 @@ import ./make-test-python.nix ({ pkgs, ... }: {
           )
           webserver.wait_for_open_port(80)
           webserver.succeed("curl http://localhost | grep hello")
+
+      with subtest("explicit configFile"):
+          webserver.succeed(
+              "${explicitConfigFile}/bin/switch-to-configuration test >&2"
+          )
+          webserver.wait_for_open_port(80)
+          webserver.succeed("curl http://localhost | grep hello")
     '';
 })
diff --git a/nixos/tests/ceph-single-node.nix b/nixos/tests/ceph-single-node.nix
index 4a5636fac1563..a3a4072365af8 100644
--- a/nixos/tests/ceph-single-node.nix
+++ b/nixos/tests/ceph-single-node.nix
@@ -182,16 +182,19 @@ let
     monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
     monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
 
+    # This test has been commented out due to an upstream issue with pyo3,
+    # which has broken this dashboard.
+    # Reference: https://www.spinics.net/lists/ceph-users/msg77812.html
     # Enable the dashboard and recheck health
-    monA.succeed(
-        "ceph mgr module enable dashboard",
-        "ceph config set mgr mgr/dashboard/ssl false",
-        # default is 8080 but it's better to be explicit
-        "ceph config set mgr mgr/dashboard/server_port 8080",
-    )
-    monA.wait_for_open_port(8080)
-    monA.wait_until_succeeds("curl -q --fail http://localhost:8080")
-    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+    # monA.succeed(
+    #     "ceph mgr module enable dashboard",
+    #     "ceph config set mgr mgr/dashboard/ssl false",
+    #     # default is 8080 but it's better to be explicit
+    #     "ceph config set mgr mgr/dashboard/server_port 8080",
+    # )
+    # monA.wait_for_open_port(8080)
+    # monA.wait_until_succeeds("curl -q --fail http://localhost:8080")
+    # monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
   '';
 in {
   name = "basic-single-node-ceph-cluster";
diff --git a/nixos/tests/cinnamon-wayland.nix b/nixos/tests/cinnamon-wayland.nix
new file mode 100644
index 0000000000000..824a606004cc0
--- /dev/null
+++ b/nixos/tests/cinnamon-wayland.nix
@@ -0,0 +1,77 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "cinnamon-wayland";
+
+  meta.maintainers = lib.teams.cinnamon.members;
+
+  nodes.machine = { nodes, ... }: {
+    imports = [ ./common/user-account.nix ];
+    services.xserver.enable = true;
+    services.xserver.desktopManager.cinnamon.enable = true;
+    services.xserver.displayManager = {
+      autoLogin.enable = true;
+      autoLogin.user = nodes.machine.users.users.alice.name;
+      defaultSession = "cinnamon-wayland";
+    };
+
+    # For the sessionPath subtest.
+    services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gnome.gpaste ];
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }:
+    let
+      user = nodes.machine.users.users.alice;
+      env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus";
+      su = command: "su - ${user.name} -c '${env} ${command}'";
+
+      # Call JavaScript in Cinnamon (the shell); returns a tuple (success, output),
+      # where `success` is true if the D-Bus call was successful and `output` is what
+      # the JavaScript evaluates to.
+      eval = name: su "gdbus call --session -d org.Cinnamon -o /org/Cinnamon -m org.Cinnamon.Eval ${name}";
+    in
+    ''
+      machine.wait_for_unit("display-manager.service")
+
+      with subtest("Wait for wayland server"):
+          machine.wait_for_file("/run/user/${toString user.uid}/wayland-0")
+
+      with subtest("Check that logging in has given the user ownership of devices"):
+          machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
+
+      with subtest("Wait for the Cinnamon shell"):
+          # Correct output should be (true, '2')
+          # https://github.com/linuxmint/cinnamon/blob/5.4.0/js/ui/main.js#L183-L187
+          machine.wait_until_succeeds("${eval "Main.runState"} | grep -q 'true,..2'")
+
+      with subtest("Check if Cinnamon components actually start"):
+          for i in ["csd-media-keys", "xapp-sn-watcher", "nemo-desktop"]:
+            machine.wait_until_succeeds(f"pgrep -f {i}")
+          machine.wait_until_succeeds("journalctl -b --grep 'Loaded applet menu@cinnamon.org'")
+          machine.wait_until_succeeds("journalctl -b --grep 'calendar@cinnamon.org: Calendar events supported'")
+
+      with subtest("Check if sessionPath option actually works"):
+          machine.succeed("${eval "imports.gi.GIRepository.Repository.get_search_path\\(\\)"} | grep gpaste")
+
+      with subtest("Open Cinnamon Settings"):
+          machine.succeed("${su "cinnamon-settings themes >&2 &"}")
+          machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'cinnamon-settings'")
+          machine.wait_for_text('(Style|Appearance|Color)')
+          machine.sleep(2)
+          machine.screenshot("cinnamon_settings")
+
+      with subtest("Check if screensaver works"):
+          # This is not supported at the moment.
+          # https://trello.com/b/HHs01Pab/cinnamon-wayland
+          machine.execute("${su "cinnamon-screensaver-command -l >&2 &"}")
+          machine.wait_until_succeeds("journalctl -b --grep 'Cinnamon Screensaver is unavailable on Wayland'")
+
+      with subtest("Open GNOME Terminal"):
+          machine.succeed("${su "dbus-launch gnome-terminal"}")
+          machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'gnome-terminal'")
+          machine.sleep(2)
+
+      with subtest("Check if Cinnamon has ever coredumped"):
+          machine.fail("coredumpctl --json=short | grep -E 'cinnamon|nemo'")
+    '';
+})
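The eval helper above wraps "gdbus call ... org.Cinnamon.Eval", and gdbus prints the reply as a GVariant tuple such as (true, '2') for Main.runState, which is what the grep for "true,..2" matches. A sketch of splitting such a reply back into its two halves (the reply string is the example from the test; the parsing itself is illustrative):

    import re

    # Split a gdbus reply like "(true, '2')" into the success flag and the output.
    reply = "(true, '2')"
    match = re.fullmatch(r"\((true|false), '(.*)'\)", reply.strip())
    assert match is not None
    success = match.group(1) == "true"
    output = match.group(2)
    assert success and output == "2"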
diff --git a/nixos/tests/cinnamon.nix b/nixos/tests/cinnamon.nix
index 7637b55a2b124..eab907d0b712c 100644
--- a/nixos/tests/cinnamon.nix
+++ b/nixos/tests/cinnamon.nix
@@ -7,6 +7,9 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     imports = [ ./common/user-account.nix ];
     services.xserver.enable = true;
     services.xserver.desktopManager.cinnamon.enable = true;
+
+    # For the sessionPath subtest.
+    services.xserver.desktopManager.cinnamon.sessionPath = [ pkgs.gnome.gpaste ];
   };
 
   enableOCR = true;
@@ -49,6 +52,9 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
           machine.wait_until_succeeds("journalctl -b --grep 'Loaded applet menu@cinnamon.org'")
           machine.wait_until_succeeds("journalctl -b --grep 'calendar@cinnamon.org: Calendar events supported'")
 
+      with subtest("Check if sessionPath option actually works"):
+          machine.succeed("${eval "imports.gi.GIRepository.Repository.get_search_path\\(\\)"} | grep gpaste")
+
       with subtest("Open Cinnamon Settings"):
           machine.succeed("${su "cinnamon-settings themes >&2 &"}")
           machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'cinnamon-settings'")
diff --git a/nixos/tests/cloud-init.nix b/nixos/tests/cloud-init.nix
index 786e01add7d4b..0b4c5a55c80a6 100644
--- a/nixos/tests/cloud-init.nix
+++ b/nixos/tests/cloud-init.nix
@@ -73,6 +73,7 @@ in makeTest {
   };
   testScript = ''
     # To wait until cloud-init terminates its run
+    unnamed.wait_for_unit("cloud-init-local.service")
     unnamed.wait_for_unit("cloud-final.service")
 
     unnamed.succeed("cat /tmp/cloudinit-write-file | grep -q 'cloudinit'")
diff --git a/nixos/tests/code-server.nix b/nixos/tests/code-server.nix
new file mode 100644
index 0000000000000..7d523dfc617e3
--- /dev/null
+++ b/nixos/tests/code-server.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+{
+  name = "code-server";
+
+  nodes = {
+    machine = {pkgs, ...}: {
+      services.code-server = {
+        enable = true;
+        auth = "none";
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("code-server.service")
+    machine.wait_for_open_port(4444)
+    machine.succeed("curl -k --fail http://localhost:4444", timeout=10)
+  '';
+
+  meta.maintainers = [ lib.maintainers.drupol ];
+})
diff --git a/nixos/tests/containers-custom-pkgs.nix b/nixos/tests/containers-custom-pkgs.nix
index e8740ac631345..57184787c85f6 100644
--- a/nixos/tests/containers-custom-pkgs.nix
+++ b/nixos/tests/containers-custom-pkgs.nix
@@ -9,7 +9,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: let
 in {
   name = "containers-custom-pkgs";
   meta = {
-    maintainers = with lib.maintainers; [ adisbladis erikarvstedt ];
+    maintainers = with lib.maintainers; [ erikarvstedt ];
   };
 
   nodes.machine = { config, ... }: {
diff --git a/nixos/tests/containers-imperative.nix b/nixos/tests/containers-imperative.nix
index 18bec1db78e88..fff00e4f73a85 100644
--- a/nixos/tests/containers-imperative.nix
+++ b/nixos/tests/containers-imperative.nix
@@ -13,6 +13,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
       nix.settings.sandbox = false;
       nix.settings.substituters = []; # don't try to access cache.nixos.org
 
+      virtualisation.memorySize = 2048;
       virtualisation.writableStore = true;
       # Make sure we always have all the required dependencies for creating a
       # container available within the VM, because we don't have network access.
diff --git a/nixos/tests/corerad.nix b/nixos/tests/corerad.nix
index b6f5d7fc6f75b..dd2bec794a1a0 100644
--- a/nixos/tests/corerad.nix
+++ b/nixos/tests/corerad.nix
@@ -56,6 +56,8 @@ import ./make-test-python.nix (
 
       with subtest("Wait for CoreRAD and network ready"):
           # Ensure networking is online and CoreRAD is ready.
+          router.systemctl("start network-online.target")
+          client.systemctl("start network-online.target")
           router.wait_for_unit("network-online.target")
           client.wait_for_unit("network-online.target")
           router.wait_for_unit("corerad.service")
diff --git a/nixos/tests/curl-impersonate.nix b/nixos/tests/curl-impersonate.nix
index 7954e9e5584c4..33b10da1dfd0f 100644
--- a/nixos/tests/curl-impersonate.nix
+++ b/nixos/tests/curl-impersonate.nix
@@ -144,6 +144,8 @@ in {
     start_all()
 
     with subtest("Wait for network"):
+        web.systemctl("start network-online.target")
+        curl.systemctl("start network-online.target")
         web.wait_for_unit("network-online.target")
         curl.wait_for_unit("network-online.target")
 
diff --git a/nixos/tests/dhparams.nix b/nixos/tests/dhparams.nix
index 021042fafdb10..8d7082c114001 100644
--- a/nixos/tests/dhparams.nix
+++ b/nixos/tests/dhparams.nix
@@ -18,6 +18,8 @@ import ./make-test-python.nix {
         systemd.services.foo = {
           description = "Check systemd Ordering";
           wantedBy = [ "multi-user.target" ];
+          before = [ "shutdown.target" ];
+          conflicts = [ "shutdown.target" ];
           unitConfig = {
             # This is to make sure that the dhparams generation of foo occurs
             # before this service so we need this service to start as early as
diff --git a/nixos/tests/docker-tools.nix b/nixos/tests/docker-tools.nix
index fcdfa586fd55d..90af817e75ed3 100644
--- a/nixos/tests/docker-tools.nix
+++ b/nixos/tests/docker-tools.nix
@@ -11,7 +11,7 @@ let
       # Rootfs diffs for layers 1 and 2 are identical (and empty)
       layer1 = pkgs.dockerTools.buildImage {  name = "empty";  };
       layer2 = layer1.overrideAttrs (_: { fromImage = layer1; });
-      repeatedRootfsDiffs = pkgs.runCommandNoCC "image-with-links.tar" {
+      repeatedRootfsDiffs = pkgs.runCommand "image-with-links.tar" {
         nativeBuildInputs = [pkgs.jq];
       } ''
         mkdir contents
diff --git a/nixos/tests/drawterm.nix b/nixos/tests/drawterm.nix
new file mode 100644
index 0000000000000..1d444bb55433b
--- /dev/null
+++ b/nixos/tests/drawterm.nix
@@ -0,0 +1,58 @@
+{ system, pkgs }:
+let
+  tests = {
+    xorg = {
+      node = { pkgs, ... }: {
+        imports = [ ./common/user-account.nix ./common/x11.nix ];
+        services.xserver.enable = true;
+        services.xserver.displayManager.sessionCommands = ''
+          ${pkgs.drawterm}/bin/drawterm -g 1024x768 &
+        '';
+        test-support.displayManager.auto.user = "alice";
+      };
+      systems = [ "x86_64-linux" "aarch64-linux" ];
+    };
+    wayland = {
+      node = { pkgs, ... }: {
+        imports = [ ./common/wayland-cage.nix ];
+        services.cage.program = "${pkgs.drawterm-wayland}/bin/drawterm";
+      };
+      systems = [ "x86_64-linux" ];
+    };
+  };
+
+  mkTest = name: machine:
+    import ./make-test-python.nix ({ pkgs, ... }: {
+      inherit name;
+
+      nodes = { "${name}" = machine; };
+
+      meta = with pkgs.lib.maintainers; {
+        maintainers = [ moody ];
+      };
+
+      enableOCR = true;
+
+      testScript = ''
+        @polling_condition
+        def drawterm_running():
+            machine.succeed("pgrep drawterm")
+
+        start_all()
+
+        machine.wait_for_unit("graphical.target")
+        drawterm_running.wait() # type: ignore[union-attr]
+        machine.wait_for_text("cpu")
+        machine.send_chars("cpu\n")
+        machine.wait_for_text("auth")
+        machine.send_chars("cpu\n")
+        machine.wait_for_text("ending")
+        machine.screenshot("out.png")
+      '';
+
+    });
+  mkTestOn = systems: name: machine:
+    if pkgs.lib.elem system systems then mkTest name machine
+    else { ... }: { };
+in
+builtins.mapAttrs (k: v: mkTestOn v.systems k v.node { inherit system; }) tests
diff --git a/nixos/tests/elk.nix b/nixos/tests/elk.nix
index 900ea6320100f..b5a8cb532ae0a 100644
--- a/nixos/tests/elk.nix
+++ b/nixos/tests/elk.nix
@@ -1,6 +1,6 @@
 # To run the test on the unfree ELK use the following command:
 # cd path/to/nixpkgs
-# NIXPKGS_ALLOW_UNFREE=1 nix-build -A nixosTests.elk.unfree.ELK-6
+# NIXPKGS_ALLOW_UNFREE=1 nix-build -A nixosTests.elk.unfree.ELK-7
 
 { system ? builtins.currentSystem,
   config ? {},
@@ -120,7 +120,7 @@ let
               };
 
               elasticsearch-curator = {
-                enable = true;
+                enable = elk ? elasticsearch-curator;
                 actionYAML = ''
                 ---
                 actions:
@@ -246,7 +246,7 @@ let
           one.wait_until_succeeds(
               expect_hits("SuperdupercalifragilisticexpialidociousIndeed")
           )
-    '' + ''
+    '' + lib.optionalString (elk ? elasticsearch-curator) ''
       with subtest("Elasticsearch-curator works"):
           one.systemctl("stop logstash")
           one.systemctl("start elasticsearch-curator")
diff --git a/nixos/tests/eris-server.nix b/nixos/tests/eris-server.nix
index a50db3afebf5f..b9d2b57401e0a 100644
--- a/nixos/tests/eris-server.nix
+++ b/nixos/tests/eris-server.nix
@@ -3,7 +3,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
   meta.maintainers = with lib.maintainers; [ ehmry ];
 
   nodes.server = {
-    environment.systemPackages = [ pkgs.eris-go pkgs.nim.pkgs.eris ];
+    environment.systemPackages = [ pkgs.eris-go pkgs.eriscmd ];
     services.eris-server = {
       enable = true;
       decode = true;
diff --git a/nixos/tests/ferm.nix b/nixos/tests/ferm.nix
index be43877445ebf..87c67ac623479 100644
--- a/nixos/tests/ferm.nix
+++ b/nixos/tests/ferm.nix
@@ -55,6 +55,8 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     ''
       start_all()
 
+      client.systemctl("start network-online.target")
+      server.systemctl("start network-online.target")
       client.wait_for_unit("network-online.target")
       server.wait_for_unit("network-online.target")
       server.wait_for_unit("ferm.service")
diff --git a/nixos/tests/frp.nix b/nixos/tests/frp.nix
index 2f5c0f8ec933b..1f57c031a53a5 100644
--- a/nixos/tests/frp.nix
+++ b/nixos/tests/frp.nix
@@ -18,10 +18,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         enable = true;
         role = "server";
         settings = {
-          common = {
-            bind_port = 7000;
-            vhost_http_port = 80;
-          };
+          bindPort = 7000;
+          vhostHTTPPort = 80;
         };
       };
     };
@@ -59,15 +57,16 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         enable = true;
         role = "client";
         settings = {
-          common = {
-            server_addr = "10.0.0.1";
-            server_port = 7000;
-          };
-          web = {
-            type = "http";
-            local_port = 80;
-            custom_domains = "10.0.0.1";
-          };
+          serverAddr = "10.0.0.1";
+          serverPort = 7000;
+          proxies = [
+            {
+              name = "web";
+              type = "http";
+              localPort = 80;
+              customDomains = [ "10.0.0.1" ];
+            }
+          ];
         };
       };
     };
diff --git a/nixos/tests/frr.nix b/nixos/tests/frr.nix
index 598d7a7d28675..0d1a6a694a82c 100644
--- a/nixos/tests/frr.nix
+++ b/nixos/tests/frr.nix
@@ -29,7 +29,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
       name = "frr";
 
       meta = with pkgs.lib.maintainers; {
-        maintainers = [ hexa ];
+        maintainers = [ ];
       };
 
       nodes = {
diff --git a/nixos/tests/ft2-clone.nix b/nixos/tests/ft2-clone.nix
index a8395d4ebaa62..5476b38c00bd2 100644
--- a/nixos/tests/ft2-clone.nix
+++ b/nixos/tests/ft2-clone.nix
@@ -4,12 +4,11 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     maintainers = [ fgaz ];
   };
 
-  nodes.machine = { config, pkgs, ... }: {
+  nodes.machine = { pkgs, ... }: {
     imports = [
       ./common/x11.nix
     ];
 
-    services.xserver.enable = true;
     sound.enable = true;
     environment.systemPackages = [ pkgs.ft2-clone ];
   };
@@ -30,4 +29,3 @@ import ./make-test-python.nix ({ pkgs, ... }: {
       machine.screenshot("screen")
     '';
 })
-
diff --git a/nixos/tests/gitdaemon.nix b/nixos/tests/gitdaemon.nix
index bb07b6e97b7fb..052fa902b4504 100644
--- a/nixos/tests/gitdaemon.nix
+++ b/nixos/tests/gitdaemon.nix
@@ -59,6 +59,9 @@ in {
     with subtest("git daemon starts"):
         server.wait_for_unit("git-daemon.service")
 
+
+    server.systemctl("start network-online.target")
+    client.systemctl("start network-online.target")
     server.wait_for_unit("network-online.target")
     client.wait_for_unit("network-online.target")
 
diff --git a/nixos/tests/gitlab.nix b/nixos/tests/gitlab.nix
index 88cd774f815a5..8d31264253119 100644
--- a/nixos/tests/gitlab.nix
+++ b/nixos/tests/gitlab.nix
@@ -34,7 +34,7 @@ in {
     gitlab = { ... }: {
       imports = [ common/user-account.nix ];
 
-      virtualisation.memorySize = if pkgs.stdenv.is64bit then 4096 else 2047;
+      virtualisation.memorySize = 6144;
       virtualisation.cores = 4;
       virtualisation.useNixStoreImage = true;
       virtualisation.writableStore = false;
diff --git a/nixos/tests/gns3-server.nix b/nixos/tests/gns3-server.nix
new file mode 100644
index 0000000000000..e37d751f5f640
--- /dev/null
+++ b/nixos/tests/gns3-server.nix
@@ -0,0 +1,55 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "gns3-server";
+  meta.maintainers = [ lib.maintainers.anthonyroussel ];
+
+  nodes.machine =
+    { ... }:
+    let
+      tls-cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+        openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -days 365 \
+          -subj '/CN=localhost'
+        install -D -t $out key.pem cert.pem
+      '';
+    in {
+      services.gns3-server = {
+        enable = true;
+        auth = {
+          enable = true;
+          user = "user";
+          passwordFile = pkgs.writeText "gns3-auth-password-file" "password";
+        };
+        ssl = {
+          enable = true;
+          certFile = "${tls-cert}/cert.pem";
+          keyFile = "${tls-cert}/key.pem";
+        };
+        dynamips.enable = true;
+        ubridge.enable = true;
+        vpcs.enable = true;
+      };
+
+      security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ];
+    };
+
+  testScript = let
+    createProject = pkgs.writeText "createProject.json" (builtins.toJSON {
+      name = "test_project";
+    });
+  in
+  ''
+    start_all()
+
+    machine.wait_for_unit("gns3-server.service")
+    machine.wait_for_open_port(3080)
+
+    with subtest("server is listening"):
+      machine.succeed("curl -sSfL -u user:password https://localhost:3080/v2/version")
+
+    with subtest("create dummy project"):
+      machine.succeed("curl -sSfL -u user:password https://localhost:3080/v2/projects -d @${createProject}")
+
+    with subtest("logging works"):
+      log_path = "/var/log/gns3/server.log"
+      machine.wait_for_file(log_path)
+  '';
+})
diff --git a/nixos/tests/google-oslogin/default.nix b/nixos/tests/google-oslogin/default.nix
index 72c87d7153bdf..cd05af6b9ed7a 100644
--- a/nixos/tests/google-oslogin/default.nix
+++ b/nixos/tests/google-oslogin/default.nix
@@ -12,7 +12,7 @@ let
 in {
   name = "google-oslogin";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ adisbladis flokli ];
+    maintainers = [ flokli ];
   };
 
   nodes = {
@@ -71,4 +71,3 @@ in {
     )
   '';
   })
-
diff --git a/nixos/tests/guix/basic.nix b/nixos/tests/guix/basic.nix
new file mode 100644
index 0000000000000..9b943b8965e60
--- /dev/null
+++ b/nixos/tests/guix/basic.nix
@@ -0,0 +1,42 @@
+# Note that the Guix store directory is empty. Also, we're trying to prevent
+# Guix from trying to download substitutes because of the restricted network
+# access (assuming it's in a sandboxed environment).
+#
+# So this test is what it is: a basic test that uses Guix as much as we
+# possibly can (including the API) without triggering its download alarm.
+
+import ../make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "guix-basic";
+  meta.maintainers = with lib.maintainers; [ foo-dogsquared ];
+
+  nodes.machine = { config, ... }: {
+    environment.etc."guix/scripts".source = ./scripts;
+    services.guix = {
+      enable = true;
+      gc.enable = true;
+    };
+  };
+
+  testScript = ''
+    import pathlib
+
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_unit("guix-daemon.service")
+    machine.succeed("systemctl start guix-gc.service")
+
+    # Can't do much here since the environment has restricted network access.
+    with subtest("Guix basic package management"):
+      machine.succeed("guix build --dry-run --verbosity=0 hello")
+      machine.succeed("guix show hello")
+
+    # This is to see if the Guix API is usable and mostly working.
+    with subtest("Guix API scripting"):
+      scripts_dir = pathlib.Path("/etc/guix/scripts")
+
+      text_msg = "Hello there, NixOS!"
+      text_store_file = machine.succeed(f"guix repl -- {scripts_dir}/create-file-to-store.scm '{text_msg}'")
+      assert machine.succeed(f"cat {text_store_file}") == text_msg
+
+      machine.succeed(f"guix repl -- {scripts_dir}/add-existing-files-to-store.scm {scripts_dir}")
+  '';
+})
diff --git a/nixos/tests/guix/default.nix b/nixos/tests/guix/default.nix
new file mode 100644
index 0000000000000..a017668c05a75
--- /dev/null
+++ b/nixos/tests/guix/default.nix
@@ -0,0 +1,8 @@
+{ system ? builtins.currentSystem
+, pkgs ? import ../../.. { inherit system; }
+}:
+
+{
+  basic = import ./basic.nix { inherit system pkgs; };
+  publish = import ./publish.nix { inherit system pkgs; };
+}
diff --git a/nixos/tests/guix/publish.nix b/nixos/tests/guix/publish.nix
new file mode 100644
index 0000000000000..eb56fc97478cc
--- /dev/null
+++ b/nixos/tests/guix/publish.nix
@@ -0,0 +1,96 @@
+# Test the substitute server with two machines in a local network. As a bonus,
+# we'll also test the substitute server's ability to advertise its service to
+# the local network with Avahi.
+
+import ../make-test-python.nix ({ pkgs, lib, ... }: let
+  publishPort = 8181;
+  inherit (builtins) toString;
+in {
+  name = "guix-publish";
+
+  meta.maintainers = with lib.maintainers; [ foo-dogsquared ];
+
+  nodes = let
+    commonConfig = { config, ... }: {
+      # We'll be using '--advertise' flag with the
+      # substitute server which requires Avahi.
+      services.avahi = {
+        enable = true;
+        nssmdns4 = true;
+        publish = {
+          enable = true;
+          userServices = true;
+        };
+      };
+    };
+  in {
+    server = { config, lib, pkgs, ... }: {
+      imports = [ commonConfig ];
+
+      services.guix = {
+        enable = true;
+        publish = {
+          enable = true;
+          port = publishPort;
+
+          generateKeyPair = true;
+          extraArgs = [ "--advertise" ];
+        };
+      };
+
+      networking.firewall.allowedTCPPorts = [ publishPort ];
+    };
+
+    client = { config, lib, pkgs, ... }: {
+      imports = [ commonConfig ];
+
+      services.guix = {
+        enable = true;
+
+        extraArgs = [
+          # Force Guix to fetch substitutes only from the local server. We
+          # don't have anything in the Guix store directory and we cannot get
+          # anything from the official substitute servers anyway.
+          "--substitute-urls='http://server.local:${toString publishPort}'"
+
+          # Enable autodiscovery of substitute servers on the local network.
+          # This machine shouldn't need to import the signing key from the
+          # substitute server since that is done automatically.
+          "--discover=yes"
+        ];
+      };
+    };
+  };
+
+  testScript = ''
+    import pathlib
+
+    start_all()
+
+    scripts_dir = pathlib.Path("/etc/guix/scripts")
+
+    for machine in machines:
+      machine.wait_for_unit("multi-user.target")
+      machine.wait_for_unit("guix-daemon.service")
+      machine.wait_for_unit("avahi-daemon.service")
+
+    server.wait_for_unit("guix-publish.service")
+    server.wait_for_open_port(${toString publishPort})
+    server.succeed("curl http://localhost:${toString publishPort}/")
+
+    # Now it's the client's turn to make use of it.
+    substitute_server = "http://server.local:${toString publishPort}"
+    client.systemctl("start network-online.target")
+    client.wait_for_unit("network-online.target")
+    response = client.succeed(f"curl {substitute_server}")
+    assert "Guix Substitute Server" in response
+
+    # Authorizing the server to be used as a substitute server.
+    client.succeed(f"curl -O {substitute_server}/signing-key.pub")
+    client.succeed("guix archive --authorize < ./signing-key.pub")
+
+    # Since we're running the substitute server with the `--advertise` flag,
+    # we might as well check that it is actually advertised over mDNS.
+    client.succeed("avahi-browse --resolve --terminate _guix_publish._tcp | grep '_guix_publish._tcp'")
+  '';
+})
diff --git a/nixos/tests/guix/scripts/add-existing-files-to-store.scm b/nixos/tests/guix/scripts/add-existing-files-to-store.scm
new file mode 100644
index 0000000000000..fa47320b6a511
--- /dev/null
+++ b/nixos/tests/guix/scripts/add-existing-files-to-store.scm
@@ -0,0 +1,52 @@
+;; A simple script that adds each file given on the command line to the
+;; store and checks that the store item matches the original file.
+(use-modules (guix)
+             (srfi srfi-1)
+             (ice-9 ftw)
+             (rnrs io ports))
+
+;; This is based on tests/derivations.scm from the Guix source code.
+(define* (directory-contents dir #:optional (slurp get-bytevector-all))
+         "Return an alist representing the contents of DIR"
+         (define prefix-len (string-length dir))
+         (sort (file-system-fold (const #t)
+                                 (lambda (path stat result)
+                                   (alist-cons (string-drop path prefix-len)
+                                               (call-with-input-file path slurp)
+                                               result))
+                                 (lambda (path stat result) result)
+                                 (lambda (path stat result) result)
+                                 (lambda (path stat result) result)
+                                 (lambda (path stat errno result) result)
+                                 '()
+                                 dir)
+               (lambda (e1 e2)
+                 (string<? (car e1) (car e2)))))
+
+(define* (check-if-same store drv path)
+         "Check if the given path and its store item are the same"
+         (let* ((filetype (stat:type (stat drv))))
+           (case filetype
+             ((regular)
+              (and (valid-path? store drv)
+                   (equal? (call-with-input-file path get-bytevector-all)
+                           (call-with-input-file drv get-bytevector-all))))
+             ((directory)
+              (and (valid-path? store drv)
+                   (equal? (directory-contents path)
+                           (directory-contents drv))))
+             (else #f))))
+
+(define* (add-and-check-item-to-store store path)
+         "Add PATH to STORE and check if the contents are the same"
+         (let* ((store-item (add-to-store store
+                                          (basename path)
+                                          #t "sha256" path))
+                (is-same (check-if-same store store-item path)))
+           (if (not is-same)
+             (exit 1))))
+
+(with-store store
+            (map (lambda (path)
+                   (add-and-check-item-to-store store (readlink* path)))
+                 (cdr (command-line))))
diff --git a/nixos/tests/guix/scripts/create-file-to-store.scm b/nixos/tests/guix/scripts/create-file-to-store.scm
new file mode 100644
index 0000000000000..467e4c4fd53f2
--- /dev/null
+++ b/nixos/tests/guix/scripts/create-file-to-store.scm
@@ -0,0 +1,8 @@
+;; A script that creates a store item with the given text and prints the
+;; resulting store item path.
+(use-modules (guix))
+
+(with-store store
+            (display (add-text-to-store store "guix-basic-test-text"
+                                        (string-join
+                                          (cdr (command-line))))))
diff --git a/nixos/tests/haproxy.nix b/nixos/tests/haproxy.nix
index 555474d7f2999..1730938737577 100644
--- a/nixos/tests/haproxy.nix
+++ b/nixos/tests/haproxy.nix
@@ -1,22 +1,42 @@
-import ./make-test-python.nix ({ pkgs, ...}: {
+import ./make-test-python.nix ({ lib, pkgs, ...}: {
   name = "haproxy";
   nodes = {
-    machine = { ... }: {
-      services.haproxy = {
+    server = { ... }: {
+      services.haproxy = {
         enable = true;
         config = ''
+          global
+            limited-quic
+
           defaults
+            mode http
             timeout connect 10s
+            timeout client 10s
+            timeout server 10s
+
+            log /dev/log local0 debug err
+            option logasap
+            option httplog
+            option httpslog
 
           backend http_server
-            mode http
-            server httpd [::1]:8000
+            server httpd [::1]:8000 alpn http/1.1
 
           frontend http
-            bind *:80
-            mode http
+            bind :80
+            bind :443 ssl strict-sni crt /etc/ssl/fullchain.pem alpn h2,http/1.1
+            bind quic4@:443 ssl strict-sni crt /etc/ssl/fullchain.pem alpn h3 allow-0rtt
+
+            http-after-response add-header alt-svc 'h3=":443"; ma=60' if { ssl_fc }
+
             http-request use-service prometheus-exporter if { path /metrics }
             use_backend http_server
+
+          frontend http-cert-auth
+            bind :8443 ssl strict-sni crt /etc/ssl/fullchain.pem verify required ca-file /etc/ssl/cacert.crt
+            bind quic4@:8443 ssl strict-sni crt /etc/ssl/fullchain.pem verify required ca-file /etc/ssl/cacert.crt alpn h3
+
+            use_backend http_server
         '';
       };
       services.httpd = {
@@ -30,24 +50,75 @@ import ./make-test-python.nix ({ pkgs, ...}: {
           }];
         };
       };
+      networking.firewall.allowedTCPPorts = [ 80 443 8443 ];
+      networking.firewall.allowedUDPPorts = [ 443 8443 ];
+    };
+    client = { ... }: {
+      environment.systemPackages = [ pkgs.curlHTTP3 ];
     };
   };
   testScript = ''
+    # Helpers
+    def cmd(command):
+      print(f"+{command}")
+      r = os.system(command)
+      if r != 0:
+        raise Exception(f"Command {command} failed with exit code {r}")
+
+    def openssl(command):
+      cmd(f"${pkgs.openssl}/bin/openssl {command}")
+
+    # Generate CA.
+    openssl("req -new -newkey rsa:4096 -nodes -x509 -days 7 -subj '/C=ZZ/ST=Cloud/L=Unspecified/O=NixOS/OU=Tests/CN=CA Certificate' -keyout cacert.key -out cacert.crt")
+
+    # Generate and sign Server.
+    openssl("req -newkey rsa:4096 -nodes -subj '/CN=server/OU=Tests/O=NixOS' -keyout server.key -out server.csr")
+    openssl("x509 -req -in server.csr -out server.crt -CA cacert.crt -CAkey cacert.key -days 7")
+    cmd("cat server.crt server.key > fullchain.pem")
+
+    # Generate and sign Client.
+    openssl("req -newkey rsa:4096 -nodes -subj '/CN=client/OU=Tests/O=NixOS' -keyout client.key -out client.csr")
+    openssl("x509 -req -in client.csr -out client.crt -CA cacert.crt -CAkey cacert.key -days 7")
+    cmd("cat client.crt client.key > client.pem")
+
+    # Start the actual test.
     start_all()
-    machine.wait_for_unit("multi-user.target")
-    machine.wait_for_unit("haproxy.service")
-    machine.wait_for_unit("httpd.service")
-    assert "We are all good!" in machine.succeed("curl -fk http://localhost:80/index.txt")
-    assert "haproxy_process_pool_allocated_bytes" in machine.succeed(
-        "curl -fk http://localhost:80/metrics"
-    )
+    server.copy_from_host("fullchain.pem", "/etc/ssl/fullchain.pem")
+    server.copy_from_host("cacert.crt", "/etc/ssl/cacert.crt")
+    server.succeed("chmod 0644 /etc/ssl/fullchain.pem /etc/ssl/cacert.crt")
+
+    client.copy_from_host("cacert.crt", "/etc/ssl/cacert.crt")
+    client.copy_from_host("client.pem", "/root/client.pem")
+
+    server.wait_for_unit("multi-user.target")
+    server.wait_for_unit("haproxy.service")
+    server.wait_for_unit("httpd.service")
+
+    assert "We are all good!" in client.succeed("curl -f http://server/index.txt")
+    assert "haproxy_process_pool_allocated_bytes" in client.succeed("curl -f http://server/metrics")
+
+    with subtest("https"):
+      assert "We are all good!" in client.succeed("curl -f --cacert /etc/ssl/cacert.crt https://server/index.txt")
+
+    with subtest("https-cert-auth"):
+      # Client must succeed in authenticating with the right certificate.
+      assert "We are all good!" in client.succeed("curl -f --cacert /etc/ssl/cacert.crt --cert-type pem --cert /root/client.pem https://server:8443/index.txt")
+      # Client must fail without certificate.
+      client.fail("curl --cacert /etc/ssl/cacert.crt https://server:8443/index.txt")
+
+    with subtest("h3"):
+      assert "We are all good!" in client.succeed("curl -f --http3-only --cacert /etc/ssl/cacert.crt https://server/index.txt")
+
+    with subtest("h3-cert-auth"):
+      # Client must succeed in authenticating with the right certificate.
+      assert "We are all good!" in client.succeed("curl -f --http3-only --cacert /etc/ssl/cacert.crt --cert-type pem --cert /root/client.pem https://server:8443/index.txt")
+      # Client must fail without certificate.
+      client.fail("curl -f --http3-only --cacert /etc/ssl/cacert.crt https://server:8443/index.txt")
 
     with subtest("reload"):
-        machine.succeed("systemctl reload haproxy")
+        server.succeed("systemctl reload haproxy")
         # wait some time to ensure the following request hits the reloaded haproxy
-        machine.sleep(5)
-        assert "We are all good!" in machine.succeed(
-            "curl -fk http://localhost:80/index.txt"
-        )
+        server.sleep(5)
+        assert "We are all good!" in client.succeed("curl -f http://server/index.txt")
   '';
 })
diff --git a/nixos/tests/harmonia.nix b/nixos/tests/harmonia.nix
index 6cf9ad4d23358..a9beac82f8e12 100644
--- a/nixos/tests/harmonia.nix
+++ b/nixos/tests/harmonia.nix
@@ -13,6 +13,9 @@
 
       networking.firewall.allowedTCPPorts = [ 5000 ];
       system.extraDependencies = [ pkgs.emptyFile ];
+
+      # check that extra-allowed-users is effective for harmonia
+      nix.settings.allowed-users = [];
     };
 
     client01 = {
diff --git a/nixos/tests/home-assistant.nix b/nixos/tests/home-assistant.nix
index e1588088ba198..05fb2fa1e06aa 100644
--- a/nixos/tests/home-assistant.nix
+++ b/nixos/tests/home-assistant.nix
@@ -43,7 +43,7 @@ in {
 
       # test loading custom components
       customComponents = with pkgs.home-assistant-custom-components; [
-        prometheus-sensor
+        prometheus_sensor
       ];
 
       # test loading lovelace modules
@@ -182,7 +182,7 @@ in {
         hass.wait_until_succeeds("journalctl -u home-assistant.service | grep -q 'We found a custom integration prometheus_sensor which has not been tested by Home Assistant'")
 
     with subtest("Check that lovelace modules are referenced and fetchable"):
-        hass.succeed("grep -q 'mini-graph-card-bundle.js' '${configDir}/ui-lovelace.yaml'")
+        hass.succeed("grep -q 'mini-graph-card-bundle.js' '${configDir}/configuration.yaml'")
         hass.succeed("curl --fail http://localhost:8123/local/nixos-lovelace-modules/mini-graph-card-bundle.js")
 
     with subtest("Check that optional dependencies are in the PYTHONPATH"):
diff --git a/nixos/tests/hostname.nix b/nixos/tests/hostname.nix
index 6122e2ffeb83a..dffec956bc0b6 100644
--- a/nixos/tests/hostname.nix
+++ b/nixos/tests/hostname.nix
@@ -34,6 +34,7 @@ let
 
         machine = ${hostName}
 
+        machine.systemctl("start network-online.target")
         machine.wait_for_unit("network-online.target")
 
         # Test if NixOS computes the correct FQDN (either a FQDN or an error/null):
diff --git a/nixos/tests/incron.nix b/nixos/tests/incron.nix
index c978ff27dfad5..d016360ba0ef8 100644
--- a/nixos/tests/incron.nix
+++ b/nixos/tests/incron.nix
@@ -13,9 +13,9 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
       '';
 
       # ensure the directory to be monitored exists before incron is started
-      system.activationScripts.incronTest = ''
-        mkdir /test
-      '';
+      systemd.tmpfiles.settings.incron-test = {
+        "/test".d = { };
+      };
     };
 
   testScript = ''
diff --git a/nixos/tests/incus/container.nix b/nixos/tests/incus/container.nix
index 49a22c08aad1c..2fa1709c7484b 100644
--- a/nixos/tests/incus/container.nix
+++ b/nixos/tests/incus/container.nix
@@ -14,7 +14,9 @@ in
 {
   name = "incus-container";
 
-  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   nodes.machine = { ... }: {
     virtualisation = {
@@ -54,6 +56,10 @@ in
           retry(instance_is_up)
         machine.succeed("echo true | incus exec container /run/current-system/sw/bin/bash -")
 
+    with subtest("Container mounts lxcfs overlays"):
+        machine.succeed("incus exec container mount | grep 'lxcfs on /proc/cpuinfo type fuse.lxcfs'")
+        machine.succeed("incus exec container mount | grep 'lxcfs on /proc/meminfo type fuse.lxcfs'")
+
     with subtest("Container CPU limits can be managed"):
         set_container("limits.cpu 1")
         cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
diff --git a/nixos/tests/incus/default.nix b/nixos/tests/incus/default.nix
index c88974605e306..26e8a4ac4c772 100644
--- a/nixos/tests/incus/default.nix
+++ b/nixos/tests/incus/default.nix
@@ -6,9 +6,8 @@
 }:
 {
   container = import ./container.nix { inherit system pkgs; };
+  lxd-to-incus = import ./lxd-to-incus.nix { inherit system pkgs; };
   preseed = import ./preseed.nix { inherit system pkgs; };
   socket-activated = import ./socket-activated.nix { inherit system pkgs; };
-  virtual-machine = handleTestOn [ "x86_64-linux" ] ./virtual-machine.nix {
-    inherit system pkgs;
-  };
+  virtual-machine = handleTestOn [ "x86_64-linux" ] ./virtual-machine.nix { inherit system pkgs; };
 }
diff --git a/nixos/tests/incus/lxd-to-incus.nix b/nixos/tests/incus/lxd-to-incus.nix
new file mode 100644
index 0000000000000..67245b54e7527
--- /dev/null
+++ b/nixos/tests/incus/lxd-to-incus.nix
@@ -0,0 +1,112 @@
+import ../make-test-python.nix (
+
+  { pkgs, lib, ... }:
+
+  let
+    releases = import ../../release.nix { configuration.documentation.enable = lib.mkForce false; };
+
+    container-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
+    container-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
+  in
+  {
+    name = "lxd-to-incus";
+
+    meta = {
+      maintainers = lib.teams.lxc.members;
+    };
+
+    nodes.machine =
+      { lib, ... }:
+      {
+        environment.systemPackages = [ pkgs.lxd-to-incus ];
+
+        virtualisation = {
+          diskSize = 6144;
+          cores = 2;
+          memorySize = 2048;
+
+          lxd.enable = true;
+          lxd.preseed = {
+            networks = [
+              {
+                name = "nixostestbr0";
+                type = "bridge";
+                config = {
+                  "ipv4.address" = "10.0.100.1/24";
+                  "ipv4.nat" = "true";
+                };
+              }
+            ];
+            profiles = [
+              {
+                name = "default";
+                devices = {
+                  eth0 = {
+                    name = "eth0";
+                    network = "nixostestbr0";
+                    type = "nic";
+                  };
+                  root = {
+                    path = "/";
+                    pool = "nixostest_pool";
+                    size = "35GiB";
+                    type = "disk";
+                  };
+                };
+              }
+              {
+                name = "nixos_notdefault";
+                devices = { };
+              }
+            ];
+            storage_pools = [
+              {
+                name = "nixostest_pool";
+                driver = "dir";
+              }
+            ];
+          };
+
+          incus.enable = true;
+        };
+      };
+
+    testScript = ''
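+      # Polling helpers for retry(): wait until the LXD preseed service has
+      # finished and until the container answers a trivial exec.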
+      def lxd_wait_for_preseed(_) -> bool:
+        _, output = machine.systemctl("is-active lxd-preseed.service")
+        return ("inactive" in output)
+
+      def lxd_instance_is_up(_) -> bool:
+          status, _ = machine.execute("lxc exec container --disable-stdin --force-interactive /run/current-system/sw/bin/true")
+          return status == 0
+
+      def incus_instance_is_up(_) -> bool:
+          status, _ = machine.execute("incus exec container --disable-stdin --force-interactive /run/current-system/sw/bin/true")
+          return status == 0
+
+      with machine.nested("initialize lxd and resources"):
+        machine.wait_for_unit("sockets.target")
+        machine.wait_for_unit("lxd.service")
+        retry(lxd_wait_for_preseed)
+
+        machine.succeed("lxc image import ${container-image-metadata}/*/*.tar.xz ${container-image-rootfs}/*/*.tar.xz --alias nixos")
+        machine.succeed("lxc launch nixos container")
+        retry(lxd_instance_is_up)
+
+      machine.wait_for_unit("incus.service")
+
+      with machine.nested("run migration"):
+          machine.succeed("lxd-to-incus --yes")
+
+      with machine.nested("verify resources migrated to incus"):
+          machine.succeed("incus config show container")
+          retry(incus_instance_is_up)
+          machine.succeed("incus exec container -- true")
+          machine.succeed("incus profile show default | grep nixostestbr0")
+          machine.succeed("incus profile show default | grep nixostest_pool")
+          machine.succeed("incus profile show nixos_notdefault")
+          machine.succeed("incus storage show nixostest_pool")
+          machine.succeed("incus network show nixostestbr0")
+    '';
+  }
+)
diff --git a/nixos/tests/incus/preseed.nix b/nixos/tests/incus/preseed.nix
index 47b2d0cd62284..a488d71f3c92a 100644
--- a/nixos/tests/incus/preseed.nix
+++ b/nixos/tests/incus/preseed.nix
@@ -3,7 +3,9 @@ import ../make-test-python.nix ({ pkgs, lib, ... } :
 {
   name = "incus-preseed";
 
-  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   nodes.machine = { lib, ... }: {
     virtualisation = {
diff --git a/nixos/tests/incus/socket-activated.nix b/nixos/tests/incus/socket-activated.nix
index 4d25b26a15f5d..fca536b7054f0 100644
--- a/nixos/tests/incus/socket-activated.nix
+++ b/nixos/tests/incus/socket-activated.nix
@@ -3,7 +3,9 @@ import ../make-test-python.nix ({ pkgs, lib, ... } :
 {
   name = "incus-socket-activated";
 
-  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   nodes.machine = { lib, ... }: {
     virtualisation = {
diff --git a/nixos/tests/incus/virtual-machine.nix b/nixos/tests/incus/virtual-machine.nix
index bfa116679d43b..343a25ca72970 100644
--- a/nixos/tests/incus/virtual-machine.nix
+++ b/nixos/tests/incus/virtual-machine.nix
@@ -19,7 +19,9 @@ in
 {
   name = "incus-virtual-machine";
 
-  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
 
   nodes.machine = {...}: {
     virtualisation = {
@@ -51,5 +53,8 @@ in
 
     with subtest("lxd-agent is started"):
         machine.succeed("incus exec ${instance-name} systemctl is-active lxd-agent")
+
+    with subtest("lxd-agent has a valid path"):
+        machine.succeed("incus exec ${instance-name} -- bash -c 'true'")
   '';
 })
diff --git a/nixos/tests/initrd-network-openvpn/default.nix b/nixos/tests/initrd-network-openvpn/default.nix
index 769049905eb8c..69db7dd1037f7 100644
--- a/nixos/tests/initrd-network-openvpn/default.nix
+++ b/nixos/tests/initrd-network-openvpn/default.nix
@@ -59,18 +59,19 @@ import ../make-test-python.nix ({ lib, ...}:
 
             # This command does not fork to keep the VM in the state where
             # only the initramfs is loaded
-            preLVMCommands =
-            ''
-              /bin/nc -p 1234 -lke /bin/echo TESTVALUE
-            '';
+            preLVMCommands = lib.mkIf (!systemdStage1)
+              ''
+                /bin/nc -p 1234 -lke /bin/echo TESTVALUE
+              '';
 
             network = {
               enable = true;
 
               # Work around udhcpc only getting a lease on eth0
-              postCommands = ''
-                /bin/ip addr add 192.168.1.2/24 dev eth1
-              '';
+              postCommands = lib.mkIf (!systemdStage1)
+                ''
+                  /bin/ip addr add 192.168.1.2/24 dev eth1
+                '';
 
               # Example configuration for OpenVPN
               # This is the main reason for this test
diff --git a/nixos/tests/input-remapper.nix b/nixos/tests/input-remapper.nix
index 1b0350063f7f2..2ef55a01b2905 100644
--- a/nixos/tests/input-remapper.nix
+++ b/nixos/tests/input-remapper.nix
@@ -46,7 +46,8 @@ import ./make-test-python.nix ({ pkgs, ... }:
       machine.execute("su - sybil -c input-remapper-gtk >&2 &")
 
       machine.wait_for_text("Input Remapper")
-      machine.wait_for_text("Preset")
-      machine.wait_for_text("Change Key")
+      machine.wait_for_text("Device")
+      machine.wait_for_text("Presets")
+      machine.wait_for_text("Editor")
     '';
   })
diff --git a/nixos/tests/installed-tests/flatpak.nix b/nixos/tests/installed-tests/flatpak.nix
index 9524d890c4025..fa191202f52d4 100644
--- a/nixos/tests/installed-tests/flatpak.nix
+++ b/nixos/tests/installed-tests/flatpak.nix
@@ -7,6 +7,7 @@ makeInstalledTest {
   testConfig = {
     xdg.portal.enable = true;
     xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];
+    xdg.portal.config.common.default = "gtk";
     services.flatpak.enable = true;
     environment.systemPackages = with pkgs; [ gnupg ostree python3 ];
     virtualisation.memorySize = 2047;
diff --git a/nixos/tests/installer-systemd-stage-1.nix b/nixos/tests/installer-systemd-stage-1.nix
index 1b4c92b584b95..662017935412c 100644
--- a/nixos/tests/installer-systemd-stage-1.nix
+++ b/nixos/tests/installer-systemd-stage-1.nix
@@ -22,6 +22,7 @@
     # lvm
     separateBoot
     separateBootFat
+    separateBootZfs
     simple
     simpleLabels
     simpleProvided
@@ -32,6 +33,10 @@
     stratisRoot
     swraid
     zfsroot
+    clevisLuks
+    clevisLuksFallback
+    clevisZfs
+    clevisZfsFallback
     ;
 
 }
diff --git a/nixos/tests/installer.nix b/nixos/tests/installer.nix
index e9ec287498509..7576fae41f83b 100644
--- a/nixos/tests/installer.nix
+++ b/nixos/tests/installer.nix
@@ -12,6 +12,7 @@ let
   # The configuration to install.
   makeConfig = { bootLoader, grubDevice, grubIdentifier, grubUseEfi
                , extraConfig, forceGrubReinstallCount ? 0, flake ? false
+               , clevisTest
                }:
     pkgs.writeText "configuration.nix" ''
       { config, lib, pkgs, modulesPath, ... }:
@@ -52,6 +53,15 @@ let
 
         boot.initrd.secrets."/etc/secret" = ./secret;
 
+        ${optionalString clevisTest ''
+          boot.kernelParams = [ "console=tty0" "ip=192.168.1.1:::255.255.255.0::eth1:none" ];
+          boot.initrd = {
+            availableKernelModules = [ "tpm_tis" ];
+            clevis = { enable = true; useTang = true; };
+            network.enable = true;
+          };
+          ''}
+
         users.users.alice = {
           isNormalUser = true;
           home = "/home/alice";
@@ -71,7 +81,7 @@ let
   # partitions and filesystems.
   testScriptFun = { bootLoader, createPartitions, grubDevice, grubUseEfi, grubIdentifier
                   , postInstallCommands, preBootCommands, postBootCommands, extraConfig
-                  , testSpecialisationConfig, testFlakeSwitch
+                  , testSpecialisationConfig, testFlakeSwitch, clevisTest, clevisFallbackTest
                   }:
     let iface = "virtio";
         isEfi = bootLoader == "systemd-boot" || (bootLoader == "grub" && grubUseEfi);
@@ -79,12 +89,16 @@ let
     in if !isEfi && !pkgs.stdenv.hostPlatform.isx86 then ''
       machine.succeed("true")
     '' else ''
+      import subprocess
+      tpm_folder = os.environ['NIX_BUILD_TOP']
       def assemble_qemu_flags():
           flags = "-cpu max"
           ${if (system == "x86_64-linux" || system == "i686-linux")
             then ''flags += " -m 1024"''
             else ''flags += " -m 768 -enable-kvm -machine virt,gic-version=host"''
           }
+          ${optionalString clevisTest ''flags += f" -chardev socket,id=chrtpm,path={tpm_folder}/swtpm-sock -tpmdev emulator,id=tpm0,chardev=chrtpm -device tpm-tis,tpmdev=tpm0"''}
+          ${optionalString clevisTest ''flags += " -device virtio-net-pci,netdev=vlan1,mac=52:54:00:12:11:02 -netdev vde,id=vlan1,sock=\"$QEMU_VDE_SOCKET_1\""''}
           return flags
 
 
@@ -110,8 +124,47 @@ let
       def create_machine_named(name):
           return create_machine({**default_flags, "name": name})
 
+      class Tpm:
+            def __init__(self):
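+                # Start the software TPM right away so the emulated TPM socket
+                # exists before the installer VM is created.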
+                self.start()
+
+            def start(self):
+                self.proc = subprocess.Popen(["${pkgs.swtpm}/bin/swtpm",
+                    "socket",
+                    "--tpmstate", f"dir={tpm_folder}/swtpm",
+                    "--ctrl", f"type=unixio,path={tpm_folder}/swtpm-sock",
+                    "--tpm2"
+                    ])
+
+                # Check whether starting swtpm failed
+                try:
+                    exit_code = self.proc.wait(timeout=0.2)
+                    if exit_code is not None and exit_code != 0:
+                        raise Exception("failed to start swtpm")
+                except subprocess.TimeoutExpired:
+                    pass
+
+            """Check whether the swtpm process exited due to an error"""
+            def check(self):
+                exit_code = self.proc.poll()
+                if exit_code is not None and exit_code != 0:
+                    raise Exception("swtpm process died")
+
+
+      os.mkdir(f"{tpm_folder}/swtpm")
+      tpm = Tpm()
+      tpm.check()
+
+      start_all()
+      ${optionalString clevisTest ''
+      tang.wait_for_unit("sockets.target")
+      tang.systemctl("start network-online.target")
+      tang.wait_for_unit("network-online.target")
+      machine.systemctl("start network-online.target")
+      machine.wait_for_unit("network-online.target")
+      ''}
+      machine.wait_for_unit("multi-user.target")
 
-      machine.start()
 
       with subtest("Assert readiness of login prompt"):
           machine.succeed("echo hello")
@@ -127,13 +180,24 @@ let
           machine.copy_from_host(
               "${ makeConfig {
                     inherit bootLoader grubDevice grubIdentifier
-                            grubUseEfi extraConfig;
+                            grubUseEfi extraConfig clevisTest;
                   }
               }",
               "/mnt/etc/nixos/configuration.nix",
           )
           machine.copy_from_host("${pkgs.writeText "secret" "secret"}", "/mnt/etc/nixos/secret")
 
+      ${optionalString clevisTest ''
+        with subtest("Create the Clevis secret with Tang"):
+             machine.systemctl("start network-online.target")
+             machine.wait_for_unit("network-online.target")
+             machine.succeed('echo -n password | clevis encrypt sss \'{"t": 2, "pins": {"tpm2": {}, "tang": {"url": "http://192.168.1.2"}}}\' -y > /mnt/etc/nixos/clevis-secret.jwe')''}
+
+      ${optionalString clevisFallbackTest ''
+        with subtest("Shutdown Tang to check fallback to interactive prompt"):
+            tang.shutdown()
+      ''}
+
       with subtest("Perform the installation"):
           machine.succeed("nixos-install < /dev/null >&2")
 
@@ -200,7 +264,7 @@ let
           machine.copy_from_host_via_shell(
               "${ makeConfig {
                     inherit bootLoader grubDevice grubIdentifier
-                            grubUseEfi extraConfig;
+                            grubUseEfi extraConfig clevisTest;
                     forceGrubReinstallCount = 1;
                   }
               }",
@@ -229,7 +293,7 @@ let
       machine.copy_from_host_via_shell(
           "${ makeConfig {
                 inherit bootLoader grubDevice grubIdentifier
-                grubUseEfi extraConfig;
+                grubUseEfi extraConfig clevisTest;
                 forceGrubReinstallCount = 2;
               }
           }",
@@ -303,7 +367,7 @@ let
         """)
         machine.copy_from_host_via_shell(
           "${makeConfig {
-               inherit bootLoader grubDevice grubIdentifier grubUseEfi extraConfig;
+               inherit bootLoader grubDevice grubIdentifier grubUseEfi extraConfig clevisTest;
                forceGrubReinstallCount = 1;
                flake = true;
             }}",
@@ -379,6 +443,8 @@ let
     , enableOCR ? false, meta ? {}
     , testSpecialisationConfig ? false
     , testFlakeSwitch ? false
+    , clevisTest ? false
+    , clevisFallbackTest ? false
     }:
     makeTest {
       inherit enableOCR;
@@ -416,13 +482,13 @@ let
           virtualisation.rootDevice = "/dev/vdb";
           virtualisation.bootLoaderDevice = "/dev/vda";
           virtualisation.qemu.diskInterface = "virtio";
-
-          # We don't want to have any networking in the guest whatsoever.
-          # Also, if any vlans are enabled, the guest will reboot
-          # (with a different configuration for legacy reasons),
-          # and spend 5 minutes waiting for the vlan interface to show up
-          # (which will never happen).
-          virtualisation.vlans = [];
+          virtualisation.qemu.options = mkIf (clevisTest) [
+            "-chardev socket,id=chrtpm,path=$NIX_BUILD_TOP/swtpm-sock"
+            "-tpmdev emulator,id=tpm0,chardev=chrtpm"
+            "-device tpm-tis,tpmdev=tpm0"
+          ];
+          # We don't want any networking in the guest except when running the Clevis tests.
+          virtualisation.vlans = mkIf (!clevisTest) [];
 
           boot.loader.systemd-boot.enable = mkIf (bootLoader == "systemd-boot") true;
 
@@ -447,14 +513,8 @@ let
             ntp
             perlPackages.ListCompare
             perlPackages.XMLLibXML
-            python3Minimal
             # make-options-doc/default.nix
-            (let
-                self = (pkgs.python3Minimal.override {
-                  inherit self;
-                  includeSiteCustomize = true;
-                });
-              in self.withPackages (p: [ p.mistune ]))
+            (python3.withPackages (p: [ p.mistune ]))
             shared-mime-info
             sudo
             texinfo
@@ -471,7 +531,7 @@ let
           in [
             (pkgs.grub2.override { inherit zfsSupport; })
             (pkgs.grub2_efi.override { inherit zfsSupport; })
-          ]);
+          ]) ++ optionals clevisTest [ pkgs.klibc ];
 
           nix.settings = {
             substituters = mkForce [];
@@ -480,12 +540,21 @@ let
           };
         };
 
+      } // optionalAttrs clevisTest {
+        tang = {
+          services.tang = {
+            enable = true;
+            listenStream = [ "80" ];
+            ipAddressAllow = [ "192.168.1.0/24" ];
+          };
+          networking.firewall.allowedTCPPorts = [ 80 ];
+        };
       };
 
       testScript = testScriptFun {
         inherit bootLoader createPartitions postInstallCommands preBootCommands postBootCommands
                 grubDevice grubIdentifier grubUseEfi extraConfig
-                testSpecialisationConfig testFlakeSwitch;
+                testSpecialisationConfig testFlakeSwitch clevisTest clevisFallbackTest;
       };
     };
 
@@ -586,6 +655,145 @@ let
       zfs = super.zfs.overrideAttrs(_: {meta.platforms = [];});}
     )];
   };
+
+  mkClevisBcachefsTest = { fallback ? false }: makeInstallerTest "clevis-bcachefs${optionalString fallback "-fallback"}" {
+    clevisTest = true;
+    clevisFallbackTest = fallback;
+    enableOCR = fallback;
+    extraInstallerConfig = {
+      imports = [ no-zfs-module ];
+      boot.supportedFilesystems = [ "bcachefs" ];
+      environment.systemPackages = with pkgs; [ keyutils clevis ];
+    };
+    createPartitions = ''
+      machine.succeed(
+        "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+        + " mkpart primary ext2 1M 100MB"
+        + " mkpart primary linux-swap 100M 1024M"
+        + " mkpart primary 1024M -1s",
+        "udevadm settle",
+        "mkswap /dev/vda2 -L swap",
+        "swapon -L swap",
+        "keyctl link @u @s",
+        "echo -n password | mkfs.bcachefs -L root --encrypted /dev/vda3",
+        "echo -n password | bcachefs unlock /dev/vda3",
+        "echo -n password | mount -t bcachefs /dev/vda3 /mnt",
+        "mkfs.ext3 -L boot /dev/vda1",
+        "mkdir -p /mnt/boot",
+        "mount LABEL=boot /mnt/boot",
+        "udevadm settle")
+    '';
+    extraConfig = ''
+      boot.initrd.clevis.devices."/dev/vda3".secretFile = "/etc/nixos/clevis-secret.jwe";
+
+      # We override what nixos-generate-config has generated because we do
+      # not know the UUID in advance.
+      fileSystems."/" = lib.mkForce { device = "/dev/vda3"; fsType = "bcachefs"; };
+    '';
+    preBootCommands = ''
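+      # Start swtpm again (reusing the TPM state) so the Clevis TPM2 pin can
+      # unseal the secret when the installed system boots.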
+      tpm = Tpm()
+      tpm.check()
+    '' + optionalString fallback ''
+      machine.start()
+      machine.wait_for_text("enter passphrase for")
+      machine.send_chars("password\n")
+    '';
+  };
+
+  mkClevisLuksTest = { fallback ? false }: makeInstallerTest "clevis-luks${optionalString fallback "-fallback"}" {
+    clevisTest = true;
+    clevisFallbackTest = fallback;
+    enableOCR = fallback;
+    extraInstallerConfig = {
+      environment.systemPackages = with pkgs; [ clevis ];
+    };
+    createPartitions = ''
+      machine.succeed(
+        "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+        + " mkpart primary ext2 1M 100MB"
+        + " mkpart primary linux-swap 100M 1024M"
+        + " mkpart primary 1024M -1s",
+        "udevadm settle",
+        "mkswap /dev/vda2 -L swap",
+        "swapon -L swap",
+        "modprobe dm_mod dm_crypt",
+        "echo -n password | cryptsetup luksFormat -q /dev/vda3 -",
+        "echo -n password | cryptsetup luksOpen --key-file - /dev/vda3 crypt-root",
+        "mkfs.ext3 -L nixos /dev/mapper/crypt-root",
+        "mount LABEL=nixos /mnt",
+        "mkfs.ext3 -L boot /dev/vda1",
+        "mkdir -p /mnt/boot",
+        "mount LABEL=boot /mnt/boot",
+        "udevadm settle")
+    '';
+    extraConfig = ''
+      boot.initrd.clevis.devices."crypt-root".secretFile = "/etc/nixos/clevis-secret.jwe";
+    '';
+    preBootCommands = ''
+      tpm = Tpm()
+      tpm.check()
+    '' + optionalString fallback ''
+      machine.start()
+      ${if systemdStage1 then ''
+      machine.wait_for_text("Please enter")
+      '' else ''
+      machine.wait_for_text("Passphrase for")
+      ''}
+      machine.send_chars("password\n")
+    '';
+  };
+
+  mkClevisZfsTest = { fallback ? false }: makeInstallerTest "clevis-zfs${optionalString fallback "-fallback"}" {
+    clevisTest = true;
+    clevisFallbackTest = fallback;
+    enableOCR = fallback;
+    extraInstallerConfig = {
+      boot.supportedFilesystems = [ "zfs" ];
+      environment.systemPackages = with pkgs; [ clevis ];
+    };
+    createPartitions = ''
+      machine.succeed(
+        "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+        + " mkpart primary ext2 1M 100MB"
+        + " mkpart primary linux-swap 100M 1024M"
+        + " mkpart primary 1024M -1s",
+        "udevadm settle",
+        "mkswap /dev/vda2 -L swap",
+        "swapon -L swap",
+        "zpool create -O mountpoint=legacy rpool /dev/vda3",
+        "echo -n password | zfs create"
+        + " -o encryption=aes-256-gcm -o keyformat=passphrase rpool/root",
+        "mount -t zfs rpool/root /mnt",
+        "mkfs.ext3 -L boot /dev/vda1",
+        "mkdir -p /mnt/boot",
+        "mount LABEL=boot /mnt/boot",
+        "udevadm settle")
+    '';
+    extraConfig = ''
+      boot.initrd.clevis.devices."rpool/root".secretFile = "/etc/nixos/clevis-secret.jwe";
+      boot.zfs.requestEncryptionCredentials = true;
+
+
+      # Using by-uuid overrides the default of by-id, and is unique
+      # to the qemu disks, as they don't produce by-id paths for
+      # some reason.
+      boot.zfs.devNodes = "/dev/disk/by-uuid/";
+      networking.hostId = "00000000";
+    '';
+    preBootCommands = ''
+      tpm = Tpm()
+      tpm.check()
+    '' + optionalString fallback ''
+      machine.start()
+      ${if systemdStage1 then ''
+      machine.wait_for_text("Enter key for rpool/root")
+      '' else ''
+      machine.wait_for_text("Key load error")
+      ''}
+      machine.send_chars("password\n")
+    '';
+  };
+
 in {
 
   # !!! `parted mkpart' seems to silently create overlapping partitions.
@@ -667,6 +875,78 @@ in {
     '';
   };
 
+  # Same as the previous, but with ZFS /boot.
+  separateBootZfs = makeInstallerTest "separateBootZfs" {
+    extraInstallerConfig = {
+      boot.supportedFilesystems = [ "zfs" ];
+    };
+
+    extraConfig = ''
+      # Using by-uuid overrides the default of by-id, and is unique
+      # to the qemu disks, as they don't produce by-id paths for
+      # some reason.
+      boot.zfs.devNodes = "/dev/disk/by-uuid/";
+      networking.hostId = "00000000";
+    '';
+
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary ext2 1M 256MB"   # /boot
+          + " mkpart primary linux-swap 256MB 1280M"
+          + " mkpart primary ext2 1280M -1s", # /
+          "udevadm settle",
+
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+
+          "mkfs.ext4 -L nixos /dev/vda3",
+          "mount LABEL=nixos /mnt",
+
+          # Use as many ZFS features as possible to verify that GRUB can handle them
+          "zpool create"
+            " -o compatibility=grub2"
+            " -O utf8only=on"
+            " -O normalization=formD"
+            " -O compression=lz4"      # Activate the lz4_compress feature
+            " -O xattr=sa"
+            " -O acltype=posixacl"
+            " bpool /dev/vda1",
+          "zfs create"
+            " -o recordsize=1M"        # Prepare activating the large_blocks feature
+            " -o mountpoint=legacy"
+            " -o relatime=on"
+            " -o quota=1G"
+            " -o filesystem_limit=100" # Activate the filesystem_limits features
+            " bpool/boot",
+
+          # Snapshotting the top-level dataset would trigger a bug in GRUB2: https://github.com/openzfs/zfs/issues/13873
+          "zfs snapshot bpool/boot@snap-1",                     # Prepare activating the livelist and bookmarks features
+          "zfs clone bpool/boot@snap-1 bpool/test",             # Activate the livelist feature
+          "zfs bookmark bpool/boot@snap-1 bpool/boot#bookmark", # Activate the bookmarks feature
+          "zpool checkpoint bpool",                             # Activate the zpool_checkpoint feature
+          "mkdir -p /mnt/boot",
+          "mount -t zfs bpool/boot /mnt/boot",
+          "touch /mnt/boot/empty",                              # Activate zilsaxattr feature
+          "dd if=/dev/urandom of=/mnt/boot/test bs=1M count=1", # Activate the large_blocks feature
+
+          # Print out all enabled and active ZFS features (and some other stuff)
+          "sync /mnt/boot",
+          "zpool get all bpool >&2",
+
+          # Abort early if GRUB2 doesn't like the disks
+          "grub-probe --target=device /mnt/boot >&2",
+      )
+    '';
+
+    # umount & export bpool before shutdown
+    # this is a fix for "cannot import 'bpool': pool was previously in use from another system."
+    postInstallCommands = ''
+      machine.succeed("umount /mnt/boot")
+      machine.succeed("zpool export bpool")
+    '';
+  };
+
   # zfs on / with swap
   zfsroot = makeInstallerTest "zfs-root" {
     extraInstallerConfig = {
@@ -686,7 +966,7 @@ in {
     createPartitions = ''
       machine.succeed(
           "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
-          + " mkpart primary 1M 100MB"  # bpool
+          + " mkpart primary 1M 100MB"  # /boot
           + " mkpart primary linux-swap 100M 1024M"
           + " mkpart primary 1024M -1s", # rpool
           "udevadm settle",
@@ -698,20 +978,12 @@ in {
           "zfs create -o mountpoint=legacy rpool/root/usr",
           "mkdir /mnt/usr",
           "mount -t zfs rpool/root/usr /mnt/usr",
-          "zpool create -o compatibility=grub2 bpool /dev/vda1",
-          "zfs create -o mountpoint=legacy bpool/boot",
+          "mkfs.vfat -n BOOT /dev/vda1",
           "mkdir /mnt/boot",
-          "mount -t zfs bpool/boot /mnt/boot",
+          "mount LABEL=BOOT /mnt/boot",
           "udevadm settle",
       )
     '';
-
-    # umount & export bpool before shutdown
-    # this is a fix for "cannot import 'bpool': pool was previously in use from another system."
-    postInstallCommands = ''
-      machine.succeed("umount /mnt/boot")
-      machine.succeed("zpool export bpool")
-    '';
   };
 
   # Create two physical LVM partitions combined into one volume group
@@ -991,68 +1263,6 @@ in {
     '';
   };
 
-  bcachefsLinuxTesting = makeInstallerTest "bcachefs-linux-testing" {
-    extraInstallerConfig = {
-      imports = [ no-zfs-module ];
-
-      boot = {
-        supportedFilesystems = [ "bcachefs" ];
-        kernelPackages = pkgs.linuxPackages_testing;
-      };
-    };
-
-    extraConfig = ''
-      boot.kernelPackages = pkgs.linuxPackages_testing;
-    '';
-
-    createPartitions = ''
-      machine.succeed(
-        "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
-        + " mkpart primary ext2 1M 100MB"          # /boot
-        + " mkpart primary linux-swap 100M 1024M"  # swap
-        + " mkpart primary 1024M -1s",             # /
-        "udevadm settle",
-        "mkswap /dev/vda2 -L swap",
-        "swapon -L swap",
-        "mkfs.bcachefs -L root /dev/vda3",
-        "mount -t bcachefs /dev/vda3 /mnt",
-        "mkfs.ext3 -L boot /dev/vda1",
-        "mkdir -p /mnt/boot",
-        "mount /dev/vda1 /mnt/boot",
-      )
-    '';
-  };
-
-  bcachefsUpgradeToLinuxTesting = makeInstallerTest "bcachefs-upgrade-to-linux-testing" {
-    extraInstallerConfig = {
-      imports = [ no-zfs-module ];
-      boot.supportedFilesystems = [ "bcachefs" ];
-      # We don't have network access in the VM, we need this for `nixos-install`
-      system.extraDependencies = [ pkgs.linux_testing ];
-    };
-
-    extraConfig = ''
-      boot.kernelPackages = pkgs.linuxPackages_testing;
-    '';
-
-    createPartitions = ''
-      machine.succeed(
-        "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
-        + " mkpart primary ext2 1M 100MB"          # /boot
-        + " mkpart primary linux-swap 100M 1024M"  # swap
-        + " mkpart primary 1024M -1s",             # /
-        "udevadm settle",
-        "mkswap /dev/vda2 -L swap",
-        "swapon -L swap",
-        "mkfs.bcachefs -L root /dev/vda3",
-        "mount -t bcachefs /dev/vda3 /mnt",
-        "mkfs.ext3 -L boot /dev/vda1",
-        "mkdir -p /mnt/boot",
-        "mount /dev/vda1 /mnt/boot",
-      )
-    '';
-  };
-
   # Test using labels to identify volumes in grub
   simpleLabels = makeInstallerTest "simpleLabels" {
     createPartitions = ''
@@ -1175,6 +1385,13 @@ in {
       )
     '';
   };
+} // {
+  clevisBcachefs = mkClevisBcachefsTest { };
+  clevisBcachefsFallback = mkClevisBcachefsTest { fallback = true; };
+  clevisLuks = mkClevisLuksTest { };
+  clevisLuksFallback = mkClevisLuksTest { fallback = true; };
+  clevisZfs = mkClevisZfsTest { };
+  clevisZfsFallback = mkClevisZfsTest { fallback = true; };
 } // optionalAttrs systemdStage1 {
   stratisRoot = makeInstallerTest "stratisRoot" {
     createPartitions = ''
diff --git a/nixos/tests/invidious.nix b/nixos/tests/invidious.nix
index 701e8e5e7a3fc..e31cd87f6a004 100644
--- a/nixos/tests/invidious.nix
+++ b/nixos/tests/invidious.nix
@@ -5,48 +5,72 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     maintainers = [ sbruder ];
   };
 
-  nodes.machine = { config, lib, pkgs, ... }: {
-    services.invidious = {
-      enable = true;
+  nodes = {
+    postgres-tcp = { config, pkgs, ... }: {
+      services.postgresql = {
+        enable = true;
+        initialScript = pkgs.writeText "init-postgres-with-password" ''
+          CREATE USER invidious WITH PASSWORD 'correct horse battery staple';
+          CREATE DATABASE invidious WITH OWNER invidious;
+        '';
+        enableTCPIP = true;
+        authentication = ''
+          host invidious invidious samenet scram-sha-256
+        '';
+      };
+      networking.firewall.allowedTCPPorts = [ config.services.postgresql.port ];
     };
+    machine = { config, lib, pkgs, ... }: {
+      services.invidious = {
+        enable = true;
+      };
 
-    specialisation = {
-      nginx.configuration = {
-        services.invidious = {
-          nginx.enable = true;
-          domain = "invidious.example.com";
-        };
-        services.nginx.virtualHosts."invidious.example.com" = {
-          forceSSL = false;
-          enableACME = false;
+      specialisation = {
+        nginx.configuration = {
+          services.invidious = {
+            nginx.enable = true;
+            domain = "invidious.example.com";
+          };
+          services.nginx.virtualHosts."invidious.example.com" = {
+            forceSSL = false;
+            enableACME = false;
+          };
+          networking.hosts."127.0.0.1" = [ "invidious.example.com" ];
         };
-        networking.hosts."127.0.0.1" = [ "invidious.example.com" ];
-      };
-      postgres-tcp.configuration = {
-        services.invidious = {
-          database = {
-            createLocally = false;
-            host = "127.0.0.1";
-            passwordFile = toString (pkgs.writeText "database-password" "correct horse battery staple");
+        nginx-scale.configuration = {
+          services.invidious = {
+            nginx.enable = true;
+            domain = "invidious.example.com";
+            serviceScale = 3;
+          };
+          services.nginx.virtualHosts."invidious.example.com" = {
+            forceSSL = false;
+            enableACME = false;
           };
+          networking.hosts."127.0.0.1" = [ "invidious.example.com" ];
         };
-        # Normally not needed because when connecting to postgres over TCP/IP
-        # the database is most likely on another host.
-        systemd.services.invidious = {
-          after = [ "postgresql.service" ];
-          requires = [ "postgresql.service" ];
+        nginx-scale-ytproxy.configuration = {
+          services.invidious = {
+            nginx.enable = true;
+            http3-ytproxy.enable = true;
+            domain = "invidious.example.com";
+            serviceScale = 3;
+          };
+          services.nginx.virtualHosts."invidious.example.com" = {
+            forceSSL = false;
+            enableACME = false;
+          };
+          networking.hosts."127.0.0.1" = [ "invidious.example.com" ];
         };
-        services.postgresql =
-          let
-            inherit (config.services.invidious.settings.db) dbname user;
-          in
-          {
-            enable = true;
-            initialScript = pkgs.writeText "init-postgres-with-password" ''
-              CREATE USER kemal WITH PASSWORD 'correct horse battery staple';
-              CREATE DATABASE invidious OWNER kemal;
-            '';
+        postgres-tcp.configuration = {
+          services.invidious = {
+            database = {
+              createLocally = false;
+              host = "postgres-tcp";
+              passwordFile = toString (pkgs.writeText "database-password" "correct horse battery staple");
+            };
           };
+        };
       };
     };
   };
@@ -63,6 +87,9 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     url = "http://localhost:${toString nodes.machine.config.services.invidious.port}"
     port = ${toString nodes.machine.config.services.invidious.port}
 
+    # start postgres vm now
+    postgres_tcp.start()
+
     machine.wait_for_open_port(port)
     curl_assert_status_code(f"{url}/search", 200)
 
@@ -70,9 +97,26 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     machine.wait_for_open_port(80)
     curl_assert_status_code("http://invidious.example.com/search", 200)
 
-    # Remove the state so the `initialScript` gets run
-    machine.succeed("systemctl stop postgresql")
-    machine.succeed("rm -r /var/lib/postgresql")
+    activate_specialisation("nginx-scale")
+    machine.wait_for_open_port(80)
+    # this depends on nginx round-robin behaviour for the upstream servers
+    curl_assert_status_code("http://invidious.example.com/search", 200)
+    curl_assert_status_code("http://invidious.example.com/search", 200)
+    curl_assert_status_code("http://invidious.example.com/search", 200)
+    machine.succeed("journalctl -eu invidious.service | grep -o '200 GET /search'")
+    machine.succeed("journalctl -eu invidious-1.service | grep -o '200 GET /search'")
+    machine.succeed("journalctl -eu invidious-2.service | grep -o '200 GET /search'")
+
+    activate_specialisation("nginx-scale-ytproxy")
+    machine.wait_for_unit("http3-ytproxy.service")
+    machine.wait_for_open_port(80)
+    machine.wait_until_succeeds("ls /run/http3-ytproxy/socket/http-proxy.sock")
+    curl_assert_status_code("http://invidious.example.com/search", 200)
+    # this should error out as no internet connectivity is available in the test
+    curl_assert_status_code("http://invidious.example.com/vi/dQw4w9WgXcQ/mqdefault.jpg", 502)
+    machine.succeed("journalctl -eu http3-ytproxy.service | grep -o 'dQw4w9WgXcQ'")
+
+    postgres_tcp.wait_for_unit("postgresql.service")
     activate_specialisation("postgres-tcp")
     machine.wait_for_open_port(port)
     curl_assert_status_code(f"{url}/search", 200)
diff --git a/nixos/tests/invoiceplane.nix b/nixos/tests/invoiceplane.nix
index 70ed96ee39f35..0b51707171996 100644
--- a/nixos/tests/invoiceplane.nix
+++ b/nixos/tests/invoiceplane.nix
@@ -27,56 +27,80 @@ import ./make-test-python.nix ({ pkgs, ... }:
       networking.firewall.allowedTCPPorts = [ 80 ];
       networking.hosts."127.0.0.1" = [ "site1.local" "site2.local" ];
     };
+
+    invoiceplane_nginx = { ... }: {
+      services.invoiceplane.webserver = "nginx";
+      services.invoiceplane.sites = {
+        "site1.local" = {
+          database.name = "invoiceplane1";
+          database.createLocally = true;
+          enable = true;
+        };
+        "site2.local" = {
+          database.name = "invoiceplane2";
+          database.createLocally = true;
+          enable = true;
+        };
+      };
+
+      networking.firewall.allowedTCPPorts = [ 80 ];
+      networking.hosts."127.0.0.1" = [ "site1.local" "site2.local" ];
+    };
   };
 
   testScript = ''
     start_all()
 
     invoiceplane_caddy.wait_for_unit("caddy")
-    invoiceplane_caddy.wait_for_open_port(80)
-    invoiceplane_caddy.wait_for_open_port(3306)
+    invoiceplane_nginx.wait_for_unit("nginx")
 
     site_names = ["site1.local", "site2.local"]
 
-    for site_name in site_names:
-        machine.wait_for_unit(f"phpfpm-invoiceplane-{site_name}")
+    machines = [invoiceplane_caddy, invoiceplane_nginx]
+
+    for machine in machines:
+      machine.wait_for_open_port(80)
+      machine.wait_for_open_port(3306)
+
+      for site_name in site_names:
+          machine.wait_for_unit(f"phpfpm-invoiceplane-{site_name}")
 
-        with subtest("Website returns welcome screen"):
-            assert "Please install InvoicePlane" in machine.succeed(f"curl -L {site_name}")
+          with subtest("Website returns welcome screen"):
+              assert "Please install InvoicePlane" in machine.succeed(f"curl -L {site_name}")
 
-        with subtest("Finish InvoicePlane setup"):
-          machine.succeed(
-            f"curl -sSfL --cookie-jar cjar {site_name}/setup/language"
-          )
-          csrf_token = machine.succeed(
-            "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
-          )
-          machine.succeed(
-            f"curl -sSfL --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&ip_lang=english&btn_continue=Continue' {site_name}/setup/language"
-          )
-          csrf_token = machine.succeed(
-            "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
-          )
-          machine.succeed(
-            f"curl -sSfL --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/prerequisites"
-          )
-          csrf_token = machine.succeed(
-            "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
-          )
-          machine.succeed(
-            f"curl -sSfL --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/configure_database"
-          )
-          csrf_token = machine.succeed(
-            "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
-          )
-          machine.succeed(
-            f"curl -sSfl --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/install_tables"
-          )
-          csrf_token = machine.succeed(
-            "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
-          )
-          machine.succeed(
-            f"curl -sSfl --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/upgrade_tables"
-          )
+          with subtest("Finish InvoicePlane setup"):
+            machine.succeed(
+              f"curl -sSfL --cookie-jar cjar {site_name}/setup/language"
+            )
+            csrf_token = machine.succeed(
+              "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
+            )
+            machine.succeed(
+              f"curl -sSfL --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&ip_lang=english&btn_continue=Continue' {site_name}/setup/language"
+            )
+            csrf_token = machine.succeed(
+              "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
+            )
+            machine.succeed(
+              f"curl -sSfL --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/prerequisites"
+            )
+            csrf_token = machine.succeed(
+              "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
+            )
+            machine.succeed(
+              f"curl -sSfL --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/configure_database"
+            )
+            csrf_token = machine.succeed(
+              "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
+            )
+            machine.succeed(
+              f"curl -sSfl --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/install_tables"
+            )
+            csrf_token = machine.succeed(
+              "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
+            )
+            machine.succeed(
+              f"curl -sSfl --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/upgrade_tables"
+            )
   '';
 })
diff --git a/nixos/tests/iscsi-root.nix b/nixos/tests/iscsi-root.nix
index eb0719edc3796..0d7c48464eecc 100644
--- a/nixos/tests/iscsi-root.nix
+++ b/nixos/tests/iscsi-root.nix
@@ -7,8 +7,8 @@ import ./make-test-python.nix (
       {
         name = "iscsi";
         meta = {
-          maintainers = pkgs.lib.teams.deshaw.members
-          ++ (with pkgs.lib.maintainers; [ ajs124 ]);
+          maintainers = lib.teams.deshaw.members
+            ++ lib.teams.helsinki-systems.members;
         };
 
         nodes = {
diff --git a/nixos/tests/kanidm.nix b/nixos/tests/kanidm.nix
index 3f5bca397740e..fa24d4a8a5e13 100644
--- a/nixos/tests/kanidm.nix
+++ b/nixos/tests/kanidm.nix
@@ -67,6 +67,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
       ''
         start_all()
         server.wait_for_unit("kanidm.service")
+        client.systemctl("start network-online.target")
         client.wait_for_unit("network-online.target")
 
         with subtest("Test HTTP interface"):
diff --git a/nixos/tests/kerberos/heimdal.nix b/nixos/tests/kerberos/heimdal.nix
index 47f9d0285aef7..393289f7a92ca 100644
--- a/nixos/tests/kerberos/heimdal.nix
+++ b/nixos/tests/kerberos/heimdal.nix
@@ -1,5 +1,6 @@
 import ../make-test-python.nix ({pkgs, ...}: {
   name = "kerberos_server-heimdal";
+
   nodes.machine = { config, libs, pkgs, ...}:
   { services.kerberos_server =
     { enable = true;
@@ -7,16 +8,18 @@ import ../make-test-python.nix ({pkgs, ...}: {
         "FOO.BAR".acl = [{principal = "admin"; access = ["add" "cpw"];}];
       };
     };
-    krb5 = {
+    security.krb5 = {
       enable = true;
-      kerberos = pkgs.heimdal;
-      libdefaults = {
-        default_realm = "FOO.BAR";
-      };
-      realms = {
-        "FOO.BAR" = {
-          admin_server = "machine";
-          kdc = "machine";
+      package = pkgs.heimdal;
+      settings = {
+        libdefaults = {
+          default_realm = "FOO.BAR";
+        };
+        realms = {
+          "FOO.BAR" = {
+            admin_server = "machine";
+            kdc = "machine";
+          };
         };
       };
     };
@@ -39,4 +42,6 @@ import ../make-test-python.nix ({pkgs, ...}: {
         "kinit -kt alice.keytab alice",
     )
   '';
+
+  meta.maintainers = [ pkgs.lib.maintainers.dblsaiko ];
 })
diff --git a/nixos/tests/kerberos/mit.nix b/nixos/tests/kerberos/mit.nix
index 7e427ffef0ba8..1191d047abbf0 100644
--- a/nixos/tests/kerberos/mit.nix
+++ b/nixos/tests/kerberos/mit.nix
@@ -1,5 +1,6 @@
 import ../make-test-python.nix ({pkgs, ...}: {
   name = "kerberos_server-mit";
+
   nodes.machine = { config, libs, pkgs, ...}:
   { services.kerberos_server =
     { enable = true;
@@ -7,16 +8,18 @@ import ../make-test-python.nix ({pkgs, ...}: {
         "FOO.BAR".acl = [{principal = "admin"; access = ["add" "cpw"];}];
       };
     };
-    krb5 = {
+    security.krb5 = {
       enable = true;
-      kerberos = pkgs.krb5;
-      libdefaults = {
-        default_realm = "FOO.BAR";
-      };
-      realms = {
-        "FOO.BAR" = {
-          admin_server = "machine";
-          kdc = "machine";
+      package = pkgs.krb5;
+      settings = {
+        libdefaults = {
+          default_realm = "FOO.BAR";
+        };
+        realms = {
+          "FOO.BAR" = {
+            admin_server = "machine";
+            kdc = "machine";
+          };
         };
       };
     };
@@ -38,4 +41,6 @@ import ../make-test-python.nix ({pkgs, ...}: {
         "echo alice_pw | sudo -u alice kinit",
     )
   '';
+
+  meta.maintainers = [ pkgs.lib.maintainers.dblsaiko ];
 })
diff --git a/nixos/tests/kernel-generic.nix b/nixos/tests/kernel-generic.nix
index 352deb521a478..72d31246b75d3 100644
--- a/nixos/tests/kernel-generic.nix
+++ b/nixos/tests/kernel-generic.nix
@@ -31,6 +31,7 @@ let
       linux_5_15_hardened
       linux_6_1_hardened
       linux_6_5_hardened
+      linux_6_6_hardened
       linux_rt_5_4
       linux_rt_5_10
       linux_rt_5_15
diff --git a/nixos/tests/krb5/default.nix b/nixos/tests/krb5/default.nix
index dd5b2f37202e8..ede085632c634 100644
--- a/nixos/tests/krb5/default.nix
+++ b/nixos/tests/krb5/default.nix
@@ -1,5 +1,4 @@
 { system ? builtins.currentSystem }:
 {
   example-config = import ./example-config.nix { inherit system; };
-  deprecated-config = import ./deprecated-config.nix { inherit system; };
 }
diff --git a/nixos/tests/krb5/deprecated-config.nix b/nixos/tests/krb5/deprecated-config.nix
deleted file mode 100644
index aca29ae6ca2b2..0000000000000
--- a/nixos/tests/krb5/deprecated-config.nix
+++ /dev/null
@@ -1,50 +0,0 @@
-# Verifies that the configuration suggested in deprecated example values
-# will result in the expected output.
-
-import ../make-test-python.nix ({ pkgs, ...} : {
-  name = "krb5-with-deprecated-config";
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ eqyiel ];
-  };
-
-  nodes.machine =
-    { ... }: {
-      krb5 = {
-        enable = true;
-        defaultRealm = "ATHENA.MIT.EDU";
-        domainRealm = "athena.mit.edu";
-        kdc = "kerberos.mit.edu";
-        kerberosAdminServer = "kerberos.mit.edu";
-      };
-    };
-
-  testScript =
-    let snapshot = pkgs.writeText "krb5-with-deprecated-config.conf" ''
-      [libdefaults]
-        default_realm = ATHENA.MIT.EDU
-
-      [realms]
-        ATHENA.MIT.EDU = {
-          admin_server = kerberos.mit.edu
-          kdc = kerberos.mit.edu
-        }
-
-      [domain_realm]
-        .athena.mit.edu = ATHENA.MIT.EDU
-        athena.mit.edu = ATHENA.MIT.EDU
-
-      [capaths]
-
-
-      [appdefaults]
-
-
-      [plugins]
-
-    '';
-  in ''
-    machine.succeed(
-        "diff /etc/krb5.conf ${snapshot}"
-    )
-  '';
-})
diff --git a/nixos/tests/krb5/example-config.nix b/nixos/tests/krb5/example-config.nix
index 9a5c3b2af2490..33bed481b39fc 100644
--- a/nixos/tests/krb5/example-config.nix
+++ b/nixos/tests/krb5/example-config.nix
@@ -4,78 +4,77 @@
 import ../make-test-python.nix ({ pkgs, ...} : {
   name = "krb5-with-example-config";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ eqyiel ];
+    maintainers = [ eqyiel dblsaiko ];
   };
 
   nodes.machine =
     { pkgs, ... }: {
-      krb5 = {
+      security.krb5 = {
         enable = true;
-        kerberos = pkgs.krb5;
-        libdefaults = {
-          default_realm = "ATHENA.MIT.EDU";
-        };
-        realms = {
-          "ATHENA.MIT.EDU" = {
-            admin_server = "athena.mit.edu";
-            kdc = [
-              "athena01.mit.edu"
-              "athena02.mit.edu"
-            ];
+        package = pkgs.krb5;
+        settings = {
+          includedir = [
+            "/etc/krb5.conf.d"
+          ];
+          include = [
+            "/etc/krb5-extra.conf"
+          ];
+          libdefaults = {
+            default_realm = "ATHENA.MIT.EDU";
           };
-        };
-        domain_realm = {
-          "example.com" = "EXAMPLE.COM";
-          ".example.com" = "EXAMPLE.COM";
-        };
-        capaths = {
-          "ATHENA.MIT.EDU" = {
-            "EXAMPLE.COM" = ".";
+          realms = {
+            "ATHENA.MIT.EDU" = {
+              admin_server = "athena.mit.edu";
+              kdc = [
+                "athena01.mit.edu"
+                "athena02.mit.edu"
+              ];
+            };
           };
-          "EXAMPLE.COM" = {
-            "ATHENA.MIT.EDU" = ".";
+          domain_realm = {
+            "example.com" = "EXAMPLE.COM";
+            ".example.com" = "EXAMPLE.COM";
           };
-        };
-        appdefaults = {
-          pam = {
-            debug = false;
-            ticket_lifetime = 36000;
-            renew_lifetime = 36000;
-            max_timeout = 30;
-            timeout_shift = 2;
-            initial_timeout = 1;
+          capaths = {
+            "ATHENA.MIT.EDU" = {
+              "EXAMPLE.COM" = ".";
+            };
+            "EXAMPLE.COM" = {
+              "ATHENA.MIT.EDU" = ".";
+            };
           };
-        };
-        plugins = {
-          ccselect = {
-            disable = "k5identity";
+          appdefaults = {
+            pam = {
+              debug = false;
+              ticket_lifetime = 36000;
+              renew_lifetime = 36000;
+              max_timeout = 30;
+              timeout_shift = 2;
+              initial_timeout = 1;
+            };
+          };
+          plugins.ccselect.disable = "k5identity";
+          logging = {
+            kdc = "SYSLOG:NOTICE";
+            admin_server = "SYSLOG:NOTICE";
+            default = "SYSLOG:NOTICE";
           };
         };
-        extraConfig = ''
-          [logging]
-            kdc          = SYSLOG:NOTICE
-            admin_server = SYSLOG:NOTICE
-            default      = SYSLOG:NOTICE
-        '';
       };
     };
 
   testScript =
     let snapshot = pkgs.writeText "krb5-with-example-config.conf" ''
-      [libdefaults]
-        default_realm = ATHENA.MIT.EDU
-
-      [realms]
-        ATHENA.MIT.EDU = {
-          admin_server = athena.mit.edu
-          kdc = athena01.mit.edu
-          kdc = athena02.mit.edu
+      [appdefaults]
+        pam = {
+          debug = false
+          initial_timeout = 1
+          max_timeout = 30
+          renew_lifetime = 36000
+          ticket_lifetime = 36000
+          timeout_shift = 2
         }
 
-      [domain_realm]
-        .example.com = EXAMPLE.COM
-        example.com = EXAMPLE.COM
-
       [capaths]
         ATHENA.MIT.EDU = {
           EXAMPLE.COM = .
@@ -84,25 +83,32 @@ import ../make-test-python.nix ({ pkgs, ...} : {
           ATHENA.MIT.EDU = .
         }
 
-      [appdefaults]
-        pam = {
-          debug = false
-          initial_timeout = 1
-          max_timeout = 30
-          renew_lifetime = 36000
-          ticket_lifetime = 36000
-          timeout_shift = 2
-        }
+      [domain_realm]
+        .example.com = EXAMPLE.COM
+        example.com = EXAMPLE.COM
+
+      [libdefaults]
+        default_realm = ATHENA.MIT.EDU
+
+      [logging]
+        admin_server = SYSLOG:NOTICE
+        default = SYSLOG:NOTICE
+        kdc = SYSLOG:NOTICE
 
       [plugins]
         ccselect = {
           disable = k5identity
         }
 
-      [logging]
-        kdc          = SYSLOG:NOTICE
-        admin_server = SYSLOG:NOTICE
-        default      = SYSLOG:NOTICE
+      [realms]
+        ATHENA.MIT.EDU = {
+          admin_server = athena.mit.edu
+          kdc = athena01.mit.edu
+          kdc = athena02.mit.edu
+        }
+
+      include /etc/krb5-extra.conf
+      includedir /etc/krb5.conf.d
     '';
   in ''
     machine.succeed(
diff --git a/nixos/tests/kubo/default.nix b/nixos/tests/kubo/default.nix
index 629922fc366db..d8c0c69dc1fbd 100644
--- a/nixos/tests/kubo/default.nix
+++ b/nixos/tests/kubo/default.nix
@@ -1,5 +1,7 @@
 { recurseIntoAttrs, runTest }:
 recurseIntoAttrs {
   kubo = runTest ./kubo.nix;
-  kubo-fuse = runTest ./kubo-fuse.nix;
+  # The FUSE functionality has been completely broken since Kubo v0.24.0
+  # See https://github.com/ipfs/kubo/issues/10242
+  # kubo-fuse = runTest ./kubo-fuse.nix;
 }
diff --git a/nixos/tests/kubo/kubo.nix b/nixos/tests/kubo/kubo.nix
index 7965ad2773854..b8222c652b33c 100644
--- a/nixos/tests/kubo/kubo.nix
+++ b/nixos/tests/kubo/kubo.nix
@@ -46,6 +46,13 @@
             f"ipfs --api /unix/run/ipfs.sock cat /ipfs/{ipfs_hash.strip()} | grep fnord2"
         )
 
+    machine.stop_job("ipfs")
+
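+    # with the daemon stopped, the gateway request below must be served via systemd socket activation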
+    with subtest("Socket activation for the Gateway"):
+        machine.succeed(
+            f"curl 'http://127.0.0.1:8080/ipfs/{ipfs_hash.strip()}' | grep fnord2"
+        )
+
     with subtest("Setting dataDir works properly with the hardened systemd unit"):
         machine.succeed("test -e /mnt/ipfs/config")
         machine.succeed("test ! -e /var/lib/ipfs/")
diff --git a/nixos/tests/lemmy.nix b/nixos/tests/lemmy.nix
index de2c4938fe231..e8d747f89a9e7 100644
--- a/nixos/tests/lemmy.nix
+++ b/nixos/tests/lemmy.nix
@@ -59,6 +59,7 @@ in
         server.succeed("curl --fail localhost:${toString uiPort}")
 
     with subtest("Lemmy-UI responds through the caddy reverse proxy"):
+        server.systemctl("start network-online.target")
         server.wait_for_unit("network-online.target")
         server.wait_for_unit("caddy.service")
         server.wait_for_open_port(80)
@@ -66,6 +67,7 @@ in
         assert "Lemmy" in body, f"String Lemmy not found in response for ${lemmyNodeName}: \n{body}"
 
     with subtest("the server is exposed externally"):
+        client.systemctl("start network-online.target")
         client.wait_for_unit("network-online.target")
         client.succeed("curl -v --fail ${lemmyNodeName}")
 
diff --git a/nixos/tests/livebook-service.nix b/nixos/tests/livebook-service.nix
index 9397e3cb75ffa..56b4eb932f343 100644
--- a/nixos/tests/livebook-service.nix
+++ b/nixos/tests/livebook-service.nix
@@ -11,7 +11,7 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
         enableUserService = true;
         port = 20123;
         environmentFile = pkgs.writeText "livebook.env" ''
-          LIVEBOOK_PASSWORD = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
+          LIVEBOOK_PASSWORD = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
         '';
         options = {
           cookie = "chocolate chip";
@@ -22,7 +22,7 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
 
   testScript = { nodes, ... }:
     let
-      user = nodes.machine.config.users.users.alice;
+      user = nodes.machine.users.users.alice;
       sudo = lib.concatStringsSep " " [
         "XDG_RUNTIME_DIR=/run/user/${toString user.uid}"
         "sudo"
diff --git a/nixos/tests/lvm2/systemd-stage-1.nix b/nixos/tests/lvm2/systemd-stage-1.nix
index b581f2b23507e..1c95aadfcb3f1 100644
--- a/nixos/tests/lvm2/systemd-stage-1.nix
+++ b/nixos/tests/lvm2/systemd-stage-1.nix
@@ -54,9 +54,9 @@
     '';
   }.${flavour};
 
-in import ../make-test-python.nix ({ pkgs, ... }: {
+in import ../make-test-python.nix ({ pkgs, lib, ... }: {
   name = "lvm2-${flavour}-systemd-stage-1";
-  meta.maintainers = with pkgs.lib.maintainers; [ das_j ];
+  meta.maintainers = lib.teams.helsinki-systems.members;
 
   nodes.machine = { pkgs, lib, ... }: {
     imports = [ extraConfig ];
diff --git a/nixos/tests/lvm2/thinpool.nix b/nixos/tests/lvm2/thinpool.nix
index 14781a8a60459..f49c8980613ce 100644
--- a/nixos/tests/lvm2/thinpool.nix
+++ b/nixos/tests/lvm2/thinpool.nix
@@ -1,7 +1,7 @@
 { kernelPackages ? null }:
 import ../make-test-python.nix ({ pkgs, lib, ... }: {
   name = "lvm2-thinpool";
-  meta.maintainers = with pkgs.lib.maintainers; [ ajs124 ];
+  meta.maintainers = lib.teams.helsinki-systems.members;
 
   nodes.machine = { pkgs, lib, ... }: {
     virtualisation.emptyDiskImages = [ 4096 ];
diff --git a/nixos/tests/lvm2/vdo.nix b/nixos/tests/lvm2/vdo.nix
index 5b014c2f72223..75c1fc094e97f 100644
--- a/nixos/tests/lvm2/vdo.nix
+++ b/nixos/tests/lvm2/vdo.nix
@@ -1,7 +1,7 @@
 { kernelPackages ? null }:
-import ../make-test-python.nix ({ pkgs, ... }: {
+import ../make-test-python.nix ({ pkgs, lib, ... }: {
   name = "lvm2-vdo";
-  meta.maintainers = with pkgs.lib.maintainers; [ ajs124 ];
+  meta.maintainers = lib.teams.helsinki-systems.members;
 
   nodes.machine = { pkgs, lib, ... }: {
     # Minimum required size for VDO volume: 5063921664 bytes
diff --git a/nixos/tests/lxd/container.nix b/nixos/tests/lxd/container.nix
index 0ebe73d872f2b..ef9c3f4bbee7e 100644
--- a/nixos/tests/lxd/container.nix
+++ b/nixos/tests/lxd/container.nix
@@ -18,8 +18,8 @@ let
 in {
   name = "lxd-container";
 
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ patryk27 adamcstephens ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
   };
 
   nodes.machine = { lib, ... }: {
diff --git a/nixos/tests/lxd/nftables.nix b/nixos/tests/lxd/nftables.nix
index d98bd4952906b..e6ce4089d719d 100644
--- a/nixos/tests/lxd/nftables.nix
+++ b/nixos/tests/lxd/nftables.nix
@@ -5,11 +5,11 @@
 # iptables to nftables requires a full reboot, which is a bit hard inside NixOS
 # tests.
 
-import ../make-test-python.nix ({ pkgs, ...} : {
+import ../make-test-python.nix ({ pkgs, lib, ...} : {
   name = "lxd-nftables";
 
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ patryk27 ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
   };
 
   nodes.machine = { lib, ... }: {
diff --git a/nixos/tests/lxd/preseed.nix b/nixos/tests/lxd/preseed.nix
index 7d89b9f56daa4..fb80dcf3893e4 100644
--- a/nixos/tests/lxd/preseed.nix
+++ b/nixos/tests/lxd/preseed.nix
@@ -4,7 +4,7 @@ import ../make-test-python.nix ({ pkgs, lib, ... } :
   name = "lxd-preseed";
 
   meta = {
-    maintainers = with lib.maintainers; [ adamcstephens ];
+    maintainers = lib.teams.lxc.members;
   };
 
   nodes.machine = { lib, ... }: {
diff --git a/nixos/tests/lxd/ui.nix b/nixos/tests/lxd/ui.nix
index ff651725ba705..c442f44ab81cd 100644
--- a/nixos/tests/lxd/ui.nix
+++ b/nixos/tests/lxd/ui.nix
@@ -1,8 +1,8 @@
 import ../make-test-python.nix ({ pkgs, lib, ... }: {
   name = "lxd-ui";
 
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ jnsgruk ];
+  meta = {
+    maintainers = lib.teams.lxc.members;
   };
 
   nodes.machine = { lib, ... }: {
diff --git a/nixos/tests/lxd/virtual-machine.nix b/nixos/tests/lxd/virtual-machine.nix
index 93705e9350c5a..2a9dd8fcdbf61 100644
--- a/nixos/tests/lxd/virtual-machine.nix
+++ b/nixos/tests/lxd/virtual-machine.nix
@@ -18,8 +18,8 @@ let
 in {
   name = "lxd-virtual-machine";
 
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [adamcstephens];
+  meta = {
+    maintainers = lib.teams.lxc.members;
   };
 
   nodes.machine = {lib, ...}: {
diff --git a/nixos/tests/mate.nix b/nixos/tests/mate.nix
index 78ba59c5fc20d..48582e18d520c 100644
--- a/nixos/tests/mate.nix
+++ b/nixos/tests/mate.nix
@@ -27,9 +27,12 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     hardware.pulseaudio.enable = true;
   };
 
+  enableOCR = true;
+
   testScript = { nodes, ... }:
     let
       user = nodes.machine.users.users.alice;
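+      # run the desktop commands below as the test user with the session's display and D-Bus address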
+      env = "DISPLAY=:0.0 DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus";
     in
     ''
       with subtest("Wait for login"):
@@ -48,11 +51,31 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
           machine.wait_for_window("Bottom Panel")
           machine.wait_until_succeeds("pgrep caja")
           machine.wait_for_window("Caja")
+          machine.wait_for_text('(Applications|Places|System)')
+          machine.wait_for_text('(Computer|Home|Trash)')
+
+      with subtest("Lock the screen"):
+          machine.wait_until_succeeds("su - ${user.name} -c '${env} mate-screensaver-command -q' | grep 'The screensaver is inactive'")
+          machine.succeed("su - ${user.name} -c '${env} mate-screensaver-command -l >&2 &'")
+          machine.wait_until_succeeds("su - ${user.name} -c '${env} mate-screensaver-command -q' | grep 'The screensaver is active'")
+          machine.sleep(2)
+          machine.send_chars("${user.password}", delay=0.2)
+          machine.wait_for_text("${user.description}")
+          machine.screenshot("screensaver")
+          machine.send_chars("\n")
+          machine.wait_until_succeeds("su - ${user.name} -c '${env} mate-screensaver-command -q' | grep 'The screensaver is inactive'")
+
+      with subtest("Open MATE control center"):
+          machine.succeed("su - ${user.name} -c '${env} mate-control-center >&2 &'")
+          machine.wait_for_window("Control Center")
+          machine.wait_for_text('(Groups|Administration|Hardware)')
 
       with subtest("Open MATE terminal"):
-          machine.succeed("su - ${user.name} -c 'DISPLAY=:0.0 mate-terminal >&2 &'")
+          machine.succeed("su - ${user.name} -c '${env} mate-terminal >&2 &'")
           machine.wait_for_window("Terminal")
-          machine.sleep(20)
+
+      with subtest("Check if MATE has ever coredumped"):
+          machine.fail("coredumpctl --json=short | grep -E 'mate|marco|caja'")
           machine.screenshot("screen")
     '';
 })
diff --git a/nixos/tests/miriway.nix b/nixos/tests/miriway.nix
index f12c4d5ecc41e..a0987d9fc41b6 100644
--- a/nixos/tests/miriway.nix
+++ b/nixos/tests/miriway.nix
@@ -31,7 +31,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         enable-x11=
 
         ctrl-alt=t:foot --maximized
-        ctrl-alt=a:env WINIT_UNIX_BACKEND=x11 WAYLAND_DISPLAY=invalid alacritty --option window.startup_mode=maximized
+        ctrl-alt=a:env WINIT_UNIX_BACKEND=x11 WAYLAND_DISPLAY= alacritty --option window.startup_mode=\"maximized\"
 
         shell-component=dbus-update-activation-environment --systemd DISPLAY WAYLAND_DISPLAY
 
diff --git a/nixos/tests/mobilizon.nix b/nixos/tests/mobilizon.nix
index 398c8530dc565..2b070ca9d9609 100644
--- a/nixos/tests/mobilizon.nix
+++ b/nixos/tests/mobilizon.nix
@@ -10,7 +10,7 @@ import ./make-test-python.nix ({ lib, ... }:
     meta.maintainers = with lib.maintainers; [ minijackson erictapen ];
 
     nodes.server =
-      { pkgs, ... }:
+      { ... }:
       {
         services.mobilizon = {
           enable = true;
@@ -25,8 +25,6 @@ import ./make-test-python.nix ({ lib, ... }:
           };
         };
 
-        services.postgresql.package = pkgs.postgresql_14;
-
         security.pki.certificateFiles = [ certs.ca.cert ];
 
         services.nginx.virtualHosts."${mobilizonDomain}" = {
diff --git a/nixos/tests/mongodb.nix b/nixos/tests/mongodb.nix
index 1afc891817aff..68be6926865ec 100644
--- a/nixos/tests/mongodb.nix
+++ b/nixos/tests/mongodb.nix
@@ -27,7 +27,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
   in {
     name = "mongodb";
     meta = with pkgs.lib.maintainers; {
-      maintainers = [ bluescreen303 offline rvl phile314 ];
+      maintainers = [ bluescreen303 offline phile314 ];
     };
 
     nodes = {
diff --git a/nixos/tests/munin.nix b/nixos/tests/munin.nix
index 4ec17e0339df0..e371b2dffa6b8 100644
--- a/nixos/tests/munin.nix
+++ b/nixos/tests/munin.nix
@@ -37,8 +37,10 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     with subtest("ensure munin-node starts and listens on 4949"):
         one.wait_for_unit("munin-node.service")
         one.wait_for_open_port(4949)
+
     with subtest("ensure munin-cron output is correct"):
         one.wait_for_file("/var/lib/munin/one/one-uptime-uptime-g.rrd")
         one.wait_for_file("/var/www/munin/one/index.html")
+        one.wait_for_file("/var/www/munin/one/one/diskstat_iops_vda-day.png", timeout=60)
   '';
 })
diff --git a/nixos/tests/musescore.nix b/nixos/tests/musescore.nix
index 6aeb0558a49db..0720631ed284b 100644
--- a/nixos/tests/musescore.nix
+++ b/nixos/tests/musescore.nix
@@ -63,14 +63,11 @@ in
 
     machine.send_key("tab")
     machine.send_key("tab")
-    machine.send_key("tab")
-    machine.send_key("tab")
-    machine.send_key("right")
-    machine.send_key("right")
     machine.send_key("ret")
 
-    machine.sleep(1)
+    machine.sleep(2)
 
+    machine.send_key("tab")
     # Type the beginning of https://de.wikipedia.org/wiki/Alle_meine_Entchen
     machine.send_chars("cdef6gg5aaaa7g")
     machine.sleep(1)
diff --git a/nixos/tests/mympd.nix b/nixos/tests/mympd.nix
new file mode 100644
index 0000000000000..ac6a896966e6b
--- /dev/null
+++ b/nixos/tests/mympd.nix
@@ -0,0 +1,27 @@
+import ./make-test-python.nix ({pkgs, lib, ... }: {
+  name = "mympd";
+
+  nodes.mympd = {
+    services.mympd = {
+      enable = true;
+      settings = {
+        http_port = 8081;
+      };
+    };
+
+    services.mpd.enable = true;
+  };
+
+  testScript = ''
+    start_all();
+    machine.wait_for_unit("mympd.service");
+
+    # Ensure that mympd can connect to mpd
+    machine.wait_until_succeeds(
+      "journalctl -eu mympd -o cat | grep 'Connected to MPD'"
+    )
+
+    # Ensure that the web server is working
+    machine.succeed("curl http://localhost:8081 --compressed | grep -o myMPD")
+  '';
+})
diff --git a/nixos/tests/mysql/mariadb-galera.nix b/nixos/tests/mysql/mariadb-galera.nix
index c9962f49c02fd..7455abbce5fb0 100644
--- a/nixos/tests/mysql/mariadb-galera.nix
+++ b/nixos/tests/mysql/mariadb-galera.nix
@@ -17,8 +17,8 @@ let
     galeraPackage ? pkgs.mariadb-galera
   }: makeTest {
     name = "${name}-galera-mariabackup";
-    meta = with pkgs.lib.maintainers; {
-      maintainers = [ izorkin ajs124 das_j ];
+    meta = {
+      maintainers = with lib.maintainers; [ izorkin ] ++ lib.teams.helsinki-systems.members;
     };
 
     # The test creates a Galera cluster with 3 nodes and is checking if mariabackup-based SST works. The cluster is tested by creating a DB and an empty table on one node,
diff --git a/nixos/tests/mysql/mysql-backup.nix b/nixos/tests/mysql/mysql-backup.nix
index 968f56dd3c9bd..451f5c04ce467 100644
--- a/nixos/tests/mysql/mysql-backup.nix
+++ b/nixos/tests/mysql/mysql-backup.nix
@@ -15,9 +15,6 @@ let
     name ? mkTestName package
   }: makeTest {
     name = "${name}-backup";
-    meta = with pkgs.lib.maintainers; {
-      maintainers = [ rvl ];
-    };
 
     nodes = {
       master = { pkgs, ... }: {
diff --git a/nixos/tests/mysql/mysql-replication.nix b/nixos/tests/mysql/mysql-replication.nix
index 8f1695eb97e27..83da1e7b6cb88 100644
--- a/nixos/tests/mysql/mysql-replication.nix
+++ b/nixos/tests/mysql/mysql-replication.nix
@@ -18,8 +18,8 @@ let
     name ? mkTestName package,
   }: makeTest {
     name = "${name}-replication";
-    meta = with pkgs.lib.maintainers; {
-      maintainers = [ ajs124 das_j ];
+    meta = {
+      maintainers = lib.teams.helsinki-systems.members;
     };
 
     nodes = {
diff --git a/nixos/tests/mysql/mysql.nix b/nixos/tests/mysql/mysql.nix
index 3e059cad09e97..0a61f9d38fe2e 100644
--- a/nixos/tests/mysql/mysql.nix
+++ b/nixos/tests/mysql/mysql.nix
@@ -18,8 +18,8 @@ let
     hasRocksDB ? pkgs.stdenv.hostPlatform.is64bit
   }: makeTest {
     inherit name;
-    meta = with lib.maintainers; {
-      maintainers = [ ajs124 das_j ];
+    meta = {
+      maintainers = lib.teams.helsinki-systems.members;
     };
 
     nodes = {
diff --git a/nixos/tests/networking.nix b/nixos/tests/networking.nix
index 768d0cfa2238c..6bd89902eedb3 100644
--- a/nixos/tests/networking.nix
+++ b/nixos/tests/networking.nix
@@ -130,6 +130,7 @@ let
           start_all()
 
           client.wait_for_unit("network.target")
+          router.systemctl("start network-online.target")
           router.wait_for_unit("network-online.target")
 
           with subtest("Make sure DHCP server is not started"):
@@ -222,6 +223,7 @@ let
           start_all()
 
           client.wait_for_unit("network.target")
+          router.systemctl("start network-online.target")
           router.wait_for_unit("network-online.target")
 
           with subtest("Wait until we have an ip address on each interface"):
@@ -849,6 +851,7 @@ let
 
           client.wait_for_unit("network.target")
           client_with_privacy.wait_for_unit("network.target")
+          router.systemctl("start network-online.target")
           router.wait_for_unit("network-online.target")
 
           with subtest("Wait until we have an ip address"):
diff --git a/nixos/tests/nextcloud/basic.nix b/nixos/tests/nextcloud/basic.nix
index ab1d8353dba0b..428fe0aa10db9 100644
--- a/nixos/tests/nextcloud/basic.nix
+++ b/nixos/tests/nextcloud/basic.nix
@@ -13,10 +13,12 @@ in {
     # The only thing the client needs to do is download a file.
     client = { ... }: {
       services.davfs2.enable = true;
-      system.activationScripts.davfs2-secrets = ''
-        echo "http://nextcloud/remote.php/dav/files/${adminuser} ${adminuser} ${adminpass}" > /tmp/davfs2-secrets
-        chmod 600 /tmp/davfs2-secrets
-      '';
+      systemd.tmpfiles.settings.nextcloud = {
+        "/tmp/davfs2-secrets"."f+" = {
+          mode = "0600";
+          argument = "http://nextcloud/remote.php/dav/files/${adminuser} ${adminuser} ${adminpass}";
+        };
+      };
       virtualisation.fileSystems = {
         "/mnt/dav" = {
           device = "http://nextcloud/remote.php/dav/files/${adminuser}";
diff --git a/nixos/tests/nextcloud/default.nix b/nixos/tests/nextcloud/default.nix
index 19d04b28b4f99..84ac371537271 100644
--- a/nixos/tests/nextcloud/default.nix
+++ b/nixos/tests/nextcloud/default.nix
@@ -22,4 +22,4 @@ foldl
     };
   })
 { }
-  [ 26 27 ]
+  [ 26 27 28 ]
diff --git a/nixos/tests/nextcloud/with-postgresql-and-redis.nix b/nixos/tests/nextcloud/with-postgresql-and-redis.nix
index 586bf50fd939c..d95af8a89d07a 100644
--- a/nixos/tests/nextcloud/with-postgresql-and-redis.nix
+++ b/nixos/tests/nextcloud/with-postgresql-and-redis.nix
@@ -32,7 +32,6 @@ in {
           adminpassFile = toString (pkgs.writeText "admin-pass-file" ''
             ${adminpass}
           '');
-          trustedProxies = [ "::1" ];
         };
         notify_push = {
           enable = true;
@@ -42,6 +41,7 @@ in {
         extraApps = {
           inherit (pkgs."nextcloud${lib.versions.major config.services.nextcloud.package.version}Packages".apps) notify_push;
         };
+        extraOptions.trusted_proxies = [ "::1" ];
       };
 
       services.redis.servers."nextcloud".enable = true;
diff --git a/nixos/tests/nfs/kerberos.nix b/nixos/tests/nfs/kerberos.nix
index a7d08bc628c62..5944b53319a0b 100644
--- a/nixos/tests/nfs/kerberos.nix
+++ b/nixos/tests/nfs/kerberos.nix
@@ -1,15 +1,17 @@
 import ../make-test-python.nix ({ pkgs, lib, ... }:
 
 let
-  krb5 =
-    { enable = true;
-      domain_realm."nfs.test"   = "NFS.TEST";
+  security.krb5 = {
+    enable = true;
+    settings = {
+      domain_realm."nfs.test" = "NFS.TEST";
       libdefaults.default_realm = "NFS.TEST";
-      realms."NFS.TEST" =
-        { admin_server = "server.nfs.test";
-          kdc = "server.nfs.test";
-        };
+      realms."NFS.TEST" = {
+        admin_server = "server.nfs.test";
+        kdc = "server.nfs.test";
+      };
     };
+  };
 
   hosts =
     ''
@@ -32,7 +34,7 @@ in
 
   nodes = {
     client = { lib, ... }:
-      { inherit krb5 users;
+      { inherit security users;
 
         networking.extraHosts = hosts;
         networking.domain = "nfs.test";
@@ -48,7 +50,7 @@ in
       };
 
     server = { lib, ...}:
-      { inherit krb5 users;
+      { inherit security users;
 
         networking.extraHosts = hosts;
         networking.domain = "nfs.test";
@@ -103,6 +105,7 @@ in
       server.wait_for_unit("rpc-gssd.service")
       server.wait_for_unit("rpc-svcgssd.service")
 
+      client.systemctl("start network-online.target")
       client.wait_for_unit("network-online.target")
 
       # add principals to client keytab
@@ -128,4 +131,6 @@ in
           expected = ["alice", "users"]
           assert ids == expected, f"ids incorrect: got {ids} expected {expected}"
     '';
+
+  meta.maintainers = [ lib.maintainers.dblsaiko ];
 })
diff --git a/nixos/tests/nginx-etag-compression.nix b/nixos/tests/nginx-etag-compression.nix
new file mode 100644
index 0000000000000..67493ae299841
--- /dev/null
+++ b/nixos/tests/nginx-etag-compression.nix
@@ -0,0 +1,45 @@
+import ./make-test-python.nix {
+  name = "nginx-etag-compression";
+
+  nodes.machine = { pkgs, lib, ... }: {
+    services.nginx = {
+      enable = true;
+      recommendedGzipSettings = true;
+      virtualHosts.default = {
+        root = pkgs.runCommandLocal "testdir" {} ''
+          mkdir "$out"
+          cat > "$out/index.html" <<EOF
+          Hello, world!
+          Hello, world!
+          Hello, world!
+          Hello, world!
+          Hello, world!
+          Hello, world!
+          Hello, world!
+          Hello, world!
+          EOF
+          ${pkgs.gzip}/bin/gzip -k "$out/index.html"
+        '';
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
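+    # the gzip-compressed and uncompressed representations must get different etags,
+    # and each etag must stay stable across repeated requests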
+    machine.wait_for_unit("nginx")
+    machine.wait_for_open_port(80)
+
+    etag_plain = machine.succeed("curl -s -w'%header{etag}' -o/dev/null -H 'Accept-encoding:' http://127.0.0.1/")
+    etag_gzip = machine.succeed("curl -s -w'%header{etag}' -o/dev/null -H 'Accept-encoding:gzip' http://127.0.0.1/")
+
+    with subtest("different representations have different etags"):
+      assert etag_plain != etag_gzip, f"etags should differ: {etag_plain} == {etag_gzip}"
+
+    with subtest("etag for uncompressed response is reproducible"):
+      etag_plain_repeat = machine.succeed("curl -s -w'%header{etag}' -o/dev/null -H 'Accept-encoding:' http://127.0.0.1/")
+      assert etag_plain == etag_plain_repeat, f"etags should be the same: {etag_plain} != {etag_plain_repeat}"
+
+    with subtest("etag for compressed response is reproducible"):
+      etag_gzip_repeat = machine.succeed("curl -s -w'%header{etag}' -o/dev/null -H 'Accept-encoding:gzip' http://127.0.0.1/")
+      assert etag_gzip == etag_gzip_repeat, f"etags should be the same: {etag_gzip} != {etag_gzip_repeat}"
+  '';
+}
diff --git a/nixos/tests/nginx-http3.nix b/nixos/tests/nginx-http3.nix
index fc9f31037f989..22f7f61f10ce6 100644
--- a/nixos/tests/nginx-http3.nix
+++ b/nixos/tests/nginx-http3.nix
@@ -1,97 +1,113 @@
-import ./make-test-python.nix ({lib, pkgs, ...}:
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
 let
   hosts = ''
     192.168.2.101 acme.test
   '';
 
 in
-{
-  name = "nginx-http3";
-  meta.maintainers = with pkgs.lib.maintainers; [ izorkin ];
-
-  nodes = {
-    server = { pkgs, ... }: {
-      networking = {
-        interfaces.eth1 = {
-          ipv4.addresses = [
-            { address = "192.168.2.101"; prefixLength = 24; }
-          ];
-        };
-        extraHosts = hosts;
-        firewall.allowedTCPPorts = [ 443 ];
-        firewall.allowedUDPPorts = [ 443 ];
-      };
-
-      security.pki.certificates = [
-        (builtins.readFile ./common/acme/server/ca.cert.pem)
-      ];
-
-      services.nginx = {
-        enable = true;
-        package = pkgs.nginxQuic;
-
-        virtualHosts."acme.test" = {
-          onlySSL = true;
-          sslCertificate = ./common/acme/server/acme.test.cert.pem;
-          sslCertificateKey = ./common/acme/server/acme.test.key.pem;
-          http2 = true;
-          http3 = true;
-          http3_hq = false;
-          quic = true;
-          reuseport = true;
-          root = lib.mkForce (pkgs.runCommandLocal "testdir" {} ''
-            mkdir "$out"
-            cat > "$out/index.html" <<EOF
-            <html><body>Hello World!</body></html>
-            EOF
-            cat > "$out/example.txt" <<EOF
-            Check http3 protocol.
-            EOF
-          '');
-        };
-      };
-    };
-
-    client = { pkgs, ... }: {
-      environment.systemPackages = [ pkgs.curlHTTP3 ];
-      networking = {
-        interfaces.eth1 = {
-          ipv4.addresses = [
-            { address = "192.168.2.201"; prefixLength = 24; }
-          ];
-        };
-        extraHosts = hosts;
-      };
 
-      security.pki.certificates = [
-        (builtins.readFile ./common/acme/server/ca.cert.pem)
-      ];
-    };
-  };
+builtins.listToAttrs (
+  builtins.map
+    (nginxPackage:
+      {
+        name = pkgs.lib.getName nginxPackage;
+        value = makeTest {
+          name = "nginx-http3-${pkgs.lib.getName nginxPackage}";
+          meta.maintainers = with pkgs.lib.maintainers; [ izorkin ];
 
-  testScript = ''
-    start_all()
+          nodes = {
+            server = { lib, pkgs, ... }: {
+              networking = {
+                interfaces.eth1 = {
+                  ipv4.addresses = [
+                    { address = "192.168.2.101"; prefixLength = 24; }
+                  ];
+                };
+                extraHosts = hosts;
+                firewall.allowedTCPPorts = [ 443 ];
+                firewall.allowedUDPPorts = [ 443 ];
+              };
 
-    server.wait_for_unit("nginx")
-    server.wait_for_open_port(443)
+              security.pki.certificates = [
+                (builtins.readFile ./common/acme/server/ca.cert.pem)
+              ];
 
-    # Check http connections
-    client.succeed("curl --verbose --http3-only https://acme.test | grep 'Hello World!'")
+              services.nginx = {
+                enable = true;
+                package = nginxPackage;
 
-    # Check downloadings
-    client.succeed("curl --verbose --http3-only https://acme.test/example.txt --output /tmp/example.txt")
-    client.succeed("cat /tmp/example.txt | grep 'Check http3 protocol.'")
+                virtualHosts."acme.test" = {
+                  onlySSL = true;
+                  sslCertificate = ./common/acme/server/acme.test.cert.pem;
+                  sslCertificateKey = ./common/acme/server/acme.test.key.pem;
+                  http2 = true;
+                  http3 = true;
+                  http3_hq = false;
+                  quic = true;
+                  reuseport = true;
+                  root = lib.mkForce (pkgs.runCommandLocal "testdir" {} ''
+                    mkdir "$out"
+                    cat > "$out/index.html" <<EOF
+                    <html><body>Hello World!</body></html>
+                    EOF
+                    cat > "$out/example.txt" <<EOF
+                    Check http3 protocol.
+                    EOF
+                  '');
+                };
+              };
+            };
 
-    # Check header reading
-    client.succeed("curl --verbose --http3-only --head https://acme.test | grep 'content-type'")
-    client.succeed("curl --verbose --http3-only --head https://acme.test | grep 'HTTP/3 200'")
-    client.succeed("curl --verbose --http3-only --head https://acme.test/error | grep 'HTTP/3 404'")
+            client = { pkgs, ... }: {
+              environment.systemPackages = [ pkgs.curlHTTP3 ];
+              networking = {
+                interfaces.eth1 = {
+                  ipv4.addresses = [
+                    { address = "192.168.2.201"; prefixLength = 24; }
+                  ];
+                };
+                extraHosts = hosts;
+              };
 
-    # Check change User-Agent
-    client.succeed("curl --verbose --http3-only --user-agent 'Curl test 3.0' https://acme.test")
-    server.succeed("cat /var/log/nginx/access.log | grep 'Curl test 3.0'")
+              security.pki.certificates = [
+                (builtins.readFile ./common/acme/server/ca.cert.pem)
+              ];
+            };
+          };
 
-    server.shutdown()
-    client.shutdown()
-  '';
-})
+          testScript = ''
+            start_all()
+
+            server.wait_for_unit("nginx")
+            server.wait_for_open_port(443)
+
+            # Check http connections
+            client.succeed("curl --verbose --http3-only https://acme.test | grep 'Hello World!'")
+
+            # Check downloadings
+            client.succeed("curl --verbose --http3-only https://acme.test/example.txt --output /tmp/example.txt")
+            client.succeed("cat /tmp/example.txt | grep 'Check http3 protocol.'")
+
+            # Check header reading
+            client.succeed("curl --verbose --http3-only --head https://acme.test | grep 'content-type'")
+            client.succeed("curl --verbose --http3-only --head https://acme.test | grep 'HTTP/3 200'")
+            client.succeed("curl --verbose --http3-only --head https://acme.test/error | grep 'HTTP/3 404'")
+
+            # Check change User-Agent
+            client.succeed("curl --verbose --http3-only --user-agent 'Curl test 3.0' https://acme.test")
+            server.succeed("cat /var/log/nginx/access.log | grep 'Curl test 3.0'")
+
+            server.shutdown()
+            client.shutdown()
+          '';
+        };
+      }
+    )
+    [ pkgs.angieQuic pkgs.nginxQuic ]
+)
diff --git a/nixos/tests/nginx-moreheaders.nix b/nixos/tests/nginx-moreheaders.nix
new file mode 100644
index 0000000000000..560dcf9ce0b82
--- /dev/null
+++ b/nixos/tests/nginx-moreheaders.nix
@@ -0,0 +1,37 @@
+import ./make-test-python.nix {
+  name = "nginx-more-headers";
+
+  nodes = {
+    webserver = { pkgs, ... }: {
+      services.nginx = {
+        enable = true;
+
+        virtualHosts.test = {
+          locations = {
+            "/".return = "200 blub";
+            "/some" =  {
+              return = "200 blub";
+              extraConfig = ''
+                more_set_headers "Referrer-Policy: no-referrer";
+              '';
+            };
+          };
+          extraConfig = ''
+            more_set_headers "X-Powered-By: nixos";
+          '';
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    webserver.wait_for_unit("nginx")
+    webserver.wait_for_open_port(80)
+
+    webserver.succeed("curl --fail --resolve test:80:127.0.0.1 --head --verbose http://test | grep -q \"X-Powered-By: nixos\"")
+    webserver.fail("curl --fail --resolve test:80:127.0.0.1 --head --verbose http://test | grep -q \"Referrer-Policy: no-referrer\"")
+
+    webserver.succeed("curl --fail --resolve test:80:127.0.0.1 --head --verbose http://test/some | grep -q \"X-Powered-By: nixos\"")
+    webserver.succeed("curl --fail --resolve test:80:127.0.0.1 --head --verbose http://test/some | grep -q \"Referrer-Policy: no-referrer\"")
+  '';
+}
diff --git a/nixos/tests/nginx-redirectcode.nix b/nixos/tests/nginx-redirectcode.nix
new file mode 100644
index 0000000000000..f60434a21a85d
--- /dev/null
+++ b/nixos/tests/nginx-redirectcode.nix
@@ -0,0 +1,25 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "nginx-redirectcode";
+  meta.maintainers = with lib.maintainers; [ misterio77 ];
+
+  nodes = {
+    webserver = { pkgs, lib, ... }: {
+      services.nginx = {
+        enable = true;
+        virtualHosts.localhost = {
+          globalRedirect = "example.com/foo";
+          # With 308 (and 307), the request method and body are preserved when following the redirect
+          redirectCode = 308;
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    webserver.wait_for_unit("nginx")
+    webserver.wait_for_open_port(80)
+
+    # Check the status code
+    webserver.succeed("curl -si http://localhost | grep '^HTTP/[0-9.]\+ 308 Permanent Redirect'")
+  '';
+})
diff --git a/nixos/tests/nginx-variants.nix b/nixos/tests/nginx-variants.nix
index 0faa0127669dd..8c24052aacce3 100644
--- a/nixos/tests/nginx-variants.nix
+++ b/nixos/tests/nginx-variants.nix
@@ -7,17 +7,17 @@ with import ../lib/testing-python.nix { inherit system pkgs; };
 
 builtins.listToAttrs (
   builtins.map
-    (nginxName:
+    (nginxPackage:
       {
-        name = nginxName;
+        name = pkgs.lib.getName nginxPackage;
         value = makeTest {
-          name = "nginx-variant-${nginxName}";
+          name = "nginx-variant-${pkgs.lib.getName nginxPackage}";
 
           nodes.machine = { pkgs, ... }: {
             services.nginx = {
               enable = true;
               virtualHosts.localhost.locations."/".return = "200 'foo'";
-              package = pkgs."${nginxName}";
+              package = nginxPackage;
             };
           };
 
@@ -29,5 +29,5 @@ builtins.listToAttrs (
         };
       }
     )
-    [ "nginxStable" "nginxMainline" "nginxQuic" "nginxShibboleth" "openresty" "tengine" ]
+    [ pkgs.angie pkgs.angieQuic pkgs.nginxStable pkgs.nginxMainline pkgs.nginxQuic pkgs.nginxShibboleth pkgs.openresty pkgs.tengine ]
 )
diff --git a/nixos/tests/nitter.nix b/nixos/tests/nitter.nix
index 8bc55ba8c69fc..114f1aac7c7af 100644
--- a/nixos/tests/nitter.nix
+++ b/nixos/tests/nitter.nix
@@ -1,13 +1,28 @@
 import ./make-test-python.nix ({ pkgs, ... }:
 
+let
+  # In a real deployment this should naturally not come from the nix store
+  # but instead be seeded via agenix or as a non-nix managed file.
+  #
+  # These credentials are from the nitter wiki and are expired. We must provide
+  # credentials in the correct format, otherwise nitter fails to start. They
+  # need not be valid, as unauthorized errors are handled gracefully.
+  guestAccountFile = pkgs.writeText "guest_accounts.jsonl" ''
+    {"oauth_token":"1719213587296620928-BsXY2RIJEw7fjxoNwbBemgjJhueK0m","oauth_token_secret":"N0WB0xhL4ng6WTN44aZO82SUJjz7ssI3hHez2CUhTiYqy"}
+  '';
+in
 {
   name = "nitter";
   meta.maintainers = with pkgs.lib.maintainers; [ erdnaxe ];
 
   nodes.machine = {
-    services.nitter.enable = true;
-    # Test CAP_NET_BIND_SERVICE
-    services.nitter.server.port = 80;
+    services.nitter = {
+      enable = true;
+      # Test CAP_NET_BIND_SERVICE
+      server.port = 80;
+      # Provide dummy guest accounts
+      guestAccounts = guestAccountFile;
+    };
   };
 
   testScript = ''
diff --git a/nixos/tests/nixos-rebuild-specialisations.nix b/nixos/tests/nixos-rebuild-specialisations.nix
index 444ff7a3b9771..9192b8a8a030b 100644
--- a/nixos/tests/nixos-rebuild-specialisations.nix
+++ b/nixos/tests/nixos-rebuild-specialisations.nix
@@ -23,7 +23,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
 
       virtualisation = {
         cores = 2;
-        memorySize = 2048;
+        memorySize = 4096;
       };
     };
   };
diff --git a/nixos/tests/nixos-rebuild-target-host.nix b/nixos/tests/nixos-rebuild-target-host.nix
new file mode 100644
index 0000000000000..8d60b788abf38
--- /dev/null
+++ b/nixos/tests/nixos-rebuild-target-host.nix
@@ -0,0 +1,136 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nixos-rebuild-target-host";
+
+  nodes = {
+    deployer = { lib, ... }: let
+      inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+    in {
+      imports = [ ../modules/profiles/installation-device.nix ];
+
+      nix.settings = {
+        substituters = lib.mkForce [ ];
+        hashed-mirrors = null;
+        connect-timeout = 1;
+      };
+
+      environment.systemPackages = [ pkgs.passh ];
+
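+      # substituters are disabled above, so the deployer needs every build-time
+      # dependency in its own closure to rebuild the target configurations offline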
+      system.includeBuildDependencies = true;
+
+      virtualisation = {
+        cores = 2;
+        memorySize = 2048;
+      };
+
+      system.build.privateKey = snakeOilPrivateKey;
+      system.build.publicKey = snakeOilPublicKey;
+    };
+
+    target = { nodes, lib, ... }: let
+      targetConfig = {
+        documentation.enable = false;
+        services.openssh.enable = true;
+
+        users.users.root.openssh.authorizedKeys.keys = [ nodes.deployer.system.build.publicKey ];
+        users.users.alice.openssh.authorizedKeys.keys = [ nodes.deployer.system.build.publicKey ];
+        users.users.bob.openssh.authorizedKeys.keys = [ nodes.deployer.system.build.publicKey ];
+
+        users.users.alice.extraGroups = [ "wheel" ];
+        users.users.bob.extraGroups = [ "wheel" ];
+
+        # Disable sudo for root to ensure sudo isn't called without `--use-remote-sudo`
+        security.sudo.extraRules = lib.mkForce [
+          { groups = [ "wheel" ]; commands = [ { command = "ALL"; } ]; }
+          { users = [ "alice" ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" ]; } ]; }
+        ];
+
+        nix.settings.trusted-users = [ "@wheel" ];
+      };
+    in {
+      imports = [ ./common/user-account.nix ];
+
+      config = lib.mkMerge [
+        targetConfig
+        {
+          system.build = {
+            inherit targetConfig;
+          };
+
+          networking.hostName = "target";
+        }
+      ];
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      sshConfig = builtins.toFile "ssh.conf" ''
+        UserKnownHostsFile=/dev/null
+        StrictHostKeyChecking=no
+      '';
+
+      targetConfigJSON = pkgs.writeText "target-configuration.json"
+        (builtins.toJSON nodes.target.system.build.targetConfig);
+
+      targetNetworkJSON = pkgs.writeText "target-network.json"
+        (builtins.toJSON nodes.target.system.build.networkConfig);
+
+      configFile = hostname: pkgs.writeText "configuration.nix" ''
+        { lib, modulesPath, ... }: {
+          imports = [
+            (modulesPath + "/virtualisation/qemu-vm.nix")
+            (modulesPath + "/testing/test-instrumentation.nix")
+            (modulesPath + "/../tests/common/user-account.nix")
+            (lib.modules.importJSON ./target-configuration.json)
+            (lib.modules.importJSON ./target-network.json)
+            ./hardware-configuration.nix
+          ];
+
+          boot.loader.grub = {
+            enable = true;
+            device = "/dev/vda";
+            forceInstall = true;
+          };
+
+          # the hostname below is asserted by the test script after each deployment
+          networking.hostName = "${hostname}";
+        }
+      '';
+    in
+    ''
+      start_all()
+      target.wait_for_open_port(22)
+
+      deployer.wait_until_succeeds("ping -c1 target")
+      deployer.succeed("install -Dm 600 ${nodes.deployer.system.build.privateKey} ~root/.ssh/id_ecdsa")
+      deployer.succeed("install ${sshConfig} ~root/.ssh/config")
+
+      target.succeed("nixos-generate-config")
+      deployer.succeed("scp alice@target:/etc/nixos/hardware-configuration.nix /root/hardware-configuration.nix")
+
+      deployer.copy_from_host("${configFile "config-1-deployed"}", "/root/configuration-1.nix")
+      deployer.copy_from_host("${configFile "config-2-deployed"}", "/root/configuration-2.nix")
+      deployer.copy_from_host("${configFile "config-3-deployed"}", "/root/configuration-3.nix")
+      deployer.copy_from_host("${targetNetworkJSON}", "/root/target-network.json")
+      deployer.copy_from_host("${targetConfigJSON}", "/root/target-configuration.json")
+
+      # Ensure sudo is disabled for root
+      target.fail("sudo true")
+
+      # This test also ensures that sudo is not called without --use-remote-sudo
+      with subtest("Deploy to root@target"):
+        deployer.succeed("nixos-rebuild switch -I nixos-config=/root/configuration-1.nix --target-host root@target &>/dev/console")
+        target_hostname = deployer.succeed("ssh alice@target cat /etc/hostname").rstrip()
+        assert target_hostname == "config-1-deployed", f"{target_hostname=}"
+
+      with subtest("Deploy to alice@target with passwordless sudo"):
+        deployer.succeed("nixos-rebuild switch -I nixos-config=/root/configuration-2.nix --target-host alice@target --use-remote-sudo &>/dev/console")
+        target_hostname = deployer.succeed("ssh alice@target cat /etc/hostname").rstrip()
+        assert target_hostname == "config-2-deployed", f"{target_hostname=}"
+
+      with subtest("Deploy to bob@target with password based sudo"):
+        deployer.succeed("passh -c 3 -C -p ${nodes.target.users.users.bob.password} -P \"\[sudo\] password\" nixos-rebuild switch -I nixos-config=/root/configuration-3.nix --target-host bob@target --use-remote-sudo &>/dev/console")
+        target_hostname = deployer.succeed("ssh alice@target cat /etc/hostname").rstrip()
+        assert target_hostname == "config-3-deployed", f"{target_hostname=}"
+    '';
+})
diff --git a/nixos/tests/nixseparatedebuginfod.nix b/nixos/tests/nixseparatedebuginfod.nix
new file mode 100644
index 0000000000000..7c192a73c7064
--- /dev/null
+++ b/nixos/tests/nixseparatedebuginfod.nix
@@ -0,0 +1,80 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  secret-key = "key-name:/COlMSRbehSh6YSruJWjL+R0JXQUKuPEn96fIb+pLokEJUjcK/2Gv8Ai96D7JGay5gDeUTx5wdpPgNvum9YtwA==";
+  public-key = "key-name:BCVI3Cv9hr/AIveg+yRmsuYA3lE8ecHaT4Db7pvWLcA=";
+in
+{
+  name = "nixseparatedebuginfod";
+  /* A binary cache with debug info and source for nix */
+  nodes.cache = { pkgs, ... }: {
+    services.nix-serve = {
+      enable = true;
+      secretKeyFile = builtins.toFile "secret-key" secret-key;
+      openFirewall = true;
+    };
+    system.extraDependencies = [
+      pkgs.nix.debug
+      pkgs.nix.src
+      pkgs.sl
+    ];
+  };
+  /* the machine where we need the debuginfo */
+  nodes.machine = {
+    imports = [
+      ../modules/installer/cd-dvd/channel.nix
+    ];
+    services.nixseparatedebuginfod.enable = true;
+    nix.settings = {
+      substituters = lib.mkForce [ "http://cache:5000" ];
+      trusted-public-keys = [ public-key ];
+    };
+    environment.systemPackages = [
+      pkgs.valgrind
+      pkgs.gdb
+      (pkgs.writeShellScriptBin "wait_for_indexation" ''
+        set -x
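+        # debuginfod-find reports 'File too large' while nixseparatedebuginfod is still indexing, so poll until that message disappears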
+        while debuginfod-find debuginfo /run/current-system/sw/bin/nix |& grep 'File too large'; do
+          sleep 1;
+        done
+      '')
+    ];
+  };
+  testScript = ''
+    start_all()
+    cache.wait_for_unit("nix-serve.service")
+    cache.wait_for_open_port(5000)
+    machine.wait_for_unit("nixseparatedebuginfod.service")
+    machine.wait_for_open_port(1949)
+
+    with subtest("show the config to debug the test"):
+      machine.succeed("nix --extra-experimental-features nix-command show-config |& logger")
+      machine.succeed("cat /etc/nix/nix.conf |& logger")
+    with subtest("check that the binary cache works"):
+      machine.succeed("nix-store -r ${pkgs.sl}")
+
+    # nixseparatedebuginfod needs .drv to associate executable -> source
+    # on regular systems this would be provided by nixos-rebuild
+    machine.succeed("nix-instantiate '<nixpkgs>' -A nix")
+
+    machine.succeed("timeout 600 wait_for_indexation")
+
+    # test debuginfod-find
+    machine.succeed("debuginfod-find debuginfo /run/current-system/sw/bin/nix")
+
+    # test that gdb can fetch source
+    out = machine.succeed("gdb /run/current-system/sw/bin/nix --batch -x ${builtins.toFile "commands" ''
+    start
+    l
+    ''}")
+    print(out)
+    assert 'int main(' in out
+
+    # test that valgrind can display location information
+    # this relies on the fact that valgrind complains about nix
+    # libgc helps in this regard, and we also ask valgrind to show leak kinds
+    # which are usually false positives.
+    out = machine.succeed("valgrind --leak-check=full --show-leak-kinds=all nix-env --version 2>&1")
+    print(out)
+    assert 'main.cc' in out
+  '';
+})
diff --git a/nixos/tests/npmrc.nix b/nixos/tests/npmrc.nix
new file mode 100644
index 0000000000000..dbf24d372feb4
--- /dev/null
+++ b/nixos/tests/npmrc.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ ... }:
+let
+  machineName = "machine";
+  settingName = "prefix";
+  settingValue = "/some/path";
+in
+{
+  name = "npmrc";
+
+  nodes."${machineName}".programs.npm = {
+    enable = true;
+    npmrc = ''
+      ${settingName} = ${settingValue}
+    '';
+  };
+
+  testScript = ''
+    ${machineName}.start()
+
+    assert ${machineName}.succeed("npm config get ${settingName}") == "${settingValue}\n"
+  '';
+})
diff --git a/nixos/tests/ntfy-sh-migration.nix b/nixos/tests/ntfy-sh-migration.nix
new file mode 100644
index 0000000000000..de6660052d679
--- /dev/null
+++ b/nixos/tests/ntfy-sh-migration.nix
@@ -0,0 +1,77 @@
+# the ntfy-sh module was switched to DynamicUser=true. this test ensures that
+# the migration does not break existing setups.
+#
+# the test performs a migration and asserts that ntfy-sh keeps running
+# properly. first, ntfy-sh is configured to use a static user and group, then
+# started and tested. after that, ntfy-sh is shut down and a systemd drop-in
+# configuration file is used to update the service configuration to use
+# DynamicUser=true. finally, ntfy-sh is started again and tested.
+
+import ./make-test-python.nix {
+  name = "ntfy-sh";
+
+  nodes.machine = {
+    lib,
+    pkgs,
+    ...
+  }: {
+    environment.etc."ntfy-sh-dynamic-user.conf".text = ''
+      [Service]
+      Group=new-ntfy-sh
+      User=new-ntfy-sh
+      DynamicUser=true
+    '';
+
+    services.ntfy-sh.enable = true;
+    services.ntfy-sh.settings.base-url = "http://localhost:2586";
+
+    systemd.services.ntfy-sh.serviceConfig = {
+      DynamicUser = lib.mkForce false;
+      ExecStartPre = [
+        "${pkgs.coreutils}/bin/id"
+        "${pkgs.coreutils}/bin/ls -lahd /var/lib/ntfy-sh/"
+        "${pkgs.coreutils}/bin/ls -lah /var/lib/ntfy-sh/"
+      ];
+      Group = lib.mkForce "old-ntfy-sh";
+      User = lib.mkForce "old-ntfy-sh";
+    };
+
+    users.users.old-ntfy-sh = {
+      isSystemUser = true;
+      group = "old-ntfy-sh";
+    };
+
+    users.groups.old-ntfy-sh = {};
+  };
+
+  testScript = ''
+    import json
+
+    msg = "Test notification"
+
+    def test_ntfysh():
+      machine.wait_for_unit("ntfy-sh.service")
+      machine.wait_for_open_port(2586)
+
+      machine.succeed(f"curl -d '{msg}' localhost:2586/test")
+
+      text = machine.succeed("curl -s localhost:2586/test/json?poll=1")
+      for line in text.splitlines():
+        notif = json.loads(line)
+        assert msg == notif["message"], "Wrong message"
+
+      machine.succeed("ntfy user list")
+
+    machine.wait_for_unit("multi-user.target")
+
+    test_ntfysh()
+
+    machine.succeed("systemctl stop ntfy-sh.service")
+    machine.succeed("mkdir -p /run/systemd/system/ntfy-sh.service.d")
+    machine.succeed("cp /etc/ntfy-sh-dynamic-user.conf /run/systemd/system/ntfy-sh.service.d/dynamic-user.conf")
+    machine.succeed("systemctl daemon-reload")
+    machine.succeed("systemctl start ntfy-sh.service")
+
+    test_ntfysh()
+  '';
+}
diff --git a/nixos/tests/ntpd-rs.nix b/nixos/tests/ntpd-rs.nix
new file mode 100644
index 0000000000000..6f3c80e87f072
--- /dev/null
+++ b/nixos/tests/ntpd-rs.nix
@@ -0,0 +1,51 @@
+import ./make-test-python.nix ({ lib, ... }:
+{
+  name = "ntpd-rs";
+
+  meta = {
+    maintainers = with lib.maintainers; [ fpletz ];
+  };
+
+  nodes = {
+    client = {
+      services.ntpd-rs = {
+        enable = true;
+        metrics.enable = true;
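+        # rely solely on the test's "server" node as time source instead of
+        # the default networking.timeServers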
+        useNetworkingTimeServers = false;
+        settings = {
+          source = [
+            {
+              mode = "server";
+              address = "server";
+            }
+          ];
+          synchronization = {
+            minimum-agreeing-sources = 1;
+          };
+        };
+      };
+    };
+    server = {
+      networking.firewall.allowedUDPPorts = [ 123 ];
+      services.ntpd-rs = {
+        enable = true;
+        metrics.enable = true;
+        settings = {
+          server = [
+            { listen = "[::]:123"; }
+          ];
+        };
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    start_all()
+
+    for machine in (server, client):
+      machine.wait_for_unit('multi-user.target')
+      machine.succeed('systemctl is-active ntpd-rs.service')
+      machine.succeed('systemctl is-active ntpd-rs-metrics.service')
+      machine.succeed('curl http://localhost:9975/metrics | grep ntp_uptime_seconds')
+  '';
+})
diff --git a/nixos/tests/oci-containers.nix b/nixos/tests/oci-containers.nix
index 1afa9df36dfa4..205ce623d089c 100644
--- a/nixos/tests/oci-containers.nix
+++ b/nixos/tests/oci-containers.nix
@@ -12,7 +12,7 @@ let
     name = "oci-containers-${backend}";
 
     meta.maintainers = lib.teams.serokell.members
-                       ++ (with lib.maintainers; [ adisbladis benley mkaito ]);
+                       ++ (with lib.maintainers; [ benley mkaito ]);
 
     nodes = {
       ${backend} = { pkgs, ... }: {
diff --git a/nixos/tests/opensmtpd-rspamd.nix b/nixos/tests/opensmtpd-rspamd.nix
index 19969a7b47ddd..e413a2050bd61 100644
--- a/nixos/tests/opensmtpd-rspamd.nix
+++ b/nixos/tests/opensmtpd-rspamd.nix
@@ -119,6 +119,7 @@ import ./make-test-python.nix {
   testScript = ''
     start_all()
 
+    client.systemctl("start network-online.target")
     client.wait_for_unit("network-online.target")
     smtp1.wait_for_unit("opensmtpd")
     smtp2.wait_for_unit("opensmtpd")
diff --git a/nixos/tests/opensmtpd.nix b/nixos/tests/opensmtpd.nix
index 17c1a569ba0d9..d32f82ed33b8c 100644
--- a/nixos/tests/opensmtpd.nix
+++ b/nixos/tests/opensmtpd.nix
@@ -104,6 +104,7 @@ import ./make-test-python.nix {
   testScript = ''
     start_all()
 
+    client.systemctl("start network-online.target")
     client.wait_for_unit("network-online.target")
     smtp1.wait_for_unit("opensmtpd")
     smtp2.wait_for_unit("opensmtpd")
diff --git a/nixos/tests/openssh.nix b/nixos/tests/openssh.nix
index 799497477993b..8074fd2ed4838 100644
--- a/nixos/tests/openssh.nix
+++ b/nixos/tests/openssh.nix
@@ -34,6 +34,19 @@ in {
         ];
       };
 
+    server-lazy-socket = {
+      virtualisation.vlans = [ 1 2 ];
+      services.openssh = {
+        enable = true;
+        startWhenNeeded = true;
+        ports = [ 2222 ];
+        listenAddresses = [ { addr = "0.0.0.0"; } ];
+      };
+      users.users.root.openssh.authorizedKeys.keys = [
+        snakeOilPublicKey
+      ];
+    };
+
     server-localhost-only =
       { ... }:
 
@@ -96,7 +109,9 @@ in {
       };
 
     client =
-      { ... }: { };
+      { ... }: {
+        virtualisation.vlans = [ 1 2 ];
+      };
 
   };
 
@@ -109,6 +124,7 @@ in {
 
     server_lazy.wait_for_unit("sshd.socket", timeout=30)
     server_localhost_only_lazy.wait_for_unit("sshd.socket", timeout=30)
+    server_lazy_socket.wait_for_unit("sshd.socket", timeout=30)
 
     with subtest("manual-authkey"):
         client.succeed("mkdir -m 700 /root/.ssh")
@@ -145,6 +161,16 @@ in {
             timeout=30
         )
 
+    with subtest("socket activation on a non-standard port"):
+        client.succeed(
+            "cat ${snakeOilPrivateKey} > privkey.snakeoil"
+        )
+        client.succeed("chmod 600 privkey.snakeoil")
+        client.succeed(
+            "ssh -p 2222 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil root@192.168.2.4 true",
+            timeout=30
+        )
+
     with subtest("configured-authkey"):
         client.succeed(
             "cat ${snakeOilPrivateKey} > privkey.snakeoil"
diff --git a/nixos/tests/opentabletdriver.nix b/nixos/tests/opentabletdriver.nix
index b7583f6dd2648..a71a007c41100 100644
--- a/nixos/tests/opentabletdriver.nix
+++ b/nixos/tests/opentabletdriver.nix
@@ -20,9 +20,11 @@ in {
     ''
       machine.start()
       machine.wait_for_x()
+
+      machine.wait_for_unit('graphical.target')
       machine.wait_for_unit("opentabletdriver.service", "${testUser}")
 
-      machine.succeed("cat /etc/udev/rules.d/99-opentabletdriver.rules")
+      machine.succeed("cat /etc/udev/rules.d/70-opentabletdriver.rules")
       # Will fail if service is not running
       # Needs to run as the same user that started the service
       machine.succeed("su - ${testUser} -c 'otd detect'")
diff --git a/nixos/tests/os-prober.nix b/nixos/tests/os-prober.nix
index dae1306bd69d0..034de0620d885 100644
--- a/nixos/tests/os-prober.nix
+++ b/nixos/tests/os-prober.nix
@@ -95,7 +95,7 @@ in {
           ntp
           perlPackages.ListCompare
           perlPackages.XMLLibXML
-          python3Minimal
+          python3
           shared-mime-info
           stdenv
           sudo
diff --git a/nixos/tests/owncast.nix b/nixos/tests/owncast.nix
index debb34f5009dc..73aac4e704751 100644
--- a/nixos/tests/owncast.nix
+++ b/nixos/tests/owncast.nix
@@ -31,6 +31,8 @@ import ./make-test-python.nix ({ pkgs, ... }: {
   testScript = ''
     start_all()
 
+    client.systemctl("start network-online.target")
+    server.systemctl("start network-online.target")
     client.wait_for_unit("network-online.target")
     server.wait_for_unit("network-online.target")
     server.wait_for_unit("owncast.service")
diff --git a/nixos/tests/pam/pam-file-contents.nix b/nixos/tests/pam/pam-file-contents.nix
index 2bafd90618e97..accaa4cc70a94 100644
--- a/nixos/tests/pam/pam-file-contents.nix
+++ b/nixos/tests/pam/pam-file-contents.nix
@@ -7,7 +7,7 @@ import ../make-test-python.nix ({ pkgs, ... }: {
   nodes.machine = { ... }: {
     imports = [ ../../modules/profiles/minimal.nix ];
 
-    krb5.enable = true;
+    security.krb5.enable = true;
 
     users = {
       mutableUsers = false;
diff --git a/nixos/tests/pantheon.nix b/nixos/tests/pantheon.nix
index be1351283d99a..69a28c397bedc 100644
--- a/nixos/tests/pantheon.nix
+++ b/nixos/tests/pantheon.nix
@@ -26,6 +26,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...} :
 
     with subtest("Test we can see usernames in elementary-greeter"):
         machine.wait_for_text("${user.description}")
+        machine.wait_until_succeeds("pgrep -f io.elementary.greeter-compositor")
         # OCR was struggling with this one.
         # machine.wait_for_text("${bob.description}")
         # Ensure the password box is focused by clicking it.
@@ -39,21 +40,29 @@ import ./make-test-python.nix ({ pkgs, lib, ...} :
         machine.wait_for_x()
         machine.wait_for_file("${user.home}/.Xauthority")
         machine.succeed("xauth merge ${user.home}/.Xauthority")
+        machine.wait_until_succeeds('journalctl -t gnome-session-binary --grep "Entering running state"')
 
     with subtest("Check that logging in has given the user ownership of devices"):
         machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
 
-    with subtest("Check if pantheon session components actually start"):
-        machine.wait_until_succeeds("pgrep gala")
-        machine.wait_for_window("gala")
-        machine.wait_until_succeeds("pgrep -f io.elementary.wingpanel")
-        machine.wait_for_window("io.elementary.wingpanel")
-        machine.wait_until_succeeds("pgrep plank")
-        machine.wait_for_window("plank")
-        machine.wait_until_succeeds("pgrep -f gsd-media-keys")
+    with subtest("Check if Pantheon components actually start"):
+        for i in ["gala", "io.elementary.wingpanel", "plank", "gsd-media-keys", "io.elementary.desktop.agent-polkit"]:
+            machine.wait_until_succeeds(f"pgrep -f {i}")
+        for i in ["gala", "io.elementary.wingpanel", "plank"]:
+            machine.wait_for_window(i)
         machine.wait_for_unit("bamfdaemon.service", "${user.name}")
         machine.wait_for_unit("io.elementary.files.xdg-desktop-portal.service", "${user.name}")
 
+    with subtest("Check if various environment variables are set"):
+        cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf /run/current-system/sw/bin/gala)/environ"
+        machine.succeed(f"{cmd} | grep 'XDG_CURRENT_DESKTOP' | grep 'Pantheon'")
+        # Hopefully from the sessionPath option.
+        machine.succeed(f"{cmd} | grep 'XDG_DATA_DIRS' | grep 'gsettings-schemas/pantheon-agent-geoclue2'")
+        # Hopefully from login shell.
+        machine.succeed(f"{cmd} | grep '__NIXOS_SET_ENVIRONMENT_DONE' | grep '1'")
+        # See elementary-session-settings packaging.
+        machine.succeed(f"{cmd} | grep 'XDG_CONFIG_DIRS' | grep 'elementary-default-settings'")
+
     with subtest("Open elementary videos"):
         machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.videos >&2 &'")
         machine.sleep(2)
@@ -61,6 +70,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...} :
         machine.wait_for_text("No Videos Open")
 
     with subtest("Open elementary calendar"):
+        machine.wait_until_succeeds("pgrep -f evolution-calendar-factory")
         machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.calendar >&2 &'")
         machine.sleep(2)
         machine.wait_for_window("io.elementary.calendar")
@@ -75,6 +85,14 @@ import ./make-test-python.nix ({ pkgs, lib, ...} :
         machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.terminal >&2 &'")
         machine.wait_for_window("io.elementary.terminal")
 
+    with subtest("Trigger multitasking view"):
+        cmd = "dbus-send --session --dest=org.pantheon.gala --print-reply /org/pantheon/gala org.pantheon.gala.PerformAction int32:1"
+        env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0"
+        machine.succeed(f"su - ${user.name} -c '{env} {cmd}'")
+        machine.sleep(3)
+        machine.screenshot("multitasking")
+        machine.succeed(f"su - ${user.name} -c '{env} {cmd}'")
+
     with subtest("Check if gala has ever coredumped"):
         machine.fail("coredumpctl --json=short | grep gala")
         # So you can see the dock in the below screenshot.
diff --git a/nixos/tests/paperless.nix b/nixos/tests/paperless.nix
index 6a51cc522bdc5..3d834b29958de 100644
--- a/nixos/tests/paperless.nix
+++ b/nixos/tests/paperless.nix
@@ -21,7 +21,7 @@ import ./make-test-python.nix ({ lib, ... }: {
           }
         ];
       };
-      services.paperless.extraConfig = {
+      services.paperless.settings = {
         PAPERLESS_DBHOST = "/run/postgresql";
       };
     };
diff --git a/nixos/tests/pgadmin4.nix b/nixos/tests/pgadmin4.nix
index 3ee7ed19fa1c5..b726a3eb3b946 100644
--- a/nixos/tests/pgadmin4.nix
+++ b/nixos/tests/pgadmin4.nix
@@ -4,31 +4,49 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
   name = "pgadmin4";
   meta.maintainers = with lib.maintainers; [ mkg20001 gador ];
 
-  nodes.machine = { pkgs, ... }: {
+  nodes = {
+    machine = { pkgs, ... }: {
 
-    imports = [ ./common/user-account.nix ];
+      imports = [ ./common/user-account.nix ];
 
-    environment.systemPackages = with pkgs; [
-      wget
-      curl
-      pgadmin4-desktopmode
-    ];
+      environment.systemPackages = with pkgs; [
+        wget
+        curl
+        pgadmin4-desktopmode
+      ];
 
-    services.postgresql = {
-      enable = true;
-      authentication = ''
-        host    all             all             localhost               trust
-      '';
+      services.postgresql = {
+        enable = true;
+        authentication = ''
+          host    all             all             localhost               trust
+        '';
+      };
+
+      services.pgadmin = {
+        port = 5051;
+        enable = true;
+        initialEmail = "bruh@localhost.de";
+        initialPasswordFile = pkgs.writeText "pw" "bruh2012!";
+      };
     };
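+    # Second machine uses an initial password shorter than the configured
+    # minimumPasswordLength, so pgadmin setup is expected to fail
+    # (see the "minimum password length" subtest below).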
+    machine2 = { pkgs, ... }: {
+
+      imports = [ ./common/user-account.nix ];
 
-    services.pgadmin = {
-      port = 5051;
-      enable = true;
-      initialEmail = "bruh@localhost.de";
-      initialPasswordFile = pkgs.writeText "pw" "bruh2012!";
+      services.postgresql = {
+        enable = true;
+      };
+
+      services.pgadmin = {
+        enable = true;
+        initialEmail = "bruh@localhost.de";
+        initialPasswordFile = pkgs.writeText "pw" "bruh2012!";
+        minimumPasswordLength = 12;
+      };
     };
   };
 
+
   testScript = ''
     with subtest("Check pgadmin module"):
       machine.wait_for_unit("postgresql")
@@ -37,6 +55,11 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
       machine.wait_until_succeeds("curl -sS localhost:5051/login | grep \"<title>pgAdmin 4</title>\" > /dev/null")
       # check for missing support files (css, js etc). Should catch not-generated files during build. See e.g. https://github.com/NixOS/nixpkgs/pull/229184
       machine.succeed("wget -nv --level=1 --spider --recursive localhost:5051/login")
+      # test idempotency
+      machine.systemctl("restart pgadmin.service")
+      machine.wait_for_unit("pgadmin")
+      machine.wait_until_succeeds("curl -sS localhost:5051")
+      machine.wait_until_succeeds("curl -sS localhost:5051/login | grep \"<title>pgAdmin 4</title>\" > /dev/null")
 
     # pgadmin4 module saves the configuration to /etc/pgadmin/config_system.py
     # pgadmin4-desktopmode tries to read that as well. This normally fails with a PermissionError, as the config file
@@ -49,5 +72,9 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
       machine.wait_until_succeeds("curl -sS localhost:5050")
       machine.wait_until_succeeds("curl -sS localhost:5050/browser/ | grep \"<title>pgAdmin 4</title>\" > /dev/null")
       machine.succeed("wget -nv --level=1 --spider --recursive localhost:5050/browser")
+
+    with subtest("Check pgadmin minimum password length"):
+      machine2.wait_for_unit("postgresql")
+      machine2.wait_for_console_text("Password must be at least 12 characters long")
   '';
 })
diff --git a/nixos/tests/pgjwt.nix b/nixos/tests/pgjwt.nix
index 4793a3e315031..8d3310b74eb3b 100644
--- a/nixos/tests/pgjwt.nix
+++ b/nixos/tests/pgjwt.nix
@@ -11,7 +11,7 @@ with pkgs; {
     {
       services.postgresql = {
         enable = true;
-        extraPlugins = [ pgjwt pgtap ];
+        extraPlugins = ps: with ps; [ pgjwt pgtap ];
       };
     };
   };
diff --git a/nixos/tests/podman/default.nix b/nixos/tests/podman/default.nix
index 0e1f420f2a7de..3eea45832f0a6 100644
--- a/nixos/tests/podman/default.nix
+++ b/nixos/tests/podman/default.nix
@@ -24,8 +24,6 @@ import ../make-test-python.nix (
         virtualisation.podman.enable = true;
 
         virtualisation.podman.defaultNetwork.settings.dns_enabled = true;
-
-        networking.firewall.allowedUDPPorts = [ 53 ];
       };
       docker = { pkgs, ... }: {
         virtualisation.podman.enable = true;
diff --git a/nixos/tests/postgis.nix b/nixos/tests/postgis.nix
index d0685abc510c9..dacf4e576c071 100644
--- a/nixos/tests/postgis.nix
+++ b/nixos/tests/postgis.nix
@@ -9,10 +9,10 @@ import ./make-test-python.nix ({ pkgs, ...} : {
       { pkgs, ... }:
 
       {
-        services.postgresql = let mypg = pkgs.postgresql; in {
+        services.postgresql = {
             enable = true;
-            package = mypg;
-            extraPlugins = with mypg.pkgs; [
+            package = pkgs.postgresql;
+            extraPlugins = ps: with ps; [
               postgis
             ];
         };
@@ -24,6 +24,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     master.wait_for_unit("postgresql")
     master.sleep(10)  # Hopefully this is long enough!!
     master.succeed("sudo -u postgres psql -c 'CREATE EXTENSION postgis;'")
+    master.succeed("sudo -u postgres psql -c 'CREATE EXTENSION postgis_raster;'")
     master.succeed("sudo -u postgres psql -c 'CREATE EXTENSION postgis_topology;'")
   '';
 })
diff --git a/nixos/tests/prometheus-exporters.nix b/nixos/tests/prometheus-exporters.nix
index 7840130d4a364..5872b02b609e1 100644
--- a/nixos/tests/prometheus-exporters.nix
+++ b/nixos/tests/prometheus-exporters.nix
@@ -431,8 +431,8 @@ let
     };
 
     kea = let
-      controlSocketPathV4 = "/run/kea-dhcp4/dhcp4.sock";
-      controlSocketPathV6 = "/run/kea-dhcp6/dhcp6.sock";
+      controlSocketPathV4 = "/run/kea/dhcp4.sock";
+      controlSocketPathV6 = "/run/kea/dhcp6.sock";
     in
     {
       exporterConfig = {
@@ -806,6 +806,7 @@ let
     nginx = {
       exporterConfig = {
         enable = true;
+        constLabels = [ "foo=bar" ];
       };
       metricProvider = {
         services.nginx = {
@@ -818,7 +819,7 @@ let
         wait_for_unit("nginx.service")
         wait_for_unit("prometheus-nginx-exporter.service")
         wait_for_open_port(9113)
-        succeed("curl -sSf http://localhost:9113/metrics | grep 'nginx_up 1'")
+        succeed("curl -sSf http://localhost:9113/metrics | grep 'nginx_up{foo=\"bar\"} 1'")
       '';
     };
 
@@ -1052,6 +1053,50 @@ let
       '';
     };
 
+    ping = {
+      exporterConfig = {
+        enable = true;
+
+        settings = {
+          targets = [ {
+            "localhost" = {
+              alias = "local machine";
+              env = "prod";
+              type = "domain";
+            };
+          } {
+            "127.0.0.1" = {
+              alias = "local machine";
+              type = "v4";
+            };
+          } {
+            "::1" = {
+              alias = "local machine";
+              type = "v6";
+            };
+          } {
+            "google.com" = {};
+          } ];
+          dns = {};
+          ping = {
+            interval = "2s";
+            timeout = "3s";
+            history-size = 42;
+            payload-size = 56;
+          };
+          log = {
+            level = "warn";
+          };
+        };
+      };
+
+      exporterTest = ''
+        wait_for_unit("prometheus-ping-exporter.service")
+        wait_for_open_port(9427)
+        succeed("curl -sSf http://localhost:9427/metrics | grep 'ping_up{.*} 1'")
+      '';
+    };
+
     postfix = {
       exporterConfig = {
         enable = true;
@@ -1347,9 +1392,11 @@ let
     snmp = {
       exporterConfig = {
         enable = true;
-        configuration.default = {
-          version = 2;
-          auth.community = "public";
+        configuration = {
+          auths.public_v2 = {
+            community = "public";
+            version = 2;
+          };
         };
       };
       exporterTest = ''
diff --git a/nixos/tests/promscale.nix b/nixos/tests/promscale.nix
index d4825b6d7f551..da18628f2482c 100644
--- a/nixos/tests/promscale.nix
+++ b/nixos/tests/promscale.nix
@@ -27,7 +27,7 @@ let
         services.postgresql = {
           enable = true;
           package = postgresql-package;
-          extraPlugins = with postgresql-package.pkgs; [
+          extraPlugins = ps: with ps; [
             timescaledb
             promscale_extension
           ];
diff --git a/nixos/tests/prowlarr.nix b/nixos/tests/prowlarr.nix
index af669afd57004..663743546459f 100644
--- a/nixos/tests/prowlarr.nix
+++ b/nixos/tests/prowlarr.nix
@@ -11,6 +11,8 @@ import ./make-test-python.nix ({ lib, ... }:
   testScript = ''
     machine.wait_for_unit("prowlarr.service")
     machine.wait_for_open_port(9696)
-    machine.succeed("curl --fail http://localhost:9696/")
+    response = machine.succeed("curl --fail http://localhost:9696/")
+    assert '<title>Prowlarr</title>' in response, "Login page didn't load successfully"
+    machine.succeed("[ -d /var/lib/prowlarr ]")
   '';
 })
diff --git a/nixos/tests/qemu-vm-restrictnetwork.nix b/nixos/tests/qemu-vm-restrictnetwork.nix
index 49a105ef10767..49aefcc099bda 100644
--- a/nixos/tests/qemu-vm-restrictnetwork.nix
+++ b/nixos/tests/qemu-vm-restrictnetwork.nix
@@ -21,6 +21,8 @@ import ./make-test-python.nix ({
 
     else:
       start_all()
+      unrestricted.systemctl("start network-online.target")
+      restricted.systemctl("start network-online.target")
       unrestricted.wait_for_unit("network-online.target")
       restricted.wait_for_unit("network-online.target")
 
diff --git a/nixos/tests/quicktun.nix b/nixos/tests/quicktun.nix
new file mode 100644
index 0000000000000..a5a6324571174
--- /dev/null
+++ b/nixos/tests/quicktun.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+{
+  name = "quicktun";
+  meta.maintainers = with lib.maintainers; [ h7x4 ];
+
+  nodes = {
+    machine = { ... }: {
+      services.quicktun."test-tunnel" = {
+        protocol = "raw";
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("quicktun-test-tunnel.service")
+  '';
+})
diff --git a/nixos/tests/rspamd-trainer.nix b/nixos/tests/rspamd-trainer.nix
new file mode 100644
index 0000000000000..9c157903d24b6
--- /dev/null
+++ b/nixos/tests/rspamd-trainer.nix
@@ -0,0 +1,155 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  certs = import ./common/acme/server/snakeoil-certs.nix;
+  domain = certs.domain;
+in {
+  name = "rspamd-trainer";
+  meta = with pkgs.lib.maintainers; { maintainers = [ onny ]; };
+
+  nodes = {
+    machine = { options, config, ... }: {
+
+      security.pki.certificateFiles = [
+        certs.ca.cert
+      ];
+
+      networking.extraHosts = ''
+        127.0.0.1 ${domain}
+     '';
+
+      services.rspamd-trainer = {
+        enable = true;
+        settings = {
+          HOST = domain;
+          USERNAME = "spam@${domain}";
+          INBOXPREFIX = "INBOX/";
+        };
+        secrets = [
+          # Do not use this in production. This will make passwords
+          # world-readable in the Nix store
+          "${pkgs.writeText "secrets" ''
+            PASSWORD = test123
+          ''}"
+        ];
+      };
+
+      services.maddy = {
+        enable = true;
+        hostname = domain;
+        primaryDomain = domain;
+        ensureAccounts = [ "spam@${domain}" ];
+        ensureCredentials = {
+          # Do not use this in production. This will make passwords world-readable
+          # in the Nix store
+          "spam@${domain}".passwordFile = "${pkgs.writeText "postmaster" "test123"}";
+        };
+        tls = {
+          loader = "file";
+          certificates = [{
+            certPath = "${certs.${domain}.cert}";
+            keyPath = "${certs.${domain}.key}";
+          }];
+        };
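+        # extend maddy's default config so IMAP and submission also listen on
+        # the implicit-TLS ports (993 and 465) used by the helper scripts below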
+        config = builtins.replaceStrings [
+          "imap tcp://0.0.0.0:143"
+          "submission tcp://0.0.0.0:587"
+        ] [
+          "imap tls://0.0.0.0:993 tcp://0.0.0.0:143"
+          "submission tls://0.0.0.0:465 tcp://0.0.0.0:587"
+        ] options.services.maddy.config.default;
+      };
+
+      services.rspamd = {
+        enable = true;
+        locals = {
+          "redis.conf".text = ''
+            servers = "${config.services.redis.servers.rspamd.unixSocket}";
+          '';
+          "classifier-bayes.conf".text = ''
+            backend = "redis";
+            autolearn = true;
+          '';
+        };
+      };
+
+      services.redis.servers.rspamd = {
+        enable = true;
+        port = 0;
+        unixSocket = "/run/redis-rspamd/redis.sock";
+        user = config.services.rspamd.user;
+      };
+
+      environment.systemPackages = [
+        (pkgs.writers.writePython3Bin "send-testmail" { } ''
+          import smtplib
+          import ssl
+          from email.mime.text import MIMEText
+          context = ssl.create_default_context()
+          msg = MIMEText("Hello World")
+          msg['Subject'] = 'Test'
+          msg['From'] = "spam@${domain}"
+          msg['To'] = "spam@${domain}"
+          with smtplib.SMTP_SSL(host='${domain}', port=465, context=context) as smtp:
+              smtp.login('spam@${domain}', 'test123')
+              smtp.sendmail(
+                'spam@${domain}', 'spam@${domain}', msg.as_string()
+              )
+        '')
+        (pkgs.writers.writePython3Bin "create-mail-dirs" { } ''
+          import imaplib
+          with imaplib.IMAP4_SSL('${domain}') as imap:
+              imap.login('spam@${domain}', 'test123')
+              imap.create("\"INBOX/report_spam\"")
+              imap.create("\"INBOX/report_ham\"")
+              imap.create("\"INBOX/report_spam_reply\"")
+              imap.select("INBOX")
+              imap.copy("1", "\"INBOX/report_ham\"")
+              imap.logout()
+        '')
+        (pkgs.writers.writePython3Bin "test-imap" { } ''
+          import imaplib
+          with imaplib.IMAP4_SSL('${domain}') as imap:
+              imap.login('spam@${domain}', 'test123')
+              imap.select("INBOX/learned_ham")
+              status, refs = imap.search(None, 'ALL')
+              assert status == 'OK'
+              assert len(refs) == 1
+              status, msg = imap.fetch(refs[0], 'BODY[TEXT]')
+              assert status == 'OK'
+              assert msg[0][1].strip() == b"Hello World"
+              imap.logout()
+        '')
+      ];
+
+
+
+    };
+
+  };
+
+  testScript = { nodes }: ''
+    start_all()
+    machine.wait_for_unit("maddy.service")
+    machine.wait_for_open_port(143)
+    machine.wait_for_open_port(993)
+    machine.wait_for_open_port(587)
+    machine.wait_for_open_port(465)
+
+    # Send test mail to spam@domain
+    machine.succeed("send-testmail")
+
+    # Create mail directories required for rspamd-trainer and copy mail from
+    # INBOX into INBOX/report_ham
+    machine.succeed("create-mail-dirs")
+
+    # Start rspamd-trainer. It should read mail from INBOX/report_ham
+    machine.wait_for_unit("rspamd.service")
+    machine.wait_for_unit("redis-rspamd.service")
+    machine.wait_for_file("/run/rspamd/rspamd.sock")
+    machine.succeed("systemctl start rspamd-trainer.service")
+
+    # Check if mail got processed by rspamd-trainer successfully and check for
+    # it in INBOX/learned_ham
+    machine.succeed("test-imap")
+  '';
+})
diff --git a/nixos/tests/rss2email.nix b/nixos/tests/rss2email.nix
index f32326feb50fb..60b27b95fabe4 100644
--- a/nixos/tests/rss2email.nix
+++ b/nixos/tests/rss2email.nix
@@ -55,6 +55,7 @@ import ./make-test-python.nix {
   testScript = ''
     start_all()
 
+    server.systemctl("start network-online.target")
     server.wait_for_unit("network-online.target")
     server.wait_for_unit("opensmtpd")
     server.wait_for_unit("dovecot2")
diff --git a/nixos/tests/slimserver.nix b/nixos/tests/slimserver.nix
index c3f7b6fde4de0..95cbdcf4a2a15 100644
--- a/nixos/tests/slimserver.nix
+++ b/nixos/tests/slimserver.nix
@@ -39,8 +39,8 @@ import ./make-test-python.nix ({ pkgs, ...} : {
 
       with subtest("squeezelite player successfully connects to slimserver"):
           machine.wait_for_unit("squeezelite.service")
-          machine.wait_until_succeeds("journalctl -u squeezelite.service | grep 'slimproto:937 connected'")
-          player_mac = machine.wait_until_succeeds("journalctl -eu squeezelite.service | grep 'sendHELO:148 mac:'").strip().split(" ")[-1]
+          machine.wait_until_succeeds("journalctl -u squeezelite.service | grep -E 'slimproto:[0-9]+ connected'")
+          player_mac = machine.wait_until_succeeds("journalctl -eu squeezelite.service | grep -E 'sendHELO:[0-9]+ mac:'").strip().split(" ")[-1]
           player_id = machine.succeed(f"curl http://localhost:9000/jsonrpc.js -g -X POST -d '{json.dumps(rpc_get_player)}'")
           assert player_mac == json.loads(player_id)["result"]["_id"], "squeezelite player not found"
     '';
diff --git a/nixos/tests/snmpd.nix b/nixos/tests/snmpd.nix
new file mode 100644
index 0000000000000..9248a6b390101
--- /dev/null
+++ b/nixos/tests/snmpd.nix
@@ -0,0 +1,23 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "snmpd";
+
+  nodes.snmpd = {
+    environment.systemPackages = with pkgs; [
+      net-snmp
+    ];
+
+    services.snmpd = {
+      enable = true;
+      configText = ''
+        rocommunity public
+      '';
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("snmpd.service")
+    machine.succeed("snmpwalk -v 2c -c public localhost | grep SNMPv2-MIB::sysName.0")
+  '';
+
+})
diff --git a/nixos/tests/sogo.nix b/nixos/tests/sogo.nix
index acdad8d0f473b..e9059a2ab7734 100644
--- a/nixos/tests/sogo.nix
+++ b/nixos/tests/sogo.nix
@@ -1,7 +1,7 @@
 import ./make-test-python.nix ({ pkgs, ... }: {
   name = "sogo";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ ajs124 das_j ];
+    maintainers = [];
   };
 
   nodes = {
diff --git a/nixos/tests/spark/default.nix b/nixos/tests/spark/default.nix
index 462f0d23a4032..034e9711bed52 100644
--- a/nixos/tests/spark/default.nix
+++ b/nixos/tests/spark/default.nix
@@ -1,28 +1,48 @@
-import ../make-test-python.nix ({...}: {
-  name = "spark";
+{ pkgs, ... }:
 
-  nodes = {
-    worker = { nodes, pkgs, ... }: {
-      services.spark.worker = {
-        enable = true;
-        master = "master:7077";
+let
+  inherit (pkgs) lib;
+  tests = {
+    default = testsForPackage { sparkPackage = pkgs.spark; };
+  };
+
+  testsForPackage = args: lib.recurseIntoAttrs {
+    sparkCluster = testSparkCluster args;
+    passthru.override = args': testsForPackage (args // args');
+  };
+  testSparkCluster = { sparkPackage, ... }: pkgs.testers.nixosTest ({
+    name = "spark";
+
+    nodes = {
+      worker = { nodes, pkgs, ... }: {
+        services.spark = {
+          package = sparkPackage;
+          worker = {
+            enable = true;
+            master = "master:7077";
+          };
+        };
+        virtualisation.memorySize = 2048;
       };
-      virtualisation.memorySize = 2048;
-    };
-    master = { config, pkgs, ... }: {
-      services.spark.master = {
-        enable = true;
-        bind = "0.0.0.0";
+      master = { config, pkgs, ... }: {
+        services.spark = {
+          package = sparkPackage;
+          master = {
+            enable = true;
+            bind = "0.0.0.0";
+          };
+        };
+        networking.firewall.allowedTCPPorts = [ 22 7077 8080 ];
       };
-      networking.firewall.allowedTCPPorts = [ 22 7077 8080 ];
     };
-  };
 
-  testScript = ''
-    master.wait_for_unit("spark-master.service")
-    worker.wait_for_unit("spark-worker.service")
-    worker.copy_from_host( "${./spark_sample.py}", "/spark_sample.py" )
-    assert "<title>Spark Master at spark://" in worker.succeed("curl -sSfkL http://master:8080/")
-    worker.succeed("spark-submit --master spark://master:7077 --executor-memory 512m --executor-cores 1 /spark_sample.py")
-  '';
-})
+    testScript = ''
+      master.wait_for_unit("spark-master.service")
+      worker.wait_for_unit("spark-worker.service")
+      worker.copy_from_host( "${./spark_sample.py}", "/spark_sample.py" )
+      assert "<title>Spark Master at spark://" in worker.succeed("curl -sSfkL http://master:8080/")
+      worker.succeed("spark-submit --version | systemd-cat")
+      worker.succeed("spark-submit --master spark://master:7077 --executor-memory 512m --executor-cores 1 /spark_sample.py")
+    '';
+  });
+in tests
diff --git a/nixos/tests/ssh-agent-auth.nix b/nixos/tests/ssh-agent-auth.nix
new file mode 100644
index 0000000000000..fee40afd61539
--- /dev/null
+++ b/nixos/tests/ssh-agent-auth.nix
@@ -0,0 +1,55 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+  let
+    inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+  in {
+    name = "ssh-agent-auth";
+    meta.maintainers = with lib.maintainers; [ nicoo ];
+
+    nodes = let nodeConfig = n: { ... }: {
+      users.users = {
+        admin = {
+          isNormalUser = true;
+          extraGroups = [ "wheel" ];
+          openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+        };
+        foo.isNormalUser = true;
+      };
+
+      security.pam.sshAgentAuth = {
+        # Must be specified, as nixpkgs CI expects everything to eval without warning
+        authorizedKeysFiles = [ "/etc/ssh/authorized_keys.d/%u" ];
+        enable = true;
+      };
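+      # The node name ("sudo" or "sudo_rs") selects the corresponding
+      # security.sudo / security.sudo-rs module.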
+      security.${lib.replaceStrings [ "_" ] [ "-" ] n} = {
+        enable = true;
+        wheelNeedsPassword = true;  # We are checking `pam_ssh_agent_auth(8)` works for a sudoer
+      };
+
+      # Necessary for pam_ssh_agent_auth  >_>'
+      services.openssh.enable = true;
+    };
+    in lib.genAttrs [ "sudo" "sudo_rs" ] nodeConfig;
+
+    testScript = let
+      privateKeyPath = "/home/admin/.ssh/id_ecdsa";
+      userScript = pkgs.writeShellScript "test-script" ''
+        set -e
+        ssh-add -q ${privateKeyPath}
+
+        # faketty needed to ensure `sudo` doesn't write to the controlling PTY,
+        #  which would break the test-driver's line-oriented protocol.
+        ${lib.getExe pkgs.faketty} sudo -u foo -- id -un
+      '';
+    in ''
+      for vm in (sudo, sudo_rs):
+        sudo_impl = vm.name.replace("_", "-")
+        with subtest(f"wheel user can auth with ssh-agent for {sudo_impl}"):
+            vm.copy_from_host("${snakeOilPrivateKey}", "${privateKeyPath}")
+            vm.succeed("chmod -R 0700 /home/admin")
+            vm.succeed("chown -R admin:users /home/admin")
+
+            # Run `userScript` in an environment with an SSH-agent available
+            assert vm.succeed("sudo -u admin -- ssh-agent ${userScript} 2>&1").strip() == "foo"
+    '';
+  }
+)
diff --git a/nixos/tests/ssh-audit.nix b/nixos/tests/ssh-audit.nix
index bd6255b8044d9..25772aba3ea08 100644
--- a/nixos/tests/ssh-audit.nix
+++ b/nixos/tests/ssh-audit.nix
@@ -70,6 +70,7 @@ import ./make-test-python.nix (
       ${serverName}.succeed("${pkgs.ssh-audit}/bin/ssh-audit 127.0.0.1")
 
       # Wait for client to be able to connect to the server
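+      # network-online.target is not reached on its own here, so start it
+      # explicitly before waiting for it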
+      ${clientName}.systemctl("start network-online.target")
       ${clientName}.wait_for_unit("network-online.target")
 
       # Set up trusted private key
diff --git a/nixos/tests/stub-ld.nix b/nixos/tests/stub-ld.nix
new file mode 100644
index 0000000000000..25161301741b7
--- /dev/null
+++ b/nixos/tests/stub-ld.nix
@@ -0,0 +1,73 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "stub-ld";
+
+  nodes.machine = { lib, ... }:
+    {
+      environment.stub-ld.enable = true;
+
+      specialisation.nostub = {
+        inheritParentConfig = true;
+
+        configuration = { ... }: {
+          environment.stub-ld.enable = lib.mkForce false;
+        };
+      };
+    };
+
+  testScript = let
+    libDir = pkgs.stdenv.hostPlatform.libDir;
+    ldsoBasename = lib.last (lib.splitString "/" pkgs.stdenv.cc.bintools.dynamicLinker);
+
+    check32 = pkgs.stdenv.isx86_64;
+    pkgs32 = pkgs.pkgsi686Linux;
+
+    libDir32 = pkgs32.stdenv.hostPlatform.libDir;
+    ldsoBasename32 = lib.last (lib.splitString "/" pkgs32.stdenv.cc.bintools.dynamicLinker);
+
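+    # Prebuilt dynamically linked binaries (rustic release tarballs) that expect
+    # an FHS dynamic linker; used to check that the stub ld.so makes them fail
+    # with exit code 127 and a NixOS-specific hint on stderr.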
+    test-exec = builtins.mapAttrs (n: v: pkgs.runCommand "test-exec-${n}" { src = pkgs.fetchurl v; } "mkdir -p $out;cd $out;tar -xzf $src") {
+      x86_64-linux.url = "https://github.com/rustic-rs/rustic/releases/download/v0.6.1/rustic-v0.6.1-x86_64-unknown-linux-gnu.tar.gz";
+      x86_64-linux.hash = "sha256-3zySzx8MKFprMOi++yr2ZGASE0aRfXHQuG3SN+kWUCI=";
+      i686-linux.url = "https://github.com/rustic-rs/rustic/releases/download/v0.6.1/rustic-v0.6.1-i686-unknown-linux-gnu.tar.gz";
+      i686-linux.hash = "sha256-fWNiATFeg0B2pfB5zndlnzGn7Ztl8diVS1rFLEDnSLU=";
+      aarch64-linux.url = "https://github.com/rustic-rs/rustic/releases/download/v0.6.1/rustic-v0.6.1-aarch64-unknown-linux-gnu.tar.gz";
+      aarch64-linux.hash = "sha256-hnldbd2cctQIAhIKoEZLIWY8H3jiFBClkNy2UlyyvAs=";
+    };
+    exec-name = "rustic";
+
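+    # emit the given Python statement only when 32-bit support is tested
+    # (x86_64 hosts); otherwise substitute a no-op "pass"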
+    if32 = pythonStatement: if check32 then pythonStatement else "pass";
+  in
+    ''
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      with subtest("Check for stub (enabled, initial)"):
+          machine.succeed('test -L /${libDir}/${ldsoBasename}')
+          ${if32 "machine.succeed('test -L /${libDir32}/${ldsoBasename32}')"}
+
+      with subtest("Try FHS executable"):
+          machine.copy_from_host('${test-exec.${pkgs.system}}','test-exec')
+          machine.succeed('if test-exec/${exec-name} 2>outfile; then false; else [ $? -eq 127 ];fi')
+          machine.succeed('grep -qi nixos outfile')
+          ${if32 "machine.copy_from_host('${test-exec.${pkgs32.system}}','test-exec32')"}
+          ${if32 "machine.succeed('if test-exec32/${exec-name} 2>outfile32; then false; else [ $? -eq 127 ];fi')"}
+          ${if32 "machine.succeed('grep -qi nixos outfile32')"}
+
+      with subtest("Disable stub"):
+          machine.succeed("/run/booted-system/specialisation/nostub/bin/switch-to-configuration test")
+
+      with subtest("Check for stub (disabled)"):
+          machine.fail('test -e /${libDir}/${ldsoBasename}')
+          ${if32 "machine.fail('test -e /${libDir32}/${ldsoBasename32}')"}
+
+      with subtest("Create file in stub location (to be overwritten)"):
+          machine.succeed('mkdir -p /${libDir};touch /${libDir}/${ldsoBasename}')
+          ${if32 "machine.succeed('mkdir -p /${libDir32};touch /${libDir32}/${ldsoBasename32}')"}
+
+      with subtest("Re-enable stub"):
+          machine.succeed("/run/booted-system/bin/switch-to-configuration test")
+
+      with subtest("Check for stub (enabled, final)"):
+          machine.succeed('test -L /${libDir}/${ldsoBasename}')
+          ${if32 "machine.succeed('test -L /${libDir32}/${ldsoBasename32}')"}
+    '';
+})
diff --git a/nixos/tests/stunnel.nix b/nixos/tests/stunnel.nix
index 07fba435d4df6..f8cfa0414761d 100644
--- a/nixos/tests/stunnel.nix
+++ b/nixos/tests/stunnel.nix
@@ -19,8 +19,10 @@ let
   makeCert = { config, pkgs, ... }: {
     systemd.services.create-test-cert = {
       wantedBy = [ "sysinit.target" ];
-      before = [ "sysinit.target" ];
+      before = [ "sysinit.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
       unitConfig.DefaultDependencies = false;
+      serviceConfig.Type = "oneshot";
       script = ''
         ${pkgs.openssl}/bin/openssl req -batch -x509 -newkey rsa -nodes -out /test-cert.pem -keyout /test-key.pem -subj /CN=${config.networking.hostName}
         ( umask 077; cat /test-key.pem /test-cert.pem > /test-key-and-cert.pem )
diff --git a/nixos/tests/suwayomi-server.nix b/nixos/tests/suwayomi-server.nix
new file mode 100644
index 0000000000000..36072028380b8
--- /dev/null
+++ b/nixos/tests/suwayomi-server.nix
@@ -0,0 +1,46 @@
+{ system ? builtins.currentSystem
+, pkgs
+, lib ? pkgs.lib
+}:
+let
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+  inherit (lib) recursiveUpdate;
+
+  baseTestConfig = {
+    meta.maintainers = with lib.maintainers; [ ratcornu ];
+    nodes.machine = { pkgs, ... }: {
+      services.suwayomi-server = {
+        enable = true;
+        settings.server.port = 1234;
+      };
+    };
+    testScript = ''
+      machine.wait_for_unit("suwayomi-server.service")
+      machine.wait_for_open_port(1234)
+      machine.succeed("curl --fail http://localhost:1234/")
+    '';
+  };
+in
+
+{
+  without-auth = makeTest (recursiveUpdate baseTestConfig {
+    name = "suwayomi-server-without-auth";
+  });
+
+  with-auth = makeTest (recursiveUpdate baseTestConfig {
+    name = "suwayomi-server-with-auth";
+
+    nodes.machine = { pkgs, ... }: {
+      services.suwayomi-server = {
+        enable = true;
+
+        settings.server = {
+          port = 1234;
+          basicAuthEnabled = true;
+          basicAuthUsername = "alice";
+          basicAuthPasswordFile = pkgs.writeText "snakeoil-pass.txt" "pass";
+        };
+      };
+    };
+  });
+}
diff --git a/nixos/tests/sway.nix b/nixos/tests/sway.nix
index 695d4a7708104..185c5b1b0aa90 100644
--- a/nixos/tests/sway.nix
+++ b/nixos/tests/sway.nix
@@ -134,7 +134,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     machine.wait_for_file("/tmp/sway-ipc.sock")
 
     # Test XWayland (foot does not support X):
-    swaymsg("exec WINIT_UNIX_BACKEND=x11 WAYLAND_DISPLAY=invalid alacritty")
+    swaymsg("exec WINIT_UNIX_BACKEND=x11 WAYLAND_DISPLAY= alacritty")
     wait_for_window("alice@machine")
     machine.send_chars("test-x11\n")
     machine.wait_for_file("/tmp/test-x11-exit-ok")
diff --git a/nixos/tests/sysinit-reactivation.nix b/nixos/tests/sysinit-reactivation.nix
new file mode 100644
index 0000000000000..1a0caecb610a3
--- /dev/null
+++ b/nixos/tests/sysinit-reactivation.nix
@@ -0,0 +1,107 @@
+# This covers two scenarios in a single test:
+# - A post-sysinit service needs to be restarted AFTER tmpfiles was restarted.
+# - A service needs to be restarted BEFORE tmpfiles is restarted.
+
+{ lib, ... }:
+
+let
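+  # Build a specialisation whose service environment and tmpfiles rules use
+  # generation-prefixed paths, so the test can verify that each switch re-runs
+  # tmpfiles and the pre-/post-sysinit services in the expected order.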
+  makeGeneration = generation: {
+    "${generation}".configuration = {
+      systemd.services.pre-sysinit-before-tmpfiles.environment.USER =
+        lib.mkForce "${generation}-tmpfiles-user";
+
+      systemd.services.pre-sysinit-after-tmpfiles.environment = {
+        NEEDED_PATH = lib.mkForce "/run/${generation}-needed-by-pre-sysinit-after-tmpfiles";
+        PATH_TO_CREATE = lib.mkForce "/run/${generation}-needed-by-post-sysinit";
+      };
+
+      systemd.services.post-sysinit.environment = {
+        NEEDED_PATH = lib.mkForce "/run/${generation}-needed-by-post-sysinit";
+        PATH_TO_CREATE = lib.mkForce "/run/${generation}-created-by-post-sysinit";
+      };
+
+      systemd.tmpfiles.settings.test = lib.mkForce {
+        "/run/${generation}-needed-by-pre-sysinit-after-tmpfiles".f.user =
+          "${generation}-tmpfiles-user";
+      };
+    };
+  };
+in
+
+{
+
+  name = "sysinit-reactivation";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine = { config, lib, pkgs, ... }: {
+    systemd.services.pre-sysinit-before-tmpfiles = {
+      wantedBy = [ "sysinit.target" ];
+      requiredBy = [ "sysinit-reactivation.target" ];
+      before = [ "systemd-tmpfiles-setup.service" "systemd-tmpfiles-resetup.service" ];
+      unitConfig.DefaultDependencies = false;
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      environment.USER = "tmpfiles-user";
+      script = "${pkgs.shadow}/bin/useradd $USER";
+    };
+
+    systemd.services.pre-sysinit-after-tmpfiles = {
+      wantedBy = [ "sysinit.target" ];
+      requiredBy = [ "sysinit-reactivation.target" ];
+      after = [ "systemd-tmpfiles-setup.service" "systemd-tmpfiles-resetup.service" ];
+      unitConfig.DefaultDependencies = false;
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      environment = {
+        NEEDED_PATH = "/run/needed-by-pre-sysinit-after-tmpfiles";
+        PATH_TO_CREATE = "/run/needed-by-post-sysinit";
+      };
+      script = ''
+        if [[ -e $NEEDED_PATH ]]; then
+          touch $PATH_TO_CREATE
+        fi
+      '';
+    };
+
+    systemd.services.post-sysinit = {
+      wantedBy = [ "default.target" ];
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      environment = {
+        NEEDED_PATH = "/run/needed-by-post-sysinit";
+        PATH_TO_CREATE = "/run/created-by-post-sysinit";
+      };
+      script = ''
+        if [[ -e $NEEDED_PATH ]]; then
+          touch $PATH_TO_CREATE
+        fi
+      '';
+    };
+
+    systemd.tmpfiles.settings.test = {
+      "/run/needed-by-pre-sysinit-after-tmpfiles".f.user =
+        "tmpfiles-user";
+    };
+
+    specialisation = lib.mkMerge [
+      (makeGeneration "second")
+      (makeGeneration "third")
+    ];
+  };
+
+  testScript = { nodes, ... }: ''
+    def switch(generation):
+      toplevel = "${nodes.machine.system.build.toplevel}";
+      machine.succeed(f"{toplevel}/specialisation/{generation}/bin/switch-to-configuration switch")
+
+    machine.wait_for_unit("default.target")
+    machine.succeed("test -e /run/created-by-post-sysinit")
+
+    switch("second")
+    machine.succeed("test -e /run/second-created-by-post-sysinit")
+
+    switch("third")
+    machine.succeed("test -e /run/third-created-by-post-sysinit")
+  '';
+}
diff --git a/nixos/tests/systemd-boot.nix b/nixos/tests/systemd-boot.nix
index 256a18532b0a2..c0b37a230df0f 100644
--- a/nixos/tests/systemd-boot.nix
+++ b/nixos/tests/systemd-boot.nix
@@ -253,7 +253,7 @@ in
   };
 
   garbage-collect-entry = makeTest {
-    name = "systemd-boot-switch-test";
+    name = "systemd-boot-garbage-collect-entry";
     meta.maintainers = with pkgs.lib.maintainers; [ julienmalka ];
 
     nodes = {
diff --git a/nixos/tests/systemd-initrd-networkd.nix b/nixos/tests/systemd-initrd-networkd.nix
index 9c4ddb6e4b363..691f4300d7a23 100644
--- a/nixos/tests/systemd-initrd-networkd.nix
+++ b/nixos/tests/systemd-initrd-networkd.nix
@@ -33,7 +33,8 @@ let
       boot.initrd.network.flushBeforeStage2 = flush;
       systemd.services.check-flush = {
         requiredBy = ["multi-user.target"];
-        before = ["network-pre.target" "multi-user.target"];
+        before = [ "network-pre.target" "multi-user.target" "shutdown.target" ];
+        conflicts = [ "shutdown.target" ];
         wants = ["network-pre.target"];
         unitConfig.DefaultDependencies = false;
         serviceConfig.Type = "oneshot";
diff --git a/nixos/tests/systemd-journal-gateway.nix b/nixos/tests/systemd-journal-gateway.nix
new file mode 100644
index 0000000000000..1d20943f23880
--- /dev/null
+++ b/nixos/tests/systemd-journal-gateway.nix
@@ -0,0 +1,90 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+{
+  name = "systemd-journal-gateway";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ minijackson raitobezarius ];
+  };
+
+  # Named client for coherence with the systemd-journal-upload test, and for
+  # certificate validation
+  nodes.client = {
+    services.journald.gateway = {
+      enable = true;
+      cert = "/run/secrets/client/cert.pem";
+      key = "/run/secrets/client/key.pem";
+      trust = "/run/secrets/ca.cert.pem";
+    };
+  };
+
+  testScript = ''
+    import json
+    import subprocess
+    import tempfile
+
+    tmpdir_o = tempfile.TemporaryDirectory()
+    tmpdir = tmpdir_o.name
+
+    def generate_pems(domain: str):
+      subprocess.run(
+        [
+          "${pkgs.minica}/bin/minica",
+          "--ca-key=ca.key.pem",
+          "--ca-cert=ca.cert.pem",
+          f"--domains={domain}",
+        ],
+        cwd=str(tmpdir),
+      )
+
+    with subtest("Creating keys and certificates"):
+      generate_pems("server")
+      generate_pems("client")
+
+    client.wait_for_unit("multi-user.target")
+
+    def copy_pem(file: str):
+      machine.copy_from_host(source=f"{tmpdir}/{file}", target=f"/run/secrets/{file}")
+      machine.succeed(f"chmod 644 /run/secrets/{file}")
+
+    with subtest("Copying keys and certificates"):
+      machine.succeed("mkdir -p /run/secrets/{client,server}")
+      copy_pem("server/cert.pem")
+      copy_pem("server/key.pem")
+      copy_pem("client/cert.pem")
+      copy_pem("client/key.pem")
+      copy_pem("ca.cert.pem")
+
+    client.wait_for_unit("multi-user.target")
+
+    curl = '${pkgs.curl}/bin/curl'
+    accept_json = '--header "Accept: application/json"'
+    cacert = '--cacert /run/secrets/ca.cert.pem'
+    cert = '--cert /run/secrets/server/cert.pem'
+    key = '--key /run/secrets/server/key.pem'
+    base_url = 'https://client:19531'
+
+    curl_cli = f"{curl} {accept_json} {cacert} {cert} {key} --fail"
+
+    machine_info = client.succeed(f"{curl_cli} {base_url}/machine")
+    assert json.loads(machine_info)["hostname"] == "client", "wrong machine name"
+
+    # The HTTP request should have started the gateway service, triggered by
+    # the .socket unit
+    client.wait_for_unit("systemd-journal-gatewayd.service")
+
+    identifier = "nixos-test"
+    message = "Hello from NixOS test infrastructure"
+
+    client.succeed(f"systemd-cat --identifier={identifier} <<< '{message}'")
+
+    # max-time is a workaround against a bug in systemd-journal-gatewayd where
+    # if TLS is enabled, the connection is never closed. Since it will timeout,
+    # we ignore the return code.
+    entries = client.succeed(
+        f"{curl_cli} --max-time 5 {base_url}/entries?SYSLOG_IDENTIFIER={identifier} || true"
+    )
+
+    # Number of entries should be only 1
+    added_entry = json.loads(entries)
+    assert added_entry["SYSLOG_IDENTIFIER"] == identifier and added_entry["MESSAGE"] == message, "journal entry does not correspond"
+  '';
+})
diff --git a/nixos/tests/systemd-journal-upload.nix b/nixos/tests/systemd-journal-upload.nix
new file mode 100644
index 0000000000000..0cbde379aee96
--- /dev/null
+++ b/nixos/tests/systemd-journal-upload.nix
@@ -0,0 +1,101 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "systemd-journal-upload";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ minijackson raitobezarius ];
+  };
+
+  nodes.server = { nodes, ... }: {
+    services.journald.remote = {
+      enable = true;
+      listen = "http";
+      settings.Remote = {
+        ServerCertificateFile = "/run/secrets/server.cert.pem";
+        ServerKeyFile = "/run/secrets/server.key.pem";
+        TrustedCertificateFile = "/run/secrets/ca.cert.pem";
+        Seal = true;
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = [ nodes.server.services.journald.remote.port ];
+  };
+
+  nodes.client = { lib, nodes, ... }: {
+    services.journald.upload = {
+      enable = true;
+      settings.Upload = {
+        URL = "http://server:${toString nodes.server.services.journald.remote.port}";
+        ServerCertificateFile = "/run/secrets/client.cert.pem";
+        ServerKeyFile = "/run/secrets/client.key.pem";
+        TrustedCertificateFile = "/run/secrets/ca.cert.pem";
+      };
+    };
+
+    # Wait for the PEMs to arrive
+    systemd.services.systemd-journal-upload.wantedBy = lib.mkForce [];
+    systemd.paths.systemd-journal-upload = {
+      wantedBy = [ "default.target" ];
+      # This file must be copied last
+      pathConfig.PathExists = [ "/run/secrets/ca.cert.pem" ];
+    };
+  };
+
+  testScript = ''
+    import subprocess
+    import tempfile
+
+    tmpdir_o = tempfile.TemporaryDirectory()
+    tmpdir = tmpdir_o.name
+
+    def generate_pems(domain: str):
+      subprocess.run(
+        [
+          "${pkgs.minica}/bin/minica",
+          "--ca-key=ca.key.pem",
+          "--ca-cert=ca.cert.pem",
+          f"--domains={domain}",
+        ],
+        cwd=str(tmpdir),
+      )
+
+    with subtest("Creating keys and certificates"):
+      generate_pems("server")
+      generate_pems("client")
+
+    server.wait_for_unit("multi-user.target")
+    client.wait_for_unit("multi-user.target")
+
+    def copy_pems(machine: Machine, domain: str):
+      machine.succeed("mkdir /run/secrets")
+      machine.copy_from_host(
+        source=f"{tmpdir}/{domain}/cert.pem",
+        target=f"/run/secrets/{domain}.cert.pem",
+      )
+      machine.copy_from_host(
+        source=f"{tmpdir}/{domain}/key.pem",
+        target=f"/run/secrets/{domain}.key.pem",
+      )
+      # Should be last
+      machine.copy_from_host(
+        source=f"{tmpdir}/ca.cert.pem",
+        target="/run/secrets/ca.cert.pem",
+      )
+
+    with subtest("Copying keys and certificates"):
+      copy_pems(server, "server")
+      copy_pems(client, "client")
+
+    client.wait_for_unit("systemd-journal-upload.service")
+    # The journal upload should have started the remote service, triggered by
+    # the .socket unit
+    server.wait_for_unit("systemd-journal-remote.service")
+
+    identifier = "nixos-test"
+    message = "Hello from NixOS test infrastructure"
+
+    client.succeed(f"systemd-cat --identifier={identifier} <<< '{message}'")
+    server.wait_until_succeeds(
+      f"journalctl --file /var/log/journal/remote/remote-*.journal --identifier={identifier} | grep -F '{message}'"
+    )
+  '';
+})
diff --git a/nixos/tests/systemd-journal.nix b/nixos/tests/systemd-journal.nix
index d2063a3b9a44e..ad60c0f547a41 100644
--- a/nixos/tests/systemd-journal.nix
+++ b/nixos/tests/systemd-journal.nix
@@ -6,17 +6,11 @@ import ./make-test-python.nix ({ pkgs, ... }:
     maintainers = [ lewo ];
   };
 
-  nodes.machine = { pkgs, lib, ... }: {
-    services.journald.enableHttpGateway = true;
-  };
+  nodes.machine = { };
 
   testScript = ''
     machine.wait_for_unit("multi-user.target")
 
     machine.succeed("journalctl --grep=systemd")
-
-    machine.succeed(
-        "${pkgs.curl}/bin/curl -s localhost:19531/machine | ${pkgs.jq}/bin/jq -e '.hostname == \"machine\"'"
-    )
   '';
 })
diff --git a/nixos/tests/systemd-networkd-dhcpserver.nix b/nixos/tests/systemd-networkd-dhcpserver.nix
index cf0ccb7442118..665d8b5a05291 100644
--- a/nixos/tests/systemd-networkd-dhcpserver.nix
+++ b/nixos/tests/systemd-networkd-dhcpserver.nix
@@ -101,6 +101,9 @@ import ./make-test-python.nix ({pkgs, ...}: {
   };
   testScript = { ... }: ''
     start_all()
+
+    router.systemctl("start network-online.target")
+    client.systemctl("start network-online.target")
     router.wait_for_unit("systemd-networkd-wait-online.service")
     client.wait_for_unit("systemd-networkd-wait-online.service")
     client.wait_until_succeeds("ping -c 5 10.0.2.1")
diff --git a/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix b/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
index 54f371e6c070f..1e55341657bdb 100644
--- a/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
+++ b/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
@@ -263,9 +263,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
           };
         };
       };
-
-      # make the network-online target a requirement, we wait for it in our test script
-      systemd.targets.network-online.wantedBy = [ "multi-user.target" ];
     };
 
     # This is the client behind the router. We should be receiving router
@@ -278,9 +275,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         useNetworkd = true;
         useDHCP = false;
       };
-
-      # make the network-online target a requirement, we wait for it in our test script
-      systemd.targets.network-online.wantedBy = [ "multi-user.target" ];
     };
   };
 
@@ -294,6 +288,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     # Since we only care about IPv6 that should not involve waiting for legacy
     # IP leases.
     client.start()
+    client.systemctl("start network-online.target")
     client.wait_for_unit("network-online.target")
 
     # the static address on the router should not be reachable
@@ -312,6 +307,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     isp.wait_for_unit("multi-user.target")
 
     # wait until the uplink interface has a good status
+    router.systemctl("start network-online.target")
     router.wait_for_unit("network-online.target")
     router.wait_until_succeeds("ping -6 -c1 2001:DB8::1")
 
diff --git a/nixos/tests/systemd-nspawn.nix b/nixos/tests/systemd-nspawn.nix
index 1a4251ef069e8..b86762233d183 100644
--- a/nixos/tests/systemd-nspawn.nix
+++ b/nixos/tests/systemd-nspawn.nix
@@ -38,6 +38,7 @@ in {
     start_all()
 
     server.wait_for_unit("nginx.service")
+    client.systemctl("start network-online.target")
     client.wait_for_unit("network-online.target")
     client.succeed("machinectl pull-raw --verify=signature http://server/testimage.raw")
     client.succeed(
diff --git a/nixos/tests/systemd-timesyncd-nscd-dnssec.nix b/nixos/tests/systemd-timesyncd-nscd-dnssec.nix
new file mode 100644
index 0000000000000..697dd824e3453
--- /dev/null
+++ b/nixos/tests/systemd-timesyncd-nscd-dnssec.nix
@@ -0,0 +1,61 @@
+# This test verifies that systemd-timesyncd can resolve the NTP server hostname when DNSSEC validation
+# fails even though it is enforced in the systemd-resolved settings. This is required to solve
+# the chicken-and-egg problem where DNSSEC validation needs the correct time to work, but to set the
+# correct time, we need to connect to an NTP server, which usually requires resolving its hostname.
+#
+# This test does the following:
+# - Sets up a DNS server (tinydns) listening on the eth1 IP address, serving .ntp and fake.ntp records.
+# - Configures that DNS server as a resolver and enables DNSSEC in systemd-resolved settings.
+# - Configures systemd-timesyncd to use fake.ntp hostname as an NTP server.
+# - Performs a regular DNS lookup, to ensure it fails due to broken DNSSEC.
+# - Waits until systemd-timesyncd resolves fake.ntp by checking its debug output.
+#   Here, we don't expect systemd-timesyncd to connect and synchronize time because there is no NTP
+#   server running. For this test to succeed, we only need to ensure that systemd-timesyncd
+#   resolves the IP address of the fake.ntp host.
+
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  ntpHostname = "fake.ntp";
+  ntpIP = "192.0.2.1";
+in
+{
+  name = "systemd-timesyncd";
+  nodes.machine = { pkgs, lib, config, ... }:
+    let
+      eth1IP = (lib.head config.networking.interfaces.eth1.ipv4.addresses).address;
+    in
+    {
+      # Setup a local DNS server for the NTP domain on the eth1 IP address
+      services.tinydns = {
+        enable = true;
+        ip = eth1IP;
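+        # tinydns-data records: the leading "." line makes this server authoritative
+        # for the .ntp zone, and the leading "+" line adds an A record pointing
+        # fake.ntp at 192.0.2.1 (TEST-NET-1), where no real NTP server answers.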
+        data = ''
+          .ntp:${eth1IP}
+          +.${ntpHostname}:${ntpIP}
+        '';
+      };
+
+      # Enable systemd-resolved with DNSSEC and use the local DNS as a name server
+      services.resolved.enable = true;
+      services.resolved.dnssec = "true";
+      networking.nameservers = [ eth1IP ];
+
+      # Configure systemd-timesyncd to use our NTP hostname
+      services.timesyncd.enable = lib.mkForce true;
+      services.timesyncd.servers = [ ntpHostname ];
+      services.timesyncd.extraConfig = ''
+        FallbackNTP=${ntpHostname}
+      '';
+
+      # The debug output is necessary to determine whether systemd-timesyncd successfully resolves our NTP hostname or not
+      systemd.services.systemd-timesyncd.environment.SYSTEMD_LOG_LEVEL = "debug";
+    };
+
+  testScript = ''
+    machine.wait_for_unit("tinydns.service")
+    machine.wait_for_unit("systemd-timesyncd.service")
+    machine.fail("resolvectl query ${ntpHostname}")
+    machine.wait_until_succeeds("journalctl -u systemd-timesyncd.service --grep='Resolved address ${ntpIP}:123 for ${ntpHostname}'")
+  '';
+})
diff --git a/nixos/tests/systemtap.nix b/nixos/tests/systemtap.nix
new file mode 100644
index 0000000000000..5cd79d66e872b
--- /dev/null
+++ b/nixos/tests/systemtap.nix
@@ -0,0 +1,50 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}@args:
+
+with pkgs.lib;
+
+let
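+  # Minimal SystemTap script: probe a kernel function hit by any poll() syscall,
+  # print a marker line, and exit immediately.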
+  stapScript = pkgs.writeText "test.stp" ''
+    probe kernel.function("do_sys_poll") {
+      println("kernel function probe & println work")
+      exit()
+    }
+  '';
+
+  ## TODO shared infra with ../kernel-generic.nix
+  testsForLinuxPackages = linuxPackages: (import ./make-test-python.nix ({ pkgs, ... }: {
+    name = "kernel-${linuxPackages.kernel.version}";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ bendlas ];
+    };
+
+    nodes.machine = { ... }:
+      {
+        boot.kernelPackages = linuxPackages;
+        programs.systemtap.enable = true;
+      };
+
+    testScript =
+      ''
+        with subtest("Capture stap output"):
+            output = machine.succeed("stap ${stapScript} 2>&1")
+
+        with subtest("Ensure that expected output from stap script is there"):
+            assert "kernel function probe & println work\n" == output, "kernel function probe & println work\n != " + output
+      '';
+  }) args);
+
+  ## TODO shared infra with ../kernel-generic.nix
+  kernels = {
+    inherit (pkgs.linuxKernel.packageAliases) linux_default linux_latest;
+  };
+
+in mapAttrs (_: lP: testsForLinuxPackages lP) kernels // {
+  passthru = {
+    inherit testsForLinuxPackages;
+
+    testsForKernel = kernel: testsForLinuxPackages (pkgs.linuxPackagesFor kernel);
+  };
+}
diff --git a/nixos/tests/tayga.nix b/nixos/tests/tayga.nix
index 44974f6efea83..4aade67d74d0d 100644
--- a/nixos/tests/tayga.nix
+++ b/nixos/tests/tayga.nix
@@ -206,6 +206,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
   testScript = ''
     # start client and server
     for machine in client, server:
+      machine.systemctl("start network-online.target")
       machine.wait_for_unit("network-online.target")
       machine.log(machine.execute("ip addr")[1])
       machine.log(machine.execute("ip route")[1])
@@ -214,6 +215,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
     # test systemd-networkd and nixos-scripts based router
     for router in router_systemd, router_nixos:
       router.start()
+      router.systemctl("start network-online.target")
       router.wait_for_unit("network-online.target")
       router.wait_for_unit("tayga.service")
       router.log(machine.execute("ip addr")[1])
diff --git a/nixos/tests/telegraf.nix b/nixos/tests/telegraf.nix
index af9c5c387a5dd..c3cdb1645213a 100644
--- a/nixos/tests/telegraf.nix
+++ b/nixos/tests/telegraf.nix
@@ -12,7 +12,6 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     services.telegraf.extraConfig = {
       agent.interval = "1s";
       agent.flush_interval = "1s";
-      inputs.procstat = {};
       inputs.exec = {
         commands = [
           "${pkgs.runtimeShell} -c 'echo $SECRET,tag=a i=42i'"
diff --git a/nixos/tests/teleport.nix b/nixos/tests/teleport.nix
index cdf762b128448..d68917c6c7acb 100644
--- a/nixos/tests/teleport.nix
+++ b/nixos/tests/teleport.nix
@@ -9,7 +9,8 @@ with import ../lib/testing-python.nix { inherit system pkgs; };
 let
   packages = with pkgs; {
     "default" = teleport;
-    "11" = teleport_11;
+    "12" = teleport_12;
+    "13" = teleport_13;
   };
 
   minimal = package: {
diff --git a/nixos/tests/terminal-emulators.nix b/nixos/tests/terminal-emulators.nix
index 2306c03c18e76..3c1188ca88c99 100644
--- a/nixos/tests/terminal-emulators.nix
+++ b/nixos/tests/terminal-emulators.nix
@@ -23,9 +23,8 @@ with pkgs.lib;
 let tests = {
       alacritty.pkg = p: p.alacritty;
 
-      # times out after spending many hours
-      #contour.pkg = p: p.contour;
-      #contour.cmd = "contour $command";
+      contour.pkg = p: p.contour;
+      contour.cmd = "contour early-exit-threshold 0 execute $command";
 
       cool-retro-term.pkg = p: p.cool-retro-term;
       cool-retro-term.colourTest = false; # broken by gloss effect
@@ -62,6 +61,8 @@ let tests = {
 
       konsole.pkg = p: p.plasma5Packages.konsole;
 
+      lomiri-terminal-app.pkg = p: p.lomiri.lomiri-terminal-app;
+
       lxterminal.pkg = p: p.lxterminal;
 
       mate-terminal.pkg = p: p.mate.mate-terminal;
diff --git a/nixos/tests/timescaledb.nix b/nixos/tests/timescaledb.nix
index 00a7f9af09fb8..ba0a3cec6076f 100644
--- a/nixos/tests/timescaledb.nix
+++ b/nixos/tests/timescaledb.nix
@@ -52,7 +52,7 @@ let
         services.postgresql = {
           enable = true;
           package = postgresql-package;
-          extraPlugins = with postgresql-package.pkgs; [
+          extraPlugins = ps: with ps; [
             timescaledb
             timescaledb_toolkit
           ];
diff --git a/nixos/tests/tomcat.nix b/nixos/tests/tomcat.nix
index ff58ca8ac618b..df5cb033b78f0 100644
--- a/nixos/tests/tomcat.nix
+++ b/nixos/tests/tomcat.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, ... }: {
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
   name = "tomcat";
   meta.maintainers = [ lib.maintainers.anthonyroussel ];
 
diff --git a/nixos/tests/trafficserver.nix b/nixos/tests/trafficserver.nix
index e4557c6c50e54..94d0e4dd926e9 100644
--- a/nixos/tests/trafficserver.nix
+++ b/nixos/tests/trafficserver.nix
@@ -104,6 +104,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     ats.wait_for_open_port(80)
     httpbin.wait_for_unit("httpbin")
     httpbin.wait_for_open_port(80)
+    client.systemctl("start network-online.target")
     client.wait_for_unit("network-online.target")
 
     with subtest("Traffic Server is running"):
diff --git a/nixos/tests/tsja.nix b/nixos/tests/tsja.nix
index 176783088d8d5..f34358ff3f5f3 100644
--- a/nixos/tests/tsja.nix
+++ b/nixos/tests/tsja.nix
@@ -11,7 +11,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
       {
         services.postgresql = {
           enable = true;
-          extraPlugins = with config.services.postgresql.package.pkgs; [
+          extraPlugins = ps: with ps; [
             tsja
           ];
         };
diff --git a/nixos/tests/tsm-client-gui.nix b/nixos/tests/tsm-client-gui.nix
index e11501da53d0c..c9632546db6ef 100644
--- a/nixos/tests/tsm-client-gui.nix
+++ b/nixos/tests/tsm-client-gui.nix
@@ -18,9 +18,9 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
       defaultServername = "testserver";
       servers.testserver = {
         # 192.0.0.8 is a "dummy address" according to RFC 7600
-        server = "192.0.0.8";
-        node = "SOME-NODE";
-        passwdDir = "/tmp";
+        tcpserveraddress = "192.0.0.8";
+        nodename = "SOME-NODE";
+        passworddir = "/tmp";
       };
     };
   };
diff --git a/nixos/tests/typesense.nix b/nixos/tests/typesense.nix
index 4f07a2e194be8..87ed248257ea0 100644
--- a/nixos/tests/typesense.nix
+++ b/nixos/tests/typesense.nix
@@ -18,6 +18,7 @@ in {
   testScript = ''
     machine.wait_for_unit("typesense.service")
     machine.wait_for_open_port(${toString testPort})
-    assert machine.succeed("curl --fail http://localhost:${toString testPort}/health") == '{"ok":true}'
+    # After waiting for the port, typesense still hasn't initialized the database, so wait until we can connect successfully
+    assert machine.wait_until_succeeds("curl --fail http://localhost:${toString testPort}/health") == '{"ok":true}'
   '';
 })
diff --git a/nixos/tests/ulogd/ulogd.py b/nixos/tests/ulogd/ulogd.py
index d20daa4d733a2..76a8d0c6e24a3 100644
--- a/nixos/tests/ulogd/ulogd.py
+++ b/nixos/tests/ulogd/ulogd.py
@@ -1,5 +1,6 @@
 start_all()
 machine.wait_for_unit("ulogd.service")
+machine.systemctl("start network-online.target")
 machine.wait_for_unit("network-online.target")
 
 with subtest("Ulogd is running"):
diff --git a/nixos/tests/unbound.nix b/nixos/tests/unbound.nix
index f6732390b4347..39a01259edeb5 100644
--- a/nixos/tests/unbound.nix
+++ b/nixos/tests/unbound.nix
@@ -106,8 +106,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
               {
                 name = ".";
                 forward-addr = [
-                  (lib.head nodes.authoritative.config.networking.interfaces.eth1.ipv6.addresses).address
-                  (lib.head nodes.authoritative.config.networking.interfaces.eth1.ipv4.addresses).address
+                  (lib.head nodes.authoritative.networking.interfaces.eth1.ipv6.addresses).address
+                  (lib.head nodes.authoritative.networking.interfaces.eth1.ipv4.addresses).address
                 ];
               }
             ];
@@ -168,8 +168,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
           "unbound-extra1.conf".text = ''
             forward-zone:
             name: "example.local."
-            forward-addr: ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address}
-            forward-addr: ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address}
+            forward-addr: ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address}
+            forward-addr: ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address}
           '';
           "unbound-extra2.conf".text = ''
             auth-zone:
@@ -187,8 +187,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
       client = { lib, nodes, ... }: {
         imports = [ common ];
         networking.nameservers = [
-          (lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address
-          (lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address
+          (lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address
+          (lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address
         ];
         networking.interfaces.eth1.ipv4.addresses = [
           { address = "192.168.0.10"; prefixLength = 24; }
@@ -276,7 +276,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
       resolver.wait_for_unit("multi-user.target")
 
       with subtest("client should be able to query the resolver"):
-          test(client, ["${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address}", "${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address}"], doh=True)
+          test(client, ["${(lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address}", "${(lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address}"], doh=True)
 
       # discard the client we do not need anymore
       client.shutdown()
@@ -298,7 +298,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
           ).strip()
 
           # Thank you black! Can't really break this line into a readable version.
-          expected = "example.local. IN forward ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address} ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address}"
+          expected = "example.local. IN forward ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv6.addresses).address} ${(lib.head nodes.resolver.networking.interfaces.eth1.ipv4.addresses).address}"
           assert out == expected, f"Expected `{expected}` but got `{out}` instead."
           local_resolver.fail("sudo -u unauthorizeduser -- unbound-control list_forwards")
 
diff --git a/nixos/tests/upnp.nix b/nixos/tests/upnp.nix
index af7cc1fe24130..93bc08f752ce3 100644
--- a/nixos/tests/upnp.nix
+++ b/nixos/tests/upnp.nix
@@ -5,7 +5,7 @@
 # this succeeds an external client will try to connect to the port
 # mapping.
 
-import ./make-test-python.nix ({ pkgs, ... }:
+import ./make-test-python.nix ({ pkgs, useNftables, ... }:
 
 let
   internalRouterAddress = "192.168.3.1";
@@ -27,6 +27,7 @@ in
           networking.nat.enable = true;
           networking.nat.internalInterfaces = [ "eth2" ];
           networking.nat.externalInterface = "eth1";
+          networking.nftables.enable = useNftables;
           networking.firewall.enable = true;
           networking.firewall.trustedInterfaces = [ "eth2" ];
           networking.interfaces.eth1.ipv4.addresses = [
@@ -80,11 +81,13 @@ in
       start_all()
 
       # Wait for network and miniupnpd.
+      router.systemctl("start network-online.target")
       router.wait_for_unit("network-online.target")
       # $router.wait_for_unit("nat")
-      router.wait_for_unit("firewall.service")
+      router.wait_for_unit("${if useNftables then "nftables" else "firewall"}.service")
       router.wait_for_unit("miniupnpd")
 
+      client1.systemctl("start network-online.target")
       client1.wait_for_unit("network-online.target")
 
       client1.succeed("upnpc -a ${internalClient1Address} 9000 9000 TCP")
diff --git a/nixos/tests/uptermd.nix b/nixos/tests/uptermd.nix
index 429e3c9dd5ff3..469aa5047c27c 100644
--- a/nixos/tests/uptermd.nix
+++ b/nixos/tests/uptermd.nix
@@ -28,6 +28,7 @@ in
     start_all()
 
     server.wait_for_unit("uptermd.service")
+    server.systemctl("start network-online.target")
     server.wait_for_unit("network-online.target")
 
     # wait for upterm port to be reachable
diff --git a/nixos/tests/varnish.nix b/nixos/tests/varnish.nix
index 9dcdeec9d8c8f..76cea1ada5477 100644
--- a/nixos/tests/varnish.nix
+++ b/nixos/tests/varnish.nix
@@ -3,12 +3,12 @@
 , pkgs ? import ../.. { inherit system; }
 , package
 }:
-import ./make-test-python.nix ({ pkgs, ... }: let
+import ./make-test-python.nix ({ pkgs, lib, ... }: let
   testPath = pkgs.hello;
 in {
   name = "varnish";
-  meta = with pkgs.lib.maintainers; {
-    maintainers = [ ajs124 ];
+  meta = {
+    maintainers = lib.teams.helsinki-systems.members;
   };
 
   nodes = {
diff --git a/nixos/tests/watchdogd.nix b/nixos/tests/watchdogd.nix
new file mode 100644
index 0000000000000..663e97cbae104
--- /dev/null
+++ b/nixos/tests/watchdogd.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "watchdogd";
+  meta.maintainers = with lib.maintainers; [ vifino ];
+
+  nodes.machine = { pkgs, ... }: {
+    virtualisation.qemu.options = [
+      "-device i6300esb" # virtual watchdog timer
+    ];
+    boot.kernelModules = [ "i6300esb" ];
+    services.watchdogd.enable = true;
+    services.watchdogd.settings = {
+      supervisor.enabled = true;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("watchdogd.service")
+
+    assert "i6300ESB" in machine.succeed("watchdogctl status")
+    machine.succeed("watchdogctl test")
+  '';
+})
diff --git a/nixos/tests/xrdp-with-audio-pulseaudio.nix b/nixos/tests/xrdp-with-audio-pulseaudio.nix
new file mode 100644
index 0000000000000..27da7c457c493
--- /dev/null
+++ b/nixos/tests/xrdp-with-audio-pulseaudio.nix
@@ -0,0 +1,97 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  # How to interactively test this module and check that the audio actually works:
+
+  # - nix run .#pulseaudio-module-xrdp.tests.xrdp-with-audio-pulseaudio.driverInteractive
+  # - test_script() # launches the terminal and the test itself
+  # - server.send_monitor_command("hostfwd_add tcp::3389-:3389") # forward the RDP port to the host
+  # - Connect with the RDP client you like (ex: Remmina)
+  # - Don't forget to enable audio support. In Remmina: Advanced -> set Audio output mode to Local (default is Off)
+  # - Open a browser or something that plays sound. Ex: chromium
+
+  name = "xrdp-with-audio-pulseaudio";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lucasew ];
+  };
+
+  nodes = {
+    server = { pkgs, ... }: {
+      imports = [ ./common/user-account.nix ];
+
+      environment.etc."xrdp/test.txt".text = "Shouldn't conflict";
+
+      services.xrdp.enable = true;
+      services.xrdp.audio.enable = true;
+      services.xrdp.defaultWindowManager = "${pkgs.xterm}/bin/xterm";
+
+      hardware.pulseaudio = {
+        enable = true;
+      };
+
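+      # Once the xrdp audio channel socket appears, record the default PulseAudio
+      # source and sink; the test script later expects xrdp-source / xrdp-sink here.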
+      systemd.user.services.pactl-list = {
+        script = ''
+          while [ ! -S /tmp/.xrdp/xrdp_chansrv_audio_in_socket_* ]; do
+            sleep 1
+          done
+          sleep 1
+          ${pkgs.pulseaudio}/bin/pactl list
+          echo Source:
+          ${pkgs.pulseaudio}/bin/pactl get-default-source | tee /tmp/pulseaudio-source
+          echo Sink:
+          ${pkgs.pulseaudio}/bin/pactl get-default-sink | tee /tmp/pulseaudio-sink
+
+        '';
+        wantedBy = [ "default.target" ];
+      };
+
+      networking.firewall.allowedTCPPorts = [ 3389 ];
+    };
+
+    client = { pkgs, ... }: {
+      imports = [ ./common/x11.nix ./common/user-account.nix ];
+      test-support.displayManager.auto.user = "alice";
+
+      environment.systemPackages = [ pkgs.freerdp ];
+
+      services.xrdp.enable = true;
+      services.xrdp.audio.enable = true;
+      services.xrdp.defaultWindowManager = "${pkgs.icewm}/bin/icewm";
+
+      hardware.pulseaudio = {
+        enable = true;
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    user = nodes.client.config.users.users.alice;
+  in ''
+    start_all()
+
+    client.wait_for_x()
+    client.wait_for_file("${user.home}/.Xauthority")
+    client.succeed("xauth merge ${user.home}/.Xauthority")
+
+    client.sleep(5)
+
+    client.execute("xterm >&2 &")
+    client.sleep(1)
+
+    client.send_chars("xfreerdp /cert-tofu /w:640 /h:480 /v:127.0.0.1 /u:${user.name} /p:${user.password} /sound\n")
+
+    client.sleep(10)
+
+    client.succeed("[ -S /tmp/.xrdp/xrdp_chansrv_audio_in_socket_* ]") # checks if it's a socket
+    client.sleep(5)
+    client.screenshot("localrdp")
+
+    client.execute("xterm >&2 &")
+    client.sleep(1)
+    client.send_chars("xfreerdp /cert-tofu /w:640 /h:480 /v:server /u:${user.name} /p:${user.password} /sound\n")
+    client.sleep(10)
+
+    server.succeed("[ -S /tmp/.xrdp/xrdp_chansrv_audio_in_socket_* ]") # checks if it's a socket
+    server.succeed('[ "$(cat /tmp/pulseaudio-source)" == "xrdp-source" ]')
+    server.succeed('[ "$(cat /tmp/pulseaudio-sink)" == "xrdp-sink" ]')
+    client.screenshot("remoterdp")
+  '';
+})
diff --git a/nixos/tests/zammad.nix b/nixos/tests/zammad.nix
index 7a2d40e82b3ed..faae1949e37b9 100644
--- a/nixos/tests/zammad.nix
+++ b/nixos/tests/zammad.nix
@@ -4,9 +4,13 @@ import ./make-test-python.nix (
   {
     name = "zammad";
 
-    meta.maintainers = with lib.maintainers; [ garbas taeer n0emis ];
+    meta.maintainers = with lib.maintainers; [ taeer n0emis netali ];
 
     nodes.machine = { config, ... }: {
+      virtualisation = {
+        memorySize = 2048;
+      };
+
       services.zammad.enable = true;
       services.zammad.secretKeyBaseFile = pkgs.writeText "secret" ''
         52882ef142066e09ab99ce816ba72522e789505caba224a52d750ec7dc872c2c371b2fd19f16b25dfbdd435a4dd46cb3df9f82eb63fafad715056bdfe25740d6
@@ -44,9 +48,10 @@ import ./make-test-python.nix (
     testScript = ''
       start_all()
       machine.wait_for_unit("postgresql.service")
+      machine.wait_for_unit("redis-zammad.service")
       machine.wait_for_unit("zammad-web.service")
       machine.wait_for_unit("zammad-websocket.service")
-      machine.wait_for_unit("zammad-scheduler.service")
+      machine.wait_for_unit("zammad-worker.service")
       # wait for zammad to fully come up
       machine.sleep(120)
 
diff --git a/nixos/tests/zfs.nix b/nixos/tests/zfs.nix
index ad4ea254f34d7..8fedcf095af69 100644
--- a/nixos/tests/zfs.nix
+++ b/nixos/tests/zfs.nix
@@ -19,7 +19,7 @@ let
     makeTest {
       name = "zfs-" + name;
       meta = with pkgs.lib.maintainers; {
-        maintainers = [ adisbladis elvishjerricco ];
+        maintainers = [ elvishjerricco ];
       };
 
       nodes.machine = { config, pkgs, lib, ... }:
@@ -210,6 +210,7 @@ in {
     enableSystemdStage1 = true;
   };
 
+  installerBoot = (import ./installer.nix { }).separateBootZfs;
   installer = (import ./installer.nix { }).zfsroot;
 
   expand-partitions = makeTest {
diff --git a/nixos/tests/zrepl.nix b/nixos/tests/zrepl.nix
index b16c7eddc7aec..bdf11122c73f6 100644
--- a/nixos/tests/zrepl.nix
+++ b/nixos/tests/zrepl.nix
@@ -42,6 +42,7 @@ import ./make-test-python.nix (
       start_all()
 
       with subtest("Wait for zrepl and network ready"):
+          host.systemctl("start network-online.target")
           host.wait_for_unit("network-online.target")
           host.wait_for_unit("zrepl.service")