Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/administration/imperative-containers.section.md | 2
-rw-r--r--  nixos/doc/manual/configuration/profiles.chapter.md | 1
-rw-r--r--  nixos/doc/manual/configuration/profiles/perlless.section.md | 11
-rw-r--r--  nixos/doc/manual/configuration/user-mgmt.chapter.md | 15
-rw-r--r--  nixos/doc/manual/development/etc-overlay.section.md | 36
-rw-r--r--  nixos/doc/manual/development/unit-handling.section.md | 39
-rw-r--r--  nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md | 3
-rw-r--r--  nixos/doc/manual/release-notes/rl-2405.section.md | 50
-rw-r--r--  nixos/lib/eval-config.nix | 1
-rw-r--r--  nixos/maintainers/option-usages.nix | 6
-rw-r--r--  nixos/modules/config/shells-environment.nix | 3
-rw-r--r--  nixos/modules/config/users-groups.nix | 10
-rw-r--r--  nixos/modules/image/repart-image.nix | 36
-rw-r--r--  nixos/modules/image/repart.nix | 76
-rw-r--r--  nixos/modules/misc/version.nix | 34
-rw-r--r--  nixos/modules/module-list.nix | 6
-rw-r--r--  nixos/modules/profiles/perlless.nix | 31
-rw-r--r--  nixos/modules/programs/ssh.nix | 1
-rw-r--r--  nixos/modules/security/acme/default.md | 10
-rw-r--r--  nixos/modules/security/acme/default.nix | 4
-rw-r--r--  nixos/modules/services/audio/gmediarender.nix | 1
-rw-r--r--  nixos/modules/services/audio/jmusicbot.nix | 1
-rw-r--r--  nixos/modules/services/audio/spotifyd.nix | 1
-rw-r--r--  nixos/modules/services/audio/ympd.nix | 1
-rw-r--r--  nixos/modules/services/continuous-integration/buildbot/master.nix | 2
-rw-r--r--  nixos/modules/services/continuous-integration/gitea-actions-runner.nix | 1
-rw-r--r--  nixos/modules/services/continuous-integration/github-runner/options.nix | 1
-rw-r--r--  nixos/modules/services/continuous-integration/hydra/default.nix | 1
-rw-r--r--  nixos/modules/services/databases/firebird.nix | 2
-rw-r--r--  nixos/modules/services/databases/lldap.nix | 1
-rw-r--r--  nixos/modules/services/databases/openldap.nix | 1
-rw-r--r--  nixos/modules/services/desktops/geoclue2.nix | 2
-rw-r--r--  nixos/modules/services/editors/emacs.nix | 21
-rw-r--r--  nixos/modules/services/games/archisteamfarm.nix (renamed from nixos/modules/services/games/asf.nix) | 100
-rw-r--r--  nixos/modules/services/home-automation/evcc.nix | 1
-rw-r--r--  nixos/modules/services/home-automation/home-assistant.nix | 1
-rw-r--r--  nixos/modules/services/logging/journaldriver.nix | 1
-rw-r--r--  nixos/modules/services/mail/roundcube.nix | 1
-rw-r--r--  nixos/modules/services/mail/sympa.nix | 2
-rw-r--r--  nixos/modules/services/matrix/synapse.nix | 2
-rw-r--r--  nixos/modules/services/misc/amazon-ssm-agent.nix | 1
-rw-r--r--  nixos/modules/services/misc/bcg.nix | 2
-rw-r--r--  nixos/modules/services/misc/domoticz.nix | 1
-rw-r--r--  nixos/modules/services/misc/etesync-dav.nix | 1
-rw-r--r--  nixos/modules/services/misc/mediatomb.nix | 1
-rw-r--r--  nixos/modules/services/misc/metabase.nix | 1
-rw-r--r--  nixos/modules/services/misc/moonraker.nix | 2
-rw-r--r--  nixos/modules/services/misc/nix-gc.nix | 25
-rw-r--r--  nixos/modules/services/misc/nix-ssh-serve.nix | 4
-rw-r--r--  nixos/modules/services/misc/ollama.nix | 8
-rw-r--r--  nixos/modules/services/misc/paperless.nix | 1
-rw-r--r--  nixos/modules/services/misc/portunus.nix | 5
-rw-r--r--  nixos/modules/services/misc/taskserver/helper-tool.py | 34
-rw-r--r--  nixos/modules/services/monitoring/mackerel-agent.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/prometheus/alertmanager.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/teamviewer.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/telegraf.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/watchdogd.nix | 131
-rw-r--r--  nixos/modules/services/network-filesystems/openafs/client.nix | 1
-rw-r--r--  nixos/modules/services/network-filesystems/samba.nix | 2
-rw-r--r--  nixos/modules/services/networking/bird.nix | 9
-rw-r--r--  nixos/modules/services/networking/bitcoind.nix | 1
-rw-r--r--  nixos/modules/services/networking/dante.nix | 1
-rw-r--r--  nixos/modules/services/networking/ergo.nix | 1
-rw-r--r--  nixos/modules/services/networking/expressvpn.nix | 1
-rw-r--r--  nixos/modules/services/networking/frp.nix | 22
-rw-r--r--  nixos/modules/services/networking/headscale.nix | 1
-rw-r--r--  nixos/modules/services/networking/ircd-hybrid/default.nix | 3
-rw-r--r--  nixos/modules/services/networking/ivpn.nix | 2
-rw-r--r--  nixos/modules/services/networking/kea.nix | 7
-rw-r--r--  nixos/modules/services/networking/keepalived/default.nix | 9
-rw-r--r--  nixos/modules/services/networking/mosquitto.nix | 1
-rw-r--r--  nixos/modules/services/networking/mullvad-vpn.nix | 2
-rw-r--r--  nixos/modules/services/networking/nbd.nix | 1
-rw-r--r--  nixos/modules/services/networking/ocserv.nix | 1
-rw-r--r--  nixos/modules/services/networking/pleroma.nix | 1
-rw-r--r--  nixos/modules/services/networking/rosenpass.nix | 1
-rw-r--r--  nixos/modules/services/networking/rxe.nix | 2
-rw-r--r--  nixos/modules/services/networking/soju.nix | 1
-rw-r--r--  nixos/modules/services/networking/strongswan-swanctl/module.nix | 1
-rw-r--r--  nixos/modules/services/networking/strongswan.nix | 1
-rw-r--r--  nixos/modules/services/networking/syncplay.nix | 1
-rw-r--r--  nixos/modules/services/networking/wasabibackend.nix | 1
-rw-r--r--  nixos/modules/services/networking/znc/default.nix | 1
-rw-r--r--  nixos/modules/services/security/certmgr.nix | 1
-rw-r--r--  nixos/modules/services/security/clamav.nix | 2
-rw-r--r--  nixos/modules/services/security/oauth2_proxy.nix | 1
-rw-r--r--  nixos/modules/services/system/cachix-agent/default.nix | 1
-rw-r--r--  nixos/modules/services/system/cachix-watch-store.nix | 1
-rw-r--r--  nixos/modules/services/system/cloud-init.nix | 5
-rw-r--r--  nixos/modules/services/system/dbus.nix | 1
-rw-r--r--  nixos/modules/services/video/go2rtc/default.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/akkoma.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/alps.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/c2fmzq-server.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/code-server.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/healthchecks.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/netbox.nix | 20
-rw-r--r--  nixos/modules/services/web-apps/nextcloud.nix | 211
-rw-r--r--  nixos/modules/services/web-apps/openvscode-server.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/peering-manager.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/suwayomi-server.md | 108
-rw-r--r--  nixos/modules/services/web-apps/suwayomi-server.nix | 260
-rw-r--r--  nixos/modules/services/web-apps/wordpress.nix | 10
-rw-r--r--  nixos/modules/services/web-servers/agate.nix | 1
-rw-r--r--  nixos/modules/services/web-servers/mighttpd2.nix | 1
-rw-r--r--  nixos/modules/services/web-servers/minio.nix | 1
-rw-r--r--  nixos/modules/services/web-servers/traefik.nix | 1
-rw-r--r--  nixos/modules/services/web-servers/ttyd.nix | 3
-rw-r--r--  nixos/modules/services/x11/xserver.nix | 4
-rwxr-xr-x  nixos/modules/system/activation/switch-to-configuration.pl | 12
-rw-r--r--  nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py | 89
-rw-r--r--  nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix | 6
-rw-r--r--  nixos/modules/system/boot/resolved.nix | 24
-rw-r--r--  nixos/modules/system/boot/systemd.nix | 53
-rw-r--r--  nixos/modules/system/boot/systemd/sysusers.nix | 169
-rw-r--r--  nixos/modules/system/boot/systemd/tmpfiles.nix | 35
-rw-r--r--  nixos/modules/system/boot/uki.nix | 85
-rw-r--r--  nixos/modules/system/etc/build-composefs-dump.py | 209
-rwxr-xr-x  nixos/modules/system/etc/check-build-composefs-dump.sh | 8
-rw-r--r--  nixos/modules/system/etc/etc-activation.nix | 98
-rw-r--r--  nixos/modules/system/etc/etc.nix | 116
-rw-r--r--  nixos/modules/tasks/auto-upgrade.nix | 12
-rw-r--r--  nixos/modules/testing/test-instrumentation.nix | 5
-rw-r--r--  nixos/modules/virtualisation/amazon-image.nix | 1
-rw-r--r--  nixos/modules/virtualisation/incus.nix | 9
-rw-r--r--  nixos/modules/virtualisation/lxd.nix | 6
-rw-r--r--  nixos/modules/virtualisation/oci-containers.nix | 1
-rw-r--r--  nixos/modules/virtualisation/podman/default.nix | 33
-rw-r--r--  nixos/tests/3proxy.nix | 1
-rw-r--r--  nixos/tests/acme.nix | 1
-rw-r--r--  nixos/tests/activation/etc-overlay-immutable.nix | 30
-rw-r--r--  nixos/tests/activation/etc-overlay-mutable.nix | 30
-rw-r--r--  nixos/tests/activation/perlless.nix | 24
-rw-r--r--  nixos/tests/adguardhome.nix | 1
-rw-r--r--  nixos/tests/all-tests.nix | 12
-rw-r--r--  nixos/tests/appliance-repart-image.nix | 33
-rw-r--r--  nixos/tests/ayatana-indicators.nix | 53
-rw-r--r--  nixos/tests/babeld.nix | 4
-rw-r--r--  nixos/tests/bittorrent.nix | 4
-rw-r--r--  nixos/tests/buildbot.nix | 2
-rw-r--r--  nixos/tests/cloud-init.nix | 1
-rw-r--r--  nixos/tests/corerad.nix | 2
-rw-r--r--  nixos/tests/curl-impersonate.nix | 2
-rw-r--r--  nixos/tests/elk.nix | 6
-rw-r--r--  nixos/tests/ferm.nix | 2
-rw-r--r--  nixos/tests/frp.nix | 25
-rw-r--r--  nixos/tests/gitdaemon.nix | 3
-rw-r--r--  nixos/tests/guix/publish.nix | 1
-rw-r--r--  nixos/tests/haproxy.nix | 109
-rw-r--r--  nixos/tests/hostname.nix | 1
-rw-r--r--  nixos/tests/incus/default.nix | 5
-rw-r--r--  nixos/tests/incus/lxd-to-incus.nix | 112
-rw-r--r--  nixos/tests/installer.nix | 3
-rw-r--r--  nixos/tests/kanidm.nix | 1
-rw-r--r--  nixos/tests/keepalived.nix | 3
-rw-r--r--  nixos/tests/lemmy.nix | 2
-rw-r--r--  nixos/tests/miriway.nix | 2
-rw-r--r--  nixos/tests/networking.nix | 3
-rw-r--r--  nixos/tests/nfs/kerberos.nix | 1
-rw-r--r--  nixos/tests/nginx-moreheaders.nix | 37
-rw-r--r--  nixos/tests/nixops/default.nix | 6
-rw-r--r--  nixos/tests/opensmtpd-rspamd.nix | 1
-rw-r--r--  nixos/tests/opensmtpd.nix | 1
-rw-r--r--  nixos/tests/owncast.nix | 2
-rw-r--r--  nixos/tests/podman/default.nix | 2
-rw-r--r--  nixos/tests/postgis.nix | 1
-rw-r--r--  nixos/tests/qemu-vm-restrictnetwork.nix | 2
-rw-r--r--  nixos/tests/rss2email.nix | 1
-rw-r--r--  nixos/tests/ssh-audit.nix | 1
-rw-r--r--  nixos/tests/suwayomi-server.nix | 46
-rw-r--r--  nixos/tests/sysinit-reactivation.nix | 107
-rw-r--r--  nixos/tests/systemd-networkd-dhcpserver.nix | 3
-rw-r--r--  nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix | 8
-rw-r--r--  nixos/tests/systemd-nspawn.nix | 1
-rw-r--r--  nixos/tests/systemd-sysusers-immutable.nix | 64
-rw-r--r--  nixos/tests/systemd-sysusers-mutable.nix | 71
-rw-r--r--  nixos/tests/tayga.nix | 2
-rw-r--r--  nixos/tests/trafficserver.nix | 1
-rw-r--r--  nixos/tests/ulogd/ulogd.py | 1
-rw-r--r--  nixos/tests/upnp.nix | 2
-rw-r--r--  nixos/tests/uptermd.nix | 1
-rw-r--r--  nixos/tests/watchdogd.nix | 22
-rw-r--r--  nixos/tests/web-apps/netbox-upgrade.nix | 4
-rw-r--r--  nixos/tests/web-servers/stargazer.nix | 108
-rw-r--r--  nixos/tests/web-servers/ttyd.nix | 19
-rw-r--r--  nixos/tests/zrepl.nix | 1
187 files changed, 2969 insertions, 452 deletions
diff --git a/nixos/doc/manual/administration/imperative-containers.section.md b/nixos/doc/manual/administration/imperative-containers.section.md
index f45991780c4b9..852305ad81486 100644
--- a/nixos/doc/manual/administration/imperative-containers.section.md
+++ b/nixos/doc/manual/administration/imperative-containers.section.md
@@ -77,7 +77,7 @@ Linux foo 3.4.82 #1-NixOS SMP Thu Mar 20 14:44:05 UTC 2014 x86_64 GNU/Linux
 
 There are several ways to change the configuration of the container.
 First, on the host, you can edit
-`/var/lib/container/name/etc/nixos/configuration.nix`, and run
+`/var/lib/nixos-containers/foo/etc/nixos/configuration.nix`, and run
 
 ```ShellSession
 # nixos-container update foo
diff --git a/nixos/doc/manual/configuration/profiles.chapter.md b/nixos/doc/manual/configuration/profiles.chapter.md
index 9f1f48f742ac5..9f6c11b0d59d5 100644
--- a/nixos/doc/manual/configuration/profiles.chapter.md
+++ b/nixos/doc/manual/configuration/profiles.chapter.md
@@ -29,6 +29,7 @@ profiles/graphical.section.md
 profiles/hardened.section.md
 profiles/headless.section.md
 profiles/installation-device.section.md
+profiles/perlless.section.md
 profiles/minimal.section.md
 profiles/qemu-guest.section.md
 ```
diff --git a/nixos/doc/manual/configuration/profiles/perlless.section.md b/nixos/doc/manual/configuration/profiles/perlless.section.md
new file mode 100644
index 0000000000000..bf055971cfc42
--- /dev/null
+++ b/nixos/doc/manual/configuration/profiles/perlless.section.md
@@ -0,0 +1,11 @@
+# Perlless {#sec-perlless}
+
+::: {.warning}
+If you enable this profile, you will NOT be able to switch to a new
+configuration and thus you will not be able to rebuild your system with
+nixos-rebuild!
+:::
+
+Render your system completely perlless (i.e. without the perl interpreter). This
+includes a mechanism so that your build fails if it contains a Nix store path
+that references the string "perl".
diff --git a/nixos/doc/manual/configuration/user-mgmt.chapter.md b/nixos/doc/manual/configuration/user-mgmt.chapter.md
index b35b38f6e964a..71d61ce4c641b 100644
--- a/nixos/doc/manual/configuration/user-mgmt.chapter.md
+++ b/nixos/doc/manual/configuration/user-mgmt.chapter.md
@@ -89,3 +89,18 @@ A user can be deleted using `userdel`:
 The flag `-r` deletes the user's home directory. Accounts can be
 modified using `usermod`. Unix groups can be managed using `groupadd`,
 `groupmod` and `groupdel`.
+
+## Create users and groups with `systemd-sysusers` {#sec-systemd-sysusers}
+
+::: {.note}
+This is experimental.
+:::
+
+Instead of using a custom perl script to create users and groups, you can use
+systemd-sysusers:
+
+```nix
+systemd.sysusers.enable = true;
+```
+
+The primary benefit of this is to remove a dependency on perl.
diff --git a/nixos/doc/manual/development/etc-overlay.section.md b/nixos/doc/manual/development/etc-overlay.section.md
new file mode 100644
index 0000000000000..e6f6d8d4ca1ef
--- /dev/null
+++ b/nixos/doc/manual/development/etc-overlay.section.md
@@ -0,0 +1,36 @@
+# `/etc` via overlay filesystem {#sec-etc-overlay}
+
+::: {.note}
+This is experimental and requires a kernel version >= 6.6 because it uses
+new overlay features and relies on the new mount API.
+:::
+
+Instead of using a custom perl script to activate `/etc`, you activate it via an
+overlay filesystem:
+
+```nix
+system.etc.overlay.enable = true;
+```
+
+Using an overlay has two benefits:
+
+1. it removes a dependency on perl
+2. it makes activation faster (up to a few seconds)
+
+By default, the `/etc` overlay is mounted writable (i.e. there is a writable
+upper layer). However, you can also mount `/etc` immutably (i.e. read-only) by
+setting:
+
+```nix
+system.etc.overlay.mutable = false;
+```
+
+The overlay is atomically replaced during system switch. However, files that
+have been modified will NOT be overwritten. This is the biggest change compared
+to the perl-based system.
+
+If you manually make changes to `/etc` on your system and then switch to a new
+configuration where `system.etc.overlay.mutable = false;`, you will not be able
+to see the previously made changes in `/etc` anymore. However, the changes are
+not completely gone; they are still in the upperdir of the previous overlay in
+`/.rw-etc/upper`.
diff --git a/nixos/doc/manual/development/unit-handling.section.md b/nixos/doc/manual/development/unit-handling.section.md
index 32d44dbfff054..d5ba6a9529d01 100644
--- a/nixos/doc/manual/development/unit-handling.section.md
+++ b/nixos/doc/manual/development/unit-handling.section.md
@@ -63,3 +63,42 @@ checks:
     is **restart**ed with the others. If it is set, both the service and the
     socket are **stop**ped and the socket is **start**ed, leaving socket
     activation to start the service when it's needed.
+
+## Sysinit reactivation {#sec-sysinit-reactivation}
+
+[`sysinit.target`](https://www.freedesktop.org/software/systemd/man/latest/systemd.special.html#sysinit.target)
+is a systemd target that encodes system initialization (i.e. early startup). A
+few units that need to run very early in the bootup process are ordered to
+finish before this target is reached. Probably the most notable one of these is
+`systemd-tmpfiles-setup.service`. We will refer to these units as "sysinit
+units".
+
+"Normal" systemd units, by default, are ordered AFTER `sysinit.target`. In
+other words, these "normal" units expect all services ordered before
+`sysinit.target` to have finished without explicitly declaring this dependency
+relationship for each dependency. See the [systemd
+bootup](https://www.freedesktop.org/software/systemd/man/latest/bootup.html)
+man page for more details on the bootup process.
+
+When restarting both a unit ordered before `sysinit.target` and one ordered
+after it, this presents a problem: both would be started at the same time
+because they do not explicitly declare their dependency relations.
+
+To solve this, NixOS has an artificial `sysinit-reactivation.target` which
+allows you to ensure that services ordered before `sysinit.target` are
+restarted correctly. This applies both to the ordering between these sysinit
+services as well as ensuring that sysinit units are restarted before "normal"
+units.
+
+To make an existing sysinit service restart correctly during system switch, you
+have to declare:
+
+```nix
+systemd.services.my-sysinit = {
+  requiredBy = [ "sysinit-reactivation.target" ];
+  before = [ "sysinit-reactivation.target" ];
+  restartTriggers = [ config.environment.etc."my-sysinit.d".source ];
+};
+```
+
+You need to configure appropriate `restartTriggers` specific to your service.
diff --git a/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md b/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md
index ccadb819e061d..28c06f999dac2 100644
--- a/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md
+++ b/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md
@@ -37,7 +37,7 @@ of actions is always the same:
 - Forget about the failed state of units (`systemctl reset-failed`)
 - Reload systemd (`systemctl daemon-reload`)
 - Reload systemd user instances (`systemctl --user daemon-reload`)
-- Set up tmpfiles (`systemd-tmpfiles --create`)
+- Reactivate sysinit (`systemctl restart sysinit-reactivation.target`)
 - Reload units (`systemctl reload`)
 - Restart units (`systemctl restart`)
 - Start units (`systemctl start`)
@@ -56,4 +56,5 @@ explained in the next sections.
 unit-handling.section.md
 activation-script.section.md
 non-switchable-systems.section.md
+etc-overlay.section.md
 ```
diff --git a/nixos/doc/manual/release-notes/rl-2405.section.md b/nixos/doc/manual/release-notes/rl-2405.section.md
index b6f5e2b80d4de..6956c1ab6053e 100644
--- a/nixos/doc/manual/release-notes/rl-2405.section.md
+++ b/nixos/doc/manual/release-notes/rl-2405.section.md
@@ -18,6 +18,22 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - Julia environments can now be built with arbitrary packages from the ecosystem using the `.withPackages` function. For example: `julia.withPackages ["Plots"]`.
 
+- A new option `systemd.sysusers.enable` was added. If enabled, users and
+  groups are created with systemd-sysusers instead of with a custom perl script.
+
+- A new option `system.etc.overlay.enable` was added. If enabled, `/etc` is
+  mounted via an overlayfs instead of being created by a custom perl script.
+
+- It is now possible to have a completely perlless system (i.e. a system
+  without perl). Previously, the NixOS activation depended on two perl scripts
+  which can now be replaced via an opt-in mechanism. To make your system
+  perlless, you can use the new perlless profile:
+  ```
+  { modulesPath, ... }: {
+    imports = [ "${modulesPath}/profiles/perlless.nix" ];
+  }
+  ```
+
 ## New Services {#sec-release-24.05-new-services}
 
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@@ -37,6 +53,8 @@ In addition to numerous new and upgraded packages, this release has the followin
 - [Anki Sync Server](https://docs.ankiweb.net/sync-server.html), the official sync server built into recent versions of Anki. Available as [services.anki-sync-server](#opt-services.anki-sync-server.enable).
The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been marked deprecated and will be dropped after 24.05 due to lack of maintenance of the anki-sync-server software.
 
+- [Suwayomi Server](https://github.com/Suwayomi/Suwayomi-Server), a free and open source manga reader server that runs extensions built for [Tachiyomi](https://tachiyomi.org). Available as [services.suwayomi-server](#opt-services.suwayomi-server.enable).
+
 - [ping_exporter](https://github.com/czerwonk/ping_exporter), a Prometheus exporter for ICMP echo requests. Available as [services.prometheus.exporters.ping](#opt-services.prometheus.exporters.ping.enable).
 
 - [Clevis](https://github.com/latchset/clevis), a pluggable framework for automated decryption, used to unlock encrypted devices in initrd. Available as [boot.initrd.clevis.enable](#opt-boot.initrd.clevis.enable).
@@ -69,6 +87,12 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - The legacy and long deprecated systemd target `network-interfaces.target` has been removed. Use `network.target` instead.
 
+- `services.frp.settings` now generates the frp configuration file in TOML format as [recommended by upstream](https://github.com/fatedier/frp#configuration-files), instead of the legacy INI format. This has also introduced other changes in the configuration file structure and options.
+  - The `settings.common` section in the configuration is no longer valid; all the options from inside it now go directly under `settings`.
+  - The `_` separating words in the configuration options has been removed, so the options are now in camel case. For example: `server_addr` becomes `serverAddr`, `server_port` becomes `serverPort`, etc.
+  - Proxies are now defined with a new option `settings.proxies` which takes a list of proxies.
+  - Consult the [upstream documentation](https://github.com/fatedier/frp#example-usage) for more details on the changes.
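+
+  For example, a client configuration that previously used the INI-style options
+  might look roughly like this after the migration (a sketch with hypothetical
+  addresses and ports; option names follow the new TOML schema described above):
+
+  ```nix
+  services.frp = {
+    enable = true;
+    role = "client";
+    settings = {
+      serverAddr = "203.0.113.10";
+      serverPort = 7000;
+      proxies = [
+        {
+          name = "ssh";
+          type = "tcp";
+          localIP = "127.0.0.1";
+          localPort = 22;
+          remotePort = 6000;
+        }
+      ];
+    };
+  };
+  ```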
+
 - `mkosi` was updated to v20. Parts of the user interface have changed. Consult the
   release notes of [v19](https://github.com/systemd/mkosi/releases/tag/v19) and
   [v20](https://github.com/systemd/mkosi/releases/tag/v20) for a list of changes.
@@ -108,14 +132,36 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 - `services.avahi.nssmdns` got split into `services.avahi.nssmdns4` and `services.avahi.nssmdns6` which enable the mDNS NSS switch for IPv4 and IPv6 respectively.
   Since most mDNS responders only register IPv4 addresses, most users want to keep the IPv6 support disabled to avoid long timeouts.
 
+- `multi-user.target` no longer depends on `network-online.target`.
+  This will potentially break services that assumed this was the case in the past.
+  This was changed for consistency with other distributions and to improve boot times.
+
+  We have added a warning for services that are
+  `after = [ "network-online.target" ]` but do not depend on it (e.g. using `wants`).
+
- `services.archisteamfarm` no longer uses the abbreviation `asf` for its state directory (`/var/lib/asf`), user and group (both `asf`). Instead, the long name `archisteamfarm` is used.
+  Configurations with `system.stateVersion` 23.11 or earlier default to the old state directory until the 24.11 release, and must either set the option explicitly or move the data to the new directory.
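+
+  For example, to keep the old location explicitly until the data has been
+  migrated (a sketch):
+
+  ```nix
+  services.archisteamfarm.dataDir = "/var/lib/asf";
+  ```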
+
 - `networking.iproute2.enable` now does not set `environment.etc."iproute2/rt_tables".text`.
 
   Setting `environment.etc."iproute2/{CONFIG_FILE_NAME}".text` will override the whole configuration file instead of appending it to the upstream configuration file.
 
   `CONFIG_FILE_NAME` includes `bpf_pinning`, `ematch_map`, `group`, `nl_protos`, `rt_dsfield`, `rt_protos`, `rt_realms`, `rt_scopes`, and `rt_tables`.
 
+- `netbox` was updated to v3.7. `services.netbox.package` still defaults
+  to v3.6 if `stateVersion` is earlier than 24.05. Refer to upstream's breaking
+  changes [for
+  v3.7.0](https://github.com/netbox-community/netbox/releases/tag/v3.7.0) and
+  upgrade NetBox by changing `services.netbox.package`. Database migrations
+  will be run automatically.
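+
+  For example (assuming the v3.7 package is exposed as `pkgs.netbox_3_7`):
+
+  ```nix
+  services.netbox.package = pkgs.netbox_3_7;
+  ```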
+
 - The executable file names for `firefox-devedition`, `firefox-beta`, `firefox-esr` now matches their package names, which is consistent with the `firefox-*-bin` packages. The desktop entries are also updated so that you can have multiple editions of firefox in your app launcher.
 
+- switch-to-configuration does not directly call systemd-tmpfiles anymore.
+  Instead, the new artificial `sysinit-reactivation.target` is introduced, which
+  allows restarting multiple services that are ordered before `sysinit.target`
+  while respecting the ordering between them.
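+
+  A unit that needs to be re-run during this phase can hook into the new target
+  like this (a sketch with a hypothetical unit name; see the "Sysinit
+  reactivation" section of the development manual for details):
+
+  ```nix
+  systemd.services.my-sysinit = {
+    requiredBy = [ "sysinit-reactivation.target" ];
+    before = [ "sysinit-reactivation.target" ];
+    restartTriggers = [ config.environment.etc."my-sysinit.d".source ];
+  };
+  ```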
+
 - The `systemd.oomd` module behavior is changed as:
 
   - Raise ManagedOOMMemoryPressureLimit from 50% to 80%. This should make systemd-oomd kill things less often, and fix issues like [this](https://pagure.io/fedora-workstation/issue/358).
@@ -134,6 +180,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
   The module now includes an optional config check, that is enabled by default, to make the change obvious before any deployment.
   More information about the configuration syntax change is available in the [upstream repository](https://github.com/prometheus/snmp_exporter/blob/b75fc6b839ee3f3ccbee68bee55f1ae99555084a/auth-split-migration.md).
 
+- [watchdogd](https://troglobit.com/projects/watchdogd/), a system and process supervisor using watchdog timers. Available as [services.watchdogd](#opt-services.watchdogd.enable).
+
 ## Other Notable Changes {#sec-release-24.05-notable-changes}
 
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@@ -191,6 +239,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 - `services.zfs.zed.enableMail` now uses the global `sendmail` wrapper defined by an email module
   (such as msmtp or Postfix). It no longer requires using a special ZFS build with email support.
 
+- `nextcloud-setup.service` no longer changes the group of each file & directory inside `/var/lib/nextcloud/{config,data,store-apps}` if one of these directories has the wrong owner group. This was part of transitioning the group used for `/var/lib/nextcloud`, but isn't necessary anymore.
+
 - The `krb5` module has been rewritten and moved to `security.krb5`, moving all options but `security.krb5.enable` and `security.krb5.package` into `security.krb5.settings`.
 
 - Gitea 1.21 upgrade has several breaking changes, including:
diff --git a/nixos/lib/eval-config.nix b/nixos/lib/eval-config.nix
index da099f86aa2ce..8bab3752073ff 100644
--- a/nixos/lib/eval-config.nix
+++ b/nixos/lib/eval-config.nix
@@ -110,6 +110,7 @@ let
   withExtraAttrs = configuration: configuration // {
     inherit extraArgs;
     inherit (configuration._module.args) pkgs;
+    inherit lib;
     extendModules = args: withExtraAttrs (configuration.extendModules args);
   };
 in
diff --git a/nixos/maintainers/option-usages.nix b/nixos/maintainers/option-usages.nix
index 11247666ecda9..e9bafa21a58ae 100644
--- a/nixos/maintainers/option-usages.nix
+++ b/nixos/maintainers/option-usages.nix
@@ -9,17 +9,17 @@
 
 # This file is made to be used as follow:
 #
-#   $ nix-instantiate ./option-usage.nix --argstr testOption service.xserver.enable -A txtContent --eval
+#   $ nix-instantiate ./option-usages.nix --argstr testOption service.xserver.enable -A txtContent --eval
 #
 # or
 #
-#   $ nix-build ./option-usage.nix --argstr testOption service.xserver.enable -A txt -o service.xserver.enable._txt
+#   $ nix-build ./option-usages.nix --argstr testOption service.xserver.enable -A txt -o service.xserver.enable._txt
 #
 # Other targets exists such as `dotContent`, `dot`, and `pdf`.  If you are
 # looking for the option usage of multiple options, you can provide a list
 # as argument.
 #
-#   $ nix-build ./option-usage.nix --arg testOptions \
+#   $ nix-build ./option-usages.nix --arg testOptions \
 #      '["boot.loader.gummiboot.enable" "boot.loader.gummiboot.timeout"]' \
 #      -A txt -o gummiboot.list
 #
diff --git a/nixos/modules/config/shells-environment.nix b/nixos/modules/config/shells-environment.nix
index bc6583442edf2..a8476bd2aaedd 100644
--- a/nixos/modules/config/shells-environment.nix
+++ b/nixos/modules/config/shells-environment.nix
@@ -214,7 +214,8 @@ in
       ''
         # Create the required /bin/sh symlink; otherwise lots of things
         # (notably the system() function) won't work.
-        mkdir -m 0755 -p /bin
+        mkdir -p /bin
+        chmod 0755 /bin
         ln -sfn "${cfg.binsh}" /bin/.sh.tmp
         mv /bin/.sh.tmp /bin/sh # atomically replace /bin/sh
       '';
diff --git a/nixos/modules/config/users-groups.nix b/nixos/modules/config/users-groups.nix
index 2aed620eb154c..967ad0846d75b 100644
--- a/nixos/modules/config/users-groups.nix
+++ b/nixos/modules/config/users-groups.nix
@@ -685,7 +685,7 @@ in {
       shadow.gid = ids.gids.shadow;
     };
 
-    system.activationScripts.users = {
+    system.activationScripts.users = if !config.systemd.sysusers.enable then {
       supportsDryActivation = true;
       text = ''
         install -m 0700 -d /root
@@ -694,7 +694,7 @@ in {
         ${pkgs.perl.withPackages (p: [ p.FileSlurp p.JSON ])}/bin/perl \
         -w ${./update-users-groups.pl} ${spec}
       '';
-    };
+    } else ""; # keep around for backwards compatibility
 
     system.activationScripts.update-lingering = let
       lingerDir = "/var/lib/systemd/linger";
@@ -711,7 +711,9 @@ in {
     '';
 
     # Warn about user accounts with deprecated password hashing schemes
-    system.activationScripts.hashes = {
+    # This does not work when the users and groups are created by
+    # systemd-sysusers because the users are created too late then.
+    system.activationScripts.hashes = if !config.systemd.sysusers.enable then {
       deps = [ "users" ];
       text = ''
         users=()
@@ -729,7 +731,7 @@ in {
           printf ' - %s\n' "''${users[@]}"
         fi
       '';
-    };
+    } else ""; # keep around for backwards compatibility
 
     # for backwards compatibility
     system.activationScripts.groups = stringAfter [ "users" ] "";
diff --git a/nixos/modules/image/repart-image.nix b/nixos/modules/image/repart-image.nix
index b4a1dfe51ff3a..a12b4fb14fb16 100644
--- a/nixos/modules/image/repart-image.nix
+++ b/nixos/modules/image/repart-image.nix
@@ -10,6 +10,8 @@
 , systemd
 , fakeroot
 , util-linux
+
+  # filesystem tools
 , dosfstools
 , mtools
 , e2fsprogs
@@ -18,8 +20,13 @@
 , btrfs-progs
 , xfsprogs
 
+  # compression tools
+, zstd
+, xz
+
   # arguments
-, name
+, imageFileBasename
+, compression
 , fileSystems
 , partitions
 , split
@@ -52,14 +59,25 @@ let
   };
 
   fileSystemTools = builtins.concatMap (f: fileSystemToolMapping."${f}") fileSystems;
+
+  compressionPkg = {
+    "zstd" = zstd;
+    "xz" = xz;
+  }."${compression.algorithm}";
+
+  compressionCommand = {
+    "zstd" = "zstd --no-progress --threads=0 -${toString compression.level}";
+    "xz" = "xz --keep --verbose --threads=0 -${toString compression.level}";
+  }."${compression.algorithm}";
 in
 
-runCommand name
+runCommand imageFileBasename
 {
   nativeBuildInputs = [
     systemd
     fakeroot
     util-linux
+    compressionPkg
   ] ++ fileSystemTools;
 } ''
   amendedRepartDefinitions=$(${amendRepartDefinitions} ${partitions} ${definitionsDirectory})
@@ -67,6 +85,7 @@ runCommand name
   mkdir -p $out
   cd $out
 
+  echo "Building image with systemd-repart..."
   unshare --map-root-user fakeroot systemd-repart \
     --dry-run=no \
     --empty=create \
@@ -75,6 +94,17 @@ runCommand name
     --definitions="$amendedRepartDefinitions" \
     --split="${lib.boolToString split}" \
     --json=pretty \
-    image.raw \
+    ${imageFileBasename}.raw \
     | tee repart-output.json
+
+  # Compression is implemented in the same derivation as opposed to in a
+  # separate derivation to allow users to save disk space. Disk images are
+  # already very space intensive so we want to allow users to mitigate this.
+  if ${lib.boolToString compression.enable}; then
+    for f in ${imageFileBasename}*; do
+      echo "Compressing $f with ${compression.algorithm}..."
+      # Keep the original file when compressing and only delete it afterwards
+      ${compressionCommand} $f && rm $f
+    done
+  fi
 ''
diff --git a/nixos/modules/image/repart.nix b/nixos/modules/image/repart.nix
index da4f45d9a6392..ed584d9bf997b 100644
--- a/nixos/modules/image/repart.nix
+++ b/nixos/modules/image/repart.nix
@@ -66,7 +66,53 @@ in
 
     name = lib.mkOption {
       type = lib.types.str;
-      description = lib.mdDoc "The name of the image.";
+      description = lib.mdDoc ''
+        Name of the image.
+
+        If this option is unset but config.system.image.id is set,
+        config.system.image.id is used as the default value.
+      '';
+    };
+
+    version = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = config.system.image.version;
+      defaultText = lib.literalExpression "config.system.image.version";
+      description = lib.mdDoc "Version of the image";
+    };
+
+    imageFileBasename = lib.mkOption {
+      type = lib.types.str;
+      readOnly = true;
+      description = lib.mdDoc ''
+        Basename of the image filename without any extension (e.g. `image_1`).
+      '';
+    };
+
+    imageFile = lib.mkOption {
+      type = lib.types.str;
+      readOnly = true;
+      description = lib.mdDoc ''
+        Filename of the image including all extensions (e.g. `image_1.raw` or
+        `image_1.raw.zst`).
+      '';
+    };
+
+    compression = {
+      enable = lib.mkEnableOption (lib.mdDoc "Image compression");
+
+      algorithm = lib.mkOption {
+        type = lib.types.enum [ "zstd" "xz" ];
+        default = "zstd";
+        description = lib.mdDoc "Compression algorithm";
+      };
+
+      level = lib.mkOption {
+        type = lib.types.int;
+        description = lib.mdDoc ''
+          Compression level. The available range depends on the used algorithm.
+        '';
+      };
     };
 
     seed = lib.mkOption {
@@ -131,6 +177,32 @@ in
 
   config = {
 
+    image.repart =
+      let
+        version = config.image.repart.version;
+        versionInfix = if version != null then "_${version}" else "";
+        compressionSuffix = lib.optionalString cfg.compression.enable
+          {
+            "zstd" = ".zst";
+            "xz" = ".xz";
+          }."${cfg.compression.algorithm}";
+      in
+      {
+        name = lib.mkIf (config.system.image.id != null) (lib.mkOptionDefault config.system.image.id);
+        imageFileBasename = cfg.name + versionInfix;
+        imageFile = cfg.imageFileBasename + ".raw" + compressionSuffix;
+
+        compression = {
+          # Generally default to slightly faster than default compression
+          # levels under the assumption that most of the building will be done
+          # for development and release builds will be customized.
+          level = lib.mkOptionDefault {
+            "zstd" = 3;
+            "xz" = 3;
+          }."${cfg.compression.algorithm}";
+        };
+      };
+
     system.build.image =
       let
         fileSystems = lib.filter
@@ -160,7 +232,7 @@ in
       in
       pkgs.callPackage ./repart-image.nix {
         systemd = cfg.package;
-        inherit (cfg) name split seed;
+        inherit (cfg) imageFileBasename compression split seed;
         inherit fileSystems definitionsDirectory partitions;
       };
 
diff --git a/nixos/modules/misc/version.nix b/nixos/modules/misc/version.nix
index 45dbf45b3ae70..c929c3b37285b 100644
--- a/nixos/modules/misc/version.nix
+++ b/nixos/modules/misc/version.nix
@@ -28,6 +28,8 @@ let
     DOCUMENTATION_URL = lib.optionalString (cfg.distroId == "nixos") "https://nixos.org/learn.html";
     SUPPORT_URL = lib.optionalString (cfg.distroId == "nixos") "https://nixos.org/community.html";
     BUG_REPORT_URL = lib.optionalString (cfg.distroId == "nixos") "https://github.com/NixOS/nixpkgs/issues";
+    IMAGE_ID = lib.optionalString (config.system.image.id != null) config.system.image.id;
+    IMAGE_VERSION = lib.optionalString (config.system.image.version != null) config.system.image.version;
   } // lib.optionalAttrs (cfg.variant_id != null) {
     VARIANT_ID = cfg.variant_id;
   };
@@ -110,6 +112,38 @@ in
       example = "installer";
     };
 
+    image = {
+
+      id = lib.mkOption {
+        type = types.nullOr (types.strMatching "^[a-z0-9._-]+$");
+        default = null;
+        description = lib.mdDoc ''
+          Image identifier.
+
+          This corresponds to the IMAGE_ID field in os-release. See the
+          upstream docs for more details on valid characters for this field:
+          https://www.freedesktop.org/software/systemd/man/latest/os-release.html#IMAGE_ID=
+
+          You would only want to set this option if you're building NixOS appliance images.
+        '';
+      };
+
+      version = lib.mkOption {
+        type = types.nullOr (types.strMatching "^[a-z0-9._-]+$");
+        default = null;
+        description = lib.mdDoc ''
+          Image version.
+
+          This corresponds to the IMAGE_VERSION field in os-release. See the
+          upstream docs for more details on valid characters for this field:
+          https://www.freedesktop.org/software/systemd/man/latest/os-release.html#IMAGE_VERSION=
+
+          You would only want to set this option if you're building NixOS appliance images.
+        '';
+      };
+
+    };
+
     stateVersion = mkOption {
       type = types.str;
       # TODO Remove this and drop the default of the option so people are forced to set it.
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index e6fffd4716de9..2552ca6fa0f54 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -506,7 +506,7 @@
   ./services/editors/haste.nix
   ./services/editors/infinoted.nix
   ./services/finance/odoo.nix
-  ./services/games/asf.nix
+  ./services/games/archisteamfarm.nix
   ./services/games/crossfire-server.nix
   ./services/games/deliantra-server.nix
   ./services/games/factorio.nix
@@ -849,6 +849,7 @@
   ./services/monitoring/vmagent.nix
   ./services/monitoring/vmalert.nix
   ./services/monitoring/vnstat.nix
+  ./services/monitoring/watchdogd.nix
   ./services/monitoring/zabbix-agent.nix
   ./services/monitoring/zabbix-proxy.nix
   ./services/monitoring/zabbix-server.nix
@@ -1339,6 +1340,7 @@
   ./services/web-apps/restya-board.nix
   ./services/web-apps/rimgo.nix
   ./services/web-apps/sftpgo.nix
+  ./services/web-apps/suwayomi-server.nix
   ./services/web-apps/rss-bridge.nix
   ./services/web-apps/selfoss.nix
   ./services/web-apps/shiori.nix
@@ -1466,6 +1468,7 @@
   ./system/boot/stratisroot.nix
   ./system/boot/modprobe.nix
   ./system/boot/networkd.nix
+  ./system/boot/uki.nix
   ./system/boot/unl0kr.nix
   ./system/boot/plymouth.nix
   ./system/boot/resolved.nix
@@ -1486,6 +1489,7 @@
   ./system/boot/systemd/repart.nix
   ./system/boot/systemd/shutdown.nix
   ./system/boot/systemd/sysupdate.nix
+  ./system/boot/systemd/sysusers.nix
   ./system/boot/systemd/tmpfiles.nix
   ./system/boot/systemd/user.nix
   ./system/boot/systemd/userdbd.nix
diff --git a/nixos/modules/profiles/perlless.nix b/nixos/modules/profiles/perlless.nix
new file mode 100644
index 0000000000000..90abd14f077e4
--- /dev/null
+++ b/nixos/modules/profiles/perlless.nix
@@ -0,0 +1,31 @@
+# WARNING: If you enable this profile, you will NOT be able to switch to a new
+# configuration and thus you will not be able to rebuild your system with
+# nixos-rebuild!
+
+{ lib, ... }:
+
+{
+
+  # Disable switching to a new configuration. This is not a necessary
+  # limitation of a perlless system but just a current one. In the future,
+  # perlless switching might be possible.
+  system.switch.enable = lib.mkDefault false;
+
+  # Remove perl from activation
+  boot.initrd.systemd.enable = lib.mkDefault true;
+  system.etc.overlay.enable = lib.mkDefault true;
+  systemd.sysusers.enable = lib.mkDefault true;
+
+  # Random perl remnants
+  system.disableInstallerTools = lib.mkDefault true;
+  programs.less.lessopen = lib.mkDefault null;
+  programs.command-not-found.enable = lib.mkDefault false;
+  boot.enableContainers = lib.mkDefault false;
+  environment.defaultPackages = lib.mkDefault [ ];
+  documentation.info.enable = lib.mkDefault false;
+
+  # Check that the system does not contain a Nix store path that contains the
+  # string "perl".
+  system.forbiddenDependenciesRegex = "perl";
+
+}
diff --git a/nixos/modules/programs/ssh.nix b/nixos/modules/programs/ssh.nix
index c39a3c8d509be..0c1461709c22b 100644
--- a/nixos/modules/programs/ssh.nix
+++ b/nixos/modules/programs/ssh.nix
@@ -12,6 +12,7 @@ let
     ''
       #! ${pkgs.runtimeShell} -e
       export DISPLAY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^DISPLAY=\(.*\)/\1/; t; d')"
+      export XAUTHORITY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^XAUTHORITY=\(.*\)/\1/; t; d')"
       export WAYLAND_DISPLAY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^WAYLAND_DISPLAY=\(.*\)/\1/; t; d')"
       exec ${cfg.askPassword} "$@"
     '';
diff --git a/nixos/modules/security/acme/default.md b/nixos/modules/security/acme/default.md
index 51ee0428d84e5..38fbfbf0caece 100644
--- a/nixos/modules/security/acme/default.md
+++ b/nixos/modules/security/acme/default.md
@@ -72,7 +72,7 @@ services.nginx = {
       };
     };
   };
-}
+};
 ```
 
 ## Using ACME certificates in Apache/httpd {#module-security-acme-httpd}
@@ -111,7 +111,7 @@ services.nginx = {
       };
     };
   };
-}
+};
 # Alternative config for Apache
 users.users.wwwrun.extraGroups = [ "acme" ];
 services.httpd = {
@@ -131,7 +131,7 @@ services.httpd = {
       '';
     };
   };
-}
+};
 ```
 
 Now you need to configure ACME to generate a certificate.
@@ -181,7 +181,7 @@ services.bind = {
       extraConfig = "allow-update { key rfc2136key.example.com.; };";
     }
   ];
-}
+};
 
 # Now we can configure ACME
 security.acme.acceptTerms = true;
@@ -271,7 +271,7 @@ services.nginx = {
       acmeRoot = null;
     };
   };
-}
+};
 ```
 
 And that's it! Next time your configuration is rebuilt, or when
diff --git a/nixos/modules/security/acme/default.nix b/nixos/modules/security/acme/default.nix
index 7cc302969fb6d..40d9c487996b5 100644
--- a/nixos/modules/security/acme/default.nix
+++ b/nixos/modules/security/acme/default.nix
@@ -897,10 +897,10 @@ in {
         certs = attrValues cfg.certs;
       in [
         {
-          assertion = cfg.email != null || all (certOpts: certOpts.email != null) certs;
+          assertion = cfg.defaults.email != null || all (certOpts: certOpts.email != null) certs;
           message = ''
             You must define `security.acme.certs.<name>.email` or
-            `security.acme.email` to register with the CA. Note that using
+            `security.acme.defaults.email` to register with the CA. Note that using
             many different addresses for certs may trigger account rate limits.
           '';
         }
diff --git a/nixos/modules/services/audio/gmediarender.nix b/nixos/modules/services/audio/gmediarender.nix
index 545f2b1a2b60d..a4cb89098db7a 100644
--- a/nixos/modules/services/audio/gmediarender.nix
+++ b/nixos/modules/services/audio/gmediarender.nix
@@ -64,6 +64,7 @@ in
   config = mkIf cfg.enable {
     systemd = {
       services.gmediarender = {
+        wants = [ "network-online.target" ];
         after = [ "network-online.target" ];
         wantedBy = [ "multi-user.target" ];
         description = "gmediarender server daemon";
diff --git a/nixos/modules/services/audio/jmusicbot.nix b/nixos/modules/services/audio/jmusicbot.nix
index fd1d4da192843..e7803677d0fd9 100644
--- a/nixos/modules/services/audio/jmusicbot.nix
+++ b/nixos/modules/services/audio/jmusicbot.nix
@@ -26,6 +26,7 @@ in
   config = mkIf cfg.enable {
     systemd.services.jmusicbot = {
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       description = "Discord music bot that's easy to set up and run yourself!";
       serviceConfig = mkMerge [{
diff --git a/nixos/modules/services/audio/spotifyd.nix b/nixos/modules/services/audio/spotifyd.nix
index 975be5a87cba9..1194b6f200d70 100644
--- a/nixos/modules/services/audio/spotifyd.nix
+++ b/nixos/modules/services/audio/spotifyd.nix
@@ -50,6 +50,7 @@ in
 
     systemd.services.spotifyd = {
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" "sound.target" ];
       description = "spotifyd, a Spotify playing daemon";
       environment.SHELL = "/bin/sh";
diff --git a/nixos/modules/services/audio/ympd.nix b/nixos/modules/services/audio/ympd.nix
index b74cc3f9c0b41..6e8d22dab3c80 100644
--- a/nixos/modules/services/audio/ympd.nix
+++ b/nixos/modules/services/audio/ympd.nix
@@ -50,6 +50,7 @@ in {
       description = "Standalone MPD Web GUI written in C";
 
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
 
       serviceConfig = {
diff --git a/nixos/modules/services/continuous-integration/buildbot/master.nix b/nixos/modules/services/continuous-integration/buildbot/master.nix
index 446d19b8fd5a0..9f702b17937cf 100644
--- a/nixos/modules/services/continuous-integration/buildbot/master.nix
+++ b/nixos/modules/services/continuous-integration/buildbot/master.nix
@@ -267,7 +267,7 @@ in {
 
     systemd.services.buildbot-master = {
       description = "Buildbot Continuous Integration Server.";
-      after = [ "network-online.target" ];
+      after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       path = cfg.packages ++ cfg.pythonPackages python.pkgs;
       environment.PYTHONPATH = "${python.withPackages (self: cfg.pythonPackages self ++ [ package ])}/${python.sitePackages}";
diff --git a/nixos/modules/services/continuous-integration/gitea-actions-runner.nix b/nixos/modules/services/continuous-integration/gitea-actions-runner.nix
index 3f2be9464849f..06f0da3451a6c 100644
--- a/nixos/modules/services/continuous-integration/gitea-actions-runner.nix
+++ b/nixos/modules/services/continuous-integration/gitea-actions-runner.nix
@@ -188,6 +188,7 @@ in
         nameValuePair "gitea-runner-${escapeSystemdPath name}" {
           inherit (instance) enable;
           description = "Gitea Actions Runner";
+          wants = [ "network-online.target" ];
           after = [
             "network-online.target"
           ] ++ optionals (wantsDocker) [
diff --git a/nixos/modules/services/continuous-integration/github-runner/options.nix b/nixos/modules/services/continuous-integration/github-runner/options.nix
index 2335826e8b665..b9b1ea05e9672 100644
--- a/nixos/modules/services/continuous-integration/github-runner/options.nix
+++ b/nixos/modules/services/continuous-integration/github-runner/options.nix
@@ -153,6 +153,7 @@ with lib;
     type = types.attrs;
     description = lib.mdDoc ''
       Modify the systemd service. Can be used to, e.g., adjust the sandboxing options.
+      See {manpage}`systemd.exec(5)` for more options.
     '';
     example = {
       ProtectHome = false;
diff --git a/nixos/modules/services/continuous-integration/hydra/default.nix b/nixos/modules/services/continuous-integration/hydra/default.nix
index 46b03bba37be7..54bbe69703f95 100644
--- a/nixos/modules/services/continuous-integration/hydra/default.nix
+++ b/nixos/modules/services/continuous-integration/hydra/default.nix
@@ -393,6 +393,7 @@ in
     systemd.services.hydra-evaluator =
       { wantedBy = [ "multi-user.target" ];
         requires = [ "hydra-init.service" ];
+        wants = [ "network-online.target" ];
         after = [ "hydra-init.service" "network.target" "network-online.target" ];
         path = with pkgs; [ hydra-package nettools jq ];
         restartTriggers = [ hydraConf ];
diff --git a/nixos/modules/services/databases/firebird.nix b/nixos/modules/services/databases/firebird.nix
index 36c12eaaf5f15..431233ce5ed41 100644
--- a/nixos/modules/services/databases/firebird.nix
+++ b/nixos/modules/services/databases/firebird.nix
@@ -143,7 +143,7 @@ in
       # ConnectionTimeout = 180
 
       #RemoteServiceName = gds_db
-      RemoteServicePort = ${cfg.port}
+      RemoteServicePort = ${toString cfg.port}
 
       # randomly choose port for server Event Notification
       #RemoteAuxPort = 0
diff --git a/nixos/modules/services/databases/lldap.nix b/nixos/modules/services/databases/lldap.nix
index d1574c98fe67f..e821da8e58aa3 100644
--- a/nixos/modules/services/databases/lldap.nix
+++ b/nixos/modules/services/databases/lldap.nix
@@ -104,6 +104,7 @@ in
   config = lib.mkIf cfg.enable {
     systemd.services.lldap = {
       description = "Lightweight LDAP server (lldap)";
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
diff --git a/nixos/modules/services/databases/openldap.nix b/nixos/modules/services/databases/openldap.nix
index a7a0909f55e1b..df36e37976a44 100644
--- a/nixos/modules/services/databases/openldap.nix
+++ b/nixos/modules/services/databases/openldap.nix
@@ -294,6 +294,7 @@ in {
         "man:slapd-mdb"
       ];
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       serviceConfig = {
         User = cfg.user;
diff --git a/nixos/modules/services/desktops/geoclue2.nix b/nixos/modules/services/desktops/geoclue2.nix
index b04f46c26a568..2a68bb0b55f3a 100644
--- a/nixos/modules/services/desktops/geoclue2.nix
+++ b/nixos/modules/services/desktops/geoclue2.nix
@@ -200,6 +200,7 @@ in
     };
 
     systemd.services.geoclue = {
+      wants = lib.optionals cfg.enableWifi [ "network-online.target" ];
       after = lib.optionals cfg.enableWifi [ "network-online.target" ];
       # restart geoclue service when the configuration changes
       restartTriggers = [
@@ -217,6 +218,7 @@ in
         # we can't be part of a system service, and the agent should
         # be okay with the main service coming and going
         wantedBy = [ "default.target" ];
+        wants = lib.optionals cfg.enableWifi [ "network-online.target" ];
         after = lib.optionals cfg.enableWifi [ "network-online.target" ];
         unitConfig.ConditionUser = "!@system";
         serviceConfig = {
diff --git a/nixos/modules/services/editors/emacs.nix b/nixos/modules/services/editors/emacs.nix
index 6f45be6640bc6..ff6fd85d8a9b7 100644
--- a/nixos/modules/services/editors/emacs.nix
+++ b/nixos/modules/services/editors/emacs.nix
@@ -15,25 +15,6 @@ let
     fi
   '';
 
-  desktopApplicationFile = pkgs.writeTextFile {
-    name = "emacsclient.desktop";
-    destination = "/share/applications/emacsclient.desktop";
-    text = ''
-      [Desktop Entry]
-      Name=Emacsclient
-      GenericName=Text Editor
-      Comment=Edit text
-      MimeType=text/english;text/plain;text/x-makefile;text/x-c++hdr;text/x-c++src;text/x-chdr;text/x-csrc;text/x-java;text/x-moc;text/x-pascal;text/x-tcl;text/x-tex;application/x-shellscript;text/x-c;text/x-c++;
-      Exec=emacseditor %F
-      Icon=emacs
-      Type=Application
-      Terminal=false
-      Categories=Development;TextEditor;
-      StartupWMClass=Emacs
-      Keywords=Text;Editor;
-    '';
-  };
-
 in
 {
 
@@ -102,7 +83,7 @@ in
       wantedBy = if cfg.startWithGraphical then [ "graphical-session.target" ] else [ "default.target" ];
     };
 
-    environment.systemPackages = [ cfg.package editorScript desktopApplicationFile ];
+    environment.systemPackages = [ cfg.package editorScript ];
 
     environment.variables.EDITOR = mkIf cfg.defaultEditor (mkOverride 900 "emacseditor");
   };
diff --git a/nixos/modules/services/games/asf.nix b/nixos/modules/services/games/archisteamfarm.nix
index 27d174d6726ba..293e341bef385 100644
--- a/nixos/modules/services/games/asf.nix
+++ b/nixos/modules/services/games/archisteamfarm.nix
@@ -1,13 +1,11 @@
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
   cfg = config.services.archisteamfarm;
 
   format = pkgs.formats.json { };
 
-  asf-config = format.generate "ASF.json" (cfg.settings // {
+  configFile = format.generate "ASF.json" (cfg.settings // {
     # we disable it because ASF cannot update itself anyways
     # and nixos takes care of restarting the service
     # is in theory not needed as this is already the default for default builds
@@ -30,8 +28,8 @@ let
 in
 {
   options.services.archisteamfarm = {
-    enable = mkOption {
-      type = types.bool;
+    enable = lib.mkOption {
+      type = lib.types.bool;
       description = lib.mdDoc ''
         If enabled, starts the ArchisSteamFarm service.
         For configuring the SteamGuard token you will need to use the web-ui, which is enabled by default over on 127.0.0.1:1242.
@@ -40,14 +38,14 @@ in
       default = false;
     };
 
-    web-ui = mkOption {
-      type = types.submodule {
+    web-ui = lib.mkOption {
+      type = lib.types.submodule {
         options = {
-          enable = mkEnableOption "" // {
+          enable = lib.mkEnableOption "" // {
             description = lib.mdDoc "Whether to start the web-ui. This is the preferred way of configuring things such as the steam guard token.";
           };
 
-          package = mkPackageOption pkgs [ "ArchiSteamFarm" "ui" ] {
+          package = lib.mkPackageOption pkgs [ "ArchiSteamFarm" "ui" ] {
             extraDescription = ''
               ::: {.note}
               Contents must be in lib/dist
@@ -65,7 +63,7 @@ in
       description = lib.mdDoc "The Web-UI hosted on 127.0.0.1:1242.";
     };
 
-    package = mkPackageOption pkgs "ArchiSteamFarm" {
+    package = lib.mkPackageOption pkgs "ArchiSteamFarm" {
       extraDescription = ''
         ::: {.warning}
         Should always be the latest version, for security reasons,
@@ -74,15 +72,15 @@ in
       '';
     };
 
-    dataDir = mkOption {
-      type = types.path;
-      default = "/var/lib/asf";
+    dataDir = lib.mkOption {
+      type = lib.types.path;
+      default = "/var/lib/archisteamfarm";
       description = lib.mdDoc ''
         The ASF home directory used to store all data.
         If left as the default value this directory will automatically be created before the ASF server starts, otherwise the sysadmin is responsible for ensuring the directory exists with appropriate ownership and permissions.'';
     };
 
-    settings = mkOption {
+    settings = lib.mkOption {
       type = format.type;
       description = lib.mdDoc ''
         The ASF.json file, all the options are documented [here](https://github.com/JustArchiNET/ArchiSteamFarm/wiki/Configuration#global-config).
@@ -96,13 +94,13 @@ in
       default = { };
     };
 
-    ipcPasswordFile = mkOption {
-      type = types.nullOr types.path;
+    ipcPasswordFile = lib.mkOption {
+      type = with lib.types; nullOr path;
       default = null;
-      description = lib.mdDoc "Path to a file containing the password. The file must be readable by the `asf` user/group.";
+      description = lib.mdDoc "Path to a file containing the password. The file must be readable by the `archisteamfarm` user/group.";
     };
 
-    ipcSettings = mkOption {
+    ipcSettings = lib.mkOption {
       type = format.type;
       description = lib.mdDoc ''
         Settings to write to IPC.config.
@@ -120,25 +118,25 @@ in
       default = { };
     };
 
-    bots = mkOption {
-      type = types.attrsOf (types.submodule {
+    bots = lib.mkOption {
+      type = lib.types.attrsOf (lib.types.submodule {
         options = {
-          username = mkOption {
-            type = types.str;
+          username = lib.mkOption {
+            type = lib.types.str;
             description = lib.mdDoc "Name of the user to log in. Default is attribute name.";
             default = "";
           };
-          passwordFile = mkOption {
-            type = types.path;
-            description = lib.mdDoc "Path to a file containing the password. The file must be readable by the `asf` user/group.";
+          passwordFile = lib.mkOption {
+            type = lib.types.path;
+            description = lib.mdDoc "Path to a file containing the password. The file must be readable by the `archisteamfarm` user/group.";
           };
-          enabled = mkOption {
-            type = types.bool;
+          enabled = lib.mkOption {
+            type = lib.types.bool;
             default = true;
             description = lib.mdDoc "Whether to enable the bot on startup.";
           };
-          settings = mkOption {
-            type = types.attrs;
+          settings = lib.mkOption {
+            type = lib.types.attrs;
             description = lib.mdDoc ''
               Additional settings that are documented [here](https://github.com/JustArchiNET/ArchiSteamFarm/wiki/Configuration#bot-config).
             '';
@@ -152,7 +150,7 @@ in
       example = {
         exampleBot = {
           username = "alice";
-          passwordFile = "/var/lib/asf/secrets/password";
+          passwordFile = "/var/lib/archisteamfarm/secrets/password";
           settings = { SteamParentalCode = "1234"; };
         };
       };
@@ -160,32 +158,34 @@ in
     };
   };
 
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
+    # TODO: drop with 24.11
+    services.archisteamfarm.dataDir = lib.mkIf (lib.versionOlder config.system.stateVersion "24.05") (lib.mkDefault "/var/lib/asf");
 
     users = {
-      users.asf = {
+      users.archisteamfarm = {
         home = cfg.dataDir;
         isSystemUser = true;
-        group = "asf";
+        group = "archisteamfarm";
         description = "Archis-Steam-Farm service user";
       };
-      groups.asf = { };
+      groups.archisteamfarm = { };
     };
 
     systemd.services = {
-      asf = {
+      archisteamfarm = {
         description = "Archis-Steam-Farm Service";
         after = [ "network.target" ];
         wantedBy = [ "multi-user.target" ];
 
-        serviceConfig = mkMerge [
-          (mkIf (cfg.dataDir == "/var/lib/asf") {
-            StateDirectory = "asf";
+        serviceConfig = lib.mkMerge [
+          (lib.mkIf (lib.hasPrefix "/var/lib/" cfg.dataDir) {
+            StateDirectory = lib.last (lib.splitString "/" cfg.dataDir);
             StateDirectoryMode = "700";
           })
           {
-            User = "asf";
-            Group = "asf";
+            User = "archisteamfarm";
+            Group = "archisteamfarm";
             WorkingDirectory = cfg.dataDir;
             Type = "simple";
             ExecStart = "${lib.getExe cfg.package} --no-restart --process-required --service --system-required --path ${cfg.dataDir}";
@@ -217,12 +217,10 @@ in
             RestrictNamespaces = true;
             RestrictRealtime = true;
             RestrictSUIDSGID = true;
-            SystemCallArchitectures = "native";
-            UMask = "0077";
-
-            # we luckily already have systemd v247+
             SecureBits = "noroot-locked";
+            SystemCallArchitectures = "native";
             SystemCallFilter = [ "@system-service" "~@privileged" ];
+            UMask = "0077";
           }
         ];
 
@@ -232,7 +230,7 @@ in
               mkdir -p $out
               # clean potential removed bots
               rm -rf $out/*.json
-              for i in ${strings.concatStringsSep " " (lists.map (x: "${getName x},${x}") (attrsets.mapAttrsToList mkBot cfg.bots))}; do IFS=",";
+              for i in ${lib.concatStringsSep " " (map (x: "${lib.getName x},${x}") (lib.mapAttrsToList mkBot cfg.bots))}; do IFS=",";
                 set -- $i
                 ln -fs $2 $out/$1
               done
@@ -242,22 +240,22 @@ in
           ''
             mkdir -p config
 
-            cp --no-preserve=mode ${asf-config} config/ASF.json
+            cp --no-preserve=mode ${configFile} config/ASF.json
 
-            ${optionalString (cfg.ipcPasswordFile != null) ''
+            ${lib.optionalString (cfg.ipcPasswordFile != null) ''
               ${replaceSecretBin} '#ipcPassword#' '${cfg.ipcPasswordFile}' config/ASF.json
             ''}
 
-            ${optionalString (cfg.ipcSettings != {}) ''
+            ${lib.optionalString (cfg.ipcSettings != {}) ''
               ln -fs ${ipc-config} config/IPC.config
             ''}
 
-            ${optionalString (cfg.ipcSettings != {}) ''
+            ${lib.optionalString (cfg.ipcSettings != {}) ''
               ln -fs ${createBotsScript}/* config/
             ''}
 
             rm -f www
-            ${optionalString cfg.web-ui.enable ''
+            ${lib.optionalString cfg.web-ui.enable ''
               ln -s ${cfg.web-ui.package}/ www
             ''}
           '';
@@ -267,6 +265,6 @@ in
 
   meta = {
     buildDocsInSandbox = false;
-    maintainers = with maintainers; [ lom SuperSandro2000 ];
+    maintainers = with lib.maintainers; [ lom SuperSandro2000 ];
   };
 }
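
For orientation, a minimal sketch of how the renamed module could be used after this change, built only from the options visible in this diff (the bot name, password file path and parental code come from the option example above; all values are illustrative):

  services.archisteamfarm = {
    enable = true;
    # Must be readable by the archisteamfarm user/group (illustrative path)
    ipcPasswordFile = "/var/lib/archisteamfarm/secrets/ipc-password";
    bots.exampleBot = {
      username = "alice";
      passwordFile = "/var/lib/archisteamfarm/secrets/password";
      settings = { SteamParentalCode = "1234"; };
    };
  };
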
diff --git a/nixos/modules/services/home-automation/evcc.nix b/nixos/modules/services/home-automation/evcc.nix
index d0ce3fb4a1ce6..f360f525b04b9 100644
--- a/nixos/modules/services/home-automation/evcc.nix
+++ b/nixos/modules/services/home-automation/evcc.nix
@@ -41,6 +41,7 @@ in
 
   config = mkIf cfg.enable {
     systemd.services.evcc = {
+      wants = [ "network-online.target" ];
       after = [
         "network-online.target"
         "mosquitto.target"
diff --git a/nixos/modules/services/home-automation/home-assistant.nix b/nixos/modules/services/home-automation/home-assistant.nix
index bc470576b759b..a01628968966e 100644
--- a/nixos/modules/services/home-automation/home-assistant.nix
+++ b/nixos/modules/services/home-automation/home-assistant.nix
@@ -435,6 +435,7 @@ in {
 
     systemd.services.home-assistant = {
       description = "Home Assistant";
+      wants = [ "network-online.target" ];
       after = [
         "network-online.target"
 
diff --git a/nixos/modules/services/logging/journaldriver.nix b/nixos/modules/services/logging/journaldriver.nix
index 59eedff90d60e..4d21464018aac 100644
--- a/nixos/modules/services/logging/journaldriver.nix
+++ b/nixos/modules/services/logging/journaldriver.nix
@@ -84,6 +84,7 @@ in {
     systemd.services.journaldriver = {
       description = "Stackdriver Logging journal forwarder";
       script      = "${pkgs.journaldriver}/bin/journaldriver";
+      wants       = [ "network-online.target" ];
       after       = [ "network-online.target" ];
       wantedBy    = [ "multi-user.target" ];
 
diff --git a/nixos/modules/services/mail/roundcube.nix b/nixos/modules/services/mail/roundcube.nix
index c883c143e5234..3f1a695ab91ae 100644
--- a/nixos/modules/services/mail/roundcube.nix
+++ b/nixos/modules/services/mail/roundcube.nix
@@ -250,6 +250,7 @@ in
         path = [ config.services.postgresql.package ];
       })
       {
+        wants = [ "network-online.target" ];
         after = [ "network-online.target" ];
         wantedBy = [ "multi-user.target" ];
         script = let
diff --git a/nixos/modules/services/mail/sympa.nix b/nixos/modules/services/mail/sympa.nix
index 04ae46f66eeaf..13fc8656a2b5a 100644
--- a/nixos/modules/services/mail/sympa.nix
+++ b/nixos/modules/services/mail/sympa.nix
@@ -435,7 +435,7 @@ in
 
       wantedBy = [ "multi-user.target" ];
       after = [ "network-online.target" ];
-      wants = sympaSubServices;
+      wants = sympaSubServices ++ [ "network-online.target" ];
       before = sympaSubServices;
       serviceConfig = sympaServiceConfig "sympa_msg";
 
diff --git a/nixos/modules/services/matrix/synapse.nix b/nixos/modules/services/matrix/synapse.nix
index 50019d2a25cb5..4c1c396eac056 100644
--- a/nixos/modules/services/matrix/synapse.nix
+++ b/nixos/modules/services/matrix/synapse.nix
@@ -1056,6 +1056,7 @@ in {
 
     systemd.targets.matrix-synapse = lib.mkIf hasWorkers {
       description = "Synapse Matrix parent target";
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ] ++ optional hasLocalPostgresDB "postgresql.service";
       wantedBy = [ "multi-user.target" ];
     };
@@ -1071,6 +1072,7 @@ in {
             requires = optional hasLocalPostgresDB "postgresql.service";
           }
           else {
+            wants = [ "network-online.target" ];
             after = [ "network-online.target" ] ++ optional hasLocalPostgresDB "postgresql.service";
             requires = optional hasLocalPostgresDB "postgresql.service";
             wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/services/misc/amazon-ssm-agent.nix b/nixos/modules/services/misc/amazon-ssm-agent.nix
index 20b836abe164f..89a1c07665106 100644
--- a/nixos/modules/services/misc/amazon-ssm-agent.nix
+++ b/nixos/modules/services/misc/amazon-ssm-agent.nix
@@ -41,6 +41,7 @@ in {
     # See https://github.com/aws/amazon-ssm-agent/blob/mainline/packaging/linux/amazon-ssm-agent.service
     systemd.services.amazon-ssm-agent = {
       inherit (cfg.package.meta) description;
+      wants    = [ "network-online.target" ];
       after    = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
 
diff --git a/nixos/modules/services/misc/bcg.nix b/nixos/modules/services/misc/bcg.nix
index 9da4a879cdd00..ad0b9c871342f 100644
--- a/nixos/modules/services/misc/bcg.nix
+++ b/nixos/modules/services/misc/bcg.nix
@@ -154,7 +154,7 @@ in
     in {
       description = "BigClown Gateway";
       wantedBy = [ "multi-user.target" ];
-      wants = mkIf config.services.mosquitto.enable [ "mosquitto.service" ];
+      wants = [ "network-online.target" ] ++ lib.optional config.services.mosquitto.enable "mosquitto.service";
       after = [ "network-online.target" ];
       preStart = ''
         umask 077
diff --git a/nixos/modules/services/misc/domoticz.nix b/nixos/modules/services/misc/domoticz.nix
index fd9fcf0b78eb5..315092f933514 100644
--- a/nixos/modules/services/misc/domoticz.nix
+++ b/nixos/modules/services/misc/domoticz.nix
@@ -35,6 +35,7 @@ in {
     systemd.services."domoticz" = {
       description = pkgDesc;
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       serviceConfig = {
         DynamicUser = true;
diff --git a/nixos/modules/services/misc/etesync-dav.nix b/nixos/modules/services/misc/etesync-dav.nix
index 9d99d548d95b0..ae2b5ad043433 100644
--- a/nixos/modules/services/misc/etesync-dav.nix
+++ b/nixos/modules/services/misc/etesync-dav.nix
@@ -59,6 +59,7 @@ in
 
       systemd.services.etesync-dav = {
         description = "etesync-dav - A CalDAV and CardDAV adapter for EteSync";
+        wants = [ "network-online.target" ];
         after = [ "network-online.target" ];
         wantedBy = [ "multi-user.target" ];
         path = [ pkgs.etesync-dav ];
diff --git a/nixos/modules/services/misc/mediatomb.nix b/nixos/modules/services/misc/mediatomb.nix
index d421d74c53ad7..03235e9a12655 100644
--- a/nixos/modules/services/misc/mediatomb.nix
+++ b/nixos/modules/services/misc/mediatomb.nix
@@ -357,6 +357,7 @@ in {
       description = "${cfg.serverName} media Server";
       # Gerbera might fail if the network interface is not available on startup
       # https://github.com/gerbera/gerbera/issues/1324
+      wants = [ "network-online.target" ];
       after = [ "network.target" "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig.ExecStart = "${binaryCommand} --port ${toString cfg.port} ${interfaceFlag} ${configFlag} --home ${cfg.dataDir}";
diff --git a/nixos/modules/services/misc/metabase.nix b/nixos/modules/services/misc/metabase.nix
index 883fa0b959116..5fc18e27eaae4 100644
--- a/nixos/modules/services/misc/metabase.nix
+++ b/nixos/modules/services/misc/metabase.nix
@@ -77,6 +77,7 @@ in {
     systemd.services.metabase = {
       description = "Metabase server";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       environment = {
         MB_PLUGINS_DIR = "${dataDir}/plugins";
diff --git a/nixos/modules/services/misc/moonraker.nix b/nixos/modules/services/misc/moonraker.nix
index 750dca9d03736..4e419aafa990b 100644
--- a/nixos/modules/services/misc/moonraker.nix
+++ b/nixos/modules/services/misc/moonraker.nix
@@ -103,7 +103,7 @@ in {
 
   config = mkIf cfg.enable {
     warnings = []
-      ++ optional (cfg.settings ? update_manager)
+      ++ optional (cfg.settings.update_manager.enable_system_updates or false)
         ''Enabling update_manager is not supported on NixOS and will lead to non-removable warnings in some clients.''
       ++ optional (cfg.configDir != null)
         ''
diff --git a/nixos/modules/services/misc/nix-gc.nix b/nixos/modules/services/misc/nix-gc.nix
index 97596d28cd89b..de6bd76c7eb9d 100644
--- a/nixos/modules/services/misc/nix-gc.nix
+++ b/nixos/modules/services/misc/nix-gc.nix
@@ -1,7 +1,5 @@
 { config, lib, ... }:
 
-with lib;
-
 let
   cfg = config.nix.gc;
 in
@@ -14,14 +12,14 @@ in
 
     nix.gc = {
 
-      automatic = mkOption {
+      automatic = lib.mkOption {
         default = false;
-        type = types.bool;
+        type = lib.types.bool;
         description = lib.mdDoc "Automatically run the garbage collector at a specific time.";
       };
 
-      dates = mkOption {
-        type = types.str;
+      dates = lib.mkOption {
+        type = lib.types.singleLineStr;
         default = "03:15";
         example = "weekly";
         description = lib.mdDoc ''
@@ -33,9 +31,9 @@ in
         '';
       };
 
-      randomizedDelaySec = mkOption {
+      randomizedDelaySec = lib.mkOption {
         default = "0";
-        type = types.str;
+        type = lib.types.singleLineStr;
         example = "45min";
         description = lib.mdDoc ''
           Add a randomized delay before each garbage collection.
@@ -45,9 +43,9 @@ in
         '';
       };
 
-      persistent = mkOption {
+      persistent = lib.mkOption {
         default = true;
-        type = types.bool;
+        type = lib.types.bool;
         example = false;
         description = lib.mdDoc ''
           Takes a boolean argument. If true, the time when the service
@@ -61,10 +59,10 @@ in
         '';
       };
 
-      options = mkOption {
+      options = lib.mkOption {
         default = "";
         example = "--max-freed $((64 * 1024**3))";
-        type = types.str;
+        type = lib.types.singleLineStr;
         description = lib.mdDoc ''
           Options given to {file}`nix-collect-garbage` when the
           garbage collector is run automatically.
@@ -89,7 +87,8 @@ in
     systemd.services.nix-gc = lib.mkIf config.nix.enable {
       description = "Nix Garbage Collector";
       script = "exec ${config.nix.package.out}/bin/nix-collect-garbage ${cfg.options}";
-      startAt = optional cfg.automatic cfg.dates;
+      serviceConfig.Type = "oneshot";
+      startAt = lib.optional cfg.automatic cfg.dates;
     };
 
     systemd.timers.nix-gc = lib.mkIf cfg.automatic {
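
With the options above (now singleLineStr, and the unit explicitly marked oneshot), a typical configuration might look like the following; the values simply reuse the examples from the option declarations:

  nix.gc = {
    automatic = true;
    dates = "weekly";
    randomizedDelaySec = "45min";
    options = "--max-freed $((64 * 1024**3))";
  };
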
diff --git a/nixos/modules/services/misc/nix-ssh-serve.nix b/nixos/modules/services/misc/nix-ssh-serve.nix
index b656692ca01cd..cf9d6339c69b7 100644
--- a/nixos/modules/services/misc/nix-ssh-serve.nix
+++ b/nixos/modules/services/misc/nix-ssh-serve.nix
@@ -1,4 +1,4 @@
-{ config, lib, ... }:
+{ config, lib, pkgs, ... }:
 
 with lib;
 let cfg = config.nix.sshServe;
@@ -46,7 +46,7 @@ in {
       description = "Nix SSH store user";
       isSystemUser = true;
       group = "nix-ssh";
-      useDefaultShell = true;
+      shell = pkgs.bashInteractive;
     };
     users.groups.nix-ssh = {};
 
diff --git a/nixos/modules/services/misc/ollama.nix b/nixos/modules/services/misc/ollama.nix
index 9794bbbec464c..d9359d2b5cd44 100644
--- a/nixos/modules/services/misc/ollama.nix
+++ b/nixos/modules/services/misc/ollama.nix
@@ -9,6 +9,13 @@ in {
       enable = lib.mkEnableOption (
         lib.mdDoc "Server for local large language models"
       );
+      listenAddress = lib.mkOption {
+        type = lib.types.str;
+        default = "127.0.0.1:11434";
+        description = lib.mdDoc ''
+          Specifies the bind address on which the ollama server HTTP interface listens.
+        '';
+      };
       package = lib.mkPackageOption pkgs "ollama" { };
     };
   };
@@ -23,6 +30,7 @@ in {
         environment = {
           HOME = "%S/ollama";
           OLLAMA_MODELS = "%S/ollama/models";
+          OLLAMA_HOST = cfg.listenAddress;
         };
         serviceConfig = {
           ExecStart = "${lib.getExe cfg.package} serve";
diff --git a/nixos/modules/services/misc/paperless.nix b/nixos/modules/services/misc/paperless.nix
index 3c6832958f59a..ca34a327dbdfa 100644
--- a/nixos/modules/services/misc/paperless.nix
+++ b/nixos/modules/services/misc/paperless.nix
@@ -297,6 +297,7 @@ in
       wantedBy = [ "paperless-scheduler.service" ];
       before = [ "paperless-scheduler.service" ];
       after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
       serviceConfig = defaultServiceConfig // {
         User = cfg.user;
         Type = "oneshot";
diff --git a/nixos/modules/services/misc/portunus.nix b/nixos/modules/services/misc/portunus.nix
index 7036a372d1ea8..47af24f024cdf 100644
--- a/nixos/modules/services/misc/portunus.nix
+++ b/nixos/modules/services/misc/portunus.nix
@@ -230,7 +230,10 @@ in
         description = "Self-contained authentication service";
         wantedBy = [ "multi-user.target" ];
         after = [ "network.target" ];
-        serviceConfig.ExecStart = "${cfg.package.out}/bin/portunus-orchestrator";
+        serviceConfig = {
+          ExecStart = "${cfg.package}/bin/portunus-orchestrator";
+          Restart = "on-failure";
+        };
         environment = {
           PORTUNUS_LDAP_SUFFIX = cfg.ldap.suffix;
           PORTUNUS_SERVER_BINARY = "${cfg.package}/bin/portunus-server";
diff --git a/nixos/modules/services/misc/taskserver/helper-tool.py b/nixos/modules/services/misc/taskserver/helper-tool.py
index fec05728b2b6b..b1eebb07686b2 100644
--- a/nixos/modules/services/misc/taskserver/helper-tool.py
+++ b/nixos/modules/services/misc/taskserver/helper-tool.py
@@ -61,6 +61,10 @@ def run_as_taskd_user():
     os.setuid(uid)
 
 
+def run_as_taskd_group():
+    gid = grp.getgrnam(TASKD_GROUP).gr_gid
+    os.setgid(gid)
+
 def taskd_cmd(cmd, *args, **kwargs):
     """
     Invoke taskd with the specified command with the privileges of the 'taskd'
@@ -90,7 +94,7 @@ def certtool_cmd(*args, **kwargs):
     """
     return subprocess.check_output(
         [CERTTOOL_COMMAND] + list(args),
-        preexec_fn=lambda: os.umask(0o077),
+        preexec_fn=run_as_taskd_group,
         stderr=subprocess.STDOUT,
         **kwargs
     )
@@ -156,17 +160,33 @@ def generate_key(org, user):
         sys.stderr.write(msg.format(user))
         return
 
-    basedir = os.path.join(TASKD_DATA_DIR, "keys", org, user)
-    if os.path.exists(basedir):
+    keysdir = os.path.join(TASKD_DATA_DIR, "keys" )
+    orgdir  = os.path.join(keysdir       , org    )
+    userdir = os.path.join(orgdir        , user   )
+    if os.path.exists(userdir):
         raise OSError("Keyfile directory for {} already exists.".format(user))
 
-    privkey = os.path.join(basedir, "private.key")
-    pubcert = os.path.join(basedir, "public.cert")
+    privkey = os.path.join(userdir, "private.key")
+    pubcert = os.path.join(userdir, "public.cert")
 
     try:
-        os.makedirs(basedir, mode=0o700)
+        # We change the permissions and the ownership of the base directories
+        # so that cfg.group and cfg.user can read the directories' contents.
+        # See also: https://bugs.python.org/issue42367
+        for bd in [keysdir, orgdir, userdir]:
+            # Allow cfg.group, but not others, to read the contents of this directory
+            os.makedirs(bd, exist_ok=True)
+            # not using mode= argument to makedirs intentionally - forcing the
+            # permissions we want
+            os.chmod(bd, mode=0o750)
+            os.chown(
+                bd,
+                uid=pwd.getpwnam(TASKD_USER).pw_uid,
+                gid=grp.getgrnam(TASKD_GROUP).gr_gid,
+            )
 
         certtool_cmd("-p", "--bits", CERT_BITS, "--outfile", privkey)
+        os.chmod(privkey, 0o640)
 
         template_data = [
             "organization = {0}".format(org),
@@ -187,7 +207,7 @@ def generate_key(org, user):
                 "--outfile", pubcert
             )
     except:
-        rmtree(basedir)
+        rmtree(userdir)
         raise
 
 
diff --git a/nixos/modules/services/monitoring/mackerel-agent.nix b/nixos/modules/services/monitoring/mackerel-agent.nix
index 62a7858500f24..5915634ed26fe 100644
--- a/nixos/modules/services/monitoring/mackerel-agent.nix
+++ b/nixos/modules/services/monitoring/mackerel-agent.nix
@@ -84,6 +84,7 @@ in {
     # upstream service file in https://git.io/JUt4Q
     systemd.services.mackerel-agent = {
       description = "mackerel.io agent";
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" "nss-lookup.target" ];
       wantedBy = [ "multi-user.target" ];
       environment = {
diff --git a/nixos/modules/services/monitoring/prometheus/alertmanager.nix b/nixos/modules/services/monitoring/prometheus/alertmanager.nix
index 4fd630015f35a..bb426d8b7beb0 100644
--- a/nixos/modules/services/monitoring/prometheus/alertmanager.nix
+++ b/nixos/modules/services/monitoring/prometheus/alertmanager.nix
@@ -174,6 +174,7 @@ in {
 
       systemd.services.alertmanager = {
         wantedBy = [ "multi-user.target" ];
+        wants    = [ "network-online.target" ];
         after    = [ "network-online.target" ];
         preStart = ''
            ${lib.getBin pkgs.envsubst}/bin/envsubst -o "/tmp/alert-manager-substituted.yaml" \
diff --git a/nixos/modules/services/monitoring/teamviewer.nix b/nixos/modules/services/monitoring/teamviewer.nix
index 9b1278317943d..7c45247aa6d5a 100644
--- a/nixos/modules/services/monitoring/teamviewer.nix
+++ b/nixos/modules/services/monitoring/teamviewer.nix
@@ -30,6 +30,7 @@ in
       description = "TeamViewer remote control daemon";
 
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" "network.target" "dbus.service" ];
       requires = [ "dbus.service" ];
       preStart = "mkdir -pv /var/lib/teamviewer /var/log/teamviewer";
diff --git a/nixos/modules/services/monitoring/telegraf.nix b/nixos/modules/services/monitoring/telegraf.nix
index ee28ee03adf33..3bab8aba7bd60 100644
--- a/nixos/modules/services/monitoring/telegraf.nix
+++ b/nixos/modules/services/monitoring/telegraf.nix
@@ -59,6 +59,7 @@ in {
     in {
       description = "Telegraf Agent";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       path = lib.optional (config.services.telegraf.extraConfig.inputs ? procstat) pkgs.procps;
       serviceConfig = {
diff --git a/nixos/modules/services/monitoring/watchdogd.nix b/nixos/modules/services/monitoring/watchdogd.nix
new file mode 100644
index 0000000000000..e8d104651c6ad
--- /dev/null
+++ b/nixos/modules/services/monitoring/watchdogd.nix
@@ -0,0 +1,131 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.watchdogd;
+
+  mkPluginOpts = plugin: defWarn: defCrit: {
+    enabled = mkEnableOption "watchdogd plugin ${plugin}";
+    interval = mkOption {
+      type = types.ints.unsigned;
+      default = 300;
+      description = ''
+        Number of seconds between each poll.
+      '';
+    };
+    logmark = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to log current stats every poll interval.
+      '';
+    };
+    warning = mkOption {
+      type = types.numbers.nonnegative;
+      default = defWarn;
+      description = ''
+        The high watermark level. Alert sent to log.
+      '';
+    };
+    critical = mkOption {
+      type = types.numbers.nonnegative;
+      default = defCrit;
+      description = ''
+        The critical watermark level. Alert sent to log, followed by reboot or script action.
+      '';
+    };
+  };
+in {
+  options.services.watchdogd = {
+    enable = mkEnableOption "watchdogd, an advanced system & process supervisor";
+    package = mkPackageOption pkgs "watchdogd" { };
+
+    settings = mkOption {
+      type = with types; submodule {
+        freeformType = let
+          valueType = oneOf [
+            bool
+            int
+            float
+            str
+          ];
+        in attrsOf (either valueType (attrsOf valueType));
+
+        options = {
+          timeout = mkOption {
+            type = types.ints.unsigned;
+            default = 15;
+            description = ''
+              The WDT timeout before reset.
+            '';
+          };
+          interval = mkOption {
+            type = types.ints.unsigned;
+            default = 5;
+            description = ''
+              The kick interval, i.e. how often {manpage}`watchdogd(8)` should reset the WDT timer.
+            '';
+          };
+
+          safe-exit = mkOption {
+            type = types.bool;
+            default = true;
+            description = ''
+              With {var}`safe-exit` enabled, the daemon will ask the driver to disable the WDT before exiting.
+              However, some WDT drivers (or hardware) may not support this.
+            '';
+          };
+
+          filenr = mkPluginOpts "filenr" 0.9 1.0;
+
+          loadavg = mkPluginOpts "loadavg" 1.0 2.0;
+
+          meminfo = mkPluginOpts "meminfo" 0.9 0.95;
+        };
+      };
+      default = { };
+      description = ''
+        Configuration to put in {file}`watchdogd.conf`.
+        See {manpage}`watchdogd.conf(5)` for more details.
+      '';
+    };
+  };
+
+  config = let
+    toConfig = attrs: concatStringsSep "\n" (mapAttrsToList toValue attrs);
+
+    toValue = name: value:
+      if isAttrs value
+        then pipe value [
+          (mapAttrsToList toValue)
+          (map (s: "  ${s}"))
+          (concatStringsSep "\n")
+          (s: "${name} {\n${s}\n}")
+        ]
+      else if isBool value
+        then "${name} = ${boolToString value}"
+      else if any (f: f value) [isString isInt isFloat]
+        then "${name} = ${toString value}"
+      else throw ''
+        Found invalid type in `services.watchdogd.settings`: '${typeOf value}'
+      '';
+
+    watchdogdConf = pkgs.writeText "watchdogd.conf" (toConfig cfg.settings);
+  in mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services.watchdogd = {
+      documentation = [
+        "man:watchdogd(8)"
+        "man:watchdogd.conf(5)"
+      ];
+      wantedBy = [ "multi-user.target" ];
+      description = "Advanced system & process supervisor";
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${cfg.package}/bin/watchdogd -n -f ${watchdogdConf}";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ vifino ];
+}
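
Given the freeform settings type above (top-level scalars plus one level of plugin sections), a configuration for the new module might look like this; the numbers mirror the declared defaults and are purely illustrative:

  services.watchdogd = {
    enable = true;
    settings = {
      timeout = 15;    # WDT timeout before reset
      interval = 5;    # kick interval
      loadavg = {
        enabled = true;
        warning = 1.0;
        critical = 2.0;
      };
    };
  };
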
diff --git a/nixos/modules/services/network-filesystems/openafs/client.nix b/nixos/modules/services/network-filesystems/openafs/client.nix
index bb0fee087e62e..02c3482ec657b 100644
--- a/nixos/modules/services/network-filesystems/openafs/client.nix
+++ b/nixos/modules/services/network-filesystems/openafs/client.nix
@@ -215,6 +215,7 @@ in
     systemd.services.afsd = {
       description = "AFS client";
       wantedBy = [ "multi-user.target" ];
+      wants = lib.optional (!cfg.startDisconnected) "network-online.target";
       after = singleton (if cfg.startDisconnected then  "network.target" else "network-online.target");
       serviceConfig = { RemainAfterExit = true; };
       restartIfChanged = false;
diff --git a/nixos/modules/services/network-filesystems/samba.nix b/nixos/modules/services/network-filesystems/samba.nix
index 5d02eac8e9f1a..ef368ddbeefd5 100644
--- a/nixos/modules/services/network-filesystems/samba.nix
+++ b/nixos/modules/services/network-filesystems/samba.nix
@@ -154,7 +154,7 @@ in
       };
 
       securityType = mkOption {
-        type = types.str;
+        type = types.enum [ "auto" "user" "domain" "ads" ];
         default = "user";
         description = lib.mdDoc "Samba security type";
       };
diff --git a/nixos/modules/services/networking/bird.nix b/nixos/modules/services/networking/bird.nix
index 9deeb7694d2ac..e25f5c7b03794 100644
--- a/nixos/modules/services/networking/bird.nix
+++ b/nixos/modules/services/networking/bird.nix
@@ -18,6 +18,13 @@ in
           <http://bird.network.cz/>
         '';
       };
+      autoReload = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether bird2 should be automatically reloaded when the configuration changes.
+        '';
+      };
       checkConfig = mkOption {
         type = types.bool;
         default = true;
@@ -68,7 +75,7 @@ in
     systemd.services.bird2 = {
       description = "BIRD Internet Routing Daemon";
       wantedBy = [ "multi-user.target" ];
-      reloadTriggers = [ config.environment.etc."bird/bird2.conf".source ];
+      reloadTriggers = lib.optional cfg.autoReload config.environment.etc."bird/bird2.conf".source;
       serviceConfig = {
         Type = "forking";
         Restart = "on-failure";
diff --git a/nixos/modules/services/networking/bitcoind.nix b/nixos/modules/services/networking/bitcoind.nix
index 4512e666ba5ba..59722e31c62ab 100644
--- a/nixos/modules/services/networking/bitcoind.nix
+++ b/nixos/modules/services/networking/bitcoind.nix
@@ -198,6 +198,7 @@ in
         '';
       in {
         description = "Bitcoin daemon";
+        wants = [ "network-online.target" ];
         after = [ "network-online.target" ];
         wantedBy = [ "multi-user.target" ];
         serviceConfig = {
diff --git a/nixos/modules/services/networking/dante.nix b/nixos/modules/services/networking/dante.nix
index 605f2d74f8275..f0d1d6305c54d 100644
--- a/nixos/modules/services/networking/dante.nix
+++ b/nixos/modules/services/networking/dante.nix
@@ -47,6 +47,7 @@ in
 
     systemd.services.dante = {
       description   = "Dante SOCKS v4 and v5 compatible proxy server";
+      wants         = [ "network-online.target" ];
       after         = [ "network-online.target" ];
       wantedBy      = [ "multi-user.target" ];
 
diff --git a/nixos/modules/services/networking/ergo.nix b/nixos/modules/services/networking/ergo.nix
index 033d4d9caf8a8..1bee0f43f988a 100644
--- a/nixos/modules/services/networking/ergo.nix
+++ b/nixos/modules/services/networking/ergo.nix
@@ -114,6 +114,7 @@ in {
     systemd.services.ergo = {
       description = "ergo server";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       serviceConfig = {
         User = cfg.user;
diff --git a/nixos/modules/services/networking/expressvpn.nix b/nixos/modules/services/networking/expressvpn.nix
index 30de6987d31fe..05c24d8bccffc 100644
--- a/nixos/modules/services/networking/expressvpn.nix
+++ b/nixos/modules/services/networking/expressvpn.nix
@@ -21,6 +21,7 @@ with lib;
         RestartSec = 5;
       };
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network.target" "network-online.target" ];
     };
   };
diff --git a/nixos/modules/services/networking/frp.nix b/nixos/modules/services/networking/frp.nix
index 218d532c12daf..eb022308bc29f 100644
--- a/nixos/modules/services/networking/frp.nix
+++ b/nixos/modules/services/networking/frp.nix
@@ -4,8 +4,8 @@ with lib;
 
 let
   cfg = config.services.frp;
-  settingsFormat = pkgs.formats.ini { };
-  configFile = settingsFormat.generate "frp.ini" cfg.settings;
+  settingsFormat = pkgs.formats.toml { };
+  configFile = settingsFormat.generate "frp.toml" cfg.settings;
   isClient = (cfg.role == "client");
   isServer = (cfg.role == "server");
 in
@@ -31,17 +31,13 @@ in
         default = { };
         description = mdDoc ''
           Frp configuration, for configuration options
-          see the example of [client](https://github.com/fatedier/frp/blob/dev/conf/frpc_legacy_full.ini)
-          or [server](https://github.com/fatedier/frp/blob/dev/conf/frps_legacy_full.ini) on github.
-        '';
-        example = literalExpression ''
-          {
-            common = {
-              server_addr = "x.x.x.x";
-              server_port = 7000;
-            };
-          }
+          see the example of [client](https://github.com/fatedier/frp/blob/dev/conf/frpc_full_example.toml)
+          or [server](https://github.com/fatedier/frp/blob/dev/conf/frps_full_example.toml) on github.
         '';
+        example = {
+          serverAddr = "x.x.x.x";
+          serverPort = 7000;
+        };
       };
     };
   };
@@ -62,7 +58,7 @@ in
             Type = "simple";
             Restart = "on-failure";
             RestartSec = 15;
-            ExecStart = "${cfg.package}/bin/${executableFile} -c ${configFile}";
+            ExecStart = "${cfg.package}/bin/${executableFile} --strict_config -c ${configFile}";
             StateDirectoryMode = optionalString isServer "0700";
             DynamicUser = true;
             # Hardening
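
With the switch from INI to TOML, settings is now rendered by pkgs.formats.toml and passed to frpc/frps with --strict_config. A client-side sketch reusing the keys from the option example above (role is the module's existing role option; enable is assumed):

  services.frp = {
    enable = true;
    role = "client";
    settings = {
      serverAddr = "x.x.x.x";
      serverPort = 7000;
    };
  };
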
diff --git a/nixos/modules/services/networking/headscale.nix b/nixos/modules/services/networking/headscale.nix
index 4224a0578cc30..95b5fcf6ebde8 100644
--- a/nixos/modules/services/networking/headscale.nix
+++ b/nixos/modules/services/networking/headscale.nix
@@ -460,6 +460,7 @@ in {
 
     systemd.services.headscale = {
       description = "headscale coordination server for Tailscale";
+      wants = [ "network-online.target" ];
       after = ["network-online.target"];
       wantedBy = ["multi-user.target"];
       restartTriggers = [configFile];
diff --git a/nixos/modules/services/networking/ircd-hybrid/default.nix b/nixos/modules/services/networking/ircd-hybrid/default.nix
index 554b0f7bb8b44..64a34cc52d25a 100644
--- a/nixos/modules/services/networking/ircd-hybrid/default.nix
+++ b/nixos/modules/services/networking/ircd-hybrid/default.nix
@@ -125,7 +125,8 @@ in
 
     systemd.services.ircd-hybrid = {
       description = "IRCD Hybrid server";
-      after = [ "started networking" ];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       script = "${ircdService}/bin/control start";
     };
diff --git a/nixos/modules/services/networking/ivpn.nix b/nixos/modules/services/networking/ivpn.nix
index 6df630c1f1947..6c9ae599e670f 100644
--- a/nixos/modules/services/networking/ivpn.nix
+++ b/nixos/modules/services/networking/ivpn.nix
@@ -27,7 +27,7 @@ with lib;
     systemd.services.ivpn-service = {
       description = "iVPN daemon";
       wantedBy = [ "multi-user.target" ];
-      wants = [ "network.target" ];
+      wants = [ "network.target" "network-online.target" ];
       after = [
         "network-online.target"
         "NetworkManager.service"
diff --git a/nixos/modules/services/networking/kea.nix b/nixos/modules/services/networking/kea.nix
index 5ca705976c413..656ddd41fd12b 100644
--- a/nixos/modules/services/networking/kea.nix
+++ b/nixos/modules/services/networking/kea.nix
@@ -325,6 +325,9 @@ in
         "network-online.target"
         "time-sync.target"
       ];
+      wants = [
+        "network-online.target"
+      ];
       wantedBy = [
         "multi-user.target"
       ];
@@ -372,6 +375,9 @@ in
         "network-online.target"
         "time-sync.target"
       ];
+      wants = [
+        "network-online.target"
+      ];
       wantedBy = [
         "multi-user.target"
       ];
@@ -413,6 +419,7 @@ in
         "https://kea.readthedocs.io/en/kea-${package.version}/arm/ddns.html"
       ];
 
+      wants = [ "network-online.target" ];
       after = [
         "network-online.target"
         "time-sync.target"
diff --git a/nixos/modules/services/networking/keepalived/default.nix b/nixos/modules/services/networking/keepalived/default.nix
index 429a47c3962c6..599dfd52e271f 100644
--- a/nixos/modules/services/networking/keepalived/default.nix
+++ b/nixos/modules/services/networking/keepalived/default.nix
@@ -59,9 +59,11 @@ let
         ${optionalString i.vmacXmitBase "vmac_xmit_base"}
 
         ${optionalString (i.unicastSrcIp != null) "unicast_src_ip ${i.unicastSrcIp}"}
-        unicast_peer {
-          ${concatStringsSep "\n" i.unicastPeers}
-        }
+        ${optionalString (builtins.length i.unicastPeers > 0) ''
+          unicast_peer {
+            ${concatStringsSep "\n" i.unicastPeers}
+          }
+        ''}
 
         virtual_ipaddress {
           ${concatMapStringsSep "\n" virtualIpLine i.virtualIps}
@@ -138,6 +140,7 @@ let
 
 in
 {
+  meta.maintainers = [ lib.maintainers.raitobezarius ];
 
   options = {
     services.keepalived = {
diff --git a/nixos/modules/services/networking/mosquitto.nix b/nixos/modules/services/networking/mosquitto.nix
index f2b158b989427..ad9eefb422525 100644
--- a/nixos/modules/services/networking/mosquitto.nix
+++ b/nixos/modules/services/networking/mosquitto.nix
@@ -596,6 +596,7 @@ in
     systemd.services.mosquitto = {
       description = "Mosquitto MQTT Broker Daemon";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       serviceConfig = {
         Type = "notify";
diff --git a/nixos/modules/services/networking/mullvad-vpn.nix b/nixos/modules/services/networking/mullvad-vpn.nix
index 446c71f40764d..5da4ca1d1d803 100644
--- a/nixos/modules/services/networking/mullvad-vpn.nix
+++ b/nixos/modules/services/networking/mullvad-vpn.nix
@@ -53,7 +53,7 @@ with lib;
     systemd.services.mullvad-daemon = {
       description = "Mullvad VPN daemon";
       wantedBy = [ "multi-user.target" ];
-      wants = [ "network.target" ];
+      wants = [ "network.target" "network-online.target" ];
       after = [
         "network-online.target"
         "NetworkManager.service"
diff --git a/nixos/modules/services/networking/nbd.nix b/nixos/modules/services/networking/nbd.nix
index 454380aa3154c..b4bf7ede84632 100644
--- a/nixos/modules/services/networking/nbd.nix
+++ b/nixos/modules/services/networking/nbd.nix
@@ -117,6 +117,7 @@ in
     boot.kernelModules = [ "nbd" ];
 
     systemd.services.nbd-server = {
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       before = [ "multi-user.target" ];
       wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/services/networking/ocserv.nix b/nixos/modules/services/networking/ocserv.nix
index 9548fd92dbda3..3c61d56b893e9 100644
--- a/nixos/modules/services/networking/ocserv.nix
+++ b/nixos/modules/services/networking/ocserv.nix
@@ -85,6 +85,7 @@ in
     systemd.services.ocserv = {
       description = "OpenConnect SSL VPN server";
       documentation = [ "man:ocserv(8)" ];
+      wants = [ "network-online.target" ];
       after = [ "dbus.service" "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
 
diff --git a/nixos/modules/services/networking/pleroma.nix b/nixos/modules/services/networking/pleroma.nix
index db0a61b834699..8470f5e9cbc0c 100644
--- a/nixos/modules/services/networking/pleroma.nix
+++ b/nixos/modules/services/networking/pleroma.nix
@@ -92,6 +92,7 @@ in {
 
     systemd.services.pleroma = {
       description = "Pleroma social network";
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" "postgresql.service" ];
       wantedBy = [ "multi-user.target" ];
       restartTriggers = [ config.environment.etc."/pleroma/config.exs".source ];
diff --git a/nixos/modules/services/networking/rosenpass.nix b/nixos/modules/services/networking/rosenpass.nix
index d2a264b83d677..487cb6f601429 100644
--- a/nixos/modules/services/networking/rosenpass.nix
+++ b/nixos/modules/services/networking/rosenpass.nix
@@ -208,6 +208,7 @@ in
       in
       rec {
         wantedBy = [ "multi-user.target" ];
+        wants = [ "network-online.target" ];
         after = [ "network-online.target" ];
         path = [ cfg.package pkgs.wireguard-tools ];
 
diff --git a/nixos/modules/services/networking/rxe.nix b/nixos/modules/services/networking/rxe.nix
index 7dbb4823b4bcd..07437ed71195b 100644
--- a/nixos/modules/services/networking/rxe.nix
+++ b/nixos/modules/services/networking/rxe.nix
@@ -33,7 +33,7 @@ in {
 
       wantedBy = [ "multi-user.target" ];
       after = [ "systemd-modules-load.service" "network-online.target" ];
-      wants = [ "network-pre.target" ];
+      wants = [ "network-pre.target" "network-online.target" ];
 
       serviceConfig = {
         Type = "oneshot";
diff --git a/nixos/modules/services/networking/soju.nix b/nixos/modules/services/networking/soju.nix
index 7f0ac3e3b8e69..d69ec08ca13a0 100644
--- a/nixos/modules/services/networking/soju.nix
+++ b/nixos/modules/services/networking/soju.nix
@@ -110,6 +110,7 @@ in
     systemd.services.soju = {
       description = "soju IRC bouncer";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       serviceConfig = {
         DynamicUser = true;
diff --git a/nixos/modules/services/networking/strongswan-swanctl/module.nix b/nixos/modules/services/networking/strongswan-swanctl/module.nix
index c8832ed4defb6..a988509239558 100644
--- a/nixos/modules/services/networking/strongswan-swanctl/module.nix
+++ b/nixos/modules/services/networking/strongswan-swanctl/module.nix
@@ -55,6 +55,7 @@ in  {
     systemd.services.strongswan-swanctl = {
       description = "strongSwan IPsec IKEv1/IKEv2 daemon using swanctl";
       wantedBy = [ "multi-user.target" ];
+      wants    = [ "network-online.target" ];
       after    = [ "network-online.target" ];
       path     = with pkgs; [ kmod iproute2 iptables util-linux ];
       environment = {
diff --git a/nixos/modules/services/networking/strongswan.nix b/nixos/modules/services/networking/strongswan.nix
index e58526814d1ad..dcf04d2a1917c 100644
--- a/nixos/modules/services/networking/strongswan.nix
+++ b/nixos/modules/services/networking/strongswan.nix
@@ -153,6 +153,7 @@ in
       description = "strongSwan IPSec Service";
       wantedBy = [ "multi-user.target" ];
       path = with pkgs; [ kmod iproute2 iptables util-linux ]; # XXX Linux
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       environment = {
         STRONGSWAN_CONF = strongswanConf { inherit setup connections ca secretsFile managePlugins enabledPlugins; };
diff --git a/nixos/modules/services/networking/syncplay.nix b/nixos/modules/services/networking/syncplay.nix
index 0a66d93bf153a..151259b6d4ad2 100644
--- a/nixos/modules/services/networking/syncplay.nix
+++ b/nixos/modules/services/networking/syncplay.nix
@@ -107,6 +107,7 @@ in
     systemd.services.syncplay = {
       description = "Syncplay Service";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
 
       serviceConfig = {
diff --git a/nixos/modules/services/networking/wasabibackend.nix b/nixos/modules/services/networking/wasabibackend.nix
index 938145b35ee88..e3a48afd2a2c5 100644
--- a/nixos/modules/services/networking/wasabibackend.nix
+++ b/nixos/modules/services/networking/wasabibackend.nix
@@ -119,6 +119,7 @@ in {
     systemd.services.wasabibackend = {
       description = "wasabibackend server";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       environment = {
         DOTNET_PRINT_TELEMETRY_MESSAGE = "false";
diff --git a/nixos/modules/services/networking/znc/default.nix b/nixos/modules/services/networking/znc/default.nix
index d3ba4a524197d..e15233293cf25 100644
--- a/nixos/modules/services/networking/znc/default.nix
+++ b/nixos/modules/services/networking/znc/default.nix
@@ -243,6 +243,7 @@ in
     systemd.services.znc = {
       description = "ZNC Server";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       serviceConfig = {
         User = cfg.user;
diff --git a/nixos/modules/services/security/certmgr.nix b/nixos/modules/services/security/certmgr.nix
index db80e943973dc..02cb7afe87bad 100644
--- a/nixos/modules/services/security/certmgr.nix
+++ b/nixos/modules/services/security/certmgr.nix
@@ -182,6 +182,7 @@ in
     systemd.services.certmgr = {
       description = "certmgr";
       path = mkIf (cfg.svcManager == "command") [ pkgs.bash ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       inherit preStart;
diff --git a/nixos/modules/services/security/clamav.nix b/nixos/modules/services/security/clamav.nix
index d3164373ec01f..4480c0cae60c9 100644
--- a/nixos/modules/services/security/clamav.nix
+++ b/nixos/modules/services/security/clamav.nix
@@ -196,6 +196,7 @@ in
     systemd.services.clamav-freshclam = mkIf cfg.updater.enable {
       description = "ClamAV virus database updater (freshclam)";
       restartTriggers = [ freshclamConfigFile ];
+      requires = [ "network-online.target" ];
       after = [ "network-online.target" ];
 
       serviceConfig = {
@@ -243,6 +244,7 @@ in
     systemd.services.clamav-fangfrisch = mkIf cfg.fangfrisch.enable {
       description = "ClamAV virus database updater (fangfrisch)";
       restartTriggers = [ fangfrischConfigFile ];
+      requires = [ "network-online.target" ];
       after = [ "network-online.target" "clamav-fangfrisch-init.service" ];
 
       serviceConfig = {
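
Unlike most hunks in this change set, which add wants, the freshclam and fangfrisch updaters use requires: combined with After=, Requires= prevents the updater from starting when network-online.target cannot be activated, whereas Wants= would let it start anyway. A contrast sketch with placeholder unit names:

  systemd.services.needs-network = {
    requires = [ "network-online.target" ];  # do not start if the target fails
    after = [ "network-online.target" ];
  };
  systemd.services.prefers-network = {
    wants = [ "network-online.target" ];     # start even if the target fails
    after = [ "network-online.target" ];
  };
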
diff --git a/nixos/modules/services/security/oauth2_proxy.nix b/nixos/modules/services/security/oauth2_proxy.nix
index 78916c907279a..d1dc37d549d2d 100644
--- a/nixos/modules/services/security/oauth2_proxy.nix
+++ b/nixos/modules/services/security/oauth2_proxy.nix
@@ -572,6 +572,7 @@ in
       description = "OAuth2 Proxy";
       path = [ cfg.package ];
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
 
       serviceConfig = {
diff --git a/nixos/modules/services/system/cachix-agent/default.nix b/nixos/modules/services/system/cachix-agent/default.nix
index 196d3291d5555..f8020fe970f1b 100644
--- a/nixos/modules/services/system/cachix-agent/default.nix
+++ b/nixos/modules/services/system/cachix-agent/default.nix
@@ -49,6 +49,7 @@ in {
   config = mkIf cfg.enable {
     systemd.services.cachix-agent = {
       description = "Cachix Deploy Agent";
+      wants = [ "network-online.target" ];
       after = ["network-online.target"];
       path = [ config.nix.package ];
       wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/services/system/cachix-watch-store.nix b/nixos/modules/services/system/cachix-watch-store.nix
index 8aa5f0358fa97..d48af29465aa5 100644
--- a/nixos/modules/services/system/cachix-watch-store.nix
+++ b/nixos/modules/services/system/cachix-watch-store.nix
@@ -61,6 +61,7 @@ in
   config = mkIf cfg.enable {
     systemd.services.cachix-watch-store-agent = {
       description = "Cachix watch store Agent";
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       path = [ config.nix.package ];
       wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/services/system/cloud-init.nix b/nixos/modules/services/system/cloud-init.nix
index d782bb1a36668..00ae77be4271c 100644
--- a/nixos/modules/services/system/cloud-init.nix
+++ b/nixos/modules/services/system/cloud-init.nix
@@ -164,7 +164,10 @@ in
     systemd.services.cloud-init-local = {
       description = "Initial cloud-init job (pre-networking)";
       wantedBy = [ "multi-user.target" ];
-      before = [ "systemd-networkd.service" ];
+      # In certain environments (AWS for example), cloud-init-local will
+      # first configure an IP through DHCP, and later delete it.
+      # This can cause race conditions with anything else trying to set IP through DHCP.
+      before = [ "systemd-networkd.service" "dhcpcd.service" ];
       path = path;
       serviceConfig = {
         Type = "oneshot";
diff --git a/nixos/modules/services/system/dbus.nix b/nixos/modules/services/system/dbus.nix
index b47ebc92f93a8..e8f8b48d0337f 100644
--- a/nixos/modules/services/system/dbus.nix
+++ b/nixos/modules/services/system/dbus.nix
@@ -95,6 +95,7 @@ in
         uid = config.ids.uids.messagebus;
         description = "D-Bus system message bus daemon user";
         home = homeDir;
+        homeMode = "0755";
         group = "messagebus";
       };
 
diff --git a/nixos/modules/services/video/go2rtc/default.nix b/nixos/modules/services/video/go2rtc/default.nix
index 13851fa0306f6..9dddbb60baa80 100644
--- a/nixos/modules/services/video/go2rtc/default.nix
+++ b/nixos/modules/services/video/go2rtc/default.nix
@@ -94,6 +94,7 @@ in
 
   config = lib.mkIf cfg.enable {
     systemd.services.go2rtc = {
+      wants = [ "network-online.target" ];
       after = [
         "network-online.target"
       ];
diff --git a/nixos/modules/services/web-apps/akkoma.nix b/nixos/modules/services/web-apps/akkoma.nix
index 8980556ab0142..4cd9e26643787 100644
--- a/nixos/modules/services/web-apps/akkoma.nix
+++ b/nixos/modules/services/web-apps/akkoma.nix
@@ -974,7 +974,7 @@ in {
       # This service depends on network-online.target and is sequenced after
       # it because it requires access to the Internet to function properly.
       bindsTo = [ "akkoma-config.service" ];
-      wants = [ "network-online.service" ];
+      wants = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       after = [
         "akkoma-config.target"
diff --git a/nixos/modules/services/web-apps/alps.nix b/nixos/modules/services/web-apps/alps.nix
index 05fb676102df4..81c6b8ad30b5f 100644
--- a/nixos/modules/services/web-apps/alps.nix
+++ b/nixos/modules/services/web-apps/alps.nix
@@ -94,6 +94,7 @@ in {
       description = "alps is a simple and extensible webmail.";
       documentation = [ "https://git.sr.ht/~migadu/alps" ];
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network.target" "network-online.target" ];
 
       serviceConfig = {
diff --git a/nixos/modules/services/web-apps/c2fmzq-server.nix b/nixos/modules/services/web-apps/c2fmzq-server.nix
index 87938fe160e14..dee131182de16 100644
--- a/nixos/modules/services/web-apps/c2fmzq-server.nix
+++ b/nixos/modules/services/web-apps/c2fmzq-server.nix
@@ -80,6 +80,7 @@ in {
       description = "c2FmZQ-server";
       documentation = [ "https://github.com/c2FmZQ/c2FmZQ/blob/main/README.md" ];
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network.target" "network-online.target" ];
 
       serviceConfig = {
diff --git a/nixos/modules/services/web-apps/code-server.nix b/nixos/modules/services/web-apps/code-server.nix
index 11601f6c30449..d087deb7848d0 100644
--- a/nixos/modules/services/web-apps/code-server.nix
+++ b/nixos/modules/services/web-apps/code-server.nix
@@ -205,6 +205,7 @@ in {
     systemd.services.code-server = {
       description = "Code server";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       path = cfg.extraPackages;
       environment = {
diff --git a/nixos/modules/services/web-apps/healthchecks.nix b/nixos/modules/services/web-apps/healthchecks.nix
index e5e425a29d54c..1d439f162313b 100644
--- a/nixos/modules/services/web-apps/healthchecks.nix
+++ b/nixos/modules/services/web-apps/healthchecks.nix
@@ -176,6 +176,7 @@ in
     systemd.targets.healthchecks = {
       description = "Target for all Healthchecks services";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network.target" "network-online.target" ];
     };
 
diff --git a/nixos/modules/services/web-apps/netbox.nix b/nixos/modules/services/web-apps/netbox.nix
index 88d40b3abc529..d034f3234a2bd 100644
--- a/nixos/modules/services/web-apps/netbox.nix
+++ b/nixos/modules/services/web-apps/netbox.nix
@@ -75,13 +75,17 @@ in {
     package = lib.mkOption {
       type = lib.types.package;
       default =
-        if lib.versionAtLeast config.system.stateVersion "23.11"
+        if lib.versionAtLeast config.system.stateVersion "24.05"
+        then pkgs.netbox_3_7
+        else if lib.versionAtLeast config.system.stateVersion "23.11"
         then pkgs.netbox_3_6
         else if lib.versionAtLeast config.system.stateVersion "23.05"
         then pkgs.netbox_3_5
         else pkgs.netbox_3_3;
       defaultText = lib.literalExpression ''
-        if lib.versionAtLeast config.system.stateVersion "23.11"
+        if lib.versionAtLeast config.system.stateVersion "24.05"
+        then pkgs.netbox_3_7
+        else if lib.versionAtLeast config.system.stateVersion "23.11"
         then pkgs.netbox_3_6
         else if lib.versionAtLeast config.system.stateVersion "23.05"
         then pkgs.netbox_3_5
@@ -267,6 +271,7 @@ in {
     systemd.targets.netbox = {
       description = "Target for all NetBox services";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" "redis-netbox.service" ];
     };
 
@@ -305,12 +310,13 @@ in {
           ${pkg}/bin/netbox trace_paths --no-input
           ${pkg}/bin/netbox collectstatic --no-input
           ${pkg}/bin/netbox remove_stale_contenttypes --no-input
-          # TODO: remove the condition when we remove netbox_3_3
-          ${lib.optionalString
-            (lib.versionAtLeast cfg.package.version "3.5.0")
-            "${pkg}/bin/netbox reindex --lazy"}
+          ${pkg}/bin/netbox reindex --lazy
           ${pkg}/bin/netbox clearsessions
-          ${pkg}/bin/netbox clearcache
+          ${lib.optionalString
+            # The clearcache command was removed in 3.7.0:
+            # https://github.com/netbox-community/netbox/issues/14458
+            (lib.versionOlder cfg.package.version "3.7.0")
+            "${pkg}/bin/netbox clearcache"}
 
           echo "${cfg.package.version}" > "$versionFile"
         '';
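
Since the default package now tracks system.stateVersion up to netbox_3_7 (and clearcache is only run for versions before 3.7.0), users who are not ready for the 3.7 upgrade can pin the package explicitly; a sketch, assuming the module's usual enable option:

  services.netbox = {
    enable = true;
    package = pkgs.netbox_3_6;  # pin instead of following stateVersion
  };
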
diff --git a/nixos/modules/services/web-apps/nextcloud.nix b/nixos/modules/services/web-apps/nextcloud.nix
index 38c51251aac1f..0b19265942c03 100644
--- a/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixos/modules/services/web-apps/nextcloud.nix
@@ -99,11 +99,101 @@ let
   mysqlLocal = cfg.database.createLocally && cfg.config.dbtype == "mysql";
   pgsqlLocal = cfg.database.createLocally && cfg.config.dbtype == "pgsql";
 
+  nextcloudGreaterOrEqualThan = versionAtLeast cfg.package.version;
+  nextcloudOlderThan = versionOlder cfg.package.version;
+
   # https://github.com/nextcloud/documentation/pull/11179
-  ocmProviderIsNotAStaticDirAnymore = versionAtLeast cfg.package.version "27.1.2"
-    || (versionOlder cfg.package.version "27.0.0"
-      && versionAtLeast cfg.package.version "26.0.8");
+  ocmProviderIsNotAStaticDirAnymore = nextcloudGreaterOrEqualThan "27.1.2"
+    || (nextcloudOlderThan "27.0.0" && nextcloudGreaterOrEqualThan "26.0.8");
+
+  overrideConfig = let
+    c = cfg.config;
+    requiresReadSecretFunction = c.dbpassFile != null || c.objectstore.s3.enable;
+    objectstoreConfig = let s3 = c.objectstore.s3; in optionalString s3.enable ''
+      'objectstore' => [
+        'class' => '\\OC\\Files\\ObjectStore\\S3',
+        'arguments' => [
+          'bucket' => '${s3.bucket}',
+          'autocreate' => ${boolToString s3.autocreate},
+          'key' => '${s3.key}',
+          'secret' => nix_read_secret('${s3.secretFile}'),
+          ${optionalString (s3.hostname != null) "'hostname' => '${s3.hostname}',"}
+          ${optionalString (s3.port != null) "'port' => ${toString s3.port},"}
+          'use_ssl' => ${boolToString s3.useSsl},
+          ${optionalString (s3.region != null) "'region' => '${s3.region}',"}
+          'use_path_style' => ${boolToString s3.usePathStyle},
+          ${optionalString (s3.sseCKeyFile != null) "'sse_c_key' => nix_read_secret('${s3.sseCKeyFile}'),"}
+        ],
+      ]
+    '';
+    showAppStoreSetting = cfg.appstoreEnable != null || cfg.extraApps != {};
+    renderedAppStoreSetting =
+      let
+        x = cfg.appstoreEnable;
+      in
+        if x == null then "false"
+        else boolToString x;
+    mkAppStoreConfig = name: { enabled, writable, ... }: optionalString enabled ''
+      [ 'path' => '${webroot}/${name}', 'url' => '/${name}', 'writable' => ${boolToString writable} ],
+    '';
+  in pkgs.writeText "nextcloud-config.php" ''
+    <?php
+    ${optionalString requiresReadSecretFunction ''
+      function nix_read_secret($file) {
+        if (!file_exists($file)) {
+          throw new \RuntimeException(sprintf(
+            "Cannot start Nextcloud, secret file %s set by NixOS doesn't seem to "
+            . "exist! Please make sure that the file exists and has appropriate "
+            . "permissions for user & group 'nextcloud'!",
+            $file
+          ));
+        }
+        return trim(file_get_contents($file));
+      }''}
+    function nix_decode_json_file($file, $error) {
+      if (!file_exists($file)) {
+        throw new \RuntimeException(sprintf($error, $file));
+      }
+      $decoded = json_decode(file_get_contents($file), true);
+
+      if (json_last_error() !== JSON_ERROR_NONE) {
+        throw new \RuntimeException(sprintf("Cannot decode %s, because: %s", $file, json_last_error_msg()));
+      }
 
+      return $decoded;
+    }
+    $CONFIG = [
+      'apps_paths' => [
+        ${concatStrings (mapAttrsToList mkAppStoreConfig appStores)}
+      ],
+      ${optionalString (showAppStoreSetting) "'appstoreenabled' => ${renderedAppStoreSetting},"}
+      ${optionalString cfg.caching.apcu "'memcache.local' => '\\OC\\Memcache\\APCu',"}
+      ${optionalString (c.dbname != null) "'dbname' => '${c.dbname}',"}
+      ${optionalString (c.dbhost != null) "'dbhost' => '${c.dbhost}',"}
+      ${optionalString (c.dbuser != null) "'dbuser' => '${c.dbuser}',"}
+      ${optionalString (c.dbtableprefix != null) "'dbtableprefix' => '${toString c.dbtableprefix}',"}
+      ${optionalString (c.dbpassFile != null) ''
+          'dbpassword' => nix_read_secret(
+            "${c.dbpassFile}"
+          ),
+        ''
+      }
+      'dbtype' => '${c.dbtype}',
+      ${objectstoreConfig}
+    ];
+
+    $CONFIG = array_replace_recursive($CONFIG, nix_decode_json_file(
+      "${jsonFormat.generate "nextcloud-extraOptions.json" cfg.extraOptions}",
+      "impossible: this should never happen (decoding generated extraOptions file %s failed)"
+    ));
+
+    ${optionalString (cfg.secretFile != null) ''
+      $CONFIG = array_replace_recursive($CONFIG, nix_decode_json_file(
+        "${cfg.secretFile}",
+        "Cannot start Nextcloud, secrets file %s set by NixOS doesn't exist!"
+      ));
+    ''}
+  '';
 in {
 
   imports = [
@@ -787,107 +877,23 @@ in {
         timerConfig.Unit = "nextcloud-cron.service";
       };
 
-      systemd.tmpfiles.rules = ["d ${cfg.home} 0750 nextcloud nextcloud"];
+      systemd.tmpfiles.rules = map (dir: "d ${dir} 0750 nextcloud nextcloud - -") [
+        "${cfg.home}"
+        "${datadir}/config"
+        "${datadir}/data"
+        "${cfg.home}/store-apps"
+      ] ++ [
+        "L+ ${datadir}/config/override.config.php - - - - ${overrideConfig}"
+      ];
 
       systemd.services = {
         # When upgrading the Nextcloud package, Nextcloud can report errors such as
         # "The files of the app [all apps in /var/lib/nextcloud/apps] were not replaced correctly"
         # Restarting phpfpm on Nextcloud package update fixes these issues (but this is a workaround).
-        phpfpm-nextcloud.restartTriggers = [ webroot ];
+        phpfpm-nextcloud.restartTriggers = [ webroot overrideConfig ];
 
         nextcloud-setup = let
           c = cfg.config;
-          requiresReadSecretFunction = c.dbpassFile != null || c.objectstore.s3.enable;
-          objectstoreConfig = let s3 = c.objectstore.s3; in optionalString s3.enable ''
-            'objectstore' => [
-              'class' => '\\OC\\Files\\ObjectStore\\S3',
-              'arguments' => [
-                'bucket' => '${s3.bucket}',
-                'autocreate' => ${boolToString s3.autocreate},
-                'key' => '${s3.key}',
-                'secret' => nix_read_secret('${s3.secretFile}'),
-                ${optionalString (s3.hostname != null) "'hostname' => '${s3.hostname}',"}
-                ${optionalString (s3.port != null) "'port' => ${toString s3.port},"}
-                'use_ssl' => ${boolToString s3.useSsl},
-                ${optionalString (s3.region != null) "'region' => '${s3.region}',"}
-                'use_path_style' => ${boolToString s3.usePathStyle},
-                ${optionalString (s3.sseCKeyFile != null) "'sse_c_key' => nix_read_secret('${s3.sseCKeyFile}'),"}
-              ],
-            ]
-          '';
-
-          showAppStoreSetting = cfg.appstoreEnable != null || cfg.extraApps != {};
-          renderedAppStoreSetting =
-            let
-              x = cfg.appstoreEnable;
-            in
-              if x == null then "false"
-              else boolToString x;
-
-          nextcloudGreaterOrEqualThan = req: versionAtLeast cfg.package.version req;
-
-          mkAppStoreConfig = name: { enabled, writable, ... }: optionalString enabled ''
-            [ 'path' => '${webroot}/${name}', 'url' => '/${name}', 'writable' => ${boolToString writable} ],
-          '';
-
-          overrideConfig = pkgs.writeText "nextcloud-config.php" ''
-            <?php
-            ${optionalString requiresReadSecretFunction ''
-              function nix_read_secret($file) {
-                if (!file_exists($file)) {
-                  throw new \RuntimeException(sprintf(
-                    "Cannot start Nextcloud, secret file %s set by NixOS doesn't seem to "
-                    . "exist! Please make sure that the file exists and has appropriate "
-                    . "permissions for user & group 'nextcloud'!",
-                    $file
-                  ));
-                }
-                return trim(file_get_contents($file));
-              }''}
-            function nix_decode_json_file($file, $error) {
-              if (!file_exists($file)) {
-                throw new \RuntimeException(sprintf($error, $file));
-              }
-              $decoded = json_decode(file_get_contents($file), true);
-
-              if (json_last_error() !== JSON_ERROR_NONE) {
-                throw new \RuntimeException(sprintf("Cannot decode %s, because: %s", $file, json_last_error_msg()));
-              }
-
-              return $decoded;
-            }
-            $CONFIG = [
-              'apps_paths' => [
-                ${concatStrings (mapAttrsToList mkAppStoreConfig appStores)}
-              ],
-              ${optionalString (showAppStoreSetting) "'appstoreenabled' => ${renderedAppStoreSetting},"}
-              ${optionalString cfg.caching.apcu "'memcache.local' => '\\OC\\Memcache\\APCu',"}
-              ${optionalString (c.dbname != null) "'dbname' => '${c.dbname}',"}
-              ${optionalString (c.dbhost != null) "'dbhost' => '${c.dbhost}',"}
-              ${optionalString (c.dbuser != null) "'dbuser' => '${c.dbuser}',"}
-              ${optionalString (c.dbtableprefix != null) "'dbtableprefix' => '${toString c.dbtableprefix}',"}
-              ${optionalString (c.dbpassFile != null) ''
-                  'dbpassword' => nix_read_secret(
-                    "${c.dbpassFile}"
-                  ),
-                ''
-              }
-              'dbtype' => '${c.dbtype}',
-              ${objectstoreConfig}
-            ];
-
-            $CONFIG = array_replace_recursive($CONFIG, nix_decode_json_file(
-              "${jsonFormat.generate "nextcloud-extraOptions.json" cfg.extraOptions}",
-              "impossible: this should never happen (decoding generated extraOptions file %s failed)"
-            ));
-
-            ${optionalString (cfg.secretFile != null) ''
-              $CONFIG = array_replace_recursive($CONFIG, nix_decode_json_file(
-                "${cfg.secretFile}",
-                "Cannot start Nextcloud, secrets file %s set by NixOS doesn't exist!"
-              ));
-            ''}
-          '';
           occInstallCmd = let
             mkExport = { arg, value }: "export ${arg}=${value}";
             dbpass = {
@@ -932,6 +938,7 @@ in {
           after = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
           requires = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
           path = [ occ ];
+          restartTriggers = [ overrideConfig ];
           script = ''
             ${optionalString (c.dbpassFile != null) ''
               if [ ! -r "${c.dbpassFile}" ]; then
@@ -959,18 +966,6 @@ in {
               fi
             '') [ "nix-apps" "apps" ]}
 
-            # create nextcloud directories.
-            # if the directories exist already with wrong permissions, we fix that
-            for dir in ${datadir}/config ${datadir}/data ${cfg.home}/store-apps; do
-              if [ ! -e $dir ]; then
-                install -o nextcloud -g nextcloud -d $dir
-              elif [ $(stat -c "%G" $dir) != "nextcloud" ]; then
-                chgrp -R nextcloud $dir
-              fi
-            done
-
-            ln -sf ${overrideConfig} ${datadir}/config/override.config.php
-
             # Do not install if already installed
             if [[ ! -e ${datadir}/config/config.php ]]; then
               ${occInstallCmd}
diff --git a/nixos/modules/services/web-apps/openvscode-server.nix b/nixos/modules/services/web-apps/openvscode-server.nix
index 76a19dccae165..81b9d1f3b4c8c 100644
--- a/nixos/modules/services/web-apps/openvscode-server.nix
+++ b/nixos/modules/services/web-apps/openvscode-server.nix
@@ -159,6 +159,7 @@ in
     systemd.services.openvscode-server = {
       description = "OpenVSCode server";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       path = cfg.extraPackages;
       environment = cfg.extraEnvironment;
diff --git a/nixos/modules/services/web-apps/peering-manager.nix b/nixos/modules/services/web-apps/peering-manager.nix
index d6f6077268d46..0382ce7174738 100644
--- a/nixos/modules/services/web-apps/peering-manager.nix
+++ b/nixos/modules/services/web-apps/peering-manager.nix
@@ -196,6 +196,7 @@ in {
     systemd.targets.peering-manager = {
       description = "Target for all Peering Manager services";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" "redis-peering-manager.service" ];
     };
 
diff --git a/nixos/modules/services/web-apps/suwayomi-server.md b/nixos/modules/services/web-apps/suwayomi-server.md
new file mode 100644
index 0000000000000..ff1e06c8a53ae
--- /dev/null
+++ b/nixos/modules/services/web-apps/suwayomi-server.md
@@ -0,0 +1,108 @@
+# Suwayomi-Server {#module-services-suwayomi-server}
+
+A free and open source manga reader server that runs extensions built for Tachiyomi.
+
+## Basic usage {#module-services-suwayomi-server-basic-usage}
+
+By default, the module runs the Suwayomi-Server backend and its web UI:
+
+```nix
+{ ... }:
+
+{
+  services.suwayomi-server = {
+    enable = true;
+  };
+}
+```
+
+It runs as the systemd service `suwayomi-server` and uses `/var/lib/suwayomi-server` as its data directory.
+
+You can override the default parameters, for example:
+```nix
+{ ... }:
+
+{
+  services.suwayomi-server = {
+    enable = true;
+
+    dataDir = "/var/lib/suwayomi"; # Default is "/var/lib/suwayomi-server"
+    openFirewall = true;
+
+    settings = {
+      server.port = 4567;
+    };
+  };
+}
+```
+
+If you want a system tray icon, you can enable the corresponding option:
+
+```nix
+{ ... }:
+
+{
+  services.suwayomi-server = {
+    enable = true;
+
+    dataDir = "/var/lib/suwayomi"; # Default is "/var/lib/suwayomi-server"
+    openFirewall = true;
+
+    settings = {
+      server.port = 4567;
+      server.systemTrayEnabled = true;
+    };
+  };
+}
+```
+
+## Basic authentication {#module-services-suwayomi-server-basic-auth}
+
+You can configure basic authentication for the web interface with:
+
+```nix
+{ ... }:
+
+{
+  services.suwayomi-server = {
+    enable = true;
+
+    openFirewall = true;
+
+    settings = {
+      server.port = 4567;
+      server = {
+        basicAuthEnabled = true;
+        basicAuthUsername = "username";
+
+        # NOTE: this is not a real upstream option
+        basicAuthPasswordFile = ./path/to/the/password/file;
+      };
+    };
+  };
+}
+```
+
+## Extra configuration {#module-services-suwayomi-server-extra-config}
+
+Not all configuration options are available directly in this module, but you can set any other Suwayomi-Server option through `settings`:
+
+```nix
+{ ... }:
+
+{
+  services.suwayomi-server = {
+    enable = true;
+
+    openFirewall = true;
+
+    settings = {
+      server = {
+        port = 4567;
+        autoDownloadNewChapters = false;
+        maxSourcesInParallel" = 6;
+      };
+    };
+  };
+}
+```
diff --git a/nixos/modules/services/web-apps/suwayomi-server.nix b/nixos/modules/services/web-apps/suwayomi-server.nix
new file mode 100644
index 0000000000000..c4c1540edbee5
--- /dev/null
+++ b/nixos/modules/services/web-apps/suwayomi-server.nix
@@ -0,0 +1,260 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.suwayomi-server;
+  inherit (lib) mkOption mdDoc mkEnableOption mkIf types;
+in
+{
+  options = {
+    services.suwayomi-server = {
+      enable = mkEnableOption (mdDoc "Suwayomi, a free and open source manga reader server that runs extensions built for Tachiyomi.");
+
+      package = lib.mkPackageOptionMD pkgs "suwayomi-server" { };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/suwayomi-server";
+        example = "/var/data/mangas";
+        description = mdDoc ''
+          The path to the data directory in which Suwayomi-Server will download scans.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "suwayomi";
+        example = "root";
+        description = mdDoc ''
+          User account under which Suwayomi-Server runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "suwayomi";
+        example = "medias";
+        description = mdDoc ''
+          Group under which Suwayomi-Server runs.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Whether to open the firewall for the port in {option}`services.suwayomi-server.settings.server.port`.
+        '';
+      };
+
+      settings = mkOption {
+        type = types.submodule {
+          freeformType =
+            let
+              recursiveAttrsType = with types; attrsOf (nullOr (oneOf [
+                str
+                path
+                int
+                float
+                bool
+                (listOf str)
+                (recursiveAttrsType // { description = "instances of this type recursively"; })
+              ]));
+            in
+            recursiveAttrsType;
+          options = {
+            server = {
+              ip = mkOption {
+                type = types.str;
+                default = "0.0.0.0";
+                example = "127.0.0.1";
+                description = mdDoc ''
+                  The IP address that Suwayomi will bind to.
+                '';
+              };
+
+              port = mkOption {
+                type = types.port;
+                default = 8080;
+                example = 4567;
+                description = mdDoc ''
+                  The port that Suwayomi will listen on.
+                '';
+              };
+
+              basicAuthEnabled = mkEnableOption (mdDoc ''
+                Add basic access authentication to Suwayomi-Server.
+                Enabling this option is useful when hosting on a public network or the Internet.
+              '');
+
+              basicAuthUsername = mkOption {
+                type = types.nullOr types.str;
+                default = null;
+                description = mdDoc ''
+                  The username value that you have to provide when authenticating.
+                '';
+              };
+
+              # NOTE: this is not a real upstream option
+              basicAuthPasswordFile = mkOption {
+                type = types.nullOr types.path;
+                default = null;
+                example = "/var/secrets/suwayomi-server-password";
+                description = mdDoc ''
+                  The password file containing the value that you have to provide when authenticating.
+                '';
+              };
+
+              downloadAsCbz = mkOption {
+                type = types.bool;
+                default = false;
+                description = mdDoc ''
+                  Download chapters as `.cbz` files.
+                '';
+              };
+
+              localSourcePath = mkOption {
+                type = types.path;
+                default = cfg.dataDir;
+                defaultText = lib.literalExpression "suwayomi-server.dataDir";
+                example = "/var/data/local_mangas";
+                description = mdDoc ''
+                  Path to the local source folder.
+                '';
+              };
+
+              systemTrayEnabled = mkOption {
+                type = types.bool;
+                default = false;
+                description = mdDoc ''
+                  Whether to enable a system tray icon, if possible.
+                '';
+              };
+            };
+          };
+        };
+        description = mdDoc ''
+          Configuration to write to {file}`server.conf`.
+          See <https://github.com/Suwayomi/Suwayomi-Server/wiki/Configuring-Suwayomi-Server> for more information.
+        '';
+        default = { };
+        example = {
+          server.socksProxyEnabled = true;
+          server.socksProxyHost = "yourproxyhost.com";
+          server.socksProxyPort = "8080";
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [{
+      assertion = with cfg.settings.server; basicAuthEnabled -> (basicAuthUsername != null && basicAuthPasswordFile != null);
+      message = ''
+        [suwayomi-server]: the username and the password file cannot be null when basic auth is enabled
+      '';
+    }];
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.settings.server.port ];
+
+    users.groups = mkIf (cfg.group == "suwayomi") {
+      suwayomi = { };
+    };
+
+    users.users = mkIf (cfg.user == "suwayomi") {
+      suwayomi = {
+        group = cfg.group;
+        # Need to set the user home because the package writes to ~/.local/Tachidesk
+        home = cfg.dataDir;
+        description = "Suwayomi Daemon user";
+        isSystemUser = true;
+      };
+    };
+
+    systemd.tmpfiles.settings."10-suwayomi-server" = {
+      "${cfg.dataDir}/.local/share/Tachidesk".d = {
+        mode = "0700";
+        inherit (cfg) user group;
+      };
+    };
+
+    systemd.services.suwayomi-server =
+      let
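+        # flattenConfig turns nested settings into dotted keys; for example
+        # flattenConfig "" { server = { port = 4567; }; } yields { "server.port" = 4567; }.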
+        flattenConfig = prefix: config:
+          lib.foldl'
+            lib.mergeAttrs
+            { }
+            (lib.attrValues
+              (lib.mapAttrs
+                (k: v:
+                  if !(lib.isAttrs v)
+                  then { "${prefix}${k}" = v; }
+                  else flattenConfig "${prefix}${k}." v
+                )
+                config
+              )
+            );
+
+        # HOCON is a JSON superset that suwayomi-server uses for its configuration
+        toHOCON = attr:
+          let
+            attrType = builtins.typeOf attr;
+          in
+          if builtins.elem attrType [ "string" "path" "int" "float" ]
+          then ''"${toString attr}"''
+          else if attrType == "bool"
+          then lib.boolToString attr
+          else if attrType == "list"
+          then "[\n${lib.concatMapStringsSep ",\n" toHOCON attr}\n]"
+          else # attrs, lambda, null
+            throw ''
+              [suwayomi-server]: invalid config value type '${attrType}'.
+            '';
+
+        configFile = pkgs.writeText "server.conf" (lib.pipe cfg.settings [
+          (settings: lib.recursiveUpdate settings {
+            server.basicAuthPasswordFile = null;
+            server.basicAuthPassword =
+              if settings.server.basicAuthEnabled
+              then "$TACHIDESK_SERVER_BASIC_AUTH_PASSWORD"
+              else null;
+          })
+          (flattenConfig "")
+          (lib.filterAttrs (_: x: x != null))
+          (lib.mapAttrsToList (name: value: ''${name} = ${toHOCON value}''))
+          lib.concatLines
+        ]);
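+        # With the default settings, configFile contains lines such as
+        # `server.ip = "0.0.0.0"` and `server.port = "8080"`; toHOCON quotes
+        # strings and numbers, while booleans are rendered bare.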
+
+      in
+      {
+        description = "A free and open source manga reader server that runs extensions built for Tachiyomi.";
+
+        wantedBy = [ "multi-user.target" ];
+        wants = [ "network-online.target" ];
+        after = [ "network-online.target" ];
+
+        script = ''
+          ${lib.optionalString cfg.settings.server.basicAuthEnabled ''
+            export TACHIDESK_SERVER_BASIC_AUTH_PASSWORD="$(<${cfg.settings.server.basicAuthPasswordFile})"
+          ''}
+          ${lib.getExe pkgs.envsubst} -i ${configFile} -o ${cfg.dataDir}/.local/share/Tachidesk/server.conf
+          ${lib.getExe cfg.package} -Dsuwayomi.tachidesk.config.server.rootDir=${cfg.dataDir}
+        '';
+
+        serviceConfig = {
+          User = cfg.user;
+          Group = cfg.group;
+
+          Type = "simple";
+          Restart = "on-failure";
+
+          StateDirectory = mkIf (cfg.dataDir == "/var/lib/suwayomi-server") "suwayomi-server";
+        };
+      };
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ ratcornu ];
+    doc = ./suwayomi-server.md;
+  };
+}
diff --git a/nixos/modules/services/web-apps/wordpress.nix b/nixos/modules/services/web-apps/wordpress.nix
index 002d6683b2ed5..2f7306309d694 100644
--- a/nixos/modules/services/web-apps/wordpress.nix
+++ b/nixos/modules/services/web-apps/wordpress.nix
@@ -174,22 +174,22 @@ let
             List of path(s) to respective language(s) which are copied from the 'languages' directory.
           '';
           example = literalExpression ''
-            [(
+            [
               # Let's package the German language.
               # For other languages try to replace language and country code in the download URL with your desired one.
               # Reference https://translate.wordpress.org for available translations and
               # codes.
-              language-de = pkgs.stdenv.mkDerivation {
+              (pkgs.stdenv.mkDerivation {
                 name = "language-de";
                 src = pkgs.fetchurl {
                   url = "https://de.wordpress.org/wordpress-''${pkgs.wordpress.version}-de_DE.tar.gz";
                   # Name is required to invalidate the hash when wordpress is updated
-                  name = "wordpress-''${pkgs.wordpress.version}-language-de"
+                  name = "wordpress-''${pkgs.wordpress.version}-language-de";
                   sha256 = "sha256-dlas0rXTSV4JAl8f/UyMbig57yURRYRhTMtJwF9g8h0=";
                 };
                 installPhase = "mkdir -p $out; cp -r ./wp-content/languages/* $out/";
-              };
-            )];
+              })
+            ];
           '';
         };
 
diff --git a/nixos/modules/services/web-servers/agate.nix b/nixos/modules/services/web-servers/agate.nix
index dce425035ff72..e03174c87945b 100644
--- a/nixos/modules/services/web-servers/agate.nix
+++ b/nixos/modules/services/web-servers/agate.nix
@@ -71,6 +71,7 @@ in
     systemd.services.agate = {
       description = "Agate";
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = [ "network.target" "network-online.target" ];
 
       script =
diff --git a/nixos/modules/services/web-servers/mighttpd2.nix b/nixos/modules/services/web-servers/mighttpd2.nix
index bdd6d8b62aa36..bb75dc4f2ff47 100644
--- a/nixos/modules/services/web-servers/mighttpd2.nix
+++ b/nixos/modules/services/web-servers/mighttpd2.nix
@@ -101,6 +101,7 @@ in {
       ];
     systemd.services.mighttpd2 = {
       description = "Mighttpd2 web server";
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
diff --git a/nixos/modules/services/web-servers/minio.nix b/nixos/modules/services/web-servers/minio.nix
index 6431db250476b..be6946657e23d 100644
--- a/nixos/modules/services/web-servers/minio.nix
+++ b/nixos/modules/services/web-servers/minio.nix
@@ -98,6 +98,7 @@ in
 
       services.minio = {
         description = "Minio Object Storage";
+        wants = [ "network-online.target" ];
         after = [ "network-online.target" ];
         wantedBy = [ "multi-user.target" ];
         serviceConfig = {
diff --git a/nixos/modules/services/web-servers/traefik.nix b/nixos/modules/services/web-servers/traefik.nix
index cc2c680b33424..fc9eb504ebf81 100644
--- a/nixos/modules/services/web-servers/traefik.nix
+++ b/nixos/modules/services/web-servers/traefik.nix
@@ -144,6 +144,7 @@ in {
 
     systemd.services.traefik = {
       description = "Traefik web server";
+      wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
       startLimitIntervalSec = 86400;
diff --git a/nixos/modules/services/web-servers/ttyd.nix b/nixos/modules/services/web-servers/ttyd.nix
index 3b1d87ccb483e..e545869ca4320 100644
--- a/nixos/modules/services/web-servers/ttyd.nix
+++ b/nixos/modules/services/web-servers/ttyd.nix
@@ -180,10 +180,11 @@ in
         # Runs login which needs to be run as root
         # login: Cannot possibly work without effective root
         User = "root";
+        LoadCredential = lib.optionalString (cfg.passwordFile != null) "TTYD_PASSWORD_FILE:${cfg.passwordFile}";
       };
 
       script = if cfg.passwordFile != null then ''
-        PASSWORD=$(cat ${escapeShellArg cfg.passwordFile})
+        PASSWORD=$(cat "$CREDENTIALS_DIRECTORY/TTYD_PASSWORD_FILE")
         ${pkgs.ttyd}/bin/ttyd ${lib.escapeShellArgs args} \
           --credential ${escapeShellArg cfg.username}:"$PASSWORD" \
           ${pkgs.shadow}/bin/login
diff --git a/nixos/modules/services/x11/xserver.nix b/nixos/modules/services/x11/xserver.nix
index 4a8f2f61caaf4..36f25d5547cae 100644
--- a/nixos/modules/services/x11/xserver.nix
+++ b/nixos/modules/services/x11/xserver.nix
@@ -804,14 +804,14 @@ in
       ];
 
     system.checks = singleton (pkgs.runCommand "xkb-validated" {
-      inherit (cfg.xkb) model layout variant options;
+      inherit (cfg.xkb) dir model layout variant options;
       nativeBuildInputs = with pkgs.buildPackages; [ xkbvalidate ];
       preferLocalBuild = true;
     } ''
       ${optionalString (config.environment.sessionVariables ? XKB_CONFIG_ROOT)
         "export XKB_CONFIG_ROOT=${config.environment.sessionVariables.XKB_CONFIG_ROOT}"
       }
-      xkbvalidate "$model" "$layout" "$variant" "$options"
+      XKB_CONFIG_ROOT="$dir" xkbvalidate "$model" "$layout" "$variant" "$options"
       touch "$out"
     '');
 
diff --git a/nixos/modules/system/activation/switch-to-configuration.pl b/nixos/modules/system/activation/switch-to-configuration.pl
index e2f66a287bc4f..ba45231465fb4 100755
--- a/nixos/modules/system/activation/switch-to-configuration.pl
+++ b/nixos/modules/system/activation/switch-to-configuration.pl
@@ -889,9 +889,15 @@ while (my $f = <$list_active_users>) {
 
 close($list_active_users) || die("Unable to close the file handle to loginctl");
 
-# Set the new tmpfiles
-print STDERR "setting up tmpfiles\n";
-system("$new_systemd/bin/systemd-tmpfiles", "--create", "--remove", "--exclude-prefix=/dev") == 0 or $res = 3;
+# Restart sysinit-reactivation.target.
+# This target only exists to restart services ordered before sysinit.target. We
+# cannot use X-StopOnReconfiguration to restart sysinit.target because then ALL
+# services of the system would be restarted since all normal services have a
+# default dependency on sysinit.target. sysinit-reactivation.target ensures
+# that services ordered BEFORE sysinit.target get re-started in the correct
+# order. Ordering between these services is respected.
+print STDERR "restarting sysinit-reactivation.target\n";
+system("$new_systemd/bin/systemctl", "restart", "sysinit-reactivation.target") == 0 or $res = 4;
 
 # Before reloading we need to ensure that the units are still active. They may have been
 # deactivated because one of their requirements got stopped. If they are inactive
diff --git a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
index 6cd46f30373b5..055afe95df60b 100644
--- a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
+++ b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
@@ -15,6 +15,19 @@ import json
 from typing import NamedTuple, Dict, List
 from dataclasses import dataclass
 
+# These values will be replaced with actual values during the package build
+EFI_SYS_MOUNT_POINT = "@efiSysMountPoint@"
+TIMEOUT = "@timeout@"
+EDITOR = bool("@editor@")
+CONSOLE_MODE = "@consoleMode@"
+BOOTSPEC_TOOLS = "@bootspecTools@"
+DISTRO_NAME = "@distroName@"
+NIX = "@nix@"
+SYSTEMD = "@systemd@"
+CONFIGURATION_LIMIT = int("@configurationLimit@")
+CAN_TOUCH_EFI_VARIABLES = "@canTouchEfiVariables@"
+GRACEFUL = "@graceful@"
+COPY_EXTRA_FILES = "@copyExtraFiles@"
 
 @dataclass
 class BootSpec:
@@ -29,7 +42,6 @@ class BootSpec:
     initrdSecrets: str | None = None
 
 
-
 libc = ctypes.CDLL("libc.so.6")
 
 class SystemIdentifier(NamedTuple):
@@ -75,16 +87,16 @@ def generation_conf_filename(profile: str | None, generation: int, specialisatio
 
 
 def write_loader_conf(profile: str | None, generation: int, specialisation: str | None) -> None:
-    with open("@efiSysMountPoint@/loader/loader.conf.tmp", 'w') as f:
-        if "@timeout@" != "":
-            f.write("timeout @timeout@\n")
+    with open(f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf.tmp", 'w') as f:
+        if TIMEOUT != "":
+            f.write(f"timeout {TIMEOUT}\n")
         f.write("default %s\n" % generation_conf_filename(profile, generation, specialisation))
-        if not @editor@:
+        if not EDITOR:
             f.write("editor 0\n")
-        f.write("console-mode @consoleMode@\n")
+        f.write(f"console-mode {CONSOLE_MODE}\n")
         f.flush()
         os.fsync(f.fileno())
-    os.rename("@efiSysMountPoint@/loader/loader.conf.tmp", "@efiSysMountPoint@/loader/loader.conf")
+    os.rename(f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf.tmp", f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf")
 
 
 def get_bootspec(profile: str | None, generation: int) -> BootSpec:
@@ -95,7 +107,7 @@ def get_bootspec(profile: str | None, generation: int) -> BootSpec:
         bootspec_json = json.load(boot_json_f)
     else:
         boot_json_str = subprocess.check_output([
-        "@bootspecTools@/bin/synthesize",
+        f"{BOOTSPEC_TOOLS}/bin/synthesize",
         "--version",
         "1",
         system_directory,
@@ -116,7 +128,7 @@ def copy_from_file(file: str, dry_run: bool = False) -> str:
     store_dir = os.path.basename(os.path.dirname(store_file_path))
     efi_file_path = "/efi/nixos/%s-%s.efi" % (store_dir, suffix)
     if not dry_run:
-        copy_if_not_exists(store_file_path, "@efiSysMountPoint@%s" % (efi_file_path))
+        copy_if_not_exists(store_file_path, f"{EFI_SYS_MOUNT_POINT}%s" % (efi_file_path))
     return efi_file_path
 
 def write_entry(profile: str | None, generation: int, specialisation: str | None,
@@ -126,13 +138,14 @@ def write_entry(profile: str | None, generation: int, specialisation: str | None
     kernel = copy_from_file(bootspec.kernel)
     initrd = copy_from_file(bootspec.initrd)
 
-    title = "@distroName@{profile}{specialisation}".format(
+    title = "{name}{profile}{specialisation}".format(
+        name=DISTRO_NAME,
         profile=" [" + profile + "]" if profile else "",
         specialisation=" (%s)" % specialisation if specialisation else "")
 
     try:
         if bootspec.initrdSecrets is not None:
-            subprocess.check_call([bootspec.initrdSecrets, "@efiSysMountPoint@%s" % (initrd)])
+            subprocess.check_call([bootspec.initrdSecrets, f"{EFI_SYS_MOUNT_POINT}%s" % (initrd)])
     except subprocess.CalledProcessError:
         if current:
             print("failed to create initrd secrets!", file=sys.stderr)
@@ -142,7 +155,7 @@ def write_entry(profile: str | None, generation: int, specialisation: str | None
                   f'for "{title} - Configuration {generation}", an older generation', file=sys.stderr)
             print("note: this is normal after having removed "
                   "or renamed a file in `boot.initrd.secrets`", file=sys.stderr)
-    entry_file = "@efiSysMountPoint@/loader/entries/%s" % (
+    entry_file = f"{EFI_SYS_MOUNT_POINT}/loader/entries/%s" % (
         generation_conf_filename(profile, generation, specialisation))
     tmp_path = "%s.tmp" % (entry_file)
     kernel_params = "init=%s " % bootspec.init
@@ -167,7 +180,7 @@ def write_entry(profile: str | None, generation: int, specialisation: str | None
 
 def get_generations(profile: str | None = None) -> list[SystemIdentifier]:
     gen_list = subprocess.check_output([
-        "@nix@/bin/nix-env",
+        f"{NIX}/bin/nix-env",
         "--list-generations",
         "-p",
         "/nix/var/nix/profiles/%s" % ("system-profiles/" + profile if profile else "system"),
@@ -176,7 +189,7 @@ def get_generations(profile: str | None = None) -> list[SystemIdentifier]:
     gen_lines = gen_list.split('\n')
     gen_lines.pop()
 
-    configurationLimit = @configurationLimit@
+    configurationLimit = CONFIGURATION_LIMIT
     configurations = [
         SystemIdentifier(
             profile=profile,
@@ -189,14 +202,14 @@ def get_generations(profile: str | None = None) -> list[SystemIdentifier]:
 
 
 def remove_old_entries(gens: list[SystemIdentifier]) -> None:
-    rex_profile = re.compile(r"^@efiSysMountPoint@/loader/entries/nixos-(.*)-generation-.*\.conf$")
-    rex_generation = re.compile(r"^@efiSysMountPoint@/loader/entries/nixos.*-generation-([0-9]+)(-specialisation-.*)?\.conf$")
+    rex_profile = re.compile(r"^" + re.escape(EFI_SYS_MOUNT_POINT) + "/loader/entries/nixos-(.*)-generation-.*\.conf$")
+    rex_generation = re.compile(r"^" + re.escape(EFI_SYS_MOUNT_POINT) + "/loader/entries/nixos.*-generation-([0-9]+)(-specialisation-.*)?\.conf$")
     known_paths = []
     for gen in gens:
         bootspec = get_bootspec(gen.profile, gen.generation)
         known_paths.append(copy_from_file(bootspec.kernel, True))
         known_paths.append(copy_from_file(bootspec.initrd, True))
-    for path in glob.iglob("@efiSysMountPoint@/loader/entries/nixos*-generation-[1-9]*.conf"):
+    for path in glob.iglob(f"{EFI_SYS_MOUNT_POINT}/loader/entries/nixos*-generation-[1-9]*.conf"):
         if rex_profile.match(path):
             prof = rex_profile.sub(r"\1", path)
         else:
@@ -207,7 +220,7 @@ def remove_old_entries(gens: list[SystemIdentifier]) -> None:
             continue
         if not (prof, gen_number, None) in gens:
             os.unlink(path)
-    for path in glob.iglob("@efiSysMountPoint@/efi/nixos/*"):
+    for path in glob.iglob(f"{EFI_SYS_MOUNT_POINT}/efi/nixos/*"):
         if not path in known_paths and not os.path.isdir(path):
             os.unlink(path)
 
@@ -230,7 +243,7 @@ def install_bootloader(args: argparse.Namespace) -> None:
         # Since systemd version 232 a machine ID is required and it might not
         # be there on newly installed systems, so let's generate one so that
         # bootctl can find it and we can also pass it to write_entry() later.
-        cmd = ["@systemd@/bin/systemd-machine-id-setup", "--print"]
+        cmd = [f"{SYSTEMD}/bin/systemd-machine-id-setup", "--print"]
         machine_id = subprocess.run(
           cmd, text=True, check=True, stdout=subprocess.PIPE
         ).stdout.rstrip()
@@ -242,22 +255,22 @@ def install_bootloader(args: argparse.Namespace) -> None:
     # flags to pass to bootctl install/update
     bootctl_flags = []
 
-    if "@canTouchEfiVariables@" != "1":
+    if CAN_TOUCH_EFI_VARIABLES != "1":
         bootctl_flags.append("--no-variables")
 
-    if "@graceful@" == "1":
+    if GRACEFUL == "1":
         bootctl_flags.append("--graceful")
 
     if os.getenv("NIXOS_INSTALL_BOOTLOADER") == "1":
         # bootctl uses fopen() with modes "wxe" and fails if the file exists.
-        if os.path.exists("@efiSysMountPoint@/loader/loader.conf"):
-            os.unlink("@efiSysMountPoint@/loader/loader.conf")
+        if os.path.exists(f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf"):
+            os.unlink(f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf")
 
-        subprocess.check_call(["@systemd@/bin/bootctl", "--esp-path=@efiSysMountPoint@"] + bootctl_flags + ["install"])
+        subprocess.check_call([f"{SYSTEMD}/bin/bootctl", f"--esp-path={EFI_SYS_MOUNT_POINT}"] + bootctl_flags + ["install"])
     else:
         # Update bootloader to latest if needed
-        available_out = subprocess.check_output(["@systemd@/bin/bootctl", "--version"], universal_newlines=True).split()[2]
-        installed_out = subprocess.check_output(["@systemd@/bin/bootctl", "--esp-path=@efiSysMountPoint@", "status"], universal_newlines=True)
+        available_out = subprocess.check_output([f"{SYSTEMD}/bin/bootctl", "--version"], universal_newlines=True).split()[2]
+        installed_out = subprocess.check_output([f"{SYSTEMD}/bin/bootctl", f"--esp-path={EFI_SYS_MOUNT_POINT}", "status"], universal_newlines=True)
 
         # See status_binaries() in systemd bootctl.c for code which generates this
         installed_match = re.search(r"^\W+File:.*/EFI/(?:BOOT|systemd)/.*\.efi \(systemd-boot ([\d.]+[^)]*)\)$",
@@ -276,10 +289,10 @@ def install_bootloader(args: argparse.Namespace) -> None:
 
         if installed_version < available_version:
             print("updating systemd-boot from %s to %s" % (installed_version, available_version))
-            subprocess.check_call(["@systemd@/bin/bootctl", "--esp-path=@efiSysMountPoint@"] + bootctl_flags + ["update"])
+            subprocess.check_call([f"{SYSTEMD}/bin/bootctl", f"--esp-path={EFI_SYS_MOUNT_POINT}"] + bootctl_flags + ["update"])
 
-    os.makedirs("@efiSysMountPoint@/efi/nixos", exist_ok=True)
-    os.makedirs("@efiSysMountPoint@/loader/entries", exist_ok=True)
+    os.makedirs(f"{EFI_SYS_MOUNT_POINT}/efi/nixos", exist_ok=True)
+    os.makedirs(f"{EFI_SYS_MOUNT_POINT}/loader/entries", exist_ok=True)
 
     gens = get_generations()
     for profile in get_profiles():
@@ -302,9 +315,9 @@ def install_bootloader(args: argparse.Namespace) -> None:
             else:
                 raise e
 
-    for root, _, files in os.walk('@efiSysMountPoint@/efi/nixos/.extra-files', topdown=False):
-        relative_root = root.removeprefix("@efiSysMountPoint@/efi/nixos/.extra-files").removeprefix("/")
-        actual_root = os.path.join("@efiSysMountPoint@", relative_root)
+    for root, _, files in os.walk(f"{EFI_SYS_MOUNT_POINT}/efi/nixos/.extra-files", topdown=False):
+        relative_root = root.removeprefix(f"{EFI_SYS_MOUNT_POINT}/efi/nixos/.extra-files").removeprefix("/")
+        actual_root = os.path.join(f"{EFI_SYS_MOUNT_POINT}", relative_root)
 
         for file in files:
             actual_file = os.path.join(actual_root, file)
@@ -317,14 +330,14 @@ def install_bootloader(args: argparse.Namespace) -> None:
             os.rmdir(actual_root)
         os.rmdir(root)
 
-    os.makedirs("@efiSysMountPoint@/efi/nixos/.extra-files", exist_ok=True)
+    os.makedirs(f"{EFI_SYS_MOUNT_POINT}/efi/nixos/.extra-files", exist_ok=True)
 
-    subprocess.check_call("@copyExtraFiles@")
+    subprocess.check_call(COPY_EXTRA_FILES)
 
 
 def main() -> None:
-    parser = argparse.ArgumentParser(description='Update @distroName@-related systemd-boot files')
-    parser.add_argument('default_config', metavar='DEFAULT-CONFIG', help='The default @distroName@ config to boot')
+    parser = argparse.ArgumentParser(description=f"Update {DISTRO_NAME}-related systemd-boot files")
+    parser.add_argument('default_config', metavar='DEFAULT-CONFIG', help=f"The default {DISTRO_NAME} config to boot")
     args = parser.parse_args()
 
     try:
@@ -334,9 +347,9 @@ def main() -> None:
         # it can leave the system in an unbootable state, when a crash/outage
         # happens shortly after an update. To decrease the likelihood of this
         # event sync the efi filesystem after each update.
-        rc = libc.syncfs(os.open("@efiSysMountPoint@", os.O_RDONLY))
+        rc = libc.syncfs(os.open(f"{EFI_SYS_MOUNT_POINT}", os.O_RDONLY))
         if rc != 0:
-            print("could not sync @efiSysMountPoint@: {}".format(os.strerror(rc)), file=sys.stderr)
+            print(f"could not sync {EFI_SYS_MOUNT_POINT}: {os.strerror(rc)}", file=sys.stderr)
 
 
 if __name__ == '__main__':
diff --git a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix
index 9d55c21077d13..3b140726c2d6a 100644
--- a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix
+++ b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix
@@ -81,7 +81,11 @@ in {
 
       type = types.bool;
 
-      description = lib.mdDoc "Whether to enable the systemd-boot (formerly gummiboot) EFI boot manager";
+      description = lib.mdDoc ''
+        Whether to enable the systemd-boot (formerly gummiboot) EFI boot manager.
+        For more information about systemd-boot, see:
+        https://www.freedesktop.org/wiki/Software/systemd/systemd-boot/
+      '';
     };
 
     editor = mkOption {
diff --git a/nixos/modules/system/boot/resolved.nix b/nixos/modules/system/boot/resolved.nix
index 538f71cc0b9ae..c42c88163c564 100644
--- a/nixos/modules/system/boot/resolved.nix
+++ b/nixos/modules/system/boot/resolved.nix
@@ -95,6 +95,29 @@ in
       '';
     };
 
+    services.resolved.dnsovertls = mkOption {
+      default = "false";
+      example = "true";
+      type = types.enum [ "true" "opportunistic" "false" ];
+      description = lib.mdDoc ''
+        If set to
+        - `"true"`:
+            all DNS lookups will be encrypted. This requires
+            that the DNS server supports DNS-over-TLS and
+            has a valid certificate. If the hostname was specified
+            via the `address#hostname` format in {option}`services.resolved.domains`
+            then the specified hostname is used to validate its certificate.
+        - `"opportunistic"`:
+            all DNS lookups will attempt to be encrypted, but will fall back
+            to unencrypted requests if the server does not support DNS-over-TLS.
+            Note that this mode allows a malicious party to conduct a
+            downgrade attack by imitating the DNS server and pretending to not
+            support encryption.
+        - `"false"`:
+            all DNS lookups are done unencrypted.
+      '';
+    };
+
     services.resolved.extraConfig = mkOption {
       default = "";
       type = types.lines;
@@ -141,6 +164,7 @@ in
           "Domains=${concatStringsSep " " cfg.domains}"}
         LLMNR=${cfg.llmnr}
         DNSSEC=${cfg.dnssec}
+        DNSOverTLS=${cfg.dnsovertls}
         ${config.services.resolved.extraConfig}
       '';
 
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index c3902007906ad..331ca5103ba61 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -451,20 +451,37 @@ in
         cfg.services
     );
 
-    assertions = concatLists (
-      mapAttrsToList
-        (name: service:
-          map (message: {
-            assertion = false;
-            inherit message;
-          }) (concatLists [
-            (optional ((builtins.elem "network-interfaces.target" service.after) || (builtins.elem "network-interfaces.target" service.wants))
-              "Service '${name}.service' is using the deprecated target network-interfaces.target, which no longer exists. Using network.target is recommended instead."
-            )
-          ])
-        )
-        cfg.services
-    );
+    assertions = let
+      mkOneAssert = typeStr: name: def: {
+        assertion = lib.elem "network-online.target" def.after -> lib.elem "network-online.target" (def.wants ++ def.requires ++ def.bindsTo);
+        message = "${name}.${typeStr} is ordered after 'network-online.target' but doesn't depend on it";
+      };
+      mkAsserts = typeStr: lib.mapAttrsToList (mkOneAssert typeStr);
+      mkMountAsserts = typeStr: map (m: mkOneAssert typeStr m.what m);
+    in mkMerge [
+      (concatLists (
+        mapAttrsToList
+          (name: service:
+            map (message: {
+              assertion = false;
+              inherit message;
+            }) (concatLists [
+              (optional ((builtins.elem "network-interfaces.target" service.after) || (builtins.elem "network-interfaces.target" service.wants))
+                "Service '${name}.service' is using the deprecated target network-interfaces.target, which no longer exists. Using network.target is recommended instead."
+              )
+            ])
+          )
+          cfg.services
+      ))
+      (mkAsserts "target" cfg.targets)
+      (mkAsserts "service" cfg.services)
+      (mkAsserts "socket" cfg.sockets)
+      (mkAsserts "timer" cfg.timers)
+      (mkAsserts "path" cfg.paths)
+      (mkMountAsserts "mount" cfg.mounts)
+      (mkMountAsserts "automount" cfg.automounts)
+      (mkAsserts "slice" cfg.slices)
+    ];
 
     system.build.units = cfg.units;
 
@@ -569,6 +586,13 @@ in
         unitConfig.X-StopOnReconfiguration = true;
       };
 
+    # This target only exists so that services ordered before sysinit.target
+    # are restarted in the correct order, notably BEFORE the other services,
+    # when switching configurations.
+    systemd.targets.sysinit-reactivation = {
+      description = "Reactivate sysinit units";
+    };
+
     systemd.units =
          mapAttrs' (n: v: nameValuePair "${n}.path"    (pathToUnit    n v)) cfg.paths
       // mapAttrs' (n: v: nameValuePair "${n}.service" (serviceToUnit n v)) cfg.services
@@ -634,7 +658,6 @@ in
     systemd.services.systemd-udev-settle.restartIfChanged = false; # Causes long delays in nixos-rebuild
     systemd.targets.local-fs.unitConfig.X-StopOnReconfiguration = true;
     systemd.targets.remote-fs.unitConfig.X-StopOnReconfiguration = true;
-    systemd.targets.network-online.wantedBy = [ "multi-user.target" ];
     systemd.services.systemd-importd.environment = proxy_env;
     systemd.services.systemd-pstore.wantedBy = [ "sysinit.target" ]; # see #81138
 
diff --git a/nixos/modules/system/boot/systemd/sysusers.nix b/nixos/modules/system/boot/systemd/sysusers.nix
new file mode 100644
index 0000000000000..c619c2d91eb09
--- /dev/null
+++ b/nixos/modules/system/boot/systemd/sysusers.nix
@@ -0,0 +1,169 @@
+{ config, lib, pkgs, utils, ... }:
+
+let
+
+  cfg = config.systemd.sysusers;
+  userCfg = config.users;
+
+  sysusersConfig = pkgs.writeTextDir "00-nixos.conf" ''
+    # Type Name ID GECOS Home directory Shell
+
+    # Users
+    ${lib.concatLines (lib.mapAttrsToList
+      (username: opts:
+        let
+          uid = if opts.uid == null then "-" else toString opts.uid;
+        in
+          ''u ${username} ${uid}:${opts.group} "${opts.description}" ${opts.home} ${utils.toShellPath opts.shell}''
+      )
+      userCfg.users)
+    }
+
+    # Groups
+    ${lib.concatLines (lib.mapAttrsToList
+      (groupname: opts: ''g ${groupname} ${if opts.gid == null then "-" else toString opts.gid}'') userCfg.groups)
+    }
+
+    # Group membership
+    ${lib.concatStrings (lib.mapAttrsToList
+      (groupname: opts: (lib.concatMapStrings (username: "m ${username} ${groupname}\n")) opts.members ) userCfg.groups)
+    }
+  '';
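+  # A user line rendered by the template above would look, for a hypothetical
+  # system user, like:
+  #   u nextcloud -:nextcloud "Nextcloud daemon user" /var/lib/nextcloud /run/current-system/sw/bin/nologin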
+
+  staticSysusersCredentials = pkgs.runCommand "static-sysusers-credentials" { } ''
+    mkdir $out; cd $out
+    ${lib.concatLines (
+      (lib.mapAttrsToList
+        (username: opts: "echo -n '${opts.initialHashedPassword}' > 'passwd.hashed-password.${username}'")
+        (lib.filterAttrs (_username: opts: opts.initialHashedPassword != null) userCfg.users))
+        ++
+      (lib.mapAttrsToList
+        (username: opts: "echo -n '${opts.initialPassword}' > 'passwd.plaintext-password.${username}'")
+        (lib.filterAttrs (_username: opts: opts.initialPassword != null) userCfg.users))
+        ++
+      (lib.mapAttrsToList
+        (username: opts: "cat '${opts.hashedPasswordFile}' > 'passwd.hashed-password.${username}'")
+        (lib.filterAttrs (_username: opts: opts.hashedPasswordFile != null) userCfg.users))
+      )
+    }
+  '';
+
+  staticSysusers = pkgs.runCommand "static-sysusers"
+    {
+      nativeBuildInputs = [ pkgs.systemd ];
+    } ''
+    mkdir $out
+    export CREDENTIALS_DIRECTORY=${staticSysusersCredentials}
+    systemd-sysusers --root $out ${sysusersConfig}/00-nixos.conf
+  '';
+
+in
+
+{
+
+  options = {
+
+    # This module doesn't set its own user options but reuses the ones from
+    # users-groups.nix
+
+    systemd.sysusers = {
+      enable = lib.mkEnableOption (lib.mdDoc "systemd-sysusers") // {
+        description = lib.mdDoc ''
+          If enabled, users are created with systemd-sysusers instead of with
+          the custom `update-users-groups.pl` script.
+
+          Note: This is experimental.
+        '';
+      };
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = config.system.activationScripts.users == "";
+        message = "system.activationScripts.users has to be empty to use systemd-sysusers";
+      }
+      {
+        assertion = config.users.mutableUsers -> config.system.etc.overlay.enable;
+        message = "config.users.mutableUsers requires config.system.etc.overlay.enable.";
+      }
+    ];
+
+    systemd = lib.mkMerge [
+      ({
+
+        # Create home directories, do not create /var/empty even if that's a user's
+        # home.
+        tmpfiles.settings.home-directories = lib.mapAttrs'
+          (username: opts: lib.nameValuePair opts.home {
+            d = {
+              mode = opts.homeMode;
+              user = username;
+              group = opts.group;
+            };
+          })
+          (lib.filterAttrs (_username: opts: opts.home != "/var/empty") userCfg.users);
+      })
+
+      (lib.mkIf config.users.mutableUsers {
+        additionalUpstreamSystemUnits = [
+          "systemd-sysusers.service"
+        ];
+
+        services.systemd-sysusers = {
+          # Enable switch-to-configuration to restart the service.
+          unitConfig.ConditionNeedsUpdate = [ "" ];
+          requiredBy = [ "sysinit-reactivation.target" ];
+          before = [ "sysinit-reactivation.target" ];
+          restartTriggers = [ "${config.environment.etc."sysusers.d".source}" ];
+
+          serviceConfig = {
+            LoadCredential = lib.mapAttrsToList
+              (username: opts: "passwd.hashed-password.${username}:${opts.hashedPasswordFile}")
+              (lib.filterAttrs (_username: opts: opts.hashedPasswordFile != null) userCfg.users);
+            SetCredential = (lib.mapAttrsToList
+              (username: opts: "passwd.hashed-password.${username}:${opts.initialHashedPassword}")
+              (lib.filterAttrs (_username: opts: opts.initialHashedPassword != null) userCfg.users))
+            ++
+            (lib.mapAttrsToList
+              (username: opts: "passwd.plaintext-password.${username}:${opts.initialPassword}")
+              (lib.filterAttrs (_username: opts: opts.initialPassword != null) userCfg.users))
+            ;
+          };
+        };
+      })
+    ];
+
+    environment.etc = lib.mkMerge [
+      (lib.mkIf (!userCfg.mutableUsers) {
+        "passwd" = {
+          source = "${staticSysusers}/etc/passwd";
+          mode = "0644";
+        };
+        "group" = {
+          source = "${staticSysusers}/etc/group";
+          mode = "0644";
+        };
+        "shadow" = {
+          source = "${staticSysusers}/etc/shadow";
+          mode = "0000";
+        };
+        "gshadow" = {
+          source = "${staticSysusers}/etc/gshadow";
+          mode = "0000";
+        };
+      })
+
+      (lib.mkIf userCfg.mutableUsers {
+        "sysusers.d".source = sysusersConfig;
+      })
+    ];
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+}
diff --git a/nixos/modules/system/boot/systemd/tmpfiles.nix b/nixos/modules/system/boot/systemd/tmpfiles.nix
index 183e2033ecb01..dae23eddd1e2b 100644
--- a/nixos/modules/system/boot/systemd/tmpfiles.nix
+++ b/nixos/modules/system/boot/systemd/tmpfiles.nix
@@ -150,6 +150,41 @@ in
       "systemd-tmpfiles-setup.service"
     ];
 
+    # Allow systemd-tmpfiles to be restarted by switch-to-configuration. This
+    # service is not pulled into the normal boot process. It only exists for
+    # switch-to-configuration.
+    #
+    # This needs to be a separate unit because it does not execute
+    # systemd-tmpfiles with `--boot` as that is supposed to only be executed
+    # once at boot time.
+    #
+    # Keep this aligned with the upstream `systemd-tmpfiles-setup.service` unit.
+    systemd.services."systemd-tmpfiles-resetup" = {
+      description = "Re-setup tmpfiles on a system that is already running.";
+
+      requiredBy = [ "sysinit-reactivation.target" ];
+      after = [ "local-fs.target" "systemd-sysusers.service" "systemd-journald.service" ];
+      before = [ "sysinit-reactivation.target" "shutdown.target" ];
+      conflicts = [ "shutdown.target" ];
+      restartTriggers = [ config.environment.etc."tmpfiles.d".source ];
+
+      unitConfig.DefaultDependencies = false;
+
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStart = "systemd-tmpfiles --create --remove --exclude-prefix=/dev";
+        SuccessExitStatus = "DATAERR CANTCREAT";
+        ImportCredential = [
+          "tmpfiles.*"
+          "loging.motd"
+          "login.issue"
+          "network.hosts"
+          "ssh.authorized_keys.root"
+        ];
+      };
+    };
+
     environment.etc = {
       "tmpfiles.d".source = (pkgs.symlinkJoin {
         name = "tmpfiles.d";
diff --git a/nixos/modules/system/boot/uki.nix b/nixos/modules/system/boot/uki.nix
new file mode 100644
index 0000000000000..63c4e0c0e3913
--- /dev/null
+++ b/nixos/modules/system/boot/uki.nix
@@ -0,0 +1,85 @@
+{ config, lib, pkgs, ... }:
+
+let
+
+  cfg = config.boot.uki;
+
+  inherit (pkgs.stdenv.hostPlatform) efiArch;
+
+  format = pkgs.formats.ini { };
+  ukifyConfig = format.generate "ukify.conf" cfg.settings;
+
+in
+
+{
+  options = {
+
+    boot.uki = {
+      name = lib.mkOption {
+        type = lib.types.str;
+        description = lib.mdDoc "Name of the UKI";
+      };
+
+      version = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        default = config.system.image.version;
+        defaultText = lib.literalExpression "config.system.image.version";
+        description = lib.mdDoc "Version of the image or generation the UKI belongs to";
+      };
+
+      settings = lib.mkOption {
+        type = format.type;
+        description = lib.mdDoc ''
+          The configuration settings for ukify. These control what the UKI
+          contains and how it is built.
+        '';
+      };
+    };
+
+    system.boot.loader.ukiFile = lib.mkOption {
+      type = lib.types.str;
+      internal = true;
+      description = lib.mdDoc "Name of the UKI file";
+    };
+
+  };
+
+  config = {
+
+    boot.uki.name = lib.mkOptionDefault (if config.system.image.id != null then
+      config.system.image.id
+    else
+      "nixos");
+
+    boot.uki.settings = lib.mkOptionDefault {
+      UKI = {
+        Linux = "${config.boot.kernelPackages.kernel}/${config.system.boot.loader.kernelFile}";
+        Initrd = "${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}";
+        Cmdline = "init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}";
+        Stub = "${pkgs.systemd}/lib/systemd/boot/efi/linux${efiArch}.efi.stub";
+        Uname = "${config.boot.kernelPackages.kernel.modDirVersion}";
+        OSRelease = "@${config.system.build.etc}/etc/os-release";
+        # This is needed for cross compiling.
+        EFIArch = efiArch;
+      };
+    };
+
+    system.boot.loader.ukiFile =
+      let
+        name = config.boot.uki.name;
+        version = config.boot.uki.version;
+        versionInfix = if version != null then "_${version}" else "";
+      in
+      name + versionInfix + ".efi";
+
+    system.build.uki = pkgs.runCommand config.system.boot.loader.ukiFile { } ''
+      mkdir -p $out
+      ${pkgs.buildPackages.systemdUkify}/lib/systemd/ukify build \
+        --config=${ukifyConfig} \
+        --output="$out/${config.system.boot.loader.ukiFile}"
+    '';
+
+    meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  };
+}
diff --git a/nixos/modules/system/etc/build-composefs-dump.py b/nixos/modules/system/etc/build-composefs-dump.py
new file mode 100644
index 0000000000000..923d40008b63f
--- /dev/null
+++ b/nixos/modules/system/etc/build-composefs-dump.py
@@ -0,0 +1,209 @@
+#!/usr/bin/env python3
+
+"""Build a composefs dump from a Json config
+
+See the man page of composefs-dump for details about the format:
+https://github.com/containers/composefs/blob/main/man/composefs-dump.md
+
+Make sure to check the file with the check script when you make changes to it:
+
+./check-build-composefs-dump.sh ./build-composefs-dump.py
+"""
+
+import glob
+import json
+import os
+import sys
+from enum import Enum
+from pathlib import Path
+from typing import Any
+
+Attrs = dict[str, Any]
+
+
+class FileType(Enum):
+    """The filetype as defined by the `st_mode` stat field in octal
+
+    You can check the st_mode stat field of a path in Python with
+    `oct(os.stat("/path/").st_mode)`
+    """
+
+    directory = "4"
+    file = "10"
+    symlink = "12"
+
+
+class ComposefsPath:
+    path: str
+    size: int
+    filetype: FileType
+    mode: str
+    uid: str
+    gid: str
+    payload: str
+    rdev: str = "0"
+    nlink: int = 1
+    mtime: str = "1.0"
+    content: str = "-"
+    digest: str = "-"
+
+    def __init__(
+        self,
+        attrs: Attrs,
+        size: int,
+        filetype: FileType,
+        mode: str,
+        payload: str,
+        path: str | None = None,
+    ):
+        if path is None:
+            path = attrs["target"]
+        self.path = "/" + path
+        self.size = size
+        self.filetype = filetype
+        self.mode = mode
+        self.uid = attrs["uid"]
+        self.gid = attrs["gid"]
+        self.payload = payload
+
+    def write_line(self) -> str:
+        line_list = [
+            str(self.path),
+            str(self.size),
+            f"{self.filetype.value}{self.mode}",
+            str(self.nlink),
+            str(self.uid),
+            str(self.gid),
+            str(self.rdev),
+            str(self.mtime),
+            str(self.payload),
+            str(self.content),
+            str(self.digest),
+        ]
+        return " ".join(line_list)
+
+
+def eprint(*args, **kwargs) -> None:
+    print(*args, **kwargs, file=sys.stderr)
+
+
+def leading_directories(path: str) -> list[str]:
+    """Return the leading directories of path
+
+    Given the path "alsa/conf.d/50-pipewire.conf", for example, this function
+    returns `[ "alsa", "alsa/conf.d" ]`.
+    """
+    parents = list(Path(path).parents)
+    parents.reverse()
+    # remove the implicit `.` from the start of a relative path or `/` from an
+    # absolute path
+    del parents[0]
+    return [str(i) for i in parents]
+
+
+def add_leading_directories(
+    target: str, attrs: Attrs, paths: dict[str, ComposefsPath]
+) -> None:
+    """Add the leading directories of a target path to the composefs paths
+
+    mkcomposefs expects that all leading directories are explicitly listed in
+    the dump file. Given the path "alsa/conf.d/50-pipewire.conf", for example,
+    this function adds "alsa" and "alsa/conf.d" to the composefs paths.
+    """
+    path_components = leading_directories(target)
+    for component in path_components:
+        composefs_path = ComposefsPath(
+            attrs,
+            path=component,
+            size=4096,
+            filetype=FileType.directory,
+            mode="0755",
+            payload="-",
+        )
+        paths[component] = composefs_path
+
+
+def main() -> None:
+    """Build a composefs dump from a JSON config
+
+    This config describes the files that the final composefs image is supposed
+    to contain.
+    """
+    if len(sys.argv) < 2:
+        eprint("No config file was supplied.")
+        sys.exit(1)
+    config_file = sys.argv[1]
+
+    with open(config_file, "rb") as f:
+        config = json.load(f)
+
+    if not config:
+        eprint("Config is empty.")
+        sys.exit(1)
+
+    eprint("Building composefs dump...")
+
+    paths: dict[str, ComposefsPath] = {}
+    for attrs in config:
+        target = attrs["target"]
+        source = attrs["source"]
+        mode = attrs["mode"]
+
+        if "*" in source:  # Path with globbing
+            glob_sources = glob.glob(source)
+            for glob_source in glob_sources:
+                basename = os.path.basename(glob_source)
+                glob_target = f"{target}/{basename}"
+
+                composefs_path = ComposefsPath(
+                    attrs,
+                    path=glob_target,
+                    size=100,
+                    filetype=FileType.symlink,
+                    mode="0777",
+                    payload=glob_source,
+                )
+
+                paths[glob_target] = composefs_path
+                add_leading_directories(glob_target, attrs, paths)
+        else:  # Without globbing
+            if mode == "symlink":
+                composefs_path = ComposefsPath(
+                    attrs,
+                    # A generous upper bound for the size of a symlink
+                    size=100,
+                    filetype=FileType.symlink,
+                    mode="0777",
+                    payload=source,
+                )
+            else:
+                if os.path.isdir(source):
+                    composefs_path = ComposefsPath(
+                        attrs,
+                        size=4096,
+                        filetype=FileType.directory,
+                        mode=mode,
+                        payload=source,
+                    )
+                else:
+                    composefs_path = ComposefsPath(
+                        attrs,
+                        size=os.stat(source).st_size,
+                        filetype=FileType.file,
+                        mode=mode,
+                        payload=target,
+                    )
+            paths[target] = composefs_path
+            add_leading_directories(target, attrs, paths)
+
+    composefs_dump = ["/ 4096 40755 1 0 0 0 0.0 - - -"]  # Root directory
+    for key in sorted(paths):
+        composefs_path = paths[key]
+        eprint(composefs_path.path)
+        composefs_dump.append(composefs_path.write_line())
+
+    print("\n".join(composefs_dump))
+
+
+if __name__ == "__main__":
+    main()
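As a rough illustration of the script's interface (derivation name and paths are hypothetical, and pkgs is assumed to be in scope), a one-entry JSON config can be fed to it directly and the dump lines read from stdout; etc.nix below wires the same invocation into the etc-metadata.erofs image.

pkgs.runCommand "composefs-dump-demo" { nativeBuildInputs = [ pkgs.python3 ]; } ''
  # One symlink entry: target path in /etc, backing source, mode, owner.
  echo '[ { "target": "hostname", "source": "/etc/static/hostname", "mode": "symlink", "uid": "0", "gid": "0" } ]' > config.json
  # Prints the root directory line plus one line per path, e.g.
  # "/hostname 100 120777 1 0 0 0 1.0 /etc/static/hostname - -".
  python3 ${./build-composefs-dump.py} config.json > $out
''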
diff --git a/nixos/modules/system/etc/check-build-composefs-dump.sh b/nixos/modules/system/etc/check-build-composefs-dump.sh
new file mode 100755
index 0000000000000..da61651d1a5d6
--- /dev/null
+++ b/nixos/modules/system/etc/check-build-composefs-dump.sh
@@ -0,0 +1,8 @@
+#! /usr/bin/env nix-shell
+#! nix-shell -i bash -p black ruff mypy
+
+file=$1
+
+black --check --diff "$file"
+ruff --line-length 88 "$file"
+mypy --strict "$file"
diff --git a/nixos/modules/system/etc/etc-activation.nix b/nixos/modules/system/etc/etc-activation.nix
index 7801049501860..f47fd771c6592 100644
--- a/nixos/modules/system/etc/etc-activation.nix
+++ b/nixos/modules/system/etc/etc-activation.nix
@@ -1,12 +1,96 @@
 { config, lib, ... }:
-let
-  inherit (lib) stringAfter;
-in {
+
+{
 
   imports = [ ./etc.nix ];
 
-  config = {
-    system.activationScripts.etc =
-      stringAfter [ "users" "groups" ] config.system.build.etcActivationCommands;
-  };
+  config = lib.mkMerge [
+
+    {
+      system.activationScripts.etc =
+        lib.stringAfter [ "users" "groups" ] config.system.build.etcActivationCommands;
+    }
+
+    (lib.mkIf config.system.etc.overlay.enable {
+
+      assertions = [
+        {
+          assertion = config.boot.initrd.systemd.enable;
+          message = "`system.etc.overlay.enable` requires `boot.initrd.systemd.enable`";
+        }
+        {
+          assertion = (!config.system.etc.overlay.mutable) -> config.systemd.sysusers.enable;
+          message = "`system.etc.overlay.mutable = false` requires `systemd.sysusers.enable`";
+        }
+        {
+          assertion = lib.versionAtLeast config.boot.kernelPackages.kernel.version "6.6";
+          message = "`system.etc.overlay.enable` requires a newer kernel, at least version 6.6";
+        }
+        {
+          assertion = config.systemd.sysusers.enable -> (config.users.mutableUsers == config.system.etc.overlay.mutable);
+          message = ''
+            When using systemd-sysusers and mounting `/etc` via an overlay, users
+            can only be mutable when `/etc` is mutable and vice versa.
+          '';
+        }
+      ];
+
+      boot.initrd.availableKernelModules = [ "loop" "erofs" "overlay" ];
+
+      boot.initrd.systemd = {
+        mounts = [
+          {
+            where = "/run/etc-metadata";
+            what = "/sysroot${config.system.build.etcMetadataImage}";
+            type = "erofs";
+            options = "loop";
+            unitConfig.RequiresMountsFor = [
+              "/sysroot/nix/store"
+            ];
+          }
+          {
+            where = "/sysroot/etc";
+            what = "overlay";
+            type = "overlay";
+            options = lib.concatStringsSep "," ([
+              "relatime"
+              "redirect_dir=on"
+              "metacopy=on"
+              "lowerdir=/run/etc-metadata::/sysroot${config.system.build.etcBasedir}"
+            ] ++ lib.optionals config.system.etc.overlay.mutable [
+              "rw"
+              "upperdir=/sysroot/.rw-etc/upper"
+              "workdir=/sysroot/.rw-etc/work"
+            ] ++ lib.optionals (!config.system.etc.overlay.mutable) [
+              "ro"
+            ]);
+            wantedBy = [ "initrd-fs.target" ];
+            before = [ "initrd-fs.target" ];
+            requires = lib.mkIf config.system.etc.overlay.mutable [ "rw-etc.service" ];
+            after = lib.mkIf config.system.etc.overlay.mutable [ "rw-etc.service" ];
+            unitConfig.RequiresMountsFor = [
+              "/sysroot/nix/store"
+              "/run/etc-metadata"
+            ];
+          }
+        ];
+        services = lib.mkIf config.system.etc.overlay.mutable {
+          rw-etc = {
+            unitConfig = {
+              DefaultDependencies = false;
+              RequiresMountsFor = "/sysroot";
+            };
+            serviceConfig = {
+              Type = "oneshot";
+              ExecStart = ''
+                /bin/mkdir -p -m 0755 /sysroot/.rw-etc/upper /sysroot/.rw-etc/work
+              '';
+            };
+          };
+        };
+      };
+
+    })
+
+  ];
 }
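A minimal sketch of the mutable variant, mirroring the prerequisites asserted above (and the test added under nixos/tests/activation/ below):

{ pkgs, ... }:
{
  # Experimental: /etc becomes an overlay with a writable upperdir.
  system.etc.overlay.enable = true;
  system.etc.overlay.mutable = true;

  # Prerequisites enforced by the assertions above.
  boot.initrd.systemd.enable = true;
  boot.kernelPackages = pkgs.linuxPackages_latest; # needs at least kernel 6.6
}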
diff --git a/nixos/modules/system/etc/etc.nix b/nixos/modules/system/etc/etc.nix
index ea61e7384e60c..baf37ba6def34 100644
--- a/nixos/modules/system/etc/etc.nix
+++ b/nixos/modules/system/etc/etc.nix
@@ -62,6 +62,16 @@ let
     ]) etc'}
   '';
 
+  etcHardlinks = filter (f: f.mode != "symlink") etc';
+
+  build-composefs-dump = pkgs.runCommand "build-composefs-dump.py"
+    {
+      buildInputs = [ pkgs.python3 ];
+    } ''
+    install ${./build-composefs-dump.py} $out
+    patchShebangs --host $out
+  '';
+
 in
 
 {
@@ -72,6 +82,30 @@ in
 
   options = {
 
+    system.etc.overlay = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Mount `/etc` as an overlayfs instead of generating it via a Perl script.
+
+          Note: This is currently experimental. Only enable this option if you're
+          confident that you can recover your system if it breaks.
+        '';
+      };
+
+      mutable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to mount `/etc` mutably (i.e. read-write) or immutably (i.e. read-only).
+
+          If this is false, only the immutable lowerdir is mounted. If it is
+          true, a writable upperdir is mounted on top.
+        '';
+      };
+    };
+
     environment.etc = mkOption {
       default = {};
       example = literalExpression ''
@@ -190,12 +224,84 @@ in
   config = {
 
     system.build.etc = etc;
-    system.build.etcActivationCommands =
-      ''
-        # Set up the statically computed bits of /etc.
-        echo "setting up /etc..."
-        ${pkgs.perl.withPackages (p: [ p.FileSlurp ])}/bin/perl ${./setup-etc.pl} ${etc}/etc
+    system.build.etcActivationCommands = let
+      etcOverlayOptions = lib.concatStringsSep "," ([
+        "relatime"
+        "redirect_dir=on"
+        "metacopy=on"
+      ] ++ lib.optionals config.system.etc.overlay.mutable [
+        "upperdir=/.rw-etc/upper"
+        "workdir=/.rw-etc/work"
+      ]);
+    in if config.system.etc.overlay.enable then ''
+      # This script atomically remounts /etc when switching configuration. On a (re-)boot
+      # this should not run because /etc is mounted via a systemd mount unit
+      # instead. To a large extent this mimics what composefs does. Because
+      # it's relatively simple, however, we avoid the composefs dependency.
+      if [[ ! $IN_NIXOS_SYSTEMD_STAGE1 ]]; then
+        echo "remounting /etc..."
+
+        tmpMetadataMount=$(mktemp --directory)
+        mount --type erofs ${config.system.build.etcMetadataImage} $tmpMetadataMount
+
+        # Mount the new /etc overlay to a temporary private mount.
+        # This needs the indirection via a private bind mount because you
+        # cannot move shared mounts.
+        tmpEtcMount=$(mktemp --directory)
+        mount --bind --make-private $tmpEtcMount $tmpEtcMount
+        mount --type overlay overlay \
+          --options lowerdir=$tmpMetadataMount::${config.system.build.etcBasedir},${etcOverlayOptions} \
+          $tmpEtcMount
+
+        # Move the new temporary /etc mount underneath the current /etc mount.
+        #
+        # This should eventually use util-linux to perform this move beneath,
+        # however, this functionality is not yet in util-linux. See this
+        # tracking issue: https://github.com/util-linux/util-linux/issues/2604
+        ${pkgs.move-mount-beneath}/bin/move-mount --move --beneath $tmpEtcMount /etc
+
+        # Unmount the top /etc mount to atomically reveal the new mount.
+        umount /etc
+
+      fi
+    '' else ''
+      # Set up the statically computed bits of /etc.
+      echo "setting up /etc..."
+      ${pkgs.perl.withPackages (p: [ p.FileSlurp ])}/bin/perl ${./setup-etc.pl} ${etc}/etc
+    '';
+
+    system.build.etcBasedir = pkgs.runCommandLocal "etc-lowerdir" { } ''
+      set -euo pipefail
+
+      makeEtcEntry() {
+        src="$1"
+        target="$2"
+
+        mkdir -p "$out/$(dirname "$target")"
+        cp "$src" "$out/$target"
+      }
+
+      mkdir -p "$out"
+      ${concatMapStringsSep "\n" (etcEntry: escapeShellArgs [
+        "makeEtcEntry"
+        # Force local source paths to be added to the store
+        "${etcEntry.source}"
+        etcEntry.target
+      ]) etcHardlinks}
+    '';
+
+    system.build.etcMetadataImage =
+      let
+        etcJson = pkgs.writeText "etc-json" (builtins.toJSON etc');
+        etcDump = pkgs.runCommand "etc-dump" { } "${build-composefs-dump} ${etcJson} > $out";
+      in
+      pkgs.runCommand "etc-metadata.erofs" {
+        nativeBuildInputs = [ pkgs.composefs pkgs.erofs-utils ];
+      } ''
+        mkcomposefs --from-file ${etcDump} $out
+        fsck.erofs $out
       '';
+
   };
 
 }
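For the read-only variant, the assertions in etc-activation.nix additionally require users to be immutable and managed by systemd-sysusers; a sketch:

{
  system.etc.overlay.enable = true;
  system.etc.overlay.mutable = false;

  # With an immutable /etc, users are created by systemd-sysusers instead of
  # the activation script, and must be immutable as well.
  systemd.sysusers.enable = true;
  users.mutableUsers = false;
}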
diff --git a/nixos/modules/tasks/auto-upgrade.nix b/nixos/modules/tasks/auto-upgrade.nix
index 29e3e313336f5..22311871274b9 100644
--- a/nixos/modules/tasks/auto-upgrade.nix
+++ b/nixos/modules/tasks/auto-upgrade.nix
@@ -109,6 +109,17 @@ in {
         '';
       };
 
+      fixedRandomDelay = mkOption {
+        default = false;
+        type = types.bool;
+        example = true;
+        description = lib.mdDoc ''
+          Make the randomized delay consistent between runs.
+          This reduces the jitter between automatic upgrades.
+          See {option}`randomizedDelaySec` for configuring the randomized delay.
+        '';
+      };
+
       rebootWindow = mkOption {
         description = lib.mdDoc ''
           Define a lower and upper time value (in HH:MM format) which
@@ -253,6 +264,7 @@ in {
     systemd.timers.nixos-upgrade = {
       timerConfig = {
         RandomizedDelaySec = cfg.randomizedDelaySec;
+        FixedRandomDelay = cfg.fixedRandomDelay;
         Persistent = cfg.persistent;
       };
     };
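A short sketch of how the new option combines with the existing randomized delay (values illustrative):

{
  system.autoUpgrade = {
    enable = true;
    # Pick a random offset of up to 45 minutes once and reuse it on every run,
    # so upgrades fire at a stable time instead of jittering per run.
    randomizedDelaySec = "45min";
    fixedRandomDelay = true;
  };
}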
diff --git a/nixos/modules/testing/test-instrumentation.nix b/nixos/modules/testing/test-instrumentation.nix
index 9ee77cd79a9b1..6aa718c1975d7 100644
--- a/nixos/modules/testing/test-instrumentation.nix
+++ b/nixos/modules/testing/test-instrumentation.nix
@@ -207,7 +207,10 @@ in
     networking.usePredictableInterfaceNames = false;
 
     # Make it easy to log in as root when running the test interactively.
-    users.users.root.initialHashedPassword = mkOverride 150 "";
+    # This needs to be a file because of a quirk in systemd credentials,
+    # where you cannot specify an empty string as a value. systemd-sysusers
+    # uses credentials to set passwords on users.
+    users.users.root.hashedPasswordFile = mkOverride 150 "${pkgs.writeText "hashed-password.root" ""}";
 
     services.xserver.displayManager.job.logToJournal = true;
 
diff --git a/nixos/modules/virtualisation/amazon-image.nix b/nixos/modules/virtualisation/amazon-image.nix
index aa44f26426970..f0d9b95f81f6b 100644
--- a/nixos/modules/virtualisation/amazon-image.nix
+++ b/nixos/modules/virtualisation/amazon-image.nix
@@ -71,6 +71,7 @@ in
 
     systemd.services.fetch-ec2-metadata = {
       wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
       after = ["network-online.target"];
       path = [ pkgs.curl ];
       script = builtins.readFile ./ec2-metadata-fetcher.sh;
diff --git a/nixos/modules/virtualisation/incus.nix b/nixos/modules/virtualisation/incus.nix
index 3e48f8873ed4f..ea4cb916aa08d 100644
--- a/nixos/modules/virtualisation/incus.nix
+++ b/nixos/modules/virtualisation/incus.nix
@@ -150,10 +150,12 @@ in
       after = [
         "network-online.target"
         "lxcfs.service"
-      ] ++ (lib.optional cfg.socketActivation "incus.socket");
+        "incus.socket"
+      ];
       requires = [
         "lxcfs.service"
-      ] ++ (lib.optional cfg.socketActivation "incus.socket");
+        "incus.socket"
+      ];
       wants = [
         "network-online.target"
       ];
@@ -183,7 +185,7 @@ in
       };
     };
 
-    systemd.sockets.incus = lib.mkIf cfg.socketActivation {
+    systemd.sockets.incus = {
       description = "Incus UNIX socket";
       wantedBy = [ "sockets.target" ];
 
@@ -191,7 +193,6 @@ in
         ListenStream = "/var/lib/incus/unix.socket";
         SocketMode = "0660";
         SocketGroup = "incus-admin";
-        Service = "incus.service";
       };
     };
 
diff --git a/nixos/modules/virtualisation/lxd.nix b/nixos/modules/virtualisation/lxd.nix
index 885fb4e078530..e0d61b1754949 100644
--- a/nixos/modules/virtualisation/lxd.nix
+++ b/nixos/modules/virtualisation/lxd.nix
@@ -214,16 +214,14 @@ in {
         LimitNPROC = "infinity";
         TasksMax = "infinity";
 
-        Restart = "on-failure";
-        TimeoutStartSec = "${cfg.startTimeout}s";
-        TimeoutStopSec = "30s";
-
         # By default, `lxd` loads configuration files from hard-coded
         # `/usr/share/lxc/config` - since this is a no-go for us, we have to
         # explicitly tell it where the actual configuration files are
         Environment = lib.mkIf (config.virtualisation.lxc.lxcfs.enable)
           "LXD_LXC_TEMPLATE_CONFIG=${pkgs.lxcfs}/share/lxc/config";
       };
+
+      unitConfig.ConditionPathExists = "!/var/lib/incus/.migrated-from-lxd";
     };
 
     systemd.services.lxd-preseed = lib.mkIf (cfg.preseed != null) {
diff --git a/nixos/modules/virtualisation/oci-containers.nix b/nixos/modules/virtualisation/oci-containers.nix
index a4a40346f093b..07ed08ab2f84d 100644
--- a/nixos/modules/virtualisation/oci-containers.nix
+++ b/nixos/modules/virtualisation/oci-containers.nix
@@ -267,6 +267,7 @@ let
     };
   in {
     wantedBy = [] ++ optional (container.autoStart) "multi-user.target";
+    wants = lib.optional (container.imageFile == null)  "network-online.target";
     after = lib.optionals (cfg.backend == "docker") [ "docker.service" "docker.socket" ]
             # if imageFile is not set, the service needs the network to download the image from the registry
             ++ lib.optionals (container.imageFile == null) [ "network-online.target" ]
diff --git a/nixos/modules/virtualisation/podman/default.nix b/nixos/modules/virtualisation/podman/default.nix
index ec0b713e58b38..47382f9beab00 100644
--- a/nixos/modules/virtualisation/podman/default.nix
+++ b/nixos/modules/virtualisation/podman/default.nix
@@ -150,26 +150,33 @@ in
 
   };
 
-  config = lib.mkIf cfg.enable
-    {
+  config =
+    let
+      networkConfig = ({
+        dns_enabled = false;
+        driver = "bridge";
+        id = "0000000000000000000000000000000000000000000000000000000000000000";
+        internal = false;
+        ipam_options = { driver = "host-local"; };
+        ipv6_enabled = false;
+        name = "podman";
+        network_interface = "podman0";
+        subnets = [{ gateway = "10.88.0.1"; subnet = "10.88.0.0/16"; }];
+      } // cfg.defaultNetwork.settings);
+      inherit (networkConfig) dns_enabled network_interface;
+    in
+    lib.mkIf cfg.enable {
       environment.systemPackages = [ cfg.package ]
         ++ lib.optional cfg.dockerCompat dockerCompat;
 
       # https://github.com/containers/podman/blob/097cc6eb6dd8e598c0e8676d21267b4edb11e144/docs/tutorials/basic_networking.md#default-network
       environment.etc."containers/networks/podman.json" = lib.mkIf (cfg.defaultNetwork.settings != { }) {
-        source = json.generate "podman.json" ({
-          dns_enabled = false;
-          driver = "bridge";
-          id = "0000000000000000000000000000000000000000000000000000000000000000";
-          internal = false;
-          ipam_options = { driver = "host-local"; };
-          ipv6_enabled = false;
-          name = "podman";
-          network_interface = "podman0";
-          subnets = [{ gateway = "10.88.0.1"; subnet = "10.88.0.0/16"; }];
-        } // cfg.defaultNetwork.settings);
+        source = json.generate "podman.json" networkConfig;
       };
 
+      # containers cannot reach aardvark-dns otherwise
+      networking.firewall.interfaces.${network_interface}.allowedUDPPorts = lib.mkIf dns_enabled [ 53 ];
+
       virtualisation.containers = {
         enable = true; # Enable common /etc/containers configuration
         containersConf.settings = {
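A sketch of the case the new firewall rule covers (the attribute is merged into networkConfig above): enabling DNS on the default podman network starts aardvark-dns on the bridge, and the module then opens UDP/53 on that interface.

{
  virtualisation.podman = {
    enable = true;
    # Merged into the default network settings; with dns_enabled = true the
    # module opens UDP port 53 on network_interface ("podman0" by default).
    defaultNetwork.settings.dns_enabled = true;
  };
}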
diff --git a/nixos/tests/3proxy.nix b/nixos/tests/3proxy.nix
index 83d39de018a39..b80b4e166d481 100644
--- a/nixos/tests/3proxy.nix
+++ b/nixos/tests/3proxy.nix
@@ -134,6 +134,7 @@
   testScript = ''
     start_all()
 
+    peer0.systemctl("start network-online.target")
     peer0.wait_for_unit("network-online.target")
 
     peer1.wait_for_unit("3proxy.service")
diff --git a/nixos/tests/acme.nix b/nixos/tests/acme.nix
index e5f2d4c7934a1..272782dc2f621 100644
--- a/nixos/tests/acme.nix
+++ b/nixos/tests/acme.nix
@@ -522,6 +522,7 @@ in {
           'curl --data \'{"host": "${caDomain}", "addresses": ["${nodes.acme.networking.primaryIPAddress}"]}\' http://${dnsServerIP nodes}:8055/add-a'
       )
 
+      acme.systemctl("start network-online.target")
       acme.wait_for_unit("network-online.target")
       acme.wait_for_unit("pebble.service")
 
diff --git a/nixos/tests/activation/etc-overlay-immutable.nix b/nixos/tests/activation/etc-overlay-immutable.nix
new file mode 100644
index 0000000000000..70c3623b929c5
--- /dev/null
+++ b/nixos/tests/activation/etc-overlay-immutable.nix
@@ -0,0 +1,30 @@
+{ lib, ... }: {
+
+  name = "activation-etc-overlay-immutable";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine = { pkgs, ... }: {
+    system.etc.overlay.enable = true;
+    system.etc.overlay.mutable = false;
+
+    # Prerequisites
+    systemd.sysusers.enable = true;
+    users.mutableUsers = false;
+    boot.initrd.systemd.enable = true;
+    boot.kernelPackages = pkgs.linuxPackages_latest;
+
+    specialisation.new-generation.configuration = {
+      environment.etc."newgen".text = "newgen";
+    };
+  };
+
+  testScript = ''
+    machine.succeed("findmnt --kernel --type overlay /etc")
+    machine.fail("stat /etc/newgen")
+
+    machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
+
+    assert machine.succeed("cat /etc/newgen") == "newgen"
+  '';
+}
diff --git a/nixos/tests/activation/etc-overlay-mutable.nix b/nixos/tests/activation/etc-overlay-mutable.nix
new file mode 100644
index 0000000000000..cfe7604fceb84
--- /dev/null
+++ b/nixos/tests/activation/etc-overlay-mutable.nix
@@ -0,0 +1,30 @@
+{ lib, ... }: {
+
+  name = "activation-etc-overlay-mutable";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine = { pkgs, ... }: {
+    system.etc.overlay.enable = true;
+    system.etc.overlay.mutable = true;
+
+    # Prerequisites
+    boot.initrd.systemd.enable = true;
+    boot.kernelPackages = pkgs.linuxPackages_latest;
+
+    specialisation.new-generation.configuration = {
+      environment.etc."newgen".text = "newgen";
+    };
+  };
+
+  testScript = ''
+    machine.succeed("findmnt --kernel --type overlay /etc")
+    machine.fail("stat /etc/newgen")
+    machine.succeed("echo -n 'mutable' > /etc/mutable")
+
+    machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
+
+    assert machine.succeed("cat /etc/newgen") == "newgen"
+    assert machine.succeed("cat /etc/mutable") == "mutable"
+  '';
+}
diff --git a/nixos/tests/activation/perlless.nix b/nixos/tests/activation/perlless.nix
new file mode 100644
index 0000000000000..4d784b4542f45
--- /dev/null
+++ b/nixos/tests/activation/perlless.nix
@@ -0,0 +1,24 @@
+{ lib, ... }:
+
+{
+
+  name = "activation-perlless";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine = { pkgs, modulesPath, ... }: {
+    imports = [ "${modulesPath}/profiles/perlless.nix" ];
+
+    boot.kernelPackages = pkgs.linuxPackages_latest;
+
+    virtualisation.mountHostNixStore = false;
+    virtualisation.useNixStoreImage = true;
+  };
+
+  testScript = ''
+    perl_store_paths = machine.succeed("ls /nix/store | grep perl || true")
+    print(perl_store_paths)
+    assert len(perl_store_paths) == 0
+  '';
+
+}
diff --git a/nixos/tests/adguardhome.nix b/nixos/tests/adguardhome.nix
index a6f790b83f5fc..80613ce825340 100644
--- a/nixos/tests/adguardhome.nix
+++ b/nixos/tests/adguardhome.nix
@@ -126,6 +126,7 @@
 
     with subtest("Testing successful DHCP start"):
         dhcpConf.wait_for_unit("adguardhome.service")
+        client.systemctl("start network-online.target")
         client.wait_for_unit("network-online.target")
         # Test IP assignment via DHCP
         dhcpConf.wait_until_succeeds("ping -c 5 10.0.10.100")
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 63e957eace88d..1453a3875f6e7 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -285,6 +285,9 @@ in {
   activation = pkgs.callPackage ../modules/system/activation/test.nix { };
   activation-var = runTest ./activation/var.nix;
   activation-nix-channel = runTest ./activation/nix-channel.nix;
+  activation-etc-overlay-mutable = runTest ./activation/etc-overlay-mutable.nix;
+  activation-etc-overlay-immutable = runTest ./activation/etc-overlay-immutable.nix;
+  activation-perlless = runTest ./activation/perlless.nix;
   etcd = handleTestOn ["x86_64-linux"] ./etcd.nix {};
   etcd-cluster = handleTestOn ["x86_64-linux"] ./etcd-cluster.nix {};
   etebase-server = handleTest ./etebase-server.nix {};
@@ -569,8 +572,8 @@ in {
   netdata = handleTest ./netdata.nix {};
   networking.networkd = handleTest ./networking.nix { networkd = true; };
   networking.scripted = handleTest ./networking.nix { networkd = false; };
-  netbox_3_5 = handleTest ./web-apps/netbox.nix { netbox = pkgs.netbox_3_5; };
   netbox_3_6 = handleTest ./web-apps/netbox.nix { netbox = pkgs.netbox_3_6; };
+  netbox_3_7 = handleTest ./web-apps/netbox.nix { netbox = pkgs.netbox_3_7; };
   netbox-upgrade = handleTest ./web-apps/netbox-upgrade.nix {};
   # TODO: put in networking.nix after the test becomes more complete
   networkingProxy = handleTest ./networking-proxy.nix {};
@@ -587,6 +590,7 @@ in {
   nginx-globalredirect = handleTest ./nginx-globalredirect.nix {};
   nginx-http3 = handleTest ./nginx-http3.nix {};
   nginx-modsecurity = handleTest ./nginx-modsecurity.nix {};
+  nginx-moreheaders = handleTest ./nginx-moreheaders.nix {};
   nginx-njs = handleTest ./nginx-njs.nix {};
   nginx-proxyprotocol = handleTest ./nginx-proxyprotocol {};
   nginx-pubhtml = handleTest ./nginx-pubhtml.nix {};
@@ -808,6 +812,7 @@ in {
   stunnel = handleTest ./stunnel.nix {};
   sudo = handleTest ./sudo.nix {};
   sudo-rs = handleTest ./sudo-rs.nix {};
+  suwayomi-server = handleTest ./suwayomi-server.nix {};
   swap-file-btrfs = handleTest ./swap-file-btrfs.nix {};
   swap-partition = handleTest ./swap-partition.nix {};
   swap-random-encryption = handleTest ./swap-random-encryption.nix {};
@@ -819,6 +824,7 @@ in {
   syncthing-init = handleTest ./syncthing-init.nix {};
   syncthing-many-devices = handleTest ./syncthing-many-devices.nix {};
   syncthing-relay = handleTest ./syncthing-relay.nix {};
+  sysinit-reactivation = runTest ./sysinit-reactivation.nix;
   systemd = handleTest ./systemd.nix {};
   systemd-analyze = handleTest ./systemd-analyze.nix {};
   systemd-binfmt = handleTestOn ["x86_64-linux"] ./systemd-binfmt.nix {};
@@ -863,6 +869,8 @@ in {
   systemd-repart = handleTest ./systemd-repart.nix {};
   systemd-shutdown = handleTest ./systemd-shutdown.nix {};
   systemd-sysupdate = runTest ./systemd-sysupdate.nix;
+  systemd-sysusers-mutable = runTest ./systemd-sysusers-mutable.nix;
+  systemd-sysusers-immutable = runTest ./systemd-sysusers-immutable.nix;
   systemd-timesyncd = handleTest ./systemd-timesyncd.nix {};
   systemd-timesyncd-nscd-dnssec = handleTest ./systemd-timesyncd-nscd-dnssec.nix {};
   systemd-user-tmpfiles-rules = handleTest ./systemd-user-tmpfiles-rules.nix {};
@@ -902,6 +910,7 @@ in {
   trilium-server = handleTestOn ["x86_64-linux"] ./trilium-server.nix {};
   tsja = handleTest ./tsja.nix {};
   tsm-client-gui = handleTest ./tsm-client-gui.nix {};
+  ttyd = handleTest ./web-servers/ttyd.nix {};
   txredisapi = handleTest ./txredisapi.nix {};
   tuptime = handleTest ./tuptime.nix {};
   turbovnc-headless-server = handleTest ./turbovnc-headless-server.nix {};
@@ -941,6 +950,7 @@ in {
   vsftpd = handleTest ./vsftpd.nix {};
   warzone2100 = handleTest ./warzone2100.nix {};
   wasabibackend = handleTest ./wasabibackend.nix {};
+  watchdogd = handleTest ./watchdogd.nix {};
   webhook = runTest ./webhook.nix;
   wiki-js = handleTest ./wiki-js.nix {};
   wine = handleTest ./wine.nix {};
diff --git a/nixos/tests/appliance-repart-image.nix b/nixos/tests/appliance-repart-image.nix
index 3f256db846214..b18968d3b9631 100644
--- a/nixos/tests/appliance-repart-image.nix
+++ b/nixos/tests/appliance-repart-image.nix
@@ -8,9 +8,8 @@
 let
   rootPartitionLabel = "root";
 
-  bootLoaderConfigPath = "/loader/entries/nixos.conf";
-  kernelPath = "/EFI/nixos/kernel.efi";
-  initrdPath = "/EFI/nixos/initrd.efi";
+  imageId = "nixos-appliance";
+  imageVersion = "1-rc1";
 in
 {
   name = "appliance-gpt-image";
@@ -29,6 +28,9 @@ in
     # TODO(raitobezarius): revisit this when #244907 lands
     boot.loader.grub.enable = false;
 
+    system.image.id = imageId;
+    system.image.version = imageVersion;
+
     virtualisation.fileSystems = lib.mkForce {
       "/" = {
         device = "/dev/disk/by-partlabel/${rootPartitionLabel}";
@@ -48,19 +50,8 @@ in
               "/EFI/BOOT/BOOT${lib.toUpper efiArch}.EFI".source =
                 "${pkgs.systemd}/lib/systemd/boot/efi/systemd-boot${efiArch}.efi";
 
-              # TODO: create an abstraction for Boot Loader Specification (BLS) entries.
-              "${bootLoaderConfigPath}".source = pkgs.writeText "nixos.conf" ''
-                title NixOS
-                linux ${kernelPath}
-                initrd ${initrdPath}
-                options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
-              '';
-
-              "${kernelPath}".source =
-                "${config.boot.kernelPackages.kernel}/${config.system.boot.loader.kernelFile}";
-
-              "${initrdPath}".source =
-                "${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}";
+              "/EFI/Linux/${config.system.boot.loader.ukiFile}".source =
+                "${config.system.build.uki}/${config.system.boot.loader.ukiFile}";
             };
           repartConfig = {
             Type = "esp";
@@ -99,7 +90,7 @@ in
       "-f",
       "qcow2",
       "-b",
-      "${nodes.machine.system.build.image}/image.raw",
+      "${nodes.machine.system.build.image}/${nodes.machine.image.repart.imageFile}",
       "-F",
       "raw",
       tmp_disk_image.name,
@@ -108,9 +99,11 @@ in
     # Set NIX_DISK_IMAGE so that the qemu script finds the right disk image.
     os.environ['NIX_DISK_IMAGE'] = tmp_disk_image.name
 
+    os_release = machine.succeed("cat /etc/os-release")
+    assert 'IMAGE_ID="${imageId}"' in os_release
+    assert 'IMAGE_VERSION="${imageVersion}"' in os_release
+
     bootctl_status = machine.succeed("bootctl status")
-    assert "${bootLoaderConfigPath}" in bootctl_status
-    assert "${kernelPath}" in bootctl_status
-    assert "${initrdPath}" in bootctl_status
+    assert "Boot Loader Specification Type #2 (.efi)" in bootctl_status
   '';
 }
diff --git a/nixos/tests/ayatana-indicators.nix b/nixos/tests/ayatana-indicators.nix
index bc7ff75f390f7..c9cbbda4c601d 100644
--- a/nixos/tests/ayatana-indicators.nix
+++ b/nixos/tests/ayatana-indicators.nix
@@ -4,7 +4,7 @@ in {
   name = "ayatana-indicators";
 
   meta = {
-    maintainers = with lib.maintainers; [ OPNA2608 ];
+    maintainers = lib.teams.lomiri.members;
   };
 
   nodes.machine = { config, ... }: {
@@ -27,17 +27,56 @@ in {
     services.ayatana-indicators = {
       enable = true;
       packages = with pkgs; [
+        ayatana-indicator-datetime
         ayatana-indicator-messages
-      ];
+      ] ++ (with pkgs.lomiri; [
+        lomiri-indicator-network
+        telephony-service
+      ]);
     };
 
-    # Services needed by some indicators
+    # Setup needed by some indicators
+
     services.accounts-daemon.enable = true; # messages
+
+    # Lomiri-ish setup for Lomiri indicators
+    # TODO move into a Lomiri module, once the package set is far enough along for the DE to start
+
+    networking.networkmanager.enable = true; # lomiri-network-indicator
+    # TODO potentially urfkill for lomiri-network-indicator?
+
+    services.dbus.packages = with pkgs.lomiri; [
+      libusermetrics
+    ];
+
+    environment.systemPackages = with pkgs.lomiri; [
+      lomiri-schemas
+    ];
+
+    services.telepathy.enable = true;
+
+    users.users.usermetrics = {
+      group = "usermetrics";
+      home = "/var/lib/usermetrics";
+      createHome = true;
+      isSystemUser = true;
+    };
+
+    users.groups.usermetrics = { };
   };
 
   # TODO session indicator starts up in a semi-broken state, but works fine after a restart. maybe being started before graphical session is truly up & ready?
   testScript = { nodes, ... }: let
-    runCommandPerIndicatorService = command: lib.strings.concatMapStringsSep "\n" command nodes.machine.systemd.user.targets."ayatana-indicators".wants;
+    runCommandOverServiceList = list: command:
+      lib.strings.concatMapStringsSep "\n" command list;
+
+    runCommandOverAyatanaIndicators = runCommandOverServiceList
+      (builtins.filter
+        (service: !(lib.strings.hasPrefix "lomiri" service || lib.strings.hasPrefix "telephony-service" service))
+        nodes.machine.systemd.user.targets."ayatana-indicators".wants);
+
+    runCommandOverAllIndicators = runCommandOverServiceList
+      nodes.machine.systemd.user.targets."ayatana-indicators".wants;
   in ''
     start_all()
     machine.wait_for_x()
@@ -50,8 +89,8 @@ in {
     machine.sleep(10)
 
     # Now check if all indicators were brought up successfully, and kill them for later
-  '' + (runCommandPerIndicatorService (service: let serviceExec = builtins.replaceStrings [ "." ] [ "-" ] service; in ''
-    machine.succeed("pgrep -f ${serviceExec}")
+  '' + (runCommandOverAyatanaIndicators (service: let serviceExec = builtins.replaceStrings [ "." ] [ "-" ] service; in ''
+    machine.succeed("pgrep -u ${user} -f ${serviceExec}")
     machine.succeed("pkill -f ${serviceExec}")
   '')) + ''
 
@@ -65,7 +104,7 @@ in {
     machine.sleep(10)
 
     # Now check if all indicator services were brought up successfully
-  '' + runCommandPerIndicatorService (service: ''
+  '' + runCommandOverAllIndicators (service: ''
     machine.wait_for_unit("${service}", "${user}")
   '');
 })
diff --git a/nixos/tests/babeld.nix b/nixos/tests/babeld.nix
index d4df6f86d089d..e497aa5b64e15 100644
--- a/nixos/tests/babeld.nix
+++ b/nixos/tests/babeld.nix
@@ -120,10 +120,6 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
     ''
       start_all()
 
-      client.wait_for_unit("network-online.target")
-      local_router.wait_for_unit("network-online.target")
-      remote_router.wait_for_unit("network-online.target")
-
       local_router.wait_for_unit("babeld.service")
       remote_router.wait_for_unit("babeld.service")
 
diff --git a/nixos/tests/bittorrent.nix b/nixos/tests/bittorrent.nix
index 4a73fea6a09d0..473b05d4c98e8 100644
--- a/nixos/tests/bittorrent.nix
+++ b/nixos/tests/bittorrent.nix
@@ -115,6 +115,7 @@ in
       start_all()
 
       # Wait for network and miniupnpd.
+      router.systemctl("start network-online.target")
       router.wait_for_unit("network-online.target")
       router.wait_for_unit("miniupnpd")
 
@@ -129,6 +130,7 @@ in
       tracker.succeed("chmod 644 /tmp/test.torrent")
 
       # Start the tracker.  !!! use a less crappy tracker
+      tracker.systemctl("start network-online.target")
       tracker.wait_for_unit("network-online.target")
       tracker.wait_for_unit("opentracker.service")
       tracker.wait_for_open_port(6969)
@@ -140,6 +142,7 @@ in
 
       # Now we should be able to download from the client behind the NAT.
       tracker.wait_for_unit("httpd")
+      client1.systemctl("start network-online.target")
       client1.wait_for_unit("network-online.target")
       client1.succeed("transmission-remote --add http://${externalTrackerAddress}/test.torrent >&2 &")
       client1.wait_for_file("${download-dir}/test.tar.bz2")
@@ -152,6 +155,7 @@ in
 
       # Now download from the second client.  This can only succeed if
       # the first client created a NAT hole in the router.
+      client2.systemctl("start network-online.target")
       client2.wait_for_unit("network-online.target")
       client2.succeed(
           "transmission-remote --add http://${externalTrackerAddress}/test.torrent --no-portmap --no-dht >&2 &"
diff --git a/nixos/tests/buildbot.nix b/nixos/tests/buildbot.nix
index 2f6926313b7cd..149d73bba09c5 100644
--- a/nixos/tests/buildbot.nix
+++ b/nixos/tests/buildbot.nix
@@ -71,6 +71,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     gitrepo.wait_for_unit("multi-user.target")
 
     with subtest("Repo is accessible via git daemon"):
+        bbmaster.systemctl("start network-online.target")
         bbmaster.wait_for_unit("network-online.target")
         bbmaster.succeed("rm -rfv /tmp/fakerepo")
         bbmaster.succeed("git clone git://gitrepo/fakerepo /tmp/fakerepo")
@@ -78,6 +79,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     with subtest("Master service and worker successfully connect"):
         bbmaster.wait_for_unit("buildbot-master.service")
         bbmaster.wait_until_succeeds("curl --fail -s --head http://bbmaster:8010")
+        bbworker.systemctl("start network-online.target")
         bbworker.wait_for_unit("network-online.target")
         bbworker.succeed("nc -z bbmaster 8010")
         bbworker.succeed("nc -z bbmaster 9989")
diff --git a/nixos/tests/cloud-init.nix b/nixos/tests/cloud-init.nix
index 786e01add7d4b..0b4c5a55c80a6 100644
--- a/nixos/tests/cloud-init.nix
+++ b/nixos/tests/cloud-init.nix
@@ -73,6 +73,7 @@ in makeTest {
   };
   testScript = ''
     # To wait until cloud-init terminates its run
+    unnamed.wait_for_unit("cloud-init-local.service")
     unnamed.wait_for_unit("cloud-final.service")
 
     unnamed.succeed("cat /tmp/cloudinit-write-file | grep -q 'cloudinit'")
diff --git a/nixos/tests/corerad.nix b/nixos/tests/corerad.nix
index b6f5d7fc6f75b..dd2bec794a1a0 100644
--- a/nixos/tests/corerad.nix
+++ b/nixos/tests/corerad.nix
@@ -56,6 +56,8 @@ import ./make-test-python.nix (
 
       with subtest("Wait for CoreRAD and network ready"):
           # Ensure networking is online and CoreRAD is ready.
+          router.systemctl("start network-online.target")
+          client.systemctl("start network-online.target")
           router.wait_for_unit("network-online.target")
           client.wait_for_unit("network-online.target")
           router.wait_for_unit("corerad.service")
diff --git a/nixos/tests/curl-impersonate.nix b/nixos/tests/curl-impersonate.nix
index 7954e9e5584c4..33b10da1dfd0f 100644
--- a/nixos/tests/curl-impersonate.nix
+++ b/nixos/tests/curl-impersonate.nix
@@ -144,6 +144,8 @@ in {
     start_all()
 
     with subtest("Wait for network"):
+        web.systemctl("start network-online.target")
+        curl.systemctl("start network-online.target")
         web.wait_for_unit("network-online.target")
         curl.wait_for_unit("network-online.target")
 
diff --git a/nixos/tests/elk.nix b/nixos/tests/elk.nix
index 900ea6320100f..b5a8cb532ae0a 100644
--- a/nixos/tests/elk.nix
+++ b/nixos/tests/elk.nix
@@ -1,6 +1,6 @@
 # To run the test on the unfree ELK use the following command:
 # cd path/to/nixpkgs
-# NIXPKGS_ALLOW_UNFREE=1 nix-build -A nixosTests.elk.unfree.ELK-6
+# NIXPKGS_ALLOW_UNFREE=1 nix-build -A nixosTests.elk.unfree.ELK-7
 
 { system ? builtins.currentSystem,
   config ? {},
@@ -120,7 +120,7 @@ let
               };
 
               elasticsearch-curator = {
-                enable = true;
+                enable = elk ? elasticsearch-curator;
                 actionYAML = ''
                 ---
                 actions:
@@ -246,7 +246,7 @@ let
           one.wait_until_succeeds(
               expect_hits("SuperdupercalifragilisticexpialidociousIndeed")
           )
-    '' + ''
+    '' + lib.optionalString (elk ? elasticsearch-curator) ''
       with subtest("Elasticsearch-curator works"):
           one.systemctl("stop logstash")
           one.systemctl("start elasticsearch-curator")
diff --git a/nixos/tests/ferm.nix b/nixos/tests/ferm.nix
index be43877445ebf..87c67ac623479 100644
--- a/nixos/tests/ferm.nix
+++ b/nixos/tests/ferm.nix
@@ -55,6 +55,8 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     ''
       start_all()
 
+      client.systemctl("start network-online.target")
+      server.systemctl("start network-online.target")
       client.wait_for_unit("network-online.target")
       server.wait_for_unit("network-online.target")
       server.wait_for_unit("ferm.service")
diff --git a/nixos/tests/frp.nix b/nixos/tests/frp.nix
index 2f5c0f8ec933b..1f57c031a53a5 100644
--- a/nixos/tests/frp.nix
+++ b/nixos/tests/frp.nix
@@ -18,10 +18,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         enable = true;
         role = "server";
         settings = {
-          common = {
-            bind_port = 7000;
-            vhost_http_port = 80;
-          };
+          bindPort = 7000;
+          vhostHTTPPort = 80;
         };
       };
     };
@@ -59,15 +57,16 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         enable = true;
         role = "client";
         settings = {
-          common = {
-            server_addr = "10.0.0.1";
-            server_port = 7000;
-          };
-          web = {
-            type = "http";
-            local_port = 80;
-            custom_domains = "10.0.0.1";
-          };
+          serverAddr = "10.0.0.1";
+          serverPort = 7000;
+          proxies = [
+            {
+              name = "web";
+              type = "http";
+              localPort = 80;
+              customDomains = [ "10.0.0.1" ];
+            }
+          ];
         };
       };
     };
diff --git a/nixos/tests/gitdaemon.nix b/nixos/tests/gitdaemon.nix
index bb07b6e97b7fb..052fa902b4504 100644
--- a/nixos/tests/gitdaemon.nix
+++ b/nixos/tests/gitdaemon.nix
@@ -59,6 +59,9 @@ in {
     with subtest("git daemon starts"):
         server.wait_for_unit("git-daemon.service")
 
+
+    server.systemctl("start network-online.target")
+    client.systemctl("start network-online.target")
     server.wait_for_unit("network-online.target")
     client.wait_for_unit("network-online.target")
 
diff --git a/nixos/tests/guix/publish.nix b/nixos/tests/guix/publish.nix
index a15e00b0fa981..eb56fc97478cc 100644
--- a/nixos/tests/guix/publish.nix
+++ b/nixos/tests/guix/publish.nix
@@ -80,6 +80,7 @@ in {
 
     # Now it's the client turn to make use of it.
     substitute_server = "http://server.local:${toString publishPort}"
+    client.systemctl("start network-online.target")
     client.wait_for_unit("network-online.target")
     response = client.succeed(f"curl {substitute_server}")
     assert "Guix Substitute Server" in response
diff --git a/nixos/tests/haproxy.nix b/nixos/tests/haproxy.nix
index 555474d7f2999..1730938737577 100644
--- a/nixos/tests/haproxy.nix
+++ b/nixos/tests/haproxy.nix
@@ -1,22 +1,42 @@
-import ./make-test-python.nix ({ pkgs, ...}: {
+import ./make-test-python.nix ({ lib, pkgs, ...}: {
   name = "haproxy";
   nodes = {
-    machine = { ... }: {
-      services.haproxy = {
+    server = { ... }: {
+     services.haproxy = {
         enable = true;
         config = ''
+          global
+            limited-quic
+
           defaults
+            mode http
             timeout connect 10s
+            timeout client 10s
+            timeout server 10s
+
+            log /dev/log local0 debug err
+            option logasap
+            option httplog
+            option httpslog
 
           backend http_server
-            mode http
-            server httpd [::1]:8000
+            server httpd [::1]:8000 alpn http/1.1
 
           frontend http
-            bind *:80
-            mode http
+            bind :80
+            bind :443 ssl strict-sni crt /etc/ssl/fullchain.pem alpn h2,http/1.1
+            bind quic4@:443 ssl strict-sni crt /etc/ssl/fullchain.pem alpn h3 allow-0rtt
+
+            http-after-response add-header alt-svc 'h3=":443"; ma=60' if { ssl_fc }
+
             http-request use-service prometheus-exporter if { path /metrics }
             use_backend http_server
+
+          frontend http-cert-auth
+            bind :8443 ssl strict-sni crt /etc/ssl/fullchain.pem verify required ca-file /etc/ssl/cacert.crt
+            bind quic4@:8443 ssl strict-sni crt /etc/ssl/fullchain.pem verify required ca-file /etc/ssl/cacert.crt alpn h3
+
+            use_backend http_server
         '';
       };
       services.httpd = {
@@ -30,24 +50,75 @@ import ./make-test-python.nix ({ pkgs, ...}: {
           }];
         };
       };
+      networking.firewall.allowedTCPPorts = [ 80 443 8443 ];
+      networking.firewall.allowedUDPPorts = [ 443 8443 ];
+     };
+    client = { ... }: {
+      environment.systemPackages = [ pkgs.curlHTTP3 ];
     };
   };
   testScript = ''
+    # Helpers
+    def cmd(command):
+      print(f"+{command}")
+      r = os.system(command)
+      if r != 0:
+        raise Exception(f"Command {command} failed with exit code {r}")
+
+    def openssl(command):
+      cmd(f"${pkgs.openssl}/bin/openssl {command}")
+
+    # Generate CA.
+    openssl("req -new -newkey rsa:4096 -nodes -x509 -days 7 -subj '/C=ZZ/ST=Cloud/L=Unspecified/O=NixOS/OU=Tests/CN=CA Certificate' -keyout cacert.key -out cacert.crt")
+
+    # Generate and sign Server.
+    openssl("req -newkey rsa:4096 -nodes -subj '/CN=server/OU=Tests/O=NixOS' -keyout server.key -out server.csr")
+    openssl("x509 -req -in server.csr -out server.crt -CA cacert.crt -CAkey cacert.key -days 7")
+    cmd("cat server.crt server.key > fullchain.pem")
+
+    # Generate and sign Client.
+    openssl("req -newkey rsa:4096 -nodes -subj '/CN=client/OU=Tests/O=NixOS' -keyout client.key -out client.csr")
+    openssl("x509 -req -in client.csr -out client.crt -CA cacert.crt -CAkey cacert.key -days 7")
+    cmd("cat client.crt client.key > client.pem")
+
+    # Start the actual test.
     start_all()
-    machine.wait_for_unit("multi-user.target")
-    machine.wait_for_unit("haproxy.service")
-    machine.wait_for_unit("httpd.service")
-    assert "We are all good!" in machine.succeed("curl -fk http://localhost:80/index.txt")
-    assert "haproxy_process_pool_allocated_bytes" in machine.succeed(
-        "curl -fk http://localhost:80/metrics"
-    )
+    server.copy_from_host("fullchain.pem", "/etc/ssl/fullchain.pem")
+    server.copy_from_host("cacert.crt", "/etc/ssl/cacert.crt")
+    server.succeed("chmod 0644 /etc/ssl/fullchain.pem /etc/ssl/cacert.crt")
+
+    client.copy_from_host("cacert.crt", "/etc/ssl/cacert.crt")
+    client.copy_from_host("client.pem", "/root/client.pem")
+
+    server.wait_for_unit("multi-user.target")
+    server.wait_for_unit("haproxy.service")
+    server.wait_for_unit("httpd.service")
+
+    assert "We are all good!" in client.succeed("curl -f http://server/index.txt")
+    assert "haproxy_process_pool_allocated_bytes" in client.succeed("curl -f http://server/metrics")
+
+    with subtest("https"):
+      assert "We are all good!" in client.succeed("curl -f --cacert /etc/ssl/cacert.crt https://server/index.txt")
+
+    with subtest("https-cert-auth"):
+      # Client must succeed in authenticating with the right certificate.
+      assert "We are all good!" in client.succeed("curl -f --cacert /etc/ssl/cacert.crt --cert-type pem --cert /root/client.pem https://server:8443/index.txt")
+      # Client must fail without certificate.
+      client.fail("curl --cacert /etc/ssl/cacert.crt https://server:8443/index.txt")
+
+    with subtest("h3"):
+      assert "We are all good!" in client.succeed("curl -f --http3-only --cacert /etc/ssl/cacert.crt https://server/index.txt")
+
+    with subtest("h3-cert-auth"):
+      # Client must succeed in authenticating with the right certificate.
+      assert "We are all good!" in client.succeed("curl -f --http3-only --cacert /etc/ssl/cacert.crt --cert-type pem --cert /root/client.pem https://server:8443/index.txt")
+      # Client must fail without certificate.
+      client.fail("curl -f --http3-only --cacert /etc/ssl/cacert.crt https://server:8443/index.txt")
 
     with subtest("reload"):
-        machine.succeed("systemctl reload haproxy")
+        server.succeed("systemctl reload haproxy")
         # wait some time to ensure the following request hits the reloaded haproxy
-        machine.sleep(5)
-        assert "We are all good!" in machine.succeed(
-            "curl -fk http://localhost:80/index.txt"
-        )
+        server.sleep(5)
+        assert "We are all good!" in client.succeed("curl -f http://server/index.txt")
   '';
 })
diff --git a/nixos/tests/hostname.nix b/nixos/tests/hostname.nix
index 6122e2ffeb83a..dffec956bc0b6 100644
--- a/nixos/tests/hostname.nix
+++ b/nixos/tests/hostname.nix
@@ -34,6 +34,7 @@ let
 
         machine = ${hostName}
 
+        machine.systemctl("start network-online.target")
         machine.wait_for_unit("network-online.target")
 
         # Test if NixOS computes the correct FQDN (either a FQDN or an error/null):
diff --git a/nixos/tests/incus/default.nix b/nixos/tests/incus/default.nix
index c88974605e306..26e8a4ac4c772 100644
--- a/nixos/tests/incus/default.nix
+++ b/nixos/tests/incus/default.nix
@@ -6,9 +6,8 @@
 }:
 {
   container = import ./container.nix { inherit system pkgs; };
+  lxd-to-incus = import ./lxd-to-incus.nix { inherit system pkgs; };
   preseed = import ./preseed.nix { inherit system pkgs; };
   socket-activated = import ./socket-activated.nix { inherit system pkgs; };
-  virtual-machine = handleTestOn [ "x86_64-linux" ] ./virtual-machine.nix {
-    inherit system pkgs;
-  };
+  virtual-machine = handleTestOn [ "x86_64-linux" ] ./virtual-machine.nix { inherit system pkgs; };
 }
diff --git a/nixos/tests/incus/lxd-to-incus.nix b/nixos/tests/incus/lxd-to-incus.nix
new file mode 100644
index 0000000000000..67245b54e7527
--- /dev/null
+++ b/nixos/tests/incus/lxd-to-incus.nix
@@ -0,0 +1,112 @@
+import ../make-test-python.nix (
+
+  { pkgs, lib, ... }:
+
+  let
+    releases = import ../../release.nix { configuration.documentation.enable = lib.mkForce false; };
+
+    container-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
+    container-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
+  in
+  {
+    name = "lxd-to-incus";
+
+    meta = {
+      maintainers = lib.teams.lxc.members;
+    };
+
+    nodes.machine =
+      { lib, ... }:
+      {
+        environment.systemPackages = [ pkgs.lxd-to-incus ];
+
+        virtualisation = {
+          diskSize = 6144;
+          cores = 2;
+          memorySize = 2048;
+
+          lxd.enable = true;
+          lxd.preseed = {
+            networks = [
+              {
+                name = "nixostestbr0";
+                type = "bridge";
+                config = {
+                  "ipv4.address" = "10.0.100.1/24";
+                  "ipv4.nat" = "true";
+                };
+              }
+            ];
+            profiles = [
+              {
+                name = "default";
+                devices = {
+                  eth0 = {
+                    name = "eth0";
+                    network = "nixostestbr0";
+                    type = "nic";
+                  };
+                  root = {
+                    path = "/";
+                    pool = "nixostest_pool";
+                    size = "35GiB";
+                    type = "disk";
+                  };
+                };
+              }
+              {
+                name = "nixos_notdefault";
+                devices = { };
+              }
+            ];
+            storage_pools = [
+              {
+                name = "nixostest_pool";
+                driver = "dir";
+              }
+            ];
+          };
+
+          incus.enable = true;
+        };
+      };
+
+    testScript = ''
+      def lxd_wait_for_preseed(_) -> bool:
+          _, output = machine.systemctl("is-active lxd-preseed.service")
+          return "inactive" in output
+
+      def lxd_instance_is_up(_) -> bool:
+          status, _ = machine.execute("lxc exec container --disable-stdin --force-interactive /run/current-system/sw/bin/true")
+          return status == 0
+
+      def incus_instance_is_up(_) -> bool:
+          status, _ = machine.execute("incus exec container --disable-stdin --force-interactive /run/current-system/sw/bin/true")
+          return status == 0
+
+      with machine.nested("initialize lxd and resources"):
+        machine.wait_for_unit("sockets.target")
+        machine.wait_for_unit("lxd.service")
+        retry(lxd_wait_for_preseed)
+
+        machine.succeed("lxc image import ${container-image-metadata}/*/*.tar.xz ${container-image-rootfs}/*/*.tar.xz --alias nixos")
+        machine.succeed("lxc launch nixos container")
+        retry(lxd_instance_is_up)
+
+      machine.wait_for_unit("incus.service")
+
+      with machine.nested("run migration"):
+          machine.succeed("lxd-to-incus --yes")
+
+      with machine.nested("verify resources migrated to incus"):
+          machine.succeed("incus config show container")
+          retry(incus_instance_is_up)
+          machine.succeed("incus exec container -- true")
+          machine.succeed("incus profile show default | grep nixostestbr0")
+          machine.succeed("incus profile show default | grep nixostest_pool")
+          machine.succeed("incus profile show nixos_notdefault")
+          machine.succeed("incus storage show nixostest_pool")
+          machine.succeed("incus network show nixostestbr0")
+    '';
+  }
+)
diff --git a/nixos/tests/installer.nix b/nixos/tests/installer.nix
index 21d5e1470d8e0..7576fae41f83b 100644
--- a/nixos/tests/installer.nix
+++ b/nixos/tests/installer.nix
@@ -158,7 +158,9 @@ let
       start_all()
       ${optionalString clevisTest ''
       tang.wait_for_unit("sockets.target")
+      tang.systemctl("start network-online.target")
       tang.wait_for_unit("network-online.target")
+      machine.systemctl("start network-online.target")
       machine.wait_for_unit("network-online.target")
       ''}
       machine.wait_for_unit("multi-user.target")
@@ -187,6 +189,7 @@ let
 
       ${optionalString clevisTest ''
         with subtest("Create the Clevis secret with Tang"):
+             machine.systemctl("start network-online.target")
              machine.wait_for_unit("network-online.target")
              machine.succeed('echo -n password | clevis encrypt sss \'{"t": 2, "pins": {"tpm2": {}, "tang": {"url": "http://192.168.1.2"}}}\' -y > /mnt/etc/nixos/clevis-secret.jwe')''}
 
diff --git a/nixos/tests/kanidm.nix b/nixos/tests/kanidm.nix
index 3f5bca397740e..fa24d4a8a5e13 100644
--- a/nixos/tests/kanidm.nix
+++ b/nixos/tests/kanidm.nix
@@ -67,6 +67,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
       ''
         start_all()
         server.wait_for_unit("kanidm.service")
+        client.systemctl("start network-online.target")
         client.wait_for_unit("network-online.target")
 
         with subtest("Test HTTP interface"):
diff --git a/nixos/tests/keepalived.nix b/nixos/tests/keepalived.nix
index d0bf9d4652003..ce291514591fe 100644
--- a/nixos/tests/keepalived.nix
+++ b/nixos/tests/keepalived.nix
@@ -1,5 +1,6 @@
-import ./make-test-python.nix ({ pkgs, ... }: {
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
   name = "keepalived";
+  maintainers = [ lib.maintainers.raitobezarius ];
 
   nodes = {
     node1 = { pkgs, ... }: {
diff --git a/nixos/tests/lemmy.nix b/nixos/tests/lemmy.nix
index de2c4938fe231..e8d747f89a9e7 100644
--- a/nixos/tests/lemmy.nix
+++ b/nixos/tests/lemmy.nix
@@ -59,6 +59,7 @@ in
         server.succeed("curl --fail localhost:${toString uiPort}")
 
     with subtest("Lemmy-UI responds through the caddy reverse proxy"):
+        server.systemctl("start network-online.target")
         server.wait_for_unit("network-online.target")
         server.wait_for_unit("caddy.service")
         server.wait_for_open_port(80)
@@ -66,6 +67,7 @@ in
         assert "Lemmy" in body, f"String Lemmy not found in response for ${lemmyNodeName}: \n{body}"
 
     with subtest("the server is exposed externally"):
+        client.systemctl("start network-online.target")
         client.wait_for_unit("network-online.target")
         client.succeed("curl -v --fail ${lemmyNodeName}")
 
diff --git a/nixos/tests/miriway.nix b/nixos/tests/miriway.nix
index f12c4d5ecc41e..a0987d9fc41b6 100644
--- a/nixos/tests/miriway.nix
+++ b/nixos/tests/miriway.nix
@@ -31,7 +31,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         enable-x11=
 
         ctrl-alt=t:foot --maximized
-        ctrl-alt=a:env WINIT_UNIX_BACKEND=x11 WAYLAND_DISPLAY=invalid alacritty --option window.startup_mode=maximized
+        ctrl-alt=a:env WINIT_UNIX_BACKEND=x11 WAYLAND_DISPLAY= alacritty --option window.startup_mode=\"maximized\"
 
         shell-component=dbus-update-activation-environment --systemd DISPLAY WAYLAND_DISPLAY
 
diff --git a/nixos/tests/networking.nix b/nixos/tests/networking.nix
index 768d0cfa2238c..6bd89902eedb3 100644
--- a/nixos/tests/networking.nix
+++ b/nixos/tests/networking.nix
@@ -130,6 +130,7 @@ let
           start_all()
 
           client.wait_for_unit("network.target")
+          router.systemctl("start network-online.target")
           router.wait_for_unit("network-online.target")
 
           with subtest("Make sure DHCP server is not started"):
@@ -222,6 +223,7 @@ let
           start_all()
 
           client.wait_for_unit("network.target")
+          router.systemctl("start network-online.target")
           router.wait_for_unit("network-online.target")
 
           with subtest("Wait until we have an ip address on each interface"):
@@ -849,6 +851,7 @@ let
 
           client.wait_for_unit("network.target")
           client_with_privacy.wait_for_unit("network.target")
+          router.systemctl("start network-online.target")
           router.wait_for_unit("network-online.target")
 
           with subtest("Wait until we have an ip address"):
diff --git a/nixos/tests/nfs/kerberos.nix b/nixos/tests/nfs/kerberos.nix
index 1bace4058be59..5944b53319a0b 100644
--- a/nixos/tests/nfs/kerberos.nix
+++ b/nixos/tests/nfs/kerberos.nix
@@ -105,6 +105,7 @@ in
       server.wait_for_unit("rpc-gssd.service")
       server.wait_for_unit("rpc-svcgssd.service")
 
+      client.systemctl("start network-online.target")
       client.wait_for_unit("network-online.target")
 
       # add principals to client keytab
diff --git a/nixos/tests/nginx-moreheaders.nix b/nixos/tests/nginx-moreheaders.nix
new file mode 100644
index 0000000000000..560dcf9ce0b82
--- /dev/null
+++ b/nixos/tests/nginx-moreheaders.nix
@@ -0,0 +1,37 @@
+import ./make-test-python.nix {
+  name = "nginx-more-headers";
+
+  nodes = {
+    webserver = { pkgs, ... }: {
+      services.nginx = {
+        enable = true;
+
+        virtualHosts.test = {
+          locations = {
+            "/".return = "200 blub";
+            "/some" =  {
+              return = "200 blub";
+              extraConfig = ''
+                more_set_headers "Referrer-Policy: no-referrer";
+              '';
+            };
+          };
+          extraConfig = ''
+            more_set_headers "X-Powered-By: nixos";
+          '';
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    webserver.wait_for_unit("nginx")
+    webserver.wait_for_open_port(80)
+
+    webserver.succeed("curl --fail --resolve test:80:127.0.0.1 --head --verbose http://test | grep -q \"X-Powered-By: nixos\"")
+    webserver.fail("curl --fail --resolve test:80:127.0.0.1 --head --verbose http://test | grep -q \"Referrer-Policy: no-referrer\"")
+
+    webserver.succeed("curl --fail --resolve test:80:127.0.0.1 --head --verbose http://test/some | grep -q \"X-Powered-By: nixos\"")
+    webserver.succeed("curl --fail --resolve test:80:127.0.0.1 --head --verbose http://test/some | grep -q \"Referrer-Policy: no-referrer\"")
+  '';
+}
diff --git a/nixos/tests/nixops/default.nix b/nixos/tests/nixops/default.nix
index f7a26f2461c4c..6501d13a2ed36 100644
--- a/nixos/tests/nixops/default.nix
+++ b/nixos/tests/nixops/default.nix
@@ -1,6 +1,4 @@
-{ pkgs
-, testers
-, ... }:
+{ pkgs, ... }:
 let
   inherit (pkgs) lib;
 
@@ -21,7 +19,7 @@ let
     passthru.override = args': testsForPackage (args // args');
   };
 
-  testLegacyNetwork = { nixopsPkg, ... }: testers.nixosTest ({
+  testLegacyNetwork = { nixopsPkg, ... }: pkgs.testers.nixosTest ({
     name = "nixops-legacy-network";
     nodes = {
       deployer = { config, lib, nodes, pkgs, ... }: {
diff --git a/nixos/tests/opensmtpd-rspamd.nix b/nixos/tests/opensmtpd-rspamd.nix
index 19969a7b47ddd..e413a2050bd61 100644
--- a/nixos/tests/opensmtpd-rspamd.nix
+++ b/nixos/tests/opensmtpd-rspamd.nix
@@ -119,6 +119,7 @@ import ./make-test-python.nix {
   testScript = ''
     start_all()
 
+    client.systemctl("start network-online.target")
     client.wait_for_unit("network-online.target")
     smtp1.wait_for_unit("opensmtpd")
     smtp2.wait_for_unit("opensmtpd")
diff --git a/nixos/tests/opensmtpd.nix b/nixos/tests/opensmtpd.nix
index 17c1a569ba0d9..d32f82ed33b8c 100644
--- a/nixos/tests/opensmtpd.nix
+++ b/nixos/tests/opensmtpd.nix
@@ -104,6 +104,7 @@ import ./make-test-python.nix {
   testScript = ''
     start_all()
 
+    client.systemctl("start network-online.target")
     client.wait_for_unit("network-online.target")
     smtp1.wait_for_unit("opensmtpd")
     smtp2.wait_for_unit("opensmtpd")
diff --git a/nixos/tests/owncast.nix b/nixos/tests/owncast.nix
index debb34f5009dc..73aac4e704751 100644
--- a/nixos/tests/owncast.nix
+++ b/nixos/tests/owncast.nix
@@ -31,6 +31,8 @@ import ./make-test-python.nix ({ pkgs, ... }: {
   testScript = ''
     start_all()
 
+    client.systemctl("start network-online.target")
+    server.systemctl("start network-online.target")
     client.wait_for_unit("network-online.target")
     server.wait_for_unit("network-online.target")
     server.wait_for_unit("owncast.service")
diff --git a/nixos/tests/podman/default.nix b/nixos/tests/podman/default.nix
index 0e1f420f2a7de..3eea45832f0a6 100644
--- a/nixos/tests/podman/default.nix
+++ b/nixos/tests/podman/default.nix
@@ -24,8 +24,6 @@ import ../make-test-python.nix (
         virtualisation.podman.enable = true;
 
         virtualisation.podman.defaultNetwork.settings.dns_enabled = true;
-
-        networking.firewall.allowedUDPPorts = [ 53 ];
       };
       docker = { pkgs, ... }: {
         virtualisation.podman.enable = true;
diff --git a/nixos/tests/postgis.nix b/nixos/tests/postgis.nix
index 09c738b938ba8..dacf4e576c071 100644
--- a/nixos/tests/postgis.nix
+++ b/nixos/tests/postgis.nix
@@ -24,6 +24,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     master.wait_for_unit("postgresql")
     master.sleep(10)  # Hopefully this is long enough!!
     master.succeed("sudo -u postgres psql -c 'CREATE EXTENSION postgis;'")
+    master.succeed("sudo -u postgres psql -c 'CREATE EXTENSION postgis_raster;'")
     master.succeed("sudo -u postgres psql -c 'CREATE EXTENSION postgis_topology;'")
   '';
 })
diff --git a/nixos/tests/qemu-vm-restrictnetwork.nix b/nixos/tests/qemu-vm-restrictnetwork.nix
index 49a105ef10767..49aefcc099bda 100644
--- a/nixos/tests/qemu-vm-restrictnetwork.nix
+++ b/nixos/tests/qemu-vm-restrictnetwork.nix
@@ -21,6 +21,8 @@ import ./make-test-python.nix ({
 
     else:
       start_all()
+      unrestricted.systemctl("start network-online.target")
+      restricted.systemctl("start network-online.target")
       unrestricted.wait_for_unit("network-online.target")
       restricted.wait_for_unit("network-online.target")
 
diff --git a/nixos/tests/rss2email.nix b/nixos/tests/rss2email.nix
index f32326feb50fb..60b27b95fabe4 100644
--- a/nixos/tests/rss2email.nix
+++ b/nixos/tests/rss2email.nix
@@ -55,6 +55,7 @@ import ./make-test-python.nix {
   testScript = ''
     start_all()
 
+    server.systemctl("start network-online.target")
     server.wait_for_unit("network-online.target")
     server.wait_for_unit("opensmtpd")
     server.wait_for_unit("dovecot2")
diff --git a/nixos/tests/ssh-audit.nix b/nixos/tests/ssh-audit.nix
index bd6255b8044d9..25772aba3ea08 100644
--- a/nixos/tests/ssh-audit.nix
+++ b/nixos/tests/ssh-audit.nix
@@ -70,6 +70,7 @@ import ./make-test-python.nix (
       ${serverName}.succeed("${pkgs.ssh-audit}/bin/ssh-audit 127.0.0.1")
 
       # Wait for client to be able to connect to the server
+      ${clientName}.systemctl("start network-online.target")
       ${clientName}.wait_for_unit("network-online.target")
 
       # Set up trusted private key
diff --git a/nixos/tests/suwayomi-server.nix b/nixos/tests/suwayomi-server.nix
new file mode 100644
index 0000000000000..36072028380b8
--- /dev/null
+++ b/nixos/tests/suwayomi-server.nix
@@ -0,0 +1,46 @@
+{ system ? builtins.currentSystem
+, pkgs
+, lib ? pkgs.lib
+}:
+let
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+  inherit (lib) recursiveUpdate;
+
+  baseTestConfig = {
+    meta.maintainers = with lib.maintainers; [ ratcornu ];
+    nodes.machine = { pkgs, ... }: {
+      services.suwayomi-server = {
+        enable = true;
+        settings.server.port = 1234;
+      };
+    };
+    testScript = ''
+      machine.wait_for_unit("suwayomi-server.service")
+      machine.wait_for_open_port(1234)
+      machine.succeed("curl --fail http://localhost:1234/")
+    '';
+  };
+in
+
+{
+  without-auth = makeTest (recursiveUpdate baseTestConfig {
+    name = "suwayomi-server-without-auth";
+  });
+
+  with-auth = makeTest (recursiveUpdate baseTestConfig {
+    name = "suwayomi-server-with-auth";
+
+    nodes.machine = { pkgs, ... }: {
+      services.suwayomi-server = {
+        enable = true;
+
+        settings.server = {
+          port = 1234;
+          basicAuthEnabled = true;
+          basicAuthUsername = "alice";
+          basicAuthPasswordFile = pkgs.writeText "snakeoil-pass.txt" "pass";
+        };
+      };
+    };
+  });
+}
diff --git a/nixos/tests/sysinit-reactivation.nix b/nixos/tests/sysinit-reactivation.nix
new file mode 100644
index 0000000000000..1a0caecb610a3
--- /dev/null
+++ b/nixos/tests/sysinit-reactivation.nix
@@ -0,0 +1,107 @@
+# This runs two scenarios in one test:
+# - A post-sysinit service needs to be restarted AFTER tmpfiles is restarted.
+# - A service needs to be restarted BEFORE tmpfiles is restarted.
+
+{ lib, ... }:
+
+let
+  makeGeneration = generation: {
+    "${generation}".configuration = {
+      systemd.services.pre-sysinit-before-tmpfiles.environment.USER =
+        lib.mkForce "${generation}-tmpfiles-user";
+
+      systemd.services.pre-sysinit-after-tmpfiles.environment = {
+        NEEDED_PATH = lib.mkForce "/run/${generation}-needed-by-pre-sysinit-after-tmpfiles";
+        PATH_TO_CREATE = lib.mkForce "/run/${generation}-needed-by-post-sysinit";
+      };
+
+      systemd.services.post-sysinit.environment = {
+        NEEDED_PATH = lib.mkForce "/run/${generation}-needed-by-post-sysinit";
+        PATH_TO_CREATE = lib.mkForce "/run/${generation}-created-by-post-sysinit";
+      };
+
+      systemd.tmpfiles.settings.test = lib.mkForce {
+        "/run/${generation}-needed-by-pre-sysinit-after-tmpfiles".f.user =
+          "${generation}-tmpfiles-user";
+      };
+    };
+  };
+in
+
+{
+
+  name = "sysinit-reactivation";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine = { config, lib, pkgs, ... }: {
+    systemd.services.pre-sysinit-before-tmpfiles = {
+      wantedBy = [ "sysinit.target" ];
+      requiredBy = [ "sysinit-reactivation.target" ];
+      before = [ "systemd-tmpfiles-setup.service" "systemd-tmpfiles-resetup.service" ];
+      unitConfig.DefaultDependencies = false;
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      environment.USER = "tmpfiles-user";
+      script = "${pkgs.shadow}/bin/useradd $USER";
+    };
+
+    systemd.services.pre-sysinit-after-tmpfiles = {
+      wantedBy = [ "sysinit.target" ];
+      requiredBy = [ "sysinit-reactivation.target" ];
+      after = [ "systemd-tmpfiles-setup.service" "systemd-tmpfiles-resetup.service" ];
+      unitConfig.DefaultDependencies = false;
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      environment = {
+        NEEDED_PATH = "/run/needed-by-pre-sysinit-after-tmpfiles";
+        PATH_TO_CREATE = "/run/needed-by-post-sysinit";
+      };
+      script = ''
+        if [[ -e $NEEDED_PATH ]]; then
+          touch $PATH_TO_CREATE
+        fi
+      '';
+    };
+
+    systemd.services.post-sysinit = {
+      wantedBy = [ "default.target" ];
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      environment = {
+        NEEDED_PATH = "/run/needed-by-post-sysinit";
+        PATH_TO_CREATE = "/run/created-by-post-sysinit";
+      };
+      script = ''
+        if [[ -e $NEEDED_PATH ]]; then
+          touch $PATH_TO_CREATE
+        fi
+      '';
+    };
+
+    systemd.tmpfiles.settings.test = {
+      "/run/needed-by-pre-sysinit-after-tmpfiles".f.user =
+        "tmpfiles-user";
+    };
+
+    specialisation = lib.mkMerge [
+      (makeGeneration "second")
+      (makeGeneration "third")
+    ];
+  };
+
+  testScript = { nodes, ... }: ''
+    def switch(generation):
+      toplevel = "${nodes.machine.system.build.toplevel}";
+      machine.succeed(f"{toplevel}/specialisation/{generation}/bin/switch-to-configuration switch")
+
+    machine.wait_for_unit("default.target")
+    machine.succeed("test -e /run/created-by-post-sysinit")
+
+    switch("second")
+    machine.succeed("test -e /run/second-created-by-post-sysinit")
+
+    switch("third")
+    machine.succeed("test -e /run/third-created-by-post-sysinit")
+  '';
+}
diff --git a/nixos/tests/systemd-networkd-dhcpserver.nix b/nixos/tests/systemd-networkd-dhcpserver.nix
index cf0ccb7442118..665d8b5a05291 100644
--- a/nixos/tests/systemd-networkd-dhcpserver.nix
+++ b/nixos/tests/systemd-networkd-dhcpserver.nix
@@ -101,6 +101,9 @@ import ./make-test-python.nix ({pkgs, ...}: {
   };
   testScript = { ... }: ''
     start_all()
+
+    router.systemctl("start network-online.target")
+    client.systemctl("start network-online.target")
     router.wait_for_unit("systemd-networkd-wait-online.service")
     client.wait_for_unit("systemd-networkd-wait-online.service")
     client.wait_until_succeeds("ping -c 5 10.0.2.1")
diff --git a/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix b/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
index 54f371e6c070f..1e55341657bdb 100644
--- a/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
+++ b/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
@@ -263,9 +263,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
           };
         };
       };
-
-      # make the network-online target a requirement, we wait for it in our test script
-      systemd.targets.network-online.wantedBy = [ "multi-user.target" ];
     };
 
     # This is the client behind the router. We should be receiving router
@@ -278,9 +275,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         useNetworkd = true;
         useDHCP = false;
       };
-
-      # make the network-online target a requirement, we wait for it in our test script
-      systemd.targets.network-online.wantedBy = [ "multi-user.target" ];
     };
   };
 
@@ -294,6 +288,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     # Since we only care about IPv6 that should not involve waiting for legacy
     # IP leases.
     client.start()
+    client.systemctl("start network-online.target")
     client.wait_for_unit("network-online.target")
 
     # the static address on the router should not be reachable
@@ -312,6 +307,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     isp.wait_for_unit("multi-user.target")
 
     # wait until the uplink interface has a good status
+    router.systemctl("start network-online.target")
     router.wait_for_unit("network-online.target")
     router.wait_until_succeeds("ping -6 -c1 2001:DB8::1")
 
diff --git a/nixos/tests/systemd-nspawn.nix b/nixos/tests/systemd-nspawn.nix
index 1a4251ef069e8..b86762233d183 100644
--- a/nixos/tests/systemd-nspawn.nix
+++ b/nixos/tests/systemd-nspawn.nix
@@ -38,6 +38,7 @@ in {
     start_all()
 
     server.wait_for_unit("nginx.service")
+    client.systemctl("start network-online.target")
     client.wait_for_unit("network-online.target")
     client.succeed("machinectl pull-raw --verify=signature http://server/testimage.raw")
     client.succeed(
diff --git a/nixos/tests/systemd-sysusers-immutable.nix b/nixos/tests/systemd-sysusers-immutable.nix
new file mode 100644
index 0000000000000..42cbf84d175e4
--- /dev/null
+++ b/nixos/tests/systemd-sysusers-immutable.nix
@@ -0,0 +1,64 @@
+{ lib, ... }:
+
+let
+  rootPassword = "$y$j9T$p6OI0WN7.rSfZBOijjRdR.$xUOA2MTcB48ac.9Oc5fz8cxwLv1mMqabnn333iOzSA6";
+  normaloPassword = "$y$j9T$3aiOV/8CADAK22OK2QT3/0$67OKd50Z4qTaZ8c/eRWHLIM.o3ujtC1.n9ysmJfv639";
+  newNormaloPassword = "mellow";
+in
+
+{
+
+  name = "activation-sysusers-immutable";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine = {
+    systemd.sysusers.enable = true;
+    users.mutableUsers = false;
+
+    # Override the empty root password set by the test instrumentation
+    users.users.root.hashedPasswordFile = lib.mkForce null;
+    users.users.root.initialHashedPassword = rootPassword;
+    users.users.normalo = {
+      isNormalUser = true;
+      initialHashedPassword = normaloPassword;
+    };
+
+    specialisation.new-generation.configuration = {
+      users.users.new-normalo = {
+        isNormalUser = true;
+        initialPassword = newNormaloPassword;
+      };
+    };
+  };
+
+  testScript = ''
+    with subtest("Users are not created with systemd-sysusers"):
+      machine.fail("systemctl status systemd-sysusers.service")
+      machine.fail("ls /etc/sysusers.d")
+
+    with subtest("Correct mode on the password files"):
+      assert machine.succeed("stat -c '%a' /etc/passwd") == "644\n"
+      assert machine.succeed("stat -c '%a' /etc/group") == "644\n"
+      assert machine.succeed("stat -c '%a' /etc/shadow") == "0\n"
+      assert machine.succeed("stat -c '%a' /etc/gshadow") == "0\n"
+
+    with subtest("root user has correct password"):
+      print(machine.succeed("getent passwd root"))
+      assert "${rootPassword}" in machine.succeed("getent shadow root"), "root user password is not correct"
+
+    with subtest("normalo user is created"):
+      print(machine.succeed("getent passwd normalo"))
+      assert machine.succeed("stat -c '%U' /home/normalo") == "normalo\n"
+      assert "${normaloPassword}" in machine.succeed("getent shadow normalo"), "normalo user password is not correct"
+
+
+    machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
+
+
+    with subtest("new-normalo user is created after switching to new generation"):
+      print(machine.succeed("getent passwd new-normalo"))
+      print(machine.succeed("getent shadow new-normalo"))
+      assert machine.succeed("stat -c '%U' /home/new-normalo") == "new-normalo\n"
+  '';
+}
diff --git a/nixos/tests/systemd-sysusers-mutable.nix b/nixos/tests/systemd-sysusers-mutable.nix
new file mode 100644
index 0000000000000..e69cfe23a59a1
--- /dev/null
+++ b/nixos/tests/systemd-sysusers-mutable.nix
@@ -0,0 +1,71 @@
+{ lib, ... }:
+
+let
+  rootPassword = "$y$j9T$p6OI0WN7.rSfZBOijjRdR.$xUOA2MTcB48ac.9Oc5fz8cxwLv1mMqabnn333iOzSA6";
+  normaloPassword = "hello";
+  newNormaloPassword = "$y$j9T$p6OI0WN7.rSfZBOijjRdR.$xUOA2MTcB48ac.9Oc5fz8cxwLv1mMqabnn333iOzSA6";
+in
+
+{
+
+  name = "activation-sysusers-mutable";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine = { pkgs, ... }: {
+    systemd.sysusers.enable = true;
+    users.mutableUsers = true;
+
+    # Prerequisites
+    system.etc.overlay.enable = true;
+    boot.initrd.systemd.enable = true;
+    boot.kernelPackages = pkgs.linuxPackages_latest;
+
+    # Override the empty root password set by the test instrumentation
+    users.users.root.hashedPasswordFile = lib.mkForce null;
+    users.users.root.initialHashedPassword = rootPassword;
+    users.users.normalo = {
+      isNormalUser = true;
+      initialPassword = normaloPassword;
+    };
+
+    specialisation.new-generation.configuration = {
+      users.users.new-normalo = {
+        isNormalUser = true;
+        initialHashedPassword = newNormaloPassword;
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("systemd-sysusers.service")
+
+    with subtest("systemd-sysusers.service contains the credentials"):
+      sysusers_service = machine.succeed("systemctl cat systemd-sysusers.service")
+      print(sysusers_service)
+      assert "SetCredential=passwd.plaintext-password.normalo:${normaloPassword}" in sysusers_service
+
+    with subtest("Correct mode on the password files"):
+      assert machine.succeed("stat -c '%a' /etc/passwd") == "644\n"
+      assert machine.succeed("stat -c '%a' /etc/group") == "644\n"
+      assert machine.succeed("stat -c '%a' /etc/shadow") == "0\n"
+      assert machine.succeed("stat -c '%a' /etc/gshadow") == "0\n"
+
+    with subtest("root user has correct password"):
+      print(machine.succeed("getent passwd root"))
+      assert "${rootPassword}" in machine.succeed("getent shadow root"), "root user password is not correct"
+
+    with subtest("normalo user is created"):
+      print(machine.succeed("getent passwd normalo"))
+      assert machine.succeed("stat -c '%U' /home/normalo") == "normalo\n"
+
+
+    machine.succeed("/run/current-system/specialisation/new-generation/bin/switch-to-configuration switch")
+
+
+    with subtest("new-normalo user is created after switching to new generation"):
+      print(machine.succeed("getent passwd new-normalo"))
+      assert machine.succeed("stat -c '%U' /home/new-normalo") == "new-normalo\n"
+      assert "${newNormaloPassword}" in machine.succeed("getent shadow new-normalo"), "new-normalo user password is not correct"
+  '';
+}
diff --git a/nixos/tests/tayga.nix b/nixos/tests/tayga.nix
index 44974f6efea83..4aade67d74d0d 100644
--- a/nixos/tests/tayga.nix
+++ b/nixos/tests/tayga.nix
@@ -206,6 +206,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
   testScript = ''
     # start client and server
     for machine in client, server:
+      machine.systemctl("start network-online.target")
       machine.wait_for_unit("network-online.target")
       machine.log(machine.execute("ip addr")[1])
       machine.log(machine.execute("ip route")[1])
@@ -214,6 +215,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
     # test systemd-networkd and nixos-scripts based router
     for router in router_systemd, router_nixos:
       router.start()
+      router.systemctl("start network-online.target")
       router.wait_for_unit("network-online.target")
       router.wait_for_unit("tayga.service")
       router.log(machine.execute("ip addr")[1])
diff --git a/nixos/tests/trafficserver.nix b/nixos/tests/trafficserver.nix
index e4557c6c50e54..94d0e4dd926e9 100644
--- a/nixos/tests/trafficserver.nix
+++ b/nixos/tests/trafficserver.nix
@@ -104,6 +104,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     ats.wait_for_open_port(80)
     httpbin.wait_for_unit("httpbin")
     httpbin.wait_for_open_port(80)
+    client.systemctl("start network-online.target")
     client.wait_for_unit("network-online.target")
 
     with subtest("Traffic Server is running"):
diff --git a/nixos/tests/ulogd/ulogd.py b/nixos/tests/ulogd/ulogd.py
index d20daa4d733a2..76a8d0c6e24a3 100644
--- a/nixos/tests/ulogd/ulogd.py
+++ b/nixos/tests/ulogd/ulogd.py
@@ -1,5 +1,6 @@
 start_all()
 machine.wait_for_unit("ulogd.service")
+machine.systemctl("start network-online.target")
 machine.wait_for_unit("network-online.target")
 
 with subtest("Ulogd is running"):
diff --git a/nixos/tests/upnp.nix b/nixos/tests/upnp.nix
index 5e135267403bd..93bc08f752ce3 100644
--- a/nixos/tests/upnp.nix
+++ b/nixos/tests/upnp.nix
@@ -81,11 +81,13 @@ in
       start_all()
 
       # Wait for network and miniupnpd.
+      router.systemctl("start network-online.target")
       router.wait_for_unit("network-online.target")
       # $router.wait_for_unit("nat")
       router.wait_for_unit("${if useNftables then "nftables" else "firewall"}.service")
       router.wait_for_unit("miniupnpd")
 
+      client1.systemctl("start network-online.target")
       client1.wait_for_unit("network-online.target")
 
       client1.succeed("upnpc -a ${internalClient1Address} 9000 9000 TCP")
diff --git a/nixos/tests/uptermd.nix b/nixos/tests/uptermd.nix
index 429e3c9dd5ff3..469aa5047c27c 100644
--- a/nixos/tests/uptermd.nix
+++ b/nixos/tests/uptermd.nix
@@ -28,6 +28,7 @@ in
     start_all()
 
     server.wait_for_unit("uptermd.service")
+    server.systemctl("start network-online.target")
     server.wait_for_unit("network-online.target")
 
     # wait for upterm port to be reachable
diff --git a/nixos/tests/watchdogd.nix b/nixos/tests/watchdogd.nix
new file mode 100644
index 0000000000000..663e97cbae104
--- /dev/null
+++ b/nixos/tests/watchdogd.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "watchdogd";
+  meta.maintainers = with lib.maintainers; [ vifino ];
+
+  nodes.machine = { pkgs, ... }: {
+    virtualisation.qemu.options = [
+      "-device i6300esb" # virtual watchdog timer
+    ];
+    boot.kernelModules = [ "i6300esb" ];
+    services.watchdogd.enable = true;
+    services.watchdogd.settings = {
+      supervisor.enabled = true;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("watchdogd.service")
+
+    assert "i6300ESB" in machine.succeed("watchdogctl status")
+    machine.succeed("watchdogctl test")
+  '';
+})
diff --git a/nixos/tests/web-apps/netbox-upgrade.nix b/nixos/tests/web-apps/netbox-upgrade.nix
index b5403eb678bcb..4c554e7ae613b 100644
--- a/nixos/tests/web-apps/netbox-upgrade.nix
+++ b/nixos/tests/web-apps/netbox-upgrade.nix
@@ -1,6 +1,6 @@
 import ../make-test-python.nix ({ lib, pkgs, ... }: let
-  oldNetbox = pkgs.netbox_3_5;
-  newNetbox = pkgs.netbox_3_6;
+  oldNetbox = pkgs.netbox_3_6;
+  newNetbox = pkgs.netbox_3_7;
 in {
   name = "netbox-upgrade";
 
diff --git a/nixos/tests/web-servers/stargazer.nix b/nixos/tests/web-servers/stargazer.nix
index 6365d6a4fff10..f56d1b8c94545 100644
--- a/nixos/tests/web-servers/stargazer.nix
+++ b/nixos/tests/web-servers/stargazer.nix
@@ -1,4 +1,41 @@
 { pkgs, lib, ... }:
+let
+  test_script = pkgs.stdenv.mkDerivation rec {
+    pname = "stargazer-test-script";
+    inherit (pkgs.stargazer) version src;
+    buildInputs = with pkgs; [ (python3.withPackages (ps: with ps; [ cryptography ])) ];
+    dontBuild = true;
+    doCheck = false;
+    installPhase = ''
+      mkdir -p $out/bin
+      cp scripts/gemini-diagnostics $out/bin/test
+    '';
+  };
+  test_env = pkgs.stdenv.mkDerivation rec {
+    pname = "stargazer-test-env";
+    inherit (pkgs.stargazer) version src;
+    buildPhase = ''
+      cc test_data/cgi-bin/loop.c -o test_data/cgi-bin/loop
+    '';
+    doCheck = false;
+    installPhase = ''
+      mkdir -p $out
+      cp -r * $out/
+    '';
+  };
+  scgi_server = pkgs.stdenv.mkDerivation rec {
+    pname = "stargazer-test-scgi-server";
+    inherit (pkgs.stargazer) version src;
+    buildInputs = with pkgs; [ python3 ];
+    dontConfigure = true;
+    dontBuild = true;
+    doCheck = false;
+    installPhase = ''
+      mkdir -p $out/bin
+      cp scripts/scgi-server $out/bin/scgi-server
+    '';
+  };
+in
 {
   name = "stargazer";
   meta = with lib.maintainers; { maintainers = [ gaykitty ]; };
@@ -7,25 +44,84 @@
     geminiserver = { pkgs, ... }: {
       services.stargazer = {
         enable = true;
+        connectionLogging = false;
+        requestTimeout = 1;
         routes = [
           {
             route = "localhost";
-            root = toString (pkgs.writeTextDir "index.gmi" ''
-              # Hello NixOS!
-            '');
+            root = "${test_env}/test_data/test_site";
+          }
+          {
+            route = "localhost=/en.gmi";
+            root = "${test_env}/test_data/test_site";
+            lang = "en";
+            charset = "ascii";
+          }
+          {
+            route = "localhost~/(.*).gemini";
+            root = "${test_env}/test_data/test_site";
+            rewrite = "\\1.gmi";
+            lang = "en";
+            charset = "ascii";
+          }
+          {
+            route = "localhost=/plain.txt";
+            root = "${test_env}/test_data/test_site";
+            lang = "en";
+            charset = "ascii";
+            cert-path = "/var/lib/gemini/certs/localhost.crt";
+            key-path = "/var/lib/gemini/certs/localhost.key";
+          }
+          {
+            route = "localhost:/cgi-bin";
+            root = "${test_env}/test_data";
+            cgi = true;
+            cgi-timeout = 5;
+          }
+          {
+            route = "localhost:/scgi";
+            scgi = true;
+            scgi-address = "127.0.0.1:1099";
+          }
+          {
+            route = "localhost=/root";
+            redirect = "..";
+            permanent = true;
+          }
+          {
+            route = "localhost=/priv.gmi";
+            root = "${test_env}/test_data/test_site";
+            client-cert = "${test_env}/test_data/client_cert/good.crt";
+          }
+          {
+            route = "example.com~(.*)";
+            redirect = "gemini://localhost";
+            rewrite = "\\1";
+          }
+          {
+            route = "localhost:/no-exist";
+            root = "./does_not_exist";
           }
         ];
       };
+      systemd.services.scgi_server = {
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          ExecStart = "${scgi_server}/bin/scgi-server";
+        };
+      };
     };
   };
 
   testScript = { nodes, ... }: ''
+    geminiserver.wait_for_unit("scgi_server")
+    geminiserver.wait_for_open_port(1099)
     geminiserver.wait_for_unit("stargazer")
     geminiserver.wait_for_open_port(1965)
 
-    with subtest("check is serving over gemini"):
-      response = geminiserver.succeed("${pkgs.gemget}/bin/gemget --header -o - gemini://localhost:1965")
+    with subtest("stargazer test suite"):
+      response = geminiserver.succeed("sh -c 'cd ${test_env}; ${test_script}/bin/test'")
       print(response)
-      assert "Hello NixOS!" in response
   '';
 }
diff --git a/nixos/tests/web-servers/ttyd.nix b/nixos/tests/web-servers/ttyd.nix
new file mode 100644
index 0000000000000..d161673684b31
--- /dev/null
+++ b/nixos/tests/web-servers/ttyd.nix
@@ -0,0 +1,19 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "ttyd";
+  meta.maintainers = with lib.maintainers; [ stunkymonkey ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.ttyd = {
+      enable = true;
+      username = "foo";
+      passwordFile = pkgs.writeText "password" "bar";
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("ttyd.service")
+    machine.wait_for_open_port(7681)
+    response = machine.succeed("curl -vvv -u foo:bar -s -H 'Host: ttyd' http://127.0.0.1:7681/")
+    assert '<title>ttyd - Terminal</title>' in response, "Page didn't load successfully"
+  '';
+})
diff --git a/nixos/tests/zrepl.nix b/nixos/tests/zrepl.nix
index b16c7eddc7aec..bdf11122c73f6 100644
--- a/nixos/tests/zrepl.nix
+++ b/nixos/tests/zrepl.nix
@@ -42,6 +42,7 @@ import ./make-test-python.nix (
       start_all()
 
       with subtest("Wait for zrepl and network ready"):
+          host.systemctl("start network-online.target")
           host.wait_for_unit("network-online.target")
           host.wait_for_unit("zrepl.service")