Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/release-notes/rl-2311.section.md | 20
-rw-r--r--  nixos/lib/make-squashfs.nix | 13
-rw-r--r--  nixos/lib/qemu-common.nix | 1
-rw-r--r--  nixos/modules/config/iproute2.nix | 15
-rw-r--r--  nixos/modules/hardware/video/nvidia.nix | 6
-rw-r--r--  nixos/modules/misc/nixpkgs.nix | 8
-rw-r--r--  nixos/modules/module-list.nix | 7
-rw-r--r--  nixos/modules/programs/cdemu.nix | 13
-rw-r--r--  nixos/modules/programs/wayland/cardboard.nix | 24
-rw-r--r--  nixos/modules/security/acme/default.nix | 59
-rw-r--r--  nixos/modules/services/audio/wyoming/faster-whisper.nix | 4
-rw-r--r--  nixos/modules/services/backup/bacula.nix | 40
-rw-r--r--  nixos/modules/services/backup/restic.nix | 40
-rw-r--r--  nixos/modules/services/databases/ferretdb.nix | 2
-rw-r--r--  nixos/modules/services/home-automation/home-assistant.nix | 2
-rw-r--r--  nixos/modules/services/misc/amazon-ssm-agent.nix (renamed from nixos/modules/services/misc/ssm-agent.nix) | 19
-rw-r--r--  nixos/modules/services/misc/forgejo.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/munin.nix | 37
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/kea.nix | 4
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/knot.nix | 19
-rw-r--r--  nixos/modules/services/networking/fastnetmon-advanced.nix | 222
-rw-r--r--  nixos/modules/services/networking/kea.nix | 21
-rw-r--r--  nixos/modules/services/networking/mullvad-vpn.nix | 2
-rw-r--r--  nixos/modules/services/networking/rosenpass.nix | 233
-rw-r--r--  nixos/modules/services/networking/syncthing.nix | 28
-rw-r--r--  nixos/modules/services/networking/wpa_supplicant.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/akkoma.nix | 17
-rw-r--r--  nixos/modules/services/web-apps/c2fmzq-server.md | 42
-rw-r--r--  nixos/modules/services/web-apps/c2fmzq-server.nix | 125
-rw-r--r--  nixos/modules/services/web-apps/lanraragi.nix | 100
-rw-r--r--  nixos/modules/services/web-apps/shiori.nix | 9
-rw-r--r--  nixos/modules/services/web-apps/snipe-it.nix | 16
-rw-r--r--  nixos/modules/services/web-servers/garage.nix | 2
-rw-r--r--  nixos/modules/system/boot/networkd.nix | 4
-rw-r--r--  nixos/modules/system/boot/systemd/tmpfiles.nix | 104
-rw-r--r--  nixos/modules/tasks/filesystems/vfat.nix | 2
-rw-r--r--  nixos/modules/virtualisation/lxc-container.nix | 18
-rw-r--r--  nixos/modules/virtualisation/qemu-vm.nix | 4
-rw-r--r--  nixos/release-combined.nix | 3
-rw-r--r--  nixos/release.nix | 15
-rw-r--r--  nixos/tests/all-tests.nix | 8
-rw-r--r--  nixos/tests/c2fmzq.nix | 75
-rw-r--r--  nixos/tests/cockpit.nix | 3
-rw-r--r--  nixos/tests/fastnetmon-advanced.nix | 65
-rw-r--r--  nixos/tests/forgejo.nix | 23
-rw-r--r--  nixos/tests/installer.nix | 32
-rw-r--r--  nixos/tests/lanraragi.nix | 40
-rw-r--r--  nixos/tests/lxd/container.nix | 13
-rw-r--r--  nixos/tests/misc.nix | 4
-rw-r--r--  nixos/tests/nixos-rebuild-install-bootloader.nix | 73
-rw-r--r--  nixos/tests/openssh.nix | 14
-rw-r--r--  nixos/tests/prometheus-exporters.nix | 6
-rw-r--r--  nixos/tests/restic.nix | 18
-rw-r--r--  nixos/tests/rosenpass.nix | 217
-rw-r--r--  nixos/tests/stratis/encryption.nix | 4
55 files changed, 1698 insertions, 201 deletions
diff --git a/nixos/doc/manual/release-notes/rl-2311.section.md b/nixos/doc/manual/release-notes/rl-2311.section.md
index 9e2afe5fd2013..2a63ac984dbc6 100644
--- a/nixos/doc/manual/release-notes/rl-2311.section.md
+++ b/nixos/doc/manual/release-notes/rl-2311.section.md
@@ -107,6 +107,8 @@
 
 - [NNCP](http://www.nncpgo.org/). Added nncp-daemon and nncp-caller services. Configuration is set with [programs.nncp.settings](#opt-programs.nncp.settings) and the daemons are enabled at [services.nncp](#opt-services.nncp.caller.enable).
 
+- [FastNetMon Advanced](https://fastnetmon.com/product-overview/), a commercial high performance DDoS detector / sensor. Available as [services.fastnetmon-advanced](#opt-services.fastnetmon-advanced.enable).
+
 - [tuxedo-rs](https://github.com/AaronErhardt/tuxedo-rs), Rust utilities for interacting with hardware from TUXEDO Computers.
 
 - [audiobookshelf](https://github.com/advplyr/audiobookshelf/), a self-hosted audiobook and podcast server. Available as [services.audiobookshelf](#opt-services.audiobookshelf.enable).
@@ -121,6 +123,10 @@
 
 - [Soft Serve](https://github.com/charmbracelet/soft-serve), a tasty, self-hostable Git server for the command line. Available as [services.soft-serve](#opt-services.soft-serve.enable).
 
+- [Rosenpass](https://rosenpass.eu/), a service for post-quantum-secure VPNs with WireGuard. Available as [services.rosenpass](#opt-services.rosenpass.enable).
+
+- [c2FmZQ](https://github.com/c2FmZQ/c2FmZQ/), an application that can securely encrypt, store, and share files, including but not limited to pictures and videos. Available as [services.c2fmzq-server](#opt-services.c2fmzq-server.enable).
+
 ## Backward Incompatibilities {#sec-release-23.11-incompatibilities}
 
 - `network-online.target` has been fixed to no longer time out for systems with `networking.useDHCP = true` and `networking.useNetworkd = true`.
@@ -232,8 +238,6 @@
 
 - `baloo`, the file indexer/search engine used by KDE, now has a patch to prevent files from constantly being reindexed when the device ids of their underlying storage change. This happens frequently when using btrfs or LVM. The patch has not yet been accepted upstream, but it provides a significantly improved experience. When upgrading, reset baloo to get a clean index: `balooctl disable ; balooctl purge ; balooctl enable`.
 
-- `services.ddclient` has been removed on the request of the upstream maintainer because it is unmaintained and has bugs. Please switch to a different software like `inadyn` or `knsupdate`.
-
 - The `vlock` program from the `kbd` package has been moved into its own package output and should now be referenced explicitly as `kbd.vlock` or replaced with an alternative such as the standalone `vlock` package or `physlock`.
 
 - `fileSystems.<name>.autoFormat` now uses `systemd-makefs`, which does not accept formatting options. Therefore, `fileSystems.<name>.formatOptions` has been removed.
@@ -313,6 +317,8 @@
 
 - `rome` was removed because it is no longer maintained and is succeeded by `biome`.
 
+- The `prometheus-knot-exporter` was migrated to a version maintained by CZ.NIC. Various metric names have changed, so checking existing rules is recommended.
+
 - The `services.mtr-exporter.target` has been removed in favor of `services.mtr-exporter.jobs` which allows specifying multiple targets.
 
 - Setting `nixpkgs.config` options while providing an external `pkgs` instance will now raise an error instead of silently ignoring the options. NixOS modules no longer set `nixpkgs.config` to accommodate this. This specifically affects the `services.locate`, `services.xserver.displayManager.lightdm.greeters.tiny` and `programs.firefox` NixOS modules. No manual intervention should be required in most cases; however, configurations relying on those modules affecting packages outside the system environment should switch to explicit overlays.
@@ -325,6 +331,14 @@
 
 - `ps3netsrv` has been replaced with the webman-mod fork; the executable has been renamed from `ps3netsrv++` to `ps3netsrv` and CLI parameters have changed.
 
+- `ssm-agent` package and module were renamed to `amazon-ssm-agent` to be consistent with the upstream package name.
+
+- `services.kea.{ctrl-agent,dhcp-ddns,dhcp,dhcp6}` now use separate runtime directories instead of `/run/kea` to work around the runtime directory being cleared on service start.
+
+- `mkDerivation` now rejects MD5 hashes.
+
+- The `junicode` font package has been updated to [major version 2](https://github.com/psb1558/Junicode-font/releases/tag/v2.001), which is now a font family. In particular, plain `Junicode.ttf` no longer exists. In addition, TrueType font files are now placed in `font/truetype` instead of `font/junicode-ttf`; this change does not affect use via the `fonts.packages` NixOS option.
+
 ## Other Notable Changes {#sec-release-23.11-notable-changes}
 
 - The Cinnamon module now enables XDG desktop integration by default. If you are experiencing collisions related to xdg-desktop-portal-gtk you can safely remove `xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];` from your NixOS configuration.
@@ -359,6 +373,8 @@
 
 - `services.outline` can now be configured to use local filesystem storage instead of S3 storage using [services.outline.storage.storageType](#opt-services.outline.storage.storageType).
 
+- `paperwork` was updated to version 2.2. Documents scanned with this version will not be visible to previous versions if you downgrade. See the [upstream announcement](https://forum.openpaper.work/t/paperwork-2-2-testing-phase/316#important-switch-from-jpeg-to-png-for-new-pages-2) for details and workarounds.
+
 - `buildGoModule` `go-modules` attrs have been renamed to `goModules`.
 
 - The `fonts.fonts` and `fonts.enableDefaultFonts` options have been renamed to `fonts.packages` and `fonts.enableDefaultPackages` respectively.
diff --git a/nixos/lib/make-squashfs.nix b/nixos/lib/make-squashfs.nix
index b7c7078b73b1b..4b6b567399484 100644
--- a/nixos/lib/make-squashfs.nix
+++ b/nixos/lib/make-squashfs.nix
@@ -1,15 +1,22 @@
 { lib, stdenv, squashfsTools, closureInfo
 
+,  fileName ? "squashfs"
 , # The root directory of the squashfs filesystem is filled with the
   # closures of the Nix store paths listed here.
   storeContents ? []
+  # Pseudo files to be added to squashfs image
+, pseudoFiles ? []
+, noStrip ? false
 , # Compression parameters.
   # For zstd compression you can use "zstd -Xcompression-level 6".
   comp ? "xz -Xdict-size 100%"
 }:
 
+let
+  pseudoFilesArgs = lib.concatMapStrings (f: ''-p "${f}" '') pseudoFiles;
+in
 stdenv.mkDerivation {
-  name = "squashfs.img";
+  name = "${fileName}.img";
   __structuredAttrs = true;
 
   nativeBuildInputs = [ squashfsTools ];
@@ -31,8 +38,8 @@ stdenv.mkDerivation {
     '' + ''
 
       # Generate the squashfs image.
-      mksquashfs nix-path-registration $(cat $closureInfo/store-paths) $out \
-        -no-hardlinks -keep-as-directory -all-root -b 1048576 -comp ${comp} \
+      mksquashfs nix-path-registration $(cat $closureInfo/store-paths) $out ${pseudoFilesArgs} \
+        -no-hardlinks ${lib.optionalString noStrip "-no-strip"} -keep-as-directory -all-root -b 1048576 -comp ${comp} \
         -processors $NIX_BUILD_CORES
     '';
 }
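
As a quick illustration of the new arguments introduced above, here is a minimal call-site sketch; the file name, pseudo-file definition and compression settings are placeholders, not part of this change.

  pkgs.callPackage ./nixos/lib/make-squashfs.nix {
    fileName = "rootfs";                         # image is named "rootfs.img" instead of "squashfs.img"
    storeContents = [ config.system.build.toplevel ];
    pseudoFiles = [ "/dev d 755 0 0" ];          # each entry is passed to mksquashfs as -p "..."
    noStrip = true;                              # adds -no-strip to the mksquashfs invocation
    comp = "zstd -Xcompression-level 6";
  }
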
diff --git a/nixos/lib/qemu-common.nix b/nixos/lib/qemu-common.nix
index 4fff2e0a6f15e..b946f62d93dc3 100644
--- a/nixos/lib/qemu-common.nix
+++ b/nixos/lib/qemu-common.nix
@@ -40,6 +40,7 @@ rec {
       otherHostGuestMatrix = {
         aarch64-darwin = {
           aarch64-linux = "${qemuPkg}/bin/qemu-system-aarch64 -machine virt,gic-version=2,accel=hvf:tcg -cpu max";
+          inherit (otherHostGuestMatrix.x86_64-darwin) x86_64-linux;
         };
         x86_64-darwin = {
           x86_64-linux = "${qemuPkg}/bin/qemu-system-x86_64 -machine type=q35,accel=hvf:tcg -cpu max";
diff --git a/nixos/modules/config/iproute2.nix b/nixos/modules/config/iproute2.nix
index 7e4fb4d848e39..78bd07d680e20 100644
--- a/nixos/modules/config/iproute2.nix
+++ b/nixos/modules/config/iproute2.nix
@@ -18,15 +18,10 @@ in
   };
 
   config = mkIf cfg.enable {
-    environment.etc."iproute2/bpf_pinning" = { mode = "0644"; text = fileContents "${pkgs.iproute2}/etc/iproute2/bpf_pinning"; };
-    environment.etc."iproute2/ematch_map"  = { mode = "0644"; text = fileContents "${pkgs.iproute2}/etc/iproute2/ematch_map";  };
-    environment.etc."iproute2/group"       = { mode = "0644"; text = fileContents "${pkgs.iproute2}/etc/iproute2/group";       };
-    environment.etc."iproute2/nl_protos"   = { mode = "0644"; text = fileContents "${pkgs.iproute2}/etc/iproute2/nl_protos";   };
-    environment.etc."iproute2/rt_dsfield"  = { mode = "0644"; text = fileContents "${pkgs.iproute2}/etc/iproute2/rt_dsfield";  };
-    environment.etc."iproute2/rt_protos"   = { mode = "0644"; text = fileContents "${pkgs.iproute2}/etc/iproute2/rt_protos";   };
-    environment.etc."iproute2/rt_realms"   = { mode = "0644"; text = fileContents "${pkgs.iproute2}/etc/iproute2/rt_realms";   };
-    environment.etc."iproute2/rt_scopes"   = { mode = "0644"; text = fileContents "${pkgs.iproute2}/etc/iproute2/rt_scopes";   };
-    environment.etc."iproute2/rt_tables"   = { mode = "0644"; text = (fileContents "${pkgs.iproute2}/etc/iproute2/rt_tables")
-                                                                   + (optionalString (cfg.rttablesExtraConfig != "") "\n\n${cfg.rttablesExtraConfig}"); };
+    environment.etc."iproute2/rt_tables" = {
+      mode = "0644";
+      text = (fileContents "${pkgs.iproute2}/lib/iproute2/rt_tables")
+        + (optionalString (cfg.rttablesExtraConfig != "") "\n\n${cfg.rttablesExtraConfig}");
+    };
   };
 }
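
For context, a hedged sketch of the option this module consumes; the custom routing-table entry is illustrative only.

  networking.iproute2 = {
    enable = true;
    # Appended to the copy of rt_tables generated above.
    rttablesExtraConfig = ''
      200 custom
    '';
  };
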
diff --git a/nixos/modules/hardware/video/nvidia.nix b/nixos/modules/hardware/video/nvidia.nix
index 4320edf60da51..c36775dd24bba 100644
--- a/nixos/modules/hardware/video/nvidia.nix
+++ b/nixos/modules/hardware/video/nvidia.nix
@@ -269,9 +269,9 @@ in {
         services.udev.extraRules =
         ''
           # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
-          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 255'"
-          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \  -f 4); do mknod -m 666 /dev/nvidia$${i} c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) $${i}; done'"
-          KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 254'"
+          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c 195 255'"
+          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \  -f 4); do mknod -m 666 /dev/nvidia$${i} c 195 $${i}; done'"
+          KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c 195 254'"
           KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 0'"
           KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 1'"
         '';
diff --git a/nixos/modules/misc/nixpkgs.nix b/nixos/modules/misc/nixpkgs.nix
index bfcae9c7a9352..da321a9234493 100644
--- a/nixos/modules/misc/nixpkgs.nix
+++ b/nixos/modules/misc/nixpkgs.nix
@@ -187,7 +187,7 @@ in
 
     hostPlatform = mkOption {
       type = types.either types.str types.attrs; # TODO utilize lib.systems.parsedPlatform
-      example = { system = "aarch64-linux"; config = "aarch64-unknown-linux-gnu"; };
+      example = { system = "aarch64-linux"; };
       # Make sure that the final value has all fields for sake of other modules
       # referring to this. TODO make `lib.systems` itself use the module system.
       apply = lib.systems.elaborate;
@@ -205,7 +205,7 @@ in
     buildPlatform = mkOption {
       type = types.either types.str types.attrs; # TODO utilize lib.systems.parsedPlatform
       default = cfg.hostPlatform;
-      example = { system = "x86_64-linux"; config = "x86_64-unknown-linux-gnu"; };
+      example = { system = "x86_64-linux"; };
       # Make sure that the final value has all fields for sake of other modules
       # referring to this.
       apply = lib.systems.elaborate;
@@ -228,7 +228,7 @@ in
     localSystem = mkOption {
       type = types.attrs; # TODO utilize lib.systems.parsedPlatform
       default = { inherit (cfg) system; };
-      example = { system = "aarch64-linux"; config = "aarch64-unknown-linux-gnu"; };
+      example = { system = "aarch64-linux"; };
       # Make sure that the final value has all fields for sake of other modules
       # referring to this. TODO make `lib.systems` itself use the module system.
       apply = lib.systems.elaborate;
@@ -262,7 +262,7 @@ in
     crossSystem = mkOption {
       type = types.nullOr types.attrs; # TODO utilize lib.systems.parsedPlatform
       default = null;
-      example = { system = "aarch64-linux"; config = "aarch64-unknown-linux-gnu"; };
+      example = { system = "aarch64-linux"; };
       description = lib.mdDoc ''
         Systems with a recently generated `hardware-configuration.nix`
         may instead specify *only* {option}`nixpkgs.buildPlatform`,
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 47b262bf4d987..4d8fa8159a890 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -268,6 +268,7 @@
   ./programs/usbtop.nix
   ./programs/vim.nix
   ./programs/wavemon.nix
+  ./programs/wayland/cardboard.nix
   ./programs/wayland/river.nix
   ./programs/wayland/sway.nix
   ./programs/wayland/waybar.nix
@@ -625,6 +626,7 @@
   ./services/matrix/matrix-sliding-sync.nix
   ./services/matrix/synapse.nix
   ./services/misc/airsonic.nix
+  ./services/misc/amazon-ssm-agent.nix
   ./services/misc/ananicy.nix
   ./services/misc/ankisyncd.nix
   ./services/misc/apache-kafka.nix
@@ -741,7 +743,6 @@
   ./services/misc/spice-autorandr.nix
   ./services/misc/spice-vdagentd.nix
   ./services/misc/spice-webdavd.nix
-  ./services/misc/ssm-agent.nix
   ./services/misc/sssd.nix
   ./services/misc/subsonic.nix
   ./services/misc/sundtek.nix
@@ -907,6 +908,7 @@
   ./services/networking/eternal-terminal.nix
   ./services/networking/expressvpn.nix
   ./services/networking/fakeroute.nix
+  ./services/networking/fastnetmon-advanced.nix
   ./services/networking/ferm.nix
   ./services/networking/firefox-syncserver.nix
   ./services/networking/fireqos.nix
@@ -1047,6 +1049,7 @@
   ./services/networking/redsocks.nix
   ./services/networking/resilio.nix
   ./services/networking/robustirc-bridge.nix
+  ./services/networking/rosenpass.nix
   ./services/networking/routedns.nix
   ./services/networking/rpcbind.nix
   ./services/networking/rxe.nix
@@ -1229,6 +1232,7 @@
   ./services/web-apps/atlassian/jira.nix
   ./services/web-apps/audiobookshelf.nix
   ./services/web-apps/bookstack.nix
+  ./services/web-apps/c2fmzq-server.nix
   ./services/web-apps/calibre-web.nix
   ./services/web-apps/coder.nix
   ./services/web-apps/changedetection-io.nix
@@ -1268,6 +1272,7 @@
   ./services/web-apps/kavita.nix
   ./services/web-apps/keycloak.nix
   ./services/web-apps/komga.nix
+  ./services/web-apps/lanraragi.nix
   ./services/web-apps/lemmy.nix
   ./services/web-apps/limesurvey.nix
   ./services/web-apps/mainsail.nix
diff --git a/nixos/modules/programs/cdemu.nix b/nixos/modules/programs/cdemu.nix
index d43f009f2f92e..7eba4d29d83ba 100644
--- a/nixos/modules/programs/cdemu.nix
+++ b/nixos/modules/programs/cdemu.nix
@@ -53,6 +53,19 @@ in {
       dbus.packages = [ pkgs.cdemu-daemon ];
     };
 
+    users.groups.${config.programs.cdemu.group} = {};
+
+    # Systemd User service
+    # manually adapted from example in source package:
+    # https://sourceforge.net/p/cdemu/code/ci/master/tree/cdemu-daemon/service-example/cdemu-daemon.service
+    systemd.user.services.cdemu-daemon.description = "CDEmu daemon";
+    systemd.user.services.cdemu-daemon.serviceConfig = {
+      Type = "dbus";
+      BusName = "net.sf.cdemu.CDEmuDaemon";
+      ExecStart = "${pkgs.cdemu-daemon}/bin/cdemu-daemon --config-file \"%h/.config/cdemu-daemon\"";
+      Restart = "no";
+    };
+
     environment.systemPackages =
       [ pkgs.cdemu-daemon pkgs.cdemu-client ]
       ++ optional cfg.gui pkgs.gcdemu
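
A minimal sketch of enabling the module this change extends; the user name is a placeholder.

  programs.cdemu = {
    enable = true;
    gui = true;                                  # also installs gcdemu (see systemPackages above)
  };
  users.users.alice.extraGroups = [ config.programs.cdemu.group ];  # placeholder user
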
diff --git a/nixos/modules/programs/wayland/cardboard.nix b/nixos/modules/programs/wayland/cardboard.nix
new file mode 100644
index 0000000000000..262c698c74ba8
--- /dev/null
+++ b/nixos/modules/programs/wayland/cardboard.nix
@@ -0,0 +1,24 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.cardboard;
+in
+{
+  meta.maintainers = with lib.maintainers; [ AndersonTorres ];
+
+  options.programs.cardboard = {
+    enable = lib.mkEnableOption (lib.mdDoc "cardboard");
+
+    package = lib.mkPackageOptionMD pkgs "cardboard" { };
+  };
+
+  config = lib.mkIf cfg.enable (lib.mkMerge [
+    {
+      environment.systemPackages = [ cfg.package ];
+
+      # To make a cardboard session available for certain DMs like SDDM
+      services.xserver.displayManager.sessionPackages = [ cfg.package ];
+    }
+    (import ./wayland-session.nix { inherit lib pkgs; })
+  ]);
+}
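
Minimal usage sketch for the new module:

  programs.cardboard.enable = true;
  # Optionally pin the package (defaults to pkgs.cardboard via mkPackageOptionMD):
  # programs.cardboard.package = pkgs.cardboard;
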
diff --git a/nixos/modules/security/acme/default.nix b/nixos/modules/security/acme/default.nix
index f8e17bc71ee18..932bf3e791159 100644
--- a/nixos/modules/security/acme/default.nix
+++ b/nixos/modules/security/acme/default.nix
@@ -184,6 +184,7 @@ let
   certToConfig = cert: data: let
     acmeServer = data.server;
     useDns = data.dnsProvider != null;
+    useDnsOrS3 = useDns || data.s3Bucket != null;
     destPath = "/var/lib/acme/${cert}";
     selfsignedDeps = optionals (cfg.preliminarySelfsigned) [ "acme-selfsigned-${cert}.service" ];
 
@@ -219,7 +220,8 @@ let
       [ "--dns" data.dnsProvider ]
       ++ optionals (!data.dnsPropagationCheck) [ "--dns.disable-cp" ]
       ++ optionals (data.dnsResolver != null) [ "--dns.resolvers" data.dnsResolver ]
-    ) else if data.listenHTTP != null then [ "--http" "--http.port" data.listenHTTP ]
+    ) else if data.s3Bucket != null then [ "--http" "--http.s3-bucket" data.s3Bucket ]
+    else if data.listenHTTP != null then [ "--http" "--http.port" data.listenHTTP ]
     else [ "--http" "--http.webroot" data.webroot ];
 
     commonOpts = [
@@ -362,13 +364,12 @@ let
           "/var/lib/acme/.lego/${cert}/${certDir}:/tmp/certificates"
         ];
 
-        # Only try loading the environmentFile if the dns challenge is enabled
-        EnvironmentFile = mkIf useDns data.environmentFile;
+        EnvironmentFile = mkIf useDnsOrS3 data.environmentFile;
 
-        Environment = mkIf useDns
+        Environment = mkIf useDnsOrS3
           (mapAttrsToList (k: v: ''"${k}=%d/${k}"'') data.credentialFiles);
 
-        LoadCredential = mkIf useDns
+        LoadCredential = mkIf useDnsOrS3
           (mapAttrsToList (k: v: "${k}:${v}") data.credentialFiles);
 
         # Run as root (Prefixed with +)
@@ -755,6 +756,15 @@ let
         '';
       };
 
+      s3Bucket = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "acme";
+        description = lib.mdDoc ''
+          S3 bucket name to use for HTTP-01 based challenges. Challenges will be written to the S3 bucket.
+        '';
+      };
+
       inheritDefaults = mkOption {
         default = true;
         example = true;
@@ -928,35 +938,20 @@ in {
             and remove the wildcard from the path.
           '';
         }
-        {
-          assertion = data.dnsProvider == null || data.webroot == null;
-          message = ''
-            Options `security.acme.certs.${cert}.dnsProvider` and
-            `security.acme.certs.${cert}.webroot` are mutually exclusive.
-          '';
-        }
-        {
-          assertion = data.webroot == null || data.listenHTTP == null;
-          message = ''
-            Options `security.acme.certs.${cert}.webroot` and
-            `security.acme.certs.${cert}.listenHTTP` are mutually exclusive.
-          '';
-        }
-        {
-          assertion = data.listenHTTP == null || data.dnsProvider == null;
+        (let exclusiveAttrs = {
+          inherit (data) dnsProvider webroot listenHTTP s3Bucket;
+        }; in {
+          assertion = lib.length (lib.filter (x: x != null) (builtins.attrValues exclusiveAttrs)) == 1;
           message = ''
-            Options `security.acme.certs.${cert}.listenHTTP` and
-            `security.acme.certs.${cert}.dnsProvider` are mutually exclusive.
+            Exactly one of the options
+            `security.acme.certs.${cert}.dnsProvider`,
+            `security.acme.certs.${cert}.webroot`,
+            `security.acme.certs.${cert}.listenHTTP` and
+            `security.acme.certs.${cert}.s3Bucket`
+            is required.
+            Current values: ${(lib.generators.toPretty {} exclusiveAttrs)}.
           '';
-        }
-        {
-          assertion = data.dnsProvider != null || data.webroot != null || data.listenHTTP != null;
-          message = ''
-            One of `security.acme.certs.${cert}.dnsProvider`,
-            `security.acme.certs.${cert}.webroot`, or
-            `security.acme.certs.${cert}.listenHTTP` must be provided.
-          '';
-        }
+        })
         {
           assertion = all (hasSuffix "_FILE") (attrNames data.credentialFiles);
           message = ''
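
A hedged sketch of a certificate using the new s3Bucket challenge mode; the domain, bucket name and credentials path are placeholders.

  security.acme.certs."example.org" = {
    s3Bucket = "acme";
    # With the change above, environmentFile/credentialFiles are now also loaded
    # for S3-based challenges, e.g. to provide the bucket credentials to lego.
    environmentFile = "/var/lib/secrets/acme-s3.env";
  };
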
diff --git a/nixos/modules/services/audio/wyoming/faster-whisper.nix b/nixos/modules/services/audio/wyoming/faster-whisper.nix
index 1fb67ecfe5060..205e05f2ed176 100644
--- a/nixos/modules/services/audio/wyoming/faster-whisper.nix
+++ b/nixos/modules/services/audio/wyoming/faster-whisper.nix
@@ -37,6 +37,9 @@ in
             enable = mkEnableOption (mdDoc "Wyoming faster-whisper server");
 
             model = mkOption {
+              # Intersection between available and referenced models here:
+              # https://github.com/rhasspy/models/releases/tag/v1.0
+              # https://github.com/rhasspy/rhasspy3/blob/wyoming-v1/programs/asr/faster-whisper/server/wyoming_faster_whisper/download.py#L17-L27
               type = enum [
                 "tiny"
                 "tiny-int8"
@@ -44,7 +47,6 @@ in
                 "base-int8"
                 "small"
                 "small-int8"
-                "medium"
                 "medium-int8"
               ];
               default = "tiny-int8";
diff --git a/nixos/modules/services/backup/bacula.nix b/nixos/modules/services/backup/bacula.nix
index 0acbf1b3eabba..5a75a46e5259a 100644
--- a/nixos/modules/services/backup/bacula.nix
+++ b/nixos/modules/services/backup/bacula.nix
@@ -15,16 +15,16 @@ let
       Client {
         Name = "${fd_cfg.name}";
         FDPort = ${toString fd_cfg.port};
-        WorkingDirectory = "${libDir}";
-        Pid Directory = "/run";
+        WorkingDirectory = ${libDir};
+        Pid Directory = /run;
         ${fd_cfg.extraClientConfig}
       }
 
       ${concatStringsSep "\n" (mapAttrsToList (name: value: ''
       Director {
         Name = "${name}";
-        Password = "${value.password}";
-        Monitor = "${value.monitor}";
+        Password = ${value.password};
+        Monitor = ${value.monitor};
       }
       '') fd_cfg.director)}
 
@@ -41,8 +41,8 @@ let
       Storage {
         Name = "${sd_cfg.name}";
         SDPort = ${toString sd_cfg.port};
-        WorkingDirectory = "${libDir}";
-        Pid Directory = "/run";
+        WorkingDirectory = ${libDir};
+        Pid Directory = /run;
         ${sd_cfg.extraStorageConfig}
       }
 
@@ -50,8 +50,8 @@ let
       Autochanger {
         Name = "${name}";
         Device = ${concatStringsSep ", " (map (a: "\"${a}\"") value.devices)};
-        Changer Device =  "${value.changerDevice}";
-        Changer Command = "${value.changerCommand}";
+        Changer Device =  ${value.changerDevice};
+        Changer Command = ${value.changerCommand};
         ${value.extraAutochangerConfig}
       }
       '') sd_cfg.autochanger)}
@@ -59,8 +59,8 @@ let
       ${concatStringsSep "\n" (mapAttrsToList (name: value: ''
       Device {
         Name = "${name}";
-        Archive Device = "${value.archiveDevice}";
-        Media Type = "${value.mediaType}";
+        Archive Device = ${value.archiveDevice};
+        Media Type = ${value.mediaType};
         ${value.extraDeviceConfig}
       }
       '') sd_cfg.device)}
@@ -68,8 +68,8 @@ let
       ${concatStringsSep "\n" (mapAttrsToList (name: value: ''
       Director {
         Name = "${name}";
-        Password = "${value.password}";
-        Monitor = "${value.monitor}";
+        Password = ${value.password};
+        Monitor = ${value.monitor};
       }
       '') sd_cfg.director)}
 
@@ -85,18 +85,18 @@ let
     ''
     Director {
       Name = "${dir_cfg.name}";
-      Password = "${dir_cfg.password}";
+      Password = ${dir_cfg.password};
       DirPort = ${toString dir_cfg.port};
-      Working Directory = "${libDir}";
-      Pid Directory = "/run/";
-      QueryFile = "${pkgs.bacula}/etc/query.sql";
+      Working Directory = ${libDir};
+      Pid Directory = /run/;
+      QueryFile = ${pkgs.bacula}/etc/query.sql;
       ${dir_cfg.extraDirectorConfig}
     }
 
     Catalog {
-      Name = "PostgreSQL";
-      dbname = "bacula";
-      user = "bacula";
+      Name = PostgreSQL;
+      dbname = bacula;
+      user = bacula;
     }
 
     Messages {
@@ -533,7 +533,7 @@ in {
       };
     };
 
-    services.postgresql.enable = dir_cfg.enable == true;
+    services.postgresql.enable = lib.mkIf dir_cfg.enable true;
 
     systemd.services.bacula-dir = mkIf dir_cfg.enable {
       after = [ "network.target" "postgresql.service" ];
diff --git a/nixos/modules/services/backup/restic.nix b/nixos/modules/services/backup/restic.nix
index 78220e99c3d1f..49a55d0560140 100644
--- a/nixos/modules/services/backup/restic.nix
+++ b/nixos/modules/services/backup/restic.nix
@@ -23,25 +23,13 @@ in
 
         environmentFile = mkOption {
           type = with types; nullOr str;
-          # added on 2021-08-28, s3CredentialsFile should
-          # be removed in the future (+ remember the warning)
-          default = config.s3CredentialsFile;
+          default = null;
           description = lib.mdDoc ''
             file containing the credentials to access the repository, in the
             format of an EnvironmentFile as described by systemd.exec(5)
           '';
         };
 
-        s3CredentialsFile = mkOption {
-          type = with types; nullOr str;
-          default = null;
-          description = lib.mdDoc ''
-            file containing the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
-            for an S3-hosted repository, in the format of an EnvironmentFile
-            as described by systemd.exec(5)
-          '';
-        };
-
         rcloneOptions = mkOption {
           type = with types; nullOr (attrsOf (oneOf [ str bool ]));
           default = null;
@@ -113,12 +101,15 @@ in
         };
 
         paths = mkOption {
+          # This is nullable for legacy reasons only. We should consider making it a pure listOf
+          # after some time has passed since this comment was added.
           type = types.nullOr (types.listOf types.str);
-          default = null;
+          default = [ ];
           description = lib.mdDoc ''
-            Which paths to backup.  If null or an empty array, no
-            backup command will be run.  This can be used to create a
-            prune-only job.
+            Which paths to back up, in addition to ones specified via
+            `dynamicFilesFrom`.  If null or an empty array and
+            `dynamicFilesFrom` is also null, no backup command will be run.
+            This can be used to create a prune-only job.
           '';
           example = [
             "/var/lib/postgresql"
@@ -231,7 +222,7 @@ in
           description = lib.mdDoc ''
             A script that produces a list of files to back up.  The
             results of this command are given to the '--files-from'
-            option.
+            option. The result is merged with paths specified via `paths`.
           '';
           example = "find /home/matt/git -type d -name .git";
         };
@@ -297,7 +288,6 @@ in
   };
 
   config = {
-    warnings = mapAttrsToList (n: v: "services.restic.backups.${n}.s3CredentialsFile is deprecated, please use services.restic.backups.${n}.environmentFile instead.") (filterAttrs (n: v: v.s3CredentialsFile != null) config.services.restic.backups);
     assertions = mapAttrsToList (n: v: {
       assertion = (v.repository == null) != (v.repositoryFile == null);
       message = "services.restic.backups.${n}: exactly one of repository or repositoryFile should be set";
@@ -310,10 +300,7 @@ in
             resticCmd = "${backup.package}/bin/restic${extraOptions}";
             excludeFlags = optional (backup.exclude != []) "--exclude-file=${pkgs.writeText "exclude-patterns" (concatStringsSep "\n" backup.exclude)}";
             filesFromTmpFile = "/run/restic-backups-${name}/includes";
-            backupPaths =
-              if (backup.dynamicFilesFrom == null)
-              then optionalString (backup.paths != null) (concatStringsSep " " backup.paths)
-              else "--files-from ${filesFromTmpFile}";
+            doBackup = (backup.dynamicFilesFrom != null) || (backup.paths != null && backup.paths != []);
             pruneCmd = optionals (builtins.length backup.pruneOpts > 0) [
               (resticCmd + " forget --prune " + (concatStringsSep " " backup.pruneOpts))
               (resticCmd + " check " + (concatStringsSep " " backup.checkOpts))
@@ -348,7 +335,7 @@ in
             after = [ "network-online.target" ];
             serviceConfig = {
               Type = "oneshot";
-              ExecStart = (optionals (backupPaths != "") [ "${resticCmd} backup ${concatStringsSep " " (backup.extraBackupArgs ++ excludeFlags)} ${backupPaths}" ])
+              ExecStart = (optionals doBackup [ "${resticCmd} backup ${concatStringsSep " " (backup.extraBackupArgs ++ excludeFlags)} --files-from=${filesFromTmpFile}" ])
                 ++ pruneCmd;
               User = backup.user;
               RuntimeDirectory = "restic-backups-${name}";
@@ -366,8 +353,11 @@ in
               ${optionalString (backup.initialize) ''
                 ${resticCmd} snapshots || ${resticCmd} init
               ''}
+              ${optionalString (backup.paths != null && backup.paths != []) ''
+                cat ${pkgs.writeText "staticPaths" (concatStringsSep "\n" backup.paths)} >> ${filesFromTmpFile}
+              ''}
               ${optionalString (backup.dynamicFilesFrom != null) ''
-                ${pkgs.writeScript "dynamicFilesFromScript" backup.dynamicFilesFrom} > ${filesFromTmpFile}
+                ${pkgs.writeScript "dynamicFilesFromScript" backup.dynamicFilesFrom} >> ${filesFromTmpFile}
               ''}
             '';
           } // optionalAttrs (backup.dynamicFilesFrom != null || backup.backupCleanupCommand != null) {
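
A sketch of a backup job combining static paths with dynamicFilesFrom, which are now merged into a single --files-from list; all paths are placeholders.

  services.restic.backups.home = {
    repository = "/srv/restic-repo";
    passwordFile = "/etc/nixos/secrets/restic-password";
    paths = [ "/home" "/var/lib/postgresql" ];
    dynamicFilesFrom = "find /home -maxdepth 2 -type d -name .git";
  };
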
diff --git a/nixos/modules/services/databases/ferretdb.nix b/nixos/modules/services/databases/ferretdb.nix
index 45f822d646910..ab55e22bf2146 100644
--- a/nixos/modules/services/databases/ferretdb.nix
+++ b/nixos/modules/services/databases/ferretdb.nix
@@ -30,7 +30,7 @@ in
         };
         description = ''
           Additional configuration for FerretDB, see
-          <https://docs.ferretdb.io/flags/>
+          <https://docs.ferretdb.io/configuration/flags/>
           for supported values.
         '';
       };
diff --git a/nixos/modules/services/home-automation/home-assistant.nix b/nixos/modules/services/home-automation/home-assistant.nix
index 99bac86a8e9a6..0e6fa65667af0 100644
--- a/nixos/modules/services/home-automation/home-assistant.nix
+++ b/nixos/modules/services/home-automation/home-assistant.nix
@@ -12,7 +12,7 @@ let
   # We post-process the result to add support for YAML functions, like secrets or includes, see e.g.
   # https://www.home-assistant.io/docs/configuration/secrets/
   filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [ null ])) cfg.config or {};
-  configFile = pkgs.runCommand "configuration.yaml" { preferLocalBuild = true; } ''
+  configFile = pkgs.runCommandLocal "configuration.yaml" { } ''
     cp ${format.generate "configuration.yaml" filteredConfig} $out
     sed -i -e "s/'\!\([a-z_]\+\) \(.*\)'/\!\1 \2/;s/^\!\!/\!/;" $out
   '';
diff --git a/nixos/modules/services/misc/ssm-agent.nix b/nixos/modules/services/misc/amazon-ssm-agent.nix
index d1f371c2bd617..0be79e759c319 100644
--- a/nixos/modules/services/misc/ssm-agent.nix
+++ b/nixos/modules/services/misc/amazon-ssm-agent.nix
@@ -2,7 +2,7 @@
 
 with lib;
 let
-  cfg = config.services.ssm-agent;
+  cfg = config.services.amazon-ssm-agent;
 
   # The SSM agent doesn't pay attention to our /etc/os-release yet, and the lsb-release tool
   # in nixpkgs doesn't seem to work properly on NixOS, so let's just fake the two fields SSM
@@ -16,19 +16,24 @@ let
     esac
   '';
 in {
-  options.services.ssm-agent = {
-    enable = mkEnableOption (lib.mdDoc "AWS SSM agent");
+  imports = [
+    (mkRenamedOptionModule [ "services" "ssm-agent" "enable" ] [ "services" "amazon-ssm-agent" "enable" ])
+    (mkRenamedOptionModule [ "services" "ssm-agent" "package" ] [ "services" "amazon-ssm-agent" "package" ])
+  ];
+
+  options.services.amazon-ssm-agent = {
+    enable = mkEnableOption (lib.mdDoc "Amazon SSM agent");
 
     package = mkOption {
       type = types.path;
-      description = lib.mdDoc "The SSM agent package to use";
-      default = pkgs.ssm-agent.override { overrideEtc = false; };
-      defaultText = literalExpression "pkgs.ssm-agent.override { overrideEtc = false; }";
+      description = lib.mdDoc "The Amazon SSM agent package to use";
+      default = pkgs.amazon-ssm-agent.override { overrideEtc = false; };
+      defaultText = literalExpression "pkgs.amazon-ssm-agent.override { overrideEtc = false; }";
     };
   };
 
   config = mkIf cfg.enable {
-    systemd.services.ssm-agent = {
+    systemd.services.amazon-ssm-agent = {
       inherit (cfg.package.meta) description;
       after    = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
diff --git a/nixos/modules/services/misc/forgejo.nix b/nixos/modules/services/misc/forgejo.nix
index b2920981efbda..90b5f16f4189b 100644
--- a/nixos/modules/services/misc/forgejo.nix
+++ b/nixos/modules/services/misc/forgejo.nix
@@ -632,6 +632,8 @@ in
       };
     };
 
+    services.openssh.settings.AcceptEnv = mkIf (!cfg.settings.START_SSH_SERVER or false) "GIT_PROTOCOL";
+
     users.users = mkIf (cfg.user == "forgejo") {
       forgejo = {
         home = cfg.stateDir;
diff --git a/nixos/modules/services/monitoring/munin.nix b/nixos/modules/services/monitoring/munin.nix
index f37f2689927ed..90a51181ac30b 100644
--- a/nixos/modules/services/monitoring/munin.nix
+++ b/nixos/modules/services/monitoring/munin.nix
@@ -83,42 +83,47 @@ let
   # Copy one Munin plugin into the Nix store with a specific name.
   # This is suitable for use with plugins going directly into /etc/munin/plugins,
   # i.e. munin.extraPlugins.
-  internOnePlugin = name: path:
+  internOnePlugin = { name, path }:
     "cp -a '${path}' '${name}'";
 
   # Copy an entire tree of Munin plugins into a single directory in the Nix
-  # store, with no renaming.
-  # This is suitable for use with munin-node-configure --suggest, i.e.
-  # munin.extraAutoPlugins.
-  internManyPlugins = name: path:
+  # store, with no renaming. The output is suitable for use with
+  # munin-node-configure --suggest, i.e. munin.extraAutoPlugins.
+  # Note that this flattens the input; this is intentional, as
+  # munin-node-configure won't recurse into subdirectories.
+  internManyPlugins = path:
     "find '${path}' -type f -perm /a+x -exec cp -a -t . '{}' '+'";
 
   # Use the appropriate intern-fn to copy the plugins into the store and patch
   # them afterwards in an attempt to get them to run on NixOS.
+  # This is a bit hairy because we can't just fix shebangs; lots of munin plugins
+  # hardcode paths like /sbin/mount rather than trusting $PATH, so we have to
+  # look for and update those throughout the script. At the same time, if the
+  # plugin comes from a package that is already nixified, we don't want to
+  # rewrite paths like /nix/store/foo/sbin/mount.
+  # For now we make the simplifying assumption that no file will contain lines
+  # which mix store paths and FHS paths, and thus run our substitution only on
+  # lines which do not contain store paths.
   internAndFixPlugins = name: intern-fn: paths:
     pkgs.runCommand name {} ''
       mkdir -p "$out"
       cd "$out"
-      ${lib.concatStringsSep "\n"
-          (lib.attrsets.mapAttrsToList intern-fn paths)}
+      ${lib.concatStringsSep "\n" (map intern-fn paths)}
       chmod -R u+w .
-      find . -type f -exec sed -E -i '
-        s,(/usr)?/s?bin/,/run/current-system/sw/bin/,g
-      ' '{}' '+'
+      ${pkgs.findutils}/bin/find . -type f -exec ${pkgs.gnused}/bin/sed -E -i "
+        \%''${NIX_STORE}/%! s,(/usr)?/s?bin/,/run/current-system/sw/bin/,g
+      " '{}' '+'
     '';
 
   # TODO: write a derivation for munin-contrib, so that for contrib plugins
   # you can just refer to them by name rather than needing to include a copy
   # of munin-contrib in your nixos configuration.
   extraPluginDir = internAndFixPlugins "munin-extra-plugins.d"
-    internOnePlugin nodeCfg.extraPlugins;
+    internOnePlugin
+    (lib.attrsets.mapAttrsToList (k: v: { name = k; path = v; }) nodeCfg.extraPlugins);
 
   extraAutoPluginDir = internAndFixPlugins "munin-extra-auto-plugins.d"
-    internManyPlugins
-    (builtins.listToAttrs
-      (map
-        (path: { name = baseNameOf path; value = path; })
-        nodeCfg.extraAutoPlugins));
+    internManyPlugins nodeCfg.extraAutoPlugins;
 
   customStaticDir = pkgs.runCommand "munin-custom-static-data" {} ''
     cp -a "${pkgs.munin}/etc/opt/munin/static" "$out"
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/kea.nix b/nixos/modules/services/monitoring/prometheus/exporters/kea.nix
index ed33c72f644f3..8b1cd47d0a409 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/kea.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/kea.nix
@@ -15,8 +15,8 @@ in {
       type = types.listOf types.str;
       example = literalExpression ''
         [
-          "/run/kea/kea-dhcp4.socket"
-          "/run/kea/kea-dhcp6.socket"
+          "/run/kea-dhcp4/kea-dhcp4.socket"
+          "/run/kea-dhcp6/kea-dhcp6.socket"
         ]
       '';
       description = lib.mdDoc ''
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/knot.nix b/nixos/modules/services/monitoring/prometheus/exporters/knot.nix
index a73425b37da71..7758487508033 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/knot.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/knot.nix
@@ -8,9 +8,9 @@ in {
   port = 9433;
   extraOpts = {
     knotLibraryPath = mkOption {
-      type = types.str;
-      default = "${pkgs.knot-dns.out}/lib/libknot.so";
-      defaultText = literalExpression ''"''${pkgs.knot-dns.out}/lib/libknot.so"'';
+      type = types.nullOr types.str;
+      default = null;
+      example = literalExpression ''"''${pkgs.knot-dns.out}/lib/libknot.so"'';
       description = lib.mdDoc ''
         Path to the library of `knot-dns`.
       '';
@@ -25,7 +25,7 @@ in {
     };
 
     knotSocketTimeout = mkOption {
-      type = types.int;
+      type = types.ints.positive;
       default = 2000;
       description = lib.mdDoc ''
         Timeout in seconds.
@@ -33,17 +33,22 @@ in {
     };
   };
   serviceOpts = {
+    path = with pkgs; [
+      procps
+    ];
     serviceConfig = {
       ExecStart = ''
-        ${pkgs.prometheus-knot-exporter}/bin/knot_exporter \
+        ${pkgs.prometheus-knot-exporter}/bin/knot-exporter \
           --web-listen-addr ${cfg.listenAddress} \
           --web-listen-port ${toString cfg.port} \
-          --knot-library-path ${cfg.knotLibraryPath} \
           --knot-socket-path ${cfg.knotSocketPath} \
           --knot-socket-timeout ${toString cfg.knotSocketTimeout} \
+          ${lib.optionalString (cfg.knotLibraryPath != null) "--knot-library-path ${cfg.knotLibraryPath}"} \
           ${concatStringsSep " \\\n  " cfg.extraFlags}
       '';
-      SupplementaryGroups = [ "knot" ];
+      SupplementaryGroups = [
+        "knot"
+      ];
       RestrictAddressFamilies = [
         # Need AF_UNIX to collect data
         "AF_UNIX"
diff --git a/nixos/modules/services/networking/fastnetmon-advanced.nix b/nixos/modules/services/networking/fastnetmon-advanced.nix
new file mode 100644
index 0000000000000..26e8ad8b76d97
--- /dev/null
+++ b/nixos/modules/services/networking/fastnetmon-advanced.nix
@@ -0,0 +1,222 @@
+{ config, lib, pkgs, ... }:
+
+let
+  # Background information: FastNetMon requires a MongoDB to start. This is because
+  # it uses MongoDB to store its configuration. That is, in a normal setup there is
+  # one collection with one document.
+  # To provide declarative configuration in our NixOS module, this database is
+  # completely emptied and replaced on each boot by the fastnetmon-setup service
+  # using the configuration backup functionality.
+
+  cfg = config.services.fastnetmon-advanced;
+  settingsFormat = pkgs.formats.yaml { };
+
+  # obtain the default configs by starting up ferretdb and fcli in a derivation
+  default_configs = pkgs.runCommand "default-configs" {
+    nativeBuildInputs = [
+      pkgs.ferretdb
+      pkgs.fastnetmon-advanced # for fcli
+      pkgs.proot
+    ];
+  } ''
+    mkdir ferretdb fastnetmon $out
+    FERRETDB_TELEMETRY="disable" FERRETDB_HANDLER="sqlite" FERRETDB_STATE_DIR="$PWD/ferretdb" FERRETDB_SQLITE_URL="file:$PWD/ferretdb/" ferretdb &
+
+    cat << EOF > fastnetmon/fastnetmon.conf
+    ${builtins.toJSON {
+      mongodb_username = "";
+    }}
+    EOF
+    proot -b fastnetmon:/etc/fastnetmon -0 fcli create_configuration
+    proot -b fastnetmon:/etc/fastnetmon -0 fcli set bgp default
+    proot -b fastnetmon:/etc/fastnetmon -0 fcli export_configuration backup.tar
+    tar -C $out --no-same-owner -xvf backup.tar
+  '';
+
+  # merge the user configs into the default configs
+  config_tar = pkgs.runCommand "fastnetmon-config.tar" {
+    nativeBuildInputs = with pkgs; [ jq ];
+  } ''
+    jq -s add ${default_configs}/main.json ${pkgs.writeText "main-add.json" (builtins.toJSON cfg.settings)} > main.json
+    mkdir hostgroup
+    ${lib.concatImapStringsSep "\n" (pos: hostgroup: ''
+      jq -s add ${default_configs}/hostgroup/0.json ${pkgs.writeText "hostgroup-${toString (pos - 1)}-add.json" (builtins.toJSON hostgroup)} > hostgroup/${toString (pos - 1)}.json
+    '') hostgroups}
+    mkdir bgp
+    ${lib.concatImapStringsSep "\n" (pos: bgp: ''
+      jq -s add ${default_configs}/bgp/0.json ${pkgs.writeText "bgp-${toString (pos - 1)}-add.json" (builtins.toJSON bgp)} > bgp/${toString (pos - 1)}.json
+    '') bgpPeers}
+    tar -cf $out main.json ${lib.concatImapStringsSep " " (pos: _: "hostgroup/${toString (pos - 1)}.json") hostgroups} ${lib.concatImapStringsSep " " (pos: _: "bgp/${toString (pos - 1)}.json") bgpPeers}
+  '';
+
+  hostgroups = lib.mapAttrsToList (name: hostgroup: { inherit name; } // hostgroup) cfg.hostgroups;
+  bgpPeers = lib.mapAttrsToList (name: bgpPeer: { inherit name; } // bgpPeer) cfg.bgpPeers;
+
+in {
+  options.services.fastnetmon-advanced = with lib; {
+    enable = mkEnableOption "the fastnetmon-advanced DDoS Protection daemon";
+
+    settings = mkOption {
+      description = ''
+        Extra configuration options to declaratively load into FastNetMon Advanced.
+
+        See the [FastNetMon Advanced Configuration options reference](https://fastnetmon.com/docs-fnm-advanced/fastnetmon-advanced-configuration-options/) for more details.
+      '';
+      type = settingsFormat.type;
+      default = {};
+      example = literalExpression ''
+        {
+          networks_list = [ "192.0.2.0/24" ];
+          gobgp = true;
+          gobgp_flow_spec_announces = true;
+        }
+      '';
+    };
+    hostgroups = mkOption {
+      description = "Hostgroups to declaratively load into FastNetMon Advanced";
+      type = types.attrsOf settingsFormat.type;
+      default = {};
+    };
+    bgpPeers = mkOption {
+      description = "BGP Peers to declaratively load into FastNetMon Advanced";
+      type = types.attrsOf settingsFormat.type;
+      default = {};
+    };
+
+    enableAdvancedTrafficPersistence = mkOption {
+      description = "Store historical flow data in clickhouse";
+      type = types.bool;
+      default = false;
+    };
+
+    traffic_db.settings = mkOption {
+      type = settingsFormat.type;
+      description = "Additional settings for /etc/fastnetmon/traffic_db.conf";
+    };
+  };
+
+  config = lib.mkMerge [ (lib.mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [
+      fastnetmon-advanced # for fcli
+    ];
+
+    environment.etc."fastnetmon/license.lic".source = "/var/lib/fastnetmon/license.lic";
+    environment.etc."fastnetmon/gobgpd.conf".source = "/run/fastnetmon/gobgpd.conf";
+    environment.etc."fastnetmon/fastnetmon.conf".source = pkgs.writeText "fastnetmon.conf" (builtins.toJSON {
+      mongodb_username = "";
+    });
+
+    services.ferretdb.enable = true;
+
+    systemd.services.fastnetmon-setup = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "ferretdb.service" ];
+      path = with pkgs; [ fastnetmon-advanced config.systemd.package ];
+      script = ''
+        fcli create_configuration
+        fcli delete hostgroup global
+        fcli import_configuration ${config_tar}
+        systemctl --no-block try-restart fastnetmon
+      '';
+      serviceConfig.Type = "oneshot";
+    };
+
+    systemd.services.fastnetmon = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "ferretdb.service" "fastnetmon-setup.service" "polkit.service" ];
+      path = with pkgs; [ iproute2 ];
+      unitConfig = {
+        # Disable the logic that shuts the service down after too many restarts.
+        # Restarts are triggered by `sudo fcli commit`, so frequent restarts are expected.
+        # Details: https://github.com/systemd/systemd/issues/2416
+        StartLimitInterval = 0;
+      };
+      serviceConfig = {
+        ExecStart = "${pkgs.fastnetmon-advanced}/bin/fastnetmon --log_to_console";
+
+        LimitNOFILE = 65535;
+        # Restart the service whenever it fails; we need to keep processing traffic no matter what happens.
+        Restart= "on-failure";
+        RestartSec= "5s";
+
+        DynamicUser = true;
+        CacheDirectory = "fastnetmon";
+        RuntimeDirectory = "fastnetmon"; # for gobgpd config
+        StateDirectory = "fastnetmon"; # for license file
+      };
+    };
+
+    security.polkit.enable = true;
+    security.polkit.extraConfig = ''
+      polkit.addRule(function(action, subject) {
+        if (action.id == "org.freedesktop.systemd1.manage-units" &&
+          subject.isInGroup("fastnetmon")) {
+          if (action.lookup("unit") == "gobgp.service") {
+            var verb = action.lookup("verb");
+            if (verb == "start" || verb == "stop" || verb == "restart") {
+              return polkit.Result.YES;
+            }
+          }
+        }
+      });
+    '';
+
+    # We don't use the existing gobgp NixOS module and package, because the gobgp
+    # version might not be compatible with fastnetmon. Also, the service name
+    # _must_ be 'gobgp' and not 'gobgpd', so that fastnetmon can reload the config.
+    systemd.services.gobgp = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      description = "GoBGP Routing Daemon";
+      unitConfig = {
+        ConditionPathExists = "/run/fastnetmon/gobgpd.conf";
+      };
+      serviceConfig = {
+        Type = "notify";
+        ExecStartPre = "${pkgs.fastnetmon-advanced}/bin/fnm-gobgpd -f /run/fastnetmon/gobgpd.conf -d";
+        SupplementaryGroups = [ "fastnetmon" ];
+        ExecStart = "${pkgs.fastnetmon-advanced}/bin/fnm-gobgpd -f /run/fastnetmon/gobgpd.conf --sdnotify";
+        ExecReload = "${pkgs.fastnetmon-advanced}/bin/fnm-gobgpd -r";
+        DynamicUser = true;
+        AmbientCapabilities = "cap_net_bind_service";
+      };
+    };
+  })
+
+  (lib.mkIf (cfg.enable && cfg.enableAdvancedTrafficPersistence) {
+    ## Advanced Traffic persistence
+    ## https://fastnetmon.com/docs-fnm-advanced/fastnetmon-advanced-traffic-persistency/
+
+    services.clickhouse.enable = true;
+
+    services.fastnetmon-advanced.settings.traffic_db = true;
+
+    services.fastnetmon-advanced.traffic_db.settings = {
+      clickhouse_batch_size = lib.mkDefault 1000;
+      clickhouse_batch_delay = lib.mkDefault 1;
+      traffic_db_host = lib.mkDefault "127.0.0.1";
+      traffic_db_port = lib.mkDefault 8100;
+      clickhouse_host = lib.mkDefault "127.0.0.1";
+      clickhouse_port = lib.mkDefault 9000;
+      clickhouse_user = lib.mkDefault "default";
+      clickhouse_password = lib.mkDefault "";
+    };
+    environment.etc."fastnetmon/traffic_db.conf".text = builtins.toJSON cfg.traffic_db.settings;
+
+    systemd.services.traffic_db = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.fastnetmon-advanced}/bin/traffic_db";
+        # Restart the service whenever it fails; we need to keep processing traffic no matter what happens.
+        Restart= "on-failure";
+        RestartSec= "5s";
+
+        DynamicUser = true;
+      };
+    };
+
+  }) ];
+
+  meta.maintainers = lib.teams.wdz.members;
+}
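
A hedged example of enabling the new module; the network, hostgroup and BGP peer values are placeholders (hostgroups and bgpPeers accept freeform settings).

  services.fastnetmon-advanced = {
    enable = true;
    settings = {
      networks_list = [ "192.0.2.0/24" ];
      gobgp = true;
      gobgp_flow_spec_announces = true;
    };
    hostgroups.customers = {
      networks = [ "192.0.2.128/25" ];           # illustrative hostgroup field
    };
    bgpPeers.upstream = {
      local_asn = 65001;                         # illustrative BGP peer fields
      remote_asn = 65000;
    };
  };
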
diff --git a/nixos/modules/services/networking/kea.nix b/nixos/modules/services/networking/kea.nix
index 945f4113bd47d..2f922a026a3a9 100644
--- a/nixos/modules/services/networking/kea.nix
+++ b/nixos/modules/services/networking/kea.nix
@@ -254,7 +254,6 @@ in
       DynamicUser = true;
       User = "kea";
       ConfigurationDirectory = "kea";
-      RuntimeDirectory = "kea";
       StateDirectory = "kea";
       UMask = "0077";
     };
@@ -289,8 +288,8 @@ in
       ];
 
       environment = {
-        KEA_PIDFILE_DIR = "/run/kea";
-        KEA_LOCKFILE_DIR = "/run/kea";
+        KEA_PIDFILE_DIR = "/run/kea-ctrl-agent";
+        KEA_LOCKFILE_DIR = "/run/kea-ctrl-agent";
       };
 
       restartTriggers = [
@@ -301,6 +300,7 @@ in
         ExecStart = "${package}/bin/kea-ctrl-agent -c /etc/kea/ctrl-agent.conf ${lib.escapeShellArgs cfg.ctrl-agent.extraArgs}";
         KillMode = "process";
         Restart = "on-failure";
+        RuntimeDirectory = "kea-ctrl-agent";
       } // commonServiceConfig;
     };
   })
@@ -329,8 +329,8 @@ in
       ];
 
       environment = {
-        KEA_PIDFILE_DIR = "/run/kea";
-        KEA_LOCKFILE_DIR = "/run/kea";
+        KEA_PIDFILE_DIR = "/run/kea-dhcp4";
+        KEA_LOCKFILE_DIR = "/run/kea-dhcp4";
       };
 
       restartTriggers = [
@@ -348,6 +348,7 @@ in
           "CAP_NET_BIND_SERVICE"
           "CAP_NET_RAW"
         ];
+        RuntimeDirectory = "kea-dhcp4";
       } // commonServiceConfig;
     };
   })
@@ -376,8 +377,8 @@ in
       ];
 
       environment = {
-        KEA_PIDFILE_DIR = "/run/kea";
-        KEA_LOCKFILE_DIR = "/run/kea";
+        KEA_PIDFILE_DIR = "/run/kea-dhcp6";
+        KEA_LOCKFILE_DIR = "/run/kea-dhcp6";
       };
 
       restartTriggers = [
@@ -393,6 +394,7 @@ in
         CapabilityBoundingSet = [
           "CAP_NET_BIND_SERVICE"
         ];
+        RuntimeDirectory = "kea-dhcp6";
       } // commonServiceConfig;
     };
   })
@@ -421,8 +423,8 @@ in
       ];
 
       environment = {
-        KEA_PIDFILE_DIR = "/run/kea";
-        KEA_LOCKFILE_DIR = "/run/kea";
+        KEA_PIDFILE_DIR = "/run/kea-dhcp-ddns";
+        KEA_LOCKFILE_DIR = "/run/kea-dhcp-ddns";
       };
 
       restartTriggers = [
@@ -437,6 +439,7 @@ in
         CapabilityBoundingSet = [
           "CAP_NET_BIND_SERVICE"
         ];
+        RuntimeDirectory = "kea-dhcp-ddns";
       } // commonServiceConfig;
     };
   })
diff --git a/nixos/modules/services/networking/mullvad-vpn.nix b/nixos/modules/services/networking/mullvad-vpn.nix
index 82e68bf92af1f..99ffbf56ccb00 100644
--- a/nixos/modules/services/networking/mullvad-vpn.nix
+++ b/nixos/modules/services/networking/mullvad-vpn.nix
@@ -76,5 +76,5 @@ with lib;
     };
   };
 
-  meta.maintainers = with maintainers; [ patricksjackson ymarkus ];
+  meta.maintainers = with maintainers; [ arcuru ymarkus ];
 }
diff --git a/nixos/modules/services/networking/rosenpass.nix b/nixos/modules/services/networking/rosenpass.nix
new file mode 100644
index 0000000000000..d2a264b83d677
--- /dev/null
+++ b/nixos/modules/services/networking/rosenpass.nix
@@ -0,0 +1,233 @@
+{ config
+, lib
+, options
+, pkgs
+, ...
+}:
+let
+  inherit (lib)
+    attrValues
+    concatLines
+    concatMap
+    filter
+    filterAttrsRecursive
+    flatten
+    getExe
+    mdDoc
+    mkIf
+    optional
+    ;
+
+  cfg = config.services.rosenpass;
+  opt = options.services.rosenpass;
+  settingsFormat = pkgs.formats.toml { };
+in
+{
+  options.services.rosenpass =
+    let
+      inherit (lib)
+        literalExpression
+        mdDoc
+        mkOption
+        ;
+      inherit (lib.types)
+        enum
+        listOf
+        nullOr
+        path
+        str
+        submodule
+        ;
+    in
+    {
+      enable = lib.mkEnableOption (mdDoc "Rosenpass");
+
+      package = lib.mkPackageOption pkgs "rosenpass" { };
+
+      defaultDevice = mkOption {
+        type = nullOr str;
+        description = mdDoc "Name of the network interface to use for all peers by default.";
+        example = "wg0";
+      };
+
+      settings = mkOption {
+        type = submodule {
+          freeformType = settingsFormat.type;
+
+          options = {
+            public_key = mkOption {
+              type = path;
+              description = mdDoc "Path to a file containing the public key of the local Rosenpass peer. Generate this by running {command}`rosenpass gen-keys`.";
+            };
+
+            secret_key = mkOption {
+              type = path;
+              description = mdDoc "Path to a file containing the secret key of the local Rosenpass peer. Generate this by running {command}`rosenpass gen-keys`.";
+            };
+
+            listen = mkOption {
+              type = listOf str;
+              description = mdDoc "List of local endpoints to listen for connections.";
+              default = [ ];
+              example = literalExpression "[ \"0.0.0.0:10000\" ]";
+            };
+
+            verbosity = mkOption {
+              type = enum [ "Verbose" "Quiet" ];
+              default = "Quiet";
+              description = mdDoc "Verbosity of output produced by the service.";
+            };
+
+            peers =
+              let
+                peer = submodule {
+                  freeformType = settingsFormat.type;
+
+                  options = {
+                    public_key = mkOption {
+                      type = path;
+                      description = mdDoc "Path to a file containing the public key of the remote Rosenpass peer.";
+                    };
+
+                    endpoint = mkOption {
+                      type = nullOr str;
+                      default = null;
+                      description = mdDoc "Endpoint of the remote Rosenpass peer.";
+                    };
+
+                    device = mkOption {
+                      type = str;
+                      default = cfg.defaultDevice;
+                      defaultText = literalExpression "config.${opt.defaultDevice}";
+                      description = mdDoc "Name of the local WireGuard interface to use for this peer.";
+                    };
+
+                    peer = mkOption {
+                      type = str;
+                      description = mdDoc "WireGuard public key corresponding to the remote Rosenpass peer.";
+                    };
+                  };
+                };
+              in
+              mkOption {
+                type = listOf peer;
+                description = mdDoc "List of peers to exchange keys with.";
+                default = [ ];
+              };
+          };
+        };
+        default = { };
+        description = mdDoc "Configuration for Rosenpass, see <https://rosenpass.eu/> for further information.";
+      };
+    };
+
+  config = mkIf cfg.enable {
+    warnings =
+      let
+        # NOTE: In the descriptions below, we tried to refer to e.g.
+        # options.systemd.network.netdevs."<name>".wireguardPeers.*.PublicKey
+        # directly, but don't know how to traverse "<name>" and * in this path.
+        extractions = [
+          {
+            relevant = config.systemd.network.enable;
+            root = config.systemd.network.netdevs;
+            peer = (x: x.wireguardPeers);
+            key = (x: if x.wireguardPeerConfig ? PublicKey then x.wireguardPeerConfig.PublicKey else null);
+            description = mdDoc "${options.systemd.network.netdevs}.\"<name>\".wireguardPeers.*.wireguardPeerConfig.PublicKey";
+          }
+          {
+            relevant = config.networking.wireguard.enable;
+            root = config.networking.wireguard.interfaces;
+            peer = (x: x.peers);
+            key = (x: x.publicKey);
+            description = mdDoc "${options.networking.wireguard.interfaces}.\"<name>\".peers.*.publicKey";
+          }
+          rec {
+            relevant = root != { };
+            root = config.networking.wg-quick.interfaces;
+            peer = (x: x.peers);
+            key = (x: x.publicKey);
+            description = mdDoc "${options.networking.wg-quick.interfaces}.\"<name>\".peers.*.publicKey";
+          }
+        ];
+        relevantExtractions = filter (x: x.relevant) extractions;
+        extract = { root, peer, key, ... }:
+          filter (x: x != null) (flatten (concatMap (x: (map key (peer x))) (attrValues root)));
+        configuredKeys = flatten (map extract relevantExtractions);
+        itemize = xs: concatLines (map (x: " - ${x}") xs);
+        descriptions = map (x: "`${x.description}`");
+        missingKeys = filter (key: !builtins.elem key configuredKeys) (map (x: x.peer) cfg.settings.peers);
+        unusual = ''
+          While this may work as expected, e.g. if you intend to configure WireGuard manually,
+          such a scenario is unusual. Please double-check your configuration.
+        '';
+      in
+      (optional (relevantExtractions != [ ] && missingKeys != [ ]) ''
+        You have configured Rosenpass peers with the WireGuard public keys:
+        ${itemize missingKeys}
+        But there is no corresponding active WireGuard peer configuration in any of:
+        ${itemize (descriptions relevantExtractions)}
+        ${unusual}
+      '')
+      ++
+      optional (relevantExtractions == [ ]) ''
+        You have configured Rosenpass, but you have not configured WireGuard via any of:
+        ${itemize (descriptions extractions)}
+        ${unusual}
+      '';
+
+    environment.systemPackages = [ cfg.package pkgs.wireguard-tools ];
+
+    systemd.services.rosenpass =
+      let
+        filterNonNull = filterAttrsRecursive (_: v: v != null);
+        config = settingsFormat.generate "config.toml" (
+          filterNonNull (cfg.settings
+            //
+            (
+              let
+                credentialPath = id: "$CREDENTIALS_DIRECTORY/${id}";
+                # NOTE: We would like to remove all `null` values inside `cfg.settings`
+                # recursively, since `settingsFormat.generate` cannot handle `null`.
+                # This would require traversing both attribute sets and lists recursively.
+                # `filterAttrsRecursive` only recurses into attribute sets, but not
+                # into values that might contain other attribute sets (such as lists,
+                # e.g. `cfg.settings.peers`). Here, we just specialize on `cfg.settings.peers`,
+                # and this may break unexpectedly whenever a `null` value is contained
+                # in a list in `cfg.settings`, other than `cfg.settings.peers`.
+                peersWithoutNulls = map filterNonNull cfg.settings.peers;
+              in
+              {
+                secret_key = credentialPath "pqsk";
+                public_key = credentialPath "pqpk";
+                peers = peersWithoutNulls;
+              }
+            )
+          )
+        );
+      in
+      rec {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network-online.target" ];
+        path = [ cfg.package pkgs.wireguard-tools ];
+
+        serviceConfig = {
+          User = "rosenpass";
+          Group = "rosenpass";
+          RuntimeDirectory = "rosenpass";
+          DynamicUser = true;
+          AmbientCapabilities = [ "CAP_NET_ADMIN" ];
+          LoadCredential = [
+            "pqsk:${cfg.settings.secret_key}"
+            "pqpk:${cfg.settings.public_key}"
+          ];
+        };
+
+        # See <https://www.freedesktop.org/software/systemd/man/systemd.unit.html#Specifiers>
+        environment.CONFIG = "%t/${serviceConfig.RuntimeDirectory}/config.toml";
+
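+        # The generated TOML still contains the literal `$CREDENTIALS_DIRECTORY/pqsk`
+        # and `$CREDENTIALS_DIRECTORY/pqpk` placeholders; envsubst expands them at
+        # runtime, once systemd has set up the credentials directory, and writes
+        # the final configuration to $CONFIG.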
+        preStart = "${getExe pkgs.envsubst} -i ${config} -o \"$CONFIG\"";
+        script = "rosenpass exchange-config \"$CONFIG\"";
+      };
+  };
+}
diff --git a/nixos/modules/services/networking/syncthing.nix b/nixos/modules/services/networking/syncthing.nix
index c4b2d0e80f9be..bdcdaf056d038 100644
--- a/nixos/modules/services/networking/syncthing.nix
+++ b/nixos/modules/services/networking/syncthing.nix
@@ -10,6 +10,21 @@ let
   settingsFormat = pkgs.formats.json { };
   cleanedConfig = converge (filterAttrsRecursive (_: v: v != null && v != {})) cfg.settings;
 
+  isUnixGui = (builtins.substring 0 1 cfg.guiAddress) == "/";
+
+  # Syncthing supports serving the GUI over a Unix socket. In that case, the
+  # API is served over the Unix socket as well. This function returns the
+  # correct address arguments for curl for both network and Unix socket
+  # addresses.
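+  # For example, with path = "/rest/config" (illustrative guiAddress values):
+  #   guiAddress = "127.0.0.1:8384"          yields "127.0.0.1:8384/rest/config"
+  #   guiAddress = "/run/syncthing/api.sock" yields
+  #     "--unix-socket /run/syncthing/api.sock http://./rest/config"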
+  curlAddressArgs = path: if isUnixGui
+    # if cfg.guiAddress is a unix socket, tell curl explicitly about it
+    # note that the dot in front of `${path}` is the hostname, which is
+    # required.
+    then "--unix-socket ${cfg.guiAddress} http://.${path}"
+    # no adjustments are needed if cfg.guiAddress is a network address
+    else "${cfg.guiAddress}${path}"
+    ;
+
   devices = mapAttrsToList (_: device: device // {
     deviceID = device.id;
   }) cfg.settings.devices;
@@ -62,14 +77,14 @@ let
       GET_IdAttrName = "deviceID";
       override = cfg.overrideDevices;
       conf = devices;
-      baseAddress = "${cfg.guiAddress}/rest/config/devices";
+      baseAddress = curlAddressArgs "/rest/config/devices";
     };
     dirs = {
       new_conf_IDs = map (v: v.id) folders;
       GET_IdAttrName = "id";
       override = cfg.overrideFolders;
       conf = folders;
-      baseAddress = "${cfg.guiAddress}/rest/config/folders";
+      baseAddress = curlAddressArgs "/rest/config/folders";
     };
   } [
     # Now for each of these attributes, write the curl commands that are
@@ -117,15 +132,14 @@ let
     builtins.attrNames
     (lib.subtractLists ["folders" "devices"])
     (map (subOption: ''
-      curl -X PUT -d ${lib.escapeShellArg (builtins.toJSON cleanedConfig.${subOption})} \
-        ${cfg.guiAddress}/rest/config/${subOption}
+      curl -X PUT -d ${lib.escapeShellArg (builtins.toJSON cleanedConfig.${subOption})} ${curlAddressArgs "/rest/config/${subOption}"}
     ''))
     (lib.concatStringsSep "\n")
   ]) + ''
     # restart Syncthing if required
-    if curl ${cfg.guiAddress}/rest/config/restart-required |
+    if curl ${curlAddressArgs "/rest/config/restart-required"} |
        ${jq} -e .requiresRestart > /dev/null; then
-        curl -X POST ${cfg.guiAddress}/rest/system/restart
+        curl -X POST ${curlAddressArgs "/rest/system/restart"}
     fi
   '');
 in {
@@ -651,7 +665,7 @@ in {
           ExecStart = ''
             ${cfg.package}/bin/syncthing \
               -no-browser \
-              -gui-address=${cfg.guiAddress} \
+              -gui-address=${if isUnixGui then "unix://" else ""}${cfg.guiAddress} \
               -home=${cfg.configDir} ${escapeShellArgs cfg.extraFlags}
           '';
           MemoryDenyWriteExecute = true;
diff --git a/nixos/modules/services/networking/wpa_supplicant.nix b/nixos/modules/services/networking/wpa_supplicant.nix
index 0595e9e6df238..90d9c68433cf4 100644
--- a/nixos/modules/services/networking/wpa_supplicant.nix
+++ b/nixos/modules/services/networking/wpa_supplicant.nix
@@ -530,5 +530,5 @@ in {
     '';
   };
 
-  meta.maintainers = with lib.maintainers; [ globin rnhmjoj ];
+  meta.maintainers = with lib.maintainers; [ rnhmjoj ];
 }
diff --git a/nixos/modules/services/web-apps/akkoma.nix b/nixos/modules/services/web-apps/akkoma.nix
index 8d17752586128..eaee70c712bb8 100644
--- a/nixos/modules/services/web-apps/akkoma.nix
+++ b/nixos/modules/services/web-apps/akkoma.nix
@@ -282,11 +282,11 @@ let
         AKKOMA_CONFIG_PATH="$RUNTIME_DIRECTORY/config.exs" \
         ERL_EPMD_ADDRESS="${cfg.dist.address}" \
         ERL_EPMD_PORT="${toString cfg.dist.epmdPort}" \
-        ERL_FLAGS="${concatStringsSep " " [
-          "-kernel inet_dist_use_interface '${erlAddr cfg.dist.address}'"
-          "-kernel inet_dist_listen_min ${toString cfg.dist.portMin}"
-          "-kernel inet_dist_listen_max ${toString cfg.dist.portMax}"
-        ]}" \
+        ERL_FLAGS=${lib.escapeShellArg (lib.escapeShellArgs ([
+          "-kernel" "inet_dist_use_interface" (erlAddr cfg.dist.address)
+          "-kernel" "inet_dist_listen_min" (toString cfg.dist.portMin)
+          "-kernel" "inet_dist_listen_max" (toString cfg.dist.portMax)
+        ] ++ cfg.dist.extraFlags))} \
         RELEASE_COOKIE="$(<"$RUNTIME_DIRECTORY/cookie")" \
         RELEASE_NAME="akkoma" \
           exec "${cfg.package}/bin/$(basename "$0")" "$@"
@@ -553,6 +553,13 @@ in {
           description = mdDoc "TCP port to bind Erlang Port Mapper Daemon to.";
         };
 
+        extraFlags = mkOption {
+          type = with types; listOf str;
+          default = [ ];
+          description = mdDoc "Extra flags to pass to Erlang";
+          example = [ "+sbwt" "none" "+sbwtdcpu" "none" "+sbwtdio" "none" ];
+        };
+
         portMin = mkOption {
           type = types.port;
           default = 49152;
diff --git a/nixos/modules/services/web-apps/c2fmzq-server.md b/nixos/modules/services/web-apps/c2fmzq-server.md
new file mode 100644
index 0000000000000..236953bd4ff7a
--- /dev/null
+++ b/nixos/modules/services/web-apps/c2fmzq-server.md
@@ -0,0 +1,42 @@
+# c2FmZQ {#module-services-c2fmzq}
+
+c2FmZQ is an application that can securely encrypt, store, and share files,
+including but not limited to pictures and videos.
+
+The service `c2fmzq-server` can be enabled by setting
+```
+{
+  services.c2fmzq-server.enable = true;
+}
+```
+This will spin up an instance of the server, which is API-compatible with
+[Stingle Photos](https://stingle.org), together with an experimental
+Progressive Web App (PWA) for interacting with the storage from the browser.
+
+In principle the server can be exposed directly on a public interface, and there
+are command-line options to manage HTTPS certificates directly, but the module
+is designed for use behind a reverse proxy or for access via localhost only.
+
+```
+{
+  services.c2fmzq-server = {
+    enable = true;
+    bindIP = "127.0.0.1"; # default
+    port = 8080; # default
+  };
+
+  services.nginx = {
+    enable = true;
+    recommendedProxySettings = true;
+    virtualHosts."example.com" = {
+      enableACME = true;
+      forceSSL = true;
+      locations."/" = {
+        proxyPass = "http://127.0.0.1:8080";
+      };
+    };
+  };
+}
+```
+
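+Any other command line argument supported by the server (see
+`c2FmZQ-server help`) can be set through `services.c2fmzq-server.settings`,
+for example (illustrative values):
+
+```
+{
+  services.c2fmzq-server.settings = {
+    verbose = 3;
+    allow-new-accounts = true;
+    auto-approve-new-accounts = true;
+  };
+}
+```
+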
+For more information, see <https://github.com/c2FmZQ/c2FmZQ/>.
diff --git a/nixos/modules/services/web-apps/c2fmzq-server.nix b/nixos/modules/services/web-apps/c2fmzq-server.nix
new file mode 100644
index 0000000000000..2749c2a5a87aa
--- /dev/null
+++ b/nixos/modules/services/web-apps/c2fmzq-server.nix
@@ -0,0 +1,125 @@
+{ lib, pkgs, config, ... }:
+
+let
+  inherit (lib) mkEnableOption mkPackageOption mkOption types;
+
+  cfg = config.services.c2fmzq-server;
+
+  argsFormat = {
+    type = with lib.types; nullOr (oneOf [ bool int str ]);
+    generate = lib.cli.toGNUCommandLineShell { };
+  };
+in {
+  options.services.c2fmzq-server = {
+    enable = mkEnableOption "c2fmzq-server";
+
+    bindIP = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      description = "The local address to use.";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8080;
+      description = "The local port to use.";
+    };
+
+    passphraseFile = mkOption {
+      type = types.str;
+      example = "/run/secrets/c2fmzq/pwfile";
+      description = "Path to file containing the database passphrase";
+    };
+
+    package = mkPackageOption pkgs "c2fmzq" { };
+
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = argsFormat.type;
+
+        options = {
+          address = mkOption {
+            internal = true;
+            type = types.str;
+            default = "${cfg.bindIP}:${toString cfg.port}";
+          };
+
+          database = mkOption {
+            type = types.str;
+            default = "%S/c2fmzq-server/data";
+            description = "Path of the database";
+          };
+
+          verbose = mkOption {
+            type = types.ints.between 1 3;
+            default = 2;
+            description = "The level of logging verbosity: 1:Error 2:Info 3:Debug";
+          };
+        };
+      };
+      description = ''
+        Configuration for c2FmZQ-server passed as CLI arguments.
+        Run {command}`c2FmZQ-server help` for supported values.
+      '';
+      example = {
+        verbose = 3;
+        allow-new-accounts = true;
+        auto-approve-new-accounts = true;
+        encrypt-metadata = true;
+        enable-webapp = true;
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.c2fmzq-server = {
+      description = "c2FmZQ-server";
+      documentation = [ "https://github.com/c2FmZQ/c2FmZQ/blob/main/README.md" ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "network-online.target" ];
+
+      serviceConfig = {
+        ExecStart = "${lib.getExe cfg.package} ${argsFormat.generate cfg.settings}";
+        AmbientCapabilities = "";
+        CapabilityBoundingSet = "";
+        DynamicUser = true;
+        Environment = "C2FMZQ_PASSPHRASE_FILE=%d/passphrase-file";
+        IPAccounting = true;
+        IPAddressAllow = cfg.bindIP;
+        IPAddressDeny = "any";
+        LoadCredential = "passphrase-file:${cfg.passphraseFile}";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateIPC = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SocketBindAllow = cfg.port;
+        SocketBindDeny = "any";
+        StateDirectory = "c2fmzq-server";
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged @obsolete" ];
+      };
+    };
+  };
+
+  meta = {
+    doc = ./c2fmzq-server.md;
+    maintainers = with lib.maintainers; [ hmenke ];
+  };
+}
diff --git a/nixos/modules/services/web-apps/lanraragi.nix b/nixos/modules/services/web-apps/lanraragi.nix
new file mode 100644
index 0000000000000..f1ab8b8b4eb4c
--- /dev/null
+++ b/nixos/modules/services/web-apps/lanraragi.nix
@@ -0,0 +1,100 @@
+{ pkgs, lib, config, ... }:
+
+let
+  cfg = config.services.lanraragi;
+in
+{
+  meta.maintainers = with lib.maintainers; [ tomasajt ];
+
+  options.services = {
+    lanraragi = {
+      enable = lib.mkEnableOption (lib.mdDoc "LANraragi");
+      package = lib.mkPackageOptionMD pkgs "lanraragi" { };
+
+      port = lib.mkOption {
+        type = lib.types.port;
+        default = 3000;
+        description = lib.mdDoc "Port for LANraragi's web interface.";
+      };
+
+      passwordFile = lib.mkOption {
+        type = lib.types.nullOr lib.types.path;
+        default = null;
+        example = "/run/keys/lanraragi-password";
+        description = lib.mdDoc ''
+          A file containing the password for LANraragi's admin interface.
+        '';
+      };
+
+      redis = {
+        port = lib.mkOption {
+          type = lib.types.port;
+          default = 6379;
+          description = lib.mdDoc "Port for LANraragi's Redis server.";
+        };
+        passwordFile = lib.mkOption {
+          type = lib.types.nullOr lib.types.path;
+          default = null;
+          example = "/run/keys/redis-lanraragi-password";
+          description = lib.mdDoc ''
+            A file containing the password for LANraragi's Redis server.
+          '';
+        };
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.redis.servers.lanraragi = {
+      enable = true;
+      port = cfg.redis.port;
+      requirePassFile = cfg.redis.passwordFile;
+    };
+
+    systemd.services.lanraragi = {
+      description = "LANraragi main service";
+      after = [ "network.target" "redis-lanraragi.service" ];
+      requires = [ "redis-lanraragi.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = lib.getExe cfg.package;
+        DynamicUser = true;
+        StateDirectory = "lanraragi";
+        RuntimeDirectory = "lanraragi";
+        LogsDirectory = "lanraragi";
+        Restart = "on-failure";
+        WorkingDirectory = "/var/lib/lanraragi";
+      };
+      environment = {
+        "LRR_TEMP_DIRECTORY" = "/run/lanraragi";
+        "LRR_LOG_DIRECTORY" = "/var/log/lanraragi";
+        "LRR_NETWORK" = "http://*:${toString cfg.port}";
+        "HOME" = "/var/lib/lanraragi";
+      };
+      preStart = ''
+        REDIS_PASS=${lib.optionalString (cfg.redis.passwordFile != null) "$(head -n1 ${cfg.redis.passwordFile})"}
+        cat > lrr.conf <<EOF
+        {
+          redis_address => "127.0.0.1:${toString cfg.redis.port}",
+          redis_password => "$REDIS_PASS",
+          redis_database => "0",
+          redis_database_minion => "1",
+          redis_database_config => "2",
+          redis_database_search => "3",
+        }
+        EOF
+      '' + lib.optionalString (cfg.passwordFile != null) ''
+        PASS_HASH=$(
+          PASS=$(head -n1 ${cfg.passwordFile}) ${cfg.package.perlEnv}/bin/perl -I${cfg.package}/share/lanraragi/lib -e \
+            'use LANraragi::Controller::Config; print LANraragi::Controller::Config::make_password_hash($ENV{PASS})' \
+            2>/dev/null
+        )
+
+        ${lib.getExe pkgs.redis} -h 127.0.0.1 -p ${toString cfg.redis.port} -a "$REDIS_PASS" <<EOF
+          SELECT 2
+          HSET LRR_CONFIG password $PASS_HASH
+        EOF
+      '';
+    };
+  };
+}
diff --git a/nixos/modules/services/web-apps/shiori.nix b/nixos/modules/services/web-apps/shiori.nix
index f0505e052e1c7..71b5ad4d4c062 100644
--- a/nixos/modules/services/web-apps/shiori.nix
+++ b/nixos/modules/services/web-apps/shiori.nix
@@ -29,6 +29,13 @@ in {
         default = 8080;
         description = lib.mdDoc "The port of the Shiori web application";
       };
+
+      webRoot = mkOption {
+        type = types.str;
+        default = "/";
+        example = "/shiori";
+        description = lib.mdDoc "The root of the Shiori web application";
+      };
     };
   };
 
@@ -40,7 +47,7 @@ in {
       environment.SHIORI_DIR = "/var/lib/shiori";
 
       serviceConfig = {
-        ExecStart = "${package}/bin/shiori serve --address '${address}' --port '${toString port}'";
+        ExecStart = "${package}/bin/shiori serve --address '${address}' --port '${toString port}' --webroot '${webRoot}'";
 
         DynamicUser = true;
         StateDirectory = "shiori";
diff --git a/nixos/modules/services/web-apps/snipe-it.nix b/nixos/modules/services/web-apps/snipe-it.nix
index 9cba5cb4fa9e0..4fbf2bad750bc 100644
--- a/nixos/modules/services/web-apps/snipe-it.nix
+++ b/nixos/modules/services/web-apps/snipe-it.nix
@@ -18,15 +18,19 @@ let
   inherit (snipe-it.passthru) phpPackage;
 
   # shell script for local administration
-  artisan = pkgs.writeScriptBin "snipe-it" ''
+  artisan = (pkgs.writeScriptBin "snipe-it" ''
     #! ${pkgs.runtimeShell}
-    cd ${snipe-it}
+    cd "${snipe-it}/share/php/snipe-it"
     sudo=exec
     if [[ "$USER" != ${user} ]]; then
       sudo='exec /run/wrappers/bin/sudo -u ${user}'
     fi
     $sudo ${phpPackage}/bin/php artisan $*
-  '';
+  '').overrideAttrs (old: {
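+    # Expose the wrapper via meta.mainProgram so that `lib.getExe artisan`
+    # (used below for the database migration) resolves to it.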
+    meta = old.meta // {
+      mainProgram = "snipe-it";
+    };
+  });
 in {
   options.services.snipe-it = {
 
@@ -357,7 +361,7 @@ in {
     services.nginx = {
       enable = mkDefault true;
       virtualHosts."${cfg.hostName}" = mkMerge [ cfg.nginx {
-        root = mkForce "${snipe-it}/public";
+        root = mkForce "${snipe-it}/share/php/snipe-it/public";
         extraConfig = optionalString (cfg.nginx.addSSL || cfg.nginx.forceSSL || cfg.nginx.onlySSL || cfg.nginx.enableACME) "fastcgi_param HTTPS on;";
         locations = {
           "/" = {
@@ -394,7 +398,7 @@ in {
         RuntimeDirectory = "snipe-it/cache";
         RuntimeDirectoryMode = "0700";
       };
-      path = [ pkgs.replace-secret ];
+      path = [ pkgs.replace-secret artisan ];
       script =
         let
           isSecret  = v: isAttrs v && v ? _secret && (isString v._secret || builtins.isPath v._secret);
@@ -451,7 +455,7 @@ in {
           rm "${cfg.dataDir}"/bootstrap/cache/*.php || true
 
           # migrate db
-          ${phpPackage}/bin/php artisan migrate --force
+          ${lib.getExe artisan} migrate --force
 
           # A placeholder file for invalid barcodes
           invalid_barcode_location="${cfg.dataDir}/public/uploads/barcodes/invalid_barcode.gif"
diff --git a/nixos/modules/services/web-servers/garage.nix b/nixos/modules/services/web-servers/garage.nix
index 731d5315f23aa..47b4c6ab416e8 100644
--- a/nixos/modules/services/web-servers/garage.nix
+++ b/nixos/modules/services/web-servers/garage.nix
@@ -86,7 +86,7 @@ in
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/garage server";
 
-        StateDirectory = mkIf (hasPrefix "/var/lib/garage" cfg.settings.data_dir && hasPrefix "/var/lib/garage" cfg.settings.metadata_dir) "garage";
+        StateDirectory = mkIf (hasPrefix "/var/lib/garage" cfg.settings.data_dir || hasPrefix "/var/lib/garage" cfg.settings.metadata_dir) "garage";
         DynamicUser = lib.mkDefault true;
         ProtectHome = true;
         NoNewPrivileges = true;
diff --git a/nixos/modules/system/boot/networkd.nix b/nixos/modules/system/boot/networkd.nix
index 4be040927540a..b7ced5b0d3466 100644
--- a/nixos/modules/system/boot/networkd.nix
+++ b/nixos/modules/system/boot/networkd.nix
@@ -1020,7 +1020,7 @@ let
           "MulticastToUnicast"
           "NeighborSuppression"
           "Learning"
-          "Hairpin"
+          "HairPin"
           "Isolated"
           "UseBPDU"
           "FastLeave"
@@ -1036,7 +1036,7 @@ let
         (assertValueOneOf "MulticastToUnicast" boolValues)
         (assertValueOneOf "NeighborSuppression" boolValues)
         (assertValueOneOf "Learning" boolValues)
-        (assertValueOneOf "Hairpin" boolValues)
+        (assertValueOneOf "HairPin" boolValues)
         (assertValueOneOf "Isolated" boolValues)
         (assertValueOneOf "UseBPDU" boolValues)
         (assertValueOneOf "FastLeave" boolValues)
diff --git a/nixos/modules/system/boot/systemd/tmpfiles.nix b/nixos/modules/system/boot/systemd/tmpfiles.nix
index 32b9b275d3587..183e2033ecb01 100644
--- a/nixos/modules/system/boot/systemd/tmpfiles.nix
+++ b/nixos/modules/system/boot/systemd/tmpfiles.nix
@@ -20,6 +20,102 @@ in
       '';
     };
 
+    systemd.tmpfiles.settings = mkOption {
+      description = lib.mdDoc ''
+        Declare systemd-tmpfiles rules to create, delete, and clean up volatile
+        and temporary files and directories.
+
+        Even though the service is called `*tmp*files`, you can also create
+        persistent files.
+      '';
+      example = {
+        "10-mypackage" = {
+          "/var/lib/my-service/statefolder".d = {
+            mode = "0755";
+            user = "root";
+            group = "root";
+          };
+        };
+      };
+      default = {};
+      type = types.attrsOf (types.attrsOf (types.attrsOf (types.submodule ({ name, config, ... }: {
+        options.type = mkOption {
+          type = types.str;
+          default = name;
+          example = "d";
+          description = lib.mdDoc ''
+            The type of operation to perform on the file.
+
+            The type consists of a single letter and optionally one or more
+            modifier characters.
+
+            Please see the upstream documentation for the available types and
+            more details:
+            <https://www.freedesktop.org/software/systemd/man/tmpfiles.d>
+          '';
+        };
+        options.mode = mkOption {
+          type = types.str;
+          default = "-";
+          example = "0755";
+          description = lib.mdDoc ''
+            The file access mode to use when creating this file or directory.
+          '';
+        };
+        options.user = mkOption {
+          type = types.str;
+          default = "-";
+          example = "root";
+          description = lib.mdDoc ''
+            The user of the file.
+
+            This may either be a numeric UID or a user name.
+
+            If omitted or when set to `"-"`, the user and group of the user who
+            invokes systemd-tmpfiles is used.
+          '';
+        };
+        options.group = mkOption {
+          type = types.str;
+          default = "-";
+          example = "root";
+          description = lib.mdDoc ''
+            The group of the file.
+
+            This may either be a numeric GID or a group name.
+
+            If omitted or when set to `"-"`, the user and group of the user who
+            invokes systemd-tmpfiles is used.
+          '';
+        };
+        options.age = mkOption {
+          type = types.str;
+          default = "-";
+          example = "10d";
+          description = lib.mdDoc ''
+            Delete a file when it reaches a certain age.
+
+            If a file or directory is older than the current time minus the age
+            field, it is deleted.
+
+            If set to `"-"` no automatic clean-up is done.
+          '';
+        };
+        options.argument = mkOption {
+          type = types.str;
+          default = "";
+          example = "";
+          description = lib.mdDoc ''
+            An argument whose meaning depends on the type of operation.
+
+            Please see the upstream documentation for the meaning of this
+            parameter in different situations:
+            <https://www.freedesktop.org/software/systemd/man/tmpfiles.d>
+          '';
+        };
+      }))));
+    };
+
     systemd.tmpfiles.packages = mkOption {
       type = types.listOf types.package;
       default = [];
@@ -100,7 +196,13 @@ in
           ${concatStringsSep "\n" cfg.rules}
         '';
       })
-    ];
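+    # Each entry in `settings` is rendered as one tmpfiles.d(5) line; e.g. the
+    # "10-mypackage" example from the option declaration ends up in
+    # lib/tmpfiles.d/10-mypackage.conf as:
+    #   'd' '/var/lib/my-service/statefolder' '0755' 'root' 'root' '-'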
+    ] ++ (mapAttrsToList (name: paths:
+      pkgs.writeTextDir "lib/tmpfiles.d/${name}.conf" (concatStrings (mapAttrsToList (path: types:
+        concatStrings (mapAttrsToList (_type: entry: ''
+          '${entry.type}' '${path}' '${entry.mode}' '${entry.user}' '${entry.group}' '${entry.age}' ${entry.argument}
+        '') types)
+      ) paths ))
+    ) cfg.settings);
 
     systemd.tmpfiles.rules = [
       "d  /nix/var                           0755 root root - -"
diff --git a/nixos/modules/tasks/filesystems/vfat.nix b/nixos/modules/tasks/filesystems/vfat.nix
index e535e97759b22..9281b34633c25 100644
--- a/nixos/modules/tasks/filesystems/vfat.nix
+++ b/nixos/modules/tasks/filesystems/vfat.nix
@@ -21,7 +21,7 @@ in
         ln -sv dosfsck $out/bin/fsck.vfat
       '';
 
-    boot.initrd.systemd.extraBin = mkIf inInitrd [ pkgs.dosfstools ];
+    boot.initrd.systemd.initrdBin = mkIf inInitrd [ pkgs.dosfstools ];
 
   };
 }
diff --git a/nixos/modules/virtualisation/lxc-container.nix b/nixos/modules/virtualisation/lxc-container.nix
index 1034c699629d9..61d7c4cb73fe6 100644
--- a/nixos/modules/virtualisation/lxc-container.nix
+++ b/nixos/modules/virtualisation/lxc-container.nix
@@ -37,7 +37,6 @@ in {
         ${config.nix.package.out}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system
       '';
 
-    # TODO: build rootfs as squashfs for faster unpack
     system.build.tarball = pkgs.callPackage ../../lib/make-system-tarball.nix {
       extraArgs = "--owner=0";
 
@@ -64,6 +63,23 @@ in {
       extraCommands = "mkdir -p proc sys dev";
     };
 
+    system.build.squashfs = pkgs.callPackage ../../lib/make-squashfs.nix {
+      fileName = "nixos-lxc-image-${pkgs.stdenv.hostPlatform.system}";
+
+      noStrip = true; # keep directory structure
+      comp = "zstd -Xcompression-level 6";
+
+      storeContents = [config.system.build.toplevel];
+
+      pseudoFiles = [
+        "/sbin d 0755 0 0"
+        "/sbin/init s 0555 0 0 ${config.system.build.toplevel}/init"
+        "/dev d 0755 0 0"
+        "/proc d 0555 0 0"
+        "/sys d 0555 0 0"
+      ];
+    };
+
     system.build.installBootLoader = pkgs.writeScript "install-lxd-sbin-init.sh" ''
       #!${pkgs.runtimeShell}
       ${pkgs.coreutils}/bin/ln -fs "$1/init" /sbin/init
diff --git a/nixos/modules/virtualisation/qemu-vm.nix b/nixos/modules/virtualisation/qemu-vm.nix
index 55d23b6fb8b5d..e625c6322d9c6 100644
--- a/nixos/modules/virtualisation/qemu-vm.nix
+++ b/nixos/modules/virtualisation/qemu-vm.nix
@@ -690,8 +690,8 @@ in
       package =
         mkOption {
           type = types.package;
-          default = hostPkgs.qemu_kvm;
-          defaultText = literalExpression "config.virtualisation.host.pkgs.qemu_kvm";
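+          # If the guest architecture matches the host, the leaner qemu_kvm build
+          # (which supports KVM acceleration) suffices; a foreign guest
+          # architecture needs the full qemu package for emulation.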
+          default = if hostPkgs.stdenv.hostPlatform.qemuArch == pkgs.stdenv.hostPlatform.qemuArch then hostPkgs.qemu_kvm else hostPkgs.qemu;
+          defaultText = literalExpression "if hostPkgs.stdenv.hostPlatform.qemuArch == pkgs.stdenv.hostPlatform.qemuArch then config.virtualisation.host.pkgs.qemu_kvm else config.virtualisation.host.pkgs.qemu";
           example = literalExpression "pkgs.qemu_test";
           description = lib.mdDoc "QEMU package to use.";
         };
diff --git a/nixos/release-combined.nix b/nixos/release-combined.nix
index 20bcb6802881a..9b4b92be6f3ac 100644
--- a/nixos/release-combined.nix
+++ b/nixos/release-combined.nix
@@ -97,6 +97,8 @@ in rec {
         (onSystems ["x86_64-linux"] "nixos.tests.installer.simpleUefiSystemdBoot")
         (onSystems ["x86_64-linux"] "nixos.tests.installer.simple")
         (onSystems ["x86_64-linux"] "nixos.tests.installer.swraid")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.zfsroot")
+        (onSystems ["x86_64-linux"] "nixos.tests.nixos-rebuild-specialisations")
         (onFullSupported "nixos.tests.ipv6")
         (onFullSupported "nixos.tests.keymap.azerty")
         (onFullSupported "nixos.tests.keymap.colemak")
@@ -163,7 +165,6 @@ in rec {
         (onFullSupported "nixos.tests.switchTest")
         (onFullSupported "nixos.tests.udisks2")
         (onFullSupported "nixos.tests.xfce")
-        (onSystems ["i686-linux"] "nixos.tests.zfs.installer")
         (onFullSupported "nixpkgs.emacs")
         (onFullSupported "nixpkgs.jdk")
         ["nixpkgs.tarball"]
diff --git a/nixos/release.nix b/nixos/release.nix
index abaa7ef9a7113..60f4cc94399c5 100644
--- a/nixos/release.nix
+++ b/nixos/release.nix
@@ -328,6 +328,21 @@ in rec {
 
   );
 
+  lxdContainerImageSquashfs = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ configuration
+          versionModule
+          ./maintainers/scripts/lxd/lxd-container-image.nix
+        ];
+    }).config.system.build.squashfs)
+
+  );
+
   # Metadata for the lxd image
   lxdContainerMeta = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
 
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 1c8ee32428eac..4d5cb720bfdc1 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -153,6 +153,7 @@ in {
   budgie = handleTest ./budgie.nix {};
   buildbot = handleTest ./buildbot.nix {};
   buildkite-agents = handleTest ./buildkite-agents.nix {};
+  c2fmzq = handleTest ./c2fmzq.nix {};
   caddy = handleTest ./caddy.nix {};
   cadvisor = handleTestOn ["x86_64-linux"] ./cadvisor.nix {};
   cage = handleTest ./cage.nix {};
@@ -248,6 +249,7 @@ in {
   ec2-nixops = (handleTestOn ["x86_64-linux"] ./ec2.nix {}).boot-ec2-nixops or {};
   ecryptfs = handleTest ./ecryptfs.nix {};
   fscrypt = handleTest ./fscrypt.nix {};
+  fastnetmon-advanced = runTest ./fastnetmon-advanced.nix;
   ejabberd = handleTest ./xmpp/ejabberd.nix {};
   elk = handleTestOn ["x86_64-linux"] ./elk.nix {};
   emacs-daemon = handleTest ./emacs-daemon.nix {};
@@ -287,6 +289,7 @@ in {
   firewall-nftables = handleTest ./firewall.nix { nftables = true; };
   fish = handleTest ./fish.nix {};
   flannel = handleTestOn ["x86_64-linux"] ./flannel.nix {};
+  floorp = handleTest ./firefox.nix { firefoxPackage = pkgs.floorp; };
   fluentd = handleTest ./fluentd.nix {};
   fluidd = handleTest ./fluidd.nix {};
   fontconfig-default-fonts = handleTest ./fontconfig-default-fonts.nix {};
@@ -431,6 +434,7 @@ in {
   kubo = import ./kubo { inherit recurseIntoAttrs runTest; };
   ladybird = handleTest ./ladybird.nix {};
   languagetool = handleTest ./languagetool.nix {};
+  lanraragi = handleTest ./lanraragi.nix {};
   latestKernel.login = handleTest ./login.nix { latestKernel = true; };
   leaps = handleTest ./leaps.nix {};
   lemmy = handleTest ./lemmy.nix {};
@@ -569,7 +573,8 @@ in {
   nix-serve-ssh = handleTest ./nix-serve-ssh.nix {};
   nixops = handleTest ./nixops/default.nix {};
   nixos-generate-config = handleTest ./nixos-generate-config.nix {};
-  nixos-rebuild-specialisations = handleTest ./nixos-rebuild-specialisations.nix {};
+  nixos-rebuild-install-bootloader = handleTestOn ["x86_64-linux"] ./nixos-rebuild-install-bootloader.nix {};
+  nixos-rebuild-specialisations = handleTestOn ["x86_64-linux"] ./nixos-rebuild-specialisations.nix {};
   nixpkgs = pkgs.callPackage ../modules/misc/nixpkgs/test.nix { inherit evalMinimalConfig; };
   node-red = handleTest ./node-red.nix {};
   nomad = handleTest ./nomad.nix {};
@@ -702,6 +707,7 @@ in {
   rkvm = handleTest ./rkvm {};
   robustirc-bridge = handleTest ./robustirc-bridge.nix {};
   roundcube = handleTest ./roundcube.nix {};
+  rosenpass = handleTest ./rosenpass.nix {};
   rshim = handleTest ./rshim.nix {};
   rspamd = handleTest ./rspamd.nix {};
   rss2email = handleTest ./rss2email.nix {};
diff --git a/nixos/tests/c2fmzq.nix b/nixos/tests/c2fmzq.nix
new file mode 100644
index 0000000000000..d8ec816c7d29c
--- /dev/null
+++ b/nixos/tests/c2fmzq.nix
@@ -0,0 +1,75 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "c2FmZQ";
+  meta.maintainers = with lib.maintainers; [ hmenke ];
+
+  nodes.machine = {
+    services.c2fmzq-server = {
+      enable = true;
+      port = 8080;
+      passphraseFile = builtins.toFile "pwfile" "hunter2"; # don't do this on real deployments
+      settings = {
+        verbose = 3; # debug
+      };
+    };
+    environment = {
+      sessionVariables = {
+        C2FMZQ_PASSPHRASE = "lol";
+        C2FMZQ_API_SERVER = "http://localhost:8080";
+      };
+      systemPackages = [
+        pkgs.c2fmzq
+        (pkgs.writeScriptBin "c2FmZQ-client-wrapper" ''
+          #!${pkgs.expect}/bin/expect -f
+          spawn c2FmZQ-client {*}$argv
+          expect {
+            "Enter password:" { send "$env(PASSWORD)\r" }
+            "Type YES to confirm:" { send "YES\r" }
+            timeout { exit 1 }
+            eof { exit 0 }
+          }
+          interact
+        '')
+      ];
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    machine.start()
+    machine.wait_for_unit("c2fmzq-server.service")
+    machine.wait_for_open_port(8080)
+
+    with subtest("Create accounts for alice and bob"):
+        machine.succeed("PASSWORD=foobar c2FmZQ-client-wrapper -- -v 3 create-account alice@example.com")
+        machine.succeed("PASSWORD=fizzbuzz c2FmZQ-client-wrapper -- -v 3 create-account bob@example.com")
+
+    with subtest("Log in as alice"):
+        machine.succeed("PASSWORD=foobar c2FmZQ-client-wrapper -- -v 3 login alice@example.com")
+        msg = machine.succeed("c2FmZQ-client -v 3 status")
+        assert "Logged in as alice@example.com" in msg, f"ERROR: Not logged in as alice:\n{msg}"
+
+    with subtest("Create a new album, upload a file, and delete the uploaded file"):
+        machine.succeed("c2FmZQ-client -v 3 create-album 'Rarest Memes'")
+        machine.succeed("echo 'pls do not steal' > meme.txt")
+        machine.succeed("c2FmZQ-client -v 3 import meme.txt 'Rarest Memes'")
+        machine.succeed("c2FmZQ-client -v 3 sync")
+        machine.succeed("rm meme.txt")
+
+    with subtest("Share the album with bob"):
+        machine.succeed("c2FmZQ-client-wrapper -- -v 3 share 'Rarest Memes' bob@example.com")
+
+    with subtest("Log in as bob"):
+        machine.succeed("PASSWORD=fizzbuzz c2FmZQ-client-wrapper -- -v 3 login bob@example.com")
+        msg = machine.succeed("c2FmZQ-client -v 3 status")
+        assert "Logged in as bob@example.com" in msg, f"ERROR: Not logged in as bob:\n{msg}"
+
+    with subtest("Download the shared file"):
+        machine.succeed("c2FmZQ-client -v 3 download 'shared/Rarest Memes/meme.txt'")
+        machine.succeed("c2FmZQ-client -v 3 export 'shared/Rarest Memes/meme.txt' .")
+        msg = machine.succeed("cat meme.txt")
+        assert "pls do not steal\n" == msg, f"File content is not the same:\n{msg}"
+
+    with subtest("Test that PWA is served"):
+        msg = machine.succeed("curl -sSfL http://localhost:8080")
+        assert "c2FmZQ" in msg, f"Could not find 'c2FmZQ' in the output:\n{msg}"
+  '';
+})
diff --git a/nixos/tests/cockpit.nix b/nixos/tests/cockpit.nix
index 6f86d1e2c464c..e7165b9790141 100644
--- a/nixos/tests/cockpit.nix
+++ b/nixos/tests/cockpit.nix
@@ -50,7 +50,8 @@ import ./make-test-python.nix (
             options = Options()
             options.add_argument("--headless")
 
-            driver = webdriver.Firefox(options=options)
+            service = webdriver.FirefoxService(executable_path="${lib.getExe pkgs.geckodriver}")  # noqa: E501
+            driver = webdriver.Firefox(options=options, service=service)
 
             driver.implicitly_wait(10)
 
diff --git a/nixos/tests/fastnetmon-advanced.nix b/nixos/tests/fastnetmon-advanced.nix
new file mode 100644
index 0000000000000..b2d2713a92110
--- /dev/null
+++ b/nixos/tests/fastnetmon-advanced.nix
@@ -0,0 +1,65 @@
+{ pkgs, lib, ... }:
+
+{
+  name = "fastnetmon-advanced";
+  meta.maintainers = lib.teams.wdz.members;
+
+  nodes = {
+    bird = { ... }: {
+      networking.firewall.allowedTCPPorts = [ 179 ];
+      services.bird2 = {
+        enable = true;
+        config = ''
+          router id 192.168.1.1;
+
+          protocol bgp fnm {
+            local 192.168.1.1 as 64513;
+            neighbor 192.168.1.2 as 64514;
+            multihop;
+            ipv4 {
+              import all;
+              export none;
+            };
+          }
+        '';
+      };
+    };
+    fnm = { ... }: {
+      networking.firewall.allowedTCPPorts = [ 179 ];
+      services.fastnetmon-advanced = {
+        enable = true;
+        settings = {
+          networks_list = [ "172.23.42.0/24" ];
+          gobgp = true;
+          gobgp_flow_spec_announces = true;
+        };
+        bgpPeers = {
+          bird = {
+            local_asn = 64514;
+            remote_asn = 64513;
+            local_address = "192.168.1.2";
+            remote_address = "192.168.1.1";
+
+            description = "Bird";
+            ipv4_unicast = true;
+            multihop = true;
+            active = true;
+          };
+        };
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    start_all()
+    fnm.wait_for_unit("fastnetmon.service")
+    bird.wait_for_unit("bird2.service")
+
+    fnm.wait_until_succeeds('journalctl -eu fastnetmon.service | grep "BGP daemon restarted correctly"')
+    fnm.wait_until_succeeds("journalctl -eu gobgp.service | grep BGP_FSM_OPENCONFIRM")
+    bird.wait_until_succeeds("birdc show protocol fnm | grep Estab")
+    fnm.wait_until_succeeds('journalctl -eu fastnetmon.service | grep "API server listening"')
+    fnm.succeed("fcli set blackhole 172.23.42.123")
+    bird.succeed("birdc show route | grep 172.23.42.123")
+  '';
+}
diff --git a/nixos/tests/forgejo.nix b/nixos/tests/forgejo.nix
index b326819e31906..6acd6acb50fa9 100644
--- a/nixos/tests/forgejo.nix
+++ b/nixos/tests/forgejo.nix
@@ -37,7 +37,7 @@ let
           settings."repository.signing".SIGNING_KEY = signingPrivateKeyId;
           settings.actions.ENABLED = true;
         };
-        environment.systemPackages = [ config.services.forgejo.package pkgs.gnupg pkgs.jq ];
+        environment.systemPackages = [ config.services.forgejo.package pkgs.gnupg pkgs.jq pkgs.file ];
         services.openssh.enable = true;
 
         specialisation.runner = {
@@ -53,6 +53,14 @@ let
             tokenFile = "/var/lib/forgejo/runner_token";
           };
         };
+        specialisation.dump = {
+          inheritParentConfig = true;
+          configuration.services.forgejo.dump = {
+            enable = true;
+            type = "tar.zst";
+            file = "dump.tar.zst";
+          };
+        };
       };
       client1 = { config, pkgs, ... }: {
         environment.systemPackages = [ pkgs.git ];
@@ -66,8 +74,10 @@ let
       let
         inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
         serverSystem = nodes.server.system.build.toplevel;
+        dumpFile = with nodes.server.specialisation.dump.configuration.services.forgejo.dump; "${backupDir}/${file}";
       in
       ''
+        import json
         GIT_SSH_COMMAND = "ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no"
         REPO = "forgejo@server:test/repo"
         PRIVK = "${snakeOilPrivateKey}"
@@ -137,6 +147,11 @@ let
         client2.succeed(f"GIT_SSH_COMMAND='{GIT_SSH_COMMAND}' git clone {REPO}")
         client2.succeed('test "$(cat repo/testfile | xargs echo -n)" = "hello world"')
 
+        with subtest("Testing git protocol version=2 over ssh"):
+            git_protocol = client2.succeed(f"GIT_SSH_COMMAND='{GIT_SSH_COMMAND}' GIT_TRACE2_EVENT=true git -C repo fetch |& grep negotiated-version")
+            version = json.loads(git_protocol).get("value")
+            assert version == "2", f"git did not negotiate protocol version 2, but version {version} instead."
+
         server.wait_until_succeeds(
             'test "$(curl http://localhost:3000/api/v1/repos/test/repo/commits '
             + '-H "Accept: application/json" | jq length)" = "1"',
@@ -150,6 +165,12 @@ let
             server.succeed("${serverSystem}/specialisation/runner/bin/switch-to-configuration test")
             server.wait_for_unit("gitea-runner-test.service")
             server.succeed("journalctl -o cat -u gitea-runner-test.service | grep -q 'Runner registered successfully'")
+
+        with subtest("Testing backup service"):
+            server.succeed("${serverSystem}/specialisation/dump/bin/switch-to-configuration test")
+            server.systemctl("start forgejo-dump")
+            assert "Zstandard compressed data" in server.succeed("file ${dumpFile}")
+            server.copy_from_vm("${dumpFile}")
       '';
   });
 in
diff --git a/nixos/tests/installer.nix b/nixos/tests/installer.nix
index 5111cedf9256d..9ff1d8f5d039e 100644
--- a/nixos/tests/installer.nix
+++ b/nixos/tests/installer.nix
@@ -69,8 +69,8 @@ let
   # disk, and then reboot from the hard disk.  It's parameterized with
   # a test script fragment `createPartitions', which must create
   # partitions and filesystems.
-  testScriptFun = { bootLoader, createPartitions, grubDevice, grubUseEfi
-                  , grubIdentifier, preBootCommands, postBootCommands, extraConfig
+  testScriptFun = { bootLoader, createPartitions, grubDevice, grubUseEfi, grubIdentifier
+                  , postInstallCommands, preBootCommands, postBootCommands, extraConfig
                   , testSpecialisationConfig, testFlakeSwitch
                   }:
     let iface = "virtio";
@@ -153,6 +153,8 @@ let
               """
           )
 
+      ${postInstallCommands}
+
       with subtest("Shutdown system after installation"):
           machine.succeed("umount -R /mnt")
           machine.succeed("sync")
@@ -368,7 +370,9 @@ let
 
 
   makeInstallerTest = name:
-    { createPartitions, preBootCommands ? "", postBootCommands ? "", extraConfig ? ""
+    { createPartitions
+    , postInstallCommands ? "", preBootCommands ? "", postBootCommands ? ""
+    , extraConfig ? ""
     , extraInstallerConfig ? {}
     , bootLoader ? "grub" # either "grub" or "systemd-boot"
     , grubDevice ? "/dev/vda", grubIdentifier ? "uuid", grubUseEfi ? false
@@ -479,7 +483,7 @@ let
       };
 
       testScript = testScriptFun {
-        inherit bootLoader createPartitions preBootCommands postBootCommands
+        inherit bootLoader createPartitions postInstallCommands preBootCommands postBootCommands
                 grubDevice grubIdentifier grubUseEfi extraConfig
                 testSpecialisationConfig testFlakeSwitch;
       };
@@ -682,20 +686,32 @@ in {
     createPartitions = ''
       machine.succeed(
           "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
-          + " mkpart primary linux-swap 1M 1024M"
-          + " mkpart primary 1024M -1s",
+          + " mkpart primary 1M 100MB"  # bpool
+          + " mkpart primary linux-swap 100M 1024M"
+          + " mkpart primary 1024M -1s", # rpool
           "udevadm settle",
-          "mkswap /dev/vda1 -L swap",
+          "mkswap /dev/vda2 -L swap",
           "swapon -L swap",
-          "zpool create rpool /dev/vda2",
+          "zpool create rpool /dev/vda3",
           "zfs create -o mountpoint=legacy rpool/root",
           "mount -t zfs rpool/root /mnt",
           "zfs create -o mountpoint=legacy rpool/root/usr",
           "mkdir /mnt/usr",
           "mount -t zfs rpool/root/usr /mnt/usr",
+          "zpool create -o compatibility=grub2 bpool /dev/vda1",
+          "zfs create -o mountpoint=legacy bpool/boot",
+          "mkdir /mnt/boot",
+          "mount -t zfs bpool/boot /mnt/boot",
           "udevadm settle",
       )
     '';
+
+    # umount & export bpool before shutdown
+    # this is a fix for "cannot import 'bpool': pool was previously in use from another system."
+    postInstallCommands = ''
+      machine.succeed("umount /mnt/boot")
+      machine.succeed("zpool export bpool")
+    '';
   };
 
   # Create two physical LVM partitions combined into one volume group
diff --git a/nixos/tests/lanraragi.nix b/nixos/tests/lanraragi.nix
new file mode 100644
index 0000000000000..f513ac9d252bc
--- /dev/null
+++ b/nixos/tests/lanraragi.nix
@@ -0,0 +1,40 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "lanraragi";
+  meta.maintainers = with lib.maintainers; [ tomasajt ];
+
+  nodes = {
+    machine1 = { pkgs, ... }: {
+      services.lanraragi.enable = true;
+    };
+    machine2 = { pkgs, ... }: {
+      services.lanraragi = {
+        enable = true;
+        passwordFile = pkgs.writeText "lrr-test-pass" ''
+          ultra-secure-password
+        '';
+        port = 4000;
+        redis = {
+          port = 4001;
+          passwordFile = pkgs.writeText "redis-lrr-test-pass" ''
+            still-a-very-secure-password
+          '';
+        };
+      };
+    };
+
+
+  };
+
+  testScript = ''
+    start_all()
+
+    machine1.wait_for_unit("lanraragi.service")
+    machine1.wait_until_succeeds("curl -f localhost:3000")
+    machine1.succeed("[ $(curl -o /dev/null -X post 'http://localhost:3000/login' --data-raw 'password=kamimamita' -w '%{http_code}') -eq 302 ]")
+
+    machine2.wait_for_unit("lanraragi.service")
+    machine2.wait_until_succeeds("curl -f localhost:4000")
+    machine2.succeed("[ $(curl -o /dev/null -X post 'http://localhost:4000/login' --data-raw 'password=ultra-secure-password' -w '%{http_code}') -eq 302 ]")
+  '';
+})
+
diff --git a/nixos/tests/lxd/container.nix b/nixos/tests/lxd/container.nix
index bdaaebfc00281..0ebe73d872f2b 100644
--- a/nixos/tests/lxd/container.nix
+++ b/nixos/tests/lxd/container.nix
@@ -13,6 +13,7 @@ let
 
   lxd-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
   lxd-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
+  lxd-image-rootfs-squashfs = releases.lxdContainerImageSquashfs.${pkgs.stdenv.hostPlatform.system};
 
 in {
   name = "lxd-container";
@@ -23,7 +24,7 @@ in {
 
   nodes.machine = { lib, ... }: {
     virtualisation = {
-      diskSize = 4096;
+      diskSize = 6144;
 
       # Since we're testing `limits.cpu`, we've gotta have a known number of
       # cores to lean on
@@ -65,6 +66,16 @@ in {
         machine.succeed("echo true | lxc exec container /run/current-system/sw/bin/bash -")
         machine.succeed("lxc delete -f container")
 
+    with subtest("Squashfs image is functional"):
+        machine.succeed(
+            "lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs-squashfs} --alias nixos-squashfs"
+        )
+        machine.succeed("lxc launch nixos-squashfs container")
+        with machine.nested("Waiting for instance to start and be usable"):
+          retry(instance_is_up)
+        machine.succeed("echo true | lxc exec container /run/current-system/sw/bin/bash -")
+        machine.succeed("lxc delete -f container")
+
     with subtest("Container is mounted with lxcfs inside"):
         machine.succeed("lxc launch nixos container")
         with machine.nested("Waiting for instance to start and be usable"):
diff --git a/nixos/tests/misc.nix b/nixos/tests/misc.nix
index 442b45948c608..e7842debba7a2 100644
--- a/nixos/tests/misc.nix
+++ b/nixos/tests/misc.nix
@@ -13,6 +13,7 @@ in {
       environment.variables.EDITOR = lib.mkOverride 0 "emacs";
       documentation.nixos.enable = lib.mkOverride 0 true;
       systemd.tmpfiles.rules = [ "d /tmp 1777 root root 10d" ];
+      systemd.tmpfiles.settings."10-test"."/tmp/somefile".d = {};
       virtualisation.fileSystems = { "/tmp2" =
         { fsType = "tmpfs";
           options = [ "mode=1777" "noauto" ];
@@ -117,6 +118,9 @@ in {
           )
           machine.fail("[ -e /tmp/foo ]")
 
+      with subtest("whether systemd-tmpfiles settings works"):
+          machine.succeed("[ -e /tmp/somefile ]")
+
       with subtest("whether automounting works"):
           machine.fail("grep '/tmp2 tmpfs' /proc/mounts")
           machine.succeed("touch /tmp2/x")
diff --git a/nixos/tests/nixos-rebuild-install-bootloader.nix b/nixos/tests/nixos-rebuild-install-bootloader.nix
new file mode 100644
index 0000000000000..3ade90ea24a74
--- /dev/null
+++ b/nixos/tests/nixos-rebuild-install-bootloader.nix
@@ -0,0 +1,73 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nixos-rebuild-install-bootloader";
+
+  nodes = {
+    machine = { lib, pkgs, ... }: {
+      imports = [
+        ../modules/profiles/installation-device.nix
+        ../modules/profiles/base.nix
+      ];
+
+      nix.settings = {
+        substituters = lib.mkForce [ ];
+        hashed-mirrors = null;
+        connect-timeout = 1;
+      };
+
+      system.includeBuildDependencies = true;
+
+      virtualisation = {
+        cores = 2;
+        memorySize = 2048;
+      };
+
+      virtualisation.useBootLoader = true;
+    };
+  };
+
+  testScript =
+    let
+      configFile = pkgs.writeText "configuration.nix" ''
+        { lib, pkgs, ... }: {
+          imports = [
+            ./hardware-configuration.nix
+            <nixpkgs/nixos/modules/testing/test-instrumentation.nix>
+          ];
+
+          boot.loader.grub = {
+            enable = true;
+            device = "/dev/vda";
+            forceInstall = true;
+          };
+
+          documentation.enable = false;
+        }
+      '';
+
+    in
+    ''
+      machine.start()
+      machine.succeed("udevadm settle")
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("nixos-generate-config")
+      machine.copy_from_host(
+          "${configFile}",
+          "/etc/nixos/configuration.nix",
+      )
+      machine.succeed("nixos-rebuild switch")
+
+      # Need to run `nixos-rebuild` twice because the first run will install
+      # GRUB anyway
+      with subtest("Switch system again and install bootloader"):
+          result = machine.succeed("nixos-rebuild switch --install-bootloader")
+          # install-grub2.pl messages
+          assert "updating GRUB 2 menu..." in result
+          assert "installing the GRUB 2 boot loader on /dev/vda..." in result
+          # GRUB message
+          assert "Installation finished. No error reported." in result
+          # at this point we've tested regression #262724, but haven't tested the bootloader itself
+          # TODO: figure out how to tell the test driver to start the bootloader instead of
+          # booting into the kernel directly.
+    '';
+})
diff --git a/nixos/tests/openssh.nix b/nixos/tests/openssh.nix
index e88625678fec3..88d3e54ee76cb 100644
--- a/nixos/tests/openssh.nix
+++ b/nixos/tests/openssh.nix
@@ -22,7 +22,7 @@ in {
         ];
       };
 
-    server_lazy =
+    server-lazy =
       { ... }:
 
       {
@@ -34,7 +34,7 @@ in {
         ];
       };
 
-    server_localhost_only =
+    server-localhost-only =
       { ... }:
 
       {
@@ -43,7 +43,7 @@ in {
         };
       };
 
-    server_localhost_only_lazy =
+    server-localhost-only-lazy =
       { ... }:
 
       {
@@ -52,7 +52,7 @@ in {
         };
       };
 
-    server_match_rule =
+    server-match-rule =
       { ... }:
 
       {
@@ -119,11 +119,11 @@ in {
         )
 
         client.succeed(
-            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server_lazy 'echo hello world' >&2",
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server-lazy 'echo hello world' >&2",
             timeout=30
         )
         client.succeed(
-            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server_lazy 'ulimit -l' | grep 1024",
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server-lazy 'ulimit -l' | grep 1024",
             timeout=30
         )
 
@@ -137,7 +137,7 @@ in {
             timeout=30
         )
         client.succeed(
-            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server_lazy true",
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-lazy true",
             timeout=30
         )
 
diff --git a/nixos/tests/prometheus-exporters.nix b/nixos/tests/prometheus-exporters.nix
index 8369d6a497ac1..7fd824967206f 100644
--- a/nixos/tests/prometheus-exporters.nix
+++ b/nixos/tests/prometheus-exporters.nix
@@ -416,8 +416,8 @@ let
     };
 
     kea = let
-      controlSocketPathV4 = "/run/kea/dhcp4.sock";
-      controlSocketPathV6 = "/run/kea/dhcp6.sock";
+      controlSocketPathV4 = "/run/kea-dhcp4/dhcp4.sock";
+      controlSocketPathV6 = "/run/kea-dhcp6/dhcp6.sock";
     in
     {
       exporterConfig = {
@@ -512,7 +512,7 @@ let
         wait_for_unit("knot.service")
         wait_for_unit("prometheus-knot-exporter.service")
         wait_for_open_port(9433)
-        succeed("curl -sSf 'localhost:9433' | grep 'knot_server_zone_count 1.0'")
+        succeed("curl -sSf 'localhost:9433' | grep '2\.019031301'")
       '';
     };
 
diff --git a/nixos/tests/restic.nix b/nixos/tests/restic.nix
index 3b9ea2f85b1ed..54fdc1d3995ca 100644
--- a/nixos/tests/restic.nix
+++ b/nixos/tests/restic.nix
@@ -21,7 +21,10 @@ import ./make-test-python.nix (
       unpackPhase = "true";
       installPhase = ''
         mkdir $out
-        touch $out/some_file
+        echo some_file > $out/some_file
+        echo some_other_file > $out/some_other_file
+        mkdir $out/a_dir
+        echo a_file > $out/a_dir/a_file
       '';
     };
 
@@ -53,9 +56,13 @@ import ./make-test-python.nix (
               initialize = true;
             };
             remote-from-file-backup = {
-              inherit passwordFile paths exclude pruneOpts;
+              inherit passwordFile exclude pruneOpts;
               initialize = true;
               repositoryFile = pkgs.writeText "repositoryFile" remoteFromFileRepository;
+              paths = [ "/opt/a_dir" ];
+              dynamicFilesFrom = ''
+                find /opt -mindepth 1 -maxdepth 1 ! -name a_dir # all files in /opt except for a_dir
+              '';
             };
             rclonebackup = {
               inherit passwordFile paths exclude pruneOpts;
@@ -123,13 +130,18 @@ import ./make-test-python.nix (
           "systemctl start restic-backups-remote-from-file-backup.service",
           'restic-remote-from-file-backup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
 
+          # test that restoring that snapshot produces the same directory
+          "mkdir /tmp/restore-2",
+          "${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} restore latest -t /tmp/restore-2",
+          "diff -ru ${testDir} /tmp/restore-2/opt",
+
           # test that rclonebackup produces a snapshot
           "systemctl start restic-backups-rclonebackup.service",
           'restic-rclonebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
 
           # test that custompackage runs both `restic backup` and `restic check` with reasonable commandlines
           "systemctl start restic-backups-custompackage.service",
-          "grep 'backup.* /opt' /root/fake-restic.log",
+          "grep 'backup' /root/fake-restic.log",
           "grep 'check.* --some-check-option' /root/fake-restic.log",
 
           # test that we can create four snapshots in remotebackup and rclonebackup
diff --git a/nixos/tests/rosenpass.nix b/nixos/tests/rosenpass.nix
new file mode 100644
index 0000000000000..ec4046c8c035b
--- /dev/null
+++ b/nixos/tests/rosenpass.nix
@@ -0,0 +1,217 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  deviceName = "rp0";
+
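+  # Link-local addresses and throwaway WireGuard keypairs for the two peers under test.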
+  server = {
+    ip = "fe80::1";
+    wg = {
+      public = "mQufmDFeQQuU/fIaB2hHgluhjjm1ypK4hJr1cW3WqAw=";
+      secret = "4N5Y1dldqrpsbaEiY8O0XBUGUFf8vkvtBtm8AoOX7Eo=";
+      listen = 10000;
+    };
+  };
+  client = {
+    ip = "fe80::2";
+    wg = {
+      public = "Mb3GOlT7oS+F3JntVKiaD7SpHxLxNdtEmWz/9FMnRFU=";
+      secret = "uC5dfGMv7Oxf5UDfdPkj6rZiRZT2dRWp5x8IQxrNcUE=";
+    };
+  };
+in
+{
+  name = "rosenpass";
+
+  nodes =
+    let
+      shared = peer: { config, modulesPath, ... }: {
+        imports = [ "${modulesPath}/services/networking/rosenpass.nix" ];
+
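+        # WireGuard kernel support is required for the systemd-networkd netdev configured below.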
+        boot.kernelModules = [ "wireguard" ];
+
+        services.rosenpass = {
+          enable = true;
+          defaultDevice = deviceName;
+          settings = {
+            verbosity = "Verbose";
+            public_key = "/etc/rosenpass/pqpk";
+            secret_key = "/etc/rosenpass/pqsk";
+          };
+        };
+
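+        # Rosenpass performs its key exchange over UDP port 9999 (see the `listen`/`endpoint` settings below).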
+        networking.firewall.allowedUDPPorts = [ 9999 ];
+
+        systemd.network = {
+          enable = true;
+          networks."rosenpass" = {
+            matchConfig.Name = deviceName;
+            networkConfig.IPForward = true;
+            address = [ "${peer.ip}/64" ];
+          };
+
+          netdevs."10-rp0" = {
+            netdevConfig = {
+              Kind = "wireguard";
+              Name = deviceName;
+            };
+            wireguardConfig.PrivateKeyFile = "/etc/wireguard/wgsk";
+          };
+        };
+
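+        # WireGuard private key referenced by `PrivateKeyFile` above; it must be readable by systemd-networkd.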
+        environment.etc."wireguard/wgsk" = {
+          text = peer.wg.secret;
+          user = "systemd-network";
+          group = "systemd-network";
+        };
+      };
+    in
+    {
+      server = {
+        imports = [ (shared server) ];
+
+        networking.firewall.allowedUDPPorts = [ server.wg.listen ];
+
+        systemd.network.netdevs."10-${deviceName}" = {
+          wireguardConfig.ListenPort = server.wg.listen;
+          wireguardPeers = [
+            {
+              wireguardPeerConfig = {
+                AllowedIPs = [ "::/0" ];
+                PublicKey = client.wg.public;
+              };
+            }
+          ];
+        };
+
+        services.rosenpass.settings = {
+          listen = [ "0.0.0.0:9999" ];
+          peers = [
+            {
+              public_key = "/etc/rosenpass/peers/client/pqpk";
+              peer = client.wg.public;
+            }
+          ];
+        };
+      };
+      client = {
+        imports = [ (shared client) ];
+
+        systemd.network.netdevs."10-${deviceName}".wireguardPeers = [
+          {
+            wireguardPeerConfig = {
+              AllowedIPs = [ "::/0" ];
+              PublicKey = server.wg.public;
+              Endpoint = "server:${builtins.toString server.wg.listen}";
+            };
+          }
+        ];
+
+        services.rosenpass.settings.peers = [
+          {
+            public_key = "/etc/rosenpass/peers/server/pqpk";
+            endpoint = "server:9999";
+            peer = server.wg.public;
+          }
+        ];
+      };
+    };
+
+  testScript = { ... }: ''
+    from os import system
+
+    # Full path to rosenpass in the store, to avoid fiddling with `$PATH`.
+    rosenpass = "${pkgs.rosenpass}/bin/rosenpass"
+
+    # Path in `/etc` where keys will be placed.
+    etc = "/etc/rosenpass"
+
+    start_all()
+
+    for machine in [server, client]:
+        machine.wait_for_unit("multi-user.target")
+
+    # Gently stop Rosenpass to avoid crashes during key generation/distribution.
+    for machine in [server, client]:
+        machine.execute("systemctl stop rosenpass.service")
+
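+    # Generate a post-quantum keypair for each peer on the test host, then install
+    # the secret key on its owner and the public key on both the owner and the remote peer.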
+    for (name, machine, remote) in [("server", server, client), ("client", client, server)]:
+        pk, sk = f"{name}.pqpk", f"{name}.pqsk"
+        system(f"{rosenpass} gen-keys --force --secret-key {sk} --public-key {pk}")
+        machine.copy_from_host(sk, f"{etc}/pqsk")
+        machine.copy_from_host(pk, f"{etc}/pqpk")
+        remote.copy_from_host(pk, f"{etc}/peers/{name}/pqpk")
+
+    for machine in [server, client]:
+        machine.execute("systemctl start rosenpass.service")
+
+    for machine in [server, client]:
+        machine.wait_for_unit("rosenpass.service")
+
+    with subtest("ping"):
+        client.succeed("ping -c 2 -i 0.5 ${server.ip}%${deviceName}")
+
+    with subtest("preshared-keys"):
+        # Rosenpass works by setting (and regularly refreshing) the WireGuard preshared key.
+        # While it is inactive, no key is set and `wg show` reports "none";
+        # once it is active, the key is set and "none" no longer appears in the output.
+        for machine in [server, client]:
+            machine.wait_until_succeeds("wg show all preshared-keys | grep --invert-match none", timeout=5)
+  '';
+
+  # NOTE: The configuration below is used only when running the test interactively (for developing/debugging).
+  interactive.nodes =
+    let
+      inherit (import ./ssh-keys.nix pkgs) snakeOilPublicKey snakeOilPrivateKey;
+
+      sshAndKeyGeneration = {
+        services.openssh.enable = true;
+        users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+        environment.systemPackages = [
+          (pkgs.writeShellApplication {
+            name = "gen-keys";
+            runtimeInputs = [ pkgs.rosenpass ];
+            text = ''
+              HOST="$(hostname)"
+              if [ "$HOST" == "server" ]
+              then
+                PEER="client"
+              else
+                PEER="server"
+              fi
+
+              # Generate keypair.
+              mkdir -vp /etc/rosenpass/peers/$PEER
+              rosenpass gen-keys --force --secret-key /etc/rosenpass/pqsk --public-key /etc/rosenpass/pqpk
+
+              # Set up SSH key.
+              mkdir -p /root/.ssh
+              cp ${snakeOilPrivateKey} /root/.ssh/id_ecdsa
+              chmod 0400 /root/.ssh/id_ecdsa
+
+              # Copy public key to other peer.
+              # shellcheck disable=SC2029
+              ssh -o StrictHostKeyChecking=no $PEER "mkdir -pv /etc/rosenpass/peers/$HOST"
+              scp /etc/rosenpass/pqpk "$PEER:/etc/rosenpass/peers/$HOST/pqpk"
+            '';
+          })
+        ];
+      };
+
+      # Use kmscon <https://www.freedesktop.org/wiki/Software/kmscon/>
+      # to provide a slightly nicer console, and while we're at it,
+      # also use a nice font.
+      # With kmscon, we can for example zoom in/out using [Ctrl] + [+]
+      # and [Ctrl] + [-].
+      niceConsoleAndAutologin.services.kmscon = {
+        enable = true;
+        autologinUser = "root";
+        fonts = [{
+          name = "Fira Code";
+          package = pkgs.fira-code;
+        }];
+      };
+    in
+    {
+      server = sshAndKeyGeneration // niceConsoleAndAutologin;
+      client = sshAndKeyGeneration // niceConsoleAndAutologin;
+    };
+})
diff --git a/nixos/tests/stratis/encryption.nix b/nixos/tests/stratis/encryption.nix
index a555ff8a8e854..81b5f92b4ac4a 100644
--- a/nixos/tests/stratis/encryption.nix
+++ b/nixos/tests/stratis/encryption.nix
@@ -26,7 +26,7 @@ import ../make-test-python.nix ({ pkgs, ... }:
         # test rebinding encrypted pool
         machine.succeed("stratis pool rebind keyring  testpool testkey2")
         # test restarting encrypted pool
-        machine.succeed("stratis pool stop   testpool")
-        machine.succeed("stratis pool start  --name testpool --unlock-method keyring")
+        machine.succeed("stratis pool stop  --name testpool")
+        machine.succeed("stratis pool start --name testpool --unlock-method keyring")
       '';
   })