Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/configuration/x-windows.chapter.md | 10
-rw-r--r--  nixos/doc/manual/release-notes/rl-2405.section.md | 11
-rw-r--r--  nixos/lib/test-driver/test_driver/machine.py | 1
-rw-r--r--  nixos/modules/config/users-groups.nix | 36
-rw-r--r--  nixos/modules/hardware/video/nvidia.nix | 13
-rw-r--r--  nixos/modules/installer/tools/nixos-generate-config.pl | 2
-rw-r--r--  nixos/modules/module-list.nix | 7
-rw-r--r--  nixos/modules/programs/goldwarden.nix | 50
-rw-r--r--  nixos/modules/programs/steam.nix | 4
-rw-r--r--  nixos/modules/services/desktops/flatpak.nix | 2
-rw-r--r--  nixos/modules/services/desktops/pipewire/pipewire.nix | 145
-rw-r--r--  nixos/modules/services/desktops/pipewire/wireplumber.nix | 83
-rw-r--r--  nixos/modules/services/misc/paperless.nix | 16
-rw-r--r--  nixos/modules/services/networking/microsocks.nix | 146
-rw-r--r--  nixos/modules/services/networking/scion/scion-control.nix | 69
-rw-r--r--  nixos/modules/services/networking/scion/scion-daemon.nix | 64
-rw-r--r--  nixos/modules/services/networking/scion/scion-dispatcher.nix | 74
-rw-r--r--  nixos/modules/services/networking/scion/scion-router.nix | 49
-rw-r--r--  nixos/modules/services/networking/scion/scion.nix | 39
-rw-r--r--  nixos/modules/services/security/usbguard.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/kavita.nix | 63
-rw-r--r--  nixos/modules/services/web-apps/pretix.nix | 2
-rw-r--r--  nixos/modules/services/x11/xserver.nix | 2
-rw-r--r--  nixos/modules/system/boot/systemd/initrd.nix | 5
-rw-r--r--  nixos/modules/virtualisation/incus.nix | 165
-rw-r--r--  nixos/release-combined.nix | 5
-rw-r--r--  nixos/tests/all-tests.nix | 3
-rw-r--r--  nixos/tests/goss.nix | 8
-rw-r--r--  nixos/tests/incus/container.nix | 15
-rw-r--r--  nixos/tests/incus/default.nix | 18
-rw-r--r--  nixos/tests/incus/lxd-to-incus.nix | 2
-rw-r--r--  nixos/tests/kavita.nix | 46
-rw-r--r--  nixos/tests/make-test-python.nix | 2
-rw-r--r--  nixos/tests/scion/freestanding-deployment/README.rst | 12
-rw-r--r--  nixos/tests/scion/freestanding-deployment/default.nix | 172
-rw-r--r--  nixos/tests/scion/freestanding-deployment/topology1.json | 51
-rw-r--r--  nixos/tests/scion/freestanding-deployment/topology2.json | 51
-rw-r--r--  nixos/tests/scion/freestanding-deployment/topology3.json | 60
-rw-r--r--  nixos/tests/scion/freestanding-deployment/topology4.json | 40
-rw-r--r--  nixos/tests/scion/freestanding-deployment/topology5.json | 40
-rw-r--r--  nixos/tests/systemd-user-linger.nix | 39
-rw-r--r--  nixos/tests/ustreamer.nix | 75
42 files changed, 1470 insertions, 231 deletions
diff --git a/nixos/doc/manual/configuration/x-windows.chapter.md b/nixos/doc/manual/configuration/x-windows.chapter.md
index bf1872ae01ace..0e8e38b83dcdc 100644
--- a/nixos/doc/manual/configuration/x-windows.chapter.md
+++ b/nixos/doc/manual/configuration/x-windows.chapter.md
@@ -146,14 +146,12 @@ default because it's not free software. You can enable it as follows:
 services.xserver.videoDrivers = [ "nvidia" ];
 ```
 
-Or if you have an older card, you may have to use one of the legacy
-drivers:
+If you have an older card, you may have to use one of the legacy drivers:
 
 ```nix
-services.xserver.videoDrivers = [ "nvidiaLegacy470" ];
-services.xserver.videoDrivers = [ "nvidiaLegacy390" ];
-services.xserver.videoDrivers = [ "nvidiaLegacy340" ];
-services.xserver.videoDrivers = [ "nvidiaLegacy304" ];
+hardware.nvidia.package = config.boot.kernelPackages.nvidiaPackages.legacy_470;
+hardware.nvidia.package = config.boot.kernelPackages.nvidiaPackages.legacy_390;
+hardware.nvidia.package = config.boot.kernelPackages.nvidiaPackages.legacy_340;
 ```
 
 You may need to reboot after enabling this driver to prevent a clash
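As a side note on the documentation change above, here is a minimal sketch of how the two settings fit together on a hypothetical machine with a 390-series card (the driver branch is illustrative, not taken from the diff):

```nix
{ config, ... }:
{
  # Select the proprietary NVIDIA X driver ...
  services.xserver.videoDrivers = [ "nvidia" ];
  # ... and pin the legacy driver branch through the package option,
  # as the updated manual text now recommends.
  hardware.nvidia.package = config.boot.kernelPackages.nvidiaPackages.legacy_390;
}
```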
diff --git a/nixos/doc/manual/release-notes/rl-2405.section.md b/nixos/doc/manual/release-notes/rl-2405.section.md
index a40640a7b8d0a..92702cce18969 100644
--- a/nixos/doc/manual/release-notes/rl-2405.section.md
+++ b/nixos/doc/manual/release-notes/rl-2405.section.md
@@ -32,6 +32,11 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - Julia environments can now be built with arbitrary packages from the ecosystem using the `.withPackages` function. For example: `julia.withPackages ["Plots"]`.
 
+- The PipeWire and WirePlumber modules have removed support for using
+`environment.etc."pipewire/..."` and `environment.etc."wireplumber/..."`.
+Use `services.pipewire.extraConfig` or `services.pipewire.configPackages` for PipeWire and
+`services.pipewire.wireplumber.configPackages` for WirePlumber instead.
+
 - A new option `systemd.sysusers.enable` was added. If enabled, users and
   groups are created with systemd-sysusers instead of with a custom perl script.
 
@@ -111,6 +116,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - [Pretix](https://pretix.eu/about/en/), an open source ticketing software for events. Available as [services.pretix](#opt-services.pretix.enable).
 
+- [microsocks](https://github.com/rofl0r/microsocks), a tiny, portable SOCKS5 server with very moderate resource usage. Available as [services.microsocks](#opt-services.microsocks.enable).
+
 - [Clevis](https://github.com/latchset/clevis), a pluggable framework for automated decryption, used to unlock encrypted devices in initrd. Available as [boot.initrd.clevis.enable](#opt-boot.initrd.clevis.enable).
 
 - [fritz-exporter](https://github.com/pdreker/fritz_exporter), a Prometheus exporter for extracting metrics from [FRITZ!](https://avm.de/produkte/) devices. Available as [services.prometheus.exporters.fritz](#opt-services.prometheus.exporters.fritz.enable).
@@ -442,6 +449,10 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - `nextcloud-setup.service` no longer changes the group of each file & directory inside `/var/lib/nextcloud/{config,data,store-apps}` if one of these directories has the wrong owner group. This was part of transitioning the group used for `/var/lib/nextcloud`, but isn't necessary anymore.
 
+- `services.kavita` now uses the freeform option `services.kavita.settings` for the application settings file.
+  The options `services.kavita.ipAdresses` and `services.kavita.port` now exist at `services.kavita.settings.IpAddresses`
+  and `services.kavita.settings.Port`.
+
 - The `krb5` module has been rewritten and moved to `security.krb5`, moving all options but `security.krb5.enable` and `security.krb5.package` into `security.krb5.settings`.
 
 - Gitea 1.21 upgrade has several breaking changes, including:
diff --git a/nixos/lib/test-driver/test_driver/machine.py b/nixos/lib/test-driver/test_driver/machine.py
index df8628bce9568..c117aab7c4011 100644
--- a/nixos/lib/test-driver/test_driver/machine.py
+++ b/nixos/lib/test-driver/test_driver/machine.py
@@ -1250,6 +1250,5 @@ class Machine:
             check_return=False,
             check_output=False,
         )
-        self.wait_for_console_text(r"systemd\[1\]:.*Switching root\.")
         self.connected = False
         self.connect()
diff --git a/nixos/modules/config/users-groups.nix b/nixos/modules/config/users-groups.nix
index 02cd1a17f538a..f9750b7263cac 100644
--- a/nixos/modules/config/users-groups.nix
+++ b/nixos/modules/config/users-groups.nix
@@ -496,6 +496,7 @@ let
     in
       filter types.shellPackage.check shells;
 
+  lingeringUsers = map (u: u.name) (attrValues (flip filterAttrs cfg.users (n: u: u.linger)));
 in {
   imports = [
     (mkAliasOptionModuleMD [ "users" "extraUsers" ] [ "users" "users" ])
@@ -695,24 +696,31 @@ in {
       '';
     } else ""; # keep around for backwards compatibility
 
-    system.activationScripts.update-lingering = let
-      lingerDir = "/var/lib/systemd/linger";
-      lingeringUsers = map (u: u.name) (attrValues (flip filterAttrs cfg.users (n: u: u.linger)));
-      lingeringUsersFile = builtins.toFile "lingering-users"
-        (concatStrings (map (s: "${s}\n")
-          (sort (a: b: a < b) lingeringUsers)));  # this sorting is important for `comm` to work correctly
-    in stringAfter [ "users" ] ''
-      if [ -e ${lingerDir} ] ; then
+    systemd.services.linger-users = lib.mkIf ((builtins.length lingeringUsers) > 0) {
+      wantedBy = ["multi-user.target"];
+      after = ["systemd-logind.service"];
+      requires = ["systemd-logind.service"];
+
+      script = let
+        lingerDir = "/var/lib/systemd/linger";
+        lingeringUsersFile = builtins.toFile "lingering-users"
+          (concatStrings (map (s: "${s}\n")
+            (sort (a: b: a < b) lingeringUsers)));  # this sorting is important for `comm` to work correctly
+      in ''
+        mkdir -vp ${lingerDir}
         cd ${lingerDir}
-        for user in ${lingerDir}/*; do
-          if ! id "$user" >/dev/null 2>&1; then
+        for user in $(ls); do
+          if ! id "$user" >/dev/null; then
+            echo "Removing linger for missing user $user"
             rm --force -- "$user"
           fi
         done
-        ls ${lingerDir} | sort | comm -3 -1 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl disable-linger
-        ls ${lingerDir} | sort | comm -3 -2 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl  enable-linger
-      fi
-    '';
+        ls | sort | comm -3 -1 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl disable-linger
+        ls | sort | comm -3 -2 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl  enable-linger
+      '';
+
+      serviceConfig.Type = "oneshot";
+    };
 
     # Warn about user accounts with deprecated password hashing schemes
     # This does not work when the users and groups are created by
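A minimal sketch of the configuration the new `linger-users` unit acts on, assuming a hypothetical user `alice` (the `users.users.<name>.linger` option itself is unchanged; only the activation-script mechanism is replaced by a oneshot service):

```nix
{
  users.users.alice = {
    isNormalUser = true;
    # Picked up by the linger-users service, which runs
    # `loginctl enable-linger alice` after systemd-logind is up.
    linger = true;
  };
}
```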
diff --git a/nixos/modules/hardware/video/nvidia.nix b/nixos/modules/hardware/video/nvidia.nix
index 3b983f768f91a..352c8d8ead54d 100644
--- a/nixos/modules/hardware/video/nvidia.nix
+++ b/nixos/modules/hardware/video/nvidia.nix
@@ -396,6 +396,9 @@ in {
             modules = [nvidia_x11.bin];
             display = !offloadCfg.enable;
             deviceSection =
+              ''
+                Option "SidebandSocketPath" "/run/nvidia-xdriver/"
+              '' +
               lib.optionalString primeEnabled
               ''
                 BusID "${pCfg.nvidiaBusId}"
@@ -533,8 +536,14 @@ in {
 
         hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;
 
-        systemd.tmpfiles.rules =
-          lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
+        systemd.tmpfiles.rules = [
+          # Remove the following log message:
+          #    (WW) NVIDIA: Failed to bind sideband socket to
+          #    (WW) NVIDIA:     '/var/run/nvidia-xdriver-b4f69129' Permission denied
+          #
+          # https://bbs.archlinux.org/viewtopic.php?pid=1909115#p1909115
+          "d /run/nvidia-xdriver 0770 root users"
+        ] ++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
           "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
 
         boot = {
diff --git a/nixos/modules/installer/tools/nixos-generate-config.pl b/nixos/modules/installer/tools/nixos-generate-config.pl
index 2f9edba4f0c9c..ef25b8b296e6e 100644
--- a/nixos/modules/installer/tools/nixos-generate-config.pl
+++ b/nixos/modules/installer/tools/nixos-generate-config.pl
@@ -257,7 +257,7 @@ foreach my $path (glob "/sys/class/{block,mmc_host}/*") {
 
 # Add bcache module, if needed.
 my @bcacheDevices = glob("/dev/bcache*");
-@bcacheDevices = grep(!qr#dev/bcachefs.*#, @bcacheDevices);
+@bcacheDevices = grep(!m#dev/bcachefs.*#, @bcacheDevices);
 if (scalar @bcacheDevices > 0) {
     push @initrdAvailableKernelModules, "bcache";
 }
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 299b163844f82..d89d294b0469f 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -193,6 +193,7 @@
   ./programs/gnome-disks.nix
   ./programs/gnome-terminal.nix
   ./programs/gnupg.nix
+  ./programs/goldwarden.nix
   ./programs/gpaste.nix
   ./programs/gphoto2.nix
   ./programs/haguichi.nix
@@ -1020,6 +1021,7 @@
   ./services/networking/lxd-image-server.nix
   ./services/networking/magic-wormhole-mailbox-server.nix
   ./services/networking/matterbridge.nix
+  ./services/networking/microsocks.nix
   ./services/networking/mihomo.nix
   ./services/networking/minidlna.nix
   ./services/networking/miniupnpd.nix
@@ -1106,6 +1108,11 @@
   ./services/networking/rpcbind.nix
   ./services/networking/rxe.nix
   ./services/networking/sabnzbd.nix
+  ./services/networking/scion/scion.nix
+  ./services/networking/scion/scion-control.nix
+  ./services/networking/scion/scion-daemon.nix
+  ./services/networking/scion/scion-dispatcher.nix
+  ./services/networking/scion/scion-router.nix
   ./services/networking/seafile.nix
   ./services/networking/searx.nix
   ./services/networking/shadowsocks.nix
diff --git a/nixos/modules/programs/goldwarden.nix b/nixos/modules/programs/goldwarden.nix
new file mode 100644
index 0000000000000..26f9a87c1986f
--- /dev/null
+++ b/nixos/modules/programs/goldwarden.nix
@@ -0,0 +1,50 @@
+{ lib, config, pkgs, ... }:
+let
+  cfg = config.programs.goldwarden;
+in
+{
+  options.programs.goldwarden = {
+    enable = lib.mkEnableOption "Goldwarden";
+    package = lib.mkPackageOption pkgs "goldwarden" {};
+    useSshAgent = lib.mkEnableOption "Goldwarden's SSH Agent" // { default = true; };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [{
+       assertion = cfg.useSshAgent -> !config.programs.ssh.startAgent;
+       message = "Only one ssh-agent can be used at a time.";
+    }];
+
+    environment = {
+      etc = lib.mkIf config.programs.chromium.enable {
+        "chromium/native-messaging-hosts/com.8bit.bitwarden.json".source = "${cfg.package}/etc/chromium/native-messaging-hosts/com.8bit.bitwarden.json";
+        "opt/chrome/native-messaging-hosts/com.8bit.bitwarden.json".source = "${cfg.package}/etc/chrome/native-messaging-hosts/com.8bit.bitwarden.json";
+      };
+
+      extraInit = lib.mkIf cfg.useSshAgent ''
+        if [ -z "$SSH_AUTH_SOCK" -a -n "$HOME" ]; then
+          export SSH_AUTH_SOCK="$HOME/.goldwarden-ssh-agent.sock"
+        fi
+      '';
+
+      systemPackages = [
+        # for cli and polkit action
+        cfg.package
+        # binary exec's into pinentry which should match the DE
+        config.programs.gnupg.agent.pinentryPackage
+      ];
+    };
+
+    programs.firefox.nativeMessagingHosts.packages = [ cfg.package ];
+
+    # see https://github.com/quexten/goldwarden/blob/main/cmd/goldwarden.service
+    systemd.user.services.goldwarden = {
+      description = "Goldwarden daemon";
+      wantedBy = [ "graphical-session.target" ];
+      after = [ "graphical-session.target" ];
+      serviceConfig.ExecStart = "${lib.getExe cfg.package} daemonize";
+      path = [ config.programs.gnupg.agent.pinentryPackage ];
+      unitConfig.ConditionUser = "!@system";
+    };
+  };
+}
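A minimal usage sketch for the new `programs.goldwarden` module (the Chromium/Firefox native-messaging wiring shown above is applied automatically when those programs are enabled):

```nix
{
  # Installs the Goldwarden CLI, starts the user daemon and, by default,
  # points SSH_AUTH_SOCK at Goldwarden's SSH agent (useSshAgent = true).
  programs.goldwarden.enable = true;

  # The module asserts that only one ssh-agent is active at a time.
  programs.ssh.startAgent = false;
}
```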
diff --git a/nixos/modules/programs/steam.nix b/nixos/modules/programs/steam.nix
index c93a34f618494..bab9bf8107b6e 100644
--- a/nixos/modules/programs/steam.nix
+++ b/nixos/modules/programs/steam.nix
@@ -45,6 +45,8 @@ in {
       apply = steam: steam.override (prev: {
         extraEnv = (lib.optionalAttrs (cfg.extraCompatPackages != [ ]) {
           STEAM_EXTRA_COMPAT_TOOLS_PATHS = makeSearchPathOutput "steamcompattool" "" cfg.extraCompatPackages;
+        }) // (optionalAttrs cfg.extest.enable {
+          LD_PRELOAD = "${pkgs.pkgsi686Linux.extest}/lib/libextest.so";
         }) // (prev.extraEnv or {});
         extraLibraries = pkgs: let
           prevLibs = if prev ? extraLibraries then prev.extraLibraries pkgs else [ ];
@@ -59,8 +61,6 @@ in {
           # use the setuid wrapped bubblewrap
           bubblewrap = "${config.security.wrapperDir}/..";
         };
-      } // optionalAttrs cfg.extest.enable {
-        extraEnv.LD_PRELOAD = "${pkgs.pkgsi686Linux.extest}/lib/libextest.so";
       });
       description = lib.mdDoc ''
         The Steam package to use. Additional libraries are added from the system
diff --git a/nixos/modules/services/desktops/flatpak.nix b/nixos/modules/services/desktops/flatpak.nix
index 4c26e6874023a..62ef38a3d5548 100644
--- a/nixos/modules/services/desktops/flatpak.nix
+++ b/nixos/modules/services/desktops/flatpak.nix
@@ -32,6 +32,8 @@ in {
 
     security.polkit.enable = true;
 
+    fonts.fontDir.enable = true;
+
     services.dbus.packages = [ pkgs.flatpak ];
 
     systemd.packages = [ pkgs.flatpak ];
diff --git a/nixos/modules/services/desktops/pipewire/pipewire.nix b/nixos/modules/services/desktops/pipewire/pipewire.nix
index 182615cd4d6ca..5c6eba889ebed 100644
--- a/nixos/modules/services/desktops/pipewire/pipewire.nix
+++ b/nixos/modules/services/desktops/pipewire/pipewire.nix
@@ -1,14 +1,21 @@
 # PipeWire service.
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
+  inherit (builtins) attrNames concatMap length;
+  inherit (lib) maintainers teams;
+  inherit (lib.attrsets) attrByPath attrsToList concatMapAttrs filterAttrs;
+  inherit (lib.lists) flatten optional optionals;
+  inherit (lib.modules) mkIf mkRemovedOptionModule;
+  inherit (lib.options) literalExpression mkEnableOption mkOption mkPackageOption;
+  inherit (lib.strings) concatMapStringsSep hasPrefix optionalString;
+  inherit (lib.types) attrsOf bool listOf package;
+
   json = pkgs.formats.json {};
   mapToFiles = location: config: concatMapAttrs (name: value: { "share/pipewire/${location}.conf.d/${name}.conf" = json.generate "${name}" value; }) config;
   extraConfigPkgFromFiles = locations: filesSet: pkgs.runCommand "pipewire-extra-config" { } ''
-    mkdir -p ${lib.concatMapStringsSep " " (l: "$out/share/pipewire/${l}.conf.d") locations}
-    ${lib.concatMapStringsSep ";" ({name, value}: "ln -s ${value} $out/${name}") (lib.attrsToList filesSet)}
+    mkdir -p ${concatMapStringsSep " " (l: "$out/share/pipewire/${l}.conf.d") locations}
+    ${concatMapStringsSep ";" ({name, value}: "ln -s ${value} $out/${name}") (attrsToList filesSet)}
   '';
   cfg = config.services.pipewire;
   enable32BitAlsaPlugins = cfg.alsa.support32Bit
@@ -40,15 +47,15 @@ let
     name = "pipewire-configs";
     paths = configPackages
       ++ [ extraConfigPkg ]
-      ++ lib.optionals cfg.wireplumber.enable cfg.wireplumber.configPackages;
+      ++ optionals cfg.wireplumber.enable cfg.wireplumber.configPackages;
     pathsToLink = [ "/share/pipewire" ];
   };
 
-  requiredLv2Packages = lib.flatten
+  requiredLv2Packages = flatten
     (
-      lib.concatMap
+      concatMap
       (p:
-        lib.attrByPath ["passthru" "requiredLv2Packages"] [] p
+        attrByPath ["passthru" "requiredLv2Packages"] [] p
       )
       configPackages
     );
@@ -59,58 +66,58 @@ let
     pathsToLink = [ "/lib/lv2" ];
   };
 in {
-  meta.maintainers = teams.freedesktop.members ++ [ lib.maintainers.k900 ];
+  meta.maintainers = teams.freedesktop.members ++ [ maintainers.k900 ];
 
   ###### interface
   options = {
     services.pipewire = {
-      enable = mkEnableOption (lib.mdDoc "PipeWire service");
+      enable = mkEnableOption "PipeWire service";
 
       package = mkPackageOption pkgs "pipewire" { };
 
       socketActivation = mkOption {
         default = true;
-        type = types.bool;
-        description = lib.mdDoc ''
+        type = bool;
+        description = ''
           Automatically run PipeWire when connections are made to the PipeWire socket.
         '';
       };
 
       audio = {
-        enable = lib.mkOption {
-          type = lib.types.bool;
+        enable = mkOption {
+          type = bool;
           # this is for backwards compatibility
           default = cfg.alsa.enable || cfg.jack.enable || cfg.pulse.enable;
-          defaultText = lib.literalExpression "config.services.pipewire.alsa.enable || config.services.pipewire.jack.enable || config.services.pipewire.pulse.enable";
-          description = lib.mdDoc "Whether to use PipeWire as the primary sound server";
+          defaultText = literalExpression "config.services.pipewire.alsa.enable || config.services.pipewire.jack.enable || config.services.pipewire.pulse.enable";
+          description = "Whether to use PipeWire as the primary sound server";
         };
       };
 
       alsa = {
-        enable = mkEnableOption (lib.mdDoc "ALSA support");
-        support32Bit = mkEnableOption (lib.mdDoc "32-bit ALSA support on 64-bit systems");
+        enable = mkEnableOption "ALSA support";
+        support32Bit = mkEnableOption "32-bit ALSA support on 64-bit systems";
       };
 
       jack = {
-        enable = mkEnableOption (lib.mdDoc "JACK audio emulation");
+        enable = mkEnableOption "JACK audio emulation";
       };
 
       raopOpenFirewall = mkOption {
-        type = lib.types.bool;
+        type = bool;
         default = false;
-        description = lib.mdDoc ''
+        description = ''
           Opens UDP/6001-6002, required by RAOP/Airplay for timing and control data.
         '';
       };
 
       pulse = {
-        enable = mkEnableOption (lib.mdDoc "PulseAudio server emulation");
+        enable = mkEnableOption "PulseAudio server emulation";
       };
 
-      systemWide = lib.mkOption {
-        type = lib.types.bool;
+      systemWide = mkOption {
+        type = bool;
         default = false;
-        description = lib.mdDoc ''
+        description = ''
           If true, a system-wide PipeWire service and socket is enabled
           allowing all users in the "pipewire" group to use it simultaneously.
           If false, then user units are used instead, restricting access to
@@ -124,7 +131,7 @@ in {
 
       extraConfig = {
         pipewire = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "10-clock-rate" = {
@@ -138,7 +145,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire server.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/pipewire.conf.d`.
@@ -157,7 +164,7 @@ in {
           '';
         };
         client = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "10-no-resample" = {
@@ -166,7 +173,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire client library, used by most applications.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/client.conf.d`.
@@ -177,7 +184,7 @@ in {
           '';
         };
         client-rt = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "10-alsa-linear-volume" = {
@@ -186,7 +193,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire client library, used by real-time applications and legacy ALSA clients.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/client-rt.conf.d`.
@@ -198,7 +205,7 @@ in {
           '';
         };
         jack = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "20-hide-midi" = {
@@ -207,7 +214,7 @@ in {
               };
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire JACK server and client library.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/jack.conf.d`.
@@ -218,7 +225,7 @@ in {
           '';
         };
         pipewire-pulse = mkOption {
-          type = lib.types.attrsOf json.type;
+          type = attrsOf json.type;
           default = {};
           example = {
             "15-force-s16-info" = {
@@ -232,7 +239,7 @@ in {
               }];
             };
           };
-          description = lib.mdDoc ''
+          description = ''
             Additional configuration for the PipeWire PulseAudio server.
 
             Every item in this attrset becomes a separate drop-in file in `/etc/pipewire/pipewire-pulse.conf.d`.
@@ -248,10 +255,32 @@ in {
         };
       };
 
-      configPackages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      configPackages = mkOption {
+        type = listOf package;
         default = [];
-        description = lib.mdDoc ''
+        example = literalExpression ''[
+          (pkgs.writeTextDir "share/pipewire/pipewire.conf.d/10-loopback.conf" '''
+            context.modules = [
+            {   name = libpipewire-module-loopback
+                args = {
+                  node.description = "Scarlett Focusrite Line 1"
+                  capture.props = {
+                      audio.position = [ FL ]
+                      stream.dont-remix = true
+                      node.target = "alsa_input.usb-Focusrite_Scarlett_Solo_USB_Y7ZD17C24495BC-00.analog-stereo"
+                      node.passive = true
+                  }
+                  playback.props = {
+                      node.name = "SF_mono_in_1"
+                      media.class = "Audio/Source"
+                      audio.position = [ MONO ]
+                  }
+                }
+            }
+            ]
+          ''')
+        ]'';
+        description = ''
           List of packages that provide PipeWire configuration, in the form of
           `share/pipewire/*/*.conf` files.
 
@@ -260,11 +289,11 @@ in {
         '';
       };
 
-      extraLv2Packages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      extraLv2Packages = mkOption {
+        type = listOf package;
         default = [];
-        example = lib.literalExpression "[ pkgs.lsp-plugins ]";
-        description = lib.mdDoc ''
+        example = literalExpression "[ pkgs.lsp-plugins ]";
+        description = ''
           List of packages that provide LV2 plugins in `lib/lv2` that should
           be made available to PipeWire for [filter chains][wiki-filter-chain].
 
@@ -279,11 +308,11 @@ in {
   };
 
   imports = [
-    (lib.mkRemovedOptionModule ["services" "pipewire" "config"] ''
+    (mkRemovedOptionModule ["services" "pipewire" "config"] ''
       Overriding default PipeWire configuration through NixOS options never worked correctly and is no longer supported.
       Please create drop-in configuration files via `services.pipewire.extraConfig` instead.
     '')
-    (lib.mkRemovedOptionModule ["services" "pipewire" "media-session"] ''
+    (mkRemovedOptionModule ["services" "pipewire" "media-session"] ''
       pipewire-media-session is no longer supported upstream and has been removed.
       Please switch to `services.pipewire.wireplumber` instead.
     '')
@@ -306,12 +335,12 @@ in {
         message = "Using PipeWire's ALSA/PulseAudio compatibility layers requires running PipeWire as the sound server. Set `services.pipewire.audio.enable` to true.";
       }
       {
-        assertion = builtins.length
-          (builtins.attrNames
+        assertion = length
+          (attrNames
             (
-              lib.filterAttrs
+              filterAttrs
                 (name: value:
-                  lib.hasPrefix "pipewire/" name || name == "pipewire"
+                  hasPrefix "pipewire/" name || name == "pipewire"
                 )
                 config.environment.etc
             )) == 1;
@@ -320,7 +349,7 @@ in {
     ];
 
     environment.systemPackages = [ cfg.package ]
-                                 ++ lib.optional cfg.jack.enable jack-libs;
+                                 ++ optional cfg.jack.enable jack-libs;
 
     systemd.packages = [ cfg.package ];
 
@@ -336,16 +365,16 @@ in {
     systemd.user.sockets.pipewire.enable = !cfg.systemWide;
     systemd.user.services.pipewire.enable = !cfg.systemWide;
 
-    systemd.services.pipewire.environment.LV2_PATH = lib.mkIf cfg.systemWide "${lv2Plugins}/lib/lv2";
-    systemd.user.services.pipewire.environment.LV2_PATH = lib.mkIf (!cfg.systemWide) "${lv2Plugins}/lib/lv2";
+    systemd.services.pipewire.environment.LV2_PATH = mkIf cfg.systemWide "${lv2Plugins}/lib/lv2";
+    systemd.user.services.pipewire.environment.LV2_PATH = mkIf (!cfg.systemWide) "${lv2Plugins}/lib/lv2";
 
     # Mask pw-pulse if it's not wanted
     systemd.user.services.pipewire-pulse.enable = cfg.pulse.enable;
     systemd.user.sockets.pipewire-pulse.enable = cfg.pulse.enable;
 
-    systemd.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
-    systemd.user.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
-    systemd.user.sockets.pipewire-pulse.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
+    systemd.sockets.pipewire.wantedBy = mkIf cfg.socketActivation [ "sockets.target" ];
+    systemd.user.sockets.pipewire.wantedBy = mkIf cfg.socketActivation [ "sockets.target" ];
+    systemd.user.sockets.pipewire-pulse.wantedBy = mkIf cfg.socketActivation [ "sockets.target" ];
 
     services.udev.packages = [ cfg.package ];
 
@@ -377,18 +406,18 @@ in {
     };
 
     environment.sessionVariables.LD_LIBRARY_PATH =
-      lib.mkIf cfg.jack.enable [ "${cfg.package.jack}/lib" ];
+      mkIf cfg.jack.enable [ "${cfg.package.jack}/lib" ];
 
-    networking.firewall.allowedUDPPorts = lib.mkIf cfg.raopOpenFirewall [ 6001 6002 ];
+    networking.firewall.allowedUDPPorts = mkIf cfg.raopOpenFirewall [ 6001 6002 ];
 
-    users = lib.mkIf cfg.systemWide {
+    users = mkIf cfg.systemWide {
       users.pipewire = {
         uid = config.ids.uids.pipewire;
         group = "pipewire";
         extraGroups = [
           "audio"
           "video"
-        ] ++ lib.optional config.security.rtkit.enable "rtkit";
+        ] ++ optional config.security.rtkit.enable "rtkit";
         description = "PipeWire system service user";
         isSystemUser = true;
         home = "/var/lib/pipewire";
diff --git a/nixos/modules/services/desktops/pipewire/wireplumber.nix b/nixos/modules/services/desktops/pipewire/wireplumber.nix
index de177d0e4ef38..6ab62eb03c25f 100644
--- a/nixos/modules/services/desktops/pipewire/wireplumber.nix
+++ b/nixos/modules/services/desktops/pipewire/wireplumber.nix
@@ -1,46 +1,65 @@
 { config, lib, pkgs, ... }:
 
 let
+  inherit (builtins) attrNames concatMap length;
+  inherit (lib) maintainers;
+  inherit (lib.attrsets) attrByPath filterAttrs;
+  inherit (lib.lists) flatten optional;
+  inherit (lib.modules) mkIf;
+  inherit (lib.options) literalExpression mkOption;
+  inherit (lib.strings) hasPrefix;
+  inherit (lib.types) bool listOf package;
+
   pwCfg = config.services.pipewire;
   cfg = pwCfg.wireplumber;
   pwUsedForAudio = pwCfg.audio.enable;
 in
 {
-  meta.maintainers = [ lib.maintainers.k900 ];
+  meta.maintainers = [ maintainers.k900 ];
 
   options = {
     services.pipewire.wireplumber = {
-      enable = lib.mkOption {
-        type = lib.types.bool;
-        default = config.services.pipewire.enable;
-        defaultText = lib.literalExpression "config.services.pipewire.enable";
-        description = lib.mdDoc "Whether to enable WirePlumber, a modular session / policy manager for PipeWire";
+      enable = mkOption {
+        type = bool;
+        default = pwCfg.enable;
+        defaultText = literalExpression "config.services.pipewire.enable";
+        description = "Whether to enable WirePlumber, a modular session / policy manager for PipeWire";
       };
 
-      package = lib.mkOption {
-        type = lib.types.package;
+      package = mkOption {
+        type = package;
         default = pkgs.wireplumber;
-        defaultText = lib.literalExpression "pkgs.wireplumber";
-        description = lib.mdDoc "The WirePlumber derivation to use.";
+        defaultText = literalExpression "pkgs.wireplumber";
+        description = "The WirePlumber derivation to use.";
       };
 
-      configPackages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      configPackages = mkOption {
+        type = listOf package;
         default = [ ];
-        description = lib.mdDoc ''
+        example = literalExpression ''[
+          (pkgs.writeTextDir "share/wireplumber/wireplumber.conf.d/10-bluez.conf" '''
+            monitor.bluez.properties = {
+              bluez5.roles = [ a2dp_sink a2dp_source bap_sink bap_source hsp_hs hsp_ag hfp_hf hfp_ag ]
+              bluez5.codecs = [ sbc sbc_xq aac ]
+              bluez5.enable-sbc-xq = true
+              bluez5.hfphsp-backend = "native"
+            }
+          ''')
+        ]'';
+        description = ''
           List of packages that provide WirePlumber configuration, in the form of
-          `share/wireplumber/*/*.lua` files.
+          `share/wireplumber/*/*.conf` files.
 
           LV2 dependencies will be picked up from config packages automatically
           via `passthru.requiredLv2Packages`.
         '';
       };
 
-      extraLv2Packages = lib.mkOption {
-        type = lib.types.listOf lib.types.package;
+      extraLv2Packages = mkOption {
+        type = listOf package;
         default = [];
-        example = lib.literalExpression "[ pkgs.lsp-plugins ]";
-        description = lib.mdDoc ''
+        example = literalExpression "[ pkgs.lsp-plugins ]";
+        description = ''
           List of packages that provide LV2 plugins in `lib/lv2` that should
           be made available to WirePlumber for [filter chains][wiki-filter-chain].
 
@@ -78,8 +97,8 @@ in
       '';
 
       configPackages = cfg.configPackages
-          ++ lib.optional (!pwUsedForAudio) pwNotForAudioConfigPkg
-          ++ lib.optional config.services.pipewire.systemWide systemwideConfigPkg;
+          ++ optional (!pwUsedForAudio) pwNotForAudioConfigPkg
+          ++ optional pwCfg.systemWide systemwideConfigPkg;
 
       configs = pkgs.buildEnv {
         name = "wireplumber-configs";
@@ -87,11 +106,11 @@ in
         pathsToLink = [ "/share/wireplumber" ];
       };
 
-      requiredLv2Packages = lib.flatten
+      requiredLv2Packages = flatten
         (
-          lib.concatMap
+          concatMap
             (p:
-              lib.attrByPath ["passthru" "requiredLv2Packages"] [] p
+              attrByPath ["passthru" "requiredLv2Packages"] [] p
             )
             configPackages
         );
@@ -102,19 +121,19 @@ in
         pathsToLink = [ "/lib/lv2" ];
       };
     in
-    lib.mkIf cfg.enable {
+    mkIf cfg.enable {
       assertions = [
         {
           assertion = !config.hardware.bluetooth.hsphfpd.enable;
           message = "Using WirePlumber conflicts with hsphfpd, as it provides the same functionality. `hardware.bluetooth.hsphfpd.enable` needs be set to false";
         }
         {
-          assertion = builtins.length
-            (builtins.attrNames
+          assertion = length
+            (attrNames
               (
-                lib.filterAttrs
+                filterAttrs
                   (name: value:
-                    lib.hasPrefix "wireplumber/" name || name == "wireplumber"
+                    hasPrefix "wireplumber/" name || name == "wireplumber"
                   )
                   config.environment.etc
               )) == 1;
@@ -128,19 +147,19 @@ in
 
       systemd.packages = [ cfg.package ];
 
-      systemd.services.wireplumber.enable = config.services.pipewire.systemWide;
-      systemd.user.services.wireplumber.enable = !config.services.pipewire.systemWide;
+      systemd.services.wireplumber.enable = pwCfg.systemWide;
+      systemd.user.services.wireplumber.enable = !pwCfg.systemWide;
 
       systemd.services.wireplumber.wantedBy = [ "pipewire.service" ];
       systemd.user.services.wireplumber.wantedBy = [ "pipewire.service" ];
 
-      systemd.services.wireplumber.environment = lib.mkIf config.services.pipewire.systemWide {
+      systemd.services.wireplumber.environment = mkIf pwCfg.systemWide {
         # Force WirePlumber to use system dbus.
         DBUS_SESSION_BUS_ADDRESS = "unix:path=/run/dbus/system_bus_socket";
         LV2_PATH = "${lv2Plugins}/lib/lv2";
       };
 
       systemd.user.services.wireplumber.environment.LV2_PATH =
-        lib.mkIf (!config.services.pipewire.systemWide) "${lv2Plugins}/lib/lv2";
+        mkIf (!pwCfg.systemWide) "${lv2Plugins}/lib/lv2";
     };
 }
diff --git a/nixos/modules/services/misc/paperless.nix b/nixos/modules/services/misc/paperless.nix
index 9314c4f3848d8..9301d1f687254 100644
--- a/nixos/modules/services/misc/paperless.nix
+++ b/nixos/modules/services/misc/paperless.nix
@@ -27,6 +27,8 @@ let
       name = "paperless_ngx_nltk_data";
       paths = pkg.nltkData;
     };
+  } // optionalAttrs (cfg.openMPThreadingWorkaround) {
+    OMP_NUM_THREADS = "1";
   } // (lib.mapAttrs (_: s:
     if (lib.isAttrs s || lib.isList s) then builtins.toJSON s
     else if lib.isBool s then lib.boolToString s
@@ -199,6 +201,20 @@ in
     };
 
     package = mkPackageOption pkgs "paperless-ngx" { };
+
+    openMPThreadingWorkaround = mkEnableOption ''
+      a workaround for document classifier timeouts.
+
+      Paperless uses OpenBLAS via scikit-learn for document classification.
+
+      The default is to use threading for OpenMP but this would cause the
+      document classifier to spin on one core seemingly indefinitely if there
+      is a large number of classes per classification, causing it to
+      effectively never complete due to running into timeouts.
+
+      This sets `OMP_NUM_THREADS` to `1` in order to mitigate the issue. See
+      https://github.com/NixOS/nixpkgs/issues/240591 for more information.
+    '' // mkOption { default = true; };
   };
 
   config = mkIf cfg.enable {
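For the new `openMPThreadingWorkaround` option above, a minimal sketch of opting out (it defaults to `true`, so most configurations need nothing):

```nix
{
  services.paperless.enable = true;
  # Re-enable OpenMP threading for the document classifier; only do this
  # if the classifier timeouts described above are not a concern.
  services.paperless.openMPThreadingWorkaround = false;
}
```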
diff --git a/nixos/modules/services/networking/microsocks.nix b/nixos/modules/services/networking/microsocks.nix
new file mode 100644
index 0000000000000..be79a8495636f
--- /dev/null
+++ b/nixos/modules/services/networking/microsocks.nix
@@ -0,0 +1,146 @@
+{ config,
+  lib,
+  pkgs,
+  ...
+}:
+
+let
+  cfg = config.services.microsocks;
+
+  cmd =
+    if cfg.execWrapper != null
+    then "${cfg.execWrapper} ${cfg.package}/bin/microsocks"
+    else "${cfg.package}/bin/microsocks";
+  args =
+    [ "-i" cfg.ip "-p" (toString cfg.port) ]
+    ++ lib.optionals (cfg.authOnce) [ "-1" ]
+    ++ lib.optionals (cfg.disableLogging) [ "-q" ]
+    ++ lib.optionals (cfg.outgoingBindIp != null) [ "-b" cfg.outgoingBindIp ]
+    ++ lib.optionals (cfg.authUsername != null) [ "-u" cfg.authUsername ];
+in {
+  options.services.microsocks = {
+    enable = lib.mkEnableOption (lib.mdDoc "Tiny, portable SOCKS5 server with very moderate resource usage");
+    user = lib.mkOption {
+      default = "microsocks";
+      description = lib.mdDoc "User microsocks runs as.";
+      type = lib.types.str;
+    };
+    group = lib.mkOption {
+      default = "microsocks";
+      description = lib.mdDoc "Group microsocks runs as.";
+      type = lib.types.str;
+    };
+    package = lib.mkPackageOption pkgs "microsocks" {};
+    ip = lib.mkOption {
+      type = lib.types.str;
+      default = "127.0.0.1";
+      description = lib.mdDoc ''
+        IP on which microsocks should listen. Defaults to 127.0.0.1 for
+        security reasons.
+      '';
+    };
+    port = lib.mkOption {
+      type = lib.types.port;
+      default = 1080;
+      description = lib.mdDoc "Port on which microsocks should listen.";
+    };
+    disableLogging = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc "If true, microsocks will not log any messages to stdout/stderr.";
+    };
+    authOnce = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        If true, once a specific IP address has authenticated successfully with user/pass,
+        it is added to a whitelist and may use the proxy without auth.
+      '';
+    };
+    outgoingBindIp = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      description = lib.mdDoc "Specifies which ip outgoing connections are bound to";
+    };
+    authUsername = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      example = "alice";
+      description = lib.mdDoc "Optional username to use for authentication.";
+    };
+    authPasswordFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      example = "/run/secrets/microsocks-password";
+      description = lib.mdDoc "Path to a file containing the password for authentication.";
+    };
+    execWrapper = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      example = ''
+        ''${pkgs.mullvad-vpn}/bin/mullvad-exclude
+      '';
+      description = lib.mdDoc ''
+        An optional command to prepend to the microsocks command (such as proxychains, or a VPN exclude command).
+      '';
+    };
+  };
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = (cfg.authUsername != null) == (cfg.authPasswordFile != null);
+        message = "Need to set both authUsername and authPasswordFile for microsocks";
+      }
+    ];
+    users = {
+      users = lib.mkIf (cfg.user == "microsocks") {
+        microsocks = {
+          group = cfg.group;
+          isSystemUser = true;
+        };
+      };
+      groups = lib.mkIf (cfg.group == "microsocks") {
+        microsocks = {};
+      };
+    };
+    systemd.services.microsocks = {
+      enable = true;
+      description = "a tiny socks server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        Restart = "on-failure";
+        RestartSec = 10;
+        LoadCredential = lib.optionalString (cfg.authPasswordFile != null) "MICROSOCKS_PASSWORD_FILE:${cfg.authPasswordFile}";
+        MemoryDenyWriteExecute = true;
+        SystemCallArchitectures = "native";
+        PrivateTmp = true;
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        PrivateDevices = true;
+        RestrictSUIDSGID = true;
+        RestrictNamespaces = [
+          "cgroup"
+          "ipc"
+          "pid"
+          "user"
+          "uts"
+        ];
+      };
+      script =
+        if cfg.authPasswordFile != null
+        then ''
+          PASSWORD=$(cat "$CREDENTIALS_DIRECTORY/MICROSOCKS_PASSWORD_FILE")
+          ${cmd} ${lib.escapeShellArgs args} -P "$PASSWORD"
+        ''
+        else ''
+          ${cmd} ${lib.escapeShellArgs args}
+        '';
+    };
+  };
+}
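A minimal usage sketch for the new `services.microsocks` module, with hypothetical listen address and credential values:

```nix
{
  services.microsocks = {
    enable = true;
    # Listen on all interfaces instead of the 127.0.0.1 default.
    ip = "0.0.0.0";
    port = 1080;
    # Must be set together; the module's assertion enforces this.
    authUsername = "alice";
    authPasswordFile = "/run/secrets/microsocks-password";
  };
}
```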
diff --git a/nixos/modules/services/networking/scion/scion-control.nix b/nixos/modules/services/networking/scion/scion-control.nix
new file mode 100644
index 0000000000000..fdf3a9ba3cc15
--- /dev/null
+++ b/nixos/modules/services/networking/scion/scion-control.nix
@@ -0,0 +1,69 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-control;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    general = {
+      id = "cs";
+      config_dir = "/etc/scion";
+      reconnect_to_dispatcher = true;
+    };
+    beacon_db = {
+      connection = "/var/lib/scion-control/control.beacon.db";
+    };
+    path_db = {
+      connection = "/var/lib/scion-control/control.path.db";
+    };
+    trust_db = {
+      connection = "/var/lib/scion-control/control.trust.db";
+    };
+    log.console = {
+      level = "info";
+    };
+  };
+  configFile = toml.generate "scion-control.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-control = {
+    enable = mkEnableOption (lib.mdDoc "the scion-control service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          path_db = {
+            connection = "/var/lib/scion-control/control.path.db";
+          };
+          log.console = {
+            level = "info";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-control configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.scion-control = {
+      description = "SCION Control Service";
+      after = [ "network-online.target" "scion-dispatcher.service" ];
+      wants = [ "network-online.target" "scion-dispatcher.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        Group = if (config.services.scion.scion-dispatcher.enable == true) then "scion" else null;
+        ExecStart = "${pkgs.scion}/bin/scion-control --config ${configFile}";
+        DynamicUser = true;
+        Restart = "on-failure";
+        BindPaths = [ "/dev/shm:/run/shm" ];
+        StateDirectory = "scion-control";
+      };
+    };
+  };
+}
diff --git a/nixos/modules/services/networking/scion/scion-daemon.nix b/nixos/modules/services/networking/scion/scion-daemon.nix
new file mode 100644
index 0000000000000..0bcc18771fc3c
--- /dev/null
+++ b/nixos/modules/services/networking/scion/scion-daemon.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-daemon;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    general = {
+      id = "sd";
+      config_dir = "/etc/scion";
+      reconnect_to_dispatcher = true;
+    };
+    path_db = {
+      connection = "/var/lib/scion-daemon/sd.path.db";
+    };
+    trust_db = {
+      connection = "/var/lib/scion-daemon/sd.trust.db";
+    };
+    log.console = {
+      level = "info";
+    };
+  };
+  configFile = toml.generate "scion-daemon.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-daemon = {
+    enable = mkEnableOption (lib.mdDoc "the scion-daemon service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          path_db = {
+            connection = "/var/lib/scion-daemon/sd.path.db";
+          };
+          log.console = {
+            level = "info";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-daemon configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.scion-daemon = {
+      description = "SCION Daemon";
+      after = [ "network-online.target" "scion-dispatcher.service" ];
+      wants = [ "network-online.target" "scion-dispatcher.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.scion}/bin/scion-daemon --config ${configFile}";
+        Restart = "on-failure";
+        DynamicUser = true;
+        StateDirectory = "scion-daemon";
+      };
+    };
+  };
+}
diff --git a/nixos/modules/services/networking/scion/scion-dispatcher.nix b/nixos/modules/services/networking/scion/scion-dispatcher.nix
new file mode 100644
index 0000000000000..bab1ec0a989b5
--- /dev/null
+++ b/nixos/modules/services/networking/scion/scion-dispatcher.nix
@@ -0,0 +1,74 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-dispatcher;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    dispatcher = {
+      id = "dispatcher";
+      socket_file_mode = "0770";
+      application_socket = "/dev/shm/dispatcher/default.sock";
+    };
+    log.console = {
+      level = "info";
+    };
+  };
+  configFile = toml.generate "scion-dispatcher.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-dispatcher = {
+    enable = mkEnableOption (lib.mdDoc "the scion-dispatcher service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          dispatcher = {
+            id = "dispatcher";
+            socket_file_mode = "0770";
+            application_socket = "/dev/shm/dispatcher/default.sock";
+          };
+          log.console = {
+            level = "info";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-dispatcher configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    # Needed for group ownership of the dispatcher socket
+    users.groups.scion = {};
+
+    # SCION programs hardcode the dispatcher path under /run/shm, which is not
+    # configurable at runtime. Upstream plans to obsolete the dispatcher in
+    # favor of an SCMP daemon, at which point this can be removed.
+    system.activationScripts.scion-dispatcher = ''
+      ln -sf /dev/shm /run/shm
+    '';
+
+    systemd.services.scion-dispatcher = {
+      description = "SCION Dispatcher";
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        Group = "scion";
+        DynamicUser = true;
+        BindPaths = [ "/dev/shm:/run/shm" ];
+        ExecStartPre = "${pkgs.coreutils}/bin/rm -rf /run/shm/dispatcher";
+        ExecStart = "${pkgs.scion}/bin/scion-dispatcher --config ${configFile}";
+        Restart = "on-failure";
+        StateDirectory = "scion-dispatcher";
+      };
+    };
+  };
+}
+
diff --git a/nixos/modules/services/networking/scion/scion-router.nix b/nixos/modules/services/networking/scion/scion-router.nix
new file mode 100644
index 0000000000000..cbe83c6dbf8d1
--- /dev/null
+++ b/nixos/modules/services/networking/scion/scion-router.nix
@@ -0,0 +1,49 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion.scion-router;
+  toml = pkgs.formats.toml { };
+  defaultConfig = {
+    general = {
+      id = "br";
+      config_dir = "/etc/scion";
+    };
+  };
+  configFile = toml.generate "scion-router.toml" (defaultConfig // cfg.settings);
+in
+{
+  options.services.scion.scion-router = {
+    enable = mkEnableOption (lib.mdDoc "the scion-router service");
+    settings = mkOption {
+      default = { };
+      type = toml.type;
+      example = literalExpression ''
+        {
+          general.id = "br";
+        }
+      '';
+      description = lib.mdDoc ''
+        scion-router configuration. Refer to
+        <https://docs.scion.org/en/latest/manuals/common.html>
+        for details on supported values.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.scion-router = {
+      description = "SCION Router";
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.scion}/bin/scion-router --config ${configFile}";
+        Restart = "on-failure";
+        DynamicUser = true;
+        StateDirectory = "scion-router";
+      };
+    };
+  };
+}
diff --git a/nixos/modules/services/networking/scion/scion.nix b/nixos/modules/services/networking/scion/scion.nix
new file mode 100644
index 0000000000000..704f942b5d9e3
--- /dev/null
+++ b/nixos/modules/services/networking/scion/scion.nix
@@ -0,0 +1,39 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scion;
+in
+{
+  options.services.scion = {
+    enable = mkEnableOption (lib.mdDoc "all of the scion components and services");
+    bypassBootstrapWarning = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        bypass Nix warning about SCION PKI bootstrapping
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    services.scion = {
+      scion-dispatcher.enable = true;
+      scion-daemon.enable = true;
+      scion-router.enable = true;
+      scion-control.enable = true;
+    };
+    assertions = [
+      { assertion = cfg.bypassBootstrapWarning == true;
+        message = ''
+          SCION is a routing protocol and requires bootstrapping with a manual, imperative key signing ceremony. You may want to join an existing Isolation Domain (ISD) such as scionlab.org, or bootstrap your own. If you have completed and configured the public key infrastructure for SCION and are sure this process is complete, then add the following to your configuration:
+
+          services.scion.bypassBootstrapWarning = true;
+
+          refer to docs.scion.org for more information
+        '';
+      }
+    ];
+  };
+}
+
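A minimal usage sketch for the umbrella `services.scion` module, assuming the SCION PKI bootstrapping described in the assertion message has already been completed:

```nix
{
  # Enables scion-dispatcher, scion-daemon, scion-router and scion-control.
  services.scion.enable = true;
  # Acknowledges the bootstrap requirement; without it the assertion fails.
  services.scion.bypassBootstrapWarning = true;
}
```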
diff --git a/nixos/modules/services/security/usbguard.nix b/nixos/modules/services/security/usbguard.nix
index f167fbb2eca82..ff54176e13d3a 100644
--- a/nixos/modules/services/security/usbguard.nix
+++ b/nixos/modules/services/security/usbguard.nix
@@ -80,7 +80,7 @@ in
       };
 
       implicitPolicyTarget = mkOption {
-        type = policy;
+        type = types.enum [ "allow" "block" "reject" ];
         default = "block";
         description = lib.mdDoc ''
           How to treat USB devices that don't match any rule in the policy.
@@ -110,7 +110,7 @@ in
       };
 
       insertedDevicePolicy = mkOption {
-        type = policy;
+        type = types.enum [ "block" "reject" "apply-policy" ];
         default = "apply-policy";
         description = lib.mdDoc ''
           How to treat USB devices that are already connected after the daemon
diff --git a/nixos/modules/services/web-apps/kavita.nix b/nixos/modules/services/web-apps/kavita.nix
index c3e39f0b54761..c90697bcfa8b2 100644
--- a/nixos/modules/services/web-apps/kavita.nix
+++ b/nixos/modules/services/web-apps/kavita.nix
@@ -2,7 +2,18 @@
 
 let
   cfg = config.services.kavita;
-in {
+  settingsFormat = pkgs.formats.json { };
+  appsettings = settingsFormat.generate "appsettings.json" ({ TokenKey = "@TOKEN@"; } // cfg.settings);
+in
+{
+  imports = [
+    (lib.mkChangedOptionModule [ "services" "kavita" "ipAdresses" ] [ "services" "kavita" "settings" "IpAddresses" ] (config:
+      let value = lib.getAttrFromPath [ "services" "kavita" "ipAdresses" ] config; in
+      lib.concatStringsSep "," value
+    ))
+    (lib.mkRenamedOptionModule [ "services" "kavita" "port" ] [ "services" "kavita" "settings" "Port" ])
+  ];
+
   options.services.kavita = {
     enable = lib.mkEnableOption (lib.mdDoc "Kavita reading server");
 
@@ -27,16 +38,31 @@ in {
         It can be generated with `head -c 32 /dev/urandom | base64`.
       '';
     };
-    port = lib.mkOption {
-      default = 5000;
-      type = lib.types.port;
-      description = lib.mdDoc "Port to bind to.";
-    };
-    ipAdresses = lib.mkOption {
-      default = ["0.0.0.0" "::"];
-      type = lib.types.listOf lib.types.str;
-      description = lib.mdDoc "IP Addresses to bind to. The default is to bind
-      to all IPv4 and IPv6 addresses.";
+
+    settings = lib.mkOption {
+      default = { };
+      description = lib.mdDoc ''
+        Kavita configuration options, as configured in {file}`appsettings.json`.
+      '';
+      type = lib.types.submodule {
+        freeformType = settingsFormat.type;
+
+        options = {
+          Port = lib.mkOption {
+            default = 5000;
+            type = lib.types.port;
+            description = lib.mdDoc "Port to bind to.";
+          };
+
+          IpAddresses = lib.mkOption {
+            default = "0.0.0.0,::";
+            type = lib.types.commas;
+            description = lib.mdDoc ''
+              IP Addresses to bind to. The default is to bind to all IPv4 and IPv6 addresses.
+            '';
+          };
+        };
+      };
     };
   };
 
@@ -46,18 +72,15 @@ in {
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
       preStart = ''
-        umask u=rwx,g=rx,o=
-        cat > "${cfg.dataDir}/config/appsettings.json" <<EOF
-        {
-          "TokenKey": "$(cat ${cfg.tokenKeyFile})",
-          "Port": ${toString cfg.port},
-          "IpAddresses": "${lib.concatStringsSep "," cfg.ipAdresses}"
-        }
-        EOF
+        install -m600 ${appsettings} ${lib.escapeShellArg cfg.dataDir}/config/appsettings.json
+        ${pkgs.replace-secret}/bin/replace-secret '@TOKEN@' \
+          ''${CREDENTIALS_DIRECTORY}/token \
+          '${cfg.dataDir}/config/appsettings.json'
       '';
       serviceConfig = {
         WorkingDirectory = cfg.dataDir;
-        ExecStart = "${lib.getExe cfg.package}";
+        LoadCredential = [ "token:${cfg.tokenKeyFile}" ];
+        ExecStart = lib.getExe cfg.package;
         Restart = "always";
         User = cfg.user;
       };
diff --git a/nixos/modules/services/web-apps/pretix.nix b/nixos/modules/services/web-apps/pretix.nix
index 2355f8c450a1e..22ee9769aa923 100644
--- a/nixos/modules/services/web-apps/pretix.nix
+++ b/nixos/modules/services/web-apps/pretix.nix
@@ -63,7 +63,7 @@ in
   };
 
   options.services.pretix = {
-    enable = mkEnableOption "pretix";
+    enable = mkEnableOption "Pretix, a ticket shop application for conferences, festivals, concerts, etc.";
 
     package = mkPackageOption pkgs "pretix" { };
 
diff --git a/nixos/modules/services/x11/xserver.nix b/nixos/modules/services/x11/xserver.nix
index 4e0235f9ad1dd..453f414e2a862 100644
--- a/nixos/modules/services/x11/xserver.nix
+++ b/nixos/modules/services/x11/xserver.nix
@@ -303,7 +303,7 @@ in
         type = types.listOf types.str;
         default = [ "modesetting" "fbdev" ];
         example = [
-          "nvidia" "nvidiaLegacy390" "nvidiaLegacy340" "nvidiaLegacy304"
+          "nvidia"
           "amdgpu-pro"
         ];
         # TODO(@oxij): think how to easily add the rest, like those nvidia things
diff --git a/nixos/modules/system/boot/systemd/initrd.nix b/nixos/modules/system/boot/systemd/initrd.nix
index e4f61db0cd02a..06359f273846a 100644
--- a/nixos/modules/system/boot/systemd/initrd.nix
+++ b/nixos/modules/system/boot/systemd/initrd.nix
@@ -392,7 +392,10 @@ in {
 
     boot.kernelParams = [
       "root=${config.boot.initrd.systemd.root}"
-    ] ++ lib.optional (config.boot.resumeDevice != "") "resume=${config.boot.resumeDevice}";
+    ] ++ lib.optional (config.boot.resumeDevice != "") "resume=${config.boot.resumeDevice}"
+      # `systemd` mounts root in initrd as read-only unless "rw" is on the kernel command line.
+      # For NixOS activation to succeed, we need to have root writable in initrd.
+      ++ lib.optional (config.boot.initrd.systemd.root == "gpt-auto") "rw";
 
     boot.initrd.systemd = {
       initrdBin = [pkgs.bash pkgs.coreutils cfg.package.kmod cfg.package];
diff --git a/nixos/modules/virtualisation/incus.nix b/nixos/modules/virtualisation/incus.nix
index da7873c7bec86..1ceaa40cca9dc 100644
--- a/nixos/modules/virtualisation/incus.nix
+++ b/nixos/modules/virtualisation/incus.nix
@@ -1,8 +1,80 @@
-{ config, lib, pkgs, ... }:
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
 
 let
   cfg = config.virtualisation.incus;
   preseedFormat = pkgs.formats.yaml { };
+
+  serverBinPath = ''${pkgs.qemu_kvm}/libexec:${
+    lib.makeBinPath (
+      with pkgs;
+      [
+        cfg.package
+
+        acl
+        attr
+        bash
+        btrfs-progs
+        cdrkit
+        coreutils
+        criu
+        dnsmasq
+        e2fsprogs
+        findutils
+        getent
+        gnugrep
+        gnused
+        gnutar
+        gptfdisk
+        gzip
+        iproute2
+        iptables
+        kmod
+        lvm2
+        minio
+        nftables
+        qemu_kvm
+        qemu-utils
+        rsync
+        squashfsTools
+        systemd
+        thin-provisioning-tools
+        util-linux
+        virtiofsd
+        xz
+
+        (writeShellScriptBin "apparmor_parser" ''
+          exec '${apparmor-parser}/bin/apparmor_parser' -I '${apparmor-profiles}/etc/apparmor.d' "$@"
+        '')
+      ]
+      ++ lib.optionals config.boot.zfs.enabled [
+        config.boot.zfs.package
+        "${config.boot.zfs.package}/lib/udev"
+      ]
+      ++ lib.optionals config.virtualisation.vswitch.enable [ config.virtualisation.vswitch.package ]
+    )
+  }'';
+
+  # https://github.com/lxc/incus/blob/cff35a29ee3d7a2af1f937cbb6cf23776941854b/internal/server/instance/drivers/driver_qemu.go#L123
+  ovmf-prefix = if pkgs.stdenv.hostPlatform.isAarch64 then "AAVMF" else "OVMF";
+  ovmf = pkgs.linkFarm "incus-ovmf" [
+    {
+      name = "OVMF_CODE.4MB.fd";
+      path = "${pkgs.OVMFFull.fd}/FV/${ovmf-prefix}_CODE.fd";
+    }
+    {
+      name = "OVMF_VARS.4MB.fd";
+      path = "${pkgs.OVMFFull.fd}/FV/${ovmf-prefix}_VARS.fd";
+    }
+    {
+      name = "OVMF_VARS.4MB.ms.fd";
+      path = "${pkgs.OVMFFull.fd}/FV/${ovmf-prefix}_VARS.fd";
+    }
+  ];
 in
 {
   meta = {
@@ -11,26 +83,29 @@ in
 
   options = {
     virtualisation.incus = {
-      enable = lib.mkEnableOption (lib.mdDoc ''
+      enable = lib.mkEnableOption ''
         incusd, a daemon that manages containers and virtual machines.
 
         Users in the "incus-admin" group can interact with
         the daemon (e.g. to start or stop containers) using the
         {command}`incus` command line tool, among others.
-      '');
+      '';
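+      # Typical interaction for a user in "incus-admin" once incusd is running
+      # (the image name below is purely illustrative):
+      #   incus launch images:alpine/edge c1 && incus exec c1 -- uname -a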
 
       package = lib.mkPackageOption pkgs "incus" { };
 
       lxcPackage = lib.mkPackageOption pkgs "lxc" { };
 
+      clientPackage = lib.mkPackageOption pkgs [
+        "incus"
+        "client"
+      ] { };
+
       preseed = lib.mkOption {
-        type = lib.types.nullOr (
-          lib.types.submodule { freeformType = preseedFormat.type; }
-        );
+        type = lib.types.nullOr (lib.types.submodule { freeformType = preseedFormat.type; });
 
         default = null;
 
-        description = lib.mdDoc ''
+        description = ''
           Configuration for Incus preseed, see
           <https://linuxcontainers.org/incus/docs/main/howto/initialize/#non-interactive-configuration>
           for supported values.
@@ -80,18 +155,16 @@ in
         };
       };
 
-      socketActivation = lib.mkEnableOption (
-        lib.mdDoc ''
-          socket-activation for starting incus.service. Enabling this option
-          will stop incus.service from starting automatically on boot.
-        ''
-      );
+      socketActivation = lib.mkEnableOption ''
+        socket-activation for starting incus.service. Enabling this option
+        will stop incus.service from starting automatically on boot.
+      '';
 
       startTimeout = lib.mkOption {
         type = lib.types.ints.unsigned;
         default = 600;
         apply = toString;
-        description = lib.mdDoc ''
+        description = ''
           Time to wait (in seconds) for incusd to become ready to process requests.
           If incusd does not reply within the configured time, `incus.service` will be
           considered failed and systemd will attempt to restart it.
@@ -99,9 +172,12 @@ in
       };
 
       ui = {
-        enable = lib.mkEnableOption (lib.mdDoc "(experimental) Incus UI");
+        enable = lib.mkEnableOption "(experimental) Incus UI";
 
-        package = lib.mkPackageOption pkgs [ "incus" "ui" ] { };
+        package = lib.mkPackageOption pkgs [
+          "incus"
+          "ui"
+        ] { };
       };
     };
   };
@@ -109,7 +185,12 @@ in
   config = lib.mkIf cfg.enable {
     assertions = [
       {
-        assertion = !(config.networking.firewall.enable && !config.networking.nftables.enable && config.virtualisation.incus.enable);
+        assertion =
+          !(
+            config.networking.firewall.enable
+            && !config.networking.nftables.enable
+            && config.virtualisation.incus.enable
+          );
         message = "Incus on NixOS is unsupported using iptables. Set `networking.nftables.enable = true;`";
       }
     ];
@@ -137,7 +218,12 @@ in
       "vhost_vsock"
     ] ++ lib.optionals (!config.networking.nftables.enable) [ "iptable_mangle" ];
 
-    environment.systemPackages = [ cfg.package ];
+    environment.systemPackages = [
+      cfg.clientPackage
+
+      # gui console support
+      pkgs.spice-gtk
+    ];
 
     # Note: the following options are also declared in virtualisation.lxc, but
     # the latter can't be simply enabled to reuse the formers, because it
@@ -164,31 +250,24 @@ in
         "network-online.target"
         "lxcfs.service"
         "incus.socket"
-      ]
-        ++ lib.optional config.virtualisation.vswitch.enable "ovs-vswitchd.service";
+      ] ++ lib.optionals config.virtualisation.vswitch.enable [ "ovs-vswitchd.service" ];
 
       requires = [
         "lxcfs.service"
         "incus.socket"
-      ]
-        ++ lib.optional config.virtualisation.vswitch.enable "ovs-vswitchd.service";
-
-      wants = [
-        "network-online.target"
-      ];
+      ] ++ lib.optionals config.virtualisation.vswitch.enable [ "ovs-vswitchd.service" ];
 
-      path = lib.optionals config.boot.zfs.enabled [
-        config.boot.zfs.package
-        "${config.boot.zfs.package}/lib/udev"
-      ]
-        ++ lib.optional config.virtualisation.vswitch.enable config.virtualisation.vswitch.package;
+      wants = [ "network-online.target" ];
 
-      environment = lib.mkMerge [ {
-        # Override Path to the LXC template configuration directory
-        INCUS_LXC_TEMPLATE_CONFIG = "${pkgs.lxcfs}/share/lxc/config";
-      } (lib.mkIf (cfg.ui.enable) {
-        "INCUS_UI" = cfg.ui.package;
-      }) ];
+      environment = lib.mkMerge [
+        {
+          INCUS_LXC_TEMPLATE_CONFIG = "${pkgs.lxcfs}/share/lxc/config";
+          INCUS_OVMF_PATH = ovmf;
+          INCUS_USBIDS_PATH = "${pkgs.hwdata}/share/hwdata/usb.ids";
+          PATH = lib.mkForce serverBinPath;
+        }
+        (lib.mkIf (cfg.ui.enable) { "INCUS_UI" = cfg.ui.package; })
+      ];
 
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/incusd --group incus-admin";
@@ -222,15 +301,13 @@ in
     systemd.services.incus-preseed = lib.mkIf (cfg.preseed != null) {
       description = "Incus initialization with preseed file";
 
-      wantedBy = ["incus.service"];
-      after = ["incus.service"];
-      bindsTo = ["incus.service"];
-      partOf = ["incus.service"];
+      wantedBy = [ "incus.service" ];
+      after = [ "incus.service" ];
+      bindsTo = [ "incus.service" ];
+      partOf = [ "incus.service" ];
 
       script = ''
-        ${cfg.package}/bin/incus admin init --preseed <${
-          preseedFormat.generate "incus-preseed.yaml" cfg.preseed
-        }
+        ${cfg.package}/bin/incus admin init --preseed <${preseedFormat.generate "incus-preseed.yaml" cfg.preseed}
       '';
 
       serviceConfig = {
diff --git a/nixos/release-combined.nix b/nixos/release-combined.nix
index 459700514ffc5..96b24feeb0631 100644
--- a/nixos/release-combined.nix
+++ b/nixos/release-combined.nix
@@ -169,11 +169,6 @@ in rec {
         (onFullSupported "nixpkgs.jdk")
         (onSystems ["x86_64-linux"] "nixpkgs.mesa_i686") # i686 sanity check + useful
         ["nixpkgs.tarball"]
-
-        # Ensure that nixpkgs-check-by-name is available in nixos-unstable,
-        # so that a pre-built version can be used in CI for PR's
-        # See ../pkgs/test/nixpkgs-check-by-name/README.md
-        (onSystems ["x86_64-linux"] "nixpkgs.tests.nixpkgs-check-by-name")
       ];
     };
 }
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 69d340bae2774..9cff268ae1d15 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -791,6 +791,7 @@ in {
   sanoid = handleTest ./sanoid.nix {};
   scaphandre = handleTest ./scaphandre.nix {};
   schleuder = handleTest ./schleuder.nix {};
+  scion-freestanding-deployment = handleTest ./scion/freestanding-deployment {};
   scrutiny = handleTest ./scrutiny.nix {};
   sddm = handleTest ./sddm.nix {};
   seafile = handleTest ./seafile.nix {};
@@ -902,6 +903,7 @@ in {
   systemd-sysusers-immutable = runTest ./systemd-sysusers-immutable.nix;
   systemd-timesyncd = handleTest ./systemd-timesyncd.nix {};
   systemd-timesyncd-nscd-dnssec = handleTest ./systemd-timesyncd-nscd-dnssec.nix {};
+  systemd-user-linger = handleTest ./systemd-user-linger.nix {};
   systemd-user-tmpfiles-rules = handleTest ./systemd-user-tmpfiles-rules.nix {};
   systemd-misc = handleTest ./systemd-misc.nix {};
   systemd-userdbd = handleTest ./systemd-userdbd.nix {};
@@ -962,6 +964,7 @@ in {
   user-activation-scripts = handleTest ./user-activation-scripts.nix {};
   user-expiry = runTest ./user-expiry.nix;
   user-home-mode = handleTest ./user-home-mode.nix {};
+  ustreamer = handleTest ./ustreamer.nix {};
   uwsgi = handleTest ./uwsgi.nix {};
   v2ray = handleTest ./v2ray.nix {};
   varnish60 = handleTest ./varnish.nix { package = pkgs.varnish60; };
diff --git a/nixos/tests/goss.nix b/nixos/tests/goss.nix
index 6b772d19215e3..2e77b2734464f 100644
--- a/nixos/tests/goss.nix
+++ b/nixos/tests/goss.nix
@@ -28,10 +28,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         };
         group.root.exists = true;
         kernel-param."kernel.ostype".value = "Linux";
-        service.goss = {
-          enabled = true;
-          running = true;
-        };
         user.root.exists = true;
       };
     };
@@ -46,8 +42,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     with subtest("returns health status"):
       result = json.loads(machine.succeed("curl -sS http://localhost:8080/healthz"))
 
-      assert len(result["results"]) == 10, f".results should be an array of 10 items, was {result['results']!r}"
+      assert len(result["results"]) == 8, f".results should be an array of 8 items, was {result['results']!r}"
       assert result["summary"]["failed-count"] == 0, f".summary.failed-count should be zero, was {result['summary']['failed-count']}"
-      assert result["summary"]["test-count"] == 10, f".summary.test-count should be 10, was {result['summary']['test-count']}"
+      assert result["summary"]["test-count"] == 8, f".summary.test-count should be 10, was {result['summary']['test-count']}"
     '';
 })
diff --git a/nixos/tests/incus/container.nix b/nixos/tests/incus/container.nix
index 9260f70da98c2..a71c5355046a5 100644
--- a/nixos/tests/incus/container.nix
+++ b/nixos/tests/incus/container.nix
@@ -1,20 +1,21 @@
-import ../make-test-python.nix ({ pkgs, lib, extra ? {}, ... } :
+import ../make-test-python.nix ({ pkgs, lib, extra ? {}, name ? "incus-container", ... } :
 
 let
   releases = import ../../release.nix {
-    configuration = {
-      # Building documentation makes the test unnecessarily take a longer time:
-      documentation.enable = lib.mkForce false;
+    configuration = lib.recursiveUpdate {
+      # Building documentation makes the test take unnecessarily long:
+      documentation.enable = lib.mkForce false;
 
-      boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
-    } // extra;
+      boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
+    } extra;
   };
 
   container-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
   container-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
 in
 {
-  name = "incus-container";
+  inherit name;
 
   meta = {
     maintainers = lib.teams.lxc.members;
diff --git a/nixos/tests/incus/default.nix b/nixos/tests/incus/default.nix
index 32bc5396a1647..b850c4fba018d 100644
--- a/nixos/tests/incus/default.nix
+++ b/nixos/tests/incus/default.nix
@@ -5,16 +5,22 @@
   handleTestOn,
 }:
 {
-  container-old-init = import ./container.nix { inherit system pkgs; };
-  container-new-init = import ./container.nix { inherit system pkgs; extra = {
-    # Enable new systemd init
-    boot.initrd.systemd.enable = true;
-  }; };
+  container-legacy-init = import ./container.nix {
+    name = "container-legacy-init";
+    inherit system pkgs;
+  };
+  container-systemd-init = import ./container.nix {
+    name = "container-systemd-init";
+    inherit system pkgs;
+    extra = {
+      boot.initrd.systemd.enable = true;
+    };
+  };
   lxd-to-incus = import ./lxd-to-incus.nix { inherit system pkgs; };
   openvswitch = import ./openvswitch.nix { inherit system pkgs; };
   preseed = import ./preseed.nix { inherit system pkgs; };
   socket-activated = import ./socket-activated.nix { inherit system pkgs; };
   storage = import ./storage.nix { inherit system pkgs; };
-  ui = import ./ui.nix {inherit system pkgs;};
+  ui = import ./ui.nix { inherit system pkgs; };
   virtual-machine = handleTestOn [ "x86_64-linux" ] ./virtual-machine.nix { inherit system pkgs; };
 }
diff --git a/nixos/tests/incus/lxd-to-incus.nix b/nixos/tests/incus/lxd-to-incus.nix
index 262f63c0f26fb..e93b76591eca4 100644
--- a/nixos/tests/incus/lxd-to-incus.nix
+++ b/nixos/tests/incus/lxd-to-incus.nix
@@ -95,7 +95,7 @@ import ../make-test-python.nix (
       machine.wait_for_unit("incus.service")
 
       with machine.nested("run migration"):
-          machine.succeed("lxd-to-incus --yes")
+          machine.succeed("${pkgs.incus}/bin/lxd-to-incus --yes")
 
       with machine.nested("verify resources migrated to incus"):
           machine.succeed("incus config show container")
diff --git a/nixos/tests/kavita.nix b/nixos/tests/kavita.nix
index f27b3fffbcf64..bb55e1fb29d43 100644
--- a/nixos/tests/kavita.nix
+++ b/nixos/tests/kavita.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, ...} : {
+import ./make-test-python.nix ({ pkgs, ... }: {
   name = "kavita";
   meta = with pkgs.lib.maintainers; {
     maintainers = [ misterio77 ];
@@ -8,29 +8,35 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     kavita = { config, pkgs, ... }: {
       services.kavita = {
         enable = true;
-        port = 5000;
-        tokenKeyFile = builtins.toFile "kavita.key" "QfpjFvjT83BLtZ74GE3U3Q==";
+        tokenKeyFile = builtins.toFile "kavita.key" "d26ba694b455271a8872415830fb7b5c58f8da98f9ef7f58b2ca4c34bd406512";
       };
     };
   };
 
-  testScript = let
-    regUrl = "http://kavita:5000/api/Account/register";
-    payload = builtins.toFile "payload.json" (builtins.toJSON {
-      username = "foo";
-      password = "correcthorsebatterystaple";
-      email = "foo@bar";
-    });
-  in ''
-    kavita.start
-    kavita.wait_for_unit("kavita.service")
+  testScript =
+    let
+      regUrl = "http://kavita:5000/api/Account/register";
+      loginUrl = "http://kavita:5000/api/Account/login";
+      localeUrl = "http://kavita:5000/api/locale";
+    in
+    ''
+      import json
 
-    # Check that static assets are working
-    kavita.wait_until_succeeds("curl http://kavita:5000/site.webmanifest | grep Kavita")
+      kavita.start
+      kavita.wait_for_unit("kavita.service")
 
-    # Check that registration is working
-    kavita.succeed("curl -fX POST ${regUrl} --json @${payload}")
-    # But only for the first one
-    kavita.fail("curl -fX POST ${regUrl} --json @${payload}")
-  '';
+      # Check that static assets are working
+      kavita.wait_until_succeeds("curl http://kavita:5000/site.webmanifest | grep Kavita")
+
+      # Check that registration is working
+      kavita.succeed("""curl -fX POST ${regUrl} --json '{"username": "foo", "password": "correcthorsebatterystaple"}'""")
+      # But only for the first one
+      kavita.fail("""curl -fX POST ${regUrl} --json '{"username": "foo", "password": "correcthorsebatterystaple"}'""")
+
+      # Log in and retrieve token
+      session = json.loads(kavita.succeed("""curl -fX POST ${loginUrl} --json '{"username": "foo", "password": "correcthorsebatterystaple"}'"""))
+      # Check list of locales
+      locales = json.loads(kavita.succeed(f"curl -fX GET ${localeUrl} -H 'Authorization: Bearer {session['token']}'"))
+      assert len(locales) > 0, "expected a list of locales"
+    '';
 })
diff --git a/nixos/tests/make-test-python.nix b/nixos/tests/make-test-python.nix
index 32531fffd2bf3..28569f1d2955a 100644
--- a/nixos/tests/make-test-python.nix
+++ b/nixos/tests/make-test-python.nix
@@ -1,5 +1,5 @@
 f: {
-  system,
+  system ? builtins.currentSystem,
   pkgs ? import ../.. { inherit system; config = {}; overlays = []; },
   ...
 } @ args:
diff --git a/nixos/tests/scion/freestanding-deployment/README.rst b/nixos/tests/scion/freestanding-deployment/README.rst
new file mode 100644
index 0000000000000..b2448a2dc9add
--- /dev/null
+++ b/nixos/tests/scion/freestanding-deployment/README.rst
@@ -0,0 +1,12 @@
+This NixOS VM test implements the network topology outlined in https://github.com/scionproto/scion/blob/27983125bccac6b84d1f96f406853aab0e460405/doc/tutorials/deploy.rst#sample-scion-demo-topology; below is an excerpt from that document:
+
+Sample SCION Demo Topology
+..........................
+
+The topology of the ISD includes the inter-AS connections to neighboring ASes, and defines the underlay IP/UDP addresses of services and routers running in this AS. This is specified in topology files - this guide later explains how to configure these files. A following graphic depicts the topology on a high level.
+
+.. figure:: https://github.com/scionproto/scion/raw/27983125bccac6b84d1f96f406853aab0e460405/doc/tutorials/deploy/SCION-deployment-guide.drawio.png
+   :width: 95 %
+   :figwidth: 100 %
+
+   *Figure 1 - Topology of the sample SCION demo environment. It consists of 1 ISD, 3 core ASes and 2 non-core ASes.*
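+
+Within this VM test, every node enables SCION through the NixOS module and ships
+its per-AS topology file, keys and the TRC generated by ``scion-pki`` under
+``/etc/scion``. A minimal sketch of what ``default.nix`` in this directory
+configures for the first AS (see that file for the full, generated setup):
+
+.. code-block:: nix
+
+   services.scion = {
+     enable = true;
+     bypassBootstrapWarning = true;
+   };
+   environment.etc."scion/topology.json".source = ./topology1.json;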
diff --git a/nixos/tests/scion/freestanding-deployment/default.nix b/nixos/tests/scion/freestanding-deployment/default.nix
new file mode 100644
index 0000000000000..0c9686fbfbadf
--- /dev/null
+++ b/nixos/tests/scion/freestanding-deployment/default.nix
@@ -0,0 +1,172 @@
+# implements https://github.com/scionproto/scion/blob/27983125bccac6b84d1f96f406853aab0e460405/doc/tutorials/deploy.rst
+import ../../make-test-python.nix ({ pkgs, ... }:
+let
+  trust-root-configuration-keys = pkgs.runCommand "generate-trc-keys.sh" {
+    buildInputs = [
+      pkgs.scion
+    ];
+  } ''
+    set -euo pipefail
+
+    mkdir /tmp/tutorial-scion-certs && cd /tmp/tutorial-scion-certs
+    mkdir AS{1..5}
+
+    # Create voting and root keys and (self-signed) certificates for core ASes
+    pushd AS1
+    scion-pki certificate create --not-after=3650d --profile=sensitive-voting <(echo '{"isd_as": "42-ffaa:1:1", "common_name": "42-ffaa:1:1 sensitive voting cert"}') sensitive-voting.pem sensitive-voting.key
+    scion-pki certificate create --not-after=3650d --profile=regular-voting <(echo '{"isd_as": "42-ffaa:1:1", "common_name": "42-ffaa:1:1 regular voting cert"}') regular-voting.pem regular-voting.key
+    scion-pki certificate create --not-after=3650d --profile=cp-root <(echo '{"isd_as": "42-ffaa:1:1", "common_name": "42-ffaa:1:1 cp root cert"}') cp-root.pem cp-root.key
+    popd
+
+    pushd AS2
+    scion-pki certificate create --not-after=3650d --profile=cp-root <(echo '{"isd_as": "42-ffaa:1:2", "common_name": "42-ffaa:1:2 cp root cert"}') cp-root.pem cp-root.key
+    popd
+
+    pushd AS3
+    scion-pki certificate create --not-after=3650d --profile=sensitive-voting <(echo '{"isd_as": "42-ffaa:1:3", "common_name": "42-ffaa:1:3 sensitive voting cert"}') sensitive-voting.pem sensitive-voting.key
+    scion-pki certificate create --not-after=3650d --profile=regular-voting <(echo '{"isd_as": "42-ffaa:1:3", "common_name": "42-ffaa:1:3 regular voting cert"}') regular-voting.pem regular-voting.key
+    popd
+
+    # Create the TRC (Trust Root Configuration)
+    mkdir tmp
+    echo '
+    isd = 42
+    description = "Demo ISD 42"
+    serial_version = 1
+    base_version = 1
+    voting_quorum = 2
+
+    core_ases = ["ffaa:1:1", "ffaa:1:2", "ffaa:1:3"]
+    authoritative_ases = ["ffaa:1:1", "ffaa:1:2", "ffaa:1:3"]
+    cert_files = ["AS1/sensitive-voting.pem", "AS1/regular-voting.pem", "AS1/cp-root.pem", "AS2/cp-root.pem", "AS3/sensitive-voting.pem", "AS3/regular-voting.pem"]
+
+    [validity]
+    not_before = '$(date +%s)'
+    validity = "365d"' \
+    > trc-B1-S1-pld.tmpl
+
+    scion-pki trc payload --out=tmp/ISD42-B1-S1.pld.der --template trc-B1-S1-pld.tmpl
+    rm trc-B1-S1-pld.tmpl
+
+    # Sign and bundle the TRC
+    scion-pki trc sign tmp/ISD42-B1-S1.pld.der AS1/sensitive-voting.{pem,key} --out tmp/ISD42-B1-S1.AS1-sensitive.trc
+    scion-pki trc sign tmp/ISD42-B1-S1.pld.der AS1/regular-voting.{pem,key} --out tmp/ISD42-B1-S1.AS1-regular.trc
+    scion-pki trc sign tmp/ISD42-B1-S1.pld.der AS3/sensitive-voting.{pem,key} --out tmp/ISD42-B1-S1.AS3-sensitive.trc
+    scion-pki trc sign tmp/ISD42-B1-S1.pld.der AS3/regular-voting.{pem,key} --out tmp/ISD42-B1-S1.AS3-regular.trc
+
+    scion-pki trc combine tmp/ISD42-B1-S1.AS{1,3}-{sensitive,regular}.trc --payload tmp/ISD42-B1-S1.pld.der --out ISD42-B1-S1.trc
+    rm tmp -r
+
+    # Create CA key and certificate for issuing ASes
+    pushd AS1
+    scion-pki certificate create --profile=cp-ca <(echo '{"isd_as": "42-ffaa:1:1", "common_name": "42-ffaa:1:1 CA cert"}') cp-ca.pem cp-ca.key --ca cp-root.pem --ca-key cp-root.key
+    popd
+    pushd AS2
+    scion-pki certificate create --profile=cp-ca <(echo '{"isd_as": "42-ffaa:1:2", "common_name": "42-ffaa:1:2 CA cert"}') cp-ca.pem cp-ca.key --ca cp-root.pem --ca-key cp-root.key
+    popd
+
+    # Create AS key and certificate chains
+    scion-pki certificate create --profile=cp-as <(echo '{"isd_as": "42-ffaa:1:1", "common_name": "42-ffaa:1:1 AS cert"}') AS1/cp-as.pem AS1/cp-as.key --ca AS1/cp-ca.pem --ca-key AS1/cp-ca.key --bundle
+    scion-pki certificate create --profile=cp-as <(echo '{"isd_as": "42-ffaa:1:2", "common_name": "42-ffaa:1:2 AS cert"}') AS2/cp-as.pem AS2/cp-as.key --ca AS2/cp-ca.pem --ca-key AS2/cp-ca.key --bundle
+    scion-pki certificate create --profile=cp-as <(echo '{"isd_as": "42-ffaa:1:3", "common_name": "42-ffaa:1:3 AS cert"}') AS3/cp-as.pem AS3/cp-as.key --ca AS1/cp-ca.pem --ca-key AS1/cp-ca.key --bundle
+    scion-pki certificate create --profile=cp-as <(echo '{"isd_as": "42-ffaa:1:4", "common_name": "42-ffaa:1:4 AS cert"}') AS4/cp-as.pem AS4/cp-as.key --ca AS1/cp-ca.pem --ca-key AS1/cp-ca.key --bundle
+    scion-pki certificate create --profile=cp-as <(echo '{"isd_as": "42-ffaa:1:5", "common_name": "42-ffaa:1:5 AS cert"}') AS5/cp-as.pem AS5/cp-as.key --ca AS2/cp-ca.pem --ca-key AS2/cp-ca.key --bundle
+
+    for i in {1..5}
+    do
+      mkdir -p $out/AS$i
+      cp AS$i/cp-as.{key,pem} $out/AS$i
+    done
+
+    mv *.trc $out
+  '';
+  imports = hostId: [
+    ({
+      services.scion = {
+        enable = true;
+        bypassBootstrapWarning = true;
+      };
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+      };
+      systemd.network.networks."01-eth1" = {
+        name = "eth1";
+        networkConfig.Address = "192.168.1.${toString hostId}/24";
+      };
+      environment.etc = {
+        "scion/topology.json".source = ./topology${toString hostId}.json;
+        "scion/crypto/as".source = trust-root-configuration-keys + "/AS${toString hostId}";
+        "scion/certs/ISD42-B1-S1.trc".source = trust-root-configuration-keys + "/ISD42-B1-S1.trc";
+        "scion/keys/master0.key".text = "U${toString hostId}v4k23ZXjGDwDofg/Eevw==";
+        "scion/keys/master1.key".text = "dBMko${toString hostId}qMS8DfrN/zP2OUdA==";
+      };
+      environment.systemPackages = [
+        pkgs.scion
+      ];
+    })
+  ];
+in
+{
+  name = "scion-test";
+  nodes = {
+    scion01 = { ... }: {
+      imports = (imports 1);
+    };
+    scion02 = { ... }: {
+      imports = (imports 2);
+    };
+    scion03 = { ... }: {
+      imports = (imports 3);
+    };
+    scion04 = { ... }: {
+      imports = (imports 4);
+    };
+    scion05 = { ... }: {
+      imports = (imports 5);
+    };
+  };
+  testScript = let
+    pingAll = pkgs.writeShellScript "ping-all-scion.sh" ''
+      addresses="42-ffaa:1:1 42-ffaa:1:2 42-ffaa:1:3 42-ffaa:1:4 42-ffaa:1:5"
+      timeout=100
+      wait_for_all() {
+        for as in "$@"
+        do
+          scion showpaths $as --no-probe > /dev/null || return 1
+        done
+        return 0
+      }
+      ping_all() {
+        for as in "$@"
+        do
+          scion ping "$as,127.0.0.1" -c 3 || return 1
+        done
+        return 0
+      }
+      for i in $(seq 0 $timeout); do
+        wait_for_all $addresses && ping_all $addresses && exit 0
+        sleep 1
+      done
+      exit 1
+    '';
+  in
+  ''
+    # List of AS instances
+    machines = [scion01, scion02, scion03, scion04, scion05]
+
+    # Wait for scion-control.service on all instances
+    for i in machines:
+        i.wait_for_unit("scion-control.service")
+
+    # Execute pingAll command on all instances
+    for i in machines:
+        i.succeed("${pingAll} >&2")
+
+    # Restart scion-dispatcher and ping again to test robustness
+    for i in machines:
+        i.succeed("systemctl restart scion-dispatcher >&2")
+        i.succeed("${pingAll} >&2")
+  '';
+})
diff --git a/nixos/tests/scion/freestanding-deployment/topology1.json b/nixos/tests/scion/freestanding-deployment/topology1.json
new file mode 100644
index 0000000000000..de51515eebc2d
--- /dev/null
+++ b/nixos/tests/scion/freestanding-deployment/topology1.json
@@ -0,0 +1,51 @@
+{
+  "attributes": [
+    "core"
+  ],
+  "isd_as": "42-ffaa:1:1",
+  "mtu": 1472,
+  "control_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "discovery_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "border_routers": {
+    "br": {
+      "internal_addr": "127.0.0.1:31002",
+      "interfaces": {
+        "1": {
+          "underlay": {
+            "public": "192.168.1.1:50014",
+            "remote": "192.168.1.4:50014"
+          },
+          "isd_as": "42-ffaa:1:4",
+          "link_to": "child",
+          "mtu": 1472
+        },
+        "2": {
+          "underlay": {
+            "public": "192.168.1.1:50012",
+            "remote": "192.168.1.2:50012"
+          },
+          "isd_as": "42-ffaa:1:2",
+          "link_to": "core",
+          "mtu": 1472
+        },
+        "3": {
+          "underlay": {
+            "public": "192.168.1.1:50013",
+            "remote": "192.168.1.3:50013"
+          },
+          "isd_as": "42-ffaa:1:3",
+          "link_to": "core",
+          "mtu": 1472
+        }
+      }
+    }
+  }
+}
diff --git a/nixos/tests/scion/freestanding-deployment/topology2.json b/nixos/tests/scion/freestanding-deployment/topology2.json
new file mode 100644
index 0000000000000..f8e10d5d1f75d
--- /dev/null
+++ b/nixos/tests/scion/freestanding-deployment/topology2.json
@@ -0,0 +1,51 @@
+{
+  "attributes": [
+    "core"
+  ],
+  "isd_as": "42-ffaa:1:2",
+  "mtu": 1472,
+  "control_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "discovery_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "border_routers": {
+    "br": {
+      "internal_addr": "127.0.0.1:31002",
+      "interfaces": {
+        "1": {
+          "underlay": {
+            "public": "192.168.1.2:50012",
+            "remote": "192.168.1.1:50012"
+          },
+          "isd_as": "42-ffaa:1:1",
+          "link_to": "core",
+          "mtu": 1472
+        },
+        "2": {
+          "underlay": {
+            "public": "192.168.1.2:50023",
+            "remote": "192.168.1.3:50023"
+          },
+          "isd_as": "42-ffaa:1:3",
+          "link_to": "core",
+          "mtu": 1472
+        },
+        "3": {
+          "underlay": {
+            "public": "192.168.1.2:50025",
+            "remote": "192.168.1.5:50025"
+          },
+          "isd_as": "42-ffaa:1:5",
+          "link_to": "child",
+          "mtu": 1472
+        }
+      }
+    }
+  }
+}
diff --git a/nixos/tests/scion/freestanding-deployment/topology3.json b/nixos/tests/scion/freestanding-deployment/topology3.json
new file mode 100644
index 0000000000000..53cee431885b3
--- /dev/null
+++ b/nixos/tests/scion/freestanding-deployment/topology3.json
@@ -0,0 +1,60 @@
+{
+  "attributes": [
+    "core"
+  ],
+  "isd_as": "42-ffaa:1:3",
+  "mtu": 1472,
+  "control_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "discovery_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "border_routers": {
+    "br": {
+      "internal_addr": "127.0.0.1:31002",
+      "interfaces": {
+        "1": {
+          "underlay": {
+            "public": "192.168.1.3:50013",
+            "remote": "192.168.1.1:50013"
+          },
+          "isd_as": "42-ffaa:1:1",
+          "link_to": "core",
+          "mtu": 1472
+        },
+        "2": {
+          "underlay": {
+            "public": "192.168.1.3:50023",
+            "remote": "192.168.1.2:50023"
+          },
+          "isd_as": "42-ffaa:1:2",
+          "link_to": "core",
+          "mtu": 1472
+        },
+        "3": {
+          "underlay": {
+            "public": "192.168.1.3:50034",
+            "remote": "192.168.1.4:50034"
+          },
+          "isd_as": "42-ffaa:1:4",
+          "link_to": "child",
+          "mtu": 1472
+        },
+        "4": {
+          "underlay": {
+            "public": "192.168.1.3:50035",
+            "remote": "192.168.1.5:50035"
+          },
+          "isd_as": "42-ffaa:1:5",
+          "link_to": "child",
+          "mtu": 1472
+        }
+      }
+    }
+  }
+}
diff --git a/nixos/tests/scion/freestanding-deployment/topology4.json b/nixos/tests/scion/freestanding-deployment/topology4.json
new file mode 100644
index 0000000000000..03c507a4daf58
--- /dev/null
+++ b/nixos/tests/scion/freestanding-deployment/topology4.json
@@ -0,0 +1,40 @@
+{
+  "attributes": [],
+  "isd_as": "42-ffaa:1:4",
+  "mtu": 1472,
+  "control_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "discovery_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "border_routers": {
+    "br": {
+      "internal_addr": "127.0.0.1:31002",
+      "interfaces": {
+        "1": {
+          "underlay": {
+            "public": "192.168.1.4:50014",
+            "remote": "192.168.1.1:50014"
+          },
+          "isd_as": "42-ffaa:1:1",
+          "link_to": "parent",
+          "mtu": 1472
+        },
+        "2": {
+          "underlay": {
+            "public": "192.168.1.4:50034",
+            "remote": "192.168.1.3:50034"
+          },
+          "isd_as": "42-ffaa:1:3",
+          "link_to": "parent",
+          "mtu": 1472
+        }
+      }
+    }
+  }
+}
diff --git a/nixos/tests/scion/freestanding-deployment/topology5.json b/nixos/tests/scion/freestanding-deployment/topology5.json
new file mode 100644
index 0000000000000..6114c1f73c2a7
--- /dev/null
+++ b/nixos/tests/scion/freestanding-deployment/topology5.json
@@ -0,0 +1,40 @@
+{
+  "attributes": [],
+  "isd_as": "42-ffaa:1:5",
+  "mtu": 1472,
+  "control_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "discovery_service": {
+    "cs": {
+      "addr": "127.0.0.1:31000"
+    }
+  },
+  "border_routers": {
+    "br": {
+      "internal_addr": "127.0.0.1:31002",
+      "interfaces": {
+        "1": {
+          "underlay": {
+            "public": "192.168.1.5:50025",
+            "remote": "192.168.1.2:50025"
+          },
+          "isd_as": "42-ffaa:1:2",
+          "link_to": "parent",
+          "mtu": 1472
+        },
+        "2": {
+          "underlay": {
+            "public": "192.168.1.5:50035",
+            "remote": "192.168.1.3:50035"
+          },
+          "isd_as": "42-ffaa:1:3",
+          "link_to": "parent",
+          "mtu": 1472
+        }
+      }
+    }
+  }
+}
diff --git a/nixos/tests/systemd-user-linger.nix b/nixos/tests/systemd-user-linger.nix
new file mode 100644
index 0000000000000..2c3d71668979f
--- /dev/null
+++ b/nixos/tests/systemd-user-linger.nix
@@ -0,0 +1,39 @@
+import ./make-test-python.nix (
+  { lib, ... }:
+  {
+    name = "systemd-user-linger";
+
+    nodes.machine =
+      { ... }:
+      {
+        users.users = {
+          alice = {
+            isNormalUser = true;
+            linger = true;
+            uid = 1000;
+          };
+
+          bob = {
+            isNormalUser = true;
+            linger = false;
+            uid = 1001;
+          };
+        };
+      };
+
+    testScript =
+      { ... }:
+      ''
+        machine.wait_for_file("/var/lib/systemd/linger/alice")
+        machine.succeed("systemctl status user-1000.slice")
+
+        machine.fail("test -e /var/lib/systemd/linger/bob")
+        machine.fail("systemctl status user-1001.slice")
+
+        with subtest("missing users have linger purged"):
+            machine.succeed("touch /var/lib/systemd/linger/missing")
+            machine.systemctl("restart linger-users")
+            machine.succeed("test ! -e /var/lib/systemd/linger/missing")
+      '';
+  }
+)
diff --git a/nixos/tests/ustreamer.nix b/nixos/tests/ustreamer.nix
new file mode 100644
index 0000000000000..1354eb03a3269
--- /dev/null
+++ b/nixos/tests/ustreamer.nix
@@ -0,0 +1,75 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "ustreamer-vmtest";
+  nodes = {
+    client = {...}: {
+      environment.systemPackages = [ pkgs.curl ];
+    };
+    camera = {config, ...}: let
+      configFile = pkgs.writeText "akvcam-configFile" ''
+        [Cameras]
+        cameras/size = 2
+
+        cameras/1/type = output
+        cameras/1/mode = mmap, userptr, rw
+        cameras/1/description = Virtual Camera (output device)
+        cameras/1/formats = 2
+        cameras/1/videonr = 7
+
+        cameras/2/type = capture
+        cameras/2/mode = mmap, rw
+        cameras/2/description = Virtual Camera
+        cameras/2/formats = 1, 2
+        cameras/2/videonr = 9
+
+        [Connections]
+        connections/size = 1
+        connections/1/connection = 1:2
+
+        [Formats]
+        formats/size = 2
+
+        formats/1/format = YUY2
+        formats/1/width = 640
+        formats/1/height = 480
+        formats/1/fps = 30
+
+        formats/2/format = RGB24, YUY2
+        formats/2/width = 640
+        formats/2/height = 480
+        formats/2/fps = 20/1, 15/2
+      '';
+    in {
+      environment.systemPackages = [ pkgs.ustreamer ];
+      networking.firewall.enable = false;
+      systemd.services.ustreamer = {
+        description = "ustreamer service";
+        wantedBy = ["multi-user.target"];
+        serviceConfig = {
+          DynamicUser = true;
+          ExecStart = "${pkgs.ustreamer}/bin/ustreamer --host=0.0.0.0 --port 8000 --device /dev/video9 --device-timeout=8";
+          PrivateTmp = true;
+          BindReadOnlyPaths = "/dev/video9";
+          SupplementaryGroups = [
+            "video"
+          ];
+          Restart = "always";
+        };
+      };
+      boot.extraModulePackages = [config.boot.kernelPackages.akvcam];
+      boot.kernelModules = ["akvcam"];
+      boot.extraModprobeConfig = ''
+        options akvcam config_file=${configFile}
+      '';
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    camera.wait_for_unit("ustreamer.service")
+    camera.wait_for_open_port(8000)
+
+    client.wait_for_unit("multi-user.target")
+    client.succeed("curl http://camera:8000")
+  '';
+})