Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/installation/installing-usb.section.md | 2
-rw-r--r--  nixos/doc/manual/release-notes/rl-2405.section.md | 18
-rw-r--r--  nixos/lib/make-single-disk-zfs-image.nix | 5
-rw-r--r--  nixos/lib/make-squashfs.nix | 3
-rw-r--r--  nixos/maintainers/scripts/openstack/openstack-image-zfs.nix | 8
-rw-r--r--  nixos/modules/installer/cd-dvd/iso-image.nix | 3
-rw-r--r--  nixos/modules/programs/starship.nix | 33
-rw-r--r--  nixos/modules/services/admin/pgadmin.nix | 18
-rw-r--r--  nixos/modules/services/mail/dovecot.nix | 186
-rw-r--r--  nixos/modules/services/misc/ntfy-sh.nix | 6
-rw-r--r--  nixos/modules/services/networking/miniupnpd.nix | 53
-rw-r--r--  nixos/modules/services/networking/networkmanager.nix | 5
-rw-r--r--  nixos/modules/services/web-apps/c2fmzq-server.nix | 2
-rw-r--r--  nixos/modules/services/web-servers/nginx/default.nix | 10
-rw-r--r--  nixos/modules/services/web-servers/nginx/vhost-options.nix | 7
-rw-r--r--  nixos/modules/tasks/filesystems/zfs.nix | 2
-rw-r--r--  nixos/modules/tasks/trackpoint.nix | 2
-rw-r--r--  nixos/modules/virtualisation/libvirtd.nix | 17
-rw-r--r--  nixos/tests/all-tests.nix | 6
-rw-r--r--  nixos/tests/c2fmzq.nix | 3
-rw-r--r--  nixos/tests/nginx-etag-compression.nix | 45
-rw-r--r--  nixos/tests/nixos-rebuild-target-host.nix | 136
-rw-r--r--  nixos/tests/ntfy-sh-migration.nix | 77
-rw-r--r--  nixos/tests/pantheon.nix | 34
-rw-r--r--  nixos/tests/pgadmin4.nix | 56
-rw-r--r--  nixos/tests/spark/default.nix | 2
-rw-r--r--  nixos/tests/upnp.nix | 5
27 files changed, 661 insertions, 83 deletions
diff --git a/nixos/doc/manual/installation/installing-usb.section.md b/nixos/doc/manual/installation/installing-usb.section.md
index adfe22ea2f00e..3b9e2f492f04d 100644
--- a/nixos/doc/manual/installation/installing-usb.section.md
+++ b/nixos/doc/manual/installation/installing-usb.section.md
@@ -35,7 +35,7 @@ select the image, select the USB flash drive and click "Write".
 4. Then use the `dd` utility to write the image to the USB flash drive.
 
   ```ShellSession
-  sudo dd if=<path-to-image> of=/dev/sdX bs=4M conv=fsync
+  sudo dd bs=4M conv=fsync oflag=direct status=progress if=<path-to-image> of=/dev/sdX
   ```
 
 ## Creating bootable USB flash drive from a Terminal on macOS {#sec-booting-from-usb-macos}
diff --git a/nixos/doc/manual/release-notes/rl-2405.section.md b/nixos/doc/manual/release-notes/rl-2405.section.md
index 4f01e74d849c9..aba4d3d72d1df 100644
--- a/nixos/doc/manual/release-notes/rl-2405.section.md
+++ b/nixos/doc/manual/release-notes/rl-2405.section.md
@@ -70,8 +70,26 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 - `mkosi` was updated to v19. Parts of the user interface have changed. Consult the
   [release notes](https://github.com/systemd/mkosi/releases/tag/v19) for a list of changes.
 
+- `services.nginx` will no longer advertise HTTP/3 availability automatically. This must now be manually added, preferably to each location block.
+  Example:
+
+  ```nix
+    locations."/".extraConfig = ''
+      add_header Alt-Svc 'h3=":$server_port"; ma=86400';
+    '';
+    locations."^~ /assets/".extraConfig = ''
+      add_header Alt-Svc 'h3=":$server_port"; ma=86400';
+    '';
+  ```
+
 - The `kanata` package has been updated to v1.5.0, which includes [breaking changes](https://github.com/jtroo/kanata/releases/tag/v1.5.0).
 
+- The `craftos-pc` package has been updated to v2.8, which includes [breaking changes](https://github.com/MCJack123/craftos2/releases/tag/v2.8).
+  - Files are now handled in binary mode; this could break programs with embedded UTF-8 characters.
+  - The ROM was updated to match ComputerCraft version v1.109.2.
+  - The bundled Lua was updated to Lua v5.2, which includes breaking changes. See the [Lua manual](https://www.lua.org/manual/5.2/manual.html#8) for more information.
+  - The WebSocket API [was rewritten](https://github.com/MCJack123/craftos2/issues/337), which introduced breaking changes.
+
 - The latest available version of Nextcloud is v28 (available as `pkgs.nextcloud28`). The installation logic is as follows:
   - If [`services.nextcloud.package`](#opt-services.nextcloud.package) is specified explicitly, this package will be installed (**recommended**)
   - If [`system.stateVersion`](#opt-system.stateVersion) is >=24.05, `pkgs.nextcloud28` will be installed by default.
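
A usage sketch (not part of this diff): pinning the package explicitly, as the note recommends, avoids surprises from the stateVersion-based default:

```nix
{ pkgs, ... }: {
  services.nextcloud = {
    enable = true;
    # opt into the new major version explicitly (recommended above)
    package = pkgs.nextcloud28;
  };
}
```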
diff --git a/nixos/lib/make-single-disk-zfs-image.nix b/nixos/lib/make-single-disk-zfs-image.nix
index a3564f9a8b68e..585fa93b7fa0f 100644
--- a/nixos/lib/make-single-disk-zfs-image.nix
+++ b/nixos/lib/make-single-disk-zfs-image.nix
@@ -21,6 +21,9 @@
 , # size of the FAT partition, in megabytes.
   bootSize ? 1024
 
+, # The memory allocated for the virtualized build instance, in megabytes.
+  memSize ? 1024
+
 , # The size of the root partition, in megabytes.
   rootSize ? 2048
 
@@ -230,7 +233,7 @@ let
   ).runInLinuxVM (
     pkgs.runCommand name
       {
-        memSize = 1024;
+        inherit memSize;
         QEMU_OPTS = "-drive file=$rootDiskImage,if=virtio,cache=unsafe,werror=report";
         preVM = ''
           PATH=$PATH:${pkgs.qemu_kvm}/bin
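
A sketch of how a caller might pass the new knob; the builder's full argument list is elided here, so treat the surrounding arguments as placeholders:

```nix
# hypothetical call site; other required arguments of
# make-single-disk-zfs-image.nix are elided
import ./make-single-disk-zfs-image.nix {
  inherit config lib pkgs;
  memSize = 2048; # megabytes for the build VM; was hard-coded to 1024
  rootSize = 4096;
}
```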
diff --git a/nixos/lib/make-squashfs.nix b/nixos/lib/make-squashfs.nix
index 4b6b567399484..f28e2c6715805 100644
--- a/nixos/lib/make-squashfs.nix
+++ b/nixos/lib/make-squashfs.nix
@@ -14,6 +14,7 @@
 
 let
   pseudoFilesArgs = lib.concatMapStrings (f: ''-p "${f}" '') pseudoFiles;
+  compFlag = if comp == null then "-no-compression" else "-comp ${comp}";
 in
 stdenv.mkDerivation {
   name = "${fileName}.img";
@@ -39,7 +40,7 @@ stdenv.mkDerivation {
 
       # Generate the squashfs image.
       mksquashfs nix-path-registration $(cat $closureInfo/store-paths) $out ${pseudoFilesArgs} \
-        -no-hardlinks ${lib.optionalString noStrip "-no-strip"} -keep-as-directory -all-root -b 1048576 -comp ${comp} \
+        -no-hardlinks ${lib.optionalString noStrip "-no-strip"} -keep-as-directory -all-root -b 1048576 ${compFlag} \
         -processors $NIX_BUILD_CORES
     '';
 }
diff --git a/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix b/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix
index 936dcee12949e..60f0535854dd5 100644
--- a/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix
+++ b/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix
@@ -20,6 +20,12 @@ in
       default = "nixos-openstack-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
     };
 
+    ramMB = mkOption {
+      type = types.int;
+      default = 1024;
+      description = lib.mdDoc "RAM allocation for build VM";
+    };
+
     sizeMB = mkOption {
       type = types.int;
       default = 8192;
@@ -64,7 +70,7 @@ in
       includeChannel = copyChannel;
 
       bootSize = 1000;
-
+      memSize = cfg.ramMB;
       rootSize = cfg.sizeMB;
       rootPoolProperties = {
         ashift = 12;
diff --git a/nixos/modules/installer/cd-dvd/iso-image.nix b/nixos/modules/installer/cd-dvd/iso-image.nix
index 0b5135c088eaf..6adb94e09aff3 100644
--- a/nixos/modules/installer/cd-dvd/iso-image.nix
+++ b/nixos/modules/installer/cd-dvd/iso-image.nix
@@ -512,9 +512,10 @@ in
                 + lib.optionalString isAarch "-Xbcj arm"
                 + lib.optionalString (isPower && is32bit && isBigEndian) "-Xbcj powerpc"
                 + lib.optionalString (isSparc) "-Xbcj sparc";
-      type = lib.types.str;
+      type = lib.types.nullOr lib.types.str;
       description = lib.mdDoc ''
         Compression settings to use for the squashfs nix store.
+        `null` disables compression.
       '';
       example = "zstd -Xcompression-level 6";
     };
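
With the type widened to `nullOr`, compression can be switched off entirely (larger image, faster build); a minimal sketch, assuming the option path `isoImage.squashfsCompression` used elsewhere in this module:

```nix
{
  # skip squashfs compression entirely; make-squashfs then passes
  # -no-compression instead of -comp
  isoImage.squashfsCompression = null;
}
```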
diff --git a/nixos/modules/programs/starship.nix b/nixos/modules/programs/starship.nix
index bec3900496fd9..34f6f0882c617 100644
--- a/nixos/modules/programs/starship.nix
+++ b/nixos/modules/programs/starship.nix
@@ -1,13 +1,21 @@
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
   cfg = config.programs.starship;
 
   settingsFormat = pkgs.formats.toml { };
 
-  settingsFile = settingsFormat.generate "starship.toml" cfg.settings;
+  userSettingsFile = settingsFormat.generate "starship.toml" cfg.settings;
+
+  settingsFile = if cfg.presets == [] then userSettingsFile else pkgs.runCommand "starship.toml"
+    {
+      nativeBuildInputs = [ pkgs.yq ];
+    } ''
+    tomlq -s -t 'reduce .[] as $item ({}; . * $item)' \
+      ${lib.concatStringsSep " " (map (f: "${pkgs.starship}/share/starship/presets/${f}.toml") cfg.presets)} \
+      ${userSettingsFile} \
+      > $out
+  '';
 
   initOption =
     if cfg.interactiveOnly then
@@ -18,19 +26,28 @@ let
 in
 {
   options.programs.starship = {
-    enable = mkEnableOption (lib.mdDoc "the Starship shell prompt");
+    enable = lib.mkEnableOption (lib.mdDoc "the Starship shell prompt");
 
-    interactiveOnly = mkOption {
+    interactiveOnly = lib.mkOption {
       default = true;
       example = false;
-      type = types.bool;
+      type = lib.types.bool;
       description = lib.mdDoc ''
         Whether to enable starship only when the shell is interactive.
         Some plugins require this to be set to false to function correctly.
       '';
     };
 
-    settings = mkOption {
+    presets = lib.mkOption {
+      default = [ ];
+      example = [ "nerd-font-symbols" ];
+      type = with lib.types; listOf str;
+      description = lib.mdDoc ''
+        Preset files to be merged with the settings, in order.
+      '';
+    };
+
+    settings = lib.mkOption {
       inherit (settingsFormat) type;
       default = { };
       description = lib.mdDoc ''
@@ -41,7 +58,7 @@ in
     };
   };
 
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
     programs.bash.${initOption} = ''
       if [[ $TERM != "dumb" ]]; then
         # don't set STARSHIP_CONFIG automatically if there's a user-specified
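
A usage sketch for the new `presets` option; preset names refer to files shipped under `share/starship/presets/` in the starship package, and `settings` is merged last:

```nix
{
  programs.starship = {
    enable = true;
    # merged in order into the generated starship.toml
    presets = [ "nerd-font-symbols" ];
    # user settings override anything the presets set
    settings.add_newline = false;
  };
}
```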
diff --git a/nixos/modules/services/admin/pgadmin.nix b/nixos/modules/services/admin/pgadmin.nix
index 3d820db59f4cb..ceb5655dc562f 100644
--- a/nixos/modules/services/admin/pgadmin.nix
+++ b/nixos/modules/services/admin/pgadmin.nix
@@ -44,12 +44,19 @@ in
 
     initialPasswordFile = mkOption {
       description = lib.mdDoc ''
-        Initial password file for the pgAdmin account.
+        Initial password file for the pgAdmin account. The minimum length
+        defaults to 6; see `services.pgadmin.minimumPasswordLength`.
         NOTE: Should be a string, not a store path, to prevent the password from being world-readable
       '';
       type = types.path;
     };
 
+    minimumPasswordLength = mkOption {
+      description = lib.mdDoc "Minimum length of the password";
+      type = types.int;
+      default = 6;
+    };
+
     emailServer = {
       enable = mkOption {
         description = lib.mdDoc ''
@@ -116,6 +123,7 @@ in
 
     services.pgadmin.settings = {
       DEFAULT_SERVER_PORT = cfg.port;
+      PASSWORD_LENGTH_MIN = cfg.minimumPasswordLength;
       SERVER_MODE = true;
       UPGRADE_CHECK_ENABLED = false;
     } // (optionalAttrs cfg.openFirewall {
@@ -141,6 +149,14 @@ in
 
       preStart = ''
         # NOTE: this is idempotent (aka running it twice has no effect)
+        # Check the password length here to prevent pgadmin from starting
+        # and presenting a hard-to-find error message
+        # see https://github.com/NixOS/nixpkgs/issues/270624
+        PW_LENGTH=$(wc -m < ${escapeShellArg cfg.initialPasswordFile})
+        if [ $PW_LENGTH -lt ${toString cfg.minimumPasswordLength} ]; then
+            echo "Password must be at least ${toString cfg.minimumPasswordLength} characters long"
+            exit 1
+        fi
         (
           # Email address:
           echo ${escapeShellArg cfg.initialEmail}
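
A configuration sketch showing the new option together with the preStart check above; the secret path is a placeholder:

```nix
{
  services.pgadmin = {
    enable = true;
    initialEmail = "admin@example.org";
    # must contain at least minimumPasswordLength characters,
    # otherwise the unit now fails in preStart with a clear message
    initialPasswordFile = "/run/secrets/pgadmin-password"; # hypothetical path
    minimumPasswordLength = 12;
  };
}
```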
diff --git a/nixos/modules/services/mail/dovecot.nix b/nixos/modules/services/mail/dovecot.nix
index abbb2f32e6ccc..25c7017a1d258 100644
--- a/nixos/modules/services/mail/dovecot.nix
+++ b/nixos/modules/services/mail/dovecot.nix
@@ -1,8 +1,11 @@
 { options, config, lib, pkgs, ... }:
 
-with lib;
-
 let
+  inherit (lib) any attrValues concatMapStringsSep concatStrings
+    concatStringsSep flatten imap1 isList literalExpression mapAttrsToList
+    mkEnableOption mkIf mkOption mkRemovedOptionModule optional optionalAttrs
+    optionalString singleton types;
+
   cfg = config.services.dovecot2;
   dovecotPkg = pkgs.dovecot;
 
@@ -113,6 +116,36 @@ let
       ''
     )
 
+    ''
+      plugin {
+        sieve_plugins = ${concatStringsSep " " cfg.sieve.plugins}
+        sieve_extensions = ${concatStringsSep " " (map (el: "+${el}") cfg.sieve.extensions)}
+        sieve_global_extensions = ${concatStringsSep " " (map (el: "+${el}") cfg.sieve.globalExtensions)}
+    ''
+    (optionalString (cfg.imapsieve.mailbox != []) ''
+      ${
+        concatStringsSep "\n" (flatten (imap1 (
+            idx: el:
+              singleton "imapsieve_mailbox${toString idx}_name = ${el.name}"
+              ++ optional (el.from != null) "imapsieve_mailbox${toString idx}_from = ${el.from}"
+              ++ optional (el.causes != null) "imapsieve_mailbox${toString idx}_causes = ${el.causes}"
+              ++ optional (el.before != null) "imapsieve_mailbox${toString idx}_before = file:${stateDir}/imapsieve/before/${baseNameOf el.before}"
+              ++ optional (el.after != null) "imapsieve_mailbox${toString idx}_after = file:${stateDir}/imapsieve/after/${baseNameOf el.after}"
+          )
+          cfg.imapsieve.mailbox))
+      }
+    '')
+    (optionalString (cfg.sieve.pipeBins != []) ''
+        sieve_pipe_bin_dir = ${pkgs.linkFarm "sieve-pipe-bins" (map (el: {
+          name = builtins.unsafeDiscardStringContext (baseNameOf el);
+          path = el;
+        })
+        cfg.sieve.pipeBins)}
+    '')
+    ''
+      }
+    ''
+
     cfg.extraConfig
   ];
 
@@ -343,6 +376,104 @@ in
       description = lib.mdDoc "Quota limit for the user in bytes. Supports suffixes b, k, M, G, T and %.";
     };
 
+    imapsieve.mailbox = mkOption {
+      default = [];
+      description = "Configure Sieve filtering rules on IMAP actions";
+      type = types.listOf (types.submodule ({ config, ... }: {
+        options = {
+          name = mkOption {
+            description = ''
+              This setting configures the name of a mailbox for which administrator scripts are configured.
+
+              The settings defined hereafter with matching sequence numbers apply to the mailbox named by this setting.
+
+              This setting supports wildcards with a syntax compatible with the IMAP LIST command, meaning that this setting can apply to multiple or even all ("*") mailboxes.
+            '';
+            example = "Junk";
+            type = types.str;
+          };
+
+          from = mkOption {
+            default = null;
+            description = ''
+              Only execute the administrator Sieve scripts for the mailbox configured with services.dovecot2.imapsieve.mailbox.<name>.name when the message originates from the indicated mailbox.
+
+              This setting supports wildcards with a syntax compatible with the IMAP LIST command, meaning that this setting can apply to multiple or even all ("*") mailboxes.
+            '';
+            example = "*";
+            type = types.nullOr types.str;
+          };
+
+          causes = mkOption {
+            default = null;
+            description = ''
+              Only execute the administrator Sieve scripts for the mailbox configured with services.dovecot2.imapsieve.mailbox.<name>.name when one of the listed IMAPSIEVE causes applies.
+
+              This has no effect on the user script, which is always executed no matter the cause.
+            '';
+            example = "COPY";
+            type = types.nullOr (types.enum [ "APPEND" "COPY" "FLAG" ]);
+          };
+
+          before = mkOption {
+            default = null;
+            description = ''
+              When an IMAP event of interest occurs, this Sieve script is executed before the user script, if any.
+
+              This setting specifies the location of a single Sieve script. Its semantics are similar to sieve_before: the specified scripts form a sequence together with the user script, in which the next script is only executed when an (implicit) keep action is executed.
+            '';
+            example = literalExpression "./report-spam.sieve";
+            type = types.nullOr types.path;
+          };
+
+          after = mkOption {
+            default = null;
+            description = ''
+              When an IMAP event of interest occurs, this Sieve script is executed after the user script, if any.
+
+              This setting specifies the location of a single Sieve script. Its semantics are similar to sieve_after: the specified scripts form a sequence together with the user script, in which the next script is only executed when an (implicit) keep action is executed.
+            '';
+            example = literalExpression "./report-spam.sieve";
+            type = types.nullOr types.path;
+          };
+        };
+      }));
+    };
+
+    sieve = {
+      plugins = mkOption {
+        default = [];
+        example = [ "sieve_extprograms" ];
+        description = "Sieve plugins to load";
+        type = types.listOf types.str;
+      };
+
+      extensions = mkOption {
+        default = [];
+        description = "Sieve extensions for use in user scripts";
+        example = [ "notify" "imapflags" "vnd.dovecot.filter" ];
+        type = types.listOf types.str;
+      };
+
+      globalExtensions = mkOption {
+        default = [];
+        example = [ "vnd.dovecot.environment" ];
+        description = "Sieve extensions for use in global scripts";
+        type = types.listOf types.str;
+      };
+
+      pipeBins = mkOption {
+        default = [];
+        example = literalExpression ''
+          map lib.getExe [
+            (pkgs.writeShellScriptBin "learn-ham.sh" "exec ''${pkgs.rspamd}/bin/rspamc learn_ham")
+            (pkgs.writeShellScriptBin "learn-spam.sh" "exec ''${pkgs.rspamd}/bin/rspamc learn_spam")
+          ]
+        '';
+        description = "Programs available for use by the vnd.dovecot.pipe extension";
+        type = types.listOf types.path;
+      };
+    };
   };
 
 
@@ -353,14 +484,23 @@ in
       enable = true;
       params.dovecot2 = {};
     };
-    services.dovecot2.protocols =
-      optional cfg.enableImap "imap"
-      ++ optional cfg.enablePop3 "pop3"
-      ++ optional cfg.enableLmtp "lmtp";
-
-    services.dovecot2.mailPlugins = mkIf cfg.enableQuota {
-      globally.enable = [ "quota" ];
-      perProtocol.imap.enable = [ "imap_quota" ];
+
+    services.dovecot2 = {
+      protocols =
+        optional cfg.enableImap "imap"
+        ++ optional cfg.enablePop3 "pop3"
+        ++ optional cfg.enableLmtp "lmtp";
+
+      mailPlugins = mkIf cfg.enableQuota {
+        globally.enable = [ "quota" ];
+        perProtocol.imap.enable = [ "imap_quota" ];
+      };
+
+      sieve.plugins =
+        optional (cfg.imapsieve.mailbox != []) "sieve_imapsieve"
+        ++ optional (cfg.sieve.pipeBins != []) "sieve_extprograms";
+
+      sieve.globalExtensions = optional (cfg.sieve.pipeBins != []) "vnd.dovecot.pipe";
     };
 
     users.users = {
@@ -415,7 +555,7 @@ in
       # (should be 0) so that the compiled sieve script is newer than
       # the source file and Dovecot won't try to compile it.
       preStart = ''
-        rm -rf ${stateDir}/sieve
+        rm -rf ${stateDir}/sieve ${stateDir}/imapsieve
       '' + optionalString (cfg.sieveScripts != {}) ''
         mkdir -p ${stateDir}/sieve
         ${concatStringsSep "\n" (
@@ -432,6 +572,29 @@ in
         ) cfg.sieveScripts
       )}
         chown -R '${cfg.mailUser}:${cfg.mailGroup}' '${stateDir}/sieve'
+      ''
+      + optionalString (cfg.imapsieve.mailbox != []) ''
+        mkdir -p ${stateDir}/imapsieve/{before,after}
+
+        ${
+          concatMapStringsSep "\n"
+            (el:
+              optionalString (el.before != null) ''
+                cp -p ${el.before} ${stateDir}/imapsieve/before/${baseNameOf el.before}
+                ${pkgs.dovecot_pigeonhole}/bin/sievec '${stateDir}/imapsieve/before/${baseNameOf el.before}'
+              ''
+              + optionalString (el.after != null) ''
+                cp -p ${el.after} ${stateDir}/imapsieve/after/${baseNameOf el.after}
+                ${pkgs.dovecot_pigeonhole}/bin/sievec '${stateDir}/imapsieve/after/${baseNameOf el.after}'
+              ''
+            )
+            cfg.imapsieve.mailbox
+        }
+
+        ${
+          optionalString (cfg.mailUser != null && cfg.mailGroup != null)
+            "chown -R '${cfg.mailUser}:${cfg.mailGroup}' '${stateDir}/imapsieve'"
+        }
       '';
     };
 
@@ -459,4 +622,5 @@ in
 
   };
 
+  meta.maintainers = [ lib.maintainers.dblsaiko ];
 }
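
Putting the new options together, a sketch built from the examples in this diff: run an administrator script when mail is copied into Junk, and expose an rspamc wrapper to the `vnd.dovecot.pipe` extension (the module now adds `sieve_imapsieve` and `sieve_extprograms` to `sieve.plugins` automatically):

```nix
{ pkgs, lib, ... }: {
  services.dovecot2 = {
    enable = true;
    # ./report-spam.sieve is compiled into the state directory at
    # service start and runs before the user script
    imapsieve.mailbox = [{
      name = "Junk";
      causes = "COPY";
      before = ./report-spam.sieve;
    }];
    # programs callable from Sieve via the vnd.dovecot.pipe extension
    sieve.pipeBins = map lib.getExe [
      (pkgs.writeShellScriptBin "learn-spam.sh" "exec ${pkgs.rspamd}/bin/rspamc learn_spam")
    ];
  };
}
```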
diff --git a/nixos/modules/services/misc/ntfy-sh.nix b/nixos/modules/services/misc/ntfy-sh.nix
index 98134e94eeede..b8b0772401156 100644
--- a/nixos/modules/services/misc/ntfy-sh.nix
+++ b/nixos/modules/services/misc/ntfy-sh.nix
@@ -79,12 +79,6 @@ in
         cache-file = mkDefault "/var/lib/ntfy-sh/cache-file.db";
       };
 
-      systemd.tmpfiles.rules = [
-        "f ${cfg.settings.auth-file} 0600 ${cfg.user} ${cfg.group} - -"
-        "d ${cfg.settings.attachment-cache-dir} 0700 ${cfg.user} ${cfg.group} - -"
-        "f ${cfg.settings.cache-file} 0600 ${cfg.user} ${cfg.group} - -"
-      ];
-
       systemd.services.ntfy-sh = {
         description = "Push notifications server";
 
diff --git a/nixos/modules/services/networking/miniupnpd.nix b/nixos/modules/services/networking/miniupnpd.nix
index 64aacaf350404..116298dc6b1db 100644
--- a/nixos/modules/services/networking/miniupnpd.nix
+++ b/nixos/modules/services/networking/miniupnpd.nix
@@ -13,8 +13,17 @@ let
       listening_ip=${range}
     '') cfg.internalIPs}
 
+    ${lib.optionalString (firewall == "nftables") ''
+      upnp_table_name=miniupnpd
+      upnp_nat_table_name=miniupnpd
+    ''}
+
     ${cfg.appendConfig}
   '';
+  firewall = if config.networking.nftables.enable then "nftables" else "iptables";
+  miniupnpd = pkgs.miniupnpd.override { inherit firewall; };
+  firewallScripts = lib.optionals (firewall == "iptables")
+    ([ "iptables" ] ++ lib.optional config.networking.enableIPv6 "ip6tables");
 in
 {
   options = {
@@ -57,20 +66,50 @@ in
   };
 
   config = mkIf cfg.enable {
-    networking.firewall.extraCommands = ''
-      ${pkgs.bash}/bin/bash -x ${pkgs.miniupnpd}/etc/miniupnpd/iptables_init.sh -i ${cfg.externalInterface}
-    '';
+    networking.firewall.extraCommands = lib.mkIf (firewallScripts != []) (builtins.concatStringsSep "\n" (map (fw: ''
+      EXTIF=${cfg.externalInterface} ${pkgs.bash}/bin/bash -x ${miniupnpd}/etc/miniupnpd/${fw}_init.sh
+    '') firewallScripts));
+
+    networking.firewall.extraStopCommands = lib.mkIf (firewallScripts != []) (builtins.concatStringsSep "\n" (map (fw: ''
+      EXTIF=${cfg.externalInterface} ${pkgs.bash}/bin/bash -x ${miniupnpd}/etc/miniupnpd/${fw}_removeall.sh
+    '') firewallScripts));
 
-    networking.firewall.extraStopCommands = ''
-      ${pkgs.bash}/bin/bash -x ${pkgs.miniupnpd}/etc/miniupnpd/iptables_removeall.sh -i ${cfg.externalInterface}
-    '';
+    networking.nftables = lib.mkIf (firewall == "nftables") {
+      # see nft_init in ${miniupnpd-nftables}/etc/miniupnpd
+      tables.miniupnpd = {
+        family = "inet";
+        # The following is omitted because the firewall is expected to be responsible for it.
+        #
+        # chain forward {
+        #   type filter hook forward priority filter; policy drop;
+        #   jump miniupnpd
+        # }
+        #
+        # Otherwise, it quickly gets ugly with (potentially) two forward chains with "policy drop".
+        # This means the chain "miniupnpd" never actually gets triggered and is simply there to satisfy
+        # miniupnpd. If you're doing it yourself (without networking.firewall), the easiest way to get
+        # it to work is adding a rule "ct status dnat accept" - this is what networking.firewall does.
+        # If you don't want to simply accept forwarding for all "ct status dnat" packets, override
+        # upnp_table_name with whatever your table is, create a chain "miniupnpd" in your table and
+        # jump into it from your forward chain.
+        content = ''
+          chain miniupnpd {}
+          chain prerouting_miniupnpd {
+            type nat hook prerouting priority dstnat; policy accept;
+          }
+          chain postrouting_miniupnpd {
+            type nat hook postrouting priority srcnat; policy accept;
+          }
+        '';
+      };
+    };
 
     systemd.services.miniupnpd = {
       description = "MiniUPnP daemon";
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
-        ExecStart = "${pkgs.miniupnpd}/bin/miniupnpd -f ${configFile}";
+        ExecStart = "${miniupnpd}/bin/miniupnpd -f ${configFile}";
         PIDFile = "/run/miniupnpd.pid";
         Type = "forking";
       };
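
A minimal sketch of the nftables path; with `networking.nftables.enable = true` the module now builds miniupnpd against nftables and provides the chains above (interface and subnet are placeholders):

```nix
{
  networking.nftables.enable = true; # selects the nftables firewall backend
  services.miniupnpd = {
    enable = true;
    externalInterface = "eth1";
    internalIPs = [ "192.168.1.0/24" ];
  };
}
```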
diff --git a/nixos/modules/services/networking/networkmanager.nix b/nixos/modules/services/networking/networkmanager.nix
index d32712c8243d7..c96439cf2641a 100644
--- a/nixos/modules/services/networking/networkmanager.nix
+++ b/nixos/modules/services/networking/networkmanager.nix
@@ -565,7 +565,10 @@ in
       wantedBy = [ "network-online.target" ];
     };
 
-    systemd.services.ModemManager.aliases = [ "dbus-org.freedesktop.ModemManager1.service" ];
+    systemd.services.ModemManager = {
+      aliases = [ "dbus-org.freedesktop.ModemManager1.service" ];
+      path = lib.optionals (cfg.fccUnlockScripts != []) [ pkgs.libqmi pkgs.libmbim ];
+    };
 
     systemd.services.NetworkManager-dispatcher = {
       wantedBy = [ "network.target" ];
diff --git a/nixos/modules/services/web-apps/c2fmzq-server.nix b/nixos/modules/services/web-apps/c2fmzq-server.nix
index 2749c2a5a87aa..54c4bf1b09ca5 100644
--- a/nixos/modules/services/web-apps/c2fmzq-server.nix
+++ b/nixos/modules/services/web-apps/c2fmzq-server.nix
@@ -6,7 +6,7 @@ let
   cfg = config.services.c2fmzq-server;
 
   argsFormat = {
-    type = with lib.types; nullOr (oneOf [ bool int str ]);
+    type = with lib.types; attrsOf (nullOr (oneOf [ bool int str ]));
     generate = lib.cli.toGNUCommandLineShell { };
   };
 in {
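
With the type corrected from a single scalar to `attrsOf`, multiple freeform flags evaluate again; a sketch mirroring the updated test further down:

```nix
{
  services.c2fmzq-server.settings = {
    verbose = 3;
    allow-new-accounts = true;
    auto-approve-new-accounts = true;
  };
}
```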
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index 91b17bfc09fe9..6799de6c7d96c 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -334,8 +334,8 @@ let
           + optionalString vhost.default "default_server "
           + optionalString vhost.reuseport "reuseport "
           + optionalString (extraParameters != []) (concatStringsSep " "
-            (let inCompatibleParameters = [ "ssl" "proxy_protocol" "http2" ];
-                isCompatibleParameter = param: !(any (p: p == param) inCompatibleParameters);
+            (let inCompatibleParameters = [ "accept_filter" "backlog" "deferred" "fastopen" "http2" "proxy_protocol" "so_keepalive" "ssl" ];
+                isCompatibleParameter = param: !(any (p: lib.hasPrefix p param) inCompatibleParameters);
             in filter isCompatibleParameter extraParameters))
           + ";"))
           + "
@@ -408,12 +408,6 @@ let
             ssl_conf_command Options KTLS;
           ''}
 
-          ${optionalString (hasSSL && vhost.quic && vhost.http3)
-            # Advertise that HTTP/3 is available
-          ''
-            add_header Alt-Svc 'h3=":$server_port"; ma=86400';
-          ''}
-
           ${mkBasicAuth vhostName vhost}
 
           ${optionalString (vhost.root != null) "root ${vhost.root};"}
diff --git a/nixos/modules/services/web-servers/nginx/vhost-options.nix b/nixos/modules/services/web-servers/nginx/vhost-options.nix
index 64a95afab9f40..ea98439d3823d 100644
--- a/nixos/modules/services/web-servers/nginx/vhost-options.nix
+++ b/nixos/modules/services/web-servers/nginx/vhost-options.nix
@@ -235,9 +235,9 @@ with lib;
         which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`
         and activate the QUIC transport protocol
         `services.nginx.virtualHosts.<name>.quic = true;`.
-        Note that HTTP/3 support is experimental and
-        *not* yet recommended for production.
+        Note that HTTP/3 support is experimental and *not* yet recommended for production.
         Read more at https://quic.nginx.org/
+        HTTP/3 availability must be manually advertised, preferably in each location block.
       '';
     };
 
@@ -250,8 +250,7 @@ with lib;
         which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`
         and activate the QUIC transport protocol
         `services.nginx.virtualHosts.<name>.quic = true;`.
-        Note that special application protocol support is experimental and
-        *not* yet recommended for production.
+        Note that special application protocol support is experimental and *not* yet recommended for production.
         Read more at https://quic.nginx.org/
       '';
     };
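
Since the module no longer emits Alt-Svc itself, an HTTP/3 virtual host now advertises availability explicitly; a sketch combining the options referenced above:

```nix
{ pkgs, ... }: {
  services.nginx = {
    enable = true;
    package = pkgs.nginxQuic;
    virtualHosts."example.org" = {
      quic = true;
      http3 = true;
      # advertise HTTP/3 manually, per the 24.05 release note
      locations."/".extraConfig = ''
        add_header Alt-Svc 'h3=":$server_port"; ma=86400';
      '';
    };
  };
}
```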
diff --git a/nixos/modules/tasks/filesystems/zfs.nix b/nixos/modules/tasks/filesystems/zfs.nix
index b38f228fc1606..bc8b8fdf8144f 100644
--- a/nixos/modules/tasks/filesystems/zfs.nix
+++ b/nixos/modules/tasks/filesystems/zfs.nix
@@ -71,7 +71,7 @@ let
     done
     poolReady() {
       pool="$1"
-      state="$("${zpoolCmd}" import 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
+      state="$("${zpoolCmd}" import -d "${cfgZfs.devNodes}" 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
       if [[ "$state" = "ONLINE" ]]; then
         return 0
       else
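
The import check now honors the same device directory as the rest of the module; `cfgZfs.devNodes` corresponds to the `boot.zfs.devNodes` option, sketched here with its usual default:

```nix
{
  # poolReady and the actual `zpool import` now scan the same directory
  boot.zfs.devNodes = "/dev/disk/by-id";
}
```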
diff --git a/nixos/modules/tasks/trackpoint.nix b/nixos/modules/tasks/trackpoint.nix
index 317613b847927..b3f6f32eaa473 100644
--- a/nixos/modules/tasks/trackpoint.nix
+++ b/nixos/modules/tasks/trackpoint.nix
@@ -88,7 +88,7 @@ with lib;
         serviceConfig.Type = "oneshot";
         serviceConfig.RemainAfterExit = true;
         serviceConfig.ExecStart = ''
-          ${config.systemd.package}/bin/udevadm trigger --attr-match=name="${cfg.device}
+          ${config.systemd.package}/bin/udevadm trigger --attr-match=name="${cfg.device}"
         '';
       };
     })
diff --git a/nixos/modules/virtualisation/libvirtd.nix b/nixos/modules/virtualisation/libvirtd.nix
index e195ff937d68e..217242a8fbd22 100644
--- a/nixos/modules/virtualisation/libvirtd.nix
+++ b/nixos/modules/virtualisation/libvirtd.nix
@@ -116,6 +116,15 @@ let
           QEMU's swtpm options.
         '';
       };
+
+      vhostUserPackages = mkOption {
+        type = types.listOf types.package;
+        default = [ ];
+        example = lib.literalExpression "[ pkgs.virtiofsd ]";
+        description = lib.mdDoc ''
+          Packages containing out-of-tree vhost-user drivers.
+        '';
+      };
     };
   };
 
@@ -502,6 +511,14 @@ in
     # https://libvirt.org/daemons.html#monolithic-systemd-integration
     systemd.sockets.libvirtd.wantedBy = [ "sockets.target" ];
 
+    systemd.tmpfiles.rules = let
+      vhostUserCollection = pkgs.buildEnv {
+        name = "vhost-user";
+        paths = cfg.qemu.vhostUserPackages;
+        pathsToLink = [ "/share/qemu/vhost-user" ];
+      };
+    in [ "L+ /var/lib/qemu/vhost-user - - - - ${vhostUserCollection}/share/qemu/vhost-user" ];
+
     security.polkit = {
       enable = true;
       extraConfig = ''
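
A minimal sketch using the option's own example; the drivers end up linked under `/var/lib/qemu/vhost-user`, where QEMU discovers vhost-user backends:

```nix
{ pkgs, ... }: {
  virtualisation.libvirtd = {
    enable = true;
    # virtiofsd becomes discoverable via the tmpfiles symlink above
    qemu.vhostUserPackages = [ pkgs.virtiofsd ];
  };
}
```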
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index be394c19ebef0..33f13c3d1181c 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -583,6 +583,7 @@ in {
   nginx = handleTest ./nginx.nix {};
   nginx-auth = handleTest ./nginx-auth.nix {};
   nginx-etag = handleTest ./nginx-etag.nix {};
+  nginx-etag-compression = handleTest ./nginx-etag-compression.nix {};
   nginx-globalredirect = handleTest ./nginx-globalredirect.nix {};
   nginx-http3 = handleTest ./nginx-http3.nix {};
   nginx-modsecurity = handleTest ./nginx-modsecurity.nix {};
@@ -604,6 +605,7 @@ in {
   nixos-generate-config = handleTest ./nixos-generate-config.nix {};
   nixos-rebuild-install-bootloader = handleTestOn ["x86_64-linux"] ./nixos-rebuild-install-bootloader.nix {};
   nixos-rebuild-specialisations = handleTestOn ["x86_64-linux"] ./nixos-rebuild-specialisations.nix {};
+  nixos-rebuild-target-host = handleTest ./nixos-rebuild-target-host.nix {};
   nixpkgs = pkgs.callPackage ../modules/misc/nixpkgs/test.nix { inherit evalMinimalConfig; };
   nixseparatedebuginfod = handleTest ./nixseparatedebuginfod.nix {};
   node-red = handleTest ./node-red.nix {};
@@ -617,6 +619,7 @@ in {
   nscd = handleTest ./nscd.nix {};
   nsd = handleTest ./nsd.nix {};
   ntfy-sh = handleTest ./ntfy-sh.nix {};
+  ntfy-sh-migration = handleTest ./ntfy-sh-migration.nix {};
   nzbget = handleTest ./nzbget.nix {};
   nzbhydra2 = handleTest ./nzbhydra2.nix {};
   oh-my-zsh = handleTest ./oh-my-zsh.nix {};
@@ -910,7 +913,8 @@ in {
   unbound = handleTest ./unbound.nix {};
   unifi = handleTest ./unifi.nix {};
   unit-php = handleTest ./web-servers/unit-php.nix {};
-  upnp = handleTest ./upnp.nix {};
+  upnp.iptables = handleTest ./upnp.nix { useNftables = false; };
+  upnp.nftables = handleTest ./upnp.nix { useNftables = true; };
   uptermd = handleTest ./uptermd.nix {};
   uptime-kuma = handleTest ./uptime-kuma.nix {};
   usbguard = handleTest ./usbguard.nix {};
diff --git a/nixos/tests/c2fmzq.nix b/nixos/tests/c2fmzq.nix
index d8ec816c7d29c..59addbde24ed5 100644
--- a/nixos/tests/c2fmzq.nix
+++ b/nixos/tests/c2fmzq.nix
@@ -9,6 +9,9 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
       passphraseFile = builtins.toFile "pwfile" "hunter2"; # don't do this on real deployments
       settings = {
         verbose = 3; # debug
+        # make sure multiple freeform options evaluate
+        allow-new-accounts = true;
+        auto-approve-new-accounts = true;
       };
     };
     environment = {
diff --git a/nixos/tests/nginx-etag-compression.nix b/nixos/tests/nginx-etag-compression.nix
new file mode 100644
index 0000000000000..67493ae299841
--- /dev/null
+++ b/nixos/tests/nginx-etag-compression.nix
@@ -0,0 +1,45 @@
+import ./make-test-python.nix {
+  name = "nginx-etag-compression";
+
+  nodes.machine = { pkgs, lib, ... }: {
+    services.nginx = {
+      enable = true;
+      recommendedGzipSettings = true;
+      virtualHosts.default = {
+        root = pkgs.runCommandLocal "testdir" {} ''
+          mkdir "$out"
+          cat > "$out/index.html" <<EOF
+          Hello, world!
+          Hello, world!
+          Hello, world!
+          Hello, world!
+          Hello, world!
+          Hello, world!
+          Hello, world!
+          Hello, world!
+          EOF
+          ${pkgs.gzip}/bin/gzip -k "$out/index.html"
+        '';
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    machine.wait_for_unit("nginx")
+    machine.wait_for_open_port(80)
+
+    etag_plain = machine.succeed("curl -s -w'%header{etag}' -o/dev/null -H 'Accept-encoding:' http://127.0.0.1/")
+    etag_gzip = machine.succeed("curl -s -w'%header{etag}' -o/dev/null -H 'Accept-encoding:gzip' http://127.0.0.1/")
+
+    with subtest("different representations have different etags"):
+      assert etag_plain != etag_gzip, f"etags should differ: {etag_plain} == {etag_gzip}"
+
+    with subtest("etag for uncompressed response is reproducible"):
+      etag_plain_repeat = machine.succeed("curl -s -w'%header{etag}' -o/dev/null -H 'Accept-encoding:' http://127.0.0.1/")
+      assert etag_plain == etag_plain_repeat, f"etags should be the same: {etag_plain} != {etag_plain_repeat}"
+
+    with subtest("etag for compressed response is reproducible"):
+      etag_gzip_repeat = machine.succeed("curl -s -w'%header{etag}' -o/dev/null -H 'Accept-encoding:gzip' http://127.0.0.1/")
+      assert etag_gzip == etag_gzip_repeat, f"etags should be the same: {etag_gzip} != {etag_gzip_repeat}"
+  '';
+}
diff --git a/nixos/tests/nixos-rebuild-target-host.nix b/nixos/tests/nixos-rebuild-target-host.nix
new file mode 100644
index 0000000000000..8d60b788abf38
--- /dev/null
+++ b/nixos/tests/nixos-rebuild-target-host.nix
@@ -0,0 +1,136 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nixos-rebuild-target-host";
+
+  nodes = {
+    deployer = { lib, ... }: let
+      inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+    in {
+      imports = [ ../modules/profiles/installation-device.nix ];
+
+      nix.settings = {
+        substituters = lib.mkForce [ ];
+        hashed-mirrors = null;
+        connect-timeout = 1;
+      };
+
+      environment.systemPackages = [ pkgs.passh ];
+
+      system.includeBuildDependencies = true;
+
+      virtualisation = {
+        cores = 2;
+        memorySize = 2048;
+      };
+
+      system.build.privateKey = snakeOilPrivateKey;
+      system.build.publicKey = snakeOilPublicKey;
+    };
+
+    target = { nodes, lib, ... }: let
+      targetConfig = {
+        documentation.enable = false;
+        services.openssh.enable = true;
+
+        users.users.root.openssh.authorizedKeys.keys = [ nodes.deployer.system.build.publicKey ];
+        users.users.alice.openssh.authorizedKeys.keys = [ nodes.deployer.system.build.publicKey ];
+        users.users.bob.openssh.authorizedKeys.keys = [ nodes.deployer.system.build.publicKey ];
+
+        users.users.alice.extraGroups = [ "wheel" ];
+        users.users.bob.extraGroups = [ "wheel" ];
+
+        # Disable sudo for root to ensure sudo isn't called without `--use-remote-sudo`
+        security.sudo.extraRules = lib.mkForce [
+          { groups = [ "wheel" ]; commands = [ { command = "ALL"; } ]; }
+          { users = [ "alice" ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" ]; } ]; }
+        ];
+
+        nix.settings.trusted-users = [ "@wheel" ];
+      };
+    in {
+      imports = [ ./common/user-account.nix ];
+
+      config = lib.mkMerge [
+        targetConfig
+        {
+          system.build = {
+            inherit targetConfig;
+          };
+
+          networking.hostName = "target";
+        }
+      ];
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      sshConfig = builtins.toFile "ssh.conf" ''
+        UserKnownHostsFile=/dev/null
+        StrictHostKeyChecking=no
+      '';
+
+      targetConfigJSON = pkgs.writeText "target-configuration.json"
+        (builtins.toJSON nodes.target.system.build.targetConfig);
+
+      targetNetworkJSON = pkgs.writeText "target-network.json"
+        (builtins.toJSON nodes.target.system.build.networkConfig);
+
+      configFile = hostname: pkgs.writeText "configuration.nix" ''
+        { lib, modulesPath, ... }: {
+          imports = [
+            (modulesPath + "/virtualisation/qemu-vm.nix")
+            (modulesPath + "/testing/test-instrumentation.nix")
+            (modulesPath + "/../tests/common/user-account.nix")
+            (lib.modules.importJSON ./target-configuration.json)
+            (lib.modules.importJSON ./target-network.json)
+            ./hardware-configuration.nix
+          ];
+
+          boot.loader.grub = {
+            enable = true;
+            device = "/dev/vda";
+            forceInstall = true;
+          };
+
+          # this will be asserted
+          networking.hostName = "${hostname}";
+        }
+      '';
+    in
+    ''
+      start_all()
+      target.wait_for_open_port(22)
+
+      deployer.wait_until_succeeds("ping -c1 target")
+      deployer.succeed("install -Dm 600 ${nodes.deployer.system.build.privateKey} ~root/.ssh/id_ecdsa")
+      deployer.succeed("install ${sshConfig} ~root/.ssh/config")
+
+      target.succeed("nixos-generate-config")
+      deployer.succeed("scp alice@target:/etc/nixos/hardware-configuration.nix /root/hardware-configuration.nix")
+
+      deployer.copy_from_host("${configFile "config-1-deployed"}", "/root/configuration-1.nix")
+      deployer.copy_from_host("${configFile "config-2-deployed"}", "/root/configuration-2.nix")
+      deployer.copy_from_host("${configFile "config-3-deployed"}", "/root/configuration-3.nix")
+      deployer.copy_from_host("${targetNetworkJSON}", "/root/target-network.json")
+      deployer.copy_from_host("${targetConfigJSON}", "/root/target-configuration.json")
+
+      # Ensure sudo is disabled for root
+      target.fail("sudo true")
+
+      # This test also ensures that sudo is not called without --use-remote-sudo
+      with subtest("Deploy to root@target"):
+        deployer.succeed("nixos-rebuild switch -I nixos-config=/root/configuration-1.nix --target-host root@target &>/dev/console")
+        target_hostname = deployer.succeed("ssh alice@target cat /etc/hostname").rstrip()
+        assert target_hostname == "config-1-deployed", f"{target_hostname=}"
+
+      with subtest("Deploy to alice@target with passwordless sudo"):
+        deployer.succeed("nixos-rebuild switch -I nixos-config=/root/configuration-2.nix --target-host alice@target --use-remote-sudo &>/dev/console")
+        target_hostname = deployer.succeed("ssh alice@target cat /etc/hostname").rstrip()
+        assert target_hostname == "config-2-deployed", f"{target_hostname=}"
+
+      with subtest("Deploy to bob@target with password based sudo"):
+        deployer.succeed("passh -c 3 -C -p ${nodes.target.users.users.bob.password} -P \"\[sudo\] password\" nixos-rebuild switch -I nixos-config=/root/configuration-3.nix --target-host bob@target --use-remote-sudo &>/dev/console")
+        target_hostname = deployer.succeed("ssh alice@target cat /etc/hostname").rstrip()
+        assert target_hostname == "config-3-deployed", f"{target_hostname=}"
+    '';
+})
diff --git a/nixos/tests/ntfy-sh-migration.nix b/nixos/tests/ntfy-sh-migration.nix
new file mode 100644
index 0000000000000..de6660052d679
--- /dev/null
+++ b/nixos/tests/ntfy-sh-migration.nix
@@ -0,0 +1,77 @@
+# the ntfy-sh module switched to DynamicUser=true. this test ensures that
+# the migration does not break existing setups.
+#
+# this test works by doing a migration and asserting that ntfy-sh runs
+# properly. first, ntfy-sh is configured to use a static user and group.
+# then ntfy-sh is started and tested. after that, ntfy-sh is shut down and a
+# systemd drop-in configuration file is used to update the service
+# configuration to use DynamicUser=true. then ntfy-sh is started again and tested.
+
+import ./make-test-python.nix {
+  name = "ntfy-sh";
+
+  nodes.machine = {
+    lib,
+    pkgs,
+    ...
+  }: {
+    environment.etc."ntfy-sh-dynamic-user.conf".text = ''
+      [Service]
+      Group=new-ntfy-sh
+      User=new-ntfy-sh
+      DynamicUser=true
+    '';
+
+    services.ntfy-sh.enable = true;
+    services.ntfy-sh.settings.base-url = "http://localhost:2586";
+
+    systemd.services.ntfy-sh.serviceConfig = {
+      DynamicUser = lib.mkForce false;
+      ExecStartPre = [
+        "${pkgs.coreutils}/bin/id"
+        "${pkgs.coreutils}/bin/ls -lahd /var/lib/ntfy-sh/"
+        "${pkgs.coreutils}/bin/ls -lah /var/lib/ntfy-sh/"
+      ];
+      Group = lib.mkForce "old-ntfy-sh";
+      User = lib.mkForce "old-ntfy-sh";
+    };
+
+    users.users.old-ntfy-sh = {
+      isSystemUser = true;
+      group = "old-ntfy-sh";
+    };
+
+    users.groups.old-ntfy-sh = {};
+  };
+
+  testScript = ''
+    import json
+
+    msg = "Test notification"
+
+    def test_ntfysh():
+      machine.wait_for_unit("ntfy-sh.service")
+      machine.wait_for_open_port(2586)
+
+      machine.succeed(f"curl -d '{msg}' localhost:2586/test")
+
+      text = machine.succeed("curl -s localhost:2586/test/json?poll=1")
+      for line in text.splitlines():
+        notif = json.loads(line)
+        assert msg == notif["message"], "Wrong message"
+
+      machine.succeed("ntfy user list")
+
+    machine.wait_for_unit("multi-user.target")
+
+    test_ntfysh()
+
+    machine.succeed("systemctl stop ntfy-sh.service")
+    machine.succeed("mkdir -p /run/systemd/system/ntfy-sh.service.d")
+    machine.succeed("cp /etc/ntfy-sh-dynamic-user.conf /run/systemd/system/ntfy-sh.service.d/dynamic-user.conf")
+    machine.succeed("systemctl daemon-reload")
+    machine.succeed("systemctl start ntfy-sh.service")
+
+    test_ntfysh()
+  '';
+}
diff --git a/nixos/tests/pantheon.nix b/nixos/tests/pantheon.nix
index be1351283d99a..69a28c397bedc 100644
--- a/nixos/tests/pantheon.nix
+++ b/nixos/tests/pantheon.nix
@@ -26,6 +26,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...} :
 
     with subtest("Test we can see usernames in elementary-greeter"):
         machine.wait_for_text("${user.description}")
+        machine.wait_until_succeeds("pgrep -f io.elementary.greeter-compositor")
         # OCR was struggling with this one.
         # machine.wait_for_text("${bob.description}")
         # Ensure the password box is focused by clicking it.
@@ -39,21 +40,29 @@ import ./make-test-python.nix ({ pkgs, lib, ...} :
         machine.wait_for_x()
         machine.wait_for_file("${user.home}/.Xauthority")
         machine.succeed("xauth merge ${user.home}/.Xauthority")
+        machine.wait_until_succeeds('journalctl -t gnome-session-binary --grep "Entering running state"')
 
     with subtest("Check that logging in has given the user ownership of devices"):
         machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
 
-    with subtest("Check if pantheon session components actually start"):
-        machine.wait_until_succeeds("pgrep gala")
-        machine.wait_for_window("gala")
-        machine.wait_until_succeeds("pgrep -f io.elementary.wingpanel")
-        machine.wait_for_window("io.elementary.wingpanel")
-        machine.wait_until_succeeds("pgrep plank")
-        machine.wait_for_window("plank")
-        machine.wait_until_succeeds("pgrep -f gsd-media-keys")
+    with subtest("Check if Pantheon components actually start"):
+        for i in ["gala", "io.elementary.wingpanel", "plank", "gsd-media-keys", "io.elementary.desktop.agent-polkit"]:
+            machine.wait_until_succeeds(f"pgrep -f {i}")
+        for i in ["gala", "io.elementary.wingpanel", "plank"]:
+            machine.wait_for_window(i)
         machine.wait_for_unit("bamfdaemon.service", "${user.name}")
         machine.wait_for_unit("io.elementary.files.xdg-desktop-portal.service", "${user.name}")
 
+    with subtest("Check if various environment variables are set"):
+        cmd = "xargs --null --max-args=1 echo < /proc/$(pgrep -xf /run/current-system/sw/bin/gala)/environ"
+        machine.succeed(f"{cmd} | grep 'XDG_CURRENT_DESKTOP' | grep 'Pantheon'")
+        # Hopefully from the sessionPath option.
+        machine.succeed(f"{cmd} | grep 'XDG_DATA_DIRS' | grep 'gsettings-schemas/pantheon-agent-geoclue2'")
+        # Hopefully from login shell.
+        machine.succeed(f"{cmd} | grep '__NIXOS_SET_ENVIRONMENT_DONE' | grep '1'")
+        # See elementary-session-settings packaging.
+        machine.succeed(f"{cmd} | grep 'XDG_CONFIG_DIRS' | grep 'elementary-default-settings'")
+
     with subtest("Open elementary videos"):
         machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.videos >&2 &'")
         machine.sleep(2)
@@ -61,6 +70,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...} :
         machine.wait_for_text("No Videos Open")
 
     with subtest("Open elementary calendar"):
+        machine.wait_until_succeeds("pgrep -f evolution-calendar-factory")
         machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.calendar >&2 &'")
         machine.sleep(2)
         machine.wait_for_window("io.elementary.calendar")
@@ -75,6 +85,14 @@ import ./make-test-python.nix ({ pkgs, lib, ...} :
         machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.terminal >&2 &'")
         machine.wait_for_window("io.elementary.terminal")
 
+    with subtest("Trigger multitasking view"):
+        cmd = "dbus-send --session --dest=org.pantheon.gala --print-reply /org/pantheon/gala org.pantheon.gala.PerformAction int32:1"
+        env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0"
+        machine.succeed(f"su - ${user.name} -c '{env} {cmd}'")
+        machine.sleep(3)
+        machine.screenshot("multitasking")
+        machine.succeed(f"su - ${user.name} -c '{env} {cmd}'")
+
     with subtest("Check if gala has ever coredumped"):
         machine.fail("coredumpctl --json=short | grep gala")
         # So you can see the dock in the below screenshot.
diff --git a/nixos/tests/pgadmin4.nix b/nixos/tests/pgadmin4.nix
index 3ee7ed19fa1c5..407e4592ef5f7 100644
--- a/nixos/tests/pgadmin4.nix
+++ b/nixos/tests/pgadmin4.nix
@@ -4,31 +4,49 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
   name = "pgadmin4";
   meta.maintainers = with lib.maintainers; [ mkg20001 gador ];
 
-  nodes.machine = { pkgs, ... }: {
+  nodes = {
+    machine = { pkgs, ... }: {
 
-    imports = [ ./common/user-account.nix ];
+      imports = [ ./common/user-account.nix ];
 
-    environment.systemPackages = with pkgs; [
-      wget
-      curl
-      pgadmin4-desktopmode
-    ];
+      environment.systemPackages = with pkgs; [
+        wget
+        curl
+        pgadmin4-desktopmode
+      ];
 
-    services.postgresql = {
-      enable = true;
-      authentication = ''
-        host    all             all             localhost               trust
-      '';
+      services.postgresql = {
+        enable = true;
+        authentication = ''
+          host    all             all             localhost               trust
+        '';
+      };
+
+      services.pgadmin = {
+        port = 5051;
+        enable = true;
+        initialEmail = "bruh@localhost.de";
+        initialPasswordFile = pkgs.writeText "pw" "bruh2012!";
+      };
     };
+    machine2 = { pkgs, ... }: {
+
+      imports = [ ./common/user-account.nix ];
+
+      services.postgresql = {
+        enable = true;
+      };
 
-    services.pgadmin = {
-      port = 5051;
-      enable = true;
-      initialEmail = "bruh@localhost.de";
-      initialPasswordFile = pkgs.writeText "pw" "bruh2012!";
+      services.pgadmin = {
+        enable = true;
+        initialEmail = "bruh@localhost.de";
+        initialPasswordFile = pkgs.writeText "pw" "bruh2012!";
+        minimumPasswordLength = 12;
+      };
     };
   };
 
+
   testScript = ''
     with subtest("Check pgadmin module"):
       machine.wait_for_unit("postgresql")
@@ -49,5 +67,9 @@ import ./make-test-python.nix ({ pkgs, lib, ... }:
       machine.wait_until_succeeds("curl -sS localhost:5050")
       machine.wait_until_succeeds("curl -sS localhost:5050/browser/ | grep \"<title>pgAdmin 4</title>\" > /dev/null")
       machine.succeed("wget -nv --level=1 --spider --recursive localhost:5050/browser")
+
+    with subtest("Check pgadmin minimum password length"):
+      machine2.wait_for_unit("postgresql")
+      machine2.wait_for_console_text("Password must be at least 12 characters long")
   '';
 })
diff --git a/nixos/tests/spark/default.nix b/nixos/tests/spark/default.nix
index eed7db35bf4f1..034e9711bed52 100644
--- a/nixos/tests/spark/default.nix
+++ b/nixos/tests/spark/default.nix
@@ -10,7 +10,7 @@ let
     sparkCluster = testSparkCluster args;
     passthru.override = args': testsForPackage (args // args');
   };
-  testSparkCluster = { sparkPackage, ... }: pkgs.nixosTest ({
+  testSparkCluster = { sparkPackage, ... }: pkgs.testers.nixosTest ({
     name = "spark";
 
     nodes = {
diff --git a/nixos/tests/upnp.nix b/nixos/tests/upnp.nix
index af7cc1fe24130..5e135267403bd 100644
--- a/nixos/tests/upnp.nix
+++ b/nixos/tests/upnp.nix
@@ -5,7 +5,7 @@
 # this succeeds an external client will try to connect to the port
 # mapping.
 
-import ./make-test-python.nix ({ pkgs, ... }:
+import ./make-test-python.nix ({ pkgs, useNftables, ... }:
 
 let
   internalRouterAddress = "192.168.3.1";
@@ -27,6 +27,7 @@ in
           networking.nat.enable = true;
           networking.nat.internalInterfaces = [ "eth2" ];
           networking.nat.externalInterface = "eth1";
+          networking.nftables.enable = useNftables;
           networking.firewall.enable = true;
           networking.firewall.trustedInterfaces = [ "eth2" ];
           networking.interfaces.eth1.ipv4.addresses = [
@@ -82,7 +83,7 @@ in
       # Wait for network and miniupnpd.
       router.wait_for_unit("network-online.target")
       # $router.wait_for_unit("nat")
-      router.wait_for_unit("firewall.service")
+      router.wait_for_unit("${if useNftables then "nftables" else "firewall"}.service")
       router.wait_for_unit("miniupnpd")
 
       client1.wait_for_unit("network-online.target")