author    Rok Garbas <rok@garbas.si>  2024-03-13 00:37:12 +0100
committer GitHub <noreply@github.com>  2024-03-13 00:37:12 +0100
commit    6f11ba9ffea6000f34b6a9498a9f66b50c743668 (patch)
tree      1676e9c7e2044b72817199b20d3b4cb70a6bf6b6 /nixos
parent    3e98d578958677398ad4434211875519de17f3e9 (diff)
parent    91a2fbbce26077b62169ed72f3e59921d095103a (diff)
Merge branch 'master' into amazon-ec2-amis
Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/configuration/x-windows.chapter.md | 1
-rw-r--r--  nixos/doc/manual/development/replace-modules.section.md | 3
-rw-r--r--  nixos/doc/manual/development/running-nixos-tests.section.md | 10
-rw-r--r--  nixos/doc/manual/development/writing-modules.chapter.md | 7
-rw-r--r--  nixos/doc/manual/installation/installing.chapter.md | 3
-rw-r--r--  nixos/doc/manual/release-notes/rl-2405.section.md | 22
-rw-r--r--  nixos/lib/make-iso9660-image.nix | 25
-rw-r--r--  nixos/lib/make-iso9660-image.sh | 5
-rw-r--r--  nixos/lib/test-driver/test_driver/logger.py | 12
-rw-r--r--  nixos/modules/config/no-x-libs.nix | 2
-rw-r--r--  nixos/modules/config/users-groups.nix | 5
-rw-r--r--  nixos/modules/image/repart-image.nix | 38
-rw-r--r--  nixos/modules/image/repart.nix | 32
-rw-r--r--  nixos/modules/installer/cd-dvd/iso-image.nix | 11
-rw-r--r--  nixos/modules/module-list.nix | 3
-rw-r--r--  nixos/modules/programs/chromium.nix | 19
-rw-r--r--  nixos/modules/programs/clash-verge.nix | 7
-rw-r--r--  nixos/modules/programs/coolercontrol.nix | 37
-rw-r--r--  nixos/modules/programs/gnupg.nix | 42
-rw-r--r--  nixos/modules/programs/steam.nix | 22
-rw-r--r--  nixos/modules/programs/wayland/sway.nix | 5
-rw-r--r--  nixos/modules/services/backup/bacula.nix | 222
-rw-r--r--  nixos/modules/services/backup/syncoid.nix | 2
-rw-r--r--  nixos/modules/services/databases/lldap.nix | 17
-rw-r--r--  nixos/modules/services/databases/memcached.nix | 2
-rw-r--r--  nixos/modules/services/databases/postgresql.nix | 43
-rw-r--r--  nixos/modules/services/desktop-managers/plasma6.nix (renamed from nixos/modules/services/x11/desktop-managers/plasma6.nix) | 18
-rw-r--r--  nixos/modules/services/development/hoogle.nix | 15
-rw-r--r--  nixos/modules/services/development/nixseparatedebuginfod.nix | 4
-rw-r--r--  nixos/modules/services/hardware/fwupd.nix | 4
-rw-r--r--  nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/cdi-generate.nix | 57
-rw-r--r--  nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/default.nix | 8
-rw-r--r--  nixos/modules/services/mail/listmonk.nix | 6
-rw-r--r--  nixos/modules/services/matrix/matrix-sliding-sync.nix | 3
-rw-r--r--  nixos/modules/services/misc/etebase-server.nix | 28
-rw-r--r--  nixos/modules/services/misc/homepage-dashboard.nix | 224
-rw-r--r--  nixos/modules/services/misc/paperless.nix | 1
-rw-r--r--  nixos/modules/services/misc/tabby.nix | 203
-rw-r--r--  nixos/modules/services/monitoring/mackerel-agent.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/restic.nix | 13
-rw-r--r--  nixos/modules/services/monitoring/scrutiny.nix | 149
-rw-r--r--  nixos/modules/services/networking/bird-lg.nix | 4
-rw-r--r--  nixos/modules/services/networking/murmur.nix | 2
-rw-r--r--  nixos/modules/services/networking/nebula.nix | 34
-rw-r--r--  nixos/modules/services/networking/networkmanager.nix | 1
-rw-r--r--  nixos/modules/services/networking/unbound.nix | 7
-rw-r--r--  nixos/modules/services/security/esdm.nix | 101
-rw-r--r--  nixos/modules/services/security/vaultwarden/default.nix | 10
-rw-r--r--  nixos/modules/services/security/yubikey-agent.nix | 14
-rw-r--r--  nixos/modules/services/web-apps/miniflux.nix | 28
-rw-r--r--  nixos/modules/services/web-apps/nextcloud.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/photoprism.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/vikunja.nix | 47
-rw-r--r--  nixos/modules/services/web-servers/stargazer.nix | 8
-rw-r--r--  nixos/modules/services/x11/desktop-managers/deepin.nix | 1
-rw-r--r--  nixos/modules/services/x11/desktop-managers/default.nix | 2
-rw-r--r--  nixos/modules/services/x11/desktop-managers/lxqt.nix | 2
-rw-r--r--  nixos/modules/services/x11/desktop-managers/plasma5.nix | 1
-rw-r--r--  nixos/modules/services/x11/desktop-managers/xfce.nix | 1
-rw-r--r--  nixos/modules/services/x11/xserver.nix | 2
-rw-r--r--  nixos/modules/system/boot/networkd.nix | 2
-rw-r--r--  nixos/modules/system/boot/systemd.nix | 2
-rw-r--r--  nixos/modules/system/boot/uki.nix | 2
-rw-r--r--  nixos/modules/virtualisation/incus.nix | 13
-rw-r--r--  nixos/modules/virtualisation/oci-containers.nix | 2
-rw-r--r--  nixos/modules/virtualisation/virtualbox-host.nix | 51
-rw-r--r--  nixos/tests/akkoma.nix | 12
-rw-r--r--  nixos/tests/all-tests.nix | 2
-rw-r--r--  nixos/tests/homepage-dashboard.nix | 30
-rw-r--r--  nixos/tests/incus/container.nix | 1
-rw-r--r--  nixos/tests/incus/default.nix | 1
-rw-r--r--  nixos/tests/incus/lxd-to-incus.nix | 1
-rw-r--r--  nixos/tests/incus/openvswitch.nix | 65
-rw-r--r--  nixos/tests/incus/preseed.nix | 1
-rw-r--r--  nixos/tests/incus/socket-activated.nix | 1
-rw-r--r--  nixos/tests/incus/ui.nix | 1
-rw-r--r--  nixos/tests/incus/virtual-machine.nix | 1
-rw-r--r--  nixos/tests/knot.nix | 21
-rw-r--r--  nixos/tests/miniflux.nix | 78
-rw-r--r--  nixos/tests/minio.nix | 6
-rw-r--r--  nixos/tests/miriway.nix | 4
-rw-r--r--  nixos/tests/nebula.nix | 6
-rw-r--r--  nixos/tests/nixops/default.nix | 3
-rw-r--r--  nixos/tests/ollama.nix | 56
-rw-r--r--  nixos/tests/pass-secret-service.nix | 1
-rw-r--r--  nixos/tests/pgvecto-rs.nix | 76
-rw-r--r--  nixos/tests/privoxy.nix | 13
-rw-r--r--  nixos/tests/sanoid.nix | 3
-rw-r--r--  nixos/tests/vikunja.nix | 26
-rw-r--r--  nixos/tests/virtualbox.nix | 47
90 files changed, 1614 insertions, 514 deletions
diff --git a/nixos/doc/manual/configuration/x-windows.chapter.md b/nixos/doc/manual/configuration/x-windows.chapter.md
index 0451e4d25265f..bf1872ae01ace 100644
--- a/nixos/doc/manual/configuration/x-windows.chapter.md
+++ b/nixos/doc/manual/configuration/x-windows.chapter.md
@@ -150,6 +150,7 @@ Or if you have an older card, you may have to use one of the legacy
 drivers:
 
 ```nix
+services.xserver.videoDrivers = [ "nvidiaLegacy470" ];
 services.xserver.videoDrivers = [ "nvidiaLegacy390" ];
 services.xserver.videoDrivers = [ "nvidiaLegacy340" ];
 services.xserver.videoDrivers = [ "nvidiaLegacy304" ];
diff --git a/nixos/doc/manual/development/replace-modules.section.md b/nixos/doc/manual/development/replace-modules.section.md
index ac9f5adbaf981..45e2adbc26088 100644
--- a/nixos/doc/manual/development/replace-modules.section.md
+++ b/nixos/doc/manual/development/replace-modules.section.md
@@ -47,9 +47,8 @@ without having to know its implementation details.
 ```nix
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
+  inherit (lib) mkIf mkOption types;
   cfg = config.programs.man;
 in
 
diff --git a/nixos/doc/manual/development/running-nixos-tests.section.md b/nixos/doc/manual/development/running-nixos-tests.section.md
index 33076f5dc2a7d..b8191ebd313c5 100644
--- a/nixos/doc/manual/development/running-nixos-tests.section.md
+++ b/nixos/doc/manual/development/running-nixos-tests.section.md
@@ -18,3 +18,13 @@ you can view a log of the test:
 ```ShellSession
 $ nix-store --read-log result
 ```
+
+## System Requirements {#sec-running-nixos-tests-requirements}
+
+NixOS tests require virtualization support.
+This means that the machine must have `kvm` in its [system features](https://nixos.org/manual/nix/stable/command-ref/conf-file.html?highlight=system-features#conf-system-features) list, or `apple-virt` in case of macOS.
+These features are autodetected locally, but `apple-virt` is only autodetected since Nix 2.19.0.
+
+Features of **remote builders** must additionally be configured manually on the client, e.g. on NixOS with [`nix.buildMachines.*.supportedFeatures`](https://search.nixos.org/options?show=nix.buildMachines.*.supportedFeatures&sort=alpha_asc&query=nix.buildMachines) or through general [Nix configuration](https://nixos.org/manual/nix/stable/advanced-topics/distributed-builds).
+
+If you run the tests on a **macOS** machine, you also need a "remote" builder for Linux; possibly a VM. [nix-darwin](https://daiderd.com/nix-darwin/) users may enable [`nix.linux-builder.enable`](https://daiderd.com/nix-darwin/manual/index.html#opt-nix.linux-builder.enable) to launch such a VM.
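The requirements above come down to making sure the builder that runs the test advertises `kvm`. As a minimal sketch (hostname, SSH user and key path are placeholders), a NixOS client could declare such a remote Linux builder like this:

```nix
{
  nix.distributedBuilds = true;
  nix.buildMachines = [
    {
      hostName = "builder.example.org";  # placeholder
      system = "x86_64-linux";
      sshUser = "builder";               # placeholder
      sshKey = "/root/.ssh/id_builder";  # placeholder
      maxJobs = 4;
      # Advertise the features the NixOS test framework schedules on.
      supportedFeatures = [ "kvm" "nixos-test" "big-parallel" ];
    }
  ];
}
```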
diff --git a/nixos/doc/manual/development/writing-modules.chapter.md b/nixos/doc/manual/development/writing-modules.chapter.md
index e07b899e6df7b..20157a21e890f 100644
--- a/nixos/doc/manual/development/writing-modules.chapter.md
+++ b/nixos/doc/manual/development/writing-modules.chapter.md
@@ -104,9 +104,8 @@ functions system environment substitution should *not* be disabled explicitly.
 ```nix
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
+  inherit (lib) concatStringsSep mkIf mkOption optionalString types;
   cfg = config.services.locate;
 in {
   options.services.locate = {
@@ -163,9 +162,7 @@ in {
 ::: {#exec-escaping-example .example}
 ### Escaping in Exec directives
 ```nix
-{ config, lib, pkgs, utils, ... }:
-
-with lib;
+{ config, pkgs, utils, ... }:
 
 let
   cfg = config.services.echo;
diff --git a/nixos/doc/manual/installation/installing.chapter.md b/nixos/doc/manual/installation/installing.chapter.md
index 815bcc071cd9c..c7deb07352f1c 100644
--- a/nixos/doc/manual/installation/installing.chapter.md
+++ b/nixos/doc/manual/installation/installing.chapter.md
@@ -272,6 +272,9 @@ update /etc/fstab.
     # parted /dev/sda -- mkpart ESP fat32 1MB 512MB
     # parted /dev/sda -- set 3 esp on
     ```
+    ::: {.note}
+    If you chose not to create a swap partition, replace `3` with `2`. To confirm the ESP's partition number, run `parted --list`.
+    :::
 
 Once complete, you can follow with
 [](#sec-installation-manual-partitioning-formatting).
diff --git a/nixos/doc/manual/release-notes/rl-2405.section.md b/nixos/doc/manual/release-notes/rl-2405.section.md
index 327e84f356530..22689868cf02c 100644
--- a/nixos/doc/manual/release-notes/rl-2405.section.md
+++ b/nixos/doc/manual/release-notes/rl-2405.section.md
@@ -127,6 +127,10 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - The `power.ups` module now generates `upsd.conf`, `upsd.users` and `upsmon.conf` automatically from a set of new configuration options. This breaks compatibility with existing `power.ups` setups where these files were created manually. Back up these files before upgrading NixOS.
 
+- `unrar` was updated to v7. See [changelog](https://www.rarlab.com/unrar7notes.htm) for more information.
+
+- `k3s` was updated to [v1.29](https://github.com/k3s-io/k3s/releases/tag/v1.29.1%2Bk3s2). See [changelog and upgrade notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#urgent-upgrade-notes) for more information.
+
 - `k9s` was updated to v0.31. There have been various breaking changes in the config file format,
   check out the changelog of [v0.29](https://github.com/derailed/k9s/releases/tag/v0.29.0),
   [v0.30](https://github.com/derailed/k9s/releases/tag/v0.30.0) and
@@ -139,6 +143,11 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
   The list in `nixos/modules/virtualisation/amazon-ec2-amis.nix` will stop
   being updated and will be removed in the future.
 
+- The option `services.postgresql.ensureUsers._.ensurePermissions` has been removed, as it is
+  not declarative and breaks with newer PostgreSQL versions. Consider using
+  [](#opt-services.postgresql.ensureUsers._.ensureDBOwnership)
+  instead, or a tool better suited to managing the data inside a PostgreSQL database.
+
 - `idris2` was updated to v0.7.0. This version introduces breaking changes. Check out the [changelog](https://github.com/idris-lang/Idris2/blob/v0.7.0/CHANGELOG.md#v070) for details.
 
 - `neo4j` has been updated to 5, you may want to read the [release notes for Neo4j 5](https://neo4j.com/release-notes/database/neo4j-5/)
@@ -162,6 +171,10 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - `paperless`' `services.paperless.extraConfig` setting has been removed and converted to the freeform type and option named `services.paperless.settings`.
 
+- `services.homepage-dashboard` now takes its configuration via native Nix expressions, rather than dumping templated configurations into `/var/lib/homepage-dashboard`, where they previously had to be managed manually. New options allow bookmarks, services, widgets and custom CSS/JS to be configured natively in Nix.
+
+- `hare` may now be cross-compiled. For that to work, however, `haredoc` needed to stop being built together with it. Thus, the latter is now its own package with the name of `haredoc`.
+
 - The legacy and long deprecated systemd target `network-interfaces.target` has been removed. Use `network.target` instead.
 
 - `services.frp.settings` now generates the frp configuration file in TOML format as [recommended by upstream](https://github.com/fatedier/frp#configuration-files), instead of the legacy INI format. This has also introduced other changes in the configuration file structure and options.
@@ -174,6 +187,10 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
   release notes of [v19](https://github.com/systemd/mkosi/releases/tag/v19) and
   [v20](https://github.com/systemd/mkosi/releases/tag/v20) for a list of changes.
 
+- The `services.vikunja` systemd service now uses `vikunja` as dynamic user instead of `vikunja-api`. Database users might need to be changed.
+
+- The `services.vikunja.setupNginx` setting has been removed. Users now need to set up the web server configuration themselves, with a proxy pass to the Vikunja service.
+
 - The `woodpecker-*` packages have been updated to v2 which includes [breaking changes](https://woodpecker-ci.org/docs/next/migrations#200).
 
 - `services.nginx` will no longer advertise HTTP/3 availability automatically. This must now be manually added, preferably to each location block.
@@ -356,6 +373,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - A new hardening flag, `zerocallusedregs` was made available, corresponding to the gcc/clang option `-fzero-call-used-regs=used-gpr`.
 
+- A new hardening flag, `trivialautovarinit` was made available, corresponding to the gcc/clang option `-ftrivial-auto-var-init=pattern`.
+
 - New options were added to the dnsdist module to enable and configure a DNSCrypt endpoint (see `services.dnsdist.dnscrypt.enable`, etc.).
   The module can generate the DNSCrypt provider key pair, certificates and also performs their rotation automatically with no downtime.
 
@@ -370,6 +389,9 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
 
 - [Nginx virtual hosts](#opt-services.nginx.virtualHosts) using `forceSSL` or
   `globalRedirect` can now have redirect codes other than 301 through
  `redirectCode`.
+
+- `bacula` now allows configuring TLS for encrypted communication.
+
 
 - `libjxl` 0.9.0 [dropped support for the butteraugli API](https://github.com/libjxl/libjxl/pull/2576). You will no longer be able to set `enableButteraugli` on `libaom`.
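For the removed PostgreSQL `ensurePermissions` option noted in the release notes above, a minimal migration sketch (database and user names are illustrative) looks like this:

```nix
{
  services.postgresql = {
    enable = true;
    ensureDatabases = [ "myapp" ];
    ensureUsers = [
      {
        name = "myapp";
        # Grants ownership of the database with the same name as the user,
        # replacing the removed ensurePermissions attribute set.
        ensureDBOwnership = true;
      }
    ];
  };
}
```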
diff --git a/nixos/lib/make-iso9660-image.nix b/nixos/lib/make-iso9660-image.nix
index 2f7dcf519a16f..ec520f5706822 100644
--- a/nixos/lib/make-iso9660-image.nix
+++ b/nixos/lib/make-iso9660-image.nix
@@ -1,4 +1,4 @@
-{ stdenv, closureInfo, xorriso, syslinux, libossp_uuid
+{ lib, stdenv, callPackage, closureInfo, xorriso, syslinux, libossp_uuid, squashfsTools
 
 , # The file name of the resulting ISO image.
   isoName ? "cd.iso"
@@ -16,6 +16,17 @@
   # symlink to `object' that will be added to the CD.
   storeContents ? []
 
+, # In addition to `contents', the closure of the store paths listed
+  # in `squashfsContents' is compressed as squashfs and the result is
+  # placed in /nix-store.squashfs on the CD.
+  # FIXME: This is a performance optimization to avoid Hydra copying
+  # the squashfs between builders and should be removed when Hydra
+  # is smarter about scheduling.
+  squashfsContents ? []
+
+, # Compression settings for squashfs
+  squashfsCompression ? "xz -Xdict-size 100%"
+
 , # Whether this should be an El-Torito bootable CD.
   bootable ? false
 
@@ -45,12 +56,20 @@ assert bootable -> bootImage != "";
 assert efiBootable -> efiBootImage != "";
 assert usbBootable -> isohybridMbrImage != "";
 
+let
+  needSquashfs = squashfsContents != [];
+  makeSquashfsDrv = callPackage ./make-squashfs.nix {
+    storeContents = squashfsContents;
+    comp = squashfsCompression;
+  };
+in
 stdenv.mkDerivation {
   name = isoName;
   __structuredAttrs = true;
 
   buildCommandPath = ./make-iso9660-image.sh;
-  nativeBuildInputs = [ xorriso syslinux zstd libossp_uuid ];
+  nativeBuildInputs = [ xorriso syslinux zstd libossp_uuid ]
+    ++ lib.optionals needSquashfs makeSquashfsDrv.nativeBuildInputs;
 
   inherit isoName bootable bootImage compressImage volumeID efiBootImage efiBootable isohybridMbrImage usbBootable;
 
@@ -60,6 +79,8 @@ stdenv.mkDerivation {
   objects = map (x: x.object) storeContents;
   symlinks = map (x: x.symlink) storeContents;
 
+  squashfsCommand = lib.optionalString needSquashfs makeSquashfsDrv.buildCommand;
+
   # For obtaining the closure of `storeContents'.
   closureInfo = closureInfo { rootPaths = map (x: x.object) storeContents; };
 }
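A minimal sketch of a call site using the two new arguments (the paths and the compression string are illustrative; boot-related arguments keep their defaults):

```nix
pkgs.callPackage ./make-iso9660-image.nix {
  isoName = "example.iso";
  volumeID = "EXAMPLE";
  contents = [
    { source = pkgs.writeText "version" "24.05"; target = "/version.txt"; }
  ];
  # The closure of these paths is compressed into /nix-store.squashfs on the CD.
  squashfsContents = [ config.system.build.toplevel ];
  squashfsCompression = "zstd -Xcompression-level 6";
}
```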
diff --git a/nixos/lib/make-iso9660-image.sh b/nixos/lib/make-iso9660-image.sh
index 34febe9cfe0e6..5881195e461f8 100644
--- a/nixos/lib/make-iso9660-image.sh
+++ b/nixos/lib/make-iso9660-image.sh
@@ -68,6 +68,11 @@ for i in $(< $closureInfo/store-paths); do
     addPath "${i:1}" "$i"
 done
 
+# If needed, build a squashfs and add that
+if [[ -n "$squashfsCommand" ]]; then
+    (out="nix-store.squashfs" eval "$squashfsCommand")
+    addPath "nix-store.squashfs" "nix-store.squashfs"
+fi
 
 # Also include a manifest of the closures in a format suitable for
 # nix-store --load-db.
diff --git a/nixos/lib/test-driver/test_driver/logger.py b/nixos/lib/test-driver/test_driver/logger.py
index 116244b5e4ae0..0b0623bddfa1e 100644
--- a/nixos/lib/test-driver/test_driver/logger.py
+++ b/nixos/lib/test-driver/test_driver/logger.py
@@ -1,6 +1,3 @@
-# mypy: disable-error-code="no-untyped-call"
-# drop the above line when mypy is upgraded to include
-# https://github.com/python/typeshed/commit/49b717ca52bf0781a538b04c0d76a5513f7119b8
 import codecs
 import os
 import sys
@@ -10,6 +7,7 @@ from contextlib import contextmanager
 from queue import Empty, Queue
 from typing import Any, Dict, Iterator
 from xml.sax.saxutils import XMLGenerator
+from xml.sax.xmlreader import AttributesImpl
 
 from colorama import Fore, Style
 
@@ -22,7 +20,7 @@ class Logger:
         self.queue: "Queue[Dict[str, str]]" = Queue()
 
         self.xml.startDocument()
-        self.xml.startElement("logfile", attrs={})
+        self.xml.startElement("logfile", attrs=AttributesImpl({}))
 
         self._print_serial_logs = True
 
@@ -44,7 +42,7 @@ class Logger:
         return message
 
     def log_line(self, message: str, attributes: Dict[str, str]) -> None:
-        self.xml.startElement("line", attributes)
+        self.xml.startElement("line", attrs=AttributesImpl(attributes))
         self.xml.characters(message)
         self.xml.endElement("line")
 
@@ -89,8 +87,8 @@ class Logger:
             )
         )
 
-        self.xml.startElement("nest", attrs={})
-        self.xml.startElement("head", attributes)
+        self.xml.startElement("nest", attrs=AttributesImpl({}))
+        self.xml.startElement("head", attrs=AttributesImpl(attributes))
         self.xml.characters(message)
         self.xml.endElement("head")
 
diff --git a/nixos/modules/config/no-x-libs.nix b/nixos/modules/config/no-x-libs.nix
index 870b3fe77cca9..fea6e0c4110bd 100644
--- a/nixos/modules/config/no-x-libs.nix
+++ b/nixos/modules/config/no-x-libs.nix
@@ -66,7 +66,7 @@ with lib;
       networkmanager-sstp = super.networkmanager-vpnc.override { withGnome = false; };
       networkmanager-vpnc = super.networkmanager-vpnc.override { withGnome = false; };
       pango = super.pango.override { x11Support = false; };
-      pinentry = super.pinentry.override { enabledFlavors = [ "curses" "tty" "emacs" ]; withLibsecret = false; };
+      pinentry-curses = super.pinentry-curses.override { withLibsecret = false; };
       pipewire = super.pipewire.override { vulkanSupport = false; x11Support = false; };
       pythonPackagesExtensions = super.pythonPackagesExtensions ++ [
         (python-final: python-prev: {
diff --git a/nixos/modules/config/users-groups.nix b/nixos/modules/config/users-groups.nix
index dd34771c0b42b..02cd1a17f538a 100644
--- a/nixos/modules/config/users-groups.nix
+++ b/nixos/modules/config/users-groups.nix
@@ -704,6 +704,11 @@ in {
     in stringAfter [ "users" ] ''
       if [ -e ${lingerDir} ] ; then
         cd ${lingerDir}
+        for user in ${lingerDir}/*; do
+          if ! id "$user" >/dev/null 2>&1; then
+            rm --force -- "$user"
+          fi
+        done
         ls ${lingerDir} | sort | comm -3 -1 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl disable-linger
         ls ${lingerDir} | sort | comm -3 -2 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl  enable-linger
       fi
diff --git a/nixos/modules/image/repart-image.nix b/nixos/modules/image/repart-image.nix
index 7ac47ee32ff43..5ae523c43f589 100644
--- a/nixos/modules/image/repart-image.nix
+++ b/nixos/modules/image/repart-image.nix
@@ -3,6 +3,7 @@
 
 { lib
 , runCommand
+, runCommandLocal
 , python3
 , black
 , ruff
@@ -33,6 +34,7 @@
 , seed
 , definitionsDirectory
 , sectorSize
+, mkfsEnv ? {}
 }:
 
 let
@@ -50,6 +52,11 @@ let
     mypy --strict $out
   '';
 
+  amendedRepartDefinitions = runCommandLocal "amended-repart.d" {} ''
+    definitions=$(${amendRepartDefinitions} ${partitions} ${definitionsDirectory})
+    cp -r $definitions $out
+  '';
+
   fileSystemToolMapping = {
     "vfat" = [ dosfstools mtools ];
     "ext4" = [ e2fsprogs.bin ];
@@ -74,28 +81,39 @@ in
 
 runCommand imageFileBasename
 {
+  __structuredAttrs = true;
+
   nativeBuildInputs = [
     systemd
     fakeroot
     util-linux
     compressionPkg
   ] ++ fileSystemTools;
-} ''
-  amendedRepartDefinitions=$(${amendRepartDefinitions} ${partitions} ${definitionsDirectory})
 
+  env = mkfsEnv;
+
+  systemdRepartFlags = [
+    "--dry-run=no"
+    "--empty=create"
+    "--size=auto"
+    "--seed=${seed}"
+    "--definitions=${amendedRepartDefinitions}"
+    "--split=${lib.boolToString split}"
+    "--json=pretty"
+  ] ++ lib.optionals (sectorSize != null) [
+    "--sector-size=${toString sectorSize}"
+  ];
+
+  passthru = {
+    inherit amendRepartDefinitions amendedRepartDefinitions;
+  };
+} ''
   mkdir -p $out
   cd $out
 
   echo "Building image with systemd-repart..."
   unshare --map-root-user fakeroot systemd-repart \
-    --dry-run=no \
-    --empty=create \
-    --size=auto \
-    --seed="${seed}" \
-    --definitions="$amendedRepartDefinitions" \
-    --split="${lib.boolToString split}" \
-    --json=pretty \
-    ${lib.optionalString (sectorSize != null) "--sector-size=${toString sectorSize}"} \
+    ''${systemdRepartFlags[@]} \
     ${imageFileBasename}.raw \
     | tee repart-output.json
 
diff --git a/nixos/modules/image/repart.nix b/nixos/modules/image/repart.nix
index 6a933f0d83ccc..90c9c7e51dfa3 100644
--- a/nixos/modules/image/repart.nix
+++ b/nixos/modules/image/repart.nix
@@ -60,6 +60,11 @@ let
       };
     };
   };
+
+  mkfsOptionsToEnv = opts: lib.mapAttrs' (fsType: options: {
+    name = "SYSTEMD_REPART_MKFS_OPTIONS_${lib.toUpper fsType}";
+    value = builtins.concatStringsSep " " options;
+  }) opts;
 in
 {
   options.image.repart = {
@@ -183,6 +188,29 @@ in
       '';
     };
 
+    mkfsOptions = lib.mkOption {
+      type = with lib.types; attrsOf (listOf str);
+      default = {};
+      example = lib.literalExpression ''
+        {
+          vfat = [ "-S 512" "-c" ];
+        }
+      '';
+      description = lib.mdDoc ''
+        Specify extra options for created file systems. The specified options
+        are converted to individual environment variables of the format
+        `SYSTEMD_REPART_MKFS_OPTIONS_<FSTYPE>`.
+
+        See [upstream systemd documentation](https://github.com/systemd/systemd/blob/v255/docs/ENVIRONMENT.md?plain=1#L575-L577)
+        for information about the usage of these environment variables.
+
+        The example would produce the following environment variable:
+        ```
+        SYSTEMD_REPART_MKFS_OPTIONS_VFAT="-S 512 -c"
+        ```
+      '';
+    };
+
   };
 
   config = {
@@ -239,11 +267,13 @@ in
           (lib.mapAttrs (_n: v: { Partition = v.repartConfig; }) finalPartitions);
 
         partitions = pkgs.writeText "partitions.json" (builtins.toJSON finalPartitions);
+
+        mkfsEnv = mkfsOptionsToEnv cfg.mkfsOptions;
       in
       pkgs.callPackage ./repart-image.nix {
         systemd = cfg.package;
         inherit (cfg) imageFileBasename compression split seed sectorSize;
-        inherit fileSystems definitionsDirectory partitions;
+        inherit fileSystems definitionsDirectory partitions mkfsEnv;
       };
 
     meta.maintainers = with lib.maintainers; [ nikstur ];
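As a small sketch of the new option (the ext4 label is illustrative), a configuration such as:

```nix
{
  image.repart.mkfsOptions = {
    vfat = [ "-S 512" "-c" ];
    ext4 = [ "-L nixos" ];
  };
}
```

would reach the image builder roughly as `SYSTEMD_REPART_MKFS_OPTIONS_VFAT="-S 512 -c"` and `SYSTEMD_REPART_MKFS_OPTIONS_EXT4="-L nixos"`, per the `mkfsOptionsToEnv` helper above.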
diff --git a/nixos/modules/installer/cd-dvd/iso-image.nix b/nixos/modules/installer/cd-dvd/iso-image.nix
index 6adb94e09aff3..f5b6af3a6b7ff 100644
--- a/nixos/modules/installer/cd-dvd/iso-image.nix
+++ b/nixos/modules/installer/cd-dvd/iso-image.nix
@@ -811,12 +811,6 @@ in
       optional config.isoImage.includeSystemBuildDependencies
         config.system.build.toplevel.drvPath;
 
-    # Create the squashfs image that contains the Nix store.
-    system.build.squashfsStore = pkgs.callPackage ../../../lib/make-squashfs.nix {
-      storeContents = config.isoImage.storeContents;
-      comp = config.isoImage.squashfsCompression;
-    };
-
     # Individual files to be included on the CD, outside of the Nix
     # store on the CD.
     isoImage.contents =
@@ -827,9 +821,6 @@ in
         { source = config.system.build.initialRamdisk + "/" + config.system.boot.loader.initrdFile;
           target = "/boot/" + config.system.boot.loader.initrdFile;
         }
-        { source = config.system.build.squashfsStore;
-          target = "/nix-store.squashfs";
-        }
         { source = pkgs.writeText "version" config.system.nixos.label;
           target = "/version.txt";
         }
@@ -878,6 +869,8 @@ in
       bootable = config.isoImage.makeBiosBootable;
       bootImage = "/isolinux/isolinux.bin";
       syslinux = if config.isoImage.makeBiosBootable then pkgs.syslinux else null;
+      squashfsContents = config.isoImage.storeContents;
+      squashfsCompression = config.isoImage.squashfsCompression;
     } // optionalAttrs (config.isoImage.makeUsbBootable && config.isoImage.makeBiosBootable) {
       usbBootable = true;
       isohybridMbrImage = "${pkgs.syslinux}/share/syslinux/isohdpfx.bin";
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 627427262da63..41e369ac1c650 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -163,6 +163,7 @@
   ./programs/clash-verge.nix
   ./programs/cnping.nix
   ./programs/command-not-found/command-not-found.nix
+  ./programs/coolercontrol.nix
   ./programs/criu.nix
   ./programs/darling.nix
   ./programs/dconf.nix
@@ -719,6 +720,7 @@
   ./services/misc/libreddit.nix
   ./services/misc/lidarr.nix
   ./services/misc/lifecycled.nix
+  ./services/misc/llama-cpp.nix
   ./services/misc/logkeys.nix
   ./services/misc/mame.nix
   ./services/misc/mbpfan.nix
@@ -781,6 +783,7 @@
   ./services/misc/svnserve.nix
   ./services/misc/synergy.nix
   ./services/misc/sysprof.nix
+  ./services/misc/tabby.nix
   ./services/misc/tandoor-recipes.nix
   ./services/misc/taskserver
   ./services/misc/tautulli.nix
diff --git a/nixos/modules/programs/chromium.nix b/nixos/modules/programs/chromium.nix
index 45a9e9e2a6895..5e89837300489 100644
--- a/nixos/modules/programs/chromium.nix
+++ b/nixos/modules/programs/chromium.nix
@@ -98,6 +98,24 @@ in
           }
         '';
       };
+
+      initialPrefs = mkOption {
+        type = types.attrs;
+        description = lib.mdDoc ''
+          Initial preferences are used to configure the browser for the first run.
+          Unlike {option}`programs.chromium.extraOpts`, initialPrefs can be changed by users in the browser settings.
+          More information can be found in the Chromium documentation:
+          <https://www.chromium.org/administrators/configuring-other-preferences/>
+        '';
+        default = {};
+        example = literalExpression ''
+          {
+            "first_run_tabs" = [
+              "https://nixos.org/"
+            ];
+          }
+        '';
+      };
     };
   };
 
@@ -110,6 +128,7 @@ in
         { source = "${cfg.plasmaBrowserIntegrationPackage}/etc/chromium/native-messaging-hosts/org.kde.plasma.browser_integration.json"; };
       "chromium/policies/managed/default.json" = lib.mkIf (defaultProfile != {}) { text = builtins.toJSON defaultProfile; };
       "chromium/policies/managed/extra.json" = lib.mkIf (cfg.extraOpts != {}) { text = builtins.toJSON cfg.extraOpts; };
+      "chromium/initial_preferences" = lib.mkIf (cfg.initialPrefs != {}) { text = builtins.toJSON cfg.initialPrefs; };
       # for google-chrome https://www.chromium.org/administrators/linux-quick-start
       "opt/chrome/native-messaging-hosts/org.kde.plasma.browser_integration.json" = lib.mkIf cfg.enablePlasmaBrowserIntegration
         { source = "${cfg.plasmaBrowserIntegrationPackage}/etc/opt/chrome/native-messaging-hosts/org.kde.plasma.browser_integration.json"; };
diff --git a/nixos/modules/programs/clash-verge.nix b/nixos/modules/programs/clash-verge.nix
index 57a1c0377edbf..e1afafa7cadc3 100644
--- a/nixos/modules/programs/clash-verge.nix
+++ b/nixos/modules/programs/clash-verge.nix
@@ -3,6 +3,7 @@
 {
   options.programs.clash-verge = {
     enable = lib.mkEnableOption (lib.mdDoc "Clash Verge");
+    package = lib.mkPackageOption pkgs "clash-verge" {};
     autoStart = lib.mkEnableOption (lib.mdDoc "Clash Verge auto launch");
     tunMode = lib.mkEnableOption (lib.mdDoc "Clash Verge TUN mode");
   };
@@ -14,10 +15,10 @@
     lib.mkIf cfg.enable {
 
       environment.systemPackages = [
-        pkgs.clash-verge
+        cfg.package
         (lib.mkIf cfg.autoStart (pkgs.makeAutostartItem {
           name = "clash-verge";
-          package = pkgs.clash-verge;
+          package = cfg.package;
         }))
       ];
 
@@ -25,7 +26,7 @@
         owner = "root";
         group = "root";
         capabilities = "cap_net_bind_service,cap_net_admin=+ep";
-        source = "${lib.getExe pkgs.clash-verge}";
+        source = "${lib.getExe cfg.package}";
       };
     };
 
diff --git a/nixos/modules/programs/coolercontrol.nix b/nixos/modules/programs/coolercontrol.nix
new file mode 100644
index 0000000000000..6e7299ad16b72
--- /dev/null
+++ b/nixos/modules/programs/coolercontrol.nix
@@ -0,0 +1,37 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+
+let
+  cfg = config.programs.coolercontrol;
+in
+{
+  ##### interface
+  options = {
+    programs.coolercontrol.enable = lib.mkEnableOption (lib.mdDoc "CoolerControl GUI & its background services");
+  };
+
+  ##### implementation
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = with pkgs.coolercontrol; [
+      coolercontrol-gui
+    ];
+
+    systemd = {
+      packages = with pkgs.coolercontrol; [
+        coolercontrol-liqctld
+        coolercontrold
+      ];
+
+      # https://github.com/NixOS/nixpkgs/issues/81138
+      services = {
+        coolercontrol-liqctld.wantedBy = [ "multi-user.target" ];
+        coolercontrold.wantedBy = [ "multi-user.target" ];
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ OPNA2608 codifryed ];
+}
diff --git a/nixos/modules/programs/gnupg.nix b/nixos/modules/programs/gnupg.nix
index 179d2de87cc58..66be1f247fbde 100644
--- a/nixos/modules/programs/gnupg.nix
+++ b/nixos/modules/programs/gnupg.nix
@@ -1,8 +1,7 @@
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
+  inherit (lib) mkRemovedOptionModule mkOption mkPackageOption types mkIf optionalString;
 
   cfg = config.programs.gnupg;
 
@@ -26,8 +25,10 @@ let
       "curses";
 
 in
-
 {
+  imports = [
+    (mkRemovedOptionModule [ "programs" "gnupg" "agent" "pinentryFlavor" ] "Use programs.gnupg.agent.pinentryPackage instead")
+  ];
 
   options.programs.gnupg = {
     package = mkPackageOption pkgs "gnupg" { };
@@ -66,17 +67,17 @@ in
       '';
     };
 
-    agent.pinentryFlavor = mkOption {
-      type = types.nullOr (types.enum pkgs.pinentry.flavors);
-      example = "gnome3";
-      default = defaultPinentryFlavor;
-      defaultText = literalMD ''matching the configured desktop environment'';
+    agent.pinentryPackage = mkOption {
+      type = types.nullOr types.package;
+      example = lib.literalMD "pkgs.pinentry-gnome3";
+      default = pkgs.pinentry-curses;
+      defaultText = lib.literalMD "matching the configured desktop environment or `pkgs.pinentry-curses`";
       description = lib.mdDoc ''
-        Which pinentry interface to use. If not null, the path to the
-        pinentry binary will be set in /etc/gnupg/gpg-agent.conf.
-        If not set at all, it'll pick an appropriate flavor depending on the
-        system configuration (qt flavor for lxqt and plasma5, gtk2 for xfce
-        4.12, gnome3 on all other systems with X enabled, ncurses otherwise).
+        Which pinentry package to use. The path to the mainProgram as defined in
+        the package's meta attributes will be set in /etc/gnupg/gpg-agent.conf.
+        If not set by the user, it'll pick an appropriate flavor depending on the
+        system configuration (qt flavor for lxqt and plasma5, gtk2 for xfce,
+        gnome3 on all other systems with X enabled, curses otherwise).
       '';
     };
 
@@ -102,9 +103,8 @@ in
   };
 
   config = mkIf cfg.agent.enable {
-    programs.gnupg.agent.settings = {
-      pinentry-program = lib.mkIf (cfg.agent.pinentryFlavor != null)
-        "${pkgs.pinentry.${cfg.agent.pinentryFlavor}}/bin/pinentry";
+    programs.gnupg.agent.settings = mkIf (cfg.agent.pinentryPackage != null) {
+      pinentry-program = lib.getExe cfg.agent.pinentryPackage;
     };
 
     environment.etc."gnupg/gpg-agent.conf".source =
@@ -207,9 +207,9 @@ in
       wantedBy = [ "sockets.target" ];
     };
 
-    services.dbus.packages = mkIf (cfg.agent.pinentryFlavor == "gnome3") [ pkgs.gcr ];
+    services.dbus.packages = mkIf (lib.elem "gnome3" (cfg.agent.pinentryPackage.flavors or [])) [ pkgs.gcr ];
 
-    environment.systemPackages = with pkgs; [ cfg.package ];
+    environment.systemPackages = [ cfg.package ];
 
     environment.interactiveShellInit = ''
       # Bind gpg-agent to this TTY if gpg commands are used.
@@ -230,12 +230,10 @@ in
     '';
 
     assertions = [
-      { assertion = cfg.agent.enableSSHSupport -> !config.programs.ssh.startAgent;
+      {
+        assertion = cfg.agent.enableSSHSupport -> !config.programs.ssh.startAgent;
         message = "You can't use ssh-agent and GnuPG agent with SSH support enabled at the same time!";
       }
     ];
   };
-
-  # uses attributes of the linked package
-  meta.buildDocsInSandbox = false;
 }
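A sketch of the migration implied by the removed-option module above:

```nix
{
  # Before (now rejected with a removal message):
  #   programs.gnupg.agent.pinentryFlavor = "gnome3";
  programs.gnupg.agent = {
    enable = true;
    pinentryPackage = pkgs.pinentry-gnome3;
  };
}
```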
diff --git a/nixos/modules/programs/steam.nix b/nixos/modules/programs/steam.nix
index c7f1e622f7baf..31803f061dce2 100644
--- a/nixos/modules/programs/steam.nix
+++ b/nixos/modules/programs/steam.nix
@@ -43,6 +43,9 @@ in {
         }
       '';
       apply = steam: steam.override (prev: {
+        extraEnv = (lib.optionalAttrs (cfg.extraCompatPackages != [ ]) {
+            STEAM_EXTRA_COMPAT_TOOLS_PATHS = makeBinPath cfg.extraCompatPackages;
+          }) // (prev.extraEnv or {});
         extraLibraries = pkgs: let
           prevLibs = if prev ? extraLibraries then prev.extraLibraries pkgs else [ ];
           additionalLibs = with config.hardware.opengl;
@@ -56,6 +59,8 @@ in {
           # use the setuid wrapped bubblewrap
           bubblewrap = "${config.security.wrapperDir}/..";
         };
+      } // optionalAttrs cfg.extest.enable {
+        extraEnv.LD_PRELOAD = "${pkgs.pkgsi686Linux.extest}/lib/libextest.so";
       });
       description = lib.mdDoc ''
         The Steam package to use. Additional libraries are added from the system
@@ -66,6 +71,16 @@ in {
       '';
     };
 
+    extraCompatPackages = mkOption {
+      type = types.listOf types.package;
+      default = [ ];
+      description = lib.mdDoc ''
+        Extra packages to be used as compatibility tools for Steam on Linux. Packages will be included
+        in the `STEAM_EXTRA_COMPAT_TOOLS_PATHS` environment variable. For more information see
+        <https://github.com/ValveSoftware/steam-for-linux/issues/6310>.
+      '';
+    };
+
     remotePlay.openFirewall = mkOption {
       type = types.bool;
       default = false;
@@ -114,6 +129,11 @@ in {
         };
       };
     };
+
+    extest.enable = mkEnableOption (lib.mdDoc ''
+      Load the extest library into Steam, to translate X11 input events to
+      uinput events (e.g. for using Steam Input on Wayland)
+    '');
   };
 
   config = mkIf cfg.enable {
@@ -167,5 +187,5 @@ in {
     ];
   };
 
-  meta.maintainers = with maintainers; [ mkg20001 ];
+  meta.maintainers = teams.steam;
 }
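A sketch of the two new Steam options (the compatibility-tool package named below is an assumption, not something this diff adds):

```nix
{
  programs.steam = {
    enable = true;
    # Appended to STEAM_EXTRA_COMPAT_TOOLS_PATHS via extraEnv.
    extraCompatPackages = [ pkgs.proton-ge-bin ];  # assumed package name
    # Preload extest to translate X11 input events to uinput events
    # (e.g. for Steam Input on Wayland).
    extest.enable = true;
  };
}
```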
diff --git a/nixos/modules/programs/wayland/sway.nix b/nixos/modules/programs/wayland/sway.nix
index ca2503ae5da77..2bd297af52544 100644
--- a/nixos/modules/programs/wayland/sway.nix
+++ b/nixos/modules/programs/wayland/sway.nix
@@ -152,6 +152,7 @@ in {
             '';
           }
         ];
+
         environment = {
           systemPackages = optional (cfg.package != null) cfg.package ++ cfg.extraPackages;
           # Needed for the default wallpaper:
@@ -166,8 +167,12 @@ in {
             "sway/config".source = mkOptionDefault "${cfg.package}/etc/sway/config";
           };
         };
+
+        programs.gnupg.agent.pinentryPackage = lib.mkDefault pkgs.pinentry-gnome3;
+
         # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1050913
         xdg.portal.config.sway.default = mkDefault [ "wlr" "gtk" ];
+
         # To make a Sway session available if a display manager like SDDM is enabled:
         services.xserver.displayManager.sessionPackages = optionals (cfg.package != null) [ cfg.package ]; }
       (import ./wayland-session.nix { inherit lib pkgs; })
diff --git a/nixos/modules/services/backup/bacula.nix b/nixos/modules/services/backup/bacula.nix
index 5a75a46e5259a..39975adf59092 100644
--- a/nixos/modules/services/backup/bacula.nix
+++ b/nixos/modules/services/backup/bacula.nix
@@ -4,11 +4,36 @@
 # TODO: test configuration when building nixexpr (use -t parameter)
 # TODO: support sqlite3 (it's deprecate?) and mysql
 
-with lib;
 
 let
+  inherit (lib)
+    concatStringsSep
+    literalExpression
+    mapAttrsToList
+    mdDoc
+    mkIf
+    mkOption
+    optional
+    optionalString
+    types
+    ;
   libDir = "/var/lib/bacula";
 
+  yes_no = bool: if bool then "yes" else "no";
+  tls_conf = tls_cfg: optionalString tls_cfg.enable (
+    concatStringsSep
+      "\n"
+      (
+      ["TLS Enable = yes;"]
+      ++ optional (tls_cfg.require != null) "TLS Require = ${yes_no tls_cfg.require};"
+      ++ optional (tls_cfg.certificate != null) ''TLS Certificate = "${tls_cfg.certificate}";''
+      ++ [''TLS Key = "${tls_cfg.key}";'']
+      ++ optional (tls_cfg.verifyPeer != null) "TLS Verify Peer = ${yes_no tls_cfg.verifyPeer};"
+      ++ optional (tls_cfg.allowedCN != [ ]) "TLS Allowed CN = ${concatStringsSep " " (tls_cfg.allowedCN)};"
+      ++ optional (tls_cfg.caCertificateFile != null) ''TLS CA Certificate File = "${tls_cfg.caCertificateFile}";''
+      )
+  );
+
   fd_cfg = config.services.bacula-fd;
   fd_conf = pkgs.writeText "bacula-fd.conf"
     ''
@@ -18,6 +43,7 @@ let
         WorkingDirectory = ${libDir};
         Pid Directory = /run;
         ${fd_cfg.extraClientConfig}
+        ${tls_conf fd_cfg.tls}
       }
 
       ${concatStringsSep "\n" (mapAttrsToList (name: value: ''
@@ -25,6 +51,7 @@ let
         Name = "${name}";
         Password = ${value.password};
         Monitor = ${value.monitor};
+        ${tls_conf value.tls}
       }
       '') fd_cfg.director)}
 
@@ -44,6 +71,7 @@ let
         WorkingDirectory = ${libDir};
         Pid Directory = /run;
         ${sd_cfg.extraStorageConfig}
+        ${tls_conf sd_cfg.tls}
       }
 
       ${concatStringsSep "\n" (mapAttrsToList (name: value: ''
@@ -70,6 +98,7 @@ let
         Name = "${name}";
         Password = ${value.password};
         Monitor = ${value.monitor};
+        ${tls_conf value.tls}
       }
       '') sd_cfg.director)}
 
@@ -90,6 +119,7 @@ let
       Working Directory = ${libDir};
       Pid Directory = /run/;
       QueryFile = ${pkgs.bacula}/etc/query.sql;
+      ${tls_conf dir_cfg.tls}
       ${dir_cfg.extraDirectorConfig}
     }
 
@@ -108,13 +138,99 @@ let
     ${dir_cfg.extraConfig}
     '';
 
-  directorOptions = {...}:
+  linkOption = name: destination: "[${name}](#opt-${builtins.replaceStrings [ "<" ">"] ["_" "_"] destination})";
+  tlsLink = destination: submodulePath: linkOption "${submodulePath}.${destination}" "${submodulePath}.${destination}";
+
+  tlsOptions = submodulePath: {...}:
+  {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Specifies if TLS should be enabled.
+          If this is set to `false`, TLS will be completely disabled, even if ${tlsLink "tls.require" submodulePath} is true.
+        '';
+      };
+      require = mkOption {
+        type = types.nullOr types.bool;
+        default = null;
+        description = mdDoc ''
+          Require TLS or TLS-PSK encryption.
+          This directive is ignored unless one of ${tlsLink "tls.enable" submodulePath} is true or TLS PSK Enable is set to `yes`.
+          If TLS is not required while TLS or TLS-PSK are enabled, then the Bacula component
+          will connect with other components either with or without TLS or TLS-PSK
+
+          If ${tlsLink "tls.enable" submodulePath} or TLS-PSK is enabled and TLS is required, then the Bacula
+          component will refuse any connection request that does not use TLS.
+        '';
+      };
+      certificate = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = mdDoc ''
+          The full path to the PEM encoded TLS certificate.
+          It will be used as either a client or server certificate,
+          depending on the connection direction.
+          This directive is required in a server context, but it may
+          not be specified in a client context if ${tlsLink "tls.verifyPeer" submodulePath} is
+          `false` in the corresponding server context.
+        '';
+      };
+      key = mkOption {
+        type = types.path;
+        description = mdDoc ''
+          The path of a PEM encoded TLS private key.
+          It must correspond to the TLS certificate.
+        '';
+      };
+      verifyPeer = mkOption {
+        type = types.nullOr types.bool;
+        default = null;
+        description = mdDoc ''
+          Verify peer certificate.
+          Instructs server to request and verify the client's X.509 certificate.
+          Any client certificate signed by a known-CA will be accepted.
+          Additionally, the client's X509 certificate Common Name must meet the value of the Address directive.
+          If ${tlsLink "tls.allowedCN" submodulePath} is used,
+          the client's x509 certificate Common Name must also correspond to
+          one of the CN specified in the ${tlsLink "tls.allowedCN" submodulePath} directive.
+          This directive is valid only for a server and not in client context.
+
+          Bacula's default is `true`.
+        '';
+      };
+      allowedCN = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = mdDoc ''
+          Common name attribute of allowed peer certificates.
+          This directive is valid for a server and in a client context.
+          If this directive is specified, the peer certificate will be verified against this list.
+          In the case this directive is configured on a server side, the allowed
+          CN list will not be checked if ${tlsLink "tls.verifyPeer" submodulePath} is false.
+        '';
+      };
+      caCertificateFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = mdDoc ''
+          The path specifying a PEM encoded TLS CA certificate(s).
+          Multiple certificates are permitted in the file.
+          One of TLS CA Certificate File or TLS CA Certificate Dir are required in a server context, unless
+          ${tlsLink "tls.verifyPeer" submodulePath} is false, and are always required in a client context.
+        '';
+      };
+    };
+  };
+
+  directorOptions = submodulePath:{...}:
   {
     options = {
       password = mkOption {
         type = types.str;
         # TODO: required?
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Specifies the password that must be supplied for the default Bacula
           Console to be authorized. The same password must appear in the
           Director resource of the Console configuration file. For added
@@ -135,7 +251,7 @@ let
         type = types.enum [ "no" "yes" ];
         default = "no";
         example = "yes";
-        description = lib.mdDoc ''
+        description = mdDoc ''
           If Monitor is set to `no`, this director will have
           full access to this Storage daemon. If Monitor is set to
           `yes`, this director will only be able to fetch the
@@ -146,6 +262,13 @@ let
           security problems.
         '';
       };
+
+      tls = mkOption {
+        type = types.submodule (tlsOptions "${submodulePath}.director.<name>");
+        description = mdDoc ''
+          TLS Options for the Director in this Configuration.
+        '';
+      };
     };
   };
 
@@ -154,7 +277,7 @@ let
     options = {
       changerDevice = mkOption {
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           The specified name-string must be the generic SCSI device name of the
           autochanger that corresponds to the normal read/write Archive Device
           specified in the Device resource. This generic SCSI device name
@@ -173,7 +296,7 @@ let
 
       changerCommand = mkOption {
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           The name-string specifies an external program to be called that will
           automatically change volumes as required by Bacula. Normally, this
           directive will be specified only in the AutoChanger resource, which
@@ -195,14 +318,14 @@ let
       };
 
       devices = mkOption {
-        description = lib.mdDoc "";
+        description = mdDoc "";
         type = types.listOf types.str;
       };
 
       extraAutochangerConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Autochanger directive.
         '';
         example = ''
@@ -219,7 +342,7 @@ let
       archiveDevice = mkOption {
         # TODO: required?
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           The specified name-string gives the system file name of the storage
           device managed by this storage daemon. This will usually be the
           device file name of a removable storage device (tape drive), for
@@ -236,7 +359,7 @@ let
       mediaType = mkOption {
         # TODO: required?
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           The specified name-string names the type of media supported by this
           device, for example, `DLT7000`. Media type names are
           arbitrary in that you set them to anything you want, but they must be
@@ -274,7 +397,7 @@ let
       extraDeviceConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Device directive.
         '';
         example = ''
@@ -295,7 +418,7 @@ in {
       enable = mkOption {
         type = types.bool;
         default = false;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Whether to enable the Bacula File Daemon.
         '';
       };
@@ -304,7 +427,7 @@ in {
         default = "${config.networking.hostName}-fd";
         defaultText = literalExpression ''"''${config.networking.hostName}-fd"'';
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           The client name that must be used by the Director when connecting.
           Generally, it is a good idea to use a name related to the machine so
           that error messages can be easily identified if you have multiple
@@ -315,7 +438,7 @@ in {
       port = mkOption {
         default = 9102;
         type = types.port;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           This specifies the port number on which the Client listens for
           Director connections. It must agree with the FDPort specified in
           the Client resource of the Director's configuration file.
@@ -324,16 +447,26 @@ in {
 
       director = mkOption {
         default = {};
-        description = lib.mdDoc ''
+        description = mdDoc ''
           This option defines director resources in Bacula File Daemon.
         '';
-        type = with types; attrsOf (submodule directorOptions);
+        type = types.attrsOf (types.submodule (directorOptions "services.bacula-fd"));
       };
 
+
+      tls = mkOption {
+        type = types.submodule (tlsOptions "services.bacula-fd");
+        default = { };
+        description = mdDoc ''
+          TLS Options for the File Daemon.
+          Important notice: The backup won't be encrypted.
+        '';
+       };
+
       extraClientConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Client directive.
         '';
         example = ''
@@ -345,7 +478,7 @@ in {
       extraMessagesConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Messages directive.
         '';
         example = ''
@@ -358,7 +491,7 @@ in {
       enable = mkOption {
         type = types.bool;
         default = false;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Whether to enable Bacula Storage Daemon.
         '';
       };
@@ -367,7 +500,7 @@ in {
         default = "${config.networking.hostName}-sd";
         defaultText = literalExpression ''"''${config.networking.hostName}-sd"'';
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Specifies the Name of the Storage daemon.
         '';
       };
@@ -375,7 +508,7 @@ in {
       port = mkOption {
         default = 9103;
         type = types.port;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Specifies port number on which the Storage daemon listens for
           Director connections.
         '';
@@ -383,32 +516,32 @@ in {
 
       director = mkOption {
         default = {};
-        description = lib.mdDoc ''
+        description = mdDoc ''
           This option defines Director resources in Bacula Storage Daemon.
         '';
-        type = with types; attrsOf (submodule directorOptions);
+        type = types.attrsOf (types.submodule (directorOptions "services.bacula-sd"));
       };
 
       device = mkOption {
         default = {};
-        description = lib.mdDoc ''
+        description = mdDoc ''
           This option defines Device resources in Bacula Storage Daemon.
         '';
-        type = with types; attrsOf (submodule deviceOptions);
+        type = types.attrsOf (types.submodule deviceOptions);
       };
 
       autochanger = mkOption {
         default = {};
-        description = lib.mdDoc ''
+        description = mdDoc ''
           This option defines Autochanger resources in Bacula Storage Daemon.
         '';
-        type = with types; attrsOf (submodule autochangerOptions);
+        type = types.attrsOf (types.submodule autochangerOptions);
       };
 
       extraStorageConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Storage directive.
         '';
         example = ''
@@ -420,13 +553,21 @@ in {
       extraMessagesConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Messages directive.
         '';
         example = ''
           console = all
         '';
       };
+      tls = mkOption {
+        type = types.submodule (tlsOptions "services.bacula-sd");
+        default = { };
+        description = mdDoc ''
+          TLS Options for the Storage Daemon.
+          Important notice: The backup won't be encrypted.
+        '';
+       };
 
     };
 
@@ -434,7 +575,7 @@ in {
       enable = mkOption {
         type = types.bool;
         default = false;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Whether to enable Bacula Director Daemon.
         '';
       };
@@ -443,7 +584,7 @@ in {
         default = "${config.networking.hostName}-dir";
         defaultText = literalExpression ''"''${config.networking.hostName}-dir"'';
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           The director name used by the system administrator. This directive is
           required.
         '';
@@ -452,7 +593,7 @@ in {
       port = mkOption {
         default = 9101;
         type = types.port;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Specify the port (a positive integer) on which the Director daemon
           will listen for Bacula Console connections. This same port number
           must be specified in the Director resource of the Console
@@ -465,7 +606,7 @@ in {
       password = mkOption {
         # TODO: required?
         type = types.str;
-        description = lib.mdDoc ''
+        description = mdDoc ''
            Specifies the password that must be supplied for a Director.
         '';
       };
@@ -473,7 +614,7 @@ in {
       extraMessagesConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Messages directive.
         '';
         example = ''
@@ -484,7 +625,7 @@ in {
       extraDirectorConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration to be passed in Director directive.
         '';
         example = ''
@@ -496,13 +637,22 @@ in {
       extraConfig = mkOption {
         default = "";
         type = types.lines;
-        description = lib.mdDoc ''
+        description = mdDoc ''
           Extra configuration for Bacula Director Daemon.
         '';
         example = ''
           TODO
         '';
       };
+
+      tls = mkOption {
+        type = types.submodule (tlsOptions "services.bacula-dir");
+        default = { };
+        description = mdDoc ''
+          TLS Options for the Director.
+          Important notice: The backup won't be encrypted.
+        '';
+       };
     };
   };
 
diff --git a/nixos/modules/services/backup/syncoid.nix b/nixos/modules/services/backup/syncoid.nix
index 7b8d3b431309f..4a04f0aa16221 100644
--- a/nixos/modules/services/backup/syncoid.nix
+++ b/nixos/modules/services/backup/syncoid.nix
@@ -134,7 +134,7 @@ in
     localSourceAllow = mkOption {
       type = types.listOf types.str;
       # Permissions snapshot and destroy are in case --no-sync-snap is not used
-      default = [ "bookmark" "hold" "send" "snapshot" "destroy" ];
+      default = [ "bookmark" "hold" "send" "snapshot" "destroy" "mount" ];
       description = lib.mdDoc ''
         Permissions granted for the {option}`services.syncoid.user` user
         for local source datasets. See
diff --git a/nixos/modules/services/databases/lldap.nix b/nixos/modules/services/databases/lldap.nix
index e821da8e58aa3..033de7af886f2 100644
--- a/nixos/modules/services/databases/lldap.nix
+++ b/nixos/modules/services/databases/lldap.nix
@@ -107,10 +107,25 @@ in
       wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
+      # lldap defaults to a hardcoded `jwt_secret` value if none is provided, which is bad, because
+      # an attacker could create a valid admin jwt access token fairly trivially.
+      # Because there are 3 different ways `jwt_secret` can be provided, we check if any one of them is present,
+      # and if not, bootstrap a secret in `/var/lib/lldap/jwt_secret_file` and give that to lldap.
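+      # For reference, the three mechanisms are: `jwt_secret` in `settings`, the
+      # LLDAP_JWT_SECRET environment variable, and LLDAP_JWT_SECRET_FILE (e.g. pointing
+      # at a secret supplied through an environment file).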
+      script = lib.optionalString (!cfg.settings ? jwt_secret) ''
+        if [[ -z "$LLDAP_JWT_SECRET_FILE" ]] && [[ -z "$LLDAP_JWT_SECRET" ]]; then
+          if [[ ! -e "./jwt_secret_file" ]]; then
+            ${lib.getExe pkgs.openssl} rand -base64 -out ./jwt_secret_file 32
+          fi
+          export LLDAP_JWT_SECRET_FILE="./jwt_secret_file"
+        fi
+      '' + ''
+        ${lib.getExe cfg.package} run --config-file ${format.generate "lldap_config.toml" cfg.settings}
+      '';
       serviceConfig = {
-        ExecStart = "${lib.getExe cfg.package} run --config-file ${format.generate "lldap_config.toml" cfg.settings}";
         StateDirectory = "lldap";
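+        # Tighten permissions so the bootstrapped jwt_secret_file is not readable by
+        # other users.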
+        StateDirectoryMode = "0750";
         WorkingDirectory = "%S/lldap";
+        UMask = "0027";
         User = "lldap";
         Group = "lldap";
         DynamicUser = true;
diff --git a/nixos/modules/services/databases/memcached.nix b/nixos/modules/services/databases/memcached.nix
index 542c80ab2e67f..fd943c20091ab 100644
--- a/nixos/modules/services/databases/memcached.nix
+++ b/nixos/modules/services/databases/memcached.nix
@@ -37,7 +37,7 @@ in
         description = lib.mdDoc "The port to bind to.";
       };
 
-      enableUnixSocket = mkEnableOption (lib.mdDoc "unix socket at /run/memcached/memcached.sock");
+      enableUnixSocket = mkEnableOption (lib.mdDoc "Unix Domain Socket at /run/memcached/memcached.sock instead of listening on an IP address and port. The `listen` and `port` options are ignored.");
 
       maxMemory = mkOption {
         type = types.ints.unsigned;
diff --git a/nixos/modules/services/databases/postgresql.nix b/nixos/modules/services/databases/postgresql.nix
index ed5915735730b..c4e76c82ba5c7 100644
--- a/nixos/modules/services/databases/postgresql.nix
+++ b/nixos/modules/services/databases/postgresql.nix
@@ -161,33 +161,6 @@ in
               '';
             };
 
-            ensurePermissions = mkOption {
-              type = types.attrsOf types.str;
-              default = {};
-              visible = false; # This option has been deprecated.
-              description = lib.mdDoc ''
-                This option is DEPRECATED and should not be used in nixpkgs anymore,
-                use `ensureDBOwnership` instead. It can also break with newer
-                versions of PostgreSQL (≥ 15).
-
-                Permissions to ensure for the user, specified as an attribute set.
-                The attribute names specify the database and tables to grant the permissions for.
-                The attribute values specify the permissions to grant. You may specify one or
-                multiple comma-separated SQL privileges here.
-
-                For more information on how to specify the target
-                and on which privileges exist, see the
-                [GRANT syntax](https://www.postgresql.org/docs/current/sql-grant.html).
-                The attributes are used as `GRANT ''${attrValue} ON ''${attrName}`.
-              '';
-              example = literalExpression ''
-                {
-                  "DATABASE \"nextcloud\"" = "ALL PRIVILEGES";
-                  "ALL TABLES IN SCHEMA public" = "ALL PRIVILEGES";
-                }
-              '';
-            };
-
             ensureDBOwnership = mkOption {
               type = types.bool;
               default = false;
@@ -460,16 +433,6 @@ in
         Offender: ${name} has not been found among databases.
       '';
     }) cfg.ensureUsers;
-    # `ensurePermissions` is now deprecated, let's avoid it.
-    warnings = lib.optional (any ({ ensurePermissions, ... }: ensurePermissions != {}) cfg.ensureUsers) "
-      `services.postgresql.ensureUsers.*.ensurePermissions` is used in your expressions,
-      this option is known to be broken with newer PostgreSQL versions,
-      consider migrating to `services.postgresql.ensureUsers.*.ensureDBOwnership` or
-      consult the release notes or manual for more migration guidelines.
-
-      This option will be removed in NixOS 24.05 unless it sees significant
-      maintenance improvements.
-    ";
 
     services.postgresql.settings =
       {
@@ -583,11 +546,6 @@ in
               concatMapStrings
               (user:
               let
-                  userPermissions = concatStringsSep "\n"
-                    (mapAttrsToList
-                      (database: permission: ''$PSQL -tAc 'GRANT ${permission} ON ${database} TO "${user.name}"' '')
-                      user.ensurePermissions
-                    );
                   dbOwnershipStmt = optionalString
                     user.ensureDBOwnership
                     ''$PSQL -tAc 'ALTER DATABASE "${user.name}" OWNER TO "${user.name}";' '';
@@ -599,7 +557,6 @@ in
                   userClauses = ''$PSQL -tAc 'ALTER ROLE "${user.name}" ${concatStringsSep " " clauseSqlStatements}' '';
                 in ''
                   $PSQL -tAc "SELECT 1 FROM pg_roles WHERE rolname='${user.name}'" | grep -q 1 || $PSQL -tAc 'CREATE USER "${user.name}"'
-                  ${userPermissions}
                   ${userClauses}
 
                   ${dbOwnershipStmt}
diff --git a/nixos/modules/services/x11/desktop-managers/plasma6.nix b/nixos/modules/services/desktop-managers/plasma6.nix
index 1237261e0af7b..1710d28954d62 100644
--- a/nixos/modules/services/x11/desktop-managers/plasma6.nix
+++ b/nixos/modules/services/desktop-managers/plasma6.nix
@@ -5,8 +5,7 @@
   utils,
   ...
 }: let
-  xcfg = config.services.xserver;
-  cfg = xcfg.desktopManager.plasma6;
+  cfg = config.services.desktopManager.plasma6;
 
   inherit (pkgs) kdePackages;
   inherit (lib) literalExpression mkDefault mkIf mkOption mkPackageOptionMD types;
@@ -17,7 +16,7 @@
   '';
 in {
   options = {
-    services.xserver.desktopManager.plasma6 = {
+    services.desktopManager.plasma6 = {
       enable = mkOption {
         type = types.bool;
         default = false;
@@ -44,6 +43,12 @@ in {
     };
   };
 
+  imports = [
+    (lib.mkRenamedOptionModule [ "services" "xserver" "desktopManager" "plasma6" "enable" ] [ "services" "desktopManager" "plasma6" "enable" ])
+    (lib.mkRenamedOptionModule [ "services" "xserver" "desktopManager" "plasma6" "enableQt5Integration" ] [ "services" "desktopManager" "plasma6" "enableQt5Integration" ])
+    (lib.mkRenamedOptionModule [ "services" "xserver" "desktopManager" "plasma6" "notoPackage" ] [ "services" "desktopManager" "plasma6" "notoPackage" ])
+  ];
+
   config = mkIf cfg.enable {
     assertions = [
       {
@@ -161,7 +166,7 @@ in {
     in
       requiredPackages
       ++ utils.removePackagesByName optionalPackages config.environment.plasma6.excludePackages
-      ++ lib.optionals config.services.xserver.desktopManager.plasma6.enableQt5Integration [
+      ++ lib.optionals config.services.desktopManager.plasma6.enableQt5Integration [
         breeze.qt5
         plasma-integration.qt5
         pkgs.plasma5Packages.kwayland-integration
@@ -175,7 +180,7 @@ in {
       ++ lib.optional config.powerManagement.enable powerdevil
       ++ lib.optional config.services.colord.enable colord-kde
       ++ lib.optional config.services.hardware.bolt.enable plasma-thunderbolt
-      ++ lib.optionals config.services.samba.enable [kdenetwork-filesharing pkgs.samba]
+      ++ lib.optional config.services.samba.enable kdenetwork-filesharing
       ++ lib.optional config.services.xserver.wacom.enable wacomtablet
       ++ lib.optional config.services.flatpak.enable flatpak-kcm;
 
@@ -185,7 +190,7 @@ in {
       "/libexec" # for drkonqi
     ];
 
-    environment.etc."X11/xkb".source = xcfg.xkb.dir;
+    environment.etc."X11/xkb".source = config.services.xserver.xkb.dir;
 
     # Add ~/.config/kdedefaults to XDG_CONFIG_DIRS for shells, since Plasma sets that.
     # FIXME: maybe we should append to XDG_CONFIG_DIRS in /etc/set-environment instead?
@@ -210,6 +215,7 @@ in {
       serif = ["Noto Serif"];
     };
 
+    programs.gnupg.agent.pinentryPackage = pkgs.pinentry-qt;
     programs.ssh.askPassword = mkDefault "${kdePackages.ksshaskpass.out}/bin/ksshaskpass";
 
     # Enable helpful DBus services.
diff --git a/nixos/modules/services/development/hoogle.nix b/nixos/modules/services/development/hoogle.nix
index 88dd01fd8aab2..c90bb7f019021 100644
--- a/nixos/modules/services/development/hoogle.nix
+++ b/nixos/modules/services/development/hoogle.nix
@@ -56,6 +56,16 @@ in {
       description = lib.mdDoc "Set the host to bind on.";
       default = "127.0.0.1";
     };
+
+    extraOptions = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "--no-security-headers" ];
+      description = lib.mdDoc ''
+        Additional command-line arguments to pass to
+        {command}`hoogle server`
+      '';
+    };
   };
 
   config = mkIf cfg.enable {
@@ -66,7 +76,10 @@ in {
 
       serviceConfig = {
         Restart = "always";
-        ExecStart = ''${hoogleEnv}/bin/hoogle server --local --port ${toString cfg.port} --home ${cfg.home} --host ${cfg.host}'';
+        ExecStart = ''
+          ${hoogleEnv}/bin/hoogle server --local --port ${toString cfg.port} --home ${cfg.home} --host ${cfg.host} \
+            ${concatStringsSep " " cfg.extraOptions}
+        '';
 
         DynamicUser = true;
 
diff --git a/nixos/modules/services/development/nixseparatedebuginfod.nix b/nixos/modules/services/development/nixseparatedebuginfod.nix
index daf85153d339f..a2ec0d2c80e1f 100644
--- a/nixos/modules/services/development/nixseparatedebuginfod.nix
+++ b/nixos/modules/services/development/nixseparatedebuginfod.nix
@@ -90,7 +90,9 @@ in
 
     users.groups.nixseparatedebuginfod = { };
 
-    nix.settings.extra-allowed-users = [ "nixseparatedebuginfod" ];
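+    # The `extra-` settings prefix requires Nix >= 2.4; guard it so older Nix versions
+    # don't reject the unknown setting.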
+    nix.settings = lib.optionalAttrs (lib.versionAtLeast config.nix.package.version "2.4") {
+      extra-allowed-users = [ "nixseparatedebuginfod" ];
+    };
 
     environment.variables.DEBUGINFOD_URLS = "http://${url}";
 
diff --git a/nixos/modules/services/hardware/fwupd.nix b/nixos/modules/services/hardware/fwupd.nix
index 8a9e38d0547bc..c4837ff80ec7a 100644
--- a/nixos/modules/services/hardware/fwupd.nix
+++ b/nixos/modules/services/hardware/fwupd.nix
@@ -14,11 +14,11 @@ let
 
   customEtc = {
     "fwupd/fwupd.conf" = {
-      source = format.generate "fwupd.conf" {
+      source = format.generate "fwupd.conf" ({
         fwupd = cfg.daemonSettings;
       } // lib.optionalAttrs (lib.length (lib.attrNames cfg.uefiCapsuleSettings) != 0) {
         uefi_capsule = cfg.uefiCapsuleSettings;
-      };
+      });
       # fwupd tries to chmod the file if it doesn't have the right permissions
       mode = "0640";
     };
diff --git a/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/cdi-generate.nix b/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/cdi-generate.nix
index a90d234f65c0c..1aaa2d07b9bde 100644
--- a/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/cdi-generate.nix
+++ b/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/cdi-generate.nix
@@ -1,37 +1,58 @@
-{ config, lib, pkgs }: let
+{
+  addDriverRunpath,
+  glibc,
+  jq,
+  lib,
+  nvidia-container-toolkit,
+  nvidia-driver,
+  runtimeShell,
+  writeScriptBin,
+}:
+let
   mountOptions = { options = ["ro" "nosuid" "nodev" "bind"]; };
   mounts = [
-    { hostPath = "${lib.getBin config.hardware.nvidia.package}/bin/nvidia-cuda-mps-control";
+    # FIXME: make the /usr mounts optional
+    { hostPath = lib.getExe' nvidia-driver "nvidia-cuda-mps-control";
       containerPath = "/usr/bin/nvidia-cuda-mps-control"; }
-    { hostPath = "${lib.getBin config.hardware.nvidia.package}/bin/nvidia-cuda-mps-server";
+    { hostPath = lib.getExe' nvidia-driver "nvidia-cuda-mps-server";
       containerPath = "/usr/bin/nvidia-cuda-mps-server"; }
-    { hostPath = "${lib.getBin config.hardware.nvidia.package}/bin/nvidia-debugdump";
+    { hostPath = lib.getExe' nvidia-driver "nvidia-debugdump";
       containerPath = "/usr/bin/nvidia-debugdump"; }
-    { hostPath = "${lib.getBin config.hardware.nvidia.package}/bin/nvidia-powerd";
+    { hostPath = lib.getExe' nvidia-driver "nvidia-powerd";
       containerPath = "/usr/bin/nvidia-powerd"; }
-    { hostPath = "${lib.getBin config.hardware.nvidia.package}/bin/nvidia-smi";
+    { hostPath = lib.getExe' nvidia-driver "nvidia-smi";
       containerPath = "/usr/bin/nvidia-smi"; }
-    { hostPath = "${pkgs.nvidia-container-toolkit}/bin/nvidia-ctk";
+    { hostPath = lib.getExe' nvidia-container-toolkit "nvidia-ctk";
       containerPath = "/usr/bin/nvidia-ctk"; }
-    { hostPath = "${pkgs.glibc}/lib";
-      containerPath = "${pkgs.glibc}/lib"; }
-    { hostPath = "${pkgs.glibc}/lib64";
-      containerPath = "${pkgs.glibc}/lib64"; }
+
+    # FIXME: use closureinfo
+    {
+      hostPath = addDriverRunpath.driverLink;
+      containerPath = addDriverRunpath.driverLink;
+    }
+    { hostPath = "${lib.getLib glibc}/lib";
+      containerPath = "${lib.getLib glibc}/lib"; }
+    { hostPath = "${lib.getLib glibc}/lib64";
+      containerPath = "${lib.getLib glibc}/lib64"; }
   ];
   jqAddMountExpression = ".containerEdits.mounts[.containerEdits.mounts | length] |= . +";
   mountsToJq = lib.concatMap
     (mount:
-      ["${pkgs.jq}/bin/jq '${jqAddMountExpression} ${builtins.toJSON (mount // mountOptions)}'"])
+      ["${lib.getExe jq} '${jqAddMountExpression} ${builtins.toJSON (mount // mountOptions)}'"])
     mounts;
-in ''
-#! ${pkgs.runtimeShell}
+in
+writeScriptBin "nvidia-cdi-generator"
+''
+#! ${runtimeShell}
 
 function cdiGenerate {
-  ${pkgs.nvidia-container-toolkit}/bin/nvidia-ctk cdi generate \
+  ${lib.getExe' nvidia-container-toolkit "nvidia-ctk"} cdi generate \
     --format json \
-    --ldconfig-path ${pkgs.glibc.bin}/bin/ldconfig \
-    --library-search-path ${config.hardware.nvidia.package}/lib \
-    --nvidia-ctk-path ${pkgs.nvidia-container-toolkit}/bin/nvidia-ctk
+    --ldconfig-path ${lib.getExe' glibc "ldconfig"} \
+    --library-search-path ${lib.getLib nvidia-driver}/lib \
+    --nvidia-ctk-path ${lib.getExe' nvidia-container-toolkit "nvidia-ctk"}
 }
 
 cdiGenerate | \
diff --git a/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/default.nix b/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/default.nix
index 3c96e9c41be52..b95bdf191fad2 100644
--- a/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/default.nix
+++ b/nixos/modules/services/hardware/nvidia-container-toolkit-cdi-generator/default.nix
@@ -26,9 +26,11 @@
       serviceConfig = {
         RuntimeDirectory = "cdi";
         RemainAfterExit = true;
-        ExecStart = let
-          script = (pkgs.writeScriptBin "nvidia-cdi-generator"
-            (import ./cdi-generate.nix { inherit config lib pkgs; })); in (lib.getExe script);
+        ExecStart =
+          let
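+            # Pass the configured driver package so the generated CDI spec references
+            # the same NVIDIA driver the host is running.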
+            script = pkgs.callPackage ./cdi-generate.nix { nvidia-driver = config.hardware.nvidia.package; };
+          in
+          lib.getExe script;
         Type = "oneshot";
       };
     };
diff --git a/nixos/modules/services/mail/listmonk.nix b/nixos/modules/services/mail/listmonk.nix
index 945eb436c1f23..d6399304cc108 100644
--- a/nixos/modules/services/mail/listmonk.nix
+++ b/nixos/modules/services/mail/listmonk.nix
@@ -187,7 +187,11 @@ in {
           # Indeed, it will try to create all the folders and realize one of them already exist.
           # Therefore, we have to create it ourselves.
           ''${pkgs.coreutils}/bin/mkdir -p "''${STATE_DIRECTORY}/listmonk/uploads"''
-          "${cfg.package}/bin/listmonk --config ${cfgFile} --idempotent --install --upgrade --yes"
+          # setup database if not already done
+          "${cfg.package}/bin/listmonk --config ${cfgFile} --idempotent --install --yes"
+          # apply db migrations (setup and migrations cannot be done in one step:
+          # with "--install --upgrade" listmonk ignores the upgrade)
+          "${cfg.package}/bin/listmonk --config ${cfgFile} --upgrade --yes"
           "${updateDatabaseConfigScript}/bin/update-database-config.sh"
         ];
         ExecStart = "${cfg.package}/bin/listmonk --config ${cfgFile}";
diff --git a/nixos/modules/services/matrix/matrix-sliding-sync.nix b/nixos/modules/services/matrix/matrix-sliding-sync.nix
index 8b22cd7dba802..d62e41bebd647 100644
--- a/nixos/modules/services/matrix/matrix-sliding-sync.nix
+++ b/nixos/modules/services/matrix/matrix-sliding-sync.nix
@@ -37,7 +37,7 @@ in
             type = lib.types.str;
             default = "127.0.0.1:8009";
             example = "[::]:8008";
-            description = lib.mdDoc "The interface and port to listen on.";
+            description = lib.mdDoc "The interface and port or path (for unix socket) to listen on.";
           };
 
           SYNCV3_LOG_LEVEL = lib.mkOption {
@@ -98,6 +98,7 @@ in
         ExecStart = lib.getExe cfg.package;
         StateDirectory = "matrix-sliding-sync";
         WorkingDirectory = "%S/matrix-sliding-sync";
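+        # Provides /run/matrix-sliding-sync, e.g. for binding to a unix socket.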
+        RuntimeDirectory = "matrix-sliding-sync";
         Restart = "on-failure";
         RestartSec = "1s";
       };
diff --git a/nixos/modules/services/misc/etebase-server.nix b/nixos/modules/services/misc/etebase-server.nix
index 045048a1a2e32..f5a5e8a780d48 100644
--- a/nixos/modules/services/misc/etebase-server.nix
+++ b/nixos/modules/services/misc/etebase-server.nix
@@ -5,9 +5,6 @@ with lib;
 let
   cfg = config.services.etebase-server;
 
-  pythonEnv = pkgs.python3.withPackages (ps: with ps;
-    [ etebase-server daphne ]);
-
   iniFmt = pkgs.formats.ini {};
 
   configIni = iniFmt.generate "etebase-server.ini" cfg.settings;
@@ -46,6 +43,13 @@ in
         '';
       };
 
+      package = mkOption {
+        type = types.package;
+        default = pkgs.python3.pkgs.etebase-server;
+        defaultText = literalExpression "pkgs.python3.pkgs.etebase-server";
+        description = lib.mdDoc "etebase-server package to use.";
+      };
+
       dataDir = mkOption {
         type = types.str;
         default = "/var/lib/etebase-server";
@@ -164,7 +168,7 @@ in
       (runCommand "etebase-server" {
         nativeBuildInputs = [ makeWrapper ];
       } ''
-        makeWrapper ${pythonEnv}/bin/etebase-server \
+        makeWrapper ${cfg.package}/bin/etebase-server \
           $out/bin/etebase-server \
           --chdir ${escapeShellArg cfg.dataDir} \
           --prefix ETEBASE_EASY_CONFIG_PATH : "${configIni}"
@@ -178,8 +182,8 @@ in
     systemd.services.etebase-server = {
       description = "An Etebase (EteSync 2.0) server";
       after = [ "network.target" "systemd-tmpfiles-setup.service" ];
+      path = [ cfg.package ];
       wantedBy = [ "multi-user.target" ];
-      path = [ pythonEnv ];
       serviceConfig = {
         User = cfg.user;
         Restart = "always";
@@ -187,24 +191,26 @@ in
       };
       environment = {
         ETEBASE_EASY_CONFIG_PATH = configIni;
+        PYTHONPATH = cfg.package.pythonPath;
       };
       preStart = ''
         # Auto-migrate on first run or if the package has changed
         versionFile="${cfg.dataDir}/src-version"
-        if [[ $(cat "$versionFile" 2>/dev/null) != ${pkgs.etebase-server} ]]; then
+        if [[ $(cat "$versionFile" 2>/dev/null) != ${cfg.package} ]]; then
           etebase-server migrate --no-input
           etebase-server collectstatic --no-input --clear
-          echo ${pkgs.etebase-server} > "$versionFile"
+          echo ${cfg.package} > "$versionFile"
         fi
       '';
       script =
         let
+          python = cfg.package.python;
           networking = if cfg.unixSocket != null
-          then "-u ${cfg.unixSocket}"
-          else "-b 0.0.0.0 -p ${toString cfg.port}";
+          then "--uds ${cfg.unixSocket}"
+          else "--host 0.0.0.0 --port ${toString cfg.port}";
         in ''
-          cd "${pythonEnv}/lib/etebase-server";
-          daphne ${networking} \
+          ${python.pkgs.uvicorn}/bin/uvicorn ${networking} \
+            --app-dir ${cfg.package}/${cfg.package.python.sitePackages} \
             etebase_server.asgi:application
         '';
     };
diff --git a/nixos/modules/services/misc/homepage-dashboard.nix b/nixos/modules/services/misc/homepage-dashboard.nix
index 07a09e2b6bbf5..02f1378cb0d59 100644
--- a/nixos/modules/services/misc/homepage-dashboard.nix
+++ b/nixos/modules/services/misc/homepage-dashboard.nix
@@ -6,6 +6,8 @@
 
 let
   cfg = config.services.homepage-dashboard;
+  # Define the settings format used for this program
+  settingsFormat = pkgs.formats.yaml { };
 in
 {
   options = {
@@ -25,31 +27,217 @@ in
         default = 8082;
         description = lib.mdDoc "Port for Homepage to bind to.";
       };
+
+      environmentFile = lib.mkOption {
+        type = lib.types.str;
+        description = ''
+          The path to an environment file that contains environment variables to pass
+          to the homepage-dashboard service, typically used to provide secrets.
+
+          See the upstream documentation:
+
+          https://gethomepage.dev/latest/installation/docker/#using-environment-secrets
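+
+          As an illustrative sketch (the `HOMEPAGE_VAR_` naming convention is described
+          in the upstream documentation linked above; the actual variable names depend
+          on your widgets), such a file might contain:
+
+          ```
+          HOMEPAGE_VAR_MY_API_KEY=some-secret-value
+          ```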
+        '';
+        default = "";
+      };
+
+      customCSS = lib.mkOption {
+        type = lib.types.lines;
+        description = lib.mdDoc ''
+          Custom CSS for styling Homepage.
+
+          See https://gethomepage.dev/latest/configs/custom-css-js/.
+        '';
+        default = "";
+      };
+
+      customJS = lib.mkOption {
+        type = lib.types.lines;
+        description = lib.mdDoc ''
+          Custom Javascript for Homepage.
+
+          See https://gethomepage.dev/latest/configs/custom-css-js/.
+        '';
+        default = "";
+      };
+
+      bookmarks = lib.mkOption {
+        inherit (settingsFormat) type;
+        description = lib.mdDoc ''
+          Homepage bookmarks configuration.
+
+          See https://gethomepage.dev/latest/configs/bookmarks/.
+        '';
+        # Defaults: https://github.com/gethomepage/homepage/blob/main/src/skeleton/bookmarks.yaml
+        example = [
+          {
+            Developer = [
+              { Github = [{ abbr = "GH"; href = "https://github.com/"; }]; }
+            ];
+          }
+          {
+            Entertainment = [
+              { YouTube = [{ abbr = "YT"; href = "https://youtube.com/"; }]; }
+            ];
+          }
+        ];
+        default = [ ];
+      };
+
+      services = lib.mkOption {
+        inherit (settingsFormat) type;
+        description = lib.mdDoc ''
+          Homepage services configuration.
+
+          See https://gethomepage.dev/latest/configs/services/.
+        '';
+        # Defaults: https://github.com/gethomepage/homepage/blob/main/src/skeleton/services.yaml
+        example = [
+          {
+            "My First Group" = [
+              {
+                "My First Service" = {
+                  href = "http://localhost/";
+                  description = "Homepage is awesome";
+                };
+              }
+            ];
+          }
+          {
+            "My Second Group" = [
+              {
+                "My Second Service" = {
+                  href = "http://localhost/";
+                  description = "Homepage is the best";
+                };
+              }
+            ];
+          }
+        ];
+        default = [ ];
+      };
+
+      widgets = lib.mkOption {
+        inherit (settingsFormat) type;
+        description = lib.mdDoc ''
+          Homepage widgets configuration.
+
+          See https://gethomepage.dev/latest/configs/service-widgets/.
+        '';
+        # Defaults: https://github.com/gethomepage/homepage/blob/main/src/skeleton/widgets.yaml
+        example = [
+          {
+            resources = {
+              cpu = true;
+              memory = true;
+              disk = "/";
+            };
+          }
+          {
+            search = {
+              provider = "duckduckgo";
+              target = "_blank";
+            };
+          }
+        ];
+        default = [ ];
+      };
+
+      kubernetes = lib.mkOption {
+        inherit (settingsFormat) type;
+        description = lib.mdDoc ''
+          Homepage kubernetes configuration.
+
+          See https://gethomepage.dev/latest/configs/kubernetes/.
+        '';
+        default = { };
+      };
+
+      docker = lib.mkOption {
+        inherit (settingsFormat) type;
+        description = lib.mdDoc ''
+          Homepage docker configuration.
+
+          See https://gethomepage.dev/latest/configs/docker/.
+        '';
+        default = { };
+      };
+
+      settings = lib.mkOption {
+        inherit (settingsFormat) type;
+        description = lib.mdDoc ''
+          Homepage settings.
+
+          See https://gethomepage.dev/latest/configs/settings/.
+        '';
+        # Defaults: https://github.com/gethomepage/homepage/blob/main/src/skeleton/settings.yaml
+        default = { };
+      };
     };
   };
 
-  config = lib.mkIf cfg.enable {
-    systemd.services.homepage-dashboard = {
-      description = "Homepage Dashboard";
-      after = [ "network.target" ];
-      wantedBy = [ "multi-user.target" ];
+  config =
+    let
+      # If homepage-dashboard is enabled, but none of the configuration values have been updated,
+      # then default to "unmanaged" configuration which is manually updated in
+      # var/lib/homepage-dashboard. This is to maintain backwards compatibility, and should be
+      # deprecated in a future release.
+      managedConfig = !(
+        cfg.bookmarks == [ ] &&
+        cfg.customCSS == "" &&
+        cfg.customJS == "" &&
+        cfg.docker == { } &&
+        cfg.kubernetes == { } &&
+        cfg.services == [ ] &&
+        cfg.settings == { } &&
+        cfg.widgets == [ ]
+      );
+
+      configDir = if managedConfig then "/etc/homepage-dashboard" else "/var/lib/homepage-dashboard";
+
+      msg = "Using unmanaged configuration for homepage-dashboard is deprecated and will be removed"
+        + " in 24.05. Please see the NixOS documentation for `services.homepage-dashboard' and add"
+        + " your bookmarks, services, widgets, and other configuration using the options provided.";
+    in
+    lib.mkIf cfg.enable {
+      warnings = lib.optional (!managedConfig) msg;
 
-      environment = {
-        HOMEPAGE_CONFIG_DIR = "/var/lib/homepage-dashboard";
-        PORT = "${toString cfg.listenPort}";
+      environment.etc = lib.mkIf managedConfig {
+        "homepage-dashboard/custom.css".text = cfg.customCSS;
+        "homepage-dashboard/custom.js".text = cfg.customJS;
+
+        "homepage-dashboard/bookmarks.yaml".source = settingsFormat.generate "bookmarks.yaml" cfg.bookmarks;
+        "homepage-dashboard/docker.yaml".source = settingsFormat.generate "docker.yaml" cfg.docker;
+        "homepage-dashboard/kubernetes.yaml".source = settingsFormat.generate "kubernetes.yaml" cfg.kubernetes;
+        "homepage-dashboard/services.yaml".source = settingsFormat.generate "services.yaml" cfg.services;
+        "homepage-dashboard/settings.yaml".source = settingsFormat.generate "settings.yaml" cfg.settings;
+        "homepage-dashboard/widgets.yaml".source = settingsFormat.generate "widgets.yaml" cfg.widgets;
       };
 
-      serviceConfig = {
-        Type = "simple";
-        DynamicUser = true;
-        StateDirectory = "homepage-dashboard";
-        ExecStart = "${lib.getExe cfg.package}";
-        Restart = "on-failure";
+      systemd.services.homepage-dashboard = {
+        description = "Homepage Dashboard";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+
+        environment = {
+          HOMEPAGE_CONFIG_DIR = configDir;
+          PORT = toString cfg.listenPort;
+          LOG_TARGETS = lib.mkIf managedConfig "stdout";
+        };
+
+        serviceConfig = {
+          Type = "simple";
+          DynamicUser = true;
+          EnvironmentFile = lib.mkIf (cfg.environmentFile != "") cfg.environmentFile;
+          StateDirectory = lib.mkIf (!managedConfig) "homepage-dashboard";
+          ExecStart = lib.getExe cfg.package;
+          Restart = "on-failure";
+        };
       };
-    };
 
-    networking.firewall = lib.mkIf cfg.openFirewall {
-      allowedTCPPorts = [ cfg.listenPort ];
+      networking.firewall = lib.mkIf cfg.openFirewall {
+        allowedTCPPorts = [ cfg.listenPort ];
+      };
     };
-  };
 }
diff --git a/nixos/modules/services/misc/paperless.nix b/nixos/modules/services/misc/paperless.nix
index ab042e4b6ee2a..9314c4f3848d8 100644
--- a/nixos/modules/services/misc/paperless.nix
+++ b/nixos/modules/services/misc/paperless.nix
@@ -342,6 +342,7 @@ in
         User = cfg.user;
         Restart = "on-failure";
 
+        LimitNOFILE = 65536;
         # gunicorn needs setuid, liblapack needs mbind
         SystemCallFilter = defaultServiceConfig.SystemCallFilter ++ [ "@setuid mbind" ];
         # Needs to serve web page
diff --git a/nixos/modules/services/misc/tabby.nix b/nixos/modules/services/misc/tabby.nix
new file mode 100644
index 0000000000000..a3072e5df75ea
--- /dev/null
+++ b/nixos/modules/services/misc/tabby.nix
@@ -0,0 +1,203 @@
+{ config, lib, pkgs, ... }:
+let
+  inherit (lib) types;
+
+  cfg = config.services.tabby;
+  format = pkgs.formats.toml { };
+  tabbyPackage = cfg.package.override {
+    inherit (cfg) acceleration;
+  };
+in
+{
+  options = {
+    services.tabby = {
+      enable = lib.mkEnableOption (
+        lib.mdDoc "Self-hosted AI coding assistant using large language models"
+      );
+
+      package = lib.mkPackageOption pkgs "tabby" { };
+
+      port = lib.mkOption {
+        type = types.port;
+        default = 11029;
+        description = lib.mdDoc ''
+          Specifies the bind port on which the tabby server HTTP interface listens.
+        '';
+      };
+
+      model = lib.mkOption {
+        type = types.str;
+        default = "TabbyML/StarCoder-1B";
+        description = lib.mdDoc ''
+          Specify the model that tabby will use to generate completions.
+
+          This model will be downloaded automatically if it is not already present.
+
+          If you want to use a model that you have already downloaded, you need
+          to move it into tabby's state directory, `/var/lib/tabby`. Because
+          tabby.service is configured with DynamicUser, the service must have
+          been started at least once before that directory exists. You can set
+          the model to 'none' so that tabby starts up, fails to download a
+          model, but still creates `/var/lib/tabby`. Then copy the model into
+          `/var/lib/tabby`, update this option to the name of the model you
+          copied over, and run `nixos-rebuild switch` to start using it.
+
+          ```sh
+          $ tabby download --model TabbyML/DeepseekCoder-6.7B
+          $ find ~/.tabby/ | tail -n1
+          /home/ghthor/.tabby/models/TabbyML/DeepseekCoder-6.7B/ggml/q8_0.v2.gguf
+          $ sudo rsync -r ~/.tabby/models/ /var/lib/tabby/models/
+          $ sudo chown -R tabby:tabby /var/lib/tabby/models/
+          ```
+
+          For the available models, see:
+          > https://github.com/TabbyML/registry-tabby
+        '';
+      };
+
+      acceleration = lib.mkOption {
+        type = types.nullOr (types.enum [ "cpu" "rocm" "cuda" "metal" ]);
+        default = null;
+        example = "rocm";
+        description = lib.mdDoc ''
+          Specifies the device to use for hardware acceleration.
+
+          -   `cpu`: no acceleration just use the CPU
+          -  `rocm`: supported by modern AMD GPUs
+          -  `cuda`: supported by modern NVIDIA GPUs
+          - `metal`: supported on darwin aarch64 machines
+
+          When `acceleration = null`, tabby will try to determine which type of
+          acceleration is already enabled in your configuration by checking:
+
+          - nixpkgs.config.cudaSupport
+          - nixpkgs.config.rocmSupport
+          - if stdenv.isDarwin && stdenv.isAarch64
+
+          If multiple acceleration methods are found to be enabled, or if you
+          have set neither `cudaSupport` nor `rocmSupport`, you will have to
+          specify the device type manually here; otherwise it defaults to the
+          first match from the list above, or to `cpu`.
+        '';
+      };
+
+      settings = lib.mkOption {
+        inherit (format) type;
+        default = { };
+        description = lib.mdDoc ''
+          Tabby scheduler configuration
+
+          See for more details:
+          > https://tabby.tabbyml.com/docs/configuration/#repository-context-for-code-completion
+        '';
+        example = lib.literalExpression ''
+          settings = {
+            repositories = [
+              { name = "tabby"; git_url = "https://github.com/TabbyML/tabby.git"; }
+              { name = "CTranslate2"; git_url = "git@github.com:OpenNMT/CTranslate2.git"; }
+
+              # local directory is also supported, but limited by systemd DynamicUser=1
+              # adding local repositories will need to be done manually
+              { name = "repository_a"; git_url = "file:///var/lib/tabby/repository_a"; }
+            ];
+          };
+        '';
+      };
+
+      usageCollection = lib.mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable sending anonymous usage data.
+
+          See for more details:
+          > https://tabby.tabbyml.com/docs/configuration#usage-collection
+        '';
+      };
+
+      indexInterval = lib.mkOption {
+        type = types.str;
+        default = "5hours";
+        example = "5hours";
+        description = lib.mdDoc ''
+          Run tabby scheduler to generate the index database at this interval.
+          Updates by default every 5 hours. This value applies to
+          `OnUnitInactiveSec`.
+
+          The format is described in
+          {manpage}`systemd.time(7)`.
+
+          To disable the periodic `tabby scheduler --now` runs, set this to `"never"`.
+        '';
+      };
+    };
+  };
+
+  # TODO(ghthor): firewall config
+
+  config = lib.mkIf cfg.enable {
+    environment = {
+      etc."tabby/config.toml".source = format.generate "config.toml" cfg.settings;
+      systemPackages = [ tabbyPackage ];
+    };
+
+    systemd = let
+      serviceUser = {
+        WorkingDirectory = "/var/lib/tabby";
+        StateDirectory = [ "tabby" ];
+        ConfigurationDirectory = [ "tabby" ];
+        DynamicUser = true;
+        User = "tabby";
+        Group = "tabby";
+      };
+
+      serviceEnv = lib.mkMerge [
+        {
+          TABBY_ROOT = "%S/tabby";
+        }
+        (lib.mkIf (!cfg.usageCollection) {
+          TABBY_DISABLE_USAGE_COLLECTION = "1";
+        })
+      ];
+    in {
+      services.tabby = {
+        wantedBy = [ "multi-user.target" ];
+        description = "Self-hosted AI coding assistant using large language models";
+        after = [ "network.target" ];
+        environment = serviceEnv;
+        serviceConfig = lib.mkMerge [
+          serviceUser
+          {
+            ExecStart =
+              "${lib.getExe tabbyPackage} serve --model ${cfg.model} --port ${toString cfg.port} --device ${tabbyPackage.featureDevice}";
+          }
+        ];
+      };
+
+      services.tabby-scheduler = lib.mkIf (cfg.indexInterval != "never") {
+        wantedBy = [ "multi-user.target" ];
+        description = "Tabby repository indexing service";
+        after = [ "network.target" ];
+        environment = serviceEnv;
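+        # tabby reads the scheduler configuration from TABBY_ROOT, so refresh the
+        # generated config there before each run.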
+        preStart = "cp -f /etc/tabby/config.toml \${TABBY_ROOT}/config.toml";
+        serviceConfig = lib.mkMerge [
+          serviceUser
+          {
+            # Type = "oneshot";
+            ExecStart = "${lib.getExe tabbyPackage} scheduler --now";
+          }
+        ];
+      };
+      timers.tabby-scheduler = lib.mkIf (cfg.indexInterval != "never") {
+        description = "Update timer for tabby-scheduler";
+        partOf = [ "tabby-scheduler.service" ];
+        wantedBy = [ "timers.target" ];
+        timerConfig.OnUnitInactiveSec = cfg.indexInterval;
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ ghthor ];
+}
diff --git a/nixos/modules/services/monitoring/mackerel-agent.nix b/nixos/modules/services/monitoring/mackerel-agent.nix
index 5915634ed26fe..d1e84c0359dc1 100644
--- a/nixos/modules/services/monitoring/mackerel-agent.nix
+++ b/nixos/modules/services/monitoring/mackerel-agent.nix
@@ -81,7 +81,7 @@ in {
       include = mkDefault "/etc/mackerel-agent/conf.d/*.conf";
     };
 
-    # upstream service file in https://git.io/JUt4Q
+    # upstream service file in https://github.com/mackerelio/mackerel-agent/blob/master/packaging/rpm/src/mackerel-agent.service
     systemd.services.mackerel-agent = {
       description = "mackerel.io agent";
       wants = [ "network-online.target" ];
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/restic.nix b/nixos/modules/services/monitoring/prometheus/exporters/restic.nix
index 5b32c93a666da..977bd42e9812e 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters/restic.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters/restic.nix
@@ -93,12 +93,14 @@ in
   };
 
   serviceOpts = {
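+    # Wrap the exporter in a small script so RESTIC_PASSWORD_FILE can point at the
+    # credential provided via LoadCredential below.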
+    script = ''
+      export RESTIC_PASSWORD_FILE=$CREDENTIALS_DIRECTORY/RESTIC_PASSWORD_FILE
+      ${pkgs.prometheus-restic-exporter}/bin/restic-exporter.py \
+        ${concatStringsSep " \\\n  " cfg.extraFlags}
+    '';
     serviceConfig = {
-      ExecStart = ''
-        ${pkgs.prometheus-restic-exporter}/bin/restic-exporter.py \
-          ${concatStringsSep " \\\n  " cfg.extraFlags}
-      '';
       EnvironmentFile = mkIf (cfg.environmentFile != null) cfg.environmentFile;
+      LoadCredential = [ "RESTIC_PASSWORD_FILE:${cfg.passwordFile}" ];
     };
     environment =
       let
@@ -108,8 +110,7 @@ in
         toRcloneVal = v: if lib.isBool v then lib.boolToString v else v;
       in
       {
-        RESTIC_REPO_URL = cfg.repository;
-        RESTIC_REPO_PASSWORD_FILE = cfg.passwordFile;
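+        # Use restic's standard RESTIC_REPOSITORY variable; the password file is
+        # supplied via LoadCredential above.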
+        RESTIC_REPOSITORY = cfg.repository;
         LISTEN_ADDRESS = cfg.listenAddress;
         LISTEN_PORT = toString cfg.port;
         REFRESH_INTERVAL = toString cfg.refreshInterval;
diff --git a/nixos/modules/services/monitoring/scrutiny.nix b/nixos/modules/services/monitoring/scrutiny.nix
index 454668a9a128d..aef924ef840cc 100644
--- a/nixos/modules/services/monitoring/scrutiny.nix
+++ b/nixos/modules/services/monitoring/scrutiny.nix
@@ -1,5 +1,11 @@
 { config, lib, pkgs, ... }:
 let
+  inherit (lib) maintainers;
+  inherit (lib.meta) getExe;
+  inherit (lib.modules) mkIf;
+  inherit (lib.options) literalExpression mkEnableOption mkOption mkPackageOption;
+  inherit (lib.types) bool enum nullOr port str submodule;
+
   cfg = config.services.scrutiny;
   # Define the settings format used for this program
   settingsFormat = pkgs.formats.yaml { };
@@ -7,20 +13,16 @@ in
 {
   options = {
     services.scrutiny = {
-      enable = lib.mkEnableOption "Enables the scrutiny web application.";
+      enable = mkEnableOption "Scrutiny, a web application for drive monitoring";
 
-      package = lib.mkPackageOptionMD pkgs "scrutiny" { };
+      package = mkPackageOption pkgs "scrutiny" { };
 
-      openFirewall = lib.mkOption {
-        type = lib.types.bool;
-        default = false;
-        description = "Open the default ports in the firewall for Scrutiny.";
-      };
+      openFirewall = mkEnableOption "opening the default ports in the firewall for Scrutiny";
 
-      influxdb.enable = lib.mkOption {
-        type = lib.types.bool;
+      influxdb.enable = mkOption {
+        type = bool;
         default = true;
-        description = lib.mdDoc ''
+        description = ''
           Enables InfluxDB on the host system using the `services.influxdb2` NixOS module
           with default options.
 
@@ -29,127 +31,124 @@ in
         '';
       };
 
-      settings = lib.mkOption {
-        description = lib.mdDoc ''
+      settings = mkOption {
+        description = ''
           Scrutiny settings to be rendered into the configuration file.
 
           See https://github.com/AnalogJ/scrutiny/blob/master/example.scrutiny.yaml.
         '';
         default = { };
-        type = lib.types.submodule {
+        type = submodule {
           freeformType = settingsFormat.type;
 
-          options.web.listen.port = lib.mkOption {
-            type = lib.types.port;
+          options.web.listen.port = mkOption {
+            type = port;
             default = 8080;
-            description = lib.mdDoc "Port for web application to listen on.";
+            description = "Port for web application to listen on.";
           };
 
-          options.web.listen.host = lib.mkOption {
-            type = lib.types.str;
+          options.web.listen.host = mkOption {
+            type = str;
             default = "0.0.0.0";
-            description = lib.mdDoc "Interface address for web application to bind to.";
+            description = "Interface address for web application to bind to.";
           };
 
-          options.web.listen.basepath = lib.mkOption {
-            type = lib.types.str;
+          options.web.listen.basepath = mkOption {
+            type = str;
             default = "";
             example = "/scrutiny";
-            description = lib.mdDoc ''
+            description = ''
               If Scrutiny will be behind a path prefixed reverse proxy, you can override this
               value to serve Scrutiny on a subpath.
             '';
           };
 
-          options.log.level = lib.mkOption {
-            type = lib.types.enum [ "INFO" "DEBUG" ];
+          options.log.level = mkOption {
+            type = enum [ "INFO" "DEBUG" ];
             default = "INFO";
-            description = lib.mdDoc "Log level for Scrutiny.";
+            description = "Log level for Scrutiny.";
           };
 
-          options.web.influxdb.scheme = lib.mkOption {
-            type = lib.types.str;
+          options.web.influxdb.scheme = mkOption {
+            type = str;
             default = "http";
-            description = lib.mdDoc "URL scheme to use when connecting to InfluxDB.";
+            description = "URL scheme to use when connecting to InfluxDB.";
           };
 
-          options.web.influxdb.host = lib.mkOption {
-            type = lib.types.str;
+          options.web.influxdb.host = mkOption {
+            type = str;
             default = "0.0.0.0";
-            description = lib.mdDoc "IP or hostname of the InfluxDB instance.";
+            description = "IP or hostname of the InfluxDB instance.";
           };
 
-          options.web.influxdb.port = lib.mkOption {
-            type = lib.types.port;
+          options.web.influxdb.port = mkOption {
+            type = port;
             default = 8086;
-            description = lib.mdDoc "The port of the InfluxDB instance.";
+            description = "The port of the InfluxDB instance.";
           };
 
-          options.web.influxdb.tls.insecure_skip_verify = lib.mkOption {
-            type = lib.types.bool;
-            default = false;
-            description = lib.mdDoc "Skip TLS verification when connecting to InfluxDB.";
-          };
+          options.web.influxdb.tls.insecure_skip_verify = mkEnableOption "skipping TLS verification when connecting to InfluxDB";
 
-          options.web.influxdb.token = lib.mkOption {
-            type = lib.types.nullOr lib.types.str;
+          options.web.influxdb.token = mkOption {
+            type = nullOr str;
             default = null;
-            description = lib.mdDoc "Authentication token for connecting to InfluxDB.";
+            description = "Authentication token for connecting to InfluxDB.";
           };
 
-          options.web.influxdb.org = lib.mkOption {
-            type = lib.types.nullOr lib.types.str;
+          options.web.influxdb.org = mkOption {
+            type = nullOr str;
             default = null;
-            description = lib.mdDoc "InfluxDB organisation under which to store data.";
+            description = "InfluxDB organisation under which to store data.";
           };
 
-          options.web.influxdb.bucket = lib.mkOption {
-            type = lib.types.nullOr lib.types.str;
+          options.web.influxdb.bucket = mkOption {
+            type = nullOr str;
             default = null;
-            description = lib.mdDoc "InfluxDB bucket in which to store data.";
+            description = "InfluxDB bucket in which to store data.";
           };
         };
       };
 
       collector = {
-        enable = lib.mkEnableOption "Enables the scrutiny metrics collector.";
+        enable = mkEnableOption "the Scrutiny metrics collector";
 
-        package = lib.mkPackageOptionMD pkgs "scrutiny-collector" { };
+        package = mkPackageOption pkgs "scrutiny-collector" { };
 
-        schedule = lib.mkOption {
-          type = lib.types.str;
+        schedule = mkOption {
+          type = str;
           default = "*:0/15";
-          description = lib.mdDoc ''
+          description = ''
             How often to run the collector in systemd calendar format.
           '';
         };
 
-        settings = lib.mkOption {
-          description = lib.mdDoc ''
+        settings = mkOption {
+          description = ''
             Collector settings to be rendered into the collector configuration file.
 
             See https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml.
           '';
           default = { };
-          type = lib.types.submodule {
+          type = submodule {
             freeformType = settingsFormat.type;
 
-            options.host.id = lib.mkOption {
-              type = lib.types.nullOr lib.types.str;
+            options.host.id = mkOption {
+              type = nullOr str;
               default = null;
-              description = lib.mdDoc "Host ID for identifying/labelling groups of disks";
+              description = "Host ID for identifying/labelling groups of disks";
             };
 
-            options.api.endpoint = lib.mkOption {
-              type = lib.types.str;
-              default = "http://localhost:8080";
-              description = lib.mdDoc "Scrutiny app API endpoint for sending metrics to.";
+            options.api.endpoint = mkOption {
+              type = str;
+              default = "http://localhost:${toString cfg.settings.web.listen.port}";
+              defaultText = literalExpression ''"http://localhost:''${config.services.scrutiny.settings.web.listen.port}"'';
+              description = "Scrutiny app API endpoint for sending metrics to.";
             };
 
-            options.log.level = lib.mkOption {
-              type = lib.types.enum [ "INFO" "DEBUG" ];
+            options.log.level = mkOption {
+              type = enum [ "INFO" "DEBUG" ];
               default = "INFO";
-              description = lib.mdDoc "Log level for Scrutiny collector.";
+              description = "Log level for Scrutiny collector.";
             };
           };
         };
@@ -157,14 +156,14 @@ in
     };
   };
 
-  config = lib.mkIf (cfg.enable || cfg.collector.enable) {
+  config = mkIf (cfg.enable || cfg.collector.enable) {
     services.influxdb2.enable = cfg.influxdb.enable;
 
-    networking.firewall = lib.mkIf cfg.openFirewall {
+    networking.firewall = mkIf cfg.openFirewall {
       allowedTCPPorts = [ cfg.settings.web.listen.port ];
     };
 
-    services.smartd = lib.mkIf cfg.collector.enable {
+    services.smartd = mkIf cfg.collector.enable {
       enable = true;
       extraOptions = [
         "-A /var/log/smartd/"
@@ -174,7 +173,7 @@ in
 
     systemd = {
       services = {
-        scrutiny = lib.mkIf cfg.enable {
+        scrutiny = mkIf cfg.enable {
           description = "Hard Drive S.M.A.R.T Monitoring, Historical Trends & Real World Failure Thresholds";
           wantedBy = [ "multi-user.target" ];
           after = [ "network.target" ];
@@ -185,14 +184,14 @@ in
           };
           serviceConfig = {
             DynamicUser = true;
-            ExecStart = "${lib.getExe cfg.package} start --config ${settingsFormat.generate "scrutiny.yaml" cfg.settings}";
+            ExecStart = "${getExe cfg.package} start --config ${settingsFormat.generate "scrutiny.yaml" cfg.settings}";
             Restart = "always";
             StateDirectory = "scrutiny";
             StateDirectoryMode = "0750";
           };
         };
 
-        scrutiny-collector = lib.mkIf cfg.collector.enable {
+        scrutiny-collector = mkIf cfg.collector.enable {
           description = "Scrutiny Collector Service";
           environment = {
             COLLECTOR_VERSION = "1";
@@ -200,12 +199,12 @@ in
           };
           serviceConfig = {
             Type = "oneshot";
-            ExecStart = "${lib.getExe cfg.collector.package} run --config ${settingsFormat.generate "scrutiny-collector.yaml" cfg.collector.settings}";
+            ExecStart = "${getExe cfg.collector.package} run --config ${settingsFormat.generate "scrutiny-collector.yaml" cfg.collector.settings}";
           };
         };
       };
 
-      timers = lib.mkIf cfg.collector.enable {
+      timers = mkIf cfg.collector.enable {
         scrutiny-collector = {
           timerConfig = {
             OnCalendar = cfg.collector.schedule;
@@ -217,5 +216,5 @@ in
     };
   };
 
-  meta.maintainers = [ lib.maintainers.jnsgruk ];
+  meta.maintainers = [ maintainers.jnsgruk ];
 }
diff --git a/nixos/modules/services/networking/bird-lg.nix b/nixos/modules/services/networking/bird-lg.nix
index be9f4101e6abe..1c59f7a6ae7c6 100644
--- a/nixos/modules/services/networking/bird-lg.nix
+++ b/nixos/modules/services/networking/bird-lg.nix
@@ -194,8 +194,8 @@ in
         allowedIPs = mkOption {
           type = types.listOf types.str;
           default = [ ];
-          example = [ "192.168.25.52" "192.168.25.53" ];
-          description = lib.mdDoc "List of IPs to allow (default all allowed).";
+          example = [ "192.168.25.52" "192.168.25.53" "192.168.0.0/24" ];
+          description = lib.mdDoc "List of IPs or networks to allow (default all allowed).";
         };
 
         birdSocket = mkOption {
diff --git a/nixos/modules/services/networking/murmur.nix b/nixos/modules/services/networking/murmur.nix
index 5805f332a66fe..1fb5063e5ad8d 100644
--- a/nixos/modules/services/networking/murmur.nix
+++ b/nixos/modules/services/networking/murmur.nix
@@ -33,7 +33,7 @@ let
     sendversion=${boolToString cfg.sendVersion}
 
     ${optionalString (cfg.registerName != "") "registerName=${cfg.registerName}"}
-    ${optionalString (cfg.registerPassword == "") "registerPassword=${cfg.registerPassword}"}
+    ${optionalString (cfg.registerPassword != "") "registerPassword=${cfg.registerPassword}"}
     ${optionalString (cfg.registerUrl != "") "registerUrl=${cfg.registerUrl}"}
     ${optionalString (cfg.registerHostname != "") "registerHostname=${cfg.registerHostname}"}
 
diff --git a/nixos/modules/services/networking/nebula.nix b/nixos/modules/services/networking/nebula.nix
index e13876172dac6..2f9e41ae9c801 100644
--- a/nixos/modules/services/networking/nebula.nix
+++ b/nixos/modules/services/networking/nebula.nix
@@ -10,6 +10,15 @@ let
   format = pkgs.formats.yaml {};
 
   nameToId = netName: "nebula-${netName}";
+
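+  # Lighthouses and relays need a fixed, reachable port (4242 by default); other
+  # nodes can fall back to an ephemeral port (0) unless one is set explicitly.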
+  resolveFinalPort = netCfg:
+    if netCfg.listen.port == null then
+      if (netCfg.isLighthouse || netCfg.isRelay) then
+        4242
+      else
+        0
+    else
+      netCfg.listen.port;
 in
 {
   # Interface
@@ -95,8 +104,15 @@ in
             };
 
             listen.port = mkOption {
-              type = types.port;
-              default = 4242;
+              type = types.nullOr types.port;
+              default = null;
+              defaultText = lib.literalExpression ''
+                if (config.services.nebula.networks.''${name}.isLighthouse ||
+                    config.services.nebula.networks.''${name}.isRelay) then
+                  4242
+                else
+                  0;
+              '';
               description = lib.mdDoc "Port number to listen on.";
             };
 
@@ -174,7 +190,7 @@ in
           };
           listen = {
             host = netCfg.listen.host;
-            port = netCfg.listen.port;
+            port = resolveFinalPort netCfg;
           };
           tun = {
             disabled = netCfg.tun.disable;
@@ -185,7 +201,15 @@ in
             outbound = netCfg.firewall.outbound;
           };
         } netCfg.settings;
-        configFile = format.generate "nebula-config-${netName}.yml" settings;
+        configFile = format.generate "nebula-config-${netName}.yml" (
+          warnIf
+            ((settings.lighthouse.am_lighthouse || settings.relay.am_relay) && settings.listen.port == 0)
+            ''
+              Nebula network '${netName}' is configured as a lighthouse or relay, and its port is ${builtins.toString settings.listen.port}.
+              You will likely experience connectivity issues: https://nebula.defined.net/docs/config/listen/#listenport
+            ''
+            settings
+          );
         in
         {
           # Create the systemd service for Nebula.
@@ -229,7 +253,7 @@ in
 
     # Open the chosen ports for UDP.
     networking.firewall.allowedUDPPorts =
-      unique (mapAttrsToList (netName: netCfg: netCfg.listen.port) enabledNetworks);
+      unique (filter (port: port > 0) (mapAttrsToList (netName: netCfg: resolveFinalPort netCfg) enabledNetworks));
 
     # Create the service users and groups.
     users.users = mkMerge (mapAttrsToList (netName: netCfg:
diff --git a/nixos/modules/services/networking/networkmanager.nix b/nixos/modules/services/networking/networkmanager.nix
index c96439cf2641a..dcde505b7f2ac 100644
--- a/nixos/modules/services/networking/networkmanager.nix
+++ b/nixos/modules/services/networking/networkmanager.nix
@@ -436,6 +436,7 @@ in
             And if you edit a declarative profile NetworkManager will move it to the persistent storage and treat it like a ad-hoc one,
             but there will be two profiles as soon as the systemd unit from this option runs again which can be confusing since NetworkManager tools will start displaying two profiles with the same name and probably a bit different settings depending on what you edited.
             A profile won't be deleted even if it's removed from the config until the system reboots because that's when NetworkManager clears it's temp directory.
+            If `networking.resolvconf.enable` is true, attributes affecting name resolution (such as `ignore-auto-dns`) may not change `/etc/resolv.conf` as expected when other name services (for example `networking.dhcpcd`) are enabled. Run `resolvconf -l` in a terminal to see what each service produces.
           '';
         };
         environmentFiles = mkOption {
diff --git a/nixos/modules/services/networking/unbound.nix b/nixos/modules/services/networking/unbound.nix
index 8438e472e11ee..17c6789827b9b 100644
--- a/nixos/modules/services/networking/unbound.nix
+++ b/nixos/modules/services/networking/unbound.nix
@@ -76,12 +76,13 @@ in {
 
       checkconf = mkOption {
         type = types.bool;
-        default = !cfg.settings ? include;
-        defaultText = "!config.services.unbound.settings ? include";
+        default = !cfg.settings ? include && !cfg.settings ? remote-control;
+        defaultText = "!config.services.unbound.settings ? include && !config.services.unbound.settings ? remote-control";
         description = lib.mdDoc ''
           Wether to check the resulting config file with unbound checkconf for syntax errors.
 
-          If settings.include is used, then this options is disabled, as the import can likely not be resolved at build time.
+          If settings.include is used, this option is disabled, as the import can likely not be accessed at build time.
+          If settings.remote-control is used, this option is disabled too, as the control-key-file, server-cert-file and server-key-file cannot be accessed at build time.
         '';
       };
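
As a hedged illustration of the new default: a configuration that sets remote-control now skips the build-time check automatically, because the referenced key and certificate files only exist at runtime.

  services.unbound = {
    enable = true;
    # Either of the keys below disables the build-time checkconf run by default.
    settings.remote-control.control-enable = true;
    # checkconf = true;  # can still be forced on if no runtime-only files are referenced
  };
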
 
diff --git a/nixos/modules/services/security/esdm.nix b/nixos/modules/services/security/esdm.nix
index 134b4be1a94c8..c34fba1b3c75b 100644
--- a/nixos/modules/services/security/esdm.nix
+++ b/nixos/modules/services/security/esdm.nix
@@ -4,49 +4,33 @@ let
   cfg = config.services.esdm;
 in
 {
+  imports = [
+    # removed option 'services.esdm.cuseRandomEnable'
+    (lib.mkRemovedOptionModule [ "services" "esdm" "cuseRandomEnable" ] ''
+      Use services.esdm.enableLinuxCompatServices instead.
+    '')
+    # removed option 'services.esdm.cuseUrandomEnable'
+    (lib.mkRemovedOptionModule [ "services" "esdm" "cuseUrandomEnable" ] ''
+      Use services.esdm.enableLinuxCompatServices instead.
+    '')
+    # removed option 'services.esdm.procEnable'
+    (lib.mkRemovedOptionModule [ "services" "esdm" "procEnable" ] ''
+      Use services.esdm.enableLinuxCompatServices instead.
+    '')
+    # removed option 'services.esdm.verbose'
+    (lib.mkRemovedOptionModule [ "services" "esdm" "verbose" ] ''
+      There is no replacement.
+    '')
+  ];
+
   options.services.esdm = {
     enable = lib.mkEnableOption (lib.mdDoc "ESDM service configuration");
     package = lib.mkPackageOption pkgs "esdm" { };
-    serverEnable = lib.mkOption {
-      type = lib.types.bool;
-      default = true;
-      description = lib.mdDoc ''
-        Enable option for ESDM server service. If serverEnable == false, then the esdm-server
-        will not start. Also the subsequent services esdm-cuse-random, esdm-cuse-urandom
-        and esdm-proc will not start as these have the entry Want=esdm-server.service.
-      '';
-    };
-    cuseRandomEnable = lib.mkOption {
-      type = lib.types.bool;
-      default = true;
-      description = lib.mdDoc ''
-        Enable option for ESDM cuse-random service. Determines if the esdm-cuse-random.service
-        is started.
-      '';
-    };
-    cuseUrandomEnable = lib.mkOption {
-      type = lib.types.bool;
-      default = true;
-      description = lib.mdDoc ''
-        Enable option for ESDM cuse-urandom service. Determines if the esdm-cuse-urandom.service
-        is started.
-      '';
-    };
-    procEnable = lib.mkOption {
+    enableLinuxCompatServices = lib.mkOption {
       type = lib.types.bool;
       default = true;
       description = lib.mdDoc ''
-        Enable option for ESDM proc service. Determines if the esdm-proc.service
-        is started.
-      '';
-    };
-    verbose = lib.mkOption {
-      type = lib.types.bool;
-      default = false;
-      description = lib.mdDoc ''
-        Enable verbose ExecStart for ESDM. If verbose == true, then the corresponding "ExecStart"
-        values of the 4 aforementioned services are overwritten with the option
-        for the highest verbosity.
+        Enable the /dev/random, /dev/urandom and /proc/sys/kernel/random/* userspace wrappers.
       '';
     };
   };
@@ -55,46 +39,13 @@ in
     lib.mkMerge [
       ({
         systemd.packages = [ cfg.package ];
-      })
-      # It is necessary to set those options for these services to be started by systemd in NixOS
-      (lib.mkIf cfg.serverEnable {
         systemd.services."esdm-server".wantedBy = [ "basic.target" ];
-        systemd.services."esdm-server".serviceConfig = lib.mkIf cfg.verbose {
-          ExecStart = [
-            " " # unset previous value defined in 'esdm-server.service'
-            "${cfg.package}/bin/esdm-server -f -vvvvvv"
-          ];
-        };
-      })
-
-      (lib.mkIf cfg.cuseRandomEnable {
-        systemd.services."esdm-cuse-random".wantedBy = [ "basic.target" ];
-        systemd.services."esdm-cuse-random".serviceConfig = lib.mkIf cfg.verbose {
-          ExecStart = [
-            " " # unset previous value defined in 'esdm-cuse-random.service'
-            "${cfg.package}/bin/esdm-cuse-random -f -v 6"
-          ];
-        };
       })
-
-      (lib.mkIf cfg.cuseUrandomEnable {
-        systemd.services."esdm-cuse-urandom".wantedBy = [ "basic.target" ];
-        systemd.services."esdm-cuse-urandom".serviceConfig = lib.mkIf cfg.verbose {
-          ExecStart = [
-            " " # unset previous value defined in 'esdm-cuse-urandom.service'
-            "${config.services.esdm.package}/bin/esdm-cuse-urandom -f -v 6"
-          ];
-        };
-      })
-
-      (lib.mkIf cfg.procEnable {
-        systemd.services."esdm-proc".wantedBy = [ "basic.target" ];
-        systemd.services."esdm-proc".serviceConfig = lib.mkIf cfg.verbose {
-          ExecStart = [
-            " " # unset previous value defined in 'esdm-proc.service'
-            "${cfg.package}/bin/esdm-proc --relabel -f -o allow_other /proc/sys/kernel/random -v 6"
-          ];
-        };
+      # It is necessary to set those options for these services to be started by systemd in NixOS
+      (lib.mkIf cfg.enableLinuxCompatServices {
+        systemd.targets."esdm-linux-compat".wantedBy = [ "basic.target" ];
+        systemd.services."esdm-server-suspend".wantedBy = [ "sleep.target" "suspend.target" "hibernate.target" ];
+        systemd.services."esdm-server-resume".wantedBy = [ "sleep.target" "suspend.target" "hibernate.target" ];
       })
     ]);
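
A minimal sketch of the migrated configuration; the single enableLinuxCompatServices switch replaces the removed cuseRandomEnable, cuseUrandomEnable and procEnable options:

  services.esdm = {
    enable = true;
    # Wires up the esdm-linux-compat target and the suspend/resume helper
    # units added in the hunk above.
    enableLinuxCompatServices = true;  # this is the default
  };
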
 
diff --git a/nixos/modules/services/security/vaultwarden/default.nix b/nixos/modules/services/security/vaultwarden/default.nix
index 470db735bf649..60d8015d0ceeb 100644
--- a/nixos/modules/services/security/vaultwarden/default.nix
+++ b/nixos/modules/services/security/vaultwarden/default.nix
@@ -180,7 +180,6 @@ in {
     users.groups.vaultwarden = { };
 
     systemd.services.vaultwarden = {
-      aliases = [ "bitwarden_rs.service" ];
       after = [ "network.target" ];
       path = with pkgs; [ openssl ];
       serviceConfig = {
@@ -202,7 +201,6 @@ in {
     };
 
     systemd.services.backup-vaultwarden = mkIf (cfg.backupDir != null) {
-      aliases = [ "backup-bitwarden_rs.service" ];
       description = "Backup vaultwarden";
       environment = {
         DATA_FOLDER = "/var/lib/bitwarden_rs";
@@ -222,7 +220,6 @@ in {
     };
 
     systemd.timers.backup-vaultwarden = mkIf (cfg.backupDir != null) {
-      aliases = [ "backup-bitwarden_rs.timer" ];
       description = "Backup vaultwarden on time";
       timerConfig = {
         OnCalendar = mkDefault "23:00";
@@ -240,6 +237,9 @@ in {
     };
   };
 
-  # uses attributes of the linked package
-  meta.buildDocsInSandbox = false;
+  meta = {
+    # uses attributes of the linked package
+    buildDocsInSandbox = false;
+    maintainers = with lib.maintainers; [ dotlambda SuperSandro2000 ];
+  };
 }
diff --git a/nixos/modules/services/security/yubikey-agent.nix b/nixos/modules/services/security/yubikey-agent.nix
index a9f15e4405f23..3d5f84af2cf48 100644
--- a/nixos/modules/services/security/yubikey-agent.nix
+++ b/nixos/modules/services/security/yubikey-agent.nix
@@ -6,9 +6,6 @@ with lib;
 
 let
   cfg = config.services.yubikey-agent;
-
-  # reuse the pinentryFlavor option from the gnupg module
-  pinentryFlavor = config.programs.gnupg.agent.pinentryFlavor;
 in
 {
   ###### interface
@@ -40,14 +37,9 @@ in
 
     # This overrides the systemd user unit shipped with the
     # yubikey-agent package
-    systemd.user.services.yubikey-agent = mkIf (pinentryFlavor != null) {
-      path = [ pkgs.pinentry.${pinentryFlavor} ];
-      wantedBy = [
-        (if pinentryFlavor == "tty" || pinentryFlavor == "curses" then
-          "default.target"
-        else
-          "graphical-session.target")
-      ];
+    systemd.user.services.yubikey-agent = mkIf (config.programs.gnupg.agent.pinentryPackage != null) {
+      path = [ config.programs.gnupg.agent.pinentryPackage ];
+      wantedBy = [ "default.target" ];
     };
 
     # Yubikey-agent expects pcsd to be running in order to function.
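
Since the agent now follows programs.gnupg.agent.pinentryPackage instead of pinentryFlavor, a configuration along these lines keeps the user unit working (the pinentry choice is only an example):

  programs.gnupg.agent = {
    enable = true;
    pinentryPackage = pkgs.pinentry-curses;  # formerly pinentryFlavor = "curses"
  };
  services.yubikey-agent.enable = true;
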
diff --git a/nixos/modules/services/web-apps/miniflux.nix b/nixos/modules/services/web-apps/miniflux.nix
index 1a5b7d0c24e9b..16b6fb0d655d7 100644
--- a/nixos/modules/services/web-apps/miniflux.nix
+++ b/nixos/modules/services/web-apps/miniflux.nix
@@ -16,10 +16,20 @@ in
 {
   options = {
     services.miniflux = {
-      enable = mkEnableOption (lib.mdDoc "miniflux and creates a local postgres database for it");
+      enable = mkEnableOption (lib.mdDoc "miniflux");
 
       package = mkPackageOption pkgs "miniflux" { };
 
+      createDatabaseLocally = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = ''
+          Whether a PostgreSQL database should be automatically created and
+          configured on the local host. If set to `false`, you need to provision a
+          database yourself and make sure to create the hstore extension in it.
+        '';
+      };
+
       config = mkOption {
         type = with types; attrsOf (oneOf [ str int ]);
         example = literalExpression ''
@@ -38,7 +48,7 @@ in
         '';
       };
 
-      adminCredentialsFile = mkOption  {
+      adminCredentialsFile = mkOption {
         type = types.path;
         description = lib.mdDoc ''
           File containing the ADMIN_USERNAME and
@@ -51,14 +61,14 @@ in
   };
 
   config = mkIf cfg.enable {
-    services.miniflux.config =  {
+    services.miniflux.config = {
       LISTEN_ADDR = mkDefault defaultAddress;
-      DATABASE_URL = "user=miniflux host=/run/postgresql dbname=miniflux";
+      DATABASE_URL = lib.mkIf cfg.createDatabaseLocally "user=miniflux host=/run/postgresql dbname=miniflux";
       RUN_MIGRATIONS = 1;
       CREATE_ADMIN = 1;
     };
 
-    services.postgresql = {
+    services.postgresql = lib.mkIf cfg.createDatabaseLocally {
       enable = true;
       ensureUsers = [ {
         name = "miniflux";
@@ -67,7 +77,7 @@ in
       ensureDatabases = [ "miniflux" ];
     };
 
-    systemd.services.miniflux-dbsetup = {
+    systemd.services.miniflux-dbsetup = lib.mkIf cfg.createDatabaseLocally {
       description = "Miniflux database setup";
       requires = [ "postgresql.service" ];
       after = [ "network.target" "postgresql.service" ];
@@ -81,8 +91,9 @@ in
     systemd.services.miniflux = {
       description = "Miniflux service";
       wantedBy = [ "multi-user.target" ];
-      requires = [ "miniflux-dbsetup.service" ];
-      after = [ "network.target" "postgresql.service" "miniflux-dbsetup.service" ];
+      requires = lib.optional cfg.createDatabaseLocally "miniflux-dbsetup.service";
+      after = [ "network.target" ]
+        ++ lib.optionals cfg.createDatabaseLocally [ "postgresql.service" "miniflux-dbsetup.service" ];
 
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/miniflux";
@@ -129,6 +140,7 @@ in
         include "${pkgs.apparmorRulesFromClosure { name = "miniflux"; } cfg.package}"
         r ${cfg.package}/bin/miniflux,
         r @{sys}/kernel/mm/transparent_hugepage/hpage_pmd_size,
+        rw /run/miniflux/**,
       }
     '';
   };
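
For an externally provisioned database, a configuration roughly like the following sketch is expected; the host name and credentials path are placeholders, and the database must already contain the hstore extension:

  services.miniflux = {
    enable = true;
    createDatabaseLocally = false;
    adminCredentialsFile = "/run/secrets/miniflux-admin";  # placeholder path
    config.DATABASE_URL = "user=miniflux host=db.example.org dbname=miniflux sslmode=require";
  };
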
diff --git a/nixos/modules/services/web-apps/nextcloud.nix b/nixos/modules/services/web-apps/nextcloud.nix
index 08f90dcf59d80..5cda4a00a9de5 100644
--- a/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixos/modules/services/web-apps/nextcloud.nix
@@ -45,7 +45,7 @@ let
     };
   };
 
-  webroot = pkgs.runCommand
+  webroot = pkgs.runCommandLocal
     "${cfg.package.name or "nextcloud"}-with-apps"
     { }
     ''
diff --git a/nixos/modules/services/web-apps/photoprism.nix b/nixos/modules/services/web-apps/photoprism.nix
index ccf995fccf3e5..39eb7c65c6359 100644
--- a/nixos/modules/services/web-apps/photoprism.nix
+++ b/nixos/modules/services/web-apps/photoprism.nix
@@ -104,6 +104,7 @@ in
         StateDirectory = "photoprism";
         WorkingDirectory = "/var/lib/photoprism";
         RuntimeDirectory = "photoprism";
+        ReadWritePaths = [ cfg.originalsPath cfg.importPath cfg.storagePath ];
 
         LoadCredential = lib.optionalString (cfg.passwordFile != null)
           "PHOTOPRISM_ADMIN_PASSWORD:${cfg.passwordFile}";
diff --git a/nixos/modules/services/web-apps/vikunja.nix b/nixos/modules/services/web-apps/vikunja.nix
index b893f2c1f33c7..efa9c676d9a5d 100644
--- a/nixos/modules/services/web-apps/vikunja.nix
+++ b/nixos/modules/services/web-apps/vikunja.nix
@@ -9,10 +9,13 @@ let
   useMysql = cfg.database.type == "mysql";
   usePostgresql = cfg.database.type == "postgres";
 in {
+  imports = [
+    (mkRemovedOptionModule [ "services" "vikunja" "setupNginx" ] "services.vikunja no longer supports the automatic set up of a nginx virtual host. Set up your own webserver config with a proxy pass to the vikunja service.")
+  ];
+
   options.services.vikunja = with lib; {
     enable = mkEnableOption (lib.mdDoc "vikunja service");
-    package-api = mkPackageOption pkgs "vikunja-api" { };
-    package-frontend = mkPackageOption pkgs "vikunja-frontend" { };
+    package = mkPackageOption pkgs "vikunja" { };
     environmentFiles = mkOption {
       type = types.listOf types.path;
       default = [ ];
@@ -21,25 +24,10 @@ in {
         For example passwords should be set in one of these files.
       '';
     };
-    setupNginx = mkOption {
-      type = types.bool;
-      default = config.services.nginx.enable;
-      defaultText = literalExpression "config.services.nginx.enable";
-      description = lib.mdDoc ''
-        Whether to setup NGINX.
-        Further nginx configuration can be done by changing
-        {option}`services.nginx.virtualHosts.<frontendHostname>`.
-        This does not enable TLS or ACME by default. To enable this, set the
-        {option}`services.nginx.virtualHosts.<frontendHostname>.enableACME` to
-        `true` and if appropriate do the same for
-        {option}`services.nginx.virtualHosts.<frontendHostname>.forceSSL`.
-      '';
-    };
     frontendScheme = mkOption {
       type = types.enum [ "http" "https" ];
       description = lib.mdDoc ''
         Whether the site is available via http or https.
-        This does not configure https or ACME in nginx!
       '';
     };
     frontendHostname = mkOption {
@@ -104,42 +92,27 @@ in {
       };
     };
 
-    systemd.services.vikunja-api = {
-      description = "vikunja-api";
+    systemd.services.vikunja = {
+      description = "vikunja";
       after = [ "network.target" ] ++ lib.optional usePostgresql "postgresql.service" ++ lib.optional useMysql "mysql.service";
       wantedBy = [ "multi-user.target" ];
-      path = [ cfg.package-api ];
+      path = [ cfg.package ];
       restartTriggers = [ configFile ];
 
       serviceConfig = {
         Type = "simple";
         DynamicUser = true;
         StateDirectory = "vikunja";
-        ExecStart = "${cfg.package-api}/bin/vikunja";
+        ExecStart = "${cfg.package}/bin/vikunja";
         Restart = "always";
         EnvironmentFile = cfg.environmentFiles;
       };
     };
 
-    services.nginx.virtualHosts."${cfg.frontendHostname}" = mkIf cfg.setupNginx {
-      locations = {
-        "/" = {
-          root = cfg.package-frontend;
-          tryFiles = "try_files $uri $uri/ /";
-        };
-        "~* ^/(api|dav|\\.well-known)/" = {
-          proxyPass = "http://localhost:${toString cfg.port}";
-          extraConfig = ''
-            client_max_body_size 20M;
-          '';
-        };
-      };
-    };
-
     environment.etc."vikunja/config.yaml".source = configFile;
 
     environment.systemPackages = [
-      cfg.package-api # for admin `vikunja` CLI
+      cfg.package # for admin `vikunja` CLI
     ];
   };
 }
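
With setupNginx removed and the api/frontend packages unified, a migrated configuration might look like this sketch (the hostname is a placeholder; 3456 is the port used by the tests further below):

  services.vikunja = {
    enable = true;                          # now uses the single pkgs.vikunja package
    frontendScheme = "http";
    frontendHostname = "todo.example.org";  # placeholder
  };
  # setupNginx no longer exists; the reverse proxy has to be declared explicitly.
  services.nginx = {
    enable = true;
    virtualHosts."todo.example.org".locations."/".proxyPass = "http://localhost:3456";
  };
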
diff --git a/nixos/modules/services/web-servers/stargazer.nix b/nixos/modules/services/web-servers/stargazer.nix
index 18f57363137cf..4eca33326040b 100644
--- a/nixos/modules/services/web-servers/stargazer.nix
+++ b/nixos/modules/services/web-servers/stargazer.nix
@@ -129,6 +129,12 @@ in
       example = lib.literalExpression "\"1y\"";
     };
 
+    debugMode = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc "Run Stargazer in debug mode.";
+    };
+
     routes = lib.mkOption {
       type = lib.types.listOf
         (lib.types.submodule {
@@ -195,7 +201,7 @@ in
       after = [ "network.target" ];
       wantedBy = [ "multi-user.target" ];
       serviceConfig = {
-        ExecStart = "${pkgs.stargazer}/bin/stargazer ${configFile}";
+        ExecStart = "${pkgs.stargazer}/bin/stargazer ${configFile} ${lib.optionalString cfg.debugMode "-D"}";
         Restart = "always";
         # User and group
         User = cfg.user;
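
A short sketch of the new option; it simply appends -D to the ExecStart line shown above:

  services.stargazer = {
    enable = true;
    debugMode = true;  # runs stargazer with -D
  };
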
diff --git a/nixos/modules/services/x11/desktop-managers/deepin.nix b/nixos/modules/services/x11/desktop-managers/deepin.nix
index 0824d6e30a8a9..e6f221201013e 100644
--- a/nixos/modules/services/x11/desktop-managers/deepin.nix
+++ b/nixos/modules/services/x11/desktop-managers/deepin.nix
@@ -66,6 +66,7 @@ in
       services.upower.enable = mkDefault config.powerManagement.enable;
       networking.networkmanager.enable = mkDefault true;
       programs.dconf.enable = mkDefault true;
+      programs.gnupg.agent.pinentryPackage = pkgs.pinentry-qt;
 
       fonts.packages = with pkgs; [ noto-fonts ];
       xdg.mime.enable = true;
diff --git a/nixos/modules/services/x11/desktop-managers/default.nix b/nixos/modules/services/x11/desktop-managers/default.nix
index ecb8d1e91bde2..33d0a7b526436 100644
--- a/nixos/modules/services/x11/desktop-managers/default.nix
+++ b/nixos/modules/services/x11/desktop-managers/default.nix
@@ -18,7 +18,7 @@ in
   # determines the default: later modules (if enabled) are preferred.
   # E.g., if Plasma 5 is enabled, it supersedes xterm.
   imports = [
-    ./none.nix ./xterm.nix ./phosh.nix ./xfce.nix ./plasma5.nix ./plasma6.nix ./lumina.nix
+    ./none.nix ./xterm.nix ./phosh.nix ./xfce.nix ./plasma5.nix ../../desktop-managers/plasma6.nix ./lumina.nix
     ./lxqt.nix ./enlightenment.nix ./gnome.nix ./retroarch.nix ./kodi.nix
     ./mate.nix ./pantheon.nix ./surf-display.nix ./cde.nix
     ./cinnamon.nix ./budgie.nix ./deepin.nix
diff --git a/nixos/modules/services/x11/desktop-managers/lxqt.nix b/nixos/modules/services/x11/desktop-managers/lxqt.nix
index 50ad72dc7388d..d3bdc4326a908 100644
--- a/nixos/modules/services/x11/desktop-managers/lxqt.nix
+++ b/nixos/modules/services/x11/desktop-managers/lxqt.nix
@@ -62,6 +62,8 @@ in
     # Link some extra directories in /run/current-system/software/share
     environment.pathsToLink = [ "/share" ];
 
+    programs.gnupg.agent.pinentryPackage = pkgs.pinentry-qt;
+
     # virtual file systems support for PCManFM-QT
     services.gvfs.enable = true;
 
diff --git a/nixos/modules/services/x11/desktop-managers/plasma5.nix b/nixos/modules/services/x11/desktop-managers/plasma5.nix
index 7645b3070369c..c884b4487e240 100644
--- a/nixos/modules/services/x11/desktop-managers/plasma5.nix
+++ b/nixos/modules/services/x11/desktop-managers/plasma5.nix
@@ -336,6 +336,7 @@ in
         serif = [ "Noto Serif" ];
       };
 
+      programs.gnupg.agent.pinentryPackage = pkgs.pinentry-qt;
       programs.ssh.askPassword = mkDefault "${pkgs.plasma5Packages.ksshaskpass.out}/bin/ksshaskpass";
 
       # Enable helpful DBus services.
diff --git a/nixos/modules/services/x11/desktop-managers/xfce.nix b/nixos/modules/services/x11/desktop-managers/xfce.nix
index e28486bcc12d8..6bc964f4c6ed7 100644
--- a/nixos/modules/services/x11/desktop-managers/xfce.nix
+++ b/nixos/modules/services/x11/desktop-managers/xfce.nix
@@ -131,6 +131,7 @@ in
         xfdesktop
       ] ++ optional cfg.enableScreensaver xfce4-screensaver) excludePackages;
 
+    programs.gnupg.agent.pinentryPackage = pkgs.pinentry-gtk2;
     programs.xfconf.enable = true;
     programs.thunar.enable = true;
 
diff --git a/nixos/modules/services/x11/xserver.nix b/nixos/modules/services/x11/xserver.nix
index 38fb1074fcdf8..3d7474e18263d 100644
--- a/nixos/modules/services/x11/xserver.nix
+++ b/nixos/modules/services/x11/xserver.nix
@@ -749,6 +749,8 @@ in
     boot.kernel.sysctl."fs.inotify.max_user_instances" = mkDefault 524288;
     boot.kernel.sysctl."fs.inotify.max_user_watches" = mkDefault 524288;
 
+    programs.gnupg.agent.pinentryPackage = lib.mkDefault pkgs.pinentry-gnome3;
+
     systemd.defaultUnit = mkIf cfg.autorun "graphical.target";
 
     systemd.services.display-manager =
diff --git a/nixos/modules/system/boot/networkd.nix b/nixos/modules/system/boot/networkd.nix
index 88d6a2ded873c..b6b0f64b94c81 100644
--- a/nixos/modules/system/boot/networkd.nix
+++ b/nixos/modules/system/boot/networkd.nix
@@ -797,6 +797,7 @@ let
           "UseHostname"
           "Hostname"
           "UseDomains"
+          "UseGateway"
           "UseRoutes"
           "UseTimezone"
           "ClientIdentifier"
@@ -829,6 +830,7 @@ let
         (assertValueOneOf "SendHostname" boolValues)
         (assertValueOneOf "UseHostname" boolValues)
         (assertValueOneOf "UseDomains" (boolValues ++ ["route"]))
+        (assertValueOneOf "UseGateway" boolValues)
         (assertValueOneOf "UseRoutes" boolValues)
         (assertValueOneOf "UseTimezone" boolValues)
         (assertValueOneOf "ClientIdentifier" ["mac" "duid" "duid-only"])
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index 49090423e078c..a8885aee78f2b 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -97,7 +97,7 @@ let
 
       # Maintaining state across reboots.
       "systemd-random-seed.service"
-      "systemd-boot-random-seed.service"
+      ] ++ (optional cfg.package.withBootloader "systemd-boot-random-seed.service") ++ [
       "systemd-backlight@.service"
       "systemd-rfkill.service"
       "systemd-rfkill.socket"
diff --git a/nixos/modules/system/boot/uki.nix b/nixos/modules/system/boot/uki.nix
index ce00ac8e63978..0965b887c12e6 100644
--- a/nixos/modules/system/boot/uki.nix
+++ b/nixos/modules/system/boot/uki.nix
@@ -75,6 +75,8 @@ in
         OSRelease = lib.mkOptionDefault "@${config.system.build.etc}/etc/os-release";
         # This is needed for cross compiling.
         EFIArch = lib.mkOptionDefault efiArch;
+      } // lib.optionalAttrs (config.hardware.deviceTree.enable && config.hardware.deviceTree.name != null) {
+        DeviceTree = lib.mkOptionDefault "${config.hardware.deviceTree.package}/${config.hardware.deviceTree.name}";
       };
     };
 
diff --git a/nixos/modules/virtualisation/incus.nix b/nixos/modules/virtualisation/incus.nix
index a561c5682ae58..da7873c7bec86 100644
--- a/nixos/modules/virtualisation/incus.nix
+++ b/nixos/modules/virtualisation/incus.nix
@@ -164,19 +164,24 @@ in
         "network-online.target"
         "lxcfs.service"
         "incus.socket"
-      ];
+      ]
+        ++ lib.optional config.virtualisation.vswitch.enable "ovs-vswitchd.service";
+
       requires = [
         "lxcfs.service"
         "incus.socket"
-      ];
+      ]
+        ++ lib.optional config.virtualisation.vswitch.enable "ovs-vswitchd.service";
+
       wants = [
         "network-online.target"
       ];
 
-      path = lib.mkIf config.boot.zfs.enabled [
+      path = lib.optionals config.boot.zfs.enabled [
         config.boot.zfs.package
         "${config.boot.zfs.package}/lib/udev"
-      ];
+      ]
+        ++ lib.optional config.virtualisation.vswitch.enable config.virtualisation.vswitch.package;
 
       environment = lib.mkMerge [ {
         # Override Path to the LXC template configuration directory
diff --git a/nixos/modules/virtualisation/oci-containers.nix b/nixos/modules/virtualisation/oci-containers.nix
index a88715587d654..5bffb3f047166 100644
--- a/nixos/modules/virtualisation/oci-containers.nix
+++ b/nixos/modules/virtualisation/oci-containers.nix
@@ -312,7 +312,7 @@ let
 
     preStop = if cfg.backend == "podman"
       then "podman stop --ignore --cidfile=/run/podman-${escapedName}.ctr-id"
-      else "${cfg.backend} stop ${name}";
+      else "${cfg.backend} stop ${name} || true";
 
     postStop = if cfg.backend == "podman"
       then "podman rm -f --ignore --cidfile=/run/podman-${escapedName}.ctr-id"
diff --git a/nixos/modules/virtualisation/virtualbox-host.nix b/nixos/modules/virtualisation/virtualbox-host.nix
index 50a8f8189590e..0ecf7f490cf6f 100644
--- a/nixos/modules/virtualisation/virtualbox-host.nix
+++ b/nixos/modules/virtualisation/virtualbox-host.nix
@@ -6,7 +6,7 @@ let
   cfg = config.virtualisation.virtualbox.host;
 
   virtualbox = cfg.package.override {
-    inherit (cfg) enableHardening headless enableWebService;
+    inherit (cfg) enableHardening headless enableWebService enableKvm;
     extensionPack = if cfg.enableExtensionPack then pkgs.virtualboxExtpack else null;
   };
 
@@ -81,13 +81,24 @@ in
         Build VirtualBox web service tool (vboxwebsrv) to allow managing VMs via other webpage frontend tools. Useful for headless servers.
       '';
     };
+
+    enableKvm = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable KVM support for VirtualBox. This improves compatibility across Linux kernel versions, because the VirtualBox kernel modules
+        are not required.
+
+        This option is incompatible with `enableHardening` and `addNetworkInterface`.
+
+        Note: This is experimental. Please check https://github.com/cyberus-technology/virtualbox-kvm/issues.
+      '';
+    };
   };
 
   config = mkIf cfg.enable (mkMerge [{
     warnings = mkIf (pkgs.config.virtualbox.enableExtensionPack or false)
       ["'nixpkgs.virtualbox.enableExtensionPack' has no effect, please use 'virtualisation.virtualbox.host.enableExtensionPack'"];
-    boot.kernelModules = [ "vboxdrv" "vboxnetadp" "vboxnetflt" ];
-    boot.extraModulePackages = [ kernelModules ];
     environment.systemPackages = [ virtualbox ];
 
     security.wrappers = let
@@ -114,17 +125,43 @@ in
 
     services.udev.extraRules =
       ''
-        KERNEL=="vboxdrv",    OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
-        KERNEL=="vboxdrvu",   OWNER="root", GROUP="root",      MODE="0666", TAG+="systemd"
-        KERNEL=="vboxnetctl", OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
         SUBSYSTEM=="usb_device", ACTION=="add", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
         SUBSYSTEM=="usb", ACTION=="add", ENV{DEVTYPE}=="usb_device", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
         SUBSYSTEM=="usb_device", ACTION=="remove", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh --remove $major $minor"
         SUBSYSTEM=="usb", ACTION=="remove", ENV{DEVTYPE}=="usb_device", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh --remove $major $minor"
       '';
+  } (mkIf cfg.enableKvm {
+    assertions = [
+      {
+        assertion = !cfg.addNetworkInterface;
+        message = "VirtualBox KVM only supports standard NAT networking for VMs. Please turn off virtualisation.virtualbox.host.addNetworkInferface.";
+      }
+
+      {
+        assertion = !cfg.enableHardening;
+        message = "VirtualBox KVM is not compatible with hardening: Please turn off virtualisation.virtualbox.host.enableHardening.";
+      }
+    ];
+
+    warnings = [
+      ''
+        KVM support in VirtualBox is experimental. Not all security features are available yet.
+        See: https://github.com/cyberus-technology/virtualbox-kvm/issues/12
+      ''
+    ];
+  }) (mkIf (!cfg.enableKvm) {
+    boot.kernelModules = [ "vboxdrv" "vboxnetadp" "vboxnetflt" ];
+    boot.extraModulePackages = [ kernelModules ];
+
+    services.udev.extraRules =
+      ''
+        KERNEL=="vboxdrv",    OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
+        KERNEL=="vboxdrvu",   OWNER="root", GROUP="root",      MODE="0666", TAG+="systemd"
+        KERNEL=="vboxnetctl", OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
+      '';
 
     # Since we lack the right setuid/setcap binaries, set up a host-only network by default.
-  } (mkIf cfg.addNetworkInterface {
+  }) (mkIf cfg.addNetworkInterface {
     systemd.services.vboxnet0 =
       { description = "VirtualBox vboxnet0 Interface";
         requires = [ "dev-vboxnetctl.device" ];
diff --git a/nixos/tests/akkoma.nix b/nixos/tests/akkoma.nix
index 287e2d485999e..2907017ee3d54 100644
--- a/nixos/tests/akkoma.nix
+++ b/nixos/tests/akkoma.nix
@@ -31,16 +31,12 @@ let
 
     export REQUESTS_CA_BUNDLE="/etc/ssl/certs/ca-certificates.crt"
 
-    echo '${userPassword}' | ${pkgs.toot}/bin/toot login_cli -i "akkoma.nixos.test" -e "jamy@nixos.test"
-    echo "y" | ${pkgs.toot}/bin/toot post "hello world Jamy here"
-
-    # Retrieving timeline with toot currently broken due to incompatible timestamp format
-    # cf. <https://akkoma.dev/AkkomaGang/akkoma/issues/637> and <https://github.com/ihabunek/toot/issues/399>
-    #echo "y" | ${pkgs.toot}/bin/toot timeline | grep -F -q "hello world Jamy here"
+    ${pkgs.toot}/bin/toot login_cli -i "akkoma.nixos.test" -e "jamy@nixos.test" -p '${userPassword}'
+    ${pkgs.toot}/bin/toot post "hello world Jamy here"
+    ${pkgs.toot}/bin/toot timeline -1 | grep -F -q "hello world Jamy here"
 
     # Test file upload
-    echo "y" | ${pkgs.toot}/bin/toot upload <(dd if=/dev/zero bs=1024 count=1024 status=none) \
-      | grep -F -q "https://akkoma.nixos.test/media"
+    ${pkgs.toot}/bin/toot upload <(dd if=/dev/zero bs=1024 count=1024 status=none)
   '';
 
   checkFe = pkgs.writers.writeBashBin "checkFe" ''
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 7376cd40b910a..ac64b85dd486e 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -640,6 +640,7 @@ in {
   nzbget = handleTest ./nzbget.nix {};
   nzbhydra2 = handleTest ./nzbhydra2.nix {};
   oh-my-zsh = handleTest ./oh-my-zsh.nix {};
+  ollama = handleTest ./ollama.nix {};
   ombi = handleTest ./ombi.nix {};
   openarena = handleTest ./openarena.nix {};
   openldap = handleTest ./openldap.nix {};
@@ -686,6 +687,7 @@ in {
   pgbouncer = handleTest ./pgbouncer.nix {};
   pgjwt = handleTest ./pgjwt.nix {};
   pgmanage = handleTest ./pgmanage.nix {};
+  pgvecto-rs = handleTest ./pgvecto-rs.nix {};
   phosh = handleTest ./phosh.nix {};
   photoprism = handleTest ./photoprism.nix {};
   php = handleTest ./php {};
diff --git a/nixos/tests/homepage-dashboard.nix b/nixos/tests/homepage-dashboard.nix
index 56e077f5ff6de..dd36473e8ac02 100644
--- a/nixos/tests/homepage-dashboard.nix
+++ b/nixos/tests/homepage-dashboard.nix
@@ -2,13 +2,35 @@ import ./make-test-python.nix ({ lib, ... }: {
   name = "homepage-dashboard";
   meta.maintainers = with lib.maintainers; [ jnsgruk ];
 
-  nodes.machine = { pkgs, ... }: {
+  nodes.unmanaged_conf = { pkgs, ... }: {
     services.homepage-dashboard.enable = true;
   };
 
+  nodes.managed_conf = { pkgs, ... }: {
+    services.homepage-dashboard = {
+      enable = true;
+      settings.title = "custom";
+    };
+  };
+
   testScript = ''
-    machine.wait_for_unit("homepage-dashboard.service")
-    machine.wait_for_open_port(8082)
-    machine.succeed("curl --fail http://localhost:8082/")
+    # Ensure the services are started on the unmanaged machine
+    unmanaged_conf.wait_for_unit("homepage-dashboard.service")
+    unmanaged_conf.wait_for_open_port(8082)
+    unmanaged_conf.succeed("curl --fail http://localhost:8082/")
+
+    # Ensure that /etc/homepage-dashboard doesn't exist, and boilerplate
+    # configs are copied into place.
+    unmanaged_conf.fail("test -d /etc/homepage-dashboard")
+    unmanaged_conf.succeed("test -f /var/lib/private/homepage-dashboard/settings.yaml")
+
+    # Ensure the services are started on the managed machine
+    managed_conf.wait_for_unit("homepage-dashboard.service")
+    managed_conf.wait_for_open_port(8082)
+    managed_conf.succeed("curl --fail http://localhost:8082/")
+
+    # Ensure /etc/homepage-dashboard is created and unmanaged conf location isn't.
+    managed_conf.succeed("test -d /etc/homepage-dashboard")
+    managed_conf.fail("test -f /var/lib/private/homepage-dashboard/settings.yaml")
   '';
 })
diff --git a/nixos/tests/incus/container.nix b/nixos/tests/incus/container.nix
index eb00429e53fe1..9260f70da98c2 100644
--- a/nixos/tests/incus/container.nix
+++ b/nixos/tests/incus/container.nix
@@ -29,6 +29,7 @@ in
 
       incus.enable = true;
     };
+    networking.nftables.enable = true;
   };
 
   testScript = ''
diff --git a/nixos/tests/incus/default.nix b/nixos/tests/incus/default.nix
index ff36fe9d67308..474a621c5ce91 100644
--- a/nixos/tests/incus/default.nix
+++ b/nixos/tests/incus/default.nix
@@ -11,6 +11,7 @@
     boot.initrd.systemd.enable = true;
   }; };
   lxd-to-incus = import ./lxd-to-incus.nix { inherit system pkgs; };
+  openvswitch = import ./openvswitch.nix { inherit system pkgs; };
   preseed = import ./preseed.nix { inherit system pkgs; };
   socket-activated = import ./socket-activated.nix { inherit system pkgs; };
   ui = import ./ui.nix {inherit system pkgs;};
diff --git a/nixos/tests/incus/lxd-to-incus.nix b/nixos/tests/incus/lxd-to-incus.nix
index c0fc98c224df1..262f63c0f26fb 100644
--- a/nixos/tests/incus/lxd-to-incus.nix
+++ b/nixos/tests/incus/lxd-to-incus.nix
@@ -67,6 +67,7 @@ import ../make-test-python.nix (
 
           incus.enable = true;
         };
+        networking.nftables.enable = true;
       };
 
     testScript = ''
diff --git a/nixos/tests/incus/openvswitch.nix b/nixos/tests/incus/openvswitch.nix
new file mode 100644
index 0000000000000..5d4aef031ad0a
--- /dev/null
+++ b/nixos/tests/incus/openvswitch.nix
@@ -0,0 +1,65 @@
+import ../make-test-python.nix ({ pkgs, lib, ... } :
+
+{
+  name = "incus-openvswitch";
+
+  meta = {
+    maintainers = lib.teams.lxc.members;
+  };
+
+  nodes.machine = { lib, ... }: {
+    virtualisation = {
+      incus.enable = true;
+      vswitch.enable = true;
+      incus.preseed = {
+        networks = [
+          {
+            name = "nixostestbr0";
+            type = "bridge";
+            config = {
+              "bridge.driver" = "openvswitch";
+              "ipv4.address" = "10.0.100.1/24";
+              "ipv4.nat" = "true";
+            };
+          }
+        ];
+        profiles = [
+          {
+            name = "nixostest_default";
+            devices = {
+              eth0 = {
+                name = "eth0";
+                network = "nixostestbr0";
+                type = "nic";
+              };
+              root = {
+                path = "/";
+                pool = "default";
+                size = "35GiB";
+                type = "disk";
+              };
+            };
+          }
+        ];
+        storage_pools = [
+          {
+            name = "nixostest_pool";
+            driver = "dir";
+          }
+        ];
+      };
+    };
+    networking.nftables.enable = true;
+  };
+
+  testScript = ''
+    machine.wait_for_unit("incus.service")
+    machine.wait_for_unit("incus-preseed.service")
+
+    with subtest("Verify openvswitch bridge"):
+      machine.succeed("incus network info nixostestbr0")
+
+    with subtest("Verify openvswitch bridge"):
+      machine.succeed("ovs-vsctl br-exists nixostestbr0")
+  '';
+})
diff --git a/nixos/tests/incus/preseed.nix b/nixos/tests/incus/preseed.nix
index a488d71f3c92a..f2d928115f3ec 100644
--- a/nixos/tests/incus/preseed.nix
+++ b/nixos/tests/incus/preseed.nix
@@ -48,6 +48,7 @@ import ../make-test-python.nix ({ pkgs, lib, ... } :
         ];
       };
     };
+    networking.nftables.enable = true;
   };
 
   testScript = ''
diff --git a/nixos/tests/incus/socket-activated.nix b/nixos/tests/incus/socket-activated.nix
index fca536b7054f0..59caf1090fbd8 100644
--- a/nixos/tests/incus/socket-activated.nix
+++ b/nixos/tests/incus/socket-activated.nix
@@ -12,6 +12,7 @@ import ../make-test-python.nix ({ pkgs, lib, ... } :
       incus.enable = true;
       incus.socketActivation = true;
     };
+    networking.nftables.enable = true;
   };
 
   testScript = ''
diff --git a/nixos/tests/incus/ui.nix b/nixos/tests/incus/ui.nix
index 24ce1217d8df7..837eb14844cea 100644
--- a/nixos/tests/incus/ui.nix
+++ b/nixos/tests/incus/ui.nix
@@ -10,6 +10,7 @@ import ../make-test-python.nix ({ pkgs, lib, ... }: {
       incus.enable = true;
       incus.ui.enable = true;
     };
+    networking.nftables.enable = true;
 
     environment.systemPackages =
       let
diff --git a/nixos/tests/incus/virtual-machine.nix b/nixos/tests/incus/virtual-machine.nix
index c76e4f448f2f2..ab378c7b9490e 100644
--- a/nixos/tests/incus/virtual-machine.nix
+++ b/nixos/tests/incus/virtual-machine.nix
@@ -32,6 +32,7 @@ in
 
       incus.enable = true;
     };
+    networking.nftables.enable = true;
   };
 
   testScript = ''
diff --git a/nixos/tests/knot.nix b/nixos/tests/knot.nix
index c5af8bf1edcc9..eec94a22f2fa7 100644
--- a/nixos/tests/knot.nix
+++ b/nixos/tests/knot.nix
@@ -66,6 +66,10 @@ in {
             "0.0.0.0@53"
             "::@53"
            ];
+          listen-quic = [
+            "0.0.0.0@853"
+            "::@853"
+           ];
           automatic-acl = true;
         };
 
@@ -129,8 +133,13 @@ in {
           key = "xfr_key";
         };
 
+        remote.primary-quic = {
+          address = "192.168.0.1@853";
+          key = "xfr_key";
+          quic = true;
+        };
+
         template.default = {
-          master = "primary";
           # zonefileless setup
           # https://www.knot-dns.cz/docs/2.8/html/operation.html#example-2
           zonefile-sync = "-1";
@@ -139,8 +148,14 @@ in {
         };
 
         zone = {
-          "example.com".file = "example.com.zone";
-          "sub.example.com".file = "sub.example.com.zone";
+          "example.com" = {
+            master = "primary";
+            file = "example.com.zone";
+          };
+          "sub.example.com" = {
+            master = "primary-quic";
+            file = "sub.example.com.zone";
+          };
         };
 
         log.syslog.any = "debug";
diff --git a/nixos/tests/miniflux.nix b/nixos/tests/miniflux.nix
index a3af53db0e7a1..6d38224448ed6 100644
--- a/nixos/tests/miniflux.nix
+++ b/nixos/tests/miniflux.nix
@@ -15,6 +15,10 @@ let
             ADMIN_USERNAME=${username}
             ADMIN_PASSWORD=${password}
           '';
+  postgresPassword = "correcthorsebatterystaple";
+  postgresPasswordFile = pkgs.writeText "pgpass" ''
+    *:*:*:*:${postgresPassword}
+  '';
 
 in
 {
@@ -56,32 +60,62 @@ in
           adminCredentialsFile = customAdminCredentialsFile;
         };
       };
+
+    postgresTcp = { config, pkgs, lib, ... }: {
+      services.postgresql = {
+        enable = true;
+        initialScript = pkgs.writeText "init-postgres" ''
+          CREATE USER miniflux WITH PASSWORD '${postgresPassword}';
+          CREATE DATABASE miniflux WITH OWNER miniflux;
+        '';
+        enableTCPIP = true;
+        authentication = ''
+          host sameuser miniflux samenet scram-sha-256
+        '';
+      };
+      systemd.services.postgresql.postStart = lib.mkAfter ''
+        $PSQL -tAd miniflux -c 'CREATE EXTENSION hstore;'
+      '';
+      networking.firewall.allowedTCPPorts = [ config.services.postgresql.port ];
+    };
+    externalDb = { ... }: {
+      security.apparmor.enable = true;
+      services.miniflux = {
+        enable = true;
+        createDatabaseLocally = false;
+        inherit adminCredentialsFile;
+        config = {
+          DATABASE_URL = "user=miniflux host=postgresTcp dbname=miniflux sslmode=disable";
+          PGPASSFILE = "/run/miniflux/pgpass";
+        };
+      };
+      systemd.services.miniflux.preStart = ''
+        cp ${postgresPasswordFile} /run/miniflux/pgpass
+        chmod 600 /run/miniflux/pgpass
+      '';
+    };
   };
   testScript = ''
-    start_all()
+    def runTest(machine, port, user):
+      machine.wait_for_unit("miniflux.service")
+      machine.wait_for_open_port(port)
+      machine.succeed(f"curl --fail 'http://localhost:{port}/healthcheck' | grep OK")
+      machine.succeed(
+          f"curl 'http://localhost:{port}/v1/me' -u '{user}' -H Content-Type:application/json | grep '\"is_admin\":true'"
+      )
+      machine.fail('journalctl -b --no-pager --grep "^audit: .*apparmor=\\"DENIED\\""')
 
-    default.wait_for_unit("miniflux.service")
-    default.wait_for_open_port(${toString defaultPort})
-    default.succeed("curl --fail 'http://localhost:${toString defaultPort}/healthcheck' | grep OK")
-    default.succeed(
-        "curl 'http://localhost:${toString defaultPort}/v1/me' -u '${defaultUsername}:${defaultPassword}' -H Content-Type:application/json | grep '\"is_admin\":true'"
-    )
-    default.fail('journalctl -b --no-pager --grep "^audit: .*apparmor=\\"DENIED\\""')
+    default.start()
+    withoutSudo.start()
+    customized.start()
+    postgresTcp.start()
 
-    withoutSudo.wait_for_unit("miniflux.service")
-    withoutSudo.wait_for_open_port(${toString defaultPort})
-    withoutSudo.succeed("curl --fail 'http://localhost:${toString defaultPort}/healthcheck' | grep OK")
-    withoutSudo.succeed(
-        "curl 'http://localhost:${toString defaultPort}/v1/me' -u '${defaultUsername}:${defaultPassword}' -H Content-Type:application/json | grep '\"is_admin\":true'"
-    )
-    withoutSudo.fail('journalctl -b --no-pager --grep "^audit: .*apparmor=\\"DENIED\\""')
+    runTest(default, ${toString defaultPort}, "${defaultUsername}:${defaultPassword}")
+    runTest(withoutSudo, ${toString defaultPort}, "${defaultUsername}:${defaultPassword}")
+    runTest(customized, ${toString port}, "${username}:${password}")
 
-    customized.wait_for_unit("miniflux.service")
-    customized.wait_for_open_port(${toString port})
-    customized.succeed("curl --fail 'http://localhost:${toString port}/healthcheck' | grep OK")
-    customized.succeed(
-        "curl 'http://localhost:${toString port}/v1/me' -u '${username}:${password}' -H Content-Type:application/json | grep '\"is_admin\":true'"
-    )
-    customized.fail('journalctl -b --no-pager --grep "^audit: .*apparmor=\\"DENIED\\""')
+    postgresTcp.wait_for_unit("postgresql.service")
+    externalDb.start()
+    runTest(externalDb, ${toString defaultPort}, "${defaultUsername}:${defaultPassword}")
   '';
 })
diff --git a/nixos/tests/minio.nix b/nixos/tests/minio.nix
index ece4864f771c0..67eb0cd884400 100644
--- a/nixos/tests/minio.nix
+++ b/nixos/tests/minio.nix
@@ -43,17 +43,17 @@ import ./make-test-python.nix ({ pkgs, ... }:
 
         # Minio requires at least 1GiB of free disk space to run.
         virtualisation.diskSize = 4 * 1024;
+
+        # Minio pre-allocates 2GiB of memory, so reserve some more
+        virtualisation.memorySize = 4096;
       };
     };
 
     testScript = ''
-      import time
 
       start_all()
       # simulate manually editing root credentials file
       machine.wait_for_unit("multi-user.target")
-      machine.copy_from_host("${credsPartial}", "${rootCredentialsFile}")
-      time.sleep(3)
       machine.copy_from_host("${credsFull}", "${rootCredentialsFile}")
 
       machine.wait_for_unit("minio.service")
diff --git a/nixos/tests/miriway.nix b/nixos/tests/miriway.nix
index a0987d9fc41b6..24e6ec6367cdb 100644
--- a/nixos/tests/miriway.nix
+++ b/nixos/tests/miriway.nix
@@ -100,7 +100,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     # Test Wayland
     # We let Miriway start the first terminal, as we might get stuck if it's not ready to process the first keybind
     # machine.send_key("ctrl-alt-t")
-    machine.wait_for_text("alice@machine")
+    machine.wait_for_text(r"(alice|machine)")
     machine.send_chars("test-wayland\n")
     machine.wait_for_file("/tmp/test-wayland-exit-ok")
     machine.copy_from_vm("/tmp/test-wayland.out")
@@ -112,7 +112,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
 
     # Test XWayland
     machine.send_key("ctrl-alt-a")
-    machine.wait_for_text("alice@machine")
+    machine.wait_for_text(r"(alice|machine)")
     machine.send_chars("test-x11\n")
     machine.wait_for_file("/tmp/test-x11-exit-ok")
     machine.copy_from_vm("/tmp/test-x11.out")
diff --git a/nixos/tests/nebula.nix b/nixos/tests/nebula.nix
index 89b91d89fcb3f..6c468153d5b27 100644
--- a/nixos/tests/nebula.nix
+++ b/nixos/tests/nebula.nix
@@ -10,6 +10,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: let
       environment.systemPackages = [ pkgs.nebula ];
       users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
       services.openssh.enable = true;
+      networking.firewall.enable = true; # Implicitly true, but let's make sure.
       networking.interfaces.eth1.useDHCP = false;
 
       services.nebula.networks.smoke = {
@@ -17,7 +18,10 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: let
         ca = "/etc/nebula/ca.crt";
         cert = "/etc/nebula/${name}.crt";
         key = "/etc/nebula/${name}.key";
-        listen = { host = "0.0.0.0"; port = 4242; };
+        listen = {
+          host = "0.0.0.0";
+          port = if (config.services.nebula.networks.smoke.isLighthouse || config.services.nebula.networks.smoke.isRelay) then 4242 else 0;
+        };
       };
     }
     extraConfig
diff --git a/nixos/tests/nixops/default.nix b/nixos/tests/nixops/default.nix
index 6501d13a2ed36..8477e5059fcaf 100644
--- a/nixos/tests/nixops/default.nix
+++ b/nixos/tests/nixops/default.nix
@@ -9,7 +9,7 @@ let
     #  - Alternatively, blocked on a NixOps 2 release
     #    https://github.com/NixOS/nixops/issues/1242
     # stable = testsLegacyNetwork { nixopsPkg = pkgs.nixops; };
-    unstable = testsForPackage { nixopsPkg = pkgs.nixops_unstable; };
+    unstable = testsForPackage { nixopsPkg = pkgs.nixops_unstable_minimal; };
 
     # inherit testsForPackage;
   };
@@ -32,6 +32,7 @@ let
           pkgs.hello
           pkgs.figlet
         ];
+        virtualisation.memorySize = 2048;
 
         # TODO: make this efficient, https://github.com/NixOS/nixpkgs/issues/180529
         system.includeBuildDependencies = true;
diff --git a/nixos/tests/ollama.nix b/nixos/tests/ollama.nix
new file mode 100644
index 0000000000000..4b21f445cdbd3
--- /dev/null
+++ b/nixos/tests/ollama.nix
@@ -0,0 +1,56 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  mainPort = "11434";
+  altPort = "11435";
+
+  curlRequest = port: request:
+    "curl http://127.0.0.1:${port}/api/generate -d '${builtins.toJSON request}'";
+
+  prompt = {
+    model = "tinydolphin";
+    prompt = "lorem ipsum";
+    options = {
+      seed = 69;
+      temperature = 0;
+    };
+  };
+in
+{
+  name = "ollama";
+  meta = with lib.maintainers; {
+    maintainers = [ abysssol ];
+  };
+
+  nodes = {
+    cpu = { ... }: {
+      services.ollama.enable = true;
+    };
+
+    rocm = { ... }: {
+      services.ollama.enable = true;
+      services.ollama.acceleration = "rocm";
+    };
+
+    cuda = { ... }: {
+      services.ollama.enable = true;
+      services.ollama.acceleration = "cuda";
+    };
+
+    altAddress = { ... }: {
+      services.ollama.enable = true;
+      services.ollama.listenAddress = "127.0.0.1:${altPort}";
+    };
+  };
+
+  testScript = ''
+    vms = [ cpu, rocm, cuda, altAddress ];
+
+    start_all()
+    for vm in vms:
+        vm.wait_for_unit("multi-user.target")
+
+    stdout = cpu.succeed("""${curlRequest mainPort prompt}""", timeout=100)
+
+    stdout = altAddress.succeed("""${curlRequest altPort prompt}""", timeout=100)
+  '';
+})
diff --git a/nixos/tests/pass-secret-service.nix b/nixos/tests/pass-secret-service.nix
index e0dddf0ad29e2..cdbdaa52dbc0a 100644
--- a/nixos/tests/pass-secret-service.nix
+++ b/nixos/tests/pass-secret-service.nix
@@ -26,7 +26,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
 
       programs.gnupg = {
         agent.enable = true;
-        agent.pinentryFlavor = "tty";
         dirmngr.enable = true;
       };
     };
diff --git a/nixos/tests/pgvecto-rs.nix b/nixos/tests/pgvecto-rs.nix
new file mode 100644
index 0000000000000..cd871dab6a0f1
--- /dev/null
+++ b/nixos/tests/pgvecto-rs.nix
@@ -0,0 +1,76 @@
+# mostly copied from ./timescaledb.nix which was copied from ./postgresql.nix
+# as it seemed inappropriate to test additional extensions for postgresql there.
+
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  postgresql-versions = import ../../pkgs/servers/sql/postgresql pkgs;
+  # Test cases from https://docs.pgvecto.rs/use-cases/hybrid-search.html
+  test-sql = pkgs.writeText "postgresql-test" ''
+    CREATE EXTENSION vectors;
+
+    CREATE TABLE items (
+      id bigserial PRIMARY KEY,
+      content text NOT NULL,
+      embedding vectors.vector(3) NOT NULL -- 3 dimensions
+    );
+
+    INSERT INTO items (content, embedding) VALUES
+      ('a fat cat sat on a mat and ate a fat rat', '[1, 2, 3]'),
+      ('a fat dog sat on a mat and ate a fat rat', '[4, 5, 6]'),
+      ('a thin cat sat on a mat and ate a thin rat', '[7, 8, 9]'),
+      ('a thin dog sat on a mat and ate a thin rat', '[10, 11, 12]');
+  '';
+  make-postgresql-test = postgresql-name: postgresql-package: makeTest {
+    name = postgresql-name;
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ diogotcorreia ];
+    };
+
+    nodes.machine = { ... }:
+      {
+        services.postgresql = {
+          enable = true;
+          package = postgresql-package;
+          extraPlugins = ps: with ps; [
+            pgvecto-rs
+          ];
+          settings.shared_preload_libraries = "vectors";
+        };
+      };
+
+    testScript = ''
+      def check_count(statement, lines):
+          return 'test $(sudo -u postgres psql postgres -tAc "{}"|wc -l) -eq {}'.format(
+              statement, lines
+          )
+
+
+      machine.start()
+      machine.wait_for_unit("postgresql")
+
+      with subtest("Postgresql with extension vectors is available just after unit start"):
+          machine.succeed(check_count("SELECT * FROM pg_available_extensions WHERE name = 'vectors' AND default_version = '${postgresql-package.pkgs.pgvecto-rs.version}';", 1))
+
+      machine.succeed("sudo -u postgres psql -f ${test-sql}")
+
+      machine.succeed(check_count("SELECT content, embedding FROM items WHERE to_tsvector('english', content) @@ 'cat & rat'::tsquery;", 2))
+
+      machine.shutdown()
+    '';
+
+  };
+  applicablePostgresqlVersions = filterAttrs (_: value: versionAtLeast value.version "12") postgresql-versions;
+in
+mapAttrs'
+  (name: package: {
+    inherit name;
+    value = make-postgresql-test name package;
+  })
+  applicablePostgresqlVersions
diff --git a/nixos/tests/privoxy.nix b/nixos/tests/privoxy.nix
index 2d95c4522a01c..2a18d332c8778 100644
--- a/nixos/tests/privoxy.nix
+++ b/nixos/tests/privoxy.nix
@@ -77,6 +77,11 @@ in
     networking.proxy.httpsProxy = "http://localhost:8118";
   };
 
+  nodes.machine_socks4  = { ... }: { services.privoxy = { enable = true; settings.forward-socks4  = "/ 127.0.0.1:9050 ."; }; };
+  nodes.machine_socks4a = { ... }: { services.privoxy = { enable = true; settings.forward-socks4a = "/ 127.0.0.1:9050 ."; }; };
+  nodes.machine_socks5  = { ... }: { services.privoxy = { enable = true; settings.forward-socks5  = "/ 127.0.0.1:9050 ."; }; };
+  nodes.machine_socks5t = { ... }: { services.privoxy = { enable = true; settings.forward-socks5t = "/ 127.0.0.1:9050 ."; }; };
+
   testScript =
     ''
       with subtest("Privoxy is running"):
@@ -109,5 +114,13 @@ in
           machine.systemctl("start systemd-tmpfiles-clean")
           # ...and count again
           machine.succeed("test $(ls /run/privoxy/certs | wc -l) -eq 0")
+
+      with subtest("Privoxy supports socks upstream proxies"):
+          for m in [machine_socks4, machine_socks4a, machine_socks5, machine_socks5t]:
+              m.wait_for_unit("privoxy")
+              m.wait_for_open_port(8118)
+              # We expect a 503 error because the dummy upstream proxy is not reachable.
+              # In issue #265654, instead privoxy segfaulted causing curl to exit with "Empty reply from server".
+              m.succeed("http_proxy=http://localhost:8118 curl -v http://does-not-exist/ 2>&1 | grep 'HTTP/1.1 503'")
     '';
 })
diff --git a/nixos/tests/sanoid.nix b/nixos/tests/sanoid.nix
index 411ebcead9f6e..1575634e62842 100644
--- a/nixos/tests/sanoid.nix
+++ b/nixos/tests/sanoid.nix
@@ -115,8 +115,11 @@ in {
     source.systemctl("start --wait syncoid-pool-sanoid.service")
     target.succeed("cat /mnt/pool/sanoid/test.txt")
     source.systemctl("start --wait syncoid-pool-syncoid.service")
+    source.systemctl("start --wait syncoid-pool-syncoid.service")
     target.succeed("cat /mnt/pool/syncoid/test.txt")
 
+    assert(len(source.succeed("zfs list -H -t snapshot pool/syncoid").splitlines()) == 1), "Syncoid should only retain one sync snapshot"
+
     source.systemctl("start --wait syncoid-pool.service")
     target.succeed("[[ -d /mnt/pool/full-pool/syncoid ]]")
 
diff --git a/nixos/tests/vikunja.nix b/nixos/tests/vikunja.nix
index 60fd5ce13854e..4e2bf166a7b6c 100644
--- a/nixos/tests/vikunja.nix
+++ b/nixos/tests/vikunja.nix
@@ -13,15 +13,20 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
         frontendScheme = "http";
         frontendHostname = "localhost";
       };
-      services.nginx.enable = true;
+      services.nginx = {
+        enable = true;
+        virtualHosts."http://localhost" = {
+          locations."/".proxyPass = "http://localhost:3456";
+        };
+      };
     };
     vikunjaPostgresql = { pkgs, ... }: {
       services.vikunja = {
         enable = true;
         database = {
           type = "postgres";
-          user = "vikunja-api";
-          database = "vikunja-api";
+          user = "vikunja";
+          database = "vikunja";
           host = "/run/postgresql";
         };
         frontendScheme = "http";
@@ -30,20 +35,25 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
       };
       services.postgresql = {
         enable = true;
-        ensureDatabases = [ "vikunja-api" ];
+        ensureDatabases = [ "vikunja" ];
         ensureUsers = [
-          { name = "vikunja-api";
+          { name = "vikunja";
             ensureDBOwnership = true;
           }
         ];
       };
-      services.nginx.enable = true;
+      services.nginx = {
+        enable = true;
+        virtualHosts."http://localhost" = {
+          locations."/".proxyPass = "http://localhost:9090";
+        };
+      };
     };
   };
 
   testScript =
     ''
-      vikunjaSqlite.wait_for_unit("vikunja-api.service")
+      vikunjaSqlite.wait_for_unit("vikunja.service")
       vikunjaSqlite.wait_for_open_port(3456)
       vikunjaSqlite.succeed("curl --fail http://localhost:3456/api/v1/info")
 
@@ -52,7 +62,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
       vikunjaSqlite.succeed("curl --fail http://localhost/api/v1/info")
       vikunjaSqlite.succeed("curl --fail http://localhost")
 
-      vikunjaPostgresql.wait_for_unit("vikunja-api.service")
+      vikunjaPostgresql.wait_for_unit("vikunja.service")
       vikunjaPostgresql.wait_for_open_port(9090)
       vikunjaPostgresql.succeed("curl --fail http://localhost:9090/api/v1/info")
 
diff --git a/nixos/tests/virtualbox.nix b/nixos/tests/virtualbox.nix
index e522d0679e151..3c2a391233dbd 100644
--- a/nixos/tests/virtualbox.nix
+++ b/nixos/tests/virtualbox.nix
@@ -3,6 +3,7 @@
   pkgs ? import ../.. { inherit system config; },
   debug ? false,
   enableUnfree ? false,
+  enableKvm ? false,
   use64bitGuest ? true
 }:
 
@@ -340,7 +341,7 @@ let
     testExtensionPack.vmFlags = enableExtensionPackVMFlags;
   };
 
-  mkVBoxTest = useExtensionPack: vms: name: testScript: makeTest {
+  mkVBoxTest = vboxHostConfig: vms: name: testScript: makeTest {
     name = "virtualbox-${name}";
 
     nodes.machine = { lib, config, ... }: {
@@ -349,14 +350,23 @@ let
         vmConfigs = mapAttrsToList mkVMConf vms;
       in [ ./common/user-account.nix ./common/x11.nix ] ++ vmConfigs;
       virtualisation.memorySize = 2048;
-      virtualisation.qemu.options = ["-cpu" "kvm64,svm=on,vmx=on"];
-      virtualisation.virtualbox.host.enable = true;
+
+      virtualisation.qemu.options = let
+        # IvyBridge is reasonably ancient to be compatible with recent
+        # Intel/AMD hosts and sufficient for the KVM flavor.
+        guestCpu = if config.virtualisation.virtualbox.host.enableKvm then "IvyBridge" else "kvm64";
+      in ["-cpu" "${guestCpu},svm=on,vmx=on"];
+
       test-support.displayManager.auto.user = "alice";
       users.users.alice.extraGroups = let
         inherit (config.virtualisation.virtualbox.host) enableHardening;
-      in lib.mkIf enableHardening (lib.singleton "vboxusers");
-      virtualisation.virtualbox.host.enableExtensionPack = useExtensionPack;
-      nixpkgs.config.allowUnfree = useExtensionPack;
+      in lib.mkIf enableHardening [ "vboxusers" ];
+
+      virtualisation.virtualbox.host = {
+        enable = true;
+      } // vboxHostConfig;
+
+      nixpkgs.config.allowUnfree = config.virtualisation.virtualbox.host.enableExtensionPack;
     };
 
     testScript = ''
@@ -390,7 +400,7 @@ let
     };
   };
 
-  unfreeTests = mapAttrs (mkVBoxTest true vboxVMsWithExtpack) {
+  unfreeTests = mapAttrs (mkVBoxTest { enableExtensionPack = true; } vboxVMsWithExtpack) {
     enable-extension-pack = ''
       create_vm_testExtensionPack()
       vbm("startvm testExtensionPack")
@@ -409,7 +419,24 @@ let
     '';
   };
 
-in mapAttrs (mkVBoxTest false vboxVMs) {
+  kvmTests = mapAttrs (mkVBoxTest {
+    enableKvm = true;
+
+    # Once the KVM version supports these, we can enable them.
+    addNetworkInterface = false;
+    enableHardening = false;
+  } vboxVMs) {
+    kvm-headless = ''
+      create_vm_headless()
+      machine.succeed(ru("VBoxHeadless --startvm headless >&2 & disown %1"))
+      wait_for_startup_headless()
+      wait_for_vm_boot_headless()
+      shutdown_vm_headless()
+      destroy_vm_headless()
+    '';
+  };
+
+in mapAttrs (mkVBoxTest {} vboxVMs) {
   simple-gui = ''
     # Home to select Tools, down to move to the VM, enter to start it.
     def send_vm_startup():
@@ -519,4 +546,6 @@ in mapAttrs (mkVBoxTest false vboxVMs) {
     destroy_vm_test1()
     destroy_vm_test2()
   '';
-} // (optionalAttrs enableUnfree unfreeTests)
+}
+// (optionalAttrs enableKvm kvmTests)
+// (optionalAttrs enableUnfree unfreeTests)