Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/release-notes/rl-2311.section.md | 36
-rw-r--r--  nixos/lib/testing/driver.nix | 7
-rw-r--r--  nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix | 20
-rw-r--r--  nixos/maintainers/scripts/lxd/lxd-container-image.nix (renamed from nixos/maintainers/scripts/lxd/lxd-image.nix) | 6
-rw-r--r--  nixos/maintainers/scripts/lxd/lxd-image-inner.nix | 95
-rw-r--r--  nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix | 20
-rw-r--r--  nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix | 27
-rw-r--r--  nixos/modules/config/update-users-groups.pl | 15
-rw-r--r--  nixos/modules/config/users-groups.nix | 42
-rw-r--r--  nixos/modules/hardware/decklink.nix | 16
-rw-r--r--  nixos/modules/hardware/infiniband.nix | 58
-rw-r--r--  nixos/modules/hardware/video/nvidia.nix | 550
-rw-r--r--  nixos/modules/i18n/input-method/uim.nix | 2
-rw-r--r--  nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix | 1
-rw-r--r--  nixos/modules/installer/cd-dvd/iso-image.nix | 129
-rw-r--r--  nixos/modules/module-list.nix | 7
-rw-r--r--  nixos/modules/programs/ecryptfs.nix | 31
-rw-r--r--  nixos/modules/programs/streamdeck-ui.nix | 2
-rw-r--r--  nixos/modules/programs/yazi.nix | 49
-rw-r--r--  nixos/modules/security/acme/default.md | 4
-rw-r--r--  nixos/modules/security/acme/default.nix | 149
-rw-r--r--  nixos/modules/security/pam.nix | 2
-rw-r--r--  nixos/modules/security/sudo.nix | 4
-rw-r--r--  nixos/modules/security/wrappers/default.nix | 20
-rw-r--r--  nixos/modules/security/wrappers/wrapper.c | 109
-rw-r--r--  nixos/modules/security/wrappers/wrapper.nix | 4
-rw-r--r--  nixos/modules/services/audio/tts.nix | 1
-rw-r--r--  nixos/modules/services/backup/duplicati.nix | 7
-rw-r--r--  nixos/modules/services/backup/restic.nix | 32
-rw-r--r--  nixos/modules/services/continuous-integration/buildbot/master.nix | 8
-rw-r--r--  nixos/modules/services/continuous-integration/hail.nix | 61
-rw-r--r--  nixos/modules/services/continuous-integration/woodpecker/agents.nix | 59
-rw-r--r--  nixos/modules/services/databases/surrealdb.nix | 29
-rw-r--r--  nixos/modules/services/hardware/auto-cpufreq.nix | 7
-rw-r--r--  nixos/modules/services/hardware/openrgb.nix | 9
-rw-r--r--  nixos/modules/services/home-automation/zigbee2mqtt.nix | 3
-rw-r--r--  nixos/modules/services/logging/logrotate.nix | 12
-rw-r--r--  nixos/modules/services/mail/listmonk.nix | 4
-rw-r--r--  nixos/modules/services/mail/stalwart-mail.nix | 106
-rw-r--r--  nixos/modules/services/matrix/mautrix-telegram.nix | 1
-rw-r--r--  nixos/modules/services/matrix/mautrix-whatsapp.nix | 99
-rw-r--r--  nixos/modules/services/misc/atuin.nix | 2
-rw-r--r--  nixos/modules/services/misc/cfdyndns.nix | 14
-rw-r--r--  nixos/modules/services/misc/gitlab.nix | 7
-rw-r--r--  nixos/modules/services/monitoring/mimir.nix | 14
-rw-r--r--  nixos/modules/services/networking/dae.nix | 170
-rw-r--r--  nixos/modules/services/networking/jool.nix | 313
-rw-r--r--  nixos/modules/services/networking/nftables.nix | 22
-rw-r--r--  nixos/modules/services/networking/ssh/sshd.nix | 3
-rw-r--r--  nixos/modules/services/networking/wstunnel.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/galene.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/grocy.nix | 16
-rw-r--r--  nixos/modules/services/web-apps/healthchecks.nix | 45
-rw-r--r--  nixos/modules/services/web-apps/meme-bingo-web.nix | 93
-rw-r--r--  nixos/modules/services/web-apps/mobilizon.nix | 442
-rw-r--r--  nixos/modules/services/web-apps/plausible.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/tt-rss.nix | 44
-rw-r--r--  nixos/modules/services/web-servers/nginx/default.nix | 12
-rw-r--r--  nixos/modules/services/x11/desktop-managers/budgie.nix | 3
-rw-r--r--  nixos/modules/services/x11/window-managers/oroborus.nix | 25
-rw-r--r--  nixos/modules/system/boot/binfmt.nix | 2
-rw-r--r--  nixos/modules/system/boot/networkd.nix | 13
-rwxr-xr-x  nixos/modules/system/boot/stage-2-init.sh | 2
-rw-r--r--  nixos/modules/system/boot/systemd.nix | 4
-rw-r--r--  nixos/modules/system/boot/systemd/initrd.nix | 16
-rw-r--r--  nixos/modules/tasks/bcache.nix | 12
-rw-r--r--  nixos/modules/tasks/network-interfaces.nix | 6
-rw-r--r--  nixos/modules/tasks/swraid.nix | 14
-rw-r--r--  nixos/modules/virtualisation/anbox.nix | 59
-rw-r--r--  nixos/modules/virtualisation/lxc-container.nix | 130
-rw-r--r--  nixos/modules/virtualisation/lxc-image-metadata.nix | 104
-rw-r--r--  nixos/modules/virtualisation/lxc-instance-common.nix | 30
-rw-r--r--  nixos/modules/virtualisation/lxd-virtual-machine.nix | 46
-rw-r--r--  nixos/modules/virtualisation/lxd.nix | 129
-rw-r--r--  nixos/release.nix | 51
-rw-r--r--  nixos/tests/acme.nix | 44
-rw-r--r--  nixos/tests/akkoma.nix | 5
-rw-r--r--  nixos/tests/all-tests.nix | 9
-rw-r--r--  nixos/tests/anbox.nix | 40
-rw-r--r--  nixos/tests/custom-ca.nix | 4
-rw-r--r--  nixos/tests/dae.nix | 29
-rw-r--r--  nixos/tests/jool.nix | 106
-rw-r--r--  nixos/tests/lxd/container.nix | 13
-rw-r--r--  nixos/tests/lxd/default.nix | 3
-rw-r--r--  nixos/tests/lxd/preseed.nix | 71
-rw-r--r--  nixos/tests/lxd/virtual-machine.nix | 64
-rw-r--r--  nixos/tests/mobilizon.nix | 44
-rw-r--r--  nixos/tests/postgis.nix | 2
-rw-r--r--  nixos/tests/restic.nix | 20
-rw-r--r--  nixos/tests/stalwart-mail.nix | 117
-rw-r--r--  nixos/tests/sudo.nix | 2
-rw-r--r--  nixos/tests/systemd-initrd-swraid.nix | 16
-rw-r--r--  nixos/tests/wrappers.nix | 24
93 files changed, 3073 insertions, 1233 deletions
diff --git a/nixos/doc/manual/release-notes/rl-2311.section.md b/nixos/doc/manual/release-notes/rl-2311.section.md
index eb6fb6fc6e451..a59dccfbc42bf 100644
--- a/nixos/doc/manual/release-notes/rl-2311.section.md
+++ b/nixos/doc/manual/release-notes/rl-2311.section.md
@@ -6,6 +6,10 @@
 
 - Support for WiFi6 (IEEE 802.11ax) and WPA3-SAE-PK was enabled in the `hostapd` package, along with a significant rework of the hostapd module.
 
+- LXD now supports virtual machine instances, complementing the existing container support.
+
+- The `nixos-rebuild` command has been given a `list-generations` subcommand. See `man nixos-rebuild` for more details.
+
 ## New Services {#sec-release-23.11-new-services}
 
 - [MCHPRS](https://github.com/MCHPR/MCHPRS), a multithreaded Minecraft server built for redstone. Available as [services.mchprs](#opt-services.mchprs.enable).
@@ -28,13 +32,17 @@
 
 * [NS-USBLoader](https://github.com/developersu/ns-usbloader/), an all-in-one tool for managing Nintendo Switch homebrew. Available as [programs.ns-usbloader](#opt-programs.ns-usbloader.enable).
 
+- [Mobilizon](https://joinmobilizon.org/), a Fediverse platform for publishing events.
+
 - [Anuko Time Tracker](https://github.com/anuko/timetracker), a simple, easy to use, open source time tracking system. Available as [services.anuko-time-tracker](#opt-services.anuko-time-tracker.enable).
 
 - [Prometheus MySQL exporter](https://github.com/prometheus/mysqld_exporter), a MySQL server exporter for Prometheus. Available as [services.prometheus.exporters.mysqld](#opt-services.prometheus.exporters.mysqld.enable).
 
 - [sitespeed-io](https://sitespeed.io), a tool that can generate metrics (timings, diagnostics) for websites. Available as [services.sitespeed-io](#opt-services.sitespeed-io.enable).
 
-- [Jool](https://nicmx.github.io/Jool/en/index.html), an Open Source implementation of IPv4/IPv6 translation on Linux. Available as [networking.jool.enable](#opt-networking.jool.enable).
+- [stalwart-mail](https://stalw.art), an all-in-one email server (SMTP, IMAP, JMAP). Available as [services.stalwart-mail](#opt-services.stalwart-mail.enable).
+
+- [Jool](https://nicmx.github.io/Jool/en/index.html), a kernelspace NAT64 and SIIT implementation, providing translation between IPv4 and IPv6. Available as [networking.jool.enable](#opt-networking.jool.enable).
 
 - [Apache Guacamole](https://guacamole.apache.org/), a cross-platform, clientless remote desktop gateway. Available as [services.guacamole-server](#opt-services.guacamole-server.enable) and [services.guacamole-client](#opt-services.guacamole-client.enable) services.
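
The stalwart-mail and Jool entries above only name their `enable` options; a minimal, hypothetical sketch of switching them on (everything beyond `enable` is an assumption left to the respective modules' defaults):

```nix
{
  # Hedged sketch: only the enable options are taken from the notes above;
  # both services will need further, module-specific configuration in practice.
  services.stalwart-mail.enable = true; # all-in-one mail server (SMTP, IMAP, JMAP)
  networking.jool.enable = true;        # kernelspace NAT64/SIIT translation
}
```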
 
@@ -50,6 +58,8 @@
 
 - [eris-server](https://codeberg.org/eris/eris-go). [ERIS](https://eris.codeberg.page/) is an encoding for immutable storage and this server provides block exchange as well as content decoding over HTTP and through a FUSE file-system. Available as [services.eris-server](#opt-services.eris-server.enable).
 
+- hardware/infiniband.nix adds InfiniBand subnet manager support using an [opensm](https://github.com/linux-rdma/opensm) systemd template service, instantiated per card GUID. The module also loads the required kernel modules and installs CLI tooling to help administrators debug and measure performance. Available as [hardware.infiniband.enable](#opt-hardware.infiniband.enable).
+
 - [Honk](https://humungus.tedunangst.com/r/honk), a complete ActivityPub server with minimal setup and support costs.
   Available as [services.honk](#opt-services.honk.enable).
 
@@ -75,12 +85,18 @@
 
 - `pass` now does not contain `password-store.el`.  Users should get `password-store.el` from Emacs lisp package set `emacs.pkgs.password-store`.
 
+- `mu` now does not install `mu4e` files by default.  Users should get `mu4e` from Emacs lisp package set `emacs.pkgs.mu4e`.
+
 - `mariadb` now defaults to `mariadb_1011` instead of `mariadb_106`, meaning the default version was upgraded from 10.6.x to 10.11.x. See the [upgrade notes](https://mariadb.com/kb/en/upgrading-from-mariadb-10-6-to-mariadb-10-11/) for potential issues.
 
 - `getent` has been moved from `glibc`'s `bin` output to its own dedicated output, reducing closure size for many dependents. Dependents using the `getent` alias should not be affected; others should move from using `glibc.bin` or `getBin glibc` to `getent` (which also improves compatibility with non-glibc platforms).
 
+- The `users.users.<name>.passwordFile` option has been renamed to `users.users.<name>.hashedPasswordFile` to avoid possible confusion. The option is in fact the file-based version of `hashedPassword`, not `password`, and expects a file containing the {manpage}`crypt(3)` hash of the user's password.
+
 - The `services.ananicy.extraRules` option now has the type of `listOf attrs` instead of `string`.
 
+- JACK tools (`jack_*` except `jack_control`) have moved from the `jack2` package to `jack-example-tools`.
+
 - The `matrix-synapse` package & module have undergone some significant internal changes, for most setups no intervention is needed, though:
   - The option [`services.matrix-synapse.package`](#opt-services.matrix-synapse.package) is now read-only. For modifying the package, use an overlay which modifies `matrix-synapse-unwrapped` instead. More on that below.
   - The `enableSystemd` & `enableRedis` arguments have been removed and `matrix-synapse` has been renamed to `matrix-synapse-unwrapped`. Also, several optional dependencies (such as `psycopg2` or `authlib`) have been removed.
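
For the `passwordFile` rename described above, a minimal migration sketch; the user name and file path are placeholders:

```nix
{
  users.users.alice = {
    isNormalUser = true;
    # Previously: passwordFile = "/etc/secrets/alice.passwd";
    # The referenced file must contain a single crypt(3) hash,
    # e.g. as produced by `mkpasswd`.
    hashedPasswordFile = "/etc/secrets/alice.passwd";
  };
}
```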
@@ -90,6 +106,8 @@
 
 - `etcd` has been updated to 3.5, you will want to read the [3.3 to 3.4](https://etcd.io/docs/v3.5/upgrades/upgrade_3_4/) and [3.4 to 3.5](https://etcd.io/docs/v3.5/upgrades/upgrade_3_5/) upgrade guides
 
+- `gitlab` installations created or updated between versions \[15.11.0, 15.11.2] have an incorrect database schema. This will become a problem when upgrading to `gitlab` >=16.2.0. A workaround for affected users can be found in the [GitLab docs](https://docs.gitlab.com/ee/update/versions/gitlab_16_changes.html#undefined-column-error-upgrading-to-162-or-later).
+
 - `consul` has been updated to `1.16.0`. See the [release note](https://github.com/hashicorp/consul/releases/tag/v1.16.0) for more details. Once a new Consul version has started and upgraded its data directory, it generally cannot be downgraded to the previous version.
 
 - `himalaya` has been updated to `0.8.0`, which drops the native TLS support (in favor of Rustls) and add OAuth 2.0 support. See the [release note](https://github.com/soywod/himalaya/releases/tag/v0.8.0) for more details.
@@ -171,6 +189,10 @@
 
 - The `html-proofer` package has been updated from major version 3 to major version 5, which includes [breaking changes](https://github.com/gjtorikian/html-proofer/blob/v5.0.8/UPGRADING.md).
 
+- `kratos` has been updated from 0.10.1 to the first stable version 1.0.0, please read the [0.10.1 to 0.11.0](https://github.com/ory/kratos/releases/tag/v0.11.0), [0.11.0 to 0.11.1](https://github.com/ory/kratos/releases/tag/v0.11.1), [0.11.1 to 0.13.0](https://github.com/ory/kratos/releases/tag/v0.13.0) and [0.13.0 to 1.0.0](https://github.com/ory/kratos/releases/tag/v1.0.0) upgrade guides. The most notable breaking change is the introduction of one-time passwords (`code`) and update of the default recovery strategy from `link` to `code`.
+
+- The `hail` NixOS module was removed, as `hail` has been unmaintained since 2017.
+
 ## Other Notable Changes {#sec-release-23.11-notable-changes}
 
 - The Cinnamon module now enables XDG desktop integration by default. If you are experiencing collisions related to xdg-desktop-portal-gtk you can safely remove `xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];` from your NixOS configuration.
@@ -213,6 +235,8 @@ The module update takes care of the new config syntax and the data itself (user
 
 - `services.nginx` gained a `defaultListen` option at server-level with support for PROXY protocol listeners, also `proxyProtocol` is now exposed in `services.nginx.virtualHosts.<name>.listen` option. It is now possible to run PROXY listeners and non-PROXY listeners at a server-level, see [#213510](https://github.com/NixOS/nixpkgs/pull/213510/) for more details.
 
+- `services.restic.backups` now adds wrapper scripts to your system path, which set the same environment variables as the service, so restic operations can easily be run from the command line. This behavior can be disabled per backup by setting `createWrapper` to `false`.
+
 - `services.prometheus.exporters` has a new exporter to monitor electrical power consumption based on PowercapRAPL sensor called [Scaphandre](https://github.com/hubblo-org/scaphandre), see [#239803](https://github.com/NixOS/nixpkgs/pull/239803) for more details.
 
 - The MariaDB C client library was upgraded from 3.2.x to 3.3.x. It is recommended to review the [upstream release notes](https://mariadb.com/kb/en/mariadb-connector-c-33-release-notes/).
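
A sketch of the new restic wrapper toggle mentioned above; the backup name, repository, and secret path are placeholders:

```nix
{
  services.restic.backups.home = {
    repository = "/srv/restic-repo";               # placeholder repository
    passwordFile = "/etc/secrets/restic-password"; # placeholder secret
    paths = [ "/home" ];
    # A wrapper script for this backup is put on the system path by default;
    # set createWrapper = false to opt out for this backup only.
    createWrapper = false;
  };
}
```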
@@ -239,6 +263,14 @@ The module update takes care of the new config syntax and the data itself (user
 - `networking.nftables` is no longer flushing all rulesets on every reload.
   Use `networking.nftables.flushRuleset = true;` to get back the old behaviour.
 
+- The `cawbird` package has been dropped from nixpkgs, as it was broken by the shutdown of the Twitter API and has been abandoned upstream.
+
+- `hardware.nvidia` gained `datacenter` options for enabling the NVIDIA Data Center drivers and for configuring NVLink/NVSwitch topologies through `nv-fabricmanager`.
+
+- Certificate generation via the `security.acme` module now limits the number of concurrently running certificate renewal and generation jobs, to avoid spiking resource usage when processing many certificates at once. The limit defaults to *5* and can be adjusted via `maxConcurrentRenewals`. Setting it to *0* disables the limit altogether.
+
+- The new `boot.bcache.enable` option (enabled by default) allows completely removing `bcache` mount support.
+
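
For the ACME concurrency limit described above, a minimal sketch; the contact address and limit value are placeholders:

```nix
{
  security.acme = {
    acceptTerms = true;
    defaults.email = "admin@example.org"; # placeholder contact address
    # At most two renewal/generation jobs run concurrently;
    # the default is 5, and 0 disables the limit.
    maxConcurrentRenewals = 2;
  };
}
```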
 ## Nixpkgs internals {#sec-release-23.11-nixpkgs-internals}
 
 - The use of `sourceRoot = "source";`, `sourceRoot = "source/subdir";`, and similar lines in package derivations using the default `unpackPhase` is deprecated as it requires `unpackPhase` to always produce a directory named "source". Use `sourceRoot = src.name`, `sourceRoot = "${src.name}/subdir";`, or `setSourceRoot = "sourceRoot=$(echo */subdir)";` or similar instead.
@@ -266,3 +298,5 @@ The module update takes care of the new config syntax and the data itself (user
   ./common/auto-format-root-device.nix ];` When you use the systemd initrd, you
   can automatically format the root device by setting
   `virtualisation.fileSystems."/".autoFormat = true;`.
+
+- The `electron` packages now place their application files in `$out/libexec/electron` instead of `$out/lib/electron`. Packages using electron-builder will fail to build and need to be adjusted by changing `lib` to `libexec`.
diff --git a/nixos/lib/testing/driver.nix b/nixos/lib/testing/driver.nix
index 23574698c0629..cc97ca72083f9 100644
--- a/nixos/lib/testing/driver.nix
+++ b/nixos/lib/testing/driver.nix
@@ -175,7 +175,12 @@ in
   };
 
   config = {
-    _module.args.hostPkgs = config.hostPkgs;
+    _module.args = {
+      hostPkgs =
+        # Comment is in nixos/modules/misc/nixpkgs.nix
+        lib.mkOverride lib.modules.defaultOverridePriority
+          config.hostPkgs.__splicedPackages;
+    };
 
     driver = withChecks driver;
 
diff --git a/nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix b/nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix
new file mode 100644
index 0000000000000..7b743d170bc64
--- /dev/null
+++ b/nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix
@@ -0,0 +1,20 @@
+# Edit this configuration file to define what should be installed on
+# your system.  Help is available in the configuration.nix(5) man page
+# and in the NixOS manual (accessible by running ‘nixos-help’).
+
+{ config, pkgs, lib, ... }:
+
+{
+  imports =
+    [
+      # Include the default lxd configuration.
+      ../../../modules/virtualisation/lxc-container.nix
+      # Include the container-specific autogenerated configuration.
+      ./lxd.nix
+    ];
+
+  networking.useDHCP = false;
+  networking.interfaces.eth0.useDHCP = true;
+
+  system.stateVersion = "21.05"; # Did you read the comment?
+}
diff --git a/nixos/maintainers/scripts/lxd/lxd-image.nix b/nixos/maintainers/scripts/lxd/lxd-container-image.nix
index 07605c5c31208..3bd1320b2b680 100644
--- a/nixos/maintainers/scripts/lxd/lxd-image.nix
+++ b/nixos/maintainers/scripts/lxd/lxd-container-image.nix
@@ -1,4 +1,4 @@
-{ lib, config, pkgs, ... }:
+{ lib, pkgs, ... }:
 
 {
   imports = [
@@ -16,8 +16,8 @@
   system.activationScripts.config = ''
     if [ ! -e /etc/nixos/configuration.nix ]; then
       mkdir -p /etc/nixos
-      cat ${./lxd-image-inner.nix} > /etc/nixos/configuration.nix
-      sed 's|../../../modules/virtualisation/lxc-container.nix|<nixpkgs/nixos/modules/virtualisation/lxc-container.nix>|g' -i /etc/nixos/configuration.nix
+      cat ${./lxd-container-image-inner.nix} > /etc/nixos/configuration.nix
+      ${lib.getExe pkgs.gnused} 's|../../../modules/virtualisation/lxc-container.nix|<nixpkgs/nixos/modules/virtualisation/lxc-container.nix>|g' -i /etc/nixos/configuration.nix
     fi
   '';
 
diff --git a/nixos/maintainers/scripts/lxd/lxd-image-inner.nix b/nixos/maintainers/scripts/lxd/lxd-image-inner.nix
deleted file mode 100644
index c1a9b1aacd186..0000000000000
--- a/nixos/maintainers/scripts/lxd/lxd-image-inner.nix
+++ /dev/null
@@ -1,95 +0,0 @@
-# Edit this configuration file to define what should be installed on
-# your system.  Help is available in the configuration.nix(5) man page
-# and in the NixOS manual (accessible by running ‘nixos-help’).
-
-{ config, pkgs, lib, ... }:
-
-{
-  imports =
-    [ # Include the default lxd configuration.
-      ../../../modules/virtualisation/lxc-container.nix
-      # Include the container-specific autogenerated configuration.
-      ./lxd.nix
-    ];
-
-  # networking.hostName = mkForce "nixos"; # Overwrite the hostname.
-  # networking.wireless.enable = true;  # Enables wireless support via wpa_supplicant.
-
-  # Set your time zone.
-  # time.timeZone = "Europe/Amsterdam";
-
-  # The global useDHCP flag is deprecated, therefore explicitly set to false here.
-  # Per-interface useDHCP will be mandatory in the future, so this generated config
-  # replicates the default behaviour.
-  networking.useDHCP = false;
-  networking.interfaces.eth0.useDHCP = true;
-
-  # Configure network proxy if necessary
-  # networking.proxy.default = "http://user:password@proxy:port/";
-  # networking.proxy.noProxy = "127.0.0.1,localhost,internal.domain";
-
-  # Select internationalisation properties.
-  # i18n.defaultLocale = "en_US.UTF-8";
-  # console = {
-  #   font = "Lat2-Terminus16";
-  #   keyMap = "us";
-  # };
-
-  # Enable the X11 windowing system.
-  # services.xserver.enable = true;
-
-  # Configure keymap in X11
-  # services.xserver.layout = "us";
-  # services.xserver.xkbOptions = "eurosign:e";
-
-  # Enable CUPS to print documents.
-  # services.printing.enable = true;
-
-  # Enable sound.
-  # sound.enable = true;
-  # hardware.pulseaudio.enable = true;
-
-  # Enable touchpad support (enabled default in most desktopManager).
-  # services.xserver.libinput.enable = true;
-
-  # Define a user account. Don't forget to set a password with ‘passwd’.
-  # users.users.alice = {
-  #   isNormalUser = true;
-  #   extraGroups = [ "wheel" ]; # Enable ‘sudo’ for the user.
-  # };
-
-  # List packages installed in system profile. To search, run:
-  # $ nix search wget
-  # environment.systemPackages = with pkgs; [
-  #   vim # Do not forget to add an editor to edit configuration.nix! The Nano editor is also installed by default.
-  #   wget
-  #   firefox
-  # ];
-
-  # Some programs need SUID wrappers, can be configured further or are
-  # started in user sessions.
-  # programs.mtr.enable = true;
-  # programs.gnupg.agent = {
-  #   enable = true;
-  #   enableSSHSupport = true;
-  # };
-
-  # List services that you want to enable:
-
-  # Enable the OpenSSH daemon.
-  # services.openssh.enable = true;
-
-  # Open ports in the firewall.
-  # networking.firewall.allowedTCPPorts = [ ... ];
-  # networking.firewall.allowedUDPPorts = [ ... ];
-  # Or disable the firewall altogether.
-  # networking.firewall.enable = false;
-
-  # This value determines the NixOS release from which the default
-  # settings for stateful data, like file locations and database versions
-  # on your system were taken. It’s perfectly fine and recommended to leave
-  # this value at the release version of the first install of this system.
-  # Before changing this value read the documentation for this option
-  # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
-  system.stateVersion = "21.05"; # Did you read the comment?
-}
diff --git a/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix b/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix
new file mode 100644
index 0000000000000..a8f2c63ac5c69
--- /dev/null
+++ b/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix
@@ -0,0 +1,20 @@
+# Edit this configuration file to define what should be installed on
+# your system.  Help is available in the configuration.nix(5) man page
+# and in the NixOS manual (accessible by running ‘nixos-help’).
+
+{ config, pkgs, lib, ... }:
+
+{
+  imports =
+    [
+      # Include the default lxd configuration.
+      ../../../modules/virtualisation/lxd-virtual-machine.nix
+      # Include the virtual-machine-specific autogenerated configuration.
+      ./lxd.nix
+    ];
+
+  networking.useDHCP = false;
+  networking.interfaces.eth0.useDHCP = true;
+
+  system.stateVersion = "23.05"; # Did you read the comment?
+}
diff --git a/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix b/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix
new file mode 100644
index 0000000000000..eb0d9217d4021
--- /dev/null
+++ b/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix
@@ -0,0 +1,27 @@
+{ lib, pkgs, ... }:
+
+{
+  imports = [
+    ../../../modules/virtualisation/lxd-virtual-machine.nix
+  ];
+
+  virtualisation.lxc.templates.nix = {
+    enable = true;
+    target = "/etc/nixos/lxd.nix";
+    template = ./nix.tpl;
+    when = ["create" "copy"];
+  };
+
+  # copy the config for nixos-rebuild
+  system.activationScripts.config = ''
+    if [ ! -e /etc/nixos/configuration.nix ]; then
+      mkdir -p /etc/nixos
+      cat ${./lxd-virtual-machine-image-inner.nix} > /etc/nixos/configuration.nix
+      ${lib.getExe pkgs.gnused} 's|../../../modules/virtualisation/lxd-virtual-machine.nix|<nixpkgs/nixos/modules/virtualisation/lxd-virtual-machine.nix>|g' -i /etc/nixos/configuration.nix
+    fi
+  '';
+
+  # Network
+  networking.useDHCP = false;
+  networking.interfaces.enp5s0.useDHCP = true;
+}
diff --git a/nixos/modules/config/update-users-groups.pl b/nixos/modules/config/update-users-groups.pl
index 5236264e16b79..3785ba8c5fb80 100644
--- a/nixos/modules/config/update-users-groups.pl
+++ b/nixos/modules/config/update-users-groups.pl
@@ -4,7 +4,7 @@ use File::Path qw(make_path);
 use File::Slurp;
 use Getopt::Long;
 use JSON;
-use DateTime;
+use Time::Piece;
 
 # Keep track of deleted uids and gids.
 my $uidMapFile = "/var/lib/nixos/uid-map";
@@ -26,17 +26,8 @@ sub updateFile {
 # Converts an ISO date to number of days since 1970-01-01
 sub dateToDays {
     my ($date) = @_;
-    my ($year, $month, $day) = split('-', $date, -3);
-    my $dt = DateTime->new(
-        year      => $year,
-        month     => $month,
-        day       => $day,
-        hour      => 0,
-        minute    => 0,
-        second    => 0,
-        time_zone => 'UTC',
-    );
-    return $dt->epoch / 86400;
+    my $time = Time::Piece->strptime($date, "%Y-%m-%d");
+    return $time->epoch / 60 / 60 / 24;
 }
 
 sub nscdInvalidate {
diff --git a/nixos/modules/config/users-groups.nix b/nixos/modules/config/users-groups.nix
index 9629e3964c96f..4893d28924ebd 100644
--- a/nixos/modules/config/users-groups.nix
+++ b/nixos/modules/config/users-groups.nix
@@ -18,11 +18,11 @@ let
 
   passwordDescription = ''
     The options {option}`hashedPassword`,
-    {option}`password` and {option}`passwordFile`
+    {option}`password` and {option}`hashedPasswordFile`
     controls what password is set for the user.
     {option}`hashedPassword` overrides both
-    {option}`password` and {option}`passwordFile`.
-    {option}`password` overrides {option}`passwordFile`.
+    {option}`password` and {option}`hashedPasswordFile`.
+    {option}`password` overrides {option}`hashedPasswordFile`.
     If none of these three options are set, no password is assigned to
     the user, and the user will not be able to do password logins.
     If the option {option}`users.mutableUsers` is true, the
@@ -250,18 +250,26 @@ let
         '';
       };
 
-      passwordFile = mkOption {
+      hashedPasswordFile = mkOption {
         type = with types; nullOr str;
-        default = null;
+        default = cfg.users.${name}.passwordFile;
+        defaultText = literalExpression "null";
         description = lib.mdDoc ''
-          The full path to a file that contains the user's password. The password
-          file is read on each system activation. The file should contain
-          exactly one line, which should be the password in an encrypted form
-          that is suitable for the `chpasswd -e` command.
+          The full path to a file that contains the hash of the user's
+          password. The password file is read on each system activation. The
+          file should contain exactly one line, which should be the password in
+          an encrypted form that is suitable for the `chpasswd -e` command.
           ${passwordDescription}
         '';
       };
 
+      passwordFile = mkOption {
+        type = with types; nullOr (passwdEntry str);
+        default = null;
+        visible = false;
+        description = lib.mdDoc "Deprecated alias of hashedPasswordFile";
+      };
+
       initialHashedPassword = mkOption {
         type = with types; nullOr (passwdEntry str);
         default = null;
@@ -447,7 +455,7 @@ let
     users = mapAttrsToList (_: u:
       { inherit (u)
           name uid group description home homeMode createHome isSystemUser
-          password passwordFile hashedPassword
+          password hashedPasswordFile hashedPassword
           autoSubUidGidRange subUidRanges subGidRanges
           initialPassword initialHashedPassword expires;
         shell = utils.toShellPath u.shell;
@@ -648,7 +656,7 @@ in {
         install -m 0700 -d /root
         install -m 0755 -d /home
 
-        ${pkgs.perl.withPackages (p: [ p.FileSlurp p.JSON p.DateTime ])}/bin/perl \
+        ${pkgs.perl.withPackages (p: [ p.FileSlurp p.JSON ])}/bin/perl \
         -w ${./update-users-groups.pl} ${spec}
       '';
     };
@@ -756,7 +764,7 @@ in {
             &&
             (allowsLogin cfg.hashedPassword
              || cfg.password != null
-             || cfg.passwordFile != null
+             || cfg.hashedPasswordFile != null
              || cfg.openssh.authorizedKeys.keys != []
              || cfg.openssh.authorizedKeys.keyFiles != [])
           ) cfg.users ++ [
@@ -845,9 +853,13 @@ in {
           The password hash of user "${user.name}" may be invalid. You must set a
           valid hash or the user will be locked out of their account. Please
           check the value of option `users.users."${user.name}".hashedPassword`.''
-        else null
-      ));
-
+        else null)
+        ++ flip mapAttrsToList cfg.users (name: user:
+          if user.passwordFile != null then
+            ''The option `users.users."${name}".passwordFile' has been renamed '' +
+            ''to `users.users."${name}".hashedPasswordFile'.''
+          else null)
+      );
   };
 
 }
diff --git a/nixos/modules/hardware/decklink.nix b/nixos/modules/hardware/decklink.nix
new file mode 100644
index 0000000000000..d179e1d7634ff
--- /dev/null
+++ b/nixos/modules/hardware/decklink.nix
@@ -0,0 +1,16 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.hardware.decklink;
+  kernelPackages = config.boot.kernelPackages;
+in
+{
+  options.hardware.decklink.enable = lib.mkEnableOption "hardware support for the Blackmagic Design Decklink audio/video interfaces";
+
+  config = lib.mkIf cfg.enable {
+    boot.kernelModules = [ "blackmagic" "blackmagic-io" "snd_blackmagic-io" ];
+    boot.extraModulePackages = [ kernelPackages.decklink ];
+    systemd.packages = [ pkgs.blackmagic-desktop-video ];
+    systemd.services.DesktopVideoHelper.wantedBy = [ "multi-user.target" ];
+  };
+}
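
A minimal usage sketch for the DeckLink module added above:

```nix
{
  # Loads the blackmagic/blackmagic-io/snd_blackmagic-io kernel modules from
  # the configured kernel package set and starts the DesktopVideoHelper service.
  hardware.decklink.enable = true;
}
```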
diff --git a/nixos/modules/hardware/infiniband.nix b/nixos/modules/hardware/infiniband.nix
new file mode 100644
index 0000000000000..962883fa79721
--- /dev/null
+++ b/nixos/modules/hardware/infiniband.nix
@@ -0,0 +1,58 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.hardware.infiniband;
+  opensm-services = {
+    "opensm@" = {
+      enable = true;
+      description = "Starts OpenSM Infiniband fabric Subnet Managers";
+      before = [ "network.target"];
+      unitConfig = {
+        ConditionPathExists = "/sys/class/infiniband_mad/abi_version";
+      };
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.opensm}/bin/opensm --guid %I --log_file /var/log/opensm.%I.log";
+      };
+    };
+  } // (builtins.listToAttrs (map (guid: {
+    name = "opensm@${guid}";
+    value = {
+      enable = true;
+      wantedBy = [ "machines.target" ];
+      overrideStrategy = "asDropin";
+    };
+  } ) cfg.guids));
+
+in
+
+{
+  options.hardware.infiniband = {
+    enable = mkEnableOption "Infiniband support";
+    guids = mkOption {
+      type = with types; listOf str;
+      default = [];
+      example = [ "0xe8ebd30000eee2e1" ];
+      description = lib.mdDoc ''
+        A list of InfiniBand port GUIDs on the system. These are discoverable using `ibstat -p`.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot.initrd.kernelModules = [
+      "mlx5_core" "mlx5_ib" "ib_cm"
+      "rdma_cm" "rdma_ucm" "rpcrdma"
+      "ib_ipoib" "ib_isert" "ib_umad" "ib_uverbs"
+    ];
+    # rdma-core exposes ibstat, mstflint exposes mstconfig (which can be needed for
+    # setting link configurations), qperf needed to affirm link speeds
+    environment.systemPackages = with pkgs; [
+      rdma-core mstflint qperf
+    ];
+    systemd.services = opensm-services;
+  };
+}
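
A usage sketch for the InfiniBand module above; the GUID is the example value from the option declaration:

```nix
{
  hardware.infiniband = {
    enable = true;
    # One opensm@<guid> systemd instance is started per listed port GUID;
    # GUIDs can be discovered on the host with `ibstat -p`.
    guids = [ "0xe8ebd30000eee2e1" ];
  };
}
```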
diff --git a/nixos/modules/hardware/video/nvidia.nix b/nixos/modules/hardware/video/nvidia.nix
index 67c3afcf320a9..0b1238dd888a5 100644
--- a/nixos/modules/hardware/video/nvidia.nix
+++ b/nixos/modules/hardware/video/nvidia.nix
@@ -4,8 +4,10 @@
   pkgs,
   ...
 }: let
+  x11Enabled = config.services.xserver.enable
+               && (lib.elem "nvidia" config.services.xserver.videoDrivers);
   nvidia_x11 =
-    if (lib.elem "nvidia" config.services.xserver.videoDrivers)
+    if  x11Enabled || cfg.datacenter.enable
     then cfg.package
     else null;
 
@@ -18,9 +20,64 @@
   primeEnabled = syncCfg.enable || reverseSyncCfg.enable || offloadCfg.enable;
   busIDType = lib.types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
   ibtSupport = cfg.open || (nvidia_x11.ibtSupport or false);
+  settingsFormat = pkgs.formats.keyValue {};
 in {
   options = {
     hardware.nvidia = {
+      datacenter.enable = lib.mkEnableOption (lib.mdDoc ''
+        Data Center drivers for NVIDIA cards on a NVLink topology.
+      '');
+      datacenter.settings = lib.mkOption {
+        type = settingsFormat.type;
+        default = {
+          LOG_LEVEL=4;
+          LOG_FILE_NAME="/var/log/fabricmanager.log";
+          LOG_APPEND_TO_LOG=1;
+          LOG_FILE_MAX_SIZE=1024;
+          LOG_USE_SYSLOG=0;
+          DAEMONIZE=1;
+          BIND_INTERFACE_IP="127.0.0.1";
+          STARTING_TCP_PORT=16000;
+          FABRIC_MODE=0;
+          FABRIC_MODE_RESTART=0;
+          STATE_FILE_NAME="/var/tmp/fabricmanager.state";
+          FM_CMD_BIND_INTERFACE="127.0.0.1";
+          FM_CMD_PORT_NUMBER=6666;
+          FM_STAY_RESIDENT_ON_FAILURES=0;
+          ACCESS_LINK_FAILURE_MODE=0;
+          TRUNK_LINK_FAILURE_MODE=0;
+          NVSWITCH_FAILURE_MODE=0;
+          ABORT_CUDA_JOBS_ON_FM_EXIT=1;
+          TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
+        };
+        defaultText = lib.literalExpression ''
+        {
+          LOG_LEVEL=4;
+          LOG_FILE_NAME="/var/log/fabricmanager.log";
+          LOG_APPEND_TO_LOG=1;
+          LOG_FILE_MAX_SIZE=1024;
+          LOG_USE_SYSLOG=0;
+          DAEMONIZE=1;
+          BIND_INTERFACE_IP="127.0.0.1";
+          STARTING_TCP_PORT=16000;
+          FABRIC_MODE=0;
+          FABRIC_MODE_RESTART=0;
+          STATE_FILE_NAME="/var/tmp/fabricmanager.state";
+          FM_CMD_BIND_INTERFACE="127.0.0.1";
+          FM_CMD_PORT_NUMBER=6666;
+          FM_STAY_RESIDENT_ON_FAILURES=0;
+          ACCESS_LINK_FAILURE_MODE=0;
+          TRUNK_LINK_FAILURE_MODE=0;
+          NVSWITCH_FAILURE_MODE=0;
+          ABORT_CUDA_JOBS_ON_FM_EXIT=1;
+          TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
+        }
+        '';
+        description = lib.mdDoc ''
+          Additional configuration options for fabricmanager.
+        '';
+      };
+
       powerManagement.enable = lib.mkEnableOption (lib.mdDoc ''
         experimental power management through systemd. For more information, see
         the NVIDIA docs, on Chapter 21. Configuring Power Management Support.
@@ -167,9 +224,15 @@ in {
         It also drastically increases the time the driver needs to clock down after load.
       '');
 
-      package = lib.mkPackageOptionMD config.boot.kernelPackages.nvidiaPackages "nvidia_x11" {
-        default = "stable";
+      package = lib.mkOption {
+        default = config.boot.kernelPackages.nvidiaPackages."${if cfg.datacenter.enable then "dc" else "stable"}";
+        defaultText = lib.literalExpression ''
+          config.boot.kernelPackages.nvidiaPackages."\$\{if cfg.datacenter.enable then "dc" else "stable"}"
+        '';
         example = lib.mdDoc "config.boot.kernelPackages.nvidiaPackages.legacy_470";
+        description = lib.mdDoc ''
+          The NVIDIA driver package to use.
+        '';
       };
 
       open = lib.mkEnableOption (lib.mdDoc ''
@@ -188,8 +251,46 @@ in {
       then pCfg.intelBusId
       else pCfg.amdgpuBusId;
   in
-    lib.mkIf (nvidia_x11 != null) {
-      assertions = [
+    lib.mkIf (nvidia_x11 != null) (lib.mkMerge [
+      # Common
+      ({
+        assertions = [
+          {
+            assertion = !(x11Enabled && cfg.datacenter.enable);
+            message = "You cannot configure both X11 and Data Center drivers at the same time.";
+          }
+        ];
+        boot = {
+          blacklistedKernelModules = ["nouveau" "nvidiafb"];
+          kernelModules = [ "nvidia-uvm" ];
+        };
+        systemd.tmpfiles.rules =
+          lib.optional config.virtualisation.docker.enableNvidia
+            "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin";
+        services.udev.extraRules =
+        ''
+          # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
+          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 255'"
+          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \  -f 4); do mknod -m 666 /dev/nvidia$${i} c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) $${i}; done'"
+          KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 254'"
+          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 0'"
+          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 1'"
+        '';
+        hardware.opengl = {
+          extraPackages = [
+            nvidia_x11.out
+          ];
+          extraPackages32 = [
+            nvidia_x11.lib32
+          ];
+        };
+        environment.systemPackages = [
+          nvidia_x11.bin
+        ];
+      })
+      # X11
+      (lib.mkIf x11Enabled {
+        assertions = [
         {
           assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
           message = "You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.";
@@ -248,227 +349,207 @@ in {
         {
           assertion = cfg.dynamicBoost.enable -> lib.versionAtLeast nvidia_x11.version "510.39.01";
           message = "NVIDIA's Dynamic Boost feature only exists on versions >= 510.39.01";
-        }
-      ];
-
-      # If Optimus/PRIME is enabled, we:
-      # - Specify the configured NVIDIA GPU bus ID in the Device section for the
-      #   "nvidia" driver.
-      # - Add the AllowEmptyInitialConfiguration option to the Screen section for the
-      #   "nvidia" driver, in order to allow the X server to start without any outputs.
-      # - Add a separate Device section for the Intel GPU, using the "modesetting"
-      #   driver and with the configured BusID.
-      # - OR add a separate Device section for the AMD APU, using the "amdgpu"
-      #   driver and with the configures BusID.
-      # - Reference that Device section from the ServerLayout section as an inactive
-      #   device.
-      # - Configure the display manager to run specific `xrandr` commands which will
-      #   configure/enable displays connected to the Intel iGPU / AMD APU.
-
-      # reverse sync implies offloading
-      hardware.nvidia.prime.offload.enable = lib.mkDefault reverseSyncCfg.enable;
-
-      services.xserver.drivers =
-        lib.optional primeEnabled {
-          name = igpuDriver;
-          display = offloadCfg.enable;
-          modules = lib.optional (igpuDriver == "amdgpu") pkgs.xorg.xf86videoamdgpu;
-          deviceSection =
-            ''
-              BusID "${igpuBusId}"
-            ''
-            + lib.optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''
-              Option "AccelMethod" "none"
-            '';
-        }
-        ++ lib.singleton {
-          name = "nvidia";
-          modules = [nvidia_x11.bin];
-          display = !offloadCfg.enable;
-          deviceSection =
-            lib.optionalString primeEnabled
-            ''
-              BusID "${pCfg.nvidiaBusId}"
-            ''
-            + lib.optionalString pCfg.allowExternalGpu ''
-              Option "AllowExternalGpus"
-            '';
-          screenSection =
-            ''
-              Option "RandRRotation" "on"
-            ''
-            + lib.optionalString syncCfg.enable ''
-              Option "AllowEmptyInitialConfiguration"
-            ''
-            + lib.optionalString cfg.forceFullCompositionPipeline ''
-              Option         "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
-              Option         "AllowIndirectGLXProtocol" "off"
-              Option         "TripleBuffer" "on"
-            '';
-        };
-
-      services.xserver.serverLayoutSection =
-        lib.optionalString syncCfg.enable ''
-          Inactive "Device-${igpuDriver}[0]"
-        ''
-        + lib.optionalString reverseSyncCfg.enable ''
-          Inactive "Device-nvidia[0]"
-        ''
-        + lib.optionalString offloadCfg.enable ''
-          Option "AllowNVIDIAGPUScreens"
-        '';
-
-      services.xserver.displayManager.setupCommands = let
-        gpuProviderName =
-          if igpuDriver == "amdgpu"
-          then
-            # find the name of the provider if amdgpu
-            "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
-          else igpuDriver;
-        providerCmdParams =
-          if syncCfg.enable
-          then "\"${gpuProviderName}\" NVIDIA-0"
-          else "NVIDIA-G0 \"${gpuProviderName}\"";
-      in
-        lib.optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
-          # Added by nvidia configuration module for Optimus/PRIME.
-          ${lib.getExe pkgs.xorg.xrandr} --setprovideroutputsource ${providerCmdParams}
-          ${lib.getExe pkgs.xorg.xrandr} --auto
-        '';
-
-      environment.etc = {
-        "nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";};
+        }];
+
+        # If Optimus/PRIME is enabled, we:
+        # - Specify the configured NVIDIA GPU bus ID in the Device section for the
+        #   "nvidia" driver.
+        # - Add the AllowEmptyInitialConfiguration option to the Screen section for the
+        #   "nvidia" driver, in order to allow the X server to start without any outputs.
+        # - Add a separate Device section for the Intel GPU, using the "modesetting"
+        #   driver and with the configured BusID.
+        # - OR add a separate Device section for the AMD APU, using the "amdgpu"
+        #   driver and with the configured BusID.
+        # - Reference that Device section from the ServerLayout section as an inactive
+        #   device.
+        # - Configure the display manager to run specific `xrandr` commands which will
+        #   configure/enable displays connected to the Intel iGPU / AMD APU.
+
+        # reverse sync implies offloading
+        hardware.nvidia.prime.offload.enable = lib.mkDefault reverseSyncCfg.enable;
+
+        services.xserver.drivers =
+          lib.optional primeEnabled {
+            name = igpuDriver;
+            display = offloadCfg.enable;
+            modules = lib.optional (igpuDriver == "amdgpu") pkgs.xorg.xf86videoamdgpu;
+            deviceSection =
+              ''
+                BusID "${igpuBusId}"
+              ''
+              + lib.optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''
+                Option "AccelMethod" "none"
+              '';
+          }
+          ++ lib.singleton {
+            name = "nvidia";
+            modules = [nvidia_x11.bin];
+            display = !offloadCfg.enable;
+            deviceSection =
+              lib.optionalString primeEnabled
+              ''
+                BusID "${pCfg.nvidiaBusId}"
+              ''
+              + lib.optionalString pCfg.allowExternalGpu ''
+                Option "AllowExternalGpus"
+              '';
+            screenSection =
+              ''
+                Option "RandRRotation" "on"
+              ''
+              + lib.optionalString syncCfg.enable ''
+                Option "AllowEmptyInitialConfiguration"
+              ''
+              + lib.optionalString cfg.forceFullCompositionPipeline ''
+                Option         "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
+                Option         "AllowIndirectGLXProtocol" "off"
+                Option         "TripleBuffer" "on"
+              '';
+          };
 
-        # 'nvidia_x11' installs it's files to /run/opengl-driver/...
-        "egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
-      };
+        services.xserver.serverLayoutSection =
+          lib.optionalString syncCfg.enable ''
+            Inactive "Device-${igpuDriver}[0]"
+          ''
+          + lib.optionalString reverseSyncCfg.enable ''
+            Inactive "Device-nvidia[0]"
+          ''
+          + lib.optionalString offloadCfg.enable ''
+            Option "AllowNVIDIAGPUScreens"
+          '';
+
+        services.xserver.displayManager.setupCommands = let
+          gpuProviderName =
+            if igpuDriver == "amdgpu"
+            then
+              # find the name of the provider if amdgpu
+              "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
+            else igpuDriver;
+          providerCmdParams =
+            if syncCfg.enable
+            then "\"${gpuProviderName}\" NVIDIA-0"
+            else "NVIDIA-G0 \"${gpuProviderName}\"";
+        in
+          lib.optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
+            # Added by nvidia configuration module for Optimus/PRIME.
+            ${lib.getExe pkgs.xorg.xrandr} --setprovideroutputsource ${providerCmdParams}
+            ${lib.getExe pkgs.xorg.xrandr} --auto
+          '';
+
+        environment.etc = {
+          "nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";};
+
+          # 'nvidia_x11' installs its files to /run/opengl-driver/...
+          "egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
+        };
 
-      hardware.opengl = {
-        extraPackages = [
-          nvidia_x11.out
-          pkgs.nvidia-vaapi-driver
-        ];
-        extraPackages32 = [
-          nvidia_x11.lib32
-          pkgs.pkgsi686Linux.nvidia-vaapi-driver
-        ];
-      };
-      environment.systemPackages =
-        [nvidia_x11.bin]
-        ++ lib.optional cfg.nvidiaSettings nvidia_x11.settings
-        ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
-        ++ lib.optional offloadCfg.enableOffloadCmd
-        (pkgs.writeShellScriptBin "nvidia-offload" ''
-          export __NV_PRIME_RENDER_OFFLOAD=1
-          export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
-          export __GLX_VENDOR_LIBRARY_NAME=nvidia
-          export __VK_LAYER_NV_optimus=NVIDIA_only
-          exec "$@"
-        '');
-
-      systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;
-
-      systemd.services = let
-        nvidiaService = state: {
-          description = "NVIDIA system ${state} actions";
-          path = [pkgs.kbd];
-          serviceConfig = {
-            Type = "oneshot";
-            ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
-          };
-          before = ["systemd-${state}.service"];
-          requiredBy = ["systemd-${state}.service"];
+        hardware.opengl = {
+          extraPackages = [
+            pkgs.nvidia-vaapi-driver
+          ];
+          extraPackages32 = [
+            pkgs.pkgsi686Linux.nvidia-vaapi-driver
+          ];
         };
-      in
-        lib.mkMerge [
-          (lib.mkIf cfg.powerManagement.enable {
-            nvidia-suspend = nvidiaService "suspend";
-            nvidia-hibernate = nvidiaService "hibernate";
-            nvidia-resume =
-              (nvidiaService "resume")
-              // {
-                before = [];
-                after = ["systemd-suspend.service" "systemd-hibernate.service"];
-                requiredBy = ["systemd-suspend.service" "systemd-hibernate.service"];
-              };
-          })
-          (lib.mkIf cfg.nvidiaPersistenced {
-            "nvidia-persistenced" = {
-              description = "NVIDIA Persistence Daemon";
-              wantedBy = ["multi-user.target"];
-              serviceConfig = {
-                Type = "forking";
-                Restart = "always";
-                PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
-                ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
-                ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
-              };
+        environment.systemPackages =
+          lib.optional cfg.nvidiaSettings nvidia_x11.settings
+          ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
+          ++ lib.optional offloadCfg.enableOffloadCmd
+          (pkgs.writeShellScriptBin "nvidia-offload" ''
+            export __NV_PRIME_RENDER_OFFLOAD=1
+            export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
+            export __GLX_VENDOR_LIBRARY_NAME=nvidia
+            export __VK_LAYER_NV_optimus=NVIDIA_only
+            exec "$@"
+          '');
+
+        systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;
+
+        systemd.services = let
+          nvidiaService = state: {
+            description = "NVIDIA system ${state} actions";
+            path = [pkgs.kbd];
+            serviceConfig = {
+              Type = "oneshot";
+              ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
             };
-          })
-          (lib.mkIf cfg.dynamicBoost.enable {
-            "nvidia-powerd" = {
-              description = "nvidia-powerd service";
-              path = [
-                pkgs.util-linux # nvidia-powerd wants lscpu
-              ];
-              wantedBy = ["multi-user.target"];
-              serviceConfig = {
-                Type = "dbus";
-                BusName = "nvidia.powerd.server";
-                ExecStart = "${nvidia_x11.bin}/bin/nvidia-powerd";
+            before = ["systemd-${state}.service"];
+            requiredBy = ["systemd-${state}.service"];
+          };
+        in
+          lib.mkMerge [
+            (lib.mkIf cfg.powerManagement.enable {
+              nvidia-suspend = nvidiaService "suspend";
+              nvidia-hibernate = nvidiaService "hibernate";
+              nvidia-resume =
+                (nvidiaService "resume")
+                // {
+                  before = [];
+                  after = ["systemd-suspend.service" "systemd-hibernate.service"];
+                  requiredBy = ["systemd-suspend.service" "systemd-hibernate.service"];
+                };
+            })
+            (lib.mkIf cfg.nvidiaPersistenced {
+              "nvidia-persistenced" = {
+                description = "NVIDIA Persistence Daemon";
+                wantedBy = ["multi-user.target"];
+                serviceConfig = {
+                  Type = "forking";
+                  Restart = "always";
+                  PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
+                  ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
+                  ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
+                };
               };
-            };
-          })
-        ];
-
-      services.acpid.enable = true;
-
-      services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;
-
-      hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;
-
-      systemd.tmpfiles.rules =
-        lib.optional config.virtualisation.docker.enableNvidia
-        "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin"
-        ++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
-        "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
-
-      boot = {
-        blacklistedKernelModules = ["nouveau" "nvidiafb"];
-
-        extraModulePackages =
-          if cfg.open
-          then [nvidia_x11.open]
-          else [nvidia_x11.bin];
-
-        # nvidia-uvm is required by CUDA applications.
-        kernelModules =
-          ["nvidia-uvm"]
-          ++ lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];
-
-        # If requested enable modesetting via kernel parameter.
-        kernelParams =
-          lib.optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
-          ++ lib.optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
-          ++ lib.optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
-          ++ lib.optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";
-
-        # enable finegrained power management
-        extraModprobeConfig = lib.optionalString cfg.powerManagement.finegrained ''
-          options nvidia "NVreg_DynamicPowerManagement=0x02"
-        '';
-      };
-
-      services.udev.extraRules =
-        ''
-          # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
-          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 255'"
-          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \  -f 4); do mknod -m 666 /dev/nvidia$${i} c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) $${i}; done'"
-          KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 254'"
-          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 0'"
-          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 1'"
-        ''
-        + lib.optionalString cfg.powerManagement.finegrained (
+            })
+            (lib.mkIf cfg.dynamicBoost.enable {
+              "nvidia-powerd" = {
+                description = "nvidia-powerd service";
+                path = [
+                  pkgs.util-linux # nvidia-powerd wants lscpu
+                ];
+                wantedBy = ["multi-user.target"];
+                serviceConfig = {
+                  Type = "dbus";
+                  BusName = "nvidia.powerd.server";
+                  ExecStart = "${nvidia_x11.bin}/bin/nvidia-powerd";
+                };
+              };
+            })
+          ];
+        services.acpid.enable = true;
+
+        services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;
+
+        hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;
+
+        systemd.tmpfiles.rules =
+          lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
+          "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
+
+        boot = {
+          extraModulePackages =
+            if cfg.open
+            then [nvidia_x11.open]
+            else [nvidia_x11.bin];
+          # nvidia-uvm is required by CUDA applications.
+          kernelModules =
+            lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];
+
+          # If requested enable modesetting via kernel parameter.
+          kernelParams =
+            lib.optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
+            ++ lib.optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
+            ++ lib.optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
+            ++ lib.optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";
+
+          # enable finegrained power management
+          extraModprobeConfig = lib.optionalString cfg.powerManagement.finegrained ''
+            options nvidia "NVreg_DynamicPowerManagement=0x02"
+          '';
+        };
+        services.udev.extraRules =
+          lib.optionalString cfg.powerManagement.finegrained (
           lib.optionalString (lib.versionOlder config.boot.kernelPackages.kernel.version "5.5") ''
             # Remove NVIDIA USB xHCI Host Controller devices, if present
             ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
@@ -489,5 +570,30 @@ in {
             ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
           ''
         );
-    };
+      })
+      # Data Center
+      (lib.mkIf (cfg.datacenter.enable) {
+        boot.extraModulePackages = [
+          nvidia_x11.bin
+        ];
+        systemd.services.nvidia-fabricmanager = {
+          enable = true;
+          description = "Start NVIDIA NVLink Management";
+          wantedBy = [ "multi-user.target" ];
+          unitConfig.After = [ "network-online.target" ];
+          unitConfig.Requires = [ "network-online.target" ];
+          serviceConfig = {
+            Type = "forking";
+            TimeoutStartSec = 240;
+            ExecStart = let
+              nv-fab-conf = settingsFormat.generate "fabricmanager.conf" cfg.datacenter.settings;
+              in
+                nvidia_x11.fabricmanager + "/bin/nv-fabricmanager -c " + nv-fab-conf;
+            LimitCORE="infinity";
+          };
+        };
+        environment.systemPackages =
+          lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager;
+      })
+    ]);
 }
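The hunks above rework hardware/video/nvidia.nix into a desktop path (udev rules, fine-grained power management, dynamic boost) and a new data-center path that loads nvidia_x11.bin and runs nvidia-fabricmanager. A minimal sketch of a host opting into the new pieces, assuming the option prefix hardware.nvidia used by this module; the LOG_LEVEL settings key is a hypothetical example, consult the fabricmanager documentation:

  { ... }: {
    hardware.nvidia = {
      # New data-center path: starts nvidia-fabricmanager and installs it system-wide.
      datacenter.enable = true;
      # Rendered to fabricmanager.conf via settingsFormat.generate; assumed upstream key.
      datacenter.settings.LOG_LEVEL = 4;
      # Desktop-side additions from the same diff:
      # dynamicBoost.enable = true;          # runs the nvidia-powerd D-Bus service
      # powerManagement.finegrained = true;  # adds NVreg_DynamicPowerManagement=0x02
    };
  }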
diff --git a/nixos/modules/i18n/input-method/uim.nix b/nixos/modules/i18n/input-method/uim.nix
index 9491ab2640fcc..7225783b2a6f8 100644
--- a/nixos/modules/i18n/input-method/uim.nix
+++ b/nixos/modules/i18n/input-method/uim.nix
@@ -10,7 +10,7 @@ in
 
     i18n.inputMethod.uim = {
       toolbar = mkOption {
-        type    = types.enum [ "gtk" "gtk3" "gtk-systray" "gtk3-systray" "qt4" ];
+        type    = types.enum [ "gtk" "gtk3" "gtk-systray" "gtk3-systray" "qt5" ];
         default = "gtk";
         example = "gtk-systray";
         description = lib.mdDoc ''
diff --git a/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix b/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix
index ea8056ff870c5..573b31b439c2d 100644
--- a/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix
+++ b/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix
@@ -6,7 +6,6 @@
   imports = [ ./installation-cd-graphical-base.nix ];
 
   isoImage.edition = "gnome";
-  isoImage.graphicalGrub = true;
 
   services.xserver.desktopManager.gnome = {
     # Add Firefox and other tools useful for installation to the launcher
diff --git a/nixos/modules/installer/cd-dvd/iso-image.nix b/nixos/modules/installer/cd-dvd/iso-image.nix
index c430048d6598f..0b5135c088eaf 100644
--- a/nixos/modules/installer/cd-dvd/iso-image.nix
+++ b/nixos/modules/installer/cd-dvd/iso-image.nix
@@ -24,6 +24,9 @@ let
         # Name appended to menuentry defaults to params if no specific name given.
         option.name or (optionalString (option ? params) "(${option.params})")
         }' ${optionalString (option ? class) " --class ${option.class}"} {
+          # Fall back to the UEFI console for boot; efifb sometimes has difficulties.
+          terminal_output console
+
           linux ${defaults.image} \''${isoboot} ${defaults.params} ${
             option.params or ""
           }
@@ -185,33 +188,25 @@ let
       # So instead we'll list a lot of possibly valid modes :/
       #"3840x2160"
       #"2560x1440"
+      "1920x1200"
       "1920x1080"
       "1366x768"
+      "1280x800"
       "1280x720"
+      "1200x1920"
       "1024x768"
+      "800x1280"
       "800x600"
       "auto"
     ]}
 
-    # Fonts can be loaded?
-    # (This font is assumed to always be provided as a fallback by NixOS)
-    if loadfont (\$root)/EFI/boot/unicode.pf2; then
-      set with_fonts=true
-    fi
-    if [ "\$textmode" != "true" -a "\$with_fonts" == "true" ]; then
-      # Use graphical term, it can be either with background image or a theme.
-      # input is "console", while output is "gfxterm".
-      # This enables "serial" input and output only when possible.
-      # Otherwise the failure mode is to not even enable gfxterm.
-      if test "\$with_serial" == "yes"; then
-        terminal_output gfxterm serial
-        terminal_input  console serial
-      else
-        terminal_output gfxterm
-        terminal_input  console
-      fi
+    if [ "\$textmode" == "false" ]; then
+      terminal_output gfxterm
+      terminal_input  console
     else
-      # Sets colors for the non-graphical term.
+      terminal_output console
+      terminal_input  console
+      # Sets colors for console term.
       set menu_color_normal=cyan/blue
       set menu_color_highlight=white/blue
     fi
@@ -250,18 +245,58 @@ let
     touch $out/EFI/nixos-installer-image
 
     # ALWAYS required modules.
-    MODULES="fat iso9660 part_gpt part_msdos \
-             normal boot linux configfile loopback chain halt \
-             efifwsetup efi_gop \
-             ls search search_label search_fs_uuid search_fs_file \
-             gfxmenu gfxterm gfxterm_background gfxterm_menu test all_video loadenv \
-             exfat ext2 ntfs btrfs hfsplus udf \
-             videoinfo png \
-             echo serial \
-            "
+    MODULES=(
+      # Basic modules for filesystems and partition schemes
+      "fat"
+      "iso9660"
+      "part_gpt"
+      "part_msdos"
+
+      # Basic stuff
+      "normal"
+      "boot"
+      "linux"
+      "configfile"
+      "loopback"
+      "chain"
+      "halt"
+
+      # Allows rebooting into firmware setup interface
+      "efifwsetup"
+
+      # EFI Graphics Output Protocol
+      "efi_gop"
+
+      # User commands
+      "ls"
+
+      # System commands
+      "search"
+      "search_label"
+      "search_fs_uuid"
+      "search_fs_file"
+      "echo"
+
+      # We're not using it anymore, but we'll leave it in so it can be used
+      # by the user from the console, using "C"
+      "serial"
+
+      # Graphical mode stuff
+      "gfxmenu"
+      "gfxterm"
+      "gfxterm_background"
+      "gfxterm_menu"
+      "test"
+      "loadenv"
+      "all_video"
+      "videoinfo"
+
+      # File types for graphical mode
+      "png"
+    )
 
     echo "Building GRUB with modules:"
-    for mod in $MODULES; do
+    for mod in ''${MODULES[@]}; do
       echo " - $mod"
     done
 
@@ -270,31 +305,27 @@ let
     for mod in efi_uga; do
       if [ -f ${grubPkgs.grub2_efi}/lib/grub/${grubPkgs.grub2_efi.grubTarget}/$mod.mod ]; then
         echo " - $mod"
-        MODULES+=" $mod"
+        MODULES+=("$mod")
       fi
     done
 
     # Make our own efi program, we can't rely on "grub-install" since it seems to
     # probe for devices, even with --skip-fs-probe.
-    grub-mkimage --directory=${grubPkgs.grub2_efi}/lib/grub/${grubPkgs.grub2_efi.grubTarget} -o $out/EFI/boot/boot${targetArch}.efi -p /EFI/boot -O ${grubPkgs.grub2_efi.grubTarget} \
-      $MODULES
+    grub-mkimage \
+      --directory=${grubPkgs.grub2_efi}/lib/grub/${grubPkgs.grub2_efi.grubTarget} \
+      -o $out/EFI/boot/boot${targetArch}.efi \
+      -p /EFI/boot \
+      -O ${grubPkgs.grub2_efi.grubTarget} \
+      ''${MODULES[@]}
     cp ${grubPkgs.grub2_efi}/share/grub/unicode.pf2 $out/EFI/boot/
 
     cat <<EOF > $out/EFI/boot/grub.cfg
 
-    set with_fonts=false
-    set textmode=${boolToString (!config.isoImage.graphicalGrub)}
-    # If you want to use serial for "terminal_*" commands, you need to set one up:
-    #   Example manual configuration:
-    #    → serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
-    # This uses the defaults, and makes the serial terminal available.
-    set with_serial=no
-    if serial; then set with_serial=yes ;fi
-    export with_serial
-    clear
+    set textmode=${boolToString (config.isoImage.forceTextMode)}
     set timeout=${toString grubEfiTimeout}
 
-    # This message will only be viewable when "gfxterm" is not used.
+    clear
+    # This message will only be viewable on the default (UEFI) console.
     echo ""
     echo "Loading graphical boot menu..."
     echo ""
@@ -306,7 +337,7 @@ let
     hiddenentry 'Text mode' --hotkey 't' {
       loadfont (\$root)/EFI/boot/unicode.pf2
       set textmode=true
-      terminal_output gfxterm console
+      terminal_output console
     }
     hiddenentry 'GUI mode' --hotkey 'g' {
       $(find ${config.isoImage.grubTheme} -iname '*.pf2' -printf "loadfont (\$root)/EFI/boot/grub-theme/%P\n")
@@ -400,6 +431,8 @@ let
     }
     EOF
 
+    grub-script-check $out/EFI/boot/grub.cfg
+
     ${refind}
   '';
 
@@ -658,13 +691,17 @@ in
       '';
     };
 
-    isoImage.graphicalGrub = mkOption {
+    isoImage.forceTextMode = mkOption {
       default = false;
       type = types.bool;
       example = true;
       description = lib.mdDoc ''
-        Whether to use textmode or graphical grub.
-        false means we use textmode grub.
+        Whether to use text mode instead of graphical grub.
+        A value of `true` means graphical mode will not be attempted.
+
+        This is useful for checking that graphics mode is not the root cause of a problem with the ISO image.
+
+        If text mode is only needed occasionally (e.g. for serial use), you can press the `T` key at the boot menu to use text mode for the current boot only.
       '';
     };
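The graphicalGrub toggle is replaced by forceTextMode with the opposite sense: graphical GRUB is now the default and text mode is an explicit opt-out. A minimal sketch of forcing text mode in an ISO build, assuming the usual installer module imports:

  { ... }: {
    imports = [ <nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal.nix> ];
    # Sets textmode=true in the generated grub.cfg, skipping gfxterm entirely.
    isoImage.forceTextMode = true;
  }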
 
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index ec6424682ccce..05cf792e3fcbe 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -55,6 +55,7 @@
   ./hardware/cpu/amd-sev.nix
   ./hardware/cpu/intel-microcode.nix
   ./hardware/cpu/intel-sgx.nix
+  ./hardware/decklink.nix
   ./hardware/device-tree.nix
   ./hardware/digitalbitbox.nix
   ./hardware/flipperzero.nix
@@ -63,6 +64,7 @@
   ./hardware/gpgsmartcards.nix
   ./hardware/hackrf.nix
   ./hardware/i2c.nix
+  ./hardware/infiniband.nix
   ./hardware/keyboard/qmk.nix
   ./hardware/keyboard/teck.nix
   ./hardware/keyboard/uhk.nix
@@ -163,6 +165,7 @@
   ./programs/direnv.nix
   ./programs/dmrconfig.nix
   ./programs/droidcam.nix
+  ./programs/ecryptfs.nix
   ./programs/environment.nix
   ./programs/evince.nix
   ./programs/extra-container.nix
@@ -393,7 +396,6 @@
   ./services/continuous-integration/gitlab-runner.nix
   ./services/continuous-integration/gocd-agent/default.nix
   ./services/continuous-integration/gocd-server/default.nix
-  ./services/continuous-integration/hail.nix
   ./services/continuous-integration/hercules-ci-agent/default.nix
   ./services/continuous-integration/hydra/default.nix
   ./services/continuous-integration/jenkins/default.nix
@@ -594,6 +596,7 @@
   ./services/mail/rss2email.nix
   ./services/mail/schleuder.nix
   ./services/mail/spamassassin.nix
+  ./services/mail/stalwart-mail.nix
   ./services/mail/sympa.nix
   ./services/mail/zeyple.nix
   ./services/matrix/appservice-discord.nix
@@ -1247,6 +1250,7 @@
   ./services/web-apps/matomo.nix
   ./services/web-apps/mattermost.nix
   ./services/web-apps/mediawiki.nix
+  ./services/web-apps/meme-bingo-web.nix
   ./services/web-apps/miniflux.nix
   ./services/web-apps/monica.nix
   ./services/web-apps/moodle.nix
@@ -1258,6 +1262,7 @@
   ./services/web-apps/node-red.nix
   ./services/web-apps/onlyoffice.nix
   ./services/web-apps/openvscode-server.nix
+  ./services/web-apps/mobilizon.nix
   ./services/web-apps/openwebrx.nix
   ./services/web-apps/outline.nix
   ./services/web-apps/peering-manager.nix
diff --git a/nixos/modules/programs/ecryptfs.nix b/nixos/modules/programs/ecryptfs.nix
new file mode 100644
index 0000000000000..63c1a3ad44199
--- /dev/null
+++ b/nixos/modules/programs/ecryptfs.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.ecryptfs;
+
+in {
+  options.programs.ecryptfs = {
+    enable = mkEnableOption (lib.mdDoc "ecryptfs setuid mount wrappers");
+  };
+
+  config = mkIf cfg.enable {
+    security.wrappers = {
+
+      "mount.ecryptfs_private" = {
+        setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${lib.getBin pkgs.ecryptfs}/bin/mount.ecryptfs_private";
+      };
+      "umount.ecryptfs_private" = {
+        setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${lib.getBin pkgs.ecryptfs}/bin/umount.ecryptfs_private";
+      };
+
+    };
+  };
+}
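The new programs.ecryptfs module only installs the two setuid mount helpers through security.wrappers; unlocking home directories on login still goes through the existing PAM switch (touched in the pam.nix hunk further down). A minimal sketch combining both:

  { ... }: {
    programs.ecryptfs.enable = true;      # setuid mount.ecryptfs_private / umount.ecryptfs_private
    security.pam.enableEcryptfs = true;   # mount the eCryptfs home directory on login
  }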
diff --git a/nixos/modules/programs/streamdeck-ui.nix b/nixos/modules/programs/streamdeck-ui.nix
index 4c055029e39b9..220f0a35f1629 100644
--- a/nixos/modules/programs/streamdeck-ui.nix
+++ b/nixos/modules/programs/streamdeck-ui.nix
@@ -24,7 +24,7 @@ in
   config = mkIf cfg.enable {
     environment.systemPackages = with pkgs; [
       cfg.package
-      (mkIf cfg.autoStart (makeAutostartItem { name = "streamdeck-ui"; package = cfg.package; }))
+      (mkIf cfg.autoStart (makeAutostartItem { name = "streamdeck-ui-noui"; package = cfg.package; }))
     ];
 
     services.udev.packages = [ cfg.package ];
diff --git a/nixos/modules/programs/yazi.nix b/nixos/modules/programs/yazi.nix
new file mode 100644
index 0000000000000..3416bca06679f
--- /dev/null
+++ b/nixos/modules/programs/yazi.nix
@@ -0,0 +1,49 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.yazi;
+
+  settingsFormat = pkgs.formats.toml { };
+
+  names = [ "yazi" "theme" "keymap" ];
+in
+{
+  options.programs.yazi = {
+    enable = lib.mkEnableOption (lib.mdDoc "yazi terminal file manager");
+
+    package = lib.mkPackageOptionMD pkgs "yazi" { };
+
+    settings = lib.mkOption {
+      type = with lib.types; submodule {
+        options = lib.listToAttrs (map
+          (name: lib.nameValuePair name (lib.mkOption {
+            inherit (settingsFormat) type;
+            default = { };
+            description = lib.mdDoc ''
+              Configuration included in `${name}.toml`.
+
+              See https://github.com/sxyazi/yazi/blob/v${cfg.package.version}/config/docs/${name}.md for documentation.
+            '';
+          }))
+          names);
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Configuration included in `$YAZI_CONFIG_HOME`.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment = {
+      systemPackages = [ cfg.package ];
+      variables.YAZI_CONFIG_HOME = "/etc/yazi/";
+      etc = lib.attrsets.mergeAttrsList (map
+        (name: lib.optionalAttrs (cfg.settings.${name} != { }) {
+          "yazi/${name}.toml".source = settingsFormat.generate "${name}.toml" cfg.settings.${name};
+        })
+        names);
+    };
+  };
+  meta.maintainers = with lib.maintainers; [ linsui ];
+}
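programs.yazi renders one TOML file per configuration name (yazi, theme, keymap) into /etc/yazi and points YAZI_CONFIG_HOME there. A minimal sketch; manager.show_hidden is an assumed upstream key, see the yazi documentation linked in the option description:

  { ... }: {
    programs.yazi = {
      enable = true;
      # Written to /etc/yazi/yazi.toml only because it is non-empty.
      settings.yazi.manager.show_hidden = true;
    };
  }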
diff --git a/nixos/modules/security/acme/default.md b/nixos/modules/security/acme/default.md
index 8ff97b55f6856..31548ad181a73 100644
--- a/nixos/modules/security/acme/default.md
+++ b/nixos/modules/security/acme/default.md
@@ -189,7 +189,7 @@ security.acme.defaults.email = "admin+acme@example.com";
 security.acme.certs."example.com" = {
   domain = "*.example.com";
   dnsProvider = "rfc2136";
-  credentialsFile = "/var/lib/secrets/certs.secret";
+  environmentFile = "/var/lib/secrets/certs.secret";
   # We don't need to wait for propagation since this is a local DNS server
   dnsPropagationCheck = false;
 };
@@ -256,7 +256,7 @@ security.acme.acceptTerms = true;
 security.acme.defaults.email = "admin+acme@example.com";
 security.acme.defaults = {
   dnsProvider = "rfc2136";
-  credentialsFile = "/var/lib/secrets/certs.secret";
+  environmentFile = "/var/lib/secrets/certs.secret";
   # We don't need to wait for propagation since this is a local DNS server
   dnsPropagationCheck = false;
 };
diff --git a/nixos/modules/security/acme/default.nix b/nixos/modules/security/acme/default.nix
index 8aee71c864d0a..92bed172f4522 100644
--- a/nixos/modules/security/acme/default.nix
+++ b/nixos/modules/security/acme/default.nix
@@ -1,6 +1,8 @@
 { config, lib, pkgs, options, ... }:
 with lib;
 let
+
+
   cfg = config.security.acme;
   opt = options.security.acme;
   user = if cfg.useRoot then "root" else "acme";
@@ -14,6 +16,36 @@ let
   mkAccountHash = acmeServer: data: mkHash "${toString acmeServer} ${data.keyType} ${data.email}";
   accountDirRoot = "/var/lib/acme/.lego/accounts/";
 
+  lockdir = "/run/acme/";
+  concurrencyLockfiles = map (n: "${toString n}.lock") (lib.range 1 cfg.maxConcurrentRenewals);
+  # Assign elements of `baseList` to each element of `needAssignmentList`, until the latter is exhausted.
+  # returns: [{fst = "element of baseList"; snd = "element of needAssignmentList"}]
+  roundRobinAssign = baseList: needAssignmentList:
+    if baseList == [] then []
+    else _rrCycler baseList baseList needAssignmentList;
+  _rrCycler = with builtins; origBaseList: workingBaseList: needAssignmentList:
+    if (workingBaseList == [] || needAssignmentList == [])
+    then []
+    else
+      [{ fst = head workingBaseList; snd = head needAssignmentList;}] ++
+      _rrCycler origBaseList (if (tail workingBaseList == []) then origBaseList else tail workingBaseList) (tail needAssignmentList);
+  attrsToList = mapAttrsToList (attrname: attrval: {name = attrname; value = attrval;});
+  # for an AttrSet `funcsAttrs` having functions as values, apply single arguments from
+  # `argsList` to them in a round-robin manner.
+  # Returns an attribute set with the applied functions as values.
+  roundRobinApplyAttrs = funcsAttrs: argsList: lib.listToAttrs (map (x: {inherit (x.snd) name; value = x.snd.value x.fst;}) (roundRobinAssign argsList (attrsToList funcsAttrs)));
+  wrapInFlock = lockfilePath: script:
+    # explainer: https://stackoverflow.com/a/60896531
+    ''
+      exec {LOCKFD}> ${lockfilePath}
+      echo "Waiting to acquire lock ${lockfilePath}"
+      ${pkgs.flock}/bin/flock ''${LOCKFD} || exit 1
+      echo "Acquired lock ${lockfilePath}"
+    ''
+    + script + "\n"
+    + ''echo "Releasing lock ${lockfilePath}"  # only released after process exit'';
+
+
   # There are many services required to make cert renewals work.
   # They all follow a common structure:
   #   - They inherit this commonServiceConfig
@@ -31,6 +63,7 @@ let
     ProtectSystem = "strict";
     ReadWritePaths = [
       "/var/lib/acme"
+      lockdir
     ];
     PrivateTmp = true;
 
@@ -118,7 +151,8 @@ let
       # We don't want this to run every time a renewal happens
       RemainAfterExit = true;
 
-      # These StateDirectory entries negate the need for tmpfiles
+      # StateDirectory entries are a cleaner, service-level mechanism
+      # for dealing with persistent service data
       StateDirectory = [ "acme" "acme/.lego" "acme/.lego/accounts" ];
       StateDirectoryMode = 755;
       WorkingDirectory = "/var/lib/acme";
@@ -127,6 +161,25 @@ let
       ExecStart = "+" + (pkgs.writeShellScript "acme-fixperms" script);
     };
   };
+  lockfilePrepareService = {
+    description = "Manage lock files for acme services";
+
+    # ensure all required lock files exist, but none more
+    script = ''
+      GLOBIGNORE="${concatStringsSep ":" concurrencyLockfiles}"
+      rm -f *
+      unset GLOBIGNORE
+
+      xargs touch <<< "${toString concurrencyLockfiles}"
+    '';
+
+    serviceConfig = commonServiceConfig // {
+      # We don't want this to run every time a renewal happens
+      RemainAfterExit = true;
+      WorkingDirectory = lockdir;
+    };
+  };
+
 
   certToConfig = cert: data: let
     acmeServer = data.server;
@@ -229,10 +282,10 @@ let
       };
     };
 
-    selfsignService = {
+    selfsignService = lockfileName: {
       description = "Generate self-signed certificate for ${cert}";
-      after = [ "acme-selfsigned-ca.service" "acme-fixperms.service" ];
-      requires = [ "acme-selfsigned-ca.service" "acme-fixperms.service" ];
+      after = [ "acme-selfsigned-ca.service" "acme-fixperms.service" ] ++ optional (cfg.maxConcurrentRenewals > 0) "acme-lockfiles.service";
+      requires = [ "acme-selfsigned-ca.service" "acme-fixperms.service" ] ++ optional (cfg.maxConcurrentRenewals > 0) "acme-lockfiles.service";
 
       path = with pkgs; [ minica ];
 
@@ -256,7 +309,7 @@ let
       # Working directory will be /tmp
       # minica will output to a folder sharing the name of the first domain
       # in the list, which will be ${data.domain}
-      script = ''
+      script = (if (lockfileName == null) then lib.id else wrapInFlock "${lockdir}${lockfileName}") ''
         minica \
           --ca-key ca/key.pem \
           --ca-cert ca/cert.pem \
@@ -277,10 +330,10 @@ let
       '';
     };
 
-    renewService = {
+    renewService = lockfileName: {
       description = "Renew ACME certificate for ${cert}";
-      after = [ "network.target" "network-online.target" "acme-fixperms.service" "nss-lookup.target" ] ++ selfsignedDeps;
-      wants = [ "network-online.target" "acme-fixperms.service" ] ++ selfsignedDeps;
+      after = [ "network.target" "network-online.target" "acme-fixperms.service" "nss-lookup.target" ] ++ selfsignedDeps ++ optional (cfg.maxConcurrentRenewals > 0) "acme-lockfiles.service";
+      wants = [ "network-online.target" "acme-fixperms.service" ] ++ selfsignedDeps ++ optional (cfg.maxConcurrentRenewals > 0) "acme-lockfiles.service";
 
       # https://github.com/NixOS/nixpkgs/pull/81371#issuecomment-605526099
       wantedBy = optionals (!config.boot.isContainer) [ "multi-user.target" ];
@@ -309,8 +362,14 @@ let
           "/var/lib/acme/.lego/${cert}/${certDir}:/tmp/certificates"
         ];
 
-        # Only try loading the credentialsFile if the dns challenge is enabled
-        EnvironmentFile = mkIf useDns data.credentialsFile;
+        # Only try loading the environmentFile if the dns challenge is enabled
+        EnvironmentFile = mkIf useDns data.environmentFile;
+
+        Environment = mkIf useDns
+          (mapAttrsToList (k: v: ''"${k}=%d/${k}"'') data.credentialFiles);
+
+        LoadCredential = mkIf useDns
+          (mapAttrsToList (k: v: "${k}:${v}") data.credentialFiles);
 
         # Run as root (Prefixed with +)
         ExecStartPost = "+" + (pkgs.writeShellScript "acme-postrun" ''
@@ -329,7 +388,7 @@ let
       };
 
       # Working directory will be /tmp
-      script = ''
+      script = (if (lockfileName == null) then lib.id else wrapInFlock "${lockdir}${lockfileName}") ''
         ${optionalString data.enableDebugLogs "set -x"}
         set -euo pipefail
 
@@ -443,6 +502,10 @@ let
       defaultText = if isDefaults then default else literalExpression "config.security.acme.defaults.${name}";
     };
   in {
+    imports = [
+      (mkRenamedOptionModule [ "credentialsFile" ] [ "environmentFile" ])
+    ];
+
     options = {
       validMinDays = mkOption {
         type = types.int;
@@ -554,9 +617,9 @@ let
         '';
       };
 
-      credentialsFile = mkOption {
+      environmentFile = mkOption {
         type = types.nullOr types.path;
-        inherit (defaultAndText "credentialsFile" null) default defaultText;
+        inherit (defaultAndText "environmentFile" null) default defaultText;
         description = lib.mdDoc ''
           Path to an EnvironmentFile for the cert's service containing any required and
           optional environment variables for your selected dnsProvider.
@@ -566,6 +629,24 @@ let
         example = "/var/src/secrets/example.org-route53-api-token";
       };
 
+      credentialFiles = mkOption {
+        type = types.attrsOf (types.path);
+        inherit (defaultAndText "credentialFiles" {}) default defaultText;
+        description = lib.mdDoc ''
+          Environment variables suffixed by "_FILE" to set for the cert's service
+          for your selected dnsProvider.
+          To find out what values you need to set, consult the documentation at
+          <https://go-acme.github.io/lego/dns/> for the corresponding dnsProvider.
+          This allows credential files to be passed securely to lego by leveraging
+          systemd credentials.
+        '';
+        example = literalExpression ''
+          {
+            "RFC2136_TSIG_SECRET_FILE" = "/run/secrets/tsig-secret-example.org";
+          }
+        '';
+      };
+
       dnsPropagationCheck = mkOption {
         type = types.bool;
         inherit (defaultAndText "dnsPropagationCheck" true) default defaultText;
@@ -755,6 +836,17 @@ in {
           }
         '';
       };
+      maxConcurrentRenewals = mkOption {
+        default = 5;
+        type = types.int;
+        description = lib.mdDoc ''
+          Maximum number of concurrent certificate generation or renewal jobs. All other
+          jobs will queue and wait for running jobs to finish. This reduces the system load
+          of certificate generation.
+
+          Set to `0` to allow an unlimited number of concurrent job runs.
+        '';
+      };
     };
   };
 
@@ -865,6 +957,13 @@ in {
             `security.acme.certs.${cert}.listenHTTP` must be provided.
           '';
         }
+        {
+          assertion = all (hasSuffix "_FILE") (attrNames data.credentialFiles);
+          message = ''
+            Option `security.acme.certs.${cert}.credentialFiles` can only be
+            used for variables suffixed by "_FILE".
+          '';
+        }
       ]) cfg.certs));
 
       users.users.acme = {
@@ -875,12 +974,28 @@ in {
 
       users.groups.acme = {};
 
-      systemd.services = {
-        "acme-fixperms" = userMigrationService;
-      } // (mapAttrs' (cert: conf: nameValuePair "acme-${cert}" conf.renewService) certConfigs)
+      # for lock files, still use tmpfiles as they should better reside in /run
+      systemd.tmpfiles.rules = [
+        "d ${lockdir} 0700 ${user} - - -"
+        "Z ${lockdir} 0700 ${user} - - -"
+      ];
+
+      systemd.services = let
+        renewServiceFunctions = mapAttrs' (cert: conf: nameValuePair "acme-${cert}" conf.renewService) certConfigs;
+        renewServices =  if cfg.maxConcurrentRenewals > 0
+          then roundRobinApplyAttrs renewServiceFunctions concurrencyLockfiles
+          else mapAttrs (_: f: f null) renewServiceFunctions;
+        selfsignServiceFunctions = mapAttrs' (cert: conf: nameValuePair "acme-selfsigned-${cert}" conf.selfsignService) certConfigs;
+        selfsignServices = if cfg.maxConcurrentRenewals > 0
+          then roundRobinApplyAttrs selfsignServiceFunctions concurrencyLockfiles
+          else mapAttrs (_: f: f null) selfsignServiceFunctions;
+        in
+        { "acme-fixperms" = userMigrationService; }
+        // (optionalAttrs (cfg.maxConcurrentRenewals > 0) {"acme-lockfiles" = lockfilePrepareService; })
+        // renewServices
         // (optionalAttrs (cfg.preliminarySelfsigned) ({
         "acme-selfsigned-ca" = selfsignCAService;
-      } // (mapAttrs' (cert: conf: nameValuePair "acme-selfsigned-${cert}" conf.selfsignService) certConfigs)));
+      } // selfsignServices));
 
       systemd.timers = mapAttrs' (cert: conf: nameValuePair "acme-${cert}" conf.renewTimer) certConfigs;
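The renamed environmentFile, the new credentialFiles attribute and the global maxConcurrentRenewals limit all come together per certificate; a minimal sketch re-using the rfc2136 example from default.md above:

  { ... }: {
    security.acme.acceptTerms = true;
    security.acme.defaults.email = "admin+acme@example.com";
    # 0 disables the flock-based limiting; the default is 5.
    security.acme.maxConcurrentRenewals = 2;
    security.acme.certs."example.com" = {
      dnsProvider = "rfc2136";
      # Non-secret variables for lego (formerly credentialsFile).
      environmentFile = "/var/lib/secrets/certs.secret";
      # _FILE-suffixed secrets passed via systemd LoadCredential.
      credentialFiles."RFC2136_TSIG_SECRET_FILE" = "/run/secrets/tsig-secret-example.org";
    };
  }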
 
diff --git a/nixos/modules/security/pam.nix b/nixos/modules/security/pam.nix
index a431817fe1bb3..d83259ccbebc1 100644
--- a/nixos/modules/security/pam.nix
+++ b/nixos/modules/security/pam.nix
@@ -1303,7 +1303,7 @@ in
 
     security.pam.enableEcryptfs = mkEnableOption (lib.mdDoc "eCryptfs PAM module (mounting ecryptfs home directory on login)");
     security.pam.enableFscrypt = mkEnableOption (lib.mdDoc ''
-      Enables fscrypt to automatically unlock directories with the user's login password.
+      fscrypt to automatically unlock directories with the user's login password.
 
       This also enables a service at security.pam.services.fscrypt which is used by
       fscrypt to verify the user's password when setting up a new protector. If you
diff --git a/nixos/modules/security/sudo.nix b/nixos/modules/security/sudo.nix
index 9ac91bd0d3686..d225442773c69 100644
--- a/nixos/modules/security/sudo.nix
+++ b/nixos/modules/security/sudo.nix
@@ -192,6 +192,10 @@ in
   ###### implementation
 
   config = mkIf cfg.enable {
+    assertions = [
+      { assertion = cfg.package.pname != "sudo-rs";
+        message = "The NixOS `sudo` module does not work with `sudo-rs` yet."; }
+    ];
 
     # We `mkOrder 600` so that the default rule shows up first, but there is
     # still enough room for a user to `mkBefore` it.
diff --git a/nixos/modules/security/wrappers/default.nix b/nixos/modules/security/wrappers/default.nix
index 12255d8392fe9..ad65f80bb2ca5 100644
--- a/nixos/modules/security/wrappers/default.nix
+++ b/nixos/modules/security/wrappers/default.nix
@@ -5,8 +5,8 @@ let
 
   parentWrapperDir = dirOf wrapperDir;
 
-  securityWrapper = pkgs.callPackage ./wrapper.nix {
-    inherit parentWrapperDir;
+  securityWrapper = sourceProg : pkgs.callPackage ./wrapper.nix {
+    inherit sourceProg;
   };
 
   fileModeType =
@@ -91,8 +91,7 @@ let
     , ...
     }:
     ''
-      cp ${securityWrapper}/bin/security-wrapper "$wrapperDir/${program}"
-      echo -n "${source}" > "$wrapperDir/${program}.real"
+      cp ${securityWrapper source}/bin/security-wrapper "$wrapperDir/${program}"
 
       # Prevent races
       chmod 0000 "$wrapperDir/${program}"
@@ -119,8 +118,7 @@ let
     , ...
     }:
     ''
-      cp ${securityWrapper}/bin/security-wrapper "$wrapperDir/${program}"
-      echo -n "${source}" > "$wrapperDir/${program}.real"
+      cp ${securityWrapper source}/bin/security-wrapper "$wrapperDir/${program}"
 
       # Prevent races
       chmod 0000 "$wrapperDir/${program}"
@@ -248,11 +246,13 @@ in
       export PATH="${wrapperDir}:$PATH"
     '';
 
-    security.apparmor.includes."nixos/security.wrappers" = ''
-      include "${pkgs.apparmorRulesFromClosure { name="security.wrappers"; } [
-        securityWrapper
+    security.apparmor.includes = lib.mapAttrs' (wrapName: wrap: lib.nameValuePair
+     "nixos/security.wrappers/${wrapName}" ''
+      include "${pkgs.apparmorRulesFromClosure { name="security.wrappers.${wrapName}"; } [
+        (securityWrapper wrap.source)
       ]}"
-    '';
+      mrpx ${wrap.source},
+    '') wrappers;
 
     ###### wrappers activation script
     system.activationScripts.wrappers =
diff --git a/nixos/modules/security/wrappers/wrapper.c b/nixos/modules/security/wrappers/wrapper.c
index 17776a97af81e..2cf1727a31c8c 100644
--- a/nixos/modules/security/wrappers/wrapper.c
+++ b/nixos/modules/security/wrappers/wrapper.c
@@ -17,6 +17,10 @@
 #include <syscall.h>
 #include <byteswap.h>
 
+#ifndef SOURCE_PROG
+#error SOURCE_PROG should be defined via preprocessor commandline
+#endif
+
 // aborts when false, printing the failed expression
 #define ASSERT(expr) ((expr) ? (void) 0 : assert_failure(#expr))
 // aborts when returns non-zero, printing the failed expression and errno
@@ -24,10 +28,6 @@
 
 extern char **environ;
 
-// The WRAPPER_DIR macro is supplied at compile time so that it cannot
-// be changed at runtime
-static char *wrapper_dir = WRAPPER_DIR;
-
 // Wrapper debug variable name
 static char *wrapper_debug = "WRAPPER_DEBUG";
 
@@ -151,115 +151,20 @@ static int make_caps_ambient(const char *self_path) {
     return 0;
 }
 
-int readlink_malloc(const char *p, char **ret) {
-    size_t l = FILENAME_MAX+1;
-    int r;
-
-    for (;;) {
-        char *c = calloc(l, sizeof(char));
-        if (!c) {
-            return -ENOMEM;
-        }
-
-        ssize_t n = readlink(p, c, l-1);
-        if (n < 0) {
-            r = -errno;
-            free(c);
-            return r;
-        }
-
-        if ((size_t) n < l-1) {
-            c[n] = 0;
-            *ret = c;
-            return 0;
-        }
-
-        free(c);
-        l *= 2;
-    }
-}
-
 int main(int argc, char **argv) {
     ASSERT(argc >= 1);
-    char *self_path = NULL;
-    int self_path_size = readlink_malloc("/proc/self/exe", &self_path);
-    if (self_path_size < 0) {
-        fprintf(stderr, "cannot readlink /proc/self/exe: %s", strerror(-self_path_size));
-    }
-
-    unsigned int ruid, euid, suid, rgid, egid, sgid;
-    MUSTSUCCEED(getresuid(&ruid, &euid, &suid));
-    MUSTSUCCEED(getresgid(&rgid, &egid, &sgid));
-
-    // If true, then we did not benefit from setuid privilege escalation,
-    // where the original uid is still in ruid and different from euid == suid.
-    int didnt_suid = (ruid == euid) && (euid == suid);
-    // If true, then we did not benefit from setgid privilege escalation
-    int didnt_sgid = (rgid == egid) && (egid == sgid);
-
-
-    // Make sure that we are being executed from the right location,
-    // i.e., `safe_wrapper_dir'.  This is to prevent someone from creating
-    // hard link `X' from some other location, along with a false
-    // `X.real' file, to allow arbitrary programs from being executed
-    // with elevated capabilities.
-    int len = strlen(wrapper_dir);
-    if (len > 0 && '/' == wrapper_dir[len - 1])
-      --len;
-    ASSERT(!strncmp(self_path, wrapper_dir, len));
-    ASSERT('/' == wrapper_dir[0]);
-    ASSERT('/' == self_path[len]);
-
-    // If we got privileges with the fs set[ug]id bit, check that the privilege we
-    // got matches the one one we expected, ie that our effective uid/gid
-    // matches the uid/gid of `self_path`. This ensures that we were executed as
-    // `self_path', and not, say, as some other setuid program.
-    // We don't check that if we did not benefit from the set[ug]id bit, as
-    // can be the case in nosuid mounts or user namespaces.
-    struct stat st;
-    ASSERT(lstat(self_path, &st) != -1);
-
-    // if the wrapper gained privilege with suid, check that we got the uid of the file owner
-    ASSERT(!((st.st_mode & S_ISUID) && !didnt_suid) || (st.st_uid == euid));
-    // if the wrapper gained privilege with sgid, check that we got the gid of the file group
-    ASSERT(!((st.st_mode & S_ISGID) && !didnt_sgid) || (st.st_gid == egid));
-    // same, but with suid instead of euid
-    ASSERT(!((st.st_mode & S_ISUID) && !didnt_suid) || (st.st_uid == suid));
-    ASSERT(!((st.st_mode & S_ISGID) && !didnt_sgid) || (st.st_gid == sgid));
-
-    // And, of course, we shouldn't be writable.
-    ASSERT(!(st.st_mode & (S_IWGRP | S_IWOTH)));
-
-    // Read the path of the real (wrapped) program from <self>.real.
-    char real_fn[PATH_MAX + 10];
-    int real_fn_size = snprintf(real_fn, sizeof(real_fn), "%s.real", self_path);
-    ASSERT(real_fn_size < sizeof(real_fn));
-
-    int fd_self = open(real_fn, O_RDONLY);
-    ASSERT(fd_self != -1);
-
-    char source_prog[PATH_MAX];
-    len = read(fd_self, source_prog, PATH_MAX);
-    ASSERT(len != -1);
-    ASSERT(len < sizeof(source_prog));
-    ASSERT(len > 0);
-    source_prog[len] = 0;
-
-    close(fd_self);
 
     // Read the capabilities set on the wrapper and raise them in to
     // the ambient set so the program we're wrapping receives the
     // capabilities too!
-    if (make_caps_ambient(self_path) != 0) {
-        free(self_path);
+    if (make_caps_ambient("/proc/self/exe") != 0) {
         return 1;
     }
-    free(self_path);
 
-    execve(source_prog, argv, environ);
+    execve(SOURCE_PROG, argv, environ);
     
     fprintf(stderr, "%s: cannot run `%s': %s\n",
-        argv[0], source_prog, strerror(errno));
+        argv[0], SOURCE_PROG, strerror(errno));
 
     return 1;
 }
diff --git a/nixos/modules/security/wrappers/wrapper.nix b/nixos/modules/security/wrappers/wrapper.nix
index e3620fb222d2c..aec43412404ea 100644
--- a/nixos/modules/security/wrappers/wrapper.nix
+++ b/nixos/modules/security/wrappers/wrapper.nix
@@ -1,4 +1,4 @@
-{ stdenv, linuxHeaders, parentWrapperDir, debug ? false }:
+{ stdenv, linuxHeaders, sourceProg, debug ? false }:
 # For testing:
 # $ nix-build -E 'with import <nixpkgs> {}; pkgs.callPackage ./wrapper.nix { parentWrapperDir = "/run/wrappers"; debug = true; }'
 stdenv.mkDerivation {
@@ -7,7 +7,7 @@ stdenv.mkDerivation {
   dontUnpack = true;
   hardeningEnable = [ "pie" ];
   CFLAGS = [
-    ''-DWRAPPER_DIR="${parentWrapperDir}"''
+    ''-DSOURCE_PROG="${sourceProg}"''
   ] ++ (if debug then [
     "-Werror" "-Og" "-g"
   ] else [
diff --git a/nixos/modules/services/audio/tts.nix b/nixos/modules/services/audio/tts.nix
index 1a355c8ee39f5..0d93224ec0306 100644
--- a/nixos/modules/services/audio/tts.nix
+++ b/nixos/modules/services/audio/tts.nix
@@ -134,6 +134,7 @@ in
           ProtectProc = "invisible";
           ProcSubset = "pid";
           RestrictAddressFamilies = [
+            "AF_UNIX"
             "AF_INET"
             "AF_INET6"
           ];
diff --git a/nixos/modules/services/backup/duplicati.nix b/nixos/modules/services/backup/duplicati.nix
index 007396ebfc9b4..9b422635e7f08 100644
--- a/nixos/modules/services/backup/duplicati.nix
+++ b/nixos/modules/services/backup/duplicati.nix
@@ -10,6 +10,8 @@ in
     services.duplicati = {
       enable = mkEnableOption (lib.mdDoc "Duplicati");
 
+      package = mkPackageOptionMD pkgs "duplicati" { };
+
       port = mkOption {
         default = 8200;
         type = types.port;
@@ -53,7 +55,7 @@ in
   };
 
   config = mkIf cfg.enable {
-    environment.systemPackages = [ pkgs.duplicati ];
+    environment.systemPackages = [ cfg.package ];
 
     systemd.services.duplicati = {
       description = "Duplicati backup";
@@ -63,7 +65,7 @@ in
         {
           User = cfg.user;
           Group = "duplicati";
-          ExecStart = "${pkgs.duplicati}/bin/duplicati-server --webservice-interface=${cfg.interface} --webservice-port=${toString cfg.port} --server-datafolder=${cfg.dataDir}";
+          ExecStart = "${cfg.package}/bin/duplicati-server --webservice-interface=${cfg.interface} --webservice-port=${toString cfg.port} --server-datafolder=${cfg.dataDir}";
           Restart = "on-failure";
         }
         (mkIf (cfg.dataDir == "/var/lib/duplicati") {
@@ -83,4 +85,3 @@ in
 
   };
 }
-
diff --git a/nixos/modules/services/backup/restic.nix b/nixos/modules/services/backup/restic.nix
index 6f4cbab817267..78220e99c3d1f 100644
--- a/nixos/modules/services/backup/restic.nix
+++ b/nixos/modules/services/backup/restic.nix
@@ -260,6 +260,16 @@ in
             Restic package to use.
           '';
         };
+
+        createWrapper = lib.mkOption {
+          type = lib.types.bool;
+          default = true;
+          description = ''
+            Whether to generate and add a script to the system path that has the same environment variables set
+            as the systemd service. This can be used to e.g. mount snapshots or perform other operations, without
+            having to manually specify most options.
+          '';
+        };
       };
     }));
     default = { };
@@ -316,7 +326,8 @@ in
           in
           nameValuePair "restic-backups-${name}" ({
             environment = {
-              RESTIC_CACHE_DIR = "%C/restic-backups-${name}";
+              # not %C, because that wouldn't work in the wrapper script
+              RESTIC_CACHE_DIR = "/var/cache/restic-backups-${name}";
               RESTIC_PASSWORD_FILE = backup.passwordFile;
               RESTIC_REPOSITORY = backup.repository;
               RESTIC_REPOSITORY_FILE = backup.repositoryFile;
@@ -331,7 +342,7 @@ in
                 nameValuePair (rcloneAttrToConf name) (toRcloneVal value)
               )
               backup.rcloneConfig);
-            path = [ pkgs.openssh ];
+            path = [ config.programs.ssh.package ];
             restartIfChanged = false;
             wants = [ "network-online.target" ];
             after = [ "network-online.target" ];
@@ -378,5 +389,22 @@ in
           timerConfig = backup.timerConfig;
         })
         config.services.restic.backups;
+
+    # generate wrapper scripts, as described in the createWrapper option
+    environment.systemPackages = lib.mapAttrsToList (name: backup: let
+      extraOptions = lib.concatMapStrings (arg: " -o ${arg}") backup.extraOptions;
+      resticCmd = "${backup.package}/bin/restic${extraOptions}";
+    in pkgs.writeShellScriptBin "restic-${name}" ''
+      set -a  # automatically export variables
+      ${lib.optionalString (backup.environmentFile != null) "source ${backup.environmentFile}"}
+      # set same environment variables as the systemd service
+      ${lib.pipe config.systemd.services."restic-backups-${name}".environment [
+        (lib.filterAttrs (_: v: v != null))
+        (lib.mapAttrsToList (n: v: "${n}=${v}"))
+        (lib.concatStringsSep "\n")
+      ]}
+
+      exec ${resticCmd} "$@"
+    '') (lib.filterAttrs (_: v: v.createWrapper) config.services.restic.backups);
   };
 }
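With createWrapper left at its default of true, each backup gets a restic-<name> script on $PATH that sets the same environment (cache directory, repository, password file) as the systemd unit, so the repository can be inspected interactively. A minimal sketch with placeholder paths:

  { ... }: {
    services.restic.backups.localbackup = {
      repository = "/srv/restic-repo";              # placeholder
      passwordFile = "/etc/nixos/restic-password";  # placeholder
      paths = [ "/home" ];
      # createWrapper = true;  # default; yields a `restic-localbackup` wrapper script
    };
  }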
diff --git a/nixos/modules/services/continuous-integration/buildbot/master.nix b/nixos/modules/services/continuous-integration/buildbot/master.nix
index 595374ea1e5b3..b4b997201c8f5 100644
--- a/nixos/modules/services/continuous-integration/buildbot/master.nix
+++ b/nixos/modules/services/continuous-integration/buildbot/master.nix
@@ -272,7 +272,13 @@ in {
         Group = cfg.group;
         WorkingDirectory = cfg.home;
         # NOTE: call twistd directly with stdout logging for systemd
-        ExecStart = "${python.pkgs.twisted}/bin/twistd -o --nodaemon --pidfile= --logfile - --python ${tacFile}";
+        ExecStart = "${python.pkgs.twisted}/bin/twistd -o --nodaemon --pidfile= --logfile - --python ${cfg.buildbotDir}/buildbot.tac";
+        # To reload on upgrade, set the following in your configuration:
+        # systemd.services.buildbot-master.reloadIfChanged = true;
+        ExecReload = [
+          "${pkgs.coreutils}/bin/ln -sf ${tacFile} ${cfg.buildbotDir}/buildbot.tac"
+          "${pkgs.coreutils}/bin/kill -HUP $MAINPID"
+        ];
       };
     };
   };
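The ExecStart change points twistd at a buildbot.tac symlink inside buildbotDir so the master can pick up a new tac file on reload instead of a full restart. As the added comment notes, opting in is a single line:

  { ... }: {
    systemd.services.buildbot-master.reloadIfChanged = true;
  }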
diff --git a/nixos/modules/services/continuous-integration/hail.nix b/nixos/modules/services/continuous-integration/hail.nix
deleted file mode 100644
index 62e8b8077c079..0000000000000
--- a/nixos/modules/services/continuous-integration/hail.nix
+++ /dev/null
@@ -1,61 +0,0 @@
-{ config, lib, pkgs, ...}:
-
-with lib;
-
-let
-  cfg = config.services.hail;
-in {
-
-
-  ###### interface
-
-  options.services.hail = {
-    enable = mkOption {
-      type = types.bool;
-      default = false;
-      description = lib.mdDoc ''
-        Enables the Hail Auto Update Service. Hail can automatically deploy artifacts
-        built by a Hydra Continuous Integration server. A common use case is to provide
-        continuous deployment for single services or a full NixOS configuration.'';
-    };
-    profile = mkOption {
-      type = types.str;
-      default = "hail-profile";
-      description = lib.mdDoc "The name of the Nix profile used by Hail.";
-    };
-    hydraJobUri = mkOption {
-      type = types.str;
-      description = lib.mdDoc "The URI of the Hydra Job.";
-    };
-    netrc = mkOption {
-      type = types.nullOr types.path;
-      description = lib.mdDoc "The netrc file to use when fetching data from Hydra.";
-      default = null;
-    };
-    package = mkOption {
-      type = types.package;
-      default = pkgs.haskellPackages.hail;
-      defaultText = literalExpression "pkgs.haskellPackages.hail";
-      description = lib.mdDoc "Hail package to use.";
-    };
-  };
-
-
-  ###### implementation
-
-  config = mkIf cfg.enable {
-    systemd.services.hail = {
-      description = "Hail Auto Update Service";
-      wants = [ "network-online.target" ];
-      wantedBy = [ "multi-user.target" ];
-      path = with pkgs; [ nix ];
-      environment = {
-        HOME = "/var/lib/empty";
-      };
-      serviceConfig = {
-        ExecStart = "${cfg.package}/bin/hail --profile ${cfg.profile} --job-uri ${cfg.hydraJobUri}"
-          + lib.optionalString (cfg.netrc != null) " --netrc-file ${cfg.netrc}";
-      };
-    };
-  };
-}
diff --git a/nixos/modules/services/continuous-integration/woodpecker/agents.nix b/nixos/modules/services/continuous-integration/woodpecker/agents.nix
index cc5b903afd595..3b883c72ff07f 100644
--- a/nixos/modules/services/continuous-integration/woodpecker/agents.nix
+++ b/nixos/modules/services/continuous-integration/woodpecker/agents.nix
@@ -35,6 +35,16 @@ let
         '';
       };
 
+      path = lib.mkOption {
+        type = lib.types.listOf lib.types.package;
+        default = [ ];
+        example = [ "" ];
+        description = lib.mdDoc ''
+          Additional packages that should be added to the agent's `PATH`.
+          Mostly useful for the `local` backend.
+        '';
+      };
+
       environmentFile = lib.mkOption {
         type = lib.types.listOf lib.types.path;
         default = [ ];
@@ -94,7 +104,7 @@ let
           "-/etc/localtime"
         ];
       };
-      inherit (agentCfg) environment;
+      inherit (agentCfg) environment path;
     };
   };
 in
@@ -106,28 +116,41 @@ in
       agents = lib.mkOption {
         default = { };
         type = lib.types.attrsOf agentModule;
-        example = {
-          docker = {
-            environment = {
-              WOODPECKER_SERVER = "localhost:9000";
-              WOODPECKER_BACKEND = "docker";
-              DOCKER_HOST = "unix:///run/podman/podman.sock";
+        example = lib.literalExpression ''
+          {
+            podman = {
+              environment = {
+                WOODPECKER_SERVER = "localhost:9000";
+                WOODPECKER_BACKEND = "docker";
+                DOCKER_HOST = "unix:///run/podman/podman.sock";
+              };
+
+              extraGroups = [ "podman" ];
+
+              environmentFile = [ "/run/secrets/woodpecker/agent-secret.txt" ];
             };
 
-            extraGroups = [ "docker" ];
+            exec = {
+              environment = {
+                WOODPECKER_SERVER = "localhost:9000";
+                WOODPECKER_BACKEND = "local";
+              };
 
-            environmentFile = "/run/secrets/woodpecker/agent-secret.txt";
-          };
+              environmentFile = [ "/run/secrets/woodpecker/agent-secret.txt" ];
 
-          exec = {
-            environment = {
-              WOODPECKER_SERVER = "localhost:9000";
-              WOODPECKER_BACKEND = "exec";
+              path = [
+                # Needed to clone repos
+                git
+                git-lfs
+                woodpecker-plugin-git
+                # Used by the runner as the default shell
+                bash
+                # Most likely to be used in pipeline definitions
+                coreutils
+              ];
             };
-
-            environmentFile = "/run/secrets/woodpecker/agent-secret.txt";
-          };
-        };
+          }
+        '';
         description = lib.mdDoc "woodpecker-agents configurations";
       };
     };
diff --git a/nixos/modules/services/databases/surrealdb.nix b/nixos/modules/services/databases/surrealdb.nix
index 050a5336cb4c6..28bd97cd731ea 100644
--- a/nixos/modules/services/databases/surrealdb.nix
+++ b/nixos/modules/services/databases/surrealdb.nix
@@ -47,17 +47,13 @@ in {
         example = 8000;
       };
 
-      userNamePath = mkOption {
-        type = types.path;
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--allow-all" "--auth" "--user root" "--pass root" ];
         description = lib.mdDoc ''
-          Path to read the username from.
-        '';
-      };
-
-      passwordPath = mkOption {
-        type = types.path;
-        description = lib.mdDoc ''
-          Path to read the password from.
+          Specify a list of additional command line flags,
+          which get escaped and are then passed to surrealdb.
         '';
       };
     };
@@ -73,19 +69,8 @@ in {
       wantedBy = [ "multi-user.target" ];
       after = [ "network.target" ];
 
-      script = ''
-        ${cfg.package}/bin/surreal start \
-          --user $(${pkgs.systemd}/bin/systemd-creds cat SURREALDB_USERNAME) \
-          --pass $(${pkgs.systemd}/bin/systemd-creds cat SURREALDB_PASSWORD) \
-          --bind ${cfg.host}:${toString cfg.port} \
-          -- ${cfg.dbPath}
-      '';
       serviceConfig = {
-        LoadCredential = [
-          "SURREALDB_USERNAME:${cfg.userNamePath}"
-          "SURREALDB_PASSWORD:${cfg.passwordPath}"
-        ];
-
+        ExecStart = "${cfg.package}/bin/surreal start --bind ${cfg.host}:${toString cfg.port} ${escapeShellArgs cfg.extraFlags} -- ${cfg.dbPath}";
         DynamicUser = true;
         Restart = "on-failure";
         StateDirectory = "surrealdb";
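Credentials and other flags now travel through extraFlags (escaped with escapeShellArgs) instead of the removed userNamePath/passwordPath credential files. A minimal sketch mirroring the option's own example, with flag values split into separate list elements so each is escaped as its own argument:

  { ... }: {
    services.surrealdb = {
      enable = true;
      extraFlags = [ "--auth" "--user" "root" "--pass" "root" ];
    };
  }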
diff --git a/nixos/modules/services/hardware/auto-cpufreq.nix b/nixos/modules/services/hardware/auto-cpufreq.nix
index fd2e03ef12f5d..cf27bdd8b6eb7 100644
--- a/nixos/modules/services/hardware/auto-cpufreq.nix
+++ b/nixos/modules/services/hardware/auto-cpufreq.nix
@@ -15,8 +15,7 @@ in {
         description = lib.mdDoc ''
           Configuration for `auto-cpufreq`.
 
-          See its [example configuration file] for supported settings.
-          [example configuration file]: https://github.com/AdnanHodzic/auto-cpufreq/blob/master/auto-cpufreq.conf-example
+          The available options can be found in [the example configuration file](https://github.com/AdnanHodzic/auto-cpufreq/blob/v${pkgs.auto-cpufreq.version}/auto-cpufreq.conf-example).
           '';
 
         default = {};
@@ -35,6 +34,7 @@ in {
         wantedBy = [ "multi-user.target" ];
         path = with pkgs; [ bash coreutils ];
 
+        serviceConfig.WorkingDirectory = "";
         serviceConfig.ExecStart = [
           ""
           "${lib.getExe pkgs.auto-cpufreq} --daemon --config ${cfgFile}"
@@ -42,4 +42,7 @@ in {
       };
     };
   };
+
+  # uses attributes of the linked package
+  meta.buildDocsInSandbox = false;
 }
diff --git a/nixos/modules/services/hardware/openrgb.nix b/nixos/modules/services/hardware/openrgb.nix
index 310615ecc5396..13b1d07e53b7d 100644
--- a/nixos/modules/services/hardware/openrgb.nix
+++ b/nixos/modules/services/hardware/openrgb.nix
@@ -17,7 +17,14 @@ in {
 
     motherboard = mkOption {
       type = types.nullOr (types.enum [ "amd" "intel" ]);
-      default = null;
+      default = if config.hardware.cpu.intel.updateMicrocode then "intel"
+        else if config.hardware.cpu.amd.updateMicrocode then "amd"
+        else null;
+      defaultText = literalMD ''
+        if config.hardware.cpu.intel.updateMicrocode then "intel"
+        else if config.hardware.cpu.amd.updateMicrocode then "amd"
+        else null;
+      '';
       description = lib.mdDoc "CPU family of motherboard. Allows for addition motherboard i2c support.";
     };
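The motherboard default is now derived from the CPU microcode options, so most hosts need no explicit value; it can still be pinned. A sketch assuming the services.hardware.openrgb option path used by this module:

  { ... }: {
    services.hardware.openrgb = {
      enable = true;
      motherboard = "amd";  # or "intel"; null skips the extra i2c support
    };
  }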
 
diff --git a/nixos/modules/services/home-automation/zigbee2mqtt.nix b/nixos/modules/services/home-automation/zigbee2mqtt.nix
index 796de3a491e49..6b5bd8a0d9bbc 100644
--- a/nixos/modules/services/home-automation/zigbee2mqtt.nix
+++ b/nixos/modules/services/home-automation/zigbee2mqtt.nix
@@ -66,9 +66,10 @@ in
         server = mkDefault "mqtt://localhost:1883";
       };
       serial.port = mkDefault "/dev/ttyACM0";
-      # reference device configuration, that is kept in a separate file
+      # reference device/group configuration, that is kept in a separate file
       # to prevent it being overwritten in the units ExecStartPre script
       devices = mkDefault "devices.yaml";
+      groups = mkDefault "groups.yaml";
     };
 
     systemd.services.zigbee2mqtt = {
diff --git a/nixos/modules/services/logging/logrotate.nix b/nixos/modules/services/logging/logrotate.nix
index 342ac5ec6e049..ba1445f083975 100644
--- a/nixos/modules/services/logging/logrotate.nix
+++ b/nixos/modules/services/logging/logrotate.nix
@@ -1,4 +1,4 @@
-{ config, lib, pkgs, ... }:
+{ config, lib, pkgs, utils, ... }:
 
 with lib;
 
@@ -220,6 +220,12 @@ in
           in this case you can disable the failing check with this option.
         '';
       };
+
+      extraArgs = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        default = [];
+        description = "Additional command line arguments to pass on logrotate invocation";
+      };
     };
   };
 
@@ -231,7 +237,7 @@ in
       serviceConfig = {
         Restart = "no";
         User = "root";
-        ExecStart = "${pkgs.logrotate}/sbin/logrotate ${mailOption} ${cfg.configFile}";
+        ExecStart = "${pkgs.logrotate}/sbin/logrotate ${utils.escapeSystemdExecArgs cfg.extraArgs} ${mailOption} ${cfg.configFile}";
       };
     };
     systemd.services.logrotate-checkconf = {
@@ -240,7 +246,7 @@ in
       serviceConfig = {
         Type = "oneshot";
         RemainAfterExit = true;
-        ExecStart = "${pkgs.logrotate}/sbin/logrotate --debug ${cfg.configFile}";
+        ExecStart = "${pkgs.logrotate}/sbin/logrotate ${utils.escapeSystemdExecArgs cfg.extraArgs} --debug ${cfg.configFile}";
       };
     };
   };
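extraArgs is appended, systemd-escaped, to both the main logrotate service and the logrotate-checkconf service. A minimal sketch using a flag logrotate itself provides:

  { ... }: {
    services.logrotate.extraArgs = [ "--verbose" ];
  }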
diff --git a/nixos/modules/services/mail/listmonk.nix b/nixos/modules/services/mail/listmonk.nix
index 251362fdd89d3..11b2a5186229b 100644
--- a/nixos/modules/services/mail/listmonk.nix
+++ b/nixos/modules/services/mail/listmonk.nix
@@ -54,7 +54,7 @@ let
 
       smtp = mkOption {
         type = listOf (submodule {
-          freeformType = with types; attrsOf (oneOf [ str int bool ]);
+          freeformType = with types; attrsOf anything;
 
           options = {
             enabled = mkEnableOption (lib.mdDoc "this SMTP server for listmonk");
@@ -86,7 +86,7 @@ let
       # TODO: refine this type based on the smtp one.
       "bounce.mailboxes" = mkOption {
         type = listOf
-          (submodule { freeformType = with types; oneOf [ str int bool ]; });
+          (submodule { freeformType = with types; listOf (attrsOf anything); });
         default = [ ];
         description = lib.mdDoc "List of bounce mailboxes";
       };
diff --git a/nixos/modules/services/mail/stalwart-mail.nix b/nixos/modules/services/mail/stalwart-mail.nix
new file mode 100644
index 0000000000000..eb87d9f6f695e
--- /dev/null
+++ b/nixos/modules/services/mail/stalwart-mail.nix
@@ -0,0 +1,106 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.stalwart-mail;
+  configFormat = pkgs.formats.toml { };
+  configFile = configFormat.generate "stalwart-mail.toml" cfg.settings;
+  dataDir = "/var/lib/stalwart-mail";
+
+in {
+  options.services.stalwart-mail = {
+    enable = mkEnableOption (mdDoc "the Stalwart all-in-one email server");
+    package = mkPackageOptionMD pkgs "stalwart-mail" { };
+
+    settings = mkOption {
+      inherit (configFormat) type;
+      default = { };
+      description = mdDoc ''
+        Configuration options for the Stalwart email server.
+        See <https://stalw.art/docs/category/configuration> for available options.
+
+        By default, the module is configured to store everything locally.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # Default config: all local
+    services.stalwart-mail.settings = {
+      global.tracing.method = mkDefault "stdout";
+      global.tracing.level = mkDefault "info";
+      queue.path = mkDefault "${dataDir}/queue";
+      report.path = mkDefault "${dataDir}/reports";
+      store.db.path = mkDefault "${dataDir}/data/index.sqlite3";
+      store.blob.type = mkDefault "local";
+      store.blob.local.path = mkDefault "${dataDir}/data/blobs";
+      resolver.type = mkDefault "system";
+    };
+
+    systemd.services.stalwart-mail = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "local-fs.target" "network.target" ];
+
+      preStart = ''
+        mkdir -p ${dataDir}/{queue,reports,data/blobs}
+      '';
+
+      serviceConfig = {
+        ExecStart =
+          "${cfg.package}/bin/stalwart-mail --config=${configFile}";
+
+        # Base from template resources/systemd/stalwart-mail.service
+        Type = "simple";
+        LimitNOFILE = 65536;
+        KillMode = "process";
+        KillSignal = "SIGINT";
+        Restart = "on-failure";
+        RestartSec = 5;
+        StandardOutput = "syslog";
+        StandardError = "syslog";
+        SyslogIdentifier = "stalwart-mail";
+
+        DynamicUser = true;
+        User = "stalwart-mail";
+        StateDirectory = "stalwart-mail";
+
+        # Bind standard privileged ports
+        AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
+
+        # Hardening
+        DeviceAllow = [ "" ];
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        PrivateUsers = false;  # incompatible with CAP_NET_BIND_SERVICE
+        ProcSubset = "pid";
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+        UMask = "0077";
+      };
+    };
+
+    # Make admin commands available in the shell
+    environment.systemPackages = [ cfg.package ];
+  };
+
+  meta = {
+    maintainers = with maintainers; [ happysalada pacien ];
+  };
+}
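
A minimal sketch of how the new module might be enabled from a host configuration; the value shown only overrides one of the module's own defaults, while everything else falls back to the local-storage defaults above.

    # Sketch only: enable the service and override a single module default.
    services.stalwart-mail = {
      enable = true;
      settings = {
        # the module sets this to "info" via mkDefault, so a plain value wins
        global.tracing.level = "debug";
      };
    };
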
diff --git a/nixos/modules/services/matrix/mautrix-telegram.nix b/nixos/modules/services/matrix/mautrix-telegram.nix
index 17032ed808e93..97a6ba858e004 100644
--- a/nixos/modules/services/matrix/mautrix-telegram.nix
+++ b/nixos/modules/services/matrix/mautrix-telegram.nix
@@ -159,7 +159,6 @@ in {
         if [ ! -f '${registrationFile}' ]; then
           ${pkgs.mautrix-telegram}/bin/mautrix-telegram \
             --generate-registration \
-            --base-config='${pkgs.mautrix-telegram}/${pkgs.mautrix-telegram.pythonModule.sitePackages}/mautrix_telegram/example-config.yaml' \
             --config='${settingsFile}' \
             --registration='${registrationFile}'
         fi
diff --git a/nixos/modules/services/matrix/mautrix-whatsapp.nix b/nixos/modules/services/matrix/mautrix-whatsapp.nix
index 80c85980196f3..c4dc482134956 100644
--- a/nixos/modules/services/matrix/mautrix-whatsapp.nix
+++ b/nixos/modules/services/matrix/mautrix-whatsapp.nix
@@ -11,53 +11,47 @@
   settingsFileUnsubstituted = settingsFormat.generate "mautrix-whatsapp-config-unsubstituted.json" cfg.settings;
   settingsFormat = pkgs.formats.json {};
   appservicePort = 29318;
+
+  mkDefaults = lib.mapAttrsRecursive (n: v: lib.mkDefault v);
+  defaultConfig = {
+    homeserver.address = "http://localhost:8448";
+    appservice = {
+      hostname = "[::]";
+      port = appservicePort;
+      database.type = "sqlite3";
+      database.uri = "${dataDir}/mautrix-whatsapp.db";
+      id = "whatsapp";
+      bot.username = "whatsappbot";
+      bot.displayname = "WhatsApp Bridge Bot";
+      as_token = "";
+      hs_token = "";
+    };
+    bridge = {
+      username_template = "whatsapp_{{.}}";
+      displayname_template = "{{if .BusinessName}}{{.BusinessName}}{{else if .PushName}}{{.PushName}}{{else}}{{.JID}}{{end}} (WA)";
+      double_puppet_server_map = {};
+      login_shared_secret_map = {};
+      command_prefix = "!wa";
+      permissions."*" = "relay";
+      relay.enabled = true;
+    };
+    logging = {
+      min_level = "info";
+      writers = lib.singleton {
+        type = "stdout";
+        format = "pretty-colored";
+        time_format = " ";
+      };
+    };
+  };
+
 in {
-  imports = [];
   options.services.mautrix-whatsapp = {
-    enable = lib.mkEnableOption "mautrix-whatsapp, a puppeting/relaybot bridge between Matrix and WhatsApp.";
+    enable = lib.mkEnableOption (lib.mdDoc "mautrix-whatsapp, a puppeting/relaybot bridge between Matrix and WhatsApp.");
 
     settings = lib.mkOption {
       type = settingsFormat.type;
-      default = {
-        appservice = {
-          address = "http://localhost:${toString appservicePort}";
-          hostname = "[::]";
-          port = appservicePort;
-          database = {
-            type = "sqlite3";
-            uri = "${dataDir}/mautrix-whatsapp.db";
-          };
-          id = "whatsapp";
-          bot = {
-            username = "whatsappbot";
-            displayname = "WhatsApp Bridge Bot";
-          };
-          as_token = "";
-          hs_token = "";
-        };
-        bridge = {
-          username_template = "whatsapp_{{.}}";
-          displayname_template = "{{if .BusinessName}}{{.BusinessName}}{{else if .PushName}}{{.PushName}}{{else}}{{.JID}}{{end}} (WA)";
-          double_puppet_server_map = {};
-          login_shared_secret_map = {};
-          command_prefix = "!wa";
-          permissions."*" = "relay";
-          relay.enabled = true;
-        };
-        logging = {
-          min_level = "info";
-          writers = [
-            {
-              type = "stdout";
-              format = "pretty-colored";
-            }
-            {
-              type = "file";
-              format = "json";
-            }
-          ];
-        };
-      };
+      default = defaultConfig;
       description = lib.mdDoc ''
         {file}`config.yaml` configuration as a Nix attribute set.
         Configuration options should match those described in
@@ -117,10 +111,22 @@ in {
   };
 
   config = lib.mkIf cfg.enable {
-    services.mautrix-whatsapp.settings = {
-      homeserver.domain = lib.mkDefault config.services.matrix-synapse.settings.server_name;
+
+    users.users.mautrix-whatsapp = {
+      isSystemUser = true;
+      group = "mautrix-whatsapp";
+      home = dataDir;
+      description = "Mautrix-WhatsApp bridge user";
     };
 
+    users.groups.mautrix-whatsapp = {};
+
+    services.mautrix-whatsapp.settings = lib.mkMerge (map mkDefaults [
+      defaultConfig
+      # Note: this is defined here to avoid the docs depending on `config`
+      { homeserver.domain = config.services.matrix-synapse.settings.server_name; }
+    ]);
+
     systemd.services.mautrix-whatsapp = {
       description = "Mautrix-WhatsApp Service - A WhatsApp bridge for Matrix";
 
@@ -158,10 +164,11 @@ in {
       '';
 
       serviceConfig = {
-        DynamicUser = true;
+        User = "mautrix-whatsapp";
+        Group = "mautrix-whatsapp";
         EnvironmentFile = cfg.environmentFile;
         StateDirectory = baseNameOf dataDir;
-        WorkingDirectory = "${dataDir}";
+        WorkingDirectory = dataDir;
         ExecStart = ''
           ${pkgs.mautrix-whatsapp}/bin/mautrix-whatsapp \
           --config='${settingsFile}' \
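
Because the defaults are now applied with `mkDefault`, plain values in `settings` override them. A rough usage sketch follows; the homeserver address and the admin Matrix ID are placeholders, not values defined by the module.

    # Sketch: enable the bridge and override two of the defaults listed above.
    services.mautrix-whatsapp = {
      enable = true;
      settings = {
        homeserver.address = "http://localhost:8008";       # instead of the default :8448
        bridge.permissions."@admin:example.org" = "admin";   # placeholder Matrix ID
      };
    };
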
diff --git a/nixos/modules/services/misc/atuin.nix b/nixos/modules/services/misc/atuin.nix
index 57ff02df7d68b..8d2c1b5242ffa 100644
--- a/nixos/modules/services/misc/atuin.nix
+++ b/nixos/modules/services/misc/atuin.nix
@@ -6,7 +6,7 @@ in
 {
   options = {
     services.atuin = {
-      enable = lib.mkEnableOption (mdDoc "Enable server for shell history sync with atuin");
+      enable = lib.mkEnableOption (mdDoc "Atuin server for shell history sync");
 
       openRegistration = mkOption {
         type = types.bool;
diff --git a/nixos/modules/services/misc/cfdyndns.nix b/nixos/modules/services/misc/cfdyndns.nix
index 9cd8b188ffaec..5a02de2aad213 100644
--- a/nixos/modules/services/misc/cfdyndns.nix
+++ b/nixos/modules/services/misc/cfdyndns.nix
@@ -23,6 +23,15 @@ in
         '';
       };
 
+      apiTokenFile = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          The path to a file containing the API Token
+          used to authenticate with CloudFlare.
+        '';
+      };
+
       apikeyFile = mkOption {
         default = null;
         type = types.nullOr types.str;
@@ -55,12 +64,15 @@ in
         Group = config.ids.gids.cfdyndns;
       };
       environment = {
-        CLOUDFLARE_EMAIL="${cfg.email}";
         CLOUDFLARE_RECORDS="${concatStringsSep "," cfg.records}";
       };
       script = ''
         ${optionalString (cfg.apikeyFile != null) ''
           export CLOUDFLARE_APIKEY="$(cat ${escapeShellArg cfg.apikeyFile})"
+          export CLOUDFLARE_EMAIL="${cfg.email}"
+        ''}
+        ${optionalString (cfg.apiTokenFile != null) ''
+          export CLOUDFLARE_APITOKEN="$(cat ${escapeShellArg cfg.apiTokenFile})"
         ''}
         ${pkgs.cfdyndns}/bin/cfdyndns
       '';
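
A sketch of the new token-based setup; the secrets path is a placeholder, and the token is read at service start so it never ends up in the Nix store.

    # Sketch: authenticate with a scoped API token instead of email + API key.
    services.cfdyndns = {
      enable = true;
      records = [ "example.org" "www.example.org" ];
      apiTokenFile = "/run/secrets/cloudflare-api-token";   # placeholder path
    };
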
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index c5e38b4988298..b399ccc38f58d 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -1088,6 +1088,11 @@ in {
         ''Support for container registries other than gitlab-container-registry has ended since GitLab 16.0.0 and is scheduled for removal in a future release.
           Please back up your data and migrate to the gitlab-container-registry package.''
       )
+      (mkIf
+        (versionAtLeast (getVersion cfg.packages.gitlab) "16.2.0" && versionOlder (getVersion cfg.packages.gitlab) "16.5.0")
+        ''GitLab instances created or updated between versions [15.11.0, 15.11.2] have an incorrect database schema.
+        Check the upstream documentation for a workaround: https://docs.gitlab.com/ee/update/versions/gitlab_16_changes.html#undefined-column-error-upgrading-to-162-or-later''
+      )
     ];
 
     assertions = [
@@ -1655,7 +1660,7 @@ in {
         Restart = "on-failure";
         WorkingDirectory = "${cfg.packages.gitlab}/share/gitlab";
         ExecStart = concatStringsSep " " [
-          "${cfg.packages.gitlab.rubyEnv}/bin/puma"
+          "${cfg.packages.gitlab.rubyEnv}/bin/bundle" "exec" "puma"
           "-e production"
           "-C ${cfg.statePath}/config/puma.rb"
           "-w ${cfg.puma.workers}"
diff --git a/nixos/modules/services/monitoring/mimir.nix b/nixos/modules/services/monitoring/mimir.nix
index edca9b7be4ff0..6ed139b229748 100644
--- a/nixos/modules/services/monitoring/mimir.nix
+++ b/nixos/modules/services/monitoring/mimir.nix
@@ -32,11 +32,21 @@ in {
       type = types.package;
       description = lib.mdDoc ''Mimir package to use.'';
     };
+
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "--config.expand-env=true" ];
+      description = lib.mdDoc ''
+        Specify a list of additional command line flags,
+        which are escaped and then passed to Mimir.
+      '';
+    };
   };
 
   config = mkIf cfg.enable {
     # for mimirtool
-    environment.systemPackages = [ pkgs.mimir ];
+    environment.systemPackages = [ cfg.package ];
 
     assertions = [{
       assertion = (
@@ -60,7 +70,7 @@ in {
                else cfg.configFile;
       in
       {
-        ExecStart = "${cfg.package}/bin/mimir --config.file=${conf}";
+        ExecStart = "${cfg.package}/bin/mimir --config.file=${conf} ${escapeShellArgs cfg.extraFlags}";
         DynamicUser = true;
         Restart = "always";
         ProtectSystem = "full";
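
A sketch of the new option in use; the flag is the option's own example and is shell-escaped onto the ExecStart line above.

    # Sketch: append one extra flag to the Mimir invocation.
    services.mimir = {
      enable = true;
      extraFlags = [ "--config.expand-env=true" ];
    };
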
diff --git a/nixos/modules/services/networking/dae.nix b/nixos/modules/services/networking/dae.nix
index 231c555b33030..42ed3c7f8d4aa 100644
--- a/nixos/modules/services/networking/dae.nix
+++ b/nixos/modules/services/networking/dae.nix
@@ -1,41 +1,161 @@
-{ config, pkgs, lib, ... }:
+{ config, lib, pkgs, ... }:
+
 let
   cfg = config.services.dae;
+  assets = cfg.assets;
+  genAssetsDrv = paths: pkgs.symlinkJoin {
+    name = "dae-assets";
+    inherit paths;
+  };
 in
 {
-  meta.maintainers = with lib.maintainers; [ pokon548 ];
+  meta.maintainers = with lib.maintainers; [ pokon548 oluceps ];
 
   options = {
-    services.dae = {
-      enable = lib.options.mkEnableOption (lib.mdDoc "the dae service");
-      package = lib.mkPackageOptionMD pkgs "dae" { };
+    services.dae = with lib;{
+      enable = mkEnableOption
+        (mdDoc "A Linux high-performance transparent proxy solution based on eBPF");
+
+      package = mkPackageOptionMD pkgs "dae" { };
+
+      assets = mkOption {
+        type = with types;(listOf path);
+        default = with pkgs; [ v2ray-geoip v2ray-domain-list-community ];
+        defaultText = literalExpression "with pkgs; [ v2ray-geoip v2ray-domain-list-community ]";
+        description = mdDoc ''
+          Assets required to run dae.
+        '';
+      };
+
+      assetsPath = mkOption {
+        type = types.str;
+        default = "${genAssetsDrv assets}/share/v2ray";
+        defaultText = literalExpression ''
+          (symlinkJoin {
+              name = "dae-assets";
+              paths = assets;
+          })/share/v2ray
+        '';
+        description = mdDoc ''
+          The path which contains the geolocation database.
+          This option will override `assets`.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = with types; submodule {
+          options = {
+            enable = mkEnableOption "opening the firewall for the tproxy port";
+            port = mkOption {
+              type = types.int;
+              description = ''
+                Port to be opened. Must be consistent with the field `tproxy_port` in the config file.
+              '';
+            };
+          };
+        };
+        default = {
+          enable = true;
+          port = 12345;
+        };
+        defaultText = literalExpression ''
+          {
+            enable = true;
+            port = 12345;
+          }
+        '';
+        description = mdDoc ''
+          Open the firewall port.
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.path;
+        default = "/etc/dae/config.dae";
+        example = "/path/to/your/config.dae";
+        description = mdDoc ''
+          The path of the dae config file; it must end with `.dae`.
+        '';
+      };
+
+      config = mkOption {
+        type = types.str;
+        default = ''
+          global{}
+          routing{}
+        '';
+        description = mdDoc ''
+          Config text for dae.
+
+          See <https://github.com/daeuniverse/dae/blob/main/example.dae>.
+        '';
+      };
+
+      disableTxChecksumIpGeneric =
+        mkEnableOption (mdDoc "See <https://github.com/daeuniverse/dae/issues/43>");
+
     };
   };
 
-  config = lib.mkIf config.services.dae.enable {
-    networking.firewall.allowedTCPPorts = [ 12345 ];
-    networking.firewall.allowedUDPPorts = [ 12345 ];
+  config = lib.mkIf cfg.enable
+
+    {
+      environment.systemPackages = [ cfg.package ];
+      systemd.packages = [ cfg.package ];
 
-    systemd.services.dae = {
-      unitConfig = {
-        Description = "dae Service";
-        Documentation = "https://github.com/daeuniverse/dae";
-        After = [ "network-online.target" "systemd-sysctl.service" ];
-        Wants = [ "network-online.target" ];
+      environment.etc."dae/config.dae" = {
+        mode = "0400";
+        source = pkgs.writeText "config.dae" cfg.config;
       };
 
-      serviceConfig = {
-        User = "root";
-        ExecStartPre = "${lib.getExe cfg.package} validate -c /etc/dae/config.dae";
-        ExecStart = "${lib.getExe cfg.package} run --disable-timestamp -c /etc/dae/config.dae";
-        ExecReload = "${lib.getExe cfg.package} reload $MAINPID";
-        LimitNPROC = 512;
-        LimitNOFILE = 1048576;
-        Restart = "on-abnormal";
-        Type = "notify";
+      networking = lib.mkIf cfg.openFirewall.enable {
+        firewall =
+          let portToOpen = cfg.openFirewall.port;
+          in
+          {
+            allowedTCPPorts = [ portToOpen ];
+            allowedUDPPorts = [ portToOpen ];
+          };
       };
 
-      wantedBy = [ "multi-user.target" ];
+      systemd.services.dae =
+        let
+          daeBin = lib.getExe cfg.package;
+          TxChecksumIpGenericWorkaround = with lib;(getExe pkgs.writeShellApplication {
+            name = "disable-tx-checksum-ip-generic";
+            text = with pkgs; ''
+              iface=$(${iproute2}/bin/ip route | ${lib.getExe gawk} '/default/ {print $5}')
+              ${lib.getExe ethtool} -K "$iface" tx-checksum-ip-generic off
+            '';
+          });
+        in
+        {
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = {
+            ExecStartPre = [ "" "${daeBin} validate -c ${cfg.configFile}" ]
+              ++ (with lib; optional cfg.disableTxChecksumIpGeneric TxChecksumIpGenericWorkaround);
+            ExecStart = [ "" "${daeBin} run --disable-timestamp -c ${cfg.configFile}" ];
+            Environment = "DAE_LOCATION_ASSET=${cfg.assetsPath}";
+          };
+        };
+
+      assertions = [
+        {
+          assertion = lib.pathExists (toString (genAssetsDrv cfg.assets) + "/share/v2ray");
+          message = ''
+            Packages in `assets` have no preset paths included.
+            Please set `assetsPath` instead.
+          '';
+        }
+
+        {
+          assertion = !((config.services.dae.config != "global{}\nrouting{}\n")
+            && (config.services.dae.configFile != "/etc/dae/config.dae"));
+          message = ''
+            Options `config` and `configFile` cannot be set
+            at the same time.
+          '';
+        }
+      ];
     };
-  };
 }
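
A sketch of the reworked module in use, relying only on values the module itself declares as defaults; the config file is expected to exist at the default out-of-store location.

    # Sketch: point dae at its default config location and open the tproxy port.
    services.dae = {
      enable = true;
      configFile = "/etc/dae/config.dae";                 # the module's default path
      openFirewall = { enable = true; port = 12345; };    # matches tproxy_port in config.dae
    };
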
diff --git a/nixos/modules/services/networking/jool.nix b/nixos/modules/services/networking/jool.nix
index 3aafbe40967ce..d2d2b0956e8aa 100644
--- a/nixos/modules/services/networking/jool.nix
+++ b/nixos/modules/services/networking/jool.nix
@@ -16,7 +16,7 @@ let
     TemporaryFileSystem = [ "/" ];
     BindReadOnlyPaths = [
       builtins.storeDir
-      "/run/current-system/kernel-modules"
+      "/run/booted-system/kernel-modules"
     ];
 
     # Give capabilities to load the module and configure it
@@ -31,26 +31,96 @@ let
 
   configFormat = pkgs.formats.json {};
 
-  mkDefaultAttrs = lib.mapAttrs (n: v: lib.mkDefault v);
+  # Generate the config file of instance `name`
+  nat64Conf = name:
+    configFormat.generate "jool-nat64-${name}.conf"
+      (cfg.nat64.${name} // { instance = name; });
+  siitConf = name:
+    configFormat.generate "jool-siit-${name}.conf"
+      (cfg.siit.${name} // { instance = name; });
+
+  # NAT64 config type
+  nat64Options = lib.types.submodule {
+    # The format is plain JSON
+    freeformType = configFormat.type;
+    # Some options with a default value
+    options.framework = lib.mkOption {
+      type = lib.types.enum [ "netfilter" "iptables" ];
+      default = "netfilter";
+      description = lib.mdDoc ''
+        The framework to use for attaching Jool's translation to the existing
+        kernel packet processing rules. See the
+        [documentation](https://nicmx.github.io/Jool/en/intro-jool.html#design)
+        for the differences between the two options.
+      '';
+    };
+    options.global.pool6 = lib.mkOption {
+      type = lib.types.strMatching "[[:xdigit:]:]+/[[:digit:]]+"
+        // { description = "Network prefix in CIDR notation"; };
+      default = "64:ff9b::/96";
+      description = lib.mdDoc ''
+        The prefix used for embedding IPv4 into IPv6 addresses.
+        Defaults to the well-known NAT64 prefix, defined by
+        [RFC 6052](https://datatracker.ietf.org/doc/html/rfc6052).
+      '';
+    };
+  };
 
-  defaultNat64 = {
-    instance = "default";
-    framework = "netfilter";
-    global.pool6 = "64:ff9b::/96";
+  # SIIT config type
+  siitOptions = lib.types.submodule {
+    # The format is, again, plain JSON
+    freeformType = configFormat.type;
+    # Some options with a default value
+    options = { inherit (nat64Options.getSubOptions []) framework; };
+  };
+
+  makeNat64Unit = name: opts: {
+    "jool-nat64-${name}" = {
+      description = "Jool, NAT64 setup of instance ${name}";
+      documentation = [ "https://nicmx.github.io/Jool/en/documentation.html" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStartPre = "${pkgs.kmod}/bin/modprobe jool";
+        ExecStart    = "${jool-cli}/bin/jool file handle ${nat64Conf name}";
+        ExecStop     = "${jool-cli}/bin/jool -f ${nat64Conf name} instance remove";
+      } // hardening;
+    };
   };
-  defaultSiit = {
-    instance = "default";
-    framework = "netfilter";
+
+  makeSiitUnit = name: opts: {
+    "jool-siit-${name}" = {
+      description = "Jool, SIIT setup of instance ${name}";
+      documentation = [ "https://nicmx.github.io/Jool/en/documentation.html" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStartPre = "${pkgs.kmod}/bin/modprobe jool_siit";
+        ExecStart    = "${jool-cli}/bin/jool_siit file handle ${siitConf name}";
+        ExecStop     = "${jool-cli}/bin/jool_siit -f ${siitConf name} instance remove";
+      } // hardening;
+    };
   };
 
-  nat64Conf = configFormat.generate "jool-nat64.conf" cfg.nat64.config;
-  siitConf  = configFormat.generate "jool-siit.conf" cfg.siit.config;
+  checkNat64 = name: _: ''
+    printf 'Validating Jool configuration for NAT64 instance "${name}"... '
+    jool file check ${nat64Conf name}
+    printf 'Ok.\n'; touch "$out"
+  '';
+
+  checkSiit = name: _: ''
+    printf 'Validating Jool configuration for SIIT instance "${name}"... '
+    jool_siit file check ${siitConf name}
+    printf 'Ok.\n'; touch "$out"
+  '';
 
 in
 
 {
-  ###### interface
-
   options = {
     networking.jool.enable = lib.mkOption {
       type = lib.types.bool;
@@ -64,157 +134,146 @@ in
         NAT64, analogous to the IPv4 NAPT. Refer to the upstream
         [documentation](https://nicmx.github.io/Jool/en/intro-xlat.html) for
         the supported modes of translation and how to configure them.
+
+        Enabling this option will install the Jool kernel module and the
+        command line tools for controlling it.
       '';
     };
 
-    networking.jool.nat64.enable = lib.mkEnableOption (lib.mdDoc "a NAT64 instance of Jool.");
-    networking.jool.nat64.config = lib.mkOption {
-      type = configFormat.type;
-      default = defaultNat64;
+    networking.jool.nat64 = lib.mkOption {
+      type = lib.types.attrsOf nat64Options;
+      default = { };
       example = lib.literalExpression ''
         {
-          # custom NAT64 prefix
-          global.pool6 = "2001:db8:64::/96";
-
-          # Port forwarding
-          bib = [
-            { # SSH 192.0.2.16 → 2001:db8:a::1
-              "protocol"     = "TCP";
-              "ipv4 address" = "192.0.2.16#22";
-              "ipv6 address" = "2001:db8:a::1#22";
-            }
-            { # DNS (TCP) 192.0.2.16 → 2001:db8:a::2
-              "protocol"     = "TCP";
-              "ipv4 address" = "192.0.2.16#53";
-              "ipv6 address" = "2001:db8:a::2#53";
-            }
-            { # DNS (UDP) 192.0.2.16 → 2001:db8:a::2
-              "protocol" = "UDP";
-              "ipv4 address" = "192.0.2.16#53";
-              "ipv6 address" = "2001:db8:a::2#53";
-            }
-          ];
-
-          pool4 = [
-            # Ports for dynamic translation
-            { protocol =  "TCP";  prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
-            { protocol =  "UDP";  prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
-            { protocol = "ICMP";  prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
-
-            # Ports for static BIB entries
-            { protocol =  "TCP";  prefix = "192.0.2.16/32"; "port range" = "22"; }
-            { protocol =  "UDP";  prefix = "192.0.2.16/32"; "port range" = "53"; }
-          ];
+          default = {
+            # custom NAT64 prefix
+            global.pool6 = "2001:db8:64::/96";
+
+            # Port forwarding
+            bib = [
+              { # SSH 192.0.2.16 → 2001:db8:a::1
+                "protocol"     = "TCP";
+                "ipv4 address" = "192.0.2.16#22";
+                "ipv6 address" = "2001:db8:a::1#22";
+              }
+              { # DNS (TCP) 192.0.2.16 → 2001:db8:a::2
+                "protocol"     = "TCP";
+                "ipv4 address" = "192.0.2.16#53";
+                "ipv6 address" = "2001:db8:a::2#53";
+              }
+              { # DNS (UDP) 192.0.2.16 → 2001:db8:a::2
+                "protocol" = "UDP";
+                "ipv4 address" = "192.0.2.16#53";
+                "ipv6 address" = "2001:db8:a::2#53";
+              }
+            ];
+
+            pool4 = [
+              # Port ranges for dynamic translation
+              { protocol =  "TCP";  prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
+              { protocol =  "UDP";  prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
+              { protocol = "ICMP";  prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
+
+              # Ports for static BIB entries
+              { protocol =  "TCP";  prefix = "192.0.2.16/32"; "port range" = "22"; }
+              { protocol =  "UDP";  prefix = "192.0.2.16/32"; "port range" = "53"; }
+            ];
+          };
         }
       '';
       description = lib.mdDoc ''
-        The configuration of a stateful NAT64 instance of Jool managed through
-        NixOS. See https://nicmx.github.io/Jool/en/config-atomic.html for the
-        available options.
+        Definitions of NAT64 instances of Jool.
+        See the
+        [documentation](https://nicmx.github.io/Jool/en/config-atomic.html) for
+        the available options. Also check out the
+        [tutorial](https://nicmx.github.io/Jool/en/run-nat64.html) for an
+        introduction to NAT64 and how to troubleshoot the setup.
+
+        The attribute name defines the name of the instance, with the main one
+        being `default`: this can be accessed from the command line without
+        specifying the name with `-i`.
 
         ::: {.note}
-        Existing or more instances created manually will not interfere with the
-        NixOS instance, provided the respective `pool4` addresses and port
-        ranges are not overlapping.
+        Instances created imperatively from the command line will not interfere
+        with the NixOS instances, provided the respective `pool4` addresses and
+        port ranges are not overlapping.
         :::
 
         ::: {.warning}
-        Changes to the NixOS instance performed via `jool instance nixos-nat64`
-        are applied correctly but will be lost after restarting
-        `jool-nat64.service`.
+        Changes to an instance performed via `jool -i <name>` are applied
+        correctly but will be lost after restarting the respective
+        `jool-nat64-<name>.service`.
         :::
       '';
     };
 
-    networking.jool.siit.enable = lib.mkEnableOption (lib.mdDoc "a SIIT instance of Jool.");
-    networking.jool.siit.config = lib.mkOption {
-      type = configFormat.type;
-      default = defaultSiit;
+    networking.jool.siit = lib.mkOption {
+      type = lib.types.attrsOf siitOptions;
+      default = { };
       example = lib.literalExpression ''
         {
-          # Maps any IPv4 address x.y.z.t to 2001:db8::x.y.z.t and v.v.
-          pool6 = "2001:db8::/96";
-
-          # Explicit address mappings
-          eamt = [
-            # 2001:db8:1:: ←→ 192.0.2.0
-            { "ipv6 prefix": "2001:db8:1::/128", "ipv4 prefix": "192.0.2.0" }
-            # 2001:db8:1::x ←→ 198.51.100.x
-            { "ipv6 prefix": "2001:db8:2::/120", "ipv4 prefix": "198.51.100.0/24" }
-          ]
+          default = {
+            # Maps any IPv4 address x.y.z.t to 2001:db8::x.y.z.t and v.v.
+            global.pool6 = "2001:db8::/96";
+
+            # Explicit address mappings
+            eamt = [
+              # 2001:db8:1:: ←→ 192.0.2.0
+              { "ipv6 prefix" = "2001:db8:1::/128"; "ipv4 prefix" = "192.0.2.0"; }
+              # 2001:db8:1::x ←→ 198.51.100.x
+              { "ipv6 prefix" = "2001:db8:2::/120"; "ipv4 prefix" = "198.51.100.0/24"; }
+            ];
+          };
         }
       '';
       description = lib.mdDoc ''
-        The configuration of a SIIT instance of Jool managed through
-        NixOS. See https://nicmx.github.io/Jool/en/config-atomic.html for the
-        available options.
+        Definitions of SIIT instances of Jool.
+        See the
+        [documentation](https://nicmx.github.io/Jool/en/config-atomic.html) for
+        the available options. Also check out the
+        [tutorial](https://nicmx.github.io/Jool/en/run-vanilla.html) for an
+        introduction to SIIT and how to troubleshoot the setup.
+
+        The attribute name defines the name of the instance, with the main one
+        being `default`: this can be accessed from the command line without
+        specifying the name with `-i`.
 
         ::: {.note}
-        Existing or more instances created manually will not interfere with the
-        NixOS instance, provided the respective `EAMT` address mappings are not
-        overlapping.
+        Instances created imperatively from the command line will not interfere
+        with the NixOS instances, provided the respective EAMT address
+        mappings are not overlapping.
         :::
 
         ::: {.warning}
-        Changes to the NixOS instance performed via `jool instance nixos-siit`
-        are applied correctly but will be lost after restarting
-        `jool-siit.service`.
+        Changes to an instance performed via `jool -i <name>` are applied
+        correctly but will be lost after restarting the respective
+        `jool-siit-<name>.service`.
         :::
       '';
     };
 
   };
 
-  ###### implementation
-
   config = lib.mkIf cfg.enable {
-    environment.systemPackages = [ jool-cli ];
+    # Install kernel module and cli tools
     boot.extraModulePackages = [ jool ];
+    environment.systemPackages = [ jool-cli ];
 
-    systemd.services.jool-nat64 = lib.mkIf cfg.nat64.enable {
-      description = "Jool, NAT64 setup";
-      documentation = [ "https://nicmx.github.io/Jool/en/documentation.html" ];
-      after = [ "network.target" ];
-      wantedBy = [ "multi-user.target" ];
-      reloadIfChanged = true;
-      serviceConfig = {
-        Type = "oneshot";
-        RemainAfterExit = true;
-        ExecStartPre = "${pkgs.kmod}/bin/modprobe jool";
-        ExecStart    = "${jool-cli}/bin/jool file handle ${nat64Conf}";
-        ExecStop     = "${jool-cli}/bin/jool -f ${nat64Conf} instance remove";
-      } // hardening;
-    };
-
-    systemd.services.jool-siit = lib.mkIf cfg.siit.enable {
-      description = "Jool, SIIT setup";
-      documentation = [ "https://nicmx.github.io/Jool/en/documentation.html" ];
-      after = [ "network.target" ];
-      wantedBy = [ "multi-user.target" ];
-      reloadIfChanged = true;
-      serviceConfig = {
-        Type = "oneshot";
-        RemainAfterExit = true;
-        ExecStartPre = "${pkgs.kmod}/bin/modprobe jool_siit";
-        ExecStart    = "${jool-cli}/bin/jool_siit file handle ${siitConf}";
-        ExecStop     = "${jool-cli}/bin/jool_siit -f ${siitConf} instance remove";
-      } // hardening;
-    };
-
-    system.checks = lib.singleton (pkgs.runCommand "jool-validated" {
-      nativeBuildInputs = [ pkgs.buildPackages.jool-cli ];
-      preferLocalBuild = true;
-    } ''
-      printf 'Validating Jool configuration... '
-      ${lib.optionalString cfg.siit.enable "jool_siit file check ${siitConf}"}
-      ${lib.optionalString cfg.nat64.enable "jool file check ${nat64Conf}"}
-      printf 'ok\n'
-      touch "$out"
-    '');
-
-    networking.jool.nat64.config = mkDefaultAttrs defaultNat64;
-    networking.jool.siit.config  = mkDefaultAttrs defaultSiit;
+    # Install services for each instance
+    systemd.services = lib.mkMerge
+      (lib.mapAttrsToList makeNat64Unit cfg.nat64 ++
+       lib.mapAttrsToList makeSiitUnit cfg.siit);
 
+    # Check the configuration of each instance
+    system.checks = lib.optional (cfg.nat64 != {} || cfg.siit != {})
+      (pkgs.runCommand "jool-validated"
+        {
+          nativeBuildInputs = with pkgs.buildPackages; [ jool-cli ];
+          preferLocalBuild = true;
+        }
+        (lib.concatStrings
+          (lib.mapAttrsToList checkNat64 cfg.nat64 ++
+           lib.mapAttrsToList checkSiit cfg.siit)));
   };
 
   meta.maintainers = with lib.maintainers; [ rnhmjoj ];
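
With the per-instance rewrite, a single NAT64 instance named `default` can be declared as below; the values restate the option defaults shown above.

    # Sketch: one NAT64 instance, reachable from the CLI without -i.
    networking.jool = {
      enable = true;
      nat64.default = {
        framework = "netfilter";
        global.pool6 = "64:ff9b::/96";
      };
    };
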
diff --git a/nixos/modules/services/networking/nftables.nix b/nixos/modules/services/networking/nftables.nix
index 0e4cd6fa1503b..47159ade328c8 100644
--- a/nixos/modules/services/networking/nftables.nix
+++ b/nixos/modules/services/networking/nftables.nix
@@ -70,6 +70,26 @@ in
       '';
     };
 
+    networking.nftables.checkRulesetRedirects = mkOption {
+      type = types.addCheck (types.attrsOf types.path) (attrs: all types.path.check (attrNames attrs));
+      default = {
+        "/etc/hosts" = config.environment.etc.hosts.source;
+        "/etc/protocols" = config.environment.etc.protocols.source;
+        "/etc/services" = config.environment.etc.services.source;
+      };
+      defaultText = literalExpression ''
+        {
+          "/etc/hosts" = config.environment.etc.hosts.source;
+          "/etc/protocols" = config.environment.etc.protocols.source;
+          "/etc/services" = config.environment.etc.services.source;
+        }
+      '';
+      description = mdDoc ''
+        Set of paths that should be intercepted and rewritten while checking the ruleset
+        using `pkgs.buildPackages.libredirect`.
+      '';
+    };
+
     networking.nftables.preCheckRuleset = mkOption {
       type = types.lines;
       default = "";
@@ -282,7 +302,7 @@ in
             cp $out ruleset.conf
             sed 's|include "${deletionsScriptVar}"||' -i ruleset.conf
             ${cfg.preCheckRuleset}
-            export NIX_REDIRECTS=/etc/protocols=${pkgs.buildPackages.iana-etc}/etc/protocols:/etc/services=${pkgs.buildPackages.iana-etc}/etc/services
+            export NIX_REDIRECTS=${escapeShellArg (concatStringsSep ":" (mapAttrsToList (n: v: "${n}=${v}") cfg.checkRulesetRedirects))}
             LD_PRELOAD="${pkgs.buildPackages.libredirect}/lib/libredirect.so ${pkgs.buildPackages.lklWithFirewall.lib}/lib/liblkl-hijack.so" \
               ${pkgs.buildPackages.nftables}/bin/nft --check --file ruleset.conf
           '';
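
Note that defining `checkRulesetRedirects` replaces the default set rather than merging with it, so a configuration adding a path has to restate the stock entries; the extra file below is a placeholder.

    # Sketch: keep the stock redirects and add one local file used by the ruleset.
    networking.nftables.checkRulesetRedirects = {
      "/etc/hosts" = config.environment.etc.hosts.source;
      "/etc/protocols" = config.environment.etc.protocols.source;
      "/etc/services" = config.environment.etc.services.source;
      "/etc/ethertypes" = ./ethertypes;   # placeholder local file
    };
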
diff --git a/nixos/modules/services/networking/ssh/sshd.nix b/nixos/modules/services/networking/ssh/sshd.nix
index e75239e059d3b..702423ef09cd0 100644
--- a/nixos/modules/services/networking/ssh/sshd.nix
+++ b/nixos/modules/services/networking/ssh/sshd.nix
@@ -33,8 +33,7 @@ let
     ${cfg.extraConfig}
     EOL
 
-    ssh-keygen -q -f mock-hostkey -N ""
-    sshd -t -f $out -h mock-hostkey
+    sshd -G -f $out
   '';
 
   cfg  = config.services.openssh;
diff --git a/nixos/modules/services/networking/wstunnel.nix b/nixos/modules/services/networking/wstunnel.nix
index 067d5df487255..3c3ecc3e04d79 100644
--- a/nixos/modules/services/networking/wstunnel.nix
+++ b/nixos/modules/services/networking/wstunnel.nix
@@ -86,12 +86,12 @@ let
         description = mdDoc "Address and port to listen on. Setting the port to a value below 1024 will also give the process the required `CAP_NET_BIND_SERVICE` capability.";
         type = types.submodule hostPortSubmodule;
         default = {
-          address = "0.0.0.0";
+          host = "0.0.0.0";
           port = if config.enableHTTPS then 443 else 80;
         };
         defaultText = literalExpression ''
           {
-            address = "0.0.0.0";
+            host = "0.0.0.0";
             port = if enableHTTPS then 443 else 80;
           }
         '';
diff --git a/nixos/modules/services/web-apps/galene.nix b/nixos/modules/services/web-apps/galene.nix
index 747b85f94c65a..81fed8a0b99a5 100644
--- a/nixos/modules/services/web-apps/galene.nix
+++ b/nixos/modules/services/web-apps/galene.nix
@@ -186,7 +186,7 @@ in
           ProtectSystem = "strict";
           ReadWritePaths = cfg.recordingsDir;
           RemoveIPC = true;
-          RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+          RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_NETLINK" ];
           RestrictNamespaces = true;
           RestrictRealtime = true;
           RestrictSUIDSGID = true;
diff --git a/nixos/modules/services/web-apps/grocy.nix b/nixos/modules/services/web-apps/grocy.nix
index b0d3c44cea81c..4d1084e295ff3 100644
--- a/nixos/modules/services/web-apps/grocy.nix
+++ b/nixos/modules/services/web-apps/grocy.nix
@@ -115,9 +115,9 @@ in {
       user = "grocy";
       group = "nginx";
 
-      # PHP 8.1 is the only version which is supported/tested by upstream:
-      # https://github.com/grocy/grocy/blob/v4.0.0/README.md#platform-support
-      phpPackage = pkgs.php81;
+      # PHP 8.1 and 8.2 are the only versions which are supported/tested by upstream:
+      # https://github.com/grocy/grocy/blob/v4.0.2/README.md#platform-support
+      phpPackage = pkgs.php82;
 
       inherit (cfg.phpfpm) settings;
 
@@ -130,6 +130,16 @@ in {
       };
     };
 
+    # After an update of grocy, the viewcache needs to be deleted; otherwise grocy will not work:
+    # https://github.com/grocy/grocy#how-to-update
+    systemd.services.grocy-setup = {
+      wantedBy = [ "multi-user.target" ];
+      before = [ "phpfpm-grocy.service" ];
+      script = ''
+        rm -rf ${cfg.dataDir}/viewcache/*
+      '';
+    };
+
     services.nginx = {
       enable = true;
       virtualHosts."${cfg.hostName}" = mkMerge [
diff --git a/nixos/modules/services/web-apps/healthchecks.nix b/nixos/modules/services/web-apps/healthchecks.nix
index b3fdb681e2f30..b92525075541a 100644
--- a/nixos/modules/services/web-apps/healthchecks.nix
+++ b/nixos/modules/services/web-apps/healthchecks.nix
@@ -1,16 +1,16 @@
-{ config, lib, pkgs, buildEnv, ... }:
+{ config, lib, options, pkgs, buildEnv, ... }:
 
 with lib;
 
 let
   defaultUser = "healthchecks";
   cfg = config.services.healthchecks;
+  opt = options.services.healthchecks;
   pkg = cfg.package;
   boolToPython = b: if b then "True" else "False";
   environment = {
     PYTHONPATH = pkg.pythonPath;
     STATIC_ROOT = cfg.dataDir + "/static";
-    DB_NAME = "${cfg.dataDir}/healthchecks.sqlite";
   } // cfg.settings;
 
   environmentFile = pkgs.writeText "healthchecks-environment" (lib.generators.toKeyValue { } environment);
@@ -98,17 +98,24 @@ in
       description = lib.mdDoc ''
         Environment variables which are read by healthchecks `(local)_settings.py`.
 
-        Settings which are explicitly covered in options bewlow, are type-checked and/or transformed
+        Settings which are explicitly covered in options below are type-checked and/or transformed
         before being added to the environment; everything else is passed as a string.
 
         See <https://healthchecks.io/docs/self_hosted_configuration/>
         for a full documentation of settings.
 
-        We add two variables to this list inside the packages `local_settings.py.`
-        - STATIC_ROOT to set a state directory for dynamically generated static files.
-        - SECRET_KEY_FILE to read SECRET_KEY from a file at runtime and keep it out of /nix/store.
+        We add additional variables to this list inside the package's `local_settings.py`:
+        - `STATIC_ROOT` to set a state directory for dynamically generated static files.
+        - `SECRET_KEY_FILE` to read `SECRET_KEY` from a file at runtime and keep it out of
+          /nix/store.
+        - `_FILE` variants for several values that hold sensitive information in
+          [Healthchecks configuration](https://healthchecks.io/docs/self_hosted_configuration/) so
+          that they also can be read from a file and kept out of /nix/store. To see which values
+          have support for a `_FILE` variant, run:
+          - `nix-instantiate --eval --expr '(import <nixpkgs> {}).healthchecks.secrets'`
+          - or `nix eval 'nixpkgs#healthchecks.secrets'` if the flake support has been enabled.
       '';
-      type = types.submodule {
+      type = types.submodule (settings: {
         freeformType = types.attrsOf types.str;
         options = {
           ALLOWED_HOSTS = lib.mkOption {
@@ -143,8 +150,28 @@ in
             '';
             apply = boolToPython;
           };
+
+          DB = mkOption {
+            type = types.enum [ "sqlite" "postgres" "mysql" ];
+            default = "sqlite";
+            description = lib.mdDoc "Database engine to use.";
+          };
+
+          DB_NAME = mkOption {
+            type = types.str;
+            default =
+              if settings.config.DB == "sqlite"
+              then "${cfg.dataDir}/healthchecks.sqlite"
+              else "hc";
+            defaultText = lib.literalExpression ''
+              if config.${settings.options.DB} == "sqlite"
+              then "''${config.${opt.dataDir}}/healthchecks.sqlite"
+              else "hc"
+            '';
+            description = lib.mdDoc "Database name.";
+          };
         };
-      };
+      });
     };
   };
 
@@ -168,7 +195,7 @@ in
           StateDirectoryMode = mkIf (cfg.dataDir == "/var/lib/healthchecks") "0750";
         };
       in
-        {
+      {
         healthchecks-migration = {
           description = "Healthchecks migrations";
           wantedBy = [ "healthchecks.target" ];
diff --git a/nixos/modules/services/web-apps/meme-bingo-web.nix b/nixos/modules/services/web-apps/meme-bingo-web.nix
new file mode 100644
index 0000000000000..cb864321ef276
--- /dev/null
+++ b/nixos/modules/services/web-apps/meme-bingo-web.nix
@@ -0,0 +1,93 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mkEnableOption mkIf mkOption mdDoc types literalExpression;
+
+  cfg = config.services.meme-bingo-web;
+in {
+  options = {
+    services.meme-bingo-web = {
+      enable = mkEnableOption (mdDoc ''
+        A web app for the meme bingo, rendered entirely on the web server and made interactive with forms.
+
+        Note: The application's author recommends running meme-bingo-web behind a reverse proxy for SSL and HTTP/3.
+      '');
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.meme-bingo-web;
+        defaultText = literalExpression "pkgs.meme-bingo-web";
+        description = mdDoc "meme-bingo-web package to use.";
+      };
+
+      baseUrl = mkOption {
+        description = mdDoc ''
+          URL to be used for the HTML <base> element on all HTML routes.
+        '';
+        type = types.str;
+        default = "http://localhost:41678/";
+        example = "https://bingo.example.com/";
+      };
+      port = mkOption {
+        description = mdDoc ''
+          Port to be used for the web server.
+        '';
+        type = types.port;
+        default = 41678;
+        example = 21035;
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.meme-bingo-web = {
+      description = "A web app for playing meme bingos.";
+      wantedBy = [ "multi-user.target" ];
+
+      environment = {
+        MEME_BINGO_BASE = cfg.baseUrl;
+        MEME_BINGO_PORT = toString cfg.port;
+      };
+      path = [ cfg.package ];
+
+      serviceConfig = {
+        User = "meme-bingo-web";
+        Group = "meme-bingo-web";
+
+        DynamicUser = true;
+
+        ExecStart = "${cfg.package}/bin/meme-bingo-web";
+
+        Restart = "always";
+        RestartSec = 1;
+
+        # Hardening
+        CapabilityBoundingSet = [ "" ];
+        DeviceAllow = [ "/dev/random" ];
+        LockPersonality = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectSystem = "strict";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
+        UMask = "0077";
+        RestrictSUIDSGID = true;
+        RemoveIPC = true;
+        NoNewPrivileges = true;
+        MemoryDenyWriteExecute = true;
+      };
+    };
+  };
+}
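
A sketch built from the option examples above, assuming TLS termination happens in a separately configured reverse proxy.

    # Sketch: non-default port behind an externally managed reverse proxy.
    services.meme-bingo-web = {
      enable = true;
      port = 21035;
      baseUrl = "https://bingo.example.com/";
    };
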
diff --git a/nixos/modules/services/web-apps/mobilizon.nix b/nixos/modules/services/web-apps/mobilizon.nix
new file mode 100644
index 0000000000000..4e796e2bc80cf
--- /dev/null
+++ b/nixos/modules/services/web-apps/mobilizon.nix
@@ -0,0 +1,442 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.mobilizon;
+
+  user = "mobilizon";
+  group = "mobilizon";
+
+  settingsFormat = pkgs.formats.elixirConf { elixir = pkgs.elixir_1_14; };
+
+  configFile = settingsFormat.generate "mobilizon-config.exs" cfg.settings;
+
+  # Make a package containing launchers with the correct environment, instead of
+  # setting it with systemd services, so that the user can also use them without
+  # trouble.
+  launchers = pkgs.stdenv.mkDerivation rec {
+    pname = "${cfg.package.pname}-launchers";
+    inherit (cfg.package) version;
+
+    src = cfg.package;
+
+    nativeBuildInputs = with pkgs; [ makeWrapper ];
+
+    dontBuild = true;
+
+    installPhase = ''
+      mkdir -p $out/bin
+
+      makeWrapper \
+        $src/bin/mobilizon \
+        $out/bin/mobilizon \
+        --run '. ${secretEnvFile}' \
+        --set MOBILIZON_CONFIG_PATH "${configFile}" \
+        --set-default RELEASE_TMP "/tmp"
+
+      makeWrapper \
+        $src/bin/mobilizon_ctl \
+        $out/bin/mobilizon_ctl \
+        --run '. ${secretEnvFile}' \
+        --set MOBILIZON_CONFIG_PATH "${configFile}" \
+        --set-default RELEASE_TMP "/tmp"
+    '';
+  };
+
+  repoSettings = cfg.settings.":mobilizon"."Mobilizon.Storage.Repo";
+  instanceSettings = cfg.settings.":mobilizon".":instance";
+
+  isLocalPostgres = repoSettings.socket_dir != null;
+
+  dbUser = if repoSettings.username != null then repoSettings.username else "mobilizon";
+
+  postgresql = config.services.postgresql.package;
+  postgresqlSocketDir = "/var/run/postgresql";
+
+  secretEnvFile = "/var/lib/mobilizon/secret-env.sh";
+in
+{
+  options = {
+    services.mobilizon = {
+      enable = mkEnableOption
+        "Mobilizon federated organization and mobilization platform";
+
+      nginx.enable = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = ''
+          Whether an <literal>nginx</literal> virtual host should be
+          set up to serve Mobilizon.
+        '';
+      };
+
+      package = mkPackageOptionMD pkgs "mobilizon" { };
+
+      settings = mkOption {
+        type =
+          let
+            elixirTypes = settingsFormat.lib.types;
+          in
+          types.submodule {
+            freeformType = settingsFormat.type;
+
+            options = {
+              ":mobilizon" = {
+
+                "Mobilizon.Web.Endpoint" = {
+                  url.host = mkOption {
+                    type = elixirTypes.str;
+                    defaultText = literalExpression ''
+                      ''${settings.":mobilizon".":instance".hostname}
+                    '';
+                    description = ''
+                      Your instance's hostname for generating URLs throughout the app
+                    '';
+                  };
+
+                  http = {
+                    port = mkOption {
+                      type = elixirTypes.port;
+                      default = 4000;
+                      description = ''
+                        The port to run the server on
+                      '';
+                    };
+                    ip = mkOption {
+                      type = elixirTypes.tuple;
+                      default = settingsFormat.lib.mkTuple [ 0 0 0 0 0 0 0 1 ];
+                      description = ''
+                        The IP address to listen on. Defaults to [::1] notated as a byte tuple.
+                      '';
+                    };
+                  };
+
+                  has_reverse_proxy = mkOption {
+                    type = elixirTypes.bool;
+                    default = true;
+                    description = ''
+                      Whether you use a reverse proxy
+                    '';
+                  };
+                };
+
+                ":instance" = {
+                  name = mkOption {
+                    type = elixirTypes.str;
+                    description = ''
+                      The fallback instance name if not configured in the admin UI
+                    '';
+                  };
+
+                  hostname = mkOption {
+                    type = elixirTypes.str;
+                    description = ''
+                      Your instance's hostname
+                    '';
+                  };
+
+                  email_from = mkOption {
+                    type = elixirTypes.str;
+                    defaultText = literalExpression ''
+                      noreply@''${settings.":mobilizon".":instance".hostname}
+                    '';
+                    description = ''
+                      The email for the From: header in emails
+                    '';
+                  };
+
+                  email_reply_to = mkOption {
+                    type = elixirTypes.str;
+                    defaultText = literalExpression ''
+                      ''${email_from}
+                    '';
+                    description = ''
+                      The email for the Reply-To: header in emails
+                    '';
+                  };
+                };
+
+                "Mobilizon.Storage.Repo" = {
+                  socket_dir = mkOption {
+                    type = types.nullOr elixirTypes.str;
+                    default = postgresqlSocketDir;
+                    description = ''
+                      Path to the postgres socket directory.
+
+                      Set this to null if you want to connect to a remote database.
+
+                      If non-null, the local PostgreSQL server will be configured with
+                      the configured database, permissions, and required extensions.
+
+                      If connecting to a remote database, please follow the
+                      instructions on how to setup your database:
+                      <link xlink:href="https://docs.joinmobilizon.org/administration/install/release/#database-setup"/>
+                    '';
+                  };
+
+                  username = mkOption {
+                    type = types.nullOr elixirTypes.str;
+                    default = user;
+                    description = ''
+                      User used to connect to the database
+                    '';
+                  };
+
+                  database = mkOption {
+                    type = types.nullOr elixirTypes.str;
+                    default = "mobilizon_prod";
+                    description = ''
+                      Name of the database
+                    '';
+                  };
+                };
+              };
+            };
+          };
+        default = { };
+
+        description = ''
+          Mobilizon Elixir configuration; see
+          <link xlink:href="https://docs.joinmobilizon.org/administration/configure/reference/"/>
+          for supported values.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = cfg.nginx.enable -> (cfg.settings.":mobilizon"."Mobilizon.Web.Endpoint".http.ip == settingsFormat.lib.mkTuple [ 0 0 0 0 0 0 0 1 ]);
+        message = "Setting the IP mobilizon listens on is only possible when the nginx config is not used, as it is hardcoded there.";
+      }
+    ];
+
+    services.mobilizon.settings = {
+      ":mobilizon" = {
+        "Mobilizon.Web.Endpoint" = {
+          server = true;
+          url.host = mkDefault instanceSettings.hostname;
+          secret_key_base =
+            settingsFormat.lib.mkGetEnv { envVariable = "MOBILIZON_INSTANCE_SECRET"; };
+        };
+
+        "Mobilizon.Web.Auth.Guardian".secret_key =
+          settingsFormat.lib.mkGetEnv { envVariable = "MOBILIZON_AUTH_SECRET"; };
+
+        ":instance" = {
+          registrations_open = mkDefault false;
+          demo = mkDefault false;
+          email_from = mkDefault "noreply@${instanceSettings.hostname}";
+          email_reply_to = mkDefault instanceSettings.email_from;
+        };
+
+        "Mobilizon.Storage.Repo" = {
+          # Forced by upstream since it uses PostgreSQL-specific extensions
+          adapter = settingsFormat.lib.mkAtom "Ecto.Adapters.Postgres";
+          pool_size = mkDefault 10;
+        };
+      };
+
+      ":tzdata".":data_dir" = "/var/lib/mobilizon/tzdata/";
+    };
+
+    # This somewhat follows upstream's systemd service here:
+    # https://framagit.org/framasoft/mobilizon/-/blob/master/support/systemd/mobilizon.service
+    systemd.services.mobilizon = {
+      description = "Mobilizon federated organization and mobilization platform";
+
+      wantedBy = [ "multi-user.target" ];
+
+      path = with pkgs; [
+        gawk
+        imagemagick
+        libwebp
+        file
+
+        # Optional:
+        gifsicle
+        jpegoptim
+        optipng
+        pngquant
+      ];
+
+      serviceConfig = {
+        ExecStartPre = "${launchers}/bin/mobilizon_ctl migrate";
+        ExecStart = "${launchers}/bin/mobilizon start";
+        ExecStop = "${launchers}/bin/mobilizon stop";
+
+        User = user;
+        Group = group;
+
+        StateDirectory = "mobilizon";
+
+        Restart = "on-failure";
+
+        PrivateTmp = true;
+        ProtectSystem = "full";
+        NoNewPrivileges = true;
+
+        ReadWritePaths = mkIf isLocalPostgres postgresqlSocketDir;
+      };
+    };
+
+    # Create the needed secrets before running Mobilizon, so that they are not
+    # in the nix store
+    #
+    # Since some of these tasks are quite common for Elixir projects (COOKIE for
+    # every BEAM project, Phoenix and Guardian are also quite common), this
+    # service could be abstracted in the future, and used by other Elixir
+    # projects.
+    systemd.services.mobilizon-setup-secrets = {
+      description = "Mobilizon setup secrets";
+      before = [ "mobilizon.service" ];
+      wantedBy = [ "mobilizon.service" ];
+
+      script =
+        let
+          # Taken from here:
+          # https://framagit.org/framasoft/mobilizon/-/blob/1.0.7/lib/mix/tasks/mobilizon/instance.ex#L132-133
+          genSecret =
+            "IO.puts(:crypto.strong_rand_bytes(64)" +
+            "|> Base.encode64()" +
+            "|> binary_part(0, 64))";
+
+          # Taken from here:
+          # https://github.com/elixir-lang/elixir/blob/v1.11.3/lib/mix/lib/mix/release.ex#L499
+          genCookie = "IO.puts(Base.encode32(:crypto.strong_rand_bytes(32)))";
+
+          evalElixir = str: ''
+            ${pkgs.elixir_1_14}/bin/elixir --eval '${str}'
+          '';
+        in
+        ''
+          set -euxo pipefail
+
+          if [ ! -f "${secretEnvFile}" ]; then
+            install -m 600 /dev/null "${secretEnvFile}"
+            cat > "${secretEnvFile}" <<EOF
+          # This file was automatically generated by mobilizon-setup-secrets.service
+          export MOBILIZON_AUTH_SECRET='$(${evalElixir genSecret})'
+          export MOBILIZON_INSTANCE_SECRET='$(${evalElixir genSecret})'
+          export RELEASE_COOKIE='$(${evalElixir genCookie})'
+          EOF
+          fi
+        '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = user;
+        Group = group;
+        StateDirectory = "mobilizon";
+      };
+    };
+
+    # Add the required PostgreSQL extensions to the local PostgreSQL server,
+    # if local PostgreSQL is configured.
+    systemd.services.mobilizon-postgresql = mkIf isLocalPostgres {
+      description = "Mobilizon PostgreSQL setup";
+
+      after = [ "postgresql.service" ];
+      before = [ "mobilizon.service" "mobilizon-setup-secrets.service" ];
+      wantedBy = [ "mobilizon.service" ];
+
+      path = [ postgresql ];
+
+      # Taken from here:
+      # https://framagit.org/framasoft/mobilizon/-/blob/1.1.0/priv/templates/setup_db.eex
+      script =
+        ''
+          psql "${repoSettings.database}" -c "\
+            CREATE EXTENSION IF NOT EXISTS postgis; \
+            CREATE EXTENSION IF NOT EXISTS pg_trgm; \
+            CREATE EXTENSION IF NOT EXISTS unaccent;"
+        '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = config.services.postgresql.superUser;
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d /var/lib/mobilizon/uploads/exports/csv 700 mobilizon mobilizon - -"
+      "Z /var/lib/mobilizon 700 mobilizon mobilizon - -"
+    ];
+
+    services.postgresql = mkIf isLocalPostgres {
+      enable = true;
+      ensureDatabases = [ repoSettings.database ];
+      ensureUsers = [
+        {
+          name = dbUser;
+          ensurePermissions = {
+            "DATABASE \"${repoSettings.database}\"" = "ALL PRIVILEGES";
+          };
+        }
+      ];
+      extraPlugins = with postgresql.pkgs; [ postgis ];
+    };
+
+    # Nginx config taken from support/nginx/mobilizon-release.conf
+    services.nginx =
+      let
+        inherit (cfg.settings.":mobilizon".":instance") hostname;
+        proxyPass = "http://[::1]:"
+          + toString cfg.settings.":mobilizon"."Mobilizon.Web.Endpoint".http.port;
+      in
+      lib.mkIf cfg.nginx.enable {
+        enable = true;
+        virtualHosts."${hostname}" = {
+          enableACME = lib.mkDefault true;
+          forceSSL = lib.mkDefault true;
+          extraConfig = ''
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection "upgrade";
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Proto $scheme;
+          '';
+          locations."/" = {
+            inherit proxyPass;
+          };
+          locations."~ ^/(js|css|img)" = {
+            root = "${cfg.package}/lib/mobilizon-${cfg.package.version}/priv/static";
+            extraConfig = ''
+              etag off;
+              access_log off;
+              add_header Cache-Control "public, max-age=31536000, immutable";
+            '';
+          };
+          locations."~ ^/(media|proxy)" = {
+            inherit proxyPass;
+            extraConfig = ''
+              etag off;
+              access_log off;
+              add_header Cache-Control "public, max-age=31536000, immutable";
+            '';
+          };
+        };
+      };
+
+    users.users.${user} = {
+      description = "Mobilizon daemon user";
+      group = group;
+      isSystemUser = true;
+    };
+
+    users.groups.${group} = { };
+
+    # So that we have the `mobilizon` and `mobilizon_ctl` commands.
+    # The `mobilizon remote` command is useful for dropping a shell into the
+    # running Mobilizon instance, and `mobilizon_ctl` is used for common
+    # management tasks (e.g. adding users).
+    environment.systemPackages = [ launchers ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ minijackson erictapen ];
+}
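A minimal usage sketch for this module (assuming the usual `enable` flag; the hostname and port values are hypothetical, while the `settings` paths match the ones referenced in the nginx block above):

  {
    services.mobilizon = {
      enable = true;
      nginx.enable = true;
      settings.":mobilizon" = {
        ":instance".hostname = "events.example.org";       # hypothetical
        "Mobilizon.Web.Endpoint".http.port = 4000;         # hypothetical
      };
    };
  }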
diff --git a/nixos/modules/services/web-apps/plausible.nix b/nixos/modules/services/web-apps/plausible.nix
index 911daa53e6587..4b308d2ee56ee 100644
--- a/nixos/modules/services/web-apps/plausible.nix
+++ b/nixos/modules/services/web-apps/plausible.nix
@@ -248,6 +248,7 @@ in {
             # setup
             ${cfg.package}/createdb.sh
             ${cfg.package}/migrate.sh
+            ${cfg.package}/bin/plausible eval "(Plausible.Release.prepare() ; Plausible.Auth.create_user(\"$ADMIN_USER_NAME\", \"$ADMIN_USER_EMAIL\", \"$ADMIN_USER_PWD\"))"
             ${optionalString cfg.adminUser.activate ''
               if ! ${cfg.package}/init-admin.sh | grep 'already exists'; then
                 psql -d plausible <<< "UPDATE users SET email_verified=true;"
diff --git a/nixos/modules/services/web-apps/tt-rss.nix b/nixos/modules/services/web-apps/tt-rss.nix
index 3102e6a469536..592ab253f7daf 100644
--- a/nixos/modules/services/web-apps/tt-rss.nix
+++ b/nixos/modules/services/web-apps/tt-rss.nix
@@ -595,47 +595,9 @@ let
       tt-rss = {
         description = "Tiny Tiny RSS feeds update daemon";
 
-        preStart = let
-          callSql = e:
-              if cfg.database.type == "pgsql" then ''
-                  ${optionalString (cfg.database.password != null) "PGPASSWORD=${cfg.database.password}"} \
-                  ${optionalString (cfg.database.passwordFile != null) "PGPASSWORD=$(cat ${cfg.database.passwordFile})"} \
-                  ${config.services.postgresql.package}/bin/psql \
-                    -U ${cfg.database.user} \
-                    ${optionalString (cfg.database.host != null) "-h ${cfg.database.host} --port ${toString dbPort}"} \
-                    -c '${e}' \
-                    ${cfg.database.name}''
-
-              else if cfg.database.type == "mysql" then ''
-                  echo '${e}' | ${config.services.mysql.package}/bin/mysql \
-                    -u ${cfg.database.user} \
-                    ${optionalString (cfg.database.password != null) "-p${cfg.database.password}"} \
-                    ${optionalString (cfg.database.host != null) "-h ${cfg.database.host} -P ${toString dbPort}"} \
-                    ${cfg.database.name}''
-
-              else "";
-
-        in (optionalString (cfg.database.type == "pgsql") ''
-          exists=$(${callSql "select count(*) > 0 from pg_tables where tableowner = user"} \
-          | tail -n+3 | head -n-2 | sed -e 's/[ \n\t]*//')
-
-          if [ "$exists" == 'f' ]; then
-            ${callSql "\\i ${pkgs.tt-rss}/schema/ttrss_schema_${cfg.database.type}.sql"}
-          else
-            echo 'The database contains some data. Leaving it as it is.'
-          fi;
-        '')
-
-        + (optionalString (cfg.database.type == "mysql") ''
-          exists=$(${callSql "select count(*) > 0 from information_schema.tables where table_schema = schema()"} \
-          | tail -n+2 | sed -e 's/[ \n\t]*//')
-
-          if [ "$exists" == '0' ]; then
-            ${callSql "\\. ${pkgs.tt-rss}/schema/ttrss_schema_${cfg.database.type}.sql"}
-          else
-            echo 'The database contains some data. Leaving it as it is.'
-          fi;
-        '');
+        preStart = ''
+          ${pkgs.php81}/bin/php ${cfg.root}/www/update.php --update-schema
+        '';
 
         serviceConfig = {
           User = "${cfg.user}";
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index fccc31b5116cb..7a7fb4061eea5 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -578,11 +578,13 @@ in
           };
         });
         default = [];
-        example = literalExpression ''[
-          { addr = "10.0.0.12"; proxyProtocol = true; ssl = true; }
-          { addr = "0.0.0.0"; }
-          { addr = "[::0]"; }
-        ]'';
+        example = literalExpression ''
+          [
+            { addr = "10.0.0.12"; proxyProtocol = true; ssl = true; }
+            { addr = "0.0.0.0"; }
+            { addr = "[::0]"; }
+          ]
+        '';
         description = lib.mdDoc ''
           If vhosts do not specify listen, use these addresses by default.
           This option takes precedence over {option}`defaultListenAddresses` and
diff --git a/nixos/modules/services/x11/desktop-managers/budgie.nix b/nixos/modules/services/x11/desktop-managers/budgie.nix
index bee627ec76c01..a4f8bd5051ecc 100644
--- a/nixos/modules/services/x11/desktop-managers/budgie.nix
+++ b/nixos/modules/services/x11/desktop-managers/budgie.nix
@@ -134,6 +134,7 @@ in {
         # Update user directories.
         xdg-user-dirs
       ]
+      ++ lib.optional config.networking.networkmanager.enable pkgs.networkmanagerapplet
       ++ (utils.removePackagesByName [
           cinnamon.nemo
           mate.eom
@@ -192,7 +193,7 @@ in {
     # Required by Budgie Panel plugins and/or Budgie Control Center panels.
     networking.networkmanager.enable = mkDefault true; # for BCC's Network panel.
     programs.nm-applet.enable = config.networking.networkmanager.enable; # Budgie has no Network applet.
-    programs.nm-applet.indicator = false; # Budgie doesn't support AppIndicators.
+    programs.nm-applet.indicator = true; # Budgie uses AppIndicators.
 
     hardware.bluetooth.enable = mkDefault true; # for Budgie's Status Indicator and BCC's Bluetooth panel.
     hardware.pulseaudio.enable = mkDefault true; # for Budgie's Status Indicator and BCC's Sound panel.
diff --git a/nixos/modules/services/x11/window-managers/oroborus.nix b/nixos/modules/services/x11/window-managers/oroborus.nix
deleted file mode 100644
index 654b8708e48fe..0000000000000
--- a/nixos/modules/services/x11/window-managers/oroborus.nix
+++ /dev/null
@@ -1,25 +0,0 @@
-{ config, lib, pkgs, ... }:
-
-with lib;
-
-let
-  cfg = config.services.xserver.windowManager.oroborus;
-in
-{
-  ###### interface
-  options = {
-    services.xserver.windowManager.oroborus.enable = mkEnableOption (lib.mdDoc "oroborus");
-  };
-
-  ###### implementation
-  config = mkIf cfg.enable {
-    services.xserver.windowManager.session = singleton {
-      name = "oroborus";
-      start = ''
-        ${pkgs.oroborus}/bin/oroborus &
-        waitPID=$!
-      '';
-    };
-    environment.systemPackages = [ pkgs.oroborus ];
-  };
-}
diff --git a/nixos/modules/system/boot/binfmt.nix b/nixos/modules/system/boot/binfmt.nix
index fb7afce4a5808..8c9483f01c102 100644
--- a/nixos/modules/system/boot/binfmt.nix
+++ b/nixos/modules/system/boot/binfmt.nix
@@ -279,7 +279,7 @@ in {
           support your new systems.
           Warning: the builder can execute all emulated systems within the same build, which introduces impurities in the case of cross compilation.
         '';
-        type = types.listOf types.str;
+        type = types.listOf (types.enum (builtins.attrNames magics));
       };
     };
   };
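With the stricter type, unknown system names are rejected at evaluation time instead of failing later. A hedged sketch, assuming the option shown in this hunk is `boot.binfmt.emulatedSystems` (the option name is not visible here):

  {
    # entries must be keys of `magics`, e.g. well-known Nix system strings
    boot.binfmt.emulatedSystems = [ "aarch64-linux" "riscv64-linux" ];
  }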
diff --git a/nixos/modules/system/boot/networkd.nix b/nixos/modules/system/boot/networkd.nix
index 6d0afcc57fcc6..238c6670ea0f8 100644
--- a/nixos/modules/system/boot/networkd.nix
+++ b/nixos/modules/system/boot/networkd.nix
@@ -902,6 +902,9 @@ let
           "RelayTarget"
           "RelayAgentCircuitId"
           "RelayAgentRemoteId"
+          "BootServerAddress"
+          "BootServerName"
+          "BootFilename"
         ])
         (assertInt "PoolOffset")
         (assertMinimum "PoolOffset" 0)
@@ -2812,9 +2815,15 @@ let
 
       environment.etc."systemd/networkd.conf" = renderConfig cfg.config;
 
-      systemd.services.systemd-networkd = {
+      systemd.services.systemd-networkd = let
+        isReloadableUnitFileName = unitFileName: strings.hasSuffix ".network" unitFileName;
+        reloadableUnitFiles = attrsets.filterAttrs (k: v: isReloadableUnitFileName k) unitFiles;
+        nonReloadableUnitFiles = attrsets.filterAttrs (k: v: !isReloadableUnitFileName k) unitFiles;
+        unitFileSources = unitFiles: map (x: x.source) (attrValues unitFiles);
+      in {
         wantedBy = [ "multi-user.target" ];
-        restartTriggers = map (x: x.source) (attrValues unitFiles) ++ [
+        reloadTriggers = unitFileSources reloadableUnitFiles;
+        restartTriggers = unitFileSources nonReloadableUnitFiles ++ [
           config.environment.etc."systemd/networkd.conf".source
         ];
         aliases = [ "dbus-org.freedesktop.network1.service" ];
diff --git a/nixos/modules/system/boot/stage-2-init.sh b/nixos/modules/system/boot/stage-2-init.sh
index c0af29e3b9909..5a2133f960e2b 100755
--- a/nixos/modules/system/boot/stage-2-init.sh
+++ b/nixos/modules/system/boot/stage-2-init.sh
@@ -105,7 +105,7 @@ fi
 
 # Required by the activation script
 install -m 0755 -d /etc
-if [ -d "/etc/nixos" ]; then
+if [ ! -h "/etc/nixos" ]; then
     install -m 0755 -d /etc/nixos
 fi
 install -m 01777 -d /tmp
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index ac55461107fb9..b6c3085c4f16f 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -395,7 +395,9 @@ in
       description = lib.mdDoc ''
         The amount of time which can elapse after a reboot has been triggered
         before a watchdog hardware device will automatically reboot the system.
-        Valid time units include "ms", "s", "min", "h", "d", and "w".
+        Valid time units include "ms", "s", "min", "h", "d", and "w". If left
+        `null`, systemd will use its default of `10min`; see also {command}`man
+        5 systemd-system.conf`.
       '';
     };
 
diff --git a/nixos/modules/system/boot/systemd/initrd.nix b/nixos/modules/system/boot/systemd/initrd.nix
index 3f40a5b2dfa0c..5d9fca7a605ee 100644
--- a/nixos/modules/system/boot/systemd/initrd.nix
+++ b/nixos/modules/system/boot/systemd/initrd.nix
@@ -333,6 +333,14 @@ in {
       visible = "shallow";
       description = lib.mdDoc "Definition of slice configurations.";
     };
+
+    enableTpm2 = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable TPM2 support in the initrd.
+      '';
+    };
   };
 
   config = mkIf (config.boot.initrd.enable && cfg.enable) {
@@ -342,8 +350,8 @@ in {
       # systemd needs this for some features
       "autofs4"
       # systemd-cryptenroll
-      "tpm-tis"
-    ] ++ lib.optional (pkgs.stdenv.hostPlatform.system != "riscv64-linux") "tpm-crb";
+    ] ++ lib.optional cfg.enableTpm2 "tpm-tis"
+    ++ lib.optional (cfg.enableTpm2 && !(pkgs.stdenv.hostPlatform.isRiscV64 || pkgs.stdenv.hostPlatform.isArmv7)) "tpm-crb";
 
     boot.initrd.systemd = {
       initrdBin = [pkgs.bash pkgs.coreutils cfg.package.kmod cfg.package] ++ config.system.fsPackages;
@@ -421,11 +429,11 @@ in {
 
         # so NSS can look up usernames
         "${pkgs.glibc}/lib/libnss_files.so.2"
-      ] ++ optionals cfg.package.withCryptsetup [
+      ] ++ optionals (cfg.package.withCryptsetup && cfg.enableTpm2) [
         # tpm2 support
         "${cfg.package}/lib/cryptsetup/libcryptsetup-token-systemd-tpm2.so"
         pkgs.tpm2-tss
-
+      ] ++ optionals cfg.package.withCryptsetup [
         # fido2 support
         "${cfg.package}/lib/cryptsetup/libcryptsetup-token-systemd-fido2.so"
         "${pkgs.libfido2}/lib/libfido2.so.1"
diff --git a/nixos/modules/tasks/bcache.nix b/nixos/modules/tasks/bcache.nix
index 35b922dc8a1d3..68531a4d2fed7 100644
--- a/nixos/modules/tasks/bcache.nix
+++ b/nixos/modules/tasks/bcache.nix
@@ -1,6 +1,10 @@
-{ config, lib, pkgs, ... }:
-
-{
+{ config, lib, pkgs, ... }: let
+  cfg = config.boot.bcache;
+in {
+  options.boot.bcache.enable = lib.mkEnableOption (lib.mdDoc "bcache mount support") // {
+    default = true;
+    example = false;
+  };
   options.boot.initrd.services.bcache.enable = lib.mkEnableOption (lib.mdDoc "bcache support in the initrd") // {
     description = lib.mdDoc ''
       *This will only be used when systemd is used in stage 1.*
@@ -9,7 +13,7 @@
     '';
   };
 
-  config = {
+  config = lib.mkIf cfg.enable {
 
     environment.systemPackages = [ pkgs.bcache-tools ];
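Since the module is now gated on `boot.bcache.enable` (default `true`), hosts without bcache devices can drop the tooling and udev rules:

  {
    boot.bcache.enable = false;
  }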
 
diff --git a/nixos/modules/tasks/network-interfaces.nix b/nixos/modules/tasks/network-interfaces.nix
index eb1c7512d9200..0d4033ca9430a 100644
--- a/nixos/modules/tasks/network-interfaces.nix
+++ b/nixos/modules/tasks/network-interfaces.nix
@@ -1396,14 +1396,12 @@ in
     security.apparmor.policies."bin.ping".profile = lib.mkIf config.security.apparmor.policies."bin.ping".enable (lib.mkAfter ''
       /run/wrappers/bin/ping {
         include <abstractions/base>
-        include <nixos/security.wrappers>
+        include <nixos/security.wrappers/ping>
         rpx /run/wrappers/wrappers.*/ping,
       }
       /run/wrappers/wrappers.*/ping {
         include <abstractions/base>
-        include <nixos/security.wrappers>
-        r /run/wrappers/wrappers.*/ping.real,
-        mrpx ${config.security.wrappers.ping.source},
+        include <nixos/security.wrappers/ping>
         capability net_raw,
         capability setpcap,
       }
diff --git a/nixos/modules/tasks/swraid.nix b/nixos/modules/tasks/swraid.nix
index f624294565b06..1174187062d74 100644
--- a/nixos/modules/tasks/swraid.nix
+++ b/nixos/modules/tasks/swraid.nix
@@ -2,6 +2,8 @@
 
   cfg = config.boot.swraid;
 
+  mdadm_conf = config.environment.etc."mdadm.conf";
+
 in {
   imports = [
     (lib.mkRenamedOptionModule [ "boot" "initrd" "services" "swraid" "enable" ] [ "boot" "swraid" "enable" ])
@@ -36,8 +38,14 @@ in {
   };
 
   config = lib.mkIf cfg.enable {
+    warnings = lib.mkIf
+        ((builtins.match ".*(MAILADDR|PROGRAM).*" mdadm_conf.text) == null)
+        [ "mdadm: Neither MAILADDR nor PROGRAM has been set. This will cause the `mdmon` service to crash." ];
+
     environment.systemPackages = [ pkgs.mdadm ];
 
+    environment.etc."mdadm.conf".text = lib.mkAfter cfg.mdadmConf;
+
     services.udev.packages = [ pkgs.mdadm ];
 
     systemd.packages = [ pkgs.mdadm ];
@@ -59,12 +67,10 @@ in {
         $out/bin/mdadm --version
       '';
 
-      extraFiles."/etc/mdadm.conf".source = pkgs.writeText "mdadm.conf" config.boot.swraid.mdadmConf;
+      extraFiles."/etc/mdadm.conf".source = pkgs.writeText "mdadm.conf" mdadm_conf.text;
 
       systemd = {
-        contents."/etc/mdadm.conf" = lib.mkIf (cfg.mdadmConf != "") {
-          text = cfg.mdadmConf;
-        };
+        contents."/etc/mdadm.conf".text = mdadm_conf.text;
 
         packages = [ pkgs.mdadm ];
         initrdBin = [ pkgs.mdadm ];
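To avoid the new warning, set either MAILADDR or PROGRAM through the existing `mdadmConf` option; the value below is only an example:

  {
    boot.swraid.enable = true;
    boot.swraid.mdadmConf = ''
      MAILADDR root
    '';
  }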
diff --git a/nixos/modules/virtualisation/anbox.nix b/nixos/modules/virtualisation/anbox.nix
index c7e9e23c4c928..523d9a9576ef3 100644
--- a/nixos/modules/virtualisation/anbox.nix
+++ b/nixos/modules/virtualisation/anbox.nix
@@ -5,7 +5,7 @@ with lib;
 let
 
   cfg = config.virtualisation.anbox;
-  kernelPackages = config.boot.kernelPackages;
+
   addrOpts = v: addr: pref: name: {
     address = mkOption {
       default = addr;
@@ -25,6 +25,28 @@ let
     };
   };
 
+  finalImage = if cfg.imageModifications == "" then cfg.image else ( pkgs.callPackage (
+    { runCommandNoCC, squashfsTools }:
+
+    runCommandNoCC "${cfg.image.name}-modified.img" {
+      nativeBuildInputs = [
+        squashfsTools
+      ];
+    } ''
+      echo "-> Extracting Anbox root image..."
+      unsquashfs -dest rootfs ${cfg.image}
+
+      echo "-> Modifying Anbox root image..."
+      (
+      cd rootfs
+      ${cfg.imageModifications}
+      )
+
+      echo "-> Packing modified Anbox root image..."
+      mksquashfs rootfs $out -comp xz -no-xattrs -all-root
+    ''
+  ) { });
+
 in
 
 {
@@ -42,6 +64,18 @@ in
       '';
     };
 
+    imageModifications = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Commands to edit the image filesystem.
+
+        This can be used, for example, to bundle a privileged F-Droid.
+
+        Commands are run with the working directory set to the root of the filesystem.
+      '';
+    };
+
     extraInit = mkOption {
       type = types.lines;
       default = "";
@@ -67,16 +101,19 @@ in
   config = mkIf cfg.enable {
 
     assertions = singleton {
-      assertion = versionAtLeast (getVersion config.boot.kernelPackages.kernel) "4.18";
-      message = "Anbox needs user namespace support to work properly";
+      assertion = with config.boot.kernelPackages; kernelAtLeast "5.5" && kernelOlder "5.18";
+      message = "Anbox needs a kernel with binder and ashmem support";
     };
 
     environment.systemPackages = with pkgs; [ anbox ];
 
-    services.udev.extraRules = ''
-      KERNEL=="ashmem", NAME="%k", MODE="0666"
-      KERNEL=="binder*", NAME="%k", MODE="0666"
-    '';
+    systemd.mounts = singleton {
+      requiredBy = [ "anbox-container-manager.service" ];
+      description = "Anbox Binder File System";
+      what = "binder";
+      where = "/dev/binderfs";
+      type = "binder";
+    };
 
     virtualisation.lxc.enable = true;
     networking.bridges.anbox0.interfaces = [];
@@ -87,6 +124,9 @@ in
       internalInterfaces = [ "anbox0" ];
     };
 
+    # Ensures NetworkManager doesn't touch anbox0
+    networking.networkmanager.unmanaged = [ "anbox0" ];
+
     systemd.services.anbox-container-manager = let
       anboxloc = "/var/lib/anbox";
     in {
@@ -121,12 +161,13 @@ in
         ExecStart = ''
           ${pkgs.anbox}/bin/anbox container-manager \
             --data-path=${anboxloc} \
-            --android-image=${cfg.image} \
+            --android-image=${finalImage} \
             --container-network-address=${cfg.ipv4.container.address} \
             --container-network-gateway=${cfg.ipv4.gateway.address} \
             --container-network-dns-servers=${cfg.ipv4.dns} \
             --use-rootfs-overlay \
-            --privileged
+            --privileged \
+            --daemon
         '';
       };
     };
diff --git a/nixos/modules/virtualisation/lxc-container.nix b/nixos/modules/virtualisation/lxc-container.nix
index 55b285b69147a..9402d3bf37d0c 100644
--- a/nixos/modules/virtualisation/lxc-container.nix
+++ b/nixos/modules/virtualisation/lxc-container.nix
@@ -1,96 +1,16 @@
 { lib, config, pkgs, ... }:
 
-with lib;
-
 let
-  templateSubmodule = { ... }: {
-    options = {
-      enable = mkEnableOption (lib.mdDoc "this template");
-
-      target = mkOption {
-        description = lib.mdDoc "Path in the container";
-        type = types.path;
-      };
-      template = mkOption {
-        description = lib.mdDoc ".tpl file for rendering the target";
-        type = types.path;
-      };
-      when = mkOption {
-        description = lib.mdDoc "Events which trigger a rewrite (create, copy)";
-        type = types.listOf (types.str);
-      };
-      properties = mkOption {
-        description = lib.mdDoc "Additional properties";
-        type = types.attrs;
-        default = {};
-      };
-    };
-  };
-
-  toYAML = name: data: pkgs.writeText name (generators.toYAML {} data);
-
   cfg = config.virtualisation.lxc;
-  templates = if cfg.templates != {} then let
-    list = mapAttrsToList (name: value: { inherit name; } // value)
-      (filterAttrs (name: value: value.enable) cfg.templates);
-  in
-    {
-      files = map (tpl: {
-        source = tpl.template;
-        target = "/templates/${tpl.name}.tpl";
-      }) list;
-      properties = listToAttrs (map (tpl: nameValuePair tpl.target {
-        when = tpl.when;
-        template = "${tpl.name}.tpl";
-        properties = tpl.properties;
-      }) list);
-    }
-  else { files = []; properties = {}; };
-
-in
-{
+in {
   imports = [
-    ../installer/cd-dvd/channel.nix
-    ../profiles/clone-config.nix
-    ../profiles/minimal.nix
+    ./lxc-instance-common.nix
   ];
 
   options = {
     virtualisation.lxc = {
-      templates = mkOption {
-        description = lib.mdDoc "Templates for LXD";
-        type = types.attrsOf (types.submodule (templateSubmodule));
-        default = {};
-        example = literalExpression ''
-          {
-            # create /etc/hostname on container creation. also requires networking.hostName = "" to be set
-            "hostname" = {
-              enable = true;
-              target = "/etc/hostname";
-              template = builtins.toFile "hostname.tpl" "{{ container.name }}";
-              when = [ "create" ];
-            };
-            # create /etc/nixos/hostname.nix with a configuration for keeping the hostname applied
-            "hostname-nix" = {
-              enable = true;
-              target = "/etc/nixos/hostname.nix";
-              template = builtins.toFile "hostname-nix.tpl" "{ ... }: { networking.hostName = \"{{ container.name }}\"; }";
-              # copy keeps the file updated when the container is changed
-              when = [ "create" "copy" ];
-            };
-            # copy allow the user to specify a custom configuration.nix
-            "configuration-nix" = {
-              enable = true;
-              target = "/etc/nixos/configuration.nix";
-              template = builtins.toFile "configuration-nix" "{{ config_get(\"user.user-data\", properties.default) }}";
-              when = [ "create" ];
-            };
-          };
-        '';
-      };
-
-      privilegedContainer = mkOption {
-        type = types.bool;
+      privilegedContainer = lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc ''
           Whether this LXC container will be running as a privileged container or not. If set to `true` then
@@ -116,24 +36,6 @@ in
         ${config.nix.package.out}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system
       '';
 
-    system.build.metadata = pkgs.callPackage ../../lib/make-system-tarball.nix {
-      contents = [
-        {
-          source = toYAML "metadata.yaml" {
-            architecture = builtins.elemAt (builtins.match "^([a-z0-9_]+).+" (toString pkgs.system)) 0;
-            creation_date = 1;
-            properties = {
-              description = "${config.system.nixos.distroName} ${config.system.nixos.codeName} ${config.system.nixos.label} ${pkgs.system}";
-              os = "${config.system.nixos.distroId}";
-              release = "${config.system.nixos.codeName}";
-            };
-            templates = templates.properties;
-          };
-          target = "/metadata.yaml";
-        }
-      ] ++ templates.files;
-    };
-
     # TODO: build rootfs as squashfs for faster unpack
     system.build.tarball = pkgs.callPackage ../../lib/make-system-tarball.nix {
       extraArgs = "--owner=0";
@@ -180,7 +82,7 @@ in
           ProtectKernelTunables=no
           NoNewPrivileges=no
           LoadCredential=
-        '' + optionalString cfg.privilegedContainer ''
+        '' + lib.optionalString cfg.privilegedContainer ''
           # Additional settings for privileged containers
           ProtectHome=no
           ProtectSystem=no
@@ -193,28 +95,8 @@ in
       })
     ];
 
-    # Allow the user to login as root without password.
-    users.users.root.initialHashedPassword = mkOverride 150 "";
-
-    system.activationScripts.installInitScript = mkForce ''
+    system.activationScripts.installInitScript = lib.mkForce ''
       ln -fs $systemConfig/init /sbin/init
     '';
-
-    # Some more help text.
-    services.getty.helpLine =
-      ''
-
-        Log in as "root" with an empty password.
-      '';
-
-    # Containers should be light-weight, so start sshd on demand.
-    services.openssh.enable = mkDefault true;
-    services.openssh.startWhenNeeded = mkDefault true;
-
-    # As this is intended as a standalone image, undo some of the minimal profile stuff
-    environment.noXlibs = false;
-    documentation.enable = true;
-    documentation.nixos.enable = true;
-    services.logrotate.enable = true;
   };
 }
diff --git a/nixos/modules/virtualisation/lxc-image-metadata.nix b/nixos/modules/virtualisation/lxc-image-metadata.nix
new file mode 100644
index 0000000000000..2c0568b4c4682
--- /dev/null
+++ b/nixos/modules/virtualisation/lxc-image-metadata.nix
@@ -0,0 +1,104 @@
+{ lib, config, pkgs, ... }:
+
+let
+  templateSubmodule = {...}: {
+    options = {
+      enable = lib.mkEnableOption "this template";
+
+      target = lib.mkOption {
+        description = "Path in the container";
+        type = lib.types.path;
+      };
+      template = lib.mkOption {
+        description = ".tpl file for rendering the target";
+        type = lib.types.path;
+      };
+      when = lib.mkOption {
+        description = "Events which trigger a rewrite (create, copy)";
+        type = lib.types.listOf (lib.types.str);
+      };
+      properties = lib.mkOption {
+        description = "Additional properties";
+        type = lib.types.attrs;
+        default = {};
+      };
+    };
+  };
+
+  toYAML = name: data: pkgs.writeText name (lib.generators.toYAML {} data);
+
+  cfg = config.virtualisation.lxc;
+  templates = if cfg.templates != {} then let
+    list = lib.mapAttrsToList (name: value: { inherit name; } // value)
+      (lib.filterAttrs (name: value: value.enable) cfg.templates);
+  in
+    {
+      files = map (tpl: {
+        source = tpl.template;
+        target = "/templates/${tpl.name}.tpl";
+      }) list;
+      properties = lib.listToAttrs (map (tpl: lib.nameValuePair tpl.target {
+        when = tpl.when;
+        template = "${tpl.name}.tpl";
+        properties = tpl.properties;
+      }) list);
+    }
+  else { files = []; properties = {}; };
+
+in {
+  options = {
+    virtualisation.lxc = {
+      templates = lib.mkOption {
+        description = "Templates for LXD";
+        type = lib.types.attrsOf (lib.types.submodule templateSubmodule);
+        default = {};
+        example = lib.literalExpression ''
+          {
+            # create /etc/hostname on container creation
+            "hostname" = {
+              enable = true;
+              target = "/etc/hostname";
+              template = builtins.writeFile "hostname.tpl" "{{ container.name }}";
+              when = [ "create" ];
+            };
+            # create /etc/nixos/hostname.nix with a configuration for keeping the hostname applied
+            "hostname-nix" = {
+              enable = true;
+              target = "/etc/nixos/hostname.nix";
+              template = builtins.writeFile "hostname-nix.tpl" "{ ... }: { networking.hostName = "{{ container.name }}"; }";
+              # copy keeps the file updated when the container is changed
+              when = [ "create" "copy" ];
+            };
+            # copy allows the user to specify a custom configuration.nix
+            "configuration-nix" = {
+              enable = true;
+              target = "/etc/nixos/configuration.nix";
+              template = builtins.writeFile "configuration-nix" "{{ config_get(\"user.user-data\", properties.default) }}";
+              when = [ "create" ];
+            };
+          };
+        '';
+      };
+    };
+  };
+
+  config = {
+    system.build.metadata = pkgs.callPackage ../../lib/make-system-tarball.nix {
+      contents = [
+        {
+          source = toYAML "metadata.yaml" {
+            architecture = builtins.elemAt (builtins.match "^([a-z0-9_]+).+" (toString pkgs.system)) 0;
+            creation_date = 1;
+            properties = {
+              description = "${config.system.nixos.distroName} ${config.system.nixos.codeName} ${config.system.nixos.label} ${pkgs.system}";
+              os = "${config.system.nixos.distroId}";
+              release = "${config.system.nixos.codeName}";
+            };
+            templates = templates.properties;
+          };
+          target = "/metadata.yaml";
+        }
+      ] ++ templates.files;
+    };
+  };
+}
diff --git a/nixos/modules/virtualisation/lxc-instance-common.nix b/nixos/modules/virtualisation/lxc-instance-common.nix
new file mode 100644
index 0000000000000..d6a0e05fb1c96
--- /dev/null
+++ b/nixos/modules/virtualisation/lxc-instance-common.nix
@@ -0,0 +1,30 @@
+{lib, ...}:
+
+{
+  imports = [
+    ./lxc-image-metadata.nix
+
+    ../installer/cd-dvd/channel.nix
+    ../profiles/clone-config.nix
+    ../profiles/minimal.nix
+  ];
+
+  # Allow the user to login as root without password.
+  users.users.root.initialHashedPassword = lib.mkOverride 150 "";
+
+  # Some more help text.
+  services.getty.helpLine = ''
+
+    Log in as "root" with an empty password.
+  '';
+
+  # Containers should be light-weight, so start sshd on demand.
+  services.openssh.enable = lib.mkDefault true;
+  services.openssh.startWhenNeeded = lib.mkDefault true;
+
+  # As this is intended as a standalone image, undo some of the minimal profile stuff
+  environment.noXlibs = false;
+  documentation.enable = true;
+  documentation.nixos.enable = true;
+  services.logrotate.enable = true;
+}
diff --git a/nixos/modules/virtualisation/lxd-virtual-machine.nix b/nixos/modules/virtualisation/lxd-virtual-machine.nix
new file mode 100644
index 0000000000000..ba729465ec2f8
--- /dev/null
+++ b/nixos/modules/virtualisation/lxd-virtual-machine.nix
@@ -0,0 +1,46 @@
+{ config, lib, pkgs, ... }:
+
+let
+  serialDevice =
+    if pkgs.stdenv.hostPlatform.isx86
+    then "ttyS0"
+    else "ttyAMA0"; # aarch64
+in {
+  imports = [
+    ./lxc-instance-common.nix
+
+    ../profiles/qemu-guest.nix
+  ];
+
+  config = {
+    system.build.qemuImage = import ../../lib/make-disk-image.nix {
+      inherit pkgs lib config;
+
+      partitionTableType = "efi";
+      format = "qcow2-compressed";
+      copyChannel = true;
+    };
+
+    fileSystems = {
+      "/" = {
+        device = "/dev/disk/by-label/nixos";
+        autoResize = true;
+        fsType = "ext4";
+      };
+      "/boot" = {
+        device = "/dev/disk/by-label/ESP";
+        fsType = "vfat";
+      };
+    };
+
+    boot.growPartition = true;
+    boot.loader.systemd-boot.enable = true;
+
+    # image building needs to know which device to install the bootloader on
+    boot.loader.grub.device = "/dev/vda";
+
+    boot.kernelParams = ["console=tty1" "console=${serialDevice}"];
+
+    virtualisation.lxd.agent.enable = lib.mkDefault true;
+  };
+}
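A rough sketch of consuming the new module outside release.nix (module path as added in this change; the resulting image is exposed as `config.system.build.qemuImage`):

  { modulesPath, ... }: {
    imports = [ "${modulesPath}/virtualisation/lxd-virtual-machine.nix" ];
    # build config.system.build.qemuImage to obtain the compressed qcow2 for LXD
  }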
diff --git a/nixos/modules/virtualisation/lxd.nix b/nixos/modules/virtualisation/lxd.nix
index e22ba9a0ae2ca..e30fbebb662c7 100644
--- a/nixos/modules/virtualisation/lxd.nix
+++ b/nixos/modules/virtualisation/lxd.nix
@@ -2,21 +2,20 @@
 
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
   cfg = config.virtualisation.lxd;
+  preseedFormat = pkgs.formats.yaml {};
 in {
   imports = [
-    (mkRemovedOptionModule [ "virtualisation" "lxd" "zfsPackage" ] "Override zfs in an overlay instead to override it globally")
+    (lib.mkRemovedOptionModule [ "virtualisation" "lxd" "zfsPackage" ] "Override zfs in an overlay instead to override it globally")
   ];
 
   ###### interface
 
   options = {
     virtualisation.lxd = {
-      enable = mkOption {
-        type = types.bool;
+      enable = lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc ''
           This option enables lxd, a daemon that manages
@@ -32,28 +31,28 @@ in {
         '';
       };
 
-      package = mkOption {
-        type = types.package;
+      package = lib.mkOption {
+        type = lib.types.package;
         default = pkgs.lxd;
-        defaultText = literalExpression "pkgs.lxd";
+        defaultText = lib.literalExpression "pkgs.lxd";
         description = lib.mdDoc ''
           The LXD package to use.
         '';
       };
 
-      lxcPackage = mkOption {
-        type = types.package;
+      lxcPackage = lib.mkOption {
+        type = lib.types.package;
         default = pkgs.lxc;
-        defaultText = literalExpression "pkgs.lxc";
+        defaultText = lib.literalExpression "pkgs.lxc";
         description = lib.mdDoc ''
           The LXC package to use with LXD (required for AppArmor profiles).
         '';
       };
 
-      zfsSupport = mkOption {
-        type = types.bool;
+      zfsSupport = lib.mkOption {
+        type = lib.types.bool;
         default = config.boot.zfs.enabled;
-        defaultText = literalExpression "config.boot.zfs.enabled";
+        defaultText = lib.literalExpression "config.boot.zfs.enabled";
         description = lib.mdDoc ''
           Enables lxd to use zfs as a storage for containers.
 
@@ -62,8 +61,8 @@ in {
         '';
       };
 
-      recommendedSysctlSettings = mkOption {
-        type = types.bool;
+      recommendedSysctlSettings = lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = lib.mdDoc ''
           Enables various settings to avoid common pitfalls when
@@ -75,8 +74,67 @@ in {
         '';
       };
 
-      startTimeout = mkOption {
-        type = types.int;
+      preseed = lib.mkOption {
+        type = lib.types.nullOr (lib.types.submodule {
+          freeformType = preseedFormat.type;
+        });
+
+        default = null;
+
+        description = lib.mdDoc ''
+          Configuration for LXD preseed, see
+          <https://documentation.ubuntu.com/lxd/en/latest/howto/initialize/#initialize-preseed>
+          for supported values.
+
+          Changes to this option are re-applied to LXD, overwriting existing entities or creating missing ones;
+          entities will *not* be removed by preseed.
+        '';
+
+        example = lib.literalExpression ''
+          {
+            networks = [
+              {
+                name = "lxdbr0";
+                type = "bridge";
+                config = {
+                  "ipv4.address" = "10.0.100.1/24";
+                  "ipv4.nat" = "true";
+                };
+              }
+            ];
+            profiles = [
+              {
+                name = "default";
+                devices = {
+                  eth0 = {
+                    name = "eth0";
+                    network = "lxdbr0";
+                    type = "nic";
+                  };
+                  root = {
+                    path = "/";
+                    pool = "default";
+                    size = "35GiB";
+                    type = "disk";
+                  };
+                };
+              }
+            ];
+            storage_pools = [
+              {
+                name = "default";
+                driver = "dir";
+                config = {
+                  source = "/var/lib/lxd/storage-pools/default";
+                };
+              }
+            ];
+          }
+        '';
+      };
+
+      startTimeout = lib.mkOption {
+        type = lib.types.int;
         default = 600;
         apply = toString;
         description = lib.mdDoc ''
@@ -91,13 +149,13 @@ in {
           Enables the (experimental) LXD UI.
         '');
 
-        package = mkPackageOption pkgs.lxd-unwrapped "ui" { };
+        package = lib.mkPackageOption pkgs.lxd-unwrapped "ui" { };
       };
     };
   };
 
   ###### implementation
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
     environment.systemPackages = [ cfg.package ];
 
     # Note: the following options are also declared in virtualisation.lxc, but
@@ -139,19 +197,19 @@ in {
       wantedBy = [ "multi-user.target" ];
       after = [
         "network-online.target"
-        (mkIf config.virtualisation.lxc.lxcfs.enable "lxcfs.service")
+        (lib.mkIf config.virtualisation.lxc.lxcfs.enable "lxcfs.service")
       ];
       requires = [
         "network-online.target"
         "lxd.socket"
-        (mkIf config.virtualisation.lxc.lxcfs.enable "lxcfs.service")
+        (lib.mkIf config.virtualisation.lxc.lxcfs.enable "lxcfs.service")
       ];
       documentation = [ "man:lxd(1)" ];
 
       path = [ pkgs.util-linux ]
-        ++ optional cfg.zfsSupport config.boot.zfs.package;
+        ++ lib.optional cfg.zfsSupport config.boot.zfs.package;
 
-      environment = mkIf (cfg.ui.enable) {
+      environment = lib.mkIf (cfg.ui.enable) {
         "LXD_UI" = cfg.ui.package;
       };
 
@@ -173,11 +231,26 @@ in {
         # By default, `lxd` loads configuration files from hard-coded
         # `/usr/share/lxc/config` - since this is a no-go for us, we have to
         # explicitly tell it where the actual configuration files are
-        Environment = mkIf (config.virtualisation.lxc.lxcfs.enable)
+        Environment = lib.mkIf (config.virtualisation.lxc.lxcfs.enable)
           "LXD_LXC_TEMPLATE_CONFIG=${pkgs.lxcfs}/share/lxc/config";
       };
     };
 
+    systemd.services.lxd-preseed = lib.mkIf (cfg.preseed != null) {
+      description = "LXD initialization with preseed file";
+      wantedBy = ["multi-user.target"];
+      requires = ["lxd.service"];
+      after = ["lxd.service"];
+
+      script = ''
+        ${pkgs.coreutils}/bin/cat ${preseedFormat.generate "lxd-preseed.yaml" cfg.preseed} | ${cfg.package}/bin/lxd init --preseed
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+      };
+    };
+
     users.groups.lxd = {};
 
     users.users.root = {
@@ -185,7 +258,7 @@ in {
       subGidRanges = [ { startGid = 1000000; count = 65536; } ];
     };
 
-    boot.kernel.sysctl = mkIf cfg.recommendedSysctlSettings {
+    boot.kernel.sysctl = lib.mkIf cfg.recommendedSysctlSettings {
       "fs.inotify.max_queued_events" = 1048576;
       "fs.inotify.max_user_instances" = 1048576;
       "fs.inotify.max_user_watches" = 1048576;
@@ -196,7 +269,7 @@ in {
       "kernel.keys.maxkeys" = 2000;
     };
 
-    boot.kernelModules = [ "veth" "xt_comment" "xt_CHECKSUM" "xt_MASQUERADE" ]
-      ++ optionals (!config.networking.nftables.enable) [ "iptable_mangle" ];
+    boot.kernelModules = [ "veth" "xt_comment" "xt_CHECKSUM" "xt_MASQUERADE" "vhost_vsock" ]
+      ++ lib.optionals (!config.networking.nftables.enable) [ "iptable_mangle" ];
   };
 }
diff --git a/nixos/release.nix b/nixos/release.nix
index 6da6faab73bed..abaa7ef9a7113 100644
--- a/nixos/release.nix
+++ b/nixos/release.nix
@@ -44,10 +44,13 @@ let
   pkgs = import ./.. { system = "x86_64-linux"; };
 
 
-  versionModule =
-    { system.nixos.versionSuffix = versionSuffix;
-      system.nixos.revision = nixpkgs.rev or nixpkgs.shortRev;
-    };
+  versionModule = { config, ... }: {
+    system.nixos.versionSuffix = versionSuffix;
+    system.nixos.revision = nixpkgs.rev or nixpkgs.shortRev;
+
+    # At creation time we do not have state yet, so just default to latest.
+    system.stateVersion = config.system.nixos.version;
+  };
 
   makeModules = module: rest: [ configuration versionModule module rest ];
 
@@ -310,7 +313,7 @@ in rec {
   );
 
   # An image that can be imported into lxd and used for container creation
-  lxdImage = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+  lxdContainerImage = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
 
     with import ./.. { inherit system; };
 
@@ -319,14 +322,46 @@ in rec {
       modules =
         [ configuration
           versionModule
-          ./maintainers/scripts/lxd/lxd-image.nix
+          ./maintainers/scripts/lxd/lxd-container-image.nix
         ];
     }).config.system.build.tarball)
 
   );
 
   # Metadata for the lxd image
-  lxdMeta = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+  lxdContainerMeta = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ configuration
+          versionModule
+          ./maintainers/scripts/lxd/lxd-container-image.nix
+        ];
+    }).config.system.build.metadata)
+
+  );
+
+  # An image that can be imported into lxd and used for virtual machine creation
+  lxdVirtualMachineImage = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ configuration
+          versionModule
+          ./maintainers/scripts/lxd/lxd-virtual-machine-image.nix
+        ];
+    }).config.system.build.qemuImage)
+
+  );
+
+  # Metadata for the lxd virtual machine image
+  lxdVirtualMachineImageMeta = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
 
     with import ./.. { inherit system; };
 
@@ -335,7 +370,7 @@ in rec {
       modules =
         [ configuration
           versionModule
-          ./maintainers/scripts/lxd/lxd-image.nix
+          ./maintainers/scripts/lxd/lxd-virtual-machine-image.nix
         ];
     }).config.system.build.metadata)
 
diff --git a/nixos/tests/acme.nix b/nixos/tests/acme.nix
index 4d220b9747aa6..e5f2d4c7934a1 100644
--- a/nixos/tests/acme.nix
+++ b/nixos/tests/acme.nix
@@ -18,7 +18,7 @@
   dnsConfig = nodes: {
     dnsProvider = "exec";
     dnsPropagationCheck = false;
-    credentialsFile = pkgs.writeText "wildcard.env" ''
+    environmentFile = pkgs.writeText "wildcard.env" ''
       EXEC_PATH=${dnsScript nodes}
       EXEC_POLLING_INTERVAL=1
       EXEC_PROPAGATION_TIMEOUT=1
@@ -266,6 +266,37 @@ in {
           }
         ];
 
+        concurrency-limit.configuration = {pkgs, ...}: lib.mkMerge [
+          webserverBasicConfig {
+            security.acme.maxConcurrentRenewals = 1;
+
+            services.nginx.virtualHosts = {
+              "f.example.test" = vhostBase // {
+                enableACME = true;
+              };
+              "g.example.test" = vhostBase // {
+                enableACME = true;
+              };
+              "h.example.test" = vhostBase // {
+                enableACME = true;
+              };
+            };
+
+            systemd.services = {
+              # check for mutual exclusion of starting renew services
+              "acme-f.example.test".serviceConfig.ExecPreStart = "+" + (pkgs.writeShellScript "test-f" ''
+                test "$(systemctl is-active acme-{g,h}.example.test.service | grep activating | wc -l)" -le 0
+                '');
+              "acme-g.example.test".serviceConfig.ExecPreStart = "+" + (pkgs.writeShellScript "test-g" ''
+                test "$(systemctl is-active acme-{f,h}.example.test.service | grep activating | wc -l)" -le 0
+                '');
+              "acme-h.example.test".serviceConfig.ExecPreStart = "+" + (pkgs.writeShellScript "test-h" ''
+                test "$(systemctl is-active acme-{g,f}.example.test.service | grep activating | wc -l)" -le 0
+                '');
+              };
+          }
+        ];
+
         # Test lego internal server (listenHTTP option)
         # Also tests useRoot option
         lego-server.configuration = { ... }: {
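Outside the test, the option exercised here would be set on a real host roughly like this (the limit value is an example):

  {
    security.acme.maxConcurrentRenewals = 2;
  }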
@@ -297,7 +328,7 @@ in {
 
           services.caddy = {
             enable = true;
-            virtualHosts."a.exmaple.test" = {
+            virtualHosts."a.example.test" = {
               useACMEHost = "example.test";
               extraConfig = ''
                 root * ${documentRoot}
@@ -591,6 +622,15 @@ in {
           webserver.wait_for_unit("nginx.service")
           check_connection(client, "slow.example.test")
 
+      with subtest("Can limit concurrency of running renewals"):
+          switch_to(webserver, "concurrency-limit")
+          webserver.wait_for_unit("acme-finished-f.example.test.target")
+          webserver.wait_for_unit("acme-finished-g.example.test.target")
+          webserver.wait_for_unit("acme-finished-h.example.test.target")
+          check_connection(client, "f.example.test")
+          check_connection(client, "g.example.test")
+          check_connection(client, "h.example.test")
+
       with subtest("Works with caddy"):
           switch_to(webserver, "caddy")
           webserver.wait_for_unit("acme-finished-example.test.target")
diff --git a/nixos/tests/akkoma.nix b/nixos/tests/akkoma.nix
index 7115c0beed34d..287e2d485999e 100644
--- a/nixos/tests/akkoma.nix
+++ b/nixos/tests/akkoma.nix
@@ -33,7 +33,10 @@ let
 
     echo '${userPassword}' | ${pkgs.toot}/bin/toot login_cli -i "akkoma.nixos.test" -e "jamy@nixos.test"
     echo "y" | ${pkgs.toot}/bin/toot post "hello world Jamy here"
-    echo "y" | ${pkgs.toot}/bin/toot timeline | grep -F -q "hello world Jamy here"
+
+    # Retrieving timeline with toot currently broken due to incompatible timestamp format
+    # cf. <https://akkoma.dev/AkkomaGang/akkoma/issues/637> and <https://github.com/ihabunek/toot/issues/399>
+    #echo "y" | ${pkgs.toot}/bin/toot timeline | grep -F -q "hello world Jamy here"
 
     # Test file upload
     echo "y" | ${pkgs.toot}/bin/toot upload <(dd if=/dev/zero bs=1024 count=1024 status=none) \
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index c1e124bda5c7a..d674d0cdba37d 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -109,6 +109,7 @@ in {
   allTerminfo = handleTest ./all-terminfo.nix {};
   alps = handleTest ./alps.nix {};
   amazon-init-shell = handleTest ./amazon-init-shell.nix {};
+  anbox = runTest ./anbox.nix;
   anuko-time-tracker = handleTest ./anuko-time-tracker.nix {};
   apcupsd = handleTest ./apcupsd.nix {};
   apfs = runTest ./apfs.nix;
@@ -210,6 +211,7 @@ in {
   custom-ca = handleTest ./custom-ca.nix {};
   croc = handleTest ./croc.nix {};
   darling = handleTest ./darling.nix {};
+  dae = handleTest ./dae.nix {};
   dconf = handleTest ./dconf.nix {};
   deepin = handleTest ./deepin.nix {};
   deluge = handleTest ./deluge.nix {};
@@ -273,7 +275,6 @@ in {
   firefox-beta = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-beta; };
   firefox-devedition = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-devedition; };
   firefox-esr    = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr; }; # used in `tested` job
-  firefox-esr-102 = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr-102; };
   firefox-esr-115 = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr-115; };
   firejail = handleTest ./firejail.nix {};
   firewall = handleTest ./firewall.nix { nftables = false; };
@@ -395,7 +396,7 @@ in {
   jibri = handleTest ./jibri.nix {};
   jirafeau = handleTest ./jirafeau.nix {};
   jitsi-meet = handleTest ./jitsi-meet.nix {};
-  jool = handleTest ./jool.nix {};
+  jool = import ./jool.nix { inherit pkgs runTest; };
   k3s = handleTest ./k3s {};
   kafka = handleTest ./kafka.nix {};
   kanidm = handleTest ./kanidm.nix {};
@@ -446,7 +447,7 @@ in {
   loki = handleTest ./loki.nix {};
   luks = handleTest ./luks.nix {};
   lvm2 = handleTest ./lvm2 {};
-  lxd = pkgs.recurseIntoAttrs (handleTest ./lxd {});
+  lxd = pkgs.recurseIntoAttrs (handleTest ./lxd { inherit handleTestOn; });
   lxd-image-server = handleTest ./lxd-image-server.nix {};
   #logstash = handleTest ./logstash.nix {};
   lorri = handleTest ./lorri/default.nix {};
@@ -483,6 +484,7 @@ in {
   miriway = handleTest ./miriway.nix {};
   misc = handleTest ./misc.nix {};
   mjolnir = handleTest ./matrix/mjolnir.nix {};
+  mobilizon = handleTest ./mobilizon.nix {};
   mod_perl = handleTest ./mod_perl.nix {};
   molly-brown = handleTest ./molly-brown.nix {};
   monica = handleTest ./web-apps/monica.nix {};
@@ -728,6 +730,7 @@ in {
   sslh = handleTest ./sslh.nix {};
   sssd = handleTestOn ["x86_64-linux"] ./sssd.nix {};
   sssd-ldap = handleTestOn ["x86_64-linux"] ./sssd-ldap.nix {};
+  stalwart-mail = handleTest ./stalwart-mail.nix {};
   stargazer = runTest ./web-servers/stargazer.nix;
   starship = handleTest ./starship.nix {};
   static-web-server = handleTest ./web-servers/static-web-server.nix {};
diff --git a/nixos/tests/anbox.nix b/nixos/tests/anbox.nix
new file mode 100644
index 0000000000000..d78f63ec761fc
--- /dev/null
+++ b/nixos/tests/anbox.nix
@@ -0,0 +1,40 @@
+{ lib, pkgs, ... }:
+
+{
+  name = "anbox";
+  meta.maintainers = with lib.maintainers; [ mvnetbiz ];
+
+  nodes.machine = { pkgs, config, ... }: {
+    imports = [
+      ./common/user-account.nix
+      ./common/x11.nix
+    ];
+
+    environment.systemPackages = with pkgs; [ android-tools ];
+
+    test-support.displayManager.auto.user = "alice";
+
+    virtualisation.anbox.enable = true;
+    boot.kernelPackages = pkgs.linuxPackages_5_15;
+
+    # The AArch64 anbox image will not start.
+    # Meanwhile the postmarketOS images work just fine.
+    virtualisation.anbox.image = pkgs.anbox.postmarketos-image;
+    virtualisation.memorySize = 2500;
+  };
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.users.users.alice;
+    bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus";
+  in ''
+    machine.wait_for_x()
+
+    machine.wait_until_succeeds(
+        "sudo -iu alice ${bus} anbox wait-ready"
+    )
+
+    machine.wait_until_succeeds("adb shell true")
+
+    print(machine.succeed("adb devices"))
+  '';
+}
diff --git a/nixos/tests/custom-ca.nix b/nixos/tests/custom-ca.nix
index 25a7b6fdea46e..0fcdf81022d72 100644
--- a/nixos/tests/custom-ca.nix
+++ b/nixos/tests/custom-ca.nix
@@ -131,8 +131,8 @@ let
         # chromium-based browsers refuse to run as root
         test-support.displayManager.auto.user = "alice";
 
-        # browsers may hang with the default memory
-        virtualisation.memorySize = 600;
+        # machine often runs out of memory with less
+        virtualisation.memorySize = 1024;
 
         environment.systemPackages = [ pkgs.xdotool pkgs.${browser} ];
       };
diff --git a/nixos/tests/dae.nix b/nixos/tests/dae.nix
new file mode 100644
index 0000000000000..b8c8ebce74574
--- /dev/null
+++ b/nixos/tests/dae.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+
+  name = "dae";
+
+  meta = {
+    maintainers = with lib.maintainers; [ oluceps ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.curl ];
+    services.nginx = {
+      enable = true;
+      statusPage = true;
+    };
+    services.dae = {
+      enable = true;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_unit("dae.service")
+
+    machine.wait_for_open_port(80)
+
+    machine.succeed("curl --fail --max-time 10 http://localhost")
+  '';
+
+})
diff --git a/nixos/tests/jool.nix b/nixos/tests/jool.nix
index 6d5ded9b18e07..93575f07b1c8c 100644
--- a/nixos/tests/jool.nix
+++ b/nixos/tests/jool.nix
@@ -1,9 +1,4 @@
-{ system ? builtins.currentSystem,
-  config ? {},
-  pkgs ? import ../.. { inherit system config; }
-}:
-
-with import ../lib/testing-python.nix { inherit system pkgs; };
+{ pkgs, runTest }:
 
 let
   inherit (pkgs) lib;
@@ -23,7 +18,6 @@ let
       description = "Mock webserver";
       wants = [ "network-online.target" ];
       wantedBy = [ "multi-user.target" ];
-      serviceConfig.Restart = "always";
       script = ''
         while true; do
         {
@@ -40,7 +34,7 @@ let
 in
 
 {
-  siit = makeTest {
+  siit = runTest {
     # This test simulates the setup described in [1] with two IPv6 and
     # IPv4-only devices on different subnets communicating through a border
     # relay running Jool in SIIT mode.
@@ -49,8 +43,7 @@ in
     meta.maintainers = with lib.maintainers; [ rnhmjoj ];
 
     # Border relay
-    nodes.relay = { ... }: {
-      imports = [ ../modules/profiles/minimal.nix ];
+    nodes.relay = {
       virtualisation.vlans = [ 1 2 ];
 
       # Enable packet routing
@@ -65,20 +58,13 @@ in
         eth2.ipv4.addresses = [ { address = "192.0.2.1";  prefixLength = 24; } ];
       };
 
-      networking.jool = {
-        enable = true;
-        siit.enable = true;
-        siit.config.global.pool6 = "fd::/96";
-      };
+      networking.jool.enable = true;
+      networking.jool.siit.default.global.pool6 = "fd::/96";
     };
 
     # IPv6 only node
-    nodes.alice = { ... }: {
-      imports = [
-        ../modules/profiles/minimal.nix
-        ipv6Only
-        (webserver 6 "Hello, Bob!")
-      ];
+    nodes.alice = {
+      imports = [ ipv6Only (webserver 6 "Hello, Bob!") ];
 
       virtualisation.vlans = [ 1 ];
       networking.interfaces.eth1.ipv6 = {
@@ -89,12 +75,8 @@ in
     };
 
     # IPv4 only node
-    nodes.bob = { ... }: {
-      imports = [
-        ../modules/profiles/minimal.nix
-        ipv4Only
-        (webserver 4 "Hello, Alice!")
-      ];
+    nodes.bob = {
+      imports = [ ipv4Only (webserver 4 "Hello, Alice!") ];
 
       virtualisation.vlans = [ 2 ];
       networking.interfaces.eth1.ipv4 = {
@@ -107,17 +89,17 @@ in
     testScript = ''
       start_all()
 
-      relay.wait_for_unit("jool-siit.service")
+      relay.wait_for_unit("jool-siit-default.service")
       alice.wait_for_unit("network-addresses-eth1.service")
       bob.wait_for_unit("network-addresses-eth1.service")
 
       with subtest("Alice and Bob can't ping each other"):
-        relay.systemctl("stop jool-siit.service")
+        relay.systemctl("stop jool-siit-default.service")
         alice.fail("ping -c1 fd::192.0.2.16")
         bob.fail("ping -c1 198.51.100.8")
 
       with subtest("Alice and Bob can ping using the relay"):
-        relay.systemctl("start jool-siit.service")
+        relay.systemctl("start jool-siit-default.service")
         alice.wait_until_succeeds("ping -c1 fd::192.0.2.16")
         bob.wait_until_succeeds("ping -c1 198.51.100.8")
 
@@ -132,7 +114,7 @@ in
     '';
   };
 
-  nat64 = makeTest {
+  nat64 = runTest {
     # This test simulates the setup described in [1] with two IPv6-only nodes
     # (a client and a homeserver) on the LAN subnet and an IPv4 node on the WAN.
     # The router runs Jool in stateful NAT64 mode, masquarading the LAN and
@@ -142,8 +124,7 @@ in
     meta.maintainers = with lib.maintainers; [ rnhmjoj ];
 
     # Router
-    nodes.router = { ... }: {
-      imports = [ ../modules/profiles/minimal.nix ];
+    nodes.router = {
       virtualisation.vlans = [ 1 2 ];
 
       # Enable packet routing
@@ -158,32 +139,29 @@ in
         eth2.ipv4.addresses = [ { address = "203.0.113.1"; prefixLength = 24; } ];
       };
 
-      networking.jool = {
-        enable = true;
-        nat64.enable = true;
-        nat64.config = {
-          bib = [
-            { # forward HTTP 203.0.113.1 (router) → 2001:db8::9 (homeserver)
-              "protocol"     = "TCP";
-              "ipv4 address" = "203.0.113.1#80";
-              "ipv6 address" = "2001:db8::9#80";
-            }
-          ];
-          pool4 = [
-            # Ports for dynamic translation
-            { protocol =  "TCP";  prefix = "203.0.113.1/32"; "port range" = "40001-65535"; }
-            { protocol =  "UDP";  prefix = "203.0.113.1/32"; "port range" = "40001-65535"; }
-            { protocol = "ICMP";  prefix = "203.0.113.1/32"; "port range" = "40001-65535"; }
-            # Ports for static BIB entries
-            { protocol =  "TCP";  prefix = "203.0.113.1/32"; "port range" = "80"; }
-          ];
-        };
+      networking.jool.enable = true;
+      networking.jool.nat64.default = {
+        bib = [
+          { # forward HTTP 203.0.113.1 (router) → 2001:db8::9 (homeserver)
+            "protocol"     = "TCP";
+            "ipv4 address" = "203.0.113.1#80";
+            "ipv6 address" = "2001:db8::9#80";
+          }
+        ];
+        pool4 = [
+          # Ports for dynamic translation
+          { protocol =  "TCP";  prefix = "203.0.113.1/32"; "port range" = "40001-65535"; }
+          { protocol =  "UDP";  prefix = "203.0.113.1/32"; "port range" = "40001-65535"; }
+          { protocol = "ICMP";  prefix = "203.0.113.1/32"; "port range" = "40001-65535"; }
+          # Ports for static BIB entries
+          { protocol =  "TCP";  prefix = "203.0.113.1/32"; "port range" = "80"; }
+        ];
       };
     };
 
     # LAN client (IPv6 only)
-    nodes.client = { ... }: {
-      imports = [ ../modules/profiles/minimal.nix ipv6Only ];
+    nodes.client = {
+      imports = [ ipv6Only ];
       virtualisation.vlans = [ 1 ];
 
       networking.interfaces.eth1.ipv6 = {
@@ -194,12 +172,8 @@ in
     };
 
     # LAN server (IPv6 only)
-    nodes.homeserver = { ... }: {
-      imports = [
-        ../modules/profiles/minimal.nix
-        ipv6Only
-        (webserver 6 "Hello from IPv6!")
-      ];
+    nodes.homeserver = {
+      imports = [ ipv6Only (webserver 6 "Hello from IPv6!") ];
 
       virtualisation.vlans = [ 1 ];
       networking.interfaces.eth1.ipv6 = {
@@ -210,12 +184,8 @@ in
     };
 
     # WAN server (IPv4 only)
-    nodes.server = { ... }: {
-      imports = [
-        ../modules/profiles/minimal.nix
-        ipv4Only
-        (webserver 4 "Hello from IPv4!")
-      ];
+    nodes.server = {
+      imports = [ ipv4Only (webserver 4 "Hello from IPv4!") ];
 
       virtualisation.vlans = [ 2 ];
       networking.interfaces.eth1.ipv4.addresses =
@@ -229,7 +199,7 @@ in
         node.wait_for_unit("network-addresses-eth1.service")
 
       with subtest("Client can ping the WAN server"):
-        router.wait_for_unit("jool-nat64.service")
+        router.wait_for_unit("jool-nat64-default.service")
         client.succeed("ping -c1 64:ff9b::203.0.113.16")
 
       with subtest("Client can connect to the WAN webserver"):
diff --git a/nixos/tests/lxd/container.nix b/nixos/tests/lxd/container.nix
index 9e56f6e41e054..bdaaebfc00281 100644
--- a/nixos/tests/lxd/container.nix
+++ b/nixos/tests/lxd/container.nix
@@ -1,7 +1,7 @@
 import ../make-test-python.nix ({ pkgs, lib, ... } :
 
 let
-  lxd-image = import ../../release.nix {
+  releases = import ../../release.nix {
     configuration = {
       # Building documentation makes the test take unnecessarily long:
       documentation.enable = lib.mkForce false;
@@ -11,14 +11,14 @@ let
     };
   };
 
-  lxd-image-metadata = lxd-image.lxdMeta.${pkgs.stdenv.hostPlatform.system};
-  lxd-image-rootfs = lxd-image.lxdImage.${pkgs.stdenv.hostPlatform.system};
+  lxd-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
+  lxd-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
 
 in {
-  name = "lxd";
+  name = "lxd-container";
 
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ patryk27 ];
+    maintainers = [ patryk27 adamcstephens ];
   };
 
   nodes.machine = { lib, ... }: {
@@ -49,6 +49,9 @@ in {
     # Wait for lxd to settle
     machine.succeed("lxd waitready")
 
+    # no preseed should mean no service
+    machine.fail("systemctl status lxd-preseed.service")
+
     machine.succeed("lxd init --minimal")
 
     machine.succeed(
diff --git a/nixos/tests/lxd/default.nix b/nixos/tests/lxd/default.nix
index 2e34907d79369..20afdd5e48bbe 100644
--- a/nixos/tests/lxd/default.nix
+++ b/nixos/tests/lxd/default.nix
@@ -2,8 +2,11 @@
   system ? builtins.currentSystem,
   config ? {},
   pkgs ? import ../../.. {inherit system config;},
+  handleTestOn,
 }: {
   container = import ./container.nix {inherit system pkgs;};
   nftables = import ./nftables.nix {inherit system pkgs;};
+  preseed = import ./preseed.nix {inherit system pkgs;};
   ui = import ./ui.nix {inherit system pkgs;};
+  virtual-machine = handleTestOn ["x86_64-linux"] ./virtual-machine.nix { inherit system pkgs; };
 }
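handleTestOn is now expected from the caller (all-tests.nix in nixpkgs), so the virtual-machine test is only run on x86_64-linux. A rough sketch of such a gate, matching the call site above; the real helper in nixpkgs is more involved, so treat the body as an assumption:

    # hypothetical stand-in, parameterised over the evaluating system
    system:
    {
      handleTestOn = supportedSystems: path: args:
        if builtins.elem system supportedSystems
        then import path args
        else { };
    }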
diff --git a/nixos/tests/lxd/preseed.nix b/nixos/tests/lxd/preseed.nix
new file mode 100644
index 0000000000000..7d89b9f56daa4
--- /dev/null
+++ b/nixos/tests/lxd/preseed.nix
@@ -0,0 +1,71 @@
+import ../make-test-python.nix ({ pkgs, lib, ... } :
+
+{
+  name = "lxd-preseed";
+
+  meta = {
+    maintainers = with lib.maintainers; [ adamcstephens ];
+  };
+
+  nodes.machine = { lib, ... }: {
+    virtualisation = {
+      diskSize = 4096;
+
+      lxc.lxcfs.enable = true;
+      lxd.enable = true;
+
+      lxd.preseed = {
+        networks = [
+          {
+            name = "nixostestbr0";
+            type = "bridge";
+            config = {
+              "ipv4.address" = "10.0.100.1/24";
+              "ipv4.nat" = "true";
+            };
+          }
+        ];
+        profiles = [
+          {
+            name = "nixostest_default";
+            devices = {
+              eth0 = {
+                name = "eth0";
+                network = "nixostestbr0";
+                type = "nic";
+              };
+              root = {
+                path = "/";
+                pool = "default";
+                size = "35GiB";
+                type = "disk";
+              };
+            };
+          }
+        ];
+        storage_pools = [
+          {
+            name = "nixostest_pool";
+            driver = "dir";
+          }
+        ];
+      };
+    };
+  };
+
+  testScript = ''
+    def wait_for_preseed(_) -> bool:
+      _, output = machine.systemctl("is-active lxd-preseed.service")
+      return ("inactive" in output)
+
+    machine.wait_for_unit("sockets.target")
+    machine.wait_for_unit("lxd.service")
+    with machine.nested("Waiting for preseed to complete"):
+      retry(wait_for_preseed)
+
+    with subtest("Verify preseed resources created"):
+      machine.succeed("lxc profile show nixostest_default")
+      machine.succeed("lxc network info nixostestbr0")
+      machine.succeed("lxc storage show nixostest_pool")
+  '';
+})
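The preseed exercised above declares networks, profiles and storage pools together, but the option does not require all three. A reduced sketch reusing names from the test, with only a storage pool (this is illustrative; the test itself does not assert this minimal form):

    {
      virtualisation.lxd.enable = true;
      virtualisation.lxd.preseed = {
        storage_pools = [
          { name = "nixostest_pool"; driver = "dir"; }
        ];
      };
    }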
diff --git a/nixos/tests/lxd/virtual-machine.nix b/nixos/tests/lxd/virtual-machine.nix
new file mode 100644
index 0000000000000..93705e9350c5a
--- /dev/null
+++ b/nixos/tests/lxd/virtual-machine.nix
@@ -0,0 +1,64 @@
+import ../make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  releases = import ../../release.nix {
+    configuration = {
+      # Building documentation makes the test take unnecessarily long:
+      documentation.enable = lib.mkForce false;
+
+      # Our tests require `grep` & friends:
+      environment.systemPackages = with pkgs; [busybox];
+    };
+  };
+
+  lxd-image-metadata = releases.lxdVirtualMachineImageMeta.${pkgs.stdenv.hostPlatform.system};
+  lxd-image-disk = releases.lxdVirtualMachineImage.${pkgs.stdenv.hostPlatform.system};
+
+  instance-name = "instance1";
+in {
+  name = "lxd-virtual-machine";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [adamcstephens];
+  };
+
+  nodes.machine = {lib, ...}: {
+    virtualisation = {
+      diskSize = 4096;
+
+      cores = 2;
+
+      # Ensure we have enough memory for the nested virtual machine
+      memorySize = 1024;
+
+      lxc.lxcfs.enable = true;
+      lxd.enable = true;
+    };
+  };
+
+  testScript = ''
+    def instance_is_up(_) -> bool:
+      status, _ = machine.execute("lxc exec ${instance-name} --disable-stdin --force-interactive /run/current-system/sw/bin/true")
+      return status == 0
+
+    machine.wait_for_unit("sockets.target")
+    machine.wait_for_unit("lxd.service")
+    machine.wait_for_file("/var/lib/lxd/unix.socket")
+
+    # Wait for lxd to settle
+    machine.succeed("lxd waitready")
+
+    machine.succeed("lxd init --minimal")
+
+    with subtest("virtual-machine image can be imported"):
+        machine.succeed("lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-disk}/nixos.qcow2 --alias nixos")
+
+    with subtest("virtual-machine can be launched and become available"):
+        machine.succeed("lxc launch nixos ${instance-name} --vm --config limits.memory=512MB --config security.secureboot=false")
+        with machine.nested("Waiting for instance to start and be usable"):
+          retry(instance_is_up)
+
+    with subtest("lxd-agent is started"):
+        machine.succeed("lxc exec ${instance-name} systemctl is-active lxd-agent")
+  '';
+})
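Both LXD image tests consume the renamed release.nix attributes (lxdContainerImage/-Meta and lxdVirtualMachineImage/-Meta). A sketch of evaluating the same artifacts outside the test driver, assuming it is evaluated from the root of a nixpkgs checkout on x86_64-linux:

    let
      releases = import ./nixos/release.nix { };
    in {
      containerRootfs = releases.lxdContainerImage.x86_64-linux;
      containerMeta = releases.lxdContainerMeta.x86_64-linux;
      vmDisk = releases.lxdVirtualMachineImage.x86_64-linux;
      vmMeta = releases.lxdVirtualMachineImageMeta.x86_64-linux;
    }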
diff --git a/nixos/tests/mobilizon.nix b/nixos/tests/mobilizon.nix
new file mode 100644
index 0000000000000..2b070ca9d9609
--- /dev/null
+++ b/nixos/tests/mobilizon.nix
@@ -0,0 +1,44 @@
+import ./make-test-python.nix ({ lib, ... }:
+  let
+    certs = import ./common/acme/server/snakeoil-certs.nix;
+    mobilizonDomain = certs.domain;
+    port = 41395;
+  in
+
+  {
+    name = "mobilizon";
+    meta.maintainers = with lib.maintainers; [ minijackson erictapen ];
+
+    nodes.server =
+      { ... }:
+      {
+        services.mobilizon = {
+          enable = true;
+          settings = {
+            ":mobilizon" = {
+              ":instance" = {
+                name = "Test Mobilizon";
+                hostname = mobilizonDomain;
+              };
+              "Mobilizon.Web.Endpoint".http.port = port;
+            };
+          };
+        };
+
+        security.pki.certificateFiles = [ certs.ca.cert ];
+
+        services.nginx.virtualHosts."${mobilizonDomain}" = {
+          enableACME = lib.mkForce false;
+          sslCertificate = certs.${mobilizonDomain}.cert;
+          sslCertificateKey = certs.${mobilizonDomain}.key;
+        };
+
+        networking.hosts."::1" = [ mobilizonDomain ];
+      };
+
+    testScript = ''
+      server.wait_for_unit("mobilizon.service")
+      server.wait_for_open_port(${toString port})
+      server.succeed("curl --fail https://${mobilizonDomain}/")
+    '';
+  })
diff --git a/nixos/tests/postgis.nix b/nixos/tests/postgis.nix
index 9d81ebaad85f2..d0685abc510c9 100644
--- a/nixos/tests/postgis.nix
+++ b/nixos/tests/postgis.nix
@@ -9,7 +9,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
       { pkgs, ... }:
 
       {
-        services.postgresql = let mypg = pkgs.postgresql_11; in {
+        services.postgresql = let mypg = pkgs.postgresql; in {
             enable = true;
             package = mypg;
             extraPlugins = with mypg.pkgs; [
diff --git a/nixos/tests/restic.nix b/nixos/tests/restic.nix
index 626049e73341f..3b9ea2f85b1ed 100644
--- a/nixos/tests/restic.nix
+++ b/nixos/tests/restic.nix
@@ -97,9 +97,9 @@ import ./make-test-python.nix (
       server.start()
       server.wait_for_unit("dbus.socket")
       server.fail(
-          "${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} snapshots",
-          '${pkgs.restic}/bin/restic -r ${remoteFromFileRepository} -p ${passwordFile} snapshots"',
-          "${pkgs.restic}/bin/restic -r ${rcloneRepository} -p ${passwordFile} snapshots",
+          "restic-remotebackup snapshots",
+          'restic-remote-from-file-backup snapshots"',
+          "restic-rclonebackup snapshots",
           "grep 'backup.* /opt' /root/fake-restic.log",
       )
       server.succeed(
@@ -112,20 +112,20 @@ import ./make-test-python.nix (
           "timedatectl set-time '2016-12-13 13:45'",
           "systemctl start restic-backups-remotebackup.service",
           "rm /root/backupCleanupCommand",
-          '${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
+          'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
 
           # test that restoring that snapshot produces the same directory
           "mkdir /tmp/restore-1",
-          "${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} restore latest -t /tmp/restore-1",
+          "restic-remotebackup restore latest -t /tmp/restore-1",
           "diff -ru ${testDir} /tmp/restore-1/opt",
 
           # test that remote-from-file-backup produces a snapshot
           "systemctl start restic-backups-remote-from-file-backup.service",
-          '${pkgs.restic}/bin/restic -r ${remoteFromFileRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
+          'restic-remote-from-file-backup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
 
           # test that rclonebackup produces a snapshot
           "systemctl start restic-backups-rclonebackup.service",
-          '${pkgs.restic}/bin/restic -r ${rcloneRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
+          'restic-rclonebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
 
           # test that custompackage runs both `restic backup` and `restic check` with reasonable commandlines
           "systemctl start restic-backups-custompackage.service",
@@ -158,12 +158,12 @@ import ./make-test-python.nix (
           "rm /root/backupCleanupCommand",
           "systemctl start restic-backups-rclonebackup.service",
 
-          '${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq "length | . == 4"',
-          '${pkgs.restic}/bin/restic -r ${rcloneRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq "length | . == 4"',
+          'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 4"',
+          'restic-rclonebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 4"',
 
           # test that remoteprune brings us back to 1 snapshot in remotebackup
           "systemctl start restic-backups-remoteprune.service",
-          '${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
+          'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
 
       )
     '';
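The restic-remotebackup, restic-remote-from-file-backup and restic-rclonebackup commands used above are per-backup wrappers that the restic module now puts on the PATH; conceptually they pre-bind the repository and password flags that the old invocations spelled out. A sketch of the idea, not the module's actual code (the interpolated names reuse the test's let bindings):

    # illustrative only; the module derives the name and flags from the
    # corresponding services.restic.backups.<name> definition
    pkgs.writeShellScriptBin "restic-remotebackup" ''
      exec ${pkgs.restic}/bin/restic \
        -r ${remoteRepository} -p ${passwordFile} "$@"
    ''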
diff --git a/nixos/tests/stalwart-mail.nix b/nixos/tests/stalwart-mail.nix
new file mode 100644
index 0000000000000..b5589966a160f
--- /dev/null
+++ b/nixos/tests/stalwart-mail.nix
@@ -0,0 +1,117 @@
+# Rudimentary test checking that the Stalwart email server can:
+# - receive some message through SMTP submission, then
+# - serve this message through IMAP.
+
+let
+  certs = import ./common/acme/server/snakeoil-certs.nix;
+  domain = certs.domain;
+
+in import ./make-test-python.nix ({ lib, ... }: {
+  name = "stalwart-mail";
+
+  nodes.main = { pkgs, ... }: {
+    security.pki.certificateFiles = [ certs.ca.cert ];
+
+    services.stalwart-mail = {
+      enable = true;
+      settings = {
+        server.hostname = domain;
+
+        certificate."snakeoil" = {
+          cert = "file://${certs.${domain}.cert}";
+          private-key = "file://${certs.${domain}.key}";
+        };
+
+        server.tls = {
+          certificate = "snakeoil";
+          enable = true;
+          implicit = false;
+        };
+
+        server.listener = {
+          "smtp-submission" = {
+            bind = [ "[::]:587" ];
+            protocol = "smtp";
+          };
+
+          "imap" = {
+            bind = [ "[::]:143" ];
+            protocol = "imap";
+          };
+        };
+
+        session.auth.mechanisms = [ "PLAIN" ];
+        session.auth.directory = "in-memory";
+        jmap.directory = "in-memory";  # shared with imap
+
+        session.rcpt.directory = "in-memory";
+        queue.outbound.next-hop = [ "local" ];
+
+        directory."in-memory" = {
+          type = "memory";
+          users = [
+            {
+              name = "alice";
+              secret = "foobar";
+              email = [ "alice@${domain}" ];
+            }
+            {
+              name = "bob";
+              secret = "foobar";
+              email = [ "bob@${domain}" ];
+            }
+          ];
+        };
+      };
+    };
+
+    environment.systemPackages = [
+      (pkgs.writers.writePython3Bin "test-smtp-submission" { } ''
+        from smtplib import SMTP
+
+        with SMTP('localhost', 587) as smtp:
+            smtp.starttls()
+            smtp.login('alice', 'foobar')
+            smtp.sendmail(
+                'alice@${domain}',
+                'bob@${domain}',
+                """
+                    From: alice@${domain}
+                    To: bob@${domain}
+                    Subject: Some test message
+
+                    This is a test message.
+                """.strip()
+            )
+      '')
+
+      (pkgs.writers.writePython3Bin "test-imap-read" { } ''
+        from imaplib import IMAP4
+
+        with IMAP4('localhost') as imap:
+            imap.starttls()
+            imap.login('bob', 'foobar')
+            imap.select('"All Mail"')
+            status, [ref] = imap.search(None, 'ALL')
+            assert status == 'OK'
+            [msgId] = ref.split()
+            status, msg = imap.fetch(msgId, 'BODY[TEXT]')
+            assert status == 'OK'
+            assert msg[0][1].strip() == b'This is a test message.'
+      '')
+    ];
+  };
+
+  testScript = /* python */ ''
+    main.wait_for_unit("stalwart-mail.service")
+    main.wait_for_open_port(587)
+    main.wait_for_open_port(143)
+
+    main.succeed("test-smtp-submission")
+    main.succeed("test-imap-read")
+  '';
+
+  meta = {
+    maintainers = with lib.maintainers; [ happysalada pacien ];
+  };
+})
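The test points Stalwart at the snakeoil certificate through its file:// syntax; outside the sandbox the same two settings would typically reference real key material, for example an ACME-managed certificate. The /var/lib/acme path below is an assumption about a typical security.acme setup, not something this test configures:

    certificate."letsencrypt" = {
      cert = "file:///var/lib/acme/example.org/fullchain.pem";
      private-key = "file:///var/lib/acme/example.org/key.pem";
    };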
diff --git a/nixos/tests/sudo.nix b/nixos/tests/sudo.nix
index 3b6028c6f0b09..1b177391488d8 100644
--- a/nixos/tests/sudo.nix
+++ b/nixos/tests/sudo.nix
@@ -5,7 +5,7 @@ let
 in
   import ./make-test-python.nix ({ lib, pkgs, ...} : {
     name = "sudo";
-    meta.maintainers = with lib.maintainers; [ lschuermann ];
+    meta.maintainers = pkgs.sudo.meta.maintainers;
 
     nodes.machine =
       { lib, ... }:
diff --git a/nixos/tests/systemd-initrd-swraid.nix b/nixos/tests/systemd-initrd-swraid.nix
index d87170c925742..d00e67b5705ab 100644
--- a/nixos/tests/systemd-initrd-swraid.nix
+++ b/nixos/tests/systemd-initrd-swraid.nix
@@ -20,6 +20,9 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
         ARRAY /dev/md0 devices=/dev/vdb,/dev/vdc
       '';
     };
+    environment.etc."mdadm.conf".text = ''
+      MAILADDR test@example.com
+    '';
     boot.initrd = {
       systemd = {
         enable = true;
@@ -29,11 +32,14 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
     };
 
     specialisation.boot-swraid.configuration.virtualisation.rootDevice = "/dev/disk/by-label/testraid";
+    # This protects against a regression. We do not have to switch to it.
+    # It's sufficient to trigger its evaluation.
+    specialisation.build-old-initrd.configuration.boot.initrd.systemd.enable = lib.mkForce false;
   };
 
   testScript = ''
     # Create RAID
-    machine.succeed("mdadm --create --force /dev/md0 -n 2 --level=raid0 /dev/vdb /dev/vdc")
+    machine.succeed("mdadm --create --force /dev/md0 -n 2 --level=raid1 /dev/vdb /dev/vdc --metadata=0.90")
     machine.succeed("mkfs.ext4 -L testraid /dev/md0")
     machine.succeed("mkdir -p /mnt && mount /dev/md0 /mnt && echo hello > /mnt/test && umount /mnt")
 
@@ -48,5 +54,13 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
     assert "/dev/md0 on / type ext4" in machine.succeed("mount")
     assert "hello" in machine.succeed("cat /test")
     assert "md0" in machine.succeed("cat /proc/mdstat")
+
+    expected_config = """MAILADDR test@example.com
+
+    ARRAY /dev/md0 devices=/dev/vdb,/dev/vdc
+    """
+    got_config = machine.execute("cat /etc/mdadm.conf")[1]
+    assert expected_config == got_config, repr((expected_config, got_config))
+    machine.wait_for_unit("mdmonitor.service")
   '';
 })
diff --git a/nixos/tests/wrappers.nix b/nixos/tests/wrappers.nix
index 391e9b42b45bd..1d4fa85d73993 100644
--- a/nixos/tests/wrappers.nix
+++ b/nixos/tests/wrappers.nix
@@ -21,6 +21,8 @@ in
       };
     };
 
+    security.apparmor.enable = true;
+
     security.wrappers = {
       suidRoot = {
         owner = "root";
@@ -84,17 +86,27 @@ in
       test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -g', '0')
       test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -rg', '0')
 
+      # Test that in a no_new_privs environment the wrappers simply exec their target.
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -u', '${toString userUid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -ru', '${toString userUid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -g', '${toString usersGid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -rg', '${toString usersGid}')
+
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -u', '${toString userUid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -ru', '${toString userUid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -g', '${toString usersGid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -rg', '${toString usersGid}')
+
       # We are only testing the permitted set, because it's easiest to look at with capsh.
       machine.fail(cmd_as_regular('${pkgs.libcap}/bin/capsh --has-p=CAP_CHOWN'))
       machine.fail(cmd_as_regular('${pkgs.libcap}/bin/capsh --has-p=CAP_SYS_ADMIN'))
       machine.succeed(cmd_as_regular('/run/wrappers/bin/capsh_with_chown --has-p=CAP_CHOWN'))
       machine.fail(cmd_as_regular('/run/wrappers/bin/capsh_with_chown --has-p=CAP_SYS_ADMIN'))
 
-      # test a few "attacks" against which the wrapper protects itself
-      machine.succeed("cp /run/wrappers/bin/suid_root_busybox{,.real} /tmp/")
-      machine.fail(cmd_as_regular("/tmp/suid_root_busybox id -u"))
-
-      machine.succeed("chmod u+s,a+w /run/wrappers/bin/suid_root_busybox")
-      machine.fail(cmd_as_regular("/run/wrappers/bin/suid_root_busybox id -u"))
+      # Test that the apparmor policy includes generated by the wrappers
+      # still work; ping is currently their only user. Ideally this would
+      # live in a test for the module that actually generates the apparmor
+      # policy for ping, but there is no convenient test for that one.
+      machine.succeed("ping -c 1 127.0.0.1")
     '';
 })
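The capsh_with_chown wrapper exercised a few lines up is declared through security.wrappers earlier in this test file; a sketch of what such a declaration looks like, with the attribute values as plausible assumptions rather than a copy of the test's definition:

    security.wrappers.capsh_with_chown = {
      owner = "root";
      group = "root";
      capabilities = "cap_chown+ep";
      source = "${pkgs.libcap}/bin/capsh";
    };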