Diffstat (limited to 'nixos')
-rw-r--r--  nixos/README.md | 21
-rw-r--r--  nixos/doc/manual/release-notes/rl-2305.section.md | 2
-rw-r--r--  nixos/doc/manual/release-notes/rl-2311.section.md | 30
-rw-r--r--  nixos/modules/config/stevenblack.nix | 2
-rw-r--r--  nixos/modules/installer/tools/nixos-generate-config.pl | 14
-rw-r--r--  nixos/modules/security/acme/default.nix | 4
-rw-r--r--  nixos/modules/security/sudo.nix | 10
-rw-r--r--  nixos/modules/services/audio/navidrome.nix | 7
-rw-r--r--  nixos/modules/services/databases/postgresql.md | 119
-rw-r--r--  nixos/modules/services/desktops/pipewire/pipewire.nix | 17
-rw-r--r--  nixos/modules/services/hardware/fwupd.nix | 9
-rw-r--r--  nixos/modules/services/mail/mailman.nix | 5
-rw-r--r--  nixos/modules/services/misc/autofs.nix | 2
-rw-r--r--  nixos/modules/services/misc/forgejo.md | 79
-rw-r--r--  nixos/modules/services/misc/forgejo.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/grafana.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/parsedmarc.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters.nix | 46
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix | 55
-rw-r--r--  nixos/modules/services/networking/harmonia.nix | 2
-rw-r--r--  nixos/modules/services/networking/ircd-hybrid/builder.sh | 2
-rw-r--r--  nixos/modules/services/networking/ntp/chrony.nix | 39
-rw-r--r--  nixos/modules/services/networking/unbound.nix | 28
-rw-r--r--  nixos/modules/services/networking/unifi.nix | 82
-rw-r--r--  nixos/modules/services/search/opensearch.nix | 19
-rw-r--r--  nixos/modules/services/torrent/flexget.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/mastodon.nix | 132
-rw-r--r--  nixos/modules/services/web-apps/plantuml-server.nix | 118
-rw-r--r--  nixos/modules/services/web-apps/plausible.nix | 52
-rw-r--r--  nixos/modules/services/web-servers/jboss/builder.sh | 2
-rw-r--r--  nixos/modules/system/boot/kernel.nix | 9
-rw-r--r--  nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py | 99
-rw-r--r--  nixos/modules/system/boot/systemd/initrd.nix | 2
-rw-r--r--  nixos/modules/tasks/filesystems/bcachefs.nix | 51
-rw-r--r--  nixos/modules/testing/test-instrumentation.nix | 27
-rw-r--r--  nixos/modules/virtualisation/azure-agent.nix | 9
-rw-r--r--  nixos/modules/virtualisation/azure-image.nix | 37
-rw-r--r--  nixos/tests/all-tests.nix | 2
-rw-r--r--  nixos/tests/archi.nix | 31
-rw-r--r--  nixos/tests/dnscrypt-wrapper/default.nix | 144
-rw-r--r--  nixos/tests/plantuml-server.nix | 20
-rw-r--r--  nixos/tests/plausible.nix | 7
-rw-r--r--  nixos/tests/pleroma.nix | 9
-rw-r--r--  nixos/tests/prometheus-exporters.nix | 15
-rw-r--r--  nixos/tests/web-apps/mastodon/remote-postgresql.nix | 22
-rw-r--r--  nixos/tests/web-apps/mastodon/script.nix | 3
-rw-r--r--  nixos/tests/web-apps/mastodon/standard.nix | 4
-rw-r--r--  nixos/tests/wordpress.nix | 2
-rw-r--r--  nixos/tests/xmpp/ejabberd.nix | 2
49 files changed, 1038 insertions, 359 deletions
diff --git a/nixos/README.md b/nixos/README.md
index d0257e12d9333..07e82bf0ad938 100644
--- a/nixos/README.md
+++ b/nixos/README.md
@@ -8,6 +8,27 @@ https://nixos.org/nixos and in the manual in doc/manual.
 
 You can add a new module to your NixOS configuration file (usually `/etc/nixos/configuration.nix`) and test it with `sudo nixos-rebuild test -I nixpkgs=<path to your local nixpkgs folder> --fast`.
 
+## Commit conventions
+
+- Make sure you read about the [commit conventions](../CONTRIBUTING.md#commit-conventions) common to Nixpkgs as a whole.
+
+- Format the commit messages in the following way:
+
+  ```
+  nixos/(module): (init module | add setting | refactor | etc)
+
+  (Motivation for change. Link to release notes. Additional information.)
+  ```
+
+  Examples:
+
+  * nixos/hydra: add bazBaz option
+
+    Dual baz behavior is needed to do foo.
+  * nixos/nginx: refactor config generation
+
+    The old config generation system used impure shell scripts and could break in specific circumstances (see #1234).
+
 ## Reviewing contributions
 
 When changing the bootloader installation process, extra care must be taken. Grub installations cannot be rolled back, hence changes may break people’s installations forever. For any non-trivial change to the bootloader please file a PR asking for review, especially from \@edolstra.
diff --git a/nixos/doc/manual/release-notes/rl-2305.section.md b/nixos/doc/manual/release-notes/rl-2305.section.md
index 0b54b8b32a35b..21c798b3b4a46 100644
--- a/nixos/doc/manual/release-notes/rl-2305.section.md
+++ b/nixos/doc/manual/release-notes/rl-2305.section.md
@@ -660,5 +660,5 @@ If reloading the module is not an option, proceed to [Nuclear option](#sec-relea
 
 #### Nuclear option {#sec-release-23.05-migration-pipewire-nuclear}
 If all else fails, you can still manually copy the contents of the default configuration file
-from `${pkgs.pipewire.lib}/share/pipewire` to `/etc/pipewire` and edit it to fully override the default.
+from `${pkgs.pipewire}/share/pipewire` to `/etc/pipewire` and edit it to fully override the default.
 However, this should be done only as a last resort. Please talk to the Pipewire maintainers if you ever need to do this.
diff --git a/nixos/doc/manual/release-notes/rl-2311.section.md b/nixos/doc/manual/release-notes/rl-2311.section.md
index 722bcb6b90886..e5e4e713ae276 100644
--- a/nixos/doc/manual/release-notes/rl-2311.section.md
+++ b/nixos/doc/manual/release-notes/rl-2311.section.md
@@ -104,6 +104,8 @@
 
 - [eris-server](https://codeberg.org/eris/eris-go). [ERIS](https://eris.codeberg.page/) is an encoding for immutable storage and this server provides block exchange as well as content decoding over HTTP and through a FUSE file-system. Available as [services.eris-server](#opt-services.eris-server.enable).
 
+- [forgejo](https://forgejo.org/), a git forge. Previously deployed as a drop-in replacement package in the [gitea module](#opt-services.gitea.package). Available as [services.forgejo](#opt-services.forgejo.enable). See migration instructions in the [NixOS manual](#module-forgejo) on how to migrate your forgejo instance using [`services.gitea.package = pkgs.forgejo`](#opt-services.gitea.package) to [`services.forgejo`](#opt-services.forgejo.enable).
+
 - hardware/infiniband.nix adds infiniband subnet manager support using an [opensm](https://github.com/linux-rdma/opensm) systemd-template service, instantiated on card guids. The module also adds kernel modules and cli tooling to help administrators debug and measure performance. Available as [hardware.infiniband.enable](#opt-hardware.infiniband.enable).
 
 - [zwave-js](https://github.com/zwave-js/zwave-js-server), a small server wrapper around Z-Wave JS to access it via a WebSocket. Available as [services.zwave-js](#opt-services.zwave-js.enable).
@@ -127,6 +129,8 @@
 
 - [ZITADEL](https://zitadel.com), a turnkey identity and access management platform. Available as [services.zitadel](#opt-services.zitadel.enable).
 
+- [exportarr](https://github.com/onedr0p/exportarr), Prometheus Exporters for Bazarr, Lidarr, Prowlarr, Radarr, Readarr, and Sonarr. Available as [services.prometheus.exporters.exportarr-bazarr](#opt-services.prometheus.exporters.exportarr-bazarr.enable)/[services.prometheus.exporters.exportarr-lidarr](#opt-services.prometheus.exporters.exportarr-lidarr.enable)/[services.prometheus.exporters.exportarr-prowlarr](#opt-services.prometheus.exporters.exportarr-prowlarr.enable)/[services.prometheus.exporters.exportarr-radarr](#opt-services.prometheus.exporters.exportarr-radarr.enable)/[services.prometheus.exporters.exportarr-readarr](#opt-services.prometheus.exporters.exportarr-readarr.enable)/[services.prometheus.exporters.exportarr-sonarr](#opt-services.prometheus.exporters.exportarr-sonarr.enable).
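+
+  For instance, a minimal sketch for the Sonarr variant (assuming Sonarr listens on its default port 8989; the API key path is a placeholder):
+
+  ```nix
+  services.prometheus.exporters.exportarr-sonarr = {
+    enable = true;
+    url = "http://127.0.0.1:8989";
+    apiKeyFile = "/run/secrets/sonarr-api-key";
+  };
+  ```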
+
 - [netclient](https://github.com/gravitl/netclient), an automated WireGuard® Management Client. Available as [services.netclient](#opt-services.netclient.enable).
 
 - [trunk-ng](https://github.com/ctron/trunk), A fork of `trunk`: Build, bundle & ship your Rust WASM application to the web
@@ -155,6 +159,8 @@
 
 - The latest version of `clonehero` now stores custom content in `~/.clonehero`. See the [migration instructions](https://clonehero.net/2022/11/29/v23-to-v1-migration-instructions.html). Typically, these content files would exist along side the binary, but the previous build used a wrapper script that would store them in `~/.config/unity3d/srylain Inc_/Clone Hero`.
 
+- `services.mastodon` doesn't support providing a TCP port to its `streaming` component anymore, as upstream implemented parallelization by running multiple instances instead of running multiple processes in one instance. Please create a PR if you are interested in this feature.
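+
+  The degree of parallelism is now set via the (required) [`services.mastodon.streamingProcesses`](#opt-services.mastodon.streamingProcesses) option; a sketch for a machine with four cores:
+
+  ```nix
+  services.mastodon.streamingProcesses = 3;
+  ```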
+
 - The `services.hostapd` module was rewritten to support `passwordFile` like options, WPA3-SAE, and management of multiple interfaces. This breaks compatibility with older configurations.
   - `hostapd` is now started with additional systemd sandbox/hardening options for better security.
   - `services.hostapd.interface` was replaced with a per-radio and per-bss configuration scheme using [services.hostapd.radios](#opt-services.hostapd.radios).
@@ -190,6 +196,8 @@
 
 - JACK tools (`jack_*` except `jack_control`) have moved from the `jack2` package to `jack-example-tools`
 
+- The `waagent` service now handles provisioning.
+
 - The `matrix-synapse` package & module have undergone some significant internal changes, for most setups no intervention is needed, though:
   - The option [`services.matrix-synapse.package`](#opt-services.matrix-synapse.package) is now read-only. For modifying the package, use an overlay which modifies `matrix-synapse-unwrapped` instead. More on that below.
   - The `enableSystemd` & `enableRedis` arguments have been removed and `matrix-synapse` has been renamed to `matrix-synapse-unwrapped`. Also, several optional dependencies (such as `psycopg2` or `authlib`) have been removed.
@@ -267,6 +275,18 @@
 
 - `fileSystems.<name>.autoResize` now uses `systemd-growfs` to resize the file system online in stage 2. This means that `f2fs` and `ext2` can no longer be auto resized, while `xfs` and `btrfs` now can be.
 
+- `fuse3` has been updated from 3.11.0 to 3.16.2; see [ChangeLog.rst](https://github.com/libfuse/libfuse/blob/fuse-3.16.2/ChangeLog.rst#libfuse-3162-2023-10-10) for an overview of the changes.
+
+  Unsupported mount options are no longer silently accepted [(since 3.15.0)](https://github.com/libfuse/libfuse/blob/fuse-3.16.2/ChangeLog.rst#libfuse-3150-2023-06-09). The [affected mount options](https://github.com/libfuse/libfuse/commit/dba6b3983af34f30de01cf532dff0b66f0ed6045) are: `atime`, `diratime`, `lazytime`, `nolazytime`, `relatime`, `norelatime`, `strictatime`.
+
+  For example,
+
+  ```bash
+  $ sshfs 127.0.0.1:/home/test/testdir /home/test/sshfs_mnt -o atime
+  ```
+
+  would previously terminate successfully with the mount point established; now it outputs the error message ``fuse: unknown option(s): `-o atime'`` and terminates with exit status 1.
+
 - `nixos-rebuild {switch,boot,test,dry-activate}` now runs the system activation inside `systemd-run`, creating an ephemeral systemd service and protecting the system switch against issues like network disconnections during remote (e.g. SSH) sessions. This has the side effect of running the switch in an isolated environment that could possibly break post-switch scripts that depend on things like environment variables being set. If you want to opt out of this behavior for now, you may set the `NIXOS_SWITCH_USE_DIRTY_ENV` environment variable before running `nixos-rebuild`. However, keep in mind that this option will be removed in the future.
 
 - The `services.vaultwarden.config` option default value was changed to make Vaultwarden only listen on localhost, following the [secure defaults for most NixOS services](https://github.com/NixOS/nixpkgs/issues/100192).
@@ -303,7 +323,7 @@
 
 - The default `kops` version is now 1.28.0 and support for 1.25 and older has been dropped.
 
-- `pharo` has been updated to latest stable (PharoVM 10.0.5), which is compatible with the latest stable and oldstable images (Pharo 10 and 11). The VM in question is the 64bit Spur. The 32bit version has been dropped due to lack of maintenance. The Cog VM has been deleted because it is severily outdated. Finally, the `pharo-launcher` package has been deleted because it was not compatible with the newer VM, and due to lack of maintenance.
+- `pharo` has been updated to the latest stable (PharoVM 10.0.8), which is compatible with the latest stable and oldstable images (Pharo 10 and 11). The VM in question is the 64bit Spur. The 32bit version has been dropped due to lack of maintenance. The Cog VM has been deleted because it is severely outdated. Finally, the `pharo-launcher` package has been deleted because it was not compatible with the newer VM, and due to lack of maintenance.
 
 - Emacs mainline version 29 was introduced. This new version includes many major additions, most notably `tree-sitter` support (enabled by default) and the pgtk variant (useful for Wayland users), which is available under the attribute `emacs29-pgtk`.
 
@@ -324,6 +344,8 @@
 
 - Package `pash` was removed due to being archived upstream. Use `powershell` as an alternative.
 
+- The option `services.plausible.releaseCookiePath` has been removed: Plausible does not use any distributed Erlang features, and does not plan to (see [discussion](https://github.com/NixOS/nixpkgs/pull/130297#issuecomment-1805851333)), so NixOS now disables them, and the Erlang cookie becomes unnecessary. You may delete the file that `releaseCookiePath` was set to.
+
 - `security.sudo.extraRules` now includes `root`'s default rule, with ordering
   priority 400. This is functionally identical for users not specifying rule
   order, or relying on `mkBefore` and `mkAfter`, but may impact users calling
@@ -368,6 +390,8 @@
 
 - The `prayer` package as well as `services.prayer` have been removed because it's been unmaintained for several years and the author's website has vanished.
 
+- The `chrony` NixOS module now tracks the Real-Time Clock drift from the System Clock with `rtcfile` and automatically adjusts it with `rtcautotrim` when it exceeds the maximum error specified in `services.chrony.autotrimThreshold` (default 30 seconds). If you enabled `rtcsync` in `extraConfig`, you should remove RTC-related options from `extraConfig`. If you do not want chrony configured to keep the RTC in check, you can set `services.chrony.enableRTCTrimming = false;`.
+
 ## Other Notable Changes {#sec-release-23.11-notable-changes}
 
 - A new option `system.switch.enable` was added. By default, this option is
@@ -522,6 +546,8 @@ The module update takes care of the new config syntax and the data itself (user
 
 - The Home Assistant module now offers support for installing custom components and lovelace modules. Available at [`services.home-assistant.customComponents`](#opt-services.home-assistant.customComponents) and [`services.home-assistant.customLovelaceModules`](#opt-services.home-assistant.customLovelaceModules).
 
+- The argument `vendorSha256` of `buildGoModule` is deprecated. Use `vendorHash` instead. ([\#259999](https://github.com/NixOS/nixpkgs/pull/259999))
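+
+  A hypothetical sketch of the rename (package name, version, source and hash are placeholders):
+
+  ```nix
+  buildGoModule {
+    pname = "example";
+    version = "1.0.0";
+    src = ./.;
+    # previously: vendorSha256 = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
+    vendorHash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
+  }
+  ```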
+
 ## Nixpkgs internals {#sec-release-23.11-nixpkgs-internals}
 
 - The use of `sourceRoot = "source";`, `sourceRoot = "source/subdir";`, and similar lines in package derivations using the default `unpackPhase` is deprecated as it requires `unpackPhase` to always produce a directory named "source". Use `sourceRoot = src.name`, `sourceRoot = "${src.name}/subdir";`, or `setSourceRoot = "sourceRoot=$(echo */subdir)";` or similar instead.
@@ -566,4 +592,6 @@ The module update takes care of the new config syntax and the data itself (user
 
 - The Linux kernel module `msr` (see [`msr(4)`](https://man7.org/linux/man-pages/man4/msr.4.html)), which provides an interface to read and write the model-specific registers (MSRs) of an x86 CPU, can now be configured via `hardware.cpu.x86.msr`.
 
+- Docker now defaults to 24, as 20.10 will stop receiving security updates and bug fixes after [December 10, 2023](https://github.com/moby/moby/discussions/45104).
+
 - There is a new option for writing NixOS tests, `testing.initrdBackdoor`, which enables `backdoor.service` in initrd. It requires `boot.initrd.systemd.enable` to be enabled. Boot will pause in stage 1 at `initrd.target` and listen for commands from the `Machine` python interface, just like stage 2 normally does. This enables commands to be sent to test and debug stage 1. Use `machine.switch_root()` to leave stage 1 and proceed to stage 2.
diff --git a/nixos/modules/config/stevenblack.nix b/nixos/modules/config/stevenblack.nix
index 30ef7ff259f09..7e6235169847a 100644
--- a/nixos/modules/config/stevenblack.nix
+++ b/nixos/modules/config/stevenblack.nix
@@ -30,5 +30,5 @@ in
       ++ optionals (activatedHosts == [ ]) [ "${pkgs.stevenblack-blocklist}/hosts" ];
   };
 
-  meta.maintainers = [ maintainers.fortuneteller2k maintainers.artturin ];
+  meta.maintainers = [ maintainers.moni maintainers.artturin ];
 }
diff --git a/nixos/modules/installer/tools/nixos-generate-config.pl b/nixos/modules/installer/tools/nixos-generate-config.pl
index 71737cd8ebc41..2f9edba4f0c9c 100644
--- a/nixos/modules/installer/tools/nixos-generate-config.pl
+++ b/nixos/modules/installer/tools/nixos-generate-config.pl
@@ -257,6 +257,7 @@ foreach my $path (glob "/sys/class/{block,mmc_host}/*") {
 
 # Add bcache module, if needed.
 my @bcacheDevices = glob("/dev/bcache*");
+@bcacheDevices = grep { $_ !~ qr#dev/bcachefs.*# } @bcacheDevices;
 if (scalar @bcacheDevices > 0) {
     push @initrdAvailableKernelModules, "bcache";
 }
@@ -467,6 +468,19 @@ EOF
     # boot.tmp.useTmpfs option in configuration.nix (managed declaratively).
     next if ($mountPoint eq "/tmp" && $fsType eq "tmpfs");
 
+    # This should work for single and multi-device systems.
+    # Still needs subvolume support.
+    if ($fsType eq "bcachefs") {
+        my ($status, @info) = runCommand("bcachefs fs usage $rootDir$mountPoint");
+        my $UUID = $info[0];
+
+        if ($status == 0 && $UUID =~ /^Filesystem:[ \t\n]*([0-9a-z-]+)/) {
+            $stableDevPath = "UUID=$1";
+        } else {
+            print STDERR "warning: can't find bcachefs mount UUID, falling back to device path\n";
+        }
+    }
+
     # Emit the filesystem.
     $fileSystems .= <<EOF;
   fileSystems.\"$mountPoint\" =
diff --git a/nixos/modules/security/acme/default.nix b/nixos/modules/security/acme/default.nix
index 932bf3e791159..7cc302969fb6d 100644
--- a/nixos/modules/security/acme/default.nix
+++ b/nixos/modules/security/acme/default.nix
@@ -345,6 +345,10 @@ let
       serviceConfig = commonServiceConfig // {
         Group = data.group;
 
+        # Let's Encrypt's Failed Validation limit allows 5 retries per account, per hostname, per hour.
+        # This avoids eating them all up if something is misconfigured upon the first try.
+        RestartSec = 15 * 60;
+
         # Keep in mind that these directories will be deleted if the user runs
         # systemctl clean --what=state
         # acme/.lego/${cert} is listed for this reason.
diff --git a/nixos/modules/security/sudo.nix b/nixos/modules/security/sudo.nix
index ff912dec5073f..3dd5d2e525d91 100644
--- a/nixos/modules/security/sudo.nix
+++ b/nixos/modules/security/sudo.nix
@@ -192,10 +192,12 @@ in
   ###### implementation
 
   config = mkIf cfg.enable {
-    assertions = [
-      { assertion = cfg.package.pname != "sudo-rs";
-        message = "The NixOS `sudo` module does not work with `sudo-rs` yet."; }
-    ];
+    assertions = [ {
+      assertion = cfg.package.pname != "sudo-rs";
+      message = ''
+        NixOS' `sudo` module does not support `sudo-rs`; see `security.sudo-rs` instead.
+      '';
+    } ];
 
     security.sudo.extraRules =
       let
diff --git a/nixos/modules/services/audio/navidrome.nix b/nixos/modules/services/audio/navidrome.nix
index e18e61eb6d44b..77a0e74af9ca2 100644
--- a/nixos/modules/services/audio/navidrome.nix
+++ b/nixos/modules/services/audio/navidrome.nix
@@ -28,10 +28,17 @@ in {
         '';
       };
 
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to open the TCP port in the firewall.";
+      };
     };
   };
 
   config = mkIf cfg.enable {
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.settings.Port];
+
     systemd.services.navidrome = {
       description = "Navidrome Media Server";
       after = [ "network.target" ];
diff --git a/nixos/modules/services/databases/postgresql.md b/nixos/modules/services/databases/postgresql.md
index d65d9616e2f2d..e5e0b7efec29a 100644
--- a/nixos/modules/services/databases/postgresql.md
+++ b/nixos/modules/services/databases/postgresql.md
@@ -39,6 +39,125 @@ By default, PostgreSQL stores its databases in {file}`/var/lib/postgresql/$psqlS
 services.postgresql.dataDir = "/data/postgresql";
 ```
 
+## Initializing {#module-services-postgres-initializing}
+
+As of NixOS 23.11,
+`services.postgresql.ensureUsers.*.ensurePermissions` has been
+deprecated, after a change to default permissions in PostgreSQL 15
+invalidated most of its previous use cases:
+
+- In psql < 15, `ALL PRIVILEGES` used to include `CREATE TABLE`, whereas
+  in psql >= 15 it is a separate permission
+- psql >= 15 instead gives only the database owner create permissions
+- Even on psql < 15 (or databases migrated to >= 15), it is
+  recommended to manually assign permissions along these lines
+  - https://www.postgresql.org/docs/release/15.0/
+  - https://www.postgresql.org/docs/15/ddl-schemas.html#DDL-SCHEMAS-PRIV
+
+### Assigning ownership {#module-services-postgres-initializing-ownership}
+
+Usually, the database owner should be a database user of the same
+name. This can be done with
+`services.postgresql.ensureUsers.*.ensureDBOwnership = true;`.
+
+If the database user name equals the connecting system user name,
+postgres by default will accept a passwordless connection via unix
+domain socket. This makes it possible to run many postgres-backed
+services without creating any database secrets at all.
+
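+For example, a minimal sketch for a hypothetical service whose database and
+database user are both named `service1`:
+
+```nix
+    services.postgresql = {
+      enable = true;
+      ensureDatabases = [ "service1" ];
+      ensureUsers = [
+        {
+          name = "service1";
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+```
+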
+### Assigning extra permissions {#module-services-postgres-initializing-extra-permissions}
+
+For many cases, it will be enough to have the database user be the
+owner. Until `services.postgresql.ensureUsers.*.ensurePermissions` has
+been re-thought, if more users need access to the database, please use
+one of the following approaches:
+
+**WARNING:** `services.postgresql.initialScript` is not recommended as an
+`ensurePermissions` replacement, as it is *only run on the first start of
+PostgreSQL*.
+
+**NOTE:** All of these methods may be obsoleted when `ensure*` is
+reworked, but it is expected that they will stay viable for running
+database migrations.
+
+**NOTE:** Please make sure that any added migrations are idempotent (re-runnable).
+
+#### as superuser {#module-services-postgres-initializing-extra-permissions-superuser}
+
+**Advantage:** compatible with postgres < 15, because it's run
+as the database superuser `postgres`.
+
+##### in database `postStart` {#module-services-postgres-initializing-extra-permissions-superuser-post-start}
+
+**Disadvantage:** you need to take care of ordering yourself. In this
+example, `mkAfter` ensures that permissions are assigned after any
+databases from `ensureDatabases` and `extraUser1` from `ensureUsers`
+are already created.
+
+```nix
+    systemd.services.postgresql.postStart = lib.mkAfter ''
+      $PSQL service1 -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO "extraUser1"'
+      $PSQL service1 -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
+      # ....
+    '';
+```
+
+##### in intermediate oneshot service {#module-services-postgres-initializing-extra-permissions-superuser-oneshot}
+
+```nix
+    systemd.services."migrate-service1-db1" = {
+      serviceConfig.Type = "oneshot";
+      requiredBy = [ "service1.service" ];
+      before = [ "service1.service" ];
+      after = [ "postgresql.service" ];
+      serviceConfig.User = "postgres";
+      environment.PSQL = "psql --port=${toString config.services.postgresql.port}";
+      path = [ config.services.postgresql.package ];
+      script = ''
+        $PSQL service1 -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO "extraUser1"'
+        $PSQL service1 -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
+        # ....
+      '';
+    };
+```
+
+#### as service user {#module-services-postgres-initializing-extra-permissions-service-user}
+
+**Advantage:** re-uses systemd's dependency ordering.
+
+**Disadvantage:** relies on the service user having grant permission. To be combined with `ensureDBOwnership`.
+
+##### in service `preStart` {#module-services-postgres-initializing-extra-permissions-service-user-pre-start}
+
+```nix
+    systemd.services."service1" = {
+      environment.PSQL = "psql --port=${toString config.services.postgresql.port}";
+      path = [ config.services.postgresql.package ];
+      preStart = ''
+        $PSQL -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO "extraUser1"'
+        $PSQL -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
+        # ....
+      '';
+    };
+```
+
+##### in intermediate oneshot service {#module-services-postgres-initializing-extra-permissions-service-user-oneshot}
+
+```nix
+    systemd.services."migrate-service1-db1" = {
+      serviceConfig.Type = "oneshot";
+      requiredBy = [ "service1.service" ];
+      before = [ "service1.service" ];
+      after = [ "postgresql.service" ];
+      serviceConfig.User = "service1";
+      environment.PSQL = "psql --port=${toString config.services.postgresql.port}";
+      path = [ config.services.postgresql.package ];
+      script = ''
+        $PSQL -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO "extraUser1"'
+        $PSQL -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
+        # ....
+      '';
+    };
+```
+
 ## Upgrading {#module-services-postgres-upgrading}
 
 ::: {.note}
diff --git a/nixos/modules/services/desktops/pipewire/pipewire.nix b/nixos/modules/services/desktops/pipewire/pipewire.nix
index ae695baf42c60..07ca2727cf48e 100644
--- a/nixos/modules/services/desktops/pipewire/pipewire.nix
+++ b/nixos/modules/services/desktops/pipewire/pipewire.nix
@@ -115,8 +115,7 @@ in {
     environment.systemPackages = [ cfg.package ]
                                  ++ lib.optional cfg.jack.enable jack-libs;
 
-    systemd.packages = [ cfg.package ]
-                       ++ lib.optional cfg.pulse.enable cfg.package.pulse;
+    systemd.packages = [ cfg.package ];
 
     # PipeWire depends on DBUS but doesn't list it. Without this booting
     # into a terminal results in the service crashing with an error.
@@ -130,9 +129,13 @@ in {
     systemd.user.sockets.pipewire.enable = !cfg.systemWide;
     systemd.user.services.pipewire.enable = !cfg.systemWide;
 
+    # Mask pw-pulse if it's not wanted
+    systemd.user.services.pipewire-pulse.enable = cfg.pulse.enable;
+    systemd.user.sockets.pipewire-pulse.enable = cfg.pulse.enable;
+
     systemd.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
     systemd.user.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
-    systemd.user.sockets.pipewire-pulse.wantedBy = lib.mkIf (cfg.socketActivation && cfg.pulse.enable) ["sockets.target"];
+    systemd.user.sockets.pipewire-pulse.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
 
     services.udev.packages = [ cfg.package ];
 
@@ -140,14 +143,14 @@ in {
     environment.etc."alsa/conf.d/49-pipewire-modules.conf" = mkIf cfg.alsa.enable {
       text = ''
         pcm_type.pipewire {
-          libs.native = ${cfg.package.lib}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;
+          libs.native = ${cfg.package}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;
           ${optionalString enable32BitAlsaPlugins
-            "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire.lib}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;"}
+            "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;"}
         }
         ctl_type.pipewire {
-          libs.native = ${cfg.package.lib}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;
+          libs.native = ${cfg.package}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;
           ${optionalString enable32BitAlsaPlugins
-            "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire.lib}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;"}
+            "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;"}
         }
       '';
     };
diff --git a/nixos/modules/services/hardware/fwupd.nix b/nixos/modules/services/hardware/fwupd.nix
index 7a938459d0cb5..7b6c336bd2211 100644
--- a/nixos/modules/services/hardware/fwupd.nix
+++ b/nixos/modules/services/hardware/fwupd.nix
@@ -187,13 +187,20 @@ in {
       # fwupd-refresh expects a user that we do not create, so just run with DynamicUser
       # instead and ensure we take ownership of /var/lib/fwupd
       services.fwupd-refresh.serviceConfig = {
-        DynamicUser = true;
         StateDirectory = "fwupd";
+        # Better for debugging, upstream sets stderr to null for some reason..
+        StandardError = "inherit";
       };
 
       timers.fwupd-refresh.wantedBy = [ "timers.target" ];
     };
 
+    users.users.fwupd-refresh = {
+      isSystemUser = true;
+      group = "fwupd-refresh";
+    };
+    users.groups.fwupd-refresh = {};
+
     security.polkit.enable = true;
   };
 
diff --git a/nixos/modules/services/mail/mailman.nix b/nixos/modules/services/mail/mailman.nix
index a7e8aee1f2a26..76035625fbe17 100644
--- a/nixos/modules/services/mail/mailman.nix
+++ b/nixos/modules/services/mail/mailman.nix
@@ -493,6 +493,9 @@ in {
           RuntimeDirectory = "mailman";
           LogsDirectory = "mailman";
           PIDFile = "/run/mailman/master.pid";
+          Restart = "on-failure";
+          TimeoutStartSec = 180;
+          TimeoutStopSec = 180;
         };
       };
 
@@ -596,6 +599,7 @@ in {
           User = cfg.webUser;
           Group = "mailman";
           RuntimeDirectory = "mailman-uwsgi";
+          Restart = "on-failure";
         };
       });
 
@@ -620,6 +624,7 @@ in {
           User = cfg.webUser;
           Group = "mailman";
           WorkingDirectory = "/var/lib/mailman-web";
+          Restart = "on-failure";
         };
       };
     } // flip lib.mapAttrs' {
diff --git a/nixos/modules/services/misc/autofs.nix b/nixos/modules/services/misc/autofs.nix
index 55ab15ff003d1..723b67e8bb6bc 100644
--- a/nixos/modules/services/misc/autofs.nix
+++ b/nixos/modules/services/misc/autofs.nix
@@ -74,7 +74,7 @@ in
 
   config = mkIf cfg.enable {
 
-    boot.kernelModules = [ "autofs4" ];
+    boot.kernelModules = [ "autofs" ];
 
     systemd.services.autofs =
       { description = "Automounts filesystems on demand";
diff --git a/nixos/modules/services/misc/forgejo.md b/nixos/modules/services/misc/forgejo.md
new file mode 100644
index 0000000000000..6a34073820856
--- /dev/null
+++ b/nixos/modules/services/misc/forgejo.md
@@ -0,0 +1,79 @@
+# Forgejo {#module-forgejo}
+
+Forgejo is a soft-fork of Gitea with a strong focus on community,
+self-hosting and federation. [Codeberg](https://codeberg.org) is
+deployed from it.
+
+See [upstream docs](https://forgejo.org/docs/latest/).
+
+The method of choice for running Forgejo is [`services.forgejo`](#opt-services.forgejo.enable).
+
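+A minimal sketch just enables the service and keeps every other option at its default:
+
+```nix
+services.forgejo.enable = true;
+```
+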
+::: {.warning}
+Running forgejo using `services.gitea.package = pkgs.forgejo` is no longer
+recommended.
+If you experience issues with your instance using `services.gitea`,
+**DO NOT** report them to the `services.gitea` module maintainers.
+**DO** report them to the `services.forgejo` module maintainers instead.
+:::
+
+## Migration from Gitea {#module-forgejo-migration-gitea}
+
+::: {.note}
+Migrating is, while not strictly necessary at this point, highly recommended.
+Both modules and projects are likely to diverge further with each release,
+which might lead to an even more involved migration later.
+:::
+
+### Full-Migration {#module-forgejo-migration-gitea-default}
+
+This will migrate the state directory (data), rename and chown the database,
+and delete the gitea user.
+
+::: {.note}
+This will also change the git remote ssh-url user from `gitea@` to `forgejo@`,
+when using the host's openssh server (default) instead of the integrated one.
+:::
+
+Instructions for PostgreSQL (default). Adapt accordingly for other databases:
+
+```sh
+systemctl stop gitea
+mv /var/lib/gitea /var/lib/forgejo
+runuser -u postgres -- psql -c '
+  ALTER USER gitea RENAME TO forgejo;
+  ALTER DATABASE gitea RENAME TO forgejo;
+'
+nixos-rebuild switch
+systemctl stop forgejo
+chown -R forgejo:forgejo /var/lib/forgejo
+systemctl restart forgejo
+```
+
+### Alternatively, keeping the gitea user {#module-forgejo-migration-gitea-impersonate}
+
+Instead of renaming the database, copying the state folder and changing the
+user, the forgejo module can be set up to re-use the old storage locations
+and database.
+Make sure to disable `services.gitea` when doing this.
+
+```nix
+services.gitea.enable = false;
+
+services.forgejo = {
+  enable = true;
+  user = "gitea";
+  group = "gitea";
+  stateDir = "/var/lib/gitea";
+  database.name = "gitea";
+  database.user = "gitea";
+};
+
+users.users.gitea = {
+  home = "/var/lib/gitea";
+  useDefaultShell = true;
+  group = "gitea";
+  isSystemUser = true;
+};
+
+users.groups.gitea = {};
+```
diff --git a/nixos/modules/services/misc/forgejo.nix b/nixos/modules/services/misc/forgejo.nix
index 6f459048f347e..15966adfe38e7 100644
--- a/nixos/modules/services/misc/forgejo.nix
+++ b/nixos/modules/services/misc/forgejo.nix
@@ -685,5 +685,6 @@ in
     };
   };
 
+  meta.doc = ./forgejo.md;
   meta.maintainers = with lib.maintainers; [ bendlas emilylange ];
 }
diff --git a/nixos/modules/services/monitoring/grafana.nix b/nixos/modules/services/monitoring/grafana.nix
index 5e21407042b6a..f84d677f14d85 100644
--- a/nixos/modules/services/monitoring/grafana.nix
+++ b/nixos/modules/services/monitoring/grafana.nix
@@ -1841,6 +1841,7 @@ in
       serviceConfig = {
         WorkingDirectory = cfg.dataDir;
         User = "grafana";
+        Restart = "on-failure";
         RuntimeDirectory = "grafana";
         RuntimeDirectoryMode = "0755";
         # Hardening
diff --git a/nixos/modules/services/monitoring/parsedmarc.nix b/nixos/modules/services/monitoring/parsedmarc.nix
index 44fc359b6a7de..a146e7ab95435 100644
--- a/nixos/modules/services/monitoring/parsedmarc.nix
+++ b/nixos/modules/services/monitoring/parsedmarc.nix
@@ -301,6 +301,7 @@ in
               description = lib.mdDoc ''
                 The addresses to send outgoing mail to.
               '';
+              apply = x: if x == [] then null else lib.concatStringsSep "," x;
             };
           };
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixos/modules/services/monitoring/prometheus/exporters.nix
index 305f235054be7..f89522c098646 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -2,8 +2,8 @@
 
 let
   inherit (lib) concatStrings foldl foldl' genAttrs literalExpression maintainers
-                mapAttrsToList mkDefault mkEnableOption mkIf mkMerge mkOption
-                optional types mkOptionDefault flip attrNames;
+    mapAttrs mapAttrsToList mkDefault mkEnableOption mkIf mkMerge mkOption
+    optional types mkOptionDefault flip attrNames;
 
   cfg = config.services.prometheus.exporters;
 
@@ -20,7 +20,7 @@ let
   #  systemd service must be provided by specifying either
   #  `serviceOpts.script` or `serviceOpts.serviceConfig.ExecStart`
 
-  exporterOpts = genAttrs [
+  exporterOpts = (genAttrs [
     "apcupsd"
     "artifactory"
     "bind"
@@ -34,14 +34,15 @@ let
     "domain"
     "dovecot"
     "fastly"
+    "flow"
     "fritzbox"
     "graphite"
     "idrac"
     "imap-mailstat"
     "influxdb"
     "ipmi"
-    "json"
     "jitsi"
+    "json"
     "junos-czerwonk"
     "kea"
     "keylight"
@@ -74,9 +75,9 @@ let
     "scaphandre"
     "script"
     "shelly"
-    "snmp"
     "smartctl"
     "smokeping"
+    "snmp"
     "sql"
     "statsd"
     "surfboard"
@@ -88,10 +89,39 @@ let
     "v2ray"
     "varnish"
     "wireguard"
-    "flow"
     "zfs"
-  ] (name:
-    import (./. + "/exporters/${name}.nix") { inherit config lib pkgs options; }
+  ]
+    (name:
+      import (./. + "/exporters/${name}.nix") { inherit config lib pkgs options; }
+    )) // (mapAttrs
+    (name: params:
+      import (./. + "/exporters/${params.name}.nix") { inherit config lib pkgs options; type = params.type ; })
+    {
+      exportarr-bazarr = {
+        name = "exportarr";
+        type = "bazarr";
+      };
+      exportarr-lidarr = {
+        name = "exportarr";
+        type = "lidarr";
+      };
+      exportarr-prowlarr = {
+        name = "exportarr";
+        type = "prowlarr";
+      };
+      exportarr-radarr = {
+        name = "exportarr";
+        type = "radarr";
+      };
+      exportarr-readarr = {
+        name = "exportarr";
+        type = "readarr";
+      };
+      exportarr-sonarr = {
+        name = "exportarr";
+        type = "sonarr";
+      };
+    }
   );
 
   mkExporterOpts = ({ name, port }: {
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix b/nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix
new file mode 100644
index 0000000000000..1322093354109
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix
@@ -0,0 +1,55 @@
+{ config, lib, pkgs, options, type }:
+
+let
+  cfg = config.services.prometheus.exporters."exportarr-${type}";
+  exportarrEnvironment = (
+    lib.mapAttrs (_: toString) cfg.environment
+  ) // {
+    PORT = toString cfg.port;
+    URL = cfg.url;
+    API_KEY_FILE = lib.mkIf (cfg.apiKeyFile != null) "%d/api-key";
+  };
+in
+{
+  port = 9708;
+  extraOpts = {
+    url = lib.mkOption {
+      type = lib.types.str;
+      default = "http://127.0.0.1";
+      description = lib.mdDoc ''
+        The full URL to Sonarr, Radarr, or Lidarr.
+      '';
+    };
+
+    apiKeyFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      description = lib.mdDoc ''
+        File containing the api-key.
+      '';
+    };
+
+    package = lib.mkPackageOptionMD pkgs "exportarr" { };
+
+    environment = lib.mkOption {
+      type = lib.types.attrsOf lib.types.str;
+      default = { };
+      description = lib.mdDoc ''
+        See [the configuration guide](https://github.com/onedr0p/exportarr#configuration) for available options.
+      '';
+      example = {
+        PROWLARR__BACKFILL = true;
+      };
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      LoadCredential = lib.optionalString (cfg.apiKeyFile != null) "api-key:${cfg.apiKeyFile}";
+      ExecStart = ''${cfg.package}/bin/exportarr ${type} "$@"'';
+      ProcSubset = "pid";
+      ProtectProc = "invisible";
+      SystemCallFilter = ["@system-service" "~@privileged"];
+    };
+    environment = exportarrEnvironment;
+  };
+}
diff --git a/nixos/modules/services/networking/harmonia.nix b/nixos/modules/services/networking/harmonia.nix
index 144fa6c708e28..4733165cf7d19 100644
--- a/nixos/modules/services/networking/harmonia.nix
+++ b/nixos/modules/services/networking/harmonia.nix
@@ -28,6 +28,8 @@ in
   };
 
   config = lib.mkIf cfg.enable {
+    nix.settings.extra-allowed-users = [ "harmonia" ];
+
     systemd.services.harmonia = {
       description = "harmonia binary cache service";
 
diff --git a/nixos/modules/services/networking/ircd-hybrid/builder.sh b/nixos/modules/services/networking/ircd-hybrid/builder.sh
index d9d2e4264dfd1..07a3788abf7d2 100644
--- a/nixos/modules/services/networking/ircd-hybrid/builder.sh
+++ b/nixos/modules/services/networking/ircd-hybrid/builder.sh
@@ -1,4 +1,4 @@
-if [ -e .attrs.sh ]; then source .attrs.sh; fi
+if [ -e "$NIX_ATTRS_SH_FILE" ]; then . "$NIX_ATTRS_SH_FILE"; elif [ -f .attrs.sh ]; then . .attrs.sh; fi
 source $stdenv/setup
 
 doSub() {
diff --git a/nixos/modules/services/networking/ntp/chrony.nix b/nixos/modules/services/networking/ntp/chrony.nix
index afd721e34da5a..d370e6946d7b4 100644
--- a/nixos/modules/services/networking/ntp/chrony.nix
+++ b/nixos/modules/services/networking/ntp/chrony.nix
@@ -9,6 +9,7 @@ let
   stateDir = cfg.directory;
   driftFile = "${stateDir}/chrony.drift";
   keyFile = "${stateDir}/chrony.keys";
+  rtcFile = "${stateDir}/chrony.rtc";
 
   configFile = pkgs.writeText "chrony.conf" ''
     ${concatMapStringsSep "\n" (server: "server " + server + " " + cfg.serverOption + optionalString (cfg.enableNTS) " nts") cfg.servers}
@@ -20,8 +21,10 @@ let
 
     driftfile ${driftFile}
     keyfile ${keyFile}
+    ${optionalString (cfg.enableRTCTrimming) "rtcfile ${rtcFile}"}
     ${optionalString (cfg.enableNTS) "ntsdumpdir ${stateDir}"}
 
+    ${optionalString (cfg.enableRTCTrimming) "rtcautotrim ${builtins.toString cfg.autotrimThreshold}"}
     ${optionalString (!config.time.hardwareClockInLocalTime) "rtconutc"}
 
     ${cfg.extraConfig}
@@ -85,6 +88,33 @@ in
         '';
       };
 
+      enableRTCTrimming = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Enable tracking of the RTC offset to the system clock and automatic trimming.
+          See also [](#opt-services.chrony.autotrimThreshold)
+
+          ::: {.note}
+          This is not compatible with the `rtcsync` directive, which naively syncs the RTC time every 11 minutes.
+
+          Tracking the RTC drift will allow more precise timekeeping,
+          especially on intermittently running devices, where the RTC is very relevant.
+          :::
+        '';
+      };
+
+      autotrimThreshold = mkOption {
+        type = types.ints.positive;
+        default = 30;
+        example = 10;
+        description = lib.mdDoc ''
+          Maximum estimated error threshold, in seconds, for the `rtcautotrim` directive.
+          When it is exceeded, the RTC will be trimmed.
+          Only used when [](#opt-services.chrony.enableRTCTrimming) is enabled.
+        '';
+      };
+
       enableNTS = mkOption {
         type = types.bool;
         default = false;
@@ -141,7 +171,7 @@ in
   };
 
   config = mkIf cfg.enable {
-    meta.maintainers = with lib.maintainers; [ thoughtpolice ];
+    meta.maintainers = with lib.maintainers; [ thoughtpolice vifino ];
 
     environment.systemPackages = [ chronyPkg ];
 
@@ -156,12 +186,19 @@ in
 
     services.timesyncd.enable = mkForce false;
 
+    # If chrony controls and tracks the RTC, writing it externally causes clock error.
+    systemd.services.save-hwclock = lib.mkIf cfg.enableRTCTrimming {
+      enable = lib.mkForce false;
+    };
+
     systemd.services.systemd-timedated.environment = { SYSTEMD_TIMEDATED_NTP_SERVICES = "chronyd.service"; };
 
     systemd.tmpfiles.rules = [
       "d ${stateDir} 0750 chrony chrony - -"
       "f ${driftFile} 0640 chrony chrony - -"
       "f ${keyFile} 0640 chrony chrony - -"
+    ] ++ lib.optionals cfg.enableRTCTrimming [
+      "f ${rtcFile} 0640 chrony chrony - -"
     ];
 
     systemd.services.chronyd =
diff --git a/nixos/modules/services/networking/unbound.nix b/nixos/modules/services/networking/unbound.nix
index 0426dbb0c83c3..b6579af10a793 100644
--- a/nixos/modules/services/networking/unbound.nix
+++ b/nixos/modules/services/networking/unbound.nix
@@ -166,7 +166,7 @@ in {
     services.unbound.settings = {
       server = {
         directory = mkDefault cfg.stateDir;
-        username = cfg.user;
+        username = ''""'';
         chroot = ''""'';
         pidfile = ''""'';
         # when running under systemd there is no need to daemonize
@@ -245,14 +245,13 @@ in {
         NotifyAccess = "main";
         Type = "notify";
 
-        # FIXME: Which of these do we actually need, can we drop the chroot flag?
         AmbientCapabilities = [
           "CAP_NET_BIND_SERVICE"
+          "CAP_NET_RAW" # needed if ip-transparent is set to true
+        ];
+        CapabilityBoundingSet = [
+          "CAP_NET_BIND_SERVICE"
           "CAP_NET_RAW"
-          "CAP_SETGID"
-          "CAP_SETUID"
-          "CAP_SYS_CHROOT"
-          "CAP_SYS_RESOURCE"
         ];
 
         User = cfg.user;
@@ -266,22 +265,19 @@ in {
         ProtectControlGroups = true;
         ProtectKernelModules = true;
         ProtectSystem = "strict";
+        ProtectClock = true;
+        ProtectHostname = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        ProtectKernelLogs = true;
+        ProtectKernelTunables = true;
         RuntimeDirectory = "unbound";
         ConfigurationDirectory = "unbound";
         StateDirectory = "unbound";
         RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_NETLINK" "AF_UNIX" ];
         RestrictRealtime = true;
         SystemCallArchitectures = "native";
-        SystemCallFilter = [
-          "~@clock"
-          "@cpu-emulation"
-          "@debug"
-          "@keyring"
-          "@module"
-          "mount"
-          "@obsolete"
-          "@resources"
-        ];
+        SystemCallFilter = [ "@system-service" ];
         RestrictNamespaces = true;
         LockPersonality = true;
         RestrictSUIDSGID = true;
diff --git a/nixos/modules/services/networking/unifi.nix b/nixos/modules/services/networking/unifi.nix
index 6b68371098065..537a4db95ca70 100644
--- a/nixos/modules/services/networking/unifi.nix
+++ b/nixos/modules/services/networking/unifi.nix
@@ -1,60 +1,61 @@
 { config, options, lib, pkgs, utils, ... }:
-with lib;
 let
   cfg = config.services.unifi;
   stateDir = "/var/lib/unifi";
-  cmd = ''
-    @${cfg.jrePackage}/bin/java java \
-        ${optionalString (lib.versionAtLeast (lib.getVersion cfg.jrePackage) "16")
-        ("--add-opens java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.time=ALL-UNNAMED "
-        + "--add-opens java.base/sun.security.util=ALL-UNNAMED --add-opens java.base/java.io=ALL-UNNAMED "
-        + "--add-opens java.rmi/sun.rmi.transport=ALL-UNNAMED")} \
-        ${optionalString (cfg.initialJavaHeapSize != null) "-Xms${(toString cfg.initialJavaHeapSize)}m"} \
-        ${optionalString (cfg.maximumJavaHeapSize != null) "-Xmx${(toString cfg.maximumJavaHeapSize)}m"} \
-        -jar ${stateDir}/lib/ace.jar
-  '';
+  cmd = lib.escapeShellArgs ([ "@${cfg.jrePackage}/bin/java" "java" ]
+    ++ lib.optionals (lib.versionAtLeast (lib.getVersion cfg.jrePackage) "16") [
+      "--add-opens=java.base/java.lang=ALL-UNNAMED"
+      "--add-opens=java.base/java.time=ALL-UNNAMED"
+      "--add-opens=java.base/sun.security.util=ALL-UNNAMED"
+      "--add-opens=java.base/java.io=ALL-UNNAMED"
+      "--add-opens=java.rmi/sun.rmi.transport=ALL-UNNAMED"
+    ]
+    ++ (lib.optional (cfg.initialJavaHeapSize != null) "-Xms${(toString cfg.initialJavaHeapSize)}m")
+    ++ (lib.optional (cfg.maximumJavaHeapSize != null) "-Xmx${(toString cfg.maximumJavaHeapSize)}m")
+    ++ cfg.extraJvmOptions
+    ++ [ "-jar" "${stateDir}/lib/ace.jar" ]);
 in
 {
 
   options = {
 
-    services.unifi.enable = mkOption {
-      type = types.bool;
+    services.unifi.enable = lib.mkOption {
+      type = lib.types.bool;
       default = false;
       description = lib.mdDoc ''
         Whether or not to enable the unifi controller service.
       '';
     };
 
-    services.unifi.jrePackage = mkOption {
-      type = types.package;
+    services.unifi.jrePackage = lib.mkOption {
+      type = lib.types.package;
       default = if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.5") then pkgs.jdk17_headless else if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.3") then pkgs.jdk11 else pkgs.jre8;
-      defaultText = literalExpression ''if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.5") then pkgs.jdk17_headless else if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.3" then pkgs.jdk11 else pkgs.jre8'';
+      defaultText = lib.literalExpression ''if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.5") then pkgs.jdk17_headless else if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.3") then pkgs.jdk11 else pkgs.jre8'';
       description = lib.mdDoc ''
         The JRE package to use. Check the release notes to ensure it is supported.
       '';
     };
 
-    services.unifi.unifiPackage = mkOption {
-      type = types.package;
+    services.unifi.unifiPackage = lib.mkOption {
+      type = lib.types.package;
       default = pkgs.unifi5;
-      defaultText = literalExpression "pkgs.unifi5";
+      defaultText = lib.literalExpression "pkgs.unifi5";
       description = lib.mdDoc ''
         The unifi package to use.
       '';
     };
 
-    services.unifi.mongodbPackage = mkOption {
-      type = types.package;
+    services.unifi.mongodbPackage = lib.mkOption {
+      type = lib.types.package;
       default = pkgs.mongodb-4_4;
-      defaultText = literalExpression "pkgs.mongodb";
+      defaultText = lib.literalExpression "pkgs.mongodb";
       description = lib.mdDoc ''
         The mongodb package to use. Please note: unifi7 officially only supports mongodb up until 3.6 but works with 4.4.
       '';
     };
 
-    services.unifi.openFirewall = mkOption {
-      type = types.bool;
+    services.unifi.openFirewall = lib.mkOption {
+      type = lib.types.bool;
       default = false;
       description = lib.mdDoc ''
         Whether or not to open the minimum required ports on the firewall.
@@ -65,8 +66,8 @@ in
       '';
     };
 
-    services.unifi.initialJavaHeapSize = mkOption {
-      type = types.nullOr types.int;
+    services.unifi.initialJavaHeapSize = lib.mkOption {
+      type = with lib.types; nullOr int;
       default = null;
       example = 1024;
       description = lib.mdDoc ''
@@ -75,8 +76,8 @@ in
       '';
     };
 
-    services.unifi.maximumJavaHeapSize = mkOption {
-      type = types.nullOr types.int;
+    services.unifi.maximumJavaHeapSize = lib.mkOption {
+      type = with lib.types; nullOr int;
       default = null;
       example = 4096;
       description = lib.mdDoc ''
@@ -85,9 +86,18 @@ in
       '';
     };
 
+    services.unifi.extraJvmOptions = lib.mkOption {
+      type = with lib.types; listOf str;
+      default = [ ];
+      example = lib.literalExpression ''["-Xlog:gc"]'';
+      description = lib.mdDoc ''
+        Set extra options to pass to the JVM.
+      '';
+    };
+
   };
 
-  config = mkIf cfg.enable {
+  config = lib.mkIf cfg.enable {
 
     users.users.unifi = {
       isSystemUser = true;
@@ -97,7 +107,7 @@ in
     };
     users.groups.unifi = {};
 
-    networking.firewall = mkIf cfg.openFirewall {
+    networking.firewall = lib.mkIf cfg.openFirewall {
       # https://help.ubnt.com/hc/en-us/articles/218506997
       allowedTCPPorts = [
         8080  # Port for UAP to inform controller.
@@ -123,8 +133,8 @@ in
 
       serviceConfig = {
         Type = "simple";
-        ExecStart = "${(removeSuffix "\n" cmd)} start";
-        ExecStop = "${(removeSuffix "\n" cmd)} stop";
+        ExecStart = "${cmd} start";
+        ExecStop = "${cmd} stop";
         Restart = "on-failure";
         TimeoutSec = "5min";
         User = "unifi";
@@ -166,7 +176,7 @@ in
         StateDirectory = "unifi";
         RuntimeDirectory = "unifi";
         LogsDirectory = "unifi";
-        CacheDirectory= "unifi";
+        CacheDirectory = "unifi";
 
         TemporaryFileSystem = [
           # required as we want to create bind mounts below
@@ -176,7 +186,7 @@ in
         # We must create the binary directories as bind mounts instead of symlinks
         # This is because the controller resolves all symlinks to absolute paths
         # to be used as the working directory.
-        BindPaths =  [
+        BindPaths = [
           "/var/log/unifi:${stateDir}/logs"
           "/run/unifi:${stateDir}/run"
           "${cfg.unifiPackage}/dl:${stateDir}/dl"
@@ -194,7 +204,7 @@ in
 
   };
   imports = [
-    (mkRemovedOptionModule [ "services" "unifi" "dataDir" ] "You should move contents of dataDir to /var/lib/unifi/data" )
-    (mkRenamedOptionModule [ "services" "unifi" "openPorts" ] [ "services" "unifi" "openFirewall" ])
+    (lib.mkRemovedOptionModule [ "services" "unifi" "dataDir" ] "You should move contents of dataDir to /var/lib/unifi/data")
+    (lib.mkRenamedOptionModule [ "services" "unifi" "openPorts" ] [ "services" "unifi" "openFirewall" ])
   ];
 }
diff --git a/nixos/modules/services/search/opensearch.nix b/nixos/modules/services/search/opensearch.nix
index 9a50e79631380..ae79d5545fd76 100644
--- a/nixos/modules/services/search/opensearch.nix
+++ b/nixos/modules/services/search/opensearch.nix
@@ -72,6 +72,18 @@ in
             The port to listen on for transport traffic.
           '';
         };
+
+        options."plugins.security.disabled" = lib.mkOption {
+          type = lib.types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Whether to disable the security plugin.
+            `plugins.security.ssl.transport.keystore_filepath` or
+            `plugins.security.ssl.transport.server.pemcert_filepath` and
+            `plugins.security.ssl.transport.client.pemcert_filepath`
+            must be set before the plugin can be enabled.
+          '';
+        };
       };
 
       default = {};
@@ -186,6 +198,13 @@ in
               shopt -s inherit_errexit
 
               # Install plugins
+
+              # remove plugins directory if it is empty.
+              if [ -z "$(ls -A ${cfg.dataDir}/plugins)" ]; then
+                rm -r "${cfg.dataDir}/plugins"
+              fi
+
+              ln -sfT "${cfg.package}/plugins" "${cfg.dataDir}/plugins"
               ln -sfT ${cfg.package}/lib ${cfg.dataDir}/lib
               ln -sfT ${cfg.package}/modules ${cfg.dataDir}/modules
 
diff --git a/nixos/modules/services/torrent/flexget.nix b/nixos/modules/services/torrent/flexget.nix
index 5cd7ae6ad7db3..58a4b70014977 100644
--- a/nixos/modules/services/torrent/flexget.nix
+++ b/nixos/modules/services/torrent/flexget.nix
@@ -64,7 +64,6 @@ in {
         path = [ pkg ];
         serviceConfig = {
           User = cfg.user;
-          Environment = "TZ=${config.time.timeZone}";
           ExecStartPre = "${pkgs.coreutils}/bin/install -m644 ${ymlFile} ${configFile}";
           ExecStart = "${pkg}/bin/flexget -c ${configFile} daemon start";
           ExecStop = "${pkg}/bin/flexget -c ${configFile} daemon stop";
diff --git a/nixos/modules/services/web-apps/mastodon.nix b/nixos/modules/services/web-apps/mastodon.nix
index 5feca2525ea15..8686506b1c282 100644
--- a/nixos/modules/services/web-apps/mastodon.nix
+++ b/nixos/modules/services/web-apps/mastodon.nix
@@ -17,9 +17,6 @@ let
     WEB_CONCURRENCY = toString cfg.webProcesses;
     MAX_THREADS = toString cfg.webThreads;
 
-    # mastodon-streaming concurrency.
-    STREAMING_CLUSTER_NUM = toString cfg.streamingProcesses;
-
     DB_USER = cfg.database.user;
 
     REDIS_HOST = cfg.redis.host;
@@ -33,13 +30,15 @@ let
     PAPERCLIP_ROOT_PATH = "/var/lib/mastodon/public-system";
     PAPERCLIP_ROOT_URL = "/system";
     ES_ENABLED = if (cfg.elasticsearch.host != null) then "true" else "false";
-    ES_HOST = cfg.elasticsearch.host;
-    ES_PORT = toString(cfg.elasticsearch.port);
 
     TRUSTED_PROXY_IP = cfg.trustedProxy;
   }
   // lib.optionalAttrs (cfg.database.host != "/run/postgresql" && cfg.database.port != null) { DB_PORT = toString cfg.database.port; }
   // lib.optionalAttrs cfg.smtp.authenticate { SMTP_LOGIN  = cfg.smtp.user; }
+  // lib.optionalAttrs (cfg.elasticsearch.host != null) { ES_HOST = cfg.elasticsearch.host; }
+  // lib.optionalAttrs (cfg.elasticsearch.host != null) { ES_PORT = toString(cfg.elasticsearch.port); }
+  // lib.optionalAttrs (cfg.elasticsearch.host != null) { ES_PRESET = cfg.elasticsearch.preset; }
+  // lib.optionalAttrs (cfg.elasticsearch.user != null) { ES_USER = cfg.elasticsearch.user; }
   // cfg.extraConfig;
 
   systemCallsList = [ "@cpu-emulation" "@debug" "@keyring" "@ipc" "@mount" "@obsolete" "@privileged" "@setuid" ];
@@ -141,8 +140,44 @@ let
     })
   ) cfg.sidekiqProcesses;
 
+  streamingUnits = builtins.listToAttrs
+      (map (i: {
+        name = "mastodon-streaming-${toString i}";
+        value = {
+          after = [ "network.target" "mastodon-init-dirs.service" ]
+            ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
+            ++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
+          requires = [ "mastodon-init-dirs.service" ]
+            ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
+            ++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
+          wantedBy = [ "mastodon.target" "mastodon-streaming.target" ];
+          description = "Mastodon streaming ${toString i}";
+          environment = env // { SOCKET = "/run/mastodon-streaming/streaming-${toString i}.socket"; };
+          serviceConfig = {
+            ExecStart = "${cfg.package}/run-streaming.sh";
+            Restart = "always";
+            RestartSec = 20;
+            EnvironmentFile = [ "/var/lib/mastodon/.secrets_env" ] ++ cfg.extraEnvFiles;
+            WorkingDirectory = cfg.package;
+            # Runtime directory and mode
+            RuntimeDirectory = "mastodon-streaming";
+            RuntimeDirectoryMode = "0750";
+            # System Call Filtering
+            SystemCallFilter = [ ("~" + lib.concatStringsSep " " (systemCallsList ++ [ "@memlock" "@resources" ])) "pipe" "pipe2" ];
+          } // cfgService;
+        };
+      })
+      (lib.range 1 cfg.streamingProcesses));
+
 in {
 
+  imports = [
+    (lib.mkRemovedOptionModule
+      [ "services" "mastodon" "streamingPort" ]
+      "Mastodon currently doesn't support streaming via TCP ports. Please open a PR if you need this."
+    )
+  ];
+
   options = {
     services.mastodon = {
       enable = lib.mkEnableOption (lib.mdDoc "Mastodon, a federated social network server");
@@ -191,18 +226,13 @@ in {
         default = "mastodon";
       };
 
-      streamingPort = lib.mkOption {
-        description = lib.mdDoc "TCP port used by the mastodon-streaming service.";
-        type = lib.types.port;
-        default = 55000;
-      };
       streamingProcesses = lib.mkOption {
         description = lib.mdDoc ''
-          Processes used by the mastodon-streaming service.
-          Defaults to the number of CPU cores minus one.
+          Number of processes used by the mastodon-streaming service.
+          The recommended value is the number of CPU cores minus one.
         '';
-        type = lib.types.nullOr lib.types.int;
-        default = null;
+        type = lib.types.ints.positive;
+        example = 3;
       };
 
       webPort = lib.mkOption {
@@ -485,6 +515,31 @@ in {
           type = lib.types.port;
           default = 9200;
         };
+
+        preset = lib.mkOption {
+          description = lib.mdDoc ''
+            Configures the Elasticsearch indices (number of shards and replicas).
+          '';
+          type = lib.types.enum [ "single_node_cluster" "small_cluster" "large_cluster" ];
+          default = "single_node_cluster";
+          example = "large_cluster";
+        };
+
+        user = lib.mkOption {
+          description = lib.mdDoc "Used for optionally authenticating with Elasticsearch.";
+          type = lib.types.nullOr lib.types.str;
+          default = null;
+          example = "elasticsearch-mastodon";
+        };
+
+        passwordFile = lib.mkOption {
+          description = lib.mdDoc ''
+            Path to a file containing the password for optionally authenticating with Elasticsearch.
+          '';
+          type = lib.types.nullOr lib.types.path;
+          default = null;
+          example = "/var/lib/mastodon/secrets/elasticsearch-password";
+        };
       };
 
       package = lib.mkOption {
@@ -603,6 +658,12 @@ in {
       after = [ "network.target" ];
     };
 
+    systemd.targets.mastodon-streaming = {
+      description = "Target for all Mastodon streaming services";
+      wantedBy = [ "multi-user.target" "mastodon.target" ];
+      after = [ "network.target" ];
+    };
+
     systemd.services.mastodon-init-dirs = {
       script = ''
         umask 077
@@ -631,6 +692,8 @@ in {
         DB_PASS="$(cat ${cfg.database.passwordFile})"
       '' + lib.optionalString cfg.smtp.authenticate ''
         SMTP_PASSWORD="$(cat ${cfg.smtp.passwordFile})"
+      '' + lib.optionalString (cfg.elasticsearch.passwordFile != null) ''
+        ES_PASS="$(cat ${cfg.elasticsearch.passwordFile})"
       '' + ''
         EOF
       '';
@@ -688,33 +751,6 @@ in {
         ++ lib.optional databaseActuallyCreateLocally "postgresql.service";
     };
 
-    systemd.services.mastodon-streaming = {
-      after = [ "network.target" "mastodon-init-dirs.service" ]
-        ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
-        ++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
-      requires = [ "mastodon-init-dirs.service" ]
-        ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
-        ++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
-      wantedBy = [ "mastodon.target" ];
-      description = "Mastodon streaming";
-      environment = env // (if cfg.enableUnixSocket
-        then { SOCKET = "/run/mastodon-streaming/streaming.socket"; }
-        else { PORT = toString(cfg.streamingPort); }
-      );
-      serviceConfig = {
-        ExecStart = "${cfg.package}/run-streaming.sh";
-        Restart = "always";
-        RestartSec = 20;
-        EnvironmentFile = [ "/var/lib/mastodon/.secrets_env" ] ++ cfg.extraEnvFiles;
-        WorkingDirectory = cfg.package;
-        # Runtime directory and mode
-        RuntimeDirectory = "mastodon-streaming";
-        RuntimeDirectoryMode = "0750";
-        # System Call Filtering
-        SystemCallFilter = [ ("~" + lib.concatStringsSep " " (systemCallsList ++ [ "@memlock" "@resources" ])) "pipe" "pipe2" ];
-      } // cfgService;
-    };
-
     systemd.services.mastodon-web = {
       after = [ "network.target" "mastodon-init-dirs.service" ]
         ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
@@ -780,10 +816,20 @@ in {
         };
 
         locations."/api/v1/streaming/" = {
-          proxyPass = (if cfg.enableUnixSocket then "http://unix:/run/mastodon-streaming/streaming.socket" else "http://127.0.0.1:${toString(cfg.streamingPort)}/");
+          proxyPass = "http://mastodon-streaming";
           proxyWebsockets = true;
         };
       };
+      upstreams.mastodon-streaming = {
+        extraConfig = ''
+          least_conn;
+        '';
+        servers = builtins.listToAttrs
+          (map (i: {
+            name = "unix:/run/mastodon-streaming/streaming-${toString i}.socket";
+            value = { };
+          }) (lib.range 1 cfg.streamingProcesses));
+      };
     };
 
     services.postfix = lib.mkIf (cfg.smtp.createLocally && cfg.smtp.host == "127.0.0.1") {
@@ -819,7 +865,7 @@ in {
 
     users.groups.${cfg.group}.members = lib.optional cfg.configureNginx config.services.nginx.user;
   }
-  { systemd.services = sidekiqUnits; }
+  { systemd.services = lib.mkMerge [ sidekiqUnits streamingUnits ]; }
   ]);
 
   meta.maintainers = with lib.maintainers; [ happy-river erictapen ];
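For reference, a minimal sketch of a host configuration using the reworked streaming setup and the new Elasticsearch authentication options (the domain and file paths are placeholders, not module defaults):

```nix
{
  services.mastodon = {
    enable = true;
    localDomain = "social.example.org";  # placeholder domain
    configureNginx = true;
    # One streaming process per unix socket; nginx balances across them
    # via the mastodon-streaming upstream defined by the module.
    streamingProcesses = 3;
    elasticsearch = {
      host = "127.0.0.1";
      preset = "single_node_cluster";
      # Optional authentication; the password is written to ES_PASS in
      # /var/lib/mastodon/.secrets_env by mastodon-init-dirs.
      user = "elasticsearch-mastodon";
      passwordFile = "/var/lib/mastodon/secrets/elasticsearch-password";
    };
  };
}
```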
diff --git a/nixos/modules/services/web-apps/plantuml-server.nix b/nixos/modules/services/web-apps/plantuml-server.nix
index 5ebee48c3e0b5..1fa69814c6c9f 100644
--- a/nixos/modules/services/web-apps/plantuml-server.nix
+++ b/nixos/modules/services/web-apps/plantuml-server.nix
@@ -1,123 +1,110 @@
 { config, lib, pkgs, ... }:
 
-with lib;
-
 let
+  inherit (lib)
+    literalExpression
+    mdDoc
+    mkEnableOption
+    mkIf
+    mkOption
+    mkPackageOptionMD
+    mkRemovedOptionModule
+    types
+    ;
 
   cfg = config.services.plantuml-server;
 
 in
 
 {
+  imports = [
+    (mkRemovedOptionModule [ "services" "plantuml-server" "allowPlantumlInclude" ] "This option has been removed from PlantUML.")
+  ];
+
   options = {
     services.plantuml-server = {
-      enable = mkEnableOption (lib.mdDoc "PlantUML server");
+      enable = mkEnableOption (mdDoc "PlantUML server");
 
-      package = mkOption {
-        type = types.package;
-        default = pkgs.plantuml-server;
-        defaultText = literalExpression "pkgs.plantuml-server";
-        description = lib.mdDoc "PlantUML server package to use";
-      };
+      package = mkPackageOptionMD pkgs "plantuml-server" { };
 
       packages = {
-        jdk = mkOption {
-          type = types.package;
-          default = pkgs.jdk;
-          defaultText = literalExpression "pkgs.jdk";
-          description = lib.mdDoc "JDK package to use for the server";
-        };
-        jetty = mkOption {
-          type = types.package;
-          default = pkgs.jetty;
-          defaultText = literalExpression "pkgs.jetty";
-          description = lib.mdDoc "Jetty package to use for the server";
+        jdk = mkPackageOptionMD pkgs "jdk" { };
+        jetty = mkPackageOptionMD pkgs "jetty" {
+          default = "jetty_11";
+          extraDescription = ''
+            At the time of writing (v1.2023.12), PlantUML Server does not support
+            Jetty versions newer than 11.x.
+
+            Jetty 12.x has introduced major breaking changes, see
+            <https://github.com/jetty/jetty.project/releases/tag/jetty-12.0.0> and
+            <https://eclipse.dev/jetty/documentation/jetty-12/programming-guide/index.html#pg-migration-11-to-12>
+          '';
         };
       };
 
       user = mkOption {
         type = types.str;
         default = "plantuml";
-        description = lib.mdDoc "User which runs PlantUML server.";
+        description = mdDoc "User which runs PlantUML server.";
       };
 
       group = mkOption {
         type = types.str;
         default = "plantuml";
-        description = lib.mdDoc "Group which runs PlantUML server.";
+        description = mdDoc "Group which runs PlantUML server.";
       };
 
       home = mkOption {
-        type = types.str;
+        type = types.path;
         default = "/var/lib/plantuml";
-        description = lib.mdDoc "Home directory of the PlantUML server instance.";
+        description = mdDoc "Home directory of the PlantUML server instance.";
       };
 
       listenHost = mkOption {
         type = types.str;
         default = "127.0.0.1";
-        description = lib.mdDoc "Host to listen on.";
+        description = mdDoc "Host to listen on.";
       };
 
       listenPort = mkOption {
         type = types.int;
         default = 8080;
-        description = lib.mdDoc "Port to listen on.";
+        description = mdDoc "Port to listen on.";
       };
 
       plantumlLimitSize = mkOption {
         type = types.int;
         default = 4096;
-        description = lib.mdDoc "Limits image width and height.";
+        description = mdDoc "Limits image width and height.";
       };
 
-      graphvizPackage = mkOption {
-        type = types.package;
-        default = pkgs.graphviz;
-        defaultText = literalExpression "pkgs.graphviz";
-        description = lib.mdDoc "Package containing the dot executable.";
-      };
+      graphvizPackage = mkPackageOptionMD pkgs "graphviz" { };
 
       plantumlStats = mkOption {
         type = types.bool;
         default = false;
-        description = lib.mdDoc "Set it to on to enable statistics report (https://plantuml.com/statistics-report).";
+        description = mdDoc "Set it to on to enable statistics report (https://plantuml.com/statistics-report).";
       };
 
       httpAuthorization = mkOption {
         type = types.nullOr types.str;
         default = null;
-        description = lib.mdDoc "When calling the proxy endpoint, the value of HTTP_AUTHORIZATION will be used to set the HTTP Authorization header.";
-      };
-
-      allowPlantumlInclude = mkOption {
-        type = types.bool;
-        default = false;
-        description = lib.mdDoc "Enables !include processing which can read files from the server into diagrams. Files are read relative to the current working directory.";
+        description = mdDoc "When calling the proxy endpoint, the value of HTTP_AUTHORIZATION will be used to set the HTTP Authorization header.";
       };
     };
   };
 
   config = mkIf cfg.enable {
-    users.users.${cfg.user} = {
-      isSystemUser = true;
-      group = cfg.group;
-      home = cfg.home;
-      createHome = true;
-    };
-
-    users.groups.${cfg.group} = {};
-
     systemd.services.plantuml-server = {
       description = "PlantUML server";
       wantedBy = [ "multi-user.target" ];
       path = [ cfg.home ];
+
       environment = {
         PLANTUML_LIMIT_SIZE = builtins.toString cfg.plantumlLimitSize;
         GRAPHVIZ_DOT = "${cfg.graphvizPackage}/bin/dot";
         PLANTUML_STATS = if cfg.plantumlStats then "on" else "off";
         HTTP_AUTHORIZATION = cfg.httpAuthorization;
-        ALLOW_PLANTUML_INCLUDE = if cfg.allowPlantumlInclude then "true" else "false";
       };
       script = ''
       ${cfg.packages.jdk}/bin/java \
@@ -128,13 +115,40 @@ in
           jetty.http.host=${cfg.listenHost} \
           jetty.http.port=${builtins.toString cfg.listenPort}
       '';
+
       serviceConfig = {
         User = cfg.user;
         Group = cfg.group;
+        StateDirectory = mkIf (cfg.home == "/var/lib/plantuml") "plantuml";
+        StateDirectoryMode = mkIf (cfg.home == "/var/lib/plantuml") "0750";
+
+        # Hardening
+        AmbientCapabilities = [ "" ];
+        CapabilityBoundingSet = [ "" ];
+        DynamicUser = true;
+        LockPersonality = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateNetwork = false;
         PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" ];
       };
     };
   };
 
-  meta.maintainers = with lib.maintainers; [ truh ];
+  meta.maintainers = with lib.maintainers; [ truh anthonyroussel ];
 }
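As a usage sketch: with the module now running under DynamicUser and a managed state directory, a minimal configuration only needs the enable flag; the Jetty 11 default is spelled out here purely for illustration:

```nix
{ pkgs, ... }:
{
  services.plantuml-server = {
    enable = true;
    listenHost = "127.0.0.1";
    listenPort = 8080;
    # jetty_11 is already the module default; newer Jetty releases are
    # not yet supported by PlantUML Server.
    packages.jetty = pkgs.jetty_11;
  };
}
```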
diff --git a/nixos/modules/services/web-apps/plausible.nix b/nixos/modules/services/web-apps/plausible.nix
index 576b54a7edf24..300a0f892ef77 100644
--- a/nixos/modules/services/web-apps/plausible.nix
+++ b/nixos/modules/services/web-apps/plausible.nix
@@ -11,13 +11,6 @@ in {
 
     package = mkPackageOptionMD pkgs "plausible" { };
 
-    releaseCookiePath = mkOption {
-      type = with types; either str path;
-      description = lib.mdDoc ''
-        The path to the file with release cookie. (used for remote connection to the running node).
-      '';
-    };
-
     adminUser = {
       name = mkOption {
         default = "admin";
@@ -92,6 +85,13 @@ in {
           framework docs](https://hexdocs.pm/phoenix/Mix.Tasks.Phx.Gen.Secret.html#content).
         '';
       };
+      listenAddress = mkOption {
+        default = "127.0.0.1";
+        type = types.str;
+        description = lib.mdDoc ''
+          The IP address on which the server is listening.
+        '';
+      };
       port = mkOption {
         default = 8000;
         type = types.port;
@@ -162,6 +162,10 @@ in {
     };
   };
 
+  imports = [
+    (mkRemovedOptionModule [ "services" "plausible" "releaseCookiePath" ] "Plausible uses no distributed Erlang features, so this option is no longer necessary and was removed")
+  ];
+
   config = mkIf cfg.enable {
     assertions = [
       { assertion = cfg.adminUser.activate -> cfg.database.postgres.setup;
@@ -180,8 +184,6 @@ in {
       enable = true;
     };
 
-    services.epmd.enable = true;
-
     environment.systemPackages = [ cfg.package ];
 
     systemd.services = mkMerge [
@@ -209,6 +211,32 @@ in {
             # Configuration options from
             # https://plausible.io/docs/self-hosting-configuration
             PORT = toString cfg.server.port;
+            LISTEN_IP = cfg.server.listenAddress;
+
+            # Note [plausible-needs-no-erlang-distributed-features]:
+            # Plausible does not use, and does not plan to use, any of
+            # Erlang's distributed features, see:
+            #     https://github.com/plausible/analytics/pull/1190#issuecomment-1018820934
+            # Thus, disable distribution for improved simplicity and security:
+            #
+            # When distribution is enabled,
+            # Elixir spawns the Erlang VM, which will listen by default on all
+            # interfaces for messages between Erlang nodes (capable of
+            # remote code execution); it can be protected by a cookie; see
+            # https://erlang.org/doc/reference_manual/distributed.html#security.
+            #
+            # It would be possible to restrict the interface to one of our choice
+            # (e.g. localhost or a VPN IP) similar to how we do it with `listenAddress`
+            # for the Plausible web server; if distribution is ever needed in the future,
+            # https://github.com/NixOS/nixpkgs/pull/130297 shows how to do it.
+            #
+            # But since Plausible does not use this feature in any way,
+            # we just disable it.
+            RELEASE_DISTRIBUTION = "none";
+            # Additional safeguard, in case `RELEASE_DISTRIBUTION=none` ever
+            # stops disabling the start of EPMD.
+            ERL_EPMD_ADDRESS = "127.0.0.1";
+
             DISABLE_REGISTRATION = if isBool cfg.server.disableRegistration then boolToString cfg.server.disableRegistration else cfg.server.disableRegistration;
 
             RELEASE_TMP = "/var/lib/plausible/tmp";
@@ -238,7 +266,10 @@ in {
           path = [ cfg.package ]
             ++ optional cfg.database.postgres.setup config.services.postgresql.package;
           script = ''
-            export RELEASE_COOKIE="$(< $CREDENTIALS_DIRECTORY/RELEASE_COOKIE )"
+            # Elixir does not start up if `RELEASE_COOKIE` is not set,
+            # even though we set `RELEASE_DISTRIBUTION=none` so the cookie should be unused.
+            # Thus, make a random one, which should then be ignored.
+            export RELEASE_COOKIE=$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 20)
             export ADMIN_USER_PWD="$(< $CREDENTIALS_DIRECTORY/ADMIN_USER_PWD )"
             export SECRET_KEY_BASE="$(< $CREDENTIALS_DIRECTORY/SECRET_KEY_BASE )"
 
@@ -265,7 +296,6 @@ in {
             LoadCredential = [
               "ADMIN_USER_PWD:${cfg.adminUser.passwordFile}"
               "SECRET_KEY_BASE:${cfg.server.secretKeybaseFile}"
-              "RELEASE_COOKIE:${cfg.releaseCookiePath}"
             ] ++ lib.optionals (cfg.mail.smtp.passwordFile != null) [ "SMTP_USER_PWD:${cfg.mail.smtp.passwordFile}"];
           };
         };
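A minimal sketch of a configuration using the new `listenAddress` option; the secret file paths are placeholders, and the removed release cookie option is simply no longer set:

```nix
{
  services.plausible = {
    enable = true;
    adminUser = {
      email = "admin@example.org";
      # Placeholder secret paths; manage these with your secrets tooling.
      passwordFile = "/run/secrets/plausible-admin-password";
    };
    server = {
      listenAddress = "127.0.0.1";  # new option introduced by this change
      port = 8000;
      secretKeybaseFile = "/run/secrets/plausible-secret-key-base";
    };
  };
}
```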
diff --git a/nixos/modules/services/web-servers/jboss/builder.sh b/nixos/modules/services/web-servers/jboss/builder.sh
index ac573089cd5a7..8c49b87db0604 100644
--- a/nixos/modules/services/web-servers/jboss/builder.sh
+++ b/nixos/modules/services/web-servers/jboss/builder.sh
@@ -1,6 +1,6 @@
 set -e
 
-if [ -e .attrs.sh ]; then source .attrs.sh; fi
+if [ -e "$NIX_ATTRS_SH_FILE" ]; then . "$NIX_ATTRS_SH_FILE"; elif [ -f .attrs.sh ]; then . .attrs.sh; fi
 source $stdenv/setup
 
 mkdir -p $out/bin
diff --git a/nixos/modules/system/boot/kernel.nix b/nixos/modules/system/boot/kernel.nix
index 6b07686efcba2..a46331ccd431d 100644
--- a/nixos/modules/system/boot/kernel.nix
+++ b/nixos/modules/system/boot/kernel.nix
@@ -96,8 +96,8 @@ in
                                         # (required, but can be null if only config changes
                                         # are needed)
 
-          extraStructuredConfig = {     # attrset of extra configuration parameters
-            FOO = lib.kernel.yes;       # (without the CONFIG_ prefix, optional)
+          extraStructuredConfig = {     # attrset of extra configuration parameters without the CONFIG_ prefix
+            FOO = lib.kernel.yes;       # (optional)
           };                            # values should generally be lib.kernel.yes,
                                         # lib.kernel.no or lib.kernel.module
 
@@ -105,8 +105,9 @@ in
             foo = true;                 # (may be checked by other NixOS modules, optional)
           };
 
-          extraConfig = "CONFIG_FOO y"; # extra configuration options in string form
-                                        # (deprecated, use extraStructuredConfig instead, optional)
+          extraConfig = "FOO y";        # extra configuration options in string form without the CONFIG_ prefix
+                                        # (optional, multiple lines allowed to specify multiple options)
+                                        # (deprecated, use extraStructuredConfig instead)
         }
         ```
 
diff --git a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
index 310584e398bc1..96b42066b2239 100644
--- a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
+++ b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
@@ -11,7 +11,23 @@ import shutil
 import subprocess
 import sys
 import warnings
-from typing import NamedTuple
+import json
+from typing import NamedTuple, Dict, List
+from dataclasses import dataclass
+
+
+@dataclass
+class BootSpec:
+    init: str
+    initrd: str
+    initrdSecrets: str
+    kernel: str
+    kernelParams: List[str]
+    label: str
+    system: str
+    toplevel: str
+    specialisations: Dict[str, "BootSpec"]
+
 
 
 libc = ctypes.CDLL("libc.so.6")
@@ -71,12 +87,20 @@ def write_loader_conf(profile: str | None, generation: int, specialisation: str
     os.rename("@efiSysMountPoint@/loader/loader.conf.tmp", "@efiSysMountPoint@/loader/loader.conf")
 
 
-def profile_path(profile: str | None, generation: int, specialisation: str | None, name: str) -> str:
-    return os.path.realpath("%s/%s" % (system_dir(profile, generation, specialisation), name))
+def get_bootspec(profile: str | None, generation: int) -> BootSpec:
+    boot_json_path = os.path.realpath("%s/%s" % (system_dir(profile, generation, None), "boot.json"))
+    with open(boot_json_path, 'r') as boot_json_f:
+        bootspec_json = json.load(boot_json_f)
+    return bootspec_from_json(bootspec_json)
 
+def bootspec_from_json(bootspec_json: Dict) -> BootSpec:
+    specialisations = bootspec_json['org.nixos.specialisation.v1']
+    specialisations = {k: bootspec_from_json(v) for k, v in specialisations.items()}
+    return BootSpec(**bootspec_json['org.nixos.bootspec.v1'], specialisations=specialisations)
 
-def copy_from_profile(profile: str | None, generation: int, specialisation: str | None, name: str, dry_run: bool = False) -> str:
-    store_file_path = profile_path(profile, generation, specialisation, name)
+
+def copy_from_file(file: str, dry_run: bool = False) -> str:
+    store_file_path = os.path.realpath(file)
     suffix = os.path.basename(store_file_path)
     store_dir = os.path.basename(os.path.dirname(store_file_path))
     efi_file_path = "/efi/nixos/%s-%s.efi" % (store_dir, suffix)
@@ -84,40 +108,19 @@ def copy_from_profile(profile: str | None, generation: int, specialisation: str
         copy_if_not_exists(store_file_path, "@efiSysMountPoint@%s" % (efi_file_path))
     return efi_file_path
 
-
-def describe_generation(profile: str | None, generation: int, specialisation: str | None) -> str:
-    try:
-        with open(profile_path(profile, generation, specialisation, "nixos-version")) as f:
-            nixos_version = f.read()
-    except IOError:
-        nixos_version = "Unknown"
-
-    kernel_dir = os.path.dirname(profile_path(profile, generation, specialisation, "kernel"))
-    module_dir = glob.glob("%s/lib/modules/*" % kernel_dir)[0]
-    kernel_version = os.path.basename(module_dir)
-
-    build_time = int(os.path.getctime(system_dir(profile, generation, specialisation)))
-    build_date = datetime.datetime.fromtimestamp(build_time).strftime('%F')
-
-    description = "@distroName@ {}, Linux Kernel {}, Built on {}".format(
-        nixos_version, kernel_version, build_date
-    )
-
-    return description
-
-
 def write_entry(profile: str | None, generation: int, specialisation: str | None,
-                machine_id: str, current: bool) -> None:
-    kernel = copy_from_profile(profile, generation, specialisation, "kernel")
-    initrd = copy_from_profile(profile, generation, specialisation, "initrd")
+                machine_id: str, bootspec: BootSpec, current: bool) -> None:
+    if specialisation:
+        bootspec = bootspec.specialisations[specialisation]
+    kernel = copy_from_file(bootspec.kernel)
+    initrd = copy_from_file(bootspec.initrd)
 
     title = "@distroName@{profile}{specialisation}".format(
         profile=" [" + profile + "]" if profile else "",
         specialisation=" (%s)" % specialisation if specialisation else "")
 
     try:
-        append_initrd_secrets = profile_path(profile, generation, specialisation, "append-initrd-secrets")
-        subprocess.check_call([append_initrd_secrets, "@efiSysMountPoint@%s" % (initrd)])
+        subprocess.check_call([bootspec.initrdSecrets, "@efiSysMountPoint@%s" % (initrd)])
     except FileNotFoundError:
         pass
     except subprocess.CalledProcessError:
@@ -132,17 +135,19 @@ def write_entry(profile: str | None, generation: int, specialisation: str | None
     entry_file = "@efiSysMountPoint@/loader/entries/%s" % (
         generation_conf_filename(profile, generation, specialisation))
     tmp_path = "%s.tmp" % (entry_file)
-    kernel_params = "init=%s " % profile_path(profile, generation, specialisation, "init")
+    kernel_params = "init=%s " % bootspec.init
+
+    kernel_params = kernel_params + " ".join(bootspec.kernelParams)
+    build_time = int(os.path.getctime(system_dir(profile, generation, specialisation)))
+    build_date = datetime.datetime.fromtimestamp(build_time).strftime('%F')
 
-    with open(profile_path(profile, generation, specialisation, "kernel-params")) as params_file:
-        kernel_params = kernel_params + params_file.read()
     with open(tmp_path, 'w') as f:
         f.write(BOOT_ENTRY.format(title=title,
                     generation=generation,
                     kernel=kernel,
                     initrd=initrd,
                     kernel_params=kernel_params,
-                    description=describe_generation(profile, generation, specialisation)))
+                    description=f"{bootspec.label}, built on {build_date}"))
         if machine_id is not None:
             f.write("machine-id %s\n" % machine_id)
         f.flush()
@@ -173,21 +178,14 @@ def get_generations(profile: str | None = None) -> list[SystemIdentifier]:
     return configurations[-configurationLimit:]
 
 
-def get_specialisations(profile: str | None, generation: int, _: str | None) -> list[SystemIdentifier]:
-    specialisations_dir = os.path.join(
-            system_dir(profile, generation, None), "specialisation")
-    if not os.path.exists(specialisations_dir):
-        return []
-    return [SystemIdentifier(profile, generation, spec) for spec in os.listdir(specialisations_dir)]
-
-
 def remove_old_entries(gens: list[SystemIdentifier]) -> None:
     rex_profile = re.compile(r"^@efiSysMountPoint@/loader/entries/nixos-(.*)-generation-.*\.conf$")
     rex_generation = re.compile(r"^@efiSysMountPoint@/loader/entries/nixos.*-generation-([0-9]+)(-specialisation-.*)?\.conf$")
     known_paths = []
     for gen in gens:
-        known_paths.append(copy_from_profile(*gen, "kernel", True))
-        known_paths.append(copy_from_profile(*gen, "initrd", True))
+        bootspec = get_bootspec(gen.profile, gen.generation)
+        known_paths.append(copy_from_file(bootspec.kernel, True))
+        known_paths.append(copy_from_file(bootspec.initrd, True))
     for path in glob.iglob("@efiSysMountPoint@/loader/entries/nixos*-generation-[1-9]*.conf"):
         if rex_profile.match(path):
             prof = rex_profile.sub(r"\1", path)
@@ -279,10 +277,11 @@ def install_bootloader(args: argparse.Namespace) -> None:
     remove_old_entries(gens)
     for gen in gens:
         try:
-            is_default = os.path.dirname(profile_path(*gen, "init")) == args.default_config
-            write_entry(*gen, machine_id, current=is_default)
-            for specialisation in get_specialisations(*gen):
-                write_entry(*specialisation, machine_id, current=is_default)
+            bootspec = get_bootspec(gen.profile, gen.generation)
+            is_default = os.path.dirname(bootspec.init) == args.default_config
+            write_entry(*gen, machine_id, bootspec, current=is_default)
+            for specialisation in bootspec.specialisations.keys():
+                write_entry(gen.profile, gen.generation, specialisation, machine_id, bootspec, current=is_default)
             if is_default:
                 write_loader_conf(*gen)
         except OSError as e:
diff --git a/nixos/modules/system/boot/systemd/initrd.nix b/nixos/modules/system/boot/systemd/initrd.nix
index e223451652b2e..0e7d59b32075b 100644
--- a/nixos/modules/system/boot/systemd/initrd.nix
+++ b/nixos/modules/system/boot/systemd/initrd.nix
@@ -370,7 +370,7 @@ in {
 
     boot.initrd.availableKernelModules = [
       # systemd needs this for some features
-      "autofs4"
+      "autofs"
       # systemd-cryptenroll
     ] ++ lib.optional cfg.enableTpm2 "tpm-tis"
     ++ lib.optional (cfg.enableTpm2 && !(pkgs.stdenv.hostPlatform.isRiscV64 || pkgs.stdenv.hostPlatform.isArmv7)) "tpm-crb";
diff --git a/nixos/modules/tasks/filesystems/bcachefs.nix b/nixos/modules/tasks/filesystems/bcachefs.nix
index 4eadec239e67d..af7ba7aa6a0f0 100644
--- a/nixos/modules/tasks/filesystems/bcachefs.nix
+++ b/nixos/modules/tasks/filesystems/bcachefs.nix
@@ -6,23 +6,39 @@ let
 
   bootFs = filterAttrs (n: fs: (fs.fsType == "bcachefs") && (utils.fsNeededForBoot fs)) config.fileSystems;
 
-  mountCommand = pkgs.runCommand "mount.bcachefs" {} ''
-    mkdir -p $out/bin
-    cat > $out/bin/mount.bcachefs <<EOF
-    #!/bin/sh
-    exec "/bin/bcachefs" mount "\$@"
-    EOF
-    chmod +x $out/bin/mount.bcachefs
-  '';
-
   commonFunctions = ''
     prompt() {
         local name="$1"
         printf "enter passphrase for $name: "
     }
+
     tryUnlock() {
         local name="$1"
         local path="$2"
+        local success=false
+        local target
+        local uuid=$(echo -n $path | sed -e 's,UUID=\(.*\),\1,g')
+
+        printf "waiting for device to appear $path"
+        for try in $(seq 10); do
+          if [ -e $path ]; then
+              success=true
+              break
+          else
+              target=$(blkid --uuid $uuid)
+              if [ $? == 0 ]; then
+                 success=true
+                 break
+              fi
+          fi
+          echo -n "."
+          sleep 1
+        done
+        printf "\n"
+        if [ $success == true ]; then
+            path=$target
+        fi
+
         if bcachefs unlock -c $path > /dev/null 2> /dev/null; then    # test for encryption
             prompt $name
             until bcachefs unlock $path 2> /dev/null; do              # repeat until successfully unlocked
@@ -30,6 +46,8 @@ let
                 prompt $name
             done
             printf "unlocking successful.\n"
+        else
+            echo "Cannot unlock device $uuid with path $path" >&2
         fi
     }
   '';
@@ -77,13 +95,11 @@ in
 {
   config = mkIf (elem "bcachefs" config.boot.supportedFilesystems) (mkMerge [
     {
-      # We do not want to include bachefs in the fsPackages for systemd-initrd
-      # because we provide the unwrapped version of mount.bcachefs
-      # through the extraBin option, which will make it available for use.
-      system.fsPackages = lib.optional (!config.boot.initrd.systemd.enable) pkgs.bcachefs-tools;
-      environment.systemPackages = lib.optional (config.boot.initrd.systemd.enable) pkgs.bcachefs-tools;
+      # needed for systemd-remount-fs
+      system.fsPackages = [ pkgs.bcachefs-tools ];
 
       # use kernel package with bcachefs support until it's in mainline
+      # TODO replace with requireKernelConfig
       boot.kernelPackages = pkgs.linuxPackages_testing_bcachefs;
 
       systemd.services = lib.mapAttrs' (mkUnits "") (lib.filterAttrs (n: fs: (fs.fsType == "bcachefs") && (!utils.fsNeededForBoot fs)) config.fileSystems);
@@ -92,15 +108,14 @@ in
     (mkIf ((elem "bcachefs" config.boot.initrd.supportedFilesystems) || (bootFs != {})) {
       # chacha20 and poly1305 are required only for decryption attempts
       boot.initrd.availableKernelModules = [ "bcachefs" "sha256" "chacha20" "poly1305" ];
-
       boot.initrd.systemd.extraBin = {
+        # do we need this? boot/systemd.nix:566 & boot/systemd/initrd.nix:357
         "bcachefs" = "${pkgs.bcachefs-tools}/bin/bcachefs";
-        "mount.bcachefs" = "${mountCommand}/bin/mount.bcachefs";
+        "mount.bcachefs" = "${pkgs.bcachefs-tools}/bin/mount.bcachefs";
       };
-
       boot.initrd.extraUtilsCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
         copy_bin_and_libs ${pkgs.bcachefs-tools}/bin/bcachefs
-        copy_bin_and_libs ${mountCommand}/bin/mount.bcachefs
+        copy_bin_and_libs ${pkgs.bcachefs-tools}/bin/mount.bcachefs
       '';
       boot.initrd.extraUtilsCommandsTest = lib.mkIf (!config.boot.initrd.systemd.enable) ''
         $out/bin/bcachefs version
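For context, a sketch of a configuration that exercises this module together with the new UUID-based unlock fallback (the device UUID is a placeholder):

```nix
{
  boot.supportedFilesystems = [ "bcachefs" ];
  fileSystems."/data" = {
    # Referencing the device by UUID is the case the new blkid fallback
    # in the unlock helper is meant to resolve.
    device = "UUID=12345678-1234-1234-1234-123456789abc";
    fsType = "bcachefs";
  };
}
```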
diff --git a/nixos/modules/testing/test-instrumentation.nix b/nixos/modules/testing/test-instrumentation.nix
index abe68dd6eae68..9ee77cd79a9b1 100644
--- a/nixos/modules/testing/test-instrumentation.nix
+++ b/nixos/modules/testing/test-instrumentation.nix
@@ -11,10 +11,6 @@ let
   qemu-common = import ../../lib/qemu-common.nix { inherit lib pkgs; };
 
   backdoorService = {
-    wantedBy = [ "sysinit.target" ];
-    unitConfig.DefaultDependencies = false;
-    conflicts = [ "shutdown.target" "initrd-switch-root.target" ];
-    before = [ "shutdown.target" "initrd-switch-root.target" ];
     requires = [ "dev-hvc0.device" "dev-${qemu-common.qemuSerialDevice}.device" ];
     after = [ "dev-hvc0.device" "dev-${qemu-common.qemuSerialDevice}.device" ];
     script =
@@ -80,7 +76,12 @@ in
       }
     ];
 
-    systemd.services.backdoor = backdoorService;
+    systemd.services.backdoor = lib.mkMerge [
+      backdoorService
+      {
+        wantedBy = [ "multi-user.target" ];
+      }
+    ];
 
     boot.initrd.systemd = lib.mkMerge [
       {
@@ -104,7 +105,21 @@ in
           "/bin/true"
         ];
 
-        services.backdoor = backdoorService;
+        services.backdoor = lib.mkMerge [
+          backdoorService
+          {
+            # TODO: Both stage 1 and stage 2 should use these same
+            # settings. But a lot of existing tests rely on
+            # backdoor.service having default orderings,
+            # e.g. systemd-boot.update relies on /boot being mounted
+            # as soon as backdoor starts. But it can be useful for
+            # backdoor to start even earlier.
+            wantedBy = [ "sysinit.target" ];
+            unitConfig.DefaultDependencies = false;
+            conflicts = [ "shutdown.target" "initrd-switch-root.target" ];
+            before = [ "shutdown.target" "initrd-switch-root.target" ];
+          }
+        ];
 
         contents."/usr/bin/env".source = "${pkgs.coreutils}/bin/env";
       })
diff --git a/nixos/modules/virtualisation/azure-agent.nix b/nixos/modules/virtualisation/azure-agent.nix
index a88b78bc98219..e712fac17a462 100644
--- a/nixos/modules/virtualisation/azure-agent.nix
+++ b/nixos/modules/virtualisation/azure-agent.nix
@@ -61,7 +61,7 @@ in
 
         # Which provisioning agent to use. Supported values are "auto" (default), "waagent",
         # "cloud-init", or "disabled".
-        Provisioning.Agent=disabled
+        Provisioning.Agent=auto
 
         # Password authentication for root account will be unavailable.
         Provisioning.DeleteRootPassword=n
@@ -246,7 +246,7 @@ in
         pkgs.bash
 
         # waagent's Microsoft.OSTCExtensions.VMAccessForLinux needs Python 3
-        pkgs.python3
+        pkgs.python39
 
         # waagent's Microsoft.CPlat.Core.RunCommandLinux needs lsof
         pkgs.lsof
@@ -259,5 +259,10 @@ in
       };
     };
 
+    # waagent will generate files under /etc/sudoers.d during provisioning
+    security.sudo.extraConfig = ''
+      #includedir /etc/sudoers.d
+    '';
+
   };
 }
diff --git a/nixos/modules/virtualisation/azure-image.nix b/nixos/modules/virtualisation/azure-image.nix
index 39c6cab5980a1..d909680cca1ff 100644
--- a/nixos/modules/virtualisation/azure-image.nix
+++ b/nixos/modules/virtualisation/azure-image.nix
@@ -37,42 +37,5 @@ in
       inherit config lib pkgs;
     };
 
-    # Azure metadata is available as a CD-ROM drive.
-    fileSystems."/metadata".device = "/dev/sr0";
-
-    systemd.services.fetch-ssh-keys = {
-      description = "Fetch host keys and authorized_keys for root user";
-
-      wantedBy = [ "sshd.service" "waagent.service" ];
-      before = [ "sshd.service" "waagent.service" ];
-
-      path  = [ pkgs.coreutils ];
-      script =
-        ''
-          eval "$(cat /metadata/CustomData.bin)"
-          if ! [ -z "$ssh_host_ecdsa_key" ]; then
-            echo "downloaded ssh_host_ecdsa_key"
-            echo "$ssh_host_ecdsa_key" > /etc/ssh/ssh_host_ed25519_key
-            chmod 600 /etc/ssh/ssh_host_ed25519_key
-          fi
-
-          if ! [ -z "$ssh_host_ecdsa_key_pub" ]; then
-            echo "downloaded ssh_host_ecdsa_key_pub"
-            echo "$ssh_host_ecdsa_key_pub" > /etc/ssh/ssh_host_ed25519_key.pub
-            chmod 644 /etc/ssh/ssh_host_ed25519_key.pub
-          fi
-
-          if ! [ -z "$ssh_root_auth_key" ]; then
-            echo "downloaded ssh_root_auth_key"
-            mkdir -m 0700 -p /root/.ssh
-            echo "$ssh_root_auth_key" > /root/.ssh/authorized_keys
-            chmod 600 /root/.ssh/authorized_keys
-          fi
-        '';
-      serviceConfig.Type = "oneshot";
-      serviceConfig.RemainAfterExit = true;
-      serviceConfig.StandardError = "journal+console";
-      serviceConfig.StandardOutput = "journal+console";
-    };
   };
 }
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index f44fcfcf54ab6..6c127efa4cea2 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -124,6 +124,7 @@ in {
   apfs = runTest ./apfs.nix;
   appliance-repart-image = runTest ./appliance-repart-image.nix;
   apparmor = handleTest ./apparmor.nix {};
+  archi = handleTest ./archi.nix {};
   atd = handleTest ./atd.nix {};
   atop = handleTest ./atop.nix {};
   atuin = handleTest ./atuin.nix {};
@@ -656,6 +657,7 @@ in {
   phylactery = handleTest ./web-apps/phylactery.nix {};
   pict-rs = handleTest ./pict-rs.nix {};
   pinnwand = handleTest ./pinnwand.nix {};
+  plantuml-server = handleTest ./plantuml-server.nix {};
   plasma-bigscreen = handleTest ./plasma-bigscreen.nix {};
   plasma5 = handleTest ./plasma5.nix {};
   plasma5-systemd-start = handleTest ./plasma5-systemd-start.nix {};
diff --git a/nixos/tests/archi.nix b/nixos/tests/archi.nix
new file mode 100644
index 0000000000000..59f2e940c0050
--- /dev/null
+++ b/nixos/tests/archi.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "archi";
+  meta.maintainers = with lib.maintainers; [ paumr ];
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    environment.systemPackages = with pkgs; [ archi ];
+  };
+
+  enableOCR = true;
+
+  testScript = ''
+    machine.wait_for_x()
+
+    with subtest("createEmptyModel via CLI"):
+         machine.succeed("Archi -application com.archimatetool.commandline.app -consoleLog -nosplash --createEmptyModel --saveModel smoke.archimate")
+         machine.copy_from_vm("smoke.archimate", "")
+
+    with subtest("UI smoketest"):
+         machine.succeed("DISPLAY=:0 Archi --createEmptyModel >&2 &")
+         machine.wait_for_window("Archi")
+
+         # wait until the main UI is open
+         machine.wait_for_text("Welcome to Archi")
+
+         machine.screenshot("welcome-screen")
+  '';
+})
diff --git a/nixos/tests/dnscrypt-wrapper/default.nix b/nixos/tests/dnscrypt-wrapper/default.nix
index 1c05376e097b3..1a794931dc500 100644
--- a/nixos/tests/dnscrypt-wrapper/default.nix
+++ b/nixos/tests/dnscrypt-wrapper/default.nix
@@ -1,5 +1,15 @@
+
 { lib, pkgs, ... }:
 
+let
+  snakeoil = import ../common/acme/server/snakeoil-certs.nix;
+
+  hosts = lib.mkForce
+   { "fd::a" = [ "server" snakeoil.domain ];
+     "fd::b" = [ "client" ];
+   };
+in
+
 {
   name = "dnscrypt-wrapper";
   meta = with pkgs.lib.maintainers; {
@@ -7,59 +17,122 @@
   };
 
   nodes = {
-    server = { lib, ... }:
-      { services.dnscrypt-wrapper = with builtins;
+    server = {
+      networking.hosts = hosts;
+      networking.interfaces.eth1.ipv6.addresses = lib.singleton
+        { address = "fd::a"; prefixLength = 64; };
+
+        services.dnscrypt-wrapper =
           { enable = true;
-            address = "192.168.1.1";
+            address = "[::]";
+            port = 5353;
             keys.expiration = 5; # days
             keys.checkInterval = 2;  # min
             # The keypair was generated by the command:
             # dnscrypt-wrapper --gen-provider-keypair \
             #  --provider-name=2.dnscrypt-cert.server \
-            #  --ext-address=192.168.1.1:5353
-            providerKey.public = toFile "public.key" (readFile ./public.key);
-            providerKey.secret = toFile "secret.key" (readFile ./secret.key);
+            providerKey.public = "${./public.key}";
+            providerKey.secret = "${./secret.key}";
+          };
+
+        # nameserver
+        services.bind.enable = true;
+        services.bind.zones = lib.singleton
+          { name = ".";
+            master = true;
+            file = pkgs.writeText "root.zone" ''
+              $TTL 3600
+              . IN SOA example.org. admin.example.org. ( 1 3h 1h 1w 1d )
+              . IN NS example.org.
+              example.org. IN AAAA 2001:db8::1
+            '';
+          };
+
+        # webserver
+        services.nginx.enable = true;
+        services.nginx.virtualHosts.${snakeoil.domain} =
+          { onlySSL = true;
+            listenAddresses = [ "localhost" ];
+            sslCertificate = snakeoil.${snakeoil.domain}.cert;
+            sslCertificateKey = snakeoil.${snakeoil.domain}.key;
+            locations."/ip".extraConfig = ''
+              default_type text/plain;
+              return 200 "Ciao $remote_addr!\n";
+            '';
           };
-        services.tinydns.enable = true;
-        services.tinydns.data = ''
-          ..:192.168.1.1:a
-          +it.works:1.2.3.4
-        '';
-        networking.firewall.allowedUDPPorts = [ 5353 ];
-        networking.firewall.allowedTCPPorts = [ 5353 ];
-        networking.interfaces.eth1.ipv4.addresses = lib.mkForce
-          [ { address = "192.168.1.1"; prefixLength = 24; } ];
+
+        # demultiplex HTTP and DNS from port 443
+        services.sslh =
+          { enable = true;
+            method = "ev";
+            settings.transparent = true;
+            settings.listen = lib.mkForce
+              [ { host = "server"; port = "443"; is_udp = false; }
+                { host = "server"; port = "443"; is_udp = true; }
+              ];
+            settings.protocols =
+              [ # Send TLS to webserver (TCP)
+                { name = "tls"; host= "localhost"; port= "443"; }
+                # Send DNSCrypt to dnscrypt-wrapper (TCP or UDP)
+                { name = "anyprot"; host = "localhost"; port = "5353"; }
+                { name = "anyprot"; host = "localhost"; port = "5353"; is_udp = true;}
+              ];
+          };
+
+        networking.firewall.allowedTCPPorts = [ 443 ];
+        networking.firewall.allowedUDPPorts = [ 443 ];
       };
 
-    client = { lib, ... }:
-      { services.dnscrypt-proxy2.enable = true;
-        services.dnscrypt-proxy2.upstreamDefaults = false;
-        services.dnscrypt-proxy2.settings = {
-          server_names = [ "server" ];
-          static.server.stamp = "sdns://AQAAAAAAAAAAEDE5Mi4xNjguMS4xOjUzNTMgFEHYOv0SCKSuqR5CDYa7-58cCBuXO2_5uTSVU9wNQF0WMi5kbnNjcnlwdC1jZXJ0LnNlcnZlcg";
+    client = {
+      networking.hosts = hosts;
+      networking.interfaces.eth1.ipv6.addresses = lib.singleton
+        { address = "fd::b"; prefixLength = 64; };
+
+      services.dnscrypt-proxy2.enable = true;
+      services.dnscrypt-proxy2.upstreamDefaults = false;
+      services.dnscrypt-proxy2.settings =
+        { server_names = [ "server" ];
+          listen_addresses = [ "[::1]:53" ];
+          cache = false;
+          # Computed using https://dnscrypt.info/stamps/
+          static.server.stamp =
+            "sdns://AQAAAAAAAAAADzE5Mi4xNjguMS4yOjQ0MyAUQdg6"
+            +"_RIIpK6pHkINhrv7nxwIG5c7b_m5NJVT3A1AXRYyLmRuc2NyeXB0LWNlcnQuc2VydmVy";
         };
-        networking.nameservers = [ "127.0.0.1" ];
-        networking.interfaces.eth1.ipv4.addresses = lib.mkForce
-          [ { address = "192.168.1.2"; prefixLength = 24; } ];
-      };
+      networking.nameservers = [ "::1" ];
+      security.pki.certificateFiles = [ snakeoil.ca.cert ];
+    };
 
   };
 
   testScript = ''
-    start_all()
-
     with subtest("The server can generate the ephemeral keypair"):
         server.wait_for_unit("dnscrypt-wrapper")
         server.wait_for_file("/var/lib/dnscrypt-wrapper/2.dnscrypt-cert.server.key")
         server.wait_for_file("/var/lib/dnscrypt-wrapper/2.dnscrypt-cert.server.crt")
         almost_expiration = server.succeed("date --date '4days 23 hours 56min'").strip()
 
-    with subtest("The client can connect to the server"):
-        server.wait_for_unit("tinydns")
-        client.wait_for_unit("dnscrypt-proxy2")
-        assert "1.2.3.4" in client.wait_until_succeeds(
-            "host it.works"
-        ), "The IP address of 'it.works' does not match 1.2.3.4"
+    with subtest("The DNSCrypt client can connect to the server"):
+        server.wait_for_unit("sslh")
+        client.wait_until_succeeds("journalctl -u dnscrypt-proxy2 --grep '\[server\] OK'")
+
+    with subtest("HTTP client can connect to the server"):
+        server.wait_for_unit("nginx")
+        client.succeed("curl -s --fail https://${snakeoil.domain}/ip | grep -q fd::b")
+
+    with subtest("DNS queries over UDP are working"):
+        server.wait_for_unit("bind")
+        client.wait_for_open_port(53)
+        assert "2001:db8::1" in client.wait_until_succeeds(
+            "host -U example.org"
+        ), "The IP address of 'example.org' does not match 2001:db8::1"
+
+    with subtest("DNS queries over TCP are working"):
+        server.wait_for_unit("bind")
+        client.wait_for_open_port(53)
+        assert "2001:db8::1" in client.wait_until_succeeds(
+            "host -T example.org"
+        ), "The IP address of 'example.org' does not match 2001:db8::1"
 
     with subtest("The server rotates the ephemeral keys"):
         # advance time by a little less than 5 days
@@ -68,7 +141,8 @@
         server.wait_for_file("/var/lib/dnscrypt-wrapper/oldkeys")
 
     with subtest("The client can still connect to the server"):
-        server.wait_for_unit("dnscrypt-wrapper")
-        client.succeed("host it.works")
+        client.systemctl("restart dnscrypt-proxy2")
+        client.wait_until_succeeds("host -T example.org")
+        client.wait_until_succeeds("host -U example.org")
   '';
 }
diff --git a/nixos/tests/plantuml-server.nix b/nixos/tests/plantuml-server.nix
new file mode 100644
index 0000000000000..460c30919aecf
--- /dev/null
+++ b/nixos/tests/plantuml-server.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "plantuml-server";
+  meta.maintainers = with lib.maintainers; [ anthonyroussel ];
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.curl ];
+    services.plantuml-server.enable = true;
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("plantuml-server.service")
+    machine.wait_for_open_port(8080)
+
+    with subtest("Generate chart"):
+      chart_id = machine.succeed("curl -sSf http://localhost:8080/plantuml/coder -d 'Alice -> Bob'")
+      machine.succeed("curl -sSf http://localhost:8080/plantuml/txt/{}".format(chart_id))
+  '';
+})
diff --git a/nixos/tests/plausible.nix b/nixos/tests/plausible.nix
index 9afd3db75de8a..9c26c509a5ab5 100644
--- a/nixos/tests/plausible.nix
+++ b/nixos/tests/plausible.nix
@@ -8,9 +8,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     virtualisation.memorySize = 4096;
     services.plausible = {
       enable = true;
-      releaseCookiePath = "${pkgs.runCommand "cookie" { } ''
-        ${pkgs.openssl}/bin/openssl rand -base64 64 >"$out"
-      ''}";
       adminUser = {
         email = "admin@example.org";
         passwordFile = "${pkgs.writeText "pwd" "foobar"}";
@@ -28,6 +25,10 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
     machine.wait_for_unit("plausible.service")
     machine.wait_for_open_port(8000)
 
+    # Ensure that the software does not make the machine listen
+    # on any public interfaces by default.
+    machine.fail("ss -tlpn 'src = 0.0.0.0 or src = [::]' | grep LISTEN")
+
     machine.succeed("curl -f localhost:8000 >&2")
 
     machine.succeed("curl -f localhost:8000/js/script.js >&2")
diff --git a/nixos/tests/pleroma.nix b/nixos/tests/pleroma.nix
index 4f1aef854146e..08a01585f8778 100644
--- a/nixos/tests/pleroma.nix
+++ b/nixos/tests/pleroma.nix
@@ -164,9 +164,12 @@ import ./make-test-python.nix ({ pkgs, ... }:
   '';
 
   tls-cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
-    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=pleroma.nixos.test' -days 36500
     mkdir -p $out
-    cp key.pem cert.pem $out
+    openssl req -x509 \
+      -subj '/CN=pleroma.nixos.test/' -days 49710 \
+      -addext 'subjectAltName = DNS:pleroma.nixos.test' \
+      -keyout "$out/key.pem" -newkey ed25519 \
+      -out "$out/cert.pem" -noenc
   '';
 
   hosts = nodes: ''
@@ -180,7 +183,7 @@ import ./make-test-python.nix ({ pkgs, ... }:
       security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ];
       networking.extraHosts = hosts nodes;
       environment.systemPackages = with pkgs; [
-        toot
+        pkgs.toot
         send-toot
       ];
     };
diff --git a/nixos/tests/prometheus-exporters.nix b/nixos/tests/prometheus-exporters.nix
index bccfe998f61a9..7840130d4a364 100644
--- a/nixos/tests/prometheus-exporters.nix
+++ b/nixos/tests/prometheus-exporters.nix
@@ -257,6 +257,21 @@ let
       '';
     };
 
+    exportarr-sonarr = {
+      nodeName = "exportarr_sonarr";
+      exporterConfig = {
+        enable = true;
+        url = "http://127.0.0.1:8989";
+        # Testing with real data is tricky, because the API key cannot be preconfigured.
+        apiKeyFile = pkgs.writeText "dummy-api-key" "eccff6a992bc2e4b88e46d064b26bb4e";
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-exportarr-sonarr-exporter.service")
+        wait_for_open_port(9707)
+        succeed("curl -sSf 'http://localhost:9707/metrics")
+      '';
+    };
+
     fastly = {
       exporterConfig = {
         enable = true;
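The exportarr-sonarr test above mirrors what a host configuration for the new exporter might look like; the option path follows the test's node name and the API key path is a placeholder:

```nix
{
  services.prometheus.exporters.exportarr-sonarr = {
    enable = true;
    url = "http://127.0.0.1:8989";               # Sonarr API endpoint
    apiKeyFile = "/run/secrets/sonarr-api-key";  # placeholder secret path
    # Metrics are then served on port 9707, as checked in the test above.
  };
}
```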
diff --git a/nixos/tests/web-apps/mastodon/remote-postgresql.nix b/nixos/tests/web-apps/mastodon/remote-postgresql.nix
index 715477191bfbc..6548883db4526 100644
--- a/nixos/tests/web-apps/mastodon/remote-postgresql.nix
+++ b/nixos/tests/web-apps/mastodon/remote-postgresql.nix
@@ -16,7 +16,7 @@ in
   meta.maintainers = with pkgs.lib.maintainers; [ erictapen izorkin ];
 
   nodes = {
-    database = {
+    database = { config, ... }: {
       networking = {
         interfaces.eth1 = {
           ipv4.addresses = [
@@ -24,11 +24,13 @@ in
           ];
         };
         extraHosts = hosts;
-        firewall.allowedTCPPorts = [ 5432 ];
+        firewall.allowedTCPPorts = [ config.services.postgresql.port ];
       };
 
       services.postgresql = {
         enable = true;
+        # TODO remove once https://github.com/NixOS/nixpkgs/pull/266270 is resolved.
+        package = pkgs.postgresql_14;
         enableTCPIP = true;
         authentication = ''
           hostnossl mastodon_local mastodon_test 192.168.2.201/32 md5
@@ -41,7 +43,7 @@ in
       };
     };
 
-    nginx = {
+    nginx = { nodes, ... }: {
       networking = {
         interfaces.eth1 = {
           ipv4.addresses = [
@@ -69,18 +71,14 @@ in
             tryFiles = "$uri @proxy";
           };
           locations."@proxy" = {
-            proxyPass = "http://192.168.2.201:55001";
-            proxyWebsockets = true;
-          };
-          locations."/api/v1/streaming/" = {
-            proxyPass = "http://192.168.2.201:55002";
+            proxyPass = "http://192.168.2.201:${toString nodes.server.services.mastodon.webPort}";
             proxyWebsockets = true;
           };
         };
       };
     };
 
-    server = { pkgs, ... }: {
+    server = { config, pkgs, ... }: {
       virtualisation.memorySize = 2048;
 
       environment = {
@@ -98,7 +96,10 @@ in
           ];
         };
         extraHosts = hosts;
-        firewall.allowedTCPPorts = [ 55001 55002 ];
+        firewall.allowedTCPPorts = [
+          config.services.mastodon.webPort
+          config.services.mastodon.sidekiqPort
+        ];
       };
 
       services.mastodon = {
@@ -106,6 +107,7 @@ in
         configureNginx = false;
         localDomain = "mastodon.local";
         enableUnixSocket = false;
+        streamingProcesses = 2;
         database = {
           createLocally = false;
           host = "192.168.2.102";
diff --git a/nixos/tests/web-apps/mastodon/script.nix b/nixos/tests/web-apps/mastodon/script.nix
index a89b4b7480e99..afb7c0e0a0ebe 100644
--- a/nixos/tests/web-apps/mastodon/script.nix
+++ b/nixos/tests/web-apps/mastodon/script.nix
@@ -10,9 +10,8 @@
 
   server.wait_for_unit("redis-mastodon.service")
   server.wait_for_unit("mastodon-sidekiq-all.service")
-  server.wait_for_unit("mastodon-streaming.service")
+  server.wait_for_unit("mastodon-streaming.target")
   server.wait_for_unit("mastodon-web.service")
-  server.wait_for_open_port(55000)
   server.wait_for_open_port(55001)
 
   # Check that mastodon-media-auto-remove is scheduled
diff --git a/nixos/tests/web-apps/mastodon/standard.nix b/nixos/tests/web-apps/mastodon/standard.nix
index 14311afea3f78..e5eb30fef5976 100644
--- a/nixos/tests/web-apps/mastodon/standard.nix
+++ b/nixos/tests/web-apps/mastodon/standard.nix
@@ -40,11 +40,15 @@ in
         port = 31637;
       };
 
+      # TODO remove once https://github.com/NixOS/nixpkgs/pull/266270 is resolved.
+      services.postgresql.package = pkgs.postgresql_14;
+
       services.mastodon = {
         enable = true;
         configureNginx = true;
         localDomain = "mastodon.local";
         enableUnixSocket = false;
+        streamingProcesses = 2;
         smtp = {
           createLocally = false;
           fromAddress = "mastodon@mastodon.local";
diff --git a/nixos/tests/wordpress.nix b/nixos/tests/wordpress.nix
index 937b505af2acd..592af9a094f1d 100644
--- a/nixos/tests/wordpress.nix
+++ b/nixos/tests/wordpress.nix
@@ -67,7 +67,7 @@ rec {
       networking.hosts."127.0.0.1" = [ "site1.local" "site2.local" ];
     };
   }) {} [
-    "6_3"
+    "6_3" "6_4"
   ];
 
   testScript = ''
diff --git a/nixos/tests/xmpp/ejabberd.nix b/nixos/tests/xmpp/ejabberd.nix
index 7926fe80de2fd..1a807b27b6f65 100644
--- a/nixos/tests/xmpp/ejabberd.nix
+++ b/nixos/tests/xmpp/ejabberd.nix
@@ -1,7 +1,7 @@
 import ../make-test-python.nix ({ pkgs, ... }: {
   name = "ejabberd";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ ajs124 ];
+    maintainers = [ ];
   };
   nodes = {
     client = { nodes, pkgs, ... }: {