Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/from_md/release-notes/rl-2305.section.xml | 73
-rw-r--r--  nixos/doc/manual/release-notes/rl-2305.section.md | 19
-rw-r--r--  nixos/lib/systemd-unit-options.nix | 6
-rw-r--r--  nixos/lib/test-driver/test_driver/driver.py | 14
-rw-r--r--  nixos/lib/test-driver/test_driver/polling_condition.py | 29
-rw-r--r--  nixos/modules/config/no-x-libs.nix | 4
-rw-r--r--  nixos/modules/installer/cd-dvd/iso-image.nix | 10
-rw-r--r--  nixos/modules/module-list.nix | 3
-rw-r--r--  nixos/modules/profiles/keys/ssh_host_ed25519_key | 7
-rw-r--r--  nixos/modules/profiles/keys/ssh_host_ed25519_key.pub | 1
-rw-r--r--  nixos/modules/profiles/macos-builder.nix | 140
-rw-r--r--  nixos/modules/programs/git.nix | 38
-rw-r--r--  nixos/modules/programs/i3lock.nix | 58
-rw-r--r--  nixos/modules/services/blockchain/ethereum/lighthouse.nix | 2
-rw-r--r--  nixos/modules/services/misc/nitter.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/grafana.nix | 2
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/prometheus/exporters/statsd.nix | 19
-rw-r--r--  nixos/modules/services/networking/firefox-syncserver.nix | 7
-rw-r--r--  nixos/modules/services/networking/headscale.nix | 761
-rw-r--r--  nixos/modules/services/networking/rpcbind.nix | 10
-rw-r--r--  nixos/modules/services/networking/tox-node.nix | 2
-rw-r--r--  nixos/modules/services/web-apps/dolibarr.nix | 19
-rw-r--r--  nixos/modules/services/web-apps/peertube.nix | 121
-rw-r--r--  nixos/modules/services/web-servers/nginx/default.nix | 47
-rw-r--r--  nixos/modules/services/x11/desktop-managers/cinnamon.nix | 4
-rw-r--r--  nixos/modules/services/x11/desktop-managers/plasma5.nix | 2
-rw-r--r--  nixos/modules/system/activation/bootspec.cue | 1
-rw-r--r--  nixos/modules/system/activation/bootspec.nix | 12
-rw-r--r--  nixos/modules/system/activation/top-level.nix | 2
-rw-r--r--  nixos/modules/system/boot/stage-1.nix | 5
-rw-r--r--  nixos/modules/tasks/filesystems/zfs.nix | 4
-rw-r--r--  nixos/tests/acme.nix | 62
-rw-r--r--  nixos/tests/all-tests.nix | 2
-rw-r--r--  nixos/tests/bootspec.nix | 40
-rw-r--r--  nixos/tests/headscale.nix | 17
-rw-r--r--  nixos/tests/nginx.nix | 2
-rw-r--r--  nixos/tests/os-prober.nix | 2
-rw-r--r--  nixos/tests/prometheus-exporters.nix | 19
-rw-r--r--  nixos/tests/vscodium.nix | 2
-rw-r--r--  nixos/tests/web-apps/peertube.nix | 7
41 files changed, 1099 insertions(+), 479 deletions(-)
diff --git a/nixos/doc/manual/from_md/release-notes/rl-2305.section.xml b/nixos/doc/manual/from_md/release-notes/rl-2305.section.xml
index 95d5a80b7330c..ab1a63c807991 100644
--- a/nixos/doc/manual/from_md/release-notes/rl-2305.section.xml
+++ b/nixos/doc/manual/from_md/release-notes/rl-2305.section.xml
@@ -100,6 +100,26 @@
       </listitem>
       <listitem>
         <para>
+          <literal>minio</literal> removed support for its legacy
+          filesystem backend in
+          <link xlink:href="https://github.com/minio/minio/releases/tag/RELEASE.2022-10-29T06-21-33Z">RELEASE.2022-10-29T06-21-33Z</link>.
+          This means if your storage was created with the old format,
+          minio will no longer start. Unfortunately minio doesn’t
+          provide an automatic migration; they only provide
+          <link xlink:href="https://min.io/docs/minio/windows/operations/install-deploy-manage/migrate-fs-gateway.html">instructions
+          on how to manually convert the node</link>. To facilitate this
+          migration we keep around the last version that still supports
+          the old filesystem backend as
+          <literal>minio_legacy_fs</literal>. Use it via
+          <literal>services.minio.package = minio_legacy_fs;</literal>
+          to export your data before switching to the new version. See
+          the corresponding
+          <link xlink:href="https://github.com/NixOS/nixpkgs/issues/199318">issue</link>
+          for more details.
+        </para>
+      </listitem>
+      <listitem>
+        <para>
           <literal>services.sourcehut.dispatch</literal> and the
           corresponding package
           (<literal>sourcehut.dispatchsrht</literal>) have been removed
@@ -140,6 +160,16 @@
       </listitem>
       <listitem>
         <para>
+          The Nginx module now validates the syntax of config files at
+          build time. For more complex configurations (notably those
+          using <literal>include</literal> with out-of-store files)
+          you may need to disable this check by setting
+          <link linkend="opt-services.nginx.validateConfig">services.nginx.validateConfig</link>
+          to <literal>false</literal>.
+        </para>
+      </listitem>
+      <listitem>
+        <para>
           The EC2 image module previously detected and automatically
           mounted ext3-formatted instance store devices and partitions
           in stage-1 (initramfs), storing <literal>/tmp</literal> on the
@@ -260,6 +290,49 @@
       </listitem>
       <listitem>
         <para>
+          <literal>services.peertube</literal> now requires you to
+          specify the secret file
+          <literal>secrets.secretsFile</literal>. It can be generated by
+          running <literal>openssl rand -hex 32</literal>. Before
+          upgrading, read the release notes for PeerTube:
+        </para>
+        <itemizedlist spacing="compact">
+          <listitem>
+            <para>
+              <link xlink:href="https://github.com/Chocobozzz/PeerTube/releases/tag/v5.0.0">Release
+              v5.0.0</link>
+            </para>
+          </listitem>
+        </itemizedlist>
+        <para>
+          And back up your data.
+        </para>
+      </listitem>
+      <listitem>
+        <para>
+          The module <literal>services.headscale</literal> was
+          refactored to be compliant with
+          <link xlink:href="https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md">RFC
+          0042</link>. To be precise, this means that the following
+          things have changed:
+        </para>
+        <itemizedlist spacing="compact">
+          <listitem>
+            <para>
+              Most settings have been migrated under
+              <link linkend="opt-services.headscale.settings">services.headscale.settings</link>
+              which is an attribute set that will be converted into
+              headscale’s YAML config format. This means that the
+              configuration from
+              <link xlink:href="https://github.com/juanfont/headscale/blob/main/config-example.yaml">headscale’s
+              example configuration</link> can be directly written as
+              an attribute set in Nix within this option.
+            </para>
+          </listitem>
+        </itemizedlist>
+      </listitem>
+      <listitem>
+        <para>
           A new <literal>virtualisation.rosetta</literal> module was
           added to allow running <literal>x86_64</literal> binaries
           through
diff --git a/nixos/doc/manual/release-notes/rl-2305.section.md b/nixos/doc/manual/release-notes/rl-2305.section.md
index de376a7403b96..76e2a1f8b4329 100644
--- a/nixos/doc/manual/release-notes/rl-2305.section.md
+++ b/nixos/doc/manual/release-notes/rl-2305.section.md
@@ -35,6 +35,8 @@ In addition to numerous new and upgraded packages, this release has the followin
 - The EC2 image module no longer fetches instance metadata in stage-1. This results in a significantly smaller initramfs, since network drivers no longer need to be included, and faster boots, since metadata fetching can happen in parallel with startup of other services.
   This breaks services which rely on metadata being present by the time stage-2 is entered. Anything which reads EC2 metadata from `/etc/ec2-metadata` should now have an `after` dependency on `fetch-ec2-metadata.service`
 
+- `minio` removed support for its legacy filesystem backend in [RELEASE.2022-10-29T06-21-33Z](https://github.com/minio/minio/releases/tag/RELEASE.2022-10-29T06-21-33Z). This means if your storage was created with the old format, minio will no longer start. Unfortunately minio doesn't provide an automatic migration; they only provide [instructions on how to manually convert the node](https://min.io/docs/minio/windows/operations/install-deploy-manage/migrate-fs-gateway.html). To facilitate this migration we keep around the last version that still supports the old filesystem backend as `minio_legacy_fs`. Use it via `services.minio.package = minio_legacy_fs;` to export your data before switching to the new version. See the corresponding [issue](https://github.com/NixOS/nixpkgs/issues/199318) for more details.
+
 - `services.sourcehut.dispatch` and the corresponding package (`sourcehut.dispatchsrht`) have been removed due to [upstream deprecation](https://sourcehut.org/blog/2022-08-01-dispatch-deprecation-plans/).
 
 - The [services.snapserver.openFirewall](#opt-services.snapserver.openFirewall) module option default value has been changed from `true` to `false`. You will need to explicitly set this option to `true`, or configure your firewall.
@@ -43,6 +45,8 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - The [services.unifi-video.openFirewall](#opt-services.unifi-video.openFirewall) module option default value has been changed from `true` to `false`. You will need to explicitly set this option to `true`, or configure your firewall.
 
+- The Nginx module now validates the syntax of config files at build time. For more complex configurations (notably those using `include` with out-of-store files) you may need to disable this check by setting [services.nginx.validateConfig](#opt-services.nginx.validateConfig) to `false`.
+
 - The EC2 image module previously detected and automatically mounted ext3-formatted instance store devices and partitions in stage-1 (initramfs), storing `/tmp` on the first discovered device. This behaviour, which only catered to very specific use cases and could not be disabled, has been removed. Users relying on this should provide their own implementation, and probably use ext4 and perform the mount in stage-2.
 
 - The EC2 image module previously detected and activated swap-formatted instance store devices and partitions in stage-1 (initramfs). This behaviour has been removed. Users relying on this should provide their own implementation.
@@ -57,7 +61,7 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
 
-- `vim_configurable` has been renamed to `vim-full` to avoid confusion: `vim-full`'s build-time features are configurable, but both `vim` and `vim-full` are *customizable* (in the sense of user configuration, like vimrc).
+- `vim_configurable` has been renamed to `vim-full` to avoid confusion: `vim-full`'s build-time features are configurable, but both `vim` and `vim-full` are _customizable_ (in the sense of user configuration, like vimrc).
 
 - The module for the application firewall `opensnitch` got the ability to configure rules. Available as [services.opensnitch.rules](#opt-services.opensnitch.rules)
 
@@ -76,6 +80,19 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - `mastodon` now supports connection to a remote `PostgreSQL` database.
 
+- `services.peertube` now requires you to specify the secret file `secrets.secretsFile`. It can be generated by running `openssl rand -hex 32`.
+  Before upgrading, read the release notes for PeerTube:
+    - [Release v5.0.0](https://github.com/Chocobozzz/PeerTube/releases/tag/v5.0.0)
+
+  And back up your data.
+
+- The module `services.headscale` was refactored to be compliant with [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md). To be precise, this means that the following things have changed:
+
+  - Most settings have been migrated under [services.headscale.settings](#opt-services.headscale.settings) which is an attribute set that
+    will be converted into headscale's YAML config format. This means that the configuration from
+    [headscale's example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml)
+    can be directly written as an attribute set in Nix within this option.
+
 - A new `virtualisation.rosetta` module was added to allow running `x86_64` binaries through [Rosetta](https://developer.apple.com/documentation/apple-silicon/about-the-rosetta-translation-environment) inside virtualised NixOS guests on Apple silicon. This feature works by default with the [UTM](https://docs.getutm.app/) virtualisation [package](https://search.nixos.org/packages?channel=unstable&show=utm&from=0&size=1&sort=relevance&type=packages&query=utm).
 
 - The new option `users.motdFile` allows configuring a Message Of The Day that can be updated dynamically.
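
As a rough illustration of the headscale migration described in the release notes above, a configuration using the old top-level options could be rewritten against the new services.headscale.settings attribute set along these lines (the URL, domain and nameserver values are placeholders):

    services.headscale = {
      enable = true;
      address = "0.0.0.0";
      port = 8080;
      settings = {
        server_url = "https://headscale.example.com";  # was services.headscale.serverUrl
        log.level = "info";                            # was services.headscale.logLevel
        dns_config = {                                 # was services.headscale.dns.*
          magic_dns = true;
          base_domain = "example.com";
          nameservers = [ "1.1.1.1" ];
        };
      };
    };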
diff --git a/nixos/lib/systemd-unit-options.nix b/nixos/lib/systemd-unit-options.nix
index 44f26572a23ba..9c7cb34f14b57 100644
--- a/nixos/lib/systemd-unit-options.nix
+++ b/nixos/lib/systemd-unit-options.nix
@@ -324,7 +324,11 @@ in rec {
       scriptArgs = mkOption {
         type = types.str;
         default = "";
-        description = lib.mdDoc "Arguments passed to the main process script.";
+        example = "%i";
+        description = lib.mdDoc ''
+          Arguments passed to the main process script.
+          Can contain specifiers (`%` placeholders expanded by systemd, see {manpage}`systemd.unit(5)`).
+        '';
       };
 
       preStart = mkOption {
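
A minimal sketch of the specifier support now mentioned in the scriptArgs description; the templated unit name below is made up, and it assumes scriptArgs is appended to the generated start script's command line as positional arguments:

    systemd.services."my-worker@" = {
      # systemd expands %i to the instance name, e.g. my-worker@blue.service -> "blue";
      # the start script then receives it as $1
      scriptArgs = "%i";
      script = ''
        echo "starting worker for instance $1"
      '';
    };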
diff --git a/nixos/lib/test-driver/test_driver/driver.py b/nixos/lib/test-driver/test_driver/driver.py
index e32f6810ca87f..6542a2e2f6938 100644
--- a/nixos/lib/test-driver/test_driver/driver.py
+++ b/nixos/lib/test-driver/test_driver/driver.py
@@ -220,6 +220,20 @@ class Driver:
                 res = driver.polling_conditions.pop()
                 assert res is self.condition
 
+            def wait(self, timeout: int = 900) -> None:
+                def condition(last: bool) -> bool:
+                    if last:
+                        rootlog.info(f"Last chance for {self.condition.description}")
+                    ret = self.condition.check(force=True)
+                    if not ret and not last:
+                        rootlog.info(
+                            f"({self.condition.description} failure not fatal yet)"
+                        )
+                    return ret
+
+                with rootlog.nested(f"waiting for {self.condition.description}"):
+                    retry(condition, timeout=timeout)
+
         if fun_ is None:
             return Poll
         else:
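
The new wait() method could plausibly be used from a test script along these lines; the node name, command and timeout are illustrative, and @polling_condition is the existing test-driver helper that produces the Poll objects defined above:

    testScript = ''
      @polling_condition
      def webserver_up():
          status, _ = machine.execute("curl --fail --silent http://localhost/")
          return status == 0

      machine.wait_for_unit("nginx.service")

      # Block until the condition holds, failing the test if it does not
      # succeed within the timeout, instead of only registering it as a
      # background check.
      webserver_up.wait(timeout=120)
    '';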
diff --git a/nixos/lib/test-driver/test_driver/polling_condition.py b/nixos/lib/test-driver/test_driver/polling_condition.py
index 459845452fa12..02ca0a03ab3dc 100644
--- a/nixos/lib/test-driver/test_driver/polling_condition.py
+++ b/nixos/lib/test-driver/test_driver/polling_condition.py
@@ -1,4 +1,5 @@
 from typing import Callable, Optional
+from math import isfinite
 import time
 
 from .logger import rootlog
@@ -14,7 +15,7 @@ class PollingCondition:
     description: Optional[str]
 
     last_called: float
-    entered: bool
+    entry_count: int
 
     def __init__(
         self,
@@ -34,14 +35,21 @@ class PollingCondition:
             self.description = str(description)
 
         self.last_called = float("-inf")
-        self.entered = False
+        self.entry_count = 0
 
-    def check(self) -> bool:
-        if self.entered or not self.overdue:
+    def check(self, force: bool = False) -> bool:
+        if (self.entered or not self.overdue) and not force:
             return True
 
         with self, rootlog.nested(self.nested_message):
-            rootlog.info(f"Time since last: {time.monotonic() - self.last_called:.2f}s")
+            time_since_last = time.monotonic() - self.last_called
+            last_message = (
+                f"Time since last: {time_since_last:.2f}s"
+                if isfinite(time_since_last)
+                else "(not called yet)"
+            )
+
+            rootlog.info(last_message)
             try:
                 res = self.condition()  # type: ignore
             except Exception:
@@ -69,9 +77,16 @@ class PollingCondition:
     def overdue(self) -> bool:
         return self.last_called + self.seconds_interval < time.monotonic()
 
+    @property
+    def entered(self) -> bool:
+        # entry_count should never dip *below* zero
+        assert self.entry_count >= 0
+        return self.entry_count > 0
+
     def __enter__(self) -> None:
-        self.entered = True
+        self.entry_count += 1
 
     def __exit__(self, exc_type, exc_value, traceback) -> None:  # type: ignore
-        self.entered = False
+        assert self.entered
+        self.entry_count -= 1
         self.last_called = time.monotonic()
diff --git a/nixos/modules/config/no-x-libs.nix b/nixos/modules/config/no-x-libs.nix
index 3efadea823552..70e265a65a60c 100644
--- a/nixos/modules/config/no-x-libs.nix
+++ b/nixos/modules/config/no-x-libs.nix
@@ -33,9 +33,13 @@ with lib;
       ffmpeg_4 = super.ffmpeg_4-headless;
       ffmpeg_5 = super.ffmpeg_5-headless;
       gobject-introspection = super.gobject-introspection.override { x11Support = false; };
+      gpsd = super.gpsd.override { guiSupport = false; };
       imagemagick = super.imagemagick.override { libX11Support = false; libXtSupport = false; };
       imagemagickBig = super.imagemagickBig.override { libX11Support = false; libXtSupport = false; };
+      libextractor = super.libextractor.override { gstreamerSupport = false; gtkSupport = false; };
       libva = super.libva-minimal;
+      limesuite = super.limesuite.override { withGui = false; };
+      msmtp = super.msmtp.override { withKeyring = false; };
       networkmanager-fortisslvpn = super.networkmanager-fortisslvpn.override { withGnome = false; };
       networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; };
       networkmanager-l2tp = super.networkmanager-l2tp.override { withGnome = false; };
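
These headless overrides are applied through the module's existing toggle; a minimal configuration opting into the X11-free package set is simply:

    { ... }:
    {
      # the overlay above takes effect when this option is set
      environment.noXlibs = true;
    }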
diff --git a/nixos/modules/installer/cd-dvd/iso-image.nix b/nixos/modules/installer/cd-dvd/iso-image.nix
index 5bd343c85fa25..81aca86173899 100644
--- a/nixos/modules/installer/cd-dvd/iso-image.nix
+++ b/nixos/modules/installer/cd-dvd/iso-image.nix
@@ -70,14 +70,12 @@ let
   ;
 
   # Timeout in syslinux is in units of 1/10 of a second.
-  # 0 is used to disable timeouts.
+  # null means max timeout (35996, just under 1h in 1/10 seconds)
+  # 0 means disable timeout
   syslinuxTimeout = if config.boot.loader.timeout == null then
-      0
+      35996
     else
-      max (config.boot.loader.timeout * 10) 1;
-
-
-  max = x: y: if x > y then x else y;
+      config.boot.loader.timeout * 10;
 
   # The configuration file for syslinux.
 
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 6ec6c74565cd1..ac40b6cbfd97c 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -180,6 +180,7 @@
   ./programs/hamster.nix
   ./programs/htop.nix
   ./programs/iftop.nix
+  ./programs/i3lock.nix
   ./programs/iotop.nix
   ./programs/java.nix
   ./programs/k3b.nix
@@ -724,6 +725,7 @@
   ./services/monitoring/riemann.nix
   ./services/monitoring/scollector.nix
   ./services/monitoring/smartd.nix
+  ./services/monitoring/statsd.nix
   ./services/monitoring/sysstat.nix
   ./services/monitoring/teamviewer.nix
   ./services/monitoring/telegraf.nix
@@ -874,7 +876,6 @@
   ./services/networking/miredo.nix
   ./services/networking/mjpg-streamer.nix
   ./services/networking/mmsd.nix
-  ./services/networking/mosquitto.nix
   ./services/networking/monero.nix
   ./services/networking/morty.nix
   ./services/networking/mosquitto.nix
diff --git a/nixos/modules/profiles/keys/ssh_host_ed25519_key b/nixos/modules/profiles/keys/ssh_host_ed25519_key
new file mode 100644
index 0000000000000..b18489795369e
--- /dev/null
+++ b/nixos/modules/profiles/keys/ssh_host_ed25519_key
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACCQVnMW/wZWqrdWrjrRPhfEFFq1KLYguagSflLhFnVQmwAAAJASuMMnErjD
+JwAAAAtzc2gtZWQyNTUxOQAAACCQVnMW/wZWqrdWrjrRPhfEFFq1KLYguagSflLhFnVQmw
+AAAEDIN2VWFyggtoSPXcAFy8dtG1uAig8sCuyE21eMDt2GgJBWcxb/Blaqt1auOtE+F8QU
+WrUotiC5qBJ+UuEWdVCbAAAACnJvb3RAbml4b3MBAgM=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/nixos/modules/profiles/keys/ssh_host_ed25519_key.pub b/nixos/modules/profiles/keys/ssh_host_ed25519_key.pub
new file mode 100644
index 0000000000000..2c45826715fc5
--- /dev/null
+++ b/nixos/modules/profiles/keys/ssh_host_ed25519_key.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJBWcxb/Blaqt1auOtE+F8QUWrUotiC5qBJ+UuEWdVCb root@nixos
diff --git a/nixos/modules/profiles/macos-builder.nix b/nixos/modules/profiles/macos-builder.nix
new file mode 100644
index 0000000000000..77f3224a72941
--- /dev/null
+++ b/nixos/modules/profiles/macos-builder.nix
@@ -0,0 +1,140 @@
+{ config, lib, pkgs, ... }:
+
+let
+  keysDirectory = "/var/keys";
+
+  user = "builder";
+
+  keyType = "ed25519";
+
+in
+
+{ imports = [
+    ../virtualisation/qemu-vm.nix
+  ];
+
+  # The builder is not intended to be used interactively
+  documentation.enable = false;
+
+  environment.etc = {
+    "ssh/ssh_host_ed25519_key" = {
+      mode = "0600";
+
+      source = ./keys/ssh_host_ed25519_key;
+    };
+
+    "ssh/ssh_host_ed25519_key.pub" = {
+      mode = "0644";
+
+      source = ./keys/ssh_host_ed25519_key.pub;
+    };
+  };
+
+  # DNS fails for QEMU user networking (SLiRP) on macOS.  See:
+  #
+  # https://github.com/utmapp/UTM/issues/2353
+  #
+  # This works around that by using a public DNS server other than the DNS
+  # server that QEMU provides (normally 10.0.2.3)
+  networking.nameservers = [ "8.8.8.8" ];
+
+  nix.settings = {
+    auto-optimise-store = true;
+
+    min-free = 1024 * 1024 * 1024;
+
+    max-free = 3 * 1024 * 1024 * 1024;
+
+    trusted-users = [ "root" user ];
+  };
+
+  services.openssh = {
+    enable = true;
+
+    authorizedKeysFiles = [ "${keysDirectory}/%u_${keyType}.pub" ];
+  };
+
+  system.build.macos-builder-installer =
+    let
+      privateKey = "/etc/nix/${user}_${keyType}";
+
+      publicKey = "${privateKey}.pub";
+
+      # This installCredentials script is written so that it's as easy as
+      # possible for a user to audit before confirming the `sudo`
+      installCredentials = pkgs.writeShellScript "install-credentials" ''
+        KEYS="''${1}"
+        INSTALL=${hostPkgs.coreutils}/bin/install
+        "''${INSTALL}" -g nixbld -m 600 "''${KEYS}/${user}_${keyType}" ${privateKey}
+        "''${INSTALL}" -g nixbld -m 644 "''${KEYS}/${user}_${keyType}.pub" ${publicKey}
+      '';
+
+      hostPkgs = config.virtualisation.host.pkgs;
+
+      script = hostPkgs.writeShellScriptBin "create-builder" ''
+        KEYS="''${KEYS:-./keys}"
+        ${hostPkgs.coreutils}/bin/mkdir --parent "''${KEYS}"
+        PRIVATE_KEY="''${KEYS}/${user}_${keyType}"
+        PUBLIC_KEY="''${PRIVATE_KEY}.pub"
+        if [ ! -e "''${PRIVATE_KEY}" ] || [ ! -e "''${PUBLIC_KEY}" ]; then
+            ${hostPkgs.coreutils}/bin/rm --force -- "''${PRIVATE_KEY}" "''${PUBLIC_KEY}"
+            ${hostPkgs.openssh}/bin/ssh-keygen -q -f "''${PRIVATE_KEY}" -t ${keyType} -N "" -C 'builder@localhost'
+        fi
+        if ! ${hostPkgs.diffutils}/bin/cmp "''${PUBLIC_KEY}" ${publicKey}; then
+          (set -x; sudo --reset-timestamp ${installCredentials} "''${KEYS}")
+        fi
+        KEYS="$(nix-store --add "$KEYS")" ${config.system.build.vm}/bin/run-nixos-vm
+      '';
+
+    in
+      script.overrideAttrs (old: {
+        meta = (old.meta or { }) // {
+          platforms = lib.platforms.darwin;
+        };
+      });
+
+  system.stateVersion = "22.05";
+
+  users.users."${user}"= {
+    isNormalUser = true;
+  };
+
+  virtualisation = {
+    diskSize = 20 * 1024;
+
+    memorySize = 3 * 1024;
+
+    forwardPorts = [
+      { from = "host"; guest.port = 22; host.port = 22; }
+    ];
+
+    # Disable graphics for the builder since users will likely want to run it
+    # non-interactively in the background.
+    graphics = false;
+
+    sharedDirectories.keys = {
+      source = "\"$KEYS\"";
+      target = keysDirectory;
+    };
+
+    # If we don't enable this option then the host will fail to delegate builds
+    # to the guest, because:
+    #
+    # - The host will lock the path to build
+    # - The host will delegate the build to the guest
+    # - The guest will attempt to lock the same path and fail because
+    #   the lockfile on the host is visible on the guest
+    #
+    # Snapshotting the host's /nix/store as an image isolates the guest VM's
+    # /nix/store from the host's /nix/store, preventing this problem.
+    useNixStoreImage = true;
+
+    # Obviously the /nix/store needs to be writable on the guest in order for it
+    # to perform builds.
+    writableStore = true;
+
+    # This ensures that anything built on the guest isn't lost when the guest is
+    # restarted.
+    writableStoreUseTmpfs = false;
+  };
+}
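
One plausible way to consume this profile is to evaluate it as a NixOS configuration and build the create-builder script it defines; the invocation below is only a sketch and assumes an aarch64-linux guest:

    # hypothetical evaluation; the attribute path comes from the
    # system.build.macos-builder-installer definition above
    (import <nixpkgs/nixos> {
      system = "aarch64-linux";
      configuration = {
        imports = [ <nixpkgs/nixos/modules/profiles/macos-builder.nix> ];
      };
    }).config.system.build.macos-builder-installer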
diff --git a/nixos/modules/programs/git.nix b/nixos/modules/programs/git.nix
index acff5dfdd888a..4e271a8c134b2 100644
--- a/nixos/modules/programs/git.nix
+++ b/nixos/modules/programs/git.nix
@@ -20,15 +20,41 @@ in
       };
 
       config = mkOption {
-        type = with types; attrsOf (attrsOf anything);
-        default = { };
+        type =
+          with types;
+          let
+            gitini = attrsOf (attrsOf anything);
+          in
+          either gitini (listOf gitini) // {
+            merge = loc: defs:
+              let
+                config = foldl'
+                  (acc: { value, ... }@x: acc // (if isList value then {
+                    ordered = acc.ordered ++ value;
+                  } else {
+                    unordered = acc.unordered ++ [ x ];
+                  }))
+                  {
+                    ordered = [ ];
+                    unordered = [ ];
+                  }
+                  defs;
+              in
+              [ (gitini.merge loc config.unordered) ] ++ config.ordered;
+          };
+        default = [ ];
         example = {
           init.defaultBranch = "main";
           url."https://github.com/".insteadOf = [ "gh:" "github:" ];
         };
         description = lib.mdDoc ''
-          Configuration to write to /etc/gitconfig. See the CONFIGURATION FILE
-          section of git-config(1) for more information.
+          Configuration to write to /etc/gitconfig. A list can also be
+          specified to keep the configuration in order. For example, setting
+          `config` to `[ { foo.x = 42; } { bar.y = 42; } ]` will put the `foo`
+          section before the `bar` section, unlike the default alphabetical
+          order, which can be helpful for sections such as `include` and
+          `includeIf`. See the CONFIGURATION FILE section of git-config(1) for
+          more information.
         '';
       };
 
@@ -48,8 +74,8 @@ in
   config = mkMerge [
     (mkIf cfg.enable {
       environment.systemPackages = [ cfg.package ];
-      environment.etc.gitconfig = mkIf (cfg.config != {}) {
-        text = generators.toGitINI cfg.config;
+      environment.etc.gitconfig = mkIf (cfg.config != [ ]) {
+        text = concatMapStringsSep "\n" generators.toGitINI cfg.config;
       };
     })
     (mkIf (cfg.enable && cfg.lfs.enable) {
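
As an example of the ordered form described in the new option documentation, a configuration that needs its include section emitted last could look roughly like this (paths and identities are placeholders):

    programs.git = {
      enable = true;
      config = [
        { init.defaultBranch = "main"; }
        { user = { name = "Alice"; email = "alice@example.com"; }; }
        # kept last so its values can override the sections above
        { include.path = "/etc/gitconfig.local"; }
      ];
    };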
diff --git a/nixos/modules/programs/i3lock.nix b/nixos/modules/programs/i3lock.nix
new file mode 100644
index 0000000000000..466ae59c9277f
--- /dev/null
+++ b/nixos/modules/programs/i3lock.nix
@@ -0,0 +1,58 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.programs.i3lock;
+
+in {
+
+  ###### interface
+
+  options = {
+    programs.i3lock = {
+      enable = mkEnableOption (mdDoc "i3lock");
+      package = mkOption {
+        type        = types.package;
+        default     = pkgs.i3lock;
+        defaultText = literalExpression "pkgs.i3lock";
+        example     = literalExpression ''
+          pkgs.i3lock-color
+        '';
+        description = mdDoc ''
+          Specify which package to use for the i3lock program.
+          The i3lock package must include an i3lock file or link in its out directory in order for the u2fSupport option to work correctly.
+        '';
+      };
+      u2fSupport = mkOption {
+        type        = types.bool;
+        default     = false;
+        example     = true;
+        description = mdDoc ''
+          Whether to enable U2F support in the i3lock program.
+          U2F enables authentication using a hardware device, such as a security key.
+          When U2F support is enabled, the i3lock program will set the setuid bit on the i3lock binary and enable the pam u2fAuth service.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ cfg.package ];
+
+    security.wrappers.i3lock = mkIf cfg.u2fSupport {
+      setuid = true;
+      owner = "root";
+      group = "root";
+      source = "${cfg.package.out}/bin/i3lock";
+    };
+
+    security.pam.services.i3lock.u2fAuth = cfg.u2fSupport;
+
+  };
+
+}
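
A minimal configuration enabling the new module might look like:

    { pkgs, ... }: {
      programs.i3lock = {
        enable = true;
        package = pkgs.i3lock-color;  # optional; defaults to pkgs.i3lock
        u2fSupport = true;            # adds a setuid wrapper and enables pam u2fAuth
      };
    }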
diff --git a/nixos/modules/services/blockchain/ethereum/lighthouse.nix b/nixos/modules/services/blockchain/ethereum/lighthouse.nix
index 96d66e44d162f..863e737d908ae 100644
--- a/nixos/modules/services/blockchain/ethereum/lighthouse.nix
+++ b/nixos/modules/services/blockchain/ethereum/lighthouse.nix
@@ -280,7 +280,7 @@ in {
         ${pkgs.lighthouse}/bin/lighthouse validator_client \
           --network ${cfg.network} \
           --beacon-nodes ${lib.concatStringsSep "," cfg.validator.beaconNodes} \
-          --datadir ${cfg.validator.dataDir}/${cfg.network}
+          --datadir ${cfg.validator.dataDir}/${cfg.network} \
           ${optionalString cfg.validator.metrics.enable ''--metrics --metrics-address ${cfg.validator.metrics.address} --metrics-port ${toString cfg.validator.metrics.port}''} \
           ${cfg.extraArgs} ${cfg.validator.extraArgs}
       '';
diff --git a/nixos/modules/services/misc/nitter.nix b/nixos/modules/services/misc/nitter.nix
index 95394d9d2113e..f0cb5cc151388 100644
--- a/nixos/modules/services/misc/nitter.nix
+++ b/nixos/modules/services/misc/nitter.nix
@@ -47,7 +47,7 @@ in
 {
   options = {
     services.nitter = {
-      enable = mkEnableOption (lib.mdDoc "If enabled, start Nitter.");
+      enable = mkEnableOption (lib.mdDoc "Nitter");
 
       package = mkOption {
         default = pkgs.nitter;
diff --git a/nixos/modules/services/monitoring/grafana.nix b/nixos/modules/services/monitoring/grafana.nix
index 5061666f57cec..9a9a0ab755325 100644
--- a/nixos/modules/services/monitoring/grafana.nix
+++ b/nixos/modules/services/monitoring/grafana.nix
@@ -555,7 +555,7 @@ in {
             auto_assign_org_role = mkOption {
               description = lib.mdDoc "Default role new users will be auto assigned.";
               default = "Viewer";
-              type = types.enum ["Viewer" "Editor"];
+              type = types.enum ["Viewer" "Editor" "Admin"];
             };
           };
 
diff --git a/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixos/modules/services/monitoring/prometheus/exporters.nix
index 2451f46ba7d75..f3fbfb149ad77 100644
--- a/nixos/modules/services/monitoring/prometheus/exporters.nix
+++ b/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -68,6 +68,7 @@ let
     "smartctl"
     "smokeping"
     "sql"
+    "statsd"
     "surfboard"
     "systemd"
     "tor"
diff --git a/nixos/modules/services/monitoring/prometheus/exporters/statsd.nix b/nixos/modules/services/monitoring/prometheus/exporters/statsd.nix
new file mode 100644
index 0000000000000..d9d732d8c125a
--- /dev/null
+++ b/nixos/modules/services/monitoring/prometheus/exporters/statsd.nix
@@ -0,0 +1,19 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.statsd;
+in
+{
+  port = 9102;
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-statsd-exporter}/bin/statsd_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
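
A sketch of enabling the new exporter; enable, listenAddress, port and extraFlags come from the shared exporter framework rather than from this file, and the commented mapping-config flag is only an illustrative statsd_exporter option:

    services.prometheus.exporters.statsd = {
      enable = true;
      listenAddress = "127.0.0.1";
      port = 9102;
      # extraFlags = [ "--statsd.mapping-config=/etc/statsd-mapping.yaml" ];
    };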
diff --git a/nixos/modules/services/networking/firefox-syncserver.nix b/nixos/modules/services/networking/firefox-syncserver.nix
index c3d9f43f74577..9733fb16d9032 100644
--- a/nixos/modules/services/networking/firefox-syncserver.nix
+++ b/nixos/modules/services/networking/firefox-syncserver.nix
@@ -11,8 +11,10 @@ let
 
   format = pkgs.formats.toml {};
   settings = {
-    database_url = dbURL;
     human_logs = true;
+    syncstorage = {
+      database_url = dbURL;
+    };
     tokenserver = {
       node_type = "mysql";
       database_url = dbURL;
@@ -253,8 +255,7 @@ in
       serviceConfig = {
         User = defaultUser;
         Group = defaultUser;
-        ExecStart = "${cfg.package}/bin/syncstorage --config ${configFile}";
-        Stderr = "journal";
+        ExecStart = "${cfg.package}/bin/syncserver --config ${configFile}";
         EnvironmentFile = lib.mkIf (cfg.secrets != null) "${cfg.secrets}";
 
         # hardening
diff --git a/nixos/modules/services/networking/headscale.nix b/nixos/modules/services/networking/headscale.nix
index 29b632ff5d22a..cc46819eed5a6 100644
--- a/nixos/modules/services/networking/headscale.nix
+++ b/nixos/modules/services/networking/headscale.nix
@@ -1,15 +1,18 @@
-{ config, lib, pkgs, ... }:
-with lib;
-let
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
+with lib; let
   cfg = config.services.headscale;
 
   dataDir = "/var/lib/headscale";
   runDir = "/run/headscale";
 
-  settingsFormat = pkgs.formats.yaml { };
+  settingsFormat = pkgs.formats.yaml {};
   configFile = settingsFormat.generate "headscale.yaml" cfg.settings;
-in
-{
+in {
   options = {
     services.headscale = {
       enable = mkEnableOption (lib.mdDoc "headscale, Open Source coordination server for Tailscale");
@@ -51,15 +54,6 @@ in
         '';
       };
 
-      serverUrl = mkOption {
-        type = types.str;
-        default = "http://127.0.0.1:8080";
-        description = lib.mdDoc ''
-          The url clients will connect to.
-        '';
-        example = "https://myheadscale.example.com:443";
-      };
-
       address = mkOption {
         type = types.str;
         default = "127.0.0.1";
@@ -78,337 +72,346 @@ in
         example = 443;
       };
 
-      privateKeyFile = mkOption {
-        type = types.path;
-        default = "${dataDir}/private.key";
-        description = lib.mdDoc ''
-          Path to private key file, generated automatically if it does not exist.
-        '';
-      };
-
-      derp = {
-        urls = mkOption {
-          type = types.listOf types.str;
-          default = [ "https://controlplane.tailscale.com/derpmap/default" ];
-          description = lib.mdDoc ''
-            List of urls containing DERP maps.
-            See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.
-          '';
-        };
-
-        paths = mkOption {
-          type = types.listOf types.path;
-          default = [ ];
-          description = lib.mdDoc ''
-            List of file paths containing DERP maps.
-            See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.
-          '';
-        };
-
-
-        autoUpdate = mkOption {
-          type = types.bool;
-          default = true;
-          description = lib.mdDoc ''
-            Whether to automatically update DERP maps on a set frequency.
-          '';
-          example = false;
-        };
-
-        updateFrequency = mkOption {
-          type = types.str;
-          default = "24h";
-          description = lib.mdDoc ''
-            Frequency to update DERP maps.
-          '';
-          example = "5m";
-        };
-
-      };
-
-      ephemeralNodeInactivityTimeout = mkOption {
-        type = types.str;
-        default = "30m";
-        description = lib.mdDoc ''
-          Time before an inactive ephemeral node is deleted.
-        '';
-        example = "5m";
-      };
-
-      database = {
-        type = mkOption {
-          type = types.enum [ "sqlite3" "postgres" ];
-          example = "postgres";
-          default = "sqlite3";
-          description = lib.mdDoc "Database engine to use.";
-        };
-
-        host = mkOption {
-          type = types.nullOr types.str;
-          default = null;
-          example = "127.0.0.1";
-          description = lib.mdDoc "Database host address.";
-        };
-
-        port = mkOption {
-          type = types.nullOr types.port;
-          default = null;
-          example = 3306;
-          description = lib.mdDoc "Database host port.";
-        };
-
-        name = mkOption {
-          type = types.nullOr types.str;
-          default = null;
-          example = "headscale";
-          description = lib.mdDoc "Database name.";
-        };
-
-        user = mkOption {
-          type = types.nullOr types.str;
-          default = null;
-          example = "headscale";
-          description = lib.mdDoc "Database user.";
-        };
-
-        passwordFile = mkOption {
-          type = types.nullOr types.path;
-          default = null;
-          example = "/run/keys/headscale-dbpassword";
-          description = lib.mdDoc ''
-            A file containing the password corresponding to
-            {option}`database.user`.
-          '';
-        };
-
-        path = mkOption {
-          type = types.nullOr types.str;
-          default = "${dataDir}/db.sqlite";
-          description = lib.mdDoc "Path to the sqlite3 database file.";
-        };
-      };
-
-      logLevel = mkOption {
-        type = types.str;
-        default = "info";
-        description = lib.mdDoc ''
-          headscale log level.
-        '';
-        example = "debug";
-      };
-
-      dns = {
-        nameservers = mkOption {
-          type = types.listOf types.str;
-          default = [ "1.1.1.1" ];
-          description = lib.mdDoc ''
-            List of nameservers to pass to Tailscale clients.
-          '';
-        };
-
-        domains = mkOption {
-          type = types.listOf types.str;
-          default = [ ];
-          description = lib.mdDoc ''
-            Search domains to inject to Tailscale clients.
-          '';
-          example = [ "mydomain.internal" ];
-        };
-
-        magicDns = mkOption {
-          type = types.bool;
-          default = true;
-          description = lib.mdDoc ''
-            Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
-            Only works if there is at least a nameserver defined.
-          '';
-          example = false;
-        };
-
-        baseDomain = mkOption {
-          type = types.str;
-          default = "";
-          description = lib.mdDoc ''
-            Defines the base domain to create the hostnames for MagicDNS.
-            {option}`baseDomain` must be a FQDNs, without the trailing dot.
-            The FQDN of the hosts will be
-            `hostname.namespace.base_domain` (e.g.
-            `myhost.mynamespace.example.com`).
-          '';
-        };
-      };
-
-      openIdConnect = {
-        issuer = mkOption {
-          type = types.str;
-          default = "";
-          description = lib.mdDoc ''
-            URL to OpenID issuer.
-          '';
-          example = "https://openid.example.com";
-        };
-
-        clientId = mkOption {
-          type = types.str;
-          default = "";
-          description = lib.mdDoc ''
-            OpenID Connect client ID.
-          '';
-        };
-
-        clientSecretFile = mkOption {
-          type = types.nullOr types.path;
-          default = null;
-          description = lib.mdDoc ''
-            Path to OpenID Connect client secret file.
-          '';
-        };
-
-        domainMap = mkOption {
-          type = types.attrsOf types.str;
-          default = { };
-          description = lib.mdDoc ''
-            Domain map is used to map incoming users (by their email) to
-            a namespace. The key can be a string, or regex.
-          '';
-          example = {
-            ".*" = "default-namespace";
-          };
-        };
-
-      };
-
-      tls = {
-        letsencrypt = {
-          hostname = mkOption {
-            type = types.nullOr types.str;
-            default = "";
-            description = lib.mdDoc ''
-              Domain name to request a TLS certificate for.
-            '';
-          };
-          challengeType = mkOption {
-            type = types.enum [ "TLS-ALPN-01" "HTTP-01" ];
-            default = "HTTP-01";
-            description = lib.mdDoc ''
-              Type of ACME challenge to use, currently supported types:
-              `HTTP-01` or `TLS-ALPN-01`.
-            '';
-          };
-          httpListen = mkOption {
-            type = types.nullOr types.str;
-            default = ":http";
-            description = lib.mdDoc ''
-              When HTTP-01 challenge is chosen, letsencrypt must set up a
-              verification endpoint, and it will be listening on:
-              `:http = port 80`.
-            '';
-          };
-        };
-
-        certFile = mkOption {
-          type = types.nullOr types.path;
-          default = null;
-          description = lib.mdDoc ''
-            Path to already created certificate.
-          '';
-        };
-        keyFile = mkOption {
-          type = types.nullOr types.path;
-          default = null;
-          description = lib.mdDoc ''
-            Path to key for already created certificate.
-          '';
-        };
-      };
-
-      aclPolicyFile = mkOption {
-        type = types.nullOr types.path;
-        default = null;
-        description = lib.mdDoc ''
-          Path to a file containing ACL policies.
-        '';
-      };
-
       settings = mkOption {
-        type = settingsFormat.type;
-        default = { };
         description = lib.mdDoc ''
           Overrides to {file}`config.yaml` as a Nix attribute set.
-          This option is ideal for overriding settings not exposed as Nix options.
           Check the [example config](https://github.com/juanfont/headscale/blob/main/config-example.yaml)
           for possible options.
         '';
+        type = types.submodule {
+          freeformType = settingsFormat.type;
+
+          options = {
+            server_url = mkOption {
+              type = types.str;
+              default = "http://127.0.0.1:8080";
+              description = lib.mdDoc ''
+                The url clients will connect to.
+              '';
+              example = "https://myheadscale.example.com:443";
+            };
+
+            private_key_path = mkOption {
+              type = types.path;
+              default = "${dataDir}/private.key";
+              description = lib.mdDoc ''
+                Path to private key file, generated automatically if it does not exist.
+              '';
+            };
+
+            noise.private_key_path = mkOption {
+              type = types.path;
+              default = "${dataDir}/noise_private.key";
+              description = lib.mdDoc ''
+                Path to noise private key file, generated automatically if it does not exist.
+              '';
+            };
+
+            derp = {
+              urls = mkOption {
+                type = types.listOf types.str;
+                default = ["https://controlplane.tailscale.com/derpmap/default"];
+                description = lib.mdDoc ''
+                  List of urls containing DERP maps.
+                  See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.
+                '';
+              };
+
+              paths = mkOption {
+                type = types.listOf types.path;
+                default = [];
+                description = lib.mdDoc ''
+                  List of file paths containing DERP maps.
+                  See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.
+                '';
+              };
+
+              auto_update_enable = mkOption {
+                type = types.bool;
+                default = true;
+                description = lib.mdDoc ''
+                  Whether to automatically update DERP maps on a set frequency.
+                '';
+                example = false;
+              };
+
+              update_frequency = mkOption {
+                type = types.str;
+                default = "24h";
+                description = lib.mdDoc ''
+                  Frequency to update DERP maps.
+                '';
+                example = "5m";
+              };
+            };
+
+            ephemeral_node_inactivity_timeout = mkOption {
+              type = types.str;
+              default = "30m";
+              description = lib.mdDoc ''
+                Time before an inactive ephemeral node is deleted.
+              '';
+              example = "5m";
+            };
+
+            db_type = mkOption {
+              type = types.enum ["sqlite3" "postgres"];
+              example = "postgres";
+              default = "sqlite3";
+              description = lib.mdDoc "Database engine to use.";
+            };
+
+            db_host = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "127.0.0.1";
+              description = lib.mdDoc "Database host address.";
+            };
+
+            db_port = mkOption {
+              type = types.nullOr types.port;
+              default = null;
+              example = 3306;
+              description = lib.mdDoc "Database host port.";
+            };
+
+            db_name = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "headscale";
+              description = lib.mdDoc "Database name.";
+            };
+
+            db_user = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "headscale";
+              description = lib.mdDoc "Database user.";
+            };
+
+            db_password_file = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              example = "/run/keys/headscale-dbpassword";
+              description = lib.mdDoc ''
+                A file containing the password corresponding to
+                {option}`database.user`.
+              '';
+            };
+
+            db_path = mkOption {
+              type = types.nullOr types.str;
+              default = "${dataDir}/db.sqlite";
+              description = lib.mdDoc "Path to the sqlite3 database file.";
+            };
+
+            log.level = mkOption {
+              type = types.str;
+              default = "info";
+              description = lib.mdDoc ''
+                headscale log level.
+              '';
+              example = "debug";
+            };
+
+            log.format = mkOption {
+              type = types.str;
+              default = "text";
+              description = lib.mdDoc ''
+                headscale log format.
+              '';
+              example = "json";
+            };
+
+            dns_config = {
+              nameservers = mkOption {
+                type = types.listOf types.str;
+                default = ["1.1.1.1"];
+                description = lib.mdDoc ''
+                  List of nameservers to pass to Tailscale clients.
+                '';
+              };
+
+              override_local_dns = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc ''
+                  Whether to use [Override local DNS](https://tailscale.com/kb/1054/dns/).
+                '';
+                example = true;
+              };
+
+              domains = mkOption {
+                type = types.listOf types.str;
+                default = [];
+                description = lib.mdDoc ''
+                  Search domains to inject to Tailscale clients.
+                '';
+                example = ["mydomain.internal"];
+              };
+
+              magic_dns = mkOption {
+                type = types.bool;
+                default = true;
+                description = lib.mdDoc ''
+                  Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
+                  Only works if there is at least a nameserver defined.
+                '';
+                example = false;
+              };
+
+              base_domain = mkOption {
+                type = types.str;
+                default = "";
+                description = lib.mdDoc ''
+                  Defines the base domain to create the hostnames for MagicDNS.
+                  {option}`base_domain` must be an FQDN, without the trailing dot.
+                  The FQDN of the hosts will be
+                  `hostname.namespace.base_domain` (e.g.
+                  `myhost.mynamespace.example.com`).
+                '';
+              };
+            };
+
+            oidc = {
+              issuer = mkOption {
+                type = types.str;
+                default = "";
+                description = lib.mdDoc ''
+                  URL to OpenID issuer.
+                '';
+                example = "https://openid.example.com";
+              };
+
+              client_id = mkOption {
+                type = types.str;
+                default = "";
+                description = lib.mdDoc ''
+                  OpenID Connect client ID.
+                '';
+              };
+
+              client_secret_file = mkOption {
+                type = types.nullOr types.path;
+                default = null;
+                description = lib.mdDoc ''
+                  Path to OpenID Connect client secret file.
+                '';
+              };
+
+              domain_map = mkOption {
+                type = types.attrsOf types.str;
+                default = {};
+                description = lib.mdDoc ''
+                  Domain map is used to map incoming users (by their email) to
+                  a namespace. The key can be a string, or regex.
+                '';
+                example = {
+                  ".*" = "default-namespace";
+                };
+              };
+            };
+
+            tls_letsencrypt_hostname = mkOption {
+              type = types.nullOr types.str;
+              default = "";
+              description = lib.mdDoc ''
+                Domain name to request a TLS certificate for.
+              '';
+            };
+
+            tls_letsencrypt_challenge_type = mkOption {
+              type = types.enum ["TLS-ALPN-01" "HTTP-01"];
+              default = "HTTP-01";
+              description = lib.mdDoc ''
+                Type of ACME challenge to use, currently supported types:
+                `HTTP-01` or `TLS-ALPN-01`.
+              '';
+            };
+
+            tls_letsencrypt_listen = mkOption {
+              type = types.nullOr types.str;
+              default = ":http";
+              description = lib.mdDoc ''
+                When HTTP-01 challenge is chosen, letsencrypt must set up a
+                verification endpoint, and it will be listening on:
+                `:http = port 80`.
+              '';
+            };
+
+            tls_cert_path = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = lib.mdDoc ''
+                Path to already created certificate.
+              '';
+            };
+
+            tls_key_path = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = lib.mdDoc ''
+                Path to key for already created certificate.
+              '';
+            };
+
+            acl_policy_path = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = lib.mdDoc ''
+                Path to a file containing ACL policies.
+              '';
+            };
+          };
+        };
       };
-
-
     };
-
   };
-  config = mkIf cfg.enable {
 
+  imports = [
+    # TODO address + port = listen_addr
+    (mkRenamedOptionModule ["services" "headscale" "serverUrl"] ["services" "headscale" "settings" "server_url"])
+    (mkRenamedOptionModule ["services" "headscale" "privateKeyFile"] ["services" "headscale" "settings" "private_key_path"])
+    (mkRenamedOptionModule ["services" "headscale" "derp" "urls"] ["services" "headscale" "settings" "derp" "urls"])
+    (mkRenamedOptionModule ["services" "headscale" "derp" "paths"] ["services" "headscale" "settings" "derp" "paths"])
+    (mkRenamedOptionModule ["services" "headscale" "derp" "autoUpdate"] ["services" "headscale" "settings" "derp" "auto_update_enable"])
+    (mkRenamedOptionModule ["services" "headscale" "derp" "updateFrequency"] ["services" "headscale" "settings" "derp" "update_frequency"])
+    (mkRenamedOptionModule ["services" "headscale" "ephemeralNodeInactivityTimeout"] ["services" "headscale" "settings" "ephemeral_node_inactivity_timeout"])
+    (mkRenamedOptionModule ["services" "headscale" "database" "type"] ["services" "headscale" "settings" "db_type"])
+    (mkRenamedOptionModule ["services" "headscale" "database" "path"] ["services" "headscale" "settings" "db_path"])
+    (mkRenamedOptionModule ["services" "headscale" "database" "host"] ["services" "headscale" "settings" "db_host"])
+    (mkRenamedOptionModule ["services" "headscale" "database" "port"] ["services" "headscale" "settings" "db_port"])
+    (mkRenamedOptionModule ["services" "headscale" "database" "name"] ["services" "headscale" "settings" "db_name"])
+    (mkRenamedOptionModule ["services" "headscale" "database" "user"] ["services" "headscale" "settings" "db_user"])
+    (mkRenamedOptionModule ["services" "headscale" "database" "passwordFile"] ["services" "headscale" "settings" "db_password_file"])
+    (mkRenamedOptionModule ["services" "headscale" "logLevel"] ["services" "headscale" "settings" "log" "level"])
+    (mkRenamedOptionModule ["services" "headscale" "dns" "nameservers"] ["services" "headscale" "settings" "dns_config" "nameservers"])
+    (mkRenamedOptionModule ["services" "headscale" "dns" "domains"] ["services" "headscale" "settings" "dns_config" "domains"])
+    (mkRenamedOptionModule ["services" "headscale" "dns" "magicDns"] ["services" "headscale" "settings" "dns_config" "magic_dns"])
+    (mkRenamedOptionModule ["services" "headscale" "dns" "baseDomain"] ["services" "headscale" "settings" "dns_config" "base_domain"])
+    (mkRenamedOptionModule ["services" "headscale" "openIdConnect" "issuer"] ["services" "headscale" "settings" "oidc" "issuer"])
+    (mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientId"] ["services" "headscale" "settings" "oidc" "client_id"])
+    (mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientSecretFile"] ["services" "headscale" "settings" "oidc" "client_secret_file"])
+    (mkRenamedOptionModule ["services" "headscale" "openIdConnect" "domainMap"] ["services" "headscale" "settings" "oidc" "domain_map"])
+    (mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "hostname"] ["services" "headscale" "settings" "tls_letsencrypt_hostname"])
+    (mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "challengeType"] ["services" "headscale" "settings" "tls_letsencrypt_challenge_type"])
+    (mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "httpListen"] ["services" "headscale" "settings" "tls_letsencrypt_listen"])
+    (mkRenamedOptionModule ["services" "headscale" "tls" "certFile"] ["services" "headscale" "settings" "tls_cert_path"])
+    (mkRenamedOptionModule ["services" "headscale" "tls" "keyFile"] ["services" "headscale" "settings" "tls_key_path"])
+    (mkRenamedOptionModule ["services" "headscale" "aclPolicyFile"] ["services" "headscale" "settings" "acl_policy_path"])
+  ];
+
+  config = mkIf cfg.enable {
     services.headscale.settings = {
-      server_url = mkDefault cfg.serverUrl;
       listen_addr = mkDefault "${cfg.address}:${toString cfg.port}";
 
-      private_key_path = mkDefault cfg.privateKeyFile;
-
-      derp = {
-        urls = mkDefault cfg.derp.urls;
-        paths = mkDefault cfg.derp.paths;
-        auto_update_enable = mkDefault cfg.derp.autoUpdate;
-        update_frequency = mkDefault cfg.derp.updateFrequency;
-      };
-
       # Turn off update checks since the origin of our package
       # is nixpkgs and not Github.
       disable_check_updates = true;
 
-      ephemeral_node_inactivity_timeout = mkDefault cfg.ephemeralNodeInactivityTimeout;
-
-      db_type = mkDefault cfg.database.type;
-      db_path = mkDefault cfg.database.path;
-
-      log_level = mkDefault cfg.logLevel;
-
-      dns_config = {
-        nameservers = mkDefault cfg.dns.nameservers;
-        domains = mkDefault cfg.dns.domains;
-        magic_dns = mkDefault cfg.dns.magicDns;
-        base_domain = mkDefault cfg.dns.baseDomain;
-      };
-
       unix_socket = "${runDir}/headscale.sock";
 
-      # OpenID Connect
-      oidc = {
-        issuer = mkDefault cfg.openIdConnect.issuer;
-        client_id = mkDefault cfg.openIdConnect.clientId;
-        domain_map = mkDefault cfg.openIdConnect.domainMap;
-      };
-
       tls_letsencrypt_cache_dir = "${dataDir}/.cache";
-
-    } // optionalAttrs (cfg.database.host != null) {
-      db_host = mkDefault cfg.database.host;
-    } // optionalAttrs (cfg.database.port != null) {
-      db_port = mkDefault cfg.database.port;
-    } // optionalAttrs (cfg.database.name != null) {
-      db_name = mkDefault cfg.database.name;
-    } // optionalAttrs (cfg.database.user != null) {
-      db_user = mkDefault cfg.database.user;
-    } // optionalAttrs (cfg.tls.letsencrypt.hostname != null) {
-      tls_letsencrypt_hostname = mkDefault cfg.tls.letsencrypt.hostname;
-    } // optionalAttrs (cfg.tls.letsencrypt.challengeType != null) {
-      tls_letsencrypt_challenge_type = mkDefault cfg.tls.letsencrypt.challengeType;
-    } // optionalAttrs (cfg.tls.letsencrypt.httpListen != null) {
-      tls_letsencrypt_listen = mkDefault cfg.tls.letsencrypt.httpListen;
-    } // optionalAttrs (cfg.tls.certFile != null) {
-      tls_cert_path = mkDefault cfg.tls.certFile;
-    } // optionalAttrs (cfg.tls.keyFile != null) {
-      tls_key_path = mkDefault cfg.tls.keyFile;
-    } // optionalAttrs (cfg.aclPolicyFile != null) {
-      acl_policy_path = mkDefault cfg.aclPolicyFile;
     };
 
     # Setup the headscale configuration in a known path in /etc to
@@ -416,7 +419,7 @@ in
     # for communication.
     environment.etc."headscale/config.yaml".source = configFile;
 
-    users.groups.headscale = mkIf (cfg.group == "headscale") { };
+    users.groups.headscale = mkIf (cfg.group == "headscale") {};
 
     users.users.headscale = mkIf (cfg.user == "headscale") {
       description = "headscale user";
@@ -427,70 +430,68 @@ in
 
     systemd.services.headscale = {
       description = "headscale coordination server for Tailscale";
-      after = [ "network-online.target" ];
-      wantedBy = [ "multi-user.target" ];
-      restartTriggers = [ configFile ];
+      after = ["network-online.target"];
+      wantedBy = ["multi-user.target"];
+      restartTriggers = [configFile];
 
       environment.GIN_MODE = "release";
 
       script = ''
-        ${optionalString (cfg.database.passwordFile != null) ''
-          export HEADSCALE_DB_PASS="$(head -n1 ${escapeShellArg cfg.database.passwordFile})"
+        ${optionalString (cfg.settings.db_password_file != null) ''
+          export HEADSCALE_DB_PASS="$(head -n1 ${escapeShellArg cfg.settings.db_password_file})"
         ''}
 
-        ${optionalString (cfg.openIdConnect.clientSecretFile != null) ''
-          export HEADSCALE_OIDC_CLIENT_SECRET="$(head -n1 ${escapeShellArg cfg.openIdConnect.clientSecretFile})"
+        ${optionalString (cfg.settings.oidc.client_secret_file != null) ''
+          export HEADSCALE_OIDC_CLIENT_SECRET="$(head -n1 ${escapeShellArg cfg.settings.oidc.client_secret_file})"
         ''}
         exec ${cfg.package}/bin/headscale serve
       '';
 
-      serviceConfig =
-        let
-          capabilityBoundingSet = [ "CAP_CHOWN" ] ++ optional (cfg.port < 1024) "CAP_NET_BIND_SERVICE";
-        in
-        {
-          Restart = "always";
-          Type = "simple";
-          User = cfg.user;
-          Group = cfg.group;
-
-          # Hardening options
-          RuntimeDirectory = "headscale";
-          # Allow headscale group access so users can be added and use the CLI.
-          RuntimeDirectoryMode = "0750";
-
-          StateDirectory = "headscale";
-          StateDirectoryMode = "0750";
-
-          ProtectSystem = "strict";
-          ProtectHome = true;
-          PrivateTmp = true;
-          PrivateDevices = true;
-          ProtectKernelTunables = true;
-          ProtectControlGroups = true;
-          RestrictSUIDSGID = true;
-          PrivateMounts = true;
-          ProtectKernelModules = true;
-          ProtectKernelLogs = true;
-          ProtectHostname = true;
-          ProtectClock = true;
-          ProtectProc = "invisible";
-          ProcSubset = "pid";
-          RestrictNamespaces = true;
-          RemoveIPC = true;
-          UMask = "0077";
-
-          CapabilityBoundingSet = capabilityBoundingSet;
-          AmbientCapabilities = capabilityBoundingSet;
-          NoNewPrivileges = true;
-          LockPersonality = true;
-          RestrictRealtime = true;
-          SystemCallFilter = [ "@system-service" "~@privileged" "@chown" ];
-          SystemCallArchitectures = "native";
-          RestrictAddressFamilies = "AF_INET AF_INET6 AF_UNIX";
-        };
+      serviceConfig = let
+        capabilityBoundingSet = ["CAP_CHOWN"] ++ optional (cfg.port < 1024) "CAP_NET_BIND_SERVICE";
+      in {
+        Restart = "always";
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+
+        # Hardening options
+        RuntimeDirectory = "headscale";
+        # Allow headscale group access so users can be added and use the CLI.
+        RuntimeDirectoryMode = "0750";
+
+        StateDirectory = "headscale";
+        StateDirectoryMode = "0750";
+
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        RestrictSUIDSGID = true;
+        PrivateMounts = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        RestrictNamespaces = true;
+        RemoveIPC = true;
+        UMask = "0077";
+
+        CapabilityBoundingSet = capabilityBoundingSet;
+        AmbientCapabilities = capabilityBoundingSet;
+        NoNewPrivileges = true;
+        LockPersonality = true;
+        RestrictRealtime = true;
+        SystemCallFilter = ["@system-service" "~@privileged" "@chown"];
+        SystemCallArchitectures = "native";
+        RestrictAddressFamilies = "AF_INET AF_INET6 AF_UNIX";
+      };
     };
   };
 
-  meta.maintainers = with maintainers; [ kradalby ];
+  meta.maintainers = with maintainers; [kradalby misterio77];
 }
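For configurations still using the pre-rename option names, a minimal migration sketch (the values are placeholders, not taken from this change):

    services.headscale.settings = {
      dns_config.base_domain = "example.com";  # was services.headscale.dns.baseDomain
      log.level = "info";                      # was services.headscale.logLevel
    };

The old names keep evaluating for now: mkRenamedOptionModule forwards each value to its new location and emits a rename warning.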
diff --git a/nixos/modules/services/networking/rpcbind.nix b/nixos/modules/services/networking/rpcbind.nix
index aa04214debb0a..60e78dfec51ba 100644
--- a/nixos/modules/services/networking/rpcbind.nix
+++ b/nixos/modules/services/networking/rpcbind.nix
@@ -35,6 +35,16 @@ with lib;
 
     systemd.services.rpcbind = {
       wantedBy = [ "multi-user.target" ];
+      # rpcbind checks for /var/run/rpcbind.lock at startup and will
+      # crash if /var/run isn't present. The stock NixOS tmpfiles
+      # configuration (var.conf) symlinks /var/run to /run, so rpcbind
+      # can race with systemd-tmpfiles: if it starts before the symlink
+      # exists, it fails while trying to use the path. Ordering the unit
+      # explicitly after tmpfiles setup ensures rpcbind starts
+      # successfully. Using `wants` rather than `requires` avoids
+      # creating a strict/brittle dependency.
+      wants = [ "systemd-tmpfiles-setup.service" ];
+      after = [ "systemd-tmpfiles-setup.service" ];
     };
 
     users.users.rpc = {
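The same ordering pattern can be reused for any unit that touches /var/run before tmpfiles has created the symlink; a hedged sketch for a hypothetical service (my-daemon is a placeholder, not part of this change):

    systemd.services.my-daemon = {
      # Start only after systemd-tmpfiles has set up /var/run -> /run.
      wants = [ "systemd-tmpfiles-setup.service" ];
      after = [ "systemd-tmpfiles-setup.service" ];
    };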
diff --git a/nixos/modules/services/networking/tox-node.nix b/nixos/modules/services/networking/tox-node.nix
index fa5b241f91836..884fd55dae51d 100644
--- a/nixos/modules/services/networking/tox-node.nix
+++ b/nixos/modules/services/networking/tox-node.nix
@@ -8,7 +8,7 @@ let
   homeDir = "/var/lib/tox-node";
 
   configFile = let
-    src = "${pkg.src}/dpkg/config.yml";
+    src = "${pkg.src}/tox_node/dpkg/config.yml";
     confJSON = pkgs.writeText "config.json" (
       builtins.toJSON {
         log-type = cfg.logType;
diff --git a/nixos/modules/services/web-apps/dolibarr.nix b/nixos/modules/services/web-apps/dolibarr.nix
index 5335c439329c0..f262099354d2c 100644
--- a/nixos/modules/services/web-apps/dolibarr.nix
+++ b/nixos/modules/services/web-apps/dolibarr.nix
@@ -1,11 +1,11 @@
 { config, pkgs, lib, ... }:
 let
-  inherit (lib) any boolToString concatStringsSep isBool isString literalExpression mapAttrsToList mkDefault mkEnableOption mkIf mkOption optionalAttrs types;
+  inherit (lib) any boolToString concatStringsSep isBool isString mapAttrsToList mkDefault mkEnableOption mkIf mkMerge mkOption optionalAttrs types;
 
   package = pkgs.dolibarr.override { inherit (cfg) stateDir; };
 
   cfg = config.services.dolibarr;
-  vhostCfg = config.services.nginx.virtualHosts."${cfg.domain}";
+  vhostCfg = lib.optionalAttrs (cfg.nginx != null) config.services.nginx.virtualHosts."${cfg.domain}";
 
   mkConfigFile = filename: settings:
     let
@@ -38,7 +38,7 @@ let
     force_install_database = cfg.database.name;
     force_install_databaselogin = cfg.database.user;
 
-    force_install_mainforcehttps = vhostCfg.forceSSL;
+    force_install_mainforcehttps = vhostCfg.forceSSL or false;
     force_install_createuser = false;
     force_install_dolibarrlogin = null;
   } // optionalAttrs (cfg.database.passwordFile != null) {
@@ -183,7 +183,8 @@ in
   };
 
   # implementation
-  config = mkIf cfg.enable {
+  config = mkIf cfg.enable (mkMerge [
+    {
 
     assertions = [
       { assertion = cfg.database.createLocally -> cfg.database.user == cfg.user;
@@ -214,7 +215,7 @@ in
 
       # Security settings
       dolibarr_main_prod = true;
-      dolibarr_main_force_https = vhostCfg.forceSSL;
+      dolibarr_main_force_https = vhostCfg.forceSSL or false;
       dolibarr_main_restrict_os_commands = "${pkgs.mariadb}/bin/mysqldump, ${pkgs.mariadb}/bin/mysql";
       dolibarr_nocsrfcheck = false;
       dolibarr_main_instance_unique_id = ''
@@ -314,7 +315,9 @@ in
     users.groups = optionalAttrs (cfg.group == "dolibarr") {
       dolibarr = { };
     };
-
-    users.users."${config.services.nginx.group}".extraGroups = [ cfg.group ];
-  };
+  }
+  (mkIf (cfg.nginx != null) {
+    users.users."${config.services.nginx.group}".extraGroups = [ cfg.group ];
+  })
+]);
 }
diff --git a/nixos/modules/services/web-apps/peertube.nix b/nixos/modules/services/web-apps/peertube.nix
index 4dbcb09d2ae28..7e418f2869c85 100644
--- a/nixos/modules/services/web-apps/peertube.nix
+++ b/nixos/modules/services/web-apps/peertube.nix
@@ -161,6 +161,18 @@ in {
       description = lib.mdDoc "Configure nginx as a reverse proxy for peertube.";
     };
 
+    secrets = {
+      secretsFile = lib.mkOption {
+        type = lib.types.nullOr lib.types.path;
+        default = null;
+        example = "/run/secrets/peertube";
+        description = lib.mdDoc ''
+          Path to a file containing the secret used to run PeerTube.
+          Generate one with `openssl rand -hex 32`.
+        '';
+      };
+    };
+
     database = {
       createLocally = lib.mkOption {
         type = lib.types.bool;
@@ -201,7 +213,7 @@ in {
       passwordFile = lib.mkOption {
         type = lib.types.nullOr lib.types.path;
         default = null;
-        example = "/run/keys/peertube/password-posgressql-db";
+        example = "/run/keys/peertube/password-postgresql";
         description = lib.mdDoc "Password for PostgreSQL database.";
       };
     };
@@ -282,6 +294,11 @@ in {
             prevent this.
           '';
       }
+      { assertion = cfg.secrets.secretsFile != null;
+          message = ''
+            <option>services.peertube.secrets.secretsFile</option> needs to be set.
+          '';
+      }
       { assertion = !(cfg.redis.enableUnixSocket && (cfg.redis.host != null || cfg.redis.port != null));
           message = ''
             <option>services.peertube.redis.createLocally</option> and redis network connection (<option>services.peertube.redis.host</option> or <option>services.peertube.redis.port</option>) enabled. Disable either of them.
@@ -349,6 +366,7 @@ in {
           captions = lib.mkDefault "/var/lib/peertube/storage/captions/";
           cache = lib.mkDefault "/var/lib/peertube/storage/cache/";
           plugins = lib.mkDefault "/var/lib/peertube/storage/plugins/";
+          well_known = lib.mkDefault "/var/lib/peertube/storage/well_known/";
           client_overrides = lib.mkDefault "/var/lib/peertube/storage/client-overrides/";
         };
         import = {
@@ -417,6 +435,10 @@ in {
         #!/bin/sh
         umask 077
         cat > /var/lib/peertube/config/local.yaml <<EOF
+        ${lib.optionalString (cfg.secrets.secretsFile != null) ''
+        secrets:
+          peertube: '$(cat ${cfg.secrets.secretsFile})'
+        ''}
         ${lib.optionalString ((!cfg.database.createLocally) && (cfg.database.passwordFile != null)) ''
         database:
           password: '$(cat ${cfg.database.passwordFile})'
@@ -443,6 +465,7 @@ in {
         RestartSec = 20;
         TimeoutSec = 60;
         WorkingDirectory = cfg.package;
+        SyslogIdentifier = "peertube";
         # User and group
         User = cfg.user;
         Group = cfg.group;
@@ -548,9 +571,14 @@ in {
           '';
         };
 
+        locations."~ ^/plugins/[^/]+(/[^/]+)?/ws/" = {
+          tryFiles = "/dev/null @api_websocket";
+          priority = 1230;
+        };
+
         locations."@api_websocket" = {
           proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
-          priority = 1230;
+          priority = 1240;
 
           extraConfig = ''
             proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
@@ -581,7 +609,7 @@ in {
           '';
         };
 
-        locations."~ ^/lazy-static/(avatars|banners)/" = {
+        locations."^~ /lazy-static/avatars/" = {
           tryFiles = "$uri @api";
           root = cfg.settings.storage.avatars;
           priority = 1330;
@@ -599,6 +627,26 @@ in {
             add_header Cache-Control                    'public, max-age=7200';
 
             rewrite ^/lazy-static/avatars/(.*)$         /$1 break;
+          '';
+        };
+
+        locations."^~ /lazy-static/banners/" = {
+          tryFiles = "$uri @api";
+          root = cfg.settings.storage.avatars;
+          priority = 1340;
+          extraConfig = ''
+            if ($request_method = 'OPTIONS') {
+              ${nginxCommonHeaders}
+              add_header Access-Control-Max-Age         1728000;
+              add_header Cache-Control                  'no-cache';
+              add_header Content-Type                   'text/plain charset=UTF-8';
+              add_header Content-Length                 0;
+              return                                    204;
+            }
+
+            ${nginxCommonHeaders}
+            add_header Cache-Control                    'public, max-age=7200';
+
             rewrite ^/lazy-static/banners/(.*)$         /$1 break;
           '';
         };
@@ -606,7 +654,7 @@ in {
         locations."^~ /lazy-static/previews/" = {
           tryFiles = "$uri @api";
           root = cfg.settings.storage.previews;
-          priority = 1340;
+          priority = 1350;
           extraConfig = ''
             if ($request_method = 'OPTIONS') {
               ${nginxCommonHeaders}
@@ -624,10 +672,34 @@ in {
           '';
         };
 
+        locations."^~ /static/streaming-playlists/private/" = {
+          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          priority = 1410;
+          extraConfig = ''
+            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
+            proxy_set_header Host                       $host;
+            proxy_set_header X-Real-IP                  $remote_addr;
+
+            proxy_limit_rate                            5M;
+          '';
+        };
+
+        locations."^~ /static/webseed/private/" = {
+          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          priority = 1420;
+          extraConfig = ''
+            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
+            proxy_set_header Host                       $host;
+            proxy_set_header X-Real-IP                  $remote_addr;
+
+            proxy_limit_rate                            5M;
+          '';
+        };
+
         locations."^~ /static/thumbnails/" = {
           tryFiles = "$uri @api";
           root = cfg.settings.storage.thumbnails;
-          priority = 1350;
+          priority = 1430;
           extraConfig = ''
             if ($request_method = 'OPTIONS') {
               ${nginxCommonHeaders}
@@ -648,8 +720,14 @@ in {
         locations."^~ /static/redundancy/" = {
           tryFiles = "$uri @api";
           root = cfg.settings.storage.redundancy;
-          priority = 1360;
+          priority = 1440;
           extraConfig = ''
+            set $peertube_limit_rate                    800k;
+
+            if ($request_uri ~ -fragmented.mp4$) {
+              set $peertube_limit_rate                  5M;
+            }
+
             if ($request_method = 'OPTIONS') {
               ${nginxCommonHeaders}
               add_header Access-Control-Max-Age         1728000;
@@ -662,15 +740,14 @@ in {
 
               access_log                                off;
             }
+
             aio                                         threads;
             sendfile                                    on;
             sendfile_max_chunk                          1M;
 
+            limit_rate                                  $peertube_limit_rate;
             limit_rate_after                            5M;
 
-            set $peertube_limit_rate                    800k;
-            set $limit_rate                             $peertube_limit_rate;
-
             rewrite ^/static/redundancy/(.*)$           /$1 break;
           '';
         };
@@ -678,8 +755,14 @@ in {
         locations."^~ /static/streaming-playlists/" = {
           tryFiles = "$uri @api";
           root = cfg.settings.storage.streaming_playlists;
-          priority = 1370;
+          priority = 1450;
           extraConfig = ''
+            set $peertube_limit_rate                    800k;
+
+            if ($request_uri ~ -fragmented.mp4$) {
+              set $peertube_limit_rate                  5M;
+            }
+
             if ($request_method = 'OPTIONS') {
               ${nginxCommonHeaders}
               add_header Access-Control-Max-Age         1728000;
@@ -697,20 +780,24 @@ in {
             sendfile                                    on;
             sendfile_max_chunk                          1M;
 
+            limit_rate                                  $peertube_limit_rate;
             limit_rate_after                            5M;
 
-            set $peertube_limit_rate                    5M;
-            set $limit_rate                             $peertube_limit_rate;
-
             rewrite ^/static/streaming-playlists/(.*)$  /$1 break;
           '';
         };
 
-        locations."~ ^/static/webseed/" = {
+        locations."^~ /static/webseed/" = {
           tryFiles = "$uri @api";
           root = cfg.settings.storage.videos;
-          priority = 1380;
+          priority = 1460;
           extraConfig = ''
+            set $peertube_limit_rate                    800k;
+
+            if ($request_uri ~ -fragmented.mp4$) {
+              set $peertube_limit_rate                  5M;
+            }
+
             if ($request_method = 'OPTIONS') {
               ${nginxCommonHeaders}
               add_header Access-Control-Max-Age         1728000;
@@ -728,11 +815,9 @@ in {
             sendfile                                    on;
             sendfile_max_chunk                          1M;
 
+            limit_rate                                  $peertube_limit_rate;
             limit_rate_after                            5M;
 
-            set $peertube_limit_rate                    800k;
-            set $limit_rate                             $peertube_limit_rate;
-
             rewrite ^/static/webseed/(.*)$              /$1 break;
           '';
         };
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index 0b0e0de21df2b..953f316329346 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -241,7 +241,7 @@ let
 
   configPath = if cfg.enableReload
     then "/etc/nginx/nginx.conf"
-    else configFile;
+    else finalConfigFile;
 
   execCommand = "${cfg.package}/bin/nginx -c '${configPath}'";
 
@@ -393,6 +393,38 @@ let
   );
 
   mkCertOwnershipAssertion = import ../../../security/acme/mk-cert-ownership-assertion.nix;
+
+  snakeOilCert = pkgs.runCommand "nginx-config-validate-cert" { nativeBuildInputs = [ pkgs.openssl.bin ]; } ''
+    mkdir $out
+    openssl genrsa -des3 -passout pass:xxxxx -out server.pass.key 2048
+    openssl rsa -passin pass:xxxxx -in server.pass.key -out $out/server.key
+    openssl req -new -key $out/server.key -out server.csr \
+    -subj "/C=UK/ST=Warwickshire/L=Leamington/O=OrgName/OU=IT Department/CN=example.com"
+    openssl x509 -req -days 1 -in server.csr -signkey $out/server.key -out $out/server.crt
+  '';
+  validatedConfigFile = pkgs.runCommand "validated-nginx.conf" { nativeBuildInputs = [ cfg.package ]; } ''
+    # nginx absolutely wants to read the certificates even when told to only validate config, so let's provide fake certs
+    sed ${configFile} \
+    -e "s|ssl_certificate .*;|ssl_certificate ${snakeOilCert}/server.crt;|g" \
+    -e "s|ssl_trusted_certificate .*;|ssl_trusted_certificate ${snakeOilCert}/server.crt;|g" \
+    -e "s|ssl_certificate_key .*;|ssl_certificate_key ${snakeOilCert}/server.key;|g" \
+    > conf
+
+    LD_PRELOAD=${pkgs.libredirect}/lib/libredirect.so \
+      NIX_REDIRECTS="/etc/resolv.conf=/dev/null" \
+      nginx -t -c $(readlink -f ./conf) > out 2>&1 || true
+    if ! grep -q "syntax is ok" out; then
+      echo nginx config validation failed.
+      echo config was ${configFile}.
+      echo 'in case of false positive, set `services.nginx.validateConfig` to false.'
+      echo nginx output:
+      cat out
+      exit 1
+    fi
+    cp ${configFile} $out
+  '';
+
+  finalConfigFile = if cfg.validateConfig then validatedConfigFile else configFile;
 in
 
 {
@@ -491,6 +523,15 @@ in
         '';
       };
 
+      validateConfig = mkOption {
+        default = pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform;
+        defaultText = literalExpression "pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform";
+        type = types.bool;
+        description = lib.mdDoc ''
+          Validate the generated nginx config at build time. The check is not very robust; disable it in case of false positives, which notably occur when cross-compiling or when using `include` with files outside of the Nix store.
+        '';
+      };
+
       additionalModules = mkOption {
         default = [];
         type = types.listOf (types.attrsOf types.anything);
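A hedged sketch of opting out of the new build-time check, for instance when cross-compiling or when the config includes files outside the Nix store:

    services.nginx = {
      enable = true;
      validateConfig = false;  # skip the build-time `nginx -t` run
    };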
@@ -1029,7 +1070,7 @@ in
     };
 
     environment.etc."nginx/nginx.conf" = mkIf cfg.enableReload {
-      source = configFile;
+      source = finalConfigFile;
     };
 
     # This service waits for all certificates to be available
@@ -1048,7 +1089,7 @@ in
       # certs are updated _after_ config has been reloaded.
       before = sslTargets;
       after = sslServices;
-      restartTriggers = optionals (cfg.enableReload) [ configFile ];
+      restartTriggers = optionals (cfg.enableReload) [ finalConfigFile ];
       # Block reloading if not all certs exist yet.
       # Happens when config changes add new vhosts/certs.
       unitConfig.ConditionPathExists = optionals (sslServices != []) (map (certName: certs.${certName}.directory + "/fullchain.pem") dependentCertNames);
diff --git a/nixos/modules/services/x11/desktop-managers/cinnamon.nix b/nixos/modules/services/x11/desktop-managers/cinnamon.nix
index aaad1de5f87bd..08c5625fc7ddd 100644
--- a/nixos/modules/services/x11/desktop-managers/cinnamon.nix
+++ b/nixos/modules/services/x11/desktop-managers/cinnamon.nix
@@ -105,7 +105,7 @@ in
       services.dbus.packages = with pkgs.cinnamon; [
         cinnamon-common
         cinnamon-screensaver
-        nemo
+        nemo-with-extensions
         xapp
       ];
       services.cinnamon.apps.enable = mkDefault true;
@@ -154,7 +154,7 @@ in
         polkit_gnome
 
         # packages
-        nemo
+        nemo-with-extensions
         cinnamon-control-center
         cinnamon-settings-daemon
         libgnomekbd
diff --git a/nixos/modules/services/x11/desktop-managers/plasma5.nix b/nixos/modules/services/x11/desktop-managers/plasma5.nix
index 2ab24951ec62e..9fcb408c287d5 100644
--- a/nixos/modules/services/x11/desktop-managers/plasma5.nix
+++ b/nixos/modules/services/x11/desktop-managers/plasma5.nix
@@ -585,6 +585,8 @@ in
       hardware.bluetooth.enable = true;
       hardware.pulseaudio.enable = true;
       networking.networkmanager.enable = true;
+      # Required for autorotate
+      hardware.sensor.iio.enable = lib.mkDefault true;
 
       # Recommendations can be found here:
       #  - https://invent.kde.org/plasma-mobile/plasma-phone-settings/-/tree/master/etc/xdg
diff --git a/nixos/modules/system/activation/bootspec.cue b/nixos/modules/system/activation/bootspec.cue
index 3fc9ca381df77..9f857a1b1cd80 100644
--- a/nixos/modules/system/activation/bootspec.cue
+++ b/nixos/modules/system/activation/bootspec.cue
@@ -1,4 +1,5 @@
 #V1: {
+	system:         string
 	init:           string
 	initrd?:        string
 	initrdSecrets?: string
diff --git a/nixos/modules/system/activation/bootspec.nix b/nixos/modules/system/activation/bootspec.nix
index da76bf9084af8..61407ab67558b 100644
--- a/nixos/modules/system/activation/bootspec.nix
+++ b/nixos/modules/system/activation/bootspec.nix
@@ -19,13 +19,15 @@ let
           (builtins.toJSON
           {
             v1 = {
+              system = config.boot.kernelPackages.stdenv.hostPlatform.system;
               kernel = "${config.boot.kernelPackages.kernel}/${config.system.boot.loader.kernelFile}";
               kernelParams = config.boot.kernelParams;
-              initrd = "${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}";
-              initrdSecrets = "${config.system.build.initialRamdiskSecretAppender}/bin/append-initrd-secrets";
               label = "NixOS ${config.system.nixos.codeName} ${config.system.nixos.label} (Linux ${config.boot.kernelPackages.kernel.modDirVersion})";
 
               inherit (cfg) extensions;
+            } // lib.optionalAttrs config.boot.initrd.enable {
+              initrd = "${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}";
+              initrdSecrets = "${config.system.build.initialRamdiskSecretAppender}/bin/append-initrd-secrets";
             };
           });
 
@@ -54,7 +56,7 @@ let
           specialisationInjector =
             let
               specialisationLoader = (lib.mapAttrsToList
-                (childName: childToplevel: lib.escapeShellArgs [ "--slurpfile" childName "${childToplevel}/bootspec/${filename}" ])
+                (childName: childToplevel: lib.escapeShellArgs [ "--slurpfile" childName "${childToplevel}/${filename}" ])
                 children);
             in
             lib.escapeShellArgs [
@@ -66,7 +68,7 @@ let
         ''
           mkdir -p $out/bootspec
 
-          ${toplevelInjector} | ${specialisationInjector} > $out/bootspec/${filename}
+          ${toplevelInjector} | ${specialisationInjector} > $out/${filename}
         '';
 
       validator = pkgs.writeCueValidator ./bootspec.cue {
@@ -80,7 +82,7 @@ in
     enable = lib.mkEnableOption (lib.mdDoc "Enable generation of RFC-0125 bootspec in $system/bootspec, e.g. /run/current-system/bootspec");
 
     extensions = lib.mkOption {
-      type = lib.types.attrs;
+      type = lib.types.attrsOf lib.types.attrs; # <namespace>: { ...namespace-specific fields }
       default = { };
       description = lib.mdDoc ''
         User-defined data that extends the bootspec document.
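A hedged illustration of the namespaced shape the stricter type now enforces (the namespace and field names are placeholders):

    boot.bootspec.extensions = {
      "org.example.release" = { osRelease = "/etc/os-release"; };  # attrsOf attrs: one attribute set per namespace
    };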
diff --git a/nixos/modules/system/activation/top-level.nix b/nixos/modules/system/activation/top-level.nix
index 0bb3628ceed9a..00b11471e1c71 100644
--- a/nixos/modules/system/activation/top-level.nix
+++ b/nixos/modules/system/activation/top-level.nix
@@ -81,7 +81,7 @@ let
 
       ${optionalString (!config.boot.isContainer && config.boot.bootspec.enable) ''
         ${config.boot.bootspec.writer}
-        ${config.boot.bootspec.validator} "$out/bootspec/${config.boot.bootspec.filename}"
+        ${config.boot.bootspec.validator} "$out/${config.boot.bootspec.filename}"
       ''}
 
       ${config.system.extraSystemBuilderCmds}
diff --git a/nixos/modules/system/boot/stage-1.nix b/nixos/modules/system/boot/stage-1.nix
index 28c76fb169f11..95dcdfd7fbe16 100644
--- a/nixos/modules/system/boot/stage-1.nix
+++ b/nixos/modules/system/boot/stage-1.nix
@@ -205,8 +205,9 @@ let
       # Copy ld manually since it isn't detected correctly
       cp -pv ${pkgs.stdenv.cc.libc.out}/lib/ld*.so.? $out/lib
 
-      # Copy all of the needed libraries
-      find $out/bin $out/lib -type f | while read BIN; do
+      # Copy all of the needed libraries in a consistent order so
+      # duplicates are resolved the same way.
+      find $out/bin $out/lib -type f | sort | while read BIN; do
         echo "Copying libs for executable $BIN"
         for LIB in $(${findLibs}/bin/find-libs $BIN); do
           TGT="$out/lib/$(basename $LIB)"
diff --git a/nixos/modules/tasks/filesystems/zfs.nix b/nixos/modules/tasks/filesystems/zfs.nix
index 4b4f4cc801aba..0f14f2b501c22 100644
--- a/nixos/modules/tasks/filesystems/zfs.nix
+++ b/nixos/modules/tasks/filesystems/zfs.nix
@@ -503,6 +503,10 @@ in
           assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
           message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
         }
+        {
+          assertion = cfgZfs.allowHibernation -> !cfgZfs.forceImportRoot && !cfgZfs.forceImportAll;
+          message = "boot.zfs.allowHibernation while force importing is enabled will cause data corruption";
+        }
       ];
 
       boot = {
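A hedged illustration of the combination the new assertion rejects, and a configuration that passes:

    # Rejected at evaluation time (force importing while hibernating risks corrupting the pool):
    #   boot.zfs.allowHibernation = true;
    #   boot.zfs.forceImportRoot = true;

    # Accepted: disable force importing when hibernation is allowed.
    boot.zfs.allowHibernation = true;
    boot.zfs.forceImportRoot = false;
    boot.zfs.forceImportAll = false;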
diff --git a/nixos/tests/acme.nix b/nixos/tests/acme.nix
index 64bc99f6d325f..d62bf0c0fd92d 100644
--- a/nixos/tests/acme.nix
+++ b/nixos/tests/acme.nix
@@ -144,7 +144,11 @@
 
 in {
   name = "acme";
-  meta.maintainers = lib.teams.acme.members;
+  meta = {
+    maintainers = lib.teams.acme.members;
+    # Hard timeout in seconds. Average run time is about 7 minutes.
+    timeout = 1800;
+  };
 
   nodes = {
     # The fake ACME server which will respond to client requests
@@ -357,6 +361,30 @@ in {
       import time
 
 
+      TOTAL_RETRIES = 20
+
+
+      class BackoffTracker(object):
+          delay = 1
+          increment = 1
+
+          def handle_fail(self, retries, message) -> int:
+              assert retries < TOTAL_RETRIES, message
+
+              print(f"Retrying in {self.delay}s, {retries + 1}/{TOTAL_RETRIES}")
+              time.sleep(self.delay)
+
+              # Only increment after the first try
+              if retries == 0:
+                  self.delay += self.increment
+                  self.increment *= 2
+
+              return retries + 1
+
+
+      backoff = BackoffTracker()
+
+
       def switch_to(node, name):
           # On first switch, this will create a symlink to the current system so that we can
           # quickly switch between derivations
@@ -404,9 +432,7 @@ in {
           assert False
 
 
-      def check_connection(node, domain, retries=3):
-          assert retries >= 0, f"Failed to connect to https://{domain}"
-
+      def check_connection(node, domain, retries=0):
           result = node.succeed(
               "openssl s_client -brief -verify 2 -CAfile /tmp/ca.crt"
               f" -servername {domain} -connect {domain}:443 < /dev/null 2>&1"
@@ -414,13 +440,11 @@ in {
 
           for line in result.lower().split("\n"):
               if "verification" in line and "error" in line:
-                  time.sleep(3)
-                  return check_connection(node, domain, retries - 1)
+                  retries = backoff.handle_fail(retries, f"Failed to connect to https://{domain}")
+                  return check_connection(node, domain, retries)
 
 
-      def check_connection_key_bits(node, domain, bits, retries=3):
-          assert retries >= 0, f"Did not find expected number of bits ({bits}) in key"
-
+      def check_connection_key_bits(node, domain, bits, retries=0):
           result = node.succeed(
               "openssl s_client -CAfile /tmp/ca.crt"
               f" -servername {domain} -connect {domain}:443 < /dev/null"
@@ -429,13 +453,11 @@ in {
           print("Key type:", result)
 
           if bits not in result:
-              time.sleep(3)
-              return check_connection_key_bits(node, domain, bits, retries - 1)
-
+              retries = backoff.handle_fail(retries, f"Did not find expected number of bits ({bits}) in key")
+              return check_connection_key_bits(node, domain, bits, retries)
 
-      def check_stapling(node, domain, retries=3):
-          assert retries >= 0, "OCSP Stapling check failed"
 
+      def check_stapling(node, domain, retries=0):
           # Pebble doesn't provide a full OCSP responder, so just check the URL
           result = node.succeed(
               "openssl s_client -CAfile /tmp/ca.crt"
@@ -445,21 +467,19 @@ in {
           print("OCSP Responder URL:", result)
 
           if "${caDomain}:4002" not in result.lower():
-              time.sleep(3)
-              return check_stapling(node, domain, retries - 1)
-
+              retries = backoff.handle_fail(retries, "OCSP Stapling check failed")
+              return check_stapling(node, domain, retries)
 
-      def download_ca_certs(node, retries=5):
-          assert retries >= 0, "Failed to connect to pebble to download root CA certs"
 
+      def download_ca_certs(node, retries=0):
           exit_code, _ = node.execute("curl https://${caDomain}:15000/roots/0 > /tmp/ca.crt")
           exit_code_2, _ = node.execute(
               "curl https://${caDomain}:15000/intermediate-keys/0 >> /tmp/ca.crt"
           )
 
           if exit_code + exit_code_2 > 0:
-              time.sleep(3)
-              return download_ca_certs(node, retries - 1)
+              retries = backoff.handle_fail(retries, "Failed to connect to pebble to download root CA certs")
+              return download_ca_certs(node, retries)
 
 
       start_all()
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 1956d3c9e8c7c..6f056de2ed5cc 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -96,6 +96,7 @@ in {
   blockbook-frontend = handleTest ./blockbook-frontend.nix {};
   blocky = handleTest ./blocky.nix {};
   boot = handleTestOn ["x86_64-linux" "aarch64-linux"] ./boot.nix {};
+  bootspec = handleTestOn ["x86_64-linux"] ./bootspec.nix {};
   boot-stage1 = handleTest ./boot-stage1.nix {};
   borgbackup = handleTest ./borgbackup.nix {};
   botamusique = handleTest ./botamusique.nix {};
@@ -256,6 +257,7 @@ in {
   haste-server = handleTest ./haste-server.nix {};
   haproxy = handleTest ./haproxy.nix {};
   hardened = handleTest ./hardened.nix {};
+  headscale = handleTest ./headscale.nix {};
   healthchecks = handleTest ./web-apps/healthchecks.nix {};
   hbase2 = handleTest ./hbase.nix { package=pkgs.hbase2; };
   hbase_2_4 = handleTest ./hbase.nix { package=pkgs.hbase_2_4; };
diff --git a/nixos/tests/bootspec.nix b/nixos/tests/bootspec.nix
index 13360bb1eaa2e..077dff918e0d2 100644
--- a/nixos/tests/bootspec.nix
+++ b/nixos/tests/bootspec.nix
@@ -43,7 +43,7 @@ in
       machine.start()
       machine.wait_for_unit("multi-user.target")
 
-      machine.succeed("test -e /run/current-system/bootspec/boot.json")
+      machine.succeed("test -e /run/current-system/boot.json")
     '';
   };
 
@@ -65,7 +65,7 @@ in
       machine.start()
       machine.wait_for_unit("multi-user.target")
 
-      machine.succeed("test -e /run/current-system/bootspec/boot.json")
+      machine.succeed("test -e /run/current-system/boot.json")
     '';
   };
 
@@ -86,7 +86,33 @@ in
       machine.start()
       machine.wait_for_unit("multi-user.target")
 
+      machine.succeed("test -e /run/current-system/boot.json")
+    '';
+  };
+
+  # Check that enabling the initrd creates the corresponding entries in the bootspec.
+  initrd = makeTest {
+    name = "bootspec-with-initrd";
+    meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+
+    nodes.machine = {
+      imports = [ standard ];
+      environment.systemPackages = [ pkgs.jq ];
+      # This is most likely enabled by default, but we want to make it explicit here.
+      boot.initrd.enable = true;
+    };
+
+    testScript = ''
+      import json
+
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
       machine.succeed("test -e /run/current-system/bootspec/boot.json")
+
+      bootspec = json.loads(machine.succeed("jq -r '.v1' /run/current-system/bootspec/boot.json"))
+
+      assert all(key in bootspec for key in ('initrd', 'initrdSecrets')), "Bootspec should contain the initrd and initrdSecrets fields when the initrd is enabled"
     '';
   };
 
@@ -107,11 +133,11 @@ in
       machine.start()
       machine.wait_for_unit("multi-user.target")
 
-      machine.succeed("test -e /run/current-system/bootspec/boot.json")
-      machine.succeed("test -e /run/current-system/specialisation/something/bootspec/boot.json")
+      machine.succeed("test -e /run/current-system/boot.json")
+      machine.succeed("test -e /run/current-system/specialisation/something/boot.json")
 
-      sp_in_parent = json.loads(machine.succeed("jq -r '.v1.specialisation.something' /run/current-system/bootspec/boot.json"))
-      sp_in_fs = json.loads(machine.succeed("cat /run/current-system/specialisation/something/bootspec/boot.json"))
+      sp_in_parent = json.loads(machine.succeed("jq -r '.v1.specialisation.something' /run/current-system/boot.json"))
+      sp_in_fs = json.loads(machine.succeed("cat /run/current-system/specialisation/something/boot.json"))
 
       assert sp_in_parent == sp_in_fs['v1'], "Bootspecs of the same specialisation are different!"
     '';
@@ -135,7 +161,7 @@ in
       machine.wait_for_unit("multi-user.target")
 
       current_os_release = machine.succeed("cat /etc/os-release")
-      bootspec_os_release = machine.succeed("cat $(jq -r '.v1.extensions.osRelease' /run/current-system/bootspec/boot.json)")
+      bootspec_os_release = machine.succeed("cat $(jq -r '.v1.extensions.osRelease' /run/current-system/boot.json)")
 
       assert current_os_release == bootspec_os_release, "Filename referenced by extension has unexpected contents"
     '';
diff --git a/nixos/tests/headscale.nix b/nixos/tests/headscale.nix
new file mode 100644
index 0000000000000..48658b5dade42
--- /dev/null
+++ b/nixos/tests/headscale.nix
@@ -0,0 +1,17 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "headscale";
+  meta.maintainers = with lib.maintainers; [ misterio77 ];
+
+  nodes.machine = { ... }: {
+    services.headscale.enable = true;
+    environment.systemPackages = [ pkgs.headscale ];
+  };
+
+  testScript = ''
+    machine.wait_for_unit("headscale")
+    machine.wait_for_open_port(8080)
+    # Test basic functionality
+    machine.succeed("headscale namespaces create test")
+    machine.succeed("headscale preauthkeys -n test create")
+  '';
+})
diff --git a/nixos/tests/nginx.nix b/nixos/tests/nginx.nix
index d9d073822a145..73f1133bd6ca9 100644
--- a/nixos/tests/nginx.nix
+++ b/nixos/tests/nginx.nix
@@ -61,7 +61,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
 
       specialisation.reloadWithErrorsSystem.configuration = {
         services.nginx.package = pkgs.nginxMainline;
-        services.nginx.virtualHosts."!@$$(#*%".locations."~@#*$*!)".proxyPass = ";;;";
+        services.nginx.virtualHosts."hello".extraConfig = "access_log /does/not/exist.log;";
       };
     };
   };
diff --git a/nixos/tests/os-prober.nix b/nixos/tests/os-prober.nix
index 1c89cf8c1c677..8f3e2494047c3 100644
--- a/nixos/tests/os-prober.nix
+++ b/nixos/tests/os-prober.nix
@@ -1,7 +1,7 @@
 import ./make-test-python.nix ({pkgs, lib, ...}:
 let
   # A filesystem image with a (presumably) bootable debian
-  debianImage = pkgs.vmTools.diskImageFuns.debian9i386 {
+  debianImage = pkgs.vmTools.diskImageFuns.debian11i386 {
     # os-prober cannot detect systems installed on disks without a partition table
     # so we create the disk ourselves
     createRootFS = with pkgs; ''
diff --git a/nixos/tests/prometheus-exporters.nix b/nixos/tests/prometheus-exporters.nix
index 7264bd5a498aa..5f50a3f87d5d2 100644
--- a/nixos/tests/prometheus-exporters.nix
+++ b/nixos/tests/prometheus-exporters.nix
@@ -1172,6 +1172,25 @@ let
       '';
     };
 
+    statsd = {
+      exporterConfig = {
+        enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-statsd-exporter.service")
+        wait_for_open_port(9102)
+        succeed("curl http://localhost:9102/metrics | grep 'statsd_exporter_build_info{'")
+        succeed(
+          "echo 'test.udp:1|c' > /dev/udp/localhost/9125",
+          "curl http://localhost:9102/metrics | grep 'test_udp 1'",
+        )
+        succeed(
+          "echo 'test.tcp:1|c' > /dev/tcp/localhost/9125",
+          "curl http://localhost:9102/metrics | grep 'test_tcp 1'",
+        )
+      '';
+    };
+
     surfboard = {
       exporterConfig = {
         enable = true;
diff --git a/nixos/tests/vscodium.nix b/nixos/tests/vscodium.nix
index ee884cc4295dd..37bb649889b45 100644
--- a/nixos/tests/vscodium.nix
+++ b/nixos/tests/vscodium.nix
@@ -49,8 +49,8 @@ let
         start_all()
 
         machine.wait_for_unit('graphical.target')
-        machine.wait_until_succeeds('pgrep -x codium')
 
+        codium_running.wait()
         with codium_running:
             # Wait until vscodium is visible. "File" is in the menu bar.
             machine.wait_for_text('Get Started')
diff --git a/nixos/tests/web-apps/peertube.nix b/nixos/tests/web-apps/peertube.nix
index ecc45bff2e2ca..0e5f39c08a023 100644
--- a/nixos/tests/web-apps/peertube.nix
+++ b/nixos/tests/web-apps/peertube.nix
@@ -41,6 +41,9 @@ import ../make-test-python.nix ({pkgs, ...}:
     server = { pkgs, ... }: {
       environment = {
         etc = {
+          "peertube/secrets-peertube".text = ''
+            063d9c60d519597acef26003d5ecc32729083965d09181ef3949200cbe5f09ee
+          '';
           "peertube/password-posgressql-db".text = ''
             0gUN0C1mgST6czvjZ8T9
           '';
@@ -67,6 +70,10 @@ import ../make-test-python.nix ({pkgs, ...}:
         localDomain = "peertube.local";
         enableWebHttps = false;
 
+        secrets = {
+          secretsFile = "/etc/peertube/secrets-peertube";
+        };
+
         database = {
           host = "192.168.2.10";
           name = "peertube_local";