about summary refs log tree commit diff
path: root/nixos
diff options
context:
space:
mode:
Diffstat (limited to 'nixos')
-rw-r--r--nixos/doc/manual/release-notes/rl-2311.section.md4
-rw-r--r--nixos/modules/config/zram.nix49
-rw-r--r--nixos/modules/module-list.nix2
-rw-r--r--nixos/modules/services/databases/influxdb2.nix452
-rw-r--r--nixos/modules/services/editors/emacs.nix17
-rw-r--r--nixos/modules/services/hardware/hddfancontrol.nix67
-rw-r--r--nixos/modules/services/networking/tailscale.nix2
-rw-r--r--nixos/modules/services/networking/twingate.nix2
-rw-r--r--nixos/modules/services/system/zram-generator.nix38
-rw-r--r--nixos/modules/services/web-servers/caddy/default.nix54
-rw-r--r--nixos/tests/all-tests.nix5
-rw-r--r--nixos/tests/caddy.nix22
-rw-r--r--nixos/tests/common/lxd/config.yaml24
-rw-r--r--nixos/tests/hddfancontrol.nix44
-rw-r--r--nixos/tests/influxdb2.nix193
-rw-r--r--nixos/tests/lxd-image-server.nix6
-rw-r--r--nixos/tests/lxd/container.nix (renamed from nixos/tests/lxd.nix)38
-rw-r--r--nixos/tests/lxd/default.nix9
-rw-r--r--nixos/tests/lxd/nftables.nix (renamed from nixos/tests/lxd-nftables.nix)2
-rw-r--r--nixos/tests/lxd/ui.nix (renamed from nixos/tests/lxd-ui.nix)2
-rw-r--r--nixos/tests/os-prober.nix1
-rw-r--r--nixos/tests/virtualbox.nix2
22 files changed, 870 insertions, 165 deletions
diff --git a/nixos/doc/manual/release-notes/rl-2311.section.md b/nixos/doc/manual/release-notes/rl-2311.section.md
index 825b1c5bd407a..623576ce4ff28 100644
--- a/nixos/doc/manual/release-notes/rl-2311.section.md
+++ b/nixos/doc/manual/release-notes/rl-2311.section.md
@@ -20,6 +20,8 @@
 
 - [mautrix-whatsapp](https://docs.mau.fi/bridges/go/whatsapp/index.html) A Matrix-WhatsApp puppeting bridge
 
+- [hddfancontrol](https://github.com/desbma/hddfancontrol), a service to regulate fan speeds based on hard drive temperature. Available as [services.hddfancontrol](#opt-services.hddfancontrol.enable).
+
 - [GoToSocial](https://gotosocial.org/), an ActivityPub social network server, written in Golang. Available as [services.gotosocial](#opt-services.gotosocial.enable).
 
 - [Typesense](https://github.com/typesense/typesense), a fast, typo-tolerant search engine for building delightful search experiences. Available as [services.typesense](#opt-services.typesense.enable).
@@ -201,6 +203,8 @@ The module update takes care of the new config syntax and the data itself (user
 
 - `programs.gnupg.agent.pinentryFlavor` is now set in `/etc/gnupg/gpg-agent.conf`, and will no longer take precedence over a `pinentry-program` set in `~/.gnupg/gpg-agent.conf`.
 
+- `services.influxdb2` now supports doing an automatic initial setup and provisioning of users, organizations, buckets and authentication tokens, see [#249502](https://github.com/NixOS/nixpkgs/pull/249502) for more details.
+
 - `wrapHelm` now exposes `passthru.pluginsDir` which can be passed to `helmfile`. For convenience, a top-level package `helmfile-wrapped` has been added, which inherits `passthru.pluginsDir` from `kubernetes-helm-wrapped`. See [#217768](https://github.com/NixOS/nixpkgs/issues/217768) for details.
 
 - `boot.initrd.network.udhcp.enable` allows control over dhcp during stage 1 regardless of what `networking.useDHCP` is set to.
diff --git a/nixos/modules/config/zram.nix b/nixos/modules/config/zram.nix
index 991387ea9b2bd..ec8b4ed6e9315 100644
--- a/nixos/modules/config/zram.nix
+++ b/nixos/modules/config/zram.nix
@@ -105,36 +105,25 @@ in
       }
     ];
 
-
-    system.requiredKernelConfig = with config.lib.kernelConfig; [
-      (isModule "ZRAM")
-    ];
-
-    # Disabling this for the moment, as it would create and mkswap devices twice,
-    # once in stage 2 boot, and again when the zram-reloader service starts.
-    # boot.kernelModules = [ "zram" ];
-
-    systemd.packages = [ pkgs.zram-generator ];
-    systemd.services."systemd-zram-setup@".path = [ pkgs.util-linux ]; # for mkswap
-
-    environment.etc."systemd/zram-generator.conf".source =
-      (pkgs.formats.ini { }).generate "zram-generator.conf" (lib.listToAttrs
-        (builtins.map
-          (dev: {
-            name = dev;
-            value =
-              let
-                size = "${toString cfg.memoryPercent} / 100 * ram";
-              in
-              {
-                zram-size = if cfg.memoryMax != null then "min(${size}, ${toString cfg.memoryMax} / 1024 / 1024)" else size;
-                compression-algorithm = cfg.algorithm;
-                swap-priority = cfg.priority;
-              } // lib.optionalAttrs (cfg.writebackDevice != null) {
-                writeback-device = cfg.writebackDevice;
-              };
-          })
-          devices));
+    services.zram-generator.enable = true;
+
+    services.zram-generator.settings = lib.listToAttrs
+      (builtins.map
+        (dev: {
+          name = dev;
+          value =
+            let
+              size = "${toString cfg.memoryPercent} / 100 * ram";
+            in
+            {
+              zram-size = if cfg.memoryMax != null then "min(${size}, ${toString cfg.memoryMax} / 1024 / 1024)" else size;
+              compression-algorithm = cfg.algorithm;
+              swap-priority = cfg.priority;
+            } // lib.optionalAttrs (cfg.writebackDevice != null) {
+              writeback-device = cfg.writebackDevice;
+            };
+        })
+        devices);
 
   };
 
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 97b8f61e1e70d..3812aa14760dd 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -505,6 +505,7 @@
   ./services/hardware/fancontrol.nix
   ./services/hardware/freefall.nix
   ./services/hardware/fwupd.nix
+  ./services/hardware/hddfancontrol.nix
   ./services/hardware/illum.nix
   ./services/hardware/interception-tools.nix
   ./services/hardware/irqbalance.nix
@@ -1171,6 +1172,7 @@
   ./services/system/self-deploy.nix
   ./services/system/systembus-notify.nix
   ./services/system/uptimed.nix
+  ./services/system/zram-generator.nix
   ./services/torrent/deluge.nix
   ./services/torrent/flexget.nix
   ./services/torrent/magnetico.nix
diff --git a/nixos/modules/services/databases/influxdb2.nix b/nixos/modules/services/databases/influxdb2.nix
index 329533b35dc8e..3740cd01b5dc0 100644
--- a/nixos/modules/services/databases/influxdb2.nix
+++ b/nixos/modules/services/databases/influxdb2.nix
@@ -3,34 +3,291 @@
 let
   inherit
     (lib)
+    any
+    attrNames
+    attrValues
+    count
     escapeShellArg
+    filterAttrs
+    flatten
+    flip
+    getExe
     hasAttr
+    hasInfix
+    listToAttrs
     literalExpression
+    mapAttrsToList
+    mdDoc
     mkEnableOption
     mkIf
     mkOption
+    nameValuePair
+    optional
+    subtractLists
     types
+    unique
     ;
 
   format = pkgs.formats.json { };
   cfg = config.services.influxdb2;
   configFile = format.generate "config.json" cfg.settings;
+
+  validPermissions = [
+    "authorizations"
+    "buckets"
+    "dashboards"
+    "orgs"
+    "tasks"
+    "telegrafs"
+    "users"
+    "variables"
+    "secrets"
+    "labels"
+    "views"
+    "documents"
+    "notificationRules"
+    "notificationEndpoints"
+    "checks"
+    "dbrp"
+    "annotations"
+    "sources"
+    "scrapers"
+    "notebooks"
+    "remotes"
+    "replications"
+  ];
+
+  # Determines whether at least one active api token is defined
+  anyAuthDefined =
+    flip any (attrValues cfg.provision.organizations)
+    (o: o.present && flip any (attrValues o.auths)
+    (a: a.present && a.tokenFile != null));
+
+  provisionState = pkgs.writeText "provision_state.json" (builtins.toJSON {
+    inherit (cfg.provision) organizations users;
+  });
+
+  provisioningScript = pkgs.writeShellScript "post-start-provision" ''
+    set -euo pipefail
+    export INFLUX_HOST="http://"${escapeShellArg (
+      if ! hasAttr "http-bind-address" cfg.settings
+        || hasInfix "0.0.0.0" cfg.settings.http-bind-address
+      then "localhost:8086"
+      else cfg.settings.http-bind-address
+    )}
+
+    # Wait for the influxdb server to come online
+    count=0
+    while ! influx ping &>/dev/null; do
+      if [ "$count" -eq 300 ]; then
+        echo "Tried for 30 seconds, giving up..."
+        exit 1
+      fi
+
+      if ! kill -0 "$MAINPID"; then
+        echo "Main server died, giving up..."
+        exit 1
+      fi
+
+      sleep 0.1
+      count=$((count++))
+    done
+
+    # Do the initial database setup. Pass /dev/null as configs-path to
+    # avoid saving the token as the active config.
+    if test -e "$STATE_DIRECTORY/.first_startup"; then
+      influx setup \
+        --configs-path /dev/null \
+        --org ${escapeShellArg cfg.provision.initialSetup.organization} \
+        --bucket ${escapeShellArg cfg.provision.initialSetup.bucket} \
+        --username ${escapeShellArg cfg.provision.initialSetup.username} \
+        --password "$(< "$CREDENTIALS_DIRECTORY/admin-password")" \
+        --token "$(< "$CREDENTIALS_DIRECTORY/admin-token")" \
+        --retention ${toString cfg.provision.initialSetup.retention}s \
+        --force >/dev/null
+
+      rm -f "$STATE_DIRECTORY/.first_startup"
+    fi
+
+    provision_result=$(${getExe pkgs.influxdb2-provision} ${provisionState} "$INFLUX_HOST" "$(< "$CREDENTIALS_DIRECTORY/admin-token")")
+    if [[ "$(jq '[.auths[] | select(.action == "created")] | length' <<< "$provision_result")" -gt 0 ]]; then
+      echo "Created at least one new token, queueing service restart so we can manipulate secrets"
+      touch "$STATE_DIRECTORY/.needs_restart"
+    fi
+  '';
+
+  restarterScript = pkgs.writeShellScript "post-start-restarter" ''
+    set -euo pipefail
+    if test -e "$STATE_DIRECTORY/.needs_restart"; then
+      rm -f "$STATE_DIRECTORY/.needs_restart"
+      /run/current-system/systemd/bin/systemctl restart influxdb2
+    fi
+  '';
+
+  organizationSubmodule = types.submodule (organizationSubmod: let
+    org = organizationSubmod.config._module.args.name;
+  in {
+    options = {
+      present = mkOption {
+        description = mdDoc "Whether to ensure that this organization is present or absent.";
+        type = types.bool;
+        default = true;
+      };
+
+      description = mkOption {
+        description = mdDoc "Optional description for the organization.";
+        default = null;
+        type = types.nullOr types.str;
+      };
+
+      buckets = mkOption {
+        description = mdDoc "Buckets to provision in this organization.";
+        default = {};
+        type = types.attrsOf (types.submodule (bucketSubmod: let
+          bucket = bucketSubmod.config._module.args.name;
+        in {
+          options = {
+            present = mkOption {
+              description = mdDoc "Whether to ensure that this bucket is present or absent.";
+              type = types.bool;
+              default = true;
+            };
+
+            description = mkOption {
+              description = mdDoc "Optional description for the bucket.";
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            retention = mkOption {
+              type = types.ints.unsigned;
+              default = 0;
+              description = mdDoc "The duration in seconds for which the bucket will retain data (0 is infinite).";
+            };
+          };
+        }));
+      };
+
+      auths = mkOption {
+        description = mdDoc "API tokens to provision for the user in this organization.";
+        default = {};
+        type = types.attrsOf (types.submodule (authSubmod: let
+          auth = authSubmod.config._module.args.name;
+        in {
+          options = {
+            id = mkOption {
+              description = mdDoc "A unique identifier for this authentication token. Since influx doesn't store names for tokens, this will be hashed and appended to the description to identify the token.";
+              readOnly = true;
+              default = builtins.substring 0 32 (builtins.hashString "sha256" "${org}:${auth}");
+              defaultText = "<a hash derived from org and name>";
+              type = types.str;
+            };
+
+            present = mkOption {
+              description = mdDoc "Whether to ensure that this API token is present or absent.";
+              type = types.bool;
+              default = true;
+            };
+
+            description = mkOption {
+              description = ''
+                Optional description for the API token.
+                Note that the actual token will always be created with a description regardless
+                of whether this is given or not. The name is always added plus a unique suffix
+                to later identify the token to track whether it has already been created.
+              '';
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            tokenFile = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = mdDoc "The token value. If not given, influx will automatically generate one.";
+            };
+
+            operator = mkOption {
+              description = mdDoc "Grants all permissions in all organizations.";
+              default = false;
+              type = types.bool;
+            };
+
+            allAccess = mkOption {
+              description = mdDoc "Grants all permissions in the associated organization.";
+              default = false;
+              type = types.bool;
+            };
+
+            readPermissions = mkOption {
+              description = mdDoc ''
+                The read permissions to include for this token. Access is usually granted only
+                for resources in the associated organization.
+
+                Available permissions are `authorizations`, `buckets`, `dashboards`,
+                `orgs`, `tasks`, `telegrafs`, `users`, `variables`, `secrets`, `labels`, `views`,
+                `documents`, `notificationRules`, `notificationEndpoints`, `checks`, `dbrp`,
+                `annotations`, `sources`, `scrapers`, `notebooks`, `remotes`, `replications`.
+
+                Refer to `influx auth create --help` for a full list with descriptions.
+
+                `buckets` grants read access to all associated buckets. Use `readBuckets` to define
+                more granular access permissions.
+              '';
+              default = [];
+              type = types.listOf (types.enum validPermissions);
+            };
+
+            writePermissions = mkOption {
+              description = mdDoc ''
+                The write permissions to include for this token. Access is usually granted only
+                for resources in the associated organization.
+
+                Available permissions are `authorizations`, `buckets`, `dashboards`,
+                `orgs`, `tasks`, `telegrafs`, `users`, `variables`, `secrets`, `labels`, `views`,
+                `documents`, `notificationRules`, `notificationEndpoints`, `checks`, `dbrp`,
+                `annotations`, `sources`, `scrapers`, `notebooks`, `remotes`, `replications`.
+
+                Refer to `influx auth create --help` for a full list with descriptions.
+
+                `buckets` grants write access to all associated buckets. Use `writeBuckets` to define
+                more granular access permissions.
+              '';
+              default = [];
+              type = types.listOf (types.enum validPermissions);
+            };
+
+            readBuckets = mkOption {
+              description = mdDoc "The organization's buckets which should be allowed to be read";
+              default = [];
+              type = types.listOf types.str;
+            };
+
+            writeBuckets = mkOption {
+              description = mdDoc "The organization's buckets which should be allowed to be written";
+              default = [];
+              type = types.listOf types.str;
+            };
+          };
+        }));
+      };
+    };
+  });
 in
 {
   options = {
     services.influxdb2 = {
-      enable = mkEnableOption (lib.mdDoc "the influxdb2 server");
+      enable = mkEnableOption (mdDoc "the influxdb2 server");
 
       package = mkOption {
         default = pkgs.influxdb2-server;
         defaultText = literalExpression "pkgs.influxdb2";
-        description = lib.mdDoc "influxdb2 derivation to use.";
+        description = mdDoc "influxdb2 derivation to use.";
         type = types.package;
       };
 
       settings = mkOption {
         default = { };
-        description = lib.mdDoc ''configuration options for influxdb2, see <https://docs.influxdata.com/influxdb/v2.0/reference/config-options> for details.'';
+        description = mdDoc ''configuration options for influxdb2, see <https://docs.influxdata.com/influxdb/v2.0/reference/config-options> for details.'';
         type = format.type;
       };
 
@@ -41,52 +298,135 @@ in
           organization = mkOption {
             type = types.str;
             example = "main";
-            description = "Primary organization name";
+            description = mdDoc "Primary organization name";
           };
 
           bucket = mkOption {
             type = types.str;
             example = "example";
-            description = "Primary bucket name";
+            description = mdDoc "Primary bucket name";
           };
 
           username = mkOption {
             type = types.str;
             default = "admin";
-            description = "Primary username";
+            description = mdDoc "Primary username";
           };
 
           retention = mkOption {
-            type = types.str;
-            default = "0";
-            description = ''
-              The duration for which the bucket will retain data (0 is infinite).
-              Accepted units are `ns` (nanoseconds), `us` or `µs` (microseconds), `ms` (milliseconds),
-              `s` (seconds), `m` (minutes), `h` (hours), `d` (days) and `w` (weeks).
-            '';
+            type = types.ints.unsigned;
+            default = 0;
+            description = mdDoc "The duration in seconds for which the bucket will retain data (0 is infinite).";
           };
 
           passwordFile = mkOption {
             type = types.path;
-            description = "Password for primary user. Don't use a file from the nix store!";
+            description = mdDoc "Password for primary user. Don't use a file from the nix store!";
           };
 
           tokenFile = mkOption {
             type = types.path;
-            description = "API Token to set for the admin user. Don't use a file from the nix store!";
+            description = mdDoc "API Token to set for the admin user. Don't use a file from the nix store!";
           };
         };
+
+        organizations = mkOption {
+          description = mdDoc "Organizations to provision.";
+          example = literalExpression ''
+            {
+              myorg = {
+                description = "My organization";
+                buckets.mybucket = {
+                  description = "My bucket";
+                  retention = 31536000; # 1 year
+                };
+                auths.mytoken = {
+                  readBuckets = ["mybucket"];
+                  tokenFile = "/run/secrets/mytoken";
+                };
+              };
+            }
+          '';
+          default = {};
+          type = types.attrsOf organizationSubmodule;
+        };
+
+        users = mkOption {
+          description = mdDoc "Users to provision.";
+          default = {};
+          example = literalExpression ''
+            {
+              # admin = {}; /* The initialSetup.username will automatically be added. */
+              myuser.passwordFile = "/run/secrets/myuser_password";
+            }
+          '';
+          type = types.attrsOf (types.submodule (userSubmod: let
+            user = userSubmod.config._module.args.name;
+            org = userSubmod.config.org;
+          in {
+            options = {
+              present = mkOption {
+                description = mdDoc "Whether to ensure that this user is present or absent.";
+                type = types.bool;
+                default = true;
+              };
+
+              passwordFile = mkOption {
+                description = mdDoc "Password for the user. If unset, the user will not be able to log in until a password is set by an operator! Don't use a file from the nix store!";
+                default = null;
+                type = types.nullOr types.path;
+              };
+            };
+          }));
+        };
       };
     };
   };
 
   config = mkIf cfg.enable {
-    assertions = [
-      {
-        assertion = !(hasAttr "bolt-path" cfg.settings) && !(hasAttr "engine-path" cfg.settings);
-        message = "services.influxdb2.config: bolt-path and engine-path should not be set as they are managed by systemd";
-      }
-    ];
+    assertions =
+      [
+        {
+          assertion = !(hasAttr "bolt-path" cfg.settings) && !(hasAttr "engine-path" cfg.settings);
+          message = "services.influxdb2.config: bolt-path and engine-path should not be set as they are managed by systemd";
+        }
+      ]
+      ++ flatten (flip mapAttrsToList cfg.provision.organizations (orgName: org:
+        flip mapAttrsToList org.auths (authName: auth:
+          [
+            {
+              assertion = 1 == count (x: x) [
+                auth.operator
+                auth.allAccess
+                (auth.readPermissions != []
+                  || auth.writePermissions != []
+                  || auth.readBuckets != []
+                  || auth.writeBuckets != [])
+              ];
+              message = "influxdb2: provision.organizations.${orgName}.auths.${authName}: The `operator` and `allAccess` options are mutually exclusive with each other and the granular permission settings.";
+            }
+            (let unknownBuckets = subtractLists (attrNames org.buckets) auth.readBuckets; in {
+              assertion = unknownBuckets == [];
+              message = "influxdb2: provision.organizations.${orgName}.auths.${authName}: Refers to invalid buckets in readBuckets: ${toString unknownBuckets}";
+            })
+            (let unknownBuckets = subtractLists (attrNames org.buckets) auth.writeBuckets; in {
+              assertion = unknownBuckets == [];
+              message = "influxdb2: provision.organizations.${orgName}.auths.${authName}: Refers to invalid buckets in writeBuckets: ${toString unknownBuckets}";
+            })
+          ]
+        )
+      ));
+
+    services.influxdb2.provision = mkIf cfg.provision.enable {
+      organizations.${cfg.provision.initialSetup.organization} = {
+        buckets.${cfg.provision.initialSetup.bucket} = {
+          inherit (cfg.provision.initialSetup) retention;
+        };
+      };
+      users.${cfg.provision.initialSetup.username} = {
+        inherit (cfg.provision.initialSetup) passwordFile;
+      };
+    };
 
     systemd.services.influxdb2 = {
       description = "InfluxDB is an open-source, distributed, time series database";
@@ -111,58 +451,38 @@ in
           "admin-password:${cfg.provision.initialSetup.passwordFile}"
           "admin-token:${cfg.provision.initialSetup.tokenFile}"
         ];
+
+        ExecStartPost = mkIf cfg.provision.enable (
+          [provisioningScript] ++
+          # Only the restarter runs with elevated privileges
+          optional anyAuthDefined "+${restarterScript}"
+        );
       };
 
-      path = [pkgs.influxdb2-cli];
+      path = [
+        pkgs.influxdb2-cli
+        pkgs.jq
+      ];
 
-      # Mark if this is the first startup so postStart can do the initial setup
-      preStart = mkIf cfg.provision.enable ''
+      # Mark if this is the first startup so postStart can do the initial setup.
+      # Also extract any token secret mappings and apply them if this isn't the first start.
+      preStart = let
+        tokenPaths = listToAttrs (flatten
+          # For all organizations
+          (flip mapAttrsToList cfg.provision.organizations
+            # For each contained token that has a token file
+            (_: org: flip mapAttrsToList (filterAttrs (_: x: x.tokenFile != null) org.auths)
+              # Collect id -> tokenFile for the mapping
+              (_: auth: nameValuePair auth.id auth.tokenFile))));
+        tokenMappings = pkgs.writeText "token_mappings.json" (builtins.toJSON tokenPaths);
+      in mkIf cfg.provision.enable ''
         if ! test -e "$STATE_DIRECTORY/influxd.bolt"; then
           touch "$STATE_DIRECTORY/.first_startup"
+        else
+          # Manipulate provisioned api tokens if necessary
+          ${getExe pkgs.influxdb2-token-manipulator} "$STATE_DIRECTORY/influxd.bolt" ${tokenMappings}
         fi
       '';
-
-      postStart = let
-        initCfg = cfg.provision.initialSetup;
-      in mkIf cfg.provision.enable (
-        ''
-          set -euo pipefail
-          export INFLUX_HOST="http://"${escapeShellArg (cfg.settings.http-bind-address or "localhost:8086")}
-
-          # Wait for the influxdb server to come online
-          count=0
-          while ! influx ping &>/dev/null; do
-            if [ "$count" -eq 300 ]; then
-              echo "Tried for 30 seconds, giving up..."
-              exit 1
-            fi
-
-            if ! kill -0 "$MAINPID"; then
-              echo "Main server died, giving up..."
-              exit 1
-            fi
-
-            sleep 0.1
-            count=$((count++))
-          done
-
-          # Do the initial database setup. Pass /dev/null as configs-path to
-          # avoid saving the token as the active config.
-          if test -e "$STATE_DIRECTORY/.first_startup"; then
-            influx setup \
-              --configs-path /dev/null \
-              --org ${escapeShellArg initCfg.organization} \
-              --bucket ${escapeShellArg initCfg.bucket} \
-              --username ${escapeShellArg initCfg.username} \
-              --password "$(< "$CREDENTIALS_DIRECTORY/admin-password")" \
-              --token "$(< "$CREDENTIALS_DIRECTORY/admin-token")" \
-              --retention ${escapeShellArg initCfg.retention} \
-              --force >/dev/null
-
-            rm -f "$STATE_DIRECTORY/.first_startup"
-          fi
-        ''
-      );
     };
 
     users.extraUsers.influxdb2 = {
diff --git a/nixos/modules/services/editors/emacs.nix b/nixos/modules/services/editors/emacs.nix
index fe3a101597947..fad4f39ff2104 100644
--- a/nixos/modules/services/editors/emacs.nix
+++ b/nixos/modules/services/editors/emacs.nix
@@ -80,6 +80,15 @@ in
         using the EDITOR environment variable.
       '';
     };
+
+    startWithGraphical = mkOption {
+      type = types.bool;
+      default = config.services.xserver.enable;
+      defaultText = literalExpression "config.services.xserver.enable";
+      description = lib.mdDoc ''
+        Start emacs with the graphical session instead of any session. Without this, emacs clients will not be able to create frames in the graphical session.
+      '';
+    };
   };
 
   config = mkIf (cfg.enable || cfg.install) {
@@ -92,7 +101,13 @@ in
         ExecStop = "${cfg.package}/bin/emacsclient --eval (kill-emacs)";
         Restart = "always";
       };
-    } // optionalAttrs cfg.enable { wantedBy = [ "default.target" ]; };
+
+      unitConfig = optionalAttrs cfg.startWithGraphical {
+        After = "graphical-session.target";
+      };
+    } // optionalAttrs cfg.enable {
+      wantedBy = if cfg.startWithGraphical then [ "graphical-session.target" ] else [ "default.target" ];
+    };
 
     environment.systemPackages = [ cfg.package editorScript desktopApplicationFile ];
 
diff --git a/nixos/modules/services/hardware/hddfancontrol.nix b/nixos/modules/services/hardware/hddfancontrol.nix
new file mode 100644
index 0000000000000..463f63cc4940f
--- /dev/null
+++ b/nixos/modules/services/hardware/hddfancontrol.nix
@@ -0,0 +1,67 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.hddfancontrol;
+  types = lib.types;
+in
+
+{
+  options = {
+
+    services.hddfancontrol.enable = lib.mkEnableOption "hddfancontrol daemon";
+
+    services.hddfancontrol.disks = lib.mkOption {
+      type = with types; listOf path;
+      default = [];
+      description = lib.mdDoc ''
+        Drive(s) to get temperature from
+      '';
+      example = ["/dev/sda"];
+    };
+
+    services.hddfancontrol.pwmPaths = lib.mkOption {
+      type = with types; listOf path;
+      default = [];
+      description = lib.mdDoc ''
+        PWM filepath(s) to control fan speed (under /sys)
+      '';
+      example = ["/sys/class/hwmon/hwmon2/pwm1"];
+    };
+
+    services.hddfancontrol.smartctl = lib.mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Probe temperature using smartctl instead of hddtemp or hdparm
+      '';
+    };
+
+    services.hddfancontrol.extraArgs = lib.mkOption {
+      type = with types; listOf str;
+      default = [];
+      description = lib.mdDoc ''
+        Extra commandline arguments for hddfancontrol
+      '';
+      example = ["--pwm-start-value=32"
+                 "--pwm-stop-value=0"
+                 "--spin-down-time=900"];
+    };
+  };
+
+  config = lib.mkIf cfg.enable (
+    let args = lib.concatLists [
+      ["-d"] cfg.disks
+      ["-p"] cfg.pwmPaths
+      (lib.optional cfg.smartctl "--smartctl")
+      cfg.extraArgs
+    ]; in {
+      systemd.packages = [pkgs.hddfancontrol];
+
+      systemd.services.hddfancontrol = {
+        enable = true;
+        wantedBy = [ "multi-user.target" ];
+        environment.HDDFANCONTROL_ARGS = lib.escapeShellArgs args;
+      };
+    }
+  );
+}
diff --git a/nixos/modules/services/networking/tailscale.nix b/nixos/modules/services/networking/tailscale.nix
index f308b7e331140..8b35cc8d66697 100644
--- a/nixos/modules/services/networking/tailscale.nix
+++ b/nixos/modules/services/networking/tailscale.nix
@@ -6,7 +6,7 @@ let
   cfg = config.services.tailscale;
   isNetworkd = config.networking.useNetworkd;
 in {
-  meta.maintainers = with maintainers; [ danderson mbaillie twitchyliquid64 ];
+  meta.maintainers = with maintainers; [ danderson mbaillie twitchyliquid64 mfrw ];
 
   options.services.tailscale = {
     enable = mkEnableOption (lib.mdDoc "Tailscale client daemon");
diff --git a/nixos/modules/services/networking/twingate.nix b/nixos/modules/services/networking/twingate.nix
index 170d392bf2135..03c68fc874f02 100644
--- a/nixos/modules/services/networking/twingate.nix
+++ b/nixos/modules/services/networking/twingate.nix
@@ -17,7 +17,7 @@ in
     };
 
     networking.firewall.checkReversePath = lib.mkDefault "loose";
-    services.resolved.enable = !(config.networking.networkmanager.enable);
+    services.resolved.enable = lib.mkIf (!config.networking.networkmanager.enable) true;
 
     environment.systemPackages = [ cfg.package ]; # For the CLI.
   };
diff --git a/nixos/modules/services/system/zram-generator.nix b/nixos/modules/services/system/zram-generator.nix
new file mode 100644
index 0000000000000..5902eda556967
--- /dev/null
+++ b/nixos/modules/services/system/zram-generator.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.zram-generator;
+  settingsFormat = pkgs.formats.ini { };
+in
+{
+  meta = {
+    maintainers = with lib.maintainers; [ nickcao ];
+  };
+
+  options.services.zram-generator = {
+    enable = lib.mkEnableOption (lib.mdDoc "Systemd unit generator for zram devices");
+
+    package = lib.mkPackageOptionMD pkgs "zram-generator" { };
+
+    settings = lib.mkOption {
+      type = lib.types.submodule {
+        freeformType = settingsFormat.type;
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Configuration for zram-generator,
+        see https://github.com/systemd/zram-generator for documentation.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    system.requiredKernelConfig = with config.lib.kernelConfig; [
+      (isModule "ZRAM")
+    ];
+
+    systemd.packages = [ cfg.package ];
+    systemd.services."systemd-zram-setup@".path = [ pkgs.util-linux ]; # for mkswap
+
+    environment.etc."systemd/zram-generator.conf".source = settingsFormat.generate "zram-generator.conf" cfg.settings;
+  };
+}
diff --git a/nixos/modules/services/web-servers/caddy/default.nix b/nixos/modules/services/web-servers/caddy/default.nix
index 5cc9ef6dd6d98..cec0b379f67ae 100644
--- a/nixos/modules/services/web-servers/caddy/default.nix
+++ b/nixos/modules/services/web-servers/caddy/default.nix
@@ -24,21 +24,26 @@ let
         }
       '';
 
-  configFile =
-    let
-      Caddyfile = pkgs.writeTextDir "Caddyfile" ''
-        {
-          ${cfg.globalConfig}
-        }
-        ${cfg.extraConfig}
-      '';
+  settingsFormat = pkgs.formats.json { };
 
-      Caddyfile-formatted = pkgs.runCommand "Caddyfile-formatted" { nativeBuildInputs = [ cfg.package ]; } ''
-        mkdir -p $out
-        cp --no-preserve=mode ${Caddyfile}/Caddyfile $out/Caddyfile
-        caddy fmt --overwrite $out/Caddyfile
-      '';
-    in
+  configFile =
+    if cfg.settings != { } then
+      settingsFormat.generate "caddy.json" cfg.settings
+    else
+      let
+        Caddyfile = pkgs.writeTextDir "Caddyfile" ''
+          {
+            ${cfg.globalConfig}
+          }
+          ${cfg.extraConfig}
+        '';
+
+        Caddyfile-formatted = pkgs.runCommand "Caddyfile-formatted" { nativeBuildInputs = [ cfg.package ]; } ''
+          mkdir -p $out
+          cp --no-preserve=mode ${Caddyfile}/Caddyfile $out/Caddyfile
+          caddy fmt --overwrite $out/Caddyfile
+        '';
+      in
       "${if pkgs.stdenv.buildPlatform == pkgs.stdenv.hostPlatform then Caddyfile-formatted else Caddyfile}/Caddyfile";
 
   etcConfigFile = "caddy/caddy_config";
@@ -299,6 +304,27 @@ in
         which could delay the reload essentially indefinitely.
       '';
     };
+
+    settings = mkOption {
+      type = settingsFormat.type;
+      default = {};
+      description = lib.mdDoc ''
+        Structured configuration for Caddy to generate a Caddy JSON configuration file.
+        See <https://caddyserver.com/docs/json/> for available options.
+
+        ::: {.warning}
+        Using a [Caddyfile](https://caddyserver.com/docs/caddyfile) instead of a JSON config is highly recommended by upstream.
+        There are only very few exceptions to this.
+
+        Please use a Caddyfile via {option}`services.caddy.configFile`, {option}`services.caddy.virtualHosts` or
+        {option}`services.caddy.extraConfig` with {option}`services.caddy.globalConfig` instead.
+        :::
+
+        ::: {.note}
+        Takes precedence over most `services.caddy.*` options, such as {option}`services.caddy.configFile` and {option}`services.caddy.virtualHosts`, if specified.
+        :::
+      '';
+    };
   };
 
   # implementation
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index db170ea2486b3..19aaac694594b 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -341,6 +341,7 @@ in {
   hbase2 = handleTest ./hbase.nix { package=pkgs.hbase2; };
   hbase_2_4 = handleTest ./hbase.nix { package=pkgs.hbase_2_4; };
   hbase3 = handleTest ./hbase.nix { package=pkgs.hbase3; };
+  hddfancontrol = handleTest ./hddfancontrol.nix {};
   hedgedoc = handleTest ./hedgedoc.nix {};
   herbstluftwm = handleTest ./herbstluftwm.nix {};
   homepage-dashboard = handleTest ./homepage-dashboard.nix {};
@@ -442,10 +443,8 @@ in {
   loki = handleTest ./loki.nix {};
   luks = handleTest ./luks.nix {};
   lvm2 = handleTest ./lvm2 {};
-  lxd = handleTest ./lxd.nix {};
-  lxd-nftables = handleTest ./lxd-nftables.nix {};
+  lxd = handleTest ./lxd {};
   lxd-image-server = handleTest ./lxd-image-server.nix {};
-  lxd-ui = handleTest ./lxd-ui.nix {};
   #logstash = handleTest ./logstash.nix {};
   lorri = handleTest ./lorri/default.nix {};
   maddy = discoverTests (import ./maddy { inherit handleTest; });
diff --git a/nixos/tests/caddy.nix b/nixos/tests/caddy.nix
index 238091ec606f5..5a0d3539394b6 100644
--- a/nixos/tests/caddy.nix
+++ b/nixos/tests/caddy.nix
@@ -34,6 +34,20 @@ import ./make-test-python.nix ({ pkgs, ... }: {
           "http://localhost:8081" = { };
         };
       };
+      specialisation.rfc42.configuration = {
+        services.caddy.settings = {
+          apps.http.servers.default = {
+            listen = [ ":80" ];
+            routes = [{
+              handle = [{
+                body = "hello world";
+                handler = "static_response";
+                status_code = 200;
+              }];
+            }];
+          };
+        };
+      };
     };
   };
 
@@ -41,6 +55,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
     let
       justReloadSystem = "${nodes.webserver.system.build.toplevel}/specialisation/config-reload";
       multipleConfigs = "${nodes.webserver.system.build.toplevel}/specialisation/multiple-configs";
+      rfc42Config = "${nodes.webserver.system.build.toplevel}/specialisation/rfc42";
     in
     ''
       url = "http://localhost/example.html"
@@ -62,5 +77,12 @@ import ./make-test-python.nix ({ pkgs, ... }: {
           )
           webserver.wait_for_open_port(8080)
           webserver.wait_for_open_port(8081)
+
+      with subtest("rfc42 settings config"):
+          webserver.succeed(
+              "${rfc42Config}/bin/switch-to-configuration test >&2"
+          )
+          webserver.wait_for_open_port(80)
+          webserver.succeed("curl http://localhost | grep hello")
     '';
 })
diff --git a/nixos/tests/common/lxd/config.yaml b/nixos/tests/common/lxd/config.yaml
deleted file mode 100644
index 3bb667ed43f7c..0000000000000
--- a/nixos/tests/common/lxd/config.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-storage_pools:
-  - name: default
-    driver: dir
-    config:
-      source: /var/lxd-pool
-
-networks:
-  - name: lxdbr0
-    type: bridge
-    config:
-      ipv4.address: auto
-      ipv6.address: none
-
-profiles:
-  - name: default
-    devices:
-      eth0:
-        name: eth0
-        network: lxdbr0
-        type: nic
-      root:
-        path: /
-        pool: default
-        type: disk
diff --git a/nixos/tests/hddfancontrol.nix b/nixos/tests/hddfancontrol.nix
new file mode 100644
index 0000000000000..b5fa7ccb2c19b
--- /dev/null
+++ b/nixos/tests/hddfancontrol.nix
@@ -0,0 +1,44 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "hddfancontrol";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ benley ];
+  };
+
+  nodes.machine = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+
+    services.hddfancontrol.enable = true;
+    services.hddfancontrol.disks = ["/dev/vda"];
+    services.hddfancontrol.pwmPaths = ["/test/hwmon1/pwm1"];
+    services.hddfancontrol.extraArgs = ["--pwm-start-value=32"
+                                        "--pwm-stop-value=0"];
+
+    systemd.services.hddfancontrol_fixtures = {
+      description = "Install test fixtures for hddfancontrol";
+      serviceConfig = {
+        Type = "oneshot";
+      };
+      script = ''
+        mkdir -p /test/hwmon1
+        echo 255 > /test/hwmon1/pwm1
+        echo 2 > /test/hwmon1/pwm1_enable
+      '';
+      wantedBy = ["hddfancontrol.service"];
+      before = ["hddfancontrol.service"];
+    };
+
+    systemd.services.hddfancontrol.serviceConfig.ReadWritePaths = "/test";
+  };
+
+  # hddfancontrol.service will fail to start because qemu /dev/vda doesn't have
+  # any thermal interfaces, but it should ensure that fans appear to be running
+  # before it aborts.
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("journalctl -eu hddfancontrol.service|grep 'Setting fan speed'")
+    machine.shutdown()
+
+  '';
+
+})
diff --git a/nixos/tests/influxdb2.nix b/nixos/tests/influxdb2.nix
index c9c54b788cc0c..1631ac1d94081 100644
--- a/nixos/tests/influxdb2.nix
+++ b/nixos/tests/influxdb2.nix
@@ -6,6 +6,9 @@ import ./make-test-python.nix ({ pkgs, ...} : {
 
   nodes.machine = { lib, ... }: {
     environment.systemPackages = [ pkgs.influxdb2-cli ];
+    # Make sure that the service is restarted immediately if tokens need to be rewritten
+    # without relying on any Restart=on-failure behavior
+    systemd.services.influxdb2.serviceConfig.RestartSec = 6000;
     services.influxdb2.enable = true;
     services.influxdb2.provision = {
       enable = true;
@@ -15,22 +18,208 @@ import ./make-test-python.nix ({ pkgs, ...} : {
         passwordFile = pkgs.writeText "admin-pw" "ExAmPl3PA55W0rD";
         tokenFile = pkgs.writeText "admin-token" "verysecureadmintoken";
       };
+      organizations.someorg = {
+        buckets.somebucket = {};
+        auths.sometoken = {
+          description = "some auth token";
+          readBuckets = ["somebucket"];
+          writeBuckets = ["somebucket"];
+        };
+      };
+      users.someuser.passwordFile = pkgs.writeText "tmp-pw" "abcgoiuhaoga";
+    };
+
+    specialisation.withModifications.configuration = { ... }: {
+      services.influxdb2.provision = {
+        organizations.someorg.buckets.somebucket.present = false;
+        organizations.someorg.auths.sometoken.present = false;
+        users.someuser.present = false;
+
+        organizations.myorg = {
+          description = "Myorg description";
+          buckets.mybucket = {
+            description = "Mybucket description";
+          };
+          auths.mytoken = {
+            operator = true;
+            description = "operator token";
+            tokenFile = pkgs.writeText "tmp-tok" "someusertoken";
+          };
+        };
+        users.myuser.passwordFile = pkgs.writeText "tmp-pw" "abcgoiuhaoga";
+      };
+    };
+
+    specialisation.withParentDelete.configuration = { ... }: {
+      services.influxdb2.provision = {
+        organizations.someorg.present = false;
+        # Deleting the parent implies:
+        #organizations.someorg.buckets.somebucket.present = false;
+        #organizations.someorg.auths.sometoken.present = false;
+      };
+    };
+
+    specialisation.withNewTokens.configuration = { ... }: {
+      services.influxdb2.provision = {
+        organizations.default = {
+          auths.operator = {
+            operator = true;
+            description = "new optoken";
+            tokenFile = pkgs.writeText "tmp-tok" "newoptoken";
+          };
+          auths.allaccess = {
+            operator = true;
+            description = "new allaccess";
+            tokenFile = pkgs.writeText "tmp-tok" "newallaccess";
+          };
+          auths.specifics = {
+            description = "new specifics";
+            readPermissions = ["users" "tasks"];
+            writePermissions = ["tasks"];
+            tokenFile = pkgs.writeText "tmp-tok" "newspecificstoken";
+          };
+        };
+      };
     };
   };
 
   testScript = { nodes, ... }:
     let
+      specialisations = "${nodes.machine.system.build.toplevel}/specialisation";
       tokenArg = "--token verysecureadmintoken";
     in ''
+      def assert_contains(haystack, needle):
+          if needle not in haystack:
+              print("The haystack that will cause the following exception is:")
+              print("---")
+              print(haystack)
+              print("---")
+              raise Exception(f"Expected string '{needle}' was not found")
+
+      def assert_lacks(haystack, needle):
+          if needle in haystack:
+              print("The haystack that will cause the following exception is:")
+              print("---")
+              print(haystack, end="")
+              print("---")
+              raise Exception(f"Unexpected string '{needle}' was found")
+
       machine.wait_for_unit("influxdb2.service")
 
       machine.fail("curl --fail -X POST 'http://localhost:8086/api/v2/signin' -u admin:wrongpassword")
       machine.succeed("curl --fail -X POST 'http://localhost:8086/api/v2/signin' -u admin:ExAmPl3PA55W0rD")
 
       out = machine.succeed("influx org list ${tokenArg}")
-      assert "default" in out
+      assert_contains(out, "default")
+      assert_lacks(out, "myorg")
+      assert_contains(out, "someorg")
 
       out = machine.succeed("influx bucket list ${tokenArg} --org default")
-      assert "default" in out
+      assert_contains(out, "default")
+
+      machine.fail("influx bucket list ${tokenArg} --org myorg")
+
+      out = machine.succeed("influx bucket list ${tokenArg} --org someorg")
+      assert_contains(out, "somebucket")
+
+      out = machine.succeed("influx user list ${tokenArg}")
+      assert_contains(out, "admin")
+      assert_lacks(out, "myuser")
+      assert_contains(out, "someuser")
+
+      out = machine.succeed("influx auth list ${tokenArg}")
+      assert_lacks(out, "operator token")
+      assert_contains(out, "some auth token")
+
+      with subtest("withModifications"):
+        machine.succeed('${specialisations}/withModifications/bin/switch-to-configuration test')
+        machine.wait_for_unit("influxdb2.service")
+
+        out = machine.succeed("influx org list ${tokenArg}")
+        assert_contains(out, "default")
+        assert_contains(out, "myorg")
+        assert_contains(out, "someorg")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org myorg")
+        assert_contains(out, "mybucket")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org someorg")
+        assert_lacks(out, "somebucket")
+
+        out = machine.succeed("influx user list ${tokenArg}")
+        assert_contains(out, "admin")
+        assert_contains(out, "myuser")
+        assert_lacks(out, "someuser")
+
+        out = machine.succeed("influx auth list ${tokenArg}")
+        assert_contains(out, "operator token")
+        assert_lacks(out, "some auth token")
+
+        # Make sure the user token is also usable
+        machine.succeed("influx auth list --token someusertoken")
+
+      with subtest("keepsUnrelated"):
+        machine.succeed('${nodes.machine.system.build.toplevel}/bin/switch-to-configuration test')
+        machine.wait_for_unit("influxdb2.service")
+
+        out = machine.succeed("influx org list ${tokenArg}")
+        assert_contains(out, "default")
+        assert_contains(out, "myorg")
+        assert_contains(out, "someorg")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org default")
+        assert_contains(out, "default")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org myorg")
+        assert_contains(out, "mybucket")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org someorg")
+        assert_contains(out, "somebucket")
+
+        out = machine.succeed("influx user list ${tokenArg}")
+        assert_contains(out, "admin")
+        assert_contains(out, "myuser")
+        assert_contains(out, "someuser")
+
+        out = machine.succeed("influx auth list ${tokenArg}")
+        assert_contains(out, "operator token")
+        assert_contains(out, "some auth token")
+
+      with subtest("withParentDelete"):
+        machine.succeed('${specialisations}/withParentDelete/bin/switch-to-configuration test')
+        machine.wait_for_unit("influxdb2.service")
+
+        out = machine.succeed("influx org list ${tokenArg}")
+        assert_contains(out, "default")
+        assert_contains(out, "myorg")
+        assert_lacks(out, "someorg")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org default")
+        assert_contains(out, "default")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org myorg")
+        assert_contains(out, "mybucket")
+
+        machine.fail("influx bucket list ${tokenArg} --org someorg")
+
+        out = machine.succeed("influx user list ${tokenArg}")
+        assert_contains(out, "admin")
+        assert_contains(out, "myuser")
+        assert_contains(out, "someuser")
+
+        out = machine.succeed("influx auth list ${tokenArg}")
+        assert_contains(out, "operator token")
+        assert_lacks(out, "some auth token")
+
+      with subtest("withNewTokens"):
+        machine.succeed('${specialisations}/withNewTokens/bin/switch-to-configuration test')
+        machine.wait_for_unit("influxdb2.service")
+
+        out = machine.succeed("influx auth list ${tokenArg}")
+        assert_contains(out, "operator token")
+        assert_contains(out, "some auth token")
+        assert_contains(out, "new optoken")
+        assert_contains(out, "new allaccess")
+        assert_contains(out, "new specifics")
     '';
 })
diff --git a/nixos/tests/lxd-image-server.nix b/nixos/tests/lxd-image-server.nix
index e5a292b61bd97..d0afa495a5b1d 100644
--- a/nixos/tests/lxd-image-server.nix
+++ b/nixos/tests/lxd-image-server.nix
@@ -61,14 +61,14 @@ in {
     machine.wait_for_unit("lxd.service")
     machine.wait_for_file("/var/lib/lxd/unix.socket")
 
-    # It takes additional second for lxd to settle
-    machine.sleep(1)
+    # Wait for lxd to settle
+    machine.succeed("lxd waitready")
 
     # lxd expects the pool's directory to already exist
     machine.succeed("mkdir /var/lxd-pool")
 
     machine.succeed(
-        "cat ${./common/lxd/config.yaml} | lxd init --preseed"
+        "lxd init --minimal"
     )
 
     machine.succeed(
diff --git a/nixos/tests/lxd.nix b/nixos/tests/lxd/container.nix
index 2c2c19e0eecf7..9e56f6e41e054 100644
--- a/nixos/tests/lxd.nix
+++ b/nixos/tests/lxd/container.nix
@@ -1,7 +1,7 @@
-import ./make-test-python.nix ({ pkgs, lib, ... } :
+import ../make-test-python.nix ({ pkgs, lib, ... } :
 
 let
-  lxd-image = import ../release.nix {
+  lxd-image = import ../../release.nix {
     configuration = {
       # Building documentation makes the test unnecessarily take a longer time:
       documentation.enable = lib.mkForce false;
@@ -38,19 +38,18 @@ in {
   };
 
   testScript = ''
+    def instance_is_up(_) -> bool:
+      status, _ = machine.execute("lxc exec container --disable-stdin --force-interactive /run/current-system/sw/bin/true")
+      return status == 0
+
     machine.wait_for_unit("sockets.target")
     machine.wait_for_unit("lxd.service")
     machine.wait_for_file("/var/lib/lxd/unix.socket")
 
-    # It takes additional second for lxd to settle
-    machine.sleep(1)
-
-    # lxd expects the pool's directory to already exist
-    machine.succeed("mkdir /var/lxd-pool")
+    # Wait for lxd to settle
+    machine.succeed("lxd waitready")
 
-    machine.succeed(
-        "cat ${./common/lxd/config.yaml} | lxd init --preseed"
-    )
+    machine.succeed("lxd init --minimal")
 
     machine.succeed(
         "lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs}/*/*.tar.xz --alias nixos"
@@ -58,21 +57,23 @@ in {
 
     with subtest("Container can be managed"):
         machine.succeed("lxc launch nixos container")
-        machine.sleep(5)
+        with machine.nested("Waiting for instance to start and be usable"):
+          retry(instance_is_up)
         machine.succeed("echo true | lxc exec container /run/current-system/sw/bin/bash -")
-        machine.succeed("lxc exec container true")
         machine.succeed("lxc delete -f container")
 
     with subtest("Container is mounted with lxcfs inside"):
         machine.succeed("lxc launch nixos container")
-        machine.sleep(5)
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
 
         ## ---------- ##
         ## limits.cpu ##
 
         machine.succeed("lxc config set container limits.cpu 1")
         machine.succeed("lxc restart container")
-        machine.sleep(5)
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
 
         assert (
             "1"
@@ -81,7 +82,8 @@ in {
 
         machine.succeed("lxc config set container limits.cpu 2")
         machine.succeed("lxc restart container")
-        machine.sleep(5)
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
 
         assert (
             "2"
@@ -93,7 +95,8 @@ in {
 
         machine.succeed("lxc config set container limits.memory 64MB")
         machine.succeed("lxc restart container")
-        machine.sleep(5)
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
 
         assert (
             "MemTotal:          62500 kB"
@@ -102,7 +105,8 @@ in {
 
         machine.succeed("lxc config set container limits.memory 128MB")
         machine.succeed("lxc restart container")
-        machine.sleep(5)
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
 
         assert (
             "MemTotal:         125000 kB"
diff --git a/nixos/tests/lxd/default.nix b/nixos/tests/lxd/default.nix
new file mode 100644
index 0000000000000..2e34907d79369
--- /dev/null
+++ b/nixos/tests/lxd/default.nix
@@ -0,0 +1,9 @@
+{
+  system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. {inherit system config;},
+}: {
+  container = import ./container.nix {inherit system pkgs;};
+  nftables = import ./nftables.nix {inherit system pkgs;};
+  ui = import ./ui.nix {inherit system pkgs;};
+}
diff --git a/nixos/tests/lxd-nftables.nix b/nixos/tests/lxd/nftables.nix
index 2930650015679..b85caa9eb368b 100644
--- a/nixos/tests/lxd-nftables.nix
+++ b/nixos/tests/lxd/nftables.nix
@@ -5,7 +5,7 @@
 # iptables to nftables requires a full reboot, which is a bit hard inside NixOS
 # tests.
 
-import ./make-test-python.nix ({ pkgs, ...} : {
+import ../make-test-python.nix ({ pkgs, ...} : {
   name = "lxd-nftables";
 
   meta = with pkgs.lib.maintainers; {
diff --git a/nixos/tests/lxd-ui.nix b/nixos/tests/lxd/ui.nix
index 19eaa226c0bfe..86cb30d8c2b68 100644
--- a/nixos/tests/lxd-ui.nix
+++ b/nixos/tests/lxd/ui.nix
@@ -1,4 +1,4 @@
-import ./make-test-python.nix ({ pkgs, lib, ... }: {
+import ../make-test-python.nix ({ pkgs, lib, ... }: {
   name = "lxd-ui";
 
   meta = with pkgs.lib.maintainers; {
diff --git a/nixos/tests/os-prober.nix b/nixos/tests/os-prober.nix
index 22e720824c805..dae1306bd69d0 100644
--- a/nixos/tests/os-prober.nix
+++ b/nixos/tests/os-prober.nix
@@ -76,6 +76,7 @@ in {
       # nixos-rebuild needs must be included in the VM.
       system.extraDependencies = with pkgs;
         [
+          bintools
           brotli
           brotli.dev
           brotli.lib
diff --git a/nixos/tests/virtualbox.nix b/nixos/tests/virtualbox.nix
index 062b125eb611e..e522d0679e151 100644
--- a/nixos/tests/virtualbox.nix
+++ b/nixos/tests/virtualbox.nix
@@ -519,4 +519,4 @@ in mapAttrs (mkVBoxTest false vboxVMs) {
     destroy_vm_test1()
     destroy_vm_test2()
   '';
-} // (lib.optionalAttrs enableUnfree unfreeTests)
+} // (optionalAttrs enableUnfree unfreeTests)