Diffstat (limited to 'nixos/tests')
-rw-r--r--  nixos/tests/all-tests.nix                  |   5
-rw-r--r--  nixos/tests/db-rest.nix                    | 107
-rw-r--r--  nixos/tests/docker-registry.nix            |   2
-rw-r--r--  nixos/tests/forgejo.nix                    | 156
-rw-r--r--  nixos/tests/freshrss-none-auth.nix         |  19
-rw-r--r--  nixos/tests/gonic.nix                      |  12
-rw-r--r--  nixos/tests/incus/virtual-machine.nix      |   9
-rw-r--r--  nixos/tests/lomiri.nix                     | 294
-rw-r--r--  nixos/tests/networking/networkmanager.nix  | 172
-rw-r--r--  nixos/tests/prometheus-exporters.nix       |  48
-rw-r--r--  nixos/tests/systemd-networkd-bridge.nix    | 103
-rw-r--r--  nixos/tests/teleport.nix                   |   2
12 files changed, 884 insertions(+), 45 deletions(-)
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 7944952e2f4ed..23c5a94ed12ca 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -236,6 +236,7 @@ in {
   darling = handleTest ./darling.nix {};
   dae = handleTest ./dae.nix {};
   davis = handleTest ./davis.nix {};
+  db-rest = handleTest ./db-rest.nix {};
   dconf = handleTest ./dconf.nix {};
   deconz = handleTest ./deconz.nix {};
   deepin = handleTest ./deepin.nix {};
@@ -329,6 +330,7 @@ in {
   freshrss-sqlite = handleTest ./freshrss-sqlite.nix {};
   freshrss-pgsql = handleTest ./freshrss-pgsql.nix {};
   freshrss-http-auth = handleTest ./freshrss-http-auth.nix {};
+  freshrss-none-auth = handleTest ./freshrss-none-auth.nix {};
   frigate = handleTest ./frigate.nix {};
   frp = handleTest ./frp.nix {};
   frr = handleTest ./frr.nix {};
@@ -503,6 +505,7 @@ in {
   lxd = pkgs.recurseIntoAttrs (handleTest ./lxd { inherit handleTestOn; });
   lxd-image-server = handleTest ./lxd-image-server.nix {};
   #logstash = handleTest ./logstash.nix {};
+  lomiri = handleTest ./lomiri.nix {};
   lomiri-system-settings = handleTest ./lomiri-system-settings.nix {};
   lorri = handleTest ./lorri/default.nix {};
   maddy = discoverTests (import ./maddy { inherit handleTest; });
@@ -596,6 +599,7 @@ in {
   netdata = handleTest ./netdata.nix {};
   networking.scripted = handleTest ./networking/networkd-and-scripted.nix { networkd = false; };
   networking.networkd = handleTest ./networking/networkd-and-scripted.nix { networkd = true; };
+  networking.networkmanager = handleTest ./networking/networkmanager.nix {};
   netbox_3_6 = handleTest ./web-apps/netbox.nix { netbox = pkgs.netbox_3_6; };
   netbox_3_7 = handleTest ./web-apps/netbox.nix { netbox = pkgs.netbox_3_7; };
   netbox-upgrade = handleTest ./web-apps/netbox-upgrade.nix {};
@@ -899,6 +903,7 @@ in {
   systemd-lock-handler = runTestOn ["aarch64-linux" "x86_64-linux"] ./systemd-lock-handler.nix;
   systemd-machinectl = handleTest ./systemd-machinectl.nix {};
   systemd-networkd = handleTest ./systemd-networkd.nix {};
+  systemd-networkd-bridge = handleTest ./systemd-networkd-bridge.nix {};
   systemd-networkd-dhcpserver = handleTest ./systemd-networkd-dhcpserver.nix {};
   systemd-networkd-dhcpserver-static-leases = handleTest ./systemd-networkd-dhcpserver-static-leases.nix {};
   systemd-networkd-ipv6-prefix-delegation = handleTest ./systemd-networkd-ipv6-prefix-delegation.nix {};
diff --git a/nixos/tests/db-rest.nix b/nixos/tests/db-rest.nix
new file mode 100644
index 0000000000000..9249da904acbe
--- /dev/null
+++ b/nixos/tests/db-rest.nix
@@ -0,0 +1,107 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "db-rest";
+  meta.maintainers = with pkgs.lib.maintainers; [ marie ];
+
+  nodes = {
+    database = {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.10"; prefixLength = 24; }
+          ];
+        };
+        firewall.allowedTCPPorts = [ 31638 ];
+      };
+
+      services.redis.servers.db-rest = {
+        enable = true;
+        bind = "0.0.0.0";
+        requirePass = "choochoo";
+        port = 31638;
+      };
+    };
+
+    serverWithTcp = { pkgs, ... }: {
+      environment = {
+        etc = {
+          "db-rest/password-redis-db".text = ''
+            choochoo
+          '';
+        };
+      };
+
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.11"; prefixLength = 24; }
+          ];
+        };
+        firewall.allowedTCPPorts = [ 3000 ];
+      };
+
+      services.db-rest = {
+        enable = true;
+        host = "0.0.0.0";
+        redis = {
+          enable = true;
+          createLocally = false;
+          host = "192.168.2.10";
+          port = 31638;
+          passwordFile = "/etc/db-rest/password-redis-db";
+          useSSL = false;
+        };
+      };
+    };
+
+    serverWithUnixSocket = { pkgs, ... }: {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.12"; prefixLength = 24; }
+          ];
+        };
+        firewall.allowedTCPPorts = [ 3000 ];
+      };
+
+      services.db-rest = {
+        enable = true;
+        host = "0.0.0.0";
+        redis = {
+          enable = true;
+          createLocally = true;
+        };
+      };
+    };
+
+    client = {
+      environment.systemPackages = [ pkgs.jq ];
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.13"; prefixLength = 24; }
+          ];
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    with subtest("db-rest redis with TCP socket"):
+      database.wait_for_unit("redis-db-rest.service")
+      database.wait_for_open_port(31638)
+
+      serverWithTcp.wait_for_unit("db-rest.service")
+      serverWithTcp.wait_for_open_port(3000)
+
+      client.succeed("curl --fail --get http://192.168.2.11:3000/stations --data-urlencode 'query=Köln Hbf' | jq -r '.\"8000207\".name' | grep 'Köln Hbf'")
+
+    with subtest("db-rest redis with Unix socket"):
+      serverWithUnixSocket.wait_for_unit("db-rest.service")
+      serverWithUnixSocket.wait_for_open_port(3000)
+
+      client.succeed("curl --fail --get http://192.168.2.12:3000/stations --data-urlencode 'query=Köln Hbf' | jq -r '.\"8000207\".name' | grep 'Köln Hbf'")
+  '';
+})
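
The TCP variant above authenticates against the remote Redis instance through services.db-rest.redis.passwordFile. As a rough sketch (not part of the diff, and assuming pkgs.redis were added to the database node's environment.systemPackages so that redis-cli is available), one could additionally assert that Redis actually accepts the shared password:

    # Hypothetical extra check, assuming redis-cli is installed on the database node.
    database.succeed("redis-cli -p 31638 -a choochoo ping | grep -i pong")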
diff --git a/nixos/tests/docker-registry.nix b/nixos/tests/docker-registry.nix
index 3969ef3f0226f..4f033fc30b191 100644
--- a/nixos/tests/docker-registry.nix
+++ b/nixos/tests/docker-registry.nix
@@ -3,7 +3,7 @@
 import ./make-test-python.nix ({ pkgs, ...} : {
   name = "docker-registry";
   meta = with pkgs.lib.maintainers; {
-    maintainers = [ globin ironpinguin ];
+    maintainers = [ globin ironpinguin cafkafk ];
   };
 
   nodes = {
diff --git a/nixos/tests/forgejo.nix b/nixos/tests/forgejo.nix
index b14df0a2c74f9..8b9ee46ff5d32 100644
--- a/nixos/tests/forgejo.nix
+++ b/nixos/tests/forgejo.nix
@@ -22,8 +22,27 @@ let
   '';
   signingPrivateKeyId = "4D642DE8B678C79D";
 
+  actionsWorkflowYaml = ''
+    run-name: dummy workflow
+    on:
+      push:
+    jobs:
+      cat:
+        runs-on: native
+        steps:
+          - uses: http://localhost:3000/test/checkout@main
+          - run: cat testfile
+  '';
+  # https://github.com/actions/checkout/releases
+  checkoutActionSource = pkgs.fetchFromGitHub {
+    owner = "actions";
+    repo = "checkout";
+    rev = "v4.1.1";
+    hash = "sha256-h2/UIp8IjPo3eE4Gzx52Fb7pcgG/Ww7u31w5fdKVMos=";
+  };
+
   supportedDbTypes = [ "mysql" "postgres" "sqlite3" ];
-  makeGForgejoTest = type: nameValuePair type (makeTest {
+  makeForgejoTest = type: nameValuePair type (makeTest {
     name = "forgejo-${type}";
     meta.maintainers = with maintainers; [ bendlas emilylange ];
 
@@ -36,21 +55,28 @@ let
           settings.service.DISABLE_REGISTRATION = true;
           settings."repository.signing".SIGNING_KEY = signingPrivateKeyId;
           settings.actions.ENABLED = true;
+          settings.repository = {
+            ENABLE_PUSH_CREATE_USER = true;
+            DEFAULT_PUSH_CREATE_PRIVATE = false;
+          };
         };
-        environment.systemPackages = [ config.services.forgejo.package pkgs.gnupg pkgs.jq pkgs.file ];
+        environment.systemPackages = [ config.services.forgejo.package pkgs.gnupg pkgs.jq pkgs.file pkgs.htmlq ];
         services.openssh.enable = true;
 
         specialisation.runner = {
           inheritParentConfig = true;
-          configuration.services.gitea-actions-runner.instances."test" = {
-            enable = true;
-            name = "ci";
-            url = "http://localhost:3000";
-            labels = [
-              # don't require docker/podman
-              "native:host"
-            ];
-            tokenFile = "/var/lib/forgejo/runner_token";
+          configuration.services.gitea-actions-runner = {
+            package = pkgs.forgejo-runner;
+            instances."test" = {
+              enable = true;
+              name = "ci";
+              url = "http://localhost:3000";
+              labels = [
+                # type ":host" does not depend on docker/podman/lxc
+                "native:host"
+              ];
+              tokenFile = "/var/lib/forgejo/runner_token";
+            };
           };
         };
         specialisation.dump = {
@@ -62,11 +88,20 @@ let
           };
         };
       };
-      client1 = { config, pkgs, ... }: {
-        environment.systemPackages = [ pkgs.git ];
-      };
-      client2 = { config, pkgs, ... }: {
-        environment.systemPackages = [ pkgs.git ];
+      client = { ... }: {
+        programs.git = {
+          enable = true;
+          config = {
+            user.email = "test@localhost";
+            user.name = "test";
+            init.defaultBranch = "main";
+          };
+        };
+        programs.ssh.extraConfig = ''
+          Host *
+            StrictHostKeyChecking no
+            IdentityFile ~/.ssh/privk
+        '';
       };
     };
 
@@ -75,26 +110,23 @@ let
         inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
         serverSystem = nodes.server.system.build.toplevel;
         dumpFile = with nodes.server.specialisation.dump.configuration.services.forgejo.dump; "${backupDir}/${file}";
+        remoteUri = "forgejo@server:test/repo";
+        remoteUriCheckoutAction = "forgejo@server:test/checkout";
       in
       ''
         import json
-        GIT_SSH_COMMAND = "ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no"
-        REPO = "forgejo@server:test/repo"
-        PRIVK = "${snakeOilPrivateKey}"
 
         start_all()
 
-        client1.succeed("mkdir /tmp/repo")
-        client1.succeed("mkdir -p $HOME/.ssh")
-        client1.succeed(f"cat {PRIVK} > $HOME/.ssh/privk")
-        client1.succeed("chmod 0400 $HOME/.ssh/privk")
-        client1.succeed("git -C /tmp/repo init")
-        client1.succeed("echo hello world > /tmp/repo/testfile")
-        client1.succeed("git -C /tmp/repo add .")
-        client1.succeed("git config --global user.email test@localhost")
-        client1.succeed("git config --global user.name test")
-        client1.succeed("git -C /tmp/repo commit -m 'Initial import'")
-        client1.succeed(f"git -C /tmp/repo remote add origin {REPO}")
+        client.succeed("mkdir -p ~/.ssh")
+        client.succeed("(umask 0077; cat ${snakeOilPrivateKey} > ~/.ssh/privk)")
+
+        client.succeed("mkdir /tmp/repo")
+        client.succeed("git -C /tmp/repo init")
+        client.succeed("echo 'hello world' > /tmp/repo/testfile")
+        client.succeed("git -C /tmp/repo add .")
+        client.succeed("git -C /tmp/repo commit -m 'Initial import'")
+        client.succeed("git -C /tmp/repo remote add origin ${remoteUri}")
 
         server.wait_for_unit("forgejo.service")
         server.wait_for_open_port(3000)
@@ -143,18 +175,14 @@ let
             + ' -d \'{"key":"${snakeOilPublicKey}","read_only":true,"title":"SSH"}\'''
         )
 
-        client1.succeed(
-            f"GIT_SSH_COMMAND='{GIT_SSH_COMMAND}' git -C /tmp/repo push origin master"
-        )
+        client.succeed("git -C /tmp/repo push origin main")
 
-        client2.succeed("mkdir -p $HOME/.ssh")
-        client2.succeed(f"cat {PRIVK} > $HOME/.ssh/privk")
-        client2.succeed("chmod 0400 $HOME/.ssh/privk")
-        client2.succeed(f"GIT_SSH_COMMAND='{GIT_SSH_COMMAND}' git clone {REPO}")
-        client2.succeed('test "$(cat repo/testfile | xargs echo -n)" = "hello world"')
+        client.succeed("git clone ${remoteUri} /tmp/repo-clone")
+        print(client.succeed("ls -lash /tmp/repo-clone"))
+        assert "hello world" == client.succeed("cat /tmp/repo-clone/testfile").strip()
 
         with subtest("Testing git protocol version=2 over ssh"):
-            git_protocol = client2.succeed(f"GIT_SSH_COMMAND='{GIT_SSH_COMMAND}' GIT_TRACE2_EVENT=true git -C repo fetch |& grep negotiated-version")
+            git_protocol = client.succeed("GIT_TRACE2_EVENT=true git -C /tmp/repo-clone fetch |& grep negotiated-version")
             version = json.loads(git_protocol).get("value")
             assert version == "2", f"git did not negotiate protocol version 2, but version {version} instead."
 
@@ -164,7 +192,7 @@ let
             timeout=10
         )
 
-        with subtest("Testing runner registration"):
+        with subtest("Testing runner registration and action workflow"):
             server.succeed(
                 "su -l forgejo -c 'GITEA_WORK_DIR=/var/lib/forgejo gitea actions generate-runner-token' | sed 's/^/TOKEN=/' | tee /var/lib/forgejo/runner_token"
             )
@@ -172,6 +200,52 @@ let
             server.wait_for_unit("gitea-runner-test.service")
             server.succeed("journalctl -o cat -u gitea-runner-test.service | grep -q 'Runner registered successfully'")
 
+            # enable actions feature for this repository, defaults to disabled
+            server.succeed(
+                "curl --fail -X PATCH http://localhost:3000/api/v1/repos/test/repo "
+                + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+                + f"-H 'Authorization: token {api_token}'"
+                + ' -d \'{"has_actions":true}\'''
+            )
+
+            # mirror "actions/checkout" action
+            client.succeed("cp -R ${checkoutActionSource}/ /tmp/checkout")
+            client.succeed("git -C /tmp/checkout init")
+            client.succeed("git -C /tmp/checkout add .")
+            client.succeed("git -C /tmp/checkout commit -m 'Initial import'")
+            client.succeed("git -C /tmp/checkout remote add origin ${remoteUriCheckoutAction}")
+            client.succeed("git -C /tmp/checkout push origin main")
+
+            # push workflow to initial repo
+            client.succeed("mkdir -p /tmp/repo/.forgejo/workflows")
+            client.succeed("cp ${pkgs.writeText "dummy-workflow.yml" actionsWorkflowYaml} /tmp/repo/.forgejo/workflows/")
+            client.succeed("git -C /tmp/repo add .")
+            client.succeed("git -C /tmp/repo commit -m 'Add dummy workflow'")
+            client.succeed("git -C /tmp/repo push origin main")
+
+            def poll_workflow_action_status(_) -> bool:
+                output = server.succeed(
+                    "curl --fail http://localhost:3000/test/repo/actions | "
+                    + 'htmlq ".flex-item-leading span" --attribute "data-tooltip-content"'
+                ).strip()
+
+                # values taken from https://codeberg.org/forgejo/forgejo/src/commit/af47c583b4fb3190fa4c4c414500f9941cc02389/options/locale/locale_en-US.ini#L3649-L3661
+                if output in [ "Failure", "Canceled", "Skipped", "Blocked" ]:
+                    server.log(f"Command returned '{output}', which we consider failed.")
+                    raise Exception(f"Workflow status is '{output}', which we consider failed.")
+
+                elif output in [ "Unknown", "Waiting", "Running", "" ]:
+                    server.log(f"Workflow status is '{output}'. Waiting some more...")
+                    return False
+
+                elif output in [ "Success" ]:
+                    return True
+
+                raise Exception(f"Workflow status is '{output}', which we don't know. Value mappings likely need updating.")
+
+            with server.nested("Waiting for the workflow run to be successful"):
+                retry(poll_workflow_action_status)
+
         with subtest("Testing backup service"):
             server.succeed("${serverSystem}/specialisation/dump/bin/switch-to-configuration test")
             server.systemctl("start forgejo-dump")
@@ -181,4 +255,4 @@ let
   });
 in
 
-listToAttrs (map makeGForgejoTest supportedDbTypes)
+listToAttrs (map makeForgejoTest supportedDbTypes)
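
poll_workflow_action_status above takes a single, unused argument because the test driver's retry() helper passes its callback a flag that is True only on the final attempt. A rough sketch of that behaviour, for illustration only (the real helper lives in the NixOS test driver and may differ in detail):

    # Illustration only: approximate behaviour of retry() as used above.
    import time

    def retry_sketch(fn, timeout=900):
        for _ in range(timeout):
            if fn(False):          # regular attempts, flag unset
                return
            time.sleep(1)
        if not fn(True):           # final attempt, flag set
            raise Exception(f"action timed out after {timeout} seconds")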
diff --git a/nixos/tests/freshrss-none-auth.nix b/nixos/tests/freshrss-none-auth.nix
new file mode 100644
index 0000000000000..fd63470386a05
--- /dev/null
+++ b/nixos/tests/freshrss-none-auth.nix
@@ -0,0 +1,19 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "freshrss";
+  meta.maintainers = with lib.maintainers; [ mattchrist ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.freshrss = {
+      enable = true;
+      baseUrl = "http://localhost";
+      authType = "none";
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_open_port(80)
+    response = machine.succeed("curl -vvv -s http://127.0.0.1:80/i/")
+    assert '<title>Main stream · FreshRSS</title>' in response, "FreshRSS stream page didn't load successfully"
+  '';
+})
diff --git a/nixos/tests/gonic.nix b/nixos/tests/gonic.nix
index 726d7da0970f7..adf0f511a9cf7 100644
--- a/nixos/tests/gonic.nix
+++ b/nixos/tests/gonic.nix
@@ -2,11 +2,19 @@ import ./make-test-python.nix ({ pkgs, ... }: {
   name = "gonic";
 
   nodes.machine = { ... }: {
+    systemd.tmpfiles.settings = {
+      "10-gonic" = {
+        "/tmp/music"."d" = {};
+        "/tmp/podcast"."d" = {};
+        "/tmp/playlists"."d" = {};
+      };
+    };
     services.gonic = {
       enable = true;
       settings = {
-        music-path = [ "/tmp" ];
-        podcast-path = "/tmp";
+        music-path = [ "/tmp/music" ];
+        podcast-path = "/tmp/podcast";
+        playlists-path = "/tmp/playlists";
       };
     };
   };
diff --git a/nixos/tests/incus/virtual-machine.nix b/nixos/tests/incus/virtual-machine.nix
index ab378c7b9490e..48178aaed32c4 100644
--- a/nixos/tests/incus/virtual-machine.nix
+++ b/nixos/tests/incus/virtual-machine.nix
@@ -57,5 +57,14 @@ in
 
     with subtest("lxd-agent has a valid path"):
         machine.succeed("incus exec ${instance-name} -- bash -c 'true'")
+
+    with subtest("guest supports cpu hotplug"):
+        machine.succeed("incus config set ${instance-name} limits.cpu=1")
+        count = int(machine.succeed("incus exec ${instance-name} -- nproc").strip())
+        assert count == 1, f"Wrong number of CPUs reported, want: 1, got: {count}"
+
+        machine.succeed("incus config set ${instance-name} limits.cpu=2")
+        count = int(machine.succeed("incus exec ${instance-name} -- nproc").strip())
+        assert count == 2, f"Wrong number of CPUs reported, want: 2, got: {count}"
   '';
 })
diff --git a/nixos/tests/lomiri.nix b/nixos/tests/lomiri.nix
new file mode 100644
index 0000000000000..9d6337e9977cb
--- /dev/null
+++ b/nixos/tests/lomiri.nix
@@ -0,0 +1,294 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: let
+  # Keep these values in one place so everything stays consistent; they are needed for OCR & navigating the greeter
+  user = "alice";
+  description = "Alice Foobar";
+  password = "foobar";
+in {
+  name = "lomiri";
+
+  meta = {
+    maintainers = lib.teams.lomiri.members;
+  };
+
+  nodes.machine = { config, ... }: {
+    imports = [
+      ./common/user-account.nix
+    ];
+
+    users.users.${user} = {
+      inherit description password;
+    };
+
+    services.desktopManager.lomiri.enable = lib.mkForce true;
+    services.displayManager.defaultSession = lib.mkForce "lomiri";
+
+    fonts.packages = [ pkgs.inconsolata ];
+
+    environment = {
+      # Help with OCR
+      etc."xdg/alacritty/alacritty.yml".text = lib.generators.toYAML { } {
+        font = rec {
+          normal.family = "Inconsolata";
+          bold.family = normal.family;
+          italic.family = normal.family;
+          bold_italic.family = normal.family;
+          size = 16;
+        };
+        colors = rec {
+          primary = {
+            foreground = "0x000000";
+            background = "0xffffff";
+          };
+          normal = {
+            green = primary.foreground;
+          };
+        };
+      };
+
+      variables = {
+        # So we can verify that content-hub is working behind the scenes
+        CONTENT_HUB_LOGGING_LEVEL = "2";
+      };
+
+      systemPackages = with pkgs; [
+        # For a convenient way of kicking off content-hub peer collection
+        lomiri.content-hub.examples
+
+        # Forcing alacritty to run as an X11 app when opened from the starter menu
+        (symlinkJoin {
+          name = "x11-${alacritty.name}";
+
+          paths = [ alacritty ];
+
+          nativeBuildInputs = [ makeWrapper ];
+
+          postBuild = ''
+            wrapProgram $out/bin/alacritty \
+              --set WINIT_UNIX_BACKEND x11 \
+              --set WAYLAND_DISPLAY ""
+          '';
+
+          inherit (alacritty) meta;
+        })
+      ];
+    };
+
+    # Help with OCR
+    systemd.tmpfiles.settings = let
+      white = "255, 255, 255";
+      black = "0, 0, 0";
+      colorSection = color: {
+        Color = color;
+        Bold = true;
+        Transparency = false;
+      };
+      terminalColors = pkgs.writeText "customized.colorscheme" (lib.generators.toINI {} {
+        Background = colorSection white;
+        Foreground = colorSection black;
+        Color2 = colorSection black;
+        Color2Intense = colorSection black;
+      });
+      terminalConfig = pkgs.writeText "terminal.ubports.conf" (lib.generators.toINI {} {
+        General = {
+          colorScheme = "customized";
+          fontSize = "16";
+          fontStyle = "Inconsolata";
+        };
+      });
+      confBase = "${config.users.users.${user}.home}/.config";
+      userDirArgs = {
+        mode = "0700";
+        user = user;
+        group = "users";
+      };
+    in {
+      "10-lomiri-test-setup" = {
+        "${confBase}".d = userDirArgs;
+        "${confBase}/terminal.ubports".d = userDirArgs;
+        "${confBase}/terminal.ubports/customized.colorscheme".L.argument = "${terminalColors}";
+        "${confBase}/terminal.ubports/terminal.ubports.conf".L.argument = "${terminalConfig}";
+      };
+    };
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: ''
+    def open_starter():
+        """
+        Open the starter, and ensure it's opened.
+        """
+        machine.send_key("meta_l-a")
+        # Look for any of the default apps
+        machine.wait_for_text(r"(Search|System|Settings|Morph|Browser|Terminal|Alacritty)")
+
+    def toggle_maximise():
+        """
+        Send the keybind to maximise the current window.
+        """
+        machine.send_key("ctrl-meta_l-up")
+
+        # For some reason, Lomiri in these VM tests very frequently opens the starter menu a few seconds after sending the above.
+        # Because this isn't fully reproducible, and there is no command to wait on when OCR doesn't pick up some text,
+        # the best we can do is send an Escape key press after waiting some arbitrary time and hope that it works out fine.
+        machine.sleep(5)
+        machine.send_key("esc")
+        machine.sleep(5)
+
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+
+    # Lomiri in greeter mode should work & be able to start a session
+    with subtest("lomiri greeter works"):
+        machine.wait_for_unit("display-manager.service")
+        # Start page shows the current time
+        machine.wait_for_text(r"(AM|PM)")
+        machine.screenshot("lomiri_greeter_launched")
+
+        # Advance to login part
+        machine.send_key("ret")
+        machine.wait_for_text("${description}")
+        machine.screenshot("lomiri_greeter_login")
+
+        # Login
+        machine.send_chars("${password}\n")
+        # Best way I can think of to differentiate "Lomiri in LightDM greeter mode" from "Lomiri in user shell mode"
+        machine.wait_until_succeeds("pgrep -u ${user} -f 'lomiri --mode=full-shell'")
+
+    # The session should start, and not be stuck in i.e. a crash loop
+    with subtest("lomiri starts"):
+        # Output rendering from Lomiri has started when it starts printing performance diagnostics
+        machine.wait_for_console_text("Last frame took")
+        # Look for datetime's clock, one of the last elements to load
+        machine.wait_for_text(r"(AM|PM)")
+        machine.screenshot("lomiri_launched")
+
+    # Working terminal keybind is good
+    with subtest("terminal keybind works"):
+        machine.send_key("ctrl-alt-t")
+        machine.wait_for_text(r"(${user}|machine)")
+        machine.screenshot("terminal_opens")
+
+        # lomiri-terminal-app has a separate VM test to test its basic functionality
+
+        # for the LSS content-hub test to work reliably, we need to kick off peer collecting
+        machine.send_chars("content-hub-test-importer\n")
+        machine.wait_for_text(r"(/build/source|hub.cpp|handler.cpp|void|virtual|const)") # awaiting log messages from content-hub
+        machine.send_key("ctrl-c")
+
+        machine.send_key("alt-f4")
+
+    # We want the ability to launch applications
+    with subtest("starter menu works"):
+        open_starter()
+        machine.screenshot("starter_opens")
+
+        # Just try the terminal again, we know that it should work
+        machine.send_chars("Terminal\n")
+        machine.wait_for_text(r"(${user}|machine)")
+        machine.send_key("alt-f4")
+
+    # We want support for X11 apps
+    with subtest("xwayland support works"):
+        open_starter()
+        machine.send_chars("Alacritty\n")
+        machine.wait_for_text(r"(${user}|machine)")
+        machine.screenshot("alacritty_opens")
+        machine.send_key("alt-f4")
+
+    # LSS provides DE settings
+    with subtest("system settings open"):
+        open_starter()
+        machine.send_chars("System Settings\n")
+        machine.wait_for_text("Rotation Lock")
+        machine.screenshot("settings_open")
+
+        # lomiri-system-settings has a separate VM test, only test Lomiri-specific content-hub functionalities here
+
+        # Make fullscreen, can't navigate to Background plugin via keyboard unless window has non-phone-like aspect ratio
+        toggle_maximise()
+
+        # Load Background plugin
+        machine.send_key("tab")
+        machine.send_key("tab")
+        machine.send_key("tab")
+        machine.send_key("tab")
+        machine.send_key("tab")
+        machine.send_key("tab")
+        machine.send_key("ret")
+        machine.wait_for_text("Background image")
+
+        # Try to load custom background
+        machine.send_key("shift-tab")
+        machine.send_key("shift-tab")
+        machine.send_key("shift-tab")
+        machine.send_key("shift-tab")
+        machine.send_key("shift-tab")
+        machine.send_key("shift-tab")
+        machine.send_key("ret")
+
+        # Peers should be loaded
+        machine.wait_for_text("Morph") # or Gallery, but Morph is already packaged
+        machine.screenshot("settings_content-hub_peers")
+
+        # Sadly, it doesn't seem possible to actually select a peer and attempt a content-hub data exchange with just the keyboard
+
+        machine.send_key("alt-f4")
+
+    # Morph is how we go online
+    with subtest("morph browser works"):
+        open_starter()
+        machine.send_chars("Morph\n")
+        machine.wait_for_text(r"(Bookmarks|address|site|visited any)")
+        machine.screenshot("morph_open")
+
+        # morph-browser has a separate VM test, there isn't anything new we could test here
+
+        machine.send_key("alt-f4")
+
+    # The ayatana indicators are an important part of the experience, and they hold the only graphical way of exiting the session.
+    # Reaching them via the intended way requires wayland mouse control, but ydotool lacks a module for its daemon:
+    # https://github.com/NixOS/nixpkgs/issues/183659
+    # Luckily, there's a test app that also displays their contents, but it's a bit inconsistent. Hopefully this is *good-enough*.
+    with subtest("ayatana indicators work"):
+        open_starter()
+        machine.send_chars("Indicators\n")
+        machine.wait_for_text(r"(Indicators|Client|List|network|datetime|session)")
+        machine.screenshot("indicators_open")
+
+        # Element tab order within the indicator menus is not fully deterministic
+        # Only check that the indicators are listed & their items load
+
+        with subtest("lomiri indicator network works"):
+            # Select indicator-network
+            machine.send_key("tab")
+            # Don't go further down, first entry
+            machine.send_key("ret")
+            machine.wait_for_text(r"(Flight|Wi-Fi)")
+            machine.screenshot("indicators_network")
+
+        machine.send_key("shift-tab")
+        machine.send_key("ret")
+        machine.wait_for_text(r"(Indicators|Client|List|network|datetime|session)")
+
+        with subtest("ayatana indicator datetime works"):
+            # Select ayatana-indicator-datetime
+            machine.send_key("tab")
+            machine.send_key("down")
+            machine.send_key("ret")
+            machine.wait_for_text("Time and Date Settings")
+            machine.screenshot("indicators_timedate")
+
+        machine.send_key("shift-tab")
+        machine.send_key("ret")
+        machine.wait_for_text(r"(Indicators|Client|List|network|datetime|session)")
+
+        with subtest("ayatana indicator session works"):
+            # Select ayatana-indicator-session
+            machine.send_key("tab")
+            machine.send_key("down")
+            machine.send_key("ret")
+            machine.wait_for_text("Log Out")
+            machine.screenshot("indicators_session")
+  '';
+})
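
The "xwayland support works" subtest only verifies that the wrapped Alacritty renders text. If one also wanted to assert that the window really goes through Xwayland, a hypothetical follow-up check (not part of the test above) could look like this:

    # Hypothetical: an X11 client running under a Wayland compositor implies an
    # Xwayland server process in the session.
    machine.wait_until_succeeds("pgrep -x Xwayland")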
diff --git a/nixos/tests/networking/networkmanager.nix b/nixos/tests/networking/networkmanager.nix
new file mode 100644
index 0000000000000..e654e37d7efb7
--- /dev/null
+++ b/nixos/tests/networking/networkmanager.nix
@@ -0,0 +1,172 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../../lib/testing-python.nix { inherit system pkgs; };
+
+let
+  lib = pkgs.lib;
+  # This is intended as a client test, since NetworkManager shouldn't be used on a router or server,
+  # so using systemd-networkd for the router VM is fine in these tests.
+  router = import ./router.nix { networkd = true; };
+  qemu-common = import ../../lib/qemu-common.nix { inherit (pkgs) lib pkgs; };
+  clientConfig = extraConfig: lib.recursiveUpdate {
+    networking.useDHCP = false;
+
+    # Make sure that only NetworkManager configures the interface
+    networking.interfaces = lib.mkForce {
+      eth1 = {};
+    };
+    networking.networkmanager = {
+      enable = true;
+      # this is needed so NM doesn't generate 'Wired Connection' profiles and instead uses the default one
+      settings.main.no-auto-default = "*";
+      ensureProfiles.profiles.default = {
+        connection = {
+          id = "default";
+          type = "ethernet";
+          interface-name = "eth1";
+          autoconnect = true;
+        };
+      };
+    };
+  } extraConfig;
+  testCases = {
+    static = {
+      name = "static";
+      nodes = {
+        inherit router;
+        client = clientConfig {
+          networking.networkmanager.ensureProfiles.profiles.default = {
+            ipv4.method = "manual";
+            ipv4.addresses = "192.168.1.42/24";
+            ipv4.gateway = "192.168.1.1";
+            ipv6.method = "manual";
+            ipv6.addresses = "fd00:1234:5678:1::42/64";
+            ipv6.gateway = "fd00:1234:5678:1::1";
+          };
+        };
+      };
+      testScript = ''
+        start_all()
+        router.systemctl("start network-online.target")
+        router.wait_for_unit("network-online.target")
+        client.wait_for_unit("NetworkManager.service")
+
+        with subtest("Wait until we have an ip address on each interface"):
+            client.wait_until_succeeds("ip addr show dev eth1 | grep -q '192.168.1'")
+            client.wait_until_succeeds("ip addr show dev eth1 | grep -q 'fd00:1234:5678:1:'")
+
+        with subtest("Test if icmp echo works"):
+            client.wait_until_succeeds("ping -c 1 192.168.3.1")
+            client.wait_until_succeeds("ping -c 1 fd00:1234:5678:3::1")
+            router.wait_until_succeeds("ping -c 1 192.168.1.42")
+            router.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::42")
+      '';
+    };
+    auto = {
+      name = "auto";
+      nodes = {
+        inherit router;
+        client = clientConfig {
+          networking.networkmanager.ensureProfiles.profiles.default = {
+            ipv4.method = "auto";
+            ipv6.method = "auto";
+          };
+        };
+      };
+      testScript = ''
+        start_all()
+        router.systemctl("start network-online.target")
+        router.wait_for_unit("network-online.target")
+        client.wait_for_unit("NetworkManager.service")
+
+        with subtest("Wait until we have an ip address on each interface"):
+            client.wait_until_succeeds("ip addr show dev eth1 | grep -q '192.168.1'")
+            client.wait_until_succeeds("ip addr show dev eth1 | grep -q 'fd00:1234:5678:1:'")
+
+        with subtest("Test if icmp echo works"):
+            client.wait_until_succeeds("ping -c 1 192.168.1.1")
+            client.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::1")
+            router.wait_until_succeeds("ping -c 1 192.168.1.2")
+            router.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::2")
+      '';
+    };
+    dns = {
+      name = "dns";
+      nodes = {
+        inherit router;
+        dynamic = clientConfig {
+          networking.networkmanager.ensureProfiles.profiles.default = {
+            ipv4.method = "auto";
+          };
+        };
+        static = clientConfig {
+          networking.networkmanager.ensureProfiles.profiles.default = {
+            ipv4 = {
+              method = "auto";
+              ignore-auto-dns = "true";
+              dns = "10.10.10.10";
+              dns-search = "";
+            };
+          };
+        };
+      };
+      testScript = ''
+        start_all()
+        router.systemctl("start network-online.target")
+        router.wait_for_unit("network-online.target")
+        dynamic.wait_for_unit("NetworkManager.service")
+        static.wait_for_unit("NetworkManager.service")
+
+        dynamic.wait_until_succeeds("cat /etc/resolv.conf | grep -q '192.168.1.1'")
+        static.wait_until_succeeds("cat /etc/resolv.conf | grep -q '10.10.10.10'")
+        static.wait_until_fails("cat /etc/resolv.conf | grep -q '192.168.1.1'")
+      '';
+    };
+    dispatcherScripts = {
+      name = "dispatcherScripts";
+      nodes.client = clientConfig {
+        networking.networkmanager.dispatcherScripts = [{
+          type = "pre-up";
+          source = pkgs.writeText "testHook" ''
+            touch /tmp/dispatcher-scripts-are-working
+          '';
+        }];
+      };
+      testScript = ''
+        start_all()
+        client.wait_for_unit("NetworkManager.service")
+        client.wait_until_succeeds("stat /tmp/dispatcher-scripts-are-working")
+      '';
+    };
+    envsubst = {
+      name = "envsubst";
+      nodes.client = let
+        # Never write secrets into your NixOS configuration; use tools like sops-nix or agenix instead.
+        secretFile = pkgs.writeText "my-secret.env" ''
+          MY_SECRET_IP=fd00:1234:5678:1::23/64
+        '';
+      in clientConfig {
+        networking.networkmanager.ensureProfiles.environmentFiles = [ secretFile ];
+        networking.networkmanager.ensureProfiles.profiles.default = {
+          ipv6.method = "manual";
+          ipv6.addresses = "$MY_SECRET_IP";
+        };
+      };
+      testScript = ''
+        start_all()
+        client.wait_for_unit("NetworkManager.service")
+        client.wait_until_succeeds("ip addr show dev eth1 | grep -q 'fd00:1234:5678:1:'")
+        client.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::23")
+      '';
+    };
+  };
+in lib.mapAttrs (lib.const (attrs: makeTest (attrs // {
+  name = "${attrs.name}-Networking-NetworkManager";
+  meta = {
+    maintainers = with lib.maintainers; [ janik ];
+  };
+
+}))) testCases
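
Each test case waits for addresses to appear on eth1, which implicitly proves that the ensured "default" profile activated. A hypothetical, more direct assertion (not part of the cases above; nmcli is available on the clients because networking.networkmanager.enable is set) could query NetworkManager itself:

    # Hypothetical extra check: the ensured profile should report as activated.
    client.wait_until_succeeds(
        "nmcli -g GENERAL.STATE connection show default | grep -q activated"
    )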
diff --git a/nixos/tests/prometheus-exporters.nix b/nixos/tests/prometheus-exporters.nix
index 3dc368e320ff2..576253450814f 100644
--- a/nixos/tests/prometheus-exporters.nix
+++ b/nixos/tests/prometheus-exporters.nix
@@ -227,6 +227,54 @@ let
       '';
     };
 
+    dnssec = {
+      exporterConfig = {
+        enable = true;
+        configuration = {
+          records = [
+            {
+              zone = "example.com";
+              record = "@";
+              type = "SOA";
+            }
+          ];
+        };
+        resolvers = [ "127.0.0.1:53" ];
+      };
+      metricProvider = {
+        services.knot = {
+          enable = true;
+          settingsFile = pkgs.writeText "knot.conf" ''
+            server:
+              listen: 127.0.0.1@53
+            template:
+              - id: default
+                storage: ${pkgs.buildEnv {
+                  name = "zones";
+                  paths = [(pkgs.writeTextDir "example.com.zone" ''
+                    @ SOA ns1.example.com. noc.example.com. 2024032401 86400 7200 3600000 172800
+                    @       NS      ns1
+                    ns1     A       192.168.0.1
+                  '')];
+                }}
+                zonefile-load: difference
+                zonefile-sync: -1
+            zone:
+              - domain: example.com
+                file: example.com.zone
+                dnssec-signing: on
+          '';
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("knot.service")
+        wait_for_open_port(53)
+        wait_for_unit("prometheus-dnssec-exporter.service")
+        wait_for_open_port(9204)
+        succeed("curl -sSf http://localhost:9204/metrics | grep 'example.com'")
+      '';
+    };
+
     # Access to WHOIS server is required to properly test this exporter, so
     # just perform basic sanity check that the exporter is running and returns
     # a failure.
diff --git a/nixos/tests/systemd-networkd-bridge.nix b/nixos/tests/systemd-networkd-bridge.nix
new file mode 100644
index 0000000000000..f1f8823e84205
--- /dev/null
+++ b/nixos/tests/systemd-networkd-bridge.nix
@@ -0,0 +1,103 @@
+/* This test ensures that we can configure spanning-tree protocol
+   across bridges using systemd-networkd.
+
+   Test topology:
+
+              1       2       3
+       node1 --- sw1 --- sw2 --- node2
+                   \     /
+                  4 \   / 5
+                     sw3
+                      |
+                    6 |
+                      |
+                    node3
+
+   where switches 1, 2, and 3 bridge their links and use STP,
+   and each link is labeled with the VLAN we are assigning it in
+   virtualisation.vlans.
+*/
+with builtins;
+let
+  commonConf = {
+    systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+    networking.useNetworkd = true;
+    networking.useDHCP = false;
+    networking.firewall.enable = false;
+  };
+
+  generateNodeConf = { octet, vlan }:
+    { lib, pkgs, config, ... }: {
+      imports = [ common/user-account.nix commonConf ];
+      virtualisation.vlans = [ vlan ];
+      systemd.network = {
+        enable = true;
+        networks = {
+          "30-eth" = {
+            matchConfig.Name = "eth1";
+            address = [ "10.0.0.${toString octet}/24" ];
+          };
+        };
+      };
+    };
+
+  generateSwitchConf = vlans:
+    { lib, pkgs, config, ... }: {
+      imports = [ common/user-account.nix commonConf ];
+      virtualisation.vlans = vlans;
+      systemd.network = {
+        enable = true;
+        netdevs = {
+          "40-br0" = {
+            netdevConfig = {
+              Kind = "bridge";
+              Name = "br0";
+            };
+            bridgeConfig.STP = "yes";
+          };
+        };
+        networks = {
+          "30-eth" = {
+            matchConfig.Name = "eth*";
+            networkConfig.Bridge = "br0";
+          };
+          "40-br0" = { matchConfig.Name = "br0"; };
+        };
+      };
+    };
+in import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "networkd";
+  meta = with pkgs.lib.maintainers; { maintainers = [ picnoir ]; };
+  nodes = {
+    node1 = generateNodeConf {
+      octet = 1;
+      vlan = 1;
+    };
+    node2 = generateNodeConf {
+      octet = 2;
+      vlan = 3;
+    };
+    node3 = generateNodeConf {
+      octet = 3;
+      vlan = 6;
+    };
+    sw1 = generateSwitchConf [ 1 2 4 ];
+    sw2 = generateSwitchConf [ 2 3 5 ];
+    sw3 = generateSwitchConf [ 4 5 6 ];
+  };
+  testScript = ''
+    network_nodes = [node1, node2, node3]
+    network_switches = [sw1, sw2, sw3]
+    start_all()
+
+    for n in network_nodes + network_switches:
+        n.wait_for_unit("systemd-networkd-wait-online.service")
+
+    node1.succeed("ping 10.0.0.2 -w 10 -c 1")
+    node1.succeed("ping 10.0.0.3 -w 10 -c 1")
+    node2.succeed("ping 10.0.0.1 -w 10 -c 1")
+    node2.succeed("ping 10.0.0.3 -w 10 -c 1")
+    node3.succeed("ping 10.0.0.1 -w 10 -c 1")
+    node3.succeed("ping 10.0.0.2 -w 10 -c 1")
+  '';
+})
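
The ping matrix above only proves end-to-end connectivity across the bridged switches. To additionally confirm that STP is enabled on each bridge, a hypothetical extra check (not part of the test above) could read the kernel's sysfs state:

    # Hypothetical: /sys/class/net/<bridge>/bridge/stp_state is 1 when kernel STP is enabled.
    for sw in network_switches:
        sw.succeed('test "$(cat /sys/class/net/br0/bridge/stp_state)" -eq 1')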
diff --git a/nixos/tests/teleport.nix b/nixos/tests/teleport.nix
index d68917c6c7acb..2fb347155759a 100644
--- a/nixos/tests/teleport.nix
+++ b/nixos/tests/teleport.nix
@@ -9,8 +9,8 @@ with import ../lib/testing-python.nix { inherit system pkgs; };
 let
   packages = with pkgs; {
     "default" = teleport;
-    "12" = teleport_12;
     "13" = teleport_13;
+    "14" = teleport_14;
   };
 
   minimal = package: {