about summary refs log tree commit diff
path: root/nixos/tests
diff options
context:
space:
mode:
authorgithub-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>2023-02-07 00:02:12 +0000
committerGitHub <noreply@github.com>2023-02-07 00:02:12 +0000
commitb30088fc3fdf6294c457a0184ffb9967f29d389e (patch)
tree0b18a33167a17c1ce79b42b975b4e4f1e984c7d0 /nixos/tests
parentc92b3924fec8b9b7b0bfbe60f173751459b55c86 (diff)
parent1bda69b4294492363df8716263aa06ed6637148c (diff)
Merge master into staging-next
Diffstat (limited to 'nixos/tests')
-rw-r--r--nixos/tests/all-tests.nix1
-rw-r--r--nixos/tests/cockpit.nix135
-rw-r--r--nixos/tests/cups-pdf.nix2
-rw-r--r--nixos/tests/nebula.nix207
-rw-r--r--nixos/tests/pgadmin4.nix224
5 files changed, 398 insertions, 171 deletions
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 7b1159d667151..7ee83be6672e4 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -134,6 +134,7 @@ in {
   cloud-init-hostname = handleTest ./cloud-init-hostname.nix {};
   cloudlog = handleTest ./cloudlog.nix {};
   cntr = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cntr.nix {};
+  cockpit = handleTest ./cockpit.nix {};
   cockroachdb = handleTestOn ["x86_64-linux"] ./cockroachdb.nix {};
   collectd = handleTest ./collectd.nix {};
   connman = handleTest ./connman.nix {};
diff --git a/nixos/tests/cockpit.nix b/nixos/tests/cockpit.nix
new file mode 100644
index 0000000000000..4a4983f9bc4e6
--- /dev/null
+++ b/nixos/tests/cockpit.nix
@@ -0,0 +1,135 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+
+  let
+    user = "alice"; # from ./common/user-account.nix
+    password = "foobar"; # from ./common/user-account.nix
+  in {
+    name = "cockpit";
+    meta = {
+      maintainers = with lib.maintainers; [ lucasew ];
+    };
+    nodes = {
+      server = { config, ... }: {
+        imports = [ ./common/user-account.nix ];
+        security.polkit.enable = true;
+        users.users.${user} = {
+          extraGroups = [ "wheel" ];
+        };
+        services.cockpit = {
+          enable = true;
+          openFirewall = true;
+          settings = {
+            WebService = {
+              Origins = "https://server:9090";
+            };
+          };
+        };
+      };
+      client = { config, ... }: {
+        imports = [ ./common/user-account.nix ];
+        environment.systemPackages = let
+          seleniumScript = pkgs.writers.writePython3Bin "selenium-script" {
+            libraries = with pkgs.python3Packages; [ selenium ];
+            } ''
+            from selenium import webdriver
+            from selenium.webdriver.common.by import By
+            from selenium.webdriver.firefox.options import Options
+            from selenium.webdriver.support.ui import WebDriverWait
+            from selenium.webdriver.support import expected_conditions as EC
+            from time import sleep
+
+
+            def log(msg):
+                from sys import stderr
+                print(f"[*] {msg}", file=stderr)
+
+
+            log("Initializing")
+
+            options = Options()
+            options.add_argument("--headless")
+
+            driver = webdriver.Firefox(options=options)
+
+            driver.implicitly_wait(10)
+
+            log("Opening homepage")
+            driver.get("https://server:9090")
+
+            wait = WebDriverWait(driver, 60)
+
+
+            def wait_elem(by, query):
+                wait.until(EC.presence_of_element_located((by, query)))
+
+
+            def wait_title_contains(title):
+                wait.until(EC.title_contains(title))
+
+
+            def find_element(by, query):
+                return driver.find_element(by, query)
+
+
+            def set_value(elem, value):
+                script = 'arguments[0].value = arguments[1]'
+                return driver.execute_script(script, elem, value)
+
+
+            log("Waiting for the homepage to load")
+
+            # cockpit sets initial title as hostname
+            wait_title_contains("server")
+            wait_elem(By.CSS_SELECTOR, 'input#login-user-input')
+
+            log("Homepage loaded!")
+
+            log("Filling out username")
+            login_input = find_element(By.CSS_SELECTOR, 'input#login-user-input')
+            set_value(login_input, "${user}")
+
+            log("Filling out password")
+            password_input = find_element(By.CSS_SELECTOR, 'input#login-password-input')
+            set_value(password_input, "${password}")
+
+            log("Submitting credentials for login")
+            driver.find_element(By.CSS_SELECTOR, 'button#login-button').click()
+
+            # driver.implicitly_wait(1)
+            # driver.get("https://server:9090/system")
+
+            log("Waiting for the dashboard to load")
+            wait_title_contains("${user}@server")
+
+            log("Waiting for the frontend to initialize")
+            sleep(1)
+
+            log("Looking for that banner that tells about limited access")
+            container_iframe = find_element(By.CSS_SELECTOR, 'iframe.container-frame')
+            driver.switch_to.frame(container_iframe)
+
+            assert "Web console is running in limited access mode" in driver.page_source
+
+            driver.close()
+          '';
+        in with pkgs; [ firefox-unwrapped geckodriver seleniumScript ];
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      server.wait_for_open_port(9090)
+      server.wait_for_unit("network.target")
+      server.wait_for_unit("multi-user.target")
+      server.systemctl("start", "polkit")
+
+      client.wait_for_unit("multi-user.target")
+
+      client.succeed("curl -k https://server:9090 -o /dev/stderr")
+      print(client.succeed("whoami"))
+      client.succeed('PYTHONUNBUFFERED=1 selenium-script')
+    '';
+  }
+)
diff --git a/nixos/tests/cups-pdf.nix b/nixos/tests/cups-pdf.nix
index 70d14f29e2e5d..957b0296a755b 100644
--- a/nixos/tests/cups-pdf.nix
+++ b/nixos/tests/cups-pdf.nix
@@ -23,7 +23,7 @@ import ./make-test-python.nix ({ lib, pkgs, ... }: {
 
   testScript = ''
     from subprocess import run
-    machine.wait_for_unit("cups.service")
+    machine.wait_for_unit("multi-user.target")
     for name in ("opt", "noopt"):
         text = f"test text {name}".upper()
         machine.wait_until_succeeds(f"lpstat -v {name}")
diff --git a/nixos/tests/nebula.nix b/nixos/tests/nebula.nix
index 372cfebdf801b..89b91d89fcb3f 100644
--- a/nixos/tests/nebula.nix
+++ b/nixos/tests/nebula.nix
@@ -10,6 +10,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: let
       environment.systemPackages = [ pkgs.nebula ];
       users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
       services.openssh.enable = true;
+      networking.interfaces.eth1.useDHCP = false;
 
       services.nebula.networks.smoke = {
         # Note that these paths won't exist when the machine is first booted.
@@ -30,13 +31,14 @@ in
 
     lighthouse = { ... } @ args:
       makeNebulaNode args "lighthouse" {
-        networking.interfaces.eth1.ipv4.addresses = [{
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
           address = "192.168.1.1";
           prefixLength = 24;
         }];
 
         services.nebula.networks.smoke = {
           isLighthouse = true;
+          isRelay = true;
           firewall = {
             outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
             inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
@@ -44,9 +46,9 @@ in
         };
       };
 
-    node2 = { ... } @ args:
-      makeNebulaNode args "node2" {
-        networking.interfaces.eth1.ipv4.addresses = [{
+    allowAny = { ... } @ args:
+      makeNebulaNode args "allowAny" {
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
           address = "192.168.1.2";
           prefixLength = 24;
         }];
@@ -55,6 +57,7 @@ in
           staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
           isLighthouse = false;
           lighthouses = [ "10.0.100.1" ];
+          relays = [ "10.0.100.1" ];
           firewall = {
             outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
             inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
@@ -62,9 +65,9 @@ in
         };
       };
 
-    node3 = { ... } @ args:
-      makeNebulaNode args "node3" {
-        networking.interfaces.eth1.ipv4.addresses = [{
+    allowFromLighthouse = { ... } @ args:
+      makeNebulaNode args "allowFromLighthouse" {
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
           address = "192.168.1.3";
           prefixLength = 24;
         }];
@@ -73,6 +76,7 @@ in
           staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
           isLighthouse = false;
           lighthouses = [ "10.0.100.1" ];
+          relays = [ "10.0.100.1" ];
           firewall = {
             outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
             inbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
@@ -80,9 +84,9 @@ in
         };
       };
 
-    node4 = { ... } @ args:
-      makeNebulaNode args "node4" {
-        networking.interfaces.eth1.ipv4.addresses = [{
+    allowToLighthouse = { ... } @ args:
+      makeNebulaNode args "allowToLighthouse" {
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
           address = "192.168.1.4";
           prefixLength = 24;
         }];
@@ -92,6 +96,7 @@ in
           staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
           isLighthouse = false;
           lighthouses = [ "10.0.100.1" ];
+          relays = [ "10.0.100.1" ];
           firewall = {
             outbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
             inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
@@ -99,9 +104,9 @@ in
         };
       };
 
-    node5 = { ... } @ args:
-      makeNebulaNode args "node5" {
-        networking.interfaces.eth1.ipv4.addresses = [{
+    disabled = { ... } @ args:
+      makeNebulaNode args "disabled" {
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
           address = "192.168.1.5";
           prefixLength = 24;
         }];
@@ -111,6 +116,7 @@ in
           staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
           isLighthouse = false;
           lighthouses = [ "10.0.100.1" ];
+          relays = [ "10.0.100.1" ];
           firewall = {
             outbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
             inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
@@ -123,12 +129,14 @@ in
   testScript = let
 
     setUpPrivateKey = name: ''
-    ${name}.succeed(
-        "mkdir -p /root/.ssh",
-        "chown 700 /root/.ssh",
-        "cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil",
-        "chown 600 /root/.ssh/id_snakeoil",
-    )
+      ${name}.start()
+      ${name}.succeed(
+          "mkdir -p /root/.ssh",
+          "chown 700 /root/.ssh",
+          "cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil",
+          "chown 600 /root/.ssh/id_snakeoil",
+          "mkdir -p /root"
+      )
     '';
 
     # From what I can tell, StrictHostKeyChecking=no is necessary for ssh to work between machines.
@@ -146,26 +154,48 @@ in
       ${name}.succeed(
           "mkdir -p /etc/nebula",
           "nebula-cert keygen -out-key /etc/nebula/${name}.key -out-pub /etc/nebula/${name}.pub",
-          "scp ${sshOpts} /etc/nebula/${name}.pub 192.168.1.1:/tmp/${name}.pub",
+          "scp ${sshOpts} /etc/nebula/${name}.pub root@192.168.1.1:/root/${name}.pub",
       )
       lighthouse.succeed(
-          'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "${name}" -groups "${name}" -ip "${ip}" -in-pub /tmp/${name}.pub -out-crt /tmp/${name}.crt',
+          'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "${name}" -groups "${name}" -ip "${ip}" -in-pub /root/${name}.pub -out-crt /root/${name}.crt'
       )
       ${name}.succeed(
-          "scp ${sshOpts} 192.168.1.1:/tmp/${name}.crt /etc/nebula/${name}.crt",
-          "scp ${sshOpts} 192.168.1.1:/etc/nebula/ca.crt /etc/nebula/ca.crt",
+          "scp ${sshOpts} root@192.168.1.1:/root/${name}.crt /etc/nebula/${name}.crt",
+          "scp ${sshOpts} root@192.168.1.1:/etc/nebula/ca.crt /etc/nebula/ca.crt",
+          '(id nebula-smoke >/dev/null && chown -R nebula-smoke:nebula-smoke /etc/nebula) || true'
       )
     '';
 
-  in ''
-    start_all()
+    getPublicIp = node: ''
+      ${node}.succeed("ip --brief addr show eth1 | awk '{print $3}' | tail -n1 | cut -d/ -f1").strip()
+    '';
 
+    # Never do this for anything security critical! (Thankfully it's just a test.)
+    # Restart Nebula right after the mutual block and/or restore so the state is fresh.
+    blockTrafficBetween = nodeA: nodeB: ''
+      node_a = ${getPublicIp nodeA}
+      node_b = ${getPublicIp nodeB}
+      ${nodeA}.succeed("iptables -I INPUT -s " + node_b + " -j DROP")
+      ${nodeB}.succeed("iptables -I INPUT -s " + node_a + " -j DROP")
+      ${nodeA}.systemctl("restart nebula@smoke.service")
+      ${nodeB}.systemctl("restart nebula@smoke.service")
+    '';
+    allowTrafficBetween = nodeA: nodeB: ''
+      node_a = ${getPublicIp nodeA}
+      node_b = ${getPublicIp nodeB}
+      ${nodeA}.succeed("iptables -D INPUT -s " + node_b + " -j DROP")
+      ${nodeB}.succeed("iptables -D INPUT -s " + node_a + " -j DROP")
+      ${nodeA}.systemctl("restart nebula@smoke.service")
+      ${nodeB}.systemctl("restart nebula@smoke.service")
+    '';
+  in ''
     # Create the certificate and sign the lighthouse's keys.
     ${setUpPrivateKey "lighthouse"}
     lighthouse.succeed(
         "mkdir -p /etc/nebula",
         'nebula-cert ca -name "Smoke Test" -out-crt /etc/nebula/ca.crt -out-key /etc/nebula/ca.key',
         'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "lighthouse" -groups "lighthouse" -ip "10.0.100.1/24" -out-crt /etc/nebula/lighthouse.crt -out-key /etc/nebula/lighthouse.key',
+        'chown -R nebula-smoke:nebula-smoke /etc/nebula'
     )
 
     # Reboot the lighthouse and verify that the nebula service comes up on boot.
@@ -175,49 +205,104 @@ in
     lighthouse.wait_for_unit("nebula@smoke.service")
     lighthouse.succeed("ping -c5 10.0.100.1")
 
-    # Create keys for node2's nebula service and test that it comes up.
-    ${setUpPrivateKey "node2"}
-    ${signKeysFor "node2" "10.0.100.2/24"}
-    ${restartAndCheckNebula "node2" "10.0.100.2"}
+    # Create keys for allowAny's nebula service and test that it comes up.
+    ${setUpPrivateKey "allowAny"}
+    ${signKeysFor "allowAny" "10.0.100.2/24"}
+    ${restartAndCheckNebula "allowAny" "10.0.100.2"}
 
-    # Create keys for node3's nebula service and test that it comes up.
-    ${setUpPrivateKey "node3"}
-    ${signKeysFor "node3" "10.0.100.3/24"}
-    ${restartAndCheckNebula "node3" "10.0.100.3"}
+    # Create keys for allowFromLighthouse's nebula service and test that it comes up.
+    ${setUpPrivateKey "allowFromLighthouse"}
+    ${signKeysFor "allowFromLighthouse" "10.0.100.3/24"}
+    ${restartAndCheckNebula "allowFromLighthouse" "10.0.100.3"}
 
-    # Create keys for node4's nebula service and test that it comes up.
-    ${setUpPrivateKey "node4"}
-    ${signKeysFor "node4" "10.0.100.4/24"}
-    ${restartAndCheckNebula "node4" "10.0.100.4"}
+    # Create keys for allowToLighthouse's nebula service and test that it comes up.
+    ${setUpPrivateKey "allowToLighthouse"}
+    ${signKeysFor "allowToLighthouse" "10.0.100.4/24"}
+    ${restartAndCheckNebula "allowToLighthouse" "10.0.100.4"}
 
-    # Create keys for node4's nebula service and test that it does not come up.
-    ${setUpPrivateKey "node5"}
-    ${signKeysFor "node5" "10.0.100.5/24"}
-    node5.fail("systemctl status nebula@smoke.service")
-    node5.fail("ping -c5 10.0.100.5")
+    # Create keys for disabled's nebula service and test that it does not come up.
+    ${setUpPrivateKey "disabled"}
+    ${signKeysFor "disabled" "10.0.100.5/24"}
+    disabled.fail("systemctl status nebula@smoke.service")
+    disabled.fail("ping -c5 10.0.100.5")
 
-    # The lighthouse can ping node2 and node3 but not node5
+    # The lighthouse can ping allowAny and allowFromLighthouse but not disabled
     lighthouse.succeed("ping -c3 10.0.100.2")
     lighthouse.succeed("ping -c3 10.0.100.3")
     lighthouse.fail("ping -c3 10.0.100.5")
 
-    # node2 can ping the lighthouse, but not node3 because of its inbound firewall
-    node2.succeed("ping -c3 10.0.100.1")
-    node2.fail("ping -c3 10.0.100.3")
-
-    # node3 can ping the lighthouse and node2
-    node3.succeed("ping -c3 10.0.100.1")
-    node3.succeed("ping -c3 10.0.100.2")
-
-    # node4 can ping the lighthouse but not node2 or node3
-    node4.succeed("ping -c3 10.0.100.1")
-    node4.fail("ping -c3 10.0.100.2")
-    node4.fail("ping -c3 10.0.100.3")
-
-    # node2 can ping node3 now that node3 pinged it first
-    node2.succeed("ping -c3 10.0.100.3")
-    # node4 can ping node2 if node2 pings it first
-    node2.succeed("ping -c3 10.0.100.4")
-    node4.succeed("ping -c3 10.0.100.2")
+    # allowAny can ping the lighthouse, but not allowFromLighthouse because of its inbound firewall
+    allowAny.succeed("ping -c3 10.0.100.1")
+    allowAny.fail("ping -c3 10.0.100.3")
+
+    # allowFromLighthouse can ping the lighthouse and allowAny
+    allowFromLighthouse.succeed("ping -c3 10.0.100.1")
+    allowFromLighthouse.succeed("ping -c3 10.0.100.2")
+
+    # block allowFromLighthouse <-> allowAny, and allowFromLighthouse -> allowAny should still work.
+    ${blockTrafficBetween "allowFromLighthouse" "allowAny"}
+    allowFromLighthouse.succeed("ping -c10 10.0.100.2")
+    ${allowTrafficBetween "allowFromLighthouse" "allowAny"}
+    allowFromLighthouse.succeed("ping -c10 10.0.100.2")
+
+    # allowToLighthouse can ping the lighthouse but not allowAny or allowFromLighthouse
+    allowToLighthouse.succeed("ping -c3 10.0.100.1")
+    allowToLighthouse.fail("ping -c3 10.0.100.2")
+    allowToLighthouse.fail("ping -c3 10.0.100.3")
+
+    # allowAny can ping allowFromLighthouse now that allowFromLighthouse pinged it first
+    allowAny.succeed("ping -c3 10.0.100.3")
+
+    # block allowAny <-> allowFromLighthouse, and allowAny -> allowFromLighthouse should still work.
+    ${blockTrafficBetween "allowAny" "allowFromLighthouse"}
+    allowFromLighthouse.succeed("ping -c10 10.0.100.2")
+    allowAny.succeed("ping -c10 10.0.100.3")
+    ${allowTrafficBetween "allowAny" "allowFromLighthouse"}
+    allowFromLighthouse.succeed("ping -c10 10.0.100.2")
+    allowAny.succeed("ping -c10 10.0.100.3")
+
+    # allowToLighthouse can ping allowAny if allowAny pings it first
+    allowAny.succeed("ping -c3 10.0.100.4")
+    allowToLighthouse.succeed("ping -c3 10.0.100.2")
+
+    # block allowToLighthouse <-> allowAny, and allowAny <-> allowToLighthouse should still work.
+    ${blockTrafficBetween "allowAny" "allowToLighthouse"}
+    allowAny.succeed("ping -c10 10.0.100.4")
+    allowToLighthouse.succeed("ping -c10 10.0.100.2")
+    ${allowTrafficBetween "allowAny" "allowToLighthouse"}
+    allowAny.succeed("ping -c10 10.0.100.4")
+    allowToLighthouse.succeed("ping -c10 10.0.100.2")
+
+    # block lighthouse <-> allowFromLighthouse and allowAny <-> allowFromLighthouse; allowFromLighthouse won't get to allowAny
+    ${blockTrafficBetween "allowFromLighthouse" "lighthouse"}
+    ${blockTrafficBetween "allowFromLighthouse" "allowAny"}
+    allowFromLighthouse.fail("ping -c3 10.0.100.2")
+    ${allowTrafficBetween "allowFromLighthouse" "lighthouse"}
+    ${allowTrafficBetween "allowFromLighthouse" "allowAny"}
+    allowFromLighthouse.succeed("ping -c3 10.0.100.2")
+
+    # block lighthouse <-> allowAny, allowAny <-> allowFromLighthouse, and allowAny <-> allowToLighthouse; it won't get to allowFromLighthouse or allowToLighthouse
+    ${blockTrafficBetween "allowAny" "lighthouse"}
+    ${blockTrafficBetween "allowAny" "allowFromLighthouse"}
+    ${blockTrafficBetween "allowAny" "allowToLighthouse"}
+    allowFromLighthouse.fail("ping -c3 10.0.100.2")
+    allowAny.fail("ping -c3 10.0.100.3")
+    allowAny.fail("ping -c3 10.0.100.4")
+    ${allowTrafficBetween "allowAny" "lighthouse"}
+    ${allowTrafficBetween "allowAny" "allowFromLighthouse"}
+    ${allowTrafficBetween "allowAny" "allowToLighthouse"}
+    allowFromLighthouse.succeed("ping -c3 10.0.100.2")
+    allowAny.succeed("ping -c3 10.0.100.3")
+    allowAny.succeed("ping -c3 10.0.100.4")
+
+    # block lighthouse <-> allowToLighthouse and allowToLighthouse <-> allowAny; it won't get to allowAny
+    ${blockTrafficBetween "allowToLighthouse" "lighthouse"}
+    ${blockTrafficBetween "allowToLighthouse" "allowAny"}
+    allowAny.fail("ping -c3 10.0.100.4")
+    allowToLighthouse.fail("ping -c3 10.0.100.2")
+    ${allowTrafficBetween "allowToLighthouse" "lighthouse"}
+    ${allowTrafficBetween "allowToLighthouse" "allowAny"}
+    allowAny.succeed("ping -c3 10.0.100.4")
+    allowToLighthouse.succeed("ping -c3 10.0.100.2")
   '';
 })
diff --git a/nixos/tests/pgadmin4.nix b/nixos/tests/pgadmin4.nix
index 2a2b5aaa2841d..3b98a5257a13a 100644
--- a/nixos/tests/pgadmin4.nix
+++ b/nixos/tests/pgadmin4.nix
@@ -1,6 +1,6 @@
 import ./make-test-python.nix ({ pkgs, lib, buildDeps ? [ ], pythonEnv ? [ ], ... }:
 
-  /*
+/*
   This test suite replaces the typical pytestCheckHook function in python
   packages. Pgadmin4 test suite needs a running and configured postgresql
   server. This is why this test exists.
@@ -17,117 +17,123 @@ import ./make-test-python.nix ({ pkgs, lib, buildDeps ? [ ], pythonEnv ? [ ], ..
 
   */
 
-  let
-    pgadmin4SrcDir = "/pgadmin";
-    pgadmin4Dir = "/var/lib/pgadmin";
-    pgadmin4LogDir = "/var/log/pgadmin";
-
-  in
-  {
-    name = "pgadmin4";
-    meta.maintainers = with lib.maintainers; [ gador ];
-
-    nodes.machine = { pkgs, ... }: {
-      imports = [ ./common/x11.nix ];
-      # needed because pgadmin 6.8 will fail, if those dependencies get updated
-      nixpkgs.overlays = [
-        (self: super: {
-          pythonPackages = pythonEnv;
-        })
-      ];
-
-      environment.systemPackages = with pkgs; [
-        pgadmin4
-        postgresql
-        chromedriver
-        chromium
-        # include the same packages as in pgadmin minus speaklater3
-        (python3.withPackages
-          (ps: buildDeps ++
-            [
-              # test suite package requirements
-              pythonPackages.testscenarios
-              pythonPackages.selenium
-            ])
-        )
+let
+  pgadmin4SrcDir = "/pgadmin";
+  pgadmin4Dir = "/var/lib/pgadmin";
+  pgadmin4LogDir = "/var/log/pgadmin";
+
+in
+{
+  name = "pgadmin4";
+  meta.maintainers = with lib.maintainers; [ gador ];
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ ./common/x11.nix ];
+    # needed because pgadmin 6.8 will fail, if those dependencies get updated
+    nixpkgs.overlays = [
+      (self: super: {
+        pythonPackages = pythonEnv;
+      })
+    ];
+
+    environment.systemPackages = with pkgs; [
+      pgadmin4
+      postgresql
+      chromedriver
+      chromium
+      # include the same packages as in pgadmin minus speaklater3
+      (python3.withPackages
+        (ps: buildDeps ++
+          [
+            # test suite package requirements
+            pythonPackages.testscenarios
+            pythonPackages.selenium
+          ])
+      )
+    ];
+    services.postgresql = {
+      enable = true;
+      authentication = ''
+        host    all             all             localhost               trust
+      '';
+      ensureUsers = [
+        {
+          name = "postgres";
+          ensurePermissions = {
+            "DATABASE \"postgres\"" = "ALL PRIVILEGES";
+          };
+        }
       ];
-      services.postgresql = {
-        enable = true;
-        authentication = ''
-          host    all             all             localhost               trust
-        '';
-        ensureUsers = [
-          {
-            name = "postgres";
-            ensurePermissions = {
-              "DATABASE \"postgres\"" = "ALL PRIVILEGES";
-            };
-          }
-        ];
-      };
     };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("postgresql")
+
+    # pgadmin4 needs its data and log directories
+    machine.succeed(
+        "mkdir -p ${pgadmin4Dir} \
+        && mkdir -p ${pgadmin4LogDir} \
+        && mkdir -p ${pgadmin4SrcDir}"
+    )
+
+    machine.succeed(
+         "tar xvzf ${pkgs.pgadmin4.src} -C ${pgadmin4SrcDir}"
+    )
+
+    machine.wait_for_file("${pgadmin4SrcDir}/pgadmin4-${pkgs.pgadmin4.version}/README.md")
+
+    # set paths and config for tests
+    # also ensure Server Mode is set to false, which will automatically exclude some unnecessary tests.
+    # see https://github.com/pgadmin-org/pgadmin4/blob/fd1c26408bbf154fa455a49ee5c12895933833a3/web/regression/runtests.py#L217-L226
+    machine.succeed(
+        "cd ${pgadmin4SrcDir}/pgadmin4-${pkgs.pgadmin4.version} \
+        && cp -v web/regression/test_config.json.in web/regression/test_config.json \
+        && sed -i 's|PostgreSQL 9.4|PostgreSQL|' web/regression/test_config.json \
+        && sed -i 's|/opt/PostgreSQL/9.4/bin/|${pkgs.postgresql}/bin|' web/regression/test_config.json \
+        && sed -i 's|\"headless_chrome\": false|\"headless_chrome\": true|' web/regression/test_config.json \
+        && sed -i 's|builtins.SERVER_MODE = None|builtins.SERVER_MODE = False|' web/regression/runtests.py"
+    )
+
+    # adapt chrome config to run within a sandbox without GUI
+    # see https://stackoverflow.com/questions/50642308/webdriverexception-unknown-error-devtoolsactiveport-file-doesnt-exist-while-t#50642913
+    # add chrome binary path. use spaces to satisfy python indention (tabs throw an error)
+    machine.succeed(
+         "cd ${pgadmin4SrcDir}/pgadmin4-${pkgs.pgadmin4.version} \
+         && sed -i '\|options.add_argument(\"--disable-infobars\")|a \ \ \ \ \ \ \ \ options.binary_location = \"${pkgs.chromium}/bin/chromium\"' web/regression/runtests.py \
+         && sed -i '\|options.add_argument(\"--no-sandbox\")|a \ \ \ \ \ \ \ \ options.add_argument(\"--headless\")' web/regression/runtests.py \
+         && sed -i '\|options.add_argument(\"--disable-infobars\")|a \ \ \ \ \ \ \ \ options.add_argument(\"--disable-dev-shm-usage\")' web/regression/runtests.py \
+         && sed -i 's|(chrome_options=options)|(executable_path=\"${pkgs.chromedriver}/bin/chromedriver\", chrome_options=options)|' web/regression/runtests.py \
+         && sed -i 's|driver_local.maximize_window()||' web/regression/runtests.py"
+    )
+
+    # don't bother to test kerberos authentication
+    excluded_tests = [ "browser.tests.test_kerberos_with_mocking",
+                       ]
+
+    with subtest("run browser test"):
+        machine.succeed(
+             'cd ${pgadmin4SrcDir}/pgadmin4-${pkgs.pgadmin4.version}/web \
+             && python regression/runtests.py \
+             --pkg browser \
+             --exclude ' + ','.join(excluded_tests)
+        )
 
-    testScript = ''
-      machine.wait_for_unit("postgresql")
-
-      # pgadmin4 needs its data and log directories
-      machine.succeed(
-          "mkdir -p ${pgadmin4Dir} \
-          && mkdir -p ${pgadmin4LogDir} \
-          && mkdir -p ${pgadmin4SrcDir}"
-      )
-
-      machine.succeed(
-           "tar xvzf ${pkgs.pgadmin4.src} -C ${pgadmin4SrcDir}"
-      )
-
-      machine.wait_for_file("${pgadmin4SrcDir}/pgadmin4-${pkgs.pgadmin4.version}/README.md")
-
-      # set paths and config for tests
-      machine.succeed(
-           "cd ${pgadmin4SrcDir}/pgadmin4-${pkgs.pgadmin4.version} \
-           && cp -v web/regression/test_config.json.in web/regression/test_config.json \
-           && sed -i 's|PostgreSQL 9.4|PostgreSQL|' web/regression/test_config.json \
-           && sed -i 's|/opt/PostgreSQL/9.4/bin/|${pkgs.postgresql}/bin|' web/regression/test_config.json \
-           && sed -i 's|\"headless_chrome\": false|\"headless_chrome\": true|' web/regression/test_config.json"
-      )
-
-      # adapt chrome config to run within a sandbox without GUI
-      # see https://stackoverflow.com/questions/50642308/webdriverexception-unknown-error-devtoolsactiveport-file-doesnt-exist-while-t#50642913
-      # add chrome binary path. use spaces to satisfy python indention (tabs throw an error)
-      # this works for selenium 3 (currently used), but will need to be updated
-      # to work with "from selenium.webdriver.chrome.service import Service" in selenium 4
+    with subtest("run resql test"):
       machine.succeed(
-           "cd ${pgadmin4SrcDir}/pgadmin4-${pkgs.pgadmin4.version} \
-           && sed -i '\|options.add_argument(\"--disable-infobars\")|a \ \ \ \ \ \ \ \ options.binary_location = \"${pkgs.chromium}/bin/chromium\"' web/regression/runtests.py \
-           && sed -i '\|options.add_argument(\"--no-sandbox\")|a \ \ \ \ \ \ \ \ options.add_argument(\"--headless\")' web/regression/runtests.py \
-           && sed -i '\|options.add_argument(\"--disable-infobars\")|a \ \ \ \ \ \ \ \ options.add_argument(\"--disable-dev-shm-usage\")' web/regression/runtests.py \
-           && sed -i 's|(chrome_options=options)|(executable_path=\"${pkgs.chromedriver}/bin/chromedriver\", chrome_options=options)|' web/regression/runtests.py \
-           && sed -i 's|driver_local.maximize_window()||' web/regression/runtests.py"
+           'cd ${pgadmin4SrcDir}/pgadmin4-${pkgs.pgadmin4.version}/web \
+           && python regression/runtests.py --pkg resql'
       )
 
-      # Don't bother to test LDAP or kerberos authentication
-      with subtest("run browser test"):
-          machine.succeed(
-               'cd ${pgadmin4SrcDir}/pgadmin4-${pkgs.pgadmin4.version}/web \
-               && python regression/runtests.py \
-               --pkg browser \
-               --exclude browser.tests.test_ldap_login.LDAPLoginTestCase,browser.tests.test_ldap_login,browser.tests.test_kerberos_with_mocking'
-          )
-
-      # fontconfig is necessary for chromium to run
-      # https://github.com/NixOS/nixpkgs/issues/136207
-      with subtest("run feature test"):
-          machine.succeed(
-              'cd ${pgadmin4SrcDir}/pgadmin4-${pkgs.pgadmin4.version}/web \
-               && export FONTCONFIG_FILE=${pkgs.makeFontsConf { fontDirectories = [];}} \
-               && python regression/runtests.py --pkg feature_tests'
-          )
-
-      with subtest("run resql test"):
-         machine.succeed(
-              'cd ${pgadmin4SrcDir}/pgadmin4-${pkgs.pgadmin4.version}/web \
-              && python regression/runtests.py --pkg resql'
-         )
-    '';
-  })
+    # fontconfig is necessary for chromium to run
+    # https://github.com/NixOS/nixpkgs/issues/136207
+    # also, the feature_tests require Server Mode = True
+    with subtest("run feature test"):
+       machine.succeed(
+           'cd ${pgadmin4SrcDir}/pgadmin4-${pkgs.pgadmin4.version}/web \
+            && export FONTCONFIG_FILE=${pkgs.makeFontsConf { fontDirectories = [];}} \
+            && sed -i \'s|builtins.SERVER_MODE = False|builtins.SERVER_MODE = True|\' regression/runtests.py \
+            && python regression/runtests.py --pkg feature_tests'
+       )
+  '';
+})