Diffstat (limited to 'nixos')
-rw-r--r--  nixos/doc/manual/default.nix | 3
-rw-r--r--  nixos/doc/manual/development/sources.xml | 19
-rw-r--r--  nixos/doc/manual/manual.xml | 27
-rw-r--r--  nixos/doc/manual/preface.xml | 37
-rw-r--r--  nixos/doc/manual/release-notes/rl-2003.xml | 18
-rwxr-xr-x  nixos/maintainers/scripts/ec2/create-amis.sh | 2
-rwxr-xr-x  nixos/maintainers/scripts/gce/create-gce.sh | 2
-rw-r--r--  nixos/modules/config/i18n.nix | 6
-rw-r--r--  nixos/modules/config/no-x-libs.nix | 1
-rw-r--r--  nixos/modules/config/pulseaudio.nix | 5
-rw-r--r--  nixos/modules/config/shells-environment.nix | 14
-rw-r--r--  nixos/modules/config/update-users-groups.pl | 4
-rw-r--r--  nixos/modules/config/users-groups.nix | 2
-rw-r--r--  nixos/modules/installer/tools/tools.nix | 6
-rw-r--r--  nixos/modules/module-list.nix | 8
-rw-r--r--  nixos/modules/profiles/installation-device.nix | 3
-rw-r--r--  nixos/modules/programs/gnupg.nix | 41
-rw-r--r--  nixos/modules/programs/shadow.nix | 14
-rw-r--r--  nixos/modules/programs/x2goserver.nix | 1
-rw-r--r--  nixos/modules/rename.nix | 3
-rw-r--r--  nixos/modules/security/acme.nix | 55
-rw-r--r--  nixos/modules/services/admin/oxidized.nix | 1
-rw-r--r--  nixos/modules/services/audio/jack.nix | 1
-rw-r--r--  nixos/modules/services/backup/automysqlbackup.nix | 5
-rw-r--r--  nixos/modules/services/backup/znapzend.nix | 28
-rw-r--r--  nixos/modules/services/continuous-integration/buildkite-agent.nix | 1
-rw-r--r--  nixos/modules/services/databases/redis.nix | 5
-rw-r--r--  nixos/modules/services/databases/rethinkdb.nix | 1
-rw-r--r--  nixos/modules/services/desktops/geoclue2.nix | 41
-rw-r--r--  nixos/modules/services/editors/infinoted.nix | 1
-rw-r--r--  nixos/modules/services/games/openarena.nix | 56
-rw-r--r--  nixos/modules/services/hardware/fancontrol.nix | 39
-rw-r--r--  nixos/modules/services/hardware/trezord.nix | 16
-rw-r--r--  nixos/modules/services/hardware/udisks2.nix | 5
-rw-r--r--  nixos/modules/services/hardware/usbmuxd.nix | 1
-rw-r--r--  nixos/modules/services/hardware/vdr.nix | 1
-rw-r--r--  nixos/modules/services/mail/mailhog.nix | 1
-rw-r--r--  nixos/modules/services/misc/airsonic.nix | 1
-rw-r--r--  nixos/modules/services/misc/docker-registry.nix | 6
-rw-r--r--  nixos/modules/services/misc/errbot.nix | 5
-rw-r--r--  nixos/modules/services/misc/gitea.nix | 1
-rw-r--r--  nixos/modules/services/misc/gitlab.nix | 8
-rw-r--r--  nixos/modules/services/misc/gollum.nix | 1
-rw-r--r--  nixos/modules/services/misc/jellyfin.nix | 5
-rw-r--r--  nixos/modules/services/misc/nix-daemon.nix | 2
-rw-r--r--  nixos/modules/services/misc/osrm.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/collectd.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/fusion-inventory.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/netdata.nix | 1
-rw-r--r--  nixos/modules/services/monitoring/zabbix-agent.nix | 1
-rw-r--r--  nixos/modules/services/network-filesystems/orangefs/client.nix | 97
-rw-r--r--  nixos/modules/services/network-filesystems/orangefs/server.nix | 225
-rw-r--r--  nixos/modules/services/networking/bitcoind.nix | 1
-rw-r--r--  nixos/modules/services/networking/dnscache.nix | 2
-rw-r--r--  nixos/modules/services/networking/dnscrypt-wrapper.nix | 1
-rw-r--r--  nixos/modules/services/networking/go-shadowsocks2.nix | 30
-rw-r--r--  nixos/modules/services/networking/hans.nix | 1
-rw-r--r--  nixos/modules/services/networking/matterbridge.nix | 1
-rw-r--r--  nixos/modules/services/networking/morty.nix | 1
-rw-r--r--  nixos/modules/services/networking/nghttpx/default.nix | 1
-rw-r--r--  nixos/modules/services/networking/owamp.nix | 1
-rw-r--r--  nixos/modules/services/networking/stunnel.nix | 15
-rw-r--r--  nixos/modules/services/networking/thelounge.nix | 1
-rw-r--r--  nixos/modules/services/networking/tinydns.nix | 2
-rw-r--r--  nixos/modules/services/networking/trickster.nix | 112
-rw-r--r--  nixos/modules/services/networking/yggdrasil.nix | 193
-rw-r--r--  nixos/modules/services/printing/cupsd.nix | 2
-rw-r--r--  nixos/modules/services/scheduling/marathon.nix | 2
-rw-r--r--  nixos/modules/services/security/bitwarden_rs/default.nix | 5
-rw-r--r--  nixos/modules/services/security/oauth2_proxy.nix | 1
-rw-r--r--  nixos/modules/services/torrent/magnetico.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/codimd.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/frab.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/gotify-server.nix | 49
-rw-r--r--  nixos/modules/services/web-apps/limesurvey.nix | 5
-rw-r--r--  nixos/modules/services/web-apps/matomo-doc.xml | 2
-rw-r--r--  nixos/modules/services/web-apps/matomo.nix | 56
-rw-r--r--  nixos/modules/services/web-apps/mediawiki.nix | 5
-rw-r--r--  nixos/modules/services/web-apps/moodle.nix | 6
-rw-r--r--  nixos/modules/services/web-apps/nextcloud.nix | 4
-rw-r--r--  nixos/modules/services/web-apps/nexus.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/virtlyst.nix | 1
-rw-r--r--  nixos/modules/services/web-apps/wordpress.nix | 5
-rw-r--r--  nixos/modules/services/web-servers/hitch/default.nix | 5
-rw-r--r--  nixos/modules/services/web-servers/traefik.nix | 1
-rw-r--r--  nixos/modules/services/web-servers/unit/default.nix | 1
-rw-r--r--  nixos/modules/system/boot/networkd.nix | 2
-rw-r--r--  nixos/modules/tasks/network-interfaces-systemd.nix | 2
-rw-r--r--  nixos/modules/virtualisation/amazon-image.nix | 3
-rw-r--r--  nixos/modules/virtualisation/ec2-amis.nix | 18
-rw-r--r--  nixos/modules/virtualisation/virtualbox-host.nix | 9
-rw-r--r--  nixos/tests/acme.nix | 27
-rw-r--r--  nixos/tests/all-tests.nix | 10
-rw-r--r--  nixos/tests/ceph-multi-node.nix | 247
-rw-r--r--  nixos/tests/ceph-single-node.nix | 193
-rw-r--r--  nixos/tests/ceph.nix | 161
-rw-r--r--  nixos/tests/common/letsencrypt/common.nix | 26
-rw-r--r--  nixos/tests/common/letsencrypt/default.nix | 370
-rw-r--r--  nixos/tests/common/letsencrypt/mkcerts.nix | 2
-rw-r--r--  nixos/tests/common/letsencrypt/snakeoil-certs.nix | 451
-rw-r--r--  nixos/tests/fancontrol.nix | 25
-rw-r--r--  nixos/tests/gotify-server.nix | 45
-rw-r--r--  nixos/tests/installer.nix | 4
-rw-r--r--  nixos/tests/matomo.nix | 43
-rw-r--r--  nixos/tests/openarena.nix | 36
-rw-r--r--  nixos/tests/orangefs.nix | 88
-rw-r--r--  nixos/tests/os-prober.nix | 3
-rw-r--r--  nixos/tests/trickster.nix | 29
-rw-r--r--  nixos/tests/yggdrasil.nix | 123
109 files changed, 2307 insertions(+), 956 deletions(-)
diff --git a/nixos/doc/manual/default.nix b/nixos/doc/manual/default.nix
index f9de2db1a0843..6ca75f869f45c 100644
--- a/nixos/doc/manual/default.nix
+++ b/nixos/doc/manual/default.nix
@@ -62,14 +62,13 @@ let
     "--stringparam html.stylesheet 'style.css overrides.css highlightjs/mono-blue.css'"
     "--stringparam html.script './highlightjs/highlight.pack.js ./highlightjs/loader.js'"
     "--param xref.with.number.and.title 1"
-    "--param toc.section.depth 3"
+    "--param toc.section.depth 0"
     "--stringparam admon.style ''"
     "--stringparam callout.graphics.extension .svg"
     "--stringparam current.docid manual"
     "--param chunk.section.depth 0"
     "--param chunk.first.sections 1"
     "--param use.id.as.filename 1"
-    "--stringparam generate.toc 'book toc appendix toc'"
     "--stringparam chunk.toc ${toc}"
   ];
 
diff --git a/nixos/doc/manual/development/sources.xml b/nixos/doc/manual/development/sources.xml
index 3c30c782746df..b333ccabb420a 100644
--- a/nixos/doc/manual/development/sources.xml
+++ b/nixos/doc/manual/development/sources.xml
@@ -13,17 +13,16 @@
 <screen>
 <prompt>$ </prompt>git clone https://github.com/NixOS/nixpkgs
 <prompt>$ </prompt>cd nixpkgs
-<prompt>$ </prompt>git remote add channels https://github.com/NixOS/nixpkgs-channels
-<prompt>$ </prompt>git remote update channels
+<prompt>$ </prompt>git remote update origin
 </screen>
   This will check out the latest Nixpkgs sources to
   <filename>./nixpkgs</filename> the NixOS sources to
   <filename>./nixpkgs/nixos</filename>. (The NixOS source tree lives in a
-  subdirectory of the Nixpkgs repository.) The remote
-  <literal>channels</literal> refers to a read-only repository that tracks the
-  Nixpkgs/NixOS channels (see <xref linkend="sec-upgrading"/> for more
+  subdirectory of the Nixpkgs repository.) The
+  <literal>nixpkgs</literal> repository has branches that correspond
+  to each Nixpkgs/NixOS channel (see <xref linkend="sec-upgrading"/> for more
   information about channels). Thus, the Git branch
-  <literal>channels/nixos-17.03</literal> will contain the latest built and
+  <literal>origin/nixos-17.03</literal> will contain the latest built and
   tested version available in the <literal>nixos-17.03</literal> channel.
  </para>
  <para>
@@ -40,15 +39,15 @@
   Or, to base your local branch on the latest version available in a NixOS
   channel:
 <screen>
-<prompt>$ </prompt>git remote update channels
-<prompt>$ </prompt>git checkout -b local channels/nixos-17.03
+<prompt>$ </prompt>git remote update origin
+<prompt>$ </prompt>git checkout -b local origin/nixos-17.03
 </screen>
   (Replace <literal>nixos-17.03</literal> with the name of the channel you want
   to use.) You can use <command>git merge</command> or <command>git
   rebase</command> to keep your local branch in sync with the channel, e.g.
 <screen>
-<prompt>$ </prompt>git remote update channels
-<prompt>$ </prompt>git merge channels/nixos-17.03
+<prompt>$ </prompt>git remote update origin
+<prompt>$ </prompt>git merge origin/nixos-17.03
 </screen>
   You can use <command>git cherry-pick</command> to copy commits from your
   local branch to the upstream branch.
diff --git a/nixos/doc/manual/manual.xml b/nixos/doc/manual/manual.xml
index 12f52e1997c8d..18a67a2dd9416 100644
--- a/nixos/doc/manual/manual.xml
+++ b/nixos/doc/manual/manual.xml
@@ -8,32 +8,7 @@
   <subtitle>Version <xi:include href="./generated/version" parse="text" />
   </subtitle>
  </info>
- <preface xml:id="preface">
-  <title>Preface</title>
-  <para>
-   This manual describes how to install, use and extend NixOS, a Linux
-   distribution based on the purely functional package management system Nix.
-  </para>
-  <para>
-   If you encounter problems, please report them on the
-   <literal
-    xlink:href="https://discourse.nixos.org">Discourse</literal> or
-   on the <link
-    xlink:href="irc://irc.freenode.net/#nixos">
-   <literal>#nixos</literal> channel on Freenode</link>. Bugs should be
-   reported in
-   <link
-    xlink:href="https://github.com/NixOS/nixpkgs/issues">NixOS’
-   GitHub issue tracker</link>.
-  </para>
-  <note>
-   <para>
-    Commands prefixed with <literal>#</literal> have to be run as root, either
-    requiring to login as root user or temporarily switching to it using
-    <literal>sudo</literal> for example.
-   </para>
-  </note>
- </preface>
+ <xi:include href="preface.xml" />
  <xi:include href="installation/installation.xml" />
  <xi:include href="configuration/configuration.xml" />
  <xi:include href="administration/running.xml" />
diff --git a/nixos/doc/manual/preface.xml b/nixos/doc/manual/preface.xml
new file mode 100644
index 0000000000000..6ac9ae7e7861d
--- /dev/null
+++ b/nixos/doc/manual/preface.xml
@@ -0,0 +1,37 @@
+<preface xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xml:id="preface">
+ <title>Preface</title>
+ <para>
+  This manual describes how to install, use and extend NixOS, a Linux
+  distribution based on the purely functional package management system
+  <link xlink:href="https://nixos.org/nix">Nix</link>, that is composed
+  using modules and packages defined in the
+  <link xlink:href="https://nixos.org/nixpkgs">Nixpkgs</link> project.
+ </para>
+ <para>
+  Additional information regarding the Nix package manager and the Nixpkgs
+  project can be found in respectively the
+  <link xlink:href="https://nixos.org/nix/manual">Nix manual</link> and the
+  <link xlink:href="https://nixos.org/nixpkgs/manual">Nixpkgs manual</link>.
+ </para>
+ <para>
+  If you encounter problems, please report them on the
+  <literal
+   xlink:href="https://discourse.nixos.org">Discourse</literal> or
+  on the <link
+   xlink:href="irc://irc.freenode.net/#nixos">
+  <literal>#nixos</literal> channel on Freenode</link>. Bugs should be
+  reported in
+  <link
+   xlink:href="https://github.com/NixOS/nixpkgs/issues">NixOS’
+  GitHub issue tracker</link>.
+ </para>
+ <note>
+  <para>
+   Commands prefixed with <literal>#</literal> have to be run as root, either
+   requiring to login as root user or temporarily switching to it using
+   <literal>sudo</literal> for example.
+  </para>
+ </note>
+</preface>
diff --git a/nixos/doc/manual/release-notes/rl-2003.xml b/nixos/doc/manual/release-notes/rl-2003.xml
index d925f30ff22a2..f001a18b1c1f8 100644
--- a/nixos/doc/manual/release-notes/rl-2003.xml
+++ b/nixos/doc/manual/release-notes/rl-2003.xml
@@ -86,6 +86,16 @@
   <itemizedlist>
    <listitem>
     <para>
+      GnuPG is now built without support for a graphical passphrase entry
+      by default. Please enable the <literal>gpg-agent</literal> user service
+      via the NixOS option <literal>programs.gnupg.agent.enable</literal>.
+      Note that upstream recommends using <literal>gpg-agent</literal> and
+      will spawn a <literal>gpg-agent</literal> on the first invocation of
+      GnuPG anyway.
+    </para>
+   </listitem>
+   <listitem>
+    <para>
      The <literal>dynamicHosts</literal> option has been removed from the
      <link linkend="opt-networking.networkmanager.enable">networkd</link>
      module. Allowing (multiple) regular users to override host entries
@@ -129,6 +139,14 @@
    <listitem>
      <para>SD images are now compressed by default using <literal>bzip2</literal>.</para>
    </listitem>
+   <listitem>
+    <para>
+     OpenSSH has been upgraded from 7.9 to 8.1, improving security and adding features
+     but with potential incompatibilities.  Consult the
+     <link xlink:href="https://www.openssh.com/txt/release-8.1">
+     release announcement</link> for more information.
+    </para>
+   </listitem>
   </itemizedlist>
  </section>
 </section>
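
As a rough sketch, the GnuPG change described in the release note above amounts to adding something like the following to configuration.nix; the gnome3 flavor matches the installer template suggestion updated later in this diff, and other values from pkgs.pinentry.flavors can be substituted:

  programs.gnupg.agent = {
    enable = true;
    enableSSHSupport = true;
    pinentryFlavor = "gnome3";
  };
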
diff --git a/nixos/maintainers/scripts/ec2/create-amis.sh b/nixos/maintainers/scripts/ec2/create-amis.sh
index c4149e3e8ffe6..f08e500e079ac 100755
--- a/nixos/maintainers/scripts/ec2/create-amis.sh
+++ b/nixos/maintainers/scripts/ec2/create-amis.sh
@@ -14,7 +14,7 @@
 set -euo pipefail
 
 # configuration
-state_dir=/home/deploy/amis/ec2-images
+state_dir=$HOME/amis/ec2-images
 home_region=eu-west-1
 bucket=nixos-amis
 
diff --git a/nixos/maintainers/scripts/gce/create-gce.sh b/nixos/maintainers/scripts/gce/create-gce.sh
index 48748a59d298a..77cc64e591e99 100755
--- a/nixos/maintainers/scripts/gce/create-gce.sh
+++ b/nixos/maintainers/scripts/gce/create-gce.sh
@@ -15,7 +15,7 @@ nix-build '<nixpkgs/nixos/lib/eval-config.nix>' \
    -j 10
 
 img_path=$(echo gce/*.tar.gz)
-img_name=$(basename "$img_path")
+img_name=${IMAGE_NAME:-$(basename "$img_path")}
 img_id=$(echo "$img_name" | sed 's|.raw.tar.gz$||;s|\.|-|g;s|_|-|g')
 if ! gsutil ls "gs://${BUCKET_NAME}/$img_name"; then
   gsutil cp "$img_path" "gs://${BUCKET_NAME}/$img_name"
diff --git a/nixos/modules/config/i18n.nix b/nixos/modules/config/i18n.nix
index dc7305b1ba24c..d0db8fedecd80 100644
--- a/nixos/modules/config/i18n.nix
+++ b/nixos/modules/config/i18n.nix
@@ -89,11 +89,7 @@ with lib;
       };
 
       consoleKeyMap = mkOption {
-        type = mkOptionType {
-          name = "string or path";
-          check = t: (isString t || types.path.check t);
-        };
-
+        type = with types; either str path;
         default = "us";
         example = "fr";
         description = ''
diff --git a/nixos/modules/config/no-x-libs.nix b/nixos/modules/config/no-x-libs.nix
index 74cf74d74181f..873b8073fed9c 100644
--- a/nixos/modules/config/no-x-libs.nix
+++ b/nixos/modules/config/no-x-libs.nix
@@ -34,7 +34,6 @@ with lib;
       networkmanager-openvpn = super.networkmanager-openvpn.override { withGnome = false; };
       networkmanager-vpnc = super.networkmanager-vpnc.override { withGnome = false; };
       networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; };
-      pinentry = super.pinentry.override { gtk2 = null; gcr = null; qt4 = null; qt5 = null; };
       gobject-introspection = super.gobject-introspection.override { x11Support = false; };
     }));
   };
diff --git a/nixos/modules/config/pulseaudio.nix b/nixos/modules/config/pulseaudio.nix
index b3bc4a451aa0f..9baad9b585455 100644
--- a/nixos/modules/config/pulseaudio.nix
+++ b/nixos/modules/config/pulseaudio.nix
@@ -98,11 +98,12 @@ in {
         description = ''
           If false, a PulseAudio server is launched automatically for
           each user that tries to use the sound system. The server runs
-          with user privileges. This is the recommended and most secure
-          way to use PulseAudio. If true, one system-wide PulseAudio
+          with user privileges. If true, one system-wide PulseAudio
           server is launched on boot, running as the user "pulse", and
           only users in the "audio" group will have access to the server.
           Please read the PulseAudio documentation for more details.
+
+          Don't enable this option unless you know what you are doing.
         '';
       };
 
diff --git a/nixos/modules/config/shells-environment.nix b/nixos/modules/config/shells-environment.nix
index d939cbb393ee4..b79e16cd79796 100644
--- a/nixos/modules/config/shells-environment.nix
+++ b/nixos/modules/config/shells-environment.nix
@@ -118,6 +118,14 @@ in
       type = with types; attrsOf (nullOr (either str path));
     };
 
+    environment.homeBinInPath = mkOption {
+      description = ''
+        Include ~/bin/ in $PATH.
+      '';
+      default = true;
+      type = types.bool;
+    };
+
     environment.binsh = mkOption {
       default = "${config.system.build.binsh}/bin/sh";
       defaultText = "\${config.system.build.binsh}/bin/sh";
@@ -186,8 +194,10 @@ in
 
         ${cfg.extraInit}
 
-        # ~/bin if it exists overrides other bin directories.
-        export PATH="$HOME/bin:$PATH"
+        ${optionalString cfg.homeBinInPath ''
+          # ~/bin if it exists overrides other bin directories.
+          export PATH="$HOME/bin:$PATH"
+        ''}
       '';
 
     system.activationScripts.binsh = stringAfter [ "stdio" ]
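
The new environment.homeBinInPath option keeps the previous behaviour by default; a minimal sketch for opting out of having ~/bin prepended to PATH:

  environment.homeBinInPath = false;
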
diff --git a/nixos/modules/config/update-users-groups.pl b/nixos/modules/config/update-users-groups.pl
index 59cea51c611b4..15e448b787aaf 100644
--- a/nixos/modules/config/update-users-groups.pl
+++ b/nixos/modules/config/update-users-groups.pl
@@ -56,12 +56,12 @@ sub allocGid {
         $gidsUsed{$prevGid} = 1;
         return $prevGid;
     }
-    return allocId(\%gidsUsed, \%gidsPrevUsed, 400, 499, 0, sub { my ($gid) = @_; getgrgid($gid) });
+    return allocId(\%gidsUsed, \%gidsPrevUsed, 400, 999, 0, sub { my ($gid) = @_; getgrgid($gid) });
 }
 
 sub allocUid {
     my ($name, $isSystemUser) = @_;
-    my ($min, $max, $up) = $isSystemUser ? (400, 499, 0) : (1000, 29999, 1);
+    my ($min, $max, $up) = $isSystemUser ? (400, 999, 0) : (1000, 29999, 1);
     my $prevUid = $uidMap->{$name};
     if (defined $prevUid && $prevUid >= $min && $prevUid <= $max && !defined $uidsUsed{$prevUid}) {
         print STDERR "reviving user '$name' with UID $prevUid\n";
diff --git a/nixos/modules/config/users-groups.nix b/nixos/modules/config/users-groups.nix
index ba79bd3d6ecc5..ae3bdeb00e645 100644
--- a/nixos/modules/config/users-groups.nix
+++ b/nixos/modules/config/users-groups.nix
@@ -251,7 +251,7 @@ let
         default = [];
         example = literalExample "[ pkgs.firefox pkgs.thunderbird ]";
         description = ''
-          The set of packages that should be made availabe to the user.
+          The set of packages that should be made available to the user.
           This is in contrast to <option>environment.systemPackages</option>,
           which adds packages to all users.
         '';
diff --git a/nixos/modules/installer/tools/tools.nix b/nixos/modules/installer/tools/tools.nix
index 3292600595984..052e7fdd4fc16 100644
--- a/nixos/modules/installer/tools/tools.nix
+++ b/nixos/modules/installer/tools/tools.nix
@@ -120,7 +120,11 @@ in
         # Some programs need SUID wrappers, can be configured further or are
         # started in user sessions.
         # programs.mtr.enable = true;
-        # programs.gnupg.agent = { enable = true; enableSSHSupport = true; };
+        # programs.gnupg.agent = {
+        #   enable = true;
+        #   enableSSHSupport = true;
+        #   pinentryFlavor = "gnome3";
+        # };
 
         # List services that you want to enable:
 
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 5214126ff7ed9..df6e4dc1336a1 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -227,6 +227,7 @@
   ./services/backup/rsnapshot.nix
   ./services/backup/tarsnap.nix
   ./services/backup/tsm.nix
+  ./services/backup/zfs-replication.nix
   ./services/backup/znapzend.nix
   ./services/cluster/hadoop/default.nix
   ./services/cluster/kubernetes/addons/dns.nix
@@ -321,6 +322,7 @@
   ./services/games/factorio.nix
   ./services/games/minecraft-server.nix
   ./services/games/minetest-server.nix
+  ./services/games/openarena.nix
   ./services/games/terraria.nix
   ./services/hardware/acpid.nix
   ./services/hardware/actkbd.nix
@@ -548,6 +550,8 @@
   ./services/network-filesystems/nfsd.nix
   ./services/network-filesystems/openafs/client.nix
   ./services/network-filesystems/openafs/server.nix
+  ./services/network-filesystems/orangefs/server.nix
+  ./services/network-filesystems/orangefs/client.nix
   ./services/network-filesystems/rsyncd.nix
   ./services/network-filesystems/samba.nix
   ./services/network-filesystems/tahoe.nix
@@ -601,6 +605,7 @@
   ./services/networking/gdomap.nix
   ./services/networking/git-daemon.nix
   ./services/networking/gnunet.nix
+  ./services/networking/go-shadowsocks2.nix
   ./services/networking/gogoclient.nix
   ./services/networking/gvpe.nix
   ./services/networking/hans.nix
@@ -713,6 +718,7 @@
   ./services/networking/tinc.nix
   ./services/networking/tinydns.nix
   ./services/networking/tftpd.nix
+  ./services/networking/trickster.nix
   ./services/networking/tox-bootstrapd.nix
   ./services/networking/tox-node.nix
   ./services/networking/toxvpn.nix
@@ -729,6 +735,7 @@
   ./services/networking/xinetd.nix
   ./services/networking/xl2tpd.nix
   ./services/networking/xrdp.nix
+  ./services/networking/yggdrasil.nix
   ./services/networking/zerobin.nix
   ./services/networking/zeronet.nix
   ./services/networking/zerotierone.nix
@@ -793,6 +800,7 @@
   ./services/web-apps/cryptpad.nix
   ./services/web-apps/documize.nix
   ./services/web-apps/frab.nix
+  ./services/web-apps/gotify-server.nix
   ./services/web-apps/icingaweb2/icingaweb2.nix
   ./services/web-apps/icingaweb2/module-monitoring.nix
   ./services/web-apps/limesurvey.nix
diff --git a/nixos/modules/profiles/installation-device.nix b/nixos/modules/profiles/installation-device.nix
index fd30220ce1c91..4596e163404ca 100644
--- a/nixos/modules/profiles/installation-device.nix
+++ b/nixos/modules/profiles/installation-device.nix
@@ -31,9 +31,6 @@ with lib;
     # Let the user play Rogue on TTY 8 during the installation.
     #services.rogue.enable = true;
 
-    # Disable some other stuff we don't need.
-    services.udisks2.enable = mkDefault false;
-
     # Use less privileged nixos user
     users.users.nixos = {
       isNormalUser = true;
diff --git a/nixos/modules/programs/gnupg.nix b/nixos/modules/programs/gnupg.nix
index bcbc994efe9b3..2d262d9065796 100644
--- a/nixos/modules/programs/gnupg.nix
+++ b/nixos/modules/programs/gnupg.nix
@@ -6,6 +6,19 @@ let
 
   cfg = config.programs.gnupg;
 
+  xserverCfg = config.services.xserver;
+
+  defaultPinentryFlavor =
+    if xserverCfg.desktopManager.lxqt.enable
+    || xserverCfg.desktopManager.plasma5.enable then
+      "qt"
+    else if xserverCfg.desktopManager.xfce.enable then
+      "gtk2"
+    else if xserverCfg.enable || config.programs.sway.enable then
+      "gnome3"
+    else
+      null;
+
 in
 
 {
@@ -54,6 +67,20 @@ in
       '';
     };
 
+    agent.pinentryFlavor = mkOption {
+      type = types.nullOr (types.enum pkgs.pinentry.flavors);
+      example = "gnome3";
+      description = ''
+        Which pinentry interface to use. If not null, the path to the
+        pinentry binary will be passed to gpg-agent via the command line,
+        thus overriding the pinentry option in gpg-agent.conf in the user's
+        home directory.
+        If not set at all, it'll pick an appropriate flavor depending on the
+        system configuration (qt flavor for lxqt and plasma5, gtk2 for xfce
+        4.12, gnome3 on all other systems with X enabled, ncurses otherwise).
+      '';
+    };
+
     dirmngr.enable = mkOption {
       type = types.bool;
       default = false;
@@ -64,6 +91,16 @@ in
   };
 
   config = mkIf cfg.agent.enable {
+    programs.gnupg.agent.pinentryFlavor = mkDefault defaultPinentryFlavor;
+
+    # This overrides the systemd user unit shipped with the gnupg package
+    systemd.user.services.gpg-agent = mkIf (cfg.agent.pinentryFlavor != null) {
+      serviceConfig.ExecStart = [ "" ''
+        ${pkgs.gnupg}/bin/gpg-agent --supervised \
+          --pinentry-program ${pkgs.pinentry.${cfg.agent.pinentryFlavor}}/bin/pinentry
+      '' ];
+    };
+
     systemd.user.sockets.gpg-agent = {
       wantedBy = [ "sockets.target" ];
     };
@@ -83,7 +120,9 @@ in
     systemd.user.sockets.dirmngr = mkIf cfg.dirmngr.enable {
       wantedBy = [ "sockets.target" ];
     };
-    
+
+    services.dbus.packages = mkIf (cfg.agent.pinentryFlavor == "gnome3") [ pkgs.gcr ];
+
     environment.systemPackages = with pkgs; [ cfg.package ];
     systemd.packages = [ cfg.package ];
 
diff --git a/nixos/modules/programs/shadow.nix b/nixos/modules/programs/shadow.nix
index 8ec4169207db5..7eaf79d864e79 100644
--- a/nixos/modules/programs/shadow.nix
+++ b/nixos/modules/programs/shadow.nix
@@ -6,17 +6,27 @@ with lib;
 
 let
 
+  /*
+  There are three different sources for user/group id ranges, each of which gets
+  used by different programs:
+  - The login.defs file, used by the useradd, groupadd and newusers commands
+  - The update-users-groups.pl file, used by NixOS in the activation phase to
+    decide on which ids to use for declaratively defined users without a static
+    id
+  - Systemd compile time options -Dsystem-uid-max= and -Dsystem-gid-max=, used
+    by systemd for features like ConditionUser=@system and systemd-sysusers
+  */
   loginDefs =
     ''
       DEFAULT_HOME yes
 
       SYS_UID_MIN  400
-      SYS_UID_MAX  499
+      SYS_UID_MAX  999
       UID_MIN      1000
       UID_MAX      29999
 
       SYS_GID_MIN  400
-      SYS_GID_MAX  499
+      SYS_GID_MAX  999
       GID_MIN      1000
       GID_MAX      29999
 
diff --git a/nixos/modules/programs/x2goserver.nix b/nixos/modules/programs/x2goserver.nix
index 77a1a0da79938..7d74231e956b7 100644
--- a/nixos/modules/programs/x2goserver.nix
+++ b/nixos/modules/programs/x2goserver.nix
@@ -69,6 +69,7 @@ in {
     users.users.x2go = {
       home = "/var/lib/x2go/db";
       group = "x2go";
+      isSystemUser = true;
     };
 
     security.wrappers.x2gosqliteWrapper = {
diff --git a/nixos/modules/rename.nix b/nixos/modules/rename.nix
index df8ebe5058461..886e2e83ba62c 100644
--- a/nixos/modules/rename.nix
+++ b/nixos/modules/rename.nix
@@ -135,7 +135,8 @@ with lib;
     # piwik was renamed to matomo
     (mkRenamedOptionModule [ "services" "piwik" "enable" ] [ "services" "matomo" "enable" ])
     (mkRenamedOptionModule [ "services" "piwik" "webServerUser" ] [ "services" "matomo" "webServerUser" ])
-    (mkRenamedOptionModule [ "services" "piwik" "phpfpmProcessManagerConfig" ] [ "services" "matomo" "phpfpmProcessManagerConfig" ])
+    (mkRemovedOptionModule [ "services" "piwik" "phpfpmProcessManagerConfig" ] "Use services.phpfpm.pools.<name>.settings")
+    (mkRemovedOptionModule [ "services" "matomo" "phpfpmProcessManagerConfig" ] "Use services.phpfpm.pools.<name>.settings")
     (mkRenamedOptionModule [ "services" "piwik" "nginx" ] [ "services" "matomo" "nginx" ])
 
     # tarsnap
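
With services.matomo.phpfpmProcessManagerConfig removed, the equivalent process-manager directives go through the generic phpfpm pool settings instead; a sketch, assuming the pool is named "matomo" and using illustrative values:

  services.phpfpm.pools.matomo.settings = {
    # Hypothetical tuning values; adjust to the actual workload.
    "pm" = "dynamic";
    "pm.max_children" = 32;
    "pm.start_servers" = 2;
    "pm.min_spare_servers" = 2;
    "pm.max_spare_servers" = 4;
  };
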
diff --git a/nixos/modules/security/acme.nix b/nixos/modules/security/acme.nix
index b321c04e574c4..d14613f22b057 100644
--- a/nixos/modules/security/acme.nix
+++ b/nixos/modules/security/acme.nix
@@ -20,6 +20,16 @@ let
         '';
       };
 
+      server = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          ACME Directory Resource URI. Defaults to the Let's Encrypt
+          production endpoint,
+          https://acme-v02.api.letsencrypt.org/directory, if unset.
+        '';
+      };
+
       domain = mkOption {
         type = types.str;
         default = name;
@@ -69,9 +79,9 @@ let
       plugins = mkOption {
         type = types.listOf (types.enum [
           "cert.der" "cert.pem" "chain.pem" "external.sh"
-          "fullchain.pem" "full.pem" "key.der" "key.pem" "account_key.json"
+          "fullchain.pem" "full.pem" "key.der" "key.pem" "account_key.json" "account_reg.json"
         ]);
-        default = [ "fullchain.pem" "full.pem" "key.pem" "account_key.json" ];
+        default = [ "fullchain.pem" "full.pem" "key.pem" "account_key.json" "account_reg.json" ];
         description = ''
           Plugins to enable. With default settings simp_le will
           store public certificate bundle in <filename>fullchain.pem</filename>,
@@ -109,7 +119,15 @@ in
 {
 
   ###### interface
-
+  imports = [
+    (mkRemovedOptionModule [ "security" "acme" "production" ] ''
+      Use security.acme.server to define your staging ACME server URL instead.
+
+      To use the Let's Encrypt staging server, use security.acme.server =
+      "https://acme-staging-v02.api.letsencrypt.org/directory".
+    ''
+    )
+  ];
   options = {
     security.acme = {
 
@@ -129,6 +147,16 @@ in
         '';
       };
 
+      server = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          ACME Directory Resource URI. Defaults to the Let's Encrypt
+          production endpoint,
+          <literal>https://acme-v02.api.letsencrypt.org/directory</literal>, if unset.
+        '';
+      };
+
       preliminarySelfsigned = mkOption {
         type = types.bool;
         default = true;
@@ -142,20 +170,6 @@ in
         '';
       };
 
-      production = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          If set to true, use Let's Encrypt's production environment
-          instead of the staging environment. The main benefit of the
-          staging environment is to get much higher rate limits.
-
-          See
-          <literal>https://letsencrypt.org/docs/staging-environment</literal>
-          for more detail.
-        '';
-      };
-
       certs = mkOption {
         default = { };
         type = with types; attrsOf (submodule certOpts);
@@ -198,11 +212,16 @@ in
                           ++ optionals (data.email != null) [ "--email" data.email ]
                           ++ concatMap (p: [ "-f" p ]) data.plugins
                           ++ concatLists (mapAttrsToList (name: root: [ "-d" (if root == null then name else "${name}:${root}")]) data.extraDomains)
-                          ++ optionals (!cfg.production) ["--server" "https://acme-staging.api.letsencrypt.org/directory"];
+                          ++ optionals (cfg.server != null || data.server != null) ["--server" (if data.server == null then cfg.server else data.server)];
                 acmeService = {
                   description = "Renew ACME Certificate for ${cert}";
                   after = [ "network.target" "network-online.target" ];
                   wants = [ "network-online.target" ];
+                  # simp_le uses requests, which uses certifi under the hood,
+                  # which doesn't respect the system trust store.
+                  # At least in the acme test, we provision a fake CA, impersonating the LE endpoint.
+                  # REQUESTS_CA_BUNDLE is a way to teach python requests to use something else
+                  environment.REQUESTS_CA_BUNDLE = "/etc/ssl/certs/ca-certificates.crt";
                   serviceConfig = {
                     Type = "oneshot";
                     SuccessExitStatus = [ "0" "1" ];
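
The removed production flag becomes an explicit directory URL. A sketch of pointing an existing setup at the Let's Encrypt staging endpoint, either globally or per certificate (the certificate name is a placeholder):

  security.acme.server = "https://acme-staging-v02.api.letsencrypt.org/directory";

  # or, for a single certificate only:
  security.acme.certs."example.org".server =
    "https://acme-staging-v02.api.letsencrypt.org/directory";
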
diff --git a/nixos/modules/services/admin/oxidized.nix b/nixos/modules/services/admin/oxidized.nix
index 39112c3970d5a..da81be3f23e85 100644
--- a/nixos/modules/services/admin/oxidized.nix
+++ b/nixos/modules/services/admin/oxidized.nix
@@ -89,6 +89,7 @@ in
       group = cfg.group;
       home = cfg.dataDir;
       createHome = true;
+      isSystemUser = true;
     };
 
     systemd.services.oxidized = {
diff --git a/nixos/modules/services/audio/jack.nix b/nixos/modules/services/audio/jack.nix
index aa3351f401afb..ceff366d0bbb2 100644
--- a/nixos/modules/services/audio/jack.nix
+++ b/nixos/modules/services/audio/jack.nix
@@ -223,6 +223,7 @@ in {
         group = "jackaudio";
         extraGroups = [ "audio" ];
         description = "JACK Audio system service user";
+        isSystemUser = true;
       };
       # http://jackaudio.org/faq/linux_rt_config.html
       security.pam.loginLimits = [
diff --git a/nixos/modules/services/backup/automysqlbackup.nix b/nixos/modules/services/backup/automysqlbackup.nix
index 1884f3536a97b..e3a8d1f79934b 100644
--- a/nixos/modules/services/backup/automysqlbackup.nix
+++ b/nixos/modules/services/backup/automysqlbackup.nix
@@ -99,7 +99,10 @@ in
 
     environment.systemPackages = [ pkg ];
 
-    users.users.${user}.group = group;
+    users.users.${user} = {
+      group = group;
+      isSystemUser = true;
+    };
     users.groups.${group} = { };
 
     systemd.tmpfiles.rules = [
diff --git a/nixos/modules/services/backup/znapzend.nix b/nixos/modules/services/backup/znapzend.nix
index f317078ddda22..203631a577f0d 100644
--- a/nixos/modules/services/backup/znapzend.nix
+++ b/nixos/modules/services/backup/znapzend.nix
@@ -34,6 +34,8 @@ let
     description = "string of the form number{b|k|M|G}";
   };
 
+  enabledFeatures = concatLists (mapAttrsToList (name: enabled: optional enabled name) cfg.features);
+
   # Type for a string that must contain certain other strings (the list parameter).
   # Note that these would need regex escaping.
   stringContainingStrings = list: let
@@ -354,6 +356,22 @@ in
         '';
         default = false;
       };
+
+      features.recvu = mkEnableOption ''
+        recvu feature which uses <literal>-u</literal> on the receiving end to keep the destination
+        filesystem unmounted.
+      '';
+      features.compressed = mkEnableOption ''
+        compressed feature which adds the options <literal>-Lce</literal> to
+        the <command>zfs send</command> command. When this is enabled, make
+        sure that both the sending and receiving pool have the same relevant
+        features enabled. Using <literal>-c</literal> will skip unnecessary
+        decompress-compress stages, <literal>-L</literal> is for large block
+        support and <literal>-e</literal> is for embedded data support. See
+        <citerefentry><refentrytitle>znapzend</refentrytitle><manvolnum>1</manvolnum></citerefentry>
+        and <citerefentry><refentrytitle>zfs</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+        for more info.
+      '';
     };
   };
 
@@ -381,12 +399,22 @@ in
         '';
 
         serviceConfig = {
+          # znapzendzetup --import apparently tries to connect to the backup
+          # host 3 times with a timeout of 30 seconds, leading to a startup
+          # delay of >90s when the host is down, which is just above the default
+          # service timeout of 90 seconds. Increase the timeout so it doesn't
+          # make the service fail in that case.
+          TimeoutStartSec = 180;
+          # Needs to have write access to ZFS
+          User = "root";
           ExecStart = let
             args = concatStringsSep " " [
               "--logto=${cfg.logTo}"
               "--loglevel=${cfg.logLevel}"
               (optionalString cfg.noDestroy "--nodestroy")
               (optionalString cfg.autoCreation "--autoCreation")
+              (optionalString (enabledFeatures != [])
+                "--features=${concatStringsSep "," enabledFeatures}")
             ]; in "${pkgs.znapzend}/bin/znapzend ${args}";
           ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
           Restart = "on-failure";
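
The new feature toggles are plain booleans; a minimal sketch of a sending host that enables both, assuming the rest of the znapzend setup already exists:

  services.znapzend = {
    enable = true;
    features.recvu = true;       # keep destination filesystems unmounted
    features.compressed = true;  # pass -Lce to zfs send; both pools need the features
  };
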
diff --git a/nixos/modules/services/continuous-integration/buildkite-agent.nix b/nixos/modules/services/continuous-integration/buildkite-agent.nix
index 12cc3d2b1ccce..32f361454bc18 100644
--- a/nixos/modules/services/continuous-integration/buildkite-agent.nix
+++ b/nixos/modules/services/continuous-integration/buildkite-agent.nix
@@ -191,6 +191,7 @@ in
         createHome = true;
         description = "Buildkite agent user";
         extraGroups = [ "keys" ];
+        isSystemUser = true;
       };
 
     environment.systemPackages = [ cfg.package ];
diff --git a/nixos/modules/services/databases/redis.nix b/nixos/modules/services/databases/redis.nix
index 5861323e5ea0f..5695eeaf74cb6 100644
--- a/nixos/modules/services/databases/redis.nix
+++ b/nixos/modules/services/databases/redis.nix
@@ -194,7 +194,10 @@ in
       allowedTCPPorts = [ cfg.port ];
     };
 
-    users.users.redis.description = "Redis database user";
+    users.users.redis = {
+      description = "Redis database user";
+      isSystemUser = true;
+    };
 
     environment.systemPackages = [ cfg.package ];
 
diff --git a/nixos/modules/services/databases/rethinkdb.nix b/nixos/modules/services/databases/rethinkdb.nix
index 4828e594b3283..f18fbaf5b062a 100644
--- a/nixos/modules/services/databases/rethinkdb.nix
+++ b/nixos/modules/services/databases/rethinkdb.nix
@@ -99,6 +99,7 @@ in
     users.users.rethinkdb = mkIf (cfg.user == "rethinkdb")
       { name = "rethinkdb";
         description = "RethinkDB server user";
+        isSystemUser = true;
       };
 
     users.groups = optionalAttrs (cfg.group == "rethinkdb") (singleton
diff --git a/nixos/modules/services/desktops/geoclue2.nix b/nixos/modules/services/desktops/geoclue2.nix
index 6007dddf50c0c..542b2ead41040 100644
--- a/nixos/modules/services/desktops/geoclue2.nix
+++ b/nixos/modules/services/desktops/geoclue2.nix
@@ -188,34 +188,41 @@ in
 
     systemd.packages = [ package ];
 
-    users.users.geoclue = {
-      isSystemUser = true;
-      home = "/var/lib/geoclue";
-      group = "geoclue";
-      description = "Geoinformation service";
-    };
-
-    users.groups.geoclue = {};
+    # we cannot use DynamicUser as we need the geoclue user to exist for the dbus policy to work
+    users = {
+      users.geoclue = {
+        isSystemUser = true;
+        home = "/var/lib/geoclue";
+        group = "geoclue";
+        description = "Geoinformation service";
+      };
 
-    systemd.tmpfiles.rules = [
-      "d /var/lib/geoclue 0755 geoclue geoclue"
-    ];
+      groups.geoclue = {};
+    };
 
-    # restart geoclue service when the configuration changes
-    systemd.services.geoclue.restartTriggers = [
-      config.environment.etc."geoclue/geoclue.conf".source
-    ];
+    systemd.services.geoclue = {
+      # restart geoclue service when the configuration changes
+      restartTriggers = [
+        config.environment.etc."geoclue/geoclue.conf".source
+      ];
+      serviceConfig.StateDirectory = "geoclue";
+    };
 
     # this needs to run as a user service, since it's associated with the
     # user who is making the requests
     systemd.user.services = mkIf cfg.enableDemoAgent {
       geoclue-agent = {
         description = "Geoclue agent";
-        script = "${package}/libexec/geoclue-2.0/demos/agent";
         # this should really be `partOf = [ "geoclue.service" ]`, but
         # we can't be part of a system service, and the agent should
         # be okay with the main service coming and going
         wantedBy = [ "default.target" ];
+        serviceConfig = {
+          Type = "exec";
+          ExecStart = "${package}/libexec/geoclue-2.0/demos/agent";
+          Restart = "on-failure";
+          PrivateTmp = true;
+        };
       };
     };
 
@@ -256,4 +263,6 @@ in
         };
       } // mapAttrs' appConfigToINICompatible cfg.appConfig);
   };
+
+  meta.maintainers = with lib.maintainers; [ worldofpeace ];
 }
diff --git a/nixos/modules/services/editors/infinoted.nix b/nixos/modules/services/editors/infinoted.nix
index 9cc8d421270ea..be36676169427 100644
--- a/nixos/modules/services/editors/infinoted.nix
+++ b/nixos/modules/services/editors/infinoted.nix
@@ -115,6 +115,7 @@ in {
       { name = "infinoted";
         description = "Infinoted user";
         group = cfg.group;
+        isSystemUser = true;
       };
     users.groups = optional (cfg.group == "infinoted")
       { name = "infinoted";
diff --git a/nixos/modules/services/games/openarena.nix b/nixos/modules/services/games/openarena.nix
new file mode 100644
index 0000000000000..b7d1aea6b8d2a
--- /dev/null
+++ b/nixos/modules/services/games/openarena.nix
@@ -0,0 +1,56 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.openarena;
+in
+{
+  options = {
+    services.openarena = {
+      enable = mkEnableOption "OpenArena";
+
+      openPorts = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to open firewall ports for OpenArena";
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = ''Extra flags to pass to <command>oa_ded</command>'';
+        example = [
+          "+set dedicated 2"
+          "+set sv_hostname 'My NixOS OpenArena Server'"
+          # Load a map. Mandatory for clients to be able to connect.
+          "+map oa_dm1"
+        ];
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    networking.firewall = mkIf cfg.openPorts {
+      allowedUDPPorts = [ 27960 ];
+    };
+
+    systemd.services.openarena = {
+      description = "OpenArena";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        DynamicUser = true;
+        StateDirectory = "openarena";
+        ExecStart = "${pkgs.openarena}/bin/openarena-server +set fs_basepath ${pkgs.openarena}/openarena-0.8.8 +set fs_homepath /var/lib/openarena ${concatStringsSep " " cfg.extraFlags}";
+        Restart = "on-failure";
+
+        # Hardening
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+      };
+    };
+  };
+}
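
A usage sketch for the new module, following the option example above (hostname and map are illustrative):

  services.openarena = {
    enable = true;
    openPorts = true;  # opens UDP 27960
    extraFlags = [
      "+set dedicated 2"
      "+set sv_hostname 'My NixOS OpenArena Server'"
      "+map oa_dm1"  # clients cannot connect until a map is loaded
    ];
  };
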
diff --git a/nixos/modules/services/hardware/fancontrol.nix b/nixos/modules/services/hardware/fancontrol.nix
index 616e4add31e87..bb4541a784dae 100644
--- a/nixos/modules/services/hardware/fancontrol.nix
+++ b/nixos/modules/services/hardware/fancontrol.nix
@@ -4,42 +4,41 @@ with lib;
 
 let
   cfg = config.hardware.fancontrol;
-  configFile = pkgs.writeText "fan.conf" cfg.config;
-
-in {
+  configFile = pkgs.writeText "fancontrol.conf" cfg.config;
 
+in{
   options.hardware.fancontrol = {
-    enable = mkEnableOption "fancontrol (requires fancontrol.config)";
+    enable = mkEnableOption "software fan control (requires fancontrol.config)";
 
     config = mkOption {
-      type = types.lines;
       default = null;
+      type = types.lines;
+      description = "Fancontrol configuration file content. See <citerefentry><refentrytitle>pwmconfig</refentrytitle><manvolnum>8</manvolnum></citerefentry> from the lm_sensors package.";
       example = ''
         # Configuration file generated by pwmconfig
-        INTERVAL=1
-        DEVPATH=hwmon0=devices/platform/nct6775.656 hwmon1=devices/pci0000:00/0000:00:18.3
-        DEVNAME=hwmon0=nct6779 hwmon1=k10temp
-        FCTEMPS=hwmon0/pwm2=hwmon1/temp1_input
-        FCFANS=hwmon0/pwm2=hwmon0/fan2_input
-        MINTEMP=hwmon0/pwm2=25
-        MAXTEMP=hwmon0/pwm2=60
-        MINSTART=hwmon0/pwm2=25
-        MINSTOP=hwmon0/pwm2=10
-        MINPWM=hwmon0/pwm2=0
-        MAXPWM=hwmon0/pwm2=255
+        INTERVAL=10
+        DEVPATH=hwmon3=devices/virtual/thermal/thermal_zone2 hwmon4=devices/platform/f71882fg.656
+        DEVNAME=hwmon3=soc_dts1 hwmon4=f71869a
+        FCTEMPS=hwmon4/device/pwm1=hwmon3/temp1_input
+        FCFANS= hwmon4/device/pwm1=hwmon4/device/fan1_input
+        MINTEMP=hwmon4/device/pwm1=35
+        MAXTEMP=hwmon4/device/pwm1=65
+        MINSTART=hwmon4/device/pwm1=150
+        MINSTOP=hwmon4/device/pwm1=0
       '';
-      description = "Contents for configuration file. See <citerefentry><refentrytitle>pwmconfig</refentrytitle><manvolnum>8</manvolnum></citerefentry>.";
     };
   };
 
-
   config = mkIf cfg.enable {
     systemd.services.fancontrol = {
-      description = "Fan speed control from lm_sensors";
+      unitConfig.Documentation = "man:fancontrol(8)";
+      description = "software fan control";
       wantedBy = [ "multi-user.target" ];
+      after = [ "lm_sensors.service" ];
+
       serviceConfig = {
         Type = "simple";
-        ExecStart = "${pkgs.lm_sensors}/bin/fancontrol ${configFile}";
+        ExecStart = "${pkgs.lm_sensors}/sbin/fancontrol ${configFile}";
       };
     };
   };
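
A usage sketch for the reworked module, reusing the option example shown above; the hwmon paths are machine specific and should come from running pwmconfig:

  hardware.fancontrol = {
    enable = true;
    config = ''
      INTERVAL=10
      DEVPATH=hwmon3=devices/virtual/thermal/thermal_zone2 hwmon4=devices/platform/f71882fg.656
      DEVNAME=hwmon3=soc_dts1 hwmon4=f71869a
      FCTEMPS=hwmon4/device/pwm1=hwmon3/temp1_input
      FCFANS= hwmon4/device/pwm1=hwmon4/device/fan1_input
      MINTEMP=hwmon4/device/pwm1=35
      MAXTEMP=hwmon4/device/pwm1=65
      MINSTART=hwmon4/device/pwm1=150
      MINSTOP=hwmon4/device/pwm1=0
    '';
  };
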
diff --git a/nixos/modules/services/hardware/trezord.nix b/nixos/modules/services/hardware/trezord.nix
index 62824ed7350a7..c517e9fbb2bda 100644
--- a/nixos/modules/services/hardware/trezord.nix
+++ b/nixos/modules/services/hardware/trezord.nix
@@ -44,20 +44,7 @@ in {
   ### implementation
 
   config = mkIf cfg.enable {
-    services.udev.packages = lib.singleton (pkgs.writeTextFile {
-      name = "trezord-udev-rules";
-      destination = "/etc/udev/rules.d/51-trezor.rules";
-      text = ''
-        # TREZOR v1 (One)
-        SUBSYSTEM=="usb", ATTR{idVendor}=="534c", ATTR{idProduct}=="0001", MODE="0660", GROUP="trezord", TAG+="uaccess", SYMLINK+="trezor%n"
-        KERNEL=="hidraw*", ATTRS{idVendor}=="534c", ATTRS{idProduct}=="0001", MODE="0660", GROUP="trezord", TAG+="uaccess"
-
-        # TREZOR v2 (T)
-        SUBSYSTEM=="usb", ATTR{idVendor}=="1209", ATTR{idProduct}=="53c0", MODE="0660", GROUP="trezord", TAG+="uaccess", SYMLINK+="trezor%n"
-        SUBSYSTEM=="usb", ATTR{idVendor}=="1209", ATTR{idProduct}=="53c1", MODE="0660", GROUP="trezord", TAG+="uaccess", SYMLINK+="trezor%n"
-        KERNEL=="hidraw*", ATTRS{idVendor}=="1209", ATTRS{idProduct}=="53c1", MODE="0660", GROUP="trezord", TAG+="uaccess"
-      '';
-    });
+    services.udev.packages = [ pkgs.trezor-udev-rules ];
 
     systemd.services.trezord = {
       description = "TREZOR Bridge";
@@ -74,6 +61,7 @@ in {
     users.users.trezord = {
       group = "trezord";
       description = "Trezor bridge daemon user";
+      isSystemUser = true;
     };
 
     users.groups.trezord = {};
diff --git a/nixos/modules/services/hardware/udisks2.nix b/nixos/modules/services/hardware/udisks2.nix
index ed8703be921cb..e898f32605856 100644
--- a/nixos/modules/services/hardware/udisks2.nix
+++ b/nixos/modules/services/hardware/udisks2.nix
@@ -34,10 +34,7 @@ with lib;
 
     services.dbus.packages = [ pkgs.udisks2 ];
 
-    system.activationScripts.udisks2 =
-      ''
-        mkdir -m 0755 -p /var/lib/udisks2
-      '';
+    systemd.tmpfiles.rules = [ "d /var/lib/udisks2 0755 root root -" ];
 
     services.udev.packages = [ pkgs.udisks2 ];
 
diff --git a/nixos/modules/services/hardware/usbmuxd.nix b/nixos/modules/services/hardware/usbmuxd.nix
index 93ced0b9f04d4..39bbcaf4627ca 100644
--- a/nixos/modules/services/hardware/usbmuxd.nix
+++ b/nixos/modules/services/hardware/usbmuxd.nix
@@ -47,6 +47,7 @@ in
       name = cfg.user;
       description = "usbmuxd user";
       group = cfg.group;
+      isSystemUser = true;
     };
 
     users.groups = optional (cfg.group == defaultUserGroup) {
diff --git a/nixos/modules/services/hardware/vdr.nix b/nixos/modules/services/hardware/vdr.nix
index 6e246f70f515f..8a6cde51b06ff 100644
--- a/nixos/modules/services/hardware/vdr.nix
+++ b/nixos/modules/services/hardware/vdr.nix
@@ -66,6 +66,7 @@ in {
     users.users.vdr = {
       group = "vdr";
       home = libDir;
+      isSystemUser = true;
     };
 
     users.groups.vdr = {};
diff --git a/nixos/modules/services/mail/mailhog.nix b/nixos/modules/services/mail/mailhog.nix
index b78f4c8e0e663..0f998c6d0ea63 100644
--- a/nixos/modules/services/mail/mailhog.nix
+++ b/nixos/modules/services/mail/mailhog.nix
@@ -27,6 +27,7 @@ in {
     users.users.mailhog = {
       name = cfg.user;
       description = "MailHog service user";
+      isSystemUser = true;
     };
 
     systemd.services.mailhog = {
diff --git a/nixos/modules/services/misc/airsonic.nix b/nixos/modules/services/misc/airsonic.nix
index 919d3b2f6e640..c296e048cea41 100644
--- a/nixos/modules/services/misc/airsonic.nix
+++ b/nixos/modules/services/misc/airsonic.nix
@@ -148,6 +148,7 @@ in {
       name = cfg.user;
       home = cfg.home;
       createHome = true;
+      isSystemUser = true;
     };
   };
 }
diff --git a/nixos/modules/services/misc/docker-registry.nix b/nixos/modules/services/misc/docker-registry.nix
index c87607d2666af..89bac4f47d736 100644
--- a/nixos/modules/services/misc/docker-registry.nix
+++ b/nixos/modules/services/misc/docker-registry.nix
@@ -145,11 +145,13 @@ in {
     };
 
     users.users.docker-registry =
-      if cfg.storagePath != null
+      (if cfg.storagePath != null
       then {
         createHome = true;
         home = cfg.storagePath;
       }
-      else {};
+      else {}) // {
+        isSystemUser = true;
+      };
   };
 }
diff --git a/nixos/modules/services/misc/errbot.nix b/nixos/modules/services/misc/errbot.nix
index 256adce2f02e1..b447ba5d438d2 100644
--- a/nixos/modules/services/misc/errbot.nix
+++ b/nixos/modules/services/misc/errbot.nix
@@ -76,7 +76,10 @@ in {
   };
 
   config = mkIf (cfg.instances != {}) {
-    users.users.errbot.group = "errbot";
+    users.users.errbot = {
+      group = "errbot";
+      isSystemUser = true;
+    };
     users.groups.errbot = {};
 
     systemd.services = mapAttrs' (name: instanceCfg: nameValuePair "errbot-${name}" (
diff --git a/nixos/modules/services/misc/gitea.nix b/nixos/modules/services/misc/gitea.nix
index 4992b13c9d4ab..c8c59fb256e86 100644
--- a/nixos/modules/services/misc/gitea.nix
+++ b/nixos/modules/services/misc/gitea.nix
@@ -409,6 +409,7 @@ in
         home = cfg.stateDir;
         useDefaultShell = true;
         group = "gitea";
+        isSystemUser = true;
       };
     };
 
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index 6ee35aaca5652..07ea9c4584371 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -608,6 +608,8 @@ in {
     # objects owners and extensions; for now we tack on what's needed
     # here.
     systemd.services.postgresql.postStart = mkAfter (optionalString databaseActuallyCreateLocally ''
+      set -eu
+
       $PSQL -tAc "SELECT 1 FROM pg_database WHERE datname = '${cfg.databaseName}'" | grep -q 1 || $PSQL -tAc 'CREATE DATABASE "${cfg.databaseName}" OWNER "${cfg.databaseUsername}"'
       current_owner=$($PSQL -tAc "SELECT pg_catalog.pg_get_userbyid(datdba) FROM pg_catalog.pg_database WHERE datname = '${cfg.databaseName}'")
       if [[ "$current_owner" != "${cfg.databaseUsername}" ]]; then
@@ -739,7 +741,6 @@ in {
         gitlab-workhorse
       ];
       serviceConfig = {
-        PermissionsStartOnly = true; # preStart must be run as root
         Type = "simple";
         User = cfg.user;
         Group = cfg.group;
@@ -781,13 +782,18 @@ in {
         ExecStartPre = let
           preStartFullPrivileges = ''
             shopt -s dotglob nullglob
+            set -eu
+
             chown --no-dereference '${cfg.user}':'${cfg.group}' '${cfg.statePath}'/*
             chown --no-dereference '${cfg.user}':'${cfg.group}' '${cfg.statePath}'/config/*
           '';
           preStart = ''
+            set -eu
+
             cp -f ${cfg.packages.gitlab}/share/gitlab/VERSION ${cfg.statePath}/VERSION
             rm -rf ${cfg.statePath}/db/*
             rm -rf ${cfg.statePath}/config/initializers/*
+            rm -f ${cfg.statePath}/lib
             cp -rf --no-preserve=mode ${cfg.packages.gitlab}/share/gitlab/config.dist/* ${cfg.statePath}/config
             cp -rf --no-preserve=mode ${cfg.packages.gitlab}/share/gitlab/db/* ${cfg.statePath}/db
 
diff --git a/nixos/modules/services/misc/gollum.nix b/nixos/modules/services/misc/gollum.nix
index 7653b415bf094..f4a9c72b1545e 100644
--- a/nixos/modules/services/misc/gollum.nix
+++ b/nixos/modules/services/misc/gollum.nix
@@ -71,6 +71,7 @@ in
       group = config.users.users.gollum.name;
       description = "Gollum user";
       createHome = false;
+      isSystemUser = true;
     };
 
     users.groups.gollum = { };
diff --git a/nixos/modules/services/misc/jellyfin.nix b/nixos/modules/services/misc/jellyfin.nix
index 55559206568d9..6ecdfb57dc358 100644
--- a/nixos/modules/services/misc/jellyfin.nix
+++ b/nixos/modules/services/misc/jellyfin.nix
@@ -41,7 +41,10 @@ in
     };
 
     users.users = mkIf (cfg.user == "jellyfin") {
-      jellyfin.group = cfg.group;
+      jellyfin = {
+        group = cfg.group;
+        isSystemUser = true;
+      };
     };
 
     users.groups = mkIf (cfg.group == "jellyfin") {
diff --git a/nixos/modules/services/misc/nix-daemon.nix b/nixos/modules/services/misc/nix-daemon.nix
index c1754c0e0ecbc..dcec4d4fc6cd3 100644
--- a/nixos/modules/services/misc/nix-daemon.nix
+++ b/nixos/modules/services/misc/nix-daemon.nix
@@ -61,7 +61,7 @@ let
             '' else ''
               echo "Checking that Nix can read nix.conf..."
               ln -s $out ./nix.conf
-              NIX_CONF_DIR=$PWD ${cfg.package}/bin/nix show-config ${optionalString isNix23 "--no-net"} >/dev/null
+              NIX_CONF_DIR=$PWD ${cfg.package}/bin/nix show-config ${optionalString isNix23 "--no-net --option experimental-features nix-command"} >/dev/null
             '')
       );
 
diff --git a/nixos/modules/services/misc/osrm.nix b/nixos/modules/services/misc/osrm.nix
index f89f37ccd9df5..79c347ab7e0ef 100644
--- a/nixos/modules/services/misc/osrm.nix
+++ b/nixos/modules/services/misc/osrm.nix
@@ -59,6 +59,7 @@ in
       group = config.users.users.osrm.name;
       description = "OSRM user";
       createHome = false;
+      isSystemUser = true;
     };
 
     users.groups.osrm = { };
diff --git a/nixos/modules/services/monitoring/collectd.nix b/nixos/modules/services/monitoring/collectd.nix
index b2e44a1e36662..731ac743b7c63 100644
--- a/nixos/modules/services/monitoring/collectd.nix
+++ b/nixos/modules/services/monitoring/collectd.nix
@@ -131,6 +131,7 @@ in {
 
     users.users = optional (cfg.user == "collectd") {
       name = "collectd";
+      isSystemUser = true;
     };
   };
 }
diff --git a/nixos/modules/services/monitoring/fusion-inventory.nix b/nixos/modules/services/monitoring/fusion-inventory.nix
index b90579bb70c76..fe19ed5619542 100644
--- a/nixos/modules/services/monitoring/fusion-inventory.nix
+++ b/nixos/modules/services/monitoring/fusion-inventory.nix
@@ -49,6 +49,7 @@ in {
     users.users = singleton {
       name = "fusion-inventory";
       description = "FusionInventory user";
+      isSystemUser = true;
     };
 
     systemd.services.fusion-inventory = {
diff --git a/nixos/modules/services/monitoring/netdata.nix b/nixos/modules/services/monitoring/netdata.nix
index 463b1b882acf5..7d976db963005 100644
--- a/nixos/modules/services/monitoring/netdata.nix
+++ b/nixos/modules/services/monitoring/netdata.nix
@@ -181,6 +181,7 @@ in {
 
     users.users = optional (cfg.user == defaultUser) {
       name = defaultUser;
+      isSystemUser = true;
     };
 
     users.groups = optional (cfg.group == defaultUser) {
diff --git a/nixos/modules/services/monitoring/zabbix-agent.nix b/nixos/modules/services/monitoring/zabbix-agent.nix
index 856b9432892b5..b3383ed628b29 100644
--- a/nixos/modules/services/monitoring/zabbix-agent.nix
+++ b/nixos/modules/services/monitoring/zabbix-agent.nix
@@ -131,6 +131,7 @@ in
     users.users.${user} = {
       description = "Zabbix Agent daemon user";
       inherit group;
+      isSystemUser = true;
     };
 
     users.groups.${group} = { };
diff --git a/nixos/modules/services/network-filesystems/orangefs/client.nix b/nixos/modules/services/network-filesystems/orangefs/client.nix
new file mode 100644
index 0000000000000..b69d9e713c3dc
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/orangefs/client.nix
@@ -0,0 +1,97 @@
+{ config, lib, pkgs, ...} :
+
+with lib;
+
+let
+  cfg = config.services.orangefs.client;
+
+in {
+  ###### interface
+
+  options = {
+    services.orangefs.client = {
+      enable = mkEnableOption "OrangeFS client daemon";
+
+      extraOptions = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = "Extra command line options for pvfs2-client.";
+      };
+
+      fileSystems = mkOption {
+        description = ''
+          The OrangeFS file systems to be mounted.
+          This option is preferred over using <option>fileSystems</option> directly, since
+          the pvfs2 client service needs to be running for them to be mounted.
+        '';
+
+        example = [{
+          mountPoint = "/orangefs";
+          target = "tcp://server:3334/orangefs";
+        }];
+
+        type = with types; listOf (submodule ({ ... } : {
+          options = {
+
+            mountPoint = mkOption {
+              type = types.str;
+              default = "/orangefs";
+              description = "Mount point.";
+            };
+
+            options = mkOption {
+              type = with types; listOf str;
+              default = [];
+              description = "Mount options";
+            };
+
+            target = mkOption {
+              type = types.str;
+              default = null;
+              example = "tcp://server:3334/orangefs";
+              description = "Target URL";
+            };
+          };
+        }));
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.orangefs ];
+
+    boot.supportedFilesystems = [ "pvfs2" ];
+    boot.kernelModules = [ "orangefs" ];
+
+    systemd.services.orangefs-client = {
+      requires = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+
+        ExecStart = ''
+          ${pkgs.orangefs}/bin/pvfs2-client-core \
+            --logtype=syslog ${concatStringsSep " " cfg.extraOptions}
+        '';
+
+        TimeoutStopSec = "120";
+      };
+    };
+
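+    # Each entry in cfg.fileSystems becomes a systemd mount unit that is bound to
+    # the client service, so mounts start after pvfs2-client and stop with it.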
+    systemd.mounts = map (fs: {
+      requires = [ "orangefs-client.service" ];
+      after = [ "orangefs-client.service" ];
+      bindsTo = [ "orangefs-client.service" ];
+      wantedBy = [ "remote-fs.target" ];
+      type = "pvfs2";
+      options = concatStringsSep "," fs.options;
+      what = fs.target;
+      where = fs.mountPoint;
+    }) cfg.fileSystems;
+  };
+}
+
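A minimal client configuration sketch, mirroring the option example above (the server host is a placeholder):

    services.orangefs.client = {
      enable = true;
      fileSystems = [{
        mountPoint = "/orangefs";
        target = "tcp://server:3334/orangefs";
      }];
    };
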
diff --git a/nixos/modules/services/network-filesystems/orangefs/server.nix b/nixos/modules/services/network-filesystems/orangefs/server.nix
new file mode 100644
index 0000000000000..74ebdc1340245
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/orangefs/server.nix
@@ -0,0 +1,225 @@
+{ config, lib, pkgs, ...} :
+
+with lib;
+
+let
+  cfg = config.services.orangefs.server;
+
+  aliases = mapAttrsToList (alias: url: alias) cfg.servers;
+
+  # Maximum handle number (2^63 - 2)
+  maxHandle = 9223372036854775806;
+
+  # One range of handles for each meta/data instance
+  handleStep = maxHandle / (length aliases) / 2;
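+  # Each alias gets one metadata range and one data range of handleStep handles;
+  # e.g. with two servers the handle space is split into four equal ranges.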
+
+  fileSystems = mapAttrsToList (name: fs: ''
+    <FileSystem>
+      Name ${name}
+      ID ${toString fs.id}
+      RootHandle ${toString fs.rootHandle}
+
+      ${fs.extraConfig}
+
+      <MetaHandleRanges>
+      ${concatStringsSep "\n" (
+          imap0 (i: alias:
+            let
+              begin = i * handleStep + 3;
+              end = begin + handleStep - 1;
+            in "Range ${alias} ${toString begin}-${toString end}") aliases
+       )}
+      </MetaHandleRanges>
+
+      <DataHandleRanges>
+      ${concatStringsSep "\n" (
+          imap0 (i: alias:
+            let
+              begin = i * handleStep + 3 + (length aliases) * handleStep;
+              end = begin + handleStep - 1;
+            in "Range ${alias} ${toString begin}-${toString end}") aliases
+       )}
+      </DataHandleRanges>
+
+      <StorageHints>
+      TroveSyncMeta ${if fs.troveSyncMeta then "yes" else "no"}
+      TroveSyncData ${if fs.troveSyncData then "yes" else "no"}
+      ${fs.extraStorageHints}
+      </StorageHints>
+
+    </FileSystem>
+  '') cfg.fileSystems;
+
+  configFile = ''
+    <Defaults>
+    LogType ${cfg.logType}
+    DataStorageSpace ${cfg.dataStorageSpace}
+    MetaDataStorageSpace ${cfg.metadataStorageSpace}
+
+    BMIModules ${concatStringsSep "," cfg.BMIModules}
+    ${cfg.extraDefaults}
+    </Defaults>
+
+    ${cfg.extraConfig}
+
+    <Aliases>
+    ${concatStringsSep "\n" (mapAttrsToList (alias: url: "Alias ${alias} ${url}") cfg.servers)}
+    </Aliases>
+
+    ${concatStringsSep "\n" fileSystems}
+  '';
+
+in {
+  ###### interface
+
+  options = {
+    services.orangefs.server = {
+      enable = mkEnableOption "OrangeFS server";
+
+      logType = mkOption {
+        type = with types; enum [ "file" "syslog" ];
+        default = "syslog";
+        description = "Destination for log messages.";
+      };
+
+      dataStorageSpace = mkOption {
+        type = types.str;
+        default = null;
+        example = "/data/storage";
+        description = "Directory for data storage.";
+      };
+
+      metadataStorageSpace = mkOption {
+        type = types.str;
+        default = null;
+        example = "/data/meta";
+        description = "Directory for meta data storage.";
+      };
+
+      BMIModules = mkOption {
+        type = with types; listOf str;
+        default = [ "bmi_tcp" ];
+        example = [ "bmi_tcp" "bmi_ib"];
+        description = "List of BMI modules to load.";
+      };
+
+      extraDefaults = mkOption {
+        type = types.lines;
+        default = "";
+        description = "Extra config for <literal>&lt;Defaults&gt;</literal> section.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = "Extra config for the global section.";
+      };
+
+      servers = mkOption {
+        type = with types; attrsOf str;
+        default = {};
+        example = literalExample ''
+          {
+            node1 = "tcp://node1:3334";
+            node2 = "tcp://node2:3334";
+          }
+        '';
+        description = "URLs for storage server including port. The attribute names define the server alias.";
+      };
+
+      fileSystems = mkOption {
+        description = ''
+          These options will create the <literal>&lt;FileSystem&gt;</literal> sections of the config file.
+        '';
+        default = { orangefs = {}; };
+        defaultText = literalExample "{ orangefs = {}; }";
+        example = literalExample ''
+          {
+            fs1 = {
+              id = 101;
+            };
+
+            fs2 = {
+              id = 102;
+            };
+          }
+        '';
+        type = with types; attrsOf (submodule ({ ... } : {
+          options = {
+            id = mkOption {
+              type = types.int;
+              default = 1;
+              description = "File system ID (must be unique within configuration).";
+            };
+
+            rootHandle = mkOption {
+              type = types.int;
+              default = 3;
+              description = "File system root ID.";
+            };
+
+            extraConfig = mkOption {
+              type = types.lines;
+              default = "";
+              description = "Extra config for <literal>&lt;FileSystem&gt;</literal> section.";
+            };
+
+            troveSyncMeta = mkOption {
+              type = types.bool;
+              default = true;
+              description = "Sync meta data.";
+            };
+
+            troveSyncData = mkOption {
+              type = types.bool;
+              default = false;
+              description = "Sync data.";
+            };
+
+            extraStorageHints = mkOption {
+              type = types.lines;
+              default = "";
+              description = "Extra config for <literal>&lt;StorageHints&gt;</literal> section.";
+            };
+          };
+        }));
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.orangefs ];
+
+    # The orangefs daemon will run as this dedicated system user.
+    users.users.orangefs.isSystemUser = true;
+    users.groups.orangefs = {};
+
+    # To format the file system, the config file needs to be available on disk.
+    environment.etc."orangefs/server.conf" = {
+      text = configFile;
+      user = "orangefs";
+      group = "orangefs";
+    };
+
+    systemd.services.orangefs-server = {
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      serviceConfig = {
+        # Run as "simple" in foreground mode (-d).
+        # This is more reliable.
+        ExecStart = ''
+          ${pkgs.orangefs}/bin/pvfs2-server -d \
+            /etc/orangefs/server.conf
+        '';
+        TimeoutStopSec = "120";
+        User = "orangefs";
+        Group = "orangefs";
+      };
+    };
+  };
+
+}
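A minimal two-node server sketch, built from the option examples above (node1/node2 and the storage paths are placeholders):

    services.orangefs.server = {
      enable = true;
      dataStorageSpace = "/data/storage";
      metadataStorageSpace = "/data/meta";
      servers = {
        node1 = "tcp://node1:3334";
        node2 = "tcp://node2:3334";
      };
    };
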
diff --git a/nixos/modules/services/networking/bitcoind.nix b/nixos/modules/services/networking/bitcoind.nix
index 1439d739da9d8..90f1291c01985 100644
--- a/nixos/modules/services/networking/bitcoind.nix
+++ b/nixos/modules/services/networking/bitcoind.nix
@@ -187,6 +187,7 @@ in {
       group = cfg.group;
       description = "Bitcoin daemon user";
       home = cfg.dataDir;
+      isSystemUser = true;
     };
     users.groups.${cfg.group} = {
       name = cfg.group;
diff --git a/nixos/modules/services/networking/dnscache.nix b/nixos/modules/services/networking/dnscache.nix
index 5051fc916d969..d123bca932193 100644
--- a/nixos/modules/services/networking/dnscache.nix
+++ b/nixos/modules/services/networking/dnscache.nix
@@ -84,7 +84,7 @@ in {
 
   config = mkIf config.services.dnscache.enable {
     environment.systemPackages = [ pkgs.djbdns ];
-    users.users.dnscache = {};
+    users.users.dnscache.isSystemUser = true;
 
     systemd.services.dnscache = {
       description = "djbdns dnscache server";
diff --git a/nixos/modules/services/networking/dnscrypt-wrapper.nix b/nixos/modules/services/networking/dnscrypt-wrapper.nix
index bf13d5c6f5fec..79f9e1a430837 100644
--- a/nixos/modules/services/networking/dnscrypt-wrapper.nix
+++ b/nixos/modules/services/networking/dnscrypt-wrapper.nix
@@ -142,6 +142,7 @@ in {
       description = "dnscrypt-wrapper daemon user";
       home = "${dataDir}";
       createHome = true;
+      isSystemUser = true;
     };
     users.groups.dnscrypt-wrapper = { };
 
diff --git a/nixos/modules/services/networking/go-shadowsocks2.nix b/nixos/modules/services/networking/go-shadowsocks2.nix
new file mode 100644
index 0000000000000..afbd7ea27c65c
--- /dev/null
+++ b/nixos/modules/services/networking/go-shadowsocks2.nix
@@ -0,0 +1,30 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.go-shadowsocks2.server;
+in {
+  options.services.go-shadowsocks2.server = {
+    enable = mkEnableOption "go-shadowsocks2 server";
+
+    listenAddress = mkOption {
+      type = types.str;
+      description = "Server listen address or URL";
+      example = "ss://AEAD_CHACHA20_POLY1305:your-password@:8488";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.go-shadowsocks2-server = {
+      description = "go-shadowsocks2 server";
+
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "${pkgs.go-shadowsocks2}/bin/go-shadowsocks2 -s '${cfg.listenAddress}'";
+        DynamicUser = true;
+      };
+    };
+  };
+}
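A minimal sketch of the new module, reusing the listenAddress example above (the password is a placeholder):

    services.go-shadowsocks2.server = {
      enable = true;
      listenAddress = "ss://AEAD_CHACHA20_POLY1305:your-password@:8488";
    };
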
diff --git a/nixos/modules/services/networking/hans.nix b/nixos/modules/services/networking/hans.nix
index 20e57e4626efa..4f60300f5ff41 100644
--- a/nixos/modules/services/networking/hans.nix
+++ b/nixos/modules/services/networking/hans.nix
@@ -138,6 +138,7 @@ in
     users.users = singleton {
       name = hansUser;
       description = "Hans daemon user";
+      isSystemUser = true;
     };
   };
 
diff --git a/nixos/modules/services/networking/matterbridge.nix b/nixos/modules/services/networking/matterbridge.nix
index 1fd63348c16c1..682eaa6eb297b 100644
--- a/nixos/modules/services/networking/matterbridge.nix
+++ b/nixos/modules/services/networking/matterbridge.nix
@@ -95,6 +95,7 @@ in
     users.users = optional (cfg.user == "matterbridge")
       { name = "matterbridge";
         group = "matterbridge";
+        isSystemUser = true;
       };
 
     users.groups = optional (cfg.group == "matterbridge")
diff --git a/nixos/modules/services/networking/morty.nix b/nixos/modules/services/networking/morty.nix
index 1b3084fe9abbc..e3a6444c11635 100644
--- a/nixos/modules/services/networking/morty.nix
+++ b/nixos/modules/services/networking/morty.nix
@@ -74,6 +74,7 @@ in
       { description = "Morty user";
         createHome = true;
         home = "/var/lib/morty";
+        isSystemUser = true;
       };
 
     systemd.services.morty =
diff --git a/nixos/modules/services/networking/nghttpx/default.nix b/nixos/modules/services/networking/nghttpx/default.nix
index d6e1906e38816..881a2670f5db0 100644
--- a/nixos/modules/services/networking/nghttpx/default.nix
+++ b/nixos/modules/services/networking/nghttpx/default.nix
@@ -96,6 +96,7 @@ in
     users.groups.nghttpx = { };
     users.users.nghttpx = {
       group = config.users.groups.nghttpx.name;
+      isSystemUser = true;
     };
       
 
diff --git a/nixos/modules/services/networking/owamp.nix b/nixos/modules/services/networking/owamp.nix
index 821a0258f4bec..dbb2e3b4c4092 100644
--- a/nixos/modules/services/networking/owamp.nix
+++ b/nixos/modules/services/networking/owamp.nix
@@ -21,6 +21,7 @@ in
       name = "owamp";
       group = "owamp";
       description = "Owamp daemon";
+      isSystemUser = true;
     };
 
     users.groups = singleton {
diff --git a/nixos/modules/services/networking/stunnel.nix b/nixos/modules/services/networking/stunnel.nix
index cbc899f2b4d73..ab51bba2f6acf 100644
--- a/nixos/modules/services/networking/stunnel.nix
+++ b/nixos/modules/services/networking/stunnel.nix
@@ -57,7 +57,13 @@ let
       };
 
       CAPath = mkOption {
-        type = types.path;
+        type = types.nullOr types.path;
+        default = null;
+        description = "Path to a directory containing certificates to validate against.";
+      };
+
+      CAFile = mkOption {
+        type = types.nullOr types.path;
         default = "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt";
         description = "Path to a file containing certificates to validate against.";
       };
@@ -196,6 +202,7 @@ in
                verifyChain = ${yesNo v.verifyChain}
                verifyPeer = ${yesNo v.verifyPeer}
                ${optionalString (v.CAPath != null) "CApath = ${v.CAPath}"}
+               ${optionalString (v.CAFile != null) "CAFile = ${v.CAFile}"}
                ${optionalString (v.verifyHostname != null) "checkHost = ${v.verifyHostname}"}
                OCSPaia = yes
 
@@ -216,6 +223,12 @@ in
       };
     };
 
+    meta.maintainers = with maintainers; [
+      # Server side
+      lschuermann
+      # Client side
+      das_j
+    ];
   };
 
 }
diff --git a/nixos/modules/services/networking/thelounge.nix b/nixos/modules/services/networking/thelounge.nix
index b1d23372955ed..875d8f6616978 100644
--- a/nixos/modules/services/networking/thelounge.nix
+++ b/nixos/modules/services/networking/thelounge.nix
@@ -56,6 +56,7 @@ in {
     users.users.thelounge = {
       description = "thelounge service user";
       group = "thelounge";
+      isSystemUser = true;
     };
     users.groups.thelounge = {};
     systemd.services.thelounge = {
diff --git a/nixos/modules/services/networking/tinydns.nix b/nixos/modules/services/networking/tinydns.nix
index 7d5db71601ef5..7b2c464ab46b0 100644
--- a/nixos/modules/services/networking/tinydns.nix
+++ b/nixos/modules/services/networking/tinydns.nix
@@ -32,7 +32,7 @@ with lib;
   config = mkIf config.services.tinydns.enable {
     environment.systemPackages = [ pkgs.djbdns ];
 
-    users.users.tinydns = {};
+    users.users.tinydns.isSystemUser = true;
 
     systemd.services.tinydns = {
       description = "djbdns tinydns server";
diff --git a/nixos/modules/services/networking/trickster.nix b/nixos/modules/services/networking/trickster.nix
new file mode 100644
index 0000000000000..8760dd5a93827
--- /dev/null
+++ b/nixos/modules/services/networking/trickster.nix
@@ -0,0 +1,112 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.trickster;
+in
+{
+
+  options = {
+    services.trickster = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable Trickster.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.trickster;
+        defaultText = "pkgs.trickster";
+        description = ''
+          Package that should be used for trickster.
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = ''
+          Path to configuration file.
+        '';
+      };
+
+      instance-id = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = ''
+          Instance ID for when running multiple processes (default null).
+        '';
+      };
+
+      log-level = mkOption {
+        type = types.str;
+        default = "info";
+        description = ''
+          Level of logging to use (debug, info, warn, error).
+        '';
+      };
+
+      metrics-port = mkOption {
+        type = types.port;
+        default = 8082;
+        description = ''
+          Port that the /metrics endpoint will listen on.
+        '';
+      };
+
+      origin = mkOption {
+        type = types.str;
+        default = "http://prometheus:9090";
+        description = ''
+          URL of the Prometheus origin. Enter it as you would in Grafana, e.g. http://prometheus:9090.
+        '';
+      };
+
+      profiler-port = mkOption {
+        type = types.nullOr types.port;
+        default = null;
+        description = ''
+          Port that the /debug/pprof endpoint will listen on.
+        '';
+      };
+
+      proxy-port = mkOption {
+        type = types.port;
+        default = 9090;
+        description = ''
+          Port that the Proxy server will listen on.
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.trickster = {
+      description = "Dashboard Accelerator for Prometheus";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        ExecStart = ''
+          ${cfg.package}/bin/trickster \
+          -log-level ${cfg.log-level} \
+          -metrics-port ${toString cfg.metrics-port} \
+          -origin ${cfg.origin} \
+          -proxy-port ${toString cfg.proxy-port} \
+          ${optionalString (cfg.configFile != null) "-config ${cfg.configFile}"} \
+          ${optionalString (cfg.profiler-port != null) "-profiler-port ${toString cfg.profiler-port}"} \
+          ${optionalString (cfg.instance-id != null) "-instance-id ${toString cfg.instance-id}"}
+        '';
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        Restart = "always";
+      };
+    };
+
+  };  
+}
+
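A minimal sketch, pointing at a Prometheus origin as in the option default (the host name is a placeholder):

    services.trickster = {
      enable = true;
      origin = "http://prometheus:9090";
    };
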
diff --git a/nixos/modules/services/networking/yggdrasil.nix b/nixos/modules/services/networking/yggdrasil.nix
new file mode 100644
index 0000000000000..0da50ccc344b0
--- /dev/null
+++ b/nixos/modules/services/networking/yggdrasil.nix
@@ -0,0 +1,193 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.yggdrasil;
+  configProvided = (cfg.config != {});
+  configAsFile = (if configProvided then
+                   toString (pkgs.writeTextFile {
+                     name = "yggdrasil-conf";
+                     text = builtins.toJSON cfg.config;
+                   })
+                   else null);
+  configFileProvided = (cfg.configFile != null);
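+  # If both config and configFile are given, merge them with jq; with "add" the
+  # second input (cfg.config) wins, matching the precedence documented below.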
+  generateConfig = (
+    if configProvided && configFileProvided then
+      "${pkgs.jq}/bin/jq -s add /run/yggdrasil/configFile.json ${configAsFile}"
+    else if configProvided then
+      "cat ${configAsFile}"
+    else if configFileProvided then
+      "cat /run/yggdrasil/configFile.json"
+    else
+      "${cfg.package}/bin/yggdrasil -genconf"
+  );
+
+in {
+  options = with types; {
+    services.yggdrasil = {
+      enable = mkEnableOption "the yggdrasil system service";
+
+      configFile = mkOption {
+        type = nullOr str;
+        default = null;
+        example = "/run/keys/yggdrasil.conf";
+        description = ''
+          A file which contains JSON configuration for yggdrasil.
+
+          You do not have to supply a complete configuration, as
+          yggdrasil will use default values for anything which is
+          omitted.  If the encryption and signing keys are omitted,
+          yggdrasil will generate new ones each time the service is
+          started, resulting in a random IPv6 address on the yggdrasil
+          network each time.
+
+          If both this option and <option>config</option> are
+          supplied, they will be combined, with values from
+          <option>config</option> taking precedence.
+
+          You can use the command <code>nix-shell -p yggdrasil --run
+          "yggdrasil -genconf -json"</code> to generate a default
+          JSON configuration.
+        '';
+      };
+
+      config = mkOption {
+        type = attrs;
+        default = {};
+        example = {
+          Peers = [
+            "tcp://aa.bb.cc.dd:eeeee"
+            "tcp://[aaaa:bbbb:cccc:dddd::eeee]:fffff"
+          ];
+          Listen = [
+            "tcp://0.0.0.0:xxxxx"
+          ];
+        };
+        description = ''
+          Configuration for yggdrasil, as a Nix attribute set.
+
+          Warning: this is stored in the WORLD-READABLE Nix store!
+          Therefore, it is not appropriate for private keys.  If you
+          do not specify the keys, yggdrasil will generate a new set
+          each time the service is started, creating a random IPv6
+          address on the yggdrasil network each time.
+
+          If you wish to specify the keys, use
+          <option>configFile</option>.  If both
+          <option>configFile</option> and <option>config</option> are
+          supplied, they will be combined, with values from
+          <option>config</option> taking precedence.
+
+          You can use the command <code>nix-shell -p yggdrasil --run
+          "yggdrasil -genconf"</code> to generate default
+          configuration values with documentation.
+        '';
+      };
+
+      openMulticastPort = mkOption {
+        type = bool;
+        default = false;
+        description = ''
+          Whether to open the UDP port used for multicast peer
+          discovery. The NixOS firewall blocks link-local
+          communication, so in order to make local peering work you
+          will also need to set <code>LinkLocalTCPPort</code> in your
+          yggdrasil configuration (<option>config</option> or
+          <option>configFile</option>) to a port number other than 0,
+          and then add that port to
+          <option>networking.firewall.allowedTCPPorts</option>.
+        '';
+      };
+
+      denyDhcpcdInterfaces = mkOption {
+        type = listOf str;
+        default = [];
+        example = [ "tap*" ];
+        description = ''
+          Disable the DHCP client for any interface whose name matches
+          any of the shell glob patterns in this list.  Use this
+          option to prevent the DHCP client from broadcasting requests
+          on the yggdrasil network.  It is only necessary to do so
+          when yggdrasil is running in TAP mode, because TUN
+          interfaces do not support broadcasting.
+        '';
+      };
+
+      package = mkOption {
+        type = package;
+        default = pkgs.yggdrasil;
+        defaultText = "pkgs.yggdrasil";
+        description = "Yggdrasil package to use.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      { assertion = config.networking.enableIPv6;
+        message = "networking.enableIPv6 must be true for yggdrasil to work";
+      }
+    ];
+
+    environment.etc."yggdrasil.conf" = {
+      enable = true;
+      mode = "symlink";
+      source = "/run/yggdrasil/yggdrasil.conf";
+    };
+
+    systemd.services.yggdrasil = {
+      description = "Yggdrasil Network Service";
+      path = [ cfg.package ] ++ optional (configProvided && configFileProvided) pkgs.jq;
+      bindsTo = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        ${generateConfig} | yggdrasil -normaliseconf -useconf > /run/yggdrasil/yggdrasil.conf
+      '';
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/yggdrasil -useconffile /etc/yggdrasil.conf";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        Restart = "always";
+
+        RuntimeDirectory = "yggdrasil";
+        RuntimeDirectoryMode = "0700";
+        BindReadOnlyPaths = mkIf configFileProvided
+          [ "${cfg.configFile}:/run/yggdrasil/configFile.json" ];
+
+        # TODO: as of yggdrasil 0.3.8 and systemd 243, yggdrasil fails
+        # to set up the network adapter when DynamicUser is set.  See
+        # github.com/yggdrasil-network/yggdrasil-go/issues/557.  The
+        # following options are implied by DynamicUser according to
+        # the systemd.exec documentation, and can be removed if the
+        # upstream issue is fixed and DynamicUser is set to true:
+        PrivateTmp = true;
+        RemoveIPC = true;
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        RestrictSUIDSGID = true;
+        # End of list of options implied by DynamicUser.
+
+        AmbientCapabilities = "CAP_NET_ADMIN";
+        CapabilityBoundingSet = "CAP_NET_ADMIN";
+        MemoryDenyWriteExecute = true;
+        ProtectControlGroups = true;
+        ProtectHome = "tmpfs";
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = "~@clock @cpu-emulation @debug @keyring @module @mount @obsolete @raw-io @resources";
+      };
+    };
+
+    networking.dhcpcd.denyInterfaces = cfg.denyDhcpcdInterfaces;
+    networking.firewall.allowedUDPPorts = mkIf cfg.openMulticastPort [ 9001 ];
+
+    # Make yggdrasilctl available on the command line.
+    environment.systemPackages = [ cfg.package ];
+  };
+  meta.maintainers = with lib.maintainers; [ gazally ];
+}
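A minimal peering sketch, following the openMulticastPort description above (the peer URL and TCP port are placeholders):

    services.yggdrasil = {
      enable = true;
      openMulticastPort = true;
      config = {
        Peers = [ "tcp://peer.example.org:12345" ];
        LinkLocalTCPPort = 43210;
      };
    };
    networking.firewall.allowedTCPPorts = [ 43210 ];
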
diff --git a/nixos/modules/services/printing/cupsd.nix b/nixos/modules/services/printing/cupsd.nix
index 3fcae611dc793..1071c05d514ef 100644
--- a/nixos/modules/services/printing/cupsd.nix
+++ b/nixos/modules/services/printing/cupsd.nix
@@ -31,7 +31,7 @@ let
   # part of CUPS itself, e.g. the SMB backend is part of Samba.  Since
   # we can't update ${cups.out}/lib/cups itself, we create a symlink tree
   # here and add the additional programs.  The ServerBin directive in
-  # cupsd.conf tells cupsd to use this tree.
+  # cups-files.conf tells cupsd to use this tree.
   bindir = pkgs.buildEnv {
     name = "cups-progs";
     paths =
diff --git a/nixos/modules/services/scheduling/marathon.nix b/nixos/modules/services/scheduling/marathon.nix
index 0961a67770e1b..2e0d20c64b23a 100644
--- a/nixos/modules/services/scheduling/marathon.nix
+++ b/nixos/modules/services/scheduling/marathon.nix
@@ -93,6 +93,6 @@ in {
       };
     };
 
-    users.users.${cfg.user} = { };
+    users.users.${cfg.user}.isSystemUser = true;
   };
 }
diff --git a/nixos/modules/services/security/bitwarden_rs/default.nix b/nixos/modules/services/security/bitwarden_rs/default.nix
index 80fd65891ff8f..d1817db075550 100644
--- a/nixos/modules/services/security/bitwarden_rs/default.nix
+++ b/nixos/modules/services/security/bitwarden_rs/default.nix
@@ -74,7 +74,10 @@ in {
       webVaultEnabled = mkDefault true;
     };
 
-    users.users.bitwarden_rs = { inherit group; };
+    users.users.bitwarden_rs = {
+      inherit group;
+      isSystemUser = true;
+    };
     users.groups.bitwarden_rs = { };
 
     systemd.services.bitwarden_rs = {
diff --git a/nixos/modules/services/security/oauth2_proxy.nix b/nixos/modules/services/security/oauth2_proxy.nix
index bb03f7fc9e43c..2abb9ec32acaa 100644
--- a/nixos/modules/services/security/oauth2_proxy.nix
+++ b/nixos/modules/services/security/oauth2_proxy.nix
@@ -546,6 +546,7 @@ in
 
     users.users.oauth2_proxy = {
       description = "OAuth2 Proxy";
+      isSystemUser = true;
     };
 
     systemd.services.oauth2_proxy = {
diff --git a/nixos/modules/services/torrent/magnetico.nix b/nixos/modules/services/torrent/magnetico.nix
index 02fa2ac0750a5..a7acdb78b3164 100644
--- a/nixos/modules/services/torrent/magnetico.nix
+++ b/nixos/modules/services/torrent/magnetico.nix
@@ -171,6 +171,7 @@ in {
 
     users.users.magnetico = {
       description = "Magnetico daemons user";
+      isSystemUser = true;
     };
 
     systemd.services.magneticod = {
diff --git a/nixos/modules/services/web-apps/codimd.nix b/nixos/modules/services/web-apps/codimd.nix
index 7ae7cd9c52d8c..5f56f8ed5a091 100644
--- a/nixos/modules/services/web-apps/codimd.nix
+++ b/nixos/modules/services/web-apps/codimd.nix
@@ -893,6 +893,7 @@ in
       extraGroups = cfg.groups;
       home = cfg.workDir;
       createHome = true;
+      isSystemUser = true;
     };
 
     systemd.services.codimd = {
diff --git a/nixos/modules/services/web-apps/frab.nix b/nixos/modules/services/web-apps/frab.nix
index 7914e5cc0ee19..a9a30b409220f 100644
--- a/nixos/modules/services/web-apps/frab.nix
+++ b/nixos/modules/services/web-apps/frab.nix
@@ -177,6 +177,7 @@ in
       { name = cfg.user;
         group = cfg.group;
         home = "${cfg.statePath}";
+        isSystemUser = true;
       }
     ];
 
diff --git a/nixos/modules/services/web-apps/gotify-server.nix b/nixos/modules/services/web-apps/gotify-server.nix
new file mode 100644
index 0000000000000..03e01f46a9441
--- /dev/null
+++ b/nixos/modules/services/web-apps/gotify-server.nix
@@ -0,0 +1,49 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gotify;
+in {
+  options = {
+    services.gotify = {
+      enable = mkEnableOption "Gotify webserver";
+
+      port = mkOption {
+        type = types.port;
+        description = ''
+          Port the server listens to.
+        '';
+      };
+
+      stateDirectoryName = mkOption {
+        type = types.str;
+        default = "gotify-server";
+        description = ''
+          The name of the directory below <filename>/var/lib</filename> where
+          gotify stores its runtime data.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.gotify-server = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      description = "Simple server for sending and receiving messages";
+
+      environment = {
+        GOTIFY_SERVER_PORT = toString cfg.port;
+      };
+
+      serviceConfig = {
+        WorkingDirectory = "/var/lib/${cfg.stateDirectoryName}";
+        StateDirectory = cfg.stateDirectoryName;
+        Restart = "always";
+        DynamicUser = "yes";
+        ExecStart = "${pkgs.gotify-server}/bin/server";
+      };
+    };
+  };
+}
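A minimal sketch for the new module (the port is an arbitrary example value):

    services.gotify = {
      enable = true;
      port = 3456;
    };
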
diff --git a/nixos/modules/services/web-apps/limesurvey.nix b/nixos/modules/services/web-apps/limesurvey.nix
index 68b57a9b90ddf..bd524524130d6 100644
--- a/nixos/modules/services/web-apps/limesurvey.nix
+++ b/nixos/modules/services/web-apps/limesurvey.nix
@@ -277,7 +277,10 @@ in
 
     systemd.services.httpd.after = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
 
-    users.users.${user}.group = group;
+    users.users.${user} = {
+      group = group;
+      isSystemUser = true;
+    };
 
   };
 }
diff --git a/nixos/modules/services/web-apps/matomo-doc.xml b/nixos/modules/services/web-apps/matomo-doc.xml
index 8485492c51c78..79cece551d34c 100644
--- a/nixos/modules/services/web-apps/matomo-doc.xml
+++ b/nixos/modules/services/web-apps/matomo-doc.xml
@@ -105,7 +105,7 @@ GRANT ALL PRIVILEGES ON matomo.* TO 'matomo'@'localhost';
   <para>
    You can use other web servers by forwarding calls for
    <filename>index.php</filename> and <filename>piwik.php</filename> to the
-   <literal>/run/phpfpm-matomo.sock</literal> fastcgi unix socket. You can use
+   <literal><link linkend="opt-services.phpfpm.pools._name_.socket">services.phpfpm.pools.&lt;name&gt;.socket</link></literal> fastcgi unix socket. You can use
    the nginx configuration in the module code as a reference to what else
    should be configured.
   </para>
diff --git a/nixos/modules/services/web-apps/matomo.nix b/nixos/modules/services/web-apps/matomo.nix
index 1e34aff8d1713..352cc4c647bce 100644
--- a/nixos/modules/services/web-apps/matomo.nix
+++ b/nixos/modules/services/web-apps/matomo.nix
@@ -2,15 +2,13 @@
 with lib;
 let
   cfg = config.services.matomo;
+  fpm = config.services.phpfpm.pools.${pool};
 
   user = "matomo";
   dataDir = "/var/lib/${user}";
   deprecatedDataDir = "/var/lib/piwik";
 
   pool = user;
-  # it's not possible to use /run/phpfpm/${pool}.sock because /run/phpfpm/ is root:root 0770,
-  # and therefore is not accessible by the web server.
-  phpSocket = "/run/phpfpm-${pool}.sock";
   phpExecutionUnit = "phpfpm-${pool}";
   databaseService = "mysql.service";
 
@@ -50,7 +48,7 @@ in {
         default = null;
         example = "lighttpd";
         description = ''
-          Name of the web server user that forwards requests to the ${phpSocket} fastcgi socket for Matomo if the nginx
+          Name of the web server user that forwards requests to the <option>services.phpfpm.pools.&lt;name&gt;.socket</option> fastcgi socket for Matomo if the nginx
           option is not used. Either this option or the nginx option is mandatory.
           If you want to use another webserver than nginx, you need to set this to that server's user
           and pass fastcgi requests to `index.php`, `matomo.php` and `piwik.php` (legacy name) to this socket.
@@ -71,25 +69,6 @@ in {
         '';
       };
 
-      phpfpmProcessManagerConfig = mkOption {
-        type = types.str;
-        default = ''
-          ; default phpfpm process manager settings
-          pm = dynamic
-          pm.max_children = 75
-          pm.start_servers = 10
-          pm.min_spare_servers = 5
-          pm.max_spare_servers = 20
-          pm.max_requests = 500
-
-          ; log worker's stdout, but this has a performance hit
-          catch_workers_output = yes
-        '';
-        description = ''
-          Settings for phpfpm's process manager. You might need to change this depending on the load for Matomo.
-        '';
-      };
-
       nginx = mkOption {
         type = types.nullOr (types.submodule (
           recursiveUpdate
@@ -233,15 +212,24 @@ in {
       else if (cfg.webServerUser != null) then cfg.webServerUser else "";
     in {
       ${pool} = {
-        listen = phpSocket;
-        extraConfig = ''
-          listen.owner = ${socketOwner}
-          listen.group = root
-          listen.mode = 0600
-          user = ${user}
-          env[PIWIK_USER_PATH] = ${dataDir}
-          ${cfg.phpfpmProcessManagerConfig}
+        inherit user;
+        phpOptions = ''
+          error_log = 'stderr'
+          log_errors = on
         '';
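+        # Each setting is wrapped in mkDefault so it can still be overridden per deployment.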
+        settings = mapAttrs (name: mkDefault) {
+          "listen.owner" = socketOwner;
+          "listen.group" = "root";
+          "listen.mode" = "0660";
+          "pm" = "dynamic";
+          "pm.max_children" = 75;
+          "pm.start_servers" = 10;
+          "pm.min_spare_servers" = 5;
+          "pm.max_spare_servers" = 20;
+          "pm.max_requests" = 500;
+          "catch_workers_output" = true;
+        };
+        phpEnv.PIWIK_USER_PATH = dataDir;
       };
     };
 
@@ -264,15 +252,15 @@ in {
         };
         # allow index.php for webinterface
         locations."= /index.php".extraConfig = ''
-          fastcgi_pass unix:${phpSocket};
+          fastcgi_pass unix:${fpm.socket};
         '';
         # allow matomo.php for tracking
         locations."= /matomo.php".extraConfig = ''
-          fastcgi_pass unix:${phpSocket};
+          fastcgi_pass unix:${fpm.socket};
         '';
         # allow piwik.php for tracking (deprecated name)
         locations."= /piwik.php".extraConfig = ''
-          fastcgi_pass unix:${phpSocket};
+          fastcgi_pass unix:${fpm.socket};
         '';
         # Any other attempt to access any php files is forbidden
         locations."~* ^.+\\.php$".extraConfig = ''
diff --git a/nixos/modules/services/web-apps/mediawiki.nix b/nixos/modules/services/web-apps/mediawiki.nix
index ec2568bf952d3..43edc04e1a492 100644
--- a/nixos/modules/services/web-apps/mediawiki.nix
+++ b/nixos/modules/services/web-apps/mediawiki.nix
@@ -461,7 +461,10 @@ in
 
     systemd.services.httpd.after = optional (cfg.database.createLocally && cfg.database.type == "mysql") "mysql.service";
 
-    users.users.${user}.group = group;
+    users.users.${user} = {
+      group = group;
+      isSystemUser = true;
+    };
 
     environment.systemPackages = [ mediawikiScripts ];
   };
diff --git a/nixos/modules/services/web-apps/moodle.nix b/nixos/modules/services/web-apps/moodle.nix
index 211bc17ee192c..ac59f9e0012a8 100644
--- a/nixos/modules/services/web-apps/moodle.nix
+++ b/nixos/modules/services/web-apps/moodle.nix
@@ -309,7 +309,9 @@ in
 
     systemd.services.httpd.after = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
 
-    users.users.${user}.group = group;
-
+    users.users.${user} = {
+      group = group;
+      isSystemUser = true;
+    };
   };
 }
diff --git a/nixos/modules/services/web-apps/nextcloud.nix b/nixos/modules/services/web-apps/nextcloud.nix
index db5dc915c89ff..b9186a1dc07f7 100644
--- a/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixos/modules/services/web-apps/nextcloud.nix
@@ -467,7 +467,7 @@ in {
               };
               "/" = {
                 priority = 200;
-                extraConfig = "rewrite ^ /index.php$request_uri;";
+                extraConfig = "rewrite ^ /index.php;";
               };
               "~ ^/store-apps" = {
                 priority = 201;
@@ -494,6 +494,7 @@ in {
                 extraConfig = ''
                   include ${config.services.nginx.package}/conf/fastcgi.conf;
                   fastcgi_split_path_info ^(.+\.php)(\\/.*)$;
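+                  # Return 404 unless the requested PHP script actually exists on disk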
+                  try_files $fastcgi_script_name =404;
                   fastcgi_param PATH_INFO $fastcgi_path_info;
                   fastcgi_param HTTPS ${if cfg.https then "on" else "off"};
                   fastcgi_param modHeadersAvailable true;
@@ -531,6 +532,7 @@ in {
               add_header X-Download-Options noopen;
               add_header X-Permitted-Cross-Domain-Policies none;
               add_header Referrer-Policy no-referrer;
+              add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
               error_page 403 /core/templates/403.php;
               error_page 404 /core/templates/404.php;
               client_max_body_size ${cfg.maxUploadSize};
diff --git a/nixos/modules/services/web-apps/nexus.nix b/nixos/modules/services/web-apps/nexus.nix
index 3af97e146d0aa..d4d507362c97b 100644
--- a/nixos/modules/services/web-apps/nexus.nix
+++ b/nixos/modules/services/web-apps/nexus.nix
@@ -68,6 +68,7 @@ in
           -Dkaraf.data=${cfg.home}/nexus3
           -Djava.io.tmpdir=${cfg.home}/nexus3/tmp
           -Dkaraf.startLocalConsole=false
+          -Djava.endorsed.dirs=${cfg.package}/lib/endorsed
         '';
 
         description = ''
diff --git a/nixos/modules/services/web-apps/virtlyst.nix b/nixos/modules/services/web-apps/virtlyst.nix
index e5c0bff2168ae..37bdbb0e3b42b 100644
--- a/nixos/modules/services/web-apps/virtlyst.nix
+++ b/nixos/modules/services/web-apps/virtlyst.nix
@@ -54,6 +54,7 @@ in
       home = stateDir;
       createHome = true;
       group = mkIf config.virtualisation.libvirtd.enable "libvirtd";
+      isSystemUser = true;
     };
 
     systemd.services.virtlyst = {
diff --git a/nixos/modules/services/web-apps/wordpress.nix b/nixos/modules/services/web-apps/wordpress.nix
index e311dd917dd07..f1370c2854b8d 100644
--- a/nixos/modules/services/web-apps/wordpress.nix
+++ b/nixos/modules/services/web-apps/wordpress.nix
@@ -367,7 +367,10 @@ in
       })
     ];
 
-    users.users.${user}.group = group;
+    users.users.${user} = {
+      group = group;
+      isSystemUser = true;
+    };
 
   };
 }
diff --git a/nixos/modules/services/web-servers/hitch/default.nix b/nixos/modules/services/web-servers/hitch/default.nix
index a6c4cbea12259..1812f225b74d7 100644
--- a/nixos/modules/services/web-servers/hitch/default.nix
+++ b/nixos/modules/services/web-servers/hitch/default.nix
@@ -102,7 +102,10 @@ with lib;
 
     environment.systemPackages = [ pkgs.hitch ];
 
-    users.users.hitch.group = "hitch";
+    users.users.hitch = {
+      group = "hitch";
+      isSystemUser = true;
+    };
     users.groups.hitch = {};
   };
 }
diff --git a/nixos/modules/services/web-servers/traefik.nix b/nixos/modules/services/web-servers/traefik.nix
index 8de7df0d446c9..5b0fc467ea469 100644
--- a/nixos/modules/services/web-servers/traefik.nix
+++ b/nixos/modules/services/web-servers/traefik.nix
@@ -117,6 +117,7 @@ in {
       group = "traefik";
       home = cfg.dataDir;
       createHome = true;
+      isSystemUser = true;
     };
 
     users.groups.traefik = {};
diff --git a/nixos/modules/services/web-servers/unit/default.nix b/nixos/modules/services/web-servers/unit/default.nix
index a4a9d370d6448..32f6d475b34ef 100644
--- a/nixos/modules/services/web-servers/unit/default.nix
+++ b/nixos/modules/services/web-servers/unit/default.nix
@@ -116,6 +116,7 @@ in {
     users.users = optionalAttrs (cfg.user == "unit") (singleton {
       name = "unit";
       group = cfg.group;
+      isSystemUser = true;
     });
 
     users.groups = optionalAttrs (cfg.group == "unit") (singleton {
diff --git a/nixos/modules/system/boot/networkd.nix b/nixos/modules/system/boot/networkd.nix
index f2060e21509c9..85a106527fe08 100644
--- a/nixos/modules/system/boot/networkd.nix
+++ b/nixos/modules/system/boot/networkd.nix
@@ -187,7 +187,7 @@ let
     # Note: For DHCP the values both, none, v4, v6 are deprecated
     (assertValueOneOf "DHCP" ["yes" "no" "ipv4" "ipv6" "both" "none" "v4" "v6"])
     (assertValueOneOf "DHCPServer" boolValues)
-    (assertValueOneOf "LinkLocalAddressing" ["yes" "no" "ipv4" "ipv6"])
+    (assertValueOneOf "LinkLocalAddressing" ["yes" "no" "ipv4" "ipv6" "ipv4-fallback" "fallback"])
     (assertValueOneOf "IPv4LLRoute" boolValues)
     (assertValueOneOf "LLMNR" ["yes" "resolve" "no"])
     (assertValueOneOf "MulticastDNS" ["yes" "resolve" "no"])
diff --git a/nixos/modules/tasks/network-interfaces-systemd.nix b/nixos/modules/tasks/network-interfaces-systemd.nix
index 90ee09c34dde1..9ffa1089ee697 100644
--- a/nixos/modules/tasks/network-interfaces-systemd.nix
+++ b/nixos/modules/tasks/network-interfaces-systemd.nix
@@ -31,7 +31,7 @@ in
       message = "networking.defaultGatewayWindowSize is not supported by networkd.";
     } {
       assertion = cfg.vswitches == {};
-      message = "networking.vswichtes are not supported by networkd.";
+      message = "networking.vswitches are not supported by networkd.";
     } {
       assertion = cfg.defaultGateway == null || cfg.defaultGateway.interface == null;
       message = "networking.defaultGateway.interface is not supported by networkd.";
diff --git a/nixos/modules/virtualisation/amazon-image.nix b/nixos/modules/virtualisation/amazon-image.nix
index aadfc5add3507..20d48add71293 100644
--- a/nixos/modules/virtualisation/amazon-image.nix
+++ b/nixos/modules/virtualisation/amazon-image.nix
@@ -135,6 +135,9 @@ in
     services.openssh.enable = true;
     services.openssh.permitRootLogin = "prohibit-password";
 
+    # Creates symlinks for block device names.
+    services.udev.packages = [ pkgs.ec2-utils ];
+
     # Force getting the hostname from EC2.
     networking.hostName = mkDefault "";
 
diff --git a/nixos/modules/virtualisation/ec2-amis.nix b/nixos/modules/virtualisation/ec2-amis.nix
index f640bb21b1331..3b4e55d39d7ba 100644
--- a/nixos/modules/virtualisation/ec2-amis.nix
+++ b/nixos/modules/virtualisation/ec2-amis.nix
@@ -291,5 +291,21 @@ let self = {
   "19.03".sa-east-1.hvm-ebs = "ami-0c6a43c6e0ad1f4e2";
   "19.03".ap-south-1.hvm-ebs = "ami-0303deb1b5890f878";
 
-  latest = self."19.03";
+  # 19.09.981.205691b7cbe
+  "19.09".eu-west-1.hvm-ebs = "ami-0ebd3156e21e9642f";
+  "19.09".eu-west-2.hvm-ebs = "ami-02a2b5480a79084b7";
+  "19.09".eu-west-3.hvm-ebs = "ami-09aa175c7588734f7";
+  "19.09".eu-central-1.hvm-ebs = "ami-00a7fafd7e237a330";
+  "19.09".us-east-1.hvm-ebs = "ami-00a8eeaf232a74f84";
+  "19.09".us-east-2.hvm-ebs = "ami-093efd3a57a1e03a8";
+  "19.09".us-west-1.hvm-ebs = "ami-0913e9a2b677fac30";
+  "19.09".us-west-2.hvm-ebs = "ami-02d9a19f77b47882a";
+  "19.09".ca-central-1.hvm-ebs = "ami-0627dd3f7b3627a29";
+  "19.09".ap-southeast-1.hvm-ebs = "ami-083614e4d08f2164d";
+  "19.09".ap-southeast-2.hvm-ebs = "ami-0048c704185ded6dc";
+  "19.09".ap-northeast-1.hvm-ebs = "ami-0329e7fc2d7f60bd0";
+  "19.09".ap-northeast-2.hvm-ebs = "ami-03d4ae7d0b5fc364f";
+  "19.09".ap-south-1.hvm-ebs = "ami-0b599690b35aeef23";
+
+  latest = self."19.09";
 }; in self
diff --git a/nixos/modules/virtualisation/virtualbox-host.nix b/nixos/modules/virtualisation/virtualbox-host.nix
index 6081d4153a6c0..ddb0a7bda4f34 100644
--- a/nixos/modules/virtualisation/virtualbox-host.nix
+++ b/nixos/modules/virtualisation/virtualbox-host.nix
@@ -149,5 +149,12 @@ in
     # Make sure NetworkManager won't assume this interface being up
     # means we have internet access.
     networking.networkmanager.unmanaged = ["vboxnet0"];
-  })]);
+  }) (mkIf config.networking.useNetworkd {
+    systemd.network.networks."40-vboxnet0".extraConfig = ''
+      [Link]
+      RequiredForOnline=no
+    '';
+  })
+
+]);
 }
diff --git a/nixos/tests/acme.nix b/nixos/tests/acme.nix
index 8cfdea4a16ef6..206d97849f024 100644
--- a/nixos/tests/acme.nix
+++ b/nixos/tests/acme.nix
@@ -12,8 +12,11 @@ in import ./make-test.nix {
       networking.extraHosts = ''
         ${config.networking.primaryIPAddress} standalone.com
       '';
-      security.acme.certs."standalone.com" = {
-        webroot = "/var/lib/acme/acme-challenges";
+      security.acme = {
+        server = "https://acme-v02.api.letsencrypt.org/dir";
+        certs."standalone.com" = {
+          webroot = "/var/lib/acme/acme-challenges";
+        };
       };
       systemd.targets."acme-finished-standalone.com" = {};
       systemd.services."acme-standalone.com" = {
@@ -54,6 +57,8 @@ in import ./make-test.nix {
         '';
       };
 
+      security.acme.server = "https://acme-v02.api.letsencrypt.org/dir";
+
       nesting.clone = [
         ({pkgs, ...}: {
 
@@ -80,7 +85,7 @@ in import ./make-test.nix {
     client = commonConfig;
   };
 
-  testScript = {nodes, ...}: 
+  testScript = {nodes, ...}:
     let
       newServerSystem = nodes.webserver2.config.system.build.toplevel;
       switchToNewServer = "${newServerSystem}/bin/switch-to-configuration test";
@@ -91,9 +96,12 @@ in import ./make-test.nix {
     # get pulled in by the oneshot units. The target units linger after activation, and hence we
     # can use them to probe that a oneshot fired. It is a bit ugly, but it is the best we can do
     ''
-      $client->waitForUnit("default.target");
+      $client->start;
+      $letsencrypt->start;
+      $acmeStandalone->start;
+
       $letsencrypt->waitForUnit("default.target");
-      $letsencrypt->waitForUnit("boulder.service");
+      $letsencrypt->waitForUnit("pebble.service");
 
       subtest "can request certificate with HTTPS-01 challenge", sub {
         $acmeStandalone->waitForUnit("default.target");
@@ -101,15 +109,20 @@ in import ./make-test.nix {
         $acmeStandalone->waitForUnit("acme-finished-standalone.com.target");
       };
 
+      $client->waitForUnit("default.target");
+
+      $client->succeed('curl https://acme-v02.api.letsencrypt.org:15000/roots/0 > /tmp/ca.crt');
+      $client->succeed('curl https://acme-v02.api.letsencrypt.org:15000/intermediate-keys/0 >> /tmp/ca.crt');
+
       subtest "Can request certificate for nginx service", sub {
         $webserver->waitForUnit("acme-finished-a.example.com.target");
-        $client->succeed('curl https://a.example.com/ | grep -qF "hello world"');
+        $client->succeed('curl --cacert /tmp/ca.crt https://a.example.com/ | grep -qF "hello world"');
       };
 
       subtest "Can add another certificate for nginx service", sub {
         $webserver->succeed("/run/current-system/fine-tune/child-1/bin/switch-to-configuration test");
         $webserver->waitForUnit("acme-finished-b.example.com.target");
-        $client->succeed('curl https://b.example.com/ | grep -qF "hello world"');
+        $client->succeed('curl --cacert /tmp/ca.crt https://b.example.com/ | grep -qF "hello world"');
       };
     '';
 }
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index e94c9712cbfaa..744d7ed0f839b 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -39,7 +39,8 @@ in
   caddy = handleTest ./caddy.nix {};
   cadvisor = handleTestOn ["x86_64-linux"] ./cadvisor.nix {};
   cassandra = handleTest ./cassandra.nix {};
-  ceph = handleTestOn ["x86_64-linux"] ./ceph.nix {};
+  ceph-single-node = handleTestOn ["x86_64-linux"] ./ceph-single-node.nix {};
+  ceph-multi-node = handleTestOn ["x86_64-linux"] ./ceph-multi-node.nix {};
   certmgr = handleTest ./certmgr.nix {};
   cfssl = handleTestOn ["x86_64-linux"] ./cfssl.nix {};
   chromium = (handleTestOn ["x86_64-linux"] ./chromium.nix {}).stable or {};
@@ -81,6 +82,7 @@ in
   env = handleTest ./env.nix {};
   etcd = handleTestOn ["x86_64-linux"] ./etcd.nix {};
   etcd-cluster = handleTestOn ["x86_64-linux"] ./etcd-cluster.nix {};
+  fancontrol = handleTest ./fancontrol.nix {};
   ferm = handleTest ./ferm.nix {};
   firefox = handleTest ./firefox.nix {};
   firewall = handleTest ./firewall.nix {};
@@ -93,6 +95,7 @@ in
   fsck = handleTest ./fsck.nix {};
   fwupd = handleTestOn ["x86_64-linux"] ./fwupd.nix {}; # libsmbios is unsupported on aarch64
   gdk-pixbuf = handleTest ./gdk-pixbuf.nix {};
+  gotify-server = handleTest ./gotify-server.nix {};
   gitea = handleTest ./gitea.nix {};
   gitlab = handleTest ./gitlab.nix {};
   gitolite = handleTest ./gitolite.nix {};
@@ -157,6 +160,7 @@ in
   #logstash = handleTest ./logstash.nix {};
   mailcatcher = handleTest ./mailcatcher.nix {};
   mathics = handleTest ./mathics.nix {};
+  matomo = handleTest ./matomo.nix {};
   matrix-synapse = handleTest ./matrix-synapse.nix {};
   mediawiki = handleTest ./mediawiki.nix {};
   memcached = handleTest ./memcached.nix {};
@@ -201,12 +205,14 @@ in
   novacomd = handleTestOn ["x86_64-linux"] ./novacomd.nix {};
   nsd = handleTest ./nsd.nix {};
   nzbget = handleTest ./nzbget.nix {};
+  openarena = handleTest ./openarena.nix {};
   openldap = handleTest ./openldap.nix {};
   opensmtpd = handleTest ./opensmtpd.nix {};
   openssh = handleTest ./openssh.nix {};
   # openstack-image-userdata doesn't work in a sandbox as the simulated openstack instance needs network access
   #openstack-image-userdata = (handleTestOn ["x86_64-linux"] ./openstack-image.nix {}).userdata or {};
   openstack-image-metadata = (handleTestOn ["x86_64-linux"] ./openstack-image.nix {}).metadata or {};
+  orangefs = handleTest ./orangefs.nix {};
   os-prober = handleTestOn ["x86_64-linux"] ./os-prober.nix {};
   osquery = handleTest ./osquery.nix {};
   osrm-backend = handleTest ./osrm-backend.nix {};
@@ -275,6 +281,7 @@ in
   tor = handleTest ./tor.nix {};
   transmission = handleTest ./transmission.nix {};
   trezord = handleTest ./trezord.nix {};
+  trickster = handleTest ./trickster.nix {};
   udisks2 = handleTest ./udisks2.nix {};
   upnp = handleTest ./upnp.nix {};
   uwsgi = handleTest ./uwsgi.nix {};
@@ -291,5 +298,6 @@ in
   xrdp = handleTest ./xrdp.nix {};
   xss-lock = handleTest ./xss-lock.nix {};
   yabar = handleTest ./yabar.nix {};
+  yggdrasil = handleTest ./yggdrasil.nix {};
   zookeeper = handleTest ./zookeeper.nix {};
 }
diff --git a/nixos/tests/ceph-multi-node.nix b/nixos/tests/ceph-multi-node.nix
new file mode 100644
index 0000000000000..6698aac3f2714
--- /dev/null
+++ b/nixos/tests/ceph-multi-node.nix
@@ -0,0 +1,247 @@
+import ./make-test.nix ({pkgs, lib, ...}:
+
+let
+  cfg = {
+    clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
+    monA = {
+      name = "a";
+      ip = "192.168.1.1";
+    };
+    osd0 = {
+      name = "0";
+      ip = "192.168.1.2";
+      key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
+      uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
+    };
+    osd1 = {
+      name = "1";
+      ip = "192.168.1.3";
+      key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
+      uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
+    };
+  };
+  generateCephConfig = { daemonConfig }: {
+    enable = true;
+    global = {
+      fsid = cfg.clusterId;
+      monHost = cfg.monA.ip;
+      monInitialMembers = cfg.monA.name;
+    };
+  } // daemonConfig;
+
+  generateHost = { pkgs, cephConfig, networkConfig, ... }: {
+    virtualisation = {
+      memorySize = 512;
+      emptyDiskImages = [ 20480 ];
+      vlans = [ 1 ];
+    };
+
+    networking = networkConfig;
+
+    environment.systemPackages = with pkgs; [
+      bash
+      sudo
+      ceph
+      xfsprogs
+      netcat-openbsd
+    ];
+
+    boot.kernelModules = [ "xfs" ];
+
+    services.ceph = cephConfig;
+
+    # So that we don't have to battle systemd when bootstrapping
+    systemd.targets.ceph.wantedBy = lib.mkForce [];
+  };
+
+  networkMonA = {
+    dhcpcd.enable = false;
+    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+      { address = cfg.monA.ip; prefixLength = 24; }
+    ];
+    firewall = {
+      allowedTCPPorts = [ 6789 3300 ];
+      allowedTCPPortRanges = [ { from = 6800; to = 7300; } ];
+    };
+  };
+  cephConfigMonA = generateCephConfig { daemonConfig = {
+    mon = {
+      enable = true;
+      daemons = [ cfg.monA.name ];
+    };
+    mgr = {
+      enable = true;
+      daemons = [ cfg.monA.name ];
+    };
+  }; };
+
+  networkOsd0 = {
+    dhcpcd.enable = false;
+    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+      { address = cfg.osd0.ip; prefixLength = 24; }
+    ];
+    firewall = {
+      allowedTCPPortRanges = [ { from = 6800; to = 7300; } ];
+    };
+  };
+  cephConfigOsd0 = generateCephConfig { daemonConfig = {
+    osd = {
+      enable = true;
+      daemons = [ cfg.osd0.name ];
+    };
+  }; };
+
+  networkOsd1 = {
+    dhcpcd.enable = false;
+    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+      { address = cfg.osd1.ip; prefixLength = 24; }
+    ];
+    firewall = {
+      allowedTCPPortRanges = [ { from = 6800; to = 7300; } ];
+    };
+  };
+  cephConfigOsd1 = generateCephConfig { daemonConfig = {
+    osd = {
+      enable = true;
+      daemons = [ cfg.osd1.name ];
+    };
+  }; };
+
+  testscript = { ... }: ''
+    startAll;
+
+    $monA->waitForUnit("network.target");
+    $osd0->waitForUnit("network.target");
+    $osd1->waitForUnit("network.target");
+
+    # Create the ceph-related directories
+    $monA->mustSucceed(
+      "mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}",
+      "mkdir -p /var/lib/ceph/mon/ceph-${cfg.monA.name}",
+      "chown ceph:ceph -R /var/lib/ceph/",
+      "mkdir -p /etc/ceph",
+      "chown ceph:ceph -R /etc/ceph"
+    );
+    $osd0->mustSucceed(
+      "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+      "chown ceph:ceph -R /var/lib/ceph/",
+      "mkdir -p /etc/ceph",
+      "chown ceph:ceph -R /etc/ceph"
+    );
+    $osd1->mustSucceed(
+      "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+      "chown ceph:ceph -R /var/lib/ceph/",
+      "mkdir -p /etc/ceph",
+      "chown ceph:ceph -R /etc/ceph"
+    );
+
+    # Bootstrap ceph-mon daemon
+    $monA->mustSucceed(
+      "sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
+      "sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
+      "sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
+      "monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
+      "sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
+      "sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
+      "systemctl start ceph-mon-${cfg.monA.name}"
+    );
+    $monA->waitForUnit("ceph-mon-${cfg.monA.name}");
+    $monA->mustSucceed("ceph mon enable-msgr2");
+
+    # Can't check ceph status until a mon is up
+    $monA->succeed("ceph -s | grep 'mon: 1 daemons'");
+
+    # Start the ceph-mgr daemon; it has no dependencies and needs hardly any setup
+    $monA->mustSucceed(
+      "ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
+      "systemctl start ceph-mgr-${cfg.monA.name}"
+    );
+    $monA->waitForUnit("ceph-mgr-a");
+    $monA->waitUntilSucceeds("ceph -s | grep 'quorum ${cfg.monA.name}'");
+    $monA->waitUntilSucceeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'");
+
+    # Send the admin keyring to the OSD machines
+    $monA->mustSucceed("cp /etc/ceph/ceph.client.admin.keyring /tmp/shared");
+    $osd0->mustSucceed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph");
+    $osd1->mustSucceed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph");
+
+    # Bootstrap both OSDs
+    $osd0->mustSucceed(
+      "mkfs.xfs /dev/vdb",
+      "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+      "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
+      "echo '{\"cephx_secret\": \"${cfg.osd0.key}\"}' | ceph osd new ${cfg.osd0.uuid} -i -",
+    );
+    $osd1->mustSucceed(
+      "mkfs.xfs /dev/vdb",
+      "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+      "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
+      "echo '{\"cephx_secret\": \"${cfg.osd1.key}\"}' | ceph osd new ${cfg.osd1.uuid} -i -"
+    );
+
+    # Initialize the OSDs with regular filestore
+    $osd0->mustSucceed(
+      "ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
+      "chown -R ceph:ceph /var/lib/ceph/osd",
+      "systemctl start ceph-osd-${cfg.osd0.name}",
+    );
+    $osd1->mustSucceed(
+      "ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
+      "chown -R ceph:ceph /var/lib/ceph/osd",
+      "systemctl start ceph-osd-${cfg.osd1.name}"
+    );
+    $monA->waitUntilSucceeds("ceph osd stat | grep -e '2 osds: 2 up[^,]*, 2 in'");
+    $monA->waitUntilSucceeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'");
+    $monA->waitUntilSucceeds("ceph -s | grep 'HEALTH_OK'");
+
+    $monA->mustSucceed(
+      "ceph osd pool create multi-node-test 100 100",
+      "ceph osd pool ls | grep 'multi-node-test'",
+      "ceph osd pool rename multi-node-test multi-node-other-test",
+      "ceph osd pool ls | grep 'multi-node-other-test'"
+    );
+    $monA->waitUntilSucceeds("ceph -s | grep '1 pools, 100 pgs'");
+    $monA->mustSucceed("ceph osd pool set multi-node-other-test size 2");
+    $monA->waitUntilSucceeds("ceph -s | grep 'HEALTH_OK'");
+    $monA->waitUntilSucceeds("ceph -s | grep '100 active+clean'");
+    $monA->mustFail(
+      "ceph osd pool ls | grep 'multi-node-test'",
+      "ceph osd pool delete multi-node-other-test multi-node-other-test --yes-i-really-really-mean-it"
+    );
+
+    # Although the ceph target is disabled in the config, we still want to test that starting it brings everything back up as intended
+    $osd0->mustSucceed("systemctl stop ceph-osd-${cfg.osd0.name}");
+    $osd1->mustSucceed("systemctl stop ceph-osd-${cfg.osd1.name}");
+    $monA->mustSucceed(
+      "systemctl stop ceph-mgr-${cfg.monA.name}",
+      "systemctl stop ceph-mon-${cfg.monA.name}"
+    );
+
+    $monA->succeed("systemctl start ceph.target");
+    $monA->waitForUnit("ceph-mon-${cfg.monA.name}");
+    $monA->waitForUnit("ceph-mgr-${cfg.monA.name}");
+    $osd0->succeed("systemctl start ceph.target");
+    $osd0->waitForUnit("ceph-osd-${cfg.osd0.name}");
+    $osd1->succeed("systemctl start ceph.target");
+    $osd1->waitForUnit("ceph-osd-${cfg.osd1.name}");
+
+    $monA->succeed("ceph -s | grep 'mon: 1 daemons'");
+    $monA->waitUntilSucceeds("ceph -s | grep 'quorum ${cfg.monA.name}'");
+    $monA->waitUntilSucceeds("ceph osd stat | grep -e '2 osds: 2 up[^,]*, 2 in'");
+    $monA->waitUntilSucceeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'");
+    $monA->waitUntilSucceeds("ceph -s | grep 'HEALTH_OK'");
+  '';
+in {
+  name = "basic-multi-node-ceph-cluster";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ lejonet ];
+  };
+
+  nodes = {
+    monA = generateHost { pkgs = pkgs; cephConfig = cephConfigMonA; networkConfig = networkMonA; };
+    osd0 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd0; networkConfig = networkOsd0; };
+    osd1 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd1; networkConfig = networkOsd1; };
+  };
+
+  testScript = testscript;
+})
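Note: the multi-node test derives every machine from the same generateCephConfig/generateHost helpers, so growing the cluster mostly means adding another cfg entry plus node. A minimal sketch of a third OSD under this pattern (the osd2 name, ip, key and uuid are hypothetical placeholders, not part of this change):

  networkOsd2 = {
    dhcpcd.enable = false;
    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
      { address = cfg.osd2.ip; prefixLength = 24; }  # hypothetical cfg.osd2 entry
    ];
    firewall.allowedTCPPortRanges = [ { from = 6800; to = 7300; } ];
  };
  cephConfigOsd2 = generateCephConfig { daemonConfig = {
    osd = { enable = true; daemons = [ cfg.osd2.name ]; };
  }; };
  # and in `nodes`:
  #   osd2 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd2; networkConfig = networkOsd2; };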
diff --git a/nixos/tests/ceph-single-node.nix b/nixos/tests/ceph-single-node.nix
new file mode 100644
index 0000000000000..10b77cff5a316
--- /dev/null
+++ b/nixos/tests/ceph-single-node.nix
@@ -0,0 +1,193 @@
+import ./make-test.nix ({pkgs, lib, ...}:
+
+let
+  cfg = {
+    clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
+    monA = {
+      name = "a";
+      ip = "192.168.1.1";
+    };
+    osd0 = {
+      name = "0";
+      key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
+      uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
+    };
+    osd1 = {
+      name = "1";
+      key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
+      uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
+    };
+  };
+  generateCephConfig = { daemonConfig }: {
+    enable = true;
+    global = {
+      fsid = cfg.clusterId;
+      monHost = cfg.monA.ip;
+      monInitialMembers = cfg.monA.name;
+    };
+  } // daemonConfig;
+
+  generateHost = { pkgs, cephConfig, networkConfig, ... }: {
+    virtualisation = {
+      memorySize = 512;
+      emptyDiskImages = [ 20480 20480 ];
+      vlans = [ 1 ];
+    };
+
+    networking = networkConfig;
+
+    environment.systemPackages = with pkgs; [
+      bash
+      sudo
+      ceph
+      xfsprogs
+    ];
+
+    boot.kernelModules = [ "xfs" ];
+
+    services.ceph = cephConfig;
+
+    # So that we don't have to battle systemd when bootstrapping
+    systemd.targets.ceph.wantedBy = lib.mkForce [];
+  };
+
+  networkMonA = {
+    dhcpcd.enable = false;
+    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+      { address = cfg.monA.ip; prefixLength = 24; }
+    ];
+  };
+  cephConfigMonA = generateCephConfig { daemonConfig = {
+    mon = {
+      enable = true;
+      daemons = [ cfg.monA.name ];
+    };
+    mgr = {
+      enable = true;
+      daemons = [ cfg.monA.name ];
+    };
+    osd = {
+      enable = true;
+      daemons = [ cfg.osd0.name cfg.osd1.name ];
+    };
+  }; };
+
+  testscript = { ... }: ''
+    startAll;
+
+    $monA->waitForUnit("network.target");
+
+    # Create the ceph-related directories
+    $monA->mustSucceed(
+      "mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}",
+      "mkdir -p /var/lib/ceph/mon/ceph-${cfg.monA.name}",
+      "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+      "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+      "mkdir -p /etc/ceph",
+      "chown ceph:ceph -R /etc/ceph",
+      "chown ceph:ceph -R /var/lib/ceph/",
+    );
+
+    # Bootstrap ceph-mon daemon
+    $monA->mustSucceed(
+      "sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
+      "sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
+      "sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
+      "monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
+      "sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
+      "sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
+      "systemctl start ceph-mon-${cfg.monA.name}"
+    );
+    $monA->waitForUnit("ceph-mon-${cfg.monA.name}");
+    $monA->mustSucceed("ceph mon enable-msgr2");
+
+    # Can't check ceph status until a mon is up
+    $monA->succeed("ceph -s | grep 'mon: 1 daemons'");
+
+    # Start the ceph-mgr daemon; it has no dependencies and needs hardly any setup
+    $monA->mustSucceed(
+      "ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
+      "systemctl start ceph-mgr-${cfg.monA.name}"
+    );
+    $monA->waitForUnit("ceph-mgr-a");
+    $monA->waitUntilSucceeds("ceph -s | grep 'quorum ${cfg.monA.name}'");
+    $monA->waitUntilSucceeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'");
+
+    # Bootstrap both OSDs
+    $monA->mustSucceed(
+      "mkfs.xfs /dev/vdb",
+      "mkfs.xfs /dev/vdc",
+      "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+      "mount /dev/vdc /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+      "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
+      "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
+      "echo '{\"cephx_secret\": \"${cfg.osd0.key}\"}' | ceph osd new ${cfg.osd0.uuid} -i -",
+      "echo '{\"cephx_secret\": \"${cfg.osd1.key}\"}' | ceph osd new ${cfg.osd1.uuid} -i -"
+    );
+
+    # Initialize the OSDs with regular filestore
+    $monA->mustSucceed(
+      "ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
+      "ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
+      "chown -R ceph:ceph /var/lib/ceph/osd",
+      "systemctl start ceph-osd-${cfg.osd0.name}",
+      "systemctl start ceph-osd-${cfg.osd1.name}"
+    );
+    $monA->waitUntilSucceeds("ceph osd stat | grep -e '2 osds: 2 up[^,]*, 2 in'");
+    $monA->waitUntilSucceeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'");
+    $monA->waitUntilSucceeds("ceph -s | grep 'HEALTH_OK'");
+
+    $monA->mustSucceed(
+      "ceph osd pool create single-node-test 100 100",
+      "ceph osd pool ls | grep 'single-node-test'",
+      "ceph osd pool rename single-node-test single-node-other-test",
+      "ceph osd pool ls | grep 'single-node-other-test'"
+    );
+    $monA->waitUntilSucceeds("ceph -s | grep '1 pools, 100 pgs'");
+    $monA->mustSucceed(
+      "ceph osd getcrushmap -o crush",
+      "crushtool -d crush -o decrushed",
+      "sed 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/' decrushed > modcrush",
+      "crushtool -c modcrush -o recrushed",
+      "ceph osd setcrushmap -i recrushed",
+      "ceph osd pool set single-node-other-test size 2"
+    );
+    $monA->waitUntilSucceeds("ceph -s | grep 'HEALTH_OK'");
+    $monA->waitUntilSucceeds("ceph -s | grep '100 active+clean'");
+    $monA->mustFail(
+      "ceph osd pool ls | grep 'multi-node-test'",
+      "ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it"
+    );
+
+    # Although the ceph target is disabled in the config, we still want to test that starting it brings everything back up as intended
+    $monA->mustSucceed(
+      "systemctl stop ceph-osd-${cfg.osd0.name}",
+      "systemctl stop ceph-osd-${cfg.osd1.name}",
+      "systemctl stop ceph-mgr-${cfg.monA.name}",
+      "systemctl stop ceph-mon-${cfg.monA.name}"
+    );
+
+    $monA->succeed("systemctl start ceph.target");
+    $monA->waitForUnit("ceph-mon-${cfg.monA.name}");
+    $monA->waitForUnit("ceph-mgr-${cfg.monA.name}");
+    $monA->waitForUnit("ceph-osd-${cfg.osd0.name}");
+    $monA->waitForUnit("ceph-osd-${cfg.osd1.name}");
+
+    $monA->succeed("ceph -s | grep 'mon: 1 daemons'");
+    $monA->waitUntilSucceeds("ceph -s | grep 'quorum ${cfg.monA.name}'");
+    $monA->waitUntilSucceeds("ceph osd stat | grep -e '2 osds: 2 up[^,]*, 2 in'");
+    $monA->waitUntilSucceeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'");
+    $monA->waitUntilSucceeds("ceph -s | grep 'HEALTH_OK'");
+  '';
+in {
+  name = "basic-single-node-ceph-cluster";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ lejonet johanot ];
+  };
+
+  nodes = {
+    monA = generateHost { pkgs = pkgs; cephConfig = cephConfigMonA; networkConfig = networkMonA; };
+  };
+
+  testScript = testscript;
+})
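For reference, because generateCephConfig merges daemonConfig with `//`, the single-node machine evaluates to a services.ceph value equivalent to the following (all values taken from cfg above):

  services.ceph = {
    enable = true;
    global = {
      fsid = "066ae264-2a5d-4729-8001-6ad265f50b03";
      monHost = "192.168.1.1";
      monInitialMembers = "a";
    };
    mon = { enable = true; daemons = [ "a" ]; };
    mgr = { enable = true; daemons = [ "a" ]; };
    osd = { enable = true; daemons = [ "0" "1" ]; };
  };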
diff --git a/nixos/tests/ceph.nix b/nixos/tests/ceph.nix
deleted file mode 100644
index 57120ff978f7d..0000000000000
--- a/nixos/tests/ceph.nix
+++ /dev/null
@@ -1,161 +0,0 @@
-import ./make-test.nix ({pkgs, lib, ...}: {
-  name = "All-in-one-basic-ceph-cluster";
-  meta = with pkgs.stdenv.lib.maintainers; {
-    maintainers = [ johanot lejonet ];
-  };
-
-  nodes = {
-    aio = { pkgs, ... }: {
-      virtualisation = {
-        memorySize = 1536;
-        emptyDiskImages = [ 20480 20480 ];
-        vlans = [ 1 ];
-      };
-
-      networking = {
-        useDHCP = false;
-        interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
-          { address = "192.168.1.1"; prefixLength = 24; }
-        ];
-      };
-
-      environment.systemPackages = with pkgs; [
-        bash
-        sudo
-        ceph
-        xfsprogs
-      ];
-
-      boot.kernelModules = [ "xfs" ];
-
-      services.ceph.enable = true;
-      services.ceph.global = {
-        fsid = "066ae264-2a5d-4729-8001-6ad265f50b03";
-        monInitialMembers = "aio";
-        monHost = "192.168.1.1";
-      };
-
-      services.ceph.mon = {
-        enable = true;
-        daemons = [ "aio" ];
-      };
-
-      services.ceph.mgr = {
-        enable = true;
-        daemons = [ "aio" ];
-      };
-
-      services.ceph.osd = {
-        enable = true;
-        daemons = [ "0" "1" ];
-      };
-
-      # So that we don't have to battle systemd when bootstraping
-      systemd.targets.ceph.wantedBy = lib.mkForce [];
-    };
-  };
-
-  testScript = { ... }: ''
-    startAll;
-
-    $aio->waitForUnit("network.target");
-
-    # Create the ceph-related directories
-    $aio->mustSucceed(
-      "mkdir -p /var/lib/ceph/mgr/ceph-aio",
-      "mkdir -p /var/lib/ceph/mon/ceph-aio",
-      "mkdir -p /var/lib/ceph/osd/ceph-{0,1}",
-      "chown ceph:ceph -R /var/lib/ceph/",
-      "mkdir -p /etc/ceph",
-      "chown ceph:ceph -R /etc/ceph"
-    );
-
-    # Bootstrap ceph-mon daemon
-    $aio->mustSucceed(
-      "sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
-      "sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
-      "sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
-      "monmaptool --create --add aio 192.168.1.1 --fsid 066ae264-2a5d-4729-8001-6ad265f50b03 /tmp/monmap",
-      "sudo -u ceph ceph-mon --mkfs -i aio --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
-      "sudo -u ceph touch /var/lib/ceph/mon/ceph-aio/done",
-      "systemctl start ceph-mon-aio"
-    );
-    $aio->waitForUnit("ceph-mon-aio");
-    $aio->mustSucceed("ceph mon enable-msgr2");
-
-    # Can't check ceph status until a mon is up
-    $aio->succeed("ceph -s | grep 'mon: 1 daemons'");
-
-    # Start the ceph-mgr daemon, it has no deps and hardly any setup
-    $aio->mustSucceed(
-      "ceph auth get-or-create mgr.aio mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-aio/keyring",
-      "systemctl start ceph-mgr-aio"
-    );
-    $aio->waitForUnit("ceph-mgr-aio");
-    $aio->waitUntilSucceeds("ceph -s | grep 'quorum aio'");
-    $aio->waitUntilSucceeds("ceph -s | grep 'mgr: aio(active,'");
-
-    # Bootstrap both OSDs
-    $aio->mustSucceed(
-      "mkfs.xfs /dev/vdb",
-      "mkfs.xfs /dev/vdc",
-      "mount /dev/vdb /var/lib/ceph/osd/ceph-0",
-      "mount /dev/vdc /var/lib/ceph/osd/ceph-1",
-      "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-0/keyring --name osd.0 --add-key AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==",
-      "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-1/keyring --name osd.1 --add-key AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==",
-      "echo '{\"cephx_secret\": \"AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==\"}' | ceph osd new 55ba2294-3e24-478f-bee0-9dca4c231dd9 -i -",
-      "echo '{\"cephx_secret\": \"AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==\"}' | ceph osd new 5e97a838-85b6-43b0-8950-cb56d554d1e5 -i -"
-    );
-
-    # Initialize the OSDs with regular filestore
-    $aio->mustSucceed(
-      "ceph-osd -i 0 --mkfs --osd-uuid 55ba2294-3e24-478f-bee0-9dca4c231dd9",
-      "ceph-osd -i 1 --mkfs --osd-uuid 5e97a838-85b6-43b0-8950-cb56d554d1e5",
-      "chown -R ceph:ceph /var/lib/ceph/osd",
-      "systemctl start ceph-osd-0",
-      "systemctl start ceph-osd-1"
-    );
-
-    $aio->waitUntilSucceeds("ceph osd stat | grep -e '2 osds: 2 up[^,]*, 2 in'");
-    $aio->waitUntilSucceeds("ceph -s | grep 'mgr: aio(active,'");
-    $aio->waitUntilSucceeds("ceph -s | grep 'HEALTH_OK'");
-
-    $aio->mustSucceed(
-      "ceph osd pool create aio-test 100 100",
-      "ceph osd pool ls | grep 'aio-test'",
-      "ceph osd pool rename aio-test aio-other-test",
-      "ceph osd pool ls | grep 'aio-other-test'",
-      "ceph -s | grep '1 pools, 100 pgs'",
-      "ceph osd getcrushmap -o crush",
-      "crushtool -d crush -o decrushed",
-      "sed 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/' decrushed > modcrush",
-      "crushtool -c modcrush -o recrushed",
-      "ceph osd setcrushmap -i recrushed",
-      "ceph osd pool set aio-other-test size 2"
-    );
-    $aio->waitUntilSucceeds("ceph -s | grep 'HEALTH_OK'");
-    $aio->waitUntilSucceeds("ceph -s | grep '100 active+clean'");
-    $aio->mustFail(
-      "ceph osd pool ls | grep 'aio-test'",
-      "ceph osd pool delete aio-other-test aio-other-test --yes-i-really-really-mean-it"
-    );
-
-    # As we disable the target in the config, we still want to test that it works as intended
-    $aio->mustSucceed(
-      "systemctl stop ceph-osd-0",
-      "systemctl stop ceph-osd-1",
-      "systemctl stop ceph-mgr-aio",
-      "systemctl stop ceph-mon-aio"
-    );
-    $aio->succeed("systemctl start ceph.target");
-    $aio->waitForUnit("ceph-mon-aio");
-    $aio->waitForUnit("ceph-mgr-aio");
-    $aio->waitForUnit("ceph-osd-0");
-    $aio->waitForUnit("ceph-osd-1");
-    $aio->succeed("ceph -s | grep 'mon: 1 daemons'");
-    $aio->waitUntilSucceeds("ceph -s | grep 'quorum aio'");
-    $aio->waitUntilSucceeds("ceph osd stat | grep -e '2 osds: 2 up[^,]*, 2 in'");
-    $aio->waitUntilSucceeds("ceph -s | grep 'mgr: aio(active,'");
-    $aio->waitUntilSucceeds("ceph -s | grep 'HEALTH_OK'");
-  '';
-})
diff --git a/nixos/tests/common/letsencrypt/common.nix b/nixos/tests/common/letsencrypt/common.nix
index 798a749f7f9b7..c530de817bf2d 100644
--- a/nixos/tests/common/letsencrypt/common.nix
+++ b/nixos/tests/common/letsencrypt/common.nix
@@ -1,27 +1,9 @@
-{ lib, nodes, ... }: {
+{ lib, nodes, pkgs, ... }: let
+  letsencrypt-ca = nodes.letsencrypt.config.test-support.letsencrypt.caCert;
+in {
   networking.nameservers = [
     nodes.letsencrypt.config.networking.primaryIPAddress
   ];
 
-  nixpkgs.overlays = lib.singleton (self: super: {
-    cacert = super.cacert.overrideDerivation (drv: {
-      installPhase = (drv.installPhase or "") + ''
-        cat "${nodes.letsencrypt.config.test-support.letsencrypt.caCert}" \
-          >> "$out/etc/ssl/certs/ca-bundle.crt"
-      '';
-    });
-
-    # Override certifi so that it accepts fake certificate for Let's Encrypt
-    # Need to override the attribute used by simp_le, which is python3Packages
-    python3Packages = (super.python3.override {
-      packageOverrides = lib.const (pysuper: {
-        certifi = pysuper.certifi.overridePythonAttrs (attrs: {
-          postPatch = (attrs.postPatch or "") + ''
-            cat "${self.cacert}/etc/ssl/certs/ca-bundle.crt" \
-              > certifi/cacert.pem
-          '';
-        });
-      });
-    }).pkgs;
-  });
+  security.pki.certificateFiles = [ letsencrypt-ca ];
 }
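With the cacert/certifi overlays gone, the snakeoil CA is simply added to the system trust store via security.pki.certificateFiles, so any TLS client that honours the system bundle accepts the test ACME endpoint. A minimal consumer sketch (the relative path is assumed from the nixos/tests layout):

  client = { ... }: {
    imports = [ ./common/letsencrypt/common.nix ];
    # The nameserver now points at the letsencrypt test node and the snakeoil CA
    # is trusted system-wide; no per-package certificate overrides are needed.
  };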
diff --git a/nixos/tests/common/letsencrypt/default.nix b/nixos/tests/common/letsencrypt/default.nix
index 58d87c64e3445..110a2520971d0 100644
--- a/nixos/tests/common/letsencrypt/default.nix
+++ b/nixos/tests/common/letsencrypt/default.nix
@@ -1,6 +1,3 @@
-# Fully pluggable module to have Letsencrypt's Boulder ACME service running in
-# a test environment.
-#
 # The certificate for the ACME service is exported as:
 #
 #   config.test-support.letsencrypt.caCert
@@ -54,277 +51,35 @@
 # that it has to be started _before_ the ACME service.
 { config, pkgs, lib, ... }:
 
-let
-  softhsm = pkgs.stdenv.mkDerivation rec {
-    pname = "softhsm";
-    version = "1.3.8";
-
-    src = pkgs.fetchurl {
-      url = "https://dist.opendnssec.org/source/${pname}-${version}.tar.gz";
-      sha256 = "0flmnpkgp65ym7w3qyg78d3fbmvq3aznmi66rgd420n33shf7aif";
-    };
-
-    configureFlags = [ "--with-botan=${pkgs.botan}" ];
-    buildInputs = [ pkgs.sqlite ];
-  };
-
-  pkcs11-proxy = pkgs.stdenv.mkDerivation {
-    name = "pkcs11-proxy";
-
-    src = pkgs.fetchFromGitHub {
-      owner = "SUNET";
-      repo = "pkcs11-proxy";
-      rev = "944684f78bca0c8da6cabe3fa273fed3db44a890";
-      sha256 = "1nxgd29y9wmifm11pjcdpd2y293p0dgi0x5ycis55miy97n0f5zy";
-    };
-
-    postPatch = "patchShebangs mksyscalls.sh";
-
-    nativeBuildInputs = [ pkgs.cmake ];
-    buildInputs = [ pkgs.openssl pkgs.libseccomp ];
-  };
-
-  mkGoDep = { goPackagePath, url ? "https://${goPackagePath}", rev, sha256 }: {
-    inherit goPackagePath;
-    src = pkgs.fetchgit { inherit url rev sha256; };
-  };
-
-  goose = let
-    owner = "liamstask";
-    repo = "goose";
-    rev = "8488cc47d90c8a502b1c41a462a6d9cc8ee0a895";
-    version = "20150116";
-
-  in pkgs.buildGoPackage rec {
-    name = "${repo}-${version}";
-
-    src = pkgs.fetchFromBitbucket {
-      name = "${name}-src";
-      inherit rev owner repo;
-      sha256 = "1jy0pscxjnxjdg3hj111w21g8079rq9ah2ix5ycxxhbbi3f0wdhs";
-    };
-
-    goPackagePath = "bitbucket.org/${owner}/${repo}";
-    subPackages = [ "cmd/goose" ];
-    extraSrcs = map mkGoDep [
-      { goPackagePath = "github.com/go-sql-driver/mysql";
-        rev = "2e00b5cd70399450106cec6431c2e2ce3cae5034";
-        sha256 = "085g48jq9hzmlcxg122n0c4pi41sc1nn2qpx1vrl2jfa8crsppa5";
-      }
-      { goPackagePath = "github.com/kylelemons/go-gypsy";
-        rev = "08cad365cd28a7fba23bb1e57aa43c5e18ad8bb8";
-        sha256 = "1djv7nii3hy451n5jlslk0dblqzb1hia1cbqpdwhnps1g8hqjy8q";
-      }
-      { goPackagePath = "github.com/lib/pq";
-        rev = "ba5d4f7a35561e22fbdf7a39aa0070f4d460cfc0";
-        sha256 = "1mfbqw9g00bk24bfmf53wri5c2wqmgl0qh4sh1qv2da13a7cwwg3";
-      }
-      { goPackagePath = "github.com/mattn/go-sqlite3";
-        rev = "2acfafad5870400156f6fceb12852c281cbba4d5";
-        sha256 = "1rpgil3w4hh1cibidskv1js898hwz83ps06gh0hm3mym7ki8d5h7";
-      }
-      { goPackagePath = "github.com/ziutek/mymysql";
-        rev = "0582bcf675f52c0c2045c027fd135bd726048f45";
-        sha256 = "0bkc9x8sgqbzgdimsmsnhb0qrzlzfv33fgajmmjxl4hcb21qz3rf";
-      }
-      { goPackagePath = "golang.org/x/net";
-        url = "https://go.googlesource.com/net";
-        rev = "10c134ea0df15f7e34d789338c7a2d76cc7a3ab9";
-        sha256 = "14cbr2shl08gyg85n5gj7nbjhrhhgrd52h073qd14j97qcxsakcz";
-      }
-    ];
-  };
-
-  boulder = let
-    owner = "letsencrypt";
-    repo = "boulder";
-    rev = "9c6a1f2adc4c26d925588f5ae366cfd4efb7813a";
-    version = "20180129";
-
-  in pkgs.buildGoPackage rec {
-    name = "${repo}-${version}";
-
-    src = pkgs.fetchFromGitHub {
-      name = "${name}-src";
-      inherit rev owner repo;
-      sha256 = "09kszswrifm9rc6idfaq0p1mz5w21as2qbc8gd5pphrq9cf9pn55";
-    };
-
-    postPatch = ''
-      # compat for go < 1.8
-      sed -i -e 's/time\.Until(\([^)]\+\))/\1.Sub(time.Now())/' \
-        test/ocsp/helper/helper.go
-
-      find test -type f -exec sed -i -e '/libpkcs11-proxy.so/ {
-        s,/usr/local,${pkcs11-proxy},
-      }' {} +
-
-      sed -i -r \
-        -e '/^def +install/a \    return True' \
-        -e 's,exec \./bin/,,' \
-        test/startservers.py
-
-      cat ${lib.escapeShellArg snakeOilCerts.ca.key} > test/test-ca.key
-      cat ${lib.escapeShellArg snakeOilCerts.ca.cert} > test/test-ca.pem
-    '';
-
-    # Until vendored pkcs11 is go 1.9 compatible
-    preBuild = ''
-      rm -r go/src/github.com/letsencrypt/boulder/vendor/github.com/miekg/pkcs11
-    '';
-
-    # XXX: Temporarily brought back putting the source code in the output,
-    # since e95f17e2720e67e2eabd59d7754c814d3e27a0b2 was removing that from
-    # buildGoPackage.
-    preInstall = ''
-      mkdir -p $out
-      pushd "$NIX_BUILD_TOP/go"
-      while read f; do
-        echo "$f" | grep -q '^./\(src\|pkg/[^/]*\)/${goPackagePath}' \
-          || continue
-        mkdir -p "$(dirname "$out/share/go/$f")"
-        cp "$NIX_BUILD_TOP/go/$f" "$out/share/go/$f"
-      done < <(find . -type f)
-      popd
-    '';
-
-    extraSrcs = map mkGoDep [
-      { goPackagePath = "github.com/miekg/pkcs11";
-        rev           = "6dbd569b952ec150d1425722dbbe80f2c6193f83";
-        sha256        = "1m8g6fx7df6hf6q6zsbyw1icjmm52dmsx28rgb0h930wagvngfwb";
-      }
-    ];
-
-    goPackagePath = "github.com/${owner}/${repo}";
-    buildInputs = [ pkgs.libtool ];
-  };
-
-  boulderSource = "${boulder.out}/share/go/src/${boulder.goPackagePath}";
-
-  softHsmConf = pkgs.writeText "softhsm.conf" ''
-    0:/var/lib/softhsm/slot0.db
-    1:/var/lib/softhsm/slot1.db
-  '';
 
+let
   snakeOilCerts = import ./snakeoil-certs.nix;
 
-  wfeDomain = "acme-v01.api.letsencrypt.org";
+  wfeDomain = "acme-v02.api.letsencrypt.org";
   wfeCertFile = snakeOilCerts.${wfeDomain}.cert;
   wfeKeyFile = snakeOilCerts.${wfeDomain}.key;
 
   siteDomain = "letsencrypt.org";
   siteCertFile = snakeOilCerts.${siteDomain}.cert;
   siteKeyFile = snakeOilCerts.${siteDomain}.key;
-
-  # Retrieved via:
-  # curl -s -I https://acme-v01.api.letsencrypt.org/terms \
-  #   | sed -ne 's/^[Ll]ocation: *//p'
-  tosUrl = "https://letsencrypt.org/documents/2017.11.15-LE-SA-v1.2.pdf";
-  tosPath = builtins.head (builtins.match "https?://[^/]+(.*)" tosUrl);
-
-  tosFile = pkgs.fetchurl {
-    url = tosUrl;
-    sha256 = "0yvyckqzj0b1xi61sypcha82nanizzlm8yqy828h2jbza7cxi26c";
-  };
-
+  pebble = pkgs.pebble;
   resolver = let
     message = "You need to define a resolver for the letsencrypt test module.";
     firstNS = lib.head config.networking.nameservers;
   in if config.networking.nameservers == [] then throw message else firstNS;
 
-  cfgDir = pkgs.stdenv.mkDerivation {
-    name = "boulder-config";
-    src = "${boulderSource}/test/config";
-    nativeBuildInputs = [ pkgs.jq ];
-    phases = [ "unpackPhase" "patchPhase" "installPhase" ];
-    postPatch = ''
-      sed -i -e 's/5002/80/' -e 's/5002/443/' va.json
-      sed -i -e '/listenAddress/s/:4000/:80/' wfe.json
-      sed -i -r \
-        -e ${lib.escapeShellArg "s,http://boulder:4000/terms/v1,${tosUrl},g"} \
-        -e 's,http://(boulder|127\.0\.0\.1):4000,https://${wfeDomain},g' \
-        -e '/dnsResolver/s/127\.0\.0\.1:8053/${resolver}:53/' \
-        *.json
-      if grep 4000 *.json; then exit 1; fi
-
-      # Change all ports from 1909X to 909X, because the 1909X range of ports is
-      # allocated by startservers.py in order to intercept gRPC communication.
-      sed -i -e 's/\<1\(909[0-9]\)\>/\1/' *.json
-
-      # Patch out all additional issuer certs
-      jq '. + {ca: (.ca + {Issuers:
-        [.ca.Issuers[] | select(.CertFile == "test/test-ca.pem")]
-      })}' ca.json > tmp
-      mv tmp ca.json
-    '';
-    installPhase = "cp -r . \"$out\"";
-  };
-
-  components = {
-    gsb-test-srv.args = "-apikey my-voice-is-my-passport";
-    gsb-test-srv.waitForPort = 6000;
-    gsb-test-srv.first = true;
-    boulder-sa.args = "--config ${cfgDir}/sa.json";
-    boulder-wfe.args = "--config ${cfgDir}/wfe.json";
-    boulder-ra.args = "--config ${cfgDir}/ra.json";
-    boulder-ca.args = "--config ${cfgDir}/ca.json";
-    boulder-va.args = "--config ${cfgDir}/va.json";
-    boulder-publisher.args = "--config ${cfgDir}/publisher.json";
-    boulder-publisher.waitForPort = 9091;
-    ocsp-updater.args = "--config ${cfgDir}/ocsp-updater.json";
-    ocsp-updater.after = [ "boulder-publisher" ];
-    ocsp-responder.args = "--config ${cfgDir}/ocsp-responder.json";
-    ct-test-srv = {};
-    mail-test-srv.args = let
-      key = "${boulderSource}/test/mail-test-srv/minica-key.pem";
-      crt = "${boulderSource}/test/mail-test-srv/minica.pem";
-     in
-      "--closeFirst 5 --cert ${crt} --key ${key}";
+  pebbleConf.pebble = {
+    listenAddress = "0.0.0.0:443";
+    managementListenAddress = "0.0.0.0:15000";
+    certificate = snakeOilCerts.${wfeDomain}.cert;
+    privateKey = snakeOilCerts.${wfeDomain}.key;
+    httpPort = 80;
+    tlsPort = 443;
+    ocspResponderURL = "http://0.0.0.0:4002";
   };
 
-  commonPath = [ softhsm pkgs.mariadb goose boulder ];
-
-  mkServices = a: b: with lib; listToAttrs (concatLists (mapAttrsToList a b));
-
-  componentServices = mkServices (name: attrs: let
-    mkSrvName = n: "boulder-${n}.service";
-    firsts = lib.filterAttrs (lib.const (c: c.first or false)) components;
-    firstServices = map mkSrvName (lib.attrNames firsts);
-    firstServicesNoSelf = lib.remove "boulder-${name}.service" firstServices;
-    additionalAfter = firstServicesNoSelf ++ map mkSrvName (attrs.after or []);
-    needsPort = attrs ? waitForPort;
-    inits = map (n: "boulder-init-${n}.service") [ "mysql" "softhsm" ];
-    portWaiter = {
-      name = "boulder-${name}";
-      value = {
-        description = "Wait For Port ${toString attrs.waitForPort} (${name})";
-        after = [ "boulder-real-${name}.service" "bind.service" ];
-        requires = [ "boulder-real-${name}.service" ];
-        requiredBy = [ "boulder.service" ];
-        serviceConfig.Type = "oneshot";
-        serviceConfig.RemainAfterExit = true;
-        script = let
-          netcat = "${pkgs.libressl.nc}/bin/nc";
-          portCheck = "${netcat} -z 127.0.0.1 ${toString attrs.waitForPort}";
-        in "while ! ${portCheck}; do :; done";
-      };
-    };
-  in lib.optional needsPort portWaiter ++ lib.singleton {
-    name = if needsPort then "boulder-real-${name}" else "boulder-${name}";
-    value = {
-      description = "Boulder ACME Component (${name})";
-      after = inits ++ additionalAfter;
-      requires = inits;
-      requiredBy = [ "boulder.service" ];
-      path = commonPath;
-      environment.GORACE = "halt_on_error=1";
-      environment.SOFTHSM_CONF = softHsmConf;
-      environment.PKCS11_PROXY_SOCKET = "tcp://127.0.0.1:5657";
-      serviceConfig.WorkingDirectory = boulderSource;
-      serviceConfig.ExecStart = "${boulder}/bin/${name} ${attrs.args or ""}";
-      serviceConfig.Restart = "on-failure";
-    };
-  }) components;
+  pebbleConfFile = pkgs.writeText "pebble.conf" (builtins.toJSON pebbleConf);
+  pebbleDataDir = "/root/pebble";
 
 in {
   imports = [ ../resolver.nix ];
@@ -352,94 +107,29 @@ in {
     networking.firewall.enable = false;
 
     networking.extraHosts = ''
-      127.0.0.1 ${toString [
-        "sa.boulder" "ra.boulder" "wfe.boulder" "ca.boulder" "va.boulder"
-        "publisher.boulder" "ocsp-updater.boulder" "admin-revoker.boulder"
-        "boulder" "boulder-mysql" wfeDomain
-      ]}
+      127.0.0.1 ${wfeDomain}
       ${config.networking.primaryIPAddress} ${wfeDomain} ${siteDomain}
     '';
 
-    services.mysql.enable = true;
-    services.mysql.package = pkgs.mariadb;
-
-    services.nginx.enable = true;
-    services.nginx.recommendedProxySettings = true;
-    # This fixes the test on i686
-    services.nginx.commonHttpConfig = ''
-      server_names_hash_bucket_size 64;
-    '';
-    services.nginx.virtualHosts.${wfeDomain} = {
-      onlySSL = true;
-      enableACME = false;
-      sslCertificate = wfeCertFile;
-      sslCertificateKey = wfeKeyFile;
-      locations."/".proxyPass = "http://127.0.0.1:80";
-    };
-    services.nginx.virtualHosts.${siteDomain} = {
-      onlySSL = true;
-      enableACME = false;
-      sslCertificate = siteCertFile;
-      sslCertificateKey = siteKeyFile;
-      locations."= ${tosPath}".alias = tosFile;
-    };
-
     systemd.services = {
-      pkcs11-daemon = {
-        description = "PKCS11 Daemon";
-        after = [ "boulder-init-softhsm.service" ];
-        before = map (n: "${n}.service") (lib.attrNames componentServices);
-        wantedBy = [ "multi-user.target" ];
-        environment.SOFTHSM_CONF = softHsmConf;
-        environment.PKCS11_DAEMON_SOCKET = "tcp://127.0.0.1:5657";
-        serviceConfig.ExecStart = let
-          softhsmLib = "${softhsm}/lib/softhsm/libsofthsm.so";
-        in "${pkcs11-proxy}/bin/pkcs11-daemon ${softhsmLib}";
-      };
-
-      boulder-init-mysql = {
-        description = "Boulder ACME Init (MySQL)";
-        after = [ "mysql.service" ];
-        serviceConfig.Type = "oneshot";
-        serviceConfig.RemainAfterExit = true;
-        serviceConfig.WorkingDirectory = boulderSource;
-        path = commonPath;
-        script = "${pkgs.bash}/bin/sh test/create_db.sh";
-      };
-
-      boulder-init-softhsm = {
-        description = "Boulder ACME Init (SoftHSM)";
-        environment.SOFTHSM_CONF = softHsmConf;
-        serviceConfig.Type = "oneshot";
-        serviceConfig.RemainAfterExit = true;
-        serviceConfig.WorkingDirectory = boulderSource;
-        preStart = "mkdir -p /var/lib/softhsm";
-        path = commonPath;
+      pebble = {
+        enable = true;
+        description = "Pebble ACME server";
+        requires = [ ];
+        wantedBy = [ "network.target" ];
+        preStart = ''
+          mkdir -p ${pebbleDataDir}
+        '';
         script = ''
-          softhsm --slot 0 --init-token \
-            --label intermediate --pin 5678 --so-pin 1234
-          softhsm --slot 0 --import test/test-ca.key \
-            --label intermediate_key --pin 5678 --id FB
-          softhsm --slot 1 --init-token \
-            --label root --pin 5678 --so-pin 1234
-          softhsm --slot 1 --import test/test-root.key \
-            --label root_key --pin 5678 --id FA
+          cd ${pebbleDataDir}
+          ${pebble}/bin/pebble -config ${pebbleConfFile}
         '';
+        serviceConfig = {
+          # Required to bind on privileged ports.
+          User = "root";
+          Group = "root";
+        };
       };
-
-      boulder = {
-        description = "Boulder ACME Server";
-        after = map (n: "${n}.service") (lib.attrNames componentServices);
-        wantedBy = [ "multi-user.target" ];
-        serviceConfig.Type = "oneshot";
-        serviceConfig.RemainAfterExit = true;
-        script = let
-          ports = lib.range 8000 8005 ++ lib.singleton 80;
-          netcat = "${pkgs.libressl.nc}/bin/nc";
-          mkPortCheck = port: "${netcat} -z 127.0.0.1 ${toString port}";
-          checks = "(${lib.concatMapStringsSep " && " mkPortCheck ports})";
-        in "while ! ${checks}; do :; done";
-      };
-    } // componentServices;
+    };
   };
 }
diff --git a/nixos/tests/common/letsencrypt/mkcerts.nix b/nixos/tests/common/letsencrypt/mkcerts.nix
index 3b4a589e41427..e7ac2bae46bd2 100644
--- a/nixos/tests/common/letsencrypt/mkcerts.nix
+++ b/nixos/tests/common/letsencrypt/mkcerts.nix
@@ -1,7 +1,7 @@
 { pkgs ? import <nixpkgs> {}
 , lib ? pkgs.lib
 
-, domains ? [ "acme-v01.api.letsencrypt.org" "letsencrypt.org" ]
+, domains ? [ "acme-v02.api.letsencrypt.org" "letsencrypt.org" ]
 }:
 
 pkgs.runCommand "letsencrypt-snakeoil-ca" {
diff --git a/nixos/tests/common/letsencrypt/snakeoil-certs.nix b/nixos/tests/common/letsencrypt/snakeoil-certs.nix
index c3d29ab8f1633..ca4f71ae688a4 100644
--- a/nixos/tests/common/letsencrypt/snakeoil-certs.nix
+++ b/nixos/tests/common/letsencrypt/snakeoil-certs.nix
@@ -2,252 +2,253 @@
 {
   ca.key = builtins.toFile "ca.key" ''
     -----BEGIN PRIVATE KEY-----
-    MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDfdVxC/4HwhuzD
-    9or9CDDu3TBQE5lirJI5KYmfMZtfgdzEjgOzmR9AVSkn2rQeCqzM5m+YCzPO+2y7
-    0Fdk7vDORi1OdhYfUQIW6/TZ27xEjx4t82j9i705yUqTJZKjMbD830geXImJ6VGj
-    Nv/WisTHmwBspWKefYQPN68ZvYNCn0d5rYJg9uROZPJHSI0MYj9iERWIPN+xhZoS
-    xN74ILJ0rEOQfx2GHDhTr99vZYAFqbAIfh35fYulRWarUSekI+rDxa83FD8q9cMg
-    OP84KkLep2dRXXTbUWErGUOpHP55M9M7ws0RVNdl9PUSbDgChl7yYlHCde3261q/
-    zGp5dMV/t/jXXNUgRurvXc4gUKKjS4Sffvg0XVnPs3sMlZ4JNmycK9klgISVmbTK
-    VcjRRJv8Bva2NQVsJ9TIryV0QEk94DucgsC3LbhQfQdmnWVcEdzwrZHNpk9az5mn
-    w42RuvZW9L19T7xpIrdLSHaOis4VEquZjkWIhfIz0DVMeXtYEQmwqFG23Ww0utcp
-    mCW4FPvpyYs5GAPmGWfrlMxsLD/7eteot3AheC+56ZBoVBnI8FFvIX2qci+gfVDu
-    CjvDmbyS/0NvxLGqvSC1GUPmWP3TR5Fb1H8Rp+39zJHRmH+qYWlhcv6p7FlY2/6d
-    9Rkw8WKRTSCB7yeUdNNPiPopk6N4NwIDAQABAoICAQCzV0ei5dntpvwjEp3eElLj
-    glYiDnjOPt5kTjgLsg6XCmyau7ewzrXMNgz/1YE1ky+4i0EI8AS2nAdafQ2HDlXp
-    11zJWfDLVYKtztYGe1qQU6TPEEo1I4/M7waRLliP7XO0n6cL5wzjyIQi0CNolprz
-    8CzZBasutGHmrLQ1nmnYcGk2+NBo7f2yBUaFe27of3mLRVbYrrKBkU5kveiNkABp
-    r0/SipKxbbivQbm7d+TVpqiHSGDaOa54CEksOcfs7n6efOvw8qj326KtG9GJzDE6
-    7XP4U19UHe40XuR0t7Zso/FmRyO6QzNUutJt5LjXHezZ75razTcdMyr0QCU8MUHH
-    jXZxQCsbt+9AmdxUMBm1SMNVBdHYM8oiNHynlgsEj9eM6jxDEss/Uc3FeKoHl+XL
-    L6m28guIB8NivqjVzZcwhxvdiQCzYxjyqMC+/eX7aaK4NIlX2QRMoDL6mJ58Bz/8
-    V2Qxp2UNVwKJFWAmpgXC+sq6XV/TP3HkOvd0OK82Nid2QxEvfE/EmOhU63qAjgUR
-    QnteLEcJ3MkGGurs05pYBDE7ejKVz6uu2tHahFMOv+yanGP2gfivnT9a323/nTqH
-    oR5ffMEI1u/ufpWU7sWXZfL/mH1L47x87k+9wwXHCPeSigcy+hFI7t1+rYsdCmz9
-    V6QtmxZHMLanwzh5R0ipcQKCAQEA8kuZIz9JyYP6L+5qmIUxiWESihVlRCSKIqLB
-    fJ5sQ06aDBV2sqS4XnoWsHuJWUd39rulks8cg8WIQu8oJwVkFI9EpARt/+a1fRP0
-    Ncc9qiBdP6VctQGgKfe5KyOfMzIBUl3zj2cAmU6q+CW1OgdhnEl4QhgBe5XQGquZ
-    Alrd2P2jhJbMO3sNFgzTy7xPEr3KqUy+L4gtRnGOegKIh8EllmsyMRO4eIrZV2z3
-    XI+S2ZLyUn3WHYkaJqvUFrbfekgBBmbk5Ead6ImlsLsBla6MolKrVYV1kN6KT+Y+
-    plcxNpWY8bnWfw5058OWPLPa9LPfReu9rxAeGT2ZLmAhSkjGxQKCAQEA7BkBzT3m
-    SIzop9RKl5VzYbVysCYDjFU9KYMW5kBIw5ghSMnRmU7kXIZUkc6C1L/v9cTNFFLw
-    ZSF4vCHLdYLmDysW2d4DU8fS4qdlDlco5A00g8T1FS7nD9CzdkVN/oix6ujw7RuI
-    7pE1K3JELUYFBc8AZ7mIGGbddeCwnM+NdPIlhWzk5s4x4/r31cdk0gzor0kE4e+d
-    5m0s1T4O/Iak6rc0MGDeTejZQg04p1eAJFYQ6OY23tJhH/kO8CMYnQ4fidfCkf8v
-    85v4EC1MCorFR7J65uSj8MiaL7LTXPvLAkgFls1c3ijQ2tJ8qXvqmfo0by33T1OF
-    ZGyaOP9/1WQSywKCAQB47m6CfyYO5EZNAgxGD8SHsuGT9dXTSwF/BAjacB/NAEA2
-    48eYpko3LWyBrUcCPn+LsGCVg7XRtxepgMBjqXcoI9G4o1VbsgTHZtwus0D91qV0
-    DM7WsPcFu1S6SU8+OCkcuTPFUT2lRvRiYj+vtNttK+ZP5rdmvYFermLyH/Q2R3ID
-    zVgmH+aKKODVASneSsgJ8/nAs5EVZbwc/YKzbx2Zk+s7P4KE95g+4G4dzrMW0RcN
-    QS1LFJDu2DhFFgU4fRO15Ek9/lj2JS2DpfLGiJY8tlI5nyDsq4YRFvQSBdbUTZpG
-    m+CJDegffSlRJtuT4ur/dQf5hmvfYTVBRk2XS/eZAoIBAB143a22PWnvFRfmO02C
-    3X1j/iYZCLZa6aCl+ZTSj4LDGdyRPPXrUDxwlFwDMHfIYfcHEyanV9T4Aa9SdKh9
-    p6RbF6YovbeWqS+b/9RzcupM77JHQuTbDwL9ZXmtGxhcDgGqBHFEz6ogPEfpIrOY
-    GwZnmcBY+7E4HgsZ+lII4rqng6GNP2HEeZvg91Eba+2AqQdAkTh3Bfn+xOr1rT8+
-    u5WFOyGS5g1JtN0280yIcrmWeNPp8Q2Nq4wnNgMqDmeEnNFDOsmo1l6NqMC0NtrW
-    CdxyXj82aXSkRgMQSqw/zk7BmNkDV8VvyOqX/fHWQynnfuYmEco4Pd2UZQgadOW5
-    cVMCggEBANGz1fC+QQaangUzsVNOJwg2+CsUFYlAKYA3pRKZPIyMob2CBXk3Oln/
-    YqOq6j373kG2AX74EZT07JFn28F27JF3r+zpyS/TYrfZyO1lz/5ZejPtDTmqBiVd
-    qa2coaPKwCOz64s77A9KSPyvpvyuTfRVa8UoArHcrQsPXMHgEhnFRsbxgmdP582A
-    kfYfoJBSse6dQtS9ZnREJtyWJlBNIBvsuKwzicuIgtE3oCBcIUZpEa6rBSN7Om2d
-    ex8ejCcS7qpHeULYspXbm5ZcwE4glKlQbJDTKaJ9mjiMdvuNFUZnv1BdMQ3Tb8zf
-    Gvfq54FbDuB10XP8JdLrsy9Z6GEsmoE=
+    MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDQ0b23I1srJZwR
+    2MMdvSJK5pcwLfrXU+4gEZEnWNyT8yeVweya+8vmNNOlvK3zxf+ZiY/7aQ0RZJMO
+    h2+VdlgHmr2QKhQTf1HwfZA/06FolD3/DcS+DMJMSTVr179/XLndeVVZUqU7tjvB
+    AWKSIS8H2hSF1UOPi9gBDR8MwCP6Qgj8WYhbkt9q47/lO96qAmm6U1F+Q7RYM9ZQ
+    IWI81N0Ms5wJocg7n6S19iV66ePh7APapZFYup61gFGWfahmA217ELIZd56n8yjO
+    F0epb9sC0XpYCDRrYKBWLqPiv+6wvdZtZvALItyIv08ZwXlBkFg3LbAAhPnf0Vxz
+    pYysQmyyyzkgy252n+Sie0kx+B4qm6fOkpfgYlPSVTb2dXx/be/SE08u0a9FO0fZ
+    pkByWEZJUUwngsJgLUa7MorQf3avxozfC25XqvzbieZfSXlA7mOUclZbC/WUFpyj
+    MlyJU2eCQ8wSwsPXl91oxcYlOkuVLgd41gr9pGXQSuKIkrgbfkftjg2tDC+7g7O8
+    qrdF42FjbZjIx/74AasmsGh4GTQtiSkvEnTstioC6aCV44DlJWbBIMvkyawubjUl
+    Ppij0H66Y9Q4tEc/ktc7oGQfqqluyLb43TeobTPHALsNeAYb39rMtBo5DDCUc81s
+    fuDMhMr/oYXKrFstUsg5AY6mJaRG0QIDAQABAoICAF5ZVfmoPOoKzTB3GvmV2iez
+    dj4rmDmwT1gn98iqasdiRtFwVGJWQHNcDQDGdmY9YNZThD2Y4nGoWpVm9jC2zuFo
+    thusF3QTw8cARKvCCBzDVhumce1YwHVNYpi+W2TFValOyBRathN7rBXxdUMHQUOv
+    8jPh/uudyNP4xL2zFs5dBchW/7g4bT/TdYGyglGYU4L/YEPHfXWYvk1oOAW6O8Ig
+    aPElKt5drEMW2yplATSzua4RvtEzSMBDIRn43pxxEgdXrNC67nF9+ULc2+Efi/oD
+    Ad9CncSiXO9zlVK/W655p6e4qd6uOqyCm8/MTegkuub7eplRe8D3zGjoNN4kCQ4S
+    rckVvIDDb6vZk7PKx9F7GWIqaG/YvFFFKO1MrAZg7SguFA6PtGOYAFocT03P6KXT
+    l2SnZQWKyxUAlh4tOBGlRFgGCx/krRIKbgNYn/qk/ezcRl8c7GpOPh+b7Icoq7u3
+    l4tIVBBHqS8uGgtyi+YwuJeht2MV1aEcSkykKLh2ipp8tb6spORJUkhjawDjvxeQ
+    GztN30Xh2riTXYZ0HExVTtJa8jyvFyp/97ptPIJXaVt2A2KIS3sBFHKnpY+/OrQg
+    uUauYgi13WFHsKOxZL9GYGk7Ujd8bw4CEcJFxKY7bhpGVI6Du7NRkUDWN0+0yusI
+    2szCJ7+ZqJkrc1+GrI/RAoIBAQDseAEggOLYZkpU2Pht15ZbxjM9ayT2ANq1+RTu
+    LjJx4gv2/o/XJCfMZCL0b9TJqtYeH+N6G9oDRJ99VIhUPedhWSYdj9Qj+rPd++TS
+    bp+MoSjmfUfxLTDrmFHL7ppquAE65aDy3B5c+OCb0I4X6CILUf0LynBzgl4kdrzN
+    U6BG3Mt0RiGPojlPV82B9ZUF/09YAz7BIz9X3KMhze1Gps5OeGuUnc9O2IAJYkrj
+    ur9H2YlNS4w+IjRLAXSXUqC8bqPZp6WTo1G/rlyAkIRXCGN90uk5JQvXoj9immFO
+    WaylbdcNG3YcGutreYeZL/UIWF6zCdc6pYG0cCBJS6S/RN7FAoIBAQDiERrLuUbV
+    3fx/a8uMeZop6hXtQpF7jlFxqUmza7QSvBuwks4QVJF+qMSiSvKDkCKqZD4qVf4N
+    TMxEj5vNR0PbnmDshyKJNGVjEauKJSb65CFDUcL1eR/A/oJvxiIdN1Z4cPrpnRux
+    /zIfPuYfYHpdz52buxxmlD7bfwYmVKVpnzjB9z0I1CasZ5uqB0Z8H0OLyUu8S4ju
+    RfkKBDMgVl2q96i8ZvX4C1b7XuimIUqv4WHq5+ejcYirgrYtUbBIaDU3/LORcJdy
+    /K76L1/up70RTDUYYm/HKaRy+vMTpUsZJ7Qbh0hrvQkUvNQ1HXjprW2AePIYi33N
+    h3mb1ulqw4idAoIBAQCsn0YjVjNDShkFK4bfmLv4rw2Ezoyi0SjYIsb2wN6uaBfX
+    7SlQIuKywH8L9f9eYMoCH8FNyLs0G4paUbVb2fzpAc1jUzXINiHL8TCvtXXfkV5s
+    NBSqqRTHR+CegMZVFZJATpVZ9PptYHmHBY5VQW5o2SdizhudFxRmhg95zIx6boBP
+    l0q0sfYoR66MKpzpTeG8HFJZZ8O7/iNQcCXAp9B/VEUkrrdBlaaSMyD8cb1lVBZ5
+    SKdOTGXkQ2G7feQ86n/OSiYDSvxIc56vc9BIQKVwmuEKiFLGzXh8ILrcGXaBJVgS
+    B3QHPFeTk5o7Z9j2iJxJEuv9sginkhrfpsrTnhEJAoIBACkrUkTtjd/e2F/gIqaH
+    crLVZX7a06G7rktTuA9LuvR6e1Rxt8Mzk3eMhprDqVyaQCXlsYiGNoj3hm+p84az
+    xsDVG/OXPIveFeSv0ByNXYbtSr12w1lu4ICGGP0ACTBm5oFymc83hFarEdas3r2y
+    FTbGW36D2c04jCXvARCz85fDnlN8kgnskMpu5+NUBdsO2n83fmphGyPBbHQNhb4K
+    3G4JQhplab/tWL7YbufqQi67jdh4uS+Duo75c/HW4ZKeH6r9gzomVf5j0/3N6NuO
+    gpkG1tiE/LQ5ejBSUTgvrvh6yYsF3QN53pB/PuoZXu63Xay62ePsa1GlrVjbD5EY
+    4OUCggEAJFr7F7AQLMJTAxHFLCsZZ0ZZ+tXYclBC4eHPkZ6sD5jvL3KIpW3Q7jXk
+    oIoD/XEX4B+Qe5M3jQJ/Y5ZJETHcgfcHZbDpCKN2WHQgldQbAJiFd4GY1OegdVsr
+    7TC8jh3Q2eYjzL8u4z7LSNI6aQSv1eWE7S1Q5j/sX/YYDR4W3CBMeIUpqoDWpn87
+    czbIRyA/4L0Y/HLpg/ZCbvtJZbsQwYXhyqfbjlm4BRQ6JiC5uEBKvuDRUXToBJta
+    JU8XMm+Ae5Ogrw7P6hg68dWpagfjb7UZ7Zxv+VDsbrU6KsDcyGCAwrrRZou/6KUG
+    Eq4OVTSu/s8gmY94tgbjeOaLUPEPmg==
     -----END PRIVATE KEY-----
   '';
   ca.cert = builtins.toFile "ca.cert" ''
     -----BEGIN CERTIFICATE-----
-    MIIFATCCAumgAwIBAgIJANydi4uFZr0LMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNV
-    BAMMC1NuYWtlb2lsIENBMCAXDTE4MDcxMjAwMjIxNloYDzIxMTgwNjE4MDAyMjE2
-    WjAWMRQwEgYDVQQDDAtTbmFrZW9pbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP
-    ADCCAgoCggIBAN91XEL/gfCG7MP2iv0IMO7dMFATmWKskjkpiZ8xm1+B3MSOA7OZ
-    H0BVKSfatB4KrMzmb5gLM877bLvQV2Tu8M5GLU52Fh9RAhbr9NnbvESPHi3zaP2L
-    vTnJSpMlkqMxsPzfSB5ciYnpUaM2/9aKxMebAGylYp59hA83rxm9g0KfR3mtgmD2
-    5E5k8kdIjQxiP2IRFYg837GFmhLE3vggsnSsQ5B/HYYcOFOv329lgAWpsAh+Hfl9
-    i6VFZqtRJ6Qj6sPFrzcUPyr1wyA4/zgqQt6nZ1FddNtRYSsZQ6kc/nkz0zvCzRFU
-    12X09RJsOAKGXvJiUcJ17fbrWr/Manl0xX+3+Ndc1SBG6u9dziBQoqNLhJ9++DRd
-    Wc+zewyVngk2bJwr2SWAhJWZtMpVyNFEm/wG9rY1BWwn1MivJXRAST3gO5yCwLct
-    uFB9B2adZVwR3PCtkc2mT1rPmafDjZG69lb0vX1PvGkit0tIdo6KzhUSq5mORYiF
-    8jPQNUx5e1gRCbCoUbbdbDS61ymYJbgU++nJizkYA+YZZ+uUzGwsP/t616i3cCF4
-    L7npkGhUGcjwUW8hfapyL6B9UO4KO8OZvJL/Q2/Esaq9ILUZQ+ZY/dNHkVvUfxGn
-    7f3MkdGYf6phaWFy/qnsWVjb/p31GTDxYpFNIIHvJ5R000+I+imTo3g3AgMBAAGj
-    UDBOMB0GA1UdDgQWBBQ3vPWzjLmu5krbSpfhBAht9KL3czAfBgNVHSMEGDAWgBQ3
-    vPWzjLmu5krbSpfhBAht9KL3czAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUA
-    A4ICAQDF9HyC1ZFN3Ob+JA9Dj5+Rcobi7JIA5F8uW3Q92LfPoVaUGEkBrwJSiTFX
-    47zvP/ySBJIpZ9rzHMbJ+1L+eJgczF1uQ91inthCKo1THTPo5TgBrpJj0YAIunsj
-    9eH1tBnfWFYdVIDZoTSiwPtgIvglpyuK/eJXEe+FRzubhtdc9w1Hlzox1sd0TQuy
-    Pl9KFHg7BlFZfCPig1mkB8pfwjBDgVhv5DKJ9cJXh3R5zSoiyuS2b+qYSvw8YTHq
-    0WNKWUthb7BVAYE3OmcbOHgUAUjtJ6EIGIB9z/SoLe90CofXLXFR5dppuVLKCMBA
-    kgL4luBIu7t8mcnN2yzobvcGHy8RVY6F5abCCy6gackLzjOzvH1SYOxP8yN74aKB
-    ANgcqdWspb8JYoU8lEbA8dhBVrsgBf7XeJlrZvMdcUENlJ2PI0JWr9WvlRAM9rYY
-    EY1alJqBCp6530Ggd6/f0V64cEqptejUdmN9L0zboxKjQf4LjpUNraGvg8tw/xkY
-    4dT1U2HlVnhOyBVkx/tE6zIK/RU16oMqwpjCdfbK/TuWCNc/emJz5PMlp81zm83+
-    dExpWwuV4rt6OQbZ/GSatNLJXOw+pkLjaEhnHgrsgI+HqAUXg3ByKol+1e76wN51
-    k1ZKpB6mk4kejySGPYBHiJwED0IyXu9gUfalSczXFO4ySAvhCg==
+    MIIFDzCCAvegAwIBAgIUU9rbCLTuvaI6gjSsFsJJjfLWIX8wDQYJKoZIhvcNAQEL
+    BQAwFjEUMBIGA1UEAwwLU25ha2VvaWwgQ0EwIBcNMTkxMDE4MDc1NDEyWhgPMjEx
+    OTA5MjQwNzU0MTJaMBYxFDASBgNVBAMMC1NuYWtlb2lsIENBMIICIjANBgkqhkiG
+    9w0BAQEFAAOCAg8AMIICCgKCAgEA0NG9tyNbKyWcEdjDHb0iSuaXMC3611PuIBGR
+    J1jck/MnlcHsmvvL5jTTpbyt88X/mYmP+2kNEWSTDodvlXZYB5q9kCoUE39R8H2Q
+    P9OhaJQ9/w3EvgzCTEk1a9e/f1y53XlVWVKlO7Y7wQFikiEvB9oUhdVDj4vYAQ0f
+    DMAj+kII/FmIW5LfauO/5TveqgJpulNRfkO0WDPWUCFiPNTdDLOcCaHIO5+ktfYl
+    eunj4ewD2qWRWLqetYBRln2oZgNtexCyGXeep/MozhdHqW/bAtF6WAg0a2CgVi6j
+    4r/usL3WbWbwCyLciL9PGcF5QZBYNy2wAIT539Fcc6WMrEJssss5IMtudp/kontJ
+    MfgeKpunzpKX4GJT0lU29nV8f23v0hNPLtGvRTtH2aZAclhGSVFMJ4LCYC1GuzKK
+    0H92r8aM3wtuV6r824nmX0l5QO5jlHJWWwv1lBacozJciVNngkPMEsLD15fdaMXG
+    JTpLlS4HeNYK/aRl0EriiJK4G35H7Y4NrQwvu4OzvKq3ReNhY22YyMf++AGrJrBo
+    eBk0LYkpLxJ07LYqAumgleOA5SVmwSDL5MmsLm41JT6Yo9B+umPUOLRHP5LXO6Bk
+    H6qpbsi2+N03qG0zxwC7DXgGG9/azLQaOQwwlHPNbH7gzITK/6GFyqxbLVLIOQGO
+    piWkRtECAwEAAaNTMFEwHQYDVR0OBBYEFAZcEiVphGxBT4OWXbM6lKu96dvbMB8G
+    A1UdIwQYMBaAFAZcEiVphGxBT4OWXbM6lKu96dvbMA8GA1UdEwEB/wQFMAMBAf8w
+    DQYJKoZIhvcNAQELBQADggIBAGJ5Jnxq1IQ++IRYxCE7r7BqzzF+HTx0EWKkSOmt
+    eSPqeOdhC26hJlclgGZXAF/Xosmn8vkSQMHhj/jr4HI0VF9IyvDUJm8AKsnOgu/7
+    DUey3lEUdOtJpTG9NyTOcrzxToMJ+hWlFLZKxx2dk4FLIvTLjmo1VHM97Bat7XYW
+    IrL9RRIZ25V+eCYtlR7XYjceGFQ0rCdp8SFIQwC6C/AH2tV3b1AJFsND9PcoLu7c
+    //fH+WUQCcD/N0grdC/QCX7AFWzd4rKQ8gjfND4TSYFTSDwW10Mud4kAVhY2P1sY
+    Y3ZpnxWrCHbIZMbszlbMyD+cjsCBnNvOtYGm7pDut/371rllVcB/uOWYWMCtKPoj
+    0elPrwNMrK+P+wceNBCRQO+9gwzB589F2morFTtsob/qtpAygW8Sfl8M+iLWXeYS
+    c3LBLnj0TpgXKRWg7wgIWKSZx9v6pgy70U0qvkjNS1XseUCPf7hfAbxT3xF+37Dw
+    zZRwF4WAWqdnJoOey21mgc+a2DQzqtykA6KfHgCqNFfDbQXPXvNy25DDThbk+paX
+    G2M2EWtr+Nv9s/zm7Xv/pOXlgMFavaj+ikqZ4wfJf6c/sMOdZJtMA4TsYtAJgbc8
+    ts+0eymTq4v5S8/fW51Lbjw6hc1Kcm8k7NbHSi9sEjBfxFLTZNQ5eb4NGr9Od3sU
+    kgwJ
     -----END CERTIFICATE-----
   '';
-  "acme-v01.api.letsencrypt.org".key = builtins.toFile "acme-v01.api.letsencrypt.org.key" ''
+  "acme-v02.api.letsencrypt.org".key = builtins.toFile "acme-v02.api.letsencrypt.org.key" ''
     -----BEGIN RSA PRIVATE KEY-----
-    MIIJKQIBAAKCAgEAvG+sL4q0VkgSClBTn4NkPiUrtXx5oLyZ+CCM1jrQx/xotUt5
-    X2S4/7vMnAK/yRLsR7R2PhXO8CZPqJ7B6OfAgaDTgvipJkZYPZQSMP3KOinM3WJL
-    ssqKh7/HOxZIf0iyUXewrnX5eTAo/CLsUnhBjBD7E99nmQz/leLWSl82sSYDkO3n
-    Uk3/1qJZA8iddb4uH0IEQWcNKev3WoQQzwiVrXBiftlRQOJy5JJXm5m8229MCpMA
-    1AUWmpdu6sl3/gFFdsDhUFq/a7LFrVyaUCMRIHg9szAB7ZFkixr9umQs8jKwuo98
-    3JHB11h2SirwgfIzHHmyhaWhCt22ucTwEXGhq63LtrzZvLsfP8Ql5S+AuqGTH0v8
-    meuc784leAjulBZjkpuIFwDnVv9+YeUEbqJeo1hSHrILddora3nkH4E2dJWmLpqp
-    iPr++GRi+BNgYKW/BQLTJ7C6v+vUs+kdPgYJH5z7oP6f0YZkT0Wkubp/UEz7UV2d
-    fjz57d77DYx5rFWGYzJriWR/xltgL1zDpjwjwG1FDpRqwlyYbBFpjQhxI+X0aT98
-    m6fCzBDQHDb/+JgvsjTHh6OZatahFAwzFIEfrceDv1BG8sBWIaZGhLzYiWQxafl8
-    oXbWv1T6I1jpsTlCdCSkWzaJb4ZjxI9Ga1ynVu8F16+GR2a71wKWu7UbZQsCAwEA
-    AQKCAgBYvrs4FLoD3KNqahRIDqhaQEVKjtn1Yn2dBy9tAXwsg2qI34fE7nnWLwsY
-    +o56U0gmKQ57BOhV36Uqg8JNP0BBjI2wpA19simCrsa2fgAMznzmUpHWHV+KuT5K
-    TJ9OGt2oUpdKQtOASLc0r/neiTZNkf29iTyQLzf7zj4f/qGSYpXRXsnP0F5KJmGH
-    z6agujWckQnSB4eCk9gFsCb+akubyE8K8Kw8w6lajrVl2czBB7SnUj5UnCTeH62k
-    M8goP08Is6QppON8BFDm6bLfRPSe9yIPzu9JhGz2unp+mwkz872Zz1P9yUOieM4U
-    9g4ZFQkPQx1ZpfynUm3pJZ/uhzadBabnIvMe/1qwDAEDifh/WzEM76/2kBpQkHtS
-    qcjwjAElfWnP8aBr1Pj42/cVJy3dbDqb0OawFHx/8xSO2CkY4Gq2h3OYv1XpPv3g
-    S9qqKhvuaT+aD0YjKhP4FYc2vvQSJwdZL8vqOyma8JGmc+r7jakIPCyOx3oPVqnS
-    L2P7DuJ1FcGIZyYOU3UUSzKndDU9fVC8YoLWvHDlwm4RK9UPtdsBY8mEu6BlaAwL
-    zEQG+fbcFnEkHPiJeAohYUCHiqCihLt0pqGwZi+QrudPQE6C47YijGZWJu4VVLjB
-    B2L9iDQKsN4FnBJ9egJIwWBLX3XXQfjC43UGm1A5sBvD+ScsCQKCAQEA7GxU7/SW
-    4YJ+wBXrp7Z3vzlc5mTT5U4L2muWZLhIjT/jmpHpZ4c9a5DY/K9OYcu8XJ+7kx2B
-    N40cU3ZkT2ZbB5/BUCEmi3Wzy3R/KZshHDzvvSZHcXJqVBtv+HGJgR5ssFqAw8c6
-    gJtDls+JE9Sz+nhLk0ZZ4658vbTQfG1lmtzrbC3Kz2xK8RPTdOU5Or7fayeaEKEW
-    ECBJPE41ME2UTdB/E85vyYoee0MBijjAs19QKqvoNbyrsZ5bihcIDYsrvjCmkdW1
-    20IUrSF3ZYJ9bb+CxHeRyNqwvRxPYSkzdMjZHx+xEAvJgw51QqmIi2QQf/qB+ych
-    cSbE/0Jhx4QbDQKCAQEAzAoenEOgmZvUegFUu8C6gWeibMjl3Y9SikQ4CoQO/zWr
-    aoCr5BpbzbtOffwnPfgk9wCGvXf6smOdrLUP1K2QAhBr/vJh7ih2MonvpYr5HPP7
-    maVARR66IgtxXP2ER2I9+9p2OQdecGRP2fUn2KCDQIASHSSY/VjBb8LLJgryC/DS
-    r2b0+m1e2qXfNWt/BYTQZhD/8B/jl/2pl/jI2ne3rkeiwEm7lqZaDt3Q8gC+qoP5
-    /IdG1Gob7UTMCbICWy1aGuzRYUmbpg0Vq4DAV1RtgBySB5oNq5PMBHYpOxedM2nM
-    NxHvf0u6wsxVULwQ4IfWUqUTspjxDmIgogSzmOGadwKCAQEA558if4tynjBImUtg
-    egirvG4oc5doeQhDWJN63eYlPizPgUleD41RQSbBTp04/1qoiV38WJ7ZT2Ex1Rry
-    H0+58vgyXZx8tLh1kufpBQv0HkQc44SzDZP4U7olspMZEaSK+yNPb36p9AEo8IEW
-    XJVQVhywffK4cfUqRHj2oFBU8KlrA6rBPQFtUk4IJkfED6ecHtDHgW8vvFDFLw23
-    0kDPAIU5WmAu6JYmUsBMq+v57kF8urF8Z9kVpIfuSpVR0GL+UfA74DgtWEefFhbp
-    cEutMm4jYPN7ofmOmVc49Yl13f4/qNxVjdDedUUe4FZTbax09cyotzOY8c/3w9R3
-    Ew57qQKCAQAa5jqi30eM+L5KV2KUXhQ4ezEupk2np/15vQSmXkKb4rd2kwAWUmNH
-    /Cmc8mE6CjzVU3xv/iFO41MmMbikkT0rCH80XUAL5cmvX//4ExpEduX0m5SdiC+B
-    zYBkggeuYYVKbsKnQhFxP8hHM8rNBFxJZJj+vpRs0gaudT/TBB5k9JrSBQDHAyQ+
-    Lx/+Ku3UDG5tBlC3l3ypzQdOwb25D49nqooKT64rbkLxMs0ZGoAIet26LRtpZZPI
-    9AjyPkWRP6lhY1c3PD0I5zC0K4Uv/jFxclLOLcEfnZyH+gv1fmd7H7eMixDH93Pn
-    uoiE3EZdU4st2hV+tisRel5S/cuvnA6BAoIBAQDJISK8H0hwYp+J4/WUv/WLtrm4
-    Mhmn8ItdEPAyCljycU6oLHJy4fgmmfRHeoO1i3jb87ks2GghegFBbJNzugfoGxIM
-    dLWIV+uFXWs24fMJ/J6lqN1JtAj7HjvqkXp061X+MdIJ0DsACygzFfJOjv+Ij77Q
-    Q1OBTSPfb0EWFNOuIJr9i2TwdN9eW/2ZMo1bPuwe4ttPEIBssfIC02dn2KD1RTqM
-    1l+L97vVFk7CoSJZf5rLeysLVyUeGdDcoEcRA6fKhfB/55h+iqrZNvySX1HrR6on
-    PQcxDRPJD7f9rMsTzVl3DOxzvXAU3lIcZtPZps97IwXceAAh2e1kZNNv/cxj
+    MIIJKQIBAAKCAgEApny0WhfDwEXe6WDTCw8qBuMAPDr88pj6kbhQWfzAW2c0TggJ
+    Etjs9dktENeTpSl14nnLVMiSYIJPYY3KbOIFQH1qDaOuQ7NaOhj9CdMTm5r9bl+C
+    YAyqLIMQ9AAZDhUcQjOy3moiL7ClFHlkFYuEzZBO9DF7hJpfUFIs0Idg50mNoZh/
+    K/fb4P2skNjfCjjomTRUmZHxT6G00ImSTtSaYbN/WHut1xXwJvOoT1nlEA/PghKm
+    JJ9ZuRMSddUJmjL+sT09L8LVkK8CKeHi4r58DHM0D0u8owIFV9qsXd5UvZHaNgvQ
+    4OAWGukMX+TxRuqkUZkaj84vnNL+ttEMl4jedw0ImzNtCOYehDyTPRkfng5PLWMS
+    vWbwyP8jDd2578mSbx5BF7ypYX366+vknjIFyZ5WezcC1pscIHxLoEwuhuf+knN+
+    kFkLOHeYbqQrU6mxSnu9q0hnNvGUkTP0a/1aLOGRfQ5C/pxpE/Rebi8qfM/OJFd4
+    mSxGL93JUTXWAItiIeBnQpIne65/Ska9dWynOEfIb0okdet3kfmNHz3zc17dZ5g4
+    AdOSCgHAlQgFt/Qd8W6xXUe4C5Mfv2ctxRrfQhDwtB6rMByPwzImnciC2h3vCwD3
+    vS/vjUyWICyhZyi2LZDUQz+sCKBXCYYcYh8ThFO40j5x1OnYMq7XQvyl8QkCAwEA
+    AQKCAgBSAfdssWwRF9m3p6QNPIj9H3AMOxpB/azffqTFzsSJwYp4LWkayZPfffy+
+    4RGvN38D8e6ActP3ifjEGu3tOGBR5fUJhujeHEiDea+a2Ug9S9kuNwmnelWQ23bM
+    Wgf9cdSbn4+qEymHyEFolmsAWdsuzri1fHJVXR06GWBNz4GiLA8B3HY4GD1M1Gfe
+    aZVkGagpXyeVBdiR2xuP5VQWVI8/NQWzdiipW/sRlNABVkyI3uDeN4VzYLL3gTeE
+    p021kQz4DSxIjHZacHpmWwhBnIbKMy0fo7TlrqcnIWXqTwv63Q9Zs/RN8NOyqb0Y
+    t1NKFWafcwUsdOnrG9uv/cVwF1FNE8puydaOi8rL1zAeK89JH8NRQ02wohR9w8qy
+    b2tB6DyGMtuqBt8Il6GA16ZoEuaXeayvlsvDEmG1cS9ZwBvfgrVPAmlm2AYdIf5B
+    RHIJu4BJC6Nn2ehVLqxx1QDhog3SOnAsCmcfg5g/fCwxcVMLIhODFoiKYGeMitDG
+    Q4e5JKcOg+RR8PT/n4eY4rUDBGtsR+Nw8S2DWgXmSufyfDtKCjZB4IuLWPS29tNh
+    zF6iYfoiTWzrSs/yqPSKIFpv+PWZwkKSvjdxia6lSBYYEON4W2QICEtiEs+SvcG4
+    0eIqWM+rRmPnJyMfGqX6GCs3rHDQB2VNJPBCYPQalJ/KwZumAQKCAQEA0ezM6qPJ
+    1JM/fddgeQ50h0T9TRXVUTCISxXza+l4NuFt1NdqUOdHsGtbL1JR4GaQUG8qD1/P
+    R39YgnQEQimxpmYLCZkobkwPxTZm9oiMXpcJrlN4PB5evaWShRSv3mgigpt3Wzml
+    Td+2R9RoA/hvF/wEyIvaWznYOyugBC7GXs20dNnZDULhUapeQu7r6JvgmxBOby7S
+    0FbhGplBiSDETzZURqzH/GMJKaJtNgyyVf3Hbg4mZAQDWoBRr+8HxsNbDkxP6e91
+    QrPHy2VZFiaTmJfoxRhyMTn7/JZaLJaUHDOniOsdMj/V7vMCgpfBqh5vR8bKzuPy
+    ZINggpcFPp1IYQKCAQEAywc7AQoktMBCru/3vzBqUveXbR3RKzNyZCTH5CMm3UNH
+    zmblFgqF2nxzNil21GqAXzSwZk5FyHbkeD3yvEZm+bXzsZTDNokAwoiTgyrr2tf8
+    GLMlCHHl5euIh1xHuyg/oKajVGOoXUXK8piqiDpQKd3Zwc6u2oyQlh+gYTPKh+7i
+    ilipkYawoE6teb6JUGpvU+d27INgNhB2oDEXY3pG2PbV+wv229ykSZxh1sJUdDwT
+    a8eTg+3pCGXtOZiJoQTFwKUlD2WYTGqS4Gx6dIJco5k+ZikGNST1JGE64Jl4MZdI
+    rtyvpcYblh5Q14sJGvp4kWYS9tjEM8pA+4Z9th3JqQKCAQEAkidH0+UM1A9gmQCm
+    jiHeR39ky5Jz3f7oJT63J15479yrVxBTWNhtNQrJhXzOvGkr+JQsuF+ANMsYmFql
+    zFqy8KMC9D/JwmD6adeif+o5sHF/r/s1LsYGOAtao4TvnOzrefs7ciwERt+GTSQ4
+    9uq0jgJMYkPcVr9DKI8K7V6ThdW52dECKRVzQiRXVEp7vIsqKUuFECuNYrfaKWai
+    FhLWGkA9FKee5L0e1/naB1N3ph72Bk2btO6GVzAXr2HADEZe0umWiczJ2xLH+3go
+    Oh/JiufYi8ClYFh6dDVJutlrbOcZsV3gCegfzikqijmWABcIavSgpsJVNF2zh7gV
+    Uq62gQKCAQAdO2FHeQpn6/at8WceY/4rC/MFhvGC4tlpidIuCtGhsfo4wZ/iWImF
+    N73u4nF1jBAHpTJwyHxLrLKgjWrRqOFSutvniZ/BzmAJolh63kcvL0Hg3IpMePm8
+    7PivZJ3/WIAwxU1m7SJkq5PY8ho7mwnHvWWI/hU26l42/z68QBS9FawQd0uS5G2x
+    5yIbEU/8ABcfYYhB7XiA0EYEMo1HiWeB/ag5iTN13ILbBmUf4sL+KVgygH3A1RRk
+    XSiWzluij2lZn22ClgIjnoSfQ38uH0bvVzUgyG9YX4XcQxOTGwWvPjT82FGB8NAw
+    ARVqs14QQFfzt1qrp/I38rsAfBDFk+xhAoIBAQCEKNk/oJcy9t/jMIbLcn6z3aCc
+    Fn8GBPSXtFj0t6weN5lHof+cggw4owMFWQQyAXxo/K6NnKNydMPZ5qjtLsHNpbpQ
+    aT1Or0/1YR1bJ8Lo82B4QM++7F761GWQPvE/tyrfPkfkWl92ITIpmnlw4wycRlkq
+    9anI2fnj1nIZwixzE2peb6PcsZU2HOs9uZ5RRd9wia696I7IpNibs4O4J2WTm4va
+    +NeYif3V2g9qwgT0Va0c9/Jlg3b58R0vA8j/VCU5I0TyXpkB3Xapx+pvEdZ3viUL
+    mXZaVotmWjgBXGDtd2VQg2ZiAMXHn3RzXSgV4Z+A/XacRs75h9bNw0ZJYrz1
     -----END RSA PRIVATE KEY-----
   '';
-  "acme-v01.api.letsencrypt.org".cert = builtins.toFile "acme-v01.api.letsencrypt.org.cert" ''
+  "acme-v02.api.letsencrypt.org".cert = builtins.toFile "acme-v02.api.letsencrypt.org.cert" ''
     -----BEGIN CERTIFICATE-----
     MIIEtDCCApwCAgKaMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNVBAMMC1NuYWtlb2ls
-    IENBMCAXDTE4MDcxMjAwMjIxN1oYDzIxMTgwNjE4MDAyMjE3WjAnMSUwIwYDVQQD
-    DBxhY21lLXYwMS5hcGkubGV0c2VuY3J5cHQub3JnMIICIjANBgkqhkiG9w0BAQEF
-    AAOCAg8AMIICCgKCAgEAvG+sL4q0VkgSClBTn4NkPiUrtXx5oLyZ+CCM1jrQx/xo
-    tUt5X2S4/7vMnAK/yRLsR7R2PhXO8CZPqJ7B6OfAgaDTgvipJkZYPZQSMP3KOinM
-    3WJLssqKh7/HOxZIf0iyUXewrnX5eTAo/CLsUnhBjBD7E99nmQz/leLWSl82sSYD
-    kO3nUk3/1qJZA8iddb4uH0IEQWcNKev3WoQQzwiVrXBiftlRQOJy5JJXm5m8229M
-    CpMA1AUWmpdu6sl3/gFFdsDhUFq/a7LFrVyaUCMRIHg9szAB7ZFkixr9umQs8jKw
-    uo983JHB11h2SirwgfIzHHmyhaWhCt22ucTwEXGhq63LtrzZvLsfP8Ql5S+AuqGT
-    H0v8meuc784leAjulBZjkpuIFwDnVv9+YeUEbqJeo1hSHrILddora3nkH4E2dJWm
-    LpqpiPr++GRi+BNgYKW/BQLTJ7C6v+vUs+kdPgYJH5z7oP6f0YZkT0Wkubp/UEz7
-    UV2dfjz57d77DYx5rFWGYzJriWR/xltgL1zDpjwjwG1FDpRqwlyYbBFpjQhxI+X0
-    aT98m6fCzBDQHDb/+JgvsjTHh6OZatahFAwzFIEfrceDv1BG8sBWIaZGhLzYiWQx
-    afl8oXbWv1T6I1jpsTlCdCSkWzaJb4ZjxI9Ga1ynVu8F16+GR2a71wKWu7UbZQsC
-    AwEAATANBgkqhkiG9w0BAQsFAAOCAgEAzeGlFMz1Bo+bbpZDQ60HLdw7qDp3SPJi
-    x5LYG860yzbh9ghvyc59MIm5E6vB140LRJAs+Xo6VdVSTC4jUA2kI9k1BQsbZKds
-    XT0RqA7HkqcLS3t3JWFkkKbCshMGZTSZ//hpbaUG1qEAfUfmZw1lAxqSa0kqavbP
-    awf7k8qHbqcj7WORCdH7fjKAjntEQwIpl1GEkAdCSghOJz2/o9aWmiGZt27OM/sG
-    MLSrcmL3QBElCjOxg14P8rnsmZ+VEp6MO93otoJ4dJL7fN7vTIh5ThbS384at/4l
-    4KK/y7XctUzAtWzhnodjk/NSgrrGX2kseOGOWEM1sZc9xtinHH2tpOMqtLVOkgHD
-    Lul+TArqgqeoOdEM/9OL64kgOrO/JzxBq+egLUi4wgAul2wmtecKZK1dkwYZHeqW
-    74i55yeBp+TTomnPr0ZBns6xKFYldJVzC34OB+2YVDxe8y9XtWtuQOxFw0LQHhNb
-    zy5aBverWzZFwiIIjJoVHTQq848uKBJec0YILfMinS1Wjif4xqW/IMfi+GFS0oka
-    sKCGNE/8ur9u/Jm6cbto3f2dtV8/vkhiITQgwzM2jalyuVJ9jyPxG7EvbTvZORgw
-    pRvBRTd4/eE7I1L+UDe6x8EjR/MrqfF9FWVGOZo4vPTyNbrSWYBh6s9kYy56ds1l
-    IRxst1BXEfI=
+    IENBMCAXDTE5MTAxODA3NTQxM1oYDzIxMTkwOTI0MDc1NDEzWjAnMSUwIwYDVQQD
+    DBxhY21lLXYwMi5hcGkubGV0c2VuY3J5cHQub3JnMIICIjANBgkqhkiG9w0BAQEF
+    AAOCAg8AMIICCgKCAgEApny0WhfDwEXe6WDTCw8qBuMAPDr88pj6kbhQWfzAW2c0
+    TggJEtjs9dktENeTpSl14nnLVMiSYIJPYY3KbOIFQH1qDaOuQ7NaOhj9CdMTm5r9
+    bl+CYAyqLIMQ9AAZDhUcQjOy3moiL7ClFHlkFYuEzZBO9DF7hJpfUFIs0Idg50mN
+    oZh/K/fb4P2skNjfCjjomTRUmZHxT6G00ImSTtSaYbN/WHut1xXwJvOoT1nlEA/P
+    ghKmJJ9ZuRMSddUJmjL+sT09L8LVkK8CKeHi4r58DHM0D0u8owIFV9qsXd5UvZHa
+    NgvQ4OAWGukMX+TxRuqkUZkaj84vnNL+ttEMl4jedw0ImzNtCOYehDyTPRkfng5P
+    LWMSvWbwyP8jDd2578mSbx5BF7ypYX366+vknjIFyZ5WezcC1pscIHxLoEwuhuf+
+    knN+kFkLOHeYbqQrU6mxSnu9q0hnNvGUkTP0a/1aLOGRfQ5C/pxpE/Rebi8qfM/O
+    JFd4mSxGL93JUTXWAItiIeBnQpIne65/Ska9dWynOEfIb0okdet3kfmNHz3zc17d
+    Z5g4AdOSCgHAlQgFt/Qd8W6xXUe4C5Mfv2ctxRrfQhDwtB6rMByPwzImnciC2h3v
+    CwD3vS/vjUyWICyhZyi2LZDUQz+sCKBXCYYcYh8ThFO40j5x1OnYMq7XQvyl8QkC
+    AwEAATANBgkqhkiG9w0BAQsFAAOCAgEAkx0GLPuCvKSLTHxVLh5tP4jxSGG/zN37
+    PeZLu3QJTdRdRc8bgeOGXAVEVFbqOLTNTsuY1mvpiv2V6wxR6nns+PIHeLY/UOdc
+    mOreKPtMU2dWPp3ybec2Jwii6PhAXZJ26AKintmug1psMw7662crR3SCnn85/CvW
+    192vhr5gM1PqLBIlbsX0tAqxAwBe1YkxBb9vCq8NVghJlKme49xnwGULMTGs15MW
+    hIPx6sW93zwrGiTsDImH49ILGF+NcX1AgAq90nG0j/l5zhDgXGJglX+K1xP99X1R
+    de3I4uoufPa5q+Pjmhy7muL+o4Qt0D0Vm86RqqjTkNPsr7gAJtt66A7TJrYiIoKn
+    GTIBsgM6egeFLLYQsT0ap/59HJismO2Pjx4Jk/jHOkC8TJsXQNRq1Km76VMBnuc0
+    2CMoD9pb38GjUUH94D4hJK4Ls/gJMF3ftKUyR8Sr/LjE6qU6Yj+ZpeEQP4kW9ANq
+    Lv9KSNDQQpRTL4LwGLTGomksLTQEekge7/q4J2TQRZNYJ/mxnrBKRcv9EAMgBMXq
+    Q+7GHtKDv9tJVlMfG/MRD3CMuuSRiT3OVbvMMkFzsPkqxYAP1CqE/JGvh67TzKI+
+    MUfXKehA6TKuxrTVqCtoFIfGaqA9IWyoRTtugYq/xssB9ESeEYGeaM1A9Yueqz+h
+    KkBZO00jHSE=
     -----END CERTIFICATE-----
   '';
   "letsencrypt.org".key = builtins.toFile "letsencrypt.org.key" ''
     -----BEGIN RSA PRIVATE KEY-----
-    MIIJKAIBAAKCAgEAwPvhlwemgPi6919sSD7Pz6l6CRfU1G/fDc0AvsMN/nTmiGND
-    pqn9ef1CA+RtLtOuPc1LLyEovcfu75/V+6KSgO4k19E2CrFCFwjEOWDGF4DgclT3
-    751WGmFJgzPEfZfhbOrmQfQau86KxAtNZVp9FxcKbuLyQ/sNNxfNMB+7IHbVhwvz
-    VcndHpYZEP6kdnwvNLP22bouX5q3avxWStln01uZ0BfUm4XwxaUNIU7t0Dv56FK9
-    C9hW9AZae0do0BJBWRF7xSwLeDJqn9uZz+sX0X/tIaaSQSBuZySj0He5ZKzdUO0t
-    px2xTS2Brl3Y2BOJaOE98HubWvdKoslLt4X2rVrMxGa86SmFzcyDL1RSowcP/ruy
-    y555l7pepL5s4cmMgRBBXj5tXhqUTVOn5WO+JClLk+rtvtAT4rogJmMqEKmMw2t7
-    LNy1W9ri/378QG/i3AGaLIL/7GsPbuRO51Sdti4QMVe2zNFze72mzNmj1SXokWy7
-    +ZvjUMp55oEjRRsTPUZdNOEHJWy6Os2znuqL7ZpIHCxBG8FKnkCViXRJqAA8bzcE
-    hR+pLamLIOHlv4kdzJ6phHkSvK68qvbRReUmOjJgSupVBI9jhK+fHay/UWR4zfJQ
-    ed99H8ZOoiXlrLCVs+VPDynUUKrzF1nYyolNzi/NS4e4AbnfWgyC5JKRpjUCAwEA
-    AQKCAgB0fNYL+zM3MGxy+2d6KGf6GnuuV3NBlBGY3ACyJT0iNmAdPYXNaVi2tPeP
-    L+fz1xSa+3uBhEt6Wt/QRrO8g8JZDuawWvl69MpG6yS+2bpY35MbkExkl50sqULd
-    bncRtIb+3r+EWht099RtR8E9B6TwNhk3G8hO3pB4i+ZwQQcMLo7vSHhmdUYCu2mA
-    B6UwW/+GmYbMoARz8wj6DDzuS1LPksBCis/r3KqcMue9Dk6gXkOYR7ETIFBEVj1x
-    ooYS6qIFaHdEajS2JgCUY9LxXR/wdn6lzE0GANSDb+tt34bJzUp+Gdxvvo2SX4Ci
-    xsUokIpmA2gG7CW3gAPORSFuMu/VYZtvt+owNYlODXRPuGi/eLDknFRB/S4Nx0J0
-    WZZq5uTgJdQainyKYtDZALia5X4cc5I2hNetCorG9jNZIsSunbIAG+htx2FI3eqK
-    jwOUiHE8SCZ6YdXoDQjg2w+g8jeB23eqkPyzunpZphYiKay7VFeLwQEMC2a791ln
-    +MbHhhpRAc1uAoU2reB2fxKyaPlOfAWVMgUOGlgpVOuEVeMoc1CwjajaFztGG7fI
-    8EHNoyAftCdXnTaLZk2KZnnIDHHzFXR62TE1GJFD1fdI1pHAloCbgA4h+Dtwm1Uu
-    iAEEfvVU/E5wbtAzv6pY32+OKX5kyHAbM5/e918B8ZxmHG1J9QKCAQEA6FwxsRG3
-    526NnZak540yboht5kV12BNBChjmARv/XgZ7o1VsfwjaosErMvasUBcHDEYOC/oE
-    ZgPAyrMVsYm0xe/5FSIFLJVeYXTr0rmCNhVtBCHx3IS94BCXreNnz0qoEWnb5E09
-    Z1O42D0yGcLXklg6QaJfb7EdHh03F3dSVMHyDR3JlAQHRINeuP6LlQpbvRD3adH5
-    QWr2M3k+Stuq2OJdG7eUS1dreCxRShLuDjDhiZekdl/TB3LM0prOaWrKBrryN2g6
-    mjiasH6I5zRD3LQP5zg57Thb8afHqA4Fb85Frt6ltfFlPTIoxXZ5drVhmRWfXXnQ
-    POnj8T+w4zVjvwKCAQEA1J4ivyFkCL0JTSY3/PtwAQvBBj3GazzU6P+urWeH74Vh
-    WK17Ae40iOUHGyy80Db/fVY4VLQTpxvAeG91Gj5Nd/AucXJgOrisabcEz6N/xUs5
-    sjJNgXuNKTAgjYBu0bqLXxgZj43zT8JhA6KW7RuYU0PtHMRragz4RbK9NWDaVvJb
-    xSR5QoVLS00PerUa0SfupEYKCrlSTP6FOM5YNkCuSMt7X6/m9cR0WwVINKvUQBiT
-    ObrN+KeBmF9awpQQnQOq/GbCl3kf6VyPQqYFhdrWSg52w33c2tBVYrtHJpeXGcin
-    akw4KKcj4rdU2qxMuuRiD5paagshbLdGsYMTbSzjCwKCAQEAh89DGAyUIcfDLAWd
-    st0bSfGh0oJsw3NVg3JUFPfpRWqiny/Rr1pcd95RwoLc6h7bdrgHg8aJBZtR9ue/
-    WTp0l3CQdGKjBZD0TiAJqevViIjzZAP3Gn3XgPwRu4f75/Pp0eu+o2zl49vSYUk7
-    XEU+vIGm4y/leiHaM/y9c5DBZVrKgBIV/NZx7QCfv56/tMgOIK6m/YnFlw/OgP1v
-    hE9qR0PfSdD98x9QaDf290WjMFYvrL0eWjXd4S+fOcVTude55z8jTXE1N2i4OUpr
-    +D7bH0d7OBjr+pQDYXZAQyCW2ueEYRYvYu2Jz7/ehrOdgN25AsHZmMgXB1NpcFta
-    pyJQfwKCAQByoPMwworRH0GVg4Zp8RFYrwKZH9MK29gZ6kc9m/Sw0OND0PvhdZCD
-    QZ8MKpl9VDl4VHS4TgHOdWrWQ5kJ1g8kG6yeY0C4R/pEYHTKkWaAcucfSHl61qar
-    TxQt1dFpZz5evXqCZ9CG7tApCo5+NQNx2MxMVyVmHqn3wb66uYXdnHqXlet+Tqji
-    ZyByUpOrsfC6RjyBvZo+gnZGwxDR5xtPiczxML+/PvRQYk+kfgNHrzgoxqrnZT+8
-    a6ReBT/TtzeHLsu4qIfo44slLqcJnIstkBC9ouzgV7PBMCDTEKVZNFH2QDOCz2HM
-    iHTKFFyl4h1wNhKK24dguor1hyqBENMzAoIBAAQvQHwRWIVlfCMRI170Ls8AXB9Z
-    MMdZJ37bh6kmJpkV3+HB1ZkKwofHKR9h/3xLt5iYXzqT+/zA4EAsFFs1A93+tkzh
-    yPrN5iTSJicophZSlA4ObX1hMkgshvl7ZB1fRM5WyiszBOfm8W7eAxaK8nY2oAoP
-    tI7rioo6CFBNMCGbOl4gEX6YJ4OsVSm+efCRSDDw+3HW8H2YgqufBzAULk1Jcj5t
-    ZvraXpC5qZ92VtsH0cGA1ovNDAmoOV4AAvtZVpLQsXwaphad/Fbn/ItGrrluvvFC
-    HuldRzYtl/AQtoirK86LTY3aAmcwVFuiYvDQMzjzkJvVMmRCFZBcUIaz2oI=
+    MIIJKgIBAAKCAgEA9dpdPEyzD3/BBds7tA/51s+WmLFyWuFrq4yMd2R+vi5gvK7n
+    lLNVKhYgiTmK2Um+UEpGucJqZHcTSZA1Bz4S/8ND/AI9I6EmwvBinY5/PubxEALk
+    9YiDA+IzH8ZGFM8wXg7fMbbJAsyv+SHAtr2jmCsggrpuD5fgzs2p+F2q0+oVoeFw
+    MAOUdAf2jNtNLEj2Q6MiR5Xq+wFOcRtXlNlXWIX3NrmubO/xOpDNpsyjyYC5Ld+W
+    06MS5bTHSdv56AkUg2PugMChj15TOddEJIK8zPXFTlMYye9SKwjhNUZovfe4xXCa
+    Tj2nmzrcuMKLz+S3sKQeTWjiRcY3w4zTlAbhtGXDjXjhMObrHoWM8e3cTL4NJMvt
+    tNStXficxbeTbIiYu+7dtF0q+iWaZqexc6PdAaIpFZ0XSw+i5iLdQZmBwzY7NLlH
+    pQupfh6ze0qDUVZAMDubo4JKUTBzH6QTuhHx+uUm7Lc8YdNArn7o/vMZDQym1Eia
+    xKxZuCGaqFvq8ZK4nBVsHfcXbhF/XD2HMid3t7ImbREVu9qnc+En+acU/SJaaL3r
+    jMW6HLVMr6+vQrCzYkvLzKYpoUm9D1Kcn6d8Ofxl2iCaY9CkMr5/6J1p1wcTdcN7
+    IVQ/DFBeTDauyWbyZkO/lPoZoakWyXOx9S9tgClzhFmNgRkZv9wN+QguNDcCAwEA
+    AQKCAgEA0ndlacGfaJ1NeN39dmBW2XZMzdrassJXkjx34528gsLhPaXdyobbWXQn
+    1lHUc7+VlNaBRXUR73+gm1FAlDqnuRxIjuy7ukyzCh8PzSG3/PlnVPWlXCzJPAHh
+    EkqCpD3agirpF34LBsKDwxsKB2bBLft9kWxX3DGA2olmAKDvJQs4CaUcjX4DEHHg
+    tyTmJAsyByUYq3/D8a1koZ9ukpadF8NXpxm+ILQoJqLf6vM1I8N2w7atP/BStSLV
+    mH0gq2tajEB4ZPCDXmC5jsKiKz9gsXWUu0CX8AdYqE6pvRnRgQ8Ytq1265QMb+8s
+    FV82oXqDZkyZRFuNmX3fLyDX39kkTcVS37S56Gzk4EzDWE/u2RXCAPeWla2zUFYI
+    hg8X4ZAwbZRODtK2cZTuCZEILM/iKmtSgHC+aQhp18EUAefa7WGrRD4AvbTxH4VF
+    ek60bwISBk5Mhf39MwqIiQxGOFmfLsQReZvzH4jI5zfDXf/0yZ/1SdGeu6+Walt0
+    V81Ua/DB6zshHpeSP74HMuJHZ4DOQfcV/ndyzvoP84pAjenSx6O034OwQTkpoMI/
+    f/2rK8kdzYSL4f//kFMuRLqmAwOmAFYB2oMo0/YaIoQ4vgTHDKTSxj5mbno56GdT
+    huMAVMKskaCSVbyMB/xyQG7senLItVv+HafVk6ChMUbkIjv9zgECggEBAP+ux1RG
+    cETGjK2U3CRoHGxR7FwaX6hkSokG+aFdVLer+WUrZmR8Ccvh2ALpm8K1G6TTk/5X
+    ZeVX4+1VFYDeTHMN8g20usS5mw3v2GF3fGxGLe4q56l4/4kKMZOrSBuWH4niiIKD
+    0QogdzWkpQJ93nMbZxZ5lk+lRZVf3qSm6nzyP468ndrfI57Ov5OUIWZ7KhTUH9IK
+    8/urUk+lEvyzQmNTlt5ZZXRz7cR01K8chx1zevVAyynzSuGjTysaBN7LTT0v3yVu
+    96yKNsxJvuIz2+4qSjhbnN4jH+feN0VsdF3+Qkru0lBmLVgJl4X67XFaAKMDU9yv
+    3alS53Pkol+Dy1cCggEBAPYodofHC1ydoOmCvUAq4oJNtyI4iIOY/ch3sxVhkNyi
+    KBscQqbay/DiXFiNl+NsemzB1PrHzvCaqKcBKw537XzeKqUgYuVLkFGubf9bDhXi
+    wSRcYbU/oNTgiTgXPW8wH60uIoLaiNi1/YjO2zh4GEY/kFqSuD54Y91iFmcC75bv
+    OjCNugnRdpRjOFhaeNx75tdverR37w3APVZuBSv3bJlMPCtaf+fEAKxJxeqCs3Oq
+    rtsw2TQ4TqfE8/w9qPCVv3bQbMbO48SwjxAz47qH2h3qGu3Ov8badeARe+Ou7nuI
+    U13gPuPOhPXIQP/MYOyamPJdFyng1b8vyNsfjOcWMiECggEAEkMgl6NkV3U7DRbp
+    1mvdQ9tiH33+wR9Qt5LY966b43aUHKbJ7Hlzla1u6V5YMsMO02oNUwhZDdWGQShn
+    ncnC+iDP3iy/flenfIpaETQgnfcxRqan31H2Joqk2eBNCTNi001r5K6XmrqQ6TL2
+    WkQ1RFF7vn42vz+VxcKQO4B0lTIUWhSczcpMWAZ6ZocZD6HScqRoFW+U16/39Bpd
+    TdFb944742vNNFEndXXGzy8hc3gRGz1ihX+MJKuuduyn1mX9AVbPAHR5mkhQ+6x0
+    xuFfXxaEMJxSiwdFOyGDHyFM+n2zrHh8ayOxL22X9gjjNspv6zTMo6GoGnUCdSOq
+    eVoHhwKCAQEAot5O3rOB/vuEljwcv7IgQJrvCsNg/8FgWR1p7kGpuXHJG3btWrz1
+    pyH+e9DjqGQD9KWjJ3LAp02NPUJ2nJIZHj9Y8/yjspb2nDTPLt+uSCjKJibBt0ys
+    O219HRGzYjfzHYCi8PVrCggQAk7rmUdMuF4iQutE4ICDgtz9eZbls3YBiFKdvxVK
+    Yg/sHflucmPAbtah13prPyvs6ZzN6zNANYXNYdn1OwHieBwvyWRFG8jY/MorTHPd
+    BwA3drPNbbGHBzQMZNZKub8gSVYr3SU52gUlYCclmIq+50xqLlF2FWIz1q8irVPd
+    gUnIR/eQQbxgaivRwbGze1ZAjUsozVVQQQKCAQEA9uAKU3O06bEUGj+L0G+7R7r/
+    bi2DNi2kLJ7jyq+n0OqcHEQ1zFK4LAPaXY0yMYXieUzhivMGLSNDiubGO2/KxkFF
+    REXUFgYWZYMwrKsUuscybB64cQDwzD0oXrhvEa2PHecdG6AZ63iLcHaaDzyCPID/
+    wtljekLO2jbJ5esXZd016lykFfUd/K4KP1DGyI2Dkq6q0gTc/Y36gDAcPhIWtzna
+    UujYCe3a8DWCElH4geKXaB5ABbV1eJ8Lch599lXJ9Hszem6QNosFsPaHDCcqLS9H
+    yy2WA6CY2LVU7kONN+O0kxs2fVbxIkI+d/LZyX/yIGlkXcAzL07llIlrTAYebQ==
     -----END RSA PRIVATE KEY-----
   '';
   "letsencrypt.org".cert = builtins.toFile "letsencrypt.org.cert" ''
     -----BEGIN CERTIFICATE-----
     MIIEpzCCAo8CAgKaMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNVBAMMC1NuYWtlb2ls
-    IENBMCAXDTE4MDcxMjAwMjIxOVoYDzIxMTgwNjE4MDAyMjE5WjAaMRgwFgYDVQQD
+    IENBMCAXDTE5MTAxODA3NTQxNVoYDzIxMTkwOTI0MDc1NDE1WjAaMRgwFgYDVQQD
     DA9sZXRzZW5jcnlwdC5vcmcwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
-    AQDA++GXB6aA+Lr3X2xIPs/PqXoJF9TUb98NzQC+ww3+dOaIY0Omqf15/UID5G0u
-    0649zUsvISi9x+7vn9X7opKA7iTX0TYKsUIXCMQ5YMYXgOByVPfvnVYaYUmDM8R9
-    l+Fs6uZB9Bq7zorEC01lWn0XFwpu4vJD+w03F80wH7sgdtWHC/NVyd0elhkQ/qR2
-    fC80s/bZui5fmrdq/FZK2WfTW5nQF9SbhfDFpQ0hTu3QO/noUr0L2Fb0Blp7R2jQ
-    EkFZEXvFLAt4Mmqf25nP6xfRf+0hppJBIG5nJKPQd7lkrN1Q7S2nHbFNLYGuXdjY
-    E4lo4T3we5ta90qiyUu3hfatWszEZrzpKYXNzIMvVFKjBw/+u7LLnnmXul6kvmzh
-    yYyBEEFePm1eGpRNU6flY74kKUuT6u2+0BPiuiAmYyoQqYzDa3ss3LVb2uL/fvxA
-    b+LcAZosgv/saw9u5E7nVJ22LhAxV7bM0XN7vabM2aPVJeiRbLv5m+NQynnmgSNF
-    GxM9Rl004QclbLo6zbOe6ovtmkgcLEEbwUqeQJWJdEmoADxvNwSFH6ktqYsg4eW/
-    iR3MnqmEeRK8rryq9tFF5SY6MmBK6lUEj2OEr58drL9RZHjN8lB5330fxk6iJeWs
-    sJWz5U8PKdRQqvMXWdjKiU3OL81Lh7gBud9aDILkkpGmNQIDAQABMA0GCSqGSIb3
-    DQEBCwUAA4ICAQAkx3jcryukAuYP7PQxMy3LElOl65ZFVqxDtTDlr7DvAkWJzVCb
-    g08L6Tu+K0rKh2RbG/PqS0+8/jBgc4IwSOPfDDAX+sinfj0kwXG34WMzB0G3fQzU
-    2BMplJDOaBcNqHG8pLP1BG+9HAtR/RHe9p2Jw8LG2qmZs6uemPT/nCTNoyIL4oxh
-    UncjETV4ayCHDKD1XA7/icgddYsnfLQHWuIMuCrmQCHo0uQAd7qVHfUWZ+gcsZx0
-    jTNCcaI8OTS2S65Bjaq2HaM7GMcUYNUD2vSyNQeQbha4ZeyZ9bPyFzznPMmrPXQe
-    MJdkbJ009RQIG9As79En4m+l+/6zrdx4DNdROqaL6YNiSebWMnuFHpMW/rCnhrT/
-    HYadijHOiJJGj9tWSdC4XJs7fvZW3crMPUYxpOvl01xW2ZlgaekILi1FAjSMQVoV
-    NhWstdGCKJdthJqLL5MtNdfgihKcmgkJqKFXTkPv7sgAQCopu6X+S+srCgn856Lv
-    21haRWZa8Ml+E0L/ticT8Fd8Luysc6K9TJ4mT8ENC5ywvgDlEkwBD3yvINXm5lg1
-    xOIxv/Ye5gFk1knuM7OzpUFBrXUHdVVxflCUqNAhFPbcXwjgEQ+A+S5B0vI6Ohue
-    ZnR/wuiou6Y+Yzh8XfqL/3H18mGDdjyMXI1B6l4Judk000UVyr46cnI7mw==
+    AQD12l08TLMPf8EF2zu0D/nWz5aYsXJa4WurjIx3ZH6+LmC8rueUs1UqFiCJOYrZ
+    Sb5QSka5wmpkdxNJkDUHPhL/w0P8Aj0joSbC8GKdjn8+5vEQAuT1iIMD4jMfxkYU
+    zzBeDt8xtskCzK/5IcC2vaOYKyCCum4Pl+DOzan4XarT6hWh4XAwA5R0B/aM200s
+    SPZDoyJHler7AU5xG1eU2VdYhfc2ua5s7/E6kM2mzKPJgLkt35bToxLltMdJ2/no
+    CRSDY+6AwKGPXlM510QkgrzM9cVOUxjJ71IrCOE1Rmi997jFcJpOPaebOty4wovP
+    5LewpB5NaOJFxjfDjNOUBuG0ZcONeOEw5usehYzx7dxMvg0ky+201K1d+JzFt5Ns
+    iJi77t20XSr6JZpmp7Fzo90BoikVnRdLD6LmIt1BmYHDNjs0uUelC6l+HrN7SoNR
+    VkAwO5ujgkpRMHMfpBO6EfH65Sbstzxh00Cufuj+8xkNDKbUSJrErFm4IZqoW+rx
+    kricFWwd9xduEX9cPYcyJ3e3siZtERW72qdz4Sf5pxT9IlpoveuMxboctUyvr69C
+    sLNiS8vMpimhSb0PUpyfp3w5/GXaIJpj0KQyvn/onWnXBxN1w3shVD8MUF5MNq7J
+    ZvJmQ7+U+hmhqRbJc7H1L22AKXOEWY2BGRm/3A35CC40NwIDAQABMA0GCSqGSIb3
+    DQEBCwUAA4ICAQBbJwE+qc0j6JGHWe0TGjv1viJU3WuyJkMRi+ejx0p/k7Ntp5An
+    2wLC7b/lVP/Nh+PKY/iXWn/BErv2MUo4POc1g8svgxsmMMh5KGGieIfGs7xT+JMH
+    dzZZM+pUpIB5fEO5JfjiOEOKDdAvRSs0mTAVYZEokGkXSNWyylvEaA16mHtMgPjo
+    Lm75d0O66RfJDdd/hTl8umGpF7kEGW1qYk2QmuPr7AqOa8na7olL5fMPh6Q7yRqx
+    GIS9JKQ0fWl8Ngk09WfwUN/kEMcp9Jl5iunNRkbpUJIM/lHFkSA7yOFFL+dVWzd4
+    2r+ddJXTFzW8Rwt65l8SV2MEhijEamKva3mqKLIRWxDsfFVT1T04LWFtnzMW4Z29
+    UHF9Pi7XSyKz0Y/Lz31mNTkjJYbOvbnwok8lc3wFWHc+lummZk8IkCq8xfqzwmwX
+    Ow6EV+Q6VaQpOHumQZ12pBBLtL8DyDhWaRUgVy2vYpwYsMYa5BFMcKCynjlSewo9
+    G2hNoW45cQZP1qHltRR9Xad7SaP7iTETDCiR7AWOqSpDipSh9eMfVW97ZbSfz+vl
+    xl8PZEZMTRIIRVXsPP+E8gtDUhUQp2+Vcz8r6q71qslXM09xl/501uaNjCc3hH2R
+    iw2N77Lho1F3FrBbHdML3RYHZI55eC9iQw6R4S+R4b+iWLJoHzHrW61itg==
     -----END CERTIFICATE-----
   '';
 }
diff --git a/nixos/tests/fancontrol.nix b/nixos/tests/fancontrol.nix
new file mode 100644
index 0000000000000..83ddbb54c5bb7
--- /dev/null
+++ b/nixos/tests/fancontrol.nix
@@ -0,0 +1,25 @@
+import ./make-test.nix ({ pkgs, ... } : {
+  name = "fancontrol";
+
+  machine =
+    { ... }:
+    { hardware.fancontrol.enable = true;
+      hardware.fancontrol.config = ''
+        INTERVAL=42
+        DEVPATH=hwmon1=devices/platform/dummy
+        DEVNAME=hwmon1=dummy
+        FCTEMPS=hwmon1/device/pwm1=hwmon1/device/temp1_input
+        FCFANS=hwmon1/device/pwm1=hwmon1/device/fan1_input
+        MINTEMP=hwmon1/device/pwm1=25
+        MAXTEMP=hwmon1/device/pwm1=65
+        MINSTART=hwmon1/device/pwm1=150
+        MINSTOP=hwmon1/device/pwm1=0
+      '';
+    };
+
+  # This configuration cannot be valid for the test VM, so fancontrol is expected to report that the configuration appears to be outdated.
+  testScript = ''
+    $machine->waitForUnit("fancontrol.service");
+    $machine->waitUntilSucceeds("journalctl -eu fancontrol | grep 'Configuration appears to be outdated'");
+  '';
+})
diff --git a/nixos/tests/gotify-server.nix b/nixos/tests/gotify-server.nix
new file mode 100644
index 0000000000000..0ffc3138d5a1d
--- /dev/null
+++ b/nixos/tests/gotify-server.nix
@@ -0,0 +1,45 @@
+import ./make-test.nix ({ pkgs, lib, ...} : {
+  name = "gotify-server";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ ma27 ];
+  };
+
+  machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.jq ];
+
+    services.gotify = {
+      enable = true;
+      port = 3000;
+    };
+  };
+
+  testScript = ''
+    startAll;
+
+    $machine->waitForUnit("gotify-server");
+    $machine->waitForOpenPort(3000);
+
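+    # Create an application token, using gotify's default admin:admin credentials.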
+    my $token = $machine->succeed(
+      "curl --fail -sS -X POST localhost:3000/application -F name=nixos " .
+      '-H "Authorization: Basic $(echo -ne "admin:admin" | base64 --wrap 0)" ' .
+      '| jq .token | xargs echo -n'
+    );
+
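+    # A client token is needed to read messages back from the API.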
+    my $usertoken = $machine->succeed(
+      "curl --fail -sS -X POST localhost:3000/client -F name=nixos " .
+      '-H "Authorization: Basic $(echo -ne "admin:admin" | base64 --wrap 0)" ' .
+      '| jq .token | xargs echo -n'
+    );
+
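+    # Push a message via the application token; it is fetched back below with the client token.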
+    $machine->succeed(
+      "curl --fail -sS -X POST 'localhost:3000/message?token=$token' -H 'Accept: application/json' " .
+      '-F title=Gotify -F message=Works'
+    );
+
+    my $title = $machine->succeed(
+      "curl --fail -sS 'localhost:3000/message?since=0&token=$usertoken' | jq '.messages|.[0]|.title' | xargs echo -n"
+    );
+
+    $title eq "Gotify" or die "Wrong title ($title), expected 'Gotify'!";
+  '';
+})
diff --git a/nixos/tests/installer.nix b/nixos/tests/installer.nix
index a136678c6eff0..eb1f4f192dd11 100644
--- a/nixos/tests/installer.nix
+++ b/nixos/tests/installer.nix
@@ -54,8 +54,6 @@ let
 
         hardware.enableAllFirmware = lib.mkForce false;
 
-        services.udisks2.enable = lib.mkDefault false;
-
         ${replaceChars ["\n"] ["\n  "] extraConfig}
       }
     '';
@@ -295,8 +293,6 @@ let
               ++ optional (bootLoader == "grub" && grubVersion == 1) pkgs.grub
               ++ optionals (bootLoader == "grub" && grubVersion == 2) [ pkgs.grub2 pkgs.grub2_efi ];
 
-            services.udisks2.enable = mkDefault false;
-
             nix.binaryCaches = mkForce [ ];
             nix.extraOptions =
               ''
diff --git a/nixos/tests/matomo.nix b/nixos/tests/matomo.nix
new file mode 100644
index 0000000000000..4efa65a7b6dea
--- /dev/null
+++ b/nixos/tests/matomo.nix
@@ -0,0 +1,43 @@
+{ system ? builtins.currentSystem, config ? { }
+, pkgs ? import ../.. { inherit system config; } }:
+
+with import ../lib/testing.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  matomoTest = package:
+  makeTest {
+    machine = { config, pkgs, ... }: {
+      services.matomo = {
+        package = package;
+        enable = true;
+        nginx = {
+          forceSSL = false;
+          enableACME = false;
+        };
+      };
+      services.mysql = {
+        enable = true;
+        package = pkgs.mysql;
+      };
+      services.nginx.enable = true;
+    };
+
+    testScript = ''
+      startAll;
+      $machine->waitForUnit("mysql.service");
+      $machine->waitForUnit("phpfpm-matomo.service");
+      $machine->waitForUnit("nginx.service");
+      $machine->succeed("curl -sSfL http://localhost/ | grep '<title>Matomo[^<]*Installation'");
+    '';
+  };
+in {
+  matomo = matomoTest pkgs.matomo // {
+    name = "matomo";
+    meta.maintainers = with maintainers; [ florianjacob kiwi mmilata ];
+  };
+  matomo-beta = matomoTest pkgs.matomo-beta // {
+    name = "matomo-beta";
+    meta.maintainers = with maintainers; [ florianjacob kiwi mmilata ];
+  };
+}
diff --git a/nixos/tests/openarena.nix b/nixos/tests/openarena.nix
new file mode 100644
index 0000000000000..4cc4db229637d
--- /dev/null
+++ b/nixos/tests/openarena.nix
@@ -0,0 +1,36 @@
+import ./make-test.nix ({ pkgs, ...} : {
+  name = "openarena";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ tomfitzhenry ];
+  };
+
+  machine =
+    { pkgs, ... }:
+
+    { imports = [];
+      environment.systemPackages = with pkgs; [
+        socat
+      ];
+      services.openarena = {
+        enable = true;
+        extraFlags = [
+          "+set dedicated 2"
+          "+set sv_hostname 'My NixOS server'"
+          "+map oa_dm1"
+        ];
+      };
+    };
+
+  testScript =
+    ''
+      $machine->waitForUnit("openarena.service");
+      $machine->waitUntilSucceeds("ss --numeric --udp --listening | grep -q 27960");
+
+      # The log line containing 'resolve address' is the last and only message that appears
+      # after the server starts accepting clients.
+      $machine->waitUntilSucceeds("journalctl -u openarena.service | grep 'resolve address: dpmaster.deathmask.net'");
+
+      # Check it's possible to join the server.
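+      # The four 0xff bytes mark an out-of-band packet in the Quake 3 protocol; 'getchallenge' should elicit a challengeResponse.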
+      $machine->succeed("echo -n -e '\\xff\\xff\\xff\\xffgetchallenge' | socat - UDP4-DATAGRAM:127.0.0.1:27960 | grep -q challengeResponse");
+    '';
+})
diff --git a/nixos/tests/orangefs.nix b/nixos/tests/orangefs.nix
new file mode 100644
index 0000000000000..bdf4fc10c4475
--- /dev/null
+++ b/nixos/tests/orangefs.nix
@@ -0,0 +1,88 @@
+import ./make-test.nix ({ ... } :
+
+let
+  server = { pkgs, ... } : {
+    networking.firewall.allowedTCPPorts = [ 3334 ];
+    boot.initrd.postDeviceCommands = ''
+      ${pkgs.e2fsprogs}/bin/mkfs.ext4 -L data /dev/vdb
+    '';
+
+    virtualisation.emptyDiskImages = [ 4096 ];
+
+    fileSystems = pkgs.lib.mkVMOverride
+      [ { mountPoint = "/data";
+          device = "/dev/disk/by-label/data";
+          fsType = "ext4";
+        }
+      ];
+
+    services.orangefs.server = {
+      enable = true;
+      dataStorageSpace = "/data/storage";
+      metadataStorageSpace = "/data/meta";
+      servers = {
+        server1 = "tcp://server1:3334";
+        server2 = "tcp://server2:3334";
+      };
+    };
+  };
+
+  client = { lib, ... } : {
+    networking.firewall.enable = true;
+
+    services.orangefs.client = {
+      enable = true;
+      fileSystems = [{
+        target = "tcp://server1:3334/orangefs";
+        mountPoint = "/orangefs";
+      }];
+    };
+  };
+
+in {
+  name = "orangefs";
+
+  nodes = {
+    server1 = server;
+    server2 = server;
+
+    client1 = client;
+    client2 = client;
+  };
+
+  testScript = ''
+    # Format the storage space on both servers
+    foreach my $server  (($server1,$server2))
+    {
+      $server->start();
+      $server->waitForUnit("multi-user.target");
+      $server->succeed("mkdir -p /data/storage /data/meta");
+      $server->succeed("chown orangefs:orangefs /data/storage /data/meta");
+      $server->succeed("chmod 0770 /data/storage /data/meta");
+      $server->succeed("sudo -g orangefs -u orangefs pvfs2-server -f /etc/orangefs/server.conf");
+    }
+
+    # Start the services only after the storage has been formatted on all machines
+    foreach my $server  (($server1,$server2))
+    {
+      $server->succeed("systemctl start orangefs-server.service");
+    }
+
+    # Check if clients can reach and mount the FS
+    foreach my $client  (($client1,$client2))
+    {
+      $client->start();
+      $client->waitForUnit("orangefs-client.service");
+      # Both servers need to be reachable
+      $client->succeed("pvfs2-check-server -h server1 -f orangefs -n tcp -p 3334");
+      $client->succeed("pvfs2-check-server -h server2 -f orangefs -n tcp -p 3334");
+      $client->waitForUnit("orangefs.mount");
+
+    }
+
+    # R/W test between clients
+    $client1->succeed("echo test > /orangefs/file1");
+    $client2->succeed("grep test /orangefs/file1");
+
+  '';
+})
diff --git a/nixos/tests/os-prober.nix b/nixos/tests/os-prober.nix
index 9cd9f4ecd1505..5407a62339fe6 100644
--- a/nixos/tests/os-prober.nix
+++ b/nixos/tests/os-prober.nix
@@ -51,12 +51,11 @@ let
       hashed-mirrors =
       connect-timeout = 1
     '';
-    services.udisks2.enable = lib.mkForce false;
   };
   # /etc/nixos/configuration.nix for the vm
   configFile = pkgs.writeText "configuration.nix"  ''
     {config, pkgs, ...}: ({
-    imports = 
+    imports =
           [ ./hardware-configuration.nix
             <nixpkgs/nixos/modules/testing/test-instrumentation.nix>
           ];
diff --git a/nixos/tests/trickster.nix b/nixos/tests/trickster.nix
new file mode 100644
index 0000000000000..1461a32bb07e8
--- /dev/null
+++ b/nixos/tests/trickster.nix
@@ -0,0 +1,29 @@
+import ./make-test.nix ({ pkgs, ... }: {
+  name = "trickster";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ "1000101" ];
+  };
+
+  nodes = {
+    prometheus = { ... }: {
+      services.prometheus.enable = true;
+      networking.firewall.allowedTCPPorts = [ 9090 ];
+    };
+    trickster = { ... }: {
+      services.trickster.enable = true;
+    };
+  };
+
+  testScript = ''
+    startAll;
+    $prometheus->waitForUnit("prometheus.service");
+    $prometheus->waitForOpenPort(9090);
+    $prometheus->waitUntilSucceeds("curl -L http://localhost:9090/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'");
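+    # With the default settings, trickster serves its own metrics on 8082 and proxies the prometheus origin on 9090.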
+    $trickster->waitForUnit("trickster.service");
+    $trickster->waitForOpenPort(8082);
+    $trickster->waitForOpenPort(9090);
+    $trickster->waitUntilSucceeds("curl -L http://localhost:8082/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'");
+    $trickster->waitUntilSucceeds("curl -L http://prometheus:9090/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'");
+    $trickster->waitUntilSucceeds("curl -L http://localhost:9090/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'");
+  '';
+})
\ No newline at end of file
diff --git a/nixos/tests/yggdrasil.nix b/nixos/tests/yggdrasil.nix
new file mode 100644
index 0000000000000..ddff35cce3a10
--- /dev/null
+++ b/nixos/tests/yggdrasil.nix
@@ -0,0 +1,123 @@
+let
+  aliceIp6 = "200:3b91:b2d8:e708:fbf3:f06:fdd5:90d0";
+  aliceKeys = {
+    EncryptionPublicKey = "13e23986fe76bc3966b42453f479bc563348b7ff76633b7efcb76e185ec7652f";
+    EncryptionPrivateKey = "9f86947b15e86f9badac095517a1982e39a2db37ca726357f95987b898d82208";
+    SigningPublicKey = "e2c43349083bc1e998e4ec4535b4c6a8f44ca9a5a8e07336561267253b2be5f4";
+    SigningPrivateKey = "fe3add8da35316c05f6d90d3ca79bd2801e6ccab6d37e5339fef4152589398abe2c43349083bc1e998e4ec4535b4c6a8f44ca9a5a8e07336561267253b2be5f4";
+  };
+  bobIp6 = "201:ebbd:bde9:f138:c302:4afa:1fb6:a19a";
+  bobConfig = {
+    InterfacePeers = {
+      eth1 = [ "tcp://192.168.1.200:12345" ];
+    };
+    MulticastInterfaces = [ "eth1" ];
+    LinkLocalTCPPort = 54321;
+    EncryptionPublicKey = "c99d6830111e12d1b004c52fe9e5a2eef0f6aefca167aca14589a370b7373279";
+    EncryptionPrivateKey = "2e698a53d3fdce5962d2ff37de0fe77742a5c8b56cd8259f5da6aa792f6e8ba3";
+    SigningPublicKey = "de111da0ec781e45bf6c63ecb45a78c24d7d4655abfaeea83b26c36eb5c0fd5b";
+    SigningPrivateKey = "2a6c21550f3fca0331df50668ffab66b6dce8237bcd5728e571e8033b363e247de111da0ec781e45bf6c63ecb45a78c24d7d4655abfaeea83b26c36eb5c0fd5b";
+  };
+
+in import ./make-test.nix ({ pkgs, ...} : {
+  name = "yggdrasil";
+  meta = with pkgs.stdenv.lib.maintainers; {
+    maintainers = [ gazally ];
+  };
+
+  nodes = rec {
+    # Alice is listening for peerings on a specified port,
+    # but has multicast peering disabled.  Alice has part of her
+    # yggdrasil config in Nix and part of it in a file.
+    alice =
+      { ... }:
+      {
+        networking = {
+          interfaces.eth1.ipv4.addresses = [{
+            address = "192.168.1.200";
+            prefixLength = 24;
+          }];
+          firewall.allowedTCPPorts = [ 80 12345 ];
+        };
+        services.httpd.enable = true;
+        services.httpd.adminAddr = "foo@example.org";
+
+        services.yggdrasil = {
+          enable = true;
+          config = {
+            Listen = ["tcp://0.0.0.0:12345"];
+            MulticastInterfaces = [ ];
+          };
+          configFile = toString (pkgs.writeTextFile {
+                         name = "yggdrasil-alice-conf";
+                         text = builtins.toJSON aliceKeys;
+                       });
+        };
+      };
+
+    # Bob is set up to peer with Alice, and also to do local multicast
+    # peering.  Bob's yggdrasil config is in a file.
+    bob =
+      { ... }:
+      {
+        networking.firewall.allowedTCPPorts = [ 54321 ];
+        services.yggdrasil = {
+          enable = true;
+          openMulticastPort = true;
+          configFile = toString (pkgs.writeTextFile {
+                         name = "yggdrasil-bob-conf";
+                         text = builtins.toJSON bobConfig;
+                       });
+        };
+      };
+
+    # Carol only does local peering.  Carol's yggdrasil config is all Nix.
+    carol =
+      { ... }:
+      {
+        networking.firewall.allowedTCPPorts = [ 43210 ];
+        services.yggdrasil = {
+          enable = true;
+          denyDhcpcdInterfaces = [ "ygg0" ];
+          config = {
+            IfTAPMode = true;
+            IFName = "ygg0";
+            MulticastInterfaces = [ "eth1" ];
+            LinkLocalTCPPort = 43210;
+          };
+        };
+      };
+    };
+
+  testScript =
+    ''
+      # Give Alice a head start so she is ready when Bob calls.
+      $alice->start;
+      $alice->waitForUnit("yggdrasil.service");
+
+      $bob->start;
+      $carol->start;
+      $bob->waitForUnit("yggdrasil.service");
+      $carol->waitForUnit("yggdrasil.service");
+
+      $carol->waitUntilSucceeds("[ `ip -o -6 addr show dev ygg0 scope global | grep -v tentative | wc -l` -ge 1 ]");
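+      # Extract Carol's auto-assigned yggdrasil address (the fourth whitespace/slash-separated field of the 'ip -o' output).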
+      my $carolIp6 = (split /[ \/]+/, $carol->succeed("ip -o -6 addr show dev ygg0 scope global"))[3];
+
+      # If Alice can talk to Carol, then Bob's outbound peering and Carol's
+      # local peering have succeeded and everybody is connected.
+      $alice->waitUntilSucceeds("ping -c 1 $carolIp6");
+      $alice->succeed("ping -c 1 ${bobIp6}");
+
+      $bob->succeed("ping -c 1 ${aliceIp6}");
+      $bob->succeed("ping -c 1 $carolIp6");
+
+      $carol->succeed("ping -c 1 ${aliceIp6}");
+      $carol->succeed("ping -c 1 ${bobIp6}");
+
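+      # denyDhcpcdInterfaces should keep dhcpcd from ever touching ygg0.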
+      $carol->fail("journalctl -u dhcpcd | grep ygg0");
+
+      $alice->waitForUnit("httpd.service");
+      $carol->succeed("curl --fail -g http://[${aliceIp6}]");
+
+    '';
+})