author     github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>  2022-03-16 18:01:43 +0000
committer  GitHub <noreply@github.com>  2022-03-16 18:01:43 +0000
commit     177bd4ed530b9b008b9c164d484ff7dc282af363 (patch)
tree       296d482fe3728726191efba736526e1bb6a771fb
parent     899bab1df0eaca5b5601df738560438f4f8dcdfe (diff)
parent     026b6280bd2545deec4a60761ef3c9468b373af7 (diff)
Merge staging-next into staging
-rw-r--r--  nixos/doc/manual/from_md/release-notes/rl-2205.section.xml | 46
-rw-r--r--  nixos/doc/manual/release-notes/rl-2205.section.md | 13
-rw-r--r--  nixos/modules/services/cluster/hadoop/conf.nix | 22
-rw-r--r--  nixos/modules/services/cluster/hadoop/default.nix | 97
-rw-r--r--  nixos/modules/services/cluster/hadoop/hdfs.nix | 295
-rw-r--r--  nixos/modules/services/cluster/hadoop/yarn.nix | 98
-rw-r--r--  nixos/modules/services/web-apps/nextcloud.nix | 11
-rw-r--r--  nixos/tests/all-tests.nix | 6
-rw-r--r--  nixos/tests/hadoop/default.nix | 7
-rw-r--r--  nixos/tests/hadoop/hadoop.nix | 273
-rw-r--r--  nixos/tests/hadoop/hdfs.nix | 61
-rw-r--r--  nixos/tests/hadoop/yarn.nix | 34
-rw-r--r--  nixos/tests/nextcloud/default.nix | 2
-rw-r--r--  pkgs/applications/networking/cluster/fluxcd/default.nix | 8
-rw-r--r--  pkgs/applications/networking/cluster/hadoop/default.nix | 65
-rw-r--r--  pkgs/applications/science/computer-architecture/qtrvsim/default.nix | 4
-rw-r--r--  pkgs/data/icons/kora-icon-theme/default.nix | 4
-rw-r--r--  pkgs/development/compilers/open-watcom/v2.nix | 6
-rw-r--r--  pkgs/development/libraries/libarchive-qt/default.nix | 9
-rw-r--r--  pkgs/development/node-packages/default.nix | 2
-rw-r--r--  pkgs/development/python-modules/bumps/default.nix | 33
-rw-r--r--  pkgs/development/python-modules/cloudscraper/default.nix | 16
-rw-r--r--  pkgs/development/python-modules/google-cloud-storage/default.nix | 8
-rw-r--r--  pkgs/development/python-modules/qcengine/default.nix | 4
-rw-r--r--  pkgs/development/python-modules/samsungtvws/default.nix | 4
-rw-r--r--  pkgs/development/tools/database/prisma-engines/default.nix | 6
-rw-r--r--  pkgs/development/tools/jp/default.nix | 16
-rw-r--r--  pkgs/development/tools/k6/default.nix | 4
-rw-r--r--  pkgs/development/tools/stylua/default.nix | 6
-rw-r--r--  pkgs/servers/nextcloud/default.nix | 17
-rw-r--r--  pkgs/servers/piping-server-rust/default.nix | 6
-rw-r--r--  pkgs/tools/archivers/p7zip/default.nix | 35
-rwxr-xr-x  pkgs/tools/archivers/p7zip/update.sh | 47
-rw-r--r--  pkgs/tools/backup/btrbk/default.nix | 4
-rw-r--r--  pkgs/tools/misc/up/default.nix | 2
-rw-r--r--  pkgs/tools/networking/frp/default.nix | 6
-rw-r--r--  pkgs/tools/system/syslog-ng/default.nix | 4
-rw-r--r--  pkgs/top-level/all-packages.nix | 2
38 files changed, 804 insertions, 479 deletions
diff --git a/nixos/doc/manual/from_md/release-notes/rl-2205.section.xml b/nixos/doc/manual/from_md/release-notes/rl-2205.section.xml
index 8121c39ab2e28..a3316d37d971d 100644
--- a/nixos/doc/manual/from_md/release-notes/rl-2205.section.xml
+++ b/nixos/doc/manual/from_md/release-notes/rl-2205.section.xml
@@ -582,6 +582,25 @@
       </listitem>
       <listitem>
         <para>
+          Services in the <literal>hadoop</literal> module previously
+          set <literal>openFirewall</literal> to true by default. This
+          has now been changed to false. Node definitions for multi-node
+          clusters would need <literal>openFirewall = true;</literal> to
+          be added to hadoop services when upgrading from NixOS
+          21.11.
+        </para>
+      </listitem>
+      <listitem>
+        <para>
+          <literal>services.hadoop.yarn.nodemanager</literal> now uses
+          cgroup-based CPU limit enforcement by default. Additionally,
+          the option <literal>useCGroups</literal> was added to
+          nodemanagers as an easy way to switch back to the old
+          behavior.
+        </para>
+      </listitem>
+      <listitem>
+        <para>
           The <literal>wafHook</literal> hook now honors
           <literal>NIX_BUILD_CORES</literal> when
           <literal>enableParallelBuilding</literal> is not set
@@ -1186,6 +1205,33 @@
       </listitem>
       <listitem>
         <para>
+          Some improvements have been made to the
+          <literal>hadoop</literal> module:
+        </para>
+        <itemizedlist spacing="compact">
+          <listitem>
+            <para>
+              A <literal>gatewayRole</literal> option has been added,
+              for deploying hadoop cluster configuration files to a node
+              that does not have any active services
+            </para>
+          </listitem>
+          <listitem>
+            <para>
+              Support for older versions of hadoop has been added to
+              the module
+            </para>
+          </listitem>
+          <listitem>
+            <para>
+              Overriding and extending site XML files has been made
+              easier
+            </para>
+          </listitem>
+        </itemizedlist>
+      </listitem>
+      <listitem>
+        <para>
           If you are using Wayland you can choose to use the Ozone
           Wayland support in Chrome and several Electron apps by setting
           the environment variable <literal>NIXOS_OZONE_WL=1</literal>
diff --git a/nixos/doc/manual/release-notes/rl-2205.section.md b/nixos/doc/manual/release-notes/rl-2205.section.md
index deef736b239b7..5ee73d683d1d6 100644
--- a/nixos/doc/manual/release-notes/rl-2205.section.md
+++ b/nixos/doc/manual/release-notes/rl-2205.section.md
@@ -248,6 +248,14 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - The MoinMoin wiki engine (`services.moinmoin`) has been removed, because Python 2 is being retired from nixpkgs.
 
+- Services in the `hadoop` module previously set `openFirewall` to true by default.
+  This has now been changed to false. Node definitions for multi-node clusters would need
+  `openFirewall = true;` to be added to hadoop services when upgrading from NixOS 21.11.
+
+- `services.hadoop.yarn.nodemanager` now uses cgroup-based CPU limit enforcement by default.
+  Additionally, the option `useCGroups` was added to nodemanagers as an easy way to switch
+  back to the old behavior.
+
 - The `wafHook` hook now honors `NIX_BUILD_CORES` when `enableParallelBuilding` is not set explicitly. Packages can restore the old behaviour by setting `enableParallelBuilding=false`.
 
 - `pkgs.claws-mail-gtk2`, representing Claws Mail's older release version three, was removed in order to get rid of Python 2.
@@ -440,6 +448,11 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 - The `writers.writePyPy2`/`writers.writePyPy3` and corresponding `writers.writePyPy2Bin`/`writers.writePyPy3Bin` convenience functions to create executable Python 2/3 scripts using the PyPy interpreter were added.
 
+- Some improvements have been made to the `hadoop` module:
+  - A `gatewayRole` option has been added, for deploying hadoop cluster configuration files to a node that does not have any active services
+  - Support for older versions of hadoop has been added to the module
+  - Overriding and extending site XML files has been made easier
+
 - If you are using Wayland you can choose to use the Ozone Wayland support
   in Chrome and several Electron apps by setting the environment variable
   `NIXOS_OZONE_WL=1` (for example via
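
Editor's note on the hadoop firewall change above: any node in a multi-node cluster that must keep its Hadoop ports reachable now has to opt in per service. A minimal sketch of what one such node definition might look like after the upgrade (the hostname, filesystem URI and service selection here are hypothetical, not taken from this commit):

    # configuration of a hypothetical datanode in a multi-node cluster
    { pkgs, ... }: {
      services.hadoop = {
        package = pkgs.hadoop;
        coreSite."fs.defaultFS" = "hdfs://ns1";
        hdfs.datanode = {
          enable = true;
          openFirewall = true;  # implicit before 22.05, now defaults to false
        };
      };
    }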
diff --git a/nixos/modules/services/cluster/hadoop/conf.nix b/nixos/modules/services/cluster/hadoop/conf.nix
index 0caec5cfc203f..e3c26a0d5505d 100644
--- a/nixos/modules/services/cluster/hadoop/conf.nix
+++ b/nixos/modules/services/cluster/hadoop/conf.nix
@@ -1,6 +1,6 @@
 { cfg, pkgs, lib }:
 let
-  propertyXml = name: value: ''
+  propertyXml = name: value: lib.optionalString (value != null) ''
     <property>
       <name>${name}</name>
       <value>${builtins.toString value}</value>
@@ -29,16 +29,16 @@ let
     export HADOOP_LOG_DIR=/tmp/hadoop/$USER
   '';
 in
-pkgs.runCommand "hadoop-conf" {} ''
+pkgs.runCommand "hadoop-conf" {} (with cfg; ''
   mkdir -p $out/
-  cp ${siteXml "core-site.xml" cfg.coreSite}/* $out/
-  cp ${siteXml "hdfs-site.xml" cfg.hdfsSite}/* $out/
-  cp ${siteXml "mapred-site.xml" cfg.mapredSite}/* $out/
-  cp ${siteXml "yarn-site.xml" cfg.yarnSite}/* $out/
-  cp ${siteXml "httpfs-site.xml" cfg.httpfsSite}/* $out/
-  cp ${cfgFile "container-executor.cfg" cfg.containerExecutorCfg}/* $out/
+  cp ${siteXml "core-site.xml" (coreSite // coreSiteInternal)}/* $out/
+  cp ${siteXml "hdfs-site.xml" (hdfsSiteDefault // hdfsSite // hdfsSiteInternal)}/* $out/
+  cp ${siteXml "mapred-site.xml" (mapredSiteDefault // mapredSite)}/* $out/
+  cp ${siteXml "yarn-site.xml" (yarnSiteDefault // yarnSite // yarnSiteInternal)}/* $out/
+  cp ${siteXml "httpfs-site.xml" httpfsSite}/* $out/
+  cp ${cfgFile "container-executor.cfg" containerExecutorCfg}/* $out/
   cp ${pkgs.writeTextDir "hadoop-user-functions.sh" userFunctions}/* $out/
   cp ${pkgs.writeTextDir "hadoop-env.sh" hadoopEnv}/* $out/
-  cp ${cfg.log4jProperties} $out/log4j.properties
-  ${lib.concatMapStringsSep "\n" (dir: "cp -r ${dir}/* $out/") cfg.extraConfDirs}
-''
+  cp ${log4jProperties} $out/log4j.properties
+  ${lib.concatMapStringsSep "\n" (dir: "cp -r ${dir}/* $out/") extraConfDirs}
+'')
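
Editor's note: a side effect of the two changes above (the null filter in propertyXml plus the defaults // user // internal merge) is that a later attribute set wins and a null value suppresses the <property> element entirely. A small illustrative sketch of the merge semantics, with made-up values:

    # later attrsets override earlier ones; null values are dropped by propertyXml
    let
      hdfsSiteDefault  = { "dfs.namenode.http-bind-host" = "0.0.0.0"; };
      hdfsSite         = { "dfs.namenode.http-bind-host" = null; };  # user override
      hdfsSiteInternal = { };
    in hdfsSiteDefault // hdfsSite // hdfsSiteInternal
    # evaluates to { "dfs.namenode.http-bind-host" = null; }
    # so no <property> block for that key ends up in the generated hdfs-site.xml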
diff --git a/nixos/modules/services/cluster/hadoop/default.nix b/nixos/modules/services/cluster/hadoop/default.nix
index a1a95fe31cac5..a4fdea81037c7 100644
--- a/nixos/modules/services/cluster/hadoop/default.nix
+++ b/nixos/modules/services/cluster/hadoop/default.nix
@@ -21,24 +21,50 @@ with lib;
         <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/core-default.xml"/>
       '';
     };
+    coreSiteInternal = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
+      internal = true;
+      description = ''
+        Internal option to add configs to core-site.xml based on module options
+      '';
+    };
 
-    hdfsSite = mkOption {
+    hdfsSiteDefault = mkOption {
       default = {
         "dfs.namenode.rpc-bind-host" = "0.0.0.0";
+        "dfs.namenode.http-address" = "0.0.0.0:9870";
+        "dfs.namenode.servicerpc-bind-host" = "0.0.0.0";
+        "dfs.namenode.http-bind-host" = "0.0.0.0";
       };
       type = types.attrsOf types.anything;
+      description = ''
+        Default options for hdfs-site.xml
+      '';
+    };
+    hdfsSite = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
       example = literalExpression ''
         {
           "dfs.nameservices" = "namenode1";
         }
       '';
       description = ''
-        Hadoop hdfs-site.xml definition
+        Additional options and overrides for hdfs-site.xml
         <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml"/>
       '';
     };
+    hdfsSiteInternal = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
+      internal = true;
+      description = ''
+        Internal option to add configs to hdfs-site.xml based on module options
+      '';
+    };
 
-    mapredSite = mkOption {
+    mapredSiteDefault = mkOption {
       default = {
         "mapreduce.framework.name" = "yarn";
         "yarn.app.mapreduce.am.env" = "HADOOP_MAPRED_HOME=${cfg.package}/lib/${cfg.package.untarDir}";
@@ -54,18 +80,25 @@ with lib;
         }
       '';
       type = types.attrsOf types.anything;
+      description = ''
+        Default options for mapred-site.xml
+      '';
+    };
+    mapredSite = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
       example = literalExpression ''
-        options.services.hadoop.mapredSite.default // {
+        {
           "mapreduce.map.java.opts" = "-Xmx900m -XX:+UseParallelGC";
         }
       '';
       description = ''
-        Hadoop mapred-site.xml definition
+        Additional options and overrides for mapred-site.xml
         <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml"/>
       '';
     };
 
-    yarnSite = mkOption {
+    yarnSiteDefault = mkOption {
       default = {
         "yarn.nodemanager.admin-env" = "PATH=$PATH";
         "yarn.nodemanager.aux-services" = "mapreduce_shuffle";
@@ -77,19 +110,34 @@ with lib;
         "yarn.nodemanager.linux-container-executor.path" = "/run/wrappers/yarn-nodemanager/bin/container-executor";
         "yarn.nodemanager.log-dirs" = "/var/log/hadoop/yarn/nodemanager";
         "yarn.resourcemanager.bind-host" = "0.0.0.0";
-        "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
+        "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler";
       };
       type = types.attrsOf types.anything;
+      description = ''
+        Default options for yarn-site.xml
+      '';
+    };
+    yarnSite = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
       example = literalExpression ''
-        options.services.hadoop.yarnSite.default // {
+        {
           "yarn.resourcemanager.hostname" = "''${config.networking.hostName}";
         }
       '';
       description = ''
-        Hadoop yarn-site.xml definition
+        Additional options and overrides for yarn-site.xml
         <link xlink:href="https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-common/yarn-default.xml"/>
       '';
     };
+    yarnSiteInternal = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
+      internal = true;
+      description = ''
+        Internal option to add configs to yarn-site.xml based on module options
+      '';
+    };
 
     httpfsSite = mkOption {
       default = { };
@@ -123,6 +171,7 @@ with lib;
         "yarn.nodemanager.linux-container-executor.group"="hadoop";
         "min.user.id"=1000;
         "feature.terminal.enabled"=1;
+        "feature.mount-cgroup.enabled" = 1;
       };
       type = types.attrsOf types.anything;
       example = literalExpression ''
@@ -148,6 +197,8 @@ with lib;
       description = "Directories containing additional config files to be added to HADOOP_CONF_DIR";
     };
 
+    gatewayRole.enable = mkEnableOption "gateway role for deploying hadoop configs";
+
     package = mkOption {
       type = types.package;
       default = pkgs.hadoop;
@@ -157,20 +208,16 @@ with lib;
   };
 
 
-  config = mkMerge [
-    (mkIf (builtins.hasAttr "yarn" config.users.users ||
-           builtins.hasAttr "hdfs" config.users.users ||
-           builtins.hasAttr "httpfs" config.users.users) {
-      users.groups.hadoop = {
-        gid = config.ids.gids.hadoop;
-      };
-      environment = {
-        systemPackages = [ cfg.package ];
-        etc."hadoop-conf".source = let
-          hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
-        in "${hadoopConf}";
-      };
-    })
-
-  ];
+  config = mkIf cfg.gatewayRole.enable {
+    users.groups.hadoop = {
+      gid = config.ids.gids.hadoop;
+    };
+    environment = {
+      systemPackages = [ cfg.package ];
+      etc."hadoop-conf".source = let
+        hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
+      in "${hadoopConf}";
+      variables.HADOOP_CONF_DIR = "/etc/hadoop-conf/";
+    };
+  };
 }
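
Editor's note: with the site options split into *SiteDefault, *Site and *SiteInternal, user configuration no longer needs the old pattern of manually merging options.services.hadoop.yarnSite.default (visible in the removed examples above); overrides go straight into the user-facing option. A brief sketch, with a hypothetical hostname:

    # 21.11-era pattern (no longer needed):
    #   yarnSite = options.services.hadoop.yarnSite.default // {
    #     "yarn.resourcemanager.hostname" = "rm1";
    #   };
    # 22.05 pattern: yarnSite holds only additions and overrides,
    # defaults come from yarnSiteDefault
    services.hadoop.yarnSite."yarn.resourcemanager.hostname" = "rm1";

Nodes that only need the generated config files and client tools, without running any Hadoop service, can instead set services.hadoop.gatewayRole.enable = true;, which is what now drives the hadoop group, package and /etc/hadoop-conf setup shown above.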
diff --git a/nixos/modules/services/cluster/hadoop/hdfs.nix b/nixos/modules/services/cluster/hadoop/hdfs.nix
index be667aa82d8a6..325a002ad32fc 100644
--- a/nixos/modules/services/cluster/hadoop/hdfs.nix
+++ b/nixos/modules/services/cluster/hadoop/hdfs.nix
@@ -1,191 +1,191 @@
-{ config, lib, pkgs, ...}:
+{ config, lib, pkgs, ... }:
 with lib;
 let
   cfg = config.services.hadoop;
+
+  # Config files for hadoop services
   hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
-  restartIfChanged  = mkOption {
-    type = types.bool;
-    description = ''
-      Automatically restart the service on config change.
-      This can be set to false to defer restarts on clusters running critical applications.
-      Please consider the security implications of inadvertently running an older version,
-      and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
-    '';
-    default = false;
-  };
+
+  # Generator for HDFS service options
+  hadoopServiceOption = { serviceName, firewallOption ? true, extraOpts ? null }: {
+    enable = mkEnableOption serviceName;
+    restartIfChanged = mkOption {
+      type = types.bool;
+      description = ''
+        Automatically restart the service on config change.
+        This can be set to false to defer restarts on clusters running critical applications.
+        Please consider the security implications of inadvertently running an older version,
+        and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
+      '';
+      default = false;
+    };
+    extraFlags = mkOption {
+      type = with types; listOf str;
+      default = [];
+      description = "Extra command line flags to pass to ${serviceName}";
+      example = [
+        "-Dcom.sun.management.jmxremote"
+        "-Dcom.sun.management.jmxremote.port=8010"
+      ];
+    };
+    extraEnv = mkOption {
+      type = with types; attrsOf str;
+      default = {};
+      description = "Extra environment variables for ${serviceName}";
+    };
+  } // (optionalAttrs firewallOption {
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Open firewall ports for ${serviceName}.";
+    };
+  }) // (optionalAttrs (extraOpts != null) extraOpts);
+
+  # Generator for HDFS service configs
+  hadoopServiceConfig =
+    { name
+    , serviceOptions ? cfg.hdfs."${toLower name}"
+    , description ? "Hadoop HDFS ${name}"
+    , User ? "hdfs"
+    , allowedTCPPorts ? [ ]
+    , preStart ? ""
+    , environment ? { }
+    , extraConfig ? { }
+    }: (
+
+      mkIf serviceOptions.enable ( mkMerge [{
+        systemd.services."hdfs-${toLower name}" = {
+          inherit description preStart;
+          environment = environment // serviceOptions.extraEnv;
+          wantedBy = [ "multi-user.target" ];
+          inherit (serviceOptions) restartIfChanged;
+          serviceConfig = {
+            inherit User;
+            SyslogIdentifier = "hdfs-${toLower name}";
+            ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} ${toLower name} ${escapeShellArgs serviceOptions.extraFlags}";
+            Restart = "always";
+          };
+        };
+
+        services.hadoop.gatewayRole.enable = true;
+
+        networking.firewall.allowedTCPPorts = mkIf
+          ((builtins.hasAttr "openFirewall" serviceOptions) && serviceOptions.openFirewall)
+          allowedTCPPorts;
+      } extraConfig])
+    );
+
 in
 {
   options.services.hadoop.hdfs = {
-    namenode = {
-      enable = mkEnableOption "Whether to run the HDFS NameNode";
+
+    namenode = hadoopServiceOption { serviceName = "HDFS NameNode"; } // {
       formatOnInit = mkOption {
         type = types.bool;
         default = false;
         description = ''
-          Format HDFS namenode on first start. This is useful for quickly spinning up ephemeral HDFS clusters with a single namenode.
-          For HA clusters, initialization involves multiple steps across multiple nodes. Follow [this guide](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html)
-          to initialize an HA cluster manually.
-        '';
-      };
-      inherit restartIfChanged;
-      openFirewall = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Open firewall ports for namenode
-        '';
-      };
-    };
-    datanode = {
-      enable = mkEnableOption "Whether to run the HDFS DataNode";
-      inherit restartIfChanged;
-      openFirewall = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Open firewall ports for datanode
+          Format HDFS namenode on first start. This is useful for quickly spinning up
+          ephemeral HDFS clusters with a single namenode.
+          For HA clusters, initialization involves multiple steps across multiple nodes.
+          Follow this guide to initialize an HA cluster manually:
+          <link xlink:href="https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html"/>
         '';
       };
     };
-    journalnode = {
-      enable = mkEnableOption "Whether to run the HDFS JournalNode";
-      inherit restartIfChanged;
-      openFirewall = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Open firewall ports for journalnode
-        '';
+
+    datanode = hadoopServiceOption { serviceName = "HDFS DataNode"; } // {
+      dataDirs = mkOption {
+        default = null;
+        description = "Tier and path definitions for datanode storage.";
+        type = with types; nullOr (listOf (submodule {
+          options = {
+            type = mkOption {
+              type = enum [ "SSD" "DISK" "ARCHIVE" "RAM_DISK" ];
+              description = ''
+                Storage types ([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for HDFS storage policies.
+              '';
+            };
+            path = mkOption {
+              type = path;
+              example = "/var/lib/hadoop/hdfs/dn";
+              description = "Determines where on the local filesystem a data node should store its blocks.";
+            };
+          };
+        }));
       };
     };
-    zkfc = {
-      enable = mkEnableOption "Whether to run the HDFS ZooKeeper failover controller";
-      inherit restartIfChanged;
+
+    journalnode = hadoopServiceOption { serviceName = "HDFS JournalNode"; };
+
+    zkfc = hadoopServiceOption {
+      serviceName = "HDFS ZooKeeper failover controller";
+      firewallOption = false;
     };
-    httpfs = {
-      enable = mkEnableOption "Whether to run the HDFS HTTPfs server";
+
+    httpfs = hadoopServiceOption { serviceName = "HDFS HTTPFS"; } // {
       tempPath = mkOption {
         type = types.path;
         default = "/tmp/hadoop/httpfs";
-        description = ''
-          HTTPFS_TEMP path used by HTTPFS
-        '';
-      };
-      inherit restartIfChanged;
-      openFirewall = mkOption {
-        type = types.bool;
-        default = true;
-        description = ''
-          Open firewall ports for HTTPFS
-        '';
+        description = "HTTPFS_TEMP path used by HTTPFS";
       };
     };
+
   };
 
   config = mkMerge [
-    (mkIf cfg.hdfs.namenode.enable {
-      systemd.services.hdfs-namenode = {
-        description = "Hadoop HDFS NameNode";
-        wantedBy = [ "multi-user.target" ];
-        inherit (cfg.hdfs.namenode) restartIfChanged;
-
-        preStart = (mkIf cfg.hdfs.namenode.formatOnInit ''
-          ${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true
-        '');
-
-        serviceConfig = {
-          User = "hdfs";
-          SyslogIdentifier = "hdfs-namenode";
-          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode";
-          Restart = "always";
-        };
-      };
-
-      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.namenode.openFirewall [
+    (hadoopServiceConfig {
+      name = "NameNode";
+      allowedTCPPorts = [
         9870 # namenode.http-address
         8020 # namenode.rpc-address
-        8022 # namenode. servicerpc-address
-      ]);
+        8022 # namenode.servicerpc-address
+        8019 # dfs.ha.zkfc.port
+      ];
+      preStart = (mkIf cfg.hdfs.namenode.formatOnInit
+        "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true"
+      );
     })
-    (mkIf cfg.hdfs.datanode.enable {
-      systemd.services.hdfs-datanode = {
-        description = "Hadoop HDFS DataNode";
-        wantedBy = [ "multi-user.target" ];
-        inherit (cfg.hdfs.datanode) restartIfChanged;
-
-        serviceConfig = {
-          User = "hdfs";
-          SyslogIdentifier = "hdfs-datanode";
-          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} datanode";
-          Restart = "always";
-        };
-      };
 
-      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.datanode.openFirewall [
+    (hadoopServiceConfig {
+      name = "DataNode";
+      # port numbers for datanode changed between hadoop 2 and 3
+      allowedTCPPorts = if versionAtLeast cfg.package.version "3" then [
         9864 # datanode.http.address
         9866 # datanode.address
         9867 # datanode.ipc.address
-      ]);
+      ] else [
+        50075 # datanode.http.address
+        50010 # datanode.address
+        50020 # datanode.ipc.address
+      ];
+      extraConfig.services.hadoop.hdfsSiteInternal."dfs.datanode.data.dir" = let d = cfg.hdfs.datanode.dataDirs; in
+        if (d != null) then (concatMapStringsSep "," (x: "[" + x.type + "]file://" + x.path) d) else d;
     })
-    (mkIf cfg.hdfs.journalnode.enable {
-      systemd.services.hdfs-journalnode = {
-        description = "Hadoop HDFS JournalNode";
-        wantedBy = [ "multi-user.target" ];
-        inherit (cfg.hdfs.journalnode) restartIfChanged;
-
-        serviceConfig = {
-          User = "hdfs";
-          SyslogIdentifier = "hdfs-journalnode";
-          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} journalnode";
-          Restart = "always";
-        };
-      };
 
-      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.journalnode.openFirewall [
+    (hadoopServiceConfig {
+      name = "JournalNode";
+      allowedTCPPorts = [
         8480 # dfs.journalnode.http-address
         8485 # dfs.journalnode.rpc-address
-      ]);
+      ];
     })
-    (mkIf cfg.hdfs.zkfc.enable {
-      systemd.services.hdfs-zkfc = {
-        description = "Hadoop HDFS ZooKeeper failover controller";
-        wantedBy = [ "multi-user.target" ];
-        inherit (cfg.hdfs.zkfc) restartIfChanged;
-
-        serviceConfig = {
-          User = "hdfs";
-          SyslogIdentifier = "hdfs-zkfc";
-          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} zkfc";
-          Restart = "always";
-        };
-      };
-    })
-    (mkIf cfg.hdfs.httpfs.enable {
-      systemd.services.hdfs-httpfs = {
-        description = "Hadoop httpfs";
-        wantedBy = [ "multi-user.target" ];
-        inherit (cfg.hdfs.httpfs) restartIfChanged;
-
-        environment.HTTPFS_TEMP = cfg.hdfs.httpfs.tempPath;
 
-        preStart = ''
-          mkdir -p $HTTPFS_TEMP
-        '';
+    (hadoopServiceConfig {
+      name = "zkfc";
+      description = "Hadoop HDFS ZooKeeper failover controller";
+    })
 
-        serviceConfig = {
-          User = "httpfs";
-          SyslogIdentifier = "hdfs-httpfs";
-          ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} httpfs";
-          Restart = "always";
-        };
-      };
-      networking.firewall.allowedTCPPorts = (mkIf cfg.hdfs.httpfs.openFirewall [
+    (hadoopServiceConfig {
+      name = "HTTPFS";
+      environment.HTTPFS_TEMP = cfg.hdfs.httpfs.tempPath;
+      preStart = "mkdir -p $HTTPFS_TEMP";
+      User = "httpfs";
+      allowedTCPPorts = [
         14000 # httpfs.http.port
-      ]);
+      ];
     })
-    (mkIf (
-        cfg.hdfs.namenode.enable || cfg.hdfs.datanode.enable || cfg.hdfs.journalnode.enable || cfg.hdfs.zkfc.enable
-    ) {
+
+    (mkIf cfg.gatewayRole.enable {
       users.users.hdfs = {
         description = "Hadoop HDFS user";
         group = "hadoop";
@@ -199,5 +199,6 @@ in
         isSystemUser = true;
       };
     })
+
   ];
 }
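
Editor's note: the new dataDirs submodule reaches hdfs-site.xml through the hdfsSiteInternal hook above, rendered as a comma-separated list of [TYPE]file://PATH entries. A sketch of a tiered datanode (the paths are hypothetical):

    # would render dfs.datanode.data.dir as
    #   "[SSD]file:///data/ssd/dn,[DISK]file:///data/hdd/dn"
    services.hadoop.hdfs.datanode = {
      enable = true;
      openFirewall = true;
      dataDirs = [
        { type = "SSD";  path = "/data/ssd/dn"; }
        { type = "DISK"; path = "/data/hdd/dn"; }
      ];
    };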
diff --git a/nixos/modules/services/cluster/hadoop/yarn.nix b/nixos/modules/services/cluster/hadoop/yarn.nix
index 37c26ea10f76f..74e16bdec687a 100644
--- a/nixos/modules/services/cluster/hadoop/yarn.nix
+++ b/nixos/modules/services/cluster/hadoop/yarn.nix
@@ -13,23 +13,77 @@ let
     '';
     default = false;
   };
+  extraFlags = mkOption {
+    type = with types; listOf str;
+    default = [];
+    description = "Extra command line flags to pass to the service";
+    example = [
+      "-Dcom.sun.management.jmxremote"
+      "-Dcom.sun.management.jmxremote.port=8010"
+    ];
+  };
+  extraEnv = mkOption {
+    type = with types; attrsOf str;
+    default = {};
+    description = "Extra environment variables";
+  };
 in
 {
   options.services.hadoop.yarn = {
     resourcemanager = {
-      enable = mkEnableOption "Whether to run the Hadoop YARN ResourceManager";
-      inherit restartIfChanged;
+      enable = mkEnableOption "Hadoop YARN ResourceManager";
+      inherit restartIfChanged extraFlags extraEnv;
+
       openFirewall = mkOption {
         type = types.bool;
-        default = true;
+        default = false;
         description = ''
           Open firewall ports for resourcemanager
         '';
       };
     };
     nodemanager = {
-      enable = mkEnableOption "Whether to run the Hadoop YARN NodeManager";
-      inherit restartIfChanged;
+      enable = mkEnableOption "Hadoop YARN NodeManager";
+      inherit restartIfChanged extraFlags extraEnv;
+
+      resource = {
+        cpuVCores = mkOption {
+          description = "Number of vcores that can be allocated for containers.";
+          type = with types; nullOr ints.positive;
+          default = null;
+        };
+        maximumAllocationVCores = mkOption {
+          description = "The maximum virtual CPU cores any container can be allocated.";
+          type = with types; nullOr ints.positive;
+          default = null;
+        };
+        memoryMB = mkOption {
+          description = "Amount of physical memory, in MB, that can be allocated for containers.";
+          type = with types; nullOr ints.positive;
+          default = null;
+        };
+        maximumAllocationMB = mkOption {
+          description = "The maximum physical memory any container can be allocated.";
+          type = with types; nullOr ints.positive;
+          default = null;
+        };
+      };
+
+      useCGroups = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Use cgroups to enforce resource limits on containers
+        '';
+      };
+
+      localDir = mkOption {
+        description = "List of directories to store localized files in.";
+        type = with types; nullOr (listOf path);
+        example = [ "/var/lib/hadoop/yarn/nm" ];
+        default = null;
+      };
+
       addBinBash = mkOption {
         type = types.bool;
         default = true;
@@ -39,7 +93,7 @@ in
       };
       openFirewall = mkOption {
         type = types.bool;
-        default = true;
+        default = false;
         description = ''
           Open firewall ports for nodemanager.
           Because containers can listen on any ephemeral port, TCP ports 1024–65535 will be opened.
@@ -49,10 +103,7 @@ in
   };
 
   config = mkMerge [
-    (mkIf (
-        cfg.yarn.resourcemanager.enable || cfg.yarn.nodemanager.enable
-    ) {
-
+    (mkIf cfg.gatewayRole.enable {
       users.users.yarn = {
         description = "Hadoop YARN user";
         group = "hadoop";
@@ -65,15 +116,19 @@ in
         description = "Hadoop YARN ResourceManager";
         wantedBy = [ "multi-user.target" ];
         inherit (cfg.yarn.resourcemanager) restartIfChanged;
+        environment = cfg.yarn.resourcemanager.extraEnv;
 
         serviceConfig = {
           User = "yarn";
           SyslogIdentifier = "yarn-resourcemanager";
           ExecStart = "${cfg.package}/bin/yarn --config ${hadoopConf} " +
-                      " resourcemanager";
+                      " resourcemanager ${escapeShellArgs cfg.yarn.resourcemanager.extraFlags}";
           Restart = "always";
         };
       };
+
+      services.hadoop.gatewayRole.enable = true;
+
       networking.firewall.allowedTCPPorts = (mkIf cfg.yarn.resourcemanager.openFirewall [
         8088 # resourcemanager.webapp.address
         8030 # resourcemanager.scheduler.address
@@ -94,6 +149,7 @@ in
         description = "Hadoop YARN NodeManager";
         wantedBy = [ "multi-user.target" ];
         inherit (cfg.yarn.nodemanager) restartIfChanged;
+        environment = cfg.yarn.nodemanager.extraEnv;
 
         preStart = ''
           # create log dir
@@ -101,8 +157,9 @@ in
           chown yarn:hadoop /var/log/hadoop/yarn/nodemanager
 
           # set up setuid container executor binary
+          umount /run/wrappers/yarn-nodemanager/cgroup/cpu || true
           rm -rf /run/wrappers/yarn-nodemanager/ || true
-          mkdir -p /run/wrappers/yarn-nodemanager/{bin,etc/hadoop}
+          mkdir -p /run/wrappers/yarn-nodemanager/{bin,etc/hadoop,cgroup/cpu}
           cp ${cfg.package}/lib/${cfg.package.untarDir}/bin/container-executor /run/wrappers/yarn-nodemanager/bin/
           chgrp hadoop /run/wrappers/yarn-nodemanager/bin/container-executor
           chmod 6050 /run/wrappers/yarn-nodemanager/bin/container-executor
@@ -114,11 +171,26 @@ in
           SyslogIdentifier = "yarn-nodemanager";
           PermissionsStartOnly = true;
           ExecStart = "${cfg.package}/bin/yarn --config ${hadoopConf} " +
-                      " nodemanager";
+                      " nodemanager ${escapeShellArgs cfg.yarn.nodemanager.extraFlags}";
           Restart = "always";
         };
       };
 
+      services.hadoop.gatewayRole.enable = true;
+
+      services.hadoop.yarnSiteInternal = with cfg.yarn.nodemanager; {
+        "yarn.nodemanager.local-dirs" = localDir;
+        "yarn.scheduler.maximum-allocation-vcores" = resource.maximumAllocationVCores;
+        "yarn.scheduler.maximum-allocation-mb" = resource.maximumAllocationMB;
+        "yarn.nodemanager.resource.cpu-vcores" = resource.cpuVCores;
+        "yarn.nodemanager.resource.memory-mb" = resource.memoryMB;
+      } // optionalAttrs useCGroups {
+        "yarn.nodemanager.linux-container-executor.cgroups.hierarchy" = "/hadoop-yarn";
+        "yarn.nodemanager.linux-container-executor.resources-handler.class" = "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler";
+        "yarn.nodemanager.linux-container-executor.cgroups.mount" = "true";
+        "yarn.nodemanager.linux-container-executor.cgroups.mount-path" = "/run/wrappers/yarn-nodemanager/cgroup";
+      };
+
       networking.firewall.allowedTCPPortRanges = [
         (mkIf (cfg.yarn.nodemanager.openFirewall) {from = 1024; to = 65535;})
       ];
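
Editor's note: the new nodemanager options map directly onto the yarnSiteInternal keys listed above (yarn.nodemanager.resource.cpu-vcores, yarn.nodemanager.resource.memory-mb, and so on). A sketch of a sized nodemanager, with made-up numbers:

    services.hadoop.yarn.nodemanager = {
      enable = true;
      useCGroups = true;   # the new default; set to false to restore the pre-22.05 behaviour
      resource = {
        cpuVCores = 8;
        memoryMB = 7168;
        maximumAllocationVCores = 4;
        maximumAllocationMB = 4096;
      };
      localDir = [ "/var/lib/hadoop/yarn/nm" ];
    };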
diff --git a/nixos/modules/services/web-apps/nextcloud.nix b/nixos/modules/services/web-apps/nextcloud.nix
index 141ab98e29bfc..b32220a5e5790 100644
--- a/nixos/modules/services/web-apps/nextcloud.nix
+++ b/nixos/modules/services/web-apps/nextcloud.nix
@@ -153,7 +153,7 @@ in {
     package = mkOption {
       type = types.package;
       description = "Which package to use for the Nextcloud instance.";
-      relatedPackages = [ "nextcloud21" "nextcloud22" "nextcloud23" ];
+      relatedPackages = [ "nextcloud22" "nextcloud23" ];
     };
     phpPackage = mkOption {
       type = types.package;
@@ -571,15 +571,6 @@ in {
               nextcloud defined in an overlay, please set `services.nextcloud.package` to
               `pkgs.nextcloud`.
             ''
-          # 21.03 will not be an official release - it was instead 21.05.
-          # This versionOlder statement remains set to 21.03 for backwards compatibility.
-          # See https://github.com/NixOS/nixpkgs/pull/108899 and
-          # https://github.com/NixOS/rfcs/blob/master/rfcs/0080-nixos-release-schedule.md.
-          # FIXME(@Ma27) remove this else-if as soon as 21.05 is EOL! This is only here
-          # to ensure that users who are on Nextcloud 19 with a stateVersion <21.05 with
-          # no explicit services.nextcloud.package don't upgrade to v21 by accident (
-          # nextcloud20 throws an eval-error because it's dropped).
-          else if versionOlder stateVersion "21.03" then nextcloud20
           else if versionOlder stateVersion "21.11" then nextcloud21
           else if versionOlder stateVersion "22.05" then nextcloud22
           else nextcloud23
diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix
index 6f4c621359522..b2b35119ce353 100644
--- a/nixos/tests/all-tests.nix
+++ b/nixos/tests/all-tests.nix
@@ -189,9 +189,9 @@ in
   grocy = handleTest ./grocy.nix {};
   grub = handleTest ./grub.nix {};
   gvisor = handleTest ./gvisor.nix {};
-  hadoop.all = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hadoop/hadoop.nix {};
-  hadoop.hdfs = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hadoop/hdfs.nix {};
-  hadoop.yarn = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hadoop/yarn.nix {};
+  hadoop = import ./hadoop { inherit handleTestOn; package = pkgs.hadoop; };
+  hadoop_3_2 = import ./hadoop { inherit handleTestOn; package = pkgs.hadoop_3_2; };
+  hadoop2 = import ./hadoop { inherit handleTestOn; package = pkgs.hadoop2; };
   haka = handleTest ./haka.nix {};
   haproxy = handleTest ./haproxy.nix {};
   hardened = handleTest ./hardened.nix {};
diff --git a/nixos/tests/hadoop/default.nix b/nixos/tests/hadoop/default.nix
new file mode 100644
index 0000000000000..d2a97cbeffb8d
--- /dev/null
+++ b/nixos/tests/hadoop/default.nix
@@ -0,0 +1,7 @@
+{ handleTestOn, package, ... }:
+
+{
+  all = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hadoop.nix { inherit package; };
+  hdfs = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hdfs.nix { inherit package; };
+  yarn = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./yarn.nix { inherit package; };
+}
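
Editor's note: with the test suite now parameterised over the Hadoop package, covering a further release is a one-line change in all-tests.nix. A hypothetical entry for the 3.3 line packaged below would presumably follow the same pattern:

    hadoop_3_3 = import ./hadoop { inherit handleTestOn; package = pkgs.hadoop_3_3; };

Individual tests can then be built as usual from a nixpkgs checkout, for example with nix-build -A nixosTests.hadoop.all or nix-build -A nixosTests.hadoop2.yarn.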
diff --git a/nixos/tests/hadoop/hadoop.nix b/nixos/tests/hadoop/hadoop.nix
index 48737debab546..b132f4fa58b01 100644
--- a/nixos/tests/hadoop/hadoop.nix
+++ b/nixos/tests/hadoop/hadoop.nix
@@ -1,121 +1,148 @@
 # This test is very comprehensive. It tests whether all hadoop services work well with each other.
 # Run this when updating the Hadoop package or making significant changes to the hadoop module.
 # For a more basic test, see hdfs.nix and yarn.nix
-import ../make-test-python.nix ({pkgs, ...}: {
-
-  nodes = let
-    package = pkgs.hadoop;
-    coreSite = {
-      "fs.defaultFS" = "hdfs://ns1";
-    };
-    hdfsSite = {
-      "dfs.namenode.rpc-bind-host" = "0.0.0.0";
-      "dfs.namenode.http-bind-host" = "0.0.0.0";
-      "dfs.namenode.servicerpc-bind-host" = "0.0.0.0";
-
-      # HA Quorum Journal Manager configuration
-      "dfs.nameservices" = "ns1";
-      "dfs.ha.namenodes.ns1" = "nn1,nn2";
-      "dfs.namenode.shared.edits.dir.ns1.nn1" = "qjournal://jn1:8485;jn2:8485;jn3:8485/ns1";
-      "dfs.namenode.shared.edits.dir.ns1.nn2" = "qjournal://jn1:8485;jn2:8485;jn3:8485/ns1";
-      "dfs.namenode.rpc-address.ns1.nn1" = "nn1:8020";
-      "dfs.namenode.rpc-address.ns1.nn2" = "nn2:8020";
-      "dfs.namenode.servicerpc-address.ns1.nn1" = "nn1:8022";
-      "dfs.namenode.servicerpc-address.ns1.nn2" = "nn2:8022";
-      "dfs.namenode.http-address.ns1.nn1" = "nn1:9870";
-      "dfs.namenode.http-address.ns1.nn2" = "nn2:9870";
-
-      # Automatic failover configuration
-      "dfs.client.failover.proxy.provider.ns1" = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider";
-      "dfs.ha.automatic-failover.enabled.ns1" = "true";
-      "dfs.ha.fencing.methods" = "shell(true)";
-      "ha.zookeeper.quorum" = "zk1:2181";
-    };
-    yarnSiteHA = {
-      "yarn.resourcemanager.zk-address" = "zk1:2181";
-      "yarn.resourcemanager.ha.enabled" = "true";
-      "yarn.resourcemanager.ha.rm-ids" = "rm1,rm2";
-      "yarn.resourcemanager.hostname.rm1" = "rm1";
-      "yarn.resourcemanager.hostname.rm2" = "rm2";
-      "yarn.resourcemanager.ha.automatic-failover.enabled" = "true";
-      "yarn.resourcemanager.cluster-id" = "cluster1";
-      # yarn.resourcemanager.webapp.address needs to be defined even though yarn.resourcemanager.hostname is set. This shouldn't be necessary, but there's a bug in
-      # hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java:70
-      # that causes AM containers to fail otherwise.
-      "yarn.resourcemanager.webapp.address.rm1" = "rm1:8088";
-      "yarn.resourcemanager.webapp.address.rm2" = "rm2:8088";
-    };
-  in {
-    zk1 = { ... }: {
-      services.zookeeper.enable = true;
-      networking.firewall.allowedTCPPorts = [ 2181 ];
-    };
-
-    # HDFS cluster
-    nn1 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        hdfs.namenode.enable = true;
-        hdfs.zkfc.enable = true;
+import ../make-test-python.nix ({ package, ... }: {
+  name = "hadoop-combined";
+
+  nodes =
+    let
+      coreSite = {
+        "fs.defaultFS" = "hdfs://ns1";
+      };
+      hdfsSite = {
+        # HA Quorum Journal Manager configuration
+        "dfs.nameservices" = "ns1";
+        "dfs.ha.namenodes.ns1" = "nn1,nn2";
+        "dfs.namenode.shared.edits.dir.ns1" = "qjournal://jn1:8485;jn2:8485;jn3:8485/ns1";
+        "dfs.namenode.rpc-address.ns1.nn1" = "nn1:8020";
+        "dfs.namenode.rpc-address.ns1.nn2" = "nn2:8020";
+        "dfs.namenode.servicerpc-address.ns1.nn1" = "nn1:8022";
+        "dfs.namenode.servicerpc-address.ns1.nn2" = "nn2:8022";
+        "dfs.namenode.http-address.ns1.nn1" = "nn1:9870";
+        "dfs.namenode.http-address.ns1.nn2" = "nn2:9870";
+
+        # Automatic failover configuration
+        "dfs.client.failover.proxy.provider.ns1" = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider";
+        "dfs.ha.automatic-failover.enabled.ns1" = "true";
+        "dfs.ha.fencing.methods" = "shell(true)";
+        "ha.zookeeper.quorum" = "zk1:2181";
+      };
+      yarnSite = {
+        "yarn.resourcemanager.zk-address" = "zk1:2181";
+        "yarn.resourcemanager.ha.enabled" = "true";
+        "yarn.resourcemanager.ha.rm-ids" = "rm1,rm2";
+        "yarn.resourcemanager.hostname.rm1" = "rm1";
+        "yarn.resourcemanager.hostname.rm2" = "rm2";
+        "yarn.resourcemanager.ha.automatic-failover.enabled" = "true";
+        "yarn.resourcemanager.cluster-id" = "cluster1";
+        # yarn.resourcemanager.webapp.address needs to be defined even though yarn.resourcemanager.hostname is set. This shouldn't be necessary, but there's a bug in
+        # hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java:70
+        # that causes AM containers to fail otherwise.
+        "yarn.resourcemanager.webapp.address.rm1" = "rm1:8088";
+        "yarn.resourcemanager.webapp.address.rm2" = "rm2:8088";
+      };
+    in
+    {
+      zk1 = { ... }: {
+        services.zookeeper.enable = true;
+        networking.firewall.allowedTCPPorts = [ 2181 ];
       };
-    };
-    nn2 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        hdfs.namenode.enable = true;
-        hdfs.zkfc.enable = true;
+
+      # HDFS cluster
+      nn1 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.namenode = {
+            enable = true;
+            openFirewall = true;
+          };
+          hdfs.zkfc.enable = true;
+        };
+      };
+      nn2 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.namenode = {
+            enable = true;
+            openFirewall = true;
+          };
+          hdfs.zkfc.enable = true;
+        };
       };
-    };
 
-    jn1 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        hdfs.journalnode.enable = true;
+      jn1 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.journalnode = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
       };
-    };
-    jn2 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        hdfs.journalnode.enable = true;
+      jn2 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.journalnode = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
       };
-    };
-    jn3 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        hdfs.journalnode.enable = true;
+      jn3 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.journalnode = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
+      };
+
+      dn1 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.datanode = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
       };
-    };
 
-    dn1 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        hdfs.datanode.enable = true;
+      # YARN cluster
+      rm1 = { options, ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite yarnSite;
+          yarn.resourcemanager = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
       };
-    };
-
-    # YARN cluster
-    rm1 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
-        yarn.resourcemanager.enable = true;
+      rm2 = { options, ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite yarnSite;
+          yarn.resourcemanager = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
       };
-    };
-    rm2 = {pkgs, options, ...}: {
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
-        yarn.resourcemanager.enable = true;
+      nm1 = { options, ... }: {
+        virtualisation.memorySize = 2048;
+        services.hadoop = {
+          inherit package coreSite hdfsSite yarnSite;
+          yarn.nodemanager = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
       };
-    };
-    nm1 = {pkgs, options, ...}: {
-      virtualisation.memorySize = 2048;
-      services.hadoop = {
-        inherit package coreSite hdfsSite;
-        yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
-        yarn.nodemanager.enable = true;
+      client = { options, ... }: {
+        services.hadoop = {
+          gatewayRole.enable = true;
+          inherit package coreSite hdfsSite yarnSite;
+        };
       };
-    };
   };
 
   testScript = ''
@@ -173,26 +200,26 @@ import ../make-test-python.nix ({pkgs, ...}: {
     # DN should have started by now, but confirm anyway
     dn1.wait_for_unit("hdfs-datanode")
     # Print states of namenodes
-    dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
     # Wait for cluster to exit safemode
-    dn1.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
-    dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+    client.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
+    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
     # test R/W
-    dn1.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
-    assert "testfilecontents" in dn1.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
+    client.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
+    assert "testfilecontents" in client.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
 
     # Test NN failover
     nn1.succeed("systemctl stop hdfs-namenode")
-    assert "active" in dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
-    dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
-    assert "testfilecontents" in dn1.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
+    assert "active" in client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
+    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+    assert "testfilecontents" in client.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
 
     nn1.succeed("systemctl start hdfs-namenode")
     nn1.wait_for_open_port(9870)
     nn1.wait_for_open_port(8022)
     nn1.wait_for_open_port(8020)
-    assert "standby" in dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
-    dn1.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
+    assert "standby" in client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
+    client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState | systemd-cat")
 
     #### YARN tests ####
 
@@ -208,21 +235,21 @@ import ../make-test-python.nix ({pkgs, ...}: {
     nm1.wait_for_unit("yarn-nodemanager")
     nm1.wait_for_open_port(8042)
     nm1.wait_for_open_port(8040)
-    nm1.wait_until_succeeds("yarn node -list | grep Nodes:1")
-    nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
-    nm1.succeed("sudo -u yarn yarn node -list | systemd-cat")
+    client.wait_until_succeeds("yarn node -list | grep Nodes:1")
+    client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
+    client.succeed("sudo -u yarn yarn node -list | systemd-cat")
 
     # Test RM failover
     rm1.succeed("systemctl stop yarn-resourcemanager")
-    assert "standby" not in nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
-    nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
+    assert "standby" not in client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
+    client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
     rm1.succeed("systemctl start yarn-resourcemanager")
     rm1.wait_for_unit("yarn-resourcemanager")
     rm1.wait_for_open_port(8088)
-    assert "standby" in nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
-    nm1.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
+    assert "standby" in client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
+    client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState | systemd-cat")
 
-    assert "Estimated value of Pi is" in nm1.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~lib/hadoop-*/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
-    assert "SUCCEEDED" in nm1.succeed("yarn application -list -appStates FINISHED")
+    assert "Estimated value of Pi is" in client.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~lib/hadoop-*/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
+    assert "SUCCEEDED" in client.succeed("yarn application -list -appStates FINISHED")
   '';
 })
diff --git a/nixos/tests/hadoop/hdfs.nix b/nixos/tests/hadoop/hdfs.nix
index b63cbf4803271..9415500463dec 100644
--- a/nixos/tests/hadoop/hdfs.nix
+++ b/nixos/tests/hadoop/hdfs.nix
@@ -1,32 +1,46 @@
 # Test a minimal HDFS cluster with no HA
-import ../make-test-python.nix ({...}: {
-  nodes = {
-    namenode = {pkgs, ...}: {
+import ../make-test-python.nix ({ package, lib, ... }:
+with lib;
+{
+  name = "hadoop-hdfs";
+
+  nodes = let
+    coreSite = {
+      "fs.defaultFS" = "hdfs://namenode:8020";
+      "hadoop.proxyuser.httpfs.groups" = "*";
+      "hadoop.proxyuser.httpfs.hosts" = "*";
+    };
+    in {
+    namenode = { pkgs, ... }: {
       services.hadoop = {
-        package = pkgs.hadoop;
+        inherit package;
         hdfs = {
           namenode = {
             enable = true;
+            openFirewall = true;
             formatOnInit = true;
           };
-          httpfs.enable = true;
-        };
-        coreSite = {
-          "fs.defaultFS" = "hdfs://namenode:8020";
-          "hadoop.proxyuser.httpfs.groups" = "*";
-          "hadoop.proxyuser.httpfs.hosts" = "*";
+          httpfs = {
+            # The NixOS hadoop module only supports webHDFS on 3.3 and newer
+            enable = mkIf (versionAtLeast package.version "3.3") true;
+            openFirewall = true;
+          };
         };
+        inherit coreSite;
       };
     };
-    datanode = {pkgs, ...}: {
+    datanode = { pkgs, ... }: {
       services.hadoop = {
-        package = pkgs.hadoop;
-        hdfs.datanode.enable = true;
-        coreSite = {
-          "fs.defaultFS" = "hdfs://namenode:8020";
-          "hadoop.proxyuser.httpfs.groups" = "*";
-          "hadoop.proxyuser.httpfs.hosts" = "*";
+        inherit package;
+        hdfs.datanode = {
+          enable = true;
+          openFirewall = true;
+          dataDirs = [{
+            type = "DISK";
+            path = "/tmp/dn1";
+          }];
         };
+        inherit coreSite;
       };
     };
   };
@@ -37,21 +51,32 @@ import ../make-test-python.nix ({...}: {
     namenode.wait_for_unit("hdfs-namenode")
     namenode.wait_for_unit("network.target")
     namenode.wait_for_open_port(8020)
+    namenode.succeed("ss -tulpne | systemd-cat")
+    namenode.succeed("cat /etc/hadoop*/hdfs-site.xml | systemd-cat")
     namenode.wait_for_open_port(9870)
 
     datanode.wait_for_unit("hdfs-datanode")
     datanode.wait_for_unit("network.target")
+  '' + ( if versionAtLeast package.version "3" then ''
     datanode.wait_for_open_port(9864)
     datanode.wait_for_open_port(9866)
     datanode.wait_for_open_port(9867)
 
-    namenode.succeed("curl -f http://namenode:9870")
     datanode.succeed("curl -f http://datanode:9864")
+  '' else ''
+    datanode.wait_for_open_port(50075)
+    datanode.wait_for_open_port(50010)
+    datanode.wait_for_open_port(50020)
+
+    datanode.succeed("curl -f http://datanode:50075")
+  '' ) + ''
+    namenode.succeed("curl -f http://namenode:9870")
 
     datanode.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
     datanode.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
     assert "testfilecontents" in datanode.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
 
+  '' + optionalString ( versionAtLeast package.version "3.3" ) ''
     namenode.wait_for_unit("hdfs-httpfs")
     namenode.wait_for_open_port(14000)
     assert "testfilecontents" in datanode.succeed("curl -f \"http://namenode:14000/webhdfs/v1/testfile?user.name=hdfs&op=OPEN\" 2>&1")
diff --git a/nixos/tests/hadoop/yarn.nix b/nixos/tests/hadoop/yarn.nix
index 09bdb35791c7e..1bf8e3831f67a 100644
--- a/nixos/tests/hadoop/yarn.nix
+++ b/nixos/tests/hadoop/yarn.nix
@@ -1,22 +1,30 @@
 # This only tests if YARN is able to start its services
-import ../make-test-python.nix ({...}: {
+import ../make-test-python.nix ({ package, ... }: {
+  name = "hadoop-yarn";
+
   nodes = {
-    resourcemanager = {pkgs, ...}: {
-      services.hadoop.package = pkgs.hadoop;
-      services.hadoop.yarn.resourcemanager.enable = true;
-      services.hadoop.yarnSite = {
-        "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
+    resourcemanager = { ... }: {
+      services.hadoop = {
+        inherit package;
+        yarn.resourcemanager = {
+          enable = true;
+          openFirewall = true;
+        };
       };
     };
-    nodemanager = {pkgs, ...}: {
-      services.hadoop.package = pkgs.hadoop;
-      services.hadoop.yarn.nodemanager.enable = true;
-      services.hadoop.yarnSite = {
-        "yarn.resourcemanager.hostname" = "resourcemanager";
-        "yarn.nodemanager.log-dirs" = "/tmp/userlogs";
+    nodemanager = { options, lib, ... }: {
+      services.hadoop = {
+        inherit package;
+        yarn.nodemanager = {
+          enable = true;
+          openFirewall = true;
+        };
+        yarnSite = options.services.hadoop.yarnSite.default // {
+          "yarn.resourcemanager.hostname" = "resourcemanager";
+          "yarn.nodemanager.log-dirs" = "/tmp/userlogs";
+        };
       };
     };
-
   };
 
   testScript = ''
diff --git a/nixos/tests/nextcloud/default.nix b/nixos/tests/nextcloud/default.nix
index 34d3c345354c7..b7b1c5c66002e 100644
--- a/nixos/tests/nextcloud/default.nix
+++ b/nixos/tests/nextcloud/default.nix
@@ -18,4 +18,4 @@ foldl
     };
   })
 { }
-  [ 21 22 23 ]
+  [ 22 23 ]
diff --git a/pkgs/applications/networking/cluster/fluxcd/default.nix b/pkgs/applications/networking/cluster/fluxcd/default.nix
index a583bf3089542..d5762d938a323 100644
--- a/pkgs/applications/networking/cluster/fluxcd/default.nix
+++ b/pkgs/applications/networking/cluster/fluxcd/default.nix
@@ -1,9 +1,9 @@
 { lib, buildGoModule, fetchFromGitHub, fetchzip, installShellFiles }:
 
 let
-  version = "0.27.3";
-  sha256 = "08ax1033456hfm5qz0r671xm5ig0047nqp7xffyn9za498bm4i5q";
-  manifestsSha256 = "165kspq10nvlihcb1460qmbw5r1mlzs5gliw01qa4mymvzmlggk7";
+  version = "0.27.4";
+  sha256 = "06951i332gr17nsbns8mh4kcjilqfw5w95shaznpaksx93f554g0";
+  manifestsSha256 = "0fvzh7j3vi5hw8jbw2gisjnn53bffwnp7zm3dwcbv3svwpw7823d";
 
   manifests = fetchzip {
     url =
@@ -23,7 +23,7 @@ in buildGoModule rec {
     inherit sha256;
   };
 
-  vendorSha256 = "sha256-ENSfec7iSKOkILgVCVnORpAia4D+vBjQAUXDA7EIvVQ=";
+  vendorSha256 = "sha256-7sHLXjyYMWSFckDPeVGJYK+nwhbRpD76tV334PCVYwA=";
 
   postUnpack = ''
     cp -r ${manifests} source/cmd/flux/manifests
diff --git a/pkgs/applications/networking/cluster/hadoop/default.nix b/pkgs/applications/networking/cluster/hadoop/default.nix
index 6a48cc8ada89e..adb46540cba7d 100644
--- a/pkgs/applications/networking/cluster/hadoop/default.nix
+++ b/pkgs/applications/networking/cluster/hadoop/default.nix
@@ -15,6 +15,8 @@
 , zlib
 , zstd
 , openssl
+, glibc
+, nixosTests
 }:
 
 with lib;
@@ -22,7 +24,7 @@ with lib;
 assert elem stdenv.system [ "x86_64-linux" "x86_64-darwin" "aarch64-linux" "aarch64-darwin" ];
 
 let
-  common = { pname, version, untarDir ? "${pname}-${version}", sha256, jdk, openssl ? null, nativeLibs ? [ ], libPatches ? "" }:
+  common = { pname, version, untarDir ? "${pname}-${version}", sha256, jdk, openssl ? null, nativeLibs ? [ ], libPatches ? "", tests }:
     stdenv.mkDerivation rec {
       inherit pname version jdk libPatches untarDir openssl;
       src = fetchurl {
@@ -38,7 +40,10 @@ let
       installPhase = ''
         mkdir -p $out/{lib/${untarDir}/conf,bin,lib}
         mv * $out/lib/${untarDir}
-
+      '' + optionalString stdenv.isLinux ''
+        # All versions need container-executor, but some versions can't use autoPatchelf because of broken SSL versions
+        patchelf --set-interpreter ${glibc.out}/lib64/ld-linux-x86-64.so.2 $out/lib/${untarDir}/bin/container-executor
+      '' + ''
         for n in $(find $out/lib/${untarDir}/bin -type f ! -name "*.*"); do
           makeWrapper "$n" "$out/bin/$(basename $n)"\
             --set-default JAVA_HOME ${jdk.home}\
@@ -49,6 +54,8 @@ let
         done
       '' + libPatches;
 
+      passthru = { inherit tests; };
+
       meta = {
         homepage = "https://hadoop.apache.org/";
         description = "Framework for distributed processing of large data sets across clusters of computers";
@@ -73,30 +80,34 @@ in
 {
   # Different version of hadoop support different java runtime versions
   # https://cwiki.apache.org/confluence/display/HADOOP/Hadoop+Java+Versions
-  hadoop_3_3 =
-    common
-      (rec {
-        pname = "hadoop";
-        version = "3.3.1";
-        untarDir = "${pname}-${version}";
-        sha256 = rec {
-          x86_64-linux = "1b3v16ihysqaxw8za1r5jlnphy8dwhivdx2d0z64309w57ihlxxd";
-          x86_64-darwin = x86_64-linux;
-          aarch64-linux = "00ln18vpi07jq2slk3kplyhcj8ad41n0yl880q5cihilk7daclxz";
-          aarch64-darwin = aarch64-linux;
-        };
-
-        inherit openssl;
-        nativeLibs = [ stdenv.cc.cc.lib protobuf3_7 zlib snappy ];
-        libPatches = ''
-          ln -s ${getLib cyrus_sasl}/lib/libsasl2.so $out/lib/${untarDir}/lib/native/libsasl2.so.2
-          ln -s ${getLib openssl}/lib/libcrypto.so $out/lib/${untarDir}/lib/native/
-          ln -s ${getLib zlib}/lib/libz.so.1 $out/lib/${untarDir}/lib/native/
-          ln -s ${getLib zstd}/lib/libzstd.so.1 $out/lib/${untarDir}/lib/native/
-          ln -s ${getLib bzip2}/lib/libbz2.so.1 $out/lib/${untarDir}/lib/native/
-        '' + optionalString stdenv.isLinux "patchelf --add-rpath ${jdk.home}/lib/server $out/lib/${untarDir}/lib/native/libnativetask.so.1.0.0";
-        jdk = jdk11_headless;
-      });
+  hadoop_3_3 = common rec {
+    pname = "hadoop";
+    version = "3.3.1";
+    untarDir = "${pname}-${version}";
+    sha256 = rec {
+      x86_64-linux = "1b3v16ihysqaxw8za1r5jlnphy8dwhivdx2d0z64309w57ihlxxd";
+      x86_64-darwin = x86_64-linux;
+      aarch64-linux = "00ln18vpi07jq2slk3kplyhcj8ad41n0yl880q5cihilk7daclxz";
+      aarch64-darwin = aarch64-linux;
+    };
+    jdk = jdk11_headless;
+    inherit openssl;
+    # TODO: Package and add Intel Storage Acceleration Library
+    nativeLibs = [ stdenv.cc.cc.lib protobuf3_7 zlib snappy ];
+    libPatches = ''
+      ln -s ${getLib cyrus_sasl}/lib/libsasl2.so $out/lib/${untarDir}/lib/native/libsasl2.so.2
+      ln -s ${getLib openssl}/lib/libcrypto.so $out/lib/${untarDir}/lib/native/
+      ln -s ${getLib zlib}/lib/libz.so.1 $out/lib/${untarDir}/lib/native/
+      ln -s ${getLib zstd}/lib/libzstd.so.1 $out/lib/${untarDir}/lib/native/
+      ln -s ${getLib bzip2}/lib/libbz2.so.1 $out/lib/${untarDir}/lib/native/
+    '' + optionalString stdenv.isLinux ''
+      # libjvm.so for Java >=11
+      patchelf --add-rpath ${jdk.home}/lib/server $out/lib/${untarDir}/lib/native/libnativetask.so.1.0.0
+      # Java 8 has libjvm.so at a different path
+      patchelf --add-rpath ${jdk.home}/jre/lib/amd64/server $out/lib/${untarDir}/lib/native/libnativetask.so.1.0.0
+    '';
+    tests = nixosTests.hadoop;
+  };
   hadoop_3_2 = common rec {
     pname = "hadoop";
     version = "3.2.2";
@@ -104,11 +115,13 @@ in
     jdk = jdk8_headless;
     # not using native libs because of broken openssl_1_0_2 dependency
     # can be manually overridden
+    tests = nixosTests.hadoop_3_2;
   };
   hadoop2 = common rec {
     pname = "hadoop";
     version = "2.10.1";
     sha256.x86_64-linux = "1w31x4bk9f2swnx8qxx0cgwfg8vbpm6cy5lvfnbbpl3rsjhmyg97";
     jdk = jdk8_headless;
+    tests = nixosTests.hadoop2;
   };
 }
diff --git a/pkgs/applications/science/computer-architecture/qtrvsim/default.nix b/pkgs/applications/science/computer-architecture/qtrvsim/default.nix
index 24d9f642ec567..fc840fe45fbc8 100644
--- a/pkgs/applications/science/computer-architecture/qtrvsim/default.nix
+++ b/pkgs/applications/science/computer-architecture/qtrvsim/default.nix
@@ -2,13 +2,13 @@
 
 stdenv.mkDerivation rec {
   pname = "QtRVSim";
-  version = "0.9.1";
+  version = "0.9.2";
 
   src = fetchFromGitHub {
     owner = "cvut";
     repo = "qtrvsim";
     rev = "refs/tags/v${version}";
-    sha256 = "AOksVS0drIBnK4RCxZw40yVxf4E8GjG9kU0rIZsY9gA=";
+    sha256 = "B1l+ysrodeDbxYfdLLMF8yk4/uPXTcDrTaMtYm89HuU=";
   };
 
   nativeBuildInputs = [ cmake wrapQtAppsHook ];
diff --git a/pkgs/data/icons/kora-icon-theme/default.nix b/pkgs/data/icons/kora-icon-theme/default.nix
index 8df63a75ad8dc..01572e7976123 100644
--- a/pkgs/data/icons/kora-icon-theme/default.nix
+++ b/pkgs/data/icons/kora-icon-theme/default.nix
@@ -2,13 +2,13 @@
 
 stdenv.mkDerivation rec  {
   pname = "kora-icon-theme";
-  version = "1.5.0";
+  version = "1.5.1";
 
   src = fetchFromGitHub  {
     owner = "bikass";
     repo = "kora";
     rev = "v${version}";
-    sha256 = "sha256-kUgNj7KuxsQ/BvQ0ORl3xzEm9gv69+2PS0Bgv8i/S9U=";
+    sha256 = "sha256-3TKjd2Lblb+/zFq7rkdgnD1dJU3kis7QZi7Ui74IWzA=";
   };
 
   nativeBuildInputs = [
diff --git a/pkgs/development/compilers/open-watcom/v2.nix b/pkgs/development/compilers/open-watcom/v2.nix
index b521aaef106b1..dc2af6e7835de 100644
--- a/pkgs/development/compilers/open-watcom/v2.nix
+++ b/pkgs/development/compilers/open-watcom/v2.nix
@@ -12,14 +12,14 @@
 
 stdenv.mkDerivation rec {
   pname = "open-watcom-v2";
-  version = "unstable-2022-02-22";
+  version = "unstable-2022-03-14";
   name = "${pname}-unwrapped-${version}";
 
   src = fetchFromGitHub {
     owner = "open-watcom";
     repo = "open-watcom-v2";
-    rev = "9e25b3d6b8066f09b4f7131a31de1cf2af691e9a";
-    sha256 = "1w336070kmhc6cmn2aqr8vm0fmw3yza2n0w4asvs2kqxjgmbn6i2";
+    rev = "22627ccc1bd3de70aff9ac056e0dc9ecf7f7b6ec";
+    sha256 = "khy/fhmQjTGKfx6iOUBt+ySwpEx0df/7meyNvBnJAPY=";
   };
 
   postPatch = ''
diff --git a/pkgs/development/libraries/libarchive-qt/default.nix b/pkgs/development/libraries/libarchive-qt/default.nix
index 62b425bba0589..ae74fd3028a92 100644
--- a/pkgs/development/libraries/libarchive-qt/default.nix
+++ b/pkgs/development/libraries/libarchive-qt/default.nix
@@ -1,19 +1,20 @@
-{ mkDerivation, lib, fetchFromGitLab, libarchive, xz, zlib, bzip2, cmake, ninja }:
+{ mkDerivation, lib, fetchFromGitLab, libarchive, xz, zlib, bzip2, meson, pkg-config, ninja }:
 
 mkDerivation rec {
   pname = "libarchive-qt";
-  version = "2.0.6";
+  version = "2.0.7";
 
   src = fetchFromGitLab {
     owner = "marcusbritanicus";
     repo = pname;
     rev = "v${version}";
-    sha256 = "sha256-Z+2zjQolV1Ncr6v9r7fGrc/fEMt0iMtGwv9eZ2Tu2cA=";
+    sha256 = "sha256-KRywB+Op44N00q9tgO2WNCliRgUDRvrCms1O8JYt62o=";
   };
 
   nativeBuildInputs = [
-    cmake
+    meson
     ninja
+    pkg-config
   ];
 
   buildInputs = [
diff --git a/pkgs/development/node-packages/default.nix b/pkgs/development/node-packages/default.nix
index c950ce9e5e9ae..b308f6128c2d2 100644
--- a/pkgs/development/node-packages/default.nix
+++ b/pkgs/development/node-packages/default.nix
@@ -359,7 +359,7 @@ let
 
       src = fetchurl {
         url = "https://registry.npmjs.org/prisma/-/prisma-${version}.tgz";
-        sha512 = "sha512-dAld12vtwdz9Rz01nOjmnXe+vHana5PSog8t0XGgLemKsUVsaupYpr74AHaS3s78SaTS5s2HOghnJF+jn91ZrA==";
+        sha512 = "sha512-8SdsLPhKR3mOfoo2o73h9mNn3v5kA/RqGA26Sv6qDS78Eh2uepPqt5e8/nwj5EOblYm5HEGuitaXQrOCLb6uTw==";
       };
       postInstall = with pkgs; ''
         wrapProgram "$out/bin/prisma" \
diff --git a/pkgs/development/python-modules/bumps/default.nix b/pkgs/development/python-modules/bumps/default.nix
index 6e0637d2a6bc6..d1926c9631aa7 100644
--- a/pkgs/development/python-modules/bumps/default.nix
+++ b/pkgs/development/python-modules/bumps/default.nix
@@ -1,24 +1,37 @@
-{ lib, buildPythonPackage, fetchPypi, six}:
+{ lib
+, buildPythonPackage
+, fetchPypi
+, pythonOlder
+, six
+}:
 
 buildPythonPackage rec {
   pname = "bumps";
-  version = "0.8.1";
+  version = "0.9.0";
+  format = "setuptools";
 
-  propagatedBuildInputs = [six];
-
-  # Bumps does not provide its own tests.py, so the test
-  # always fails
-  doCheck = false;
+  disabled = pythonOlder "3.7";
 
   src = fetchPypi {
     inherit pname version;
-    sha256 = "f4f2ee712a1e468a2ce5c0a32f67739a83331f0cb7b9c50b9e7510daefc12169";
+    hash = "sha256-BY9kg0ksKfrpQgsl1aDDJJ+zKJmURqwTtKxlITxse+o=";
   };
 
+  propagatedBuildInputs = [
+    six
+  ];
+
+  # Module has no tests
+  doCheck = false;
+
+  pythonImportsCheck = [
+    "bumps"
+  ];
+
   meta = with lib; {
-    homepage = "https://www.reflectometry.org/danse/software.html";
     description = "Data fitting with bayesian uncertainty analysis";
-    maintainers = with maintainers; [ rprospero ];
+    homepage = "https://bumps.readthedocs.io/";
     license = licenses.publicDomain;
+    maintainers = with maintainers; [ rprospero ];
   };
 }
diff --git a/pkgs/development/python-modules/cloudscraper/default.nix b/pkgs/development/python-modules/cloudscraper/default.nix
index cd11aa03f863b..6f693f369cda5 100644
--- a/pkgs/development/python-modules/cloudscraper/default.nix
+++ b/pkgs/development/python-modules/cloudscraper/default.nix
@@ -1,6 +1,6 @@
 { lib
 , buildPythonPackage
-, isPy3k
+, pythonOlder
 , fetchPypi
 , requests
 , requests-toolbelt
@@ -9,12 +9,14 @@
 
 buildPythonPackage rec {
   pname = "cloudscraper";
-  version = "1.2.58";
-  disabled = !isPy3k;
+  version = "1.2.60";
+  format = "setuptools";
+
+  disabled = pythonOlder "3.7";
 
   src = fetchPypi {
     inherit pname version;
-    sha256 = "1wnzv2k8cm8q1x18r4zg8pcnpm4gsdp82hywwjimp2v2qll918nx";
+    hash = "sha256-DTQTsv/59895UTsMmqxYtSfFosUWPRx8wMT4zKHQ9Oc=";
   };
 
   propagatedBuildInputs = [
@@ -27,10 +29,12 @@ buildPythonPackage rec {
   # nixpkgs yet, and also aren't included in the PyPI bundle.  TODO.
   doCheck = false;
 
-  pythonImportsCheck = [ "cloudscraper" ];
+  pythonImportsCheck = [
+    "cloudscraper"
+  ];
 
   meta = with lib; {
-    description = "A Python module to bypass Cloudflare's anti-bot page";
+    description = "Python module to bypass Cloudflare's anti-bot page";
     homepage = "https://github.com/venomous/cloudscraper";
     license = licenses.mit;
     maintainers = with maintainers; [ kini ];
diff --git a/pkgs/development/python-modules/google-cloud-storage/default.nix b/pkgs/development/python-modules/google-cloud-storage/default.nix
index bce2141020941..629c323506b18 100644
--- a/pkgs/development/python-modules/google-cloud-storage/default.nix
+++ b/pkgs/development/python-modules/google-cloud-storage/default.nix
@@ -9,15 +9,19 @@
 , google-cloud-testutils
 , google-resumable-media
 , mock
+, pythonOlder
 }:
 
 buildPythonPackage rec {
   pname = "google-cloud-storage";
-  version = "2.2.0";
+  version = "2.2.1";
+  format = "setuptools";
+
+  disabled = pythonOlder "3.7";
 
   src = fetchPypi {
     inherit pname version;
-    sha256 = "sha256-01mWgBE11R20m7j3p+Kc7cwlqotDXu0MTA7y+e5W0dk=";
+    hash = "sha256-AkT0YScQy17ERfxndDh1ZOI/mCM2P7QIsock4hAkAbc=";
   };
 
   propagatedBuildInputs = [
diff --git a/pkgs/development/python-modules/qcengine/default.nix b/pkgs/development/python-modules/qcengine/default.nix
index ea5325ea660cb..795b8460b5dbe 100644
--- a/pkgs/development/python-modules/qcengine/default.nix
+++ b/pkgs/development/python-modules/qcengine/default.nix
@@ -11,7 +11,7 @@
 
 buildPythonPackage rec {
   pname = "qcengine";
-  version = "0.22.0";
+  version = "0.23.0";
 
   checkInputs = [ pytestCheckHook ];
 
@@ -25,7 +25,7 @@ buildPythonPackage rec {
 
   src = fetchPypi {
     inherit pname version;
-    sha256 = "685a08247b561ed1c7a7b42e68293f90b412e83556626304a3f826a15be51308";
+    sha256 = "sha256-gDn0Nu6ALTr3KyZnYDSA6RE3S5JQj562FP2RI9U3Gxs=";
   };
 
   doCheck = true;
diff --git a/pkgs/development/python-modules/samsungtvws/default.nix b/pkgs/development/python-modules/samsungtvws/default.nix
index b4bebb79a0fdb..bed661ce20060 100644
--- a/pkgs/development/python-modules/samsungtvws/default.nix
+++ b/pkgs/development/python-modules/samsungtvws/default.nix
@@ -5,12 +5,12 @@
 
 buildPythonPackage rec {
   pname = "samsungtvws";
-  version = "2.3.0";
+  version = "2.4.0";
   disabled = isPy27;
 
   src = fetchPypi {
     inherit pname version;
-    sha256 = "sha256-2ly9lbnIHGHB55ml10jKE7dC5LdN1ToGW4GqfxTC5kI=";
+    sha256 = "sha256-LbNHaSbNCwoffox6B8kEUzxjkSJotB+P1bw3wbU7DZk=";
   };
 
   propagatedBuildInputs = [
diff --git a/pkgs/development/tools/database/prisma-engines/default.nix b/pkgs/development/tools/database/prisma-engines/default.nix
index 48456166b92bd..18614291d587a 100644
--- a/pkgs/development/tools/database/prisma-engines/default.nix
+++ b/pkgs/development/tools/database/prisma-engines/default.nix
@@ -10,19 +10,19 @@
 
 rustPlatform.buildRustPackage rec {
   pname = "prisma-engines";
-  version = "3.10.0";
+  version = "3.11.0";
 
   src = fetchFromGitHub {
     owner = "prisma";
     repo = "prisma-engines";
     rev = version;
-    sha256 = "sha256-0m0RjIasEGB9QxZc7wKCMLnxHXkSlvCDA2QWa87mRRs=";
+    sha256 = "sha256-z7ebwidY+p350XaGeyohoSHWc2DhfzpRxsRDLON1BuA=";
   };
 
   # Use system openssl.
   OPENSSL_NO_VENDOR = 1;
 
-  cargoSha256 = "sha256-KNQa+wLLl4abz48QKYkWu7A+FTGIyB+1EWAnLuWpJwc=";
+  cargoSha256 = "sha256-PQdLoNJL9szPzPtFRznWS0lngTvtWK+Ko2rp4JWH9dQ=";
 
   nativeBuildInputs = [ pkg-config ];
 
diff --git a/pkgs/development/tools/jp/default.nix b/pkgs/development/tools/jp/default.nix
index f1f5b37c10dcf..ba78a4ce6bcc5 100644
--- a/pkgs/development/tools/jp/default.nix
+++ b/pkgs/development/tools/jp/default.nix
@@ -1,18 +1,18 @@
-{ lib, buildGoPackage, fetchFromGitHub }:
+{ lib, buildGoModule, fetchFromGitHub }:
 
-buildGoPackage rec {
+buildGoModule rec {
   pname = "jp";
-  version = "0.1.3";
-  rev = version;
-
-  goPackagePath = "github.com/jmespath/jp";
+  version = "0.2.1";
 
   src = fetchFromGitHub {
-    inherit rev;
+    rev = version;
     owner = "jmespath";
     repo = "jp";
-    sha256 = "0fdbnihbd0kq56am3bmh2zrfk4fqjslcbm48malbgmpqw3a5nvpi";
+    hash = "sha256-a3WvLAdUZk+Y+L+opPDMBvdN5x5B6nAi/lL8JHJG/gY=";
   };
+
+  vendorSha256 = "sha256-K6ZNtART7tcVBH5myV6vKrKWfnwK8yTa6/KK4QLyr00=";
+
   meta = with lib; {
     description = "A command line interface to the JMESPath expression language for JSON";
     homepage = "https://github.com/jmespath/jp";
diff --git a/pkgs/development/tools/k6/default.nix b/pkgs/development/tools/k6/default.nix
index d50062e4bdbb9..66ff4a8b2df48 100644
--- a/pkgs/development/tools/k6/default.nix
+++ b/pkgs/development/tools/k6/default.nix
@@ -2,13 +2,13 @@
 
 buildGoModule rec {
   pname = "k6";
-  version = "0.36.0";
+  version = "0.37.0";
 
   src = fetchFromGitHub {
     owner = "grafana";
     repo = pname;
     rev = "v${version}";
-    sha256 = "sha256-yWEh0sPMGe6mNcLKhbmJEUCHzZKFGMcTRNQrHgiQ+BQ=";
+    sha256 = "sha256-5pxOg+pwa2VrEWinDadx2ZFYXiQgochbU4bCkJEezQw=";
   };
 
   subPackages = [ "./" ];
diff --git a/pkgs/development/tools/stylua/default.nix b/pkgs/development/tools/stylua/default.nix
index 1b25634502a98..e21f058b8a066 100644
--- a/pkgs/development/tools/stylua/default.nix
+++ b/pkgs/development/tools/stylua/default.nix
@@ -7,16 +7,16 @@
 
 rustPlatform.buildRustPackage rec {
   pname = "stylua";
-  version = "0.12.4";
+  version = "0.12.5";
 
   src = fetchFromGitHub {
     owner = "johnnymorganz";
     repo = pname;
     rev = "v${version}";
-    sha256 = "sha256-BPLN7/LaVDtCOJBgIJVbnENUyFtacRsK3JxDupytzOA=";
+    sha256 = "sha256-4tQQTTAdIAhlkBJevwwwGXOKd6bJJOyG4nlbCv7909Y=";
   };
 
-  cargoSha256 = "sha256-MZsFbFQp5Rw20pXzvTFNhMiVx/TJZ63/2rU7vj7IcqQ=";
+  cargoSha256 = "sha256-DGe2lB8xZgY9ikTsIHDOdHzTyHfDaSlmy8FU/S9FDCI=";
 
   buildFeatures = lib.optional lua52Support "lua52"
     ++ lib.optional luauSupport "luau";
diff --git a/pkgs/servers/nextcloud/default.nix b/pkgs/servers/nextcloud/default.nix
index 735bfdeafb18c..d9a4465a10a45 100644
--- a/pkgs/servers/nextcloud/default.nix
+++ b/pkgs/servers/nextcloud/default.nix
@@ -33,23 +33,18 @@ let
     };
   };
 in {
-  nextcloud20 = throw ''
-    Nextcloud v20 has been removed from `nixpkgs` as the support for it was dropped
-    by upstream in 2021-10. Please upgrade to at least Nextcloud v21 by declaring
+  nextcloud21 = throw ''
+    Nextcloud v21 has been removed from `nixpkgs` as the support for it was dropped
+    by upstream in 2022-02. Please upgrade to at least Nextcloud v22 by declaring
 
-        services.nextcloud.package = pkgs.nextcloud21;
+        services.nextcloud.package = pkgs.nextcloud22;
 
     in your NixOS config.
 
-    WARNING: if you were on Nextcloud 19 on NixOS 21.05 you have to upgrade to Nextcloud 20
-    first on 21.05 because Nextcloud doesn't support upgrades accross multiple major versions!
+    WARNING: if you were on Nextcloud 20 on NixOS 21.11 you have to upgrade to Nextcloud 21
+    first on 21.11 because Nextcloud doesn't support upgrades across multiple major versions!
   '';
 
-  nextcloud21 = generic {
-    version = "21.0.9";
-    sha256 = "sha256-p6bvgTXmmjGN3TRQpG88f3YPksh0QzWG9j9KnEjcrqE=";
-  };
-
   nextcloud22 = generic {
     version = "22.2.5";
     sha256 = "sha256-gb5N0u5tu4/nI2xIpjXwm2hiSDCrBhIDyN6gKGOsdS8=";
diff --git a/pkgs/servers/piping-server-rust/default.nix b/pkgs/servers/piping-server-rust/default.nix
index 7cf9442f13bf4..8885179de4db3 100644
--- a/pkgs/servers/piping-server-rust/default.nix
+++ b/pkgs/servers/piping-server-rust/default.nix
@@ -2,16 +2,16 @@
 
 rustPlatform.buildRustPackage rec {
   pname = "piping-server-rust";
-  version = "0.12.0";
+  version = "0.12.1";
 
   src = fetchFromGitHub {
     owner = "nwtgck";
     repo = pname;
     rev = "v${version}";
-    sha256 = "sha256-eDO2y/4660IAcD9vf1Vt6t3nv3Rc+zCRRFBbW/FeKIw=";
+    sha256 = "sha256-L15ofIM5a/qoJHGXmkuTsmQLLmERG/PxAJ4+z1nn7w4=";
   };
 
-  cargoSha256 = "sha256-U68R543l28osPe0DjuERqB/G6ur/BZDpWMZIO9RObaM=";
+  cargoSha256 = "sha256-CcIM7T7P4LbPxPK1ZqoJRP0IsLMEwMZg9DcuRu0aJHM=";
 
   buildInputs = lib.optionals stdenv.isDarwin [ CoreServices Security ];
 
diff --git a/pkgs/tools/archivers/p7zip/default.nix b/pkgs/tools/archivers/p7zip/default.nix
index 7f892a44da5db..5e92553b636c8 100644
--- a/pkgs/tools/archivers/p7zip/default.nix
+++ b/pkgs/tools/archivers/p7zip/default.nix
@@ -8,7 +8,18 @@ stdenv.mkDerivation rec {
     owner  = "jinfeihan57";
     repo   = pname;
     rev    = "v${version}";
-    sha256 = "sha256-19F4hPV0nKVuFZNbOcXrcA1uW6Y3HQolaHVIYXGmh18=";
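+    # pick the source hash for the variant being fetched (free or unfree)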
+    sha256 = {
+      free = "sha256-DrBuf2VPdcprHI6pMSmL7psm2ofOrUf0Oj0qwMjXzkk=";
+      unfree = "sha256-19F4hPV0nKVuFZNbOcXrcA1uW6Y3HQolaHVIYXGmh18=";
+    }.${if enableUnfree then "unfree" else "free"};
+    # remove the unRAR related code from the src drv
+    # > the license requires that you agree to these use restrictions,
+    # > or you must remove the software (source and binary) from your hard disks
+    # https://fedoraproject.org/wiki/Licensing:Unrar
+    extraPostFetch = lib.optionalString (!enableUnfree) ''
+      rm -r $out/CPP/7zip/Compress/Rar*
+      find $out -name makefile'*' -exec sed -i '/Rar/d' {} +
+    '';
   };
 
   # Default makefile is full of impurities on Darwin. The patch doesn't hurt Linux so I'm leaving it unconditional
@@ -25,11 +36,6 @@ stdenv.mkDerivation rec {
     substituteInPlace makefile.machine \
       --replace 'CC=gcc'  'CC=${stdenv.cc.targetPrefix}gcc' \
       --replace 'CXX=g++' 'CXX=${stdenv.cc.targetPrefix}g++'
-  '' + lib.optionalString (!enableUnfree) ''
-    # Remove non-free RAR source code
-    # (see DOC/License.txt, https://fedoraproject.org/wiki/Licensing:Unrar)
-    rm -r CPP/7zip/Compress/Rar*
-    find . -name makefile'*' -exec sed -i '/Rar/d' {} +
   '';
 
   makeFlags = [ "DEST_HOME=${placeholder "out"}" ];
@@ -46,13 +52,20 @@ stdenv.mkDerivation rec {
 
   NIX_CFLAGS_COMPILE = lib.optionalString stdenv.cc.isClang "-Wno-error=c++11-narrowing";
 
-  meta = {
+  passthru.updateScript = ./update.sh;
+
+  meta = with lib; {
     homepage = "https://github.com/jinfeihan57/p7zip";
     description = "A new p7zip fork with additional codecs and improvements (forked from https://sourceforge.net/projects/p7zip/)";
-    platforms = lib.platforms.unix;
-    maintainers = [ lib.maintainers.raskin ];
+    license = with licenses;
+      # p7zip code is largely lgpl2Plus
+      # CPP/7zip/Compress/LzfseDecoder.cpp is bsd3
+      [ lgpl2Plus /* and */ bsd3 ] ++
+      # and CPP/7zip/Compress/Rar* are unfree with the unRAR license restriction
+      # the unRAR compression code is disabled by default
+      lib.optionals enableUnfree [ unfree ];
+    maintainers = with maintainers; [ raskin jk ];
+    platforms = platforms.unix;
     mainProgram = "7z";
-    # RAR code is under non-free UnRAR license, but we remove it
-    license = if enableUnfree then lib.licenses.unfree else lib.licenses.lgpl2Plus;
   };
 }
diff --git a/pkgs/tools/archivers/p7zip/update.sh b/pkgs/tools/archivers/p7zip/update.sh
new file mode 100755
index 0000000000000..0d4b91e56e800
--- /dev/null
+++ b/pkgs/tools/archivers/p7zip/update.sh
@@ -0,0 +1,47 @@
+#! /usr/bin/env nix-shell
+#! nix-shell -i bash -p coreutils gnused curl jq nix-prefetch
+set -euo pipefail
+cd "$(dirname "${BASH_SOURCE[0]}")"
+
+DRV_DIR="$PWD"
+
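+# version currently recorded in default.nix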
+OLD_VERSION="$(sed -nE 's/\s*version = "(.*)".*/\1/p' ./default.nix)"
+
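+# latest upstream release tag, with the leading "v" stripped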
+NEW_VERSION="$(curl https://api.github.com/repos/jinfeihan57/p7zip/releases/latest | jq .tag_name -r | tr -d 'v')"
+
+echo "comparing versions $OLD_VERSION => $NEW_VERSION"
+if [[ "$OLD_VERSION" == "$NEW_VERSION" ]]; then
+    echo "Already up to date! Doing nothing"
+    exit 0
+fi
+
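+# the nixpkgs root is four directories above pkgs/tools/archivers/p7zip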
+NIXPKGS_ROOT="$(realpath "$DRV_DIR/../../../..")"
+
+echo "getting free source hash"
+OLD_FREE_HASH="$(nix-instantiate --eval --strict -E "with import $NIXPKGS_ROOT {}; p7zip.src.drvAttrs.outputHash" | tr -d '"')"
+echo "getting unfree source hash"
+OLD_UNFREE_HASH="$(nix-instantiate --eval --strict -E "with import $NIXPKGS_ROOT {}; (p7zip.override { enableUnfree = true; }).src.drvAttrs.outputHash" | tr -d '"')"
+
+
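+# prefetch the free source to compute its new hash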
+NEW_FREE_HASH=$(nix-prefetch -f "$NIXPKGS_ROOT" -E "p7zip.src" --rev "v$NEW_VERSION")
+
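+# prefetch the unfree source as well, keeping the store path so it can be deleted afterwards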
+NEW_UNFREE_OUT=$(nix-prefetch -f "$NIXPKGS_ROOT" -E "(p7zip.override { enableUnfree = true; }).src" --rev "v$NEW_VERSION" --output raw --print-path)
+# first line of raw output is the hash
+NEW_UNFREE_HASH="$(echo "$NEW_UNFREE_OUT" | sed -n 1p)"
+# second line of raw output is the src path
+NEW_UNFREE_SRC="$(echo "$NEW_UNFREE_OUT" | sed -n 2p)"
+# make sure to nuke the unfree src from the updater's machine
+# > the license requires that you agree to these use restrictions, or you must remove the software (source and binary) from your hard disks
+# https://fedoraproject.org/wiki/Licensing:Unrar
+nix-store --delete "$NEW_UNFREE_SRC"
+
+
+echo "updating version"
+sed -i "s/version = \"$OLD_VERSION\";/version = \"$NEW_VERSION\";/" "$DRV_DIR/default.nix"
+
+echo "updating free hash"
+sed -i "s@free = \"$OLD_FREE_HASH\";@free = \"$NEW_FREE_HASH\";@" "$DRV_DIR/default.nix"
+echo "updating unfree hash"
+sed -i "s@unfree = \"$OLD_UNFREE_HASH\";@unfree = \"$NEW_UNFREE_HASH\";@" "$DRV_DIR/default.nix"
+
+echo "done"
diff --git a/pkgs/tools/backup/btrbk/default.nix b/pkgs/tools/backup/btrbk/default.nix
index b04263a7168ad..d9f336c86d483 100644
--- a/pkgs/tools/backup/btrbk/default.nix
+++ b/pkgs/tools/backup/btrbk/default.nix
@@ -18,11 +18,11 @@
 
 stdenv.mkDerivation rec {
   pname = "btrbk";
-  version = "0.32.0";
+  version = "0.32.1";
 
   src = fetchurl {
     url = "https://digint.ch/download/btrbk/releases/${pname}-${version}.tar.xz";
-    sha256 = "HmvNtIgFfeaiFuSRobWlcJqusPSYtqAqx+79+CeNVDQ=";
+    sha256 = "flQf1KTybPImDoD+iNe+P+u1rOiYxXjQoltuGPWuX3g=";
   };
 
   nativeBuildInputs = [ asciidoctor makeWrapper ];
diff --git a/pkgs/tools/misc/up/default.nix b/pkgs/tools/misc/up/default.nix
index 47c504cd1a71c..7737c67faa23c 100644
--- a/pkgs/tools/misc/up/default.nix
+++ b/pkgs/tools/misc/up/default.nix
@@ -13,8 +13,6 @@ buildGoModule rec {
 
   vendorSha256 = "1q8wfsfl3rz698ck5q5s5z6iw9k134fxxvwipcp2b052n998rcrx";
 
-  doCheck = false;
-
   meta = with lib; {
     description = "Ultimate Plumber is a tool for writing Linux pipes with instant live preview";
     homepage = "https://github.com/akavel/up";
diff --git a/pkgs/tools/networking/frp/default.nix b/pkgs/tools/networking/frp/default.nix
index 1ef3c13172b39..e7d2752a94fae 100644
--- a/pkgs/tools/networking/frp/default.nix
+++ b/pkgs/tools/networking/frp/default.nix
@@ -2,16 +2,16 @@
 
 buildGoModule rec {
   pname = "frp";
-  version = "0.39.1";
+  version = "0.40.0";
 
   src = fetchFromGitHub {
     owner = "fatedier";
     repo = pname;
     rev = "v${version}";
-    sha256 = "sha256-tqdrYrIWbmRn+5iAZKd9GlcmFNQnh3yNxZ95As7+v5Q=";
+    sha256 = "sha256-W+88Fq9oYDBLCNp+6rc9jACJzky7FCZg/xLDowGGdm0=";
   };
 
-  vendorSha256 = "sha256-NPnchl+N6DeqMhsOIw2MYD/i2IZzHS9ZqbUOeulgb90=";
+  vendorSha256 = "sha256-iBjMFOERWQ1aPn+2gEoI9og2ov2LlBVV1sLAZlvqZPM=";
 
   doCheck = false;
 
diff --git a/pkgs/tools/system/syslog-ng/default.nix b/pkgs/tools/system/syslog-ng/default.nix
index 5d7fd2b1b6df9..6948728f16b9a 100644
--- a/pkgs/tools/system/syslog-ng/default.nix
+++ b/pkgs/tools/system/syslog-ng/default.nix
@@ -7,11 +7,11 @@
 
 stdenv.mkDerivation rec {
   pname = "syslog-ng";
-  version = "3.35.1";
+  version = "3.36.1";
 
   src = fetchurl {
     url = "https://github.com/${pname}/${pname}/releases/download/${pname}-${version}/${pname}-${version}.tar.gz";
-    sha256 = "sha256-HQI4sGs+WYfIWeW1Kezuc491us/wSxSTmLH+jLsSHlM=";
+    sha256 = "sha256-kKJcl2f+dJ21DxGN38kuxxOZdj0uzVrU8R/17qBJ5gs=";
   };
 
   nativeBuildInputs = [ pkg-config which ];
diff --git a/pkgs/top-level/all-packages.nix b/pkgs/top-level/all-packages.nix
index 5e7112b66a0d3..492693c04e62b 100644
--- a/pkgs/top-level/all-packages.nix
+++ b/pkgs/top-level/all-packages.nix
@@ -8240,7 +8240,7 @@ with pkgs;
   grocy = callPackage ../servers/grocy { };
 
   inherit (callPackage ../servers/nextcloud {})
-    nextcloud20 nextcloud21 nextcloud22 nextcloud23;
+    nextcloud21 nextcloud22 nextcloud23;
 
   nextcloud-client = libsForQt5.callPackage ../applications/networking/nextcloud-client { };