 lib/fixed-points.nix | 9
 nixos/modules/services/cluster/kubernetes/addon-manager.nix | 83
 nixos/modules/services/cluster/kubernetes/addons/dashboard.nix | 36
 nixos/modules/services/cluster/kubernetes/apiserver.nix | 69
 nixos/modules/services/cluster/kubernetes/controller-manager.nix | 39
 nixos/modules/services/cluster/kubernetes/default.nix | 26
 nixos/modules/services/cluster/kubernetes/flannel.nix | 68
 nixos/modules/services/cluster/kubernetes/kubelet.nix | 86
 nixos/modules/services/cluster/kubernetes/pki.nix | 165
 nixos/modules/services/cluster/kubernetes/proxy.nix | 37
 nixos/modules/services/cluster/kubernetes/scheduler.nix | 34
 nixos/tests/kubernetes/base.nix | 5
 nixos/tests/kubernetes/dns.nix | 3
 nixos/tests/kubernetes/rbac.nix | 4
 pkgs/applications/editors/vscode/default.nix | 6
 pkgs/applications/misc/stretchly/default.nix | 139
 pkgs/applications/networking/browsers/tor-browser-bundle-bin/default.nix | 10
 pkgs/applications/networking/protonmail-bridge/default.nix | 4
 pkgs/development/compilers/openjdk/11.nix | 10
 pkgs/development/compilers/openjdk/8.nix | 20
 pkgs/development/compilers/scala/dotty-bare.nix | 4
 pkgs/development/python-modules/python-language-server/default.nix | 4
 pkgs/development/python-modules/xdot/default.nix | 4
 pkgs/development/ruby-modules/bundix/default.nix | 4
 pkgs/os-specific/linux/kernel/linux-4.14.nix | 4
 pkgs/os-specific/linux/kernel/linux-4.19.nix | 4
 pkgs/os-specific/linux/kernel/linux-4.9.nix | 4
 pkgs/os-specific/linux/kernel/linux-5.0.nix | 4
 pkgs/os-specific/linux/numatop/default.nix | 27
 pkgs/servers/monitoring/grafana/default.nix | 6
 pkgs/tools/package-management/nixops/default.nix | 21
 pkgs/tools/package-management/nixops/generic.nix | 11
 pkgs/tools/package-management/nixops/nixops-v1_6_1.nix | 31
 pkgs/tools/package-management/nixops/unstable.nix | 23
 pkgs/top-level/all-packages.nix | 8
 35 files changed, 793 insertions(+), 219 deletions(-)
diff --git a/lib/fixed-points.nix b/lib/fixed-points.nix
index 2f818c88de5db..968930526a639 100644
--- a/lib/fixed-points.nix
+++ b/lib/fixed-points.nix
@@ -30,9 +30,12 @@ rec {
   #     nix-repl> converge (x: x / 2) 16
   #     0
   converge = f: x:
-    if (f x) == x
-    then x
-    else converge f (f x);
+    let
+      x' = f x;
+    in
+      if x' == x
+      then x
+      else converge f x';
 
   # Modify the contents of an explicitly recursive attribute set in a way that
   # honors `self`-references. This is accomplished with a function
diff --git a/nixos/modules/services/cluster/kubernetes/addon-manager.nix b/nixos/modules/services/cluster/kubernetes/addon-manager.nix
index 17f2dde31a71e..b9a56811d2bd8 100644
--- a/nixos/modules/services/cluster/kubernetes/addon-manager.nix
+++ b/nixos/modules/services/cluster/kubernetes/addon-manager.nix
@@ -63,18 +63,49 @@ in
     };
 
     enable = mkEnableOption "Whether to enable Kubernetes addon manager.";
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager";
+    bootstrapAddonsKubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager bootstrap";
   };
 
   ###### implementation
-  config = mkIf cfg.enable {
+  config = let
+
+    addonManagerPaths = filter (a: a != null) [
+      cfg.kubeconfig.caFile
+      cfg.kubeconfig.certFile
+      cfg.kubeconfig.keyFile
+    ];
+    bootstrapAddonsPaths = filter (a: a != null) [
+      cfg.bootstrapAddonsKubeconfig.caFile
+      cfg.bootstrapAddonsKubeconfig.certFile
+      cfg.bootstrapAddonsKubeconfig.keyFile
+    ];
+
+  in mkIf cfg.enable {
     environment.etc."kubernetes/addons".source = "${addons}/";
 
+    #TODO: Get rid of kube-addon-manager in the future for the following reasons
+    # - it is basically just a shell script wrapped around kubectl
+    # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
+    # - it is designed to be used with k8s system components only
+    # - it would be better with a more Nix-oriented way of managing addons
     systemd.services.kube-addon-manager = {
       description = "Kubernetes addon manager";
       wantedBy = [ "kubernetes.target" ];
-      after = [ "kube-apiserver.service" ];
-      environment.ADDON_PATH = "/etc/kubernetes/addons/";
-      path = [ pkgs.gawk ];
+      after = [ "kube-node-online.target" ];
+      before = [ "kubernetes.target" ];
+      environment = {
+        ADDON_PATH = "/etc/kubernetes/addons/";
+        KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager" cfg.kubeconfig;
+      };
+      path = with pkgs; [ gawk kubectl ];
+      preStart = ''
+        until kubectl -n kube-system get serviceaccounts/default 2>/dev/null; do
+          echo kubectl -n kube-system get serviceaccounts/default: exit status $?
+          sleep 2
+        done
+      '';
       serviceConfig = {
         Slice = "kubernetes.slice";
         ExecStart = "${top.package}/bin/kube-addons";
@@ -84,8 +115,52 @@ in
         Restart = "on-failure";
         RestartSec = 10;
       };
+      unitConfig.ConditionPathExists = addonManagerPaths;
     };
 
+    systemd.paths.kube-addon-manager = {
+      wantedBy = [ "kube-addon-manager.service" ];
+      pathConfig = {
+        PathExists = addonManagerPaths;
+        PathChanged = addonManagerPaths;
+      };
+    };
+
+    services.kubernetes.addonManager.kubeconfig.server = mkDefault top.apiserverAddress;
+
+    systemd.services.kube-addon-manager-bootstrap = mkIf (top.apiserver.enable && top.addonManager.bootstrapAddons != {}) {
+      wantedBy = [ "kube-control-plane-online.target" ];
+      after = [ "kube-apiserver.service" ];
+      before = [ "kube-control-plane-online.target" ];
+      path = [ pkgs.kubectl ];
+      environment = {
+        KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager-bootstrap" cfg.bootstrapAddonsKubeconfig;
+      };
+      preStart = with pkgs; let
+        files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
+          cfg.bootstrapAddons;
+      in ''
+        until kubectl auth can-i '*' '*' -q 2>/dev/null; do
+          echo kubectl auth can-i '*' '*': exit status $?
+          sleep 2
+        done
+
+        kubectl apply -f ${concatStringsSep " \\\n -f " files}
+      '';
+      script = "echo Ok";
+      unitConfig.ConditionPathExists = bootstrapAddonsPaths;
+    };
+
+    systemd.paths.kube-addon-manager-bootstrap = {
+      wantedBy = [ "kube-addon-manager-bootstrap.service" ];
+      pathConfig = {
+        PathExists = bootstrapAddonsPaths;
+        PathChanged = bootstrapAddonsPaths;
+      };
+    };
+
+    services.kubernetes.addonManager.bootstrapAddonsKubeconfig.server = mkDefault top.apiserverAddress;
+
     services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled
     (let
      name = "system:kube-addon-manager";
diff --git a/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix b/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
index 454e7d35bc01c..2295694ffc740 100644
--- a/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
+++ b/nixos/modules/services/cluster/kubernetes/addons/dashboard.nix
@@ -169,6 +169,23 @@ in {
         };
       };
 
+      kubernetes-dashboard-cm = {
+        apiVersion = "v1";
+        kind = "ConfigMap";
+        metadata = {
+          labels = {
+            k8s-app = "kubernetes-dashboard";
+            # Allows editing resource and makes sure it is created first.
+            "addonmanager.kubernetes.io/mode" = "EnsureExists";
+          };
+          name = "kubernetes-dashboard-settings";
+          namespace = "kube-system";
+        };
+      };
+    };
+
+    services.kubernetes.addonManager.bootstrapAddons = mkMerge [{
+
       kubernetes-dashboard-sa = {
         apiVersion = "v1";
         kind = "ServiceAccount";
@@ -210,20 +227,9 @@ in {
         };
         type = "Opaque";
       };
-      kubernetes-dashboard-cm = {
-        apiVersion = "v1";
-        kind = "ConfigMap";
-        metadata = {
-          labels = {
-            k8s-app = "kubernetes-dashboard";
-            # Allows editing resource and makes sure it is created first.
-            "addonmanager.kubernetes.io/mode" = "EnsureExists";
-          };
-          name = "kubernetes-dashboard-settings";
-          namespace = "kube-system";
-        };
-      };
-    } // (optionalAttrs cfg.rbac.enable
+    }
+
+    (optionalAttrs cfg.rbac.enable
       (let
         subjects = [{
           kind = "ServiceAccount";
@@ -323,6 +329,6 @@ in {
             inherit subjects;
           };
         })
-    ));
+    ))];
   };
 }
diff --git a/nixos/modules/services/cluster/kubernetes/apiserver.nix b/nixos/modules/services/cluster/kubernetes/apiserver.nix
index 455d02396040a..0c04648355b42 100644
--- a/nixos/modules/services/cluster/kubernetes/apiserver.nix
+++ b/nixos/modules/services/cluster/kubernetes/apiserver.nix
@@ -184,6 +184,18 @@ in
       type = bool;
     };
 
+    proxyClientCertFile = mkOption {
+      description = "Client certificate to use for connections to proxy.";
+      default = null;
+      type = nullOr path;
+    };
+
+    proxyClientKeyFile = mkOption {
+      description = "Key to use for connections to proxy.";
+      default = null;
+      type = nullOr path;
+    };
+
     runtimeConfig = mkOption {
       description = ''
         Api runtime configuration. See
@@ -272,11 +284,32 @@ in
   ###### implementation
   config = mkMerge [
 
-    (mkIf cfg.enable {
+    (let
+
+      apiserverPaths = filter (a: a != null) [
+        cfg.clientCaFile
+        cfg.etcd.caFile
+        cfg.etcd.certFile
+        cfg.etcd.keyFile
+        cfg.kubeletClientCaFile
+        cfg.kubeletClientCertFile
+        cfg.kubeletClientKeyFile
+        cfg.serviceAccountKeyFile
+        cfg.tlsCertFile
+        cfg.tlsKeyFile
+      ];
+      etcdPaths = filter (a: a != null) [
+        config.services.etcd.trustedCaFile
+        config.services.etcd.certFile
+        config.services.etcd.keyFile
+      ];
+
+    in mkIf cfg.enable {
         systemd.services.kube-apiserver = {
           description = "Kubernetes APIServer Service";
-          wantedBy = [ "kubernetes.target" ];
-          after = [ "network.target" ];
+          wantedBy = [ "kube-control-plane-online.target" ];
+          after = [ "certmgr.service" ];
+          before = [ "kube-control-plane-online.target" ];
           serviceConfig = {
             Slice = "kubernetes.slice";
             ExecStart = ''${top.package}/bin/kube-apiserver \
@@ -316,6 +349,10 @@ in
                 "--kubelet-client-certificate=${cfg.kubeletClientCertFile}"} \
               ${optionalString (cfg.kubeletClientKeyFile != null)
                 "--kubelet-client-key=${cfg.kubeletClientKeyFile}"} \
+              ${optionalString (cfg.proxyClientCertFile != null)
+                "--proxy-client-cert-file=${cfg.proxyClientCertFile}"} \
+              ${optionalString (cfg.proxyClientKeyFile != null)
+                "--proxy-client-key-file=${cfg.proxyClientKeyFile}"} \
               --insecure-bind-address=${cfg.insecureBindAddress} \
               --insecure-port=${toString cfg.insecurePort} \
               ${optionalString (cfg.runtimeConfig != "")
@@ -341,6 +378,15 @@ in
             Restart = "on-failure";
             RestartSec = 5;
           };
+          unitConfig.ConditionPathExists = apiserverPaths;
+        };
+
+        systemd.paths.kube-apiserver = mkIf top.apiserver.enable {
+          wantedBy = [ "kube-apiserver.service" ];
+          pathConfig = {
+            PathExists = apiserverPaths;
+            PathChanged = apiserverPaths;
+          };
         };
 
         services.etcd = {
@@ -354,6 +400,18 @@ in
           initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"];
         };
 
+        systemd.services.etcd = {
+          unitConfig.ConditionPathExists = etcdPaths;
+        };
+
+        systemd.paths.etcd = {
+          wantedBy = [ "etcd.service" ];
+          pathConfig = {
+            PathExists = etcdPaths;
+            PathChanged = etcdPaths;
+          };
+        };
+
         services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled {
 
           apiserver-kubelet-api-admin-crb = {
@@ -389,6 +447,11 @@ in
                   ] ++ cfg.extraSANs;
           action = "systemctl restart kube-apiserver.service";
         };
+        apiserverProxyClient = mkCert {
+          name = "kube-apiserver-proxy-client";
+          CN = "front-proxy-client";
+          action = "systemctl restart kube-apiserver.service";
+        };
         apiserverKubeletClient = mkCert {
           name = "kube-apiserver-kubelet-client";
           CN = "system:kube-apiserver";
diff --git a/nixos/modules/services/cluster/kubernetes/controller-manager.nix b/nixos/modules/services/cluster/kubernetes/controller-manager.nix
index 060fd9b78db6b..ba56f3fa8274e 100644
--- a/nixos/modules/services/cluster/kubernetes/controller-manager.nix
+++ b/nixos/modules/services/cluster/kubernetes/controller-manager.nix
@@ -104,11 +104,31 @@ in
   };
 
   ###### implementation
-  config = mkIf cfg.enable {
-    systemd.services.kube-controller-manager = {
+  config = let
+
+    controllerManagerPaths = filter (a: a != null) [
+      cfg.kubeconfig.caFile
+      cfg.kubeconfig.certFile
+      cfg.kubeconfig.keyFile
+      cfg.rootCaFile
+      cfg.serviceAccountKeyFile
+      cfg.tlsCertFile
+      cfg.tlsKeyFile
+    ];
+
+  in mkIf cfg.enable {
+    systemd.services.kube-controller-manager = rec {
       description = "Kubernetes Controller Manager Service";
-      wantedBy = [ "kubernetes.target" ];
+      wantedBy = [ "kube-control-plane-online.target" ];
       after = [ "kube-apiserver.service" ];
+      before = [ "kube-control-plane-online.target" ];
+      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig;
+      preStart = ''
+        until kubectl auth can-i get /api -q 2>/dev/null; do
+          echo kubectl auth can-i get /api: exit status $?
+          sleep 2
+        done
+      '';
       serviceConfig = {
         RestartSec = "30s";
         Restart = "on-failure";
@@ -120,7 +140,7 @@ in
             "--cluster-cidr=${cfg.clusterCidr}"} \
           ${optionalString (cfg.featureGates != [])
             "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-          --kubeconfig=${top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig} \
+          --kubeconfig=${environment.KUBECONFIG} \
           --leader-elect=${boolToString cfg.leaderElect} \
           ${optionalString (cfg.rootCaFile!=null)
             "--root-ca-file=${cfg.rootCaFile}"} \
@@ -141,7 +161,16 @@ in
         User = "kubernetes";
         Group = "kubernetes";
       };
-      path = top.path;
+      path = top.path ++ [ pkgs.kubectl ];
+      unitConfig.ConditionPathExists = controllerManagerPaths;
+    };
+
+    systemd.paths.kube-controller-manager = {
+      wantedBy = [ "kube-controller-manager.service" ];
+      pathConfig = {
+        PathExists = controllerManagerPaths;
+        PathChanged = controllerManagerPaths;
+      };
     };
 
     services.kubernetes.pki.certs = with top.lib; {
diff --git a/nixos/modules/services/cluster/kubernetes/default.nix b/nixos/modules/services/cluster/kubernetes/default.nix
index 3e53d18f8bbf3..192c893f8a169 100644
--- a/nixos/modules/services/cluster/kubernetes/default.nix
+++ b/nixos/modules/services/cluster/kubernetes/default.nix
@@ -263,6 +263,30 @@ in {
         wantedBy = [ "multi-user.target" ];
       };
 
+      systemd.targets.kube-control-plane-online = {
+        wantedBy = [ "kubernetes.target" ];
+        before = [ "kubernetes.target" ];
+      };
+
+      systemd.services.kube-control-plane-online = rec {
+        description = "Kubernetes control plane is online";
+        wantedBy = [ "kube-control-plane-online.target" ];
+        after = [ "kube-scheduler.service" "kube-controller-manager.service" ];
+        before = [ "kube-control-plane-online.target" ];
+        environment.KUBECONFIG = cfg.lib.mkKubeConfig "default" cfg.kubeconfig;
+        path = [ pkgs.kubectl ];
+        preStart = ''
+          until kubectl get --raw=/healthz 2>/dev/null; do
+            echo kubectl get --raw=/healthz: exit status $?
+            sleep 3
+          done
+        '';
+        script = "echo Ok";
+        serviceConfig = {
+          TimeoutSec = "500";
+        };
+      };
+
       systemd.tmpfiles.rules = [
         "d /opt/cni/bin 0755 root root -"
         "d /run/kubernetes 0755 kubernetes kubernetes -"
@@ -286,6 +310,8 @@ in {
       services.kubernetes.apiserverAddress = mkDefault ("https://${if cfg.apiserver.advertiseAddress != null
                           then cfg.apiserver.advertiseAddress
                           else "${cfg.masterAddress}:${toString cfg.apiserver.securePort}"}");
+
+      services.kubernetes.kubeconfig.server = mkDefault cfg.apiserverAddress;
     })
   ];
 }
diff --git a/nixos/modules/services/cluster/kubernetes/flannel.nix b/nixos/modules/services/cluster/kubernetes/flannel.nix
index 93ee2fd65eebf..e79fbcb620025 100644
--- a/nixos/modules/services/cluster/kubernetes/flannel.nix
+++ b/nixos/modules/services/cluster/kubernetes/flannel.nix
@@ -24,16 +24,26 @@ in
   ###### interface
   options.services.kubernetes.flannel = {
     enable = mkEnableOption "enable flannel networking";
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes flannel";
   };
 
   ###### implementation
-  config = mkIf cfg.enable {
+  config = let
+
+    flannelPaths = filter (a: a != null) [
+      cfg.kubeconfig.caFile
+      cfg.kubeconfig.certFile
+      cfg.kubeconfig.keyFile
+    ];
+    kubeconfig = top.lib.mkKubeConfig "flannel" cfg.kubeconfig;
+
+  in mkIf cfg.enable {
     services.flannel = {
 
       enable = mkDefault true;
       network = mkDefault top.clusterCidr;
-      inherit storageBackend;
-      nodeName = config.services.kubernetes.kubelet.hostname;
+      inherit storageBackend kubeconfig;
+      nodeName = top.kubelet.hostname;
     };
 
     services.kubernetes.kubelet = {
@@ -48,24 +58,66 @@ in
       }];
     };
 
-    systemd.services."mk-docker-opts" = {
+    systemd.services.mk-docker-opts = {
       description = "Pre-Docker Actions";
+      wantedBy = [ "flannel.target" ];
+      before = [ "flannel.target" ];
       path = with pkgs; [ gawk gnugrep ];
       script = ''
         ${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker
         systemctl restart docker
       '';
+      unitConfig.ConditionPathExists = [ "/run/flannel/subnet.env" ];
       serviceConfig.Type = "oneshot";
     };
 
-    systemd.paths."flannel-subnet-env" = {
-      wantedBy = [ "flannel.service" ];
+    systemd.paths.flannel-subnet-env = {
+      wantedBy = [ "mk-docker-opts.service" ];
       pathConfig = {
-        PathModified = "/run/flannel/subnet.env";
+        PathExists = [ "/run/flannel/subnet.env" ];
+        PathChanged = [ "/run/flannel/subnet.env" ];
         Unit = "mk-docker-opts.service";
       };
     };
 
+    systemd.targets.flannel = {
+      wantedBy = [ "kube-node-online.target" ];
+      before = [ "kube-node-online.target" ];
+    };
+
+    systemd.services.flannel = {
+      wantedBy = [ "flannel.target" ];
+      after = [ "kubelet.target" ];
+      before = [ "flannel.target" ];
+      path = with pkgs; [ iptables kubectl ];
+      environment.KUBECONFIG = kubeconfig;
+      preStart = let
+        args = [
+          "--selector=kubernetes.io/hostname=${top.kubelet.hostname}"
+          # flannel exits if node is not registered yet, before that there is no podCIDR
+          "--output=jsonpath={.items[0].spec.podCIDR}"
+          # if jsonpath cannot be resolved exit with status 1
+          "--allow-missing-template-keys=false"
+        ];
+      in ''
+        until kubectl get nodes ${concatStringsSep " " args} 2>/dev/null; do
+          echo Waiting for ${top.kubelet.hostname} to be RegisteredNode
+          sleep 1
+        done
+      '';
+      unitConfig.ConditionPathExists = flannelPaths;
+    };
+
+    systemd.paths.flannel = {
+      wantedBy = [ "flannel.service" ];
+      pathConfig = {
+        PathExists = flannelPaths;
+        PathChanged = flannelPaths;
+      };
+    };
+
+    services.kubernetes.flannel.kubeconfig.server = mkDefault top.apiserverAddress;
+
     systemd.services.docker = {
       environment.DOCKER_OPTS = "-b none";
       serviceConfig.EnvironmentFile = "-/run/flannel/docker";
@@ -92,7 +144,6 @@ in
 
     # give flannel some kubernetes rbac permissions if applicable
     services.kubernetes.addonManager.bootstrapAddons = mkIf ((storageBackend == "kubernetes") && (elem "RBAC" top.apiserver.authorizationMode)) {
-
       flannel-cr = {
         apiVersion = "rbac.authorization.k8s.io/v1beta1";
         kind = "ClusterRole";
@@ -128,7 +179,6 @@ in
           name = "flannel-client";
         }];
       };
-
     };
   };
 }
diff --git a/nixos/modules/services/cluster/kubernetes/kubelet.nix b/nixos/modules/services/cluster/kubernetes/kubelet.nix
index c94bb28bf7fb5..2a4a0624555d8 100644
--- a/nixos/modules/services/cluster/kubernetes/kubelet.nix
+++ b/nixos/modules/services/cluster/kubernetes/kubelet.nix
@@ -241,21 +241,28 @@ in
 
   ###### implementation
   config = mkMerge [
-    (mkIf cfg.enable {
+    (let
+
+      kubeletPaths = filter (a: a != null) [
+        cfg.kubeconfig.caFile
+        cfg.kubeconfig.certFile
+        cfg.kubeconfig.keyFile
+        cfg.clientCaFile
+        cfg.tlsCertFile
+        cfg.tlsKeyFile
+      ];
+
+    in mkIf cfg.enable {
       services.kubernetes.kubelet.seedDockerImages = [infraContainer];
 
       systemd.services.kubelet = {
         description = "Kubernetes Kubelet Service";
-        wantedBy = [ "kubernetes.target" ];
-        after = [ "network.target" "docker.service" "kube-apiserver.service" ];
+        wantedBy = [ "kubelet.target" ];
+        after = [ "kube-control-plane-online.target" ];
+        before = [ "kubelet.target" ];
         path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ top.path;
         preStart = ''
-          ${concatMapStrings (img: ''
-            echo "Seeding docker image: ${img}"
-            docker load <${img}
-          '') cfg.seedDockerImages}
-
-          rm /opt/cni/bin/* || true
+          rm -f /opt/cni/bin/* || true
           ${concatMapStrings (package: ''
             echo "Linking cni package: ${package}"
             ln -fs ${package}/bin/* /opt/cni/bin
@@ -308,6 +315,56 @@ in
           '';
           WorkingDirectory = top.dataDir;
         };
+        unitConfig.ConditionPathExists = kubeletPaths;
+      };
+
+      systemd.paths.kubelet = {
+        wantedBy =  [ "kubelet.service" ];
+        pathConfig = {
+          PathExists = kubeletPaths;
+          PathChanged = kubeletPaths;
+        };
+      };
+
+      systemd.services.docker.before = [ "kubelet.service" ];
+
+      systemd.services.docker-seed-images = {
+        wantedBy = [ "docker.service" ];
+        after = [ "docker.service" ];
+        before = [ "kubelet.service" ];
+        path = with pkgs; [ docker ];
+        preStart = ''
+          ${concatMapStrings (img: ''
+            echo "Seeding docker image: ${img}"
+            docker load <${img}
+          '') cfg.seedDockerImages}
+        '';
+        script = "echo Ok";
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+        serviceConfig.Slice = "kubernetes.slice";
+      };
+
+      systemd.services.kubelet-online = {
+        wantedBy = [ "kube-node-online.target" ];
+        after = [ "flannel.target" "kubelet.target" ];
+        before = [ "kube-node-online.target" ];
+        # it is complicated. flannel needs kubelet to run the pause container before
+        # it discusses the node CIDR with apiserver and afterwards configures and restarts
+        # dockerd. Until then prevent creating any pods because they have to be recreated anyway
+        # because the network of docker0 has been changed by flannel.
+        script = let
+          docker-env = "/run/flannel/docker";
+          flannel-date = "stat --print=%Y ${docker-env}";
+          docker-date = "systemctl show --property=ActiveEnterTimestamp --value docker";
+        in ''
+          until test -f ${docker-env} ; do sleep 1 ; done
+          while test `${flannel-date}` -gt `date +%s --date="$(${docker-date})"` ; do
+            sleep 1
+          done
+        '';
+        serviceConfig.Type = "oneshot";
+        serviceConfig.Slice = "kubernetes.slice";
       };
 
       # Always include cni plugins
@@ -354,5 +411,16 @@ in
       };
     })
 
+    {
+      systemd.targets.kubelet = {
+        wantedBy = [ "kube-node-online.target" ];
+        before = [ "kube-node-online.target" ];
+      };
+
+      systemd.targets.kube-node-online = {
+        wantedBy = [ "kubernetes.target" ];
+        before = [ "kubernetes.target" ];
+      };
+    }
   ];
 }
diff --git a/nixos/modules/services/cluster/kubernetes/pki.nix b/nixos/modules/services/cluster/kubernetes/pki.nix
index 38deca23a990f..75a29473cea4e 100644
--- a/nixos/modules/services/cluster/kubernetes/pki.nix
+++ b/nixos/modules/services/cluster/kubernetes/pki.nix
@@ -27,12 +27,11 @@ let
   certmgrAPITokenPath = "${top.secretsPath}/${cfsslAPITokenBaseName}";
   cfsslAPITokenLength = 32;
 
-  clusterAdminKubeconfig = with cfg.certs.clusterAdmin;
-    top.lib.mkKubeConfig "cluster-admin" {
-        server = top.apiserverAddress;
-        certFile = cert;
-        keyFile = key;
-    };
+  clusterAdminKubeconfig = with cfg.certs.clusterAdmin; {
+    server = top.apiserverAddress;
+    certFile = cert;
+    keyFile = key;
+  };
 
   remote = with config.services; "https://${kubernetes.masterAddress}:${toString cfssl.port}";
 in
@@ -119,6 +118,12 @@ in
     cfsslCertPathPrefix = "${config.services.cfssl.dataDir}/cfssl";
     cfsslCert = "${cfsslCertPathPrefix}.pem";
     cfsslKey = "${cfsslCertPathPrefix}-key.pem";
+    cfsslPort = toString config.services.cfssl.port;
+
+    certmgrPaths = [
+      top.caFile
+      certmgrAPITokenPath
+    ];
   in
   {
 
@@ -168,13 +173,40 @@ in
         chown cfssl "${cfsslAPITokenPath}" && chmod 400 "${cfsslAPITokenPath}"
       '')]);
 
+    systemd.targets.cfssl-online = {
+      wantedBy = [ "network-online.target" ];
+      after = [ "cfssl.service" "network-online.target" "cfssl-online.service" ];
+    };
+
+    systemd.services.cfssl-online = {
+      description = "Wait for ${remote} to be reachable.";
+      wantedBy = [ "cfssl-online.target" ];
+      before = [ "cfssl-online.target" ];
+      path = [ pkgs.curl ];
+      preStart = ''
+        until curl --fail-early -fskd '{}' ${remote}/api/v1/cfssl/info -o /dev/null; do
+          echo curl ${remote}/api/v1/cfssl/info: exit status $?
+          sleep 2
+        done
+      '';
+      script = "echo Ok";
+      serviceConfig = {
+        TimeoutSec = "300";
+      };
+    };
+
     systemd.services.kube-certmgr-bootstrap = {
       description = "Kubernetes certmgr bootstrapper";
-      wantedBy = [ "certmgr.service" ];
-      after = [ "cfssl.target" ];
+      wantedBy = [ "cfssl-online.target" ];
+      after = [ "cfssl-online.target" ];
+      before = [ "certmgr.service" ];
+      path = with pkgs; [ curl cfssl ];
       script = concatStringsSep "\n" [''
         set -e
 
+        mkdir -p $(dirname ${certmgrAPITokenPath})
+        mkdir -p $(dirname ${top.caFile})
+
         # If there's a cfssl (cert issuer) running locally, then don't rely on user to
         # manually paste it in place. Just symlink.
         # otherwise, create the target file, ready for users to insert the token
@@ -186,15 +218,18 @@ in
         fi
       ''
       (optionalString (cfg.pkiTrustOnBootstrap) ''
-        if [ ! -f "${top.caFile}" ] || [ $(cat "${top.caFile}" | wc -c) -lt 1 ]; then
-          ${pkgs.curl}/bin/curl --fail-early -f -kd '{}' ${remote}/api/v1/cfssl/info | \
-            ${pkgs.cfssl}/bin/cfssljson -stdout >${top.caFile}
+        if [ ! -s "${top.caFile}" ]; then
+          until test -s ${top.caFile}.json; do
+            sleep 2
+            curl --fail-early -fskd '{}' ${remote}/api/v1/cfssl/info -o ${top.caFile}.json
+          done
+          cfssljson -f ${top.caFile}.json -stdout >${top.caFile}
+          rm ${top.caFile}.json
         fi
       '')
       ];
       serviceConfig = {
-        RestartSec = "10s";
-        Restart = "on-failure";
+        TimeoutSec = "500";
       };
     };
 
@@ -230,35 +265,28 @@ in
           mapAttrs mkSpec cfg.certs;
       };
 
-      #TODO: Get rid of kube-addon-manager in the future for the following reasons
-      # - it is basically just a shell script wrapped around kubectl
-      # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
-      # - it is designed to be used with k8s system components only
-      # - it would be better with a more Nix-oriented way of managing addons
-      systemd.services.kube-addon-manager = mkIf top.addonManager.enable (mkMerge [{
-        environment.KUBECONFIG = with cfg.certs.addonManager;
-          top.lib.mkKubeConfig "addon-manager" {
-            server = top.apiserverAddress;
-            certFile = cert;
-            keyFile = key;
-          };
-        }
-
-        (optionalAttrs (top.addonManager.bootstrapAddons != {}) {
-          serviceConfig.PermissionsStartOnly = true;
-          preStart = with pkgs;
-          let
-            files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
-              top.addonManager.bootstrapAddons;
-          in
-          ''
-            export KUBECONFIG=${clusterAdminKubeconfig}
-            ${kubectl}/bin/kubectl apply -f ${concatStringsSep " \\\n -f " files}
-          '';
-        })]);
+      systemd.services.certmgr = {
+        wantedBy = [ "cfssl-online.target" ];
+        after = [ "cfssl-online.target" "kube-certmgr-bootstrap.service" ];
+        preStart = ''
+          while ! test -s ${certmgrAPITokenPath} ; do
+            sleep 1
+            echo Waiting for ${certmgrAPITokenPath}
+          done
+        '';
+        unitConfig.ConditionPathExists = certmgrPaths;
+      };
+
+      systemd.paths.certmgr = {
+        wantedBy = [ "certmgr.service" ];
+        pathConfig = {
+          PathExists = certmgrPaths;
+          PathChanged = certmgrPaths;
+        };
+      };
 
       environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (!isNull cfg.etcClusterAdminKubeconfig)
-        clusterAdminKubeconfig;
+        (top.lib.mkKubeConfig "cluster-admin" clusterAdminKubeconfig);
 
       environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [
       (pkgs.writeScriptBin "nixos-kubernetes-node-join" ''
@@ -284,38 +312,22 @@ in
           exit 1
         fi
 
+        do_restart=$(test -s ${certmgrAPITokenPath} && echo -n y || echo -n n)
+
         echo $token > ${certmgrAPITokenPath}
         chmod 600 ${certmgrAPITokenPath}
 
-        echo "Restarting certmgr..." >&1
-        systemctl restart certmgr
-
-        echo "Waiting for certs to appear..." >&1
-
-        ${optionalString top.kubelet.enable ''
-          while [ ! -f ${cfg.certs.kubelet.cert} ]; do sleep 1; done
-          echo "Restarting kubelet..." >&1
-          systemctl restart kubelet
-        ''}
-
-        ${optionalString top.proxy.enable ''
-          while [ ! -f ${cfg.certs.kubeProxyClient.cert} ]; do sleep 1; done
-          echo "Restarting kube-proxy..." >&1
-          systemctl restart kube-proxy
-        ''}
-
-        ${optionalString top.flannel.enable ''
-          while [ ! -f ${cfg.certs.flannelClient.cert} ]; do sleep 1; done
-          echo "Restarting flannel..." >&1
-          systemctl restart flannel
-        ''}
+        if [ y = $do_restart ]; then
+          echo "Restarting certmgr..." >&1
+          systemctl restart certmgr
+        fi
 
-        echo "Node joined successfully"
+        echo "Node joined successfully" >&1
       '')];
 
       # isolate etcd on loopback at the master node
       # easyCerts doesn't support multimaster clusters anyway atm.
-      services.etcd = with cfg.certs.etcd; {
+      services.etcd = mkIf top.apiserver.enable (with cfg.certs.etcd; {
         listenClientUrls = ["https://127.0.0.1:2379"];
         listenPeerUrls = ["https://127.0.0.1:2380"];
         advertiseClientUrls = ["https://etcd.local:2379"];
@@ -324,19 +336,11 @@ in
         certFile = mkDefault cert;
         keyFile = mkDefault key;
         trustedCaFile = mkDefault caCert;
-      };
+      });
       networking.extraHosts = mkIf (config.services.etcd.enable) ''
         127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
       '';
 
-      services.flannel = with cfg.certs.flannelClient; {
-        kubeconfig = top.lib.mkKubeConfig "flannel" {
-          server = top.apiserverAddress;
-          certFile = cert;
-          keyFile = key;
-        };
-      };
-
       services.kubernetes = {
 
         apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; {
@@ -353,7 +357,16 @@ in
           kubeletClientCaFile = mkDefault caCert;
           kubeletClientCertFile = mkDefault cfg.certs.apiserverKubeletClient.cert;
           kubeletClientKeyFile = mkDefault cfg.certs.apiserverKubeletClient.key;
+          proxyClientCertFile = mkDefault cfg.certs.apiserverProxyClient.cert;
+          proxyClientKeyFile = mkDefault cfg.certs.apiserverProxyClient.key;
         });
+        addonManager = mkIf top.addonManager.enable {
+          kubeconfig = with cfg.certs.addonManager; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+          bootstrapAddonsKubeconfig = clusterAdminKubeconfig;
+        };
         controllerManager = mkIf top.controllerManager.enable {
           serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.key;
           rootCaFile = cfg.certs.controllerManagerClient.caCert;
@@ -362,6 +375,12 @@ in
             keyFile = mkDefault key;
           };
         };
+        flannel = mkIf top.flannel.enable {
+          kubeconfig = with cfg.certs.flannelClient; {
+            certFile = cert;
+            keyFile = key;
+          };
+        };
         scheduler = mkIf top.scheduler.enable {
           kubeconfig = with cfg.certs.schedulerClient; {
             certFile = mkDefault cert;
diff --git a/nixos/modules/services/cluster/kubernetes/proxy.nix b/nixos/modules/services/cluster/kubernetes/proxy.nix
index 83cd3e2310009..8a90542fe633c 100644
--- a/nixos/modules/services/cluster/kubernetes/proxy.nix
+++ b/nixos/modules/services/cluster/kubernetes/proxy.nix
@@ -45,12 +45,28 @@ in
   };
 
   ###### implementation
-  config = mkIf cfg.enable {
-    systemd.services.kube-proxy = {
+  config = let
+
+    proxyPaths = filter (a: a != null) [
+      cfg.kubeconfig.caFile
+      cfg.kubeconfig.certFile
+      cfg.kubeconfig.keyFile
+    ];
+
+  in mkIf cfg.enable {
+    systemd.services.kube-proxy = rec {
       description = "Kubernetes Proxy Service";
-      wantedBy = [ "kubernetes.target" ];
-      after = [ "kube-apiserver.service" ];
-      path = with pkgs; [ iptables conntrack_tools ];
+      wantedBy = [ "kube-node-online.target" ];
+      after = [ "kubelet-online.service" ];
+      before = [ "kube-node-online.target" ];
+      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig;
+      path = with pkgs; [ iptables conntrack_tools kubectl ];
+      preStart = ''
+        until kubectl auth can-i get nodes/${top.kubelet.hostname} -q 2>/dev/null; do
+          echo kubectl auth can-i get nodes/${top.kubelet.hostname}: exit status $?
+          sleep 2
+        done
+      '';
       serviceConfig = {
         Slice = "kubernetes.slice";
         ExecStart = ''${top.package}/bin/kube-proxy \
@@ -59,7 +75,7 @@ in
             "--cluster-cidr=${top.clusterCidr}"} \
           ${optionalString (cfg.featureGates != [])
             "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-          --kubeconfig=${top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig} \
+          --kubeconfig=${environment.KUBECONFIG} \
           ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
           ${cfg.extraOpts}
         '';
@@ -67,6 +83,15 @@ in
         Restart = "on-failure";
         RestartSec = 5;
       };
+      unitConfig.ConditionPathExists = proxyPaths;
+    };
+
+    systemd.paths.kube-proxy = {
+      wantedBy = [ "kube-proxy.service" ];
+      pathConfig = {
+        PathExists = proxyPaths;
+        PathChanged = proxyPaths;
+      };
     };
 
     services.kubernetes.pki.certs = {
diff --git a/nixos/modules/services/cluster/kubernetes/scheduler.nix b/nixos/modules/services/cluster/kubernetes/scheduler.nix
index 0305b9aefe59c..d585282595473 100644
--- a/nixos/modules/services/cluster/kubernetes/scheduler.nix
+++ b/nixos/modules/services/cluster/kubernetes/scheduler.nix
@@ -56,18 +56,35 @@ in
   };
 
   ###### implementation
-  config = mkIf cfg.enable {
-    systemd.services.kube-scheduler = {
+  config =  let
+
+    schedulerPaths = filter (a: a != null) [
+      cfg.kubeconfig.caFile
+      cfg.kubeconfig.certFile
+      cfg.kubeconfig.keyFile
+    ];
+
+  in mkIf cfg.enable {
+    systemd.services.kube-scheduler = rec {
       description = "Kubernetes Scheduler Service";
-      wantedBy = [ "kubernetes.target" ];
+      wantedBy = [ "kube-control-plane-online.target" ];
       after = [ "kube-apiserver.service" ];
+      before = [ "kube-control-plane-online.target" ];
+      environment.KUBECONFIG = top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig;
+      path = [ pkgs.kubectl ];
+      preStart = ''
+        until kubectl auth can-i get /api -q 2>/dev/null; do
+          echo kubectl auth can-i get /api: exit status $?
+          sleep 2
+        done
+      '';
       serviceConfig = {
         Slice = "kubernetes.slice";
         ExecStart = ''${top.package}/bin/kube-scheduler \
           --address=${cfg.address} \
           ${optionalString (cfg.featureGates != [])
             "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
-          --kubeconfig=${top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig} \
+          --kubeconfig=${environment.KUBECONFIG} \
           --leader-elect=${boolToString cfg.leaderElect} \
           --port=${toString cfg.port} \
           ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
@@ -79,6 +96,15 @@ in
         Restart = "on-failure";
         RestartSec = 5;
       };
+      unitConfig.ConditionPathExists = schedulerPaths;
+    };
+
+    systemd.paths.kube-scheduler = {
+      wantedBy = [ "kube-scheduler.service" ];
+      pathConfig = {
+        PathExists = schedulerPaths;
+        PathChanged = schedulerPaths;
+      };
     };
 
     services.kubernetes.pki.certs = {
diff --git a/nixos/tests/kubernetes/base.nix b/nixos/tests/kubernetes/base.nix
index ec1a75e74c413..212023859f6d2 100644
--- a/nixos/tests/kubernetes/base.nix
+++ b/nixos/tests/kubernetes/base.nix
@@ -30,7 +30,10 @@ let
         { config, pkgs, lib, nodes, ... }:
           mkMerge [
             {
-              boot.postBootCommands = "rm -fr /var/lib/kubernetes/secrets /tmp/shared/*";
+              boot = {
+                postBootCommands = "rm -fr /var/lib/kubernetes/secrets /tmp/shared/*";
+                kernel.sysctl = { "fs.inotify.max_user_instances" = 256; };
+              };
               virtualisation.memorySize = mkDefault 1536;
               virtualisation.diskSize = mkDefault 4096;
               networking = {
diff --git a/nixos/tests/kubernetes/dns.nix b/nixos/tests/kubernetes/dns.nix
index 46bcb01a52652..e7db0a58ab616 100644
--- a/nixos/tests/kubernetes/dns.nix
+++ b/nixos/tests/kubernetes/dns.nix
@@ -77,6 +77,7 @@ let
   singleNodeTest = {
     test = ''
       # prepare machine1 for test
+      $machine1->waitForUnit("kubernetes.target");
       $machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
       $machine1->waitUntilSucceeds("docker load < ${redisImage}");
       $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
@@ -102,6 +103,8 @@ let
       # Node token exchange
       $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
       $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
+      $machine1->waitForUnit("kubernetes.target");
+      $machine2->waitForUnit("kubernetes.target");
 
       # prepare machines for test
       $machine1->waitUntilSucceeds("kubectl get node machine2.${domain} | grep -w Ready");
diff --git a/nixos/tests/kubernetes/rbac.nix b/nixos/tests/kubernetes/rbac.nix
index 3ce7adcd0d717..967fe506004fd 100644
--- a/nixos/tests/kubernetes/rbac.nix
+++ b/nixos/tests/kubernetes/rbac.nix
@@ -94,6 +94,8 @@ let
 
   singlenode = base // {
     test = ''
+      $machine1->waitForUnit("kubernetes.target");
+
       $machine1->waitUntilSucceeds("kubectl get node machine1.my.zyx | grep -w Ready");
 
       $machine1->waitUntilSucceeds("docker load < ${kubectlImage}");
@@ -116,6 +118,8 @@ let
       # Node token exchange
       $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
       $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");
+      $machine1->waitForUnit("kubernetes.target");
+      $machine2->waitForUnit("kubernetes.target");
 
       $machine1->waitUntilSucceeds("kubectl get node machine2.my.zyx | grep -w Ready");
 
diff --git a/pkgs/applications/editors/vscode/default.nix b/pkgs/applications/editors/vscode/default.nix
index 189c49cde35a2..67c1ddf175fc4 100644
--- a/pkgs/applications/editors/vscode/default.nix
+++ b/pkgs/applications/editors/vscode/default.nix
@@ -108,6 +108,10 @@ in
 
         mkdir -p $out/share/pixmaps
         cp $out/lib/vscode/resources/app/resources/linux/code.png $out/share/pixmaps/code.png
+
+        # Override the previously determined VSCODE_PATH with the one we know to be correct
+        sed -i "/ELECTRON=/iVSCODE_PATH='$out/lib/vscode'" $out/bin/code
+        grep -q "VSCODE_PATH='$out/lib/vscode'" $out/bin/code # check if sed succeeded
       '';
 
     preFixup = lib.optionalString (system == "i686-linux" || system == "x86_64-linux") ''
@@ -129,7 +133,7 @@ in
       homepage = https://code.visualstudio.com/;
       downloadPage = https://code.visualstudio.com/Updates;
       license = licenses.unfree;
-      maintainers = with maintainers; [ eadwu ];
+      maintainers = with maintainers; [ eadwu synthetica ];
       platforms = [ "i686-linux" "x86_64-linux" "x86_64-darwin" ];
     };
   }
diff --git a/pkgs/applications/misc/stretchly/default.nix b/pkgs/applications/misc/stretchly/default.nix
new file mode 100644
index 0000000000000..8f26ee99e227d
--- /dev/null
+++ b/pkgs/applications/misc/stretchly/default.nix
@@ -0,0 +1,139 @@
+{ GConf
+, alsaLib
+, at-spi2-atk
+, atk
+, cairo
+, cups
+, dbus
+, expat
+, fetchurl
+, fontconfig
+, gdk_pixbuf
+, glib
+, gtk2
+, gtk3
+, lib
+, libX11
+, libXScrnSaver
+, libXcomposite
+, libXcursor
+, libXdamage
+, libXext
+, libXfixes
+, libXi
+, libXrandr
+, libXrender
+, libXtst
+, libappindicator
+, libdrm
+, libnotify
+, libpciaccess
+, libpng12
+, libxcb
+, nspr
+, nss
+, pango
+, pciutils
+, pulseaudio
+, stdenv
+, udev
+, wrapGAppsHook
+}:
+
+let
+  libs = [
+    GConf
+    alsaLib
+    at-spi2-atk
+    atk
+    cairo
+    cups
+    dbus
+    expat
+    fontconfig
+    gdk_pixbuf
+    glib
+    gtk2
+    gtk3
+    libX11
+    libXScrnSaver
+    libXcomposite
+    libXcursor
+    libXdamage
+    libXext
+    libXfixes
+    libXi
+    libXrandr
+    libXrender
+    libXtst
+    libappindicator
+    libdrm
+    libnotify
+    libpciaccess
+    libpng12
+    libxcb
+    nspr
+    nss
+    pango
+    pciutils
+    pulseaudio
+    stdenv.cc.cc.lib
+    udev
+  ];
+
+  libPath = lib.makeLibraryPath libs;
+in
+
+stdenv.mkDerivation rec {
+  pname = "stretchly";
+  version = "0.19.1";
+
+  src = fetchurl {
+    url = "https://github.com/hovancik/stretchly/releases/download/v${version}/stretchly-${version}.tar.xz";
+    sha256 = "1q2wxfqs8qv9b1rfh5lhmyp3rrgdl05m6ihsgkxlgp0yzi07afz8";
+  };
+
+  nativeBuildInputs = [
+    wrapGAppsHook
+  ];
+
+  buildInputs = libs;
+
+  dontPatchELF = true;
+  dontBuild = true;
+  dontConfigure = true;
+
+  installPhase = ''
+    mkdir -p $out/bin $out/lib/stretchly
+    cp -r ./* $out/lib/stretchly/
+    ln -s $out/lib/stretchly/libffmpeg.so $out/lib/
+    ln -s $out/lib/stretchly/libnode.so $out/lib/
+    ln -s $out/lib/stretchly/stretchly $out/bin/
+  '';
+
+  preFixup = ''
+    patchelf --set-rpath "${libPath}" $out/lib/stretchly/libffmpeg.so
+    patchelf --set-rpath "${libPath}" $out/lib/stretchly/libnode.so
+
+    patchelf \
+      --set-rpath "$out/lib/stretchly:${libPath}" \
+      --set-interpreter "$(cat $NIX_CC/nix-support/dynamic-linker)" \
+      $out/lib/stretchly/stretchly
+  '';
+
+  meta = with stdenv.lib; {
+    description = "A break time reminder app";
+    longDescription = ''
+      stretchly is a cross-platform electron app that reminds you to take
+      breaks when working on your computer. By default, it runs in your tray
+      and displays a reminder window containing an idea for a microbreak for 20
+      seconds every 10 minutes. Every 30 minutes, it displays a window
+      containing an idea for a longer 5 minute break.
+    '';
+    homepage = https://hovancik.net/stretchly;
+    downloadPage = https://hovancik.net/stretchly/downloads/;
+    license = licenses.bsd2;
+    maintainers = with maintainers; [ cdepillabout ];
+    platforms = platforms.linux;
+  };
+}
diff --git a/pkgs/applications/networking/browsers/tor-browser-bundle-bin/default.nix b/pkgs/applications/networking/browsers/tor-browser-bundle-bin/default.nix
index 283c63dcd6fc4..f37677b05213b 100644
--- a/pkgs/applications/networking/browsers/tor-browser-bundle-bin/default.nix
+++ b/pkgs/applications/networking/browsers/tor-browser-bundle-bin/default.nix
@@ -302,10 +302,12 @@ stdenv.mkDerivation rec {
     # easily generated by firefox at startup.
     rm -f "\$HOME/TorBrowser/Data/Browser/profile.default"/{compatibility.ini,extensions.ini,extensions.json}
 
+    # XDG
+    : "\''${XDG_RUNTIME_DIR:=/run/user/\$(id -u)}"
+    : "\''${XDG_CONFIG_HOME:=\$REAL_HOME/.config}"
+
     ${optionalString pulseaudioSupport ''
       # Figure out some envvars for pulseaudio
-      : "\''${XDG_RUNTIME_DIR:=/run/user/\$(id -u)}"
-      : "\''${XDG_CONFIG_HOME:=\$REAL_HOME/.config}"
       : "\''${PULSE_SERVER:=\$XDG_RUNTIME_DIR/pulse/native}"
       : "\''${PULSE_COOKIE:=\$XDG_CONFIG_HOME/pulse/cookie}"
     ''}
@@ -336,9 +338,9 @@ stdenv.mkDerivation rec {
       \
       TMPDIR="\''${TMPDIR:-/tmp}" \
       HOME="\$HOME" \
-      XAUTHORITY="\''${XAUTHORITY:-}" \
+      XAUTHORITY="\''${XAUTHORITY:-\$HOME/.Xauthority}" \
       DISPLAY="\$DISPLAY" \
-      DBUS_SESSION_BUS_ADDRESS="\$DBUS_SESSION_BUS_ADDRESS" \
+      DBUS_SESSION_BUS_ADDRESS="\''${DBUS_SESSION_BUS_ADDRESS:-unix:path=\$XDG_RUNTIME_DIR/bus}" \\
       \
       XDG_DATA_HOME="\$HOME/.local/share" \
       XDG_DATA_DIRS="$WRAPPER_XDG_DATA_DIRS" \
diff --git a/pkgs/applications/networking/protonmail-bridge/default.nix b/pkgs/applications/networking/protonmail-bridge/default.nix
index eac1acb1e6a30..2e04ce856a8c1 100644
--- a/pkgs/applications/networking/protonmail-bridge/default.nix
+++ b/pkgs/applications/networking/protonmail-bridge/default.nix
@@ -2,7 +2,7 @@
   libsecret, libGL, libpulseaudio, glib, makeWrapper, makeDesktopItem }:
 
 let
-  version = "1.1.3-1";
+  version = "1.1.4-1";
 
   description = ''
     An application that runs on your computer in the background and seamlessly encrypts
@@ -25,7 +25,7 @@ in stdenv.mkDerivation rec {
 
   src = fetchurl {
     url = "https://protonmail.com/download/protonmail-bridge_${version}_amd64.deb";
-    sha256 = "15kyhyx1v4bb2sqzwq7bqrxxw7g733p5jnsgjqka2ygzg6dl2c5n";
+    sha256 = "16w3l81j10syl2pis08sl752yapbgjy531qs0n1ghmsx2d12n7kl";
   };
 
   nativeBuildInputs = [ makeWrapper ];
diff --git a/pkgs/development/compilers/openjdk/11.nix b/pkgs/development/compilers/openjdk/11.nix
index f2a566c87df61..5840061ed53e5 100644
--- a/pkgs/development/compilers/openjdk/11.nix
+++ b/pkgs/development/compilers/openjdk/11.nix
@@ -18,16 +18,16 @@ let
     else "amd64";
 
   major = "11";
-  update = ".0.2";
-  build = "9";
-  repover = "jdk-${major}${update}+${build}";
+  update = ".0.3";
+  build = "ga";
+  repover = "jdk-${major}${update}-${build}";
 
   openjdk = stdenv.mkDerivation {
-    name = "openjdk-${major}${update}-b${build}";
+    name = "openjdk-${major}${update}-${build}";
 
     src = fetchurl {
       url = "http://hg.openjdk.java.net/jdk-updates/jdk${major}u/archive/${repover}.tar.gz";
-      sha256 = "0xc7nksvj72cgw8zrmvlcwaasinpij1j1959398a4nqvzpvpxg30";
+      sha256 = "1v6pam38iidlhz46046h17hf5kki6n3kl302awjcyxzk7bmkvb8x";
     };
 
     nativeBuildInputs = [ pkgconfig ];
diff --git a/pkgs/development/compilers/openjdk/8.nix b/pkgs/development/compilers/openjdk/8.nix
index a3a0a9d593467..832954dd67f21 100644
--- a/pkgs/development/compilers/openjdk/8.nix
+++ b/pkgs/development/compilers/openjdk/8.nix
@@ -21,44 +21,44 @@ let
     else
       throw "openjdk requires i686-linux or x86_64 linux";
 
-  update = "202";
+  update = "212";
   build = "ga";
   baseurl = "http://hg.openjdk.java.net/jdk8u/jdk8u";
   repover = "jdk8u${update}-${build}";
   jdk8 = fetchurl {
              url = "${baseurl}/archive/${repover}.tar.gz";
-             sha256 = "0asx7qkhmrlfmhrljck5gb3yp4v0aa8k35y4xfcph41x0m0mvrdb";
+             sha256 = "00rl33h4cl4b4p3hcid765h38x2jdkb14ylh8k1zhnd0ka76crgg";
           };
   langtools = fetchurl {
              url = "${baseurl}/langtools/archive/${repover}.tar.gz";
-             sha256 = "07q6l3slmi5fgwjnsk6bd8miv8glmw15w5f6yyvp8nlp2d54l33n";
+             sha256 = "0va6g2dccf1ph6mpwxswbks5axp7zz258cl89qq9r8jn4ni04agw";
           };
   hotspot = fetchurl {
              url = "${baseurl}/hotspot/archive/${repover}.tar.gz";
-             sha256 = "01k4pwhn3nmkzdhdj1v58dgir4iwsj9mm2ml1541z31s53g037cq";
+             sha256 = "0sgr9df10hs49pjld6c6kr374v4zwd9s52pc3drz68zrlk71ja4s";
           };
   corba = fetchurl {
              url = "${baseurl}/corba/archive/${repover}.tar.gz";
-             sha256 = "0v39kl2iiyh74p3cp6bjhshkwxpgbffza9abzjgp7cpdfhcc73p0";
+             sha256 = "1hq0sr4k4k4iv815kg72i9lvd7n7mn5pmw96ckk9p1rnyagn9z03";
           };
   jdk = fetchurl {
              url = "${baseurl}/jdk/archive/${repover}.tar.gz";
-             sha256 = "0z1cy6aq09j25jyryj47rms15h5175p2h23fg5pv035zapf8nb1b";
+             sha256 = "1fc59jrbfq8l067mggzy5dnrvni7lwaqd7hahs4nqv87kyrfg545";
           };
   jaxws = fetchurl {
              url = "${baseurl}/jaxws/archive/${repover}.tar.gz";
-             sha256 = "0y0mk4sra9d29kgx842m5y4bz9gczc9ypkajv6m5igjv7sizzsv7";
+             sha256 = "1ka2fvyxdmpfhk814s314gx53yvdr19vpsqygx283v9nbq90l1yg";
           };
   jaxp = fetchurl {
              url = "${baseurl}/jaxp/archive/${repover}.tar.gz";
-             sha256 = "07ssrjhffkdncxxhsbid21hlg51y7js3x7sb4g474vmmi3qj6vmb";
+             sha256 = "15vlgs5v2ax8sqwh7bg50fnlrwlpnkp0myzrvpqs1mcza8pyasp8";
           };
   nashorn = fetchurl {
              url = "${baseurl}/nashorn/archive/${repover}.tar.gz";
-             sha256 = "0r0b8ra0ibzbdpxz6nv6i2zrzh2j5sxgprpnl6gf4d9h0i29ickj";
+             sha256 = "1jzn0yi0v6lda5y8ib07g1p6zymnbcx9yy6iz8niggpm7205y93h";
           };
   openjdk8 = stdenv.mkDerivation {
-    name = "openjdk-8u${update}b${build}";
+    name = "openjdk-8u${update}-${build}";
 
     srcs = [ jdk8 langtools hotspot corba jdk jaxws jaxp nashorn ];
     sourceRoot = ".";
diff --git a/pkgs/development/compilers/scala/dotty-bare.nix b/pkgs/development/compilers/scala/dotty-bare.nix
index 5f1d384a32835..ab42eae31a68d 100644
--- a/pkgs/development/compilers/scala/dotty-bare.nix
+++ b/pkgs/development/compilers/scala/dotty-bare.nix
@@ -1,12 +1,12 @@
 { stdenv, fetchurl, makeWrapper, jre, ncurses }:
 
 stdenv.mkDerivation rec {
-  version = "0.10.0-RC1";
+  version = "0.14.0-RC1";
   name = "dotty-bare-${version}";
 
   src = fetchurl {
     url = "https://github.com/lampepfl/dotty/releases/download/${version}/dotty-${version}.tar.gz";
-    sha256 = "0s9vh0d6xx99gl0ji0dgmbq36f79c0iwfbrfqwmaclqm9yq5m54k";
+    sha256 = "0nrgsyhqjlpvnjqgb18pryr8q7knq3dq25jhp98s4wh76nssm1zr";
   };
 
   propagatedBuildInputs = [ jre ncurses.dev ] ;
diff --git a/pkgs/development/python-modules/python-language-server/default.nix b/pkgs/development/python-modules/python-language-server/default.nix
index 121a98335b640..09ded676b74fb 100644
--- a/pkgs/development/python-modules/python-language-server/default.nix
+++ b/pkgs/development/python-modules/python-language-server/default.nix
@@ -21,13 +21,13 @@ in
 
 buildPythonPackage rec {
   pname = "python-language-server";
-  version = "0.25.0";
+  version = "0.26.1";
 
   src = fetchFromGitHub {
     owner = "palantir";
     repo = "python-language-server";
     rev = version;
-    sha256 = "10la48m10j4alfnpw0xw359fb833scf5kv7kjvh7djf6ij7cfsvq";
+    sha256 = "003fy8bbvwibnsnyxw1qwg2rxnhbfylqs67ixr6fdnw6mmrzd6fg";
   };
 
   # The tests require all the providers, disable otherwise.
diff --git a/pkgs/development/python-modules/xdot/default.nix b/pkgs/development/python-modules/xdot/default.nix
index 79f0fb13d70d4..8ad249a4c1a48 100644
--- a/pkgs/development/python-modules/xdot/default.nix
+++ b/pkgs/development/python-modules/xdot/default.nix
@@ -3,11 +3,11 @@
 
 buildPythonPackage rec {
   pname = "xdot";
-  version = "1.0";
+  version = "1.1";
 
   src = fetchPypi {
     inherit pname version;
-    sha256 = "18a2ri8vggaxy7im1x9hki34v519y5jy4n07zpqq5br9syb7h1ky";
+    sha256 = "0cr4rh7dz4dfzyxrk5pzhm0d15gkrgkfp3i5lw178xy81pc56p71";
   };
 
   disabled = !isPy3k;
diff --git a/pkgs/development/ruby-modules/bundix/default.nix b/pkgs/development/ruby-modules/bundix/default.nix
index 92f14114cf102..d6a8f3ddbc669 100644
--- a/pkgs/development/ruby-modules/bundix/default.nix
+++ b/pkgs/development/ruby-modules/bundix/default.nix
@@ -6,13 +6,13 @@ buildRubyGem rec {
 
   name = "${gemName}-${version}";
   gemName = "bundix";
-  version = "2.4.1";
+  version = "2.4.2";
 
   src = fetchFromGitHub {
     owner = "manveru";
     repo = "bundix";
     rev = version;
-    sha256 = "175qmv7dj7v50v71b78dzn5pb4a35ml6p15asks9q1rrlkz0n4gn";
+    sha256 = "03jhj1dy0ljrymjnpi6mcxn36a29qxr835l1lc11879jjzvnr2ax";
   };
 
   buildInputs = [ ruby bundler ];
diff --git a/pkgs/os-specific/linux/kernel/linux-4.14.nix b/pkgs/os-specific/linux/kernel/linux-4.14.nix
index f49d7de1a53d7..768d06bb05302 100644
--- a/pkgs/os-specific/linux/kernel/linux-4.14.nix
+++ b/pkgs/os-specific/linux/kernel/linux-4.14.nix
@@ -3,7 +3,7 @@
 with stdenv.lib;
 
 buildLinux (args // rec {
-  version = "4.14.111";
+  version = "4.14.112";
 
   # modDirVersion needs to be x.y.z, will automatically add .0 if needed
   modDirVersion = if (modDirVersionArg == null) then concatStrings (intersperse "." (take 3 (splitString "." "${version}.0"))) else modDirVersionArg;
@@ -13,6 +13,6 @@ buildLinux (args // rec {
 
   src = fetchurl {
     url = "mirror://kernel/linux/kernel/v4.x/linux-${version}.tar.xz";
-    sha256 = "1s56819kkr7h48njk708f9gapy2hr97vxawp5qflv1izamb7s6gq";
+    sha256 = "0wp40cr3gpj8g2hghcvhz669qshd7zkfjkq78gkdg840rki02q9j";
   };
 } // (args.argsOverride or {}))
diff --git a/pkgs/os-specific/linux/kernel/linux-4.19.nix b/pkgs/os-specific/linux/kernel/linux-4.19.nix
index 85a8ff54df4a2..9bdbdf4558fb1 100644
--- a/pkgs/os-specific/linux/kernel/linux-4.19.nix
+++ b/pkgs/os-specific/linux/kernel/linux-4.19.nix
@@ -3,7 +3,7 @@
 with stdenv.lib;
 
 buildLinux (args // rec {
-  version = "4.19.34";
+  version = "4.19.35";
 
   # modDirVersion needs to be x.y.z, will automatically add .0 if needed
   modDirVersion = if (modDirVersionArg == null) then concatStrings (intersperse "." (take 3 (splitString "." "${version}.0"))) else modDirVersionArg;
@@ -13,6 +13,6 @@ buildLinux (args // rec {
 
   src = fetchurl {
     url = "mirror://kernel/linux/kernel/v4.x/linux-${version}.tar.xz";
-    sha256 = "1k5qhyh7nwfs5pkbrjpxyj6w17424qcmmd9v7jqfbgnx3wm5wyfx";
+    sha256 = "1q2742sbi9vgk791xr1rbi5mr3ra4k1bdzq9dr610870y1hfb9rg";
   };
 } // (args.argsOverride or {}))
diff --git a/pkgs/os-specific/linux/kernel/linux-4.9.nix b/pkgs/os-specific/linux/kernel/linux-4.9.nix
index 8923cb158b524..afeaac91ba89a 100644
--- a/pkgs/os-specific/linux/kernel/linux-4.9.nix
+++ b/pkgs/os-specific/linux/kernel/linux-4.9.nix
@@ -1,11 +1,11 @@
 { stdenv, buildPackages, fetchurl, perl, buildLinux, ... } @ args:
 
 buildLinux (args // rec {
-  version = "4.9.168";
+  version = "4.9.169";
   extraMeta.branch = "4.9";
 
   src = fetchurl {
     url = "mirror://kernel/linux/kernel/v4.x/linux-${version}.tar.xz";
-    sha256 = "07h9xwxpdxb6gm1fy0d8s6p1zalmw3mbzjgd4gipvmzsxwhiqiad";
+    sha256 = "1c6nz27q0m6nbb7v7kba6zrhzav5bqqllvwzzqf9cmd5cdn66xmp";
   };
 } // (args.argsOverride or {}))
diff --git a/pkgs/os-specific/linux/kernel/linux-5.0.nix b/pkgs/os-specific/linux/kernel/linux-5.0.nix
index a418e4b4bcc06..11a7287fdb821 100644
--- a/pkgs/os-specific/linux/kernel/linux-5.0.nix
+++ b/pkgs/os-specific/linux/kernel/linux-5.0.nix
@@ -3,7 +3,7 @@
 with stdenv.lib;
 
 buildLinux (args // rec {
-  version = "5.0.7";
+  version = "5.0.8";
 
   # modDirVersion needs to be x.y.z, will automatically add .0 if needed
   modDirVersion = if (modDirVersionArg == null) then concatStrings (intersperse "." (take 3 (splitString "." "${version}.0"))) else modDirVersionArg;
@@ -13,6 +13,6 @@ buildLinux (args // rec {
 
   src = fetchurl {
     url = "mirror://kernel/linux/kernel/v5.x/linux-${version}.tar.xz";
-    sha256 = "1v2lxwamnfm879a9qi9fwp5zyvlzjw9qa0aizidjbiwz5dk7gq8n";
+    sha256 = "0dalr3vym2ixmblnlrw10dfx0icdf2aab38z2j9y1qfcx128140i";
   };
 } // (args.argsOverride or {}))
diff --git a/pkgs/os-specific/linux/numatop/default.nix b/pkgs/os-specific/linux/numatop/default.nix
new file mode 100644
index 0000000000000..80f14b568eda2
--- /dev/null
+++ b/pkgs/os-specific/linux/numatop/default.nix
@@ -0,0 +1,27 @@
+{ stdenv, fetchurl, pkgconfig, numactl, ncurses, check }:
+
+stdenv.mkDerivation rec {
+  pname = "numatop";
+  version = "2.1";
+  src = fetchurl {
+    url = "https://github.com/intel/${pname}/releases/download/v${version}/${pname}-v${version}.tar.xz";
+    sha256 = "1s7psq1xyswj0lpx10zg5lnppav2xy9safkfx3rssrs9c2fp5d76";
+  };
+
+  nativeBuildInputs = [ pkgconfig ];
+  buildInputs = [ numactl ncurses ];
+  checkInputs = [ check ];
+
+  doCheck = true;
+
+  meta = with stdenv.lib; {
+    description = "Tool for runtime memory locality characterization and analysis of processes and threads on a NUMA system";
+    homepage = https://01.org/numatop;
+    license = licenses.bsd3;
+    maintainers = with maintainers; [ dtzWill ];
+    platforms = [
+      { kernel.name = "linux"; cpu.family = "x86"; }
+      { kernel.name = "linux"; cpu.family = "power"; }
+    ];
+  };
+}
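# The meta.platforms list in the new numatop expression uses attribute-set
# patterns ({ kernel.name; cpu.family; }) rather than plain system strings.
# A rough sketch of how such a pattern is consulted, assuming the
# lib.meta.availableOn helper from current nixpkgs and that this commit's
# numatop attribute is present:
#
#   let pkgs = import <nixpkgs> { }; in
#   pkgs.lib.meta.availableOn pkgs.stdenv.hostPlatform pkgs.numatop
#   # => true on e.g. x86_64-linux, which matches the x86/linux pattern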
diff --git a/pkgs/servers/monitoring/grafana/default.nix b/pkgs/servers/monitoring/grafana/default.nix
index 6fd416baae836..ba5ba8725bc54 100644
--- a/pkgs/servers/monitoring/grafana/default.nix
+++ b/pkgs/servers/monitoring/grafana/default.nix
@@ -1,7 +1,7 @@
 { lib, buildGoPackage, fetchurl, fetchFromGitHub, phantomjs2 }:
 
 buildGoPackage rec {
-  version = "6.1.3";
+  version = "6.1.4";
   name = "grafana-${version}";
   goPackagePath = "github.com/grafana/grafana";
 
@@ -11,12 +11,12 @@ buildGoPackage rec {
     rev = "v${version}";
     owner = "grafana";
     repo = "grafana";
-    sha256 = "0svg1q3h8m3i6snznsx0lwzb7lrv532v0nvpbwr6ydg2gd3aqwih";
+    sha256 = "0a0k66vbsi2704pb5vr8mjr7n3v5dapnfhqxkrw6biicj8ahka30";
   };
 
   srcStatic = fetchurl {
     url = "https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-${version}.linux-amd64.tar.gz";
-    sha256 = "0a7160lmi67pa32q5d6ib14vp5rx7850w0vvfxchmdm08dv3hm7k";
+    sha256 = "1wamnvv2jiyi6cyw9p65j2hm3si345asfwl7kjg7drx0vn08ks6g";
   };
 
   postPatch = ''
diff --git a/pkgs/tools/package-management/nixops/default.nix b/pkgs/tools/package-management/nixops/default.nix
index 73e8c90d4e429..7cd7935d155cf 100644
--- a/pkgs/tools/package-management/nixops/default.nix
+++ b/pkgs/tools/package-management/nixops/default.nix
@@ -1,24 +1,9 @@
 { callPackage, newScope, pkgs, fetchurl }:
 
 callPackage ./generic.nix (rec {
-  version = "1.6.1";
+  version = "1.7";
   src = fetchurl {
-    url = "http://nixos.org/releases/nixops/nixops-${version}/nixops-${version}.tar.bz2";
-    sha256 = "0lfx5fhyg3z6725ydsk0ibg5qqzp5s0x9nbdww02k8s307axiah3";
-  };
-# nixops is incompatible with the most recent versions of listed
-# azure-mgmt-* packages, therefore we are pinning them to
-# package-private versions, so that they don't get trampled by
-# updates.
-# see
-# https://github.com/NixOS/nixops/issues/1065
-  python2Packages = pkgs.python2Packages.override {
-    overrides = (self: super: let callPackage = newScope self; in {
-      azure-mgmt-compute = callPackage ./azure-mgmt-compute { };
-      azure-mgmt-network = callPackage ./azure-mgmt-network { };
-      azure-mgmt-nspkg = callPackage ./azure-mgmt-nspkg { };
-      azure-mgmt-resource = callPackage ./azure-mgmt-resource { };
-      azure-mgmt-storage = callPackage ./azure-mgmt-storage { };
-    });
+    url = "https://nixos.org/releases/nixops/nixops-${version}/nixops-${version}.tar.bz2";
+    sha256 = "091c0b5bca57d4aa20be20e826ec161efe3aec9c788fbbcf3806a734a517f0f3";
   };
 })
diff --git a/pkgs/tools/package-management/nixops/generic.nix b/pkgs/tools/package-management/nixops/generic.nix
index 813ae64bef552..79400a8e5ea04 100644
--- a/pkgs/tools/package-management/nixops/generic.nix
+++ b/pkgs/tools/package-management/nixops/generic.nix
@@ -1,4 +1,4 @@
-{ lib, python2Packages, libxslt, docbook_xsl_ns, openssh, cacert
+{ lib, python2Packages, libxslt, docbook_xsl_ns, openssh, cacert, nixopsAzurePackages ? []
 # version args
 , src, version
 , meta ? {}
@@ -16,11 +16,6 @@ python2Packages.buildPythonApplication {
       boto3
       hetzner
       libcloud
-      azure-storage
-      azure-mgmt-compute
-      azure-mgmt-network
-      azure-mgmt-resource
-      azure-mgmt-storage
       adal
       # Go back to sqlite once Python 2.7.13 is released
       pysqlite
@@ -28,7 +23,7 @@ python2Packages.buildPythonApplication {
       digital-ocean
       libvirt
       typing
-    ];
+    ] ++ nixopsAzurePackages;
 
   checkPhase =
   # Ensure, that there are no (python) import errors
@@ -53,7 +48,7 @@ python2Packages.buildPythonApplication {
   meta = {
     homepage = https://github.com/NixOS/nixops;
     description = "NixOS cloud provisioning and deployment tool";
-    maintainers = with lib.maintainers; [ eelco rob domenkozar ];
+    maintainers = with lib.maintainers; [ aminechikhaoui eelco rob domenkozar ];
     platforms = lib.platforms.unix;
     license = lib.licenses.lgpl3;
   } // meta;
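# With the `nixopsAzurePackages ? []` argument introduced above, generic.nix
# no longer hard-codes the Azure python dependencies; a caller opts back in by
# passing them explicitly, as the new nixops-v1_6_1.nix below does. A minimal
# sketch of such a caller (the src values are placeholders, not from the patch):
#
#   callPackage ./generic.nix {
#     version = "1.6.1";
#     src = fetchurl { url = "..."; sha256 = "..."; };   # placeholder
#     nixopsAzurePackages = with python2Packages; [ azure-storage ];
#   }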
diff --git a/pkgs/tools/package-management/nixops/nixops-v1_6_1.nix b/pkgs/tools/package-management/nixops/nixops-v1_6_1.nix
new file mode 100644
index 0000000000000..d9ba324dce720
--- /dev/null
+++ b/pkgs/tools/package-management/nixops/nixops-v1_6_1.nix
@@ -0,0 +1,31 @@
+{ callPackage, newScope, pkgs, fetchurl }:
+
+callPackage ./generic.nix (rec {
+  version = "1.6.1";
+  src = fetchurl {
+    url = "http://nixos.org/releases/nixops/nixops-${version}/nixops-${version}.tar.bz2";
+    sha256 = "0lfx5fhyg3z6725ydsk0ibg5qqzp5s0x9nbdww02k8s307axiah3";
+  };
+  nixopsAzurePackages = with python2Packages; [
+    azure-storage
+    azure-mgmt-compute
+    azure-mgmt-network
+    azure-mgmt-resource
+    azure-mgmt-storage
+  ];
+  # nixops is incompatible with the most recent versions of listed
+  # azure-mgmt-* packages, therefore we are pinning them to
+  # package-private versions, so that they don't get trampled by
+  # updates.
+  # see
+  # https://github.com/NixOS/nixops/issues/1065
+  python2Packages = pkgs.python2Packages.override {
+    overrides = (self: super: let callPackage = newScope self; in {
+      azure-mgmt-compute = callPackage ./azure-mgmt-compute { };
+      azure-mgmt-network = callPackage ./azure-mgmt-network { };
+      azure-mgmt-nspkg = callPackage ./azure-mgmt-nspkg { };
+      azure-mgmt-resource = callPackage ./azure-mgmt-resource { };
+      azure-mgmt-storage = callPackage ./azure-mgmt-storage { };
+    });
+  };
+})
diff --git a/pkgs/tools/package-management/nixops/unstable.nix b/pkgs/tools/package-management/nixops/unstable.nix
index 88d9d0c94bef3..94975807dea6b 100644
--- a/pkgs/tools/package-management/nixops/unstable.nix
+++ b/pkgs/tools/package-management/nixops/unstable.nix
@@ -5,26 +5,9 @@
 # Then copy the URL to the tarball.
 
 callPackage ./generic.nix (rec {
-  version = "1.6.1pre2728_8ed39f9";
+  version = "1.7pre2764_932bf43";
   src = fetchurl {
-    url = "https://hydra.nixos.org/build/88329589/download/2/nixops-${version}.tar.bz2";
-    sha256 = "1ppnhqmsbiijm6r77h86abv3fjny5iq35yvj207s520kjwzaj7kc";
+    url = "https://hydra.nixos.org/build/92372343/download/2/nixops-${version}.tar.bz2";
+    sha256 = "f35bf81bf2805473ea54248d0ee92d163d00a1992f3f75d17e8cf430db1f9919";
   };
-  # # Marking unstable as broken, instead of using the pinned version,
-  # # like stable does You might be able to use the following code (as
-  # # in stable), to run unstable against the pinned packages
-  # python2Packages = pkgs.python2Packages.override {
-  #   overrides = (self: super: let callPackage = newScope self; in {
-  #     azure-mgmt-compute = callPackage ./azure-mgmt-compute { };
-  #     azure-mgmt-network = callPackage ./azure-mgmt-network { };
-  #     azure-mgmt-nspkg = callPackage ./azure-mgmt-nspkg { };
-  #     azure-mgmt-resource = callPackage ./azure-mgmt-resource { };
-  #     azure-mgmt-storage = callPackage ./azure-mgmt-storage { };
-  #   });
-  # };
-  # # otherwise
-  # # see https://github.com/NixOS/nixpkgs/pull/52550
-  # # see https://github.com/NixOS/nixops/issues/1065
-  # # see https://github.com/NixOS/nixpkgs/issues/52547
-  meta.broken = true;
 })
diff --git a/pkgs/top-level/all-packages.nix b/pkgs/top-level/all-packages.nix
index d7efb150aea3f..d9db7bedce360 100644
--- a/pkgs/top-level/all-packages.nix
+++ b/pkgs/top-level/all-packages.nix
@@ -1607,6 +1607,8 @@ in
 
   long-shebang = callPackage ../misc/long-shebang {};
 
+  numatop = callPackage ../os-specific/linux/numatop { };
+
   iio-sensor-proxy = callPackage ../os-specific/linux/iio-sensor-proxy { };
 
   ipvsadm = callPackage ../os-specific/linux/ipvsadm { };
@@ -19839,6 +19841,10 @@ in
     bison = bison2;
   };
 
+  stretchly = callPackage ../applications/misc/stretchly {
+    inherit (gnome2) GConf;
+  };
+
   stumpish = callPackage ../applications/window-managers/stumpish {};
 
   stumpwm = callPackage ../applications/window-managers/stumpwm {
@@ -22907,6 +22913,8 @@ in
 
   nixops = callPackage ../tools/package-management/nixops { };
 
+  nixops_1_6_1 = callPackage ../tools/package-management/nixops/nixops-v1_6_1.nix {};
+
   nixopsUnstable = lowPrio (callPackage ../tools/package-management/nixops/unstable.nix { });
 
   nixops-dns = callPackage ../tools/package-management/nixops/nixops-dns.nix { };
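# After this commit three nixops attributes coexist: nixops (1.7),
# nixops_1_6_1 (the pinned release that keeps the Azure backend), and
# nixopsUnstable. A minimal sketch of selecting the legacy version in a NixOS
# configuration (only the attribute names above are taken from the patch):
#
#   { pkgs, ... }:
#   {
#     environment.systemPackages = [ pkgs.nixops_1_6_1 ];
#   }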