Diffstat (limited to 'nixos/modules/virtualisation')
 nixos/modules/virtualisation/amazon-image.nix   |  1 +
 nixos/modules/virtualisation/amazon-init.nix    |  1 +
 nixos/modules/virtualisation/containers.nix     | 70 ++++++++++++--
 nixos/modules/virtualisation/cri-o.nix          |  2 +-
 nixos/modules/virtualisation/docker.nix         | 26 +++++-
 nixos/modules/virtualisation/hyperv-image.nix   |  1 -
 nixos/modules/virtualisation/incus.nix          | 12 ++-
 nixos/modules/virtualisation/linode-config.nix  |  1 -
 nixos/modules/virtualisation/lxc-container.nix  | 12 ++--
 nixos/modules/virtualisation/oci-containers.nix | 12 ++--
 nixos/modules/virtualisation/podman/default.nix | 12 +++-
 nixos/modules/virtualisation/qemu-vm.nix        | 45 +++++++---
 nixos/modules/virtualisation/vmware-image.nix   |  1 -
 13 files changed, 157 insertions(+), 39 deletions(-)
diff --git a/nixos/modules/virtualisation/amazon-image.nix b/nixos/modules/virtualisation/amazon-image.nix
index f0d9b95f81f6b..c7fe1bed51592 100644
--- a/nixos/modules/virtualisation/amazon-image.nix
+++ b/nixos/modules/virtualisation/amazon-image.nix
@@ -103,4 +103,5 @@ in
     # (e.g. it depends on GTK).
     services.udisks2.enable = false;
   };
+  meta.maintainers = with maintainers; [ arianvp ];
 }
diff --git a/nixos/modules/virtualisation/amazon-init.nix b/nixos/modules/virtualisation/amazon-init.nix
index 8b98f2e32dd5d..8475097df07ca 100644
--- a/nixos/modules/virtualisation/amazon-init.nix
+++ b/nixos/modules/virtualisation/amazon-init.nix
@@ -84,4 +84,5 @@ in {
       };
     };
   };
+  meta.maintainers = with maintainers; [ arianvp ];
 }
diff --git a/nixos/modules/virtualisation/containers.nix b/nixos/modules/virtualisation/containers.nix
index 3e33cabf26608..b3d81078eb349 100644
--- a/nixos/modules/virtualisation/containers.nix
+++ b/nixos/modules/virtualisation/containers.nix
@@ -28,6 +28,43 @@ in
       description = lib.mdDoc "Enable the OCI seccomp BPF hook";
     };
 
+    cdi = {
+      dynamic.nvidia.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable dynamic CDI configuration for NVIDIA devices by running nvidia-container-toolkit on boot.
+        '';
+      };
+
+      static = mkOption {
+        type = types.attrs;
+        default = { };
+        description = lib.mdDoc ''
+          Declarative CDI specification. Each key of this attribute set
+          is mapped to a file in /etc/cdi, and each value must be an
+          attribute set serializable to a valid JSON CDI specification.
+        '';
+        example = {
+          some-vendor = builtins.fromJSON ''
+              {
+                "cdiVersion": "0.5.0",
+                "kind": "some-vendor.com/foo",
+                "devices": [],
+                "containerEdits": []
+              }
+            '';
+
+          some-other-vendor = {
+            cdiVersion = "0.5.0";
+            kind = "some-other-vendor.com/bar";
+            devices = [];
+            containerEdits = [];
+          };
+        };
+      };
+    };
+
     containersConf.settings = mkOption {
       type = toml.type;
       default = { };
@@ -113,6 +150,8 @@ in
 
   config = lib.mkIf cfg.enable {
 
+    hardware.nvidia-container-toolkit-cdi-generator.enable = lib.mkIf cfg.cdi.dynamic.nvidia.enable true;
+
     virtualisation.containers.containersConf.cniPlugins = [ pkgs.cni-plugins ];
 
     virtualisation.containers.containersConf.settings = {
@@ -124,19 +163,28 @@ in
       };
     };
 
-    environment.etc."containers/containers.conf".source =
-      toml.generate "containers.conf" cfg.containersConf.settings;
-
-    environment.etc."containers/storage.conf".source =
-      toml.generate "storage.conf" cfg.storage.settings;
+    environment.etc = let
+      cdiStaticConfigurationFiles = (lib.attrsets.mapAttrs'
+        (name: value:
+          lib.attrsets.nameValuePair "cdi/${name}.json"
+            { text = builtins.toJSON value; })
+        cfg.cdi.static);
+    in {
+      "containers/containers.conf".source =
+        toml.generate "containers.conf" cfg.containersConf.settings;
+
+      "containers/storage.conf".source =
+        toml.generate "storage.conf" cfg.storage.settings;
+
+      "containers/registries.conf".source = toml.generate "registries.conf" {
+        registries = lib.mapAttrs (n: v: { registries = v; }) cfg.registries;
+      };
 
-    environment.etc."containers/registries.conf".source = toml.generate "registries.conf" {
-      registries = lib.mapAttrs (n: v: { registries = v; }) cfg.registries;
-    };
+      "containers/policy.json".source =
+        if cfg.policy != { } then pkgs.writeText "policy.json" (builtins.toJSON cfg.policy)
+        else "${pkgs.skopeo.policy}/default-policy.json";
+    } // cdiStaticConfigurationFiles;
 
-    environment.etc."containers/policy.json".source =
-      if cfg.policy != { } then pkgs.writeText "policy.json" (builtins.toJSON cfg.policy)
-      else "${pkgs.skopeo.policy}/default-policy.json";
   };
 
 }
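
A minimal usage sketch for the new CDI options (not part of the diff; the vendor
name `some-vendor` is illustrative, and an NVIDIA driver is assumed to be
configured elsewhere):

    {
      virtualisation.containers = {
        enable = true;

        # Run nvidia-container-toolkit at boot to generate NVIDIA device specs.
        cdi.dynamic.nvidia.enable = true;

        # Rendered by the module above to /etc/cdi/some-vendor.json.
        cdi.static.some-vendor = {
          cdiVersion = "0.5.0";
          kind = "some-vendor.com/foo";
          devices = [ ];
          containerEdits = [ ];
        };
      };
    }

A CDI-aware runtime can then request the generated devices, e.g. with Podman:
`podman run --device nvidia.com/gpu=all …`.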
diff --git a/nixos/modules/virtualisation/cri-o.nix b/nixos/modules/virtualisation/cri-o.nix
index dacd700537c78..417cf516c7f4b 100644
--- a/nixos/modules/virtualisation/cri-o.nix
+++ b/nixos/modules/virtualisation/cri-o.nix
@@ -6,7 +6,7 @@ let
 
   crioPackage = pkgs.cri-o.override {
     extraPackages = cfg.extraPackages
-      ++ lib.optional (builtins.elem "zfs" config.boot.supportedFilesystems) config.boot.zfs.package;
+      ++ lib.optional (config.boot.supportedFilesystems.zfs or false) config.boot.zfs.package;
   };
 
   format = pkgs.formats.toml { };
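
The rewritten check assumes boot.supportedFilesystems is now an attribute set
of booleans (the older list form is, to my understanding, still accepted and
coerced), so either spelling below enables the ZFS package here:

    {
      # Attribute-set form, read directly by the `or false` lookup above.
      boot.supportedFilesystems.zfs = true;

      # List form, coerced to { zfs = true; }:
      # boot.supportedFilesystems = [ "zfs" ];
    }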
diff --git a/nixos/modules/virtualisation/docker.nix b/nixos/modules/virtualisation/docker.nix
index b0d61ee06091d..cceb186e0b36e 100644
--- a/nixos/modules/virtualisation/docker.nix
+++ b/nixos/modules/virtualisation/docker.nix
@@ -72,6 +72,8 @@ in
         type = types.bool;
         default = false;
         description = lib.mdDoc ''
+          **Deprecated**: use virtualisation.containers.cdi.dynamic.nvidia.enable instead.
+
           Enable nvidia-docker wrapper, supporting NVIDIA GPUs inside docker containers.
         '';
       };
@@ -93,8 +95,18 @@ in
         default = null;
         description =
           lib.mdDoc ''
-            This option determines which Docker storage driver to use. By default
-            it let's docker automatically choose preferred storage driver.
+            This option determines which Docker
+            [storage driver](https://docs.docker.com/storage/storagedriver/select-storage-driver/)
+            to use.
+            By default it lets docker automatically choose the preferred storage
+            driver.
+            However, it is recommended to specify the storage driver explicitly,
+            as Docker's default varies across versions.
+
+            ::: {.warning}
+            Changing the storage driver will cause any existing containers
+            and images to become inaccessible.
+            :::
           '';
       };
 
@@ -175,6 +187,16 @@ in
       users.groups.docker.gid = config.ids.gids.docker;
       systemd.packages = [ cfg.package ];
 
+      # Docker 25.0.0 supports CDI by default
+      # (https://docs.docker.com/engine/release-notes/25.0/#new). Encourage
+      # moving to CDI as opposed to having deprecated runtime
+      # wrappers.
+      warnings = lib.optionals (cfg.enableNvidia && (lib.strings.versionAtLeast cfg.package.version "25")) [
+        ''
          You have set virtualisation.docker.enableNvidia. This option is deprecated; please set virtualisation.containers.cdi.dynamic.nvidia.enable instead.
+        ''
+      ];
+
       systemd.services.docker = {
         wantedBy = optional cfg.enableOnBoot "multi-user.target";
         after = [ "network.target" "docker.socket" ];
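
A sketch of the migration the warning above asks for (not part of the diff;
assumes Docker ≥ 25 and an NVIDIA driver configured elsewhere):

    {
      virtualisation.docker = {
        enable = true;
        # Pin the storage driver explicitly; Docker's default changes across versions.
        storageDriver = "overlay2";
        # enableNvidia = true;  # deprecated: triggers the warning added above
      };

      # Replacement for the deprecated wrapper:
      virtualisation.containers.cdi.dynamic.nvidia.enable = true;
    }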
diff --git a/nixos/modules/virtualisation/hyperv-image.nix b/nixos/modules/virtualisation/hyperv-image.nix
index efaea0c110d23..fddff7bf1c69a 100644
--- a/nixos/modules/virtualisation/hyperv-image.nix
+++ b/nixos/modules/virtualisation/hyperv-image.nix
@@ -60,7 +60,6 @@ in {
     boot.growPartition = true;
 
     boot.loader.grub = {
-      version = 2;
       device = "nodev";
       efiSupport = true;
       efiInstallAsRemovable = true;
diff --git a/nixos/modules/virtualisation/incus.nix b/nixos/modules/virtualisation/incus.nix
index bbe5b48b95bb1..3bbe0ba458516 100644
--- a/nixos/modules/virtualisation/incus.nix
+++ b/nixos/modules/virtualisation/incus.nix
@@ -97,6 +97,12 @@ in
           considered failed and systemd will attempt to restart it.
         '';
       };
+
+      ui = {
+        enable = lib.mkEnableOption (lib.mdDoc "(experimental) Incus UI");
+
+        package = lib.mkPackageOption pkgs [ "incus" "ui" ] { };
+      };
     };
   };
 
@@ -165,10 +171,12 @@ in
         "${config.boot.zfs.package}/lib/udev"
       ];
 
-      environment = {
+      environment = lib.mkMerge [ {
         # Override Path to the LXC template configuration directory
         INCUS_LXC_TEMPLATE_CONFIG = "${pkgs.lxcfs}/share/lxc/config";
-      };
+      } (lib.mkIf cfg.ui.enable {
+        INCUS_UI = cfg.ui.package;
+      }) ];
 
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/incusd --group incus-admin";
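
Enabling the new UI option might look like this (a sketch, not part of the
diff); serving the UI additionally requires an HTTPS listener configured out of
band, e.g. `incus config set core.https_address :8443`:

    {
      virtualisation.incus = {
        enable = true;
        # Puts the ui package into the daemon environment as INCUS_UI.
        ui.enable = true;
      };
    }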
diff --git a/nixos/modules/virtualisation/linode-config.nix b/nixos/modules/virtualisation/linode-config.nix
index bbf81bda9c024..209bff57ea8be 100644
--- a/nixos/modules/virtualisation/linode-config.nix
+++ b/nixos/modules/virtualisation/linode-config.nix
@@ -59,7 +59,6 @@ with lib;
 
       grub = {
         enable = true;
-        version = 2;
         forceInstall = true;
         device = "nodev";
 
diff --git a/nixos/modules/virtualisation/lxc-container.nix b/nixos/modules/virtualisation/lxc-container.nix
index 8d3a480e6dc8c..95e3083ff9eda 100644
--- a/nixos/modules/virtualisation/lxc-container.nix
+++ b/nixos/modules/virtualisation/lxc-container.nix
@@ -14,7 +14,9 @@
 
   options = { };
 
-  config = {
+  config = let
+    initScript = if config.boot.initrd.systemd.enable then "prepare-root" else "init";
+  in {
     boot.isContainer = true;
     boot.postBootCommands =
       ''
@@ -41,7 +43,7 @@
 
       contents = [
         {
-          source = config.system.build.toplevel + "/init";
+          source = config.system.build.toplevel + "/${initScript}";
           target = "/sbin/init";
         }
        # Technically this is not required for lxc, but having it also makes this configuration work with systemd-nspawn.
@@ -65,7 +67,7 @@
 
       pseudoFiles = [
         "/sbin d 0755 0 0"
-        "/sbin/init s 0555 0 0 ${config.system.build.toplevel}/init"
+        "/sbin/init s 0555 0 0 ${config.system.build.toplevel}/${initScript}"
         "/dev d 0755 0 0"
         "/proc d 0555 0 0"
         "/sys d 0555 0 0"
@@ -74,7 +76,7 @@
 
     system.build.installBootLoader = pkgs.writeScript "install-lxd-sbin-init.sh" ''
       #!${pkgs.runtimeShell}
-      ${pkgs.coreutils}/bin/ln -fs "$1/init" /sbin/init
+      ${pkgs.coreutils}/bin/ln -fs "$1/${initScript}" /sbin/init
     '';
 
     # networkd depends on this, but systemd module disables this for containers
@@ -83,7 +85,7 @@
     systemd.packages = [ pkgs.distrobuilder.generator ];
 
     system.activationScripts.installInitScript = lib.mkForce ''
-      ln -fs $systemConfig/init /sbin/init
+      ln -fs $systemConfig/${initScript} /sbin/init
     '';
   };
 }
diff --git a/nixos/modules/virtualisation/oci-containers.nix b/nixos/modules/virtualisation/oci-containers.nix
index 07ed08ab2f84d..a88715587d654 100644
--- a/nixos/modules/virtualisation/oci-containers.nix
+++ b/nixos/modules/virtualisation/oci-containers.nix
@@ -252,10 +252,13 @@ let
       text = ''
         ${cfg.backend} rm -f ${name} || true
         ${optionalString (isValidLogin container.login) ''
+          # try logging in; if it fails, check whether the image exists locally
           ${cfg.backend} login \
           ${container.login.registry} \
           --username ${container.login.username} \
-          --password-stdin < ${container.login.passwordFile}
+          --password-stdin < ${container.login.passwordFile} \
+          || ${cfg.backend} image inspect ${container.image} >/dev/null \
+          || { echo "image doesn't exist locally and login failed" >&2 ; exit 1; }
         ''}
         ${optionalString (container.imageFile != null) ''
           ${cfg.backend} load -i ${container.imageFile}
@@ -308,9 +311,10 @@ let
     );
 
     preStop = if cfg.backend == "podman"
-      then "[ $SERVICE_RESULT = success ] || podman stop --ignore --cidfile=/run/podman-${escapedName}.ctr-id"
-      else "[ $SERVICE_RESULT = success ] || ${cfg.backend} stop ${name}";
-    postStop =  if cfg.backend == "podman"
+      then "podman stop --ignore --cidfile=/run/podman-${escapedName}.ctr-id"
+      else "${cfg.backend} stop ${name}";
+
+    postStop = if cfg.backend == "podman"
       then "podman rm -f --ignore --cidfile=/run/podman-${escapedName}.ctr-id"
       else "${cfg.backend} rm -f ${name} || true";
 
diff --git a/nixos/modules/virtualisation/podman/default.nix b/nixos/modules/virtualisation/podman/default.nix
index 47382f9beab00..a977390542166 100644
--- a/nixos/modules/virtualisation/podman/default.nix
+++ b/nixos/modules/virtualisation/podman/default.nix
@@ -9,7 +9,7 @@ let
     extraPackages = cfg.extraPackages
       # setuid shadow
       ++ [ "/run/wrappers" ]
-      ++ lib.optional (builtins.elem "zfs" config.boot.supportedFilesystems) config.boot.zfs.package;
+      ++ lib.optional (config.boot.supportedFilesystems.zfs or false) config.boot.zfs.package;
   });
 
   # Provides a fake "docker" binary mapping to podman
@@ -82,6 +82,8 @@ in
       type = types.bool;
       default = false;
       description = lib.mdDoc ''
+        **Deprecated**: use virtualisation.containers.cdi.dynamic.nvidia.enable instead.
+
          Enable use of NVIDIA GPUs from within podman containers.
       '';
     };
@@ -166,6 +168,12 @@ in
       inherit (networkConfig) dns_enabled network_interface;
     in
     lib.mkIf cfg.enable {
+      warnings = lib.optionals cfg.enableNvidia [
+        ''
          You have set virtualisation.podman.enableNvidia. This option is deprecated; please set virtualisation.containers.cdi.dynamic.nvidia.enable instead.
+        ''
+      ];
+
       environment.systemPackages = [ cfg.package ]
         ++ lib.optional cfg.dockerCompat dockerCompat;
 
@@ -208,9 +216,11 @@ in
         requires = [ "podman.service" ];
       };
 
+      systemd.services.podman.environment = config.networking.proxy.envVars;
       systemd.sockets.podman.wantedBy = [ "sockets.target" ];
       systemd.sockets.podman.socketConfig.SocketGroup = "podman";
 
+      systemd.user.services.podman.environment = config.networking.proxy.envVars;
       systemd.user.sockets.podman.wantedBy = [ "sockets.target" ];
 
       systemd.timers.podman-prune.timerConfig = lib.mkIf cfg.autoPrune.enable {
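
With the two `environment` lines added above, the podman system and user
services inherit the standard NixOS proxy settings; a sketch (the proxy URL is
illustrative):

    {
      virtualisation.podman.enable = true;

      # Propagated to systemd.services.podman and systemd.user.services.podman
      # via config.networking.proxy.envVars.
      networking.proxy.default = "http://proxy.example.com:3128";
      networking.proxy.noProxy = "127.0.0.1,localhost";
    }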
diff --git a/nixos/modules/virtualisation/qemu-vm.nix b/nixos/modules/virtualisation/qemu-vm.nix
index 3d7f3ccb62f84..75ba6dacc122c 100644
--- a/nixos/modules/virtualisation/qemu-vm.nix
+++ b/nixos/modules/virtualisation/qemu-vm.nix
@@ -701,7 +701,10 @@ in
           type = types.listOf types.str;
           default = [];
           example = [ "-vga std" ];
-          description = lib.mdDoc "Options passed to QEMU.";
+          description = lib.mdDoc ''
+            Options passed to QEMU.
+            See [QEMU User Documentation](https://www.qemu.org/docs/master/system/qemu-manpage) for a complete list.
+          '';
         };
 
       consoles = mkOption {
@@ -732,9 +735,10 @@ in
           description = lib.mdDoc ''
             Networking-related command-line options that should be passed to qemu.
             The default is to use userspace networking (SLiRP).
+            See the [QEMU Wiki on Networking](https://wiki.qemu.org/Documentation/Networking) for details.
 
             If you override this option, be advised to keep
-            ''${QEMU_NET_OPTS:+,$QEMU_NET_OPTS} (as seen in the example)
+            `''${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}` (as seen in the example)
             to keep the default runtime behaviour.
           '';
         };
@@ -809,14 +813,19 @@ in
           defaultText = "!cfg.useBootLoader";
           description =
             lib.mdDoc ''
-              If enabled, the virtual machine will boot directly into the kernel instead of through a bootloader. Other relevant parameters such as the initrd are also passed to QEMU.
+              If enabled, the virtual machine will boot directly into the kernel instead of through a bootloader.
+              Read more about this feature in the [QEMU documentation on Direct Linux Boot](https://qemu-project.gitlab.io/qemu/system/linuxboot.html).
 
+              This is enabled by default unless {option}`virtualisation.useBootLoader` is set.
               If you want to test netboot, consider disabling this option.
+              Enable a bootloader with {option}`virtualisation.useBootLoader` if you need one.
 
-              This will not boot / reboot correctly into a system that has switched to a different configuration on disk.
+              Relevant parameters such as those set in `boot.initrd` and `boot.kernelParams` are also passed to QEMU.
+              Additional parameters can be supplied on invocation through the environment variable `$QEMU_KERNEL_PARAMS`.
+              They are appended to the `-append` option; see the [QEMU User Documentation](https://www.qemu.org/docs/master/system/qemu-manpage) for details.
+              For example, to let QEMU use the parent terminal as the serial console, set `QEMU_KERNEL_PARAMS="console=ttyS0"`.
 
-              This is enabled by default if you don't enable bootloaders, but you can still enable a bootloader if you need.
-              Read more about this feature: <https://qemu-project.gitlab.io/qemu/system/linuxboot.html>.
+              This will not (re-)boot correctly into a system that has switched to a different configuration on disk.
             '';
         };
       initrd =
@@ -846,6 +855,8 @@ in
 
             If disabled, the kernel and initrd are directly booted,
             forgoing any bootloader.
+
+            Check the documentation on {option}`virtualisation.directBoot.enable` for details.
           '';
       };
 
@@ -866,9 +877,11 @@ in
         type = types.package;
         default = (pkgs.OVMF.override {
           secureBoot = cfg.useSecureBoot;
+          systemManagementModeRequired = cfg.useSecureBoot;
         }).fd;
         defaultText = ''(pkgs.OVMF.override {
           secureBoot = cfg.useSecureBoot;
+          systemManagementModeRequired = cfg.useSecureBoot;
         }).fd'';
         description =
         lib.mdDoc "OVMF firmware package, defaults to OVMF configured with secure boot if needed.";
@@ -1066,10 +1079,18 @@ in
         ''}
       '';
 
-    systemd.tmpfiles.rules = lib.mkIf config.boot.initrd.systemd.enable [
-      "f /etc/NIXOS 0644 root root -"
-      "d /boot 0644 root root -"
-    ];
+    systemd.tmpfiles.settings."10-qemu-vm" = lib.mkIf config.boot.initrd.systemd.enable {
+      "/etc/NIXOS".f = {
+        mode = "0644";
+        user = "root";
+        group = "root";
+      };
+      "${config.boot.loader.efi.efiSysMountPoint}".d = {
+        mode = "0644";
+        user = "root";
+        group = "root";
+      };
+    };
 
     # After booting, register the closure of the paths in
     # `virtualisation.additionalPaths' in the Nix database in the VM.  This
@@ -1164,6 +1185,10 @@ in
         "-tpmdev emulator,id=tpm_dev_0,chardev=chrtpm"
         "-device ${cfg.tpm.deviceModel},tpmdev=tpm_dev_0"
       ])
+      (mkIf (cfg.efi.OVMF.systemManagementModeRequired or false) [
+        "-machine" "q35,smm=on"
+        "-global" "driver=cfi.pflash01,property=secure,value=on"
+      ])
     ];
 
     virtualisation.qemu.drives = mkMerge [
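
A sketch tying the revised options together for a `nixos-rebuild build-vm`
variant (not part of the diff; values are illustrative):

    {
      virtualisation.vmVariant.virtualisation = {
        # Direct kernel boot is the default; set `useBootLoader = true`
        # to go through a bootloader instead.
        qemu.options = [ "-vga std" ];  # extra QEMU flags, see the option above

        # At runtime the run script still honours the documented variables, e.g.:
        #   QEMU_KERNEL_PARAMS="console=ttyS0" ./result/bin/run-<hostname>-vm
        #   QEMU_NET_OPTS="hostfwd=tcp::2222-:22" ./result/bin/run-<hostname>-vm
      };
    }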
diff --git a/nixos/modules/virtualisation/vmware-image.nix b/nixos/modules/virtualisation/vmware-image.nix
index a38713b4d4ee1..3674b37d0b97e 100644
--- a/nixos/modules/virtualisation/vmware-image.nix
+++ b/nixos/modules/virtualisation/vmware-image.nix
@@ -80,7 +80,6 @@ in {
     boot.growPartition = true;
 
     boot.loader.grub = {
-      version = 2;
       device = "nodev";
       efiSupport = true;
       efiInstallAsRemovable = true;