-rw-r--r--  ops/users/default.nix | 10
-rw-r--r--  tools/when/default.nix | 6
-rw-r--r--  tools/when/when.go | 206
-rw-r--r--  tvix/Cargo.lock | 462
-rw-r--r--  tvix/Cargo.nix | 1353
-rw-r--r--  tvix/Cargo.toml | 5
-rw-r--r--  tvix/README.md | 3
-rw-r--r--  tvix/boot/README.md | 2
-rw-r--r--  tvix/boot/tests/default.nix | 9
-rw-r--r--  tvix/build/Cargo.toml | 7
-rw-r--r--  tvix/build/src/bin/tvix-build.rs | 24
-rw-r--r--  tvix/castore/Cargo.toml | 6
-rw-r--r--  tvix/castore/src/blobservice/from_addr.rs | 43
-rw-r--r--  tvix/castore/src/blobservice/memory.rs | 28
-rw-r--r--  tvix/castore/src/blobservice/mod.rs | 2
-rw-r--r--  tvix/castore/src/blobservice/sled.rs | 150
-rw-r--r--  tvix/castore/src/blobservice/tests/mod.rs | 1
-rw-r--r--  tvix/castore/src/directoryservice/bigtable.rs | 2
-rw-r--r--  tvix/castore/src/directoryservice/closure_validator.rs | 57
-rw-r--r--  tvix/castore/src/directoryservice/from_addr.rs | 17
-rw-r--r--  tvix/castore/src/directoryservice/grpc.rs | 2
-rw-r--r--  tvix/castore/src/directoryservice/memory.rs | 9
-rw-r--r--  tvix/castore/src/directoryservice/mod.rs | 6
-rw-r--r--  tvix/castore/src/directoryservice/object_store.rs | 261
-rw-r--r--  tvix/castore/src/directoryservice/sled.rs | 101
-rw-r--r--  tvix/castore/src/directoryservice/tests/mod.rs | 1
-rw-r--r--  tvix/castore/src/directoryservice/traverse.rs | 95
-rw-r--r--  tvix/castore/src/directoryservice/utils.rs | 91
-rw-r--r--  tvix/castore/src/errors.rs | 7
-rw-r--r--  tvix/castore/src/import/archive.rs | 4
-rw-r--r--  tvix/castore/src/import/mod.rs | 163
-rw-r--r--  tvix/castore/src/proto/grpc_directoryservice_wrapper.rs | 83
-rw-r--r--  tvix/cli/Cargo.toml | 2
-rw-r--r--  tvix/cli/src/main.rs | 74
-rw-r--r--  tvix/default.nix | 4
-rw-r--r--  tvix/docs/src/SUMMARY.md | 4
-rw-r--r--  tvix/docs/src/TODO.md | 29
-rw-r--r--  tvix/docs/src/nix-daemon/changelog.md | 202
-rw-r--r--  tvix/docs/src/nix-daemon/logging.md | 122
-rw-r--r--  tvix/docs/src/nix-daemon/operations.md | 894
-rw-r--r--  tvix/docs/src/nix-daemon/serialization.md | 305
-rw-r--r--  tvix/glue/Cargo.toml | 6
-rw-r--r--  tvix/glue/benches/eval.rs | 24
-rw-r--r--  tvix/glue/src/builtins/import.rs | 4
-rw-r--r--  tvix/glue/src/builtins/mod.rs | 3
-rw-r--r--  tvix/glue/src/fetchers/mod.rs | 22
-rw-r--r--  tvix/glue/src/tests/mod.rs | 18
-rw-r--r--  tvix/glue/src/tvix_store_io.rs | 61
-rw-r--r--  tvix/nar-bridge-go/.gitignore (renamed from tvix/nar-bridge/.gitignore) | 0
-rw-r--r--  tvix/nar-bridge-go/README.md (renamed from tvix/nar-bridge/README.md) | 2
-rw-r--r--  tvix/nar-bridge-go/cmd/nar-bridge-http/main.go (renamed from tvix/nar-bridge/cmd/nar-bridge-http/main.go) | 4
-rw-r--r--  tvix/nar-bridge-go/cmd/nar-bridge-http/otel.go (renamed from tvix/nar-bridge/cmd/nar-bridge-http/otel.go) | 0
-rw-r--r--  tvix/nar-bridge-go/default.nix (renamed from tvix/nar-bridge/default.nix) | 2
-rw-r--r--  tvix/nar-bridge-go/go.mod (renamed from tvix/nar-bridge/go.mod) | 2
-rw-r--r--  tvix/nar-bridge-go/go.sum (renamed from tvix/nar-bridge/go.sum) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/http/nar_get.go (renamed from tvix/nar-bridge/pkg/http/nar_get.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/http/nar_put.go (renamed from tvix/nar-bridge/pkg/http/nar_put.go) | 2
-rw-r--r--  tvix/nar-bridge-go/pkg/http/narinfo.go (renamed from tvix/nar-bridge/pkg/http/narinfo.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/http/narinfo_get.go (renamed from tvix/nar-bridge/pkg/http/narinfo_get.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/http/narinfo_put.go (renamed from tvix/nar-bridge/pkg/http/narinfo_put.go) | 2
-rw-r--r--  tvix/nar-bridge-go/pkg/http/server.go (renamed from tvix/nar-bridge/pkg/http/server.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/http/util.go (renamed from tvix/nar-bridge/pkg/http/util.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/blob_upload.go (renamed from tvix/nar-bridge/pkg/importer/blob_upload.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/counting_writer.go (renamed from tvix/nar-bridge/pkg/importer/counting_writer.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/directory_upload.go (renamed from tvix/nar-bridge/pkg/importer/directory_upload.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/gen_pathinfo.go (renamed from tvix/nar-bridge/pkg/importer/gen_pathinfo.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/importer.go (renamed from tvix/nar-bridge/pkg/importer/importer.go) | 0
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/importer_test.go (renamed from tvix/nar-bridge/pkg/importer/importer_test.go) | 2
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/roundtrip_test.go (renamed from tvix/nar-bridge/pkg/importer/roundtrip_test.go) | 2
-rw-r--r--  tvix/nar-bridge-go/pkg/importer/util_test.go (renamed from tvix/nar-bridge/pkg/importer/util_test.go) | 0
-rw-r--r--  tvix/nar-bridge-go/testdata/emptydirectory.nar (renamed from tvix/nar-bridge/testdata/emptydirectory.nar) | bin 96 -> 96 bytes
-rw-r--r--  tvix/nar-bridge-go/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar (renamed from tvix/nar-bridge/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar) | bin 464152 -> 464152 bytes
-rw-r--r--  tvix/nar-bridge-go/testdata/onebyteexecutable.nar (renamed from tvix/nar-bridge/testdata/onebyteexecutable.nar) | bin 152 -> 152 bytes
-rw-r--r--  tvix/nar-bridge-go/testdata/onebyteregular.nar (renamed from tvix/nar-bridge/testdata/onebyteregular.nar) | bin 120 -> 120 bytes
-rw-r--r--  tvix/nar-bridge-go/testdata/popdirectories.nar (renamed from tvix/nar-bridge/testdata/popdirectories.nar) | bin 600 -> 600 bytes
-rw-r--r--  tvix/nar-bridge-go/testdata/symlink.nar (renamed from tvix/nar-bridge/testdata/symlink.nar) | bin 136 -> 136 bytes
-rw-r--r--  tvix/nix-compat/src/nar/mod.rs | 2
-rw-r--r--  tvix/nix-compat/src/nar/reader/async/mod.rs | 41
-rw-r--r--  tvix/nix-compat/src/nar/reader/async/test.rs | 12
-rw-r--r--  tvix/nix-compat/src/nar/reader/mod.rs | 31
-rw-r--r--  tvix/nix-compat/src/nar/reader/read.rs | 32
-rw-r--r--  tvix/nix-compat/src/nar/reader/test.rs | 12
-rw-r--r--  tvix/nix-compat/src/store_path/mod.rs | 13
-rw-r--r--  tvix/nix-compat/src/wire/bytes/mod.rs | 82
-rw-r--r--  tvix/nix-compat/src/wire/bytes/reader/mod.rs | 263
-rw-r--r--  tvix/nix-compat/src/wire/bytes/reader/trailer.rs | 15
-rw-r--r--  tvix/store/Cargo.toml | 20
-rw-r--r--  tvix/store/docs/api.md | 2
-rw-r--r--  tvix/store/src/bin/tvix-store.rs | 18
-rw-r--r--  tvix/store/src/import.rs | 9
-rw-r--r--  tvix/store/src/nar/import.rs | 340
-rw-r--r--  tvix/store/src/nar/mod.rs | 28
-rw-r--r--  tvix/store/src/nar/renderer.rs | 51
-rw-r--r--  tvix/store/src/pathinfoservice/bigtable.rs | 13
-rw-r--r--  tvix/store/src/pathinfoservice/combinators.rs | 111
-rw-r--r--  tvix/store/src/pathinfoservice/from_addr.rs | 6
-rw-r--r--  tvix/store/src/pathinfoservice/grpc.rs | 141
-rw-r--r--  tvix/store/src/pathinfoservice/lru.rs | 128
-rw-r--r--  tvix/store/src/pathinfoservice/memory.rs | 67
-rw-r--r--  tvix/store/src/pathinfoservice/mod.rs | 20
-rw-r--r--  tvix/store/src/pathinfoservice/nix_http.rs | 196
-rw-r--r--  tvix/store/src/pathinfoservice/sled.rs | 118
-rw-r--r--  tvix/store/src/pathinfoservice/tests/mod.rs | 2
-rw-r--r--  tvix/store/src/pathinfoservice/tests/utils.rs | 10
-rw-r--r--  tvix/store/src/proto/grpc_pathinfoservice_wrapper.rs | 25
-rw-r--r--  tvix/store/src/utils.rs | 17
-rw-r--r--  tvix/tools/crunch-v2/Cargo.lock | 23
-rw-r--r--  tvix/tools/crunch-v2/Cargo.toml | 2
-rw-r--r--  tvix/tools/crunch-v2/src/main.rs | 2
-rw-r--r--  tvix/tools/narinfo2parquet/Cargo.lock | 47
-rw-r--r--  tvix/tools/narinfo2parquet/Cargo.nix | 115
-rw-r--r--  tvix/website/landing-en.md | 2
-rw-r--r--  users/Profpatsch/my-prelude/default.nix | 1
-rw-r--r--  users/Profpatsch/my-prelude/my-prelude.cabal | 1
-rw-r--r--  users/Profpatsch/my-prelude/src/Arg.hs | 34
-rw-r--r--  users/Profpatsch/my-prelude/src/Postgres/MonadPostgres.hs | 259
-rw-r--r--  users/Profpatsch/whatcd-resolver/src/AppT.hs | 3
-rw-r--r--  users/Profpatsch/whatcd-resolver/src/Redacted.hs | 11
-rw-r--r--  users/Profpatsch/whatcd-resolver/src/WhatcdResolver.hs | 120
-rw-r--r--  users/Profpatsch/whatcd-resolver/whatcd-resolver.cabal | 4
-rw-r--r--  users/amjoseph/OWNERS | 3
-rw-r--r--  users/amjoseph/keys.nix | 22
-rw-r--r--  users/flokli/archeology/default.nix | 8
-rw-r--r--  users/flokli/keyboards/dilemma/default.nix | 18
-rw-r--r--  users/flokli/keyboards/k6_pro/default.nix | 18
-rw-r--r--  users/tazjin/nixos/modules/physical.nix | 1
126 files changed, 4707 insertions, 2820 deletions
diff --git a/ops/users/default.nix b/ops/users/default.nix
index c54a681dc..a50575f3f 100644
--- a/ops/users/default.nix
+++ b/ops/users/default.nix
@@ -229,4 +229,14 @@
     email = "tvl@alice-carroll.pet";
     password = "{ARGON2}$argon2id$v=19$m=19456,t=2,p=1$mt/0RzKw4RHxm7ybpMHP5Q$P/SDBMv5si9D98NFO/eZgh2+InlByqYxqAvQWhl+p0c";
   }
+  {
+    username = "yuka";
+    email = "tvl@yuka.dev";
+    password = "{ARGON2}$argon2id$v=19$m=65536,t=2,p=1$aEyiAIuynQMwfY7xE+pMxg$QdghylHO2JZMR/YyYf4UAnhhb/gBdAkoDeANEwdixxU";
+  }
+  {
+    username = "benjaminedwardwebb";
+    email = "benjaminedwardwebb@gmail.com";
+    password = "{ARGON2}$argon2id$v=19$m=19456,t=2,p=1$kdFNmxgIGsF8TkB/GoPy1A$GUXd3M35Jqxqlfra4gPCcFW3ehE0RVrlHOzaoD7Pu7s";
+  }
 ]
diff --git a/tools/when/default.nix b/tools/when/default.nix
new file mode 100644
index 000000000..1aee5e1ea
--- /dev/null
+++ b/tools/when/default.nix
@@ -0,0 +1,6 @@
+{ depot, ... }:
+
+depot.nix.buildGo.program {
+  name = "when";
+  srcs = [ ./when.go ];
+}
diff --git a/tools/when/when.go b/tools/when/when.go
new file mode 100644
index 000000000..a2ac494e8
--- /dev/null
+++ b/tools/when/when.go
@@ -0,0 +1,206 @@
+package main
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const usage = `usage: when <time>
+
+This program converts the given time into various formats (currently a local
+timestamp, UTC timestamp, and UNIX epoch). It tries to guess what the input is.
+
+Some valid queries:
+
+  2024-01-05
+  1715079241
+  tomorrow 5PM
+  -22h
+  -7h10m
+  Mar 15
+  Sep 3 18:00
+
+For now, a single timestamp and a single duration (which is added either to the
+current time or to the given time) are supported.
+
+func printTime(t time.Time) {
+	fmt.Println("Local:", t.Format("Mon 02 January 2006 at 15:04:05 MST"))
+	fmt.Println("UTC:  ", t.UTC().Format(time.RFC3339))
+	fmt.Println("UNIX: ", t.Unix())
+}
+
+type FieldSet uint8
+
+const (
+	SetYear FieldSet = 1 << iota
+	SetDay
+	SetMonth
+	SetHour
+	SetMinute
+	SetSecond
+	SetLocation
+)
+
+const (
+	SetDate  = SetYear | SetDay | SetMonth
+	SetClock = SetHour | SetMinute | SetSecond
+)
+
+// mergeTimes returns a new time.Time with the fields of this overridden by the
+// corresponding fields of that, for each field named in set.
+func mergeTimes(this time.Time, that time.Time, set FieldSet) time.Time {
+	year, month, day := this.Date()
+	hour, min, sec := this.Clock()
+	loc := this.Location()
+
+	if set&SetYear == SetYear {
+		year = that.Year()
+	}
+	if set&SetMonth == SetMonth {
+		month = that.Month()
+	}
+	if set&SetDay == SetDay {
+		day = that.Day()
+	}
+	if set&SetHour == SetHour {
+		hour = that.Hour()
+	}
+	if set&SetMinute == SetMinute {
+		min = that.Minute()
+	}
+	if set&SetSecond == SetSecond {
+		sec = that.Second()
+	}
+	if set&SetLocation == SetLocation {
+		loc = that.Location()
+	}
+
+	return time.Date(year, month, day, hour, min, sec, 0, loc)
+}
+
+func parseTime(input string) (time.Time, error) {
+	// try unix times
+	if i, err := strconv.ParseInt(input, 10, 64); err == nil {
+		if i < 9999999999 {
+			return time.Unix(i, 0), nil
+		}
+		if i < 9999999999999 {
+			return time.UnixMilli(i), nil
+		}
+	}
+
+	// try simple date/time formats
+	if t, err := time.Parse(time.DateOnly, input); err == nil {
+		return t, nil
+	}
+
+	if t, err := time.Parse(time.Kitchen, input); err == nil {
+		now := time.Now()
+		return mergeTimes(now, t, SetClock), nil
+	}
+
+	if t, err := time.Parse(time.TimeOnly, input); err == nil {
+		now := time.Now()
+		return mergeTimes(now, t, SetClock), nil
+	}
+
+	if t, err := time.Parse("15:04", input); err == nil {
+		now := time.Now()
+		return mergeTimes(now, t, SetClock), nil
+	}
+
+	if t, err := time.Parse("3PM", input); err == nil {
+		now := time.Now()
+		return mergeTimes(now, t, SetClock), nil
+	}
+
+	if t, err := time.Parse(time.DateTime, input); err == nil {
+		return t, nil
+	}
+
+	if t, err := time.Parse(time.Stamp, input); err == nil {
+		now := time.Now()
+		return mergeTimes(t, now, SetYear|SetLocation), nil
+	}
+
+	if t, err := time.Parse("Jan _2 15:04", input); err == nil {
+		now := time.Now()
+		return mergeTimes(t, now, SetYear|SetLocation), nil
+	}
+
+	if t, err := time.Parse("Jan _2", input); err == nil {
+		now := time.Now()
+		return mergeTimes(t, now, SetYear|SetLocation), nil
+	}
+
+	return time.Time{}, fmt.Errorf("could not parse time: %q", input)
+}
+
+func parseDuration(input string) (time.Duration, error) {
+	// some simple rewriting
+	switch input {
+	case "yesterday":
+		input = "-24h"
+	case "tomorrow":
+		input = "24h"
+	case "today", "now":
+		return time.Duration(0), nil
+	}
+
+	// TODO: days, months, weeks, ...
+	return time.ParseDuration(input)
+}
+
+func main() {
+	if len(os.Args) < 2 {
+		fmt.Fprintln(os.Stderr, usage)
+		os.Exit(1)
+	}
+
+	var d time.Duration
+	var t time.Time
+	var err error
+	var haveTime, haveDuration bool
+
+	// Try to parse entire input as one full thing, before getting more
+	// clever.
+	if t, err = parseTime(strings.Join(os.Args[1:], " ")); err == nil {
+		printTime(t)
+		return
+	}
+
+	for _, arg := range os.Args[1:] {
+		if !haveTime {
+			if t, err = parseTime(arg); err == nil {
+				haveTime = true
+				continue
+			}
+		}
+
+		if !haveDuration {
+			if d, err = parseDuration(arg); err == nil {
+				haveDuration = true
+				continue
+			}
+		}
+	}
+
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "Not sure what you want, try another time.")
+		os.Exit(1)
+	}
+
+	if haveTime && haveDuration {
+		printTime(t.Add(d))
+	} else if haveTime {
+		printTime(t)
+	} else if haveDuration {
+		printTime(time.Now().Add(d))
+	} else {
+		fmt.Fprintln(os.Stderr, "Not sure what you want, try another time.")
+		os.Exit(1)
+	}
+}
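The FieldSet bitmask above is what makes partial inputs work: a clock-only
input like "5PM" keeps today's date and location and overrides only the clock,
while a date-only input like "Mar 15" keeps the current year and location. A
minimal standalone sketch of that merge (mergeClock is a hypothetical
single-case reduction of mergeTimes, not part of this commit; the example
times are made up):

    package main

    import (
    	"fmt"
    	"time"
    )

    // mergeClock keeps the date and location of `this` and takes only the
    // wall-clock fields from `that` — the SetClock path of mergeTimes.
    func mergeClock(this, that time.Time) time.Time {
    	year, month, day := this.Date()
    	hour, min, sec := that.Clock()
    	return time.Date(year, month, day, hour, min, sec, 0, this.Location())
    }

    func main() {
    	now := time.Date(2024, time.May, 7, 9, 30, 0, 0, time.UTC)
    	// "3PM" is the same layout when.go tries for inputs like "5PM";
    	// the parsed result carries a zero date (0000-01-01).
    	parsed, err := time.Parse("3PM", "5PM")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(mergeClock(now, parsed)) // 2024-05-07 17:00:00 +0000 UTC
    }

Inputs such as "Sep 3 18:00" go through the inverse direction: the parsed
value supplies date and clock, and only SetYear|SetLocation are taken from
time.Now().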
diff --git a/tvix/Cargo.lock b/tvix/Cargo.lock
index 334b69b7f..dc5298c45 100644
--- a/tvix/Cargo.lock
+++ b/tvix/Cargo.lock
@@ -18,6 +18,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
 
 [[package]]
+name = "ahash"
+version = "0.8.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+ "version_check",
+ "zerocopy",
+]
+
+[[package]]
 name = "aho-corasick"
 version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -27,6 +39,12 @@ dependencies = [
 ]
 
 [[package]]
+name = "allocator-api2"
+version = "0.2.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
+
+[[package]]
 name = "android-tzdata"
 version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -134,9 +152,9 @@ dependencies = [
 
 [[package]]
 name = "async-compression"
-version = "0.4.6"
+version = "0.4.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c"
+checksum = "4e9eabd7a98fe442131a17c316bd9349c43695e49e730c3c8e12cfb5f4da2693"
 dependencies = [
  "bzip2",
  "flate2",
@@ -145,6 +163,8 @@ dependencies = [
  "pin-project-lite",
  "tokio",
  "xz2",
+ "zstd",
+ "zstd-safe",
 ]
 
 [[package]]
@@ -205,17 +225,6 @@ dependencies = [
 ]
 
 [[package]]
-name = "async-recursion"
-version = "1.0.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.48",
-]
-
-[[package]]
 name = "async-signal"
 version = "0.2.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -301,13 +310,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf"
 dependencies = [
  "async-trait",
- "axum-core 0.3.4",
+ "axum-core",
  "bitflags 1.3.2",
  "bytes",
  "futures-util",
- "http 0.2.11",
- "http-body 0.4.6",
- "hyper 0.14.28",
+ "http",
+ "http-body",
+ "hyper",
  "itoa",
  "matchit",
  "memchr",
@@ -323,40 +332,6 @@ dependencies = [
 ]
 
 [[package]]
-name = "axum"
-version = "0.7.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1236b4b292f6c4d6dc34604bb5120d85c3fe1d1aa596bd5cc52ca054d13e7b9e"
-dependencies = [
- "async-trait",
- "axum-core 0.4.3",
- "bytes",
- "futures-util",
- "http 1.1.0",
- "http-body 1.0.0",
- "http-body-util",
- "hyper 1.2.0",
- "hyper-util",
- "itoa",
- "matchit",
- "memchr",
- "mime",
- "percent-encoding",
- "pin-project-lite",
- "rustversion",
- "serde",
- "serde_json",
- "serde_path_to_error",
- "serde_urlencoded",
- "sync_wrapper",
- "tokio",
- "tower",
- "tower-layer",
- "tower-service",
- "tracing",
-]
-
-[[package]]
 name = "axum-core"
 version = "0.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -365,8 +340,8 @@ dependencies = [
  "async-trait",
  "bytes",
  "futures-util",
- "http 0.2.11",
- "http-body 0.4.6",
+ "http",
+ "http-body",
  "mime",
  "rustversion",
  "tower-layer",
@@ -374,27 +349,6 @@ dependencies = [
 ]
 
 [[package]]
-name = "axum-core"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3"
-dependencies = [
- "async-trait",
- "bytes",
- "futures-util",
- "http 1.1.0",
- "http-body 1.0.0",
- "http-body-util",
- "mime",
- "pin-project-lite",
- "rustversion",
- "sync_wrapper",
- "tower-layer",
- "tower-service",
- "tracing",
-]
-
-[[package]]
 name = "backtrace"
 version = "0.3.69"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -427,9 +381,9 @@ version = "0.2.9"
 source = "git+https://github.com/flokli/bigtable_rs?rev=0af404741dfc40eb9fa99cf4d4140a09c5c20df7#0af404741dfc40eb9fa99cf4d4140a09c5c20df7"
 dependencies = [
  "gcp_auth",
- "http 0.2.11",
+ "http",
  "log",
- "prost 0.12.3",
+ "prost",
  "prost-build",
  "prost-types",
  "prost-wkt",
@@ -439,7 +393,7 @@ dependencies = [
  "serde_with",
  "thiserror",
  "tokio",
- "tonic 0.11.0",
+ "tonic",
  "tonic-build",
  "tower",
 ]
@@ -1397,10 +1351,10 @@ dependencies = [
  "base64",
  "chrono",
  "home",
- "hyper 0.14.28",
+ "hyper",
  "hyper-rustls",
  "ring",
- "rustls 0.21.10",
+ "rustls 0.21.12",
  "rustls-pemfile 1.0.4",
  "serde",
  "serde_json",
@@ -1462,35 +1416,16 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
 
 [[package]]
 name = "h2"
-version = "0.3.24"
+version = "0.3.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9"
+checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8"
 dependencies = [
  "bytes",
  "fnv",
  "futures-core",
  "futures-sink",
  "futures-util",
- "http 0.2.11",
- "indexmap 2.1.0",
- "slab",
- "tokio",
- "tokio-util",
- "tracing",
-]
-
-[[package]]
-name = "h2"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51ee2dd2e4f378392eeff5d51618cd9a63166a2513846bbc55f21cfacd9199d4"
-dependencies = [
- "bytes",
- "fnv",
- "futures-core",
- "futures-sink",
- "futures-util",
- "http 1.1.0",
+ "http",
  "indexmap 2.1.0",
  "slab",
  "tokio",
@@ -1515,6 +1450,10 @@ name = "hashbrown"
 version = "0.14.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"
+dependencies = [
+ "ahash",
+ "allocator-api2",
+]
 
 [[package]]
 name = "heck"
@@ -1561,47 +1500,13 @@ dependencies = [
 ]
 
 [[package]]
-name = "http"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258"
-dependencies = [
- "bytes",
- "fnv",
- "itoa",
-]
-
-[[package]]
 name = "http-body"
 version = "0.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
 dependencies = [
  "bytes",
- "http 0.2.11",
- "pin-project-lite",
-]
-
-[[package]]
-name = "http-body"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643"
-dependencies = [
- "bytes",
- "http 1.1.0",
-]
-
-[[package]]
-name = "http-body-util"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d"
-dependencies = [
- "bytes",
- "futures-core",
- "http 1.1.0",
- "http-body 1.0.0",
+ "http",
  "pin-project-lite",
 ]
 
@@ -1633,9 +1538,9 @@ dependencies = [
  "futures-channel",
  "futures-core",
  "futures-util",
- "h2 0.3.24",
- "http 0.2.11",
- "http-body 0.4.6",
+ "h2",
+ "http",
+ "http-body",
  "httparse",
  "httpdate",
  "itoa",
@@ -1648,35 +1553,15 @@ dependencies = [
 ]
 
 [[package]]
-name = "hyper"
-version = "1.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a"
-dependencies = [
- "bytes",
- "futures-channel",
- "futures-util",
- "h2 0.4.3",
- "http 1.1.0",
- "http-body 1.0.0",
- "httparse",
- "httpdate",
- "itoa",
- "pin-project-lite",
- "smallvec",
- "tokio",
-]
-
-[[package]]
 name = "hyper-rustls"
 version = "0.24.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590"
 dependencies = [
  "futures-util",
- "http 0.2.11",
- "hyper 0.14.28",
- "rustls 0.21.10",
+ "http",
+ "hyper",
+ "rustls 0.21.12",
  "rustls-native-certs 0.6.3",
  "tokio",
  "tokio-rustls 0.24.1",
@@ -1688,29 +1573,13 @@ version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1"
 dependencies = [
- "hyper 0.14.28",
+ "hyper",
  "pin-project-lite",
  "tokio",
  "tokio-io-timeout",
 ]
 
 [[package]]
-name = "hyper-util"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa"
-dependencies = [
- "bytes",
- "futures-util",
- "http 1.1.0",
- "http-body 1.0.0",
- "hyper 1.2.0",
- "pin-project-lite",
- "socket2",
- "tokio",
-]
-
-[[package]]
 name = "iana-time-zone"
 version = "0.1.60"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2000,6 +1869,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
 
 [[package]]
+name = "lru"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc"
+dependencies = [
+ "hashbrown 0.14.3",
+]
+
+[[package]]
 name = "lzma-sys"
 version = "0.1.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2104,9 +1982,9 @@ dependencies = [
 
 [[package]]
 name = "mio"
-version = "0.8.10"
+version = "0.8.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09"
+checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
 dependencies = [
  "libc",
  "log",
@@ -2280,10 +2158,10 @@ dependencies = [
  "chrono",
  "futures",
  "humantime",
- "hyper 0.14.28",
+ "hyper",
  "itertools 0.12.0",
  "md-5",
- "parking_lot 0.12.1",
+ "parking_lot 0.12.2",
  "percent-encoding",
  "quick-xml",
  "rand",
@@ -2319,13 +2197,12 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
 
 [[package]]
 name = "opentelemetry"
-version = "0.21.0"
+version = "0.22.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a"
+checksum = "900d57987be3f2aeb70d385fff9b27fb74c5723cc9a52d904d4f9c807a0667bf"
 dependencies = [
  "futures-core",
  "futures-sink",
- "indexmap 2.1.0",
  "js-sys",
  "once_cell",
  "pin-project-lite",
@@ -2335,49 +2212,46 @@ dependencies = [
 
 [[package]]
 name = "opentelemetry-otlp"
-version = "0.14.0"
+version = "0.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f24cda83b20ed2433c68241f918d0f6fdec8b1d43b7a9590ab4420c5095ca930"
+checksum = "1a016b8d9495c639af2145ac22387dcb88e44118e45320d9238fbf4e7889abcb"
 dependencies = [
  "async-trait",
  "futures-core",
- "http 0.2.11",
+ "http",
  "opentelemetry",
  "opentelemetry-proto",
  "opentelemetry-semantic-conventions",
  "opentelemetry_sdk",
- "prost 0.11.9",
+ "prost",
  "thiserror",
  "tokio",
- "tonic 0.9.2",
+ "tonic",
 ]
 
 [[package]]
 name = "opentelemetry-proto"
-version = "0.4.0"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2e155ce5cc812ea3d1dffbd1539aed653de4bf4882d60e6e04dcf0901d674e1"
+checksum = "3a8fddc9b68f5b80dae9d6f510b88e02396f006ad48cac349411fbecc80caae4"
 dependencies = [
  "opentelemetry",
  "opentelemetry_sdk",
- "prost 0.11.9",
- "tonic 0.9.2",
+ "prost",
+ "tonic",
 ]
 
 [[package]]
 name = "opentelemetry-semantic-conventions"
-version = "0.13.0"
+version = "0.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f5774f1ef1f982ef2a447f6ee04ec383981a3ab99c8e77a1a7b30182e65bbc84"
-dependencies = [
- "opentelemetry",
-]
+checksum = "f9ab5bd6c42fb9349dcf28af2ba9a0667f697f9bdcca045d39f2cec5543e2910"
 
 [[package]]
 name = "opentelemetry_sdk"
-version = "0.21.2"
+version = "0.22.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f16aec8a98a457a52664d69e0091bac3a0abd18ead9b641cb00202ba4e0efe4"
+checksum = "9e90c7113be649e31e9a0f8b5ee24ed7a16923b322c3c5ab6367469c049d6b7e"
 dependencies = [
  "async-trait",
  "crossbeam-channel",
@@ -2438,9 +2312,9 @@ dependencies = [
 
 [[package]]
 name = "parking_lot"
-version = "0.12.1"
+version = "0.12.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
+checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb"
 dependencies = [
  "lock_api",
  "parking_lot_core 0.9.9",
@@ -2665,22 +2539,12 @@ dependencies = [
 
 [[package]]
 name = "prost"
-version = "0.11.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd"
-dependencies = [
- "bytes",
- "prost-derive 0.11.9",
-]
-
-[[package]]
-name = "prost"
 version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a"
 dependencies = [
  "bytes",
- "prost-derive 0.12.3",
+ "prost-derive",
 ]
 
 [[package]]
@@ -2697,7 +2561,7 @@ dependencies = [
  "once_cell",
  "petgraph",
  "prettyplease",
- "prost 0.12.3",
+ "prost",
  "prost-types",
  "pulldown-cmark",
  "pulldown-cmark-to-cmark",
@@ -2709,19 +2573,6 @@ dependencies = [
 
 [[package]]
 name = "prost-derive"
-version = "0.11.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4"
-dependencies = [
- "anyhow",
- "itertools 0.10.5",
- "proc-macro2",
- "quote",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "prost-derive"
 version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e"
@@ -2739,7 +2590,7 @@ version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e"
 dependencies = [
- "prost 0.12.3",
+ "prost",
 ]
 
 [[package]]
@@ -2750,7 +2601,7 @@ checksum = "4d8ef9c3f0f1dab910d2b7e2c24a8e4322e122eba6d7a1921eeebcebbc046c40"
 dependencies = [
  "chrono",
  "inventory",
- "prost 0.12.3",
+ "prost",
  "serde",
  "serde_derive",
  "serde_json",
@@ -2764,7 +2615,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5b31cae9a54ca84fee1504740a82eebf2479532905e106f63ca0c3bc8d780321"
 dependencies = [
  "heck",
- "prost 0.12.3",
+ "prost",
  "prost-build",
  "prost-types",
  "quote",
@@ -2777,7 +2628,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "435be4a8704091b4c5fb1d79799de7f2dbff53af05edf29385237f8cf7ab37ee"
 dependencies = [
  "chrono",
- "prost 0.12.3",
+ "prost",
  "prost-build",
  "prost-types",
  "prost-wkt",
@@ -3010,10 +2861,10 @@ dependencies = [
  "encoding_rs",
  "futures-core",
  "futures-util",
- "h2 0.3.24",
- "http 0.2.11",
- "http-body 0.4.6",
- "hyper 0.14.28",
+ "h2",
+ "http",
+ "http-body",
+ "hyper",
  "hyper-rustls",
  "ipnet",
  "js-sys",
@@ -3022,7 +2873,7 @@ dependencies = [
  "once_cell",
  "percent-encoding",
  "pin-project-lite",
- "rustls 0.21.10",
+ "rustls 0.21.12",
  "rustls-native-certs 0.6.3",
  "rustls-pemfile 1.0.4",
  "serde",
@@ -3154,9 +3005,9 @@ dependencies = [
 
 [[package]]
 name = "rustls"
-version = "0.21.10"
+version = "0.21.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba"
+checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e"
 dependencies = [
  "log",
  "ring",
@@ -3166,9 +3017,9 @@ dependencies = [
 
 [[package]]
 name = "rustls"
-version = "0.22.2"
+version = "0.22.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41"
+checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432"
 dependencies = [
  "log",
  "ring",
@@ -3391,16 +3242,6 @@ dependencies = [
 ]
 
 [[package]]
-name = "serde_path_to_error"
-version = "0.1.16"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6"
-dependencies = [
- "itoa",
- "serde",
-]
-
-[[package]]
 name = "serde_qs"
 version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -3860,11 +3701,10 @@ dependencies = [
 
 [[package]]
 name = "tokio-listener"
-version = "0.3.2"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96367e127b4cf47b92592a5154a563435fe28fe3fccf25917d4a34ee59c87303"
+checksum = "4134661e12ec11c6276be73544a43144a357b08dfab5c41fd226e15b5bc9a6b2"
 dependencies = [
- "axum 0.7.4",
  "document-features",
  "futures-core",
  "futures-util",
@@ -3873,7 +3713,7 @@ dependencies = [
  "socket2",
  "tokio",
  "tokio-util",
- "tonic 0.11.0",
+ "tonic",
  "tracing",
 ]
 
@@ -3905,7 +3745,7 @@ version = "0.24.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081"
 dependencies = [
- "rustls 0.21.10",
+ "rustls 0.21.12",
  "tokio",
 ]
 
@@ -3915,7 +3755,7 @@ version = "0.25.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f"
 dependencies = [
- "rustls 0.22.2",
+ "rustls 0.22.4",
  "rustls-pki-types",
  "tokio",
 ]
@@ -4010,51 +3850,23 @@ dependencies = [
 
 [[package]]
 name = "tonic"
-version = "0.9.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a"
-dependencies = [
- "async-trait",
- "axum 0.6.20",
- "base64",
- "bytes",
- "futures-core",
- "futures-util",
- "h2 0.3.24",
- "http 0.2.11",
- "http-body 0.4.6",
- "hyper 0.14.28",
- "hyper-timeout",
- "percent-encoding",
- "pin-project",
- "prost 0.11.9",
- "tokio",
- "tokio-stream",
- "tower",
- "tower-layer",
- "tower-service",
- "tracing",
-]
-
-[[package]]
-name = "tonic"
 version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13"
 dependencies = [
  "async-stream",
  "async-trait",
- "axum 0.6.20",
+ "axum",
  "base64",
  "bytes",
- "h2 0.3.24",
- "http 0.2.11",
- "http-body 0.4.6",
- "hyper 0.14.28",
+ "h2",
+ "http",
+ "http-body",
+ "hyper",
  "hyper-timeout",
  "percent-encoding",
  "pin-project",
- "prost 0.12.3",
+ "prost",
  "rustls-native-certs 0.7.0",
  "rustls-pemfile 2.1.0",
  "rustls-pki-types",
@@ -4086,11 +3898,11 @@ version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "548c227bd5c0fae5925812c4ec6c66ffcfced23ea370cb823f4d18f0fc1cb6a7"
 dependencies = [
- "prost 0.12.3",
+ "prost",
  "prost-types",
  "tokio",
  "tokio-stream",
- "tonic 0.11.0",
+ "tonic",
 ]
 
 [[package]]
@@ -4181,9 +3993,9 @@ dependencies = [
 
 [[package]]
 name = "tracing-opentelemetry"
-version = "0.22.0"
+version = "0.23.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c67ac25c5407e7b961fafc6f7e9aa5958fd297aada2d20fa2ae1737357e55596"
+checksum = "a9be14ba1bbe4ab79e9229f7f89fab8d120b865859f10527f31c033e599d2284"
 dependencies = [
  "js-sys",
  "once_cell",
@@ -4198,16 +4010,6 @@ dependencies = [
 ]
 
 [[package]]
-name = "tracing-serde"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1"
-dependencies = [
- "serde",
- "tracing-core",
-]
-
-[[package]]
 name = "tracing-subscriber"
 version = "0.3.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -4217,15 +4019,12 @@ dependencies = [
  "nu-ansi-term",
  "once_cell",
  "regex",
- "serde",
- "serde_json",
  "sharded-slab",
  "smallvec",
  "thread_local",
  "tracing",
  "tracing-core",
  "tracing-log",
- "tracing-serde",
 ]
 
 [[package]]
@@ -4241,13 +4040,13 @@ dependencies = [
  "bytes",
  "clap",
  "itertools 0.12.0",
- "prost 0.12.3",
+ "prost",
  "prost-build",
  "rstest",
  "thiserror",
  "tokio",
  "tokio-listener",
- "tonic 0.11.0",
+ "tonic",
  "tonic-build",
  "tonic-reflection",
  "tracing",
@@ -4260,6 +4059,7 @@ dependencies = [
 name = "tvix-castore"
 version = "0.1.0"
 dependencies = [
+ "async-compression",
  "async-process",
  "async-stream",
  "async-tempfile",
@@ -4276,10 +4076,10 @@ dependencies = [
  "lazy_static",
  "libc",
  "object_store",
- "parking_lot 0.12.1",
+ "parking_lot 0.12.2",
  "petgraph",
  "pin-project-lite",
- "prost 0.12.3",
+ "prost",
  "prost-build",
  "rstest",
  "rstest_reuse",
@@ -4294,7 +4094,7 @@ dependencies = [
  "tokio-stream",
  "tokio-tar",
  "tokio-util",
- "tonic 0.11.0",
+ "tonic",
  "tonic-build",
  "tonic-reflection",
  "tower",
@@ -4385,7 +4185,6 @@ name = "tvix-glue"
 version = "0.1.0"
 dependencies = [
  "async-compression",
- "async-recursion",
  "bstr",
  "bytes",
  "criterion",
@@ -4434,8 +4233,8 @@ name = "tvix-store"
 version = "0.1.0"
 dependencies = [
  "anyhow",
+ "async-compression",
  "async-process",
- "async-recursion",
  "async-stream",
  "bigtable_rs",
  "blake3",
@@ -4446,12 +4245,14 @@ dependencies = [
  "data-encoding",
  "futures",
  "lazy_static",
+ "lru",
  "nix-compat",
  "opentelemetry",
  "opentelemetry-otlp",
  "opentelemetry_sdk",
+ "parking_lot 0.12.2",
  "pin-project-lite",
- "prost 0.12.3",
+ "prost",
  "prost-build",
  "reqwest",
  "rstest",
@@ -4469,7 +4270,7 @@ dependencies = [
  "tokio-retry",
  "tokio-stream",
  "tokio-util",
- "tonic 0.11.0",
+ "tonic",
  "tonic-build",
  "tonic-reflection",
  "tower",
@@ -4479,7 +4280,6 @@ dependencies = [
  "tvix-castore",
  "url",
  "walkdir",
- "xz2",
 ]
 
 [[package]]
@@ -4813,9 +4613,9 @@ dependencies = [
 
 [[package]]
 name = "web-time"
-version = "0.2.4"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aa30049b1c872b72c89866d458eae9f20380ab280ffd1b1e18df2d3e2d98cfe0"
+checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb"
 dependencies = [
  "js-sys",
  "wasm-bindgen",
@@ -5066,6 +4866,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
 
 [[package]]
+name = "zerocopy"
+version = "0.7.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087"
+dependencies = [
+ "zerocopy-derive",
+]
+
+[[package]]
+name = "zerocopy-derive"
+version = "0.7.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.48",
+]
+
+[[package]]
 name = "zeroize"
 version = "1.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
diff --git a/tvix/Cargo.nix b/tvix/Cargo.nix
index 715d08d34..f6c3108fa 100644
--- a/tvix/Cargo.nix
+++ b/tvix/Cargo.nix
@@ -193,6 +193,49 @@ rec {
           "rustc-dep-of-std" = [ "core" "compiler_builtins" ];
         };
       };
+      "ahash" = rec {
+        crateName = "ahash";
+        version = "0.8.11";
+        edition = "2018";
+        sha256 = "04chdfkls5xmhp1d48gnjsmglbqibizs3bpbj6rsj604m10si7g8";
+        authors = [
+          "Tom Kaitchuck <Tom.Kaitchuck@gmail.com>"
+        ];
+        dependencies = [
+          {
+            name = "cfg-if";
+            packageId = "cfg-if";
+          }
+          {
+            name = "once_cell";
+            packageId = "once_cell";
+            usesDefaultFeatures = false;
+            target = { target, features }: (!(("arm" == target."arch" or null) && ("none" == target."os" or null)));
+            features = [ "alloc" ];
+          }
+          {
+            name = "zerocopy";
+            packageId = "zerocopy";
+            usesDefaultFeatures = false;
+            features = [ "simd" ];
+          }
+        ];
+        buildDependencies = [
+          {
+            name = "version_check";
+            packageId = "version_check";
+          }
+        ];
+        features = {
+          "atomic-polyfill" = [ "dep:atomic-polyfill" "once_cell/atomic-polyfill" ];
+          "compile-time-rng" = [ "const-random" ];
+          "const-random" = [ "dep:const-random" ];
+          "default" = [ "std" "runtime-rng" ];
+          "getrandom" = [ "dep:getrandom" ];
+          "runtime-rng" = [ "getrandom" ];
+          "serde" = [ "dep:serde" ];
+        };
+      };
       "aho-corasick" = rec {
         crateName = "aho-corasick";
         version = "1.1.2";
@@ -218,6 +261,21 @@ rec {
         };
         resolvedDefaultFeatures = [ "default" "perf-literal" "std" ];
       };
+      "allocator-api2" = rec {
+        crateName = "allocator-api2";
+        version = "0.2.18";
+        edition = "2018";
+        sha256 = "0kr6lfnxvnj164j1x38g97qjlhb7akppqzvgfs0697140ixbav2w";
+        authors = [
+          "Zakarum <zaq.dev@icloud.com>"
+        ];
+        features = {
+          "default" = [ "std" ];
+          "serde" = [ "dep:serde" ];
+          "std" = [ "alloc" ];
+        };
+        resolvedDefaultFeatures = [ "alloc" ];
+      };
       "android-tzdata" = rec {
         crateName = "android-tzdata";
         version = "0.1.1";
@@ -454,9 +512,9 @@ rec {
       };
       "async-compression" = rec {
         crateName = "async-compression";
-        version = "0.4.6";
+        version = "0.4.9";
         edition = "2018";
-        sha256 = "0b6874q56g1cx8ivs9j89d757rsh9kyrrwlp1852094jjrmg85m1";
+        sha256 = "14r6vbsbbkqjiqy0qwwywjakdi29jfyidhqp389l5r4gm7bsp7jf";
         authors = [
           "Wim Looman <wim@nemo157.com>"
           "Allen Bui <fairingrey@gmail.com>"
@@ -496,6 +554,27 @@ rec {
             packageId = "xz2";
             optional = true;
           }
+          {
+            name = "zstd";
+            packageId = "zstd";
+            rename = "libzstd";
+            optional = true;
+            usesDefaultFeatures = false;
+          }
+          {
+            name = "zstd-safe";
+            packageId = "zstd-safe";
+            optional = true;
+            usesDefaultFeatures = false;
+          }
+        ];
+        devDependencies = [
+          {
+            name = "tokio";
+            packageId = "tokio";
+            usesDefaultFeatures = false;
+            features = [ "io-util" "macros" "rt-multi-thread" "io-std" ];
+          }
         ];
         features = {
           "all" = [ "all-implementations" "all-algorithms" ];
@@ -518,7 +597,7 @@ rec {
           "zstd-safe" = [ "dep:zstd-safe" ];
           "zstdmt" = [ "zstd" "zstd-safe/zstdmt" ];
         };
-        resolvedDefaultFeatures = [ "bzip2" "flate2" "gzip" "tokio" "xz" "xz2" ];
+        resolvedDefaultFeatures = [ "bzip2" "flate2" "gzip" "libzstd" "tokio" "xz" "xz2" "zstd" "zstd-safe" ];
       };
       "async-io" = rec {
         crateName = "async-io";
@@ -698,35 +777,6 @@ rec {
         ];
 
       };
-      "async-recursion" = rec {
-        crateName = "async-recursion";
-        version = "1.0.5";
-        edition = "2018";
-        sha256 = "1l2vlgyaa9a2dd0y1vbqyppzsvpdr1y4rar4gn1qi68pl5dmmmaz";
-        procMacro = true;
-        authors = [
-          "Robert Usher <266585+dcchut@users.noreply.github.com>"
-        ];
-        dependencies = [
-          {
-            name = "proc-macro2";
-            packageId = "proc-macro2";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "quote";
-            packageId = "quote";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "syn";
-            packageId = "syn 2.0.48";
-            usesDefaultFeatures = false;
-            features = [ "full" "parsing" "printing" "proc-macro" "clone-impls" ];
-          }
-        ];
-
-      };
       "async-signal" = rec {
         crateName = "async-signal";
         version = "0.2.5";
@@ -944,7 +994,7 @@ rec {
         ];
 
       };
-      "axum 0.6.20" = rec {
+      "axum" = rec {
         crateName = "axum";
         version = "0.6.20";
         edition = "2021";
@@ -956,7 +1006,7 @@ rec {
           }
           {
             name = "axum-core";
-            packageId = "axum-core 0.3.4";
+            packageId = "axum-core";
           }
           {
             name = "bitflags";
@@ -974,15 +1024,15 @@ rec {
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
           }
           {
             name = "http-body";
-            packageId = "http-body 0.4.6";
+            packageId = "http-body";
           }
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             features = [ "stream" ];
           }
           {
@@ -1072,184 +1122,7 @@ rec {
           "ws" = [ "tokio" "dep:tokio-tungstenite" "dep:sha1" "dep:base64" ];
         };
       };
-      "axum 0.7.4" = rec {
-        crateName = "axum";
-        version = "0.7.4";
-        edition = "2021";
-        sha256 = "17kv7v8m981cqmfbv5m538fzxhw51l9bajv06kfddi7njarb8dhj";
-        dependencies = [
-          {
-            name = "async-trait";
-            packageId = "async-trait";
-          }
-          {
-            name = "axum-core";
-            packageId = "axum-core 0.4.3";
-          }
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            usesDefaultFeatures = false;
-            features = [ "alloc" ];
-          }
-          {
-            name = "http";
-            packageId = "http 1.1.0";
-          }
-          {
-            name = "http-body";
-            packageId = "http-body 1.0.0";
-          }
-          {
-            name = "http-body-util";
-            packageId = "http-body-util";
-          }
-          {
-            name = "hyper";
-            packageId = "hyper 1.2.0";
-            optional = true;
-          }
-          {
-            name = "hyper-util";
-            packageId = "hyper-util";
-            optional = true;
-            features = [ "tokio" "server" "server-auto" ];
-          }
-          {
-            name = "itoa";
-            packageId = "itoa";
-          }
-          {
-            name = "matchit";
-            packageId = "matchit";
-          }
-          {
-            name = "memchr";
-            packageId = "memchr";
-          }
-          {
-            name = "mime";
-            packageId = "mime";
-          }
-          {
-            name = "percent-encoding";
-            packageId = "percent-encoding";
-          }
-          {
-            name = "pin-project-lite";
-            packageId = "pin-project-lite";
-          }
-          {
-            name = "serde";
-            packageId = "serde";
-          }
-          {
-            name = "serde_json";
-            packageId = "serde_json";
-            optional = true;
-            features = [ "raw_value" ];
-          }
-          {
-            name = "serde_path_to_error";
-            packageId = "serde_path_to_error";
-            optional = true;
-          }
-          {
-            name = "serde_urlencoded";
-            packageId = "serde_urlencoded";
-            optional = true;
-          }
-          {
-            name = "sync_wrapper";
-            packageId = "sync_wrapper";
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            rename = "tokio";
-            optional = true;
-            features = [ "time" ];
-          }
-          {
-            name = "tower";
-            packageId = "tower";
-            usesDefaultFeatures = false;
-            features = [ "util" ];
-          }
-          {
-            name = "tower-layer";
-            packageId = "tower-layer";
-          }
-          {
-            name = "tower-service";
-            packageId = "tower-service";
-          }
-          {
-            name = "tracing";
-            packageId = "tracing";
-            optional = true;
-            usesDefaultFeatures = false;
-          }
-        ];
-        buildDependencies = [
-          {
-            name = "rustversion";
-            packageId = "rustversion";
-          }
-        ];
-        devDependencies = [
-          {
-            name = "rustversion";
-            packageId = "rustversion";
-          }
-          {
-            name = "serde";
-            packageId = "serde";
-            features = [ "derive" ];
-          }
-          {
-            name = "serde_json";
-            packageId = "serde_json";
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            rename = "tokio";
-            features = [ "macros" "rt" "rt-multi-thread" "net" "test-util" ];
-          }
-          {
-            name = "tower";
-            packageId = "tower";
-            rename = "tower";
-            features = [ "util" "timeout" "limit" "load-shed" "steer" "filter" ];
-          }
-          {
-            name = "tracing";
-            packageId = "tracing";
-          }
-        ];
-        features = {
-          "__private_docs" = [ "tower/full" "dep:tower-http" ];
-          "default" = [ "form" "http1" "json" "matched-path" "original-uri" "query" "tokio" "tower-log" "tracing" ];
-          "form" = [ "dep:serde_urlencoded" ];
-          "http1" = [ "dep:hyper" "hyper?/http1" ];
-          "http2" = [ "dep:hyper" "hyper?/http2" ];
-          "json" = [ "dep:serde_json" "dep:serde_path_to_error" ];
-          "macros" = [ "dep:axum-macros" ];
-          "multipart" = [ "dep:multer" ];
-          "query" = [ "dep:serde_urlencoded" ];
-          "tokio" = [ "dep:hyper-util" "dep:tokio" "tokio/net" "tokio/rt" "tower/make" "tokio/macros" ];
-          "tower-log" = [ "tower/log" ];
-          "tracing" = [ "dep:tracing" "axum-core/tracing" ];
-          "ws" = [ "dep:hyper" "tokio" "dep:tokio-tungstenite" "dep:sha1" "dep:base64" ];
-        };
-        resolvedDefaultFeatures = [ "default" "form" "http1" "json" "matched-path" "original-uri" "query" "tokio" "tower-log" "tracing" ];
-      };
-      "axum-core 0.3.4" = rec {
+      "axum-core" = rec {
         crateName = "axum-core";
         version = "0.3.4";
         edition = "2021";
@@ -1271,11 +1144,11 @@ rec {
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
           }
           {
             name = "http-body";
-            packageId = "http-body 0.4.6";
+            packageId = "http-body";
           }
           {
             name = "mime";
@@ -1309,85 +1182,6 @@ rec {
           "tracing" = [ "dep:tracing" ];
         };
       };
-      "axum-core 0.4.3" = rec {
-        crateName = "axum-core";
-        version = "0.4.3";
-        edition = "2021";
-        sha256 = "1qx28wg4j6qdcdrisqwyaavlzc0zvbsrcwa99zf9456lfbyn6p51";
-        dependencies = [
-          {
-            name = "async-trait";
-            packageId = "async-trait";
-          }
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            usesDefaultFeatures = false;
-            features = [ "alloc" ];
-          }
-          {
-            name = "http";
-            packageId = "http 1.1.0";
-          }
-          {
-            name = "http-body";
-            packageId = "http-body 1.0.0";
-          }
-          {
-            name = "http-body-util";
-            packageId = "http-body-util";
-          }
-          {
-            name = "mime";
-            packageId = "mime";
-          }
-          {
-            name = "pin-project-lite";
-            packageId = "pin-project-lite";
-          }
-          {
-            name = "sync_wrapper";
-            packageId = "sync_wrapper";
-          }
-          {
-            name = "tower-layer";
-            packageId = "tower-layer";
-          }
-          {
-            name = "tower-service";
-            packageId = "tower-service";
-          }
-          {
-            name = "tracing";
-            packageId = "tracing";
-            optional = true;
-            usesDefaultFeatures = false;
-          }
-        ];
-        buildDependencies = [
-          {
-            name = "rustversion";
-            packageId = "rustversion";
-          }
-        ];
-        devDependencies = [
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            usesDefaultFeatures = false;
-            features = [ "alloc" ];
-          }
-        ];
-        features = {
-          "__private_docs" = [ "dep:tower-http" ];
-          "tracing" = [ "dep:tracing" ];
-        };
-        resolvedDefaultFeatures = [ "tracing" ];
-      };
       "backtrace" = rec {
         crateName = "backtrace";
         version = "0.3.69";
@@ -1497,7 +1291,7 @@ rec {
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
           }
           {
             name = "log";
@@ -1505,7 +1299,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "prost-types";
@@ -1540,7 +1334,7 @@ rec {
           }
           {
             name = "tonic";
-            packageId = "tonic 0.11.0";
+            packageId = "tonic";
             features = [ "tls" "transport" ];
           }
           {
@@ -4284,7 +4078,7 @@ rec {
           }
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             features = [ "client" "runtime" "http2" ];
           }
           {
@@ -4299,7 +4093,7 @@ rec {
           }
           {
             name = "rustls";
-            packageId = "rustls 0.21.10";
+            packageId = "rustls 0.21.12";
           }
           {
             name = "rustls-pemfile";
@@ -4477,11 +4271,11 @@ rec {
         ];
 
       };
-      "h2 0.3.24" = rec {
+      "h2" = rec {
         crateName = "h2";
-        version = "0.3.24";
+        version = "0.3.26";
         edition = "2018";
-        sha256 = "1jf9488b66nayxzp3iw3b2rb64y49hdbbywnv9wfwrsv14i48b5v";
+        sha256 = "1s7msnfv7xprzs6xzfj5sg6p8bjcdpcqcmjjbkd345cyi1x55zl1";
         authors = [
           "Carl Lerche <me@carllerche.com>"
           "Sean McArthur <sean@seanmonstar.com>"
@@ -4512,79 +4306,7 @@ rec {
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
-          }
-          {
-            name = "indexmap";
-            packageId = "indexmap 2.1.0";
-            features = [ "std" ];
-          }
-          {
-            name = "slab";
-            packageId = "slab";
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            features = [ "io-util" ];
-          }
-          {
-            name = "tokio-util";
-            packageId = "tokio-util";
-            features = [ "codec" "io" ];
-          }
-          {
-            name = "tracing";
-            packageId = "tracing";
-            usesDefaultFeatures = false;
-            features = [ "std" ];
-          }
-        ];
-        devDependencies = [
-          {
-            name = "tokio";
-            packageId = "tokio";
-            features = [ "rt-multi-thread" "macros" "sync" "net" ];
-          }
-        ];
-        features = { };
-      };
-      "h2 0.4.3" = rec {
-        crateName = "h2";
-        version = "0.4.3";
-        edition = "2021";
-        sha256 = "1m4rj76zl77jany6p10k4mm1cqwsrlc1dmgmxwp3jy7kwk92vvji";
-        authors = [
-          "Carl Lerche <me@carllerche.com>"
-          "Sean McArthur <sean@seanmonstar.com>"
-        ];
-        dependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "fnv";
-            packageId = "fnv";
-          }
-          {
-            name = "futures-core";
-            packageId = "futures-core";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "futures-sink";
-            packageId = "futures-sink";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "http";
-            packageId = "http 1.1.0";
+            packageId = "http";
           }
           {
             name = "indexmap";
@@ -4668,6 +4390,21 @@ rec {
         authors = [
           "Amanieu d'Antras <amanieu@gmail.com>"
         ];
+        dependencies = [
+          {
+            name = "ahash";
+            packageId = "ahash";
+            optional = true;
+            usesDefaultFeatures = false;
+          }
+          {
+            name = "allocator-api2";
+            packageId = "allocator-api2";
+            optional = true;
+            usesDefaultFeatures = false;
+            features = [ "alloc" ];
+          }
+        ];
         features = {
           "ahash" = [ "dep:ahash" ];
           "alloc" = [ "dep:alloc" ];
@@ -4682,7 +4419,7 @@ rec {
           "rustc-dep-of-std" = [ "nightly" "core" "compiler_builtins" "alloc" "rustc-internal-api" ];
           "serde" = [ "dep:serde" ];
         };
-        resolvedDefaultFeatures = [ "inline-more" "raw" ];
+        resolvedDefaultFeatures = [ "ahash" "allocator-api2" "default" "inline-more" "raw" ];
       };
       "heck" = rec {
         crateName = "heck";
@@ -4757,7 +4494,7 @@ rec {
         ];
 
       };
-      "http 0.2.11" = rec {
+      "http" = rec {
         crateName = "http";
         version = "0.2.11";
         edition = "2018";
@@ -4783,36 +4520,7 @@ rec {
         ];
 
       };
-      "http 1.1.0" = rec {
-        crateName = "http";
-        version = "1.1.0";
-        edition = "2018";
-        sha256 = "0n426lmcxas6h75c2cp25m933pswlrfjz10v91vc62vib2sdvf91";
-        authors = [
-          "Alex Crichton <alex@alexcrichton.com>"
-          "Carl Lerche <me@carllerche.com>"
-          "Sean McArthur <sean@seanmonstar.com>"
-        ];
-        dependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "fnv";
-            packageId = "fnv";
-          }
-          {
-            name = "itoa";
-            packageId = "itoa";
-          }
-        ];
-        features = {
-          "default" = [ "std" ];
-        };
-        resolvedDefaultFeatures = [ "default" "std" ];
-      };
-      "http-body 0.4.6" = rec {
+      "http-body" = rec {
         crateName = "http-body";
         version = "0.4.6";
         edition = "2018";
@@ -4829,63 +4537,7 @@ rec {
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
-          }
-          {
-            name = "pin-project-lite";
-            packageId = "pin-project-lite";
-          }
-        ];
-
-      };
-      "http-body 1.0.0" = rec {
-        crateName = "http-body";
-        version = "1.0.0";
-        edition = "2018";
-        sha256 = "0hyn8n3iadrbwq8y0p1rl1275s4nm49bllw5wji29g4aa3dqbb0w";
-        authors = [
-          "Carl Lerche <me@carllerche.com>"
-          "Lucio Franco <luciofranco14@gmail.com>"
-          "Sean McArthur <sean@seanmonstar.com>"
-        ];
-        dependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "http";
-            packageId = "http 1.1.0";
-          }
-        ];
-
-      };
-      "http-body-util" = rec {
-        crateName = "http-body-util";
-        version = "0.1.1";
-        edition = "2018";
-        sha256 = "07agldas2qgcfc05ckiarlmf9vzragbda823nqhrqrc6mjrghx84";
-        authors = [
-          "Carl Lerche <me@carllerche.com>"
-          "Lucio Franco <luciofranco14@gmail.com>"
-          "Sean McArthur <sean@seanmonstar.com>"
-        ];
-        dependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "futures-core";
-            packageId = "futures-core";
-          }
-          {
-            name = "http";
-            packageId = "http 1.1.0";
-          }
-          {
-            name = "http-body";
-            packageId = "http-body 1.0.0";
+            packageId = "http";
           }
           {
             name = "pin-project-lite";
@@ -4927,7 +4579,7 @@ rec {
         ];
 
       };
-      "hyper 0.14.28" = rec {
+      "hyper" = rec {
         crateName = "hyper";
         version = "0.14.28";
         edition = "2018";
@@ -4956,16 +4608,16 @@ rec {
           }
           {
             name = "h2";
-            packageId = "h2 0.3.24";
+            packageId = "h2";
             optional = true;
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
           }
           {
             name = "http-body";
-            packageId = "http-body 0.4.6";
+            packageId = "http-body";
           }
           {
             name = "httparse";
@@ -5034,104 +4686,6 @@ rec {
         };
         resolvedDefaultFeatures = [ "client" "default" "full" "h2" "http1" "http2" "runtime" "server" "socket2" "stream" "tcp" ];
       };
-      "hyper 1.2.0" = rec {
-        crateName = "hyper";
-        version = "1.2.0";
-        edition = "2021";
-        sha256 = "0fi6k7hz5fmdph0a5r8hw50d7h2n9zxkizmafcmb65f67bblhr8q";
-        authors = [
-          "Sean McArthur <sean@seanmonstar.com>"
-        ];
-        dependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "futures-channel";
-            packageId = "futures-channel";
-            optional = true;
-          }
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            optional = true;
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "h2";
-            packageId = "h2 0.4.3";
-            optional = true;
-          }
-          {
-            name = "http";
-            packageId = "http 1.1.0";
-          }
-          {
-            name = "http-body";
-            packageId = "http-body 1.0.0";
-          }
-          {
-            name = "httparse";
-            packageId = "httparse";
-            optional = true;
-          }
-          {
-            name = "httpdate";
-            packageId = "httpdate";
-            optional = true;
-          }
-          {
-            name = "itoa";
-            packageId = "itoa";
-            optional = true;
-          }
-          {
-            name = "pin-project-lite";
-            packageId = "pin-project-lite";
-            optional = true;
-          }
-          {
-            name = "smallvec";
-            packageId = "smallvec";
-            optional = true;
-            features = [ "const_generics" "const_new" ];
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            features = [ "sync" ];
-          }
-        ];
-        devDependencies = [
-          {
-            name = "futures-channel";
-            packageId = "futures-channel";
-            features = [ "sink" ];
-          }
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            usesDefaultFeatures = false;
-            features = [ "sink" ];
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            features = [ "fs" "macros" "net" "io-std" "io-util" "rt" "rt-multi-thread" "sync" "time" "test-util" ];
-          }
-        ];
-        features = {
-          "client" = [ "dep:want" "dep:pin-project-lite" "dep:smallvec" ];
-          "ffi" = [ "dep:libc" "dep:http-body-util" ];
-          "full" = [ "client" "http1" "http2" "server" ];
-          "http1" = [ "dep:futures-channel" "dep:futures-util" "dep:httparse" "dep:itoa" ];
-          "http2" = [ "dep:futures-channel" "dep:futures-util" "dep:h2" ];
-          "server" = [ "dep:httpdate" "dep:pin-project-lite" "dep:smallvec" ];
-          "tracing" = [ "dep:tracing" ];
-        };
-        resolvedDefaultFeatures = [ "default" "http1" "http2" "server" ];
-      };
       "hyper-rustls" = rec {
         crateName = "hyper-rustls";
         version = "0.24.2";
@@ -5145,17 +4699,17 @@ rec {
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
           }
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             usesDefaultFeatures = false;
             features = [ "client" ];
           }
           {
             name = "rustls";
-            packageId = "rustls 0.21.10";
+            packageId = "rustls 0.21.12";
             usesDefaultFeatures = false;
           }
           {
@@ -5176,12 +4730,12 @@ rec {
         devDependencies = [
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             features = [ "full" ];
           }
           {
             name = "rustls";
-            packageId = "rustls 0.21.10";
+            packageId = "rustls 0.21.12";
             usesDefaultFeatures = false;
             features = [ "tls12" ];
           }
@@ -5218,7 +4772,7 @@ rec {
         dependencies = [
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             features = [ "client" ];
           }
           {
@@ -5237,7 +4791,7 @@ rec {
         devDependencies = [
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             features = [ "client" "http1" "tcp" ];
           }
           {
@@ -5248,82 +4802,6 @@ rec {
         ];
 
       };
-      "hyper-util" = rec {
-        crateName = "hyper-util";
-        version = "0.1.3";
-        edition = "2021";
-        sha256 = "1akngan7j0n2n0wd25c6952mvqbkj9gp1lcwzyxjc0d37l8yyf6a";
-        authors = [
-          "Sean McArthur <sean@seanmonstar.com>"
-        ];
-        dependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "http";
-            packageId = "http 1.1.0";
-          }
-          {
-            name = "http-body";
-            packageId = "http-body 1.0.0";
-          }
-          {
-            name = "hyper";
-            packageId = "hyper 1.2.0";
-          }
-          {
-            name = "pin-project-lite";
-            packageId = "pin-project-lite";
-          }
-          {
-            name = "socket2";
-            packageId = "socket2";
-            optional = true;
-            features = [ "all" ];
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            optional = true;
-            features = [ "net" "rt" "time" ];
-          }
-        ];
-        devDependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "hyper";
-            packageId = "hyper 1.2.0";
-            features = [ "full" ];
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            features = [ "macros" "test-util" ];
-          }
-        ];
-        features = {
-          "client" = [ "hyper/client" "dep:tracing" "dep:futures-channel" "dep:tower" "dep:tower-service" ];
-          "client-legacy" = [ "client" ];
-          "full" = [ "client" "client-legacy" "server" "server-auto" "service" "http1" "http2" "tokio" ];
-          "http1" = [ "hyper/http1" ];
-          "http2" = [ "hyper/http2" ];
-          "server" = [ "hyper/server" ];
-          "server-auto" = [ "server" "http1" "http2" ];
-          "service" = [ "dep:tower" "dep:tower-service" ];
-          "tokio" = [ "dep:tokio" "dep:socket2" ];
-        };
-        resolvedDefaultFeatures = [ "default" "http1" "http2" "server" "server-auto" "tokio" ];
-      };
       "iana-time-zone" = rec {
         crateName = "iana-time-zone";
         version = "0.1.60";
@@ -6155,6 +5633,28 @@ rec {
         };
         resolvedDefaultFeatures = [ "std" ];
       };
+      "lru" = rec {
+        crateName = "lru";
+        version = "0.12.3";
+        edition = "2015";
+        sha256 = "1p5hryc967wdh56q9wzb2x9gdqy3yd0sqmnb2fcf7z28wrsjw9nk";
+        authors = [
+          "Jerome Froelich <jeromefroelic@hotmail.com>"
+        ];
+        dependencies = [
+          {
+            name = "hashbrown";
+            packageId = "hashbrown 0.14.3";
+            optional = true;
+          }
+        ];
+        features = {
+          "default" = [ "hashbrown" ];
+          "hashbrown" = [ "dep:hashbrown" ];
+          "nightly" = [ "hashbrown" "hashbrown/nightly" ];
+        };
+        resolvedDefaultFeatures = [ "default" "hashbrown" ];
+      };
       "lzma-sys" = rec {
         crateName = "lzma-sys";
         version = "0.1.20";
@@ -6424,9 +5924,9 @@ rec {
       };
       "mio" = rec {
         crateName = "mio";
-        version = "0.8.10";
+        version = "0.8.11";
         edition = "2018";
-        sha256 = "02gyaxvaia9zzi4drrw59k9s0j6pa5d1y2kv7iplwjipdqlhngcg";
+        sha256 = "034byyl0ardml5yliy1hmvx8arkmn9rv479pid794sm07ia519m4";
         authors = [
           "Carl Lerche <me@carllerche.com>"
           "Thomas de Zeeuw <thomasdezeeuw@gmail.com>"
@@ -7009,7 +6509,7 @@ rec {
           }
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             optional = true;
             usesDefaultFeatures = false;
           }
@@ -7025,7 +6525,7 @@ rec {
           }
           {
             name = "parking_lot";
-            packageId = "parking_lot 0.12.1";
+            packageId = "parking_lot 0.12.2";
           }
           {
             name = "percent-encoding";
@@ -7103,7 +6603,7 @@ rec {
         devDependencies = [
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             features = [ "server" ];
           }
           {
@@ -7172,9 +6672,9 @@ rec {
       };
       "opentelemetry" = rec {
         crateName = "opentelemetry";
-        version = "0.21.0";
+        version = "0.22.0";
         edition = "2021";
-        sha256 = "12jfmyx8k9q2sjlx4wp76ddzaf94i7lnkliv1c9mj164bnd36chy";
+        sha256 = "1gv70rx8172g9n82v9f97ircax7v4ydzyprq1nvsxwp3gfc5f3ch";
         dependencies = [
           {
             name = "futures-core";
@@ -7185,10 +6685,6 @@ rec {
             packageId = "futures-sink";
           }
           {
-            name = "indexmap";
-            packageId = "indexmap 2.1.0";
-          }
-          {
             name = "js-sys";
             packageId = "js-sys";
             target = { target, features }: (("wasm32" == target."arch" or null) && (!("wasi" == target."os" or null)));
@@ -7205,6 +6701,7 @@ rec {
           {
             name = "thiserror";
             packageId = "thiserror";
+            usesDefaultFeatures = false;
           }
           {
             name = "urlencoding";
@@ -7222,9 +6719,9 @@ rec {
       };
       "opentelemetry-otlp" = rec {
         crateName = "opentelemetry-otlp";
-        version = "0.14.0";
+        version = "0.15.0";
         edition = "2021";
-        sha256 = "0c59bh4wa824mf89ayivsjqwipkg1y6r27r4d0y47lhfna1xlk7j";
+        sha256 = "1jxbi5w4xgwg4gcj0lz4310y926bglw25b2546pkkilmjj6nn08s";
         dependencies = [
           {
             name = "async-trait";
@@ -7236,8 +6733,9 @@ rec {
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
             optional = true;
+            usesDefaultFeatures = false;
           }
           {
             name = "opentelemetry";
@@ -7260,45 +6758,45 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.11.9";
+            packageId = "prost";
             optional = true;
           }
           {
             name = "thiserror";
             packageId = "thiserror";
+            usesDefaultFeatures = false;
           }
           {
             name = "tokio";
             packageId = "tokio";
             optional = true;
+            usesDefaultFeatures = false;
             features = [ "sync" "rt" ];
           }
           {
             name = "tonic";
-            packageId = "tonic 0.9.2";
+            packageId = "tonic";
             optional = true;
+            usesDefaultFeatures = false;
           }
         ];
         devDependencies = [
           {
             name = "tokio";
             packageId = "tokio";
+            usesDefaultFeatures = false;
             features = [ "macros" "rt-multi-thread" ];
           }
         ];
         features = {
           "default" = [ "grpc-tonic" "trace" ];
-          "grpc-sys" = [ "grpcio" "opentelemetry-proto/gen-grpcio" ];
           "grpc-tonic" = [ "tonic" "prost" "http" "tokio" "opentelemetry-proto/gen-tonic" ];
-          "grpcio" = [ "dep:grpcio" ];
           "gzip-tonic" = [ "tonic/gzip" ];
           "http" = [ "dep:http" ];
           "http-proto" = [ "prost" "opentelemetry-http" "opentelemetry-proto/gen-tonic-messages" "http" "trace" "metrics" ];
           "integration-testing" = [ "tonic" "prost" "tokio/full" "trace" ];
           "logs" = [ "opentelemetry/logs" "opentelemetry_sdk/logs" "opentelemetry-proto/logs" ];
           "metrics" = [ "opentelemetry/metrics" "opentelemetry_sdk/metrics" "opentelemetry-proto/metrics" ];
-          "openssl" = [ "grpcio/openssl" ];
-          "openssl-vendored" = [ "grpcio/openssl-vendored" ];
           "opentelemetry-http" = [ "dep:opentelemetry-http" ];
           "prost" = [ "dep:prost" ];
           "reqwest" = [ "dep:reqwest" ];
@@ -7307,8 +6805,6 @@ rec {
           "reqwest-rustls" = [ "reqwest" "reqwest/rustls-tls-native-roots" ];
           "serde" = [ "dep:serde" ];
           "serialize" = [ "serde" ];
-          "surf" = [ "dep:surf" ];
-          "surf-client" = [ "surf" "opentelemetry-http/surf" ];
           "tls" = [ "tonic/tls" ];
           "tls-roots" = [ "tls" "tonic/tls-roots" ];
           "tokio" = [ "dep:tokio" ];
@@ -7319,9 +6815,9 @@ rec {
       };
       "opentelemetry-proto" = rec {
         crateName = "opentelemetry-proto";
-        version = "0.4.0";
+        version = "0.5.0";
         edition = "2021";
-        sha256 = "1qblsq0hkksdw3k60bc8yi5xwlynmqwibggz3lyyl4n8bk75bqd2";
+        sha256 = "1r5a1k4fryqijhsar36ld806yf82isw11xfnx7d80nwgnv4xv3rs";
         dependencies = [
           {
             name = "opentelemetry";
@@ -7335,53 +6831,47 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.11.9";
+            packageId = "prost";
             optional = true;
           }
           {
             name = "tonic";
-            packageId = "tonic 0.9.2";
+            packageId = "tonic";
             optional = true;
             usesDefaultFeatures = false;
             features = [ "codegen" "prost" ];
           }
         ];
         features = {
-          "full" = [ "gen-tonic" "gen-grpcio" "trace" "logs" "metrics" "zpages" "with-serde" ];
-          "gen-grpcio" = [ "grpcio" "prost" ];
+          "full" = [ "gen-tonic" "trace" "logs" "metrics" "zpages" "with-serde" ];
           "gen-tonic" = [ "gen-tonic-messages" "tonic/transport" ];
           "gen-tonic-messages" = [ "tonic" "prost" ];
-          "grpcio" = [ "dep:grpcio" ];
+          "hex" = [ "dep:hex" ];
           "logs" = [ "opentelemetry/logs" "opentelemetry_sdk/logs" ];
           "metrics" = [ "opentelemetry/metrics" "opentelemetry_sdk/metrics" ];
           "prost" = [ "dep:prost" ];
+          "schemars" = [ "dep:schemars" ];
           "serde" = [ "dep:serde" ];
           "tonic" = [ "dep:tonic" ];
           "trace" = [ "opentelemetry/trace" "opentelemetry_sdk/trace" ];
-          "with-serde" = [ "serde" ];
+          "with-schemars" = [ "schemars" ];
+          "with-serde" = [ "serde" "hex" ];
           "zpages" = [ "trace" ];
         };
         resolvedDefaultFeatures = [ "gen-tonic" "gen-tonic-messages" "prost" "tonic" "trace" ];
       };
       "opentelemetry-semantic-conventions" = rec {
         crateName = "opentelemetry-semantic-conventions";
-        version = "0.13.0";
+        version = "0.14.0";
         edition = "2021";
-        sha256 = "115wbgk840dklyhpg3lwp4x1m643qd7f0vkz8hmfz0pry4g4yxzm";
-        dependencies = [
-          {
-            name = "opentelemetry";
-            packageId = "opentelemetry";
-            usesDefaultFeatures = false;
-          }
-        ];
+        sha256 = "04197racbkpj75fh9jnwkdznjzv6l2ljpbr8ryfk9f9gqkb5pazr";
 
       };
       "opentelemetry_sdk" = rec {
         crateName = "opentelemetry_sdk";
-        version = "0.21.2";
+        version = "0.22.1";
         edition = "2021";
-        sha256 = "1r7gw2j2n800rd0vdnga32yhlfmc3c4y0sadcr97licam74aw5ig";
+        sha256 = "0zkbkl29qik7cfmwbhr2ncink8fp9vi5x2qgk8gf6jg67c8wg44y";
         dependencies = [
           {
             name = "async-trait";
@@ -7434,11 +6924,12 @@ rec {
             packageId = "rand";
             optional = true;
             usesDefaultFeatures = false;
-            features = [ "std" "std_rng" ];
+            features = [ "std" "std_rng" "small_rng" ];
           }
           {
             name = "thiserror";
             packageId = "thiserror";
+            usesDefaultFeatures = false;
           }
           {
             name = "tokio";
@@ -7596,11 +7087,11 @@ rec {
         };
         resolvedDefaultFeatures = [ "default" ];
       };
-      "parking_lot 0.12.1" = rec {
+      "parking_lot 0.12.2" = rec {
         crateName = "parking_lot";
-        version = "0.12.1";
-        edition = "2018";
-        sha256 = "13r2xk7mnxfc5g0g6dkdxqdqad99j7s7z8zhzz4npw5r0g0v4hip";
+        version = "0.12.2";
+        edition = "2021";
+        sha256 = "1ys2dzz6cysjmwyivwxczl1ljpcf5cj4qmhdj07d5bkc9z5g0jky";
         authors = [
           "Amanieu d'Antras <amanieu@gmail.com>"
         ];
@@ -8241,35 +7732,7 @@ rec {
         };
         resolvedDefaultFeatures = [ "alloc" "bit-set" "default" "fork" "lazy_static" "regex-syntax" "rusty-fork" "std" "tempfile" "timeout" ];
       };
-      "prost 0.11.9" = rec {
-        crateName = "prost";
-        version = "0.11.9";
-        edition = "2021";
-        sha256 = "1kc1hva2h894hc0zf6r4r8fsxfpazf7xn5rj3jya9sbrsyhym0hb";
-        authors = [
-          "Dan Burkert <dan@danburkert.com>"
-          "Lucio Franco <luciofranco14@gmail.com"
-          "Tokio Contributors <team@tokio.rs>"
-        ];
-        dependencies = [
-          {
-            name = "bytes";
-            packageId = "bytes";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "prost-derive";
-            packageId = "prost-derive 0.11.9";
-            optional = true;
-          }
-        ];
-        features = {
-          "default" = [ "prost-derive" "std" ];
-          "prost-derive" = [ "dep:prost-derive" ];
-        };
-        resolvedDefaultFeatures = [ "default" "prost-derive" "std" ];
-      };
-      "prost 0.12.3" = rec {
+      "prost" = rec {
         crateName = "prost";
         version = "0.12.3";
         edition = "2021";
@@ -8287,7 +7750,7 @@ rec {
           }
           {
             name = "prost-derive";
-            packageId = "prost-derive 0.12.3";
+            packageId = "prost-derive";
             optional = true;
           }
         ];
@@ -8348,7 +7811,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
             usesDefaultFeatures = false;
           }
           {
@@ -8399,45 +7862,7 @@ rec {
         };
         resolvedDefaultFeatures = [ "cleanup-markdown" "default" "format" "prettyplease" "pulldown-cmark" "pulldown-cmark-to-cmark" "syn" ];
       };
-      "prost-derive 0.11.9" = rec {
-        crateName = "prost-derive";
-        version = "0.11.9";
-        edition = "2021";
-        sha256 = "1d3mw2s2jba1f7wcjmjd6ha2a255p2rmynxhm1nysv9w1z8xilp5";
-        procMacro = true;
-        authors = [
-          "Dan Burkert <dan@danburkert.com>"
-          "Lucio Franco <luciofranco14@gmail.com>"
-          "Tokio Contributors <team@tokio.rs>"
-        ];
-        dependencies = [
-          {
-            name = "anyhow";
-            packageId = "anyhow";
-          }
-          {
-            name = "itertools";
-            packageId = "itertools 0.10.5";
-            usesDefaultFeatures = false;
-            features = [ "use_alloc" ];
-          }
-          {
-            name = "proc-macro2";
-            packageId = "proc-macro2";
-          }
-          {
-            name = "quote";
-            packageId = "quote";
-          }
-          {
-            name = "syn";
-            packageId = "syn 1.0.109";
-            features = [ "extra-traits" ];
-          }
-        ];
-
-      };
-      "prost-derive 0.12.3" = rec {
+      "prost-derive" = rec {
         crateName = "prost-derive";
         version = "0.12.3";
         edition = "2021";
@@ -8488,7 +7913,7 @@ rec {
         dependencies = [
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
             usesDefaultFeatures = false;
             features = [ "prost-derive" ];
           }
@@ -8520,7 +7945,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "serde";
@@ -8556,7 +7981,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "prost-build";
@@ -8590,7 +8015,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "prost-wkt";
@@ -8612,7 +8037,7 @@ rec {
         buildDependencies = [
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "prost-build";
@@ -9265,21 +8690,21 @@ rec {
           }
           {
             name = "h2";
-            packageId = "h2 0.3.24";
+            packageId = "h2";
             target = { target, features }: (!("wasm32" == target."arch" or null));
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
           }
           {
             name = "http-body";
-            packageId = "http-body 0.4.6";
+            packageId = "http-body";
             target = { target, features }: (!("wasm32" == target."arch" or null));
           }
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             usesDefaultFeatures = false;
             target = { target, features }: (!("wasm32" == target."arch" or null));
             features = [ "tcp" "http1" "http2" "client" "runtime" ];
@@ -9328,7 +8753,7 @@ rec {
           }
           {
             name = "rustls";
-            packageId = "rustls 0.21.10";
+            packageId = "rustls 0.21.12";
             optional = true;
             target = { target, features }: (!("wasm32" == target."arch" or null));
             features = [ "dangerous_configuration" ];
@@ -9428,7 +8853,7 @@ rec {
         devDependencies = [
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             usesDefaultFeatures = false;
             target = { target, features }: (!("wasm32" == target."arch" or null));
             features = [ "tcp" "stream" "http1" "http2" "client" "server" "runtime" ];
@@ -9890,11 +9315,11 @@ rec {
         };
         resolvedDefaultFeatures = [ "alloc" "default" "event" "fs" "net" "pipe" "process" "std" "termios" "time" "use-libc-auxv" ];
       };
-      "rustls 0.21.10" = rec {
+      "rustls 0.21.12" = rec {
         crateName = "rustls";
-        version = "0.21.10";
+        version = "0.21.12";
         edition = "2021";
-        sha256 = "1fmpzk3axnhkd99saqkvraifdfms4pkyi56lkihf8n877j0sdmgr";
+        sha256 = "0gjdg2a9r81sdwkyw3n5yfbkrr6p9gyk3xr2kcsr3cs83x6s2miz";
         dependencies = [
           {
             name = "log";
@@ -9931,11 +9356,11 @@ rec {
         };
         resolvedDefaultFeatures = [ "dangerous_configuration" "default" "log" "logging" "tls12" ];
       };
-      "rustls 0.22.2" = rec {
+      "rustls 0.22.4" = rec {
         crateName = "rustls";
-        version = "0.22.2";
+        version = "0.22.4";
         edition = "2021";
-        sha256 = "0hcxyhq6ynvws9v5b2h81s1nwmijmya7a3vyyyhsy1wqpmb9jz78";
+        sha256 = "0cl4q6w0x1cl5ldjsgbbiiqhkz6qg5vxl5dkn9wwsyxc44vzfkmz";
         dependencies = [
           {
             name = "log";
@@ -10575,27 +10000,7 @@ rec {
           "preserve_order" = [ "indexmap" "std" ];
           "std" = [ "serde/std" ];
         };
-        resolvedDefaultFeatures = [ "alloc" "default" "raw_value" "std" ];
-      };
-      "serde_path_to_error" = rec {
-        crateName = "serde_path_to_error";
-        version = "0.1.16";
-        edition = "2021";
-        sha256 = "19hlz2359l37ifirskpcds7sxg0gzpqvfilibs7whdys0128i6dg";
-        authors = [
-          "David Tolnay <dtolnay@gmail.com>"
-        ];
-        dependencies = [
-          {
-            name = "itoa";
-            packageId = "itoa";
-          }
-          {
-            name = "serde";
-            packageId = "serde";
-          }
-        ];
-
+        resolvedDefaultFeatures = [ "alloc" "default" "std" ];
       };
       "serde_qs" = rec {
         crateName = "serde_qs";
@@ -11058,7 +10463,6 @@ rec {
           "drain_keep_rest" = [ "drain_filter" ];
           "serde" = [ "dep:serde" ];
         };
-        resolvedDefaultFeatures = [ "const_generics" "const_new" ];
       };
       "smol_str" = rec {
         crateName = "smol_str";
@@ -11962,16 +11366,11 @@ rec {
       };
       "tokio-listener" = rec {
         crateName = "tokio-listener";
-        version = "0.3.2";
+        version = "0.4.2";
         edition = "2021";
-        sha256 = "00vkr1cywd2agn8jbkzwwf7y4ps3cfjm8l9ab697px2cgc97wdln";
+        sha256 = "1cm6r5dmpq96s8gw9dgsinq5g8s466j48dg7dckwc4gc28g6cd21";
         dependencies = [
           {
-            name = "axum";
-            packageId = "axum 0.7.4";
-            rename = "axum07";
-          }
-          {
             name = "document-features";
             packageId = "document-features";
           }
@@ -12015,7 +11414,7 @@ rec {
           }
           {
             name = "tonic";
-            packageId = "tonic 0.11.0";
+            packageId = "tonic";
             rename = "tonic";
             optional = true;
           }
@@ -12032,12 +11431,14 @@ rec {
           }
         ];
         features = {
-          "axum07" = [ "dep:hyper1" "dep:hyper-util" "dep:futures-util" "dep:tower-service" "dep:tower" ];
+          "axum07" = [ "dep:hyper1" "dep:hyper-util" "dep:futures-util" "dep:tower-service" "dep:tower" "dep:axum07" ];
           "clap" = [ "dep:clap" ];
           "default" = [ "user_facing_default" "tokio-util" ];
           "hyper014" = [ "dep:hyper014" ];
           "inetd" = [ "dep:futures-util" ];
+          "multi-listener" = [ "dep:futures-util" ];
           "nix" = [ "dep:nix" ];
+          "sd_listen" = [ "socket2" ];
           "serde" = [ "dep:serde" "serde_with" ];
           "serde_with" = [ "dep:serde_with" ];
           "socket2" = [ "dep:socket2" ];
@@ -12116,7 +11517,7 @@ rec {
         dependencies = [
           {
             name = "rustls";
-            packageId = "rustls 0.21.10";
+            packageId = "rustls 0.21.12";
             usesDefaultFeatures = false;
           }
           {
@@ -12148,7 +11549,7 @@ rec {
         dependencies = [
           {
             name = "rustls";
-            packageId = "rustls 0.22.2";
+            packageId = "rustls 0.22.4";
             usesDefaultFeatures = false;
           }
           {
@@ -12492,7 +11893,7 @@ rec {
         };
         resolvedDefaultFeatures = [ "default" "serde" ];
       };
-      "tonic 0.11.0" = rec {
+      "tonic" = rec {
         crateName = "tonic";
         version = "0.11.0";
         edition = "2021";
@@ -12513,7 +11914,7 @@ rec {
           }
           {
             name = "axum";
-            packageId = "axum 0.6.20";
+            packageId = "axum";
             optional = true;
             usesDefaultFeatures = false;
           }
@@ -12527,20 +11928,20 @@ rec {
           }
           {
             name = "h2";
-            packageId = "h2 0.3.24";
+            packageId = "h2";
             optional = true;
           }
           {
             name = "http";
-            packageId = "http 0.2.11";
+            packageId = "http";
           }
           {
             name = "http-body";
-            packageId = "http-body 0.4.6";
+            packageId = "http-body";
           }
           {
             name = "hyper";
-            packageId = "hyper 0.14.28";
+            packageId = "hyper";
             optional = true;
             features = [ "full" ];
           }
@@ -12559,7 +11960,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
             optional = true;
             usesDefaultFeatures = false;
             features = [ "std" ];
@@ -12638,139 +12039,6 @@ rec {
         };
         resolvedDefaultFeatures = [ "channel" "codegen" "default" "prost" "tls" "tls-roots" "tls-roots-common" "transport" ];
       };
-      "tonic 0.9.2" = rec {
-        crateName = "tonic";
-        version = "0.9.2";
-        edition = "2021";
-        sha256 = "0nlx35lvah5hdcp6lg1d6dlprq0zz8ijj6f727szfcv479m6d0ih";
-        authors = [
-          "Lucio Franco <luciofranco14@gmail.com>"
-        ];
-        dependencies = [
-          {
-            name = "async-trait";
-            packageId = "async-trait";
-            optional = true;
-          }
-          {
-            name = "axum";
-            packageId = "axum 0.6.20";
-            optional = true;
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "base64";
-            packageId = "base64";
-          }
-          {
-            name = "bytes";
-            packageId = "bytes";
-          }
-          {
-            name = "futures-core";
-            packageId = "futures-core";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "futures-util";
-            packageId = "futures-util";
-            usesDefaultFeatures = false;
-          }
-          {
-            name = "h2";
-            packageId = "h2 0.3.24";
-            optional = true;
-          }
-          {
-            name = "http";
-            packageId = "http 0.2.11";
-          }
-          {
-            name = "http-body";
-            packageId = "http-body 0.4.6";
-          }
-          {
-            name = "hyper";
-            packageId = "hyper 0.14.28";
-            optional = true;
-            features = [ "full" ];
-          }
-          {
-            name = "hyper-timeout";
-            packageId = "hyper-timeout";
-            optional = true;
-          }
-          {
-            name = "percent-encoding";
-            packageId = "percent-encoding";
-          }
-          {
-            name = "pin-project";
-            packageId = "pin-project";
-          }
-          {
-            name = "prost";
-            packageId = "prost 0.11.9";
-            optional = true;
-            usesDefaultFeatures = false;
-            features = [ "std" ];
-          }
-          {
-            name = "tokio";
-            packageId = "tokio";
-            optional = true;
-            features = [ "net" "time" "macros" ];
-          }
-          {
-            name = "tokio-stream";
-            packageId = "tokio-stream";
-          }
-          {
-            name = "tower";
-            packageId = "tower";
-            optional = true;
-            usesDefaultFeatures = false;
-            features = [ "balance" "buffer" "discover" "limit" "load" "make" "timeout" "util" ];
-          }
-          {
-            name = "tower-layer";
-            packageId = "tower-layer";
-          }
-          {
-            name = "tower-service";
-            packageId = "tower-service";
-          }
-          {
-            name = "tracing";
-            packageId = "tracing";
-          }
-        ];
-        devDependencies = [
-          {
-            name = "tokio";
-            packageId = "tokio";
-            features = [ "rt" "macros" ];
-          }
-          {
-            name = "tower";
-            packageId = "tower";
-            features = [ "full" ];
-          }
-        ];
-        features = {
-          "channel" = [ "dep:h2" "dep:hyper" "dep:tokio" "dep:tower" "dep:hyper-timeout" ];
-          "codegen" = [ "dep:async-trait" ];
-          "default" = [ "transport" "codegen" "prost" ];
-          "gzip" = [ "dep:flate2" ];
-          "prost" = [ "dep:prost" ];
-          "tls" = [ "dep:rustls-pemfile" "transport" "dep:tokio-rustls" "dep:async-stream" ];
-          "tls-roots" = [ "tls-roots-common" "dep:rustls-native-certs" ];
-          "tls-roots-common" = [ "tls" ];
-          "tls-webpki-roots" = [ "tls-roots-common" "dep:webpki-roots" ];
-          "transport" = [ "dep:axum" "channel" ];
-        };
-        resolvedDefaultFeatures = [ "channel" "codegen" "default" "prost" "transport" ];
-      };
       "tonic-build" = rec {
         crateName = "tonic-build";
         version = "0.11.0";
@@ -12822,7 +12090,7 @@ rec {
         dependencies = [
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "prost-types";
@@ -12843,7 +12111,7 @@ rec {
           }
           {
             name = "tonic";
-            packageId = "tonic 0.11.0";
+            packageId = "tonic";
             usesDefaultFeatures = false;
             features = [ "codegen" "prost" ];
           }
@@ -12851,7 +12119,7 @@ rec {
         devDependencies = [
           {
             name = "tonic";
-            packageId = "tonic 0.11.0";
+            packageId = "tonic";
             usesDefaultFeatures = false;
             features = [ "transport" ];
           }
@@ -13179,9 +12447,9 @@ rec {
       };
       "tracing-opentelemetry" = rec {
         crateName = "tracing-opentelemetry";
-        version = "0.22.0";
+        version = "0.23.0";
         edition = "2018";
-        sha256 = "15jmwmbp6wz15bx20bfsmabx53wmlnd7wvzwz9hvkrq7aifc4yn6";
+        sha256 = "1112kmckw0qwyckhbwarb230n4ldmfgzixr9jagbfjmy3fx19gm9";
         authors = [
           "Julian Tescher <julian@tescher.me>"
           "Tokio Contributors <team@tokio.rs>"
@@ -13269,6 +12537,7 @@ rec {
         features = {
           "async-trait" = [ "dep:async-trait" ];
           "default" = [ "tracing-log" "metrics" ];
+          "futures-util" = [ "dep:futures-util" ];
           "metrics" = [ "opentelemetry/metrics" "opentelemetry_sdk/metrics" "smallvec" ];
           "smallvec" = [ "dep:smallvec" ];
           "thiserror" = [ "dep:thiserror" ];
@@ -13276,30 +12545,6 @@ rec {
         };
         resolvedDefaultFeatures = [ "default" "metrics" "smallvec" "tracing-log" ];
       };
-      "tracing-serde" = rec {
-        crateName = "tracing-serde";
-        version = "0.1.3";
-        edition = "2018";
-        sha256 = "1qfr0va69djvxqvjrx4vqq7p6myy414lx4w1f6amcn0hfwqj2sxw";
-        authors = [
-          "Tokio Contributors <team@tokio.rs>"
-        ];
-        dependencies = [
-          {
-            name = "serde";
-            packageId = "serde";
-          }
-          {
-            name = "tracing-core";
-            packageId = "tracing-core";
-          }
-        ];
-        features = {
-          "valuable" = [ "valuable_crate" "valuable-serde" "tracing-core/valuable" ];
-          "valuable-serde" = [ "dep:valuable-serde" ];
-          "valuable_crate" = [ "dep:valuable_crate" ];
-        };
-      };
       "tracing-subscriber" = rec {
         crateName = "tracing-subscriber";
         version = "0.3.18";
@@ -13334,16 +12579,6 @@ rec {
             features = [ "std" "unicode-case" "unicode-perl" ];
           }
           {
-            name = "serde";
-            packageId = "serde";
-            optional = true;
-          }
-          {
-            name = "serde_json";
-            packageId = "serde_json";
-            optional = true;
-          }
-          {
             name = "sharded-slab";
             packageId = "sharded-slab";
             optional = true;
@@ -13376,11 +12611,6 @@ rec {
             usesDefaultFeatures = false;
             features = [ "log-tracer" "std" ];
           }
-          {
-            name = "tracing-serde";
-            packageId = "tracing-serde";
-            optional = true;
-          }
         ];
         devDependencies = [
           {
@@ -13426,7 +12656,7 @@ rec {
           "valuable-serde" = [ "dep:valuable-serde" ];
           "valuable_crate" = [ "dep:valuable_crate" ];
         };
-        resolvedDefaultFeatures = [ "alloc" "ansi" "default" "env-filter" "fmt" "json" "matchers" "nu-ansi-term" "once_cell" "regex" "registry" "serde" "serde_json" "sharded-slab" "smallvec" "std" "thread_local" "tracing" "tracing-log" "tracing-serde" ];
+        resolvedDefaultFeatures = [ "alloc" "ansi" "default" "env-filter" "fmt" "matchers" "nu-ansi-term" "once_cell" "regex" "registry" "sharded-slab" "smallvec" "std" "thread_local" "tracing" "tracing-log" ];
       };
       "try-lock" = rec {
         crateName = "try-lock";
@@ -13471,7 +12701,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "thiserror";
@@ -13488,7 +12718,7 @@ rec {
           }
           {
             name = "tonic";
-            packageId = "tonic 0.11.0";
+            packageId = "tonic";
             features = [ "tls" "tls-roots" ];
           }
           {
@@ -13503,7 +12733,6 @@ rec {
           {
             name = "tracing-subscriber";
             packageId = "tracing-subscriber";
-            features = [ "json" ];
           }
           {
             name = "tvix-castore";
@@ -13547,6 +12776,11 @@ rec {
           else ./castore;
         dependencies = [
           {
+            name = "async-compression";
+            packageId = "async-compression";
+            features = [ "tokio" "zstd" ];
+          }
+          {
             name = "async-stream";
             packageId = "async-stream";
           }
@@ -13610,7 +12844,7 @@ rec {
           }
           {
             name = "parking_lot";
-            packageId = "parking_lot 0.12.1";
+            packageId = "parking_lot 0.12.2";
           }
           {
             name = "petgraph";
@@ -13622,7 +12856,7 @@ rec {
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "serde";
@@ -13662,11 +12896,11 @@ rec {
           {
             name = "tokio-util";
             packageId = "tokio-util";
-            features = [ "io" "io-util" ];
+            features = [ "io" "io-util" "codec" ];
           }
           {
             name = "tonic";
-            packageId = "tonic 0.11.0";
+            packageId = "tonic";
           }
           {
             name = "tonic-reflection";
@@ -13828,7 +13062,6 @@ rec {
           {
             name = "tracing-subscriber";
             packageId = "tracing-subscriber";
-            features = [ "json" ];
           }
           {
             name = "tvix-build";
@@ -14078,10 +13311,6 @@ rec {
             features = [ "tokio" "gzip" "bzip2" "xz" ];
           }
           {
-            name = "async-recursion";
-            packageId = "async-recursion";
-          }
-          {
             name = "bstr";
             packageId = "bstr";
           }
@@ -14275,8 +13504,9 @@ rec {
             packageId = "anyhow";
           }
           {
-            name = "async-recursion";
-            packageId = "async-recursion";
+            name = "async-compression";
+            packageId = "async-compression";
+            features = [ "tokio" "bzip2" "gzip" "xz" "zstd" ];
           }
           {
             name = "async-stream";
@@ -14322,6 +13552,10 @@ rec {
             packageId = "lazy_static";
           }
           {
+            name = "lru";
+            packageId = "lru";
+          }
+          {
             name = "nix-compat";
             packageId = "nix-compat";
             features = [ "async" ];
@@ -14343,12 +13577,16 @@ rec {
             features = [ "rt-tokio" ];
           }
           {
+            name = "parking_lot";
+            packageId = "parking_lot 0.12.2";
+          }
+          {
             name = "pin-project-lite";
             packageId = "pin-project-lite";
           }
           {
             name = "prost";
-            packageId = "prost 0.12.3";
+            packageId = "prost";
           }
           {
             name = "reqwest";
@@ -14407,7 +13645,7 @@ rec {
           }
           {
             name = "tonic";
-            packageId = "tonic 0.11.0";
+            packageId = "tonic";
             features = [ "tls" "tls-roots" ];
           }
           {
@@ -14430,7 +13668,7 @@ rec {
           {
             name = "tracing-subscriber";
             packageId = "tracing-subscriber";
-            features = [ "env-filter" "json" ];
+            features = [ "env-filter" ];
           }
           {
             name = "tvix-castore";
@@ -14444,10 +13682,6 @@ rec {
             name = "walkdir";
             packageId = "walkdir";
           }
-          {
-            name = "xz2";
-            packageId = "xz2";
-          }
         ];
         buildDependencies = [
           {
@@ -15778,23 +15012,25 @@ rec {
       };
       "web-time" = rec {
         crateName = "web-time";
-        version = "0.2.4";
+        version = "1.1.0";
         edition = "2021";
-        sha256 = "1q6gk0nkwbfz30g1pz8g52mq00zjx7m5im36k3474aw73jdh8c5a";
+        sha256 = "1fx05yqx83dhx628wb70fyy10yjfq1jpl20qfqhdkymi13rq0ras";
         dependencies = [
           {
             name = "js-sys";
             packageId = "js-sys";
-            target = { target, features }: ((builtins.elem "wasm" target."family") && (!(("emscripten" == target."os" or null) || ("wasi" == target."os" or null))));
+            target = { target, features }: ((builtins.elem "wasm" target."family") && ("unknown" == target."os" or null));
           }
           {
             name = "wasm-bindgen";
             packageId = "wasm-bindgen";
             usesDefaultFeatures = false;
-            target = { target, features }: ((builtins.elem "wasm" target."family") && (!(("emscripten" == target."os" or null) || ("wasi" == target."os" or null))));
+            target = { target, features }: ((builtins.elem "wasm" target."family") && ("unknown" == target."os" or null));
           }
         ];
-
+        features = {
+          "serde" = [ "dep:serde" ];
+        };
       };
       "which 4.4.2" = rec {
         crateName = "which";
@@ -16848,6 +16084,67 @@ rec {
         ];
 
       };
+      "zerocopy" = rec {
+        crateName = "zerocopy";
+        version = "0.7.34";
+        edition = "2018";
+        sha256 = "11xhrwixm78m6ca1jdxf584wdwvpgg7q00vg21fhwl0psvyf71xf";
+        authors = [
+          "Joshua Liebow-Feeser <joshlf@google.com>"
+        ];
+        dependencies = [
+          {
+            name = "zerocopy-derive";
+            packageId = "zerocopy-derive";
+            optional = true;
+          }
+          {
+            name = "zerocopy-derive";
+            packageId = "zerocopy-derive";
+            target = { target, features }: false;
+          }
+        ];
+        devDependencies = [
+          {
+            name = "zerocopy-derive";
+            packageId = "zerocopy-derive";
+          }
+        ];
+        features = {
+          "__internal_use_only_features_that_work_on_stable" = [ "alloc" "derive" "simd" ];
+          "byteorder" = [ "dep:byteorder" ];
+          "default" = [ "byteorder" ];
+          "derive" = [ "zerocopy-derive" ];
+          "simd-nightly" = [ "simd" ];
+          "zerocopy-derive" = [ "dep:zerocopy-derive" ];
+        };
+        resolvedDefaultFeatures = [ "simd" ];
+      };
+      "zerocopy-derive" = rec {
+        crateName = "zerocopy-derive";
+        version = "0.7.34";
+        edition = "2018";
+        sha256 = "0fqvglw01w3hp7xj9gdk1800x9j7v58s9w8ijiyiz2a7krb39s8m";
+        procMacro = true;
+        authors = [
+          "Joshua Liebow-Feeser <joshlf@google.com>"
+        ];
+        dependencies = [
+          {
+            name = "proc-macro2";
+            packageId = "proc-macro2";
+          }
+          {
+            name = "quote";
+            packageId = "quote";
+          }
+          {
+            name = "syn";
+            packageId = "syn 2.0.48";
+          }
+        ];
+
+      };
       "zeroize" = rec {
         crateName = "zeroize";
         version = "1.7.0";
diff --git a/tvix/Cargo.toml b/tvix/Cargo.toml
index 6cd19831d..847d9acee 100644
--- a/tvix/Cargo.toml
+++ b/tvix/Cargo.toml
@@ -30,6 +30,11 @@ members = [
   "store",
 ]
 
+[workspace.lints.clippy]
+# Allow blocks_in_conditions due to false positives with #[tracing::instrument(…)]:
+# https://github.com/rust-lang/rust-clippy/issues/12281
+blocks_in_conditions = "allow"
+
 # Add a profile to all targets that enables release optimisations, but
 # retains debug symbols. This is great for use with
 # benchmarking/profiling tools.
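Note on the `blocks_in_conditions` allow added above: it works around rust-clippy#12281, where clippy flags code produced by the `#[tracing::instrument(…)]` expansion rather than anything in the written source. A minimal sketch of the kind of function that trips it — hypothetical, not taken from this tree:

```
// With `err`, #[tracing::instrument] rewrites the function body into a block
// inside a match, which clippy's blocks_in_conditions lint then flags even
// though the source contains no block in a condition.
#[tracing::instrument(skip_all, err)]
async fn has_blob(digest: &[u8]) -> Result<bool, std::io::Error> {
    Ok(!digest.is_empty())
}
```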
diff --git a/tvix/README.md b/tvix/README.md
index bf96afa4b..fb536bc22 100644
--- a/tvix/README.md
+++ b/tvix/README.md
@@ -61,8 +61,7 @@ This folder contains the following components:
 * `//tvix/castore` - subtree storage/transfer in a content-addressed fashion
 * `//tvix/cli` - preliminary REPL & CLI implementation for Tvix
 * `//tvix/eval` - an implementation of the Nix programming language
-* `//tvix/nar-bridge`
-  * `nar-bridge-http`: A HTTP webserver providing a Nix HTTP Binary Cache interface in front of a tvix-store
+* `//tvix/nar-bridge-go` - an HTTP webserver providing a Nix HTTP Binary Cache interface in front of a tvix-store
 * `//tvix/nix-compat` - a Rust library for compatibility with C++ Nix, features like encodings and hashing schemes and formats
 * `//tvix/serde` - a Rust library for using the Nix language for app configuration
 * `//tvix/store` - a "filesystem" linking Nix store paths and metadata with the content-addressed layer
diff --git a/tvix/boot/README.md b/tvix/boot/README.md
index 13a485506..9c7b722a7 100644
--- a/tvix/boot/README.md
+++ b/tvix/boot/README.md
@@ -43,7 +43,7 @@ Potentially copy some data into tvix-store (via nar-bridge):
 
 ```
 mg run //tvix:store -- daemon &
-$(mg build //tvix:nar-bridge)/bin/nar-bridge-http &
+$(mg build //tvix:nar-bridge-go)/bin/nar-bridge-http &
 rm -Rf ~/.cache/nix; nix copy --to http://localhost:9000\?compression\=none $(mg build //third_party/nixpkgs:hello)
 pkill nar-bridge-http; pkill tvix-store
 ```
diff --git a/tvix/boot/tests/default.nix b/tvix/boot/tests/default.nix
index d16dba79f..5c7f97a1c 100644
--- a/tvix/boot/tests/default.nix
+++ b/tvix/boot/tests/default.nix
@@ -109,18 +109,13 @@ depot.nix.readTree.drvTargets
     path = ../../docs;
     importPathName = "docs";
   });
-  docs-sled = (mkBootTest {
-    blobServiceAddr = "sled://$PWD/blobs.sled";
+  docs-persistent = (mkBootTest {
+    blobServiceAddr = "objectstore+file://$PWD/blobs";
     directoryServiceAddr = "sled://$PWD/directories.sled";
     pathInfoServiceAddr = "sled://$PWD/pathinfo.sled";
     path = ../../docs;
     importPathName = "docs";
   });
-  docs-objectstore-local = (mkBootTest {
-    blobServiceAddr = "objectstore+file://$PWD/blobs";
-    path = ../../docs;
-    importPathName = "docs";
-  });
 
   closure-tvix = (mkBootTest {
     blobServiceAddr = "objectstore+file://$PWD/blobs";
diff --git a/tvix/build/Cargo.toml b/tvix/build/Cargo.toml
index 626fd35d7..cf25465cc 100644
--- a/tvix/build/Cargo.toml
+++ b/tvix/build/Cargo.toml
@@ -10,11 +10,11 @@ itertools = "0.12.0"
 prost = "0.12.1"
 thiserror = "1.0.56"
 tokio = { version = "1.32.0" }
-tokio-listener = { version = "0.3.2", features = [ "tonic011" ] }
+tokio-listener = { version = "0.4.1", features = [ "tonic011" ] }
 tonic = { version = "0.11.0", features = ["tls", "tls-roots"] }
 tvix-castore = { path = "../castore" }
 tracing = "0.1.37"
-tracing-subscriber = { version = "0.3.16", features = ["json"] }
+tracing-subscriber = "0.3.16"
 url = "2.4.0"
 
 [dependencies.tonic-reflection]
@@ -31,3 +31,6 @@ tonic-reflection = ["dep:tonic-reflection"]
 
 [dev-dependencies]
 rstest = "0.19.0"
+
+[lints]
+workspace = true
diff --git a/tvix/build/src/bin/tvix-build.rs b/tvix/build/src/bin/tvix-build.rs
index ed36c8933..07d7e30df 100644
--- a/tvix/build/src/bin/tvix-build.rs
+++ b/tvix/build/src/bin/tvix-build.rs
@@ -23,10 +23,6 @@ use tvix_castore::proto::FILE_DESCRIPTOR_SET as CASTORE_FILE_DESCRIPTOR_SET;
 #[derive(Parser)]
 #[command(author, version, about, long_about = None)]
 struct Cli {
-    /// Whether to log in JSON
-    #[arg(long)]
-    json: bool,
-
     #[arg(long)]
     log_level: Option<Level>,
 
@@ -58,23 +54,13 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
     // configure log settings
     let level = cli.log_level.unwrap_or(Level::INFO);
 
-    let subscriber = tracing_subscriber::registry()
+    tracing_subscriber::registry()
         .with(
-            cli.json.then_some(
-                tracing_subscriber::fmt::Layer::new()
-                    .with_writer(std::io::stderr.with_max_level(level))
-                    .json(),
-            ),
+            tracing_subscriber::fmt::Layer::new()
+                .with_writer(std::io::stderr.with_max_level(level))
+                .pretty(),
         )
-        .with(
-            (!cli.json).then_some(
-                tracing_subscriber::fmt::Layer::new()
-                    .with_writer(std::io::stderr.with_max_level(level))
-                    .pretty(),
-            ),
-        );
-
-    tracing::subscriber::set_global_default(subscriber).expect("Unable to set global subscriber");
+        .init();
 
     match cli.command {
         Commands::Daemon {
diff --git a/tvix/castore/Cargo.toml b/tvix/castore/Cargo.toml
index 2797ef08f..4cbc29053 100644
--- a/tvix/castore/Cargo.toml
+++ b/tvix/castore/Cargo.toml
@@ -4,6 +4,7 @@ version = "0.1.0"
 edition = "2021"
 
 [dependencies]
+async-compression = { version = "0.4.9", features = ["tokio", "zstd"]}
 async-stream = "0.3.5"
 async-tempfile = "0.4.0"
 blake3 = { version = "1.3.1", features = ["rayon", "std", "traits-preview"] }
@@ -21,7 +22,7 @@ prost = "0.12.1"
 sled = { version = "0.34.7" }
 thiserror = "1.0.38"
 tokio-stream = { version = "0.1.14", features = ["fs", "net"] }
-tokio-util = { version = "0.7.9", features = ["io", "io-util"] }
+tokio-util = { version = "0.7.9", features = ["io", "io-util", "codec"] }
 tokio-tar = "0.3.1"
 tokio = { version = "1.32.0", features = ["fs", "macros", "net", "rt", "rt-multi-thread", "signal"] }
 tonic = "0.11.0"
@@ -116,3 +117,6 @@ tonic-reflection = ["dep:tonic-reflection"]
 # Requires the following packages in $PATH:
 # cbtemulator, google-cloud-bigtable-tool
 integration = []
+
+[lints]
+workspace = true
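The new `async-compression` dependency (with the `tokio` and `zstd` features) brings zstd (de)compression into castore's async I/O path. A minimal, hypothetical round-trip with that API — the function name and framing are illustrative, not the actual castore wire format:

```
use async_compression::tokio::bufread::ZstdDecoder;
use tokio::io::{AsyncReadExt, BufReader};

// Decompress a zstd byte slice through the tokio adapter; ZstdDecoder wraps
// any AsyncBufRead and itself implements AsyncRead.
async fn decompress_zstd(compressed: &[u8]) -> std::io::Result<Vec<u8>> {
    let mut decoder = ZstdDecoder::new(BufReader::new(compressed));
    let mut out = Vec::new();
    decoder.read_to_end(&mut out).await?;
    Ok(out)
}
```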
diff --git a/tvix/castore/src/blobservice/from_addr.rs b/tvix/castore/src/blobservice/from_addr.rs
index 3e3f943e5..8898bbfb9 100644
--- a/tvix/castore/src/blobservice/from_addr.rs
+++ b/tvix/castore/src/blobservice/from_addr.rs
@@ -2,15 +2,12 @@ use url::Url;
 
 use crate::{proto::blob_service_client::BlobServiceClient, Error};
 
-use super::{
-    BlobService, GRPCBlobService, MemoryBlobService, ObjectStoreBlobService, SledBlobService,
-};
+use super::{BlobService, GRPCBlobService, MemoryBlobService, ObjectStoreBlobService};
 
 /// Constructs a new instance of a [BlobService] from an URI.
 ///
 /// The following schemes are supported by the following services:
 /// - `memory://` ([MemoryBlobService])
-/// - `sled://` ([SledBlobService])
 /// - `grpc+*://` ([GRPCBlobService])
 /// - `objectstore+*://` ([ObjectStoreBlobService])
 ///
@@ -27,27 +24,6 @@ pub async fn from_addr(uri: &str) -> Result<Box<dyn BlobService>, crate::Error>
             }
             Box::<MemoryBlobService>::default()
         }
-        "sled" => {
-            // sled doesn't support host, and a path can be provided (otherwise
-            // it'll live in memory only).
-            if url.has_host() {
-                return Err(Error::StorageError("no host allowed".to_string()));
-            }
-
-            if url.path() == "/" {
-                return Err(Error::StorageError(
-                    "cowardly refusing to open / with sled".to_string(),
-                ));
-            }
-
-            // TODO: expose other parameters as URL parameters?
-
-            Box::new(if url.path().is_empty() {
-                SledBlobService::new_temporary().map_err(|e| Error::StorageError(e.to_string()))?
-            } else {
-                SledBlobService::new(url.path()).map_err(|e| Error::StorageError(e.to_string()))?
-            })
-        }
         scheme if scheme.starts_with("grpc+") => {
             // schemes starting with grpc+ go to the GRPCPathInfoService.
             //   That's normally grpc+unix for unix sockets, and grpc+http(s) for the HTTP counterparts.
@@ -83,28 +59,11 @@ pub async fn from_addr(uri: &str) -> Result<Box<dyn BlobService>, crate::Error>
 #[cfg(test)]
 mod tests {
     use super::from_addr;
-    use lazy_static::lazy_static;
     use rstest::rstest;
-    use tempfile::TempDir;
-
-    lazy_static! {
-        static ref TMPDIR_SLED_1: TempDir = TempDir::new().unwrap();
-        static ref TMPDIR_SLED_2: TempDir = TempDir::new().unwrap();
-    }
 
     #[rstest]
     /// This uses an unsupported scheme.
     #[case::unsupported_scheme("http://foo.example/test", false)]
-    /// This configures sled in temporary mode.
-    #[case::sled_temporary("sled://", true)]
-    /// This configures sled with /, which should fail.
-    #[case::sled_invalid_root("sled:///", false)]
-    /// This configures sled with a host, not path, which should fail.
-    #[case::sled_invalid_host("sled://foo.example", false)]
-    /// This configures sled with a valid path path, which should succeed.
-    #[case::sled_valid_path(&format!("sled://{}", &TMPDIR_SLED_1.path().to_str().unwrap()), true)]
-    /// This configures sled with a host, and a valid path path, which should fail.
-    #[case::sled_invalid_host_with_valid_path(&format!("sled://foo.example{}", &TMPDIR_SLED_2.path().to_str().unwrap()), false)]
     /// This correctly sets the scheme, and doesn't set a path.
     #[case::memory_valid("memory://", true)]
     /// This sets a memory url host to `foo`
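
For reference, a minimal sketch of resolving the remaining schemes from a consumer's point of view, assuming the crate is consumed as tvix_castore; the address strings are illustrative, only the scheme prefixes matter:

    use tvix_castore::blobservice::from_addr;

    async fn connect() -> Result<(), tvix_castore::Error> {
        let mem = from_addr("memory://").await?;             // in-memory, for tests
        let obj = from_addr("objectstore+memory://").await?; // object_store backend
        // gRPC over a unix socket (the socket path is hypothetical)
        let rpc = from_addr("grpc+unix:///run/tvix/castore.sock").await?;
        let _ = (mem, obj, rpc);
        Ok(())
    }
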
diff --git a/tvix/castore/src/blobservice/memory.rs b/tvix/castore/src/blobservice/memory.rs
index 25eec334d..873d06b46 100644
--- a/tvix/castore/src/blobservice/memory.rs
+++ b/tvix/castore/src/blobservice/memory.rs
@@ -1,9 +1,7 @@
+use parking_lot::RwLock;
 use std::io::{self, Cursor, Write};
 use std::task::Poll;
-use std::{
-    collections::HashMap,
-    sync::{Arc, RwLock},
-};
+use std::{collections::HashMap, sync::Arc};
 use tonic::async_trait;
 use tracing::instrument;
 
@@ -19,13 +17,13 @@ pub struct MemoryBlobService {
 impl BlobService for MemoryBlobService {
     #[instrument(skip_all, ret, err, fields(blob.digest=%digest))]
     async fn has(&self, digest: &B3Digest) -> io::Result<bool> {
-        let db = self.db.read().unwrap();
+        let db = self.db.read();
         Ok(db.contains_key(digest))
     }
 
     #[instrument(skip_all, err, fields(blob.digest=%digest))]
     async fn open_read(&self, digest: &B3Digest) -> io::Result<Option<Box<dyn BlobReader>>> {
-        let db = self.db.read().unwrap();
+        let db = self.db.read();
 
         match db.get(digest).map(|x| Cursor::new(x.clone())) {
             Some(result) => Ok(Some(Box::new(result))),
@@ -109,24 +107,16 @@ impl BlobWriter for MemoryBlobWriter {
         } else {
             let (buf, hasher) = self.writers.take().unwrap();
 
-            // We know self.hasher is doing blake3 hashing, so this won't fail.
             let digest: B3Digest = hasher.finalize().as_bytes().into();
 
             // Only insert if the blob doesn't already exist.
-            let db = self.db.read().map_err(|e| {
-                io::Error::new(io::ErrorKind::BrokenPipe, format!("RwLock poisoned: {}", e))
-            })?;
+            let mut db = self.db.upgradable_read();
             if !db.contains_key(&digest) {
-                // drop the read lock, so we can open for writing.
-                drop(db);
-
                 // open the database for writing.
-                let mut db = self.db.write().map_err(|e| {
-                    io::Error::new(io::ErrorKind::BrokenPipe, format!("RwLock poisoned: {}", e))
-                })?;
-
-                // and put buf in there. This will move buf out.
-                db.insert(digest.clone(), buf);
+                db.with_upgraded(|db| {
+                    // and put buf in there. This will move buf out.
+                    db.insert(digest.clone(), buf);
+                });
             }
 
             self.digest = Some(digest.clone());
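
The upgradable-read pattern used above is worth seeing in isolation: it avoids both lock poisoning and the drop-read-then-write race of the old code. A minimal sketch, independent of the blob types:

    use parking_lot::RwLock;
    use std::collections::HashMap;

    // Check under a shared (upgradable) lock; upgrade to exclusive only to insert.
    fn insert_if_absent(db: &RwLock<HashMap<u64, Vec<u8>>>, key: u64, value: Vec<u8>) {
        let mut guard = db.upgradable_read();
        if !guard.contains_key(&key) {
            // with_upgraded briefly holds the write lock, then downgrades again.
            guard.with_upgraded(|map| {
                map.insert(key, value);
            });
        }
    }
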
diff --git a/tvix/castore/src/blobservice/mod.rs b/tvix/castore/src/blobservice/mod.rs
index 4ba56a4af..50acd40bf 100644
--- a/tvix/castore/src/blobservice/mod.rs
+++ b/tvix/castore/src/blobservice/mod.rs
@@ -11,7 +11,6 @@ mod grpc;
 mod memory;
 mod naive_seeker;
 mod object_store;
-mod sled;
 
 #[cfg(test)]
 pub mod tests;
@@ -22,7 +21,6 @@ pub use self::from_addr::from_addr;
 pub use self::grpc::GRPCBlobService;
 pub use self::memory::MemoryBlobService;
 pub use self::object_store::ObjectStoreBlobService;
-pub use self::sled::SledBlobService;
 
 /// The base trait all BlobService services need to implement.
 /// It provides functions to check whether a given blob exists,
diff --git a/tvix/castore/src/blobservice/sled.rs b/tvix/castore/src/blobservice/sled.rs
deleted file mode 100644
index 3dd4bff7b..000000000
--- a/tvix/castore/src/blobservice/sled.rs
+++ /dev/null
@@ -1,150 +0,0 @@
-use super::{BlobReader, BlobService, BlobWriter};
-use crate::{B3Digest, Error};
-use std::{
-    io::{self, Cursor, Write},
-    path::Path,
-    task::Poll,
-};
-use tonic::async_trait;
-use tracing::instrument;
-
-#[derive(Clone)]
-pub struct SledBlobService {
-    db: sled::Db,
-}
-
-impl SledBlobService {
-    pub fn new<P: AsRef<Path>>(p: P) -> Result<Self, sled::Error> {
-        let config = sled::Config::default()
-            .use_compression(false) // is a required parameter
-            .path(p);
-        let db = config.open()?;
-
-        Ok(Self { db })
-    }
-
-    pub fn new_temporary() -> Result<Self, sled::Error> {
-        let config = sled::Config::default().temporary(true);
-        let db = config.open()?;
-
-        Ok(Self { db })
-    }
-}
-
-#[async_trait]
-impl BlobService for SledBlobService {
-    #[instrument(skip(self), fields(blob.digest=%digest))]
-    async fn has(&self, digest: &B3Digest) -> io::Result<bool> {
-        match self.db.contains_key(digest.as_slice()) {
-            Ok(has) => Ok(has),
-            Err(e) => Err(io::Error::new(io::ErrorKind::Other, e.to_string())),
-        }
-    }
-
-    #[instrument(skip(self), fields(blob.digest=%digest))]
-    async fn open_read(&self, digest: &B3Digest) -> io::Result<Option<Box<dyn BlobReader>>> {
-        match self.db.get(digest.as_slice()) {
-            Ok(None) => Ok(None),
-            Ok(Some(data)) => Ok(Some(Box::new(Cursor::new(data[..].to_vec())))),
-            Err(e) => Err(io::Error::new(io::ErrorKind::Other, e.to_string())),
-        }
-    }
-
-    #[instrument(skip(self))]
-    async fn open_write(&self) -> Box<dyn BlobWriter> {
-        Box::new(SledBlobWriter::new(self.db.clone()))
-    }
-}
-
-pub struct SledBlobWriter {
-    db: sled::Db,
-
-    /// Contains the buffer Vec and hasher, or None if already closed
-    writers: Option<(Vec<u8>, blake3::Hasher)>,
-
-    /// The digest that has been returned, if we successfully closed.
-    digest: Option<B3Digest>,
-}
-
-impl SledBlobWriter {
-    pub fn new(db: sled::Db) -> Self {
-        Self {
-            db,
-            writers: Some((Vec::new(), blake3::Hasher::new())),
-            digest: None,
-        }
-    }
-}
-
-impl tokio::io::AsyncWrite for SledBlobWriter {
-    fn poll_write(
-        mut self: std::pin::Pin<&mut Self>,
-        _cx: &mut std::task::Context<'_>,
-        b: &[u8],
-    ) -> std::task::Poll<Result<usize, io::Error>> {
-        Poll::Ready(match &mut self.writers {
-            None => Err(io::Error::new(
-                io::ErrorKind::NotConnected,
-                "already closed",
-            )),
-            Some((ref mut buf, ref mut hasher)) => {
-                let bytes_written = buf.write(b)?;
-                hasher.write(&b[..bytes_written])
-            }
-        })
-    }
-
-    fn poll_flush(
-        mut self: std::pin::Pin<&mut Self>,
-        _cx: &mut std::task::Context<'_>,
-    ) -> std::task::Poll<Result<(), io::Error>> {
-        Poll::Ready(match &mut self.writers {
-            None => Err(io::Error::new(
-                io::ErrorKind::NotConnected,
-                "already closed",
-            )),
-            Some(_) => Ok(()),
-        })
-    }
-
-    fn poll_shutdown(
-        self: std::pin::Pin<&mut Self>,
-        _cx: &mut std::task::Context<'_>,
-    ) -> std::task::Poll<Result<(), io::Error>> {
-        // shutdown is "instantaneous", we only write to a Vec<u8> as buffer.
-        Poll::Ready(Ok(()))
-    }
-}
-
-#[async_trait]
-impl BlobWriter for SledBlobWriter {
-    async fn close(&mut self) -> io::Result<B3Digest> {
-        if self.writers.is_none() {
-            match &self.digest {
-                Some(digest) => Ok(digest.clone()),
-                None => Err(io::Error::new(
-                    io::ErrorKind::NotConnected,
-                    "already closed",
-                )),
-            }
-        } else {
-            let (buf, hasher) = self.writers.take().unwrap();
-
-            let digest: B3Digest = hasher.finalize().as_bytes().into();
-
-            // Only insert if the blob doesn't already exist.
-            if !self.db.contains_key(digest.as_slice()).map_err(|e| {
-                Error::StorageError(format!("Unable to check if we have blob {}: {}", digest, e))
-            })? {
-                // put buf in there. This will move buf out.
-                self.db
-                    .insert(digest.as_slice(), buf)
-                    .map_err(|e| Error::StorageError(format!("unable to insert blob: {}", e)))?;
-            }
-
-            self.digest = Some(digest.clone());
-
-            Ok(digest)
-        }
-    }
-}
diff --git a/tvix/castore/src/blobservice/tests/mod.rs b/tvix/castore/src/blobservice/tests/mod.rs
index 30c4e9763..0280faebb 100644
--- a/tvix/castore/src/blobservice/tests/mod.rs
+++ b/tvix/castore/src/blobservice/tests/mod.rs
@@ -25,7 +25,6 @@ use self::utils::make_grpc_blob_service_client;
 #[case::grpc(make_grpc_blob_service_client().await)]
 #[case::memory(blobservice::from_addr("memory://").await.unwrap())]
 #[case::objectstore_memory(blobservice::from_addr("objectstore+memory://").await.unwrap())]
-#[case::sled(blobservice::from_addr("sled://").await.unwrap())]
 pub fn blob_services(#[case] blob_service: impl BlobService) {}
 
 /// Using [BlobService::has] on a non-existing blob should return false.
diff --git a/tvix/castore/src/directoryservice/bigtable.rs b/tvix/castore/src/directoryservice/bigtable.rs
index 0fdb24628..1194c6ddc 100644
--- a/tvix/castore/src/directoryservice/bigtable.rs
+++ b/tvix/castore/src/directoryservice/bigtable.rs
@@ -343,7 +343,7 @@ impl DirectoryService for BigtableDirectoryService {
     fn get_recursive(
         &self,
         root_directory_digest: &B3Digest,
-    ) -> BoxStream<Result<proto::Directory, Error>> {
+    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
         traverse_directory(self.clone(), root_directory_digest)
     }
 
diff --git a/tvix/castore/src/directoryservice/closure_validator.rs b/tvix/castore/src/directoryservice/closure_validator.rs
index 183928a86..b9746a5a0 100644
--- a/tvix/castore/src/directoryservice/closure_validator.rs
+++ b/tvix/castore/src/directoryservice/closure_validator.rs
@@ -4,7 +4,7 @@ use bstr::ByteSlice;
 
 use petgraph::{
     graph::{DiGraph, NodeIndex},
-    visit::Bfs,
+    visit::{Bfs, Walker},
 };
 use tracing::instrument;
 
@@ -13,6 +13,8 @@ use crate::{
     B3Digest, Error,
 };
 
+type DirectoryGraph = DiGraph<Directory, ()>;
+
 /// This can be used to validate a Directory closure (DAG of connected
 /// Directories), and their insertion order.
 ///
@@ -37,7 +39,7 @@ use crate::{
 pub struct ClosureValidator {
     // A directed graph, using Directory as node weight, without edge weights.
     // Edges point from parents to children.
-    graph: DiGraph<Directory, ()>,
+    graph: DirectoryGraph,
 
     // A lookup table from directory digest to node index.
     digest_to_node_ix: HashMap<B3Digest, NodeIndex>,
@@ -122,11 +124,54 @@ impl ClosureValidator {
     /// In case no elements have been inserted, returns an empty list.
     #[instrument(level = "trace", skip_all, err)]
     pub(crate) fn finalize(self) -> Result<Vec<Directory>, Error> {
+        let (graph, _) = match self.finalize_raw()? {
+            None => return Ok(vec![]),
+            Some(v) => v,
+        };
+        // Dissolve the graph, returning the nodes as a Vec.
+        // As the graph was populated in a valid DFS PostOrder, we can return
+        // nodes in that same order.
+        let (nodes, _edges) = graph.into_nodes_edges();
+        Ok(nodes.into_iter().map(|x| x.weight).collect())
+    }
+
+    /// Ensure that all inserted Directories are connected, then return a
+    /// deduplicated and validated list of directories, in root-to-leaves
+    /// order.
+    /// In case no elements have been inserted, returns an empty list.
+    #[instrument(level = "trace", skip_all, err)]
+    pub(crate) fn finalize_root_to_leaves(self) -> Result<Vec<Directory>, Error> {
+        let (graph, root) = match self.finalize_raw()? {
+            None => return Ok(vec![]),
+            Some(v) => v,
+        };
+
+        // do a BFS traversal of the graph, starting with the root node to get
+        // all nodes reachable from there.
+        let traversal = Bfs::new(&graph, root);
+
+        let order = traversal.iter(&graph).collect::<Vec<_>>();
+
+        let (nodes, _edges) = graph.into_nodes_edges();
+
+        // Convert to Option, so that we can take individual nodes out without
+        // messing up the indices.
+        let mut nodes = nodes.into_iter().map(Some).collect::<Vec<_>>();
+
+        Ok(order
+            .iter()
+            .map(|i| nodes[i.index()].take().unwrap().weight)
+            .collect())
+    }
+
+    /// Internal implementation of closure validation
+    #[instrument(level = "trace", skip_all, err)]
+    fn finalize_raw(self) -> Result<Option<(DirectoryGraph, NodeIndex)>, Error> {
         // If no nodes were inserted, an empty list is returned.
         let last_directory_ix = if let Some(x) = self.last_directory_ix {
             x
         } else {
-            return Ok(vec![]);
+            return Ok(None);
         };
 
         // do a BFS traversal of the graph, starting with the root node to get
@@ -172,11 +217,7 @@ impl ClosureValidator {
             }
         }
 
-        // Dissolve the graph, returning the nodes as a Vec.
-        // As the graph was populated in a valid DFS PostOrder, we can return
-        // nodes in that same order.
-        let (nodes, _edges) = self.graph.into_nodes_edges();
-        Ok(nodes.into_iter().map(|x| x.weight).collect())
+        Ok(Some((self.graph, last_directory_ix)))
     }
 }
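
A condensed sketch of the index trick finalize_root_to_leaves relies on, in plain petgraph without any tvix types: wrapping the dissolved node list in Option lets node weights be moved out in BFS order without invalidating the remaining indices.

    use petgraph::graph::{DiGraph, NodeIndex};
    use petgraph::visit::{Bfs, Walker};

    // Return node weights in BFS order starting from `root`.
    fn bfs_weights<N, E>(graph: DiGraph<N, E>, root: NodeIndex) -> Vec<N> {
        // collect the visit order first; Walker::iter only borrows the graph.
        let order: Vec<NodeIndex> = Bfs::new(&graph, root).iter(&graph).collect();

        // dissolve the graph; take() leaves a hole instead of shifting indices.
        let (nodes, _edges) = graph.into_nodes_edges();
        let mut nodes: Vec<Option<_>> = nodes.into_iter().map(Some).collect();

        order
            .iter()
            .map(|ix| nodes[ix.index()].take().unwrap().weight)
            .collect()
    }
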
 
diff --git a/tvix/castore/src/directoryservice/from_addr.rs b/tvix/castore/src/directoryservice/from_addr.rs
index ae51df637..ee675ca68 100644
--- a/tvix/castore/src/directoryservice/from_addr.rs
+++ b/tvix/castore/src/directoryservice/from_addr.rs
@@ -2,7 +2,10 @@ use url::Url;
 
 use crate::{proto::directory_service_client::DirectoryServiceClient, Error};
 
-use super::{DirectoryService, GRPCDirectoryService, MemoryDirectoryService, SledDirectoryService};
+use super::{
+    DirectoryService, GRPCDirectoryService, MemoryDirectoryService, ObjectStoreDirectoryService,
+    SledDirectoryService,
+};
 
 /// Constructs a new instance of a [DirectoryService] from an URI.
 ///
@@ -63,6 +66,18 @@ pub async fn from_addr(uri: &str) -> Result<Box<dyn DirectoryService>, crate::Er
             let client = DirectoryServiceClient::new(crate::tonic::channel_from_url(&url).await?);
             Box::new(GRPCDirectoryService::from_client(client))
         }
+        scheme if scheme.starts_with("objectstore+") => {
+            // We need to convert the URL to a string, strip the prefix there, and then
+            // parse it back as a URL, as Url::set_scheme() rejects some of the transitions we want to do.
+            let trimmed_url = {
+                let s = url.to_string();
+                Url::parse(s.strip_prefix("objectstore+").unwrap()).unwrap()
+            };
+            Box::new(
+                ObjectStoreDirectoryService::parse_url(&trimmed_url)
+                    .map_err(|e| Error::StorageError(e.to_string()))?,
+            )
+        }
         #[cfg(feature = "cloud")]
         "bigtable" => {
             use super::bigtable::BigtableParameters;
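
The prefix-stripping dance can be isolated into a small helper; a sketch, with an illustrative helper name:

    use url::Url;

    // Round-trip through a string, since Url::set_scheme() rejects many
    // scheme transitions (e.g. between special and non-special schemes).
    fn strip_scheme_prefix(url: &Url, prefix: &str) -> Result<Url, url::ParseError> {
        let s = url.to_string();
        // fall back to the original string if the prefix is absent.
        Url::parse(s.strip_prefix(prefix).unwrap_or(&s))
    }

    // strip_scheme_prefix(&url, "objectstore+") turns
    // "objectstore+memory:///" into "memory:///".
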
diff --git a/tvix/castore/src/directoryservice/grpc.rs b/tvix/castore/src/directoryservice/grpc.rs
index 7402fe1b5..fe935629b 100644
--- a/tvix/castore/src/directoryservice/grpc.rs
+++ b/tvix/castore/src/directoryservice/grpc.rs
@@ -107,7 +107,7 @@ impl DirectoryService for GRPCDirectoryService {
     fn get_recursive(
         &self,
         root_directory_digest: &B3Digest,
-    ) -> BoxStream<Result<proto::Directory, Error>> {
+    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
         let mut grpc_client = self.grpc_client.clone();
         let root_directory_digest = root_directory_digest.clone();
 
diff --git a/tvix/castore/src/directoryservice/memory.rs b/tvix/castore/src/directoryservice/memory.rs
index 2cbbbd1b1..3b2795c39 100644
--- a/tvix/castore/src/directoryservice/memory.rs
+++ b/tvix/castore/src/directoryservice/memory.rs
@@ -1,7 +1,8 @@
 use crate::{proto, B3Digest, Error};
 use futures::stream::BoxStream;
 use std::collections::HashMap;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use tokio::sync::RwLock;
 use tonic::async_trait;
 use tracing::{instrument, warn};
 
@@ -17,7 +18,7 @@ pub struct MemoryDirectoryService {
 impl DirectoryService for MemoryDirectoryService {
     #[instrument(skip(self, digest), fields(directory.digest = %digest))]
     async fn get(&self, digest: &B3Digest) -> Result<Option<proto::Directory>, Error> {
-        let db = self.db.read()?;
+        let db = self.db.read().await;
 
         match db.get(digest) {
             // The directory was not found, return
@@ -62,7 +63,7 @@ impl DirectoryService for MemoryDirectoryService {
         }
 
         // store it
-        let mut db = self.db.write()?;
+        let mut db = self.db.write().await;
         db.insert(digest.clone(), directory);
 
         Ok(digest)
@@ -72,7 +73,7 @@ impl DirectoryService for MemoryDirectoryService {
     fn get_recursive(
         &self,
         root_directory_digest: &B3Digest,
-    ) -> BoxStream<Result<proto::Directory, Error>> {
+    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
         traverse_directory(self.clone(), root_directory_digest)
     }
 
diff --git a/tvix/castore/src/directoryservice/mod.rs b/tvix/castore/src/directoryservice/mod.rs
index cf6bea39d..3f180ef16 100644
--- a/tvix/castore/src/directoryservice/mod.rs
+++ b/tvix/castore/src/directoryservice/mod.rs
@@ -6,6 +6,7 @@ mod closure_validator;
 mod from_addr;
 mod grpc;
 mod memory;
+mod object_store;
 mod simple_putter;
 mod sled;
 #[cfg(test)]
@@ -17,6 +18,7 @@ pub use self::closure_validator::ClosureValidator;
 pub use self::from_addr::from_addr;
 pub use self::grpc::GRPCDirectoryService;
 pub use self::memory::MemoryDirectoryService;
+pub use self::object_store::ObjectStoreDirectoryService;
 pub use self::simple_putter::SimplePutter;
 pub use self::sled::SledDirectoryService;
 pub use self::traverse::descend_to;
@@ -64,7 +66,7 @@ pub trait DirectoryService: Send + Sync {
     fn get_recursive(
         &self,
         root_directory_digest: &B3Digest,
-    ) -> BoxStream<Result<proto::Directory, Error>>;
+    ) -> BoxStream<'static, Result<proto::Directory, Error>>;
 
     /// Allows persisting a closure of [proto::Directory], which is a graph of
     /// connected Directory messages.
@@ -87,7 +89,7 @@ where
     fn get_recursive(
         &self,
         root_directory_digest: &B3Digest,
-    ) -> BoxStream<Result<proto::Directory, Error>> {
+    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
         self.as_ref().get_recursive(root_directory_digest)
     }
 
diff --git a/tvix/castore/src/directoryservice/object_store.rs b/tvix/castore/src/directoryservice/object_store.rs
new file mode 100644
index 000000000..64ce335ed
--- /dev/null
+++ b/tvix/castore/src/directoryservice/object_store.rs
@@ -0,0 +1,261 @@
+use std::collections::HashSet;
+use std::sync::Arc;
+
+use data_encoding::HEXLOWER;
+use futures::future::Either;
+use futures::stream::BoxStream;
+use futures::SinkExt;
+use futures::StreamExt;
+use futures::TryFutureExt;
+use futures::TryStreamExt;
+use object_store::{path::Path, ObjectStore};
+use prost::Message;
+use tokio::io::AsyncWriteExt;
+use tokio_util::codec::LengthDelimitedCodec;
+use tonic::async_trait;
+use tracing::{instrument, trace, warn, Level};
+use url::Url;
+
+use super::{ClosureValidator, DirectoryPutter, DirectoryService};
+use crate::{proto, B3Digest, Error};
+
+/// Stores directory closures in an object store.
+/// Notably, this makes use of the option to disallow accessing child directories except when
+/// fetching them recursively via the top-level directory, since all batched writes
+/// (using `put_multiple_start`) are stored in a single object.
+/// Directories are stored in a length-delimited format with a 1MiB limit. The length field is a
+/// u32 and the directories are stored in root-to-leaves topological order, the same way they will
+/// be returned to the client in get_recursive.
+#[derive(Clone)]
+pub struct ObjectStoreDirectoryService {
+    object_store: Arc<dyn ObjectStore>,
+    base_path: Path,
+}
+
+#[instrument(level=Level::TRACE, skip_all,fields(base_path=%base_path,blob.digest=%digest),ret(Display))]
+fn derive_dirs_path(base_path: &Path, digest: &B3Digest) -> Path {
+    base_path
+        .child("dirs")
+        .child("b3")
+        .child(HEXLOWER.encode(&digest.as_slice()[..2]))
+        .child(HEXLOWER.encode(digest.as_slice()))
+}
+
+#[allow(clippy::identity_op)]
+const MAX_FRAME_LENGTH: usize = 1 * 1024 * 1024; // 1 MiB, as documented above
+
+impl ObjectStoreDirectoryService {
+    /// Constructs a new [ObjectStoreDirectoryService] from a [Url] supported by
+    /// [object_store].
+    /// Any path suffix becomes the base path of the object store.
+    /// Additional options, the same as in [object_store::parse_url_opts],
+    /// can be passed.
+    pub fn parse_url_opts<I, K, V>(url: &Url, options: I) -> Result<Self, object_store::Error>
+    where
+        I: IntoIterator<Item = (K, V)>,
+        K: AsRef<str>,
+        V: Into<String>,
+    {
+        let (object_store, path) = object_store::parse_url_opts(url, options)?;
+
+        Ok(Self {
+            object_store: Arc::new(object_store),
+            base_path: path,
+        })
+    }
+
+    /// Like [Self::parse_url_opts], except without the options.
+    pub fn parse_url(url: &Url) -> Result<Self, object_store::Error> {
+        Self::parse_url_opts(url, Vec::<(String, String)>::new())
+    }
+}
+
+#[async_trait]
+impl DirectoryService for ObjectStoreDirectoryService {
+    /// This involves the same steps as get_recursive anyway, so we just call
+    /// get_recursive, return the first element of the stream and drop the request.
+    #[instrument(skip(self, digest), fields(directory.digest = %digest))]
+    async fn get(&self, digest: &B3Digest) -> Result<Option<proto::Directory>, Error> {
+        self.get_recursive(digest).take(1).next().await.transpose()
+    }
+
+    #[instrument(skip(self, directory), fields(directory.digest = %directory.digest()))]
+    async fn put(&self, directory: proto::Directory) -> Result<B3Digest, Error> {
+        if !directory.directories.is_empty() {
+            return Err(Error::InvalidRequest(
+                    "only put_multiple_start is supported by the ObjectStoreDirectoryService for directories with children".into(),
+            ));
+        }
+
+        let mut handle = self.put_multiple_start();
+        handle.put(directory).await?;
+        handle.close().await
+    }
+
+    #[instrument(skip_all, fields(directory.digest = %root_directory_digest))]
+    fn get_recursive(
+        &self,
+        root_directory_digest: &B3Digest,
+    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
+        // The Directory digests we're expecting to receive.
+        let mut expected_directory_digests: HashSet<B3Digest> =
+            HashSet::from([root_directory_digest.clone()]);
+
+        let dir_path = derive_dirs_path(&self.base_path, root_directory_digest);
+        let object_store = self.object_store.clone();
+
+        Box::pin(
+            (async move {
+                let stream = match object_store.get(&dir_path).await {
+                    Ok(v) => v.into_stream(),
+                    Err(object_store::Error::NotFound { .. }) => {
+                        return Ok(Either::Left(futures::stream::empty()))
+                    }
+                    Err(e) => return Err(std::io::Error::from(e).into()),
+                };
+
+                // get a reader of the response body.
+                let r = tokio_util::io::StreamReader::new(stream);
+                let decompressed_stream = async_compression::tokio::bufread::ZstdDecoder::new(r);
+
+                // the subdirectories are stored in a length delimited format
+                let delimited_stream = LengthDelimitedCodec::builder()
+                    .max_frame_length(MAX_FRAME_LENGTH)
+                    .length_field_type::<u32>()
+                    .new_read(decompressed_stream);
+
+                let dirs_stream = delimited_stream.map_err(Error::from).and_then(move |buf| {
+                    futures::future::ready((|| {
+                        let mut hasher = blake3::Hasher::new();
+                        let digest: B3Digest = hasher.update(&buf).finalize().as_bytes().into();
+
+                        // Ensure to only decode the directory objects whose digests we trust
+                        let was_expected = expected_directory_digests.remove(&digest);
+                        if !was_expected {
+                            return Err(crate::Error::StorageError(format!(
+                                "received unexpected directory {}",
+                                digest
+                            )));
+                        }
+
+                        let directory = proto::Directory::decode(&*buf).map_err(|e| {
+                            warn!("unable to parse directory {}: {}", digest, e);
+                            Error::StorageError(e.to_string())
+                        })?;
+
+                        for directory in &directory.directories {
+                            // Allow the children to appear next
+                            expected_directory_digests.insert(
+                                B3Digest::try_from(directory.digest.clone())
+                                    .map_err(|e| Error::StorageError(e.to_string()))?,
+                            );
+                        }
+
+                        Ok(directory)
+                    })())
+                });
+
+                Ok(Either::Right(dirs_stream))
+            })
+            .try_flatten_stream(),
+        )
+    }
+
+    #[instrument(skip_all)]
+    fn put_multiple_start(&self) -> Box<(dyn DirectoryPutter + 'static)>
+    where
+        Self: Clone,
+    {
+        Box::new(ObjectStoreDirectoryPutter::new(
+            self.object_store.clone(),
+            self.base_path.clone(),
+        ))
+    }
+}
+
+struct ObjectStoreDirectoryPutter {
+    object_store: Arc<dyn ObjectStore>,
+    base_path: Path,
+
+    directory_validator: Option<ClosureValidator>,
+}
+
+impl ObjectStoreDirectoryPutter {
+    fn new(object_store: Arc<dyn ObjectStore>, base_path: Path) -> Self {
+        Self {
+            object_store,
+            base_path,
+            directory_validator: Some(Default::default()),
+        }
+    }
+}
+
+#[async_trait]
+impl DirectoryPutter for ObjectStoreDirectoryPutter {
+    #[instrument(level = "trace", skip_all, fields(directory.digest=%directory.digest()), err)]
+    async fn put(&mut self, directory: proto::Directory) -> Result<(), Error> {
+        match self.directory_validator {
+            None => return Err(Error::StorageError("already closed".to_string())),
+            Some(ref mut validator) => {
+                validator.add(directory)?;
+            }
+        }
+
+        Ok(())
+    }
+
+    #[instrument(level = "trace", skip_all, ret, err)]
+    async fn close(&mut self) -> Result<B3Digest, Error> {
+        let validator = match self.directory_validator.take() {
+            None => return Err(Error::InvalidRequest("already closed".to_string())),
+            Some(validator) => validator,
+        };
+
+        // retrieve the validated directories.
+        // It is important that they are in topological order (root first),
+        // since that's how we want to retrieve them from the object store in the end.
+        let directories = validator.finalize_root_to_leaves()?;
+
+        // Get the root digest
+        let root_digest = directories
+            .first()
+            .ok_or_else(|| Error::InvalidRequest("got no directories".to_string()))?
+            .digest();
+
+        let dir_path = derive_dirs_path(&self.base_path, &root_digest);
+
+        match self.object_store.head(&dir_path).await {
+            // directory tree already exists, nothing to do
+            Ok(_) => {
+                trace!("directory tree already exists");
+            }
+
+            // directory tree does not yet exist, compress and upload.
+            Err(object_store::Error::NotFound { .. }) => {
+                trace!("uploading directory tree");
+
+                let object_store_writer =
+                    object_store::buffered::BufWriter::new(self.object_store.clone(), dir_path);
+                let compressed_writer =
+                    async_compression::tokio::write::ZstdEncoder::new(object_store_writer);
+                let mut directories_sink = LengthDelimitedCodec::builder()
+                    .max_frame_length(MAX_FRAME_LENGTH)
+                    .length_field_type::<u32>()
+                    .new_write(compressed_writer);
+
+                for directory in directories {
+                    directories_sink
+                        .send(directory.encode_to_vec().into())
+                        .await?;
+                }
+
+                let mut compressed_writer = directories_sink.into_inner();
+                compressed_writer.shutdown().await?;
+            }
+            // other error
+            Err(err) => Err(std::io::Error::from(err))?,
+        }
+
+        Ok(root_digest)
+    }
+}
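
To make the object layout concrete, here is a minimal sketch of the write side under the same framing parameters. The function and its arguments are illustrative; the codec and encoder calls mirror the hunk above.

    use async_compression::tokio::write::ZstdEncoder;
    use futures::SinkExt;
    use tokio::io::AsyncWriteExt;
    use tokio_util::codec::LengthDelimitedCodec;

    const MAX_FRAME_LENGTH: usize = 1024 * 1024; // 1 MiB

    // Encode already-serialized Directory protos (root first) into a single
    // zstd-compressed, u32-length-delimited object.
    async fn write_closure<W>(writer: W, frames: Vec<Vec<u8>>) -> std::io::Result<()>
    where
        W: tokio::io::AsyncWrite + Unpin,
    {
        let zstd = ZstdEncoder::new(writer);
        let mut sink = LengthDelimitedCodec::builder()
            .max_frame_length(MAX_FRAME_LENGTH)
            .length_field_type::<u32>()
            .new_write(zstd);
        for frame in frames {
            sink.send(frame.into()).await?; // prepends the u32 length field
        }
        sink.into_inner().shutdown().await // finish the zstd stream
    }
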
diff --git a/tvix/castore/src/directoryservice/sled.rs b/tvix/castore/src/directoryservice/sled.rs
index e4a4c2bbe..9490a49c0 100644
--- a/tvix/castore/src/directoryservice/sled.rs
+++ b/tvix/castore/src/directoryservice/sled.rs
@@ -37,12 +37,23 @@ impl SledDirectoryService {
 impl DirectoryService for SledDirectoryService {
     #[instrument(skip(self, digest), fields(directory.digest = %digest))]
     async fn get(&self, digest: &B3Digest) -> Result<Option<proto::Directory>, Error> {
-        match self.db.get(digest.as_slice()) {
+        let resp = tokio::task::spawn_blocking({
+            let db = self.db.clone();
+            let digest = digest.clone();
+            move || db.get(digest.as_slice())
+        })
+        .await?
+        .map_err(|e| {
+            warn!("failed to retrieve directory: {}", e);
+            Error::StorageError(format!("failed to retrieve directory: {}", e))
+        })?;
+
+        match resp {
             // The directory was not found, return
-            Ok(None) => Ok(None),
+            None => Ok(None),
 
             // The directory was found, try to parse the data as Directory message
-            Ok(Some(data)) => match Directory::decode(&*data) {
+            Some(data) => match Directory::decode(&*data) {
                 Ok(directory) => {
                     // Validate the retrieved Directory indeed has the
                     // digest we expect it to have, to detect corruptions.
@@ -70,35 +81,38 @@ impl DirectoryService for SledDirectoryService {
                     Err(Error::StorageError(e.to_string()))
                 }
             },
-            // some storage error?
-            Err(e) => Err(Error::StorageError(e.to_string())),
         }
     }
 
     #[instrument(skip(self, directory), fields(directory.digest = %directory.digest()))]
     async fn put(&self, directory: proto::Directory) -> Result<B3Digest, Error> {
-        let digest = directory.digest();
-
-        // validate the directory itself.
-        if let Err(e) = directory.validate() {
-            return Err(Error::InvalidRequest(format!(
-                "directory {} failed validation: {}",
-                digest, e,
-            )));
-        }
-        // store it
-        let result = self.db.insert(digest.as_slice(), directory.encode_to_vec());
-        if let Err(e) = result {
-            return Err(Error::StorageError(e.to_string()));
-        }
-        Ok(digest)
+        tokio::task::spawn_blocking({
+            let db = self.db.clone();
+            move || {
+                let digest = directory.digest();
+
+                // validate the directory itself.
+                if let Err(e) = directory.validate() {
+                    return Err(Error::InvalidRequest(format!(
+                        "directory {} failed validation: {}",
+                        digest, e,
+                    )));
+                }
+                // store it
+                db.insert(digest.as_slice(), directory.encode_to_vec())
+                    .map_err(|e| Error::StorageError(e.to_string()))?;
+
+                Ok(digest)
+            }
+        })
+        .await?
     }
 
     #[instrument(skip_all, fields(directory.digest = %root_directory_digest))]
     fn get_recursive(
         &self,
         root_directory_digest: &B3Digest,
-    ) -> BoxStream<Result<proto::Directory, Error>> {
+    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
         traverse_directory(self.clone(), root_directory_digest)
     }
 
@@ -143,25 +157,32 @@ impl DirectoryPutter for SledDirectoryPutter {
         match self.directory_validator.take() {
             None => Err(Error::InvalidRequest("already closed".to_string())),
             Some(validator) => {
-                // retrieve the validated directories.
-                let directories = validator.finalize()?;
-
-                // Get the root digest, which is at the end (cf. insertion order)
-                let root_digest = directories
-                    .last()
-                    .ok_or_else(|| Error::InvalidRequest("got no directories".to_string()))?
-                    .digest();
-
-                let mut batch = sled::Batch::default();
-                for directory in directories {
-                    batch.insert(directory.digest().as_slice(), directory.encode_to_vec());
-                }
-
-                self.tree
-                    .apply_batch(batch)
-                    .map_err(|e| Error::StorageError(format!("unable to apply batch: {}", e)))?;
-
-                Ok(root_digest)
+                // Insert all directories as a batch.
+                tokio::task::spawn_blocking({
+                    let tree = self.tree.clone();
+                    move || {
+                        // retrieve the validated directories.
+                        let directories = validator.finalize()?;
+
+                        // Get the root digest, which is at the end (cf. insertion order)
+                        let root_digest = directories
+                            .last()
+                            .ok_or_else(|| Error::InvalidRequest("got no directories".to_string()))?
+                            .digest();
+
+                        let mut batch = sled::Batch::default();
+                        for directory in directories {
+                            batch.insert(directory.digest().as_slice(), directory.encode_to_vec());
+                        }
+
+                        tree.apply_batch(batch).map_err(|e| {
+                            Error::StorageError(format!("unable to apply batch: {}", e))
+                        })?;
+
+                        Ok(root_digest)
+                    }
+                })
+                .await?
             }
         }
     }
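
Since sled's API is synchronous, every call now moves onto tokio's blocking pool. The pattern applied throughout this file, reduced to its core (the function name is illustrative):

    use tvix_castore::Error;

    // Clone the cheap sled handle into the closure and await the join handle;
    // the `?` on await converts a JoinError via the From impl in errors.rs.
    async fn get_blocking(db: sled::Db, key: Vec<u8>) -> Result<Option<sled::IVec>, Error> {
        tokio::task::spawn_blocking(move || db.get(&key))
            .await?
            .map_err(|e| Error::StorageError(e.to_string()))
    }
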
diff --git a/tvix/castore/src/directoryservice/tests/mod.rs b/tvix/castore/src/directoryservice/tests/mod.rs
index 1b40d9feb..cc3c5b788 100644
--- a/tvix/castore/src/directoryservice/tests/mod.rs
+++ b/tvix/castore/src/directoryservice/tests/mod.rs
@@ -26,6 +26,7 @@ use self::utils::make_grpc_directory_service_client;
 #[case::grpc(make_grpc_directory_service_client().await)]
 #[case::memory(directoryservice::from_addr("memory://").await.unwrap())]
 #[case::sled(directoryservice::from_addr("sled://").await.unwrap())]
+#[case::objectstore(directoryservice::from_addr("objectstore+memory://").await.unwrap())]
 #[cfg_attr(all(feature = "cloud", feature = "integration"), case::bigtable(directoryservice::from_addr("bigtable://instance-1?project_id=project-1&table_name=table-1&family_name=cf1").await.unwrap()))]
 pub fn directory_services(#[case] directory_service: impl DirectoryService) {}
 
diff --git a/tvix/castore/src/directoryservice/traverse.rs b/tvix/castore/src/directoryservice/traverse.rs
index 8a668c868..17a51ae2b 100644
--- a/tvix/castore/src/directoryservice/traverse.rs
+++ b/tvix/castore/src/directoryservice/traverse.rs
@@ -1,5 +1,8 @@
 use super::DirectoryService;
-use crate::{proto::NamedNode, B3Digest, Error, Path};
+use crate::{
+    proto::{node::Node, NamedNode},
+    B3Digest, Error, Path,
+};
 use tracing::{instrument, warn};
 
 /// This descends from a (root) node to the given (sub)path, returning the Node
@@ -7,69 +10,55 @@ use tracing::{instrument, warn};
 #[instrument(skip(directory_service, path), fields(%path))]
 pub async fn descend_to<DS>(
     directory_service: DS,
-    root_node: crate::proto::node::Node,
+    root_node: Node,
     path: impl AsRef<Path> + std::fmt::Display,
-) -> Result<Option<crate::proto::node::Node>, Error>
+) -> Result<Option<Node>, Error>
 where
     DS: AsRef<dyn DirectoryService>,
 {
-    let mut cur_node = root_node;
-    let mut it = path.as_ref().components();
-
-    loop {
-        match it.next() {
-            None => {
-                // the (remaining) path is empty, return the node we're current at.
-                return Ok(Some(cur_node));
+    let mut parent_node = root_node;
+    for component in path.as_ref().components() {
+        match parent_node {
+            Node::File(_) | Node::Symlink(_) => {
+                // There's still some path left, but the parent node is no directory.
+                // This means the path doesn't exist, as we can't reach it.
+                return Ok(None);
             }
-            Some(first_component) => {
-                match cur_node {
-                    crate::proto::node::Node::File(_) | crate::proto::node::Node::Symlink(_) => {
-                        // There's still some path left, but the current node is no directory.
-                        // This means the path doesn't exist, as we can't reach it.
-                        return Ok(None);
-                    }
-                    crate::proto::node::Node::Directory(directory_node) => {
-                        let digest: B3Digest = directory_node.digest.try_into().map_err(|_e| {
-                            Error::StorageError("invalid digest length".to_string())
+            Node::Directory(directory_node) => {
+                let digest: B3Digest = directory_node
+                    .digest
+                    .try_into()
+                    .map_err(|_e| Error::StorageError("invalid digest length".to_string()))?;
+
+                // fetch the linked node from the directory_service.
+                let directory =
+                    directory_service
+                        .as_ref()
+                        .get(&digest)
+                        .await?
+                        .ok_or_else(|| {
+                            // If we didn't get the directory node that's linked, that's a store inconsistency, bail out!
+                            warn!("directory {} does not exist", digest);
+
+                            Error::StorageError(format!("directory {} does not exist", digest))
                         })?;
 
-                        // fetch the linked node from the directory_service
-                        match directory_service.as_ref().get(&digest).await? {
-                            // If we didn't get the directory node that's linked, that's a store inconsistency, bail out!
-                            None => {
-                                warn!("directory {} does not exist", digest);
-
-                                return Err(Error::StorageError(format!(
-                                    "directory {} does not exist",
-                                    digest
-                                )));
-                            }
-                            Some(directory) => {
-                                // look for first_component in the [Directory].
-                                // FUTUREWORK: as the nodes() iterator returns in a sorted fashion, we
-                                // could stop as soon as e.name is larger than the search string.
-                                let child_node =
-                                    directory.nodes().find(|n| n.get_name() == first_component);
-
-                                match child_node {
-                                    // child node not found means there's no such element inside the directory.
-                                    None => {
-                                        return Ok(None);
-                                    }
-                                    // child node found, return to top-of loop to find the next
-                                    // node in the path.
-                                    Some(child_node) => {
-                                        cur_node = child_node;
-                                    }
-                                }
-                            }
-                        }
-                    }
+                // look for the component in the [Directory].
+                // FUTUREWORK: as the nodes() iterator returns in a sorted fashion, we
+                // could stop as soon as e.name is larger than the search string.
+                if let Some(child_node) = directory.nodes().find(|n| n.get_name() == component) {
+                    // child node found, update parent_node to it and continue.
+                    parent_node = child_node;
+                } else {
+                    // child node not found means there's no such element inside the directory.
+                    return Ok(None);
                 }
             }
         }
     }
+
+    // We traversed the entire path, so this must be the node.
+    Ok(Some(parent_node))
 }
 
 #[cfg(test)]
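
A hypothetical call site for the rewritten descend_to. This assumes PathBuf is re-exported at the crate root and satisfies the AsRef<Path> + Display bound of the signature above; its FromStr impl is exercised by the ingestion tests further down.

    use std::sync::Arc;
    use tvix_castore::directoryservice::{descend_to, DirectoryService};
    use tvix_castore::proto::node::Node;
    use tvix_castore::{Error, PathBuf};

    // Resolve the node at "a/b" below root_node: Ok(None) if the path does
    // not exist, Err on a store inconsistency (dangling directory digest).
    async fn resolve(
        directory_service: Arc<dyn DirectoryService>,
        root_node: Node,
    ) -> Result<Option<Node>, Error> {
        let path: PathBuf = "a/b".parse().expect("valid path"); // illustrative sub-path
        descend_to(directory_service, root_node, path).await
    }
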
diff --git a/tvix/castore/src/directoryservice/utils.rs b/tvix/castore/src/directoryservice/utils.rs
index 01c521076..a0ba395ec 100644
--- a/tvix/castore/src/directoryservice/utils.rs
+++ b/tvix/castore/src/directoryservice/utils.rs
@@ -2,14 +2,16 @@ use super::DirectoryService;
 use crate::proto;
 use crate::B3Digest;
 use crate::Error;
-use async_stream::stream;
+use async_stream::try_stream;
 use futures::stream::BoxStream;
 use std::collections::{HashSet, VecDeque};
+use tracing::instrument;
 use tracing::warn;
 
 /// Traverses a [proto::Directory] from the root to the children.
 ///
 /// This is mostly BFS, but directories are only returned once.
+#[instrument(skip(directory_service))]
 pub fn traverse_directory<'a, DS: DirectoryService + 'static>(
     directory_service: DS,
     root_directory_digest: &B3Digest,
@@ -23,60 +25,53 @@ pub fn traverse_directory<'a, DS: DirectoryService + 'static>(
     // We omit sending the same directories multiple times.
     let mut sent_directory_digests: HashSet<B3Digest> = HashSet::new();
 
-    let stream = stream! {
+    Box::pin(try_stream! {
         while let Some(current_directory_digest) = worklist_directory_digests.pop_front() {
-            match directory_service.get(&current_directory_digest).await {
+            let current_directory = directory_service.get(&current_directory_digest).await.map_err(|e| {
+                warn!("failed to look up directory");
+                Error::StorageError(format!(
+                    "unable to look up directory {}: {}",
+                    current_directory_digest, e
+                ))
+            })?.ok_or_else(|| {
                 // if it's not there, we have an inconsistent store!
-                Ok(None) => {
-                    warn!("directory {} does not exist", current_directory_digest);
-                    yield Err(Error::StorageError(format!(
-                        "directory {} does not exist",
-                        current_directory_digest
-                    )));
-                }
-                Err(e) => {
-                    warn!("failed to look up directory");
-                    yield Err(Error::StorageError(format!(
-                        "unable to look up directory {}: {}",
-                        current_directory_digest, e
-                    )));
-                }
+                warn!("directory {} does not exist", current_directory_digest);
+                Error::StorageError(format!(
+                    "directory {} does not exist",
+                    current_directory_digest
+                ))
 
-                // if we got it
-                Ok(Some(current_directory)) => {
-                    // validate, we don't want to send invalid directories.
-                    if let Err(e) = current_directory.validate() {
-                        warn!("directory failed validation: {}", e.to_string());
-                        yield Err(Error::StorageError(format!(
-                            "invalid directory: {}",
-                            current_directory_digest
-                        )));
-                    }
+            })?;
 
-                    // We're about to send this directory, so let's avoid sending it again if a
-                    // descendant has it.
-                    sent_directory_digests.insert(current_directory_digest);
+            // validate, we don't want to send invalid directories.
+            current_directory.validate().map_err(|e| {
+               warn!("directory failed validation: {}", e.to_string());
+               Error::StorageError(format!(
+                   "invalid directory: {}",
+                   current_directory_digest
+               ))
+            })?;
 
-                    // enqueue all child directory digests to the work queue, as
-                    // long as they're not part of the worklist or already sent.
-                    // This panics if the digest looks invalid, it's supposed to be checked first.
-                    for child_directory_node in &current_directory.directories {
-                        // TODO: propagate error
-                        let child_digest: B3Digest = child_directory_node.digest.clone().try_into().unwrap();
+            // We're about to send this directory, so let's avoid sending it again if a
+            // descendant has it.
+            sent_directory_digests.insert(current_directory_digest);
 
-                        if worklist_directory_digests.contains(&child_digest)
-                            || sent_directory_digests.contains(&child_digest)
-                        {
-                            continue;
-                        }
-                        worklist_directory_digests.push_back(child_digest);
-                    }
+            // enqueue all child directory digests to the work queue, as
+            // long as they're not part of the worklist or already sent.
+            // This panics if the digest looks invalid; it's supposed to be checked first.
+            for child_directory_node in &current_directory.directories {
+                // TODO: propagate error
+                let child_digest: B3Digest = child_directory_node.digest.clone().try_into().unwrap();
 
-                    yield Ok(current_directory);
+                if worklist_directory_digests.contains(&child_digest)
+                    || sent_directory_digests.contains(&child_digest)
+                {
+                    continue;
                 }
-            };
-        }
-    };
+                worklist_directory_digests.push_back(child_digest);
+            }
 
-    Box::pin(stream)
+            yield current_directory;
+        }
+    })
 }
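
The control-flow change here hinges on the try_stream! semantics: inside the macro, `yield v` emits Ok(v), while a `?` emits the error as the final stream item and terminates the stream, replacing the explicit `yield Err(...)` arms. A self-contained sketch with toy types:

    use async_stream::try_stream;
    use futures::stream::BoxStream;

    fn numbers(fail_at: u32) -> BoxStream<'static, Result<u32, String>> {
        Box::pin(try_stream! {
            for i in 0..5u32 {
                if i == fail_at {
                    Err(format!("failed at {}", i))?; // stream ends here
                }
                yield i;
            }
        })
    }
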
diff --git a/tvix/castore/src/errors.rs b/tvix/castore/src/errors.rs
index e807a19b9..8343d0774 100644
--- a/tvix/castore/src/errors.rs
+++ b/tvix/castore/src/errors.rs
@@ -1,4 +1,3 @@
-use std::sync::PoisonError;
 use thiserror::Error;
 use tokio::task::JoinError;
 use tonic::Status;
@@ -13,12 +12,6 @@ pub enum Error {
     StorageError(String),
 }
 
-impl<T> From<PoisonError<T>> for Error {
-    fn from(value: PoisonError<T>) -> Self {
-        Error::StorageError(value.to_string())
-    }
-}
-
 impl From<JoinError> for Error {
     fn from(value: JoinError) -> Self {
         Error::StorageError(value.to_string())
diff --git a/tvix/castore/src/import/archive.rs b/tvix/castore/src/import/archive.rs
index 0d21481d4..0ebb4a236 100644
--- a/tvix/castore/src/import/archive.rs
+++ b/tvix/castore/src/import/archive.rs
@@ -288,12 +288,12 @@ impl IngestionEntryGraph {
             None => self.graph.add_node(entry),
         };
 
-        // A path with 1 component is the root node
+        // for archives, a path with 1 component is the root node
         if path.components().count() == 1 {
             // We expect archives to contain a single root node, if there is another root node
             // entry with a different path name, this is unsupported.
             if let Some(root_node) = self.root_node {
-                if self.get_node(root_node).path() != &path {
+                if self.get_node(root_node).path() != path.as_ref() {
                     return Err(Error::UnexpectedNumberOfTopLevelEntries);
                 }
             }
diff --git a/tvix/castore/src/import/mod.rs b/tvix/castore/src/import/mod.rs
index 7b42644a2..e8b27e469 100644
--- a/tvix/castore/src/import/mod.rs
+++ b/tvix/castore/src/import/mod.rs
@@ -6,7 +6,7 @@
 
 use crate::directoryservice::DirectoryPutter;
 use crate::directoryservice::DirectoryService;
-use crate::path::PathBuf;
+use crate::path::{Path, PathBuf};
 use crate::proto::node::Node;
 use crate::proto::Directory;
 use crate::proto::DirectoryNode;
@@ -77,7 +77,8 @@ where
             IngestionEntry::Dir { .. } => {
                 // If the entry is a directory, we traversed all its children (and
                 // populated it in `directories`).
-                // If we don't have it in there, it's an empty directory.
+                // If we don't have it in directories, it's a directory without
+                // children.
                 let directory = directories
                     .remove(entry.path())
                     // In that case, it contained no children
@@ -120,18 +121,25 @@ where
             }),
         };
 
-        if entry.path().components().count() == 1 {
+        let parent = entry
+            .path()
+            .parent()
+            .expect("Tvix bug: got entry with root node");
+
+        if parent == crate::Path::ROOT {
             break node;
+        } else {
+            // record node in parent directory, creating a new [Directory] if not there yet.
+            directories.entry(parent.to_owned()).or_default().add(node);
         }
-
-        // record node in parent directory, creating a new [Directory] if not there yet.
-        directories
-            .entry(entry.path().parent().unwrap().to_owned())
-            .or_default()
-            .add(node);
     };
 
     assert!(
+        entries.count().await == 0,
+        "Tvix bug: left over elements in the stream"
+    );
+
+    assert!(
         directories.is_empty(),
         "Tvix bug: left over directories after processing ingestion stream"
     );
@@ -183,7 +191,7 @@ pub enum IngestionEntry {
 }
 
 impl IngestionEntry {
-    fn path(&self) -> &PathBuf {
+    fn path(&self) -> &Path {
         match self {
             IngestionEntry::Regular { path, .. } => path,
             IngestionEntry::Symlink { path, .. } => path,
@@ -195,3 +203,138 @@ impl IngestionEntry {
         matches!(self, IngestionEntry::Dir { .. })
     }
 }
+
+#[cfg(test)]
+mod test {
+    use rstest::rstest;
+
+    use crate::fixtures::{DIRECTORY_COMPLICATED, DIRECTORY_WITH_KEEP, EMPTY_BLOB_DIGEST};
+    use crate::proto::node::Node;
+    use crate::proto::{Directory, DirectoryNode, FileNode, SymlinkNode};
+    use crate::{directoryservice::MemoryDirectoryService, fixtures::DUMMY_DIGEST};
+
+    use super::ingest_entries;
+    use super::IngestionEntry;
+
+    #[rstest]
+    #[case::single_file(vec![IngestionEntry::Regular {
+        path: "foo".parse().unwrap(),
+        size: 42,
+        executable: true,
+        digest: DUMMY_DIGEST.clone(),
+    }],
+        Node::File(FileNode { name: "foo".into(), digest: DUMMY_DIGEST.clone().into(), size: 42, executable: true }
+    ))]
+    #[case::single_symlink(vec![IngestionEntry::Symlink {
+        path: "foo".parse().unwrap(),
+        target: b"blub".into(),
+    }],
+        Node::Symlink(SymlinkNode { name: "foo".into(), target: "blub".into()})
+    )]
+    #[case::single_dir(vec![IngestionEntry::Dir {
+        path: "foo".parse().unwrap(),
+    }],
+        Node::Directory(DirectoryNode { name: "foo".into(), digest: Directory::default().digest().into(), size: Directory::default().size()})
+    )]
+    #[case::dir_with_keep(vec![
+        IngestionEntry::Regular {
+            path: "foo/.keep".parse().unwrap(),
+            size: 0,
+            executable: false,
+            digest: EMPTY_BLOB_DIGEST.clone(),
+        },
+        IngestionEntry::Dir {
+            path: "foo".parse().unwrap(),
+        },
+    ],
+        Node::Directory(DirectoryNode { name: "foo".into(), digest: DIRECTORY_WITH_KEEP.digest().into(), size: DIRECTORY_WITH_KEEP.size() })
+    )]
+    /// This is intentionally a bit unsorted, though it still satisfies all
+    /// requirements we have on the order of elements in the stream.
+    #[case::directory_complicated(vec![
+        IngestionEntry::Regular {
+            path: "blub/.keep".parse().unwrap(),
+            size: 0,
+            executable: false,
+            digest: EMPTY_BLOB_DIGEST.clone(),
+        },
+        IngestionEntry::Regular {
+            path: "blub/keep/.keep".parse().unwrap(),
+            size: 0,
+            executable: false,
+            digest: EMPTY_BLOB_DIGEST.clone(),
+        },
+        IngestionEntry::Dir {
+            path: "blub/keep".parse().unwrap(),
+        },
+        IngestionEntry::Symlink {
+            path: "blub/aa".parse().unwrap(),
+            target: b"/nix/store/somewhereelse".into(),
+        },
+        IngestionEntry::Dir {
+            path: "blub".parse().unwrap(),
+        },
+    ],
+        Node::Directory(DirectoryNode { name: "blub".into(), digest: DIRECTORY_COMPLICATED.digest().into(), size:DIRECTORY_COMPLICATED.size() })
+    )]
+    #[tokio::test]
+    async fn test_ingestion(#[case] entries: Vec<IngestionEntry>, #[case] exp_root_node: Node) {
+        let directory_service = MemoryDirectoryService::default();
+
+        let root_node = ingest_entries(
+            directory_service.clone(),
+            futures::stream::iter(entries.into_iter().map(Ok::<_, std::io::Error>)),
+        )
+        .await
+        .expect("must succeed");
+
+        assert_eq!(exp_root_node, root_node, "root node should match");
+    }
+
+    #[rstest]
+    #[should_panic]
+    #[case::empty_entries(vec![])]
+    #[should_panic]
+    #[case::missing_intermediate_dir(vec![
+        IngestionEntry::Regular {
+            path: "blub/.keep".parse().unwrap(),
+            size: 0,
+            executable: false,
+            digest: EMPTY_BLOB_DIGEST.clone(),
+        },
+    ])]
+    #[should_panic]
+    #[case::leaf_after_parent(vec![
+        IngestionEntry::Dir {
+            path: "blub".parse().unwrap(),
+        },
+        IngestionEntry::Regular {
+            path: "blub/.keep".parse().unwrap(),
+            size: 0,
+            executable: false,
+            digest: EMPTY_BLOB_DIGEST.clone(),
+        },
+    ])]
+    #[should_panic]
+    #[case::root_in_entry(vec![
+        IngestionEntry::Regular {
+            path: ".keep".parse().unwrap(),
+            size: 0,
+            executable: false,
+            digest: EMPTY_BLOB_DIGEST.clone(),
+        },
+        IngestionEntry::Dir {
+            path: "".parse().unwrap(),
+        },
+    ])]
+    #[tokio::test]
+    async fn test_ingestion_fail(#[case] entries: Vec<IngestionEntry>) {
+        let directory_service = MemoryDirectoryService::default();
+
+        let _ = ingest_entries(
+            directory_service.clone(),
+            futures::stream::iter(entries.into_iter().map(Ok::<_, std::io::Error>)),
+        )
+        .await;
+    }
+}
diff --git a/tvix/castore/src/proto/grpc_directoryservice_wrapper.rs b/tvix/castore/src/proto/grpc_directoryservice_wrapper.rs
index 7d741a3f0..5c1428690 100644
--- a/tvix/castore/src/proto/grpc_directoryservice_wrapper.rs
+++ b/tvix/castore/src/proto/grpc_directoryservice_wrapper.rs
@@ -1,12 +1,12 @@
 use crate::directoryservice::ClosureValidator;
 use crate::proto;
 use crate::{directoryservice::DirectoryService, B3Digest};
-use futures::StreamExt;
+use futures::stream::BoxStream;
+use futures::TryStreamExt;
 use std::ops::Deref;
-use tokio::sync::mpsc::channel;
-use tokio_stream::wrappers::ReceiverStream;
+use tokio_stream::once;
 use tonic::{async_trait, Request, Response, Status, Streaming};
-use tracing::{debug, instrument, warn};
+use tracing::{instrument, warn};
 
 pub struct GRPCDirectoryServiceWrapper<T> {
     directory_service: T,
@@ -23,63 +23,52 @@ impl<T> proto::directory_service_server::DirectoryService for GRPCDirectoryServi
 where
     T: Deref<Target = dyn DirectoryService> + Send + Sync + 'static,
 {
-    type GetStream = ReceiverStream<tonic::Result<proto::Directory, Status>>;
+    type GetStream = BoxStream<'static, tonic::Result<proto::Directory, Status>>;
 
     #[instrument(skip_all)]
-    async fn get(
-        &self,
+    async fn get<'a>(
+        &'a self,
         request: Request<proto::GetDirectoryRequest>,
     ) -> Result<Response<Self::GetStream>, Status> {
-        let (tx, rx) = channel(5);
-
         let req_inner = request.into_inner();
 
-        // look at the digest in the request and put it in the top of the queue.
-        match &req_inner.by_what {
-            None => return Err(Status::invalid_argument("by_what needs to be specified")),
-            Some(proto::get_directory_request::ByWhat::Digest(ref digest)) => {
+        let by_what = &req_inner
+            .by_what
+            .ok_or_else(|| Status::invalid_argument("invalid by_what"))?;
+
+        match by_what {
+            proto::get_directory_request::ByWhat::Digest(ref digest) => {
                 let digest: B3Digest = digest
                     .clone()
                     .try_into()
                     .map_err(|_e| Status::invalid_argument("invalid digest length"))?;
 
-                if !req_inner.recursive {
-                    let e: Result<proto::Directory, Status> = match self
-                        .directory_service
-                        .get(&digest)
-                        .await
-                    {
-                        Ok(Some(directory)) => Ok(directory),
-                        Ok(None) => {
-                            Err(Status::not_found(format!("directory {} not found", digest)))
-                        }
-                        Err(e) => {
-                            warn!(err = %e, directory.digest=%digest, "failed to get directory");
-                            Err(e.into())
-                        }
-                    };
-
-                    if tx.send(e).await.is_err() {
-                        debug!("receiver dropped");
+                Ok(tonic::Response::new({
+                    if !req_inner.recursive {
+                        let directory = self
+                            .directory_service
+                            .get(&digest)
+                            .await
+                            .map_err(|e| {
+                                warn!(err = %e, directory.digest=%digest, "failed to get directory");
+                                tonic::Status::new(tonic::Code::Internal, e.to_string())
+                            })?
+                            .ok_or_else(|| {
+                                Status::not_found(format!("directory {} not found", digest))
+                            })?;
+
+                        Box::pin(once(Ok(directory)))
+                    } else {
+                        // If recursive was requested, traverse via get_recursive.
+                        Box::pin(
+                            self.directory_service.get_recursive(&digest).map_err(|e| {
+                                tonic::Status::new(tonic::Code::Internal, e.to_string())
+                            }),
+                        )
                     }
-                } else {
-                    // If recursive was requested, traverse via get_recursive.
-                    let mut directories_it = self.directory_service.get_recursive(&digest);
-
-                    while let Some(e) = directories_it.next().await {
-                        // map err in res from Error to Status
-                        let res = e.map_err(|e| Status::internal(e.to_string()));
-                        if tx.send(res).await.is_err() {
-                            debug!("receiver dropped");
-                            break;
-                        }
-                    }
-                }
+                }))
             }
         }
-
-        let receiver_stream = ReceiverStream::new(rx);
-        Ok(Response::new(receiver_stream))
     }
 
     #[instrument(skip_all)]
diff --git a/tvix/cli/Cargo.toml b/tvix/cli/Cargo.toml
index ce8d36177..1fa235182 100644
--- a/tvix/cli/Cargo.toml
+++ b/tvix/cli/Cargo.toml
@@ -21,7 +21,7 @@ rustyline = "10.0.0"
 thiserror = "1.0.38"
 tokio = "1.28.0"
 tracing = { version = "0.1.37", features = ["max_level_trace", "release_max_level_info"] }
-tracing-subscriber = { version = "0.3.16", features = ["json"] }
+tracing-subscriber = "0.3.16"
 
 [dependencies.wu-manber]
 git = "https://github.com/tvlfyi/wu-manber.git"
diff --git a/tvix/cli/src/main.rs b/tvix/cli/src/main.rs
index 5635f446b..d66d2ce4c 100644
--- a/tvix/cli/src/main.rs
+++ b/tvix/cli/src/main.rs
@@ -80,27 +80,23 @@ struct Args {
     build_service_addr: String,
 }
 
-/// Interprets the given code snippet, printing out warnings, errors
-/// and the result itself. The return value indicates whether
-/// evaluation succeeded.
-fn interpret(code: &str, path: Option<PathBuf>, args: &Args, explain: bool) -> bool {
-    let tokio_runtime = tokio::runtime::Runtime::new().expect("failed to setup tokio runtime");
-
-    let (blob_service, directory_service, path_info_service) = tokio_runtime
-        .block_on({
-            let blob_service_addr = args.blob_service_addr.clone();
-            let directory_service_addr = args.directory_service_addr.clone();
-            let path_info_service_addr = args.path_info_service_addr.clone();
-            async move {
-                tvix_store::utils::construct_services(
-                    blob_service_addr,
-                    directory_service_addr,
-                    path_info_service_addr,
-                )
-                .await
-            }
-        })
-        .expect("unable to setup {blob|directory|pathinfo}service before interpreter setup");
+fn init_io_handle(tokio_runtime: &tokio::runtime::Runtime, args: &Args) -> Rc<TvixStoreIO> {
+    let (blob_service, directory_service, path_info_service, nar_calculation_service) =
+        tokio_runtime
+            .block_on({
+                let blob_service_addr = args.blob_service_addr.clone();
+                let directory_service_addr = args.directory_service_addr.clone();
+                let path_info_service_addr = args.path_info_service_addr.clone();
+                async move {
+                    tvix_store::utils::construct_services(
+                        blob_service_addr,
+                        directory_service_addr,
+                        path_info_service_addr,
+                    )
+                    .await
+                }
+            })
+            .expect("unable to setup {blob|directory|pathinfo}service before interpreter setup");
 
     let build_service = tokio_runtime
         .block_on({
@@ -117,14 +113,26 @@ fn interpret(code: &str, path: Option<PathBuf>, args: &Args, explain: bool) -> b
         })
         .expect("unable to setup buildservice before interpreter setup");
 
-    let tvix_store_io = Rc::new(TvixStoreIO::new(
+    Rc::new(TvixStoreIO::new(
         blob_service.clone(),
         directory_service.clone(),
         path_info_service.into(),
+        nar_calculation_service.into(),
         build_service.into(),
         tokio_runtime.handle().clone(),
-    ));
+    ))
+}
 
+/// Interprets the given code snippet, printing out warnings, errors
+/// and the result itself. The return value indicates whether
+/// evaluation succeeded.
+fn interpret(
+    tvix_store_io: Rc<TvixStoreIO>,
+    code: &str,
+    path: Option<PathBuf>,
+    args: &Args,
+    explain: bool,
+) -> bool {
     let mut eval = tvix_eval::Evaluation::new(
         Box::new(TvixIO::new(tvix_store_io.clone() as Rc<dyn EvalIO>)) as Box<dyn EvalIO>,
         true,
@@ -242,18 +250,22 @@ fn main() {
         .try_init()
         .expect("unable to set up tracing subscriber");
 
+    let tokio_runtime = tokio::runtime::Runtime::new().expect("failed to setup tokio runtime");
+
+    let io_handle = init_io_handle(&tokio_runtime, &args);
+
     if let Some(file) = &args.script {
-        run_file(file.clone(), &args)
+        run_file(io_handle, file.clone(), &args)
     } else if let Some(expr) = &args.expr {
-        if !interpret(expr, None, &args, false) {
+        if !interpret(io_handle, expr, None, &args, false) {
             std::process::exit(1);
         }
     } else {
-        run_prompt(&args)
+        run_prompt(io_handle, &args)
     }
 }
 
-fn run_file(mut path: PathBuf, args: &Args) {
+fn run_file(io_handle: Rc<TvixStoreIO>, mut path: PathBuf, args: &Args) {
     if path.is_dir() {
         path.push("default.nix");
     }
@@ -262,7 +274,7 @@ fn run_file(mut path: PathBuf, args: &Args) {
     let success = if args.compile_only {
         lint(&contents, Some(path), args)
     } else {
-        interpret(&contents, Some(path), args, false)
+        interpret(io_handle, &contents, Some(path), args, false)
     };
 
     if !success {
@@ -286,7 +298,7 @@ fn state_dir() -> Option<PathBuf> {
     path
 }
 
-fn run_prompt(args: &Args) {
+fn run_prompt(io_handle: Rc<TvixStoreIO>, args: &Args) {
     let mut rl = Editor::<()>::new().expect("should be able to launch rustyline");
 
     if args.compile_only {
@@ -317,9 +329,9 @@ fn run_prompt(args: &Args) {
                 rl.add_history_entry(&line);
 
                 if let Some(without_prefix) = line.strip_prefix(":d ") {
-                    interpret(without_prefix, None, args, true);
+                    interpret(Rc::clone(&io_handle), without_prefix, None, args, true);
                 } else {
-                    interpret(&line, None, args, false);
+                    interpret(Rc::clone(&io_handle), &line, None, args, false);
                 }
             }
             Err(ReadlineError::Interrupted) | Err(ReadlineError::Eof) => break,
diff --git a/tvix/default.nix b/tvix/default.nix
index f562cf37d..a3a4d35df 100644
--- a/tvix/default.nix
+++ b/tvix/default.nix
@@ -224,9 +224,7 @@ in
       rustPlatform.cargoSetupHook
     ];
 
-    # Allow blocks_in_conditions due to false positives with #[tracing::instrument(…)]:
-    # https://github.com/rust-lang/rust-clippy/issues/12281
-    buildPhase = "cargo clippy --tests --all-features --benches --examples -- -Dwarnings -A clippy::blocks_in_conditions | tee $out";
+    buildPhase = "cargo clippy --tests --all-features --benches --examples -- -Dwarnings | tee $out";
   };
 
   meta.ci.targets = [
diff --git a/tvix/docs/src/SUMMARY.md b/tvix/docs/src/SUMMARY.md
index b0e47a001..954abae33 100644
--- a/tvix/docs/src/SUMMARY.md
+++ b/tvix/docs/src/SUMMARY.md
@@ -8,3 +8,7 @@
 - [Specification of the Nix Language](./language-spec.md)
 - [Nix language version history](./lang-version.md)
 - [Value Pointer Equality](./value-pointer-equality.md)
+- [Daemon protocol changelog](./nix-daemon/changelog.md)
+- [Daemon protocol logging](./nix-daemon/logging.md)
+- [Daemon protocol operations](./nix-daemon/operations.md)
+- [Daemon protocol serialization](./nix-daemon/serialization.md)
\ No newline at end of file
diff --git a/tvix/docs/src/TODO.md b/tvix/docs/src/TODO.md
index 6644bb6ba..8fb22ea82 100644
--- a/tvix/docs/src/TODO.md
+++ b/tvix/docs/src/TODO.md
@@ -10,11 +10,27 @@ Feel free to add new ideas. Before picking something, ask in `#tvix-dev` to make
 sure noone is working on this, or has some specific design in mind already.
 
 ## Cleanups
+### Nix language test suite
+ - Think about how to merge, but "categorize", `tvix_tests` in `glue` and `eval`.
+   We currently only have this split because they need a different feature set /
+   builtins.
  - move some of the rstest cases in `tvix-glue` to the `.nix`/`.exp` mechanism.
-   - Parts requiring test fixtures need some special convention.
-     Some of these also cannot be checked into the repo, like the import tests
-     adding special files to test filtering.
- - add `nix_oracle` mechanism from `tvix-eval` to `tvix-glue`.
+   Some of them need test fixtures, which cannot be represented in git (special
+   file types in the import tests for example). Needs some support from the test
+   suite to create these fixtures on demand.
+ - extend `verify-lang-tests/default.nix` mechanism to validate `tvix-eval` and
+   `tvix-glue` test cases (or the common structure above).
+ - absorb `eval/tests/nix_oracle.rs` into `tvix_tests`, or figure out why that's
+   not possible (and document it). It looks like the only difference is that nix
+   is invoked with a different level of `--strict`, but the toplevel doc-comment
+   suggests it's generic?
+
+### Error cleanup
+ - Currently, all services use tvix_castore::Error, which only has two kinds
+   (invalid request, storage error), containing an (owned) string.
+   This is quite primitive. We should have individual error types for BS, DS, PS.
+   Maybe these should have some generics to still be able to carry errors from
+   the underlying backend, similar to `IngestionError`.
 
 ## Fixes towards correctness
  - `builtins.toXML` is missing string context. See b/398.
@@ -114,11 +130,8 @@ logs etc, but this is something requiring a lot of designing.
  - [redb](https://www.redb.org/) backend
  - sqlite backend (different schema than the Nix one, we need the root nodes data!)
 
-### Nix-compat
-- Async NAR reader (@edef?)
-
 ### Nix Daemon protocol
-- Some work ongoing on the worker operation parsing. Partially blocked on the async NAR reader.
+- Some work ongoing on the worker operation parsing (griff, picnoir)
 
 ### O11Y
  - gRPC trace propagation (cl/10532)
diff --git a/tvix/docs/src/nix-daemon/changelog.md b/tvix/docs/src/nix-daemon/changelog.md
new file mode 100644
index 000000000..bc99dc6af
--- /dev/null
+++ b/tvix/docs/src/nix-daemon/changelog.md
@@ -0,0 +1,202 @@
+
+
+## Nix version protocol
+
+| Nix version     | Protocol |
+| --------------- | -------- |
+| 0.11            | 1.02     |
+| 0.12            | 1.04     |
+| 0.13            | 1.05     |
+| 0.14            | 1.05     |
+| 0.15            | 1.05     |
+| 0.16            | 1.06     |
+| 1.0             | 1.10     |
+| 1.1             | 1.11     |
+| 1.2             | 1.12     |
+| 1.3 - 1.5.3     | 1.13     |
+| 1.6 - 1.10      | 1.14     |
+| 1.11 - 1.11.16  | 1.15     |
+| 2.0 - 2.0.4     | 1.20     |
+| 2.1 - 2.3.18    | 1.21     |
+| 2.4 - 2.6.1     | 1.32     |
+| 2.7.0           | 1.33     |
+| 2.8.0 - 2.14.1  | 1.34     |
+| 2.15.0 - 2.19.4 | 1.35     |
+| 2.20.0 - 2.22.0 | 1.37     |
+
+In commit [be64fbb501][be64fbb501] support was dropped for protocol versions older than 1.10.
+This happened while the protocol was between 1.17 and 1.18, and was released with Nix 2.0.
+This means that no version of Nix 2.x can talk to Nix 0.x.
+
+## Operation History
+
+| Op              | Id | Commit         | Protocol | Nix Version | Notes |
+| --------------- | -- | -------------- | -------- | ----------- | ----- |
+| *Quit           | 0  | [a711689368][a711689368] || 0.11 | Became dead code in [7951c3c54][7951c3c54] (Nix 0.11) and removed in [d3c61d83b][d3c61d83b] (Nix 1.8) |
+| IsValidPath     | 1  | [a711689368][a711689368] || 0.11 ||
+| HasSubstitutes  | 3  | [0565b5f2b3][0565b5f2b3] || 0.11 ||
+| QueryPathHash   | 4  | [0565b5f2b3][0565b5f2b3] || 0.11 | Obsolete [e0204f8d46][e0204f8d46]<br>Nix 2.0 Protocol 1.16 |
+| QueryReferences | 5  | [0565b5f2b3][0565b5f2b3] || 0.11 | Obsolete [e0204f8d46][e0204f8d46]<br>Nix 2.0 Protocol 1.16 |
+| QueryReferrers  | 6  | [0565b5f2b3][0565b5f2b3] || 0.11 ||
+| AddToStore      | 7  | [0263279071][0263279071] || 0.11 ||
+| AddTextToStore  | 8  | [0263279071][0263279071] || 0.11 | Obsolete [c602ebfb34][c602ebfb34]<br>Nix 2.4 Protocol 1.25 |
+| BuildPaths      | 9  | [0565b5f2b3][0565b5f2b3] || 0.11 ||
+| EnsurePath      | 10 | [0565b5f2b3][0565b5f2b3] || 0.11 ||
+| AddTempRoot     | 11 | [e25fad691a][e25fad691a] || 0.11 ||
+| AddIndirectRoot | 12 | [74033a844f][74033a844f] || 0.11 ||
+| SyncWithGC      | 13 | [e25fad691a][e25fad691a] || 0.11 | Obsolete [9947f1646a][9947f1646a]<br> Nix 2.5.0 Protocol 1.32 |
+| FindRoots       | 14 | [29cf434a35][29cf434a35] || 0.11 ||
+| *CollectGarbage | 15 | [a9c4f66cfb][a9c4f66cfb] || 0.11 | Removed [a72709afd8][a72709afd8]<br>Nix 0.12 Protocol 1.02 |
+| ExportPath      | 16 | [0f5da8a83c][0f5da8a83c] || 0.11 | Obsolete [538a64e8c3][538a64e8c3]<br>Nix 2.0 Protocol 1.17 |
+| *ImportPath     | 17 | [0f5da8a83c][0f5da8a83c] || 0.11 | Removed [273b288a7e][273b288a7e]<br>Nix 1.0 Protocol 1.09 |
+| QueryDeriver    | 18 | [6d1a1191b0][6d1a1191b0] || 0.11 | Obsolete [e0204f8d46][e0204f8d46]<br>Nix 2.0 Protocol 1.16 |
+| SetOptions      | 19 | [f3441e6122][f3441e6122] || 0.11 ||
+| CollectGarbage              | 20 | [a72709afd8][a72709afd8] | 1.02  | 0.12   ||
+| QuerySubstitutablePathInfo  | 21 | [03427e76f1][03427e76f1] | 1.02  | 0.12   ||
+| QueryDerivationOutputs      | 22 | [e42401ee7b][e42401ee7b] | 1.05  | 1.0    | Obsolete [d38f860c3e][d38f860c3e]<br>Nix 2.4 Protocol 1.22* |
+| QueryAllValidPaths          | 23 | [24035b98b1][24035b98b1] | 1.05  | 1.0    ||
+| *QueryFailedPaths            | 24 | [f92c9a0ac5][f92c9a0ac5] | 1.05  | 1.0    | Removed [8cffec848][8cffec848]<br>Nix 2.0 Protocol 1.16 |
+| *ClearFailedPaths            | 25 | [f92c9a0ac5][f92c9a0ac5] | 1.05  | 1.0    | Removed [8cffec848][8cffec848]<br>Nix 2.0 Protocol 1.16 |
+| QueryPathInfo               | 26 | [1db6259076][1db6259076] | 1.06  | 1.0    ||
+| ImportPaths                 | 27 | [273b288a7e][273b288a7e] | 1.09  | 1.0    | Obsolete [538a64e8c3][538a64e8c3]<br>Nix 2.0 Protocol 1.17 |
+| QueryDerivationOutputNames  | 28 | [af2e53fd48][af2e53fd48]<br>([194d21f9f6][194d21f9f6]) | 1.08      | 1.0 | Obsolete<br>[045b07200c][045b07200c]<br>Nix 2.4 Protocol 1.21 |
+| QueryPathFromHashPart       | 29 | [ccc52adfb2][ccc52adfb2] | 1.11  | 1.1    ||
+| QuerySubstitutablePathInfos | 30 | [eb3036da87][eb3036da87] | 1.12* | 1.2    ||
+| QueryValidPaths             | 31 | [58ef4d9a95][58ef4d9a95] | 1.12  | 1.2    ||
+| QuerySubstitutablePaths     | 32 | [09a6321aeb][09a6321aeb] | 1.12  | 1.2    ||
+| QueryValidDerivers          | 33 | [2754a07ead][2754a07ead] | 1.13* | 1.3    ||
+| OptimiseStore               | 34 | [8fb8c26b6d][8fb8c26b6d] | 1.14  | 1.8    ||
+| VerifyStore                 | 35 | [b755752f76][b755752f76] | 1.14  | 1.9    ||
+| BuildDerivation             | 36 | [71a5161365][71a5161365] | 1.14  | 1.10   ||
+| AddSignatures               | 37 | [d0f5719c2a][d0f5719c2a] | 1.16  | 2.0    ||
+| NarFromPath                 | 38 | [b4b5e9ce2f][b4b5e9ce2f] | 1.17  | 2.0    ||
+| AddToStoreNar               | 39 | [584f8a62de][584f8a62de] | 1.17  | 2.0    ||
+| QueryMissing                | 40 | [ba20730b3f][ba20730b3f] | 1.19* | 2.0    ||
+| QueryDerivationOutputMap    | 41 | [d38f860c3e][d38f860c3e] | 1.22* | 2.4    ||
+| RegisterDrvOutput           | 42 | [58cdab64ac][58cdab64ac] | 1.27  | 2.4    ||
+| QueryRealisation            | 43 | [58cdab64ac][58cdab64ac] | 1.27  | 2.4    ||
+| AddMultipleToStore          | 44 | [fe1f34fa60][fe1f34fa60] | 1.32* | 2.4    ||
+| AddBuildLog                 | 45 | [4dda1f92aa][4dda1f92aa] | 1.32  | 2.6.0  ||
+| BuildPathsWithResults       | 46 | [a4604f1928][a4604f1928] | 1.34* | 2.8.0  ||
+| AddPermRoot                 | 47 | [226b0f3956][226b0f3956] | 1.36* | 2.20.0 ||
+
+Notes: Ops that start with * have been removed.
+A protocol version that ends with * was bumped while adding that operation. Otherwise the protocol version refers to the protocol version at the time the operation was added (so only from the next protocol version onwards can you assume the operation is present/removed/obsolete, since it was added/removed/obsoleted between protocol versions).
+
+## Protocol version change log
+
+- 1.01 [f3441e6122][f3441e6122] Initial Version
+- 1.02 [c370755583][c370755583] Use build hook
+- 1.03 [db4f4a8425][db4f4a8425] Backward compatibility check
+- 1.04 [96598e7b06][96598e7b06] SetOptions buildVerbosity
+- 1.05 [60ec75048a][60ec75048a] SetOptions useAtime & maxAtime
+- 1.06 [6846ed8b44][6846ed8b44] SetOptions buildCores
+- 1.07 [bdf089f463][bdf089f463] QuerySubstitutablePathInfo narSize
+- 1.08 [b1eb252172][b1eb252172] STDERR_ERROR exit status
+- 1.09 [e0bd307802][e0bd307802] ImportPath not supported on versions older than 1.09
+- 1.10 [db5b86ef13][db5b86ef13] SetOptions build-use-substitutes
+- 1.11 [4bc4da331a][4bc4da331a] open connection reserveSpace
+- 1.12 [eb3036da87][eb3036da87] Implement QuerySubstitutablePathInfos
+- 1.13 [2754a07ead][2754a07ead] Implement QueryValidDerivers
+- 1.14 [a583a2bc59][a583a2bc59] open connection cpu affinity
+- 1.15 [d1e3bf01bc][d1e3bf01bc] BuildPaths buildMode
+- 1.16 [9cee600c88][9cee600c88] QueryPathInfo ultimate & sigs
+- 1.17 [ddea253ff8][ddea253ff8] QueryPathInfo returns valid bool
+- 1.18 [4b8f1b0ec0][4b8f1b0ec0] Select between AddToStoreNar and ImportPaths
+- 1.19 [ba20730b3f][ba20730b3f] Implement QueryMissing
+- 1.20 [cfc8132391][cfc8132391] Don't send activity and result logs to old clients
+- 1.21 [6185d25e52][6185d25e52] AddToStoreNar uses TunnelLogger for data
+- 1.22 [d38f860c3e][d38f860c3e] Implement QueryDerivationOutputMap and obsolete QueryDerivationOutputs
+- 1.23 [4c0077a07d][4c0077a07d] AddToStoreNar uses FramedSink/-Source for data
+- 1.24 [5ccd94501d][5ccd94501d] Allow trustless building of CA derivations
+- 1.25 [e34fe47d0c][e34fe47d0c] New implementation of AddToStore
+- 1.26 [c43e882f54][c43e882f54] STDERR_ERROR serialize exception
+- 1.27 [3a63fc6cd5][3a63fc6cd5] QueryValidPaths substitute flag
+- 1.28 [27b5747ca7][27b5747ca7] BuildDerivation returns builtOutputs
+- 1.29 [9d309de0de][9d309de0de] BuildDerivation returns timesBuilt, isNonDeterministic, startTime & stopTime
+- 1.30 [e5951a6b2f][e5951a6b2f] Bump version number for DerivedPath changes
+- 1.31 [a8416866cf][a8416866cf] RegisterDrvOutput & QueryRealisation send realisations as JSON
+- 1.32 [fe1f34fa60][fe1f34fa60] Implement AddMultipleToStore
+- 1.33 [35dbdbedd4][35dbdbedd4] open connection sends nix version
+- 1.34 [a4604f1928][a4604f1928] Implement BuildPathsWithResults
+- 1.35 [9207f94582][9207f94582] open connection sends trusted option
+- 1.36 [226b0f3956][226b0f3956] Implement AddPermRoot
+- 1.37 [1e3d811840][1e3d811840] Serialize BuildResult send cpuUser & cpuSystem
+
+
+
+[0263279071]: https://github.com/NixOS/nix/commit/0263279071
+[03427e76f1]: https://github.com/NixOS/nix/commit/03427e76f1
+[045b07200c]: https://github.com/NixOS/nix/commit/045b07200c
+[0565b5f2b3]: https://github.com/NixOS/nix/commit/0565b5f2b3
+[09a6321aeb]: https://github.com/NixOS/nix/commit/09a6321aeb
+[0f5da8a83c]: https://github.com/NixOS/nix/commit/0f5da8a83c
+[194d21f9f6]: https://github.com/NixOS/nix/commit/194d21f9f6
+[1db6259076]: https://github.com/NixOS/nix/commit/1db6259076
+[1e3d811840]: https://github.com/NixOS/nix/commit/1e3d811840
+[24035b98b1]: https://github.com/NixOS/nix/commit/24035b98b1
+[226b0f3956]: https://github.com/NixOS/nix/commit/226b0f3956
+[273b288a7e]: https://github.com/NixOS/nix/commit/273b288a7e
+[2754a07ead]: https://github.com/NixOS/nix/commit/2754a07ead
+[27b5747ca7]: https://github.com/NixOS/nix/commit/27b5747ca7
+[29cf434a35]: https://github.com/NixOS/nix/commit/29cf434a35
+[35dbdbedd4]: https://github.com/NixOS/nix/commit/35dbdbedd4
+[3a63fc6cd5]: https://github.com/NixOS/nix/commit/3a63fc6cd5
+[4b8f1b0ec0]: https://github.com/NixOS/nix/commit/4b8f1b0ec0
+[4bc4da331a]: https://github.com/NixOS/nix/commit/4bc4da331a
+[4c0077a07d]: https://github.com/NixOS/nix/commit/4c0077a07d
+[4dda1f92aa]: https://github.com/NixOS/nix/commit/4dda1f92aa
+[538a64e8c3]: https://github.com/NixOS/nix/commit/538a64e8c3
+[584f8a62de]: https://github.com/NixOS/nix/commit/584f8a62de
+[58cdab64ac]: https://github.com/NixOS/nix/commit/58cdab64ac
+[58ef4d9a95]: https://github.com/NixOS/nix/commit/58ef4d9a95
+[5ccd94501d]: https://github.com/NixOS/nix/commit/5ccd94501d
+[60ec75048a]: https://github.com/NixOS/nix/commit/60ec75048a
+[6185d25e52]: https://github.com/NixOS/nix/commit/6185d25e52
+[6846ed8b44]: https://github.com/NixOS/nix/commit/6846ed8b44
+[6d1a1191b0]: https://github.com/NixOS/nix/commit/6d1a1191b0
+[71a5161365]: https://github.com/NixOS/nix/commit/71a5161365
+[74033a844f]: https://github.com/NixOS/nix/commit/74033a844f
+[7951c3c54]: https://github.com/NixOS/nix/commit/7951c3c54
+[8cffec848]: https://github.com/NixOS/nix/commit/8cffec848
+[8fb8c26b6d]: https://github.com/NixOS/nix/commit/8fb8c26b6d
+[9207f94582]: https://github.com/NixOS/nix/commit/9207f94582
+[96598e7b06]: https://github.com/NixOS/nix/commit/96598e7b06
+[9947f1646a]: https://github.com/NixOS/nix/commit/9947f1646a
+[9cee600c88]: https://github.com/NixOS/nix/commit/9cee600c88
+[9d309de0de]: https://github.com/NixOS/nix/commit/9d309de0de
+[a4604f1928]: https://github.com/NixOS/nix/commit/a4604f1928
+[a583a2bc59]: https://github.com/NixOS/nix/commit/a583a2bc59
+[a711689368]: https://github.com/NixOS/nix/commit/a711689368
+[a72709afd8]: https://github.com/NixOS/nix/commit/a72709afd8
+[a8416866cf]: https://github.com/NixOS/nix/commit/a8416866cf
+[a9c4f66cfb]: https://github.com/NixOS/nix/commit/a9c4f66cfb
+[af2e53fd48]: https://github.com/NixOS/nix/commit/af2e53fd48
+[b1eb252172]: https://github.com/NixOS/nix/commit/b1eb252172
+[b4b5e9ce2f]: https://github.com/NixOS/nix/commit/b4b5e9ce2f
+[b755752f76]: https://github.com/NixOS/nix/commit/b755752f76
+[ba20730b3f]: https://github.com/NixOS/nix/commit/ba20730b3f
+[bdf089f463]: https://github.com/NixOS/nix/commit/bdf089f463
+[be64fbb501]: https://github.com/NixOS/nix/commit/be64fbb501
+[c370755583]: https://github.com/NixOS/nix/commit/c370755583
+[c43e882f54]: https://github.com/NixOS/nix/commit/c43e882f54
+[c602ebfb34]: https://github.com/NixOS/nix/commit/c602ebfb34
+[ccc52adfb2]: https://github.com/NixOS/nix/commit/ccc52adfb2
+[cfc8132391]: https://github.com/NixOS/nix/commit/cfc8132391
+[d0f5719c2a]: https://github.com/NixOS/nix/commit/d0f5719c2a
+[d1e3bf01bc]: https://github.com/NixOS/nix/commit/d1e3bf01bc
+[d38f860c3e]: https://github.com/NixOS/nix/commit/d38f860c3e
+[d3c61d83b]: https://github.com/NixOS/nix/commit/d3c61d83b
+[db4f4a8425]: https://github.com/NixOS/nix/commit/db4f4a8425
+[db5b86ef13]: https://github.com/NixOS/nix/commit/db5b86ef13
+[ddea253ff8]: https://github.com/NixOS/nix/commit/ddea253ff8
+[e0204f8d46]: https://github.com/NixOS/nix/commit/e0204f8d46
+[e0bd307802]: https://github.com/NixOS/nix/commit/e0bd307802
+[e25fad691a]: https://github.com/NixOS/nix/commit/e25fad691a
+[e34fe47d0c]: https://github.com/NixOS/nix/commit/e34fe47d0c
+[e42401ee7b]: https://github.com/NixOS/nix/commit/e42401ee7b
+[e5951a6b2f]: https://github.com/NixOS/nix/commit/e5951a6b2f
+[eb3036da87]: https://github.com/NixOS/nix/commit/eb3036da87
+[f3441e6122]: https://github.com/NixOS/nix/commit/f3441e6122
+[f92c9a0ac5]: https://github.com/NixOS/nix/commit/f92c9a0ac5
+[fe1f34fa60]: https://github.com/NixOS/nix/commit/fe1f34fa60
diff --git a/tvix/docs/src/nix-daemon/logging.md b/tvix/docs/src/nix-daemon/logging.md
new file mode 100644
index 000000000..70fe0882f
--- /dev/null
+++ b/tvix/docs/src/nix-daemon/logging.md
@@ -0,0 +1,122 @@
+# Logging
+
+Because the daemon protocol has only one sender stream and one receiver stream,
+logging messages need to be carefully interleaved with requests and responses.
+Usually this means that after the operation and all of its inputs (the request)
+have been read, logging hijacks the sender stream (in the server case) and uses
+it to send typed logging messages while the request is being processed. When
+the response has been generated, `STDERR_LAST` is sent to mark that what
+follows is the response data to the request. If the request failed, a
+`STDERR_ERROR` message is sent with the error and no response is sent.
+
+Outside of this state (between reading a request and sending its response), all
+messages and activities are buffered until the next time the logger can send data.
+
+The logging messages supported are:
+- `STDERR_LAST`
+- `STDERR_ERROR`
+- `STDERR_NEXT`
+- `STDERR_READ`
+- `STDERR_WRITE`
+- `STDERR_START_ACTIVITY`
+- `STDERR_STOP_ACTIVITY`
+- `STDERR_RESULT`
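+
+For concreteness, the sketch below shows a client-side loop that drains these
+messages until `STDERR_LAST` (or fails on an unknown message). It is a minimal,
+hypothetical Rust sketch built on the hardcoded marker values given in the
+sections below, not the actual Nix or tvix implementation; payload parsing is
+elided.
+
+```rust
+use std::io::{self, Read};
+
+fn read_u64_le(r: &mut impl Read) -> io::Result<u64> {
+    let mut buf = [0u8; 8];
+    r.read_exact(&mut buf)?;
+    Ok(u64::from_le_bytes(buf))
+}
+
+/// Drain logger messages; return once STDERR_LAST is seen so the
+/// caller can read the actual response from the stream.
+fn drain_logs(r: &mut impl Read) -> io::Result<()> {
+    loop {
+        match read_u64_le(r)? {
+            0x616c7473 => return Ok(()),                    // STDERR_LAST
+            0x63787470 => { /* read Error, raise it */ }    // STDERR_ERROR
+            0x6f6c6d67 => { /* read String, print it */ }   // STDERR_NEXT
+            0x64617461 => { /* serve the read request */ }  // STDERR_READ
+            0x64617416 => { /* consume written buffer */ }  // STDERR_WRITE
+            0x53545254 => { /* record activity start */ }   // STDERR_START_ACTIVITY
+            0x53544f50 => { /* record activity stop */ }    // STDERR_STOP_ACTIVITY
+            0x52534c54 => { /* record activity result */ }  // STDERR_RESULT
+            other => {
+                return Err(io::Error::new(
+                    io::ErrorKind::InvalidData,
+                    format!("unknown log message type {other:#x}"),
+                ))
+            }
+        }
+    }
+}
+```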
+
+
+### `STDERR_LAST`
+Marks the end of the logs, normal processing can resume.
+
+- 0x616c7473 :: [UInt64][se-UInt64] (hardcoded)
+
+### `STDERR_ERROR`
+This also marks the end of this log "session" and so it
+has the same effect as `STDERR_LAST`.
+On the client the error is thrown as an exception and no response is read.
+
+#### If protocol version is 1.26 or newer
+- 0x63787470 :: [UInt64][se-UInt64] (hardcoded)
+- error :: [Error][se-Error]
+
+#### If protocol version is older than 1.26
+- 0x63787470 :: [UInt64][se-UInt64] (hardcoded)
+- msg :: [String][se-String]
+- exitStatus :: [Int][se-Int]
+
+
+### `STDERR_NEXT`
+Normal string log message.
+
+- 0x6f6c6d67 :: [UInt64][se-UInt64] (hardcoded)
+- msg :: [String][se-String]
+
+
+### `STDERR_READ`
+Reader interface used by ImportPaths and AddToStoreNar (between protocol
+versions 1.21 and 1.23). It works by sending a desired buffer length and then
+reading a bytes buffer of that length from the receiver stream. If it receives
+0 bytes it treats this as an unexpected EOF.
+
+- 0x64617461 :: [UInt64][se-UInt64] (hardcoded)
+- desiredLen :: [Size][se-Size]
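+
+A hypothetical sketch of the server side of this exchange, assuming the reply
+is a plain [Bytes][se-Bytes] value (length, content, zero padding):
+
+```rust
+use std::io::{self, Read, Write};
+
+fn stderr_read(tx: &mut impl Write, rx: &mut impl Read, want: u64) -> io::Result<Vec<u8>> {
+    tx.write_all(&0x64617461u64.to_le_bytes())?; // STDERR_READ marker
+    tx.write_all(&want.to_le_bytes())?;          // desiredLen :: Size
+    let mut len = [0u8; 8];
+    rx.read_exact(&mut len)?;
+    let len = u64::from_le_bytes(len) as usize;
+    if len == 0 {
+        return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "unexpected EOF"));
+    }
+    let mut buf = vec![0u8; len];
+    rx.read_exact(&mut buf)?;
+    let mut pad = vec![0u8; (8 - len % 8) % 8]; // discard alignment padding
+    rx.read_exact(&mut pad)?;
+    Ok(buf)
+}
+```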
+
+### `STDERR_WRITE`
+Writer interface used by ExportPath. Simply writes a buffer.
+
+- 0x64617416 :: [UInt64][se-UInt64] (hardcoded)
+- buffer :: [Bytes][se-Bytes]
+
+### `STDERR_START_ACTIVITY`
+Begins an activity. In other tracing frameworks this would be called a span.
+
+Implemented in protocol 1.20. To remain backwards compatible with older
+versions of the protocol, instead of sending a `STDERR_START_ACTIVITY`
+the level is checked against the enabled logging level, and the text field is
+sent as a simple log message with `STDERR_NEXT`.
+
+- 0x53545254 :: [UInt64][se-UInt64] (hardcoded)
+- act :: [UInt64][se-UInt64]
+- level :: [Verbosity][se-Verbosity]
+- type :: [ActivityType][se-ActivityType]
+- text :: [String][se-String]
+- fields :: [List][se-List] of [Field][se-Field]
+- parent :: [UInt64][se-UInt64]
+
+
+`act` is a unique activity id, allocated atomically as `nextId++ + (getPid() << 32)`.
+
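+A sketch of the backwards-compatibility fallback described above; the helper
+closures and numeric verbosity levels are assumptions for illustration:
+
+```rust
+/// `minor` is the negotiated minor protocol version (20 for 1.20).
+fn start_activity(
+    minor: u64,
+    enabled_level: u64,
+    level: u64,
+    text: &str,
+    send_start_activity: impl Fn(&str),
+    send_stderr_next: impl Fn(&str),
+) {
+    if minor >= 20 {
+        send_start_activity(text); // full STDERR_START_ACTIVITY message
+    } else if level <= enabled_level {
+        send_stderr_next(text); // degrade to a plain STDERR_NEXT log line
+    } // otherwise: drop the activity silently
+}
+```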
+
+### `STDERR_STOP_ACTIVITY`
+Stops the given activity. No further results should be sent for that activity
+id. Only the activity id is sent.
+
+Implemented in protocol 1.20. For backwards compatibility with older versions
+of the protocol, where this message would have been sent it is instead dropped.
+
+- 0x53544f50 :: [UInt64][se-UInt64] (hardcoded)
+
+
+### `STDERR_RESULT`
+Sends results for a given activity.
+
+Implemented in protocol 1.20. For backwards compatibility with older versions
+of the protocol, where this message would have been sent it is instead dropped.
+
+- 0x52534c54 :: [UInt64][se-UInt64] (hardcoded)
+- act :: [UInt64][se-UInt64]
+- type :: [ResultType][se-ResultType]
+- fields :: [List][se-List] of [Field][se-Field]
+
+
+
+
+[se-UInt64]: ./serialization.md#uint64
+[se-Int]: ./serialization.md#int
+[se-Size]: ./serialization.md#size
+[se-Verbosity]: ./serialization.md#verbosity
+[se-ActivityType]: ./serialization.md#activitytype
+[se-ResultType]: ./serialization.md#resulttype
+[se-Bytes]: ./serialization.md#bytes
+[se-String]: ./serialization.md#string
+[se-List]: ./serialization.md#list-of-x
+[se-Error]: ./serialization.md#error
+[se-Field]: ./serialization.md#field
\ No newline at end of file
diff --git a/tvix/docs/src/nix-daemon/operations.md b/tvix/docs/src/nix-daemon/operations.md
new file mode 100644
index 000000000..0683ab070
--- /dev/null
+++ b/tvix/docs/src/nix-daemon/operations.md
@@ -0,0 +1,894 @@
+
+# TOC
+
+| Operation                                                   | Id |
+| ----------------------------------------------------------- | -- |
+| [IsValidPath](#isvalidpath)                                 | 1  |
+| [HasSubstitutes](#hassubstitutes)                           | 3  |
+| [QueryReferrers](#queryreferrers)                           | 6  |
+| [AddToStore](#addtostore)                                   | 7  |
+| [BuildPaths](#buildpaths)                                   | 9  |
+| [EnsurePath](#ensurepath)                                   | 10 |
+| [AddTempRoot](#addtemproot)                                 | 11 |
+| [AddIndirectRoot](#addindirectroot)                         | 12 |
+| [FindRoots](#findroots)                                     | 14 |
+| [SetOptions](#setoptions)                                   | 19 |
+| [CollectGarbage](#collectgarbage)                           | 20 |
+| [QuerySubstitutablePathInfo](#querysubstitutablepathinfo)   | 21 |
+| [QueryAllValidPaths](#queryallvalidpaths)                   | 23 |
+| [QueryPathInfo](#querypathinfo)                             | 26 |
+| [QueryPathFromHashPart](#querypathfromhashpart)             | 29 |
+| [QuerySubstitutablePathInfos](#querysubstitutablepathinfos) | 30 |
+| [QueryValidPaths](#queryvalidpaths)                         | 31 |
+| [QuerySubstitutablePaths](#querysubstitutablepaths)         | 32 |
+| [QueryValidDerivers](#queryvalidderivers)                   | 33 |
+| [OptimiseStore](#optimisestore)                             | 34 |
+| [VerifyStore](#verifystore)                                 | 35 |
+| [BuildDerivation](#buildderivation)                         | 36 |
+| [AddSignatures](#addsignatures)                             | 37 |
+| [NarFromPath](#narfrompath)                                 | 38 |
+| [AddToStoreNar](#addtostorenar)                             | 39 |
+| [QueryMissing](#querymissing)                               | 40 |
+| [QueryDerivationOutputMap](#queryderivationoutputmap)       | 41 |
+| [RegisterDrvOutput](#registerdrvoutput)                     | 42 |
+| [QueryRealisation](#queryrealisation)                       | 43 |
+| [AddMultipleToStore](#addmultipletostore)                   | 44 |
+| [AddBuildLog](#addbuildlog)                                 | 45 |
+| [BuildPathsWithResults](#buildpathswithresults)             | 46 |
+| [AddPermRoot](#addpermroot)                                 | 47 |
+
+
+## Obsolete operations
+
+| Operation                                                 | Id |
+| --------------------------------------------------------- | -- |
+| [QueryPathHash](#querypathhash)                           | 4  |
+| [QueryReferences](#queryreferences)                       | 5  |
+| [AddTextToStore](#addtexttostore)                         | 8  |
+| [SyncWithGC](#syncwithgc)                                 | 13 |
+| [ExportPath](#exportpath)                                 | 16 |
+| [QueryDeriver](#queryderiver)                             | 18 |
+| [QueryDerivationOutputs](#queryderivationoutputs)         | 22 |
+| [ImportPaths](#importpaths)                               | 27 |
+| [QueryDerivationOutputNames](#queryderivationoutputnames) | 28 |
+
+
+## Removed operations
+
+| Operation                                         | Id |
+| ------------------------------------------------- | -- |
+| [Quit](#quit-removed)                             | 0  |
+| [Old CollectGarbage](#old-collectgarbage-removed) | 15 |
+| [ImportPath](#importpath-removed)                 | 17 |
+| [QueryFailedPaths](#queryfailedpaths-removed)     | 24 |
+| [ClearFailedPaths](#clearfailedpaths-removed)     | 25 |
+
+
+
+## Quit (removed)
+
+**Id:** 0<br>
+**Introduced:** Nix 0.11<br>
+**Removed:** Became dead code in Nix 0.11 and removed in Nix 1.8
+
+
+## IsValidPath
+
+**Id:** 1<br>
+**Introduced:** Nix 0.11<br>
+
+As the name says, checks whether a store path is valid, i.e. present in the store.
+
+This is a pretty core operation used everywhere.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+isValid :: [Bool][se-Bool]
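+
+A hypothetical client-side round trip for this operation, using the primitives
+from [serialization.md](./serialization.md); draining the logger messages is
+elided:
+
+```rust
+use std::io::{self, Read, Write};
+
+fn is_valid_path(tx: &mut impl Write, rx: &mut impl Read, path: &str) -> io::Result<bool> {
+    tx.write_all(&1u64.to_le_bytes())?; // opcode 1 (IsValidPath)
+    // path :: StorePath, sent as a String (length, content, zero padding)
+    tx.write_all(&(path.len() as u64).to_le_bytes())?;
+    tx.write_all(path.as_bytes())?;
+    tx.write_all(&vec![0u8; (8 - path.len() % 8) % 8])?;
+    // ... drain STDERR_* messages until STDERR_LAST here ...
+    let mut out = [0u8; 8];
+    rx.read_exact(&mut out)?; // isValid :: Bool (0 = false)
+    Ok(u64::from_le_bytes(out) != 0)
+}
+```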
+
+
+## HasSubstitutes
+
+**Id:** 3<br>
+**Introduced:** Nix 0.11<br>
+
+Checks if we can substitute the input path from a substituter. Uses
+QuerySubstitutablePaths under the hood :/
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+hasSubstitutes :: [Bool][se-Bool]
+
+
+## QueryPathHash
+
+**Id:** 4<br>
+**Introduced:** Nix 0.11<br>
+**Obsolete:** Protocol 1.16, Nix 2.0<br>
+
+Retrieves the base16 NAR hash of a given store path.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+hash :: [String][se-String] (base16-encoded NAR hash without algorithm prefix)
+
+
+## QueryReferences
+
+**Id:** 5<br>
+**Introduced:** Nix 0.11<br>
+**Obsolete:** Protocol 1.16, Nix 2.0<br>
+
+Retrieves the references of a given path.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+references :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## QueryReferrers
+
+**Id:** 6<br>
+**Introduced:** Nix 0.11<br>
+
+Retrieves the referrers of a given path.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+referrers :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## AddToStore
+
+**Id:** 7<br>
+**Introduced:** Nix 0.11<br>
+
+Add a new path to the store.
+
+### Before protocol version 1.25
+#### Inputs
+- baseName :: [String][se-String]
+- fixed :: [Bool64][se-Bool64]
+- recursive :: [FileIngestionMethod][se-FileIngestionMethod]
+- hashAlgo :: [String][se-String]
+- NAR dump
+
+If fixed is `true`, hashAlgo is forced to `sha256` and recursive is forced to
+`Recursive`.
+
+Only `Flat` and `Recursive` values are supported for the recursive input
+parameter.
+
+#### Outputs
+path :: [StorePath][se-StorePath]
+
+### Protocol version 1.25 or newer
+#### Inputs
+- name :: [String][se-String]
+- camStr :: [ContentAddressMethodWithAlgo][se-ContentAddressMethodWithAlgo]
+- refs :: [List][se-List] of [StorePath][se-StorePath]
+- repairBool :: [Bool64][se-Bool64]
+- [Framed][se-Framed] NAR dump
+
+#### Outputs
+info :: [ValidPathInfo][se-ValidPathInfo]
+
+
+## AddTextToStore
+
+**Id:** 8<br>
+**Introduced:** Nix 0.11<br>
+**Obsolete:** Protocol 1.25, Nix 2.4
+
+Add a text file as a store path.
+
+It was obsoleted by folding the functionality implemented by this operation
+into [AddToStore](#addtostore). It corresponds to calling
+[AddToStore](#addtostore) with `camStr` set to `text:sha256` and `text`
+wrapped as a NAR.
+
+### Inputs
+- suffix :: [String][se-String]
+- text :: [Bytes][se-Bytes]
+- refs :: [List][se-List] of [StorePath][se-StorePath]
+
+### Outputs
+path :: [StorePath][se-StorePath]
+
+
+## BuildPaths
+
+**Id:** 9<br>
+**Introduced:** Nix 0.11<br>
+
+Build (or substitute) a list of derivations.
+
+### Inputs
+paths :: [List][se-List] of [DerivedPath][se-DerivedPath]
+
+#### Protocol 1.15 or newer
+mode :: [BuildMode][se-BuildMode]
+
+Check that connection is trusted before allowing Repair mode.
+
+### Outputs
+1 :: [Int][se-Int] (hardcoded)
+
+
+## EnsurePath
+
+**Id:** 10<br>
+**Introduced:** Nix 0.11<br>
+
+Checks if a path is valid. Note: it may be made valid by running a substitute.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+1 :: [Int][se-Int] (hardcoded)
+
+
+## AddTempRoot
+
+**Id:** 11<br>
+**Introduced:** Nix 0.11<br>
+
+Creates a temporary GC root for the given store path.
+
+Temporary GC roots are valid only for the life of the connection and are used
+primarily to prevent the GC from pulling the rug out from under the client and
+deleting store paths that the client is actively doing something with.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+1 :: [Int][se-Int] (hardcoded)
+
+
+## AddIndirectRoot
+
+**Id:** 12<br>
+**Introduced:** Nix 0.11<br>
+
+Add an indirect root, which is a weak reference to the user-facing symlink
+created by [AddPermRoot](#addpermroot).
+
+Only ever sent to the nix daemon over the local unix socket.
+
+### Inputs
+path :: [String][se-String]
+
+### Outputs
+1 :: [Int][se-Int] (hardcoded)
+
+
+## SyncWithGC
+
+**Id:** 13<br>
+**Introduced:** Nix 0.11<br>
+**Obsolete:** Protocol 1.32, Nix 2.5.0
+
+Acquire the global GC lock, then immediately release it.  This function must be
+called after registering a new permanent root, but before exiting.  Otherwise,
+it is possible that a running garbage collector doesn't see the new root and
+deletes the stuff we've just built.  By acquiring the lock briefly, we ensure
+that either:
+
+- The collector is already running, and so we block until the
+    collector is finished.  The collector will know about our
+    *temporary* locks, which should include whatever it is we
+    want to register as a permanent lock.
+- The collector isn't running, or it's just started but hasn't
+    acquired the GC lock yet.  In that case we get and release
+    the lock right away, then exit.  The collector scans the
+    permanent roots and sees ours.
+
+In either case the permanent root is seen by the collector.
+
+Was made obsolete by using [AddTempRoot](#addtemproot) to accomplish the same
+thing.
+
+
+## FindRoots
+
+**Id:** 14<br>
+**Introduced:** Nix 0.11<br>
+
+Find the GC roots.
+
+### Outputs
+roots :: [Map][se-Map] of [String][se-String] to [StorePath][se-StorePath]
+
+The key is the link pointing to the given store path.
+
+
+## Old CollectGarbage (removed)
+
+**Id:** 15<br>
+**Introduced:** Nix 0.11<br>
+**Removed:** Protocol 1.02, Nix 0.12<br>
+
+
+## ExportPath
+
+**Id:** 16<br>
+**Introduced:** Nix 0.11<br>
+**Obsolete:** Protocol 1.17, Nix 2.0<br>
+
+Export a store path in the binary format that `nix-store --import` expects. For
+more details see the implementation at
+https://github.com/NixOS/nix/blob/db3bf180a569cb20db42c5e4669d2277be6f46b6/src/libstore/export-import.cc#L29.
+
+### Inputs
+- path :: [StorePath][se-StorePath]
+- sign :: [Int][se-Int] (ignored and hardcoded to 0 in client)
+
+### Outputs
+Uses `STDERR_WRITE` to send the dump in export format.
+
+After the dump it outputs:
+
+1 :: [Int][se-Int] (hardcoded)
+
+
+## ImportPath (removed)
+
+**Id:** 17<br>
+**Introduced:** Nix 0.11<br>
+**Removed:** Protocol 1.09, Nix 1.0<br>
+
+
+## QueryDeriver
+
+**Id:** 18<br>
+**Introduced:** Nix 0.11<br>
+**Obsolete:** Protocol 1.16, Nix 2.0<br>
+
+Returns the store path of the derivation for a given store path.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+deriver :: [OptStorePath][se-OptStorePath]
+
+
+## SetOptions
+
+**Id:** 19<br>
+**Introduced:** Nix 0.11<br>
+
+Sends client options to the remote side.
+
+Only ever used right after the handshake.
+
+### Inputs
+
+- keepFailed :: [Int][se-Int]
+- keepGoing :: [Int][se-Int]
+- tryFallback :: [Int][se-Int]
+- verbosity :: [Verbosity][se-Verbosity]
+- maxBuildJobs :: [Int][se-Int]
+- maxSilentTime :: [Int][se-Int]
+- useBuildHook :: [Bool][se-Bool] (ignored and hardcoded to true in client)
+- verboseBuild :: [Verbosity][se-Verbosity]
+- logType :: [Int][se-Int] (ignored and hardcoded to 0 in client)
+- printBuildTrace :: [Int][se-Int] (ignored and hardcoded to 0 in client)
+- buildCores :: [Int][se-Int]
+- useSubstitutes :: [Int][se-Int]
+
+#### Protocol 1.12 or newer
+otherSettings :: [Map][se-Map] of [String][se-String] to [String][se-String]
+
+
+## CollectGarbage
+
+**Id:** 20<br>
+**Introduced:** Protocol 1.02, Nix 0.12<br>
+
+Performs garbage collection according to the given action.
+
+### Inputs
+- action :: [GCAction][se-GCAction]
+- pathsToDelete :: [List][se-List] of [StorePath][se-StorePath]
+- ignoreLiveness :: [Bool64][se-Bool64]
+- maxFreed :: [UInt64][se-UInt64]
+- removed :: [Int][se-Int] (ignored and hardcoded to 0 in client)
+- removed :: [Int][se-Int] (ignored and hardcoded to 0 in client)
+- removed :: [Int][se-Int] (ignored and hardcoded to 0 in client)
+
+### Outputs
+- pathsDeleted :: [List][se-List] of [String][se-String]
+- bytesFreed :: [UInt64][se-UInt64]
+- 0 :: [UInt64][se-UInt64] (hardcoded)
+
+Depending on the action, pathsDeleted is either the GC roots or the paths that
+would be (or have been) deleted.
+
+
+## QuerySubstitutablePathInfo
+
+**Id:** 21<br>
+**Introduced:** Protocol 1.02, Nix 0.12<br>
+
+Retrieves the substitutable path info for a given path.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+found :: [Bool][se-Bool]
+
+#### If found is true
+- deriver :: [OptStorePath][se-OptStorePath]
+- references :: [List][se-List] of [StorePath][se-StorePath]
+- downloadSize :: [UInt64][se-UInt64]
+- narSize :: [UInt64][se-UInt64]
+
+
+## QueryDerivationOutputs
+
+**Id:** 22<br>
+**Introduced:** Protocol 1.05, Nix 1.0<br>
+**Obsolete:** Protocol 1.22*, Nix 2.4<br>
+
+Retrieves all the output paths of a given derivation.
+
+### Inputs
+path :: [StorePath][se-StorePath] (must point to a derivation)
+
+### Outputs
+derivationOutputs :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## QueryAllValidPaths
+
+**Id:** 23<br>
+**Introduced:** Protocol 1.05, Nix 1.0<br>
+
+Retrieves all the valid paths contained in the store.
+
+### Outputs
+paths :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## QueryFailedPaths (removed)
+
+**Id:** 24<br>
+**Introduced:** Protocol 1.05, Nix 1.0<br>
+**Removed:** Protocol 1.16, Nix 2.0<br>
+
+Failed build caching API only ever used by Hydra.
+
+
+## ClearFailedPaths (removed)
+
+**Id:** 25<br>
+**Introduced:** Protocol 1.05, Nix 1.0<br>
+**Removed:** Protocol 1.16, Nix 2.0<br>
+
+Failed build caching API only ever used by Hydra.
+
+
+## QueryPathInfo
+
+**Id:** 26<br>
+**Introduced:** Protocol 1.06, Nix 1.0<br>
+
+Retrieves the pathInfo for a given path.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+
+#### If protocol version is 1.17 or newer
+success :: [Bool64][se-Bool64]
+
+##### If success is true
+pathInfo :: [UnkeyedValidPathInfo][se-UnkeyedValidPathInfo]
+
+#### If protocol version is older than 1.17
+If info not found return error with `STDERR_ERROR`
+
+pathInfo :: [UnkeyedValidPathInfo][se-UnkeyedValidPathInfo]
+
+
+## ImportPaths
+
+**Id:** 27<br>
+**Introduced:** Protocol 1.09, Nix 1.0<br>
+**Obsolete:** Protocol 1.17, Nix 2.0<br>
+
+Older way of adding a store path to the remote store.
+
+It was obsoleted and replaced by AddToStoreNar because ImportPaths sends the
+NAR before the metadata about the store path, so you would typically have
+to store the NAR in memory or temporarily on disk before processing it.
+
+### Inputs
+List of NAR dumps coming from [ExportPath](#exportpath) operations.
+
+### Outputs
+importedPaths :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## QueryDerivationOutputNames
+
+**Id:** 28<br>
+**Introduced:** Protocol 1.08, Nix 1.0<br>
+**Obsolete:** Protocol 1.21, Nix 2.4<br>
+
+Retrieves the names of the outputs of a given derivation, e.g. `out`, `dev`, etc.
+
+### Inputs
+path :: [StorePath][se-StorePath] (must be a derivation path)
+
+### Outputs
+names :: [List][se-List] of [String][se-String]
+
+
+## QueryPathFromHashPart
+
+**Id:** 29<br>
+**Introduced:** Protocol 1.11, Nix 1.1<br>
+
+Retrieves a store path from a base16 (input) hash. Returns "" if no path was
+found.
+
+### Inputs
+hashPart :: [String][se-String]  (must be a base-16 hash)
+
+### Outputs
+path :: [OptStorePath][se-OptStorePath]
+
+
+## QuerySubstitutablePathInfos
+
+**Id:** 30<br>
+**Introduced:** Protocol 1.12*, Nix 1.2<br>
+
+Retrieves the substitutable path infos for a set of store paths.
+
+### Inputs
+#### If protocol version is 1.22 or newer
+paths :: [Map][se-Map] of [StorePath][se-StorePath] to [OptContentAddress][se-OptContentAddress] 
+
+#### If protocol version older than 1.22
+paths :: [List][se-List] of [StorePath][se-StorePath]
+
+### Outputs
+infos :: [List][se-List] of [SubstitutablePathInfo][se-SubstitutablePathInfo]
+
+
+## QueryValidPaths
+
+**Id:** 31<br>
+**Introduced:** Protocol 1.12, Nix 1.2<br>
+
+Takes a list of store paths and returns a new list containing only the valid
+store paths.
+
+### Inputs
+paths :: [List][se-List] of [StorePath][se-StorePath]
+
+#### If protocol version is 1.27 or newer
+substitute :: [Bool][se-Bool] (defaults to false if not sent)
+
+### Outputs
+paths :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## QuerySubstitutablePaths
+
+**Id:** 32<br>
+**Introduced:** Protocol 1.12, Nix 1.2<br>
+
+Takes a set of store paths and returns the filtered subset of paths that can
+be substituted.
+
+In versions of the protocol prior to 1.12, [HasSubstitutes](#hassubstitutes)
+was used to implement the functionality that this operation provides.
+
+### Inputs
+paths :: [List][se-List] of [StorePath][se-StorePath]
+
+### Outputs
+paths :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## QueryValidDerivers
+
+**Id:** 33<br>
+**Introduced:** Protocol 1.13*, Nix 1.3<br>
+
+Retrieves the derivers of a given path.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+derivers :: [List][se-List] of [StorePath][se-StorePath]
+
+
+## OptimiseStore
+
+**Id:** 34<br>
+**Introduced:** Protocol 1.14, Nix 1.8<br>
+
+Optimise store by hardlinking files with the same content.
+
+### Outputs
+1 :: [Int][se-Int] (hardcoded)
+
+
+## VerifyStore
+
+**Id:** 35<br>
+**Introduced:** Protocol 1.14, Nix 1.9<br>
+
+Verifies the store: either only the database and the existence of paths, or
+additionally the entire contents of store paths against their NAR hashes.
+
+### Inputs
+- checkContents :: [Bool64][se-Bool64]
+- repair :: [Bool64][se-Bool64]
+
+### Outputs
+errors :: [Bool][se-Bool]
+
+
+## BuildDerivation
+
+**Id:** 36<br>
+**Introduced:** Protocol 1.14, Nix 1.10<br>
+
+Main build operation used for remote building.
+
+When functioning as a remote builder this operation is used instead of
+BuildPaths, so that the entire tree of derivations doesn't have to be sent to
+the remote side before building can start. Instead, a reduced version of the
+derivation to be built is sent as part of the input, and only that derivation
+is built.
+
+The paths required by the build need to be part of the remote store
+(by copying with AddToStoreNar or substituting) before this operation is
+called.
+
+### Inputs
+- drvPath :: [StorePath][se-StorePath]
+- drv :: [BasicDerivation][se-BasicDerivation]
+- buildMode :: [BuildMode][se-BuildMode]
+
+### Outputs
+buildResult :: [BuildResult][se-BuildResult]
+
+
+## AddSignatures
+
+**Id:** 37<br>
+**Introduced:** Protocol 1.16, Nix 2.0<br>
+
+Add the signatures associated to a given path.
+
+### Inputs
+- path :: [StorePath][se-StorePath]
+- signatures :: [List][se-List] of [String][se-String]
+
+### Outputs
+1 :: [Int][se-Int] (hardcoded)
+
+
+## NarFromPath
+
+**Id:** 38<br>
+**Introduced:** Protocol 1.17, Nix 2.0<br>
+
+Main way of getting the contents of a store path to the client.
+
+As the name suggests this is done by sending a NAR file.
+
+It replaced the now obsolete ExportPath operation and is used by newer clients
+to implement the export functionality for the CLI. It is also used when remote
+building to transfer build results from the remote builder to the client.
+
+### Inputs
+path :: [StorePath][se-StorePath]
+
+### Outputs
+NAR dumped straight to the stream.
+
+
+## AddToStoreNar
+
+**Id:** 39<br>
+**Introduced:** Protocol 1.17, Nix 2.0<br>
+
+Adds a path to the store from a NAR dump accompanied by its path metadata.
+
+### Inputs
+- path :: [StorePath][se-StorePath]
+- deriver :: [OptStorePath][se-OptStorePath]
+- narHash :: [String][se-String] (base16-encoded SHA256 NAR hash)
+- references :: [List][se-List] of [StorePath][se-StorePath]
+- registrationTime :: [Time][se-Time]
+- narSize :: [UInt64][se-UInt64]
+- ultimate :: [Bool64][se-Bool64]
+- signatures :: [List][se-List] of [String][se-String]
+- ca :: [OptContentAddress][se-OptContentAddress]
+- repair :: [Bool64][se-Bool64]
+- dontCheckSigs :: [Bool64][se-Bool64]
+
+#### If protocol version is 1.23 or newer
+[Framed][se-Framed] NAR dump
+
+#### If protocol version is between 1.21 and 1.23
+NAR dump sent using `STDERR_READ`
+
+#### If protocol version is older than 1.21
+NAR dump sent raw on stream
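+
+A sketch of how a server might select the NAR transport from the negotiated
+minor protocol version (the type and helper names are illustrative, not from
+the Nix code base):
+
+```rust
+enum NarTransport {
+    Framed,     // protocol 1.23 and newer: framed chunks
+    StderrRead, // 1.21 up to (but excluding) 1.23: pulled via STDERR_READ
+    Raw,        // older than 1.21: NAR bytes follow directly on the stream
+}
+
+/// `minor` is the negotiated minor protocol version, e.g. 23 for 1.23.
+fn nar_transport(minor: u64) -> NarTransport {
+    match minor {
+        m if m >= 23 => NarTransport::Framed,
+        m if m >= 21 => NarTransport::StderrRead,
+        _ => NarTransport::Raw,
+    }
+}
+```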
+
+
+## QueryMissing
+
+**Id:** 40<br>
+**Introduced:** Protocol 1.19*, Nix 2.0<br>
+
+### Inputs
+targets :: [List][se-List] of [DerivedPath][se-DerivedPath]
+
+### Outputs
+- willBuild :: [List][se-List] of [StorePath][se-StorePath]
+- willSubstitute :: [List][se-List] of [StorePath][se-StorePath]
+- unknown :: [List][se-List] of [StorePath][se-StorePath]
+- downloadSize :: [UInt64][se-UInt64]
+- narSize :: [UInt64][se-UInt64]
+
+
+## QueryDerivationOutputMap
+
+**Id:** 41<br>
+**Introduced:** Protocol 1.22*, Nix 2.4<br>
+
+Retrieves an associative map outputName -> storePath for a given derivation.
+
+### Inputs
+path :: [StorePath][se-StorePath]  (must be a derivation path)
+
+### Outputs
+outputs :: [Map][se-Map] of [String][se-String] to [OptStorePath][se-OptStorePath]
+
+
+## RegisterDrvOutput
+
+**Id:** 42<br>
+**Introduced:** Protocol 1.27, Nix 2.4<br>
+
+Registers a derivation output.
+
+### Inputs
+#### If protocol is 1.31 or newer
+realisation :: [Realisation][se-Realisation]
+
+#### If protocol is older than 1.31
+- outputId :: [DrvOutput][se-DrvOutput]
+- outputPath :: [StorePath][se-StorePath]
+
+
+## QueryRealisation
+
+**Id:** 43<br>
+**Introduced:** Protocol 1.27, Nix 2.4<br>
+
+Retrieves the realisations attached to a given derivation output id.
+
+### Inputs
+outputId :: [DrvOutput][se-DrvOutput]
+
+### Outputs
+#### If protocol is 1.31 or newer
+realisations :: [List][se-List] of [Realisation][se-Realisation]
+
+#### If protocol is older than 1.31
+outPaths :: [List][se-List] of [BaseStorePath][se-BaseStorePath]
+
+
+## AddMultipleToStore
+
+**Id:** 44<br>
+**Introduced:** Protocol 1.32*, Nix 2.4<br>
+
+A pipelined version of [AddToStoreNar](#addtostorenar) where you can add
+multiple paths in one go.
+
+Added because the protocol doesn't support pipelining, so on a high-latency
+connection waiting for the request/response of [AddToStoreNar](#addtostorenar)
+for each small NAR was costly.
+
+### Inputs
+- repair :: [Bool64][se-Bool64]
+- dontCheckSigs :: [Bool64][se-Bool64]
+- [Framed][se-Framed] stream containing multiple NAR dumps
+
+
+## AddBuildLog
+
+**Id:** 45<br>
+**Introduced:** Protocol 1.32, Nix 2.6.0<br>
+
+Attach some build logs to a given build.
+
+### Inputs
+- path :: [String][se-String] (might be [BaseStorePath][se-BaseStorePath])
+- [Framed][se-Framed] stream of log lines
+
+### Outputs
+1 :: [Int][se-Int] (hardcoded)
+
+
+## BuildPathsWithResults
+
+**Id:** 46<br>
+**Introduced:** Protocol 1.34*, Nix 2.8.0<br>
+
+Build (or substitute) a list of derivations and return a list of results.
+
+### Inputs
+- drvs :: [List][se-List] of [DerivedPath][se-DerivedPath]
+- mode :: [BuildMode][se-BuildMode]
+
+### Outputs
+results :: [List][se-List] of [KeyedBuildResult][se-KeyedBuildResult]
+
+
+## AddPermRoot
+
+**Id:** 47<br>
+**Introduced:** Protocol 1.36*, Nix 2.20.0<br>
+
+### Inputs
+- storePath :: [StorePath][se-StorePath]
+- gcRoot :: [String][se-String]
+
+### Outputs
+gcRoot :: [String][se-String]
+
+
+
+[se-Int]: ./serialization.md#int
+[se-UInt8]: ./serialization.md#uint8
+[se-UInt64]: ./serialization.md#uint64
+[se-Bool]: ./serialization.md#bool
+[se-Bool64]: ./serialization.md#bool64
+[se-Time]: ./serialization.md#time
+[se-FileIngestionMethod]: ./serialization.md#fileingestionmethod
+[se-BuildMode]: ./serialization.md#buildmode
+[se-Verbosity]: ./serialization.md#verbosity
+[se-GCAction]: ./serialization.md#gcaction
+[se-Bytes]: ./serialization.md#bytes
+[se-String]: ./serialization.md#string
+[se-StorePath]: ./serialization.md#storepath
+[se-BaseStorePath]: ./serialization.md#basestorepath
+[se-OptStorePath]: ./serialization.md#optstorepath
+[se-ContentAddressMethodWithAlgo]: ./serialization.md#contentaddressmethodwithalgo
+[se-OptContentAddress]: ./serialization.md#optcontentaddress
+[se-DerivedPath]: ./serialization.md#derivedpath
+[se-DrvOutput]: ./serialization.md#drvoutput
+[se-Realisation]: ./serialization.md#realisation
+[se-List]: ./serialization.md#list-of-x
+[se-Map]: ./serialization.md#map-of-x-to-y
+[se-SubstitutablePathInfo]: ./serialization.md#substitutablepathinfo
+[se-ValidPathInfo]: ./serialization.md#validpathinfo
+[se-UnkeyedValidPathInfo]: ./serialization.md#unkeyedvalidpathinfo
+[se-BuildResult]: ./serialization.md#buildresult
+[se-KeyedBuildResult]: ./serialization.md#keyedbuildresult
+[se-BasicDerivation]: ./serialization.md#basicderivation
+[se-Framed]: ./serialization.md#framed
\ No newline at end of file
diff --git a/tvix/docs/src/nix-daemon/serialization.md b/tvix/docs/src/nix-daemon/serialization.md
new file mode 100644
index 000000000..a2694a4de
--- /dev/null
+++ b/tvix/docs/src/nix-daemon/serialization.md
@@ -0,0 +1,305 @@
+
+### UInt64
+Little-endian byte order.
+
+### Bytes
+
+- len :: [UInt64](#uint64)
+- len bytes of content
+- padding with zeros to the next 8-byte (64-bit) boundary
+
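+A minimal sketch of this framing in Rust (helper name ours, not part of tvix):
+
+```rust
+use std::io::{self, Write};
+
+/// Write a bytes packet: length, content, then zero padding up to the
+/// next 8-byte boundary.
+fn write_bytes_packet<W: Write>(w: &mut W, data: &[u8]) -> io::Result<()> {
+    w.write_all(&(data.len() as u64).to_le_bytes())?; // len :: UInt64
+    w.write_all(data)?; // len bytes of content
+    let pad = (8 - data.len() % 8) % 8;
+    w.write_all(&[0u8; 8][..pad]) // padding with zeros
+}
+```
+
+For example, a 5-byte payload is sent as 8 (length) + 5 (content) + 3
+(padding) = 16 bytes on the wire.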
+
+## Int serializers
+
+### Int
+[UInt64](#uint64) cast to C `unsigned int` with upper bounds checking.
+
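+A minimal sketch of such a bounds-checked cast (function name ours; the
+narrower casts below follow the same pattern):
+
+```rust
+use std::io;
+
+/// Decode the on-wire UInt64 as an Int, rejecting values that don't fit
+/// in a C `unsigned int` (modelled as u32 here).
+fn decode_int(raw: u64) -> io::Result<u32> {
+    u32::try_from(raw)
+        .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "Int out of range"))
+}
+```
+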
+### Int64
+[UInt64](#uint64) cast to C `int64_t` with upper bounds checking.
+
+### UInt8
+[UInt64](#uint64) cast to C `uint8_t` with upper bounds checking.
+
+### Size
+[UInt64](#uint64) cast to C `size_t` with upper bounds checking.
+
+### Time
+[UInt64](#uint64) cast to C `time_t` with upper bounds checking.
+
+### Bool
+Sent as an [Int](#int) where 0 is false and everything else is true.
+
+### Bool64
+Sent as a [UInt64](#uint64) where 0 is false and everything else is true.
+
+### FileIngestionMethod
+A [UInt8](#uint8) enum with the following possible values:
+
+| Name      | Int |
+| --------- | --- |
+| Flat      |  0  |
+| Recursive |  1  |
+
+### BuildMode
+An [Int](#int) enum with the following possible values:
+
+| Name   | Int |
+| ------ | --- |
+| Normal |  0  |
+| Repair |  1  |
+| Check  |  2  |
+
+### Verbosity
+An [Int](#int) enum with the following possible values:
+
+| Name      | Int |
+| --------- | --- |
+| Error     |  0  |
+| Warn      |  1  |
+| Notice    |  2  |
+| Info      |  3  |
+| Talkative |  4  |
+| Chatty    |  5  |
+| Debug     |  6  |
+| Vomit     |  7  |
+
+### GCAction
+An [Int](#int) enum with the following possible values:
+
+| Name           | Int |
+| -------------- | --- |
+| ReturnLive     |  0  |
+| ReturnDead     |  1  |
+| DeleteDead     |  2  |
+| DeleteSpecific |  3  |
+
+### BuildStatus
+An [Int](#int) enum with the following possible values:
+
+| Name                   | Int |
+| ---------------------- | --- |
+| Built                  |  0  |
+| Substituted            |  1  |
+| AlreadyValid           |  2  |
+| PermanentFailure       |  3  |
+| InputRejected          |  4  |
+| OutputRejected         |  5  |
+| TransientFailure       |  6  |
+| CachedFailure          |  7  |
+| TimedOut               |  8  |
+| MiscFailure            |  9  |
+| DependencyFailed       | 10  |
+| LogLimitExceeded       | 11  |
+| NotDeterministic       | 12  |
+| ResolvesToAlreadyValid | 13  |
+| NoSubstituters         | 14  |
+
+### ActivityType
+An [Int](#int) enum with the following possible values:
+
+| Name          | Int |
+| ------------- | --- |
+| Unknown       |   0 |
+| CopyPath      | 100 |
+| FileTransfer  | 101 |
+| Realise       | 102 |
+| CopyPaths     | 103 |
+| Builds        | 104 |
+| Build         | 105 |
+| OptimiseStore | 106 |
+| VerifyPaths   | 107 |
+| Substitute    | 108 |
+| QueryPathInfo | 109 |
+| PostBuildHook | 110 |
+| BuildWaiting  | 111 |
+| FetchTree     | 112 |
+
+### ResultType
+An [Int](#int) enum with the following possible values:
+
+| Name             | Int |
+| ---------------- | --- |
+| FileLinked       | 100 |
+| BuildLogLine     | 101 |
+| UntrustedPath    | 102 |
+| CorruptedPath    | 103 |
+| SetPhase         | 104 |
+| Progress         | 105 |
+| SetExpected      | 106 |
+| PostBuildLogLine | 107 |
+| FetchStatus      | 108 |
+
+### FieldType
+An [Int](#int) enum with the following possible values:
+
+| Name   | Int |
+| ------ | --- |
+| Int    |  0  |
+| String |  1  |
+
+
+## Bytes serializers
+
+### String
+A [Bytes](#bytes) value that is usually, but not always, interpreted as a UTF-8 string.
+
+### StorePath
+String representation of a full store path.
+
+### BaseStorePath
+String representation of the basename of a store path. That is the store path
+without the /nix/store prefix.
+
+### OptStorePath
+Optional store path.
+
+If there is no store path, this is serialized as the empty string; otherwise it
+is the same as [StorePath](#storepath).
+
+### ContentAddressMethodWithAlgo
+One of the following strings:
+- text:`hash algorithm`
+- fixed:r:`hash algorithm`
+- fixed:`hash algorithm`
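+
+For example, `fixed:r:sha256` would denote recursive (NAR) ingestion hashed
+with SHA-256, and `fixed:sha256` flat ingestion (illustrative examples).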
+
+### DerivedPath
+#### If protocol is 1.30 or newer
+The string is parsed with `DerivedPath::parseLegacy(store, s)`.
+#### If protocol is older than 1.30
+The string is parsed with `parsePathWithOutputs(store, s).toDerivedPath()`.
+
+### ContentAddress
+String with the format:
+- [ContentAddressMethodWithAlgo](#contentaddressmethodwithalgo):`hash`
+
+### OptContentAddress
+Optional version of [ContentAddress](#contentaddress) where empty string means
+no content address.
+
+### DrvOutput
+String with the format:
+- `hash with any prefix`!`output name`
+
+### Realisation
+A JSON object sent as a string.
+
+The JSON object has the following keys:
+| Key                   | Value                   |
+| --------------------- | ----------------------- |
+| id                    | [DrvOutput](#drvoutput) |
+| outPath               | [StorePath](#storepath) |
+| signatures            | Array of String         |
+| dependentRealisations | Object where key is [DrvOutput](#drvoutput) and value is [StorePath](#storepath) |
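+
+A minimal sketch of how this object could be modelled in Rust with serde
+(type and field spellings taken from the table above; the struct itself is
+ours, not tvix's):
+
+```rust
+use std::collections::BTreeMap;
+
+use serde::{Deserialize, Serialize};
+
+/// Illustrative model of the Realisation JSON object described above.
+#[derive(Serialize, Deserialize)]
+struct Realisation {
+    /// DrvOutput, i.e. "<hash with prefix>!<output name>"
+    id: String,
+    /// StorePath
+    #[serde(rename = "outPath")]
+    out_path: String,
+    signatures: Vec<String>,
+    /// DrvOutput -> StorePath
+    #[serde(rename = "dependentRealisations")]
+    dependent_realisations: BTreeMap<String, String>,
+}
+```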
+
+
+## Complex serializers
+
+### List of x
+A list is encoded as a [Size](#size) length n followed by n encodings of x.
+
+### Map of x to y
+A map is encoded as a [Size](#size) length n followed by n encodings of pairs of x and y.
+
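+A sketch of the list framing, reusing the `write_bytes_packet` helper from
+the [Bytes](#bytes) example above (names ours):
+
+```rust
+use std::io::{self, Write};
+
+/// Encode a list of Strings: the length n, then n String encodings.
+fn write_string_list<W: Write>(w: &mut W, items: &[&[u8]]) -> io::Result<()> {
+    w.write_all(&(items.len() as u64).to_le_bytes())?; // n :: Size
+    for item in items {
+        write_bytes_packet(w, item)?; // each String is a Bytes packet
+    }
+    Ok(())
+}
+```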
+
+### BuildResult
+- status :: [BuildStatus](#buildstatus)
+- errorMsg :: [String](#string)
+
+#### Protocol 1.29 or newer
+- timesBuilt :: [Int](#int)
+- isNonDeterministic :: [Bool64](#bool64)
+- startTime :: [Time](#time)
+- stopTime :: [Time](#time)
+
+#### Protocol 1.37 or newer
+- cpuUser :: [OptMicroseconds](#optmicroseconds)
+- cpuSystem :: [OptMicroseconds](#optmicroseconds)
+
+#### Protocol 1.28 or newer
+builtOutputs :: [Map](#map-of-x-to-y) of [DrvOutput](#drvoutput) to [Realisation](#realisation)
+
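+The version-gated fields above are read only when the negotiated protocol
+minor version is high enough. A sketch of the pattern for the two
+[Time](#time) fields (assuming the version is available as `minor`):
+
+```rust
+use std::io::{self, Read};
+
+fn read_u64_le<R: Read>(r: &mut R) -> io::Result<u64> {
+    let mut buf = [0u8; 8];
+    r.read_exact(&mut buf)?;
+    Ok(u64::from_le_bytes(buf))
+}
+
+/// startTime/stopTime are only present on protocol 1.29 or newer.
+fn read_build_times<R: Read>(r: &mut R, minor: u64) -> io::Result<Option<(u64, u64)>> {
+    if minor < 29 {
+        return Ok(None);
+    }
+    Ok(Some((read_u64_le(r)?, read_u64_le(r)?)))
+}
+```
+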
+### KeyedBuildResult
+- path :: [DerivedPath](#derivedpath)
+- result :: [BuildResult](#buildresult)
+
+### OptMicroseconds
+Optional microseconds.
+
+- tag :: [UInt8](#uint8)
+
+#### If tag is 1
+- microseconds :: [Int64](#int64)
+
+
+### SubstitutablePathInfo
+- storePath :: [StorePath](#storepath)
+- deriver :: [OptStorePath](#optstorepath)
+- references :: [List](#list-of-x) of [StorePath](#storepath)
+- downloadSize :: [UInt64](#uint64)
+- narSize :: [UInt64](#uint64)
+
+
+### UnkeyedValidPathInfo
+- deriver :: [OptStorePath](#optstorepath)
+- narHash :: [String](#string) (base16-encoded SHA-256 hash of the NAR)
+- references :: [List](#list-of-x) of [StorePath](#storepath)
+- registrationTime :: [Time](#time)
+- narSize :: [UInt64](#uint64)
+
+#### If protocol is 1.16 or newer
+- ultimate :: [Bool64](#bool64)
+- signatures :: [List](#list-of-x) of [String](#string)
+- ca :: [OptContentAddress](#optcontentaddress)
+
+
+### ValidPathInfo
+- path :: [StorePath](#storepath)
+- info :: [UnkeyedValidPathInfo](#unkeyedvalidpathinfo)
+
+### DerivationOutput
+- path :: [String](#string)
+- hashAlgo :: [String](#string)
+- hash :: [String](#string)
+
+### BasicDerivation
+- outputs :: [Map](#map-of-x-to-y) of [String](#string) to [DerivationOutput](#derivationoutput)
+- inputSrcs :: [List](#list-of-x) of [StorePath](#storepath)
+- platform :: [String](#string)
+- builder :: [String](#string)
+- args :: [List](#list-of-x) of [String](#string)
+- env :: [Map](#map-of-x-to-y) of [String](#string) to [String](#string)
+
+### TraceLine
+- havePos :: [Size](#size) (hardcoded to 0)
+- hint :: [String](#string)
+
+### Error
+- type :: [String](#string) (hardcoded to `Error`)
+- level :: [Verbosity](#verbosity)
+- name :: [String](#string) (removed and hardcoded to `Error`)
+- msg :: [String](#string)
+- havePos :: [Size](#size) (hardcoded to 0)
+- traces :: [List](#list-of-x) of [TraceLine](#traceline)
+
+## Field
+- type :: [FieldType](#fieldtype)
+
+### If type is Int
+- value :: [UInt64](#uint64)
+
+### If type is String
+- value :: [String](#string)
+
+
+## Framed
+
+At protocol 1.23, [AddToStoreNar](./operations.md#addtostorenar) introduced
+framed streaming for sending the NAR dump; later protocol versions reused this
+framing for other operations.
+
+At its core the framed streaming is just a series of [Bytes](#bytes) of
+varying length, terminated by an empty [Bytes](#bytes).
+
+This method of sending data has the advantage that the receiver does not have
+to parse the data to find where it ends. Older versions of the protocol would
+potentially parse the NAR twice.
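+
+A blocking sketch of draining such a stream (names ours; the real
+implementations are async, and any padding of individual frames is elided):
+
+```rust
+use std::io::{self, Read};
+
+/// Read one length-prefixed frame; an empty frame ends the stream.
+fn read_frame<R: Read>(r: &mut R) -> io::Result<Vec<u8>> {
+    let mut len_buf = [0u8; 8];
+    r.read_exact(&mut len_buf)?;
+    let len = u64::from_le_bytes(len_buf) as usize;
+    let mut data = vec![0u8; len];
+    r.read_exact(&mut data)?;
+    Ok(data)
+}
+
+/// Concatenate frames until the terminating empty frame.
+fn read_framed<R: Read>(r: &mut R) -> io::Result<Vec<u8>> {
+    let mut out = Vec::new();
+    loop {
+        let frame = read_frame(r)?;
+        if frame.is_empty() {
+            return Ok(out);
+        }
+        out.extend_from_slice(&frame);
+    }
+}
+```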
\ No newline at end of file
diff --git a/tvix/glue/Cargo.toml b/tvix/glue/Cargo.toml
index f929d720a..0afdefeaa 100644
--- a/tvix/glue/Cargo.toml
+++ b/tvix/glue/Cargo.toml
@@ -4,7 +4,7 @@ version = "0.1.0"
 edition = "2021"
 
 [dependencies]
-async-recursion = "1.0.5"
+async-compression = { version = "0.4.9", features = ["tokio", "gzip", "bzip2", "xz"]}
 bstr = "1.6.0"
 bytes = "1.4.0"
 data-encoding = "2.3.3"
@@ -30,10 +30,6 @@ md-5 = "0.10.6"
 url = "2.4.0"
 walkdir = "2.4.0"
 
-[dependencies.async-compression]
-version = "0.4.6"
-features = ["tokio", "gzip", "bzip2", "xz"]
-
 [dependencies.wu-manber]
 git = "https://github.com/tvlfyi/wu-manber.git"
 
diff --git a/tvix/glue/benches/eval.rs b/tvix/glue/benches/eval.rs
index dfb4fabe4..202278c1a 100644
--- a/tvix/glue/benches/eval.rs
+++ b/tvix/glue/benches/eval.rs
@@ -2,10 +2,6 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion};
 use lazy_static::lazy_static;
 use std::{env, rc::Rc, sync::Arc, time::Duration};
 use tvix_build::buildservice::DummyBuildService;
-use tvix_castore::{
-    blobservice::{BlobService, MemoryBlobService},
-    directoryservice::{DirectoryService, MemoryDirectoryService},
-};
 use tvix_eval::{builtins::impure_builtins, EvalIO};
 use tvix_glue::{
     builtins::{add_derivation_builtins, add_fetcher_builtins, add_import_builtins},
@@ -13,16 +9,9 @@ use tvix_glue::{
     tvix_io::TvixIO,
     tvix_store_io::TvixStoreIO,
 };
-use tvix_store::pathinfoservice::{MemoryPathInfoService, PathInfoService};
+use tvix_store::utils::construct_services;
 
 lazy_static! {
-    static ref BLOB_SERVICE: Arc<dyn BlobService> = Arc::new(MemoryBlobService::default());
-    static ref DIRECTORY_SERVICE: Arc<dyn DirectoryService> =
-        Arc::new(MemoryDirectoryService::default());
-    static ref PATH_INFO_SERVICE: Arc<dyn PathInfoService> = Arc::new(MemoryPathInfoService::new(
-        BLOB_SERVICE.clone(),
-        DIRECTORY_SERVICE.clone(),
-    ));
     static ref TOKIO_RUNTIME: tokio::runtime::Runtime = tokio::runtime::Runtime::new().unwrap();
 }
 
@@ -30,12 +19,17 @@ fn interpret(code: &str) {
     // TODO: this is a bit annoying.
     // It'd be nice if we could set this up once and then run evaluate() with a
     // piece of code. b/262
+    let (blob_service, directory_service, path_info_service, nar_calculation_service) =
+        TOKIO_RUNTIME
+            .block_on(async { construct_services("memory://", "memory://", "memory://").await })
+            .unwrap();
 
     // We assemble a complete store in memory.
     let tvix_store_io = Rc::new(TvixStoreIO::new(
-        BLOB_SERVICE.clone(),
-        DIRECTORY_SERVICE.clone(),
-        PATH_INFO_SERVICE.clone(),
+        blob_service,
+        directory_service,
+        path_info_service.into(),
+        nar_calculation_service.into(),
         Arc::<DummyBuildService>::default(),
         TOKIO_RUNTIME.handle().clone(),
     ));
diff --git a/tvix/glue/src/builtins/import.rs b/tvix/glue/src/builtins/import.rs
index 219695b69..4a15afa81 100644
--- a/tvix/glue/src/builtins/import.rs
+++ b/tvix/glue/src/builtins/import.rs
@@ -178,7 +178,7 @@ mod import_builtins {
             CAHash::Nar(NixHash::Sha256(state.tokio_handle.block_on(async {
                 Ok::<_, tvix_eval::ErrorKind>(
                     state
-                        .path_info_service
+                        .nar_calculation_service
                         .as_ref()
                         .calculate_nar(&root_node)
                         .await
@@ -255,7 +255,7 @@ mod import_builtins {
             .tokio_handle
             .block_on(async {
                 let (_, nar_sha256) = state
-                    .path_info_service
+                    .nar_calculation_service
                     .as_ref()
                     .calculate_nar(&root_node)
                     .await?;
diff --git a/tvix/glue/src/builtins/mod.rs b/tvix/glue/src/builtins/mod.rs
index 0c7bcc880..3d6263286 100644
--- a/tvix/glue/src/builtins/mod.rs
+++ b/tvix/glue/src/builtins/mod.rs
@@ -68,7 +68,7 @@ mod tests {
     fn eval(str: &str) -> EvaluationResult {
         // We assemble a complete store in memory.
         let runtime = tokio::runtime::Runtime::new().expect("Failed to build a Tokio runtime");
-        let (blob_service, directory_service, path_info_service) = runtime
+        let (blob_service, directory_service, path_info_service, nar_calculation_service) = runtime
             .block_on(async { construct_services("memory://", "memory://", "memory://").await })
             .expect("Failed to construct store services in memory");
 
@@ -76,6 +76,7 @@ mod tests {
             blob_service,
             directory_service,
             path_info_service.into(),
+            nar_calculation_service.into(),
             Arc::<DummyBuildService>::default(),
             runtime.handle().clone(),
         ));
diff --git a/tvix/glue/src/fetchers/mod.rs b/tvix/glue/src/fetchers/mod.rs
index 342dfd84e..1b2e1ee20 100644
--- a/tvix/glue/src/fetchers/mod.rs
+++ b/tvix/glue/src/fetchers/mod.rs
@@ -14,7 +14,7 @@ use tvix_castore::{
     directoryservice::DirectoryService,
     proto::{node::Node, FileNode},
 };
-use tvix_store::{pathinfoservice::PathInfoService, proto::PathInfo};
+use tvix_store::{nar::NarCalculationService, pathinfoservice::PathInfoService, proto::PathInfo};
 use url::Url;
 
 use crate::builtins::FetcherError;
@@ -106,20 +106,27 @@ impl Fetch {
 }
 
 /// Knows how to fetch a given [Fetch].
-pub struct Fetcher<BS, DS, PS> {
+pub struct Fetcher<BS, DS, PS, NS> {
     http_client: reqwest::Client,
     blob_service: BS,
     directory_service: DS,
     path_info_service: PS,
+    nar_calculation_service: NS,
 }
 
-impl<BS, DS, PS> Fetcher<BS, DS, PS> {
-    pub fn new(blob_service: BS, directory_service: DS, path_info_service: PS) -> Self {
+impl<BS, DS, PS, NS> Fetcher<BS, DS, PS, NS> {
+    pub fn new(
+        blob_service: BS,
+        directory_service: DS,
+        path_info_service: PS,
+        nar_calculation_service: NS,
+    ) -> Self {
         Self {
             http_client: reqwest::Client::new(),
             blob_service,
             directory_service,
             path_info_service,
+            nar_calculation_service,
         }
     }
 
@@ -170,11 +177,12 @@ async fn hash<D: Digest + std::io::Write>(
     Ok((hasher.finalize(), bytes_copied))
 }
 
-impl<BS, DS, PS> Fetcher<BS, DS, PS>
+impl<BS, DS, PS, NS> Fetcher<BS, DS, PS, NS>
 where
     BS: BlobService + Clone + 'static,
     DS: DirectoryService + Clone,
     PS: PathInfoService,
+    NS: NarCalculationService,
 {
     /// Ingest the data from a specified [Fetch].
     /// On success, return the root node, a content digest and length.
@@ -257,7 +265,7 @@ where
                 // Even if no expected NAR sha256 has been provided, we need
                 // the actual one later.
                 let (nar_size, actual_nar_sha256) = self
-                    .path_info_service
+                    .nar_calculation_service
                     .calculate_nar(&node)
                     .await
                     .map_err(|e| {
@@ -309,7 +317,7 @@ where
         // the [PathInfo].
         let (nar_size, nar_sha256) = match &ca_hash {
             CAHash::Flat(_nix_hash) => self
-                .path_info_service
+                .nar_calculation_service
                 .calculate_nar(&node)
                 .await
                 .map_err(|e| FetcherError::Io(e.into()))?,
diff --git a/tvix/glue/src/tests/mod.rs b/tvix/glue/src/tests/mod.rs
index 8e1572b6e..9fe0c2227 100644
--- a/tvix/glue/src/tests/mod.rs
+++ b/tvix/glue/src/tests/mod.rs
@@ -3,12 +3,8 @@ use std::{rc::Rc, sync::Arc};
 use pretty_assertions::assert_eq;
 use std::path::PathBuf;
 use tvix_build::buildservice::DummyBuildService;
-use tvix_castore::{
-    blobservice::{BlobService, MemoryBlobService},
-    directoryservice::{DirectoryService, MemoryDirectoryService},
-};
 use tvix_eval::{EvalIO, Value};
-use tvix_store::pathinfoservice::{MemoryPathInfoService, PathInfoService};
+use tvix_store::utils::construct_services;
 
 use rstest::rstest;
 
@@ -36,19 +32,17 @@ fn eval_test(code_path: PathBuf, expect_success: bool) {
         return;
     }
 
-    let blob_service = Arc::new(MemoryBlobService::default()) as Arc<dyn BlobService>;
-    let directory_service =
-        Arc::new(MemoryDirectoryService::default()) as Arc<dyn DirectoryService>;
-    let path_info_service = Box::new(MemoryPathInfoService::new(
-        blob_service.clone(),
-        directory_service.clone(),
-    )) as Box<dyn PathInfoService>;
     let tokio_runtime = tokio::runtime::Runtime::new().unwrap();
+    let (blob_service, directory_service, path_info_service, nar_calculation_service) =
+        tokio_runtime
+            .block_on(async { construct_services("memory://", "memory://", "memory://").await })
+            .unwrap();
 
     let tvix_store_io = Rc::new(TvixStoreIO::new(
         blob_service,
         directory_service,
         path_info_service.into(),
+        nar_calculation_service.into(),
         Arc::new(DummyBuildService::default()),
         tokio_runtime.handle().clone(),
     ));
diff --git a/tvix/glue/src/tvix_store_io.rs b/tvix/glue/src/tvix_store_io.rs
index 7daefffe8..7b8ef3ff0 100644
--- a/tvix/glue/src/tvix_store_io.rs
+++ b/tvix/glue/src/tvix_store_io.rs
@@ -1,6 +1,5 @@
 //! This module provides an implementation of EvalIO talking to tvix-store.
 
-use async_recursion::async_recursion;
 use bytes::Bytes;
 use futures::{StreamExt, TryStreamExt};
 use nix_compat::nixhash::NixHash;
@@ -19,6 +18,7 @@ use tracing::{error, info, instrument, warn, Level};
 use tvix_build::buildservice::BuildService;
 use tvix_castore::proto::node::Node;
 use tvix_eval::{EvalIO, FileType, StdIO};
+use tvix_store::nar::NarCalculationService;
 use tvix_store::utils::AsyncIoBridge;
 
 use tvix_castore::{
@@ -53,13 +53,20 @@ pub struct TvixStoreIO {
     pub(crate) blob_service: Arc<dyn BlobService>,
     pub(crate) directory_service: Arc<dyn DirectoryService>,
     pub(crate) path_info_service: Arc<dyn PathInfoService>,
+    pub(crate) nar_calculation_service: Arc<dyn NarCalculationService>,
+
     std_io: StdIO,
     #[allow(dead_code)]
     build_service: Arc<dyn BuildService>,
     pub(crate) tokio_handle: tokio::runtime::Handle,
 
-    pub(crate) fetcher:
-        Fetcher<Arc<dyn BlobService>, Arc<dyn DirectoryService>, Arc<dyn PathInfoService>>,
+    #[allow(clippy::type_complexity)]
+    pub(crate) fetcher: Fetcher<
+        Arc<dyn BlobService>,
+        Arc<dyn DirectoryService>,
+        Arc<dyn PathInfoService>,
+        Arc<dyn NarCalculationService>,
+    >,
 
     // Paths known how to produce, by building or fetching.
     pub(crate) known_paths: RefCell<KnownPaths>,
@@ -70,6 +77,7 @@ impl TvixStoreIO {
         blob_service: Arc<dyn BlobService>,
         directory_service: Arc<dyn DirectoryService>,
         path_info_service: Arc<dyn PathInfoService>,
+        nar_calculation_service: Arc<dyn NarCalculationService>,
         build_service: Arc<dyn BuildService>,
         tokio_handle: tokio::runtime::Handle,
     ) -> Self {
@@ -77,10 +85,16 @@ impl TvixStoreIO {
             blob_service: blob_service.clone(),
             directory_service: directory_service.clone(),
             path_info_service: path_info_service.clone(),
+            nar_calculation_service: nar_calculation_service.clone(),
             std_io: StdIO {},
             build_service,
             tokio_handle,
-            fetcher: Fetcher::new(blob_service, directory_service, path_info_service),
+            fetcher: Fetcher::new(
+                blob_service,
+                directory_service,
+                path_info_service,
+                nar_calculation_service,
+            ),
             known_paths: Default::default(),
         }
     }
@@ -92,7 +106,6 @@ impl TvixStoreIO {
     ///
     /// In case there is no PathInfo yet, this means we need to build it
     /// (which currently is stubbed out still).
-    #[async_recursion(?Send)]
     #[instrument(skip(self, store_path), fields(store_path=%store_path), ret(level = Level::TRACE), err)]
     async fn store_path_to_node(
         &self,
@@ -249,8 +262,10 @@ impl TvixStoreIO {
                             let root_node = output.node.as_ref().expect("invalid root node");
 
                             // calculate the nar representation
-                            let (nar_size, nar_sha256) =
-                                self.path_info_service.calculate_nar(root_node).await?;
+                            let (nar_size, nar_sha256) = self
+                                .nar_calculation_service
+                                .calculate_nar(root_node)
+                                .await?;
 
                             // assemble the PathInfo to persist
                             let path_info = PathInfo {
@@ -325,7 +340,7 @@ impl TvixStoreIO {
         // because the path info construct a narinfo which *always*
         // require a SHA256 of the NAR representation and the NAR size.
         let (nar_size, nar_sha256) = self
-            .path_info_service
+            .nar_calculation_service
             .as_ref()
             .calculate_nar(&root_node)
             .await?;
@@ -566,6 +581,7 @@ impl EvalIO for TvixStoreIO {
                 &self.blob_service,
                 &self.directory_service,
                 &self.path_info_service,
+                &self.nar_calculation_service,
             )
             .await
         })?;
@@ -586,12 +602,8 @@ mod tests {
     use bstr::ByteSlice;
     use tempfile::TempDir;
     use tvix_build::buildservice::DummyBuildService;
-    use tvix_castore::{
-        blobservice::{BlobService, MemoryBlobService},
-        directoryservice::{DirectoryService, MemoryDirectoryService},
-    };
     use tvix_eval::{EvalIO, EvaluationResult};
-    use tvix_store::pathinfoservice::MemoryPathInfoService;
+    use tvix_store::utils::construct_services;
 
     use super::TvixStoreIO;
     use crate::builtins::{add_derivation_builtins, add_fetcher_builtins, add_import_builtins};
@@ -600,22 +612,19 @@ mod tests {
     /// Takes care of setting up the evaluator so it knows about the
     // `derivation` builtin.
     fn eval(str: &str) -> EvaluationResult {
-        let blob_service = Arc::new(MemoryBlobService::default()) as Arc<dyn BlobService>;
-        let directory_service =
-            Arc::new(MemoryDirectoryService::default()) as Arc<dyn DirectoryService>;
-        let path_info_service = Arc::new(MemoryPathInfoService::new(
-            blob_service.clone(),
-            directory_service.clone(),
-        ));
-
-        let runtime = tokio::runtime::Runtime::new().unwrap();
+        let tokio_runtime = tokio::runtime::Runtime::new().unwrap();
+        let (blob_service, directory_service, path_info_service, nar_calculation_service) =
+            tokio_runtime
+                .block_on(async { construct_services("memory://", "memory://", "memory://").await })
+                .unwrap();
 
         let io = Rc::new(TvixStoreIO::new(
-            blob_service.clone(),
-            directory_service.clone(),
-            path_info_service,
+            blob_service,
+            directory_service,
+            path_info_service.into(),
+            nar_calculation_service.into(),
             Arc::<DummyBuildService>::default(),
-            runtime.handle().clone(),
+            tokio_runtime.handle().clone(),
         ));
         let mut eval = tvix_eval::Evaluation::new(io.clone() as Rc<dyn EvalIO>, true);
 
diff --git a/tvix/nar-bridge/.gitignore b/tvix/nar-bridge-go/.gitignore
index d70e1f812..d70e1f812 100644
--- a/tvix/nar-bridge/.gitignore
+++ b/tvix/nar-bridge-go/.gitignore
diff --git a/tvix/nar-bridge/README.md b/tvix/nar-bridge-go/README.md
index b14ee7af7..81431daf3 100644
--- a/tvix/nar-bridge/README.md
+++ b/tvix/nar-bridge-go/README.md
@@ -1,4 +1,4 @@
-# //tvix/nar-bridge
+# //tvix/nar-bridge-go
 
 This exposes a HTTP Binary cache interface (GET/HEAD/PUT requests) for a `tvix-
 store`.
diff --git a/tvix/nar-bridge/cmd/nar-bridge-http/main.go b/tvix/nar-bridge-go/cmd/nar-bridge-http/main.go
index 171ea7f5b..cf2aaf490 100644
--- a/tvix/nar-bridge/cmd/nar-bridge-http/main.go
+++ b/tvix/nar-bridge-go/cmd/nar-bridge-http/main.go
@@ -14,7 +14,7 @@ import (
 	"google.golang.org/grpc/credentials/insecure"
 
 	castorev1pb "code.tvl.fyi/tvix/castore-go"
-	narBridgeHttp "code.tvl.fyi/tvix/nar-bridge/pkg/http"
+	narBridgeHttp "code.tvl.fyi/tvix/nar-bridge-go/pkg/http"
 	storev1pb "code.tvl.fyi/tvix/store-go"
 	log "github.com/sirupsen/logrus"
 )
@@ -47,7 +47,7 @@ func main() {
 			log.Fatal("failed to read build info")
 		}
 
-		shutdown, err := setupOpenTelemetry(ctx, "nar-bridge", buildInfo.Main.Version)
+		shutdown, err := setupOpenTelemetry(ctx, "nar-bridge-http", buildInfo.Main.Version)
 		if err != nil {
 			log.WithError(err).Fatal("failed to setup OpenTelemetry")
 		}
diff --git a/tvix/nar-bridge/cmd/nar-bridge-http/otel.go b/tvix/nar-bridge-go/cmd/nar-bridge-http/otel.go
index c446c6ec1..c446c6ec1 100644
--- a/tvix/nar-bridge/cmd/nar-bridge-http/otel.go
+++ b/tvix/nar-bridge-go/cmd/nar-bridge-http/otel.go
diff --git a/tvix/nar-bridge/default.nix b/tvix/nar-bridge-go/default.nix
index c0247f279..303d9c504 100644
--- a/tvix/nar-bridge/default.nix
+++ b/tvix/nar-bridge-go/default.nix
@@ -3,7 +3,7 @@
 { depot, pkgs, lib, ... }:
 
 pkgs.buildGoModule {
-  name = "nar-bridge";
+  name = "nar-bridge-go";
   src = depot.third_party.gitignoreSource ./.;
 
   vendorHash = "sha256-7jugbC5sEGhppjiZgnoLP5A6kQSaHK9vE6cXVZBG22s=";
diff --git a/tvix/nar-bridge/go.mod b/tvix/nar-bridge-go/go.mod
index deb6943e2..3aa0694ff 100644
--- a/tvix/nar-bridge/go.mod
+++ b/tvix/nar-bridge-go/go.mod
@@ -1,4 +1,4 @@
-module code.tvl.fyi/tvix/nar-bridge
+module code.tvl.fyi/tvix/nar-bridge-go
 
 require (
 	code.tvl.fyi/tvix/castore-go v0.0.0-20231105151352-990d6ba2175e
diff --git a/tvix/nar-bridge/go.sum b/tvix/nar-bridge-go/go.sum
index 39f77b906..39f77b906 100644
--- a/tvix/nar-bridge/go.sum
+++ b/tvix/nar-bridge-go/go.sum
diff --git a/tvix/nar-bridge/pkg/http/nar_get.go b/tvix/nar-bridge-go/pkg/http/nar_get.go
index 75797f8da..75797f8da 100644
--- a/tvix/nar-bridge/pkg/http/nar_get.go
+++ b/tvix/nar-bridge-go/pkg/http/nar_get.go
diff --git a/tvix/nar-bridge/pkg/http/nar_put.go b/tvix/nar-bridge-go/pkg/http/nar_put.go
index fdfa20f9c..96bdd38b7 100644
--- a/tvix/nar-bridge/pkg/http/nar_put.go
+++ b/tvix/nar-bridge-go/pkg/http/nar_put.go
@@ -7,7 +7,7 @@ import (
 	"net/http"
 
 	castorev1pb "code.tvl.fyi/tvix/castore-go"
-	"code.tvl.fyi/tvix/nar-bridge/pkg/importer"
+	"code.tvl.fyi/tvix/nar-bridge-go/pkg/importer"
 	"github.com/go-chi/chi/v5"
 	mh "github.com/multiformats/go-multihash/core"
 	nixhash "github.com/nix-community/go-nix/pkg/hash"
diff --git a/tvix/nar-bridge/pkg/http/narinfo.go b/tvix/nar-bridge-go/pkg/http/narinfo.go
index e5b99a950..e5b99a950 100644
--- a/tvix/nar-bridge/pkg/http/narinfo.go
+++ b/tvix/nar-bridge-go/pkg/http/narinfo.go
diff --git a/tvix/nar-bridge/pkg/http/narinfo_get.go b/tvix/nar-bridge-go/pkg/http/narinfo_get.go
index d43cb5807..d43cb5807 100644
--- a/tvix/nar-bridge/pkg/http/narinfo_get.go
+++ b/tvix/nar-bridge-go/pkg/http/narinfo_get.go
diff --git a/tvix/nar-bridge/pkg/http/narinfo_put.go b/tvix/nar-bridge-go/pkg/http/narinfo_put.go
index fd588bec8..0e2ae989c 100644
--- a/tvix/nar-bridge/pkg/http/narinfo_put.go
+++ b/tvix/nar-bridge-go/pkg/http/narinfo_put.go
@@ -3,7 +3,7 @@ package http
 import (
 	"net/http"
 
-	"code.tvl.fyi/tvix/nar-bridge/pkg/importer"
+	"code.tvl.fyi/tvix/nar-bridge-go/pkg/importer"
 	"github.com/go-chi/chi/v5"
 	"github.com/nix-community/go-nix/pkg/narinfo"
 	"github.com/nix-community/go-nix/pkg/nixbase32"
diff --git a/tvix/nar-bridge/pkg/http/server.go b/tvix/nar-bridge-go/pkg/http/server.go
index fbcb20be1..fbcb20be1 100644
--- a/tvix/nar-bridge/pkg/http/server.go
+++ b/tvix/nar-bridge-go/pkg/http/server.go
diff --git a/tvix/nar-bridge/pkg/http/util.go b/tvix/nar-bridge-go/pkg/http/util.go
index 60febea1f..60febea1f 100644
--- a/tvix/nar-bridge/pkg/http/util.go
+++ b/tvix/nar-bridge-go/pkg/http/util.go
diff --git a/tvix/nar-bridge/pkg/importer/blob_upload.go b/tvix/nar-bridge-go/pkg/importer/blob_upload.go
index c1255dd3a..c1255dd3a 100644
--- a/tvix/nar-bridge/pkg/importer/blob_upload.go
+++ b/tvix/nar-bridge-go/pkg/importer/blob_upload.go
diff --git a/tvix/nar-bridge/pkg/importer/counting_writer.go b/tvix/nar-bridge-go/pkg/importer/counting_writer.go
index d003a4b11..d003a4b11 100644
--- a/tvix/nar-bridge/pkg/importer/counting_writer.go
+++ b/tvix/nar-bridge-go/pkg/importer/counting_writer.go
diff --git a/tvix/nar-bridge/pkg/importer/directory_upload.go b/tvix/nar-bridge-go/pkg/importer/directory_upload.go
index 117f442fa..117f442fa 100644
--- a/tvix/nar-bridge/pkg/importer/directory_upload.go
+++ b/tvix/nar-bridge-go/pkg/importer/directory_upload.go
diff --git a/tvix/nar-bridge/pkg/importer/gen_pathinfo.go b/tvix/nar-bridge-go/pkg/importer/gen_pathinfo.go
index bdc298a9a..bdc298a9a 100644
--- a/tvix/nar-bridge/pkg/importer/gen_pathinfo.go
+++ b/tvix/nar-bridge-go/pkg/importer/gen_pathinfo.go
diff --git a/tvix/nar-bridge/pkg/importer/importer.go b/tvix/nar-bridge-go/pkg/importer/importer.go
index fce6c5f29..fce6c5f29 100644
--- a/tvix/nar-bridge/pkg/importer/importer.go
+++ b/tvix/nar-bridge-go/pkg/importer/importer.go
diff --git a/tvix/nar-bridge/pkg/importer/importer_test.go b/tvix/nar-bridge-go/pkg/importer/importer_test.go
index 8ff63b925..313677084 100644
--- a/tvix/nar-bridge/pkg/importer/importer_test.go
+++ b/tvix/nar-bridge-go/pkg/importer/importer_test.go
@@ -9,7 +9,7 @@ import (
 	"testing"
 
 	castorev1pb "code.tvl.fyi/tvix/castore-go"
-	"code.tvl.fyi/tvix/nar-bridge/pkg/importer"
+	"code.tvl.fyi/tvix/nar-bridge-go/pkg/importer"
 	"github.com/stretchr/testify/require"
 )
 
diff --git a/tvix/nar-bridge/pkg/importer/roundtrip_test.go b/tvix/nar-bridge-go/pkg/importer/roundtrip_test.go
index 6d6fcb9ee..c50d332d8 100644
--- a/tvix/nar-bridge/pkg/importer/roundtrip_test.go
+++ b/tvix/nar-bridge-go/pkg/importer/roundtrip_test.go
@@ -11,7 +11,7 @@ import (
 	"testing"
 
 	castorev1pb "code.tvl.fyi/tvix/castore-go"
-	"code.tvl.fyi/tvix/nar-bridge/pkg/importer"
+	"code.tvl.fyi/tvix/nar-bridge-go/pkg/importer"
 	storev1pb "code.tvl.fyi/tvix/store-go"
 	"github.com/stretchr/testify/require"
 )
diff --git a/tvix/nar-bridge/pkg/importer/util_test.go b/tvix/nar-bridge-go/pkg/importer/util_test.go
index 06353cf58..06353cf58 100644
--- a/tvix/nar-bridge/pkg/importer/util_test.go
+++ b/tvix/nar-bridge-go/pkg/importer/util_test.go
diff --git a/tvix/nar-bridge/testdata/emptydirectory.nar b/tvix/nar-bridge-go/testdata/emptydirectory.nar
index baba55862..baba55862 100644
--- a/tvix/nar-bridge/testdata/emptydirectory.nar
+++ b/tvix/nar-bridge-go/testdata/emptydirectory.nar
Binary files differdiff --git a/tvix/nar-bridge/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar b/tvix/nar-bridge-go/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar
index 6cb0b16e5..6cb0b16e5 100644
--- a/tvix/nar-bridge/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar
+++ b/tvix/nar-bridge-go/testdata/nar_1094wph9z4nwlgvsd53abfz8i117ykiv5dwnq9nnhz846s7xqd7d.nar
Binary files differdiff --git a/tvix/nar-bridge/testdata/onebyteexecutable.nar b/tvix/nar-bridge-go/testdata/onebyteexecutable.nar
index 686821966..686821966 100644
--- a/tvix/nar-bridge/testdata/onebyteexecutable.nar
+++ b/tvix/nar-bridge-go/testdata/onebyteexecutable.nar
Binary files differdiff --git a/tvix/nar-bridge/testdata/onebyteregular.nar b/tvix/nar-bridge-go/testdata/onebyteregular.nar
index b8c94932b..b8c94932b 100644
--- a/tvix/nar-bridge/testdata/onebyteregular.nar
+++ b/tvix/nar-bridge-go/testdata/onebyteregular.nar
Binary files differdiff --git a/tvix/nar-bridge/testdata/popdirectories.nar b/tvix/nar-bridge-go/testdata/popdirectories.nar
index 74313aca5..74313aca5 100644
--- a/tvix/nar-bridge/testdata/popdirectories.nar
+++ b/tvix/nar-bridge-go/testdata/popdirectories.nar
Binary files differdiff --git a/tvix/nar-bridge/testdata/symlink.nar b/tvix/nar-bridge-go/testdata/symlink.nar
index 7990e4ad5..7990e4ad5 100644
--- a/tvix/nar-bridge/testdata/symlink.nar
+++ b/tvix/nar-bridge-go/testdata/symlink.nar
Binary files differdiff --git a/tvix/nix-compat/src/nar/mod.rs b/tvix/nix-compat/src/nar/mod.rs
index 058977f4f..c678d26ff 100644
--- a/tvix/nix-compat/src/nar/mod.rs
+++ b/tvix/nix-compat/src/nar/mod.rs
@@ -1,4 +1,4 @@
-mod wire;
+pub(crate) mod wire;
 
 pub mod reader;
 pub mod writer;
diff --git a/tvix/nix-compat/src/nar/reader/async/mod.rs b/tvix/nix-compat/src/nar/reader/async/mod.rs
index aaf00faf4..0808fba38 100644
--- a/tvix/nix-compat/src/nar/reader/async/mod.rs
+++ b/tvix/nix-compat/src/nar/reader/async/mod.rs
@@ -1,4 +1,5 @@
 use std::{
+    mem::MaybeUninit,
     pin::Pin,
     task::{self, Poll},
 };
@@ -94,17 +95,27 @@ impl<'a, 'r> AsyncRead for FileReader<'a, 'r> {
     }
 }
 
+impl<'a, 'r> AsyncBufRead for FileReader<'a, 'r> {
+    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<io::Result<&[u8]>> {
+        Pin::new(&mut self.get_mut().inner).poll_fill_buf(cx)
+    }
+
+    fn consume(self: Pin<&mut Self>, amt: usize) {
+        Pin::new(&mut self.get_mut().inner).consume(amt)
+    }
+}
+
 /// A directory iterator, yielding a sequence of [Node]s.
 /// It must be fully consumed before reading further from the [DirReader] that produced it, if any.
 pub struct DirReader<'a, 'r> {
     reader: &'a mut Reader<'r>,
     /// Previous directory entry name.
     /// We have to hang onto this to enforce name monotonicity.
-    prev_name: Option<Vec<u8>>,
+    prev_name: Vec<u8>,
 }
 
 pub struct Entry<'a, 'r> {
-    pub name: Vec<u8>,
+    pub name: &'a [u8],
     pub node: Node<'a, 'r>,
 }
 
@@ -112,7 +123,7 @@ impl<'a, 'r> DirReader<'a, 'r> {
     fn new(reader: &'a mut Reader<'r>) -> Self {
         Self {
             reader,
-            prev_name: None,
+            prev_name: vec![],
         }
     }
 
@@ -128,7 +139,7 @@ impl<'a, 'r> DirReader<'a, 'r> {
     pub async fn next(&mut self) -> io::Result<Option<Entry<'_, 'r>>> {
         // COME FROM the previous iteration: if we've already read an entry,
         // read its terminating TOK_PAR here.
-        if self.prev_name.is_some() {
+        if !self.prev_name.is_empty() {
             read::token(self.reader, &nar::wire::TOK_PAR).await?;
         }
 
@@ -136,30 +147,26 @@ impl<'a, 'r> DirReader<'a, 'r> {
             return Ok(None);
         }
 
-        let name = wire::read_bytes(self.reader, 1..=nar::wire::MAX_NAME_LEN).await?;
+        let mut name = [MaybeUninit::uninit(); nar::wire::MAX_NAME_LEN + 1];
+        let name =
+            wire::read_bytes_buf(self.reader, &mut name, 1..=nar::wire::MAX_NAME_LEN).await?;
 
         if name.contains(&0) || name.contains(&b'/') || name == b"." || name == b".." {
             return Err(InvalidData.into());
         }
 
         // Enforce strict monotonicity of directory entry names.
-        match &mut self.prev_name {
-            None => {
-                self.prev_name = Some(name.clone());
-            }
-            Some(prev_name) => {
-                if *prev_name >= name {
-                    return Err(InvalidData.into());
-                }
-
-                name[..].clone_into(prev_name);
-            }
+        if &self.prev_name[..] >= name {
+            return Err(InvalidData.into());
         }
 
+        self.prev_name.clear();
+        self.prev_name.extend_from_slice(name);
+
         read::token(self.reader, &nar::wire::TOK_NOD).await?;
 
         Ok(Some(Entry {
-            name,
+            name: &self.prev_name,
             node: Node::new(self.reader).await?,
         }))
     }
diff --git a/tvix/nix-compat/src/nar/reader/async/test.rs b/tvix/nix-compat/src/nar/reader/async/test.rs
index 58bb651fc..7bc1f8942 100644
--- a/tvix/nix-compat/src/nar/reader/async/test.rs
+++ b/tvix/nix-compat/src/nar/reader/async/test.rs
@@ -80,7 +80,7 @@ async fn complicated() {
                     .expect("next must be some")
                     .expect("must be some");
 
-                assert_eq!(&b"keep"[..], &entry.name);
+                assert_eq!(b"keep", entry.name);
 
                 match entry.node {
                     nar::reader::Node::Directory(mut subdir_reader) => {
@@ -133,7 +133,7 @@ async fn file_read_abandoned() {
                     .expect("next must succeed")
                     .expect("must be some");
 
-                assert_eq!(&b".keep"[..], &entry.name);
+                assert_eq!(b".keep", entry.name);
                 // don't bother to finish reading it.
             };
 
@@ -183,7 +183,7 @@ async fn dir_read_abandoned() {
                     .expect("next must be some")
                     .expect("must be some");
 
-                assert_eq!(&b"keep"[..], &entry.name);
+                assert_eq!(b"keep", entry.name);
 
                 match entry.node {
                     nar::reader::Node::Directory(_) => {
@@ -239,7 +239,7 @@ async fn dir_read_after_none() {
                     .expect("next must be some")
                     .expect("must be some");
 
-                assert_eq!(&b"keep"[..], &entry.name);
+                assert_eq!(b"keep", entry.name);
 
                 match entry.node {
                     nar::reader::Node::Directory(mut subdir_reader) => {
@@ -280,7 +280,7 @@ async fn dir_read_after_none() {
 }
 
 async fn must_read_file(name: &'static str, entry: nar::reader::Entry<'_, '_>) {
-    assert_eq!(name.as_bytes(), &entry.name);
+    assert_eq!(name.as_bytes(), entry.name);
 
     match entry.node {
         nar::reader::Node::File {
@@ -299,7 +299,7 @@ fn must_be_symlink(
     exp_target: &'static str,
     entry: nar::reader::Entry<'_, '_>,
 ) {
-    assert_eq!(name.as_bytes(), &entry.name);
+    assert_eq!(name.as_bytes(), entry.name);
 
     match entry.node {
         nar::reader::Node::Symlink { target } => {
diff --git a/tvix/nix-compat/src/nar/reader/mod.rs b/tvix/nix-compat/src/nar/reader/mod.rs
index bddf17508..9e9237ead 100644
--- a/tvix/nix-compat/src/nar/reader/mod.rs
+++ b/tvix/nix-compat/src/nar/reader/mod.rs
@@ -264,11 +264,11 @@ pub struct DirReader<'a, 'r> {
     reader: ArchiveReader<'a, 'r>,
     /// Previous directory entry name.
     /// We have to hang onto this to enforce name monotonicity.
-    prev_name: Option<Vec<u8>>,
+    prev_name: Vec<u8>,
 }
 
 pub struct Entry<'a, 'r> {
-    pub name: Vec<u8>,
+    pub name: &'a [u8],
     pub node: Node<'a, 'r>,
 }
 
@@ -276,7 +276,7 @@ impl<'a, 'r> DirReader<'a, 'r> {
     fn new(reader: ArchiveReader<'a, 'r>) -> Self {
         Self {
             reader,
-            prev_name: None,
+            prev_name: vec![],
         }
     }
 
@@ -295,7 +295,7 @@ impl<'a, 'r> DirReader<'a, 'r> {
 
         // COME FROM the previous iteration: if we've already read an entry,
         // read its terminating TOK_PAR here.
-        if self.prev_name.is_some() {
+        if !self.prev_name.is_empty() {
             try_or_poison!(self.reader, read::token(self.reader.inner, &wire::TOK_PAR));
         }
 
@@ -306,9 +306,10 @@ impl<'a, 'r> DirReader<'a, 'r> {
             return Ok(None);
         }
 
+        let mut name = [0; wire::MAX_NAME_LEN + 1];
         let name = try_or_poison!(
             self.reader,
-            read::bytes(self.reader.inner, wire::MAX_NAME_LEN)
+            read::bytes_buf(self.reader.inner, &mut name, wire::MAX_NAME_LEN)
         );
 
         if name.is_empty()
@@ -322,24 +323,18 @@ impl<'a, 'r> DirReader<'a, 'r> {
         }
 
         // Enforce strict monotonicity of directory entry names.
-        match &mut self.prev_name {
-            None => {
-                self.prev_name = Some(name.clone());
-            }
-            Some(prev_name) => {
-                if *prev_name >= name {
-                    self.reader.status.poison();
-                    return Err(InvalidData.into());
-                }
-
-                name[..].clone_into(prev_name);
-            }
+        if &self.prev_name[..] >= name {
+            self.reader.status.poison();
+            return Err(InvalidData.into());
         }
 
+        self.prev_name.clear();
+        self.prev_name.extend_from_slice(name);
+
         try_or_poison!(self.reader, read::token(self.reader.inner, &wire::TOK_NOD));
 
         Ok(Some(Entry {
-            name,
+            name: &self.prev_name,
             // Don't need to worry about poisoning here: Node::new will do it for us if needed
             node: Node::new(self.reader.child())?,
         }))
diff --git a/tvix/nix-compat/src/nar/reader/read.rs b/tvix/nix-compat/src/nar/reader/read.rs
index 1ce161376..9938581f2 100644
--- a/tvix/nix-compat/src/nar/reader/read.rs
+++ b/tvix/nix-compat/src/nar/reader/read.rs
@@ -15,6 +15,38 @@ pub fn u64(reader: &mut Reader) -> io::Result<u64> {
     Ok(u64::from_le_bytes(buf))
 }
 
+/// Consume a byte string from the reader into a provided buffer,
+/// returning the data bytes.
+pub fn bytes_buf<'a, const N: usize>(
+    reader: &mut Reader,
+    buf: &'a mut [u8; N],
+    max_len: usize,
+) -> io::Result<&'a [u8]> {
+    assert_eq!(N % 8, 0);
+    assert!(max_len <= N);
+
+    // read the length, and reject excessively large values
+    let len = self::u64(reader)?;
+    if len > max_len as u64 {
+        return Err(InvalidData.into());
+    }
+    // we know the length fits in a usize now
+    let len = len as usize;
+
+    // read the data and padding into a buffer
+    let buf_len = (len + 7) & !7;
+    reader.read_exact(&mut buf[..buf_len])?;
+
+    // verify that the padding is all zeroes
+    for &b in &buf[len..buf_len] {
+        if b != 0 {
+            return Err(InvalidData.into());
+        }
+    }
+
+    Ok(&buf[..len])
+}
+
 /// Consume a byte string of up to `max_len` bytes from the reader.
 pub fn bytes(reader: &mut Reader, max_len: usize) -> io::Result<Vec<u8>> {
     assert!(max_len <= isize::MAX as usize);
diff --git a/tvix/nix-compat/src/nar/reader/test.rs b/tvix/nix-compat/src/nar/reader/test.rs
index 02dc4767c..63e4fb289 100644
--- a/tvix/nix-compat/src/nar/reader/test.rs
+++ b/tvix/nix-compat/src/nar/reader/test.rs
@@ -71,7 +71,7 @@ fn complicated() {
                     .expect("next must be some")
                     .expect("must be some");
 
-                assert_eq!(&b"keep"[..], &entry.name);
+                assert_eq!(b"keep", entry.name);
 
                 match entry.node {
                     nar::reader::Node::Directory(mut subdir_reader) => {
@@ -117,7 +117,7 @@ fn file_read_abandoned() {
                     .expect("next must succeed")
                     .expect("must be some");
 
-                assert_eq!(&b".keep"[..], &entry.name);
+                assert_eq!(b".keep", entry.name);
                 // don't bother to finish reading it.
             };
 
@@ -162,7 +162,7 @@ fn dir_read_abandoned() {
                     .expect("next must be some")
                     .expect("must be some");
 
-                assert_eq!(&b"keep"[..], &entry.name);
+                assert_eq!(b"keep", entry.name);
 
                 match entry.node {
                     nar::reader::Node::Directory(_) => {
@@ -213,7 +213,7 @@ fn dir_read_after_none() {
                     .expect("next must be some")
                     .expect("must be some");
 
-                assert_eq!(&b"keep"[..], &entry.name);
+                assert_eq!(b"keep", entry.name);
 
                 match entry.node {
                     nar::reader::Node::Directory(mut subdir_reader) => {
@@ -248,7 +248,7 @@ fn dir_read_after_none() {
 }
 
 fn must_read_file(name: &'static str, entry: nar::reader::Entry<'_, '_>) {
-    assert_eq!(name.as_bytes(), &entry.name);
+    assert_eq!(name.as_bytes(), entry.name);
 
     match entry.node {
         nar::reader::Node::File {
@@ -267,7 +267,7 @@ fn must_be_symlink(
     exp_target: &'static str,
     entry: nar::reader::Entry<'_, '_>,
 ) {
-    assert_eq!(name.as_bytes(), &entry.name);
+    assert_eq!(name.as_bytes(), entry.name);
 
     match entry.node {
         nar::reader::Node::Symlink { target } => {
diff --git a/tvix/nix-compat/src/store_path/mod.rs b/tvix/nix-compat/src/store_path/mod.rs
index ff7ede77e..707c41a92 100644
--- a/tvix/nix-compat/src/store_path/mod.rs
+++ b/tvix/nix-compat/src/store_path/mod.rs
@@ -56,7 +56,7 @@ pub enum Error {
 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
 pub struct StorePath {
     digest: [u8; DIGEST_SIZE],
-    name: String,
+    name: Box<str>,
 }
 
 impl StorePath {
@@ -65,7 +65,7 @@ impl StorePath {
     }
 
     pub fn name(&self) -> &str {
-        self.name.as_ref()
+        &self.name
     }
 
     pub fn as_ref(&self) -> StorePathRef<'_> {
@@ -176,10 +176,7 @@ pub struct StorePathRef<'a> {
 
 impl<'a> From<&'a StorePath> for StorePathRef<'a> {
     fn from(&StorePath { digest, ref name }: &'a StorePath) -> Self {
-        StorePathRef {
-            digest,
-            name: name.as_ref(),
-        }
+        StorePathRef { digest, name }
     }
 }
 
@@ -209,7 +206,7 @@ impl<'a> StorePathRef<'a> {
     pub fn to_owned(&self) -> StorePath {
         StorePath {
             digest: self.digest,
-            name: self.name.to_owned(),
+            name: self.name.into(),
         }
     }
 
@@ -394,7 +391,7 @@ mod tests {
 
         let expected_digest: [u8; DIGEST_SIZE] = hex!("8a12321522fd91efbd60ebb2481af88580f61600");
 
-        assert_eq!("net-tools-1.60_p20170221182432", nixpath.name);
+        assert_eq!("net-tools-1.60_p20170221182432", nixpath.name());
         assert_eq!(nixpath.digest, expected_digest);
 
         assert_eq!(example_nix_path_str, nixpath.to_string())
diff --git a/tvix/nix-compat/src/wire/bytes/mod.rs b/tvix/nix-compat/src/wire/bytes/mod.rs
index 5ed5e15a6..2ed071e37 100644
--- a/tvix/nix-compat/src/wire/bytes/mod.rs
+++ b/tvix/nix-compat/src/wire/bytes/mod.rs
@@ -1,8 +1,9 @@
 use std::{
     io::{Error, ErrorKind},
+    mem::MaybeUninit,
     ops::RangeInclusive,
 };
-use tokio::io::{AsyncReadExt, AsyncWriteExt};
+use tokio::io::{self, AsyncReadExt, AsyncWriteExt, ReadBuf};
 
 pub(crate) mod reader;
 pub use reader::BytesReader;
@@ -15,7 +16,6 @@ const EMPTY_BYTES: &[u8; 8] = &[0u8; 8];
 /// The length of the size field, in bytes is always 8.
 const LEN_SIZE: usize = 8;
 
-#[allow(dead_code)]
 /// Read a "bytes wire packet" from the AsyncRead.
 /// Rejects reading more than `allowed_size` bytes of payload.
 ///
@@ -36,7 +36,7 @@ const LEN_SIZE: usize = 8;
 pub async fn read_bytes<R: ?Sized>(
     r: &mut R,
     allowed_size: RangeInclusive<usize>,
-) -> std::io::Result<Vec<u8>>
+) -> io::Result<Vec<u8>>
 where
     R: AsyncReadExt + Unpin,
 {
@@ -47,8 +47,8 @@ where
         .ok()
         .filter(|len| allowed_size.contains(len))
         .ok_or_else(|| {
-            std::io::Error::new(
-                std::io::ErrorKind::InvalidData,
+            io::Error::new(
+                io::ErrorKind::InvalidData,
                 "signalled package size not in allowed range",
             )
         })?;
@@ -64,15 +64,15 @@ where
 
     // make sure we got exactly the number of bytes, and not less.
     if s as u64 != padded_len {
-        return Err(std::io::ErrorKind::UnexpectedEof.into());
+        return Err(io::ErrorKind::UnexpectedEof.into());
     }
 
     let (_content, padding) = buf.split_at(len);
 
     // ensure the padding is all zeroes.
-    if !padding.iter().all(|e| *e == b'\0') {
-        return Err(std::io::Error::new(
-            std::io::ErrorKind::InvalidData,
+    if padding.iter().any(|&b| b != 0) {
+        return Err(io::Error::new(
+            io::ErrorKind::InvalidData,
             "padding is not all zeroes",
         ));
     }
@@ -82,13 +82,67 @@ where
     Ok(buf)
 }
 
+pub(crate) async fn read_bytes_buf<'a, const N: usize, R: ?Sized>(
+    reader: &mut R,
+    buf: &'a mut [MaybeUninit<u8>; N],
+    allowed_size: RangeInclusive<usize>,
+) -> io::Result<&'a [u8]>
+where
+    R: AsyncReadExt + Unpin,
+{
+    assert_eq!(N % 8, 0);
+    assert!(*allowed_size.end() <= N);
+
+    let len = reader.read_u64_le().await?;
+    let len: usize = len
+        .try_into()
+        .ok()
+        .filter(|len| allowed_size.contains(len))
+        .ok_or_else(|| {
+            io::Error::new(
+                io::ErrorKind::InvalidData,
+                "signalled package size not in allowed range",
+            )
+        })?;
+
+    let buf_len = (len + 7) & !7;
+    let buf = {
+        let mut read_buf = ReadBuf::uninit(&mut buf[..buf_len]);
+
+        while read_buf.filled().len() < buf_len {
+            reader.read_buf(&mut read_buf).await?;
+        }
+
+        // ReadBuf::filled does not pass the underlying buffer's lifetime through,
+        // so we must make a trip to hell.
+        //
+        // SAFETY: `read_buf` is filled up to `buf_len`, and we verify that it is
+        // still pointing at the same underlying buffer.
+        unsafe {
+            assert_eq!(read_buf.filled().as_ptr(), buf.as_ptr() as *const u8);
+            assume_init_bytes(&buf[..buf_len])
+        }
+    };
+
+    if buf[len..buf_len].iter().any(|&b| b != 0) {
+        return Err(io::Error::new(
+            io::ErrorKind::InvalidData,
+            "padding is not all zeroes",
+        ));
+    }
+
+    Ok(&buf[..len])
+}
+
+/// SAFETY: The bytes have to actually be initialized.
+unsafe fn assume_init_bytes(slice: &[MaybeUninit<u8>]) -> &[u8] {
+    &*(slice as *const [MaybeUninit<u8>] as *const [u8])
+}
+
 /// Read a "bytes wire packet" of from the AsyncRead and tries to parse as string.
 /// Internally uses [read_bytes].
 /// Rejects reading more than `allowed_size` bytes of payload.
-pub async fn read_string<R>(
-    r: &mut R,
-    allowed_size: RangeInclusive<usize>,
-) -> std::io::Result<String>
+pub async fn read_string<R>(r: &mut R, allowed_size: RangeInclusive<usize>) -> io::Result<String>
 where
     R: AsyncReadExt + Unpin,
 {
@@ -108,7 +162,7 @@ where
 pub async fn write_bytes<W: AsyncWriteExt + Unpin, B: AsRef<[u8]>>(
     w: &mut W,
     b: B,
-) -> std::io::Result<()> {
+) -> io::Result<()> {
     // write the size packet.
     w.write_u64_le(b.as_ref().len() as u64).await?;
 
diff --git a/tvix/nix-compat/src/wire/bytes/reader/mod.rs b/tvix/nix-compat/src/wire/bytes/reader/mod.rs
index cd45f78a0..6bd376c06 100644
--- a/tvix/nix-compat/src/wire/bytes/reader/mod.rs
+++ b/tvix/nix-compat/src/wire/bytes/reader/mod.rs
@@ -1,11 +1,12 @@
 use std::{
     future::Future,
     io,
+    num::NonZeroU64,
     ops::RangeBounds,
     pin::Pin,
     task::{self, ready, Poll},
 };
-use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf};
+use tokio::io::{AsyncBufRead, AsyncRead, AsyncReadExt, ReadBuf};
 
 use trailer::{read_trailer, ReadTrailer, Trailer};
 
@@ -33,14 +34,26 @@ pub struct BytesReader<R, T: Tag = Pad> {
     state: State<R, T>,
 }
 
+/// Split the `user_len` into `body_len` and `tail_len`, which are respectively
+/// the non-terminal 8-byte blocks, and the ≤8 bytes of user data contained in
+/// the trailer block.
+#[inline(always)]
+fn split_user_len(user_len: NonZeroU64) -> (u64, u8) {
+    let n = user_len.get() - 1;
+    let body_len = n & !7;
+    let tail_len = (n & 7) as u8 + 1;
+    (body_len, tail_len)
+}
+
 #[derive(Debug)]
 enum State<R, T: Tag> {
     /// Full 8-byte blocks are being read and released to the caller.
+    /// NOTE: The final 8-byte block is *always* part of the trailer.
     Body {
         reader: Option<R>,
         consumed: u64,
         /// The total length of all user data contained in both the body and trailer.
-        user_len: u64,
+        user_len: NonZeroU64,
     },
     /// The trailer is in the process of being read.
     ReadTrailer(ReadTrailer<R, T>),
@@ -76,10 +89,16 @@ where
         }
 
         Ok(Self {
-            state: State::Body {
-                reader: Some(reader),
-                consumed: 0,
-                user_len: size,
+            state: match NonZeroU64::new(size) {
+                Some(size) => State::Body {
+                    reader: Some(reader),
+                    consumed: 0,
+                    user_len: size,
+                },
+                None => State::ReleaseTrailer {
+                    consumed: 0,
+                    data: read_trailer::<R, T>(reader, 0).await?,
+                },
             },
         })
     }
@@ -96,7 +115,7 @@ where
         match self.state {
             State::Body {
                 consumed, user_len, ..
-            } => user_len - consumed,
+            } => user_len.get() - consumed,
             State::ReadTrailer(ref fut) => fut.len() as u64,
             State::ReleaseTrailer { consumed, ref data } => data.len() as u64 - consumed as u64,
         }
@@ -119,21 +138,20 @@ impl<R: AsyncRead + Unpin, T: Tag> AsyncRead for BytesReader<R, T> {
                     consumed,
                     user_len,
                 } => {
-                    let body_len = *user_len & !7;
+                    let (body_len, tail_len) = split_user_len(*user_len);
                     let remaining = body_len - *consumed;
 
                     let reader = if remaining == 0 {
                         let reader = reader.take().unwrap();
-                        let user_len = (*user_len & 7) as u8;
-                        *this = State::ReadTrailer(read_trailer(reader, user_len));
+                        *this = State::ReadTrailer(read_trailer(reader, tail_len));
                         continue;
                     } else {
-                        reader.as_mut().unwrap()
+                        Pin::new(reader.as_mut().unwrap())
                     };
 
                     let mut bytes_read = 0;
                     ready!(with_limited(buf, remaining, |buf| {
-                        let ret = Pin::new(reader).poll_read(cx, buf);
+                        let ret = reader.poll_read(cx, buf);
                         bytes_read = buf.initialized().len();
                         ret
                     }))?;
@@ -167,6 +185,96 @@ impl<R: AsyncRead + Unpin, T: Tag> AsyncRead for BytesReader<R, T> {
     }
 }
 
+#[allow(private_bounds)]
+impl<R: AsyncBufRead + Unpin, T: Tag> AsyncBufRead for BytesReader<R, T> {
+    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<io::Result<&[u8]>> {
+        let this = &mut self.get_mut().state;
+
+        loop {
+            match this {
+                // This state comes *after* the following case,
+                // but we can't keep it in logical order because
+                // that would lengthen the borrow lifetime.
+                State::Body {
+                    reader,
+                    consumed,
+                    user_len,
+                } if {
+                    let (body_len, _) = split_user_len(*user_len);
+                    let remaining = body_len - *consumed;
+
+                    remaining == 0
+                } =>
+                {
+                    let reader = reader.take().unwrap();
+                    let (_, tail_len) = split_user_len(*user_len);
+
+                    *this = State::ReadTrailer(read_trailer(reader, tail_len));
+                }
+                State::Body {
+                    reader,
+                    consumed,
+                    user_len,
+                } => {
+                    let (body_len, _) = split_user_len(*user_len);
+                    let remaining = body_len - *consumed;
+
+                    let reader = Pin::new(reader.as_mut().unwrap());
+
+                    match ready!(reader.poll_fill_buf(cx))? {
+                        &[] => {
+                            return Err(io::ErrorKind::UnexpectedEof.into()).into();
+                        }
+                        mut buf => {
+                            if buf.len() as u64 > remaining {
+                                buf = &buf[..remaining as usize];
+                            }
+
+                            return Ok(buf).into();
+                        }
+                    }
+                }
+                State::ReadTrailer(fut) => {
+                    *this = State::ReleaseTrailer {
+                        consumed: 0,
+                        data: ready!(Pin::new(fut).poll(cx))?,
+                    };
+                }
+                State::ReleaseTrailer { consumed, data } => {
+                    return Ok(&data[*consumed as usize..]).into();
+                }
+            }
+        }
+    }
+
+    fn consume(mut self: Pin<&mut Self>, amt: usize) {
+        match &mut self.state {
+            State::Body {
+                reader,
+                consumed,
+                user_len,
+            } => {
+                let reader = Pin::new(reader.as_mut().unwrap());
+                let (body_len, _) = split_user_len(*user_len);
+
+                *consumed = consumed
+                    .checked_add(amt as u64)
+                    .filter(|&consumed| consumed <= body_len)
+                    .expect("consumed out of bounds");
+
+                reader.consume(amt);
+            }
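+            // poll_fill_buf never hands out data while the trailer is being
+            // read, so consume can't legally be called in this state.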
+            State::ReadTrailer(_) => unreachable!(),
+            State::ReleaseTrailer { consumed, data } => {
+                *consumed = amt
+                    .checked_add(*consumed as usize)
+                    .filter(|&consumed| consumed <= data.len())
+                    .expect("consumed out of bounds") as u8;
+            }
+        }
+    }
+}
+
 /// Make a limited version of `buf`, consisting only of up to `n` bytes of the unfilled section, and call `f` with it.
 /// After `f` returns, we propagate the filled cursor advancement back to `buf`.
 fn with_limited<R>(buf: &mut ReadBuf, n: u64, f: impl FnOnce(&mut ReadBuf) -> R) -> R {
@@ -199,7 +307,7 @@ mod tests {
     use hex_literal::hex;
     use lazy_static::lazy_static;
     use rstest::rstest;
-    use tokio::io::AsyncReadExt;
+    use tokio::io::{AsyncReadExt, BufReader};
     use tokio_test::io::Builder;
 
     use super::*;
@@ -243,6 +351,34 @@ mod tests {
         assert_eq!(payload, &buf[..]);
     }
 
+    /// Read bytes packets of various lengths, and ensure copy_buf reads the
+    /// expected payload.
+    #[rstest]
+    #[case::empty(&[])] // empty bytes packet
+    #[case::size_1b(&[0xff])] // 1 byte payload
+    #[case::size_8b(&hex!("0001020304050607"))] // 8 bytes payload (no padding)
+    #[case::size_9b(&hex!("000102030405060708"))] // 9 bytes payload (7 bytes padding)
+    #[case::size_1m(LARGE_PAYLOAD.as_slice())] // larger bytes packet
+    #[tokio::test]
+    async fn read_payload_correct_readbuf(#[case] payload: &[u8]) {
+        let mut mock = BufReader::new(
+            Builder::new()
+                .read(&produce_packet_bytes(payload).await)
+                .build(),
+        );
+
+        let mut r = BytesReader::new(&mut mock, ..=LARGE_PAYLOAD.len() as u64)
+            .await
+            .unwrap();
+
+        let mut buf = Vec::new();
+        tokio::io::copy_buf(&mut r, &mut buf)
+            .await
+            .expect("copy_buf must succeed");
+
+        assert_eq!(payload, &buf[..]);
+    }
+
     /// Fail if the bytes packet is larger than allowed
     #[tokio::test]
     async fn read_bigger_than_allowed_fail() {
@@ -277,6 +413,45 @@ mod tests {
         );
     }
 
+    /// Read the trailer immediately if there is no payload.
+    #[tokio::test]
+    async fn read_trailer_immediately() {
+        use crate::nar::wire::PadPar;
+
+        let mut mock = Builder::new()
+            .read(&[0; 8])
+            .read(&PadPar::PATTERN[8..])
+            .build();
+
+        BytesReader::<_, PadPar>::new_internal(&mut mock, ..)
+            .await
+            .unwrap();
+
+        // The mock reader will panic if dropped without reading all data.
+    }
+
+    /// Read the trailer even if we only read the exact payload size.
+    #[tokio::test]
+    async fn read_exact_trailer() {
+        use crate::nar::wire::PadPar;
+
+        let mut mock = Builder::new()
+            .read(&16u64.to_le_bytes())
+            .read(&[0x55; 16])
+            .read(&PadPar::PATTERN[8..])
+            .build();
+
+        let mut reader = BytesReader::<_, PadPar>::new_internal(&mut mock, ..)
+            .await
+            .unwrap();
+
+        let mut buf = [0; 16];
+        reader.read_exact(&mut buf).await.unwrap();
+        assert_eq!(buf, [0x55; 16]);
+
+        // The mock reader will panic if dropped without reading all data.
+    }
+
     /// Fail if the padding is not all zeroes
     #[tokio::test]
     async fn read_fail_if_nonzero_padding() {
@@ -402,6 +577,48 @@ mod tests {
         );
     }
 
+    /// Start a 9-byte payload packet, but return an error at a certain position.
+    /// Ensure that the error is propagated (AsyncBufRead case).
+    #[rstest]
+    #[case::during_size(4)]
+    #[case::before_payload(8)]
+    #[case::during_payload(8 + 4)]
+    #[case::before_padding(8 + 9)]
+    #[case::during_padding(8 + 9 + 2)]
+    #[tokio::test]
+    async fn propagate_error_from_reader_buffered(#[case] offset: usize) {
+        let payload = &hex!("FF0102030405060708");
+        let mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await[..offset])
+            .read_error(std::io::Error::new(std::io::ErrorKind::Other, "foo"))
+            .build();
+        let mut mock = BufReader::new(mock);
+
+        // Either length reading or data reading can fail, depending on which test case we're in.
+        let err: io::Error = async {
+            let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await?;
+            let mut buf = Vec::new();
+
+            tokio::io::copy_buf(&mut r, &mut buf).await?;
+
+            Ok(())
+        }
+        .await
+        .expect_err("must fail");
+
+        assert_eq!(
+            err.kind(),
+            std::io::ErrorKind::Other,
+            "error kind must match"
+        );
+
+        assert_eq!(
+            err.into_inner().unwrap().to_string(),
+            "foo",
+            "error payload must contain foo"
+        );
+    }
+
     /// If there's an error right after the padding, we don't propagate it, as
     /// we're done reading. We just return EOF.
     #[tokio::test]
@@ -419,6 +636,26 @@ mod tests {
         assert_eq!(buf.as_slice(), payload);
     }
 
+    /// If there's an error right after the padding, we don't propagate it, as
+    /// we're done reading. We just return EOF.
+    #[tokio::test]
+    async fn no_error_after_eof_buffered() {
+        let payload = &hex!("FF0102030405060708");
+        let mock = Builder::new()
+            .read(&produce_packet_bytes(payload).await)
+            .read_error(std::io::Error::new(std::io::ErrorKind::Other, "foo"))
+            .build();
+        let mut mock = BufReader::new(mock);
+
+        let mut r = BytesReader::new(&mut mock, ..MAX_LEN).await.unwrap();
+        let mut buf = Vec::new();
+
+        tokio::io::copy_buf(&mut r, &mut buf)
+            .await
+            .expect("must succeed");
+        assert_eq!(buf.as_slice(), payload);
+    }
+
     /// Introduce various stalls in various places of the packet, to ensure we
     /// handle these cases properly, too.
     #[rstest]
diff --git a/tvix/nix-compat/src/wire/bytes/reader/trailer.rs b/tvix/nix-compat/src/wire/bytes/reader/trailer.rs
index 0b0c7b135..3a5bb75e7 100644
--- a/tvix/nix-compat/src/wire/bytes/reader/trailer.rs
+++ b/tvix/nix-compat/src/wire/bytes/reader/trailer.rs
@@ -9,11 +9,11 @@ use std::{
 
 use tokio::io::{self, AsyncRead, ReadBuf};
 
-/// Trailer represents up to 7 bytes of data read as part of the trailer block(s)
+/// Trailer represents up to 8 bytes of data read as part of the trailer block(s)
 #[derive(Debug)]
 pub(crate) struct Trailer {
     data_len: u8,
-    buf: [u8; 7],
+    buf: [u8; 8],
 }
 
 impl Deref for Trailer {
@@ -28,7 +28,7 @@ impl Deref for Trailer {
 pub(crate) trait Tag {
     /// The expected suffix
     ///
-    /// The first 7 bytes may be ignored, and it must be an 8-byte aligned size.
+    /// The first 8 bytes may be ignored, and it must be an 8-byte aligned size.
     const PATTERN: &'static [u8];
 
     /// Suitably sized buffer for reading [Self::PATTERN]
@@ -67,7 +67,7 @@ pub(crate) fn read_trailer<R: AsyncRead + Unpin, T: Tag>(
     reader: R,
     data_len: u8,
 ) -> ReadTrailer<R, T> {
-    assert!(data_len < 8, "payload in trailer must be less than 8 bytes");
+    assert!(data_len <= 8, "payload in trailer must be <= 8 bytes");
 
     let buf = T::make_buf();
     assert_eq!(buf.as_ref().len(), T::PATTERN.len());
@@ -108,8 +108,8 @@ impl<R: AsyncRead + Unpin, T: Tag> Future for ReadTrailer<R, T> {
             }
 
             if this.filled as usize == T::PATTERN.len() {
-                let mut buf = [0; 7];
-                buf.copy_from_slice(&this.buf.as_ref()[..7]);
+                let mut buf = [0; 8];
+                buf.copy_from_slice(&this.buf.as_ref()[..8]);
 
                 return Ok(Trailer {
                     data_len: this.data_len,
@@ -124,10 +124,9 @@ impl<R: AsyncRead + Unpin, T: Tag> Future for ReadTrailer<R, T> {
             ready!(Pin::new(&mut this.reader).poll_read(cx, &mut buf))?;
 
             this.filled = {
-                let prev_filled = this.filled;
                 let filled = buf.filled().len() as u8;
 
-                if filled == prev_filled {
+                if filled == this.filled {
                     return Err(io::ErrorKind::UnexpectedEof.into()).into();
                 }
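The bump of the trailer's capacity from 7 to 8 bytes goes hand in hand with the split used in the reader above: the packet body is now cut so that the trailer block always carries between 1 and 8 bytes of user data, instead of 0 to 7. A minimal sketch of that arithmetic (the exact formula here is an assumption consistent with the new `data_len <= 8` assert; the real `split_user_len` is defined earlier in this diff):

```rust
/// Sketch: split a non-zero payload length into the body read directly
/// into the caller's buffer and the 1..=8 tail bytes read as the trailer.
fn split_user_len(user_len: u64) -> (u64, u8) {
    debug_assert!(user_len > 0, "empty packets never enter the Body state");
    let n = user_len - 1;
    let body_len = n & !7; // whole 8-byte blocks
    let tail_len = (n & 7) as u8 + 1; // 1..=8 bytes, read with the padding
    (body_len, tail_len)
}

fn main() {
    assert_eq!(split_user_len(1), (0, 1)); // all trailer
    assert_eq!(split_user_len(8), (0, 8)); // exactly one block: all trailer
    assert_eq!(split_user_len(9), (8, 1)); // 8-byte body, 1-byte tail
    assert_eq!(split_user_len(16), (8, 8));
}
```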
 
diff --git a/tvix/store/Cargo.toml b/tvix/store/Cargo.toml
index 4c0e9be49..4727f43f7 100644
--- a/tvix/store/Cargo.toml
+++ b/tvix/store/Cargo.toml
@@ -5,6 +5,7 @@ edition = "2021"
 
 [dependencies]
 anyhow = "1.0.68"
+async-compression = { version = "0.4.9", features = ["tokio", "bzip2", "gzip", "xz", "zstd"]}
 async-stream = "0.3.5"
 blake3 = { version = "1.3.1", features = ["rayon", "std"] }
 bstr = "1.6.0"
@@ -17,9 +18,9 @@ lazy_static = "1.4.0"
 nix-compat = { path = "../nix-compat", features = ["async"] }
 pin-project-lite = "0.2.13"
 prost = "0.12.1"
-opentelemetry = { version = "0.21.0", optional = true}
-opentelemetry-otlp = { version = "0.14.0", optional = true }
-opentelemetry_sdk = { version = "0.21.0", features = ["rt-tokio"], optional = true}
+opentelemetry = { version = "0.22.0", optional = true}
+opentelemetry-otlp = { version = "0.15.0", optional = true }
+opentelemetry_sdk = { version = "0.22.1", features = ["rt-tokio"], optional = true}
 serde = { version = "1.0.197", features = [ "derive" ] }
 serde_json = "1.0"
 serde_with = "3.7.0"
@@ -28,20 +29,20 @@ sha2 = "0.10.6"
 sled = { version = "0.34.7" }
 thiserror = "1.0.38"
 tokio = { version = "1.32.0", features = ["fs", "macros", "net", "rt", "rt-multi-thread", "signal"] }
-tokio-listener = { version = "0.3.2", features = [ "tonic011" ] }
+tokio-listener = { version = "0.4.1", features = [ "tonic011" ] }
 tokio-stream = { version = "0.1.14", features = ["fs"] }
 tokio-util = { version = "0.7.9", features = ["io", "io-util", "compat"] }
 tonic = { version = "0.11.0", features = ["tls", "tls-roots"] }
 tower = "0.4.13"
 tracing = "0.1.37"
-tracing-opentelemetry = "0.22.0"
-tracing-subscriber = { version = "0.3.16", features = ["env-filter", "json"] }
+tracing-opentelemetry = "0.23.0"
+tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
 tvix-castore = { path = "../castore" }
 url = "2.4.0"
 walkdir = "2.4.0"
-async-recursion = "1.0.5"
 reqwest = { version = "0.11.22", features = ["rustls-tls-native-roots", "stream"], default-features = false }
-xz2 = "0.1.7"
+lru = "0.12.3"
+parking_lot = "0.12.2"
 
 [dependencies.tonic-reflection]
 optional = true
@@ -78,3 +79,6 @@ virtiofs = ["tvix-castore/virtiofs"]
 # Requires the following packages in $PATH:
 # cbtemulator, google-cloud-bigtable-tool
 integration = []
+
+[lints]
+workspace = true
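Of the dependency changes, swapping the synchronous `xz2` for `async-compression` (with the `tokio`, `bzip2`, `gzip`, `xz` and `zstd` features) is the most consequential: compressed NAR streams can now be decoded as plain tokio `AsyncRead`s. A hedged sketch of the adapter shape this enables (the decoder type is async-compression's `tokio::bufread` API; the helper around it is illustrative):

```rust
use async_compression::tokio::bufread::XzDecoder;
use tokio::io::{AsyncBufRead, BufReader};

/// Wrap a compressed stream in an xz decoder; the BufReader restores the
/// AsyncBufRead bound that e.g. ingest_nar expects further down.
fn decompress_xz<R: AsyncBufRead + Unpin + Send>(inner: R) -> BufReader<XzDecoder<R>> {
    BufReader::new(XzDecoder::new(inner))
}
```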
diff --git a/tvix/store/docs/api.md b/tvix/store/docs/api.md
index c1dacc89a..01e72671a 100644
--- a/tvix/store/docs/api.md
+++ b/tvix/store/docs/api.md
@@ -218,7 +218,7 @@ This is useful for people running a Tvix-only system, or running builds on a
 In a system with Nix installed, we can't simply manually "extract" things to
 `/nix/store`, as Nix assumes to own all writes to this location.
 In these use cases, we're probably better off exposing a tvix-store as a local
-binary cache (that's what `//tvix/nar-bridge` does).
+binary cache (that's what `//tvix/nar-bridge-go` does).
 
 Assuming we are in an environment where we control `/nix/store` exclusively, a
 "realize to disk" would either "extract" things from the `tvix-store` to a
diff --git a/tvix/store/src/bin/tvix-store.rs b/tvix/store/src/bin/tvix-store.rs
index fa30501e7..906d0ab52 100644
--- a/tvix/store/src/bin/tvix-store.rs
+++ b/tvix/store/src/bin/tvix-store.rs
@@ -19,6 +19,7 @@ use tracing_subscriber::EnvFilter;
 use tracing_subscriber::Layer;
 use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
 use tvix_castore::import::fs::ingest_path;
+use tvix_store::nar::NarCalculationService;
 use tvix_store::proto::NarInfo;
 use tvix_store::proto::PathInfo;
 
@@ -286,7 +287,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
             path_info_service_addr,
         } => {
             // initialize stores
-            let (blob_service, directory_service, path_info_service) =
+            let (blob_service, directory_service, path_info_service, nar_calculation_service) =
                 tvix_store::utils::construct_services(
                     blob_service_addr,
                     directory_service_addr,
@@ -311,6 +312,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
                 ))
                 .add_service(PathInfoServiceServer::new(GRPCPathInfoServiceWrapper::new(
                     Arc::from(path_info_service),
+                    nar_calculation_service,
                 )));
 
             #[cfg(feature = "tonic-reflection")]
@@ -340,7 +342,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
             path_info_service_addr,
         } => {
             // FUTUREWORK: allow flat for single files?
-            let (blob_service, directory_service, path_info_service) =
+            let (blob_service, directory_service, path_info_service, nar_calculation_service) =
                 tvix_store::utils::construct_services(
                     blob_service_addr,
                     directory_service_addr,
@@ -348,8 +350,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
                 )
                 .await?;
 
-            // Arc the PathInfoService, as we clone it .
+            // Arc the PathInfoService and NarCalculationService, as we clone them.
             let path_info_service: Arc<dyn PathInfoService> = path_info_service.into();
+            let nar_calculation_service: Arc<dyn NarCalculationService> =
+                nar_calculation_service.into();
 
             let tasks = paths
                 .into_iter()
@@ -358,6 +362,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
                         let blob_service = blob_service.clone();
                         let directory_service = directory_service.clone();
                         let path_info_service = path_info_service.clone();
+                        let nar_calculation_service = nar_calculation_service.clone();
 
                         async move {
                             if let Ok(name) = tvix_store::import::path_to_name(&path) {
@@ -367,6 +372,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
                                     blob_service,
                                     directory_service,
                                     path_info_service,
+                                    nar_calculation_service,
                                 )
                                 .await;
                                 if let Ok(output_path) = resp {
@@ -387,7 +393,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
             path_info_service_addr,
             reference_graph_path,
         } => {
-            let (blob_service, directory_service, path_info_service) =
+            let (blob_service, directory_service, path_info_service, _nar_calculation_service) =
                 tvix_store::utils::construct_services(
                     blob_service_addr,
                     directory_service_addr,
@@ -494,7 +500,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
             allow_other,
             show_xattr,
         } => {
-            let (blob_service, directory_service, path_info_service) =
+            let (blob_service, directory_service, path_info_service, _nar_calculation_service) =
                 tvix_store::utils::construct_services(
                     blob_service_addr,
                     directory_service_addr,
@@ -536,7 +542,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
             list_root,
             show_xattr,
         } => {
-            let (blob_service, directory_service, path_info_service) =
+            let (blob_service, directory_service, path_info_service, _nar_calculation_service) =
                 tvix_store::utils::construct_services(
                     blob_service_addr,
                     directory_service_addr,
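Every call site above now destructures a fourth element, the NAR calculation service, out of `tvix_store::utils::construct_services`. The helper itself is not part of this hunk; the following signature is only inferred from those call sites and may differ from the real definition:

```rust
use std::sync::Arc;
use tvix_castore::{blobservice::BlobService, directoryservice::DirectoryService};
use tvix_store::{nar::NarCalculationService, pathinfoservice::PathInfoService};

// Hypothetical shape, inferred from the destructuring patterns above;
// the real helper lives in tvix_store::utils and may differ in detail.
pub async fn construct_services(
    blob_service_addr: impl AsRef<str>,
    directory_service_addr: impl AsRef<str>,
    path_info_service_addr: impl AsRef<str>,
) -> std::io::Result<(
    Arc<dyn BlobService>,
    Arc<dyn DirectoryService>,
    Box<dyn PathInfoService>,
    Box<dyn NarCalculationService>,
)> {
    todo!()
}
```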
diff --git a/tvix/store/src/import.rs b/tvix/store/src/import.rs
index 2331fd77e..888380bca 100644
--- a/tvix/store/src/import.rs
+++ b/tvix/store/src/import.rs
@@ -11,6 +11,7 @@ use nix_compat::{
 };
 
 use crate::{
+    nar::NarCalculationService,
     pathinfoservice::PathInfoService,
     proto::{nar_info, NarInfo, PathInfo},
 };
@@ -104,25 +105,27 @@ pub fn derive_nar_ca_path_info(
 /// Ingest the given path `path` and register the resulting output path in the
 /// [`PathInfoService`] as a recursive fixed output NAR.
 #[instrument(skip_all, fields(store_name=name, path=?path), err)]
-pub async fn import_path_as_nar_ca<BS, DS, PS, P>(
+pub async fn import_path_as_nar_ca<BS, DS, PS, NS, P>(
     path: P,
     name: &str,
     blob_service: BS,
     directory_service: DS,
     path_info_service: PS,
+    nar_calculation_service: NS,
 ) -> Result<StorePath, std::io::Error>
 where
     P: AsRef<Path> + std::fmt::Debug,
     BS: BlobService + Clone,
     DS: DirectoryService,
     PS: AsRef<dyn PathInfoService>,
+    NS: NarCalculationService,
 {
     let root_node = ingest_path(blob_service, directory_service, path.as_ref())
         .await
         .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
 
-    // Ask the PathInfoService for the NAR size and sha256
-    let (nar_size, nar_sha256) = path_info_service.as_ref().calculate_nar(&root_node).await?;
+    // Ask for the NAR size and sha256
+    let (nar_size, nar_sha256) = nar_calculation_service.calculate_nar(&root_node).await?;
 
     // Calculate the output path. This might still fail, as some names are illegal.
     // FUTUREWORK: express the `name` at the type level to be valid and move the conversion
diff --git a/tvix/store/src/nar/import.rs b/tvix/store/src/nar/import.rs
index 70f8137e8..3d7c50014 100644
--- a/tvix/store/src/nar/import.rs
+++ b/tvix/store/src/nar/import.rs
@@ -1,219 +1,122 @@
-use bytes::Bytes;
-use nix_compat::nar;
-use std::io::{self, BufRead};
-use tokio_util::io::SyncIoBridge;
-use tracing::warn;
+use nix_compat::nar::reader::r#async as nar_reader;
+use tokio::{io::AsyncBufRead, sync::mpsc, try_join};
 use tvix_castore::{
     blobservice::BlobService,
-    directoryservice::{DirectoryPutter, DirectoryService},
-    proto::{self as castorepb},
-    B3Digest,
+    directoryservice::DirectoryService,
+    import::{ingest_entries, IngestionEntry, IngestionError},
+    proto::{node::Node, NamedNode},
+    PathBuf,
 };
 
-/// Accepts a reader providing a NAR.
-/// Will traverse it, uploading blobs to the given [BlobService], and
-/// directories to the given [DirectoryService].
-/// On success, the root node is returned.
-/// This function is not async (because the NAR reader is not)
-/// and calls [tokio::task::block_in_place] when interacting with backing
-/// services, so make sure to only call this with spawn_blocking.
-pub fn read_nar<R, BS, DS>(
-    r: &mut R,
+/// Ingests the contents of a NAR from an [AsyncBufRead] into the tvix store,
+/// interacting with a [BlobService] and a [DirectoryService].
+/// It returns the castore root node, or an error.
+pub async fn ingest_nar<R, BS, DS>(
     blob_service: BS,
     directory_service: DS,
-) -> io::Result<castorepb::node::Node>
+    r: &mut R,
+) -> Result<Node, IngestionError<Error>>
 where
-    R: BufRead + Send,
+    R: AsyncBufRead + Unpin + Send,
     BS: BlobService + Clone,
     DS: DirectoryService,
 {
-    let handle = tokio::runtime::Handle::current();
+    // open the NAR for reading.
+    // The NAR reader emits nodes in DFS preorder.
+    let root_node = nar_reader::open(r).await.map_err(Error::IO)?;
 
-    let directory_putter = directory_service.put_multiple_start();
+    let (tx, rx) = mpsc::channel(1);
+    let rx = tokio_stream::wrappers::ReceiverStream::new(rx);
 
-    let node = nix_compat::nar::reader::open(r)?;
-    let (root_node, mut directory_putter) = process_node(
-        handle.clone(),
-        "".into(), // this is the root node, it has an empty name
-        node,
-        blob_service,
-        directory_putter,
-    )?;
+    let produce = async move {
+        let res = produce_nar_inner(
+            blob_service,
+            root_node,
+            "root".parse().unwrap(), // HACK: ingest_entries won't accept the empty root path, so send "root" and strip it again below.
+            tx.clone(),
+        )
+        .await;
 
-    // In case the root node points to a directory, we need to close
-    // [directory_putter], and ensure the digest we got back from there matches
-    // what the root node is pointing to.
-    if let castorepb::node::Node::Directory(ref directory_node) = root_node {
-        // Close directory_putter to make sure all directories have been inserted.
-        let directory_putter_digest =
-            handle.block_on(handle.spawn(async move { directory_putter.close().await }))??;
-        let root_directory_node_digest: B3Digest =
-            directory_node.digest.clone().try_into().unwrap();
+        tx.send(res)
+            .await
+            .map_err(|e| Error::IO(std::io::Error::new(std::io::ErrorKind::BrokenPipe, e)))?;
 
-        if directory_putter_digest != root_directory_node_digest {
-            warn!(
-                root_directory_node_digest = %root_directory_node_digest,
-                directory_putter_digest =%directory_putter_digest,
-                "directory digest mismatch",
-            );
-            return Err(io::Error::new(
-                io::ErrorKind::Other,
-                "directory digest mismatch",
-            ));
-        }
-    }
-    // In case it's not a Directory, [directory_putter] doesn't need to be
-    // closed (as we didn't end up uploading anything).
-    // It can just be dropped, as documented in its trait.
+        Ok(())
+    };
+
+    let consume = ingest_entries(directory_service, rx);
+
+    let (_, node) = try_join!(produce, consume)?;
 
-    Ok(root_node)
+    // remove the fake "root" name again
+    debug_assert_eq!(&node.get_name(), b"root");
+    Ok(node.rename("".into()))
 }
 
-/// This is called on a [nar::reader::Node] and returns a [castorepb::node::Node].
-/// It does so by handling all three kinds, and recursing for directories.
-///
-/// [DirectoryPutter] is passed around, so a single instance of it can be used,
-/// which is sufficient, as this reads through the whole NAR linerarly.
-fn process_node<BS>(
-    handle: tokio::runtime::Handle,
-    name: bytes::Bytes,
-    node: nar::reader::Node,
+async fn produce_nar_inner<BS>(
     blob_service: BS,
-    directory_putter: Box<dyn DirectoryPutter>,
-) -> io::Result<(castorepb::node::Node, Box<dyn DirectoryPutter>)>
+    node: nar_reader::Node<'_, '_>,
+    path: PathBuf,
+    tx: mpsc::Sender<Result<IngestionEntry, Error>>,
+) -> Result<IngestionEntry, Error>
 where
     BS: BlobService + Clone,
 {
     Ok(match node {
-        nar::reader::Node::Symlink { target } => (
-            castorepb::node::Node::Symlink(castorepb::SymlinkNode {
-                name,
-                target: target.into(),
-            }),
-            directory_putter,
-        ),
-        nar::reader::Node::File { executable, reader } => (
-            castorepb::node::Node::File(process_file_reader(
-                handle,
-                name,
-                reader,
+        nar_reader::Node::Symlink { target } => IngestionEntry::Symlink { path, target },
+        nar_reader::Node::File {
+            executable,
+            mut reader,
+        } => {
+            let (digest, size) = {
+                let mut blob_writer = blob_service.open_write().await;
+                let size = tokio::io::copy_buf(&mut reader, &mut blob_writer).await?;
+
+                (blob_writer.close().await?, size)
+            };
+
+            IngestionEntry::Regular {
+                path,
+                size,
                 executable,
-                blob_service,
-            )?),
-            directory_putter,
-        ),
-        nar::reader::Node::Directory(dir_reader) => {
-            let (directory_node, directory_putter) =
-                process_dir_reader(handle, name, dir_reader, blob_service, directory_putter)?;
-
-            (
-                castorepb::node::Node::Directory(directory_node),
-                directory_putter,
-            )
+                digest,
+            }
+        }
+        nar_reader::Node::Directory(mut dir_reader) => {
+            while let Some(entry) = dir_reader.next().await? {
+                let mut path = path.clone();
+
+                // valid NAR names are valid castore names
+                path.try_push(entry.name)
+                    .expect("Tvix bug: failed to join name");
+
+                let entry = Box::pin(produce_nar_inner(
+                    blob_service.clone(),
+                    entry.node,
+                    path,
+                    tx.clone(),
+                ))
+                .await?;
+
+                tx.send(Ok(entry)).await.map_err(|e| {
+                    Error::IO(std::io::Error::new(std::io::ErrorKind::BrokenPipe, e))
+                })?;
+            }
+
+            IngestionEntry::Dir { path }
         }
     })
 }
 
-/// Given a name and [nar::reader::FileReader], this ingests the file into the
-/// passed [BlobService] and returns a [castorepb::FileNode].
-fn process_file_reader<BS>(
-    handle: tokio::runtime::Handle,
-    name: Bytes,
-    mut file_reader: nar::reader::FileReader,
-    executable: bool,
-    blob_service: BS,
-) -> io::Result<castorepb::FileNode>
-where
-    BS: BlobService,
-{
-    // store the length. If we read any other length, reading will fail.
-    let expected_len = file_reader.len();
-
-    // prepare writing a new blob.
-    let blob_writer = handle.block_on(async { blob_service.open_write().await });
-
-    // write the blob.
-    let mut blob_writer = {
-        let mut dst = SyncIoBridge::new(blob_writer);
-
-        file_reader.copy(&mut dst)?;
-        dst.shutdown()?;
-
-        // return back the blob_writer
-        dst.into_inner()
-    };
-
-    // close the blob_writer, retrieve the digest.
-    let blob_digest = handle.block_on(async { blob_writer.close().await })?;
-
-    Ok(castorepb::FileNode {
-        name,
-        digest: blob_digest.into(),
-        size: expected_len,
-        executable,
-    })
-}
-
-/// Given a name and [nar::reader::DirReader], this returns a [castorepb::DirectoryNode].
-/// It uses [process_node] to iterate over all children.
-///
-/// [DirectoryPutter] is passed around, so a single instance of it can be used,
-/// which is sufficient, as this reads through the whole NAR linerarly.
-fn process_dir_reader<BS>(
-    handle: tokio::runtime::Handle,
-    name: Bytes,
-    mut dir_reader: nar::reader::DirReader,
-    blob_service: BS,
-    directory_putter: Box<dyn DirectoryPutter>,
-) -> io::Result<(castorepb::DirectoryNode, Box<dyn DirectoryPutter>)>
-where
-    BS: BlobService + Clone,
-{
-    let mut directory = castorepb::Directory::default();
-
-    let mut directory_putter = directory_putter;
-    while let Some(entry) = dir_reader.next()? {
-        let (node, directory_putter_back) = process_node(
-            handle.clone(),
-            entry.name.into(),
-            entry.node,
-            blob_service.clone(),
-            directory_putter,
-        )?;
-
-        directory_putter = directory_putter_back;
-
-        match node {
-            castorepb::node::Node::Directory(node) => directory.directories.push(node),
-            castorepb::node::Node::File(node) => directory.files.push(node),
-            castorepb::node::Node::Symlink(node) => directory.symlinks.push(node),
-        }
-    }
-
-    // calculate digest and size.
-    let directory_digest = directory.digest();
-    let directory_size = directory.size();
-
-    // upload the directory. This is a bit more verbose, as we want to get back
-    // directory_putter for later reuse.
-    let directory_putter = handle.block_on(handle.spawn(async move {
-        directory_putter.put(directory).await?;
-        Ok::<_, io::Error>(directory_putter)
-    }))??;
-
-    Ok((
-        castorepb::DirectoryNode {
-            name,
-            digest: directory_digest.into(),
-            size: directory_size,
-        },
-        directory_putter,
-    ))
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+    #[error(transparent)]
+    IO(#[from] std::io::Error),
 }
 
 #[cfg(test)]
 mod test {
-    use crate::nar::read_nar;
+    use crate::nar::ingest_nar;
     use std::io::Cursor;
     use std::sync::Arc;
 
@@ -238,19 +141,13 @@ mod test {
         blob_service: Arc<dyn BlobService>,
         directory_service: Arc<dyn DirectoryService>,
     ) {
-        let handle = tokio::runtime::Handle::current();
-
-        let root_node = handle
-            .spawn_blocking(|| {
-                read_nar(
-                    &mut Cursor::new(&NAR_CONTENTS_SYMLINK.clone()),
-                    blob_service,
-                    directory_service,
-                )
-            })
-            .await
-            .unwrap()
-            .expect("must parse");
+        let root_node = ingest_nar(
+            blob_service,
+            directory_service,
+            &mut Cursor::new(&NAR_CONTENTS_SYMLINK.clone()),
+        )
+        .await
+        .expect("must parse");
 
         assert_eq!(
             castorepb::node::Node::Symlink(castorepb::SymlinkNode {
@@ -267,22 +164,13 @@ mod test {
         blob_service: Arc<dyn BlobService>,
         directory_service: Arc<dyn DirectoryService>,
     ) {
-        let handle = tokio::runtime::Handle::current();
-
-        let root_node = handle
-            .spawn_blocking({
-                let blob_service = blob_service.clone();
-                move || {
-                    read_nar(
-                        &mut Cursor::new(&NAR_CONTENTS_HELLOWORLD.clone()),
-                        blob_service,
-                        directory_service,
-                    )
-                }
-            })
-            .await
-            .unwrap()
-            .expect("must parse");
+        let root_node = ingest_nar(
+            blob_service.clone(),
+            directory_service,
+            &mut Cursor::new(&NAR_CONTENTS_HELLOWORLD.clone()),
+        )
+        .await
+        .expect("must parse");
 
         assert_eq!(
             castorepb::node::Node::File(castorepb::FileNode {
@@ -304,23 +192,13 @@ mod test {
         blob_service: Arc<dyn BlobService>,
         directory_service: Arc<dyn DirectoryService>,
     ) {
-        let handle = tokio::runtime::Handle::current();
-
-        let root_node = handle
-            .spawn_blocking({
-                let blob_service = blob_service.clone();
-                let directory_service = directory_service.clone();
-                || {
-                    read_nar(
-                        &mut Cursor::new(&NAR_CONTENTS_COMPLICATED.clone()),
-                        blob_service,
-                        directory_service,
-                    )
-                }
-            })
-            .await
-            .unwrap()
-            .expect("must parse");
+        let root_node = ingest_nar(
+            blob_service.clone(),
+            directory_service.clone(),
+            &mut Cursor::new(&NAR_CONTENTS_COMPLICATED.clone()),
+        )
+        .await
+        .expect("must parse");
 
         assert_eq!(
             castorepb::node::Node::Directory(castorepb::DirectoryNode {
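The rewritten importer is built around a producer/consumer pair joined with `try_join!`: the NAR walk pushes `IngestionEntry` values into a bounded channel while `ingest_entries` drains it and builds directories. The same shape, stripped of the tvix types (generic names, illustrative only):

```rust
use tokio::{sync::mpsc, try_join};

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<u32>(1);

    // Producer: walks the source and sends entries, like the NAR DFS above.
    let produce = async move {
        for entry in 0..3u32 {
            tx.send(entry).await.map_err(|_| "consumer went away")?;
        }
        Ok::<_, &str>(())
    };

    // Consumer: folds the entries into a final value, like ingest_entries.
    let consume = async move {
        let mut sum = 0;
        while let Some(entry) = rx.recv().await {
            sum += entry;
        }
        Ok::<_, &str>(sum)
    };

    // Both sides run concurrently; an error on either aborts the join.
    let (_, sum) = try_join!(produce, consume).expect("must succeed");
    assert_eq!(sum, 3);
}
```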
diff --git a/tvix/store/src/nar/mod.rs b/tvix/store/src/nar/mod.rs
index 49bb92fb0..164748a65 100644
--- a/tvix/store/src/nar/mod.rs
+++ b/tvix/store/src/nar/mod.rs
@@ -1,10 +1,36 @@
+use tonic::async_trait;
 use tvix_castore::B3Digest;
 
 mod import;
 mod renderer;
-pub use import::read_nar;
+pub use import::ingest_nar;
 pub use renderer::calculate_size_and_sha256;
 pub use renderer::write_nar;
+pub use renderer::SimpleRenderer;
+use tvix_castore::proto as castorepb;
+
+#[async_trait]
+pub trait NarCalculationService: Send + Sync {
+    /// Return the nar size and nar sha256 digest for a given root node.
+    /// This can be used to calculate NAR-based output paths.
+    async fn calculate_nar(
+        &self,
+        root_node: &castorepb::node::Node,
+    ) -> Result<(u64, [u8; 32]), tvix_castore::Error>;
+}
+
+#[async_trait]
+impl<A> NarCalculationService for A
+where
+    A: AsRef<dyn NarCalculationService> + Send + Sync,
+{
+    async fn calculate_nar(
+        &self,
+        root_node: &castorepb::node::Node,
+    ) -> Result<(u64, [u8; 32]), tvix_castore::Error> {
+        self.as_ref().calculate_nar(root_node).await
+    }
+}
 
 /// Errors that can be encountered while rendering NARs.
 #[derive(Debug, thiserror::Error)]
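The blanket impl above is what lets the tvix-store binary hand an `Arc<dyn NarCalculationService>` to generic `NS: NarCalculationService` parameters and clone it cheaply per task. The mechanics in isolation (a stand-in trait, not the tvix types):

```rust
use std::sync::Arc;
use tonic::async_trait;

#[async_trait]
trait NarCalc: Send + Sync {
    async fn calculate(&self) -> u64;
}

// Same shape as the blanket impl above: anything that can lend out a
// trait object is itself an implementation.
#[async_trait]
impl<A: AsRef<dyn NarCalc> + Send + Sync> NarCalc for A {
    async fn calculate(&self) -> u64 {
        self.as_ref().calculate().await
    }
}

struct Fixed;

#[async_trait]
impl NarCalc for Fixed {
    async fn calculate(&self) -> u64 {
        42
    }
}

async fn takes_generic<NS: NarCalc>(ns: NS) -> u64 {
    ns.calculate().await
}

#[tokio::main]
async fn main() {
    let shared: Arc<dyn NarCalc> = Arc::new(Fixed);
    // The Arc clones cheaply per task and still satisfies the bound.
    assert_eq!(takes_generic(shared.clone()).await, 42);
}
```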
diff --git a/tvix/store/src/nar/renderer.rs b/tvix/store/src/nar/renderer.rs
index 0816b8e97..efd67671d 100644
--- a/tvix/store/src/nar/renderer.rs
+++ b/tvix/store/src/nar/renderer.rs
@@ -1,17 +1,51 @@
 use crate::utils::AsyncIoBridge;
 
-use super::RenderError;
-use async_recursion::async_recursion;
+use super::{NarCalculationService, RenderError};
 use count_write::CountWrite;
 use nix_compat::nar::writer::r#async as nar_writer;
 use sha2::{Digest, Sha256};
 use tokio::io::{self, AsyncWrite, BufReader};
+use tonic::async_trait;
 use tvix_castore::{
     blobservice::BlobService,
     directoryservice::DirectoryService,
     proto::{self as castorepb, NamedNode},
 };
 
+pub struct SimpleRenderer<BS, DS> {
+    blob_service: BS,
+    directory_service: DS,
+}
+
+impl<BS, DS> SimpleRenderer<BS, DS> {
+    pub fn new(blob_service: BS, directory_service: DS) -> Self {
+        Self {
+            blob_service,
+            directory_service,
+        }
+    }
+}
+
+#[async_trait]
+impl<BS, DS> NarCalculationService for SimpleRenderer<BS, DS>
+where
+    BS: BlobService + Clone,
+    DS: DirectoryService + Clone,
+{
+    async fn calculate_nar(
+        &self,
+        root_node: &castorepb::node::Node,
+    ) -> Result<(u64, [u8; 32]), tvix_castore::Error> {
+        calculate_size_and_sha256(
+            root_node,
+            self.blob_service.clone(),
+            self.directory_service.clone(),
+        )
+        .await
+        .map_err(|e| tvix_castore::Error::StorageError(format!("failed rendering nar: {}", e)))
+    }
+}
+
 /// Invoke [write_nar], and return the size and sha256 digest of the produced
 /// NAR output.
 pub async fn calculate_size_and_sha256<BS, DS>(
@@ -72,9 +106,8 @@ where
 
 /// Process an intermediate node in the structure.
 /// This consumes the node.
-#[async_recursion]
 async fn walk_node<BS, DS>(
-    nar_node: nar_writer::Node<'async_recursion, '_>,
+    nar_node: nar_writer::Node<'_, '_>,
     proto_node: &castorepb::node::Node,
     blob_service: BS,
     directory_service: DS,
@@ -164,9 +197,13 @@ where
                             .await
                             .map_err(RenderError::NARWriterError)?;
 
-                        (blob_service, directory_service) =
-                            walk_node(child_node, &proto_node, blob_service, directory_service)
-                                .await?;
+                        (blob_service, directory_service) = Box::pin(walk_node(
+                            child_node,
+                            &proto_node,
+                            blob_service,
+                            directory_service,
+                        ))
+                        .await?;
                     }
 
                     // close the directory
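walk_node now recurses through an explicit `Box::pin` instead of the `async_recursion` macro; a recursive `async fn` needs its recursive call boxed so the generated future has a known size. The pattern in miniature:

```rust
// A plain async fn may recurse if the recursive call is boxed, giving
// the compiler a sized future to embed at the call site.
async fn depth(n: u64) -> u64 {
    if n == 0 {
        return 0;
    }
    1 + Box::pin(depth(n - 1)).await
}

#[tokio::main]
async fn main() {
    assert_eq!(depth(3).await, 3);
}
```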
diff --git a/tvix/store/src/pathinfoservice/bigtable.rs b/tvix/store/src/pathinfoservice/bigtable.rs
index 6fb52abbf..7df9989fc 100644
--- a/tvix/store/src/pathinfoservice/bigtable.rs
+++ b/tvix/store/src/pathinfoservice/bigtable.rs
@@ -6,12 +6,12 @@ use bigtable_rs::{bigtable, google::bigtable::v2 as bigtable_v2};
 use bytes::Bytes;
 use data_encoding::HEXLOWER;
 use futures::stream::BoxStream;
+use nix_compat::nixbase32;
 use prost::Message;
 use serde::{Deserialize, Serialize};
 use serde_with::{serde_as, DurationSeconds};
 use tonic::async_trait;
-use tracing::trace;
-use tvix_castore::proto as castorepb;
+use tracing::{instrument, trace};
 use tvix_castore::Error;
 
 /// There should not be more than 10 MiB in a single cell.
@@ -182,6 +182,7 @@ fn derive_pathinfo_key(digest: &[u8; 20]) -> String {
 
 #[async_trait]
 impl PathInfoService for BigtablePathInfoService {
+    #[instrument(level = "trace", skip_all, fields(path_info.digest = nixbase32::encode(&digest)))]
     async fn get(&self, digest: [u8; 20]) -> Result<Option<PathInfo>, Error> {
         let mut client = self.client.clone();
         let path_info_key = derive_pathinfo_key(&digest);
@@ -278,6 +279,7 @@ impl PathInfoService for BigtablePathInfoService {
         Ok(Some(path_info))
     }
 
+    #[instrument(level = "trace", skip_all, fields(path_info.root_node = ?path_info.node))]
     async fn put(&self, path_info: PathInfo) -> Result<PathInfo, Error> {
         let store_path = path_info
             .validate()
@@ -330,13 +332,6 @@ impl PathInfoService for BigtablePathInfoService {
         Ok(path_info)
     }
 
-    async fn calculate_nar(
-        &self,
-        _root_node: &castorepb::node::Node,
-    ) -> Result<(u64, [u8; 32]), Error> {
-        return Err(Error::StorageError("unimplemented".into()));
-    }
-
     fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
         let mut client = self.client.clone();
 
diff --git a/tvix/store/src/pathinfoservice/combinators.rs b/tvix/store/src/pathinfoservice/combinators.rs
new file mode 100644
index 000000000..664144ef4
--- /dev/null
+++ b/tvix/store/src/pathinfoservice/combinators.rs
@@ -0,0 +1,111 @@
+use crate::proto::PathInfo;
+use futures::stream::BoxStream;
+use nix_compat::nixbase32;
+use tonic::async_trait;
+use tracing::{debug, instrument};
+use tvix_castore::Error;
+
+use super::PathInfoService;
+
+/// Asks the near service first; if the PathInfo is not found there, asks far.
+/// If found in far, returns it, and also *inserts* it into near.
+/// There is no negative cache.
+/// Puts and listings are not implemented for now.
+pub struct Cache<PS1, PS2> {
+    near: PS1,
+    far: PS2,
+}
+
+impl<PS1, PS2> Cache<PS1, PS2> {
+    pub fn new(near: PS1, far: PS2) -> Self {
+        Self { near, far }
+    }
+}
+
+#[async_trait]
+impl<PS1, PS2> PathInfoService for Cache<PS1, PS2>
+where
+    PS1: PathInfoService,
+    PS2: PathInfoService,
+{
+    #[instrument(level = "trace", skip_all, fields(path_info.digest = nixbase32::encode(&digest)))]
+    async fn get(&self, digest: [u8; 20]) -> Result<Option<PathInfo>, Error> {
+        match self.near.get(digest).await? {
+            Some(path_info) => {
+                debug!("serving from cache");
+                Ok(Some(path_info))
+            }
+            None => {
+                debug!("not found in near, asking remote…");
+                match self.far.get(digest).await? {
+                    None => Ok(None),
+                    Some(path_info) => {
+                        debug!("found in remote, adding to cache");
+                        self.near.put(path_info.clone()).await?;
+                        Ok(Some(path_info))
+                    }
+                }
+            }
+        }
+    }
+
+    async fn put(&self, _path_info: PathInfo) -> Result<PathInfo, Error> {
+        Err(Error::StorageError("unimplemented".to_string()))
+    }
+
+    fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
+        Box::pin(tokio_stream::once(Err(Error::StorageError(
+            "unimplemented".to_string(),
+        ))))
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::num::NonZeroUsize;
+
+    use crate::{
+        pathinfoservice::{LruPathInfoService, MemoryPathInfoService, PathInfoService},
+        tests::fixtures::PATH_INFO_WITH_NARINFO,
+    };
+
+    const PATH_INFO_DIGEST: [u8; 20] = [0; 20];
+
+    /// Helper function setting up a Cache combining a "near" and a "far"
+    /// PathInfoService.
+    async fn create_pathinfoservice() -> super::Cache<LruPathInfoService, MemoryPathInfoService> {
+        // Create an instance of a "far" PathInfoService.
+        let far = MemoryPathInfoService::default();
+
+        // … and an instance of a "near" PathInfoService.
+        let near = LruPathInfoService::with_capacity(NonZeroUsize::new(1).unwrap());
+
+        // Create a PathInfoService combining the two and return it.
+        super::Cache::new(near, far)
+    }
+
+    /// Getting from the far backend inserts the PathInfo into the near one.
+    #[tokio::test]
+    async fn test_populate_cache() {
+        let svc = create_pathinfoservice().await;
+
+        // query the PathInfo; it should not be there yet.
+        assert!(svc.get(PATH_INFO_DIGEST).await.unwrap().is_none());
+
+        // insert it into the far one.
+        svc.far.put(PATH_INFO_WITH_NARINFO.clone()).await.unwrap();
+
+        // now try getting it again, it should succeed.
+        assert_eq!(
+            Some(PATH_INFO_WITH_NARINFO.clone()),
+            svc.get(PATH_INFO_DIGEST).await.unwrap()
+        );
+
+        // peek near, it should now be there.
+        assert_eq!(
+            Some(PATH_INFO_WITH_NARINFO.clone()),
+            svc.near.get(PATH_INFO_DIGEST).await.unwrap()
+        );
+    }
+}
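As a usage sketch, the combinator composes with the LRU service added later in this commit; a hypothetical helper putting a small near cache in front of an arbitrary far backend (names from this crate's `pathinfoservice` module, with `Cache` re-exported as `CachePathInfoService`):

```rust
use std::num::NonZeroUsize;

use crate::pathinfoservice::{CachePathInfoService, LruPathInfoService, PathInfoService};

/// Hypothetical helper: a bounded LRU in front of any far PathInfoService.
fn with_lru_front<PS: PathInfoService>(
    far: PS,
) -> CachePathInfoService<LruPathInfoService, PS> {
    CachePathInfoService::new(
        LruPathInfoService::with_capacity(NonZeroUsize::new(1024).unwrap()),
        far,
    )
}
```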
diff --git a/tvix/store/src/pathinfoservice/from_addr.rs b/tvix/store/src/pathinfoservice/from_addr.rs
index f22884ca4..455909e7f 100644
--- a/tvix/store/src/pathinfoservice/from_addr.rs
+++ b/tvix/store/src/pathinfoservice/from_addr.rs
@@ -47,7 +47,7 @@ pub async fn from_addr(
             if url.has_host() || !url.path().is_empty() {
                 return Err(Error::StorageError("invalid url".to_string()));
             }
-            Box::new(MemoryPathInfoService::new(blob_service, directory_service))
+            Box::<MemoryPathInfoService>::default()
         }
         "sled" => {
             // sled doesn't support host, and a path can be provided (otherwise
@@ -65,10 +65,10 @@ pub async fn from_addr(
             // TODO: expose other parameters as URL parameters?
 
             Box::new(if url.path().is_empty() {
-                SledPathInfoService::new_temporary(blob_service, directory_service)
+                SledPathInfoService::new_temporary()
                     .map_err(|e| Error::StorageError(e.to_string()))?
             } else {
-                SledPathInfoService::new(url.path(), blob_service, directory_service)
+                SledPathInfoService::new(url.path())
                     .map_err(|e| Error::StorageError(e.to_string()))?
             })
         }
diff --git a/tvix/store/src/pathinfoservice/grpc.rs b/tvix/store/src/pathinfoservice/grpc.rs
index 1138ebdc1..93d2d67c3 100644
--- a/tvix/store/src/pathinfoservice/grpc.rs
+++ b/tvix/store/src/pathinfoservice/grpc.rs
@@ -1,8 +1,11 @@
 use super::PathInfoService;
-use crate::proto::{self, ListPathInfoRequest, PathInfo};
+use crate::{
+    nar::NarCalculationService,
+    proto::{self, ListPathInfoRequest, PathInfo},
+};
 use async_stream::try_stream;
-use data_encoding::BASE64;
 use futures::stream::BoxStream;
+use nix_compat::nixbase32;
 use tonic::{async_trait, transport::Channel, Code};
 use tracing::instrument;
 use tvix_castore::{proto as castorepb, Error};
@@ -27,7 +30,7 @@ impl GRPCPathInfoService {
 
 #[async_trait]
 impl PathInfoService for GRPCPathInfoService {
-    #[instrument(level = "trace", skip_all, fields(path_info.digest = BASE64.encode(&digest)))]
+    #[instrument(level = "trace", skip_all, fields(path_info.digest = nixbase32::encode(&digest)))]
     async fn get(&self, digest: [u8; 20]) -> Result<Option<PathInfo>, Error> {
         let path_info = self
             .grpc_client
@@ -67,30 +70,6 @@ impl PathInfoService for GRPCPathInfoService {
         Ok(path_info)
     }
 
-    #[instrument(level = "trace", skip_all, fields(root_node = ?root_node))]
-    async fn calculate_nar(
-        &self,
-        root_node: &castorepb::node::Node,
-    ) -> Result<(u64, [u8; 32]), Error> {
-        let path_info = self
-            .grpc_client
-            .clone()
-            .calculate_nar(castorepb::Node {
-                node: Some(root_node.clone()),
-            })
-            .await
-            .map_err(|e| Error::StorageError(e.to_string()))?
-            .into_inner();
-
-        let nar_sha256: [u8; 32] = path_info
-            .nar_sha256
-            .to_vec()
-            .try_into()
-            .map_err(|_e| Error::StorageError("invalid digest length".to_string()))?;
-
-        Ok((path_info.nar_size, nar_sha256))
-    }
-
     #[instrument(level = "trace", skip_all)]
     fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
         let mut grpc_client = self.grpc_client.clone();
@@ -126,87 +105,45 @@ impl PathInfoService for GRPCPathInfoService {
     }
 }
 
+#[async_trait]
+impl NarCalculationService for GRPCPathInfoService {
+    #[instrument(level = "trace", skip_all, fields(root_node = ?root_node))]
+    async fn calculate_nar(
+        &self,
+        root_node: &castorepb::node::Node,
+    ) -> Result<(u64, [u8; 32]), Error> {
+        let path_info = self
+            .grpc_client
+            .clone()
+            .calculate_nar(castorepb::Node {
+                node: Some(root_node.clone()),
+            })
+            .await
+            .map_err(|e| Error::StorageError(e.to_string()))?
+            .into_inner();
+
+        let nar_sha256: [u8; 32] = path_info
+            .nar_sha256
+            .to_vec()
+            .try_into()
+            .map_err(|_e| Error::StorageError("invalid digest length".to_string()))?;
+
+        Ok((path_info.nar_size, nar_sha256))
+    }
+}
+
 #[cfg(test)]
 mod tests {
-    use std::sync::Arc;
-    use std::time::Duration;
-
-    use rstest::*;
-    use tempfile::TempDir;
-    use tokio::net::UnixListener;
-    use tokio_retry::strategy::ExponentialBackoff;
-    use tokio_retry::Retry;
-    use tokio_stream::wrappers::UnixListenerStream;
-    use tvix_castore::blobservice::BlobService;
-    use tvix_castore::directoryservice::DirectoryService;
-
-    use crate::pathinfoservice::MemoryPathInfoService;
-    use crate::proto::path_info_service_client::PathInfoServiceClient;
-    use crate::proto::GRPCPathInfoServiceWrapper;
-    use crate::tests::fixtures::{self, blob_service, directory_service};
-
-    use super::GRPCPathInfoService;
-    use super::PathInfoService;
+    use crate::pathinfoservice::tests::make_grpc_path_info_service_client;
+    use crate::tests::fixtures;
 
     /// This ensures connecting via gRPC works as expected.
-    #[rstest]
     #[tokio::test]
-    async fn test_valid_unix_path_ping_pong(
-        blob_service: Arc<dyn BlobService>,
-        directory_service: Arc<dyn DirectoryService>,
-    ) {
-        let tmpdir = TempDir::new().unwrap();
-        let socket_path = tmpdir.path().join("daemon");
-
-        let path_clone = socket_path.clone();
-
-        // Spin up a server
-        tokio::spawn(async {
-            let uds = UnixListener::bind(path_clone).unwrap();
-            let uds_stream = UnixListenerStream::new(uds);
-
-            // spin up a new server
-            let mut server = tonic::transport::Server::builder();
-            let router = server.add_service(
-                crate::proto::path_info_service_server::PathInfoServiceServer::new(
-                    GRPCPathInfoServiceWrapper::new(Box::new(MemoryPathInfoService::new(
-                        blob_service,
-                        directory_service,
-                    ))
-                        as Box<dyn PathInfoService>),
-                ),
-            );
-            router.serve_with_incoming(uds_stream).await
-        });
-
-        // wait for the socket to be created
-        Retry::spawn(
-            ExponentialBackoff::from_millis(20).max_delay(Duration::from_secs(10)),
-            || async {
-                if socket_path.exists() {
-                    Ok(())
-                } else {
-                    Err(())
-                }
-            },
-        )
-        .await
-        .expect("failed to wait for socket");
-
-        // prepare a client
-        let grpc_client = {
-            let url = url::Url::parse(&format!("grpc+unix://{}", socket_path.display()))
-                .expect("must parse");
-            let client = PathInfoServiceClient::new(
-                tvix_castore::tonic::channel_from_url(&url)
-                    .await
-                    .expect("must succeed"),
-            );
-
-            GRPCPathInfoService::from_client(client)
-        };
+    async fn test_valid_unix_path_ping_pong() {
+        let (_blob_service, _directory_service, path_info_service) =
+            make_grpc_path_info_service_client().await;
 
-        let path_info = grpc_client
+        let path_info = path_info_service
             .get(fixtures::DUMMY_PATH_DIGEST)
             .await
             .expect("must not be error");
diff --git a/tvix/store/src/pathinfoservice/lru.rs b/tvix/store/src/pathinfoservice/lru.rs
new file mode 100644
index 000000000..da674f497
--- /dev/null
+++ b/tvix/store/src/pathinfoservice/lru.rs
@@ -0,0 +1,128 @@
+use async_stream::try_stream;
+use futures::stream::BoxStream;
+use lru::LruCache;
+use nix_compat::nixbase32;
+use std::num::NonZeroUsize;
+use std::sync::Arc;
+use tokio::sync::RwLock;
+use tonic::async_trait;
+use tracing::instrument;
+
+use crate::proto::PathInfo;
+use tvix_castore::Error;
+
+use super::PathInfoService;
+
+pub struct LruPathInfoService {
+    lru: Arc<RwLock<LruCache<[u8; 20], PathInfo>>>,
+}
+
+impl LruPathInfoService {
+    pub fn with_capacity(capacity: NonZeroUsize) -> Self {
+        Self {
+            lru: Arc::new(RwLock::new(LruCache::new(capacity))),
+        }
+    }
+}
+
+#[async_trait]
+impl PathInfoService for LruPathInfoService {
+    #[instrument(level = "trace", skip_all, fields(path_info.digest = nixbase32::encode(&digest)))]
+    async fn get(&self, digest: [u8; 20]) -> Result<Option<PathInfo>, Error> {
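+        // LruCache::get updates recency bookkeeping (it takes &mut self),
+        // so even lookups go through the write lock.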
+        Ok(self.lru.write().await.get(&digest).cloned())
+    }
+
+    #[instrument(level = "trace", skip_all, fields(path_info.root_node = ?path_info.node))]
+    async fn put(&self, path_info: PathInfo) -> Result<PathInfo, Error> {
+        // call validate
+        let store_path = path_info
+            .validate()
+            .map_err(|e| Error::InvalidRequest(format!("invalid PathInfo: {}", e)))?;
+
+        self.lru
+            .write()
+            .await
+            .put(*store_path.digest(), path_info.clone());
+
+        Ok(path_info)
+    }
+
+    fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
+        let lru = self.lru.clone();
+        Box::pin(try_stream! {
+            let lru = lru.read().await;
+            let it = lru.iter();
+
+            for (_k,v) in it {
+                yield v.clone()
+            }
+        })
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::num::NonZeroUsize;
+
+    use crate::{
+        pathinfoservice::{LruPathInfoService, PathInfoService},
+        proto::PathInfo,
+        tests::fixtures::PATH_INFO_WITH_NARINFO,
+    };
+    use lazy_static::lazy_static;
+    use tvix_castore::proto as castorepb;
+
+    lazy_static! {
+        static ref PATHINFO_1: PathInfo = PATH_INFO_WITH_NARINFO.clone();
+        static ref PATHINFO_1_DIGEST: [u8; 20] = [0; 20];
+        static ref PATHINFO_2: PathInfo = {
+            let mut p = PATHINFO_1.clone();
+            let root_node = p.node.as_mut().unwrap();
+            if let castorepb::Node { node: Some(node) } = root_node {
+                let n = node.to_owned();
+                *node = n.rename("11111111111111111111111111111111-dummy2".into());
+            } else {
+                unreachable!()
+            }
+            p
+        };
+        static ref PATHINFO_2_DIGEST: [u8; 20] = *(PATHINFO_2.validate().unwrap()).digest();
+    }
+
+    #[tokio::test]
+    async fn evict() {
+        let svc = LruPathInfoService::with_capacity(NonZeroUsize::new(1).unwrap());
+
+        // pathinfo_1 should not be there
+        assert!(svc
+            .get(*PATHINFO_1_DIGEST)
+            .await
+            .expect("no error")
+            .is_none());
+
+        // insert it
+        svc.put(PATHINFO_1.clone()).await.expect("no error");
+
+        // now it should be there.
+        assert_eq!(
+            Some(PATHINFO_1.clone()),
+            svc.get(*PATHINFO_1_DIGEST).await.expect("no error")
+        );
+
+        // insert pathinfo_2. This will evict pathinfo 1
+        svc.put(PATHINFO_2.clone()).await.expect("no error");
+
+        // now pathinfo 2 should be there.
+        assert_eq!(
+            Some(PATHINFO_2.clone()),
+            svc.get(*PATHINFO_2_DIGEST).await.expect("no error")
+        );
+
+        // … but pathinfo 1 not anymore.
+        assert!(svc
+            .get(*PATHINFO_1_DIGEST)
+            .await
+            .expect("no error")
+            .is_none());
+    }
+}
diff --git a/tvix/store/src/pathinfoservice/memory.rs b/tvix/store/src/pathinfoservice/memory.rs
index f8435dbbf..3de3221df 100644
--- a/tvix/store/src/pathinfoservice/memory.rs
+++ b/tvix/store/src/pathinfoservice/memory.rs
@@ -1,40 +1,24 @@
 use super::PathInfoService;
-use crate::{nar::calculate_size_and_sha256, proto::PathInfo};
-use futures::stream::{iter, BoxStream};
-use std::{
-    collections::HashMap,
-    sync::{Arc, RwLock},
-};
+use crate::proto::PathInfo;
+use async_stream::try_stream;
+use futures::stream::BoxStream;
+use nix_compat::nixbase32;
+use std::{collections::HashMap, sync::Arc};
+use tokio::sync::RwLock;
 use tonic::async_trait;
-use tvix_castore::proto as castorepb;
+use tracing::instrument;
 use tvix_castore::Error;
-use tvix_castore::{blobservice::BlobService, directoryservice::DirectoryService};
 
-pub struct MemoryPathInfoService<BS, DS> {
+#[derive(Default)]
+pub struct MemoryPathInfoService {
     db: Arc<RwLock<HashMap<[u8; 20], PathInfo>>>,
-
-    blob_service: BS,
-    directory_service: DS,
-}
-
-impl<BS, DS> MemoryPathInfoService<BS, DS> {
-    pub fn new(blob_service: BS, directory_service: DS) -> Self {
-        Self {
-            db: Default::default(),
-            blob_service,
-            directory_service,
-        }
-    }
 }
 
 #[async_trait]
-impl<BS, DS> PathInfoService for MemoryPathInfoService<BS, DS>
-where
-    BS: AsRef<dyn BlobService> + Send + Sync,
-    DS: AsRef<dyn DirectoryService> + Send + Sync,
-{
+impl PathInfoService for MemoryPathInfoService {
+    #[instrument(level = "trace", skip_all, fields(path_info.digest = nixbase32::encode(&digest)))]
     async fn get(&self, digest: [u8; 20]) -> Result<Option<PathInfo>, Error> {
-        let db = self.db.read().unwrap();
+        let db = self.db.read().await;
 
         match db.get(&digest) {
             None => Ok(None),
@@ -42,6 +26,7 @@ where
         }
     }
 
+    #[instrument(level = "trace", skip_all, fields(path_info.root_node = ?path_info.node))]
     async fn put(&self, path_info: PathInfo) -> Result<PathInfo, Error> {
         // Call validate on the received PathInfo message.
         match path_info.validate() {
@@ -53,7 +38,7 @@ where
             // In case the PathInfo is valid, and we were able to extract a NixPath, store it in the database.
             // This overwrites existing PathInfo objects.
             Ok(nix_path) => {
-                let mut db = self.db.write().unwrap();
+                let mut db = self.db.write().await;
                 db.insert(*nix_path.digest(), path_info.clone());
 
                 Ok(path_info)
@@ -61,24 +46,16 @@ where
         }
     }
 
-    async fn calculate_nar(
-        &self,
-        root_node: &castorepb::node::Node,
-    ) -> Result<(u64, [u8; 32]), Error> {
-        calculate_size_and_sha256(root_node, &self.blob_service, &self.directory_service)
-            .await
-            .map_err(|e| Error::StorageError(e.to_string()))
-    }
-
     fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
-        let db = self.db.read().unwrap();
+        let db = self.db.clone();
 
-        // Copy all elements into a list.
-        // This is a bit ugly, because we can't have db escape the lifetime
-        // of this function, but elements need to be returned owned anyways, and this in-
-        // memory impl is only for testing purposes anyways.
-        let items: Vec<_> = db.iter().map(|(_k, v)| Ok(v.clone())).collect();
+        Box::pin(try_stream! {
+            let db = db.read().await;
+            let it = db.iter();
 
-        Box::pin(iter(items))
+            for (_k, v) in it {
+                yield v.clone()
+            }
+        })
     }
 }
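+
+// A minimal consumption sketch (hypothetical, not part of this change):
+// the `BoxStream` returned by `list()` can be drained with `TryStreamExt`.
+//
+// use futures::TryStreamExt;
+// let svc = MemoryPathInfoService::default();
+// let mut stream = svc.list();
+// while let Some(path_info) = stream.try_next().await? {
+//     println!("{:?}", path_info.node);
+// }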
diff --git a/tvix/store/src/pathinfoservice/mod.rs b/tvix/store/src/pathinfoservice/mod.rs
index c1a482bbb..574bcc0b8 100644
--- a/tvix/store/src/pathinfoservice/mod.rs
+++ b/tvix/store/src/pathinfoservice/mod.rs
@@ -1,5 +1,7 @@
+mod combinators;
 mod from_addr;
 mod grpc;
+mod lru;
 mod memory;
 mod nix_http;
 mod sled;
@@ -12,13 +14,14 @@ mod tests;
 
 use futures::stream::BoxStream;
 use tonic::async_trait;
-use tvix_castore::proto as castorepb;
 use tvix_castore::Error;
 
 use crate::proto::PathInfo;
 
+pub use self::combinators::Cache as CachePathInfoService;
 pub use self::from_addr::from_addr;
 pub use self::grpc::GRPCPathInfoService;
+pub use self::lru::LruPathInfoService;
 pub use self::memory::MemoryPathInfoService;
 pub use self::nix_http::NixHTTPPathInfoService;
 pub use self::sled::SledPathInfoService;
@@ -41,14 +44,6 @@ pub trait PathInfoService: Send + Sync {
     /// invalid messages.
     async fn put(&self, path_info: PathInfo) -> Result<PathInfo, Error>;
 
-    /// Return the nar size and nar sha256 digest for a given root node.
-    /// This can be used to calculate NAR-based output paths,
-    /// and implementations are encouraged to cache it.
-    async fn calculate_nar(
-        &self,
-        root_node: &castorepb::node::Node,
-    ) -> Result<(u64, [u8; 32]), Error>;
-
     /// Iterate over all PathInfo objects in the store.
     /// Implementations can decide to disallow listing.
     ///
@@ -72,13 +67,6 @@ where
         self.as_ref().put(path_info).await
     }
 
-    async fn calculate_nar(
-        &self,
-        root_node: &castorepb::node::Node,
-    ) -> Result<(u64, [u8; 32]), Error> {
-        self.as_ref().calculate_nar(root_node).await
-    }
-
     fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
         self.as_ref().list()
     }
diff --git a/tvix/store/src/pathinfoservice/nix_http.rs b/tvix/store/src/pathinfoservice/nix_http.rs
index bdb0e2c3c..cccd4805c 100644
--- a/tvix/store/src/pathinfoservice/nix_http.rs
+++ b/tvix/store/src/pathinfoservice/nix_http.rs
@@ -1,6 +1,3 @@
-use std::io::{self, BufRead, Read, Write};
-
-use data_encoding::BASE64;
 use futures::{stream::BoxStream, TryStreamExt};
 use nix_compat::{
     narinfo::{self, NarInfo},
@@ -8,7 +5,10 @@ use nix_compat::{
     nixhash::NixHash,
 };
 use reqwest::StatusCode;
-use sha2::{digest::FixedOutput, Digest, Sha256};
+use sha2::Digest;
+use std::io::{self, Write};
+use tokio::io::{AsyncRead, BufReader};
+use tokio_util::io::InspectReader;
 use tonic::async_trait;
 use tracing::{debug, instrument, warn};
 use tvix_castore::{
@@ -32,8 +32,7 @@ use super::PathInfoService;
 ///
 /// The client is expected to be (indirectly) using the same [BlobService] and
 /// [DirectoryService], so able to fetch referred Directories and Blobs.
-/// [PathInfoService::put] and [PathInfoService::calculate_nar] are not
-/// implemented and return an error if called.
+/// [PathInfoService::put] is not implemented and returns an error if called.
 /// TODO: what about reading from nix-cache-info?
 pub struct NixHTTPPathInfoService<BS, DS> {
     base_url: url::Url,
@@ -71,7 +70,7 @@ where
     BS: AsRef<dyn BlobService> + Send + Sync + Clone + 'static,
     DS: AsRef<dyn DirectoryService> + Send + Sync + Clone + 'static,
 {
-    #[instrument(skip_all, err, fields(path.digest=BASE64.encode(&digest)))]
+    #[instrument(skip_all, err, fields(path.digest=nixbase32::encode(&digest)))]
     async fn get(&self, digest: [u8; 20]) -> Result<Option<PathInfo>, Error> {
         let narinfo_url = self
             .base_url
@@ -171,85 +170,83 @@ where
             )));
         }
 
-        // get an AsyncRead of the response body.
-        let async_r = tokio_util::io::StreamReader::new(resp.bytes_stream().map_err(|e| {
+        // get a reader of the response body.
+        let r = tokio_util::io::StreamReader::new(resp.bytes_stream().map_err(|e| {
             let e = e.without_url();
             warn!(e=%e, "failed to get response body");
             io::Error::new(io::ErrorKind::BrokenPipe, e.to_string())
         }));
-        let sync_r = tokio_util::io::SyncIoBridge::new(async_r);
 
-        // handle decompression, by wrapping the reader.
-        let sync_r: Box<dyn BufRead + Send> = match narinfo.compression {
-            Some("none") => Box::new(sync_r),
-            Some("xz") => Box::new(io::BufReader::new(xz2::read::XzDecoder::new(sync_r))),
-            Some(comp) => {
-                return Err(Error::InvalidRequest(
-                    format!("unsupported compression: {}", comp).to_string(),
-                ))
-            }
-            None => {
-                return Err(Error::InvalidRequest(
-                    "unsupported compression: bzip2".to_string(),
-                ))
+        // handle decompression, depending on the compression field.
+        let r: Box<dyn AsyncRead + Send + Unpin> = match narinfo.compression {
+            Some("none") => Box::new(r) as Box<dyn AsyncRead + Send + Unpin>,
+            Some("bzip2") | None => Box::new(async_compression::tokio::bufread::BzDecoder::new(r))
+                as Box<dyn AsyncRead + Send + Unpin>,
+            Some("gzip") => Box::new(async_compression::tokio::bufread::GzipDecoder::new(r))
+                as Box<dyn AsyncRead + Send + Unpin>,
+            Some("xz") => Box::new(async_compression::tokio::bufread::XzDecoder::new(r))
+                as Box<dyn AsyncRead + Send + Unpin>,
+            Some("zstd") => Box::new(async_compression::tokio::bufread::ZstdDecoder::new(r))
+                as Box<dyn AsyncRead + Send + Unpin>,
+            Some(comp_str) => {
+                return Err(Error::StorageError(format!(
+                    "unsupported compression: {comp_str}"
+                )));
             }
         };
-
-        let res = tokio::task::spawn_blocking({
-            let blob_service = self.blob_service.clone();
-            let directory_service = self.directory_service.clone();
-            move || -> io::Result<_> {
-                // Wrap the reader once more, so we can calculate NarSize and NarHash
-                let mut sync_r = io::BufReader::new(NarReader::from(sync_r));
-                let root_node = crate::nar::read_nar(&mut sync_r, blob_service, directory_service)?;
-
-                let (_, nar_hash, nar_size) = sync_r.into_inner().into_inner();
-
-                Ok((root_node, nar_hash, nar_size))
-            }
-        })
+        let mut nar_hash = sha2::Sha256::new();
+        let mut nar_size = 0;
+
+        // Assemble NarHash and NarSize as we read bytes.
+        let r = InspectReader::new(r, |b| {
+            nar_size += b.len() as u64;
+            nar_hash.write_all(b).unwrap();
+        });
+
+        // HACK: InspectReader doesn't implement AsyncBufRead, but neither do our decompressors.
+        let mut r = BufReader::new(r);
+
+        let root_node = crate::nar::ingest_nar(
+            self.blob_service.clone(),
+            self.directory_service.clone(),
+            &mut r,
+        )
         .await
-        .unwrap();
-
-        match res {
-            Ok((root_node, nar_hash, nar_size)) => {
-                // ensure the ingested narhash and narsize do actually match.
-                if narinfo.nar_size != nar_size {
-                    warn!(
-                        narinfo.nar_size = narinfo.nar_size,
-                        http.nar_size = nar_size,
-                        "NARSize mismatch"
-                    );
-                    Err(io::Error::new(
-                        io::ErrorKind::InvalidData,
-                        "NarSize mismatch".to_string(),
-                    ))?;
-                }
-                if narinfo.nar_hash != nar_hash {
-                    warn!(
-                        narinfo.nar_hash = %NixHash::Sha256(narinfo.nar_hash),
-                        http.nar_hash = %NixHash::Sha256(nar_hash),
-                        "NarHash mismatch"
-                    );
-                    Err(io::Error::new(
-                        io::ErrorKind::InvalidData,
-                        "NarHash mismatch".to_string(),
-                    ))?;
-                }
-
-                Ok(Some(PathInfo {
-                    node: Some(castorepb::Node {
-                        // set the name of the root node to the digest-name of the store path.
-                        node: Some(
-                            root_node.rename(narinfo.store_path.to_string().to_owned().into()),
-                        ),
-                    }),
-                    references: pathinfo.references,
-                    narinfo: pathinfo.narinfo,
-                }))
-            }
-            Err(e) => Err(e.into()),
+        .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
+
+        // ensure the ingested narhash and narsize do actually match.
+        if narinfo.nar_size != nar_size {
+            warn!(
+                narinfo.nar_size = narinfo.nar_size,
+                http.nar_size = nar_size,
+                "NarSize mismatch"
+            );
+            Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                "NarSize mismatch".to_string(),
+            ))?;
+        }
+        let nar_hash: [u8; 32] = nar_hash.finalize().into();
+        if narinfo.nar_hash != nar_hash {
+            warn!(
+                narinfo.nar_hash = %NixHash::Sha256(narinfo.nar_hash),
+                http.nar_hash = %NixHash::Sha256(nar_hash),
+                "NarHash mismatch"
+            );
+            Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                "NarHash mismatch".to_string(),
+            ))?;
         }
+
+        Ok(Some(PathInfo {
+            node: Some(castorepb::Node {
+                // set the name of the root node to the digest-name of the store path.
+                node: Some(root_node.rename(narinfo.store_path.to_string().to_owned().into())),
+            }),
+            references: pathinfo.references,
+            narinfo: pathinfo.narinfo,
+        }))
     }
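+
+    // Sketch of the hash-while-reading pattern used above (hypothetical, for
+    // illustration only): InspectReader taps each chunk as it flows through,
+    // so NarSize and NarHash are accumulated without a second pass.
+    //
+    // let mut hasher = sha2::Sha256::new();
+    // let mut size = 0u64;
+    // let tapped = InspectReader::new(inner, |chunk| {
+    //     size += chunk.len() as u64;
+    //     hasher.update(chunk);
+    // });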
 
     #[instrument(skip_all, fields(path_info=?_path_info))]
@@ -259,16 +256,6 @@ where
         ))
     }
 
-    #[instrument(skip_all, fields(root_node=?root_node))]
-    async fn calculate_nar(
-        &self,
-        root_node: &castorepb::node::Node,
-    ) -> Result<(u64, [u8; 32]), Error> {
-        Err(Error::InvalidRequest(
-            "calculate_nar not supported for this backend".to_string(),
-        ))
-    }
-
     fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
         Box::pin(futures::stream::once(async {
             Err(Error::InvalidRequest(
@@ -277,38 +264,3 @@ where
         }))
     }
 }
-
-/// Small helper reader implementing [std::io::Read].
-/// It can be used to wrap another reader, counts the number of bytes read
-/// and the sha256 digest of the contents.
-struct NarReader<R: Read> {
-    r: R,
-
-    sha256: sha2::Sha256,
-    bytes_read: u64,
-}
-
-impl<R: Read> NarReader<R> {
-    pub fn from(inner: R) -> Self {
-        Self {
-            r: inner,
-            sha256: Sha256::new(),
-            bytes_read: 0,
-        }
-    }
-
-    /// Returns the (remaining) inner reader, the sha256 digest and the number of bytes read.
-    pub fn into_inner(self) -> (R, [u8; 32], u64) {
-        (self.r, self.sha256.finalize_fixed().into(), self.bytes_read)
-    }
-}
-
-impl<R: Read> Read for NarReader<R> {
-    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-        self.r.read(buf).map(|n| {
-            self.bytes_read += n as u64;
-            self.sha256.write_all(&buf[..n]).unwrap();
-            n
-        })
-    }
-}
diff --git a/tvix/store/src/pathinfoservice/sled.rs b/tvix/store/src/pathinfoservice/sled.rs
index 0255c031e..eb3cf2ff1 100644
--- a/tvix/store/src/pathinfoservice/sled.rs
+++ b/tvix/store/src/pathinfoservice/sled.rs
@@ -1,70 +1,55 @@
 use super::PathInfoService;
-use crate::nar::calculate_size_and_sha256;
 use crate::proto::PathInfo;
-use data_encoding::BASE64;
-use futures::stream::iter;
+use async_stream::try_stream;
 use futures::stream::BoxStream;
+use nix_compat::nixbase32;
 use prost::Message;
 use std::path::Path;
 use tonic::async_trait;
 use tracing::instrument;
 use tracing::warn;
-use tvix_castore::proto as castorepb;
-use tvix_castore::{blobservice::BlobService, directoryservice::DirectoryService, Error};
+use tvix_castore::Error;
 
 /// SledPathInfoService stores PathInfo in a [sled](https://github.com/spacejam/sled).
 ///
 /// The PathInfo messages are stored as encoded protos, and keyed by their output hash,
 /// as that's currently the only request type available.
-pub struct SledPathInfoService<BS, DS> {
+pub struct SledPathInfoService {
     db: sled::Db,
-
-    blob_service: BS,
-    directory_service: DS,
 }
 
-impl<BS, DS> SledPathInfoService<BS, DS> {
-    pub fn new<P: AsRef<Path>>(
-        p: P,
-        blob_service: BS,
-        directory_service: DS,
-    ) -> Result<Self, sled::Error> {
+impl SledPathInfoService {
+    pub fn new<P: AsRef<Path>>(p: P) -> Result<Self, sled::Error> {
         let config = sled::Config::default()
             .use_compression(false) // is a required parameter
             .path(p);
         let db = config.open()?;
 
-        Ok(Self {
-            db,
-            blob_service,
-            directory_service,
-        })
+        Ok(Self { db })
     }
 
-    pub fn new_temporary(blob_service: BS, directory_service: DS) -> Result<Self, sled::Error> {
+    pub fn new_temporary() -> Result<Self, sled::Error> {
         let config = sled::Config::default().temporary(true);
         let db = config.open()?;
 
-        Ok(Self {
-            db,
-            blob_service,
-            directory_service,
-        })
+        Ok(Self { db })
     }
 }
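+
+// Usage sketch (hypothetical; the digest access is shown for illustration):
+//
+// let svc = SledPathInfoService::new_temporary()?;
+// let stored = svc.put(path_info).await?;
+// let found = svc.get(*store_path.digest()).await?;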
 
 #[async_trait]
-impl<BS, DS> PathInfoService for SledPathInfoService<BS, DS>
-where
-    BS: AsRef<dyn BlobService> + Send + Sync,
-    DS: AsRef<dyn DirectoryService> + Send + Sync,
-{
-    #[instrument(level = "trace", skip_all, fields(path_info.digest = BASE64.encode(&digest)))]
+impl PathInfoService for SledPathInfoService {
+    #[instrument(level = "trace", skip_all, fields(path_info.digest = nixbase32::encode(&digest)))]
     async fn get(&self, digest: [u8; 20]) -> Result<Option<PathInfo>, Error> {
-        match self.db.get(digest).map_err(|e| {
+        let resp = tokio::task::spawn_blocking({
+            let db = self.db.clone();
+            move || db.get(digest.as_slice())
+        })
+        .await?
+        .map_err(|e| {
             warn!("failed to retrieve PathInfo: {}", e);
             Error::StorageError(format!("failed to retrieve PathInfo: {}", e))
-        })? {
+        })?;
+        match resp {
             None => Ok(None),
             Some(data) => {
                 let path_info = PathInfo::decode(&*data).map_err(|e| {
@@ -86,40 +71,47 @@ where
         // In case the PathInfo is valid, we were able to parse a StorePath.
         // Store it in the database, keyed by its digest.
         // This overwrites existing PathInfo objects.
-        self.db
-            .insert(store_path.digest(), path_info.encode_to_vec())
-            .map_err(|e| {
-                warn!("failed to insert PathInfo: {}", e);
-                Error::StorageError(format! {
-                    "failed to insert PathInfo: {}", e
-                })
-            })?;
+        tokio::task::spawn_blocking({
+            let db = self.db.clone();
+            let k = *store_path.digest();
+            let data = path_info.encode_to_vec();
+            move || db.insert(k, data)
+        })
+        .await?
+        .map_err(|e| {
+            warn!("failed to insert PathInfo: {}", e);
+            Error::StorageError(format! {
+                "failed to insert PathInfo: {}", e
+            })
+        })?;
 
         Ok(path_info)
     }
 
-    #[instrument(level = "trace", skip_all, fields(root_node = ?root_node))]
-    async fn calculate_nar(
-        &self,
-        root_node: &castorepb::node::Node,
-    ) -> Result<(u64, [u8; 32]), Error> {
-        calculate_size_and_sha256(root_node, &self.blob_service, &self.directory_service)
-            .await
-            .map_err(|e| Error::StorageError(e.to_string()))
-    }
-
     fn list(&self) -> BoxStream<'static, Result<PathInfo, Error>> {
-        Box::pin(iter(self.db.iter().values().map(|v| {
-            let data = v.map_err(|e| {
-                warn!("failed to retrieve PathInfo: {}", e);
-                Error::StorageError(format!("failed to retrieve PathInfo: {}", e))
-            })?;
+        let db = self.db.clone();
+        let mut it = db.iter().values();
+
+        Box::pin(try_stream! {
+            // Don't block the executor while waiting for .next(), so wrap that
+            // in a spawn_blocking call.
+            // We need to pass the iterator around so we can reuse it.
+            while let (Some(elem), new_it) = tokio::task::spawn_blocking(move || {
+                (it.next(), it)
+            }).await? {
+                it = new_it;
+                let data = elem.map_err(|e| {
+                    warn!("failed to retrieve PathInfo: {}", e);
+                    Error::StorageError(format!("failed to retrieve PathInfo: {}", e))
+                })?;
+
+                let path_info = PathInfo::decode(&*data).map_err(|e| {
+                    warn!("failed to decode stored PathInfo: {}", e);
+                    Error::StorageError(format!("failed to decode stored PathInfo: {}", e))
+                })?;
 
-            let path_info = PathInfo::decode(&*data).map_err(|e| {
-                warn!("failed to decode stored PathInfo: {}", e);
-                Error::StorageError(format!("failed to decode stored PathInfo: {}", e))
-            })?;
-            Ok(path_info)
-        })))
+                yield path_info
+            }
+        })
     }
 }
diff --git a/tvix/store/src/pathinfoservice/tests/mod.rs b/tvix/store/src/pathinfoservice/tests/mod.rs
index 971937159..26166d1b7 100644
--- a/tvix/store/src/pathinfoservice/tests/mod.rs
+++ b/tvix/store/src/pathinfoservice/tests/mod.rs
@@ -13,7 +13,7 @@ use crate::proto::PathInfo;
 use crate::tests::fixtures::DUMMY_PATH_DIGEST;
 
 mod utils;
-use self::utils::make_grpc_path_info_service_client;
+pub use self::utils::make_grpc_path_info_service_client;
 
 /// Convenience type alias batching all three services together.
 #[allow(clippy::upper_case_acronyms)]
diff --git a/tvix/store/src/pathinfoservice/tests/utils.rs b/tvix/store/src/pathinfoservice/tests/utils.rs
index 31ec57aad..30c5902b6 100644
--- a/tvix/store/src/pathinfoservice/tests/utils.rs
+++ b/tvix/store/src/pathinfoservice/tests/utils.rs
@@ -3,6 +3,7 @@ use std::sync::Arc;
 use tonic::transport::{Endpoint, Server, Uri};
 
 use crate::{
+    nar::{NarCalculationService, SimpleRenderer},
     pathinfoservice::{GRPCPathInfoService, MemoryPathInfoService, PathInfoService},
     proto::{
         path_info_service_client::PathInfoServiceClient,
@@ -26,12 +27,15 @@ pub async fn make_grpc_path_info_service_client() -> super::BSDSPS {
         let directory_service = directory_service.clone();
         async move {
             let path_info_service: Arc<dyn PathInfoService> =
-                Arc::from(MemoryPathInfoService::new(blob_service, directory_service));
+                Arc::from(MemoryPathInfoService::default());
+            let nar_calculation_service =
+                Box::new(SimpleRenderer::new(blob_service, directory_service))
+                    as Box<dyn NarCalculationService>;
 
-            // spin up a new DirectoryService
+            // spin up a new PathInfoService
             let mut server = Server::builder();
             let router = server.add_service(PathInfoServiceServer::new(
-                GRPCPathInfoServiceWrapper::new(path_info_service),
+                GRPCPathInfoServiceWrapper::new(path_info_service, nar_calculation_service),
             ));
 
             router
diff --git a/tvix/store/src/proto/grpc_pathinfoservice_wrapper.rs b/tvix/store/src/proto/grpc_pathinfoservice_wrapper.rs
index 9f4581822..68f557567 100644
--- a/tvix/store/src/proto/grpc_pathinfoservice_wrapper.rs
+++ b/tvix/store/src/proto/grpc_pathinfoservice_wrapper.rs
@@ -1,4 +1,4 @@
-use crate::nar::RenderError;
+use crate::nar::{NarCalculationService, RenderError};
 use crate::pathinfoservice::PathInfoService;
 use crate::proto;
 use futures::{stream::BoxStream, TryStreamExt};
@@ -7,23 +7,26 @@ use tonic::{async_trait, Request, Response, Result, Status};
 use tracing::{instrument, warn};
 use tvix_castore::proto as castorepb;
 
-pub struct GRPCPathInfoServiceWrapper<PS> {
-    inner: PS,
+pub struct GRPCPathInfoServiceWrapper<PS, NS> {
+    path_info_service: PS,
     // FUTUREWORK: allow exposing without allowing listing
+    nar_calculation_service: NS,
 }
 
-impl<PS> GRPCPathInfoServiceWrapper<PS> {
-    pub fn new(path_info_service: PS) -> Self {
+impl<PS, NS> GRPCPathInfoServiceWrapper<PS, NS> {
+    pub fn new(path_info_service: PS, nar_calculation_service: NS) -> Self {
         Self {
-            inner: path_info_service,
+            path_info_service,
+            nar_calculation_service,
         }
     }
 }
 
 #[async_trait]
-impl<PS> proto::path_info_service_server::PathInfoService for GRPCPathInfoServiceWrapper<PS>
+impl<PS, NS> proto::path_info_service_server::PathInfoService for GRPCPathInfoServiceWrapper<PS, NS>
 where
     PS: Deref<Target = dyn PathInfoService> + Send + Sync + 'static,
+    NS: NarCalculationService + Send + Sync + 'static,
 {
     type ListStream = BoxStream<'static, tonic::Result<proto::PathInfo, Status>>;
 
@@ -39,7 +42,7 @@ where
                     .to_vec()
                     .try_into()
                     .map_err(|_e| Status::invalid_argument("invalid output digest length"))?;
-                match self.inner.get(digest).await {
+                match self.path_info_service.get(digest).await {
                     Ok(None) => Err(Status::not_found("PathInfo not found")),
                     Ok(Some(path_info)) => Ok(Response::new(path_info)),
                     Err(e) => {
@@ -57,7 +60,7 @@ where
 
         // Store the PathInfo in the client. Clients MUST validate the data
         // they receive, so we don't validate additionally here.
-        match self.inner.put(path_info).await {
+        match self.path_info_service.put(path_info).await {
             Ok(path_info_new) => Ok(Response::new(path_info_new)),
             Err(e) => {
                 warn!(err = %e, "failed to put PathInfo");
@@ -79,7 +82,7 @@ where
                     Err(Status::invalid_argument("invalid root node"))?
                 }
 
-                match self.inner.calculate_nar(&root_node).await {
+                match self.nar_calculation_service.calculate_nar(&root_node).await {
                     Ok((nar_size, nar_sha256)) => Ok(Response::new(proto::CalculateNarResponse {
                         nar_size,
                         nar_sha256: nar_sha256.to_vec().into(),
@@ -99,7 +102,7 @@ where
         _request: Request<proto::ListPathInfoRequest>,
     ) -> Result<Response<Self::ListStream>, Status> {
         let stream = Box::pin(
-            self.inner
+            self.path_info_service
                 .list()
                 .map_err(|e| Status::internal(e.to_string())),
         );
diff --git a/tvix/store/src/utils.rs b/tvix/store/src/utils.rs
index 0b171377b..e6e42f6ec 100644
--- a/tvix/store/src/utils.rs
+++ b/tvix/store/src/utils.rs
@@ -10,9 +10,10 @@ use tvix_castore::{
     directoryservice::{self, DirectoryService},
 };
 
+use crate::nar::{NarCalculationService, SimpleRenderer};
 use crate::pathinfoservice::{self, PathInfoService};
 
-/// Construct the three store handles from their addrs.
+/// Construct the store handles from their addrs.
 pub async fn construct_services(
     blob_service_addr: impl AsRef<str>,
     directory_service_addr: impl AsRef<str>,
@@ -21,6 +22,7 @@ pub async fn construct_services(
     Arc<dyn BlobService>,
     Arc<dyn DirectoryService>,
     Box<dyn PathInfoService>,
+    Box<dyn NarCalculationService>,
 )> {
     let blob_service: Arc<dyn BlobService> = blobservice::from_addr(blob_service_addr.as_ref())
         .await?
@@ -36,7 +38,18 @@ pub async fn construct_services(
     )
     .await?;
 
-    Ok((blob_service, directory_service, path_info_service))
+    // TODO: grpc client also implements NarCalculationService
+    let nar_calculation_service = Box::new(SimpleRenderer::new(
+        blob_service.clone(),
+        directory_service.clone(),
+    )) as Box<dyn NarCalculationService>;
+
+    Ok((
+        blob_service,
+        directory_service,
+        path_info_service,
+        nar_calculation_service,
+    ))
 }
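+
+// Call-site sketch (hypothetical; assumes in-memory service addresses):
+//
+// let (blob_service, directory_service, path_info_service, nar_calculation_service) =
+//     construct_services("memory://", "memory://", "memory://").await?;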
 
 /// The inverse of [tokio_util::io::SyncIoBridge].
diff --git a/tvix/tools/crunch-v2/Cargo.lock b/tvix/tools/crunch-v2/Cargo.lock
index cff5509d0..3748d7e4e 100644
--- a/tvix/tools/crunch-v2/Cargo.lock
+++ b/tvix/tools/crunch-v2/Cargo.lock
@@ -752,6 +752,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
 
 [[package]]
+name = "enum-primitive-derive"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba7795da175654fe16979af73f81f26a8ea27638d8d9823d317016888a63dc4c"
+dependencies = [
+ "num-traits",
+ "quote",
+ "syn 2.0.39",
+]
+
+[[package]]
 name = "enum_dispatch"
 version = "0.3.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1394,12 +1405,16 @@ dependencies = [
  "data-encoding",
  "ed25519",
  "ed25519-dalek",
+ "enum-primitive-derive",
  "glob",
  "nom",
+ "num-traits",
+ "pin-project-lite",
  "serde",
  "serde_json",
  "sha2 0.10.8",
  "thiserror",
+ "tokio",
 ]
 
 [[package]]
@@ -1432,9 +1447,9 @@ dependencies = [
 
 [[package]]
 name = "num-traits"
-version = "0.2.17"
+version = "0.2.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
 dependencies = [
  "autocfg",
  "libm",
@@ -2682,9 +2697,9 @@ dependencies = [
 
 [[package]]
 name = "tokio"
-version = "1.34.0"
+version = "1.37.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9"
+checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787"
 dependencies = [
  "backtrace",
  "bytes",
diff --git a/tvix/tools/crunch-v2/Cargo.toml b/tvix/tools/crunch-v2/Cargo.toml
index 1e3f02525..d2b7126bd 100644
--- a/tvix/tools/crunch-v2/Cargo.toml
+++ b/tvix/tools/crunch-v2/Cargo.toml
@@ -14,7 +14,7 @@ bstr = "1.8.0"
 bytes = "1.5.0"
 
 futures = "0.3.29"
-tokio = { version = "1.34.0", features = ["full"] }
+tokio = { version = "1.37.0", features = ["full"] }
 
 rusoto_core = { version = "0.48.0", default-features = false, features = ["hyper-rustls"] }
 rusoto_s3 = { version = "0.48.0", default-features = false, features = ["rustls"] }
diff --git a/tvix/tools/crunch-v2/src/main.rs b/tvix/tools/crunch-v2/src/main.rs
index a5d538f6b..5be8c28e2 100644
--- a/tvix/tools/crunch-v2/src/main.rs
+++ b/tvix/tools/crunch-v2/src/main.rs
@@ -147,7 +147,7 @@ fn ingest(node: nar::Node, name: Vec<u8>, avg_chunk_size: u32) -> Result<proto::
             let mut symlinks = vec![];
 
             while let Some(node) = reader.next()? {
-                match ingest(node.node, node.name, avg_chunk_size)? {
+                match ingest(node.node, node.name.to_owned(), avg_chunk_size)? {
                     proto::path::Node::Directory(node) => {
                         directories.push(node);
                     }
diff --git a/tvix/tools/narinfo2parquet/Cargo.lock b/tvix/tools/narinfo2parquet/Cargo.lock
index e59f70732..070a46851 100644
--- a/tvix/tools/narinfo2parquet/Cargo.lock
+++ b/tvix/tools/narinfo2parquet/Cargo.lock
@@ -487,6 +487,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07"
 
 [[package]]
+name = "enum-primitive-derive"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba7795da175654fe16979af73f81f26a8ea27638d8d9823d317016888a63dc4c"
+dependencies = [
+ "num-traits",
+ "quote",
+ "syn 2.0.39",
+]
+
+[[package]]
 name = "enum_dispatch"
 version = "0.3.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -878,9 +889,9 @@ dependencies = [
 
 [[package]]
 name = "mio"
-version = "0.8.9"
+version = "0.8.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0"
+checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
 dependencies = [
  "libc",
  "wasi",
@@ -930,12 +941,16 @@ dependencies = [
  "data-encoding",
  "ed25519",
  "ed25519-dalek",
+ "enum-primitive-derive",
  "glob",
  "nom",
+ "num-traits",
+ "pin-project-lite",
  "serde",
  "serde_json",
  "sha2",
  "thiserror",
+ "tokio",
 ]
 
 [[package]]
@@ -968,9 +983,9 @@ dependencies = [
 
 [[package]]
 name = "num-traits"
-version = "0.2.17"
+version = "0.2.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
 dependencies = [
  "autocfg",
  "libm",
@@ -1805,9 +1820,9 @@ dependencies = [
 
 [[package]]
 name = "tokio"
-version = "1.33.0"
+version = "1.37.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653"
+checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787"
 dependencies = [
  "backtrace",
  "bytes",
@@ -1816,10 +1831,22 @@ dependencies = [
  "num_cpus",
  "pin-project-lite",
  "socket2",
+ "tokio-macros",
  "windows-sys",
 ]
 
 [[package]]
+name = "tokio-macros"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.39",
+]
+
+[[package]]
 name = "tokio-util"
 version = "0.7.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2091,18 +2118,18 @@ checksum = "9828b178da53440fa9c766a3d2f73f7cf5d0ac1fe3980c1e5018d899fd19e07b"
 
 [[package]]
 name = "zerocopy"
-version = "0.7.25"
+version = "0.7.34"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8cd369a67c0edfef15010f980c3cbe45d7f651deac2cd67ce097cd801de16557"
+checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087"
 dependencies = [
  "zerocopy-derive",
 ]
 
 [[package]]
 name = "zerocopy-derive"
-version = "0.7.25"
+version = "0.7.34"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2f140bda219a26ccc0cdb03dba58af72590c53b22642577d88a927bc5c87d6b"
+checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b"
 dependencies = [
  "proc-macro2",
  "quote",
diff --git a/tvix/tools/narinfo2parquet/Cargo.nix b/tvix/tools/narinfo2parquet/Cargo.nix
index 27a5d684b..0ae873c84 100644
--- a/tvix/tools/narinfo2parquet/Cargo.nix
+++ b/tvix/tools/narinfo2parquet/Cargo.nix
@@ -1,4 +1,4 @@
-# This file was @generated by crate2nix 0.13.0 with the command:
+# This file was @generated by crate2nix 0.14.0 with the command:
 #   "generate" "--all-features"
 # See https://github.com/kolloch/crate2nix for more info.
 
@@ -1429,6 +1429,32 @@ rec {
         };
         resolvedDefaultFeatures = [ "default" "use_std" ];
       };
+      "enum-primitive-derive" = rec {
+        crateName = "enum-primitive-derive";
+        version = "0.3.0";
+        edition = "2018";
+        sha256 = "0k6wcf58h5kh64yq5nfq71va53kaya0kzxwsjwbgwm2n2zd9axxs";
+        procMacro = true;
+        authors = [
+          "Doug Goldstein <cardoe@cardoe.com>"
+        ];
+        dependencies = [
+          {
+            name = "num-traits";
+            packageId = "num-traits";
+            usesDefaultFeatures = false;
+          }
+          {
+            name = "quote";
+            packageId = "quote";
+          }
+          {
+            name = "syn";
+            packageId = "syn 2.0.39";
+          }
+        ];
+
+      };
       "enum_dispatch" = rec {
         crateName = "enum_dispatch";
         version = "0.3.12";
@@ -2511,9 +2537,9 @@ rec {
       };
       "mio" = rec {
         crateName = "mio";
-        version = "0.8.9";
+        version = "0.8.11";
         edition = "2018";
-        sha256 = "1l23hg513c23nhcdzvk25caaj28mic6qgqadbn8axgj6bqf2ikix";
+        sha256 = "034byyl0ardml5yliy1hmvx8arkmn9rv479pid794sm07ia519m4";
         authors = [
           "Carl Lerche <me@carllerche.com>"
           "Thomas de Zeeuw <thomasdezeeuw@gmail.com>"
@@ -2689,6 +2715,10 @@ rec {
             packageId = "ed25519-dalek";
           }
           {
+            name = "enum-primitive-derive";
+            packageId = "enum-primitive-derive";
+          }
+          {
             name = "glob";
             packageId = "glob";
           }
@@ -2697,6 +2727,15 @@ rec {
             packageId = "nom";
           }
           {
+            name = "num-traits";
+            packageId = "num-traits";
+          }
+          {
+            name = "pin-project-lite";
+            packageId = "pin-project-lite";
+            optional = true;
+          }
+          {
             name = "serde";
             packageId = "serde";
             features = [ "derive" ];
@@ -2713,6 +2752,12 @@ rec {
             name = "thiserror";
             packageId = "thiserror";
           }
+          {
+            name = "tokio";
+            packageId = "tokio";
+            optional = true;
+            features = [ "io-util" "macros" ];
+          }
         ];
         devDependencies = [
           {
@@ -2721,9 +2766,13 @@ rec {
           }
         ];
         features = {
-          "async" = [ "futures-util" ];
-          "futures-util" = [ "dep:futures-util" ];
+          "async" = [ "tokio" ];
+          "default" = [ "async" "wire" ];
+          "pin-project-lite" = [ "dep:pin-project-lite" ];
+          "tokio" = [ "dep:tokio" ];
+          "wire" = [ "tokio" "pin-project-lite" ];
         };
+        resolvedDefaultFeatures = [ "async" "default" "pin-project-lite" "tokio" "wire" ];
       };
       "nom" = rec {
         crateName = "nom";
@@ -2792,9 +2841,9 @@ rec {
       };
       "num-traits" = rec {
         crateName = "num-traits";
-        version = "0.2.17";
-        edition = "2018";
-        sha256 = "0z16bi5zwgfysz6765v3rd6whfbjpihx3mhsn4dg8dzj2c221qrr";
+        version = "0.2.19";
+        edition = "2021";
+        sha256 = "0h984rhdkkqd4ny9cif7y2azl3xdfb7768hb9irhpsch4q3gq787";
         authors = [
           "The Rust Project Developers"
         ];
@@ -6026,9 +6075,9 @@ rec {
       };
       "tokio" = rec {
         crateName = "tokio";
-        version = "1.33.0";
+        version = "1.37.0";
         edition = "2021";
-        sha256 = "0lynj8nfqziviw72qns9mjlhmnm66bsc5bivy5g5x6gp7q720f2g";
+        sha256 = "11v7qhvpwsf976frqgrjl1jy308bdkxq195gb38cypx7xkzypnqs";
         authors = [
           "Tokio Contributors <team@tokio.rs>"
         ];
@@ -6072,6 +6121,11 @@ rec {
             features = [ "all" ];
           }
           {
+            name = "tokio-macros";
+            packageId = "tokio-macros";
+            optional = true;
+          }
+          {
             name = "windows-sys";
             packageId = "windows-sys";
             optional = true;
@@ -6116,7 +6170,33 @@ rec {
           "tracing" = [ "dep:tracing" ];
           "windows-sys" = [ "dep:windows-sys" ];
         };
-        resolvedDefaultFeatures = [ "bytes" "default" "io-util" "libc" "mio" "net" "num_cpus" "rt" "rt-multi-thread" "socket2" "sync" "time" "windows-sys" ];
+        resolvedDefaultFeatures = [ "bytes" "default" "io-util" "libc" "macros" "mio" "net" "num_cpus" "rt" "rt-multi-thread" "socket2" "sync" "time" "tokio-macros" "windows-sys" ];
+      };
+      "tokio-macros" = rec {
+        crateName = "tokio-macros";
+        version = "2.2.0";
+        edition = "2021";
+        sha256 = "0fwjy4vdx1h9pi4g2nml72wi0fr27b5m954p13ji9anyy8l1x2jv";
+        procMacro = true;
+        authors = [
+          "Tokio Contributors <team@tokio.rs>"
+        ];
+        dependencies = [
+          {
+            name = "proc-macro2";
+            packageId = "proc-macro2";
+          }
+          {
+            name = "quote";
+            packageId = "quote";
+          }
+          {
+            name = "syn";
+            packageId = "syn 2.0.39";
+            features = [ "full" ];
+          }
+        ];
+
       };
       "tokio-util" = rec {
         crateName = "tokio-util";
@@ -7673,9 +7753,9 @@ rec {
       };
       "zerocopy" = rec {
         crateName = "zerocopy";
-        version = "0.7.25";
+        version = "0.7.34";
         edition = "2018";
-        sha256 = "0mv5w4fq1kcpw1ydcb5cvr8zdms5pqy0r60g04ayzpqfgjk6klwc";
+        sha256 = "11xhrwixm78m6ca1jdxf584wdwvpgg7q00vg21fhwl0psvyf71xf";
         authors = [
           "Joshua Liebow-Feeser <joshlf@google.com>"
         ];
@@ -7709,9 +7789,9 @@ rec {
       };
       "zerocopy-derive" = rec {
         crateName = "zerocopy-derive";
-        version = "0.7.25";
+        version = "0.7.34";
         edition = "2018";
-        sha256 = "0svxr32pp4lav1vjar127g2r09gpiajxn0yv1k66r8hrlayl1wf2";
+        sha256 = "0fqvglw01w3hp7xj9gdk1800x9j7v58s9w8ijiyiz2a7krb39s8m";
         procMacro = true;
         authors = [
           "Joshua Liebow-Feeser <joshlf@google.com>"
@@ -7969,8 +8049,9 @@ rec {
             # because we compiled those test binaries in the former and not the latter.
             # So all paths will expect source tree to be there and not in the build top directly.
             # For example: $NIX_BUILD_TOP := /build in general, if you ask yourself.
-            # TODO(raitobezarius): I believe there could be more edge cases if `crate.sourceRoot`
-            # do exist but it's very hard to reason about them, so let's wait until the first bug report.
+            # NOTE: There could be edge cases if `crate.sourceRoot` does exist,
+            # but they are very hard to reason about.
+            # Open a bug if you run into this!
             mkdir -p source/
             cd source/
 
diff --git a/tvix/website/landing-en.md b/tvix/website/landing-en.md
index 61a011dee..f677f20f2 100644
--- a/tvix/website/landing-en.md
+++ b/tvix/website/landing-en.md
@@ -15,7 +15,7 @@ There are several projects within Tvix, such as:
 * `//tvix/castore` - subtree storage/transfer in a content-addressed fashion
 * `//tvix/cli` - preliminary REPL & CLI implementation for Tvix
 * `//tvix/eval` - an implementation of the Nix programming language
-* `//tvix/nar-bridge` - a HTTP webserver providing a Nix HTTP Binary Cache interface in front of a tvix-store
+* `//tvix/nar-bridge[-go]` - an HTTP webserver providing a Nix HTTP Binary Cache interface in front of a tvix-store
 * `//tvix/nix-compat` - a Rust library for compatibility with C++ Nix, features like encodings and hashing schemes and formats
 * `//tvix/serde` - a Rust library for using the Nix language for app configuration
 * `//tvix/store` - a "filesystem" linking Nix store paths and metadata with the content-addressed layer
diff --git a/users/Profpatsch/my-prelude/default.nix b/users/Profpatsch/my-prelude/default.nix
index e44511541..4bca8ea49 100644
--- a/users/Profpatsch/my-prelude/default.nix
+++ b/users/Profpatsch/my-prelude/default.nix
@@ -7,6 +7,7 @@ pkgs.haskellPackages.mkDerivation {
   src = depot.users.Profpatsch.exactSource ./. [
     ./my-prelude.cabal
     ./src/Aeson.hs
+    ./src/Arg.hs
     ./src/AtLeast.hs
     ./src/MyPrelude.hs
     ./src/Test.hs
diff --git a/users/Profpatsch/my-prelude/my-prelude.cabal b/users/Profpatsch/my-prelude/my-prelude.cabal
index 95a8399f3..2f7882a52 100644
--- a/users/Profpatsch/my-prelude/my-prelude.cabal
+++ b/users/Profpatsch/my-prelude/my-prelude.cabal
@@ -59,6 +59,7 @@ library
     exposed-modules:
       MyPrelude
       Aeson
+      Arg
       AtLeast
       Test
       Postgres.Decoder
diff --git a/users/Profpatsch/my-prelude/src/Arg.hs b/users/Profpatsch/my-prelude/src/Arg.hs
new file mode 100644
index 000000000..a6ffa9092
--- /dev/null
+++ b/users/Profpatsch/my-prelude/src/Arg.hs
@@ -0,0 +1,34 @@
+module Arg where
+
+import Data.String (IsString)
+import GHC.Exts (IsList)
+import GHC.TypeLits (Symbol)
+
+-- | Wrap a function argument into this helper to give it a better description for the caller without disturbing the callsite too much.
+--
+-- This has instances for IsString and Num, meaning if the caller is usually a string or number literal, it should Just Work.
+--
+-- e.g.
+--
+-- @
+-- myFoo :: Arg "used as the name in error message" Text -> IO ()
+-- myFoo (Arg name) = …
+-- @
+--
+-- Will display the description in the inferred type of the callsite.
+--
+-- Due to IsString you can call @myFoo@ like
+--
+-- @myFoo "name in error"@
+--
+-- This is mostly intended for literals; if you want to wrap arbitrary data, use @Label@.
+newtype Arg (description :: Symbol) a = Arg {unArg :: a}
+  deriving newtype
+    ( Show,
+      Eq,
+      IsString,
+      IsList,
+      Num,
+      Monoid,
+      Semigroup
+    )
diff --git a/users/Profpatsch/my-prelude/src/Postgres/MonadPostgres.hs b/users/Profpatsch/my-prelude/src/Postgres/MonadPostgres.hs
index f83a6d7fc..a542f8c7b 100644
--- a/users/Profpatsch/my-prelude/src/Postgres/MonadPostgres.hs
+++ b/users/Profpatsch/my-prelude/src/Postgres/MonadPostgres.hs
@@ -5,13 +5,20 @@
 
 module Postgres.MonadPostgres where
 
+import Arg
 import AtLeast (AtLeast)
 import Control.Exception
+  ( Exception (displayException),
+    Handler (Handler),
+    catches,
+    try,
+  )
 import Control.Foldl qualified as Fold
 import Control.Monad.Logger.CallStack (MonadLogger, logDebug, logWarn)
 import Control.Monad.Reader (MonadReader (ask), ReaderT (..))
 import Control.Monad.Trans.Resource
 import Data.Aeson (FromJSON)
+import Data.ByteString qualified as ByteString
 import Data.Error.Tree
 import Data.HashMap.Strict qualified as HashMap
 import Data.Int (Int64)
@@ -28,8 +35,10 @@ import Database.PostgreSQL.Simple.FromRow qualified as PG
 import Database.PostgreSQL.Simple.ToField (ToField)
 import Database.PostgreSQL.Simple.ToRow (ToRow (toRow))
 import Database.PostgreSQL.Simple.Types (Query (..))
+import GHC.IO.Handle (Handle)
 import GHC.Records (getField)
 import Label
+import OpenTelemetry.Trace.Core (NewEvent (newEventName))
 import OpenTelemetry.Trace.Core qualified as Otel hiding (inSpan, inSpan')
 import OpenTelemetry.Trace.Monad qualified as Otel
 import PossehlAnalyticsPrelude
@@ -39,7 +48,9 @@ import Pretty (showPretty)
 import Seconds
 import System.Exit (ExitCode (..))
 import Tool
-import UnliftIO (MonadUnliftIO (withRunInIO))
+import UnliftIO (MonadUnliftIO (withRunInIO), bracket, hClose, mask_)
+import UnliftIO.Concurrent (forkIO)
+import UnliftIO.Process (ProcessHandle)
 import UnliftIO.Process qualified as Process
 import UnliftIO.Resource qualified as Resource
 import Prelude hiding (init, span)
@@ -357,7 +368,7 @@ handlePGException ::
   ( ToRow params,
     MonadUnliftIO m,
     MonadLogger m,
-    HasField "pgFormat" tools Tool
+    HasField "pgFormat" tools PgFormatPool
   ) =>
   tools ->
   Text ->
@@ -405,6 +416,105 @@ withPGTransaction connPool f =
     connPool
     (\conn -> Postgres.withTransaction conn (f conn))
 
+-- | `pg_formatter` is a Perl script that does not support any kind of streaming.
+-- Thus we initialize a pool with a bunch of these scripts running, waiting for input. This way we can have somewhat fast SQL formatting.
+--
+-- Call `initPgFormatPool` to initialize, then use `runPgFormat` to format some sql.
+data PgFormatPool = PgFormatPool
+  { pool :: Pool PgFormatProcess,
+    pgFormat :: Tool
+  }
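+
+-- Usage sketch (hypothetical):
+--
+-- @
+-- pool <- initPgFormatPool tools
+-- res <- runPgFormat pool "select 1;"
+-- destroyPgFormatPool pool
+-- @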
+
+data PgFormatProcess = PgFormatProcess
+  { stdinHdl :: Handle,
+    stdoutHdl :: Handle,
+    stderrHdl :: Handle,
+    procHdl :: ProcessHandle,
+    startedAt :: Otel.Timestamp
+  }
+
+initPgFormatPool :: (HasField "pgFormat" tools Tool) => tools -> IO PgFormatPool
+initPgFormatPool tools = do
+  pool <-
+    Pool.newPool
+      ( Pool.defaultPoolConfig
+          (pgFormatStartCommandWaitForInput tools)
+          ( \pgFmt -> do
+              Process.terminateProcess pgFmt.procHdl
+              -- make sure we don’t leave any zombies
+              _ <- forkIO $ do
+                _ <- Process.waitForProcess pgFmt.procHdl
+                pure ()
+              pure ()
+          )
+          -- unused resource time
+          100
+          -- number of resources
+          10
+      )
+
+  -- fill the pool with resources: tryWithResource recurses while it can still
+  -- take (and thus create) a resource, so all formatter processes start eagerly
+  let go =
+        Pool.tryWithResource pool (\_ -> go) >>= \case
+          Nothing -> pure ()
+          Just () -> pure ()
+  _ <- go
+  pure (PgFormatPool {pool, pgFormat = tools.pgFormat})
+
+destroyPgFormatPool :: PgFormatPool -> IO ()
+destroyPgFormatPool pool = Pool.destroyAllResources pool.pool
+
+-- | Get the oldest resource from the pool, or stop if you find a resource that’s older than `cutoffPointMs`.
+takeOldestResource :: PgFormatPool -> Arg "cutoffPointMs" Integer -> IO (PgFormatProcess, Pool.LocalPool PgFormatProcess)
+takeOldestResource pool cutoffPointMs = do
+  now <- Otel.getTimestamp
+  mask_ $ do
+    a <- Pool.takeResource pool.pool
+    (putBack, res) <- go now [] a
+    -- make sure we don’t leak any resources we didn’t use in the end
+    for_ putBack $ \(x, xLocal) -> Pool.putResource xLocal x
+    pure res
+  where
+    mkMs ts = (ts & Otel.timestampNanoseconds & toInteger) `div` 1000_000
+    go now putBack a@(a', _) =
+      if abs (mkMs now - mkMs a'.startedAt) > cutoffPointMs.unArg
+        then pure (putBack, a)
+        else
+          Pool.tryTakeResource pool.pool >>= \case
+            Nothing -> pure (putBack, a)
+            Just b@(b', _) -> do
+              if a'.startedAt < b'.startedAt
+                then go now (b : putBack) a
+                else go now (a : putBack) b
+
+-- | Format the given SQL with pg_formatter. Will use the pool of already running formatters to speed up execution.
+runPgFormat :: PgFormatPool -> ByteString -> IO (T3 "exitCode" ExitCode "formatted" ByteString "stderr" ByteString)
+runPgFormat pool sqlStatement = do
+  bracket
+    (takeOldestResource pool 200)
+    ( \(a, localPool) -> do
+        -- we always destroy the resource, because the process exited
+        Pool.destroyResource pool.pool localPool a
+        -- create a new process to keep the pool “warm”
+        new <- pgFormatStartCommandWaitForInput pool
+        Pool.putResource localPool new
+    )
+    ( \(pgFmt, _localPool) -> do
+        putStderrLn "Running with warm pgformatter"
+        ByteString.hPut pgFmt.stdinHdl sqlStatement
+        -- close stdin to make pg_formatter format (it exits …)
+        -- issue: https://github.com/darold/pgFormatter/issues/333
+        hClose pgFmt.stdinHdl
+        formatted <- ByteString.hGetContents pgFmt.stdoutHdl
+        errs <- ByteString.hGetContents pgFmt.stderrHdl
+        exitCode <- Process.waitForProcess pgFmt.procHdl
+        pure $
+          T3
+            (label @"exitCode" exitCode)
+            (label @"formatted" formatted)
+            (label @"stderr" errs)
+    )
+
 runPGTransactionImpl ::
   (MonadUnliftIO m) =>
   m (Pool Postgres.Connection) ->
@@ -418,7 +528,7 @@ runPGTransactionImpl zoom (Transaction transaction) = do
       unliftIO $ runReaderT transaction conn
 
 executeImpl ::
-  (ToRow params, MonadUnliftIO m, MonadLogger m, HasField "pgFormat" tools Tool, Otel.MonadTracer m) =>
+  (ToRow params, MonadUnliftIO m, MonadLogger m, HasField "pgFormat" tools PgFormatPool, Otel.MonadTracer m) =>
   m tools ->
   m DebugLogDatabaseQueries ->
   Query ->
@@ -436,7 +546,7 @@ executeImpl zoomTools zoomDebugLogDatabaseQueries qry params =
       >>= toNumberOfRowsAffected "executeImpl"
 
 executeImpl_ ::
-  (MonadUnliftIO m, MonadLogger m, HasField "pgFormat" tools Tool, Otel.MonadTracer m) =>
+  (MonadUnliftIO m, MonadLogger m, HasField "pgFormat" tools PgFormatPool, Otel.MonadTracer m) =>
   m tools ->
   m DebugLogDatabaseQueries ->
   Query ->
@@ -453,14 +563,14 @@ executeImpl_ zoomTools zoomDebugLogDatabaseQueries qry =
       >>= toNumberOfRowsAffected "executeImpl_"
 
 executeManyImpl ::
-  (ToRow params, MonadUnliftIO m, MonadLogger m, HasField "pgFormat" tools Tool, Otel.MonadTracer m) =>
+  (ToRow params, MonadUnliftIO m, MonadLogger m, HasField "pgFormat" tools PgFormatPool, Otel.MonadTracer m) =>
   m tools ->
   m DebugLogDatabaseQueries ->
   Query ->
   NonEmpty params ->
   Transaction m (Label "numberOfRowsAffected" Natural)
 executeManyImpl zoomTools zoomDebugLogDatabaseQueries qry params =
-  Otel.inSpan' "Postgres Query (execute)" Otel.defaultSpanArguments $ \span -> do
+  Otel.inSpan' "Postgres Query (executeMany)" Otel.defaultSpanArguments $ \span -> do
     tools <- lift @Transaction zoomTools
     logDatabaseQueries <- lift @Transaction zoomDebugLogDatabaseQueries
     traceQueryIfEnabled tools span logDatabaseQueries qry (HasMultiParams params)
@@ -480,7 +590,7 @@ toNumberOfRowsAffected functionName i64 =
     <&> label @"numberOfRowsAffected"
 
 executeManyReturningWithImpl ::
-  (ToRow params, MonadUnliftIO m, MonadLogger m, HasField "pgFormat" tools Tool, Otel.MonadTracer m) =>
+  (ToRow params, MonadUnliftIO m, MonadLogger m, HasField "pgFormat" tools PgFormatPool, Otel.MonadTracer m) =>
   m tools ->
   m DebugLogDatabaseQueries ->
   Query ->
@@ -489,7 +599,7 @@ executeManyReturningWithImpl ::
   Transaction m [r]
 {-# INLINE executeManyReturningWithImpl #-}
 executeManyReturningWithImpl zoomTools zoomDebugLogDatabaseQueries qry params (Decoder fromRow) = do
-  Otel.inSpan' "Postgres Query (execute)" Otel.defaultSpanArguments $ \span -> do
+  Otel.inSpan' "Postgres Query (executeManyReturning)" Otel.defaultSpanArguments $ \span -> do
     tools <- lift @Transaction zoomTools
     logDatabaseQueries <- lift @Transaction zoomDebugLogDatabaseQueries
     traceQueryIfEnabled tools span logDatabaseQueries qry (HasMultiParams params)
@@ -501,7 +611,7 @@ foldRowsWithAccImpl ::
   ( ToRow params,
     MonadUnliftIO m,
     MonadLogger m,
-    HasField "pgFormat" tools Tool,
+    HasField "pgFormat" tools PgFormatPool,
     Otel.MonadTracer m
   ) =>
   m tools ->
@@ -535,7 +645,7 @@ foldRowsWithAccImpl zoomTools zoomDebugLogDatabaseQueries qry params (Decoder ro
       )
 
 pgFormatQueryNoParams' ::
-  (MonadIO m, MonadLogger m, HasField "pgFormat" tools Tool) =>
+  (MonadIO m, MonadLogger m, HasField "pgFormat" tools PgFormatPool) =>
   tools ->
   Query ->
   Transaction m Text
@@ -571,7 +681,7 @@ queryWithImpl ::
   ( ToRow params,
     MonadUnliftIO m,
     MonadLogger m,
-    HasField "pgFormat" tools Tool,
+    HasField "pgFormat" tools PgFormatPool,
     Otel.MonadTracer m
   ) =>
   m tools ->
@@ -582,7 +692,7 @@ queryWithImpl ::
   Transaction m [r]
 {-# INLINE queryWithImpl #-}
 queryWithImpl zoomTools zoomDebugLogDatabaseQueries qry params (Decoder fromRow) = do
-  Otel.inSpan' "Postgres Query (execute)" Otel.defaultSpanArguments $ \span -> do
+  Otel.inSpan' "Postgres Query (queryWith)" Otel.defaultSpanArguments $ \span -> do
     tools <- lift @Transaction zoomTools
     logDatabaseQueries <- lift @Transaction zoomDebugLogDatabaseQueries
     traceQueryIfEnabled tools span logDatabaseQueries qry (HasSingleParam params)
@@ -593,7 +703,7 @@ queryWithImpl zoomTools zoomDebugLogDatabaseQueries qry params (Decoder fromRow)
 queryWithImpl_ ::
   ( MonadUnliftIO m,
     MonadLogger m,
-    HasField "pgFormat" tools Tool
+    HasField "pgFormat" tools PgFormatPool
   ) =>
   m tools ->
   Query ->
@@ -619,7 +729,7 @@ pgFormatQuery' ::
   ( MonadIO m,
     ToRow params,
     MonadLogger m,
-    HasField "pgFormat" tools Tool
+    HasField "pgFormat" tools PgFormatPool
   ) =>
   tools ->
   Query ->
@@ -633,7 +743,7 @@ pgFormatQueryMany' ::
   ( MonadIO m,
     ToRow params,
     MonadLogger m,
-    HasField "pgFormat" tools Tool
+    HasField "pgFormat" tools PgFormatPool
   ) =>
   tools ->
   Query ->
@@ -650,33 +760,58 @@ postgresToolsParser = label @"pgFormat" <$> readTool "pg_format"
 pgFormatQueryByteString ::
   ( MonadIO m,
     MonadLogger m,
-    HasField "pgFormat" tools Tool
+    HasField "pgFormat" tools PgFormatPool
   ) =>
   tools ->
   ByteString ->
   m Text
 pgFormatQueryByteString tools queryBytes = do
+  res <-
+    liftIO $
+      runPgFormat
+        tools.pgFormat
+        (queryBytes)
+  case res.exitCode of
+    ExitSuccess -> pure (res.formatted & bytesToTextUtf8Lenient)
+    ExitFailure status -> do
+      logWarn [fmt|pg_format failed with status {status} while formatting the query, using original query string. Is there a syntax error?|]
+      logDebug
+        ( prettyErrorTree
+            ( nestedMultiError
+                "pg_format output"
+                ( nestedError "stdout" (singleError (res.formatted & bytesToTextUtf8Lenient & newError))
+                    :| [(nestedError "stderr" (singleError (res.stderr & bytesToTextUtf8Lenient & newError)))]
+                )
+            )
+        )
+      logDebug [fmt|pg_format stdout: stderr|]
+      pure (queryBytes & bytesToTextUtf8Lenient)
+
+pgFormatStartCommandWaitForInput ::
+  ( MonadIO m,
+    HasField "pgFormat" tools Tool,
+    MonadFail m
+  ) =>
+  tools ->
+  m PgFormatProcess
+pgFormatStartCommandWaitForInput tools = do
   do
-    (exitCode, stdout, stderr) <-
-      Process.readProcessWithExitCode
-        tools.pgFormat.toolPath
-        ["-"]
-        (queryBytes & bytesToTextUtf8Lenient & textToString)
-    case exitCode of
-      ExitSuccess -> pure (stdout & stringToText)
-      ExitFailure status -> do
-        logWarn [fmt|pg_format failed with status {status} while formatting the query, using original query string. Is there a syntax error?|]
-        logDebug
-          ( prettyErrorTree
-              ( nestedMultiError
-                  "pg_format output"
-                  ( nestedError "stdout" (singleError (stdout & stringToText & newError))
-                      :| [(nestedError "stderr" (singleError (stderr & stringToText & newError)))]
-                  )
-              )
+    startedAt <- Otel.getTimestamp
+    (Just stdinHdl, Just stdoutHdl, Just stderrHdl, procHdl) <-
+      Process.createProcess
+        ( ( Process.proc
+              tools.pgFormat.toolPath
+              [ "--no-rcfile",
+                "-"
+              ]
           )
-        logDebug [fmt|pg_format stdout: stderr|]
-        pure (queryBytes & bytesToTextUtf8Lenient)
+            { Process.std_in = Process.CreatePipe,
+              Process.std_out = Process.CreatePipe,
+              Process.std_err = Process.CreatePipe
+            }
+        )
+
+    pure PgFormatProcess {..}
 
 data DebugLogDatabaseQueries
   = -- | Do not log the database queries
@@ -697,7 +832,7 @@ traceQueryIfEnabled ::
   ( ToRow params,
     MonadUnliftIO m,
     MonadLogger m,
-    HasField "pgFormat" tools Tool,
+    HasField "pgFormat" tools PgFormatPool,
     Otel.MonadTracer m
   ) =>
   tools ->
@@ -708,20 +843,25 @@ traceQueryIfEnabled ::
   Transaction m ()
 traceQueryIfEnabled tools span logDatabaseQueries qry params = do
   -- In case we have query logging enabled, we want to do that
-  let formattedQuery = case params of
-        HasNoParams -> pgFormatQueryNoParams' tools qry
-        HasSingleParam p -> pgFormatQuery' tools qry p
-        HasMultiParams ps -> pgFormatQueryMany' tools qry ps
+  let formattedQuery = do
+        withEvent
+          span
+          "Query Format start"
+          "Query Format end"
+          $ case params of
+            HasNoParams -> pgFormatQueryNoParams' tools qry
+            HasSingleParam p -> pgFormatQuery' tools qry p
+            HasMultiParams ps -> pgFormatQueryMany' tools qry ps
+
   let doLog errs =
         Otel.addAttributes
           span
           $ HashMap.fromList
           $ ( ("_.postgres.query", Otel.toAttribute @Text errs.query)
                 : ( errs.explain
-                      & foldMap
-                        ( \ex ->
-                            [("_.postgres.explain", Otel.toAttribute @Text ex)]
-                        )
+                      & \case
+                        Nothing -> []
+                        Just ex -> [("_.postgres.explain", Otel.toAttribute @Text ex)]
                   )
             )
   let doExplain = do
@@ -750,6 +890,37 @@ traceQueryIfEnabled tools span logDatabaseQueries qry params = do
       ex <- doExplain
       doLog (T2 (label @"query" q) (label @"explain" (Just ex)))
 
+-- | Add a start and an end event to the span, and record how long the action between them took.
+--
+-- This is more lightweight than starting an extra span for timing things.
+withEvent :: (MonadIO f) => Otel.Span -> Text -> Text -> f b -> f b
+withEvent span start end act = do
+  let mkMs ts = (ts & Otel.timestampNanoseconds & toInteger) `div` 1000_000
+  s <- Otel.getTimestamp
+  Otel.addEvent
+    span
+    ( Otel.NewEvent
+        { newEventName = start,
+          newEventAttributes = mempty,
+          newEventTimestamp = Just s
+        }
+    )
+  res <- act
+  e <- Otel.getTimestamp
+  let tookMs =
+        (mkMs e - mkMs s)
+          -- the elapsed time in ms is small enough to fit in Int
+          & fromInteger @Int
+  Otel.addEvent
+    span
+    ( Otel.NewEvent
+        { newEventName = end,
+          newEventAttributes = HashMap.fromList [("took ms", Otel.toAttribute tookMs)],
+          newEventTimestamp = Just e
+        }
+    )
+  pure res
+
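Any action can be bracketed this way, paying for two events on the existing span instead of a child span; a hypothetical use outside the query-formatting path (parseLargeFile is made up for illustration):

    -- attaches "parse start"/"parse end" events to the span already in scope
    result <- withEvent span "parse start" "parse end" $
      parseLargeFile path -- hypothetical action
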
 instance (ToField t1) => ToRow (Label l1 t1) where
   toRow t2 = toRow $ PG.Only $ getField @l1 t2
 
diff --git a/users/Profpatsch/whatcd-resolver/src/AppT.hs b/users/Profpatsch/whatcd-resolver/src/AppT.hs
index 7afd43074..abe8ccad4 100644
--- a/users/Profpatsch/whatcd-resolver/src/AppT.hs
+++ b/users/Profpatsch/whatcd-resolver/src/AppT.hs
@@ -19,14 +19,13 @@ import OpenTelemetry.Trace.Monad qualified as Otel
 import PossehlAnalyticsPrelude
 import Postgres.MonadPostgres
 import System.IO qualified as IO
-import Tool (Tool)
 import UnliftIO
 import Prelude hiding (span)
 
 data Context = Context
   { config :: Label "logDatabaseQueries" DebugLogDatabaseQueries,
     tracer :: Otel.Tracer,
-    pgFormat :: Tool,
+    pgFormat :: PgFormatPool,
     pgConnPool :: Pool Postgres.Connection,
     transmissionSessionId :: MVar ByteString
   }
diff --git a/users/Profpatsch/whatcd-resolver/src/Redacted.hs b/users/Profpatsch/whatcd-resolver/src/Redacted.hs
index 4369c1840..c0c26b72d 100644
--- a/users/Profpatsch/whatcd-resolver/src/Redacted.hs
+++ b/users/Profpatsch/whatcd-resolver/src/Redacted.hs
@@ -382,8 +382,8 @@ getTorrentById dat = do
     >>= ensureSingleRow
 
 -- | Find the best torrent for each torrent group (based on the seeding_weight)
-getBestTorrents :: (MonadPostgres m) => Transaction m [TorrentData ()]
-getBestTorrents = do
+getBestTorrents :: (MonadPostgres m, HasField "onlyDownloaded" opts Bool) => opts -> Transaction m [TorrentData ()]
+getBestTorrents opts = do
   queryWith
     [sql|
       SELECT * FROM (
@@ -393,15 +393,18 @@ getBestTorrents = do
           seeding_weight,
           t.full_json_result AS torrent_json,
           tg.full_json_result AS torrent_group_json,
-          t.torrent_file IS NOT NULL,
+          t.torrent_file IS NOT NULL as has_torrent_file,
           t.transmission_torrent_hash
         FROM redacted.torrents t
         JOIN redacted.torrent_groups tg ON tg.id = t.torrent_group
         ORDER BY group_id, seeding_weight DESC
       ) as _
+      WHERE
+        -- onlyDownloaded
+        ((NOT ?::bool) OR has_torrent_file)
       ORDER BY seeding_weight DESC
     |]
-    ()
+    (Only opts.onlyDownloaded :: Only Bool)
     ( do
         groupId <- Dec.fromField @Int
         torrentId <- Dec.fromField @Int
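The new WHERE clause is the usual optional-filter idiom: when the bound boolean is false, NOT ?::bool is true and the OR admits every row; when it is true, only rows with has_torrent_file pass. At the call sites this reads as follows (the False variant appears verbatim in WhatcdResolver.hs further down):

    -- all best-per-group torrents, downloaded or not
    best <- getBestTorrents (label @"onlyDownloaded" False)
    -- only torrents whose .torrent file is already stored
    downloaded <- getBestTorrents (label @"onlyDownloaded" True)
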
diff --git a/users/Profpatsch/whatcd-resolver/src/WhatcdResolver.hs b/users/Profpatsch/whatcd-resolver/src/WhatcdResolver.hs
index f1902bac8..1ec23e1fc 100644
--- a/users/Profpatsch/whatcd-resolver/src/WhatcdResolver.hs
+++ b/users/Profpatsch/whatcd-resolver/src/WhatcdResolver.hs
@@ -36,7 +36,6 @@ import Network.HTTP.Types
 import Network.HTTP.Types qualified as Http
 import Network.URI (URI)
 import Network.URI qualified
-import Network.URI qualified as URI
 import Network.Wai (ResponseReceived)
 import Network.Wai qualified as Wai
 import Network.Wai.Handler.Warp qualified as Warp
@@ -55,7 +54,6 @@ import System.Directory qualified as Xdg
 import System.Environment qualified as Env
 import System.FilePath ((</>))
 import Text.Blaze.Html (Html)
-import Text.Blaze.Html.Renderer.Pretty qualified as Html.Pretty
 import Text.Blaze.Html.Renderer.Utf8 qualified as Html
 import Text.Blaze.Html5 qualified as Html
 import Tool (readTool, readTools)
@@ -77,7 +75,6 @@ main =
 
 htmlUi :: AppT IO ()
 htmlUi = do
-  let debug = True
   uniqueRunId <-
     runTransaction $
       querySingleRowWith
@@ -87,13 +84,13 @@ htmlUi = do
         ()
         (Dec.fromField @Text)
 
-  withRunInIO $ \runInIO -> Warp.run 9093 $ \req respond -> do
+  withRunInIO $ \runInIO -> Warp.run 9093 $ \req respondOrig -> do
     let catchAppException act =
           try act >>= \case
             Right a -> pure a
             Left (AppException err) -> do
               runInIO (logError err)
-              respond (Wai.responseLBS Http.status500 [] "")
+              respondOrig (Wai.responseLBS Http.status500 [] "")
 
     catchAppException $ do
       let mp span parser =
@@ -119,9 +116,9 @@ htmlUi = do
       let handlers :: Handlers (AppT IO)
           handlers respond =
             Map.fromList
-              [ ("", respond.h (mainHtml uniqueRunId)),
+              [ ("", respond.html (mainHtml uniqueRunId)),
                 ( "snips/redacted/search",
-                  respond.h $
+                  respond.html $
                     \span -> do
                       dat <-
                         mp
@@ -132,12 +129,12 @@ htmlUi = do
                       snipsRedactedSearch dat
                 ),
                 ( "snips/redacted/torrentDataJson",
-                  respond.h $ \span -> do
+                  respond.html $ \span -> do
                     dat <- torrentIdMp span
                     Html.mkVal <$> (runTransaction $ getTorrentById dat)
                 ),
                 ( "snips/redacted/getTorrentFile",
-                  respond.h $ \span -> do
+                  respond.html $ \span -> do
                     dat <- torrentIdMp span
                     runTransaction $ do
                       inserted <- redactedGetTorrentFileAndInsert dat
@@ -157,7 +154,7 @@ htmlUi = do
                 ),
                 -- TODO: this is bad duplication??
                 ( "snips/redacted/startTorrentFile",
-                  respond.h $ \span -> do
+                  respond.html $ \span -> do
                     dat <- torrentIdMp span
                     runTransaction $ do
                       file <-
@@ -180,7 +177,7 @@ htmlUi = do
                           "Starting"
                 ),
                 ( "snips/transmission/getTorrentState",
-                  respond.h $ \span -> do
+                  respond.html $ \span -> do
                     dat <- mp span $ label @"torrentHash" <$> Multipart.field "torrent-hash" Field.utf8
                     status <-
                       doTransmissionRequest'
@@ -199,7 +196,7 @@ htmlUi = do
                         Just _torrent -> [hsx|Running|]
                 ),
                 ( "snips/jsonld/render",
-                  respond.h $ \span -> do
+                  respond.html $ \span -> do
                     qry <-
                       parseQueryArgs
                         span
@@ -211,6 +208,16 @@ htmlUi = do
                     jsonld <- httpGetJsonLd (qry.target)
                     pure $ renderJsonld jsonld
                 ),
+                ( "artist",
+                  respond.html $ \span -> do
+                    qry <-
+                      parseQueryArgs
+                        span
+                        ( label @"dbId"
+                            <$> (singleQueryArgument "db_id" Field.utf8)
+                        )
+                    artistPage qry
+                ),
                 ( "autorefresh",
                   respond.plain $ do
                     qry <-
@@ -233,23 +240,22 @@ htmlUi = do
               ]
       runInIO $
         runHandlers
-          debug
-          (\respond -> respond.h $ (mainHtml uniqueRunId))
+          (\respond -> respond.html $ (mainHtml uniqueRunId))
           handlers
           req
-          respond
+          respondOrig
   where
     everySecond :: Text -> Enc -> Html -> Html
     everySecond call extraData innerHtml = [hsx|<div hx-trigger="every 1s" hx-swap="outerHTML" hx-post={call} hx-vals={Enc.encToBytesUtf8 extraData}>{innerHtml}</div>|]
 
     mainHtml :: Text -> Otel.Span -> AppT IO Html
     mainHtml uniqueRunId _span = runTransaction $ do
-      jsonld <-
-        httpGetJsonLd
-          ( URI.parseURI "https://musicbrainz.org/work/92000fd4-d304-406d-aeb4-6bdbeed318ec" & annotate "not an URI" & unwrapError,
-            "https://musicbrainz.org/work/92000fd4-d304-406d-aeb4-6bdbeed318ec"
-          )
-          <&> renderJsonld
+      -- jsonld <-
+      --   httpGetJsonLd
+      --     ( URI.parseURI "https://musicbrainz.org/work/92000fd4-d304-406d-aeb4-6bdbeed318ec" & annotate "not an URI" & unwrapError,
+      --       "https://musicbrainz.org/work/92000fd4-d304-406d-aeb4-6bdbeed318ec"
+      --     )
+      --     <&> renderJsonld
       bestTorrentsTable <- getBestTorrentsTable
       -- transmissionTorrentsTable <- lift @Transaction getTransmissionTorrentsTable
       pure $
@@ -271,7 +277,6 @@ htmlUi = do
         </style>
       </head>
       <body>
-        {jsonld}
         <form
           hx-post="/snips/redacted/search"
           hx-target="#redacted-search-results">
@@ -300,44 +305,49 @@ htmlUi = do
       </body>
     |]
 
+artistPage :: (HasField "dbId" dat Text, Applicative m) => dat -> m Html
+artistPage dat = do
+  pure
+    [hsx|
+    Artist ID: {dat.dbId}
+  |]
+
 type Handlers m = HandlerResponses m -> Map Text (m ResponseReceived)
 
-type HandlerResponses m = T2 "h" ((Otel.Span -> m Html) -> m ResponseReceived) "plain" (m Wai.Response -> m ResponseReceived)
+data HandlerResponses m = HandlerResponses
+  { -- | render html
+    html :: ((Otel.Span -> m Html) -> m ResponseReceived),
+    -- | render a plain wai response
+    plain :: (m Wai.Response -> m ResponseReceived)
+  }
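+
Swapping the anonymous T2 "h" ... "plain" tuple for a named record keeps the same two capabilities but gives them descriptive, documentable selectors; the mechanical respond.h -> respond.html rename above follows from that.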
 
 runHandlers ::
   (MonadOtel m) =>
-  Bool ->
   (HandlerResponses m -> m ResponseReceived) ->
   (HandlerResponses m -> Map Text (m ResponseReceived)) ->
   Wai.Request ->
   (Wai.Response -> IO ResponseReceived) ->
   m ResponseReceived
-runHandlers debug defaultHandler handlers req respond = withRunInIO $ \runInIO -> do
-  let renderHtml =
-        if debug
-          then Html.Pretty.renderHtml >>> stringToText >>> textToBytesUtf8 >>> toLazyBytes
-          else Html.renderHtml
-  let hh route act =
-        Otel.inSpan'
-          [fmt|Route {route }|]
-          ( Otel.defaultSpanArguments
-              { Otel.attributes =
-                  HashMap.fromList
-                    [ ("server.path", Otel.toAttribute @Text route)
-                    ]
-              }
-          )
-          ( \span -> do
-              res <- act span
-              liftIO $ respond . Wai.responseLBS Http.ok200 ([("Content-Type", "text/html")] <> res.extraHeaders) . renderHtml $ res.html
-          )
-  let h route act = hh route (\span -> act span <&> (\html -> T2 (label @"html" html) (label @"extraHeaders" [])))
-
-  let path = (req & Wai.pathInfo & Text.intercalate "/")
+runHandlers defaultHandler handlers req respond = withRunInIO $ \runInIO -> do
+  let path = req & Wai.pathInfo & Text.intercalate "/"
   let handlerResponses =
-        ( T2
-            (label @"h" (h path))
-            (label @"plain" (\m -> liftIO $ runInIO m >>= respond))
+        ( HandlerResponses
+            { plain = (\m -> liftIO $ runInIO m >>= respond),
+              html = \act ->
+                Otel.inSpan'
+                  [fmt|Route /{path}|]
+                  ( Otel.defaultSpanArguments
+                      { Otel.attributes =
+                          HashMap.fromList
+                            [ ("server.path", Otel.toAttribute @Text path)
+                            ]
+                      }
+                  )
+                  ( \span -> do
+                      res <- act span <&> (\html -> T2 (label @"html" html) (label @"extraHeaders" []))
+                      liftIO $ respond . Wai.responseLBS Http.ok200 ([("Content-Type", "text/html")] <> res.extraHeaders) . Html.renderHtml $ res.html
+                  )
+            }
         )
   let handler =
         (handlers handlerResponses)
@@ -428,7 +438,7 @@ getBestTorrentsTable ::
   ) =>
   Transaction m Html
 getBestTorrentsTable = do
-  bestStale :: [TorrentData ()] <- getBestTorrents
+  bestStale :: [TorrentData ()] <- getBestTorrents (label @"onlyDownloaded" False)
   actual <-
     getAndUpdateTransmissionTorrentsStatus
       ( bestStale
@@ -462,11 +472,16 @@ getBestTorrentsTable = do
         fresh
           & foldMap
             ( \b -> do
+                let artistLink :: Text = [fmt|/artist?db_id={b.groupId}|]
                 [hsx|
                   <tr>
                   <td>{localTorrent b}</td>
                   <td>{Html.toHtml @Int b.groupId}</td>
-                  <td>{Html.toHtml @Text b.torrentGroupJson.artist}</td>
+                  <td>
+                    <a href={artistLink}>
+                      {Html.toHtml @Text b.torrentGroupJson.artist}
+                    </a>
+                  </td>
                   <td>{Html.toHtml @Text b.torrentGroupJson.groupName}</td>
                   <td>{Html.toHtml @Int b.seedingWeight}</td>
                   <td><details hx-trigger="toggle once" hx-post="snips/redacted/torrentDataJson" hx-vals={Enc.encToBytesUtf8 $ Enc.object [("torrent-id", Enc.int b.torrentId)]}></details></td>
@@ -624,7 +639,8 @@ httpTorrent span req =
 
 runAppWith :: AppT IO a -> IO (Either TmpPg.StartError a)
 runAppWith appT = withTracer $ \tracer -> withDb $ \db -> do
-  pgFormat <- readTools (label @"toolsEnvVar" "WHATCD_RESOLVER_TOOLS") (readTool "pg_format")
+  tool <- readTools (label @"toolsEnvVar" "WHATCD_RESOLVER_TOOLS") (readTool "pg_format")
+  pgFormat <- initPgFormatPool (label @"pgFormat" tool)
   let config = label @"logDatabaseQueries" LogDatabaseQueries
   pgConnPool <-
     Pool.newPool $
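initPgFormatPool is defined elsewhere; from this call site it consumes the labeled pg_format Tool and produces the PgFormatPool that the HasField "pgFormat" tools PgFormatPool constraints above expect. An assumed signature, inferred only from this usage:

    -- sketch; arity inferred from: initPgFormatPool (label @"pgFormat" tool)
    initPgFormatPool :: (HasField "pgFormat" tools Tool) => tools -> IO PgFormatPool
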
diff --git a/users/Profpatsch/whatcd-resolver/whatcd-resolver.cabal b/users/Profpatsch/whatcd-resolver/whatcd-resolver.cabal
index a9bd04827..8b3258bb5 100644
--- a/users/Profpatsch/whatcd-resolver/whatcd-resolver.cabal
+++ b/users/Profpatsch/whatcd-resolver/whatcd-resolver.cabal
@@ -119,3 +119,7 @@ executable whatcd-resolver
     build-depends:
         base >=4.15 && <5,
         whatcd-resolver
+
+    ghc-options:
+      -threaded
+
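The -threaded flag is presumably needed for the pg_format worker pool above: per the process library's note on waitForProcess, only the threaded RTS can wait on a subprocess without blocking every other Haskell thread.
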
diff --git a/users/amjoseph/OWNERS b/users/amjoseph/OWNERS
new file mode 100644
index 000000000..a99992be6
--- /dev/null
+++ b/users/amjoseph/OWNERS
@@ -0,0 +1,3 @@
+set noparent
+
+amjoseph
diff --git a/users/amjoseph/keys.nix b/users/amjoseph/keys.nix
new file mode 100644
index 000000000..8cc2f2436
--- /dev/null
+++ b/users/amjoseph/keys.nix
@@ -0,0 +1,22 @@
+{ ... }:
+
+let
+  # Long-term, air-gapped PGP key.  This key is used only for signing other
+  # keys.  It is a minor hassle for me to access this key.
+  airgap = "F0B74D717CDE8412A3E0D4D5F29AC8080DA8E1E0";
+
+  # Stored in an HSM.  Signed by the above key.
+  current = "D930411B675A011EB9590713DC4AB809B13BE76D";
+
+  # Chat protocols that depend on DNS, WebPKI, or E.164 are lame.  This is not.
+  ricochet = "emhxygy5mezcovm5a6q5hze5eqfqgieww56eh4ttwmrolwqmzgb6qiyd";
+
+  # This ssh key is for depot.  Please don't use it elsewhere, except to give
+  # me the ability to set a system-specific key elsewhere.  Not currently
+  # stored in an HSM, but I'm working on that.
+  ssh-for-depot = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOE5e0HrwQTI5KOaU12J0AJG5zDpWn4g/U+oFXz7SkbD";
+
+in
+{
+  all = [ ssh-for-depot ];
+}
diff --git a/users/flokli/archeology/default.nix b/users/flokli/archeology/default.nix
index d642399cb..690944403 100644
--- a/users/flokli/archeology/default.nix
+++ b/users/flokli/archeology/default.nix
@@ -10,7 +10,7 @@ let
   '';
   # clickhouse has a very odd AWS config concept.
   # Configure it to be a bit more sane.
-  clickhoseLocalFixedAWS = pkgs.runCommand "clickhouse-local-fixed"
+  clickhouseLocalFixedAWS = pkgs.runCommand "clickhouse-local-fixed"
     {
       nativeBuildInputs = [ pkgs.makeWrapper ];
     } ''
@@ -21,19 +21,19 @@ let
 in
 
 depot.nix.readTree.drvTargets {
-  inherit clickhoseLocalFixedAWS;
+  inherit clickhouseLocalFixedAWS;
   parse-bucket-logs = pkgs.runCommand "archeology-parse-bucket-logs"
     {
       nativeBuildInputs = [ pkgs.makeWrapper ];
     } ''
     mkdir -p $out/bin
     makeWrapper ${(pkgs.writers.writeRust "parse-bucket-logs-unwrapped" {} ./parse_bucket_logs.rs)} $out/bin/archeology-parse-bucket-logs \
-      --prefix PATH : ${pkgs.lib.makeBinPath [ clickhoseLocalFixedAWS ]}
+      --prefix PATH : ${pkgs.lib.makeBinPath [ clickhouseLocalFixedAWS ]}
   '';
 
   shell = pkgs.mkShell {
     name = "archeology-shell";
-    packages = with pkgs; [ awscli2 clickhoseLocalFixedAWS rust-analyzer rustc rustfmt ];
+    packages = with pkgs; [ awscli2 clickhouseLocalFixedAWS rust-analyzer rustc rustfmt ];
 
     AWS_PROFILE = "sso";
     AWS_CONFIG_FILE = pkgs.writeText "aws-config" ''
diff --git a/users/flokli/keyboards/dilemma/default.nix b/users/flokli/keyboards/dilemma/default.nix
index 265f8e56d..cd05b288e 100644
--- a/users/flokli/keyboards/dilemma/default.nix
+++ b/users/flokli/keyboards/dilemma/default.nix
@@ -1,16 +1,18 @@
 { depot, pkgs, ... }:
 
 rec {
+  qmk_firmware_src = pkgs.fetchFromGitHub {
+    owner = "qmk";
+    repo = "qmk_firmware";
+    rev = "0.24.8";
+    hash = "sha256-DRHPfJXF1KF1+EwkbeGhqhVrpfp21JY2spOZxesZFbA=";
+    fetchSubmodules = true;
+  };
+
   firmware = pkgs.stdenv.mkDerivation {
     name = "keychron-bastardkb-dilemma-firmware";
 
-    src = pkgs.fetchFromGitHub {
-      owner = "qmk";
-      repo = "qmk_firmware";
-      rev = "728aa576b0cd65c6fb7cf77132fdcd06fcedb643"; # develop branch
-      hash = "sha256-YmdX8nEsB1R8d265HAmvwejPjEHJdoTnm4QNigzrcyw=";
-      fetchSubmodules = true;
-    };
+    src = qmk_firmware_src;
 
     patches = [ ./enable-taps.patch ];
 
@@ -38,7 +40,7 @@ rec {
   };
 
   flash = pkgs.writeShellScript "flash.sh" ''
-    ${pkgs.qmk}/bin/qmk flash ${firmware}/bastardkb_dilemma_3x5_3_flokli.uf2
+    QMK_HOME=${qmk_firmware_src} ${pkgs.qmk}/bin/qmk flash ${firmware}/bastardkb_dilemma_3x5_3_flokli.uf2
   '';
 
   meta.ci.targets = [ "firmware" ];
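
Pinning the checkout in qmk_firmware_src and exporting it as QMK_HOME makes qmk flash run against the exact tree the firmware was built from, instead of whatever qmk_firmware the local qmk CLI configuration points at; the k6_pro derivation below applies the same pattern.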
diff --git a/users/flokli/keyboards/k6_pro/default.nix b/users/flokli/keyboards/k6_pro/default.nix
index 708bec731..49945b88a 100644
--- a/users/flokli/keyboards/k6_pro/default.nix
+++ b/users/flokli/keyboards/k6_pro/default.nix
@@ -1,16 +1,18 @@
 { depot, pkgs, ... }:
 
 rec {
+  qmk_firmware_src = pkgs.fetchFromGitHub {
+    owner = "Keychron"; # the Keychron fork of qmk/qmk_firmware
+    repo = "qmk_firmware";
+    rev = "e0a48783e7cde92d1edfc53a8fff511c45e869d4"; # bluetooth_playground branch
+    hash = "sha256-Pk9kXktmej9JyvSt7UMEW2FDrBg7k1lOssh6HjrP5ro=";
+    fetchSubmodules = true;
+  };
+
   firmware = pkgs.stdenv.mkDerivation {
     name = "keychron-k6_pro-firmware";
 
-    src = pkgs.fetchFromGitHub {
-      owner = "Keychron"; # the Keychron fork of qmk/qmk_firmware
-      repo = "qmk_firmware";
-      rev = "e0a48783e7cde92d1edfc53a8fff511c45e869d4"; # bluetooth_playground branch
-      hash = "sha256-Pk9kXktmej9JyvSt7UMEW2FDrBg7k1lOssh6HjrP5ro=";
-      fetchSubmodules = true;
-    };
+    src = qmk_firmware_src;
 
     nativeBuildInputs = [
       pkgs.qmk
@@ -32,7 +34,7 @@ rec {
   };
 
   flash = pkgs.writeShellScript "flash.sh" ''
-    ${pkgs.qmk}/bin/qmk flash ${firmware}/keychron_k6_pro_ansi_rgb_flokli.bin
+    QMK_HOME=${qmk_firmware_src} ${pkgs.qmk}/bin/qmk flash ${firmware}/keychron_k6_pro_ansi_rgb_flokli.bin
   '';
 
   meta.ci.targets = [ "firmware" ];
diff --git a/users/tazjin/nixos/modules/physical.nix b/users/tazjin/nixos/modules/physical.nix
index bb85c6fb9..d469da7e5 100644
--- a/users/tazjin/nixos/modules/physical.nix
+++ b/users/tazjin/nixos/modules/physical.nix
@@ -24,6 +24,7 @@ in
         users.tazjin.chase-geese
         config.tazjin.emacs
         third_party.agenix.cli
+        tools.when
       ]) ++
 
       # programs from nixpkgs