Commit 2bcdde2985

Andrew Kelley <andrew@ziglang.org>
2025-10-14 08:36:44
compiler: update for introduction of std.Io
The only thing remaining is using libc DNS resolution when linking libc.
1 parent c2d1a33
lib/compiler/aro/aro/Compilation.zig
@@ -1,4 +1,5 @@
 const std = @import("std");
+const Io = std.Io;
 const assert = std.debug.assert;
 const EpochSeconds = std.time.epoch.EpochSeconds;
 const mem = std.mem;
@@ -124,6 +125,7 @@ const Compilation = @This();
 gpa: Allocator,
 /// Allocations in this arena live all the way until `Compilation.deinit`.
 arena: Allocator,
+io: Io,
 diagnostics: *Diagnostics,
 
 code_gen_options: CodeGenOptions = .default,
@@ -157,10 +159,11 @@ type_store: TypeStore = .{},
 ms_cwd_source_id: ?Source.Id = null,
 cwd: std.fs.Dir,
 
-pub fn init(gpa: Allocator, arena: Allocator, diagnostics: *Diagnostics, cwd: std.fs.Dir) Compilation {
+pub fn init(gpa: Allocator, arena: Allocator, io: Io, diagnostics: *Diagnostics, cwd: std.fs.Dir) Compilation {
     return .{
         .gpa = gpa,
         .arena = arena,
+        .io = io,
         .diagnostics = diagnostics,
         .cwd = cwd,
     };
@@ -222,14 +225,14 @@ pub const SystemDefinesMode = enum {
     include_system_defines,
 };
 
-fn generateSystemDefines(comp: *Compilation, w: *std.Io.Writer) !void {
+fn generateSystemDefines(comp: *Compilation, w: *Io.Writer) !void {
     const define = struct {
-        fn define(_w: *std.Io.Writer, name: []const u8) !void {
+        fn define(_w: *Io.Writer, name: []const u8) !void {
             try _w.print("#define {s} 1\n", .{name});
         }
     }.define;
     const defineStd = struct {
-        fn defineStd(_w: *std.Io.Writer, name: []const u8, is_gnu: bool) !void {
+        fn defineStd(_w: *Io.Writer, name: []const u8, is_gnu: bool) !void {
             if (is_gnu) {
                 try _w.print("#define {s} 1\n", .{name});
             }
@@ -957,7 +960,7 @@ fn generateSystemDefines(comp: *Compilation, w: *std.Io.Writer) !void {
 pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefinesMode) AddSourceError!Source {
     try comp.type_store.initNamedTypes(comp);
 
-    var allocating: std.Io.Writer.Allocating = try .initCapacity(comp.gpa, 2 << 13);
+    var allocating: Io.Writer.Allocating = try .initCapacity(comp.gpa, 2 << 13);
     defer allocating.deinit();
 
     comp.writeBuiltinMacros(system_defines_mode, &allocating.writer) catch |err| switch (err) {
@@ -971,7 +974,7 @@ pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefi
     return comp.addSourceFromOwnedBuffer("<builtin>", contents, .user);
 }
 
-fn writeBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefinesMode, w: *std.Io.Writer) !void {
+fn writeBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefinesMode, w: *Io.Writer) !void {
     if (system_defines_mode == .include_system_defines) {
         try w.writeAll(
             \\#define __VERSION__ "Aro
@@ -1026,7 +1029,7 @@ fn writeBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefinesMode
     }
 }
 
-fn generateFloatMacros(w: *std.Io.Writer, prefix: []const u8, semantics: target_util.FPSemantics, ext: []const u8) !void {
+fn generateFloatMacros(w: *Io.Writer, prefix: []const u8, semantics: target_util.FPSemantics, ext: []const u8) !void {
     const denormMin = semantics.chooseValue(
         []const u8,
         .{
@@ -1101,7 +1104,7 @@ fn generateFloatMacros(w: *std.Io.Writer, prefix: []const u8, semantics: target_
     try w.print("#define __{s}_MIN__ {s}{s}\n", .{ prefix, min, ext });
 }
 
-fn generateTypeMacro(comp: *const Compilation, w: *std.Io.Writer, name: []const u8, qt: QualType) !void {
+fn generateTypeMacro(comp: *const Compilation, w: *Io.Writer, name: []const u8, qt: QualType) !void {
     try w.print("#define {s} ", .{name});
     try qt.print(comp, w);
     try w.writeByte('\n');
@@ -1136,7 +1139,7 @@ fn generateFastOrLeastType(
     bits: usize,
     kind: enum { least, fast },
     signedness: std.builtin.Signedness,
-    w: *std.Io.Writer,
+    w: *Io.Writer,
 ) !void {
     const ty = comp.intLeastN(bits, signedness); // defining the fast types as the least types is permitted
 
@@ -1166,7 +1169,7 @@ fn generateFastOrLeastType(
     try comp.generateFmt(prefix, w, ty);
 }
 
-fn generateFastAndLeastWidthTypes(comp: *Compilation, w: *std.Io.Writer) !void {
+fn generateFastAndLeastWidthTypes(comp: *Compilation, w: *Io.Writer) !void {
     const sizes = [_]usize{ 8, 16, 32, 64 };
     for (sizes) |size| {
         try comp.generateFastOrLeastType(size, .least, .signed, w);
@@ -1176,7 +1179,7 @@ fn generateFastAndLeastWidthTypes(comp: *Compilation, w: *std.Io.Writer) !void {
     }
 }
 
-fn generateExactWidthTypes(comp: *Compilation, w: *std.Io.Writer) !void {
+fn generateExactWidthTypes(comp: *Compilation, w: *Io.Writer) !void {
     try comp.generateExactWidthType(w, .schar);
 
     if (QualType.short.sizeof(comp) > QualType.char.sizeof(comp)) {
@@ -1224,7 +1227,7 @@ fn generateExactWidthTypes(comp: *Compilation, w: *std.Io.Writer) !void {
     }
 }
 
-fn generateFmt(comp: *const Compilation, prefix: []const u8, w: *std.Io.Writer, qt: QualType) !void {
+fn generateFmt(comp: *const Compilation, prefix: []const u8, w: *Io.Writer, qt: QualType) !void {
     const unsigned = qt.signedness(comp) == .unsigned;
     const modifier = qt.formatModifier(comp);
     const formats = if (unsigned) "ouxX" else "di";
@@ -1233,7 +1236,7 @@ fn generateFmt(comp: *const Compilation, prefix: []const u8, w: *std.Io.Writer,
     }
 }
 
-fn generateSuffixMacro(comp: *const Compilation, prefix: []const u8, w: *std.Io.Writer, qt: QualType) !void {
+fn generateSuffixMacro(comp: *const Compilation, prefix: []const u8, w: *Io.Writer, qt: QualType) !void {
     return w.print("#define {s}_C_SUFFIX__ {s}\n", .{ prefix, qt.intValueSuffix(comp) });
 }
 
@@ -1241,7 +1244,7 @@ fn generateSuffixMacro(comp: *const Compilation, prefix: []const u8, w: *std.Io.
 ///     Name macro (e.g. #define __UINT32_TYPE__ unsigned int)
 ///     Format strings (e.g. #define __UINT32_FMTu__ "u")
 ///     Suffix macro (e.g. #define __UINT32_C_SUFFIX__ U)
-fn generateExactWidthType(comp: *Compilation, w: *std.Io.Writer, original_qt: QualType) !void {
+fn generateExactWidthType(comp: *Compilation, w: *Io.Writer, original_qt: QualType) !void {
     var qt = original_qt;
     const width = qt.sizeof(comp) * 8;
     const unsigned = qt.signedness(comp) == .unsigned;
@@ -1274,7 +1277,7 @@ pub fn hasHalfPrecisionFloatABI(comp: *const Compilation) bool {
     return comp.langopts.allow_half_args_and_returns or target_util.hasHalfPrecisionFloatABI(comp.target);
 }
 
-fn generateIntMax(comp: *const Compilation, w: *std.Io.Writer, name: []const u8, qt: QualType) !void {
+fn generateIntMax(comp: *const Compilation, w: *Io.Writer, name: []const u8, qt: QualType) !void {
     const unsigned = qt.signedness(comp) == .unsigned;
     const max: u128 = switch (qt.bitSizeof(comp)) {
         8 => if (unsigned) std.math.maxInt(u8) else std.math.maxInt(i8),
@@ -1298,7 +1301,7 @@ pub fn wcharMax(comp: *const Compilation) u32 {
     };
 }
 
-fn generateExactWidthIntMax(comp: *Compilation, w: *std.Io.Writer, original_qt: QualType) !void {
+fn generateExactWidthIntMax(comp: *Compilation, w: *Io.Writer, original_qt: QualType) !void {
     var qt = original_qt;
     const bit_count: u8 = @intCast(qt.sizeof(comp) * 8);
     const unsigned = qt.signedness(comp) == .unsigned;
@@ -1315,16 +1318,16 @@ fn generateExactWidthIntMax(comp: *Compilation, w: *std.Io.Writer, original_qt:
     return comp.generateIntMax(w, name, qt);
 }
 
-fn generateIntWidth(comp: *Compilation, w: *std.Io.Writer, name: []const u8, qt: QualType) !void {
+fn generateIntWidth(comp: *Compilation, w: *Io.Writer, name: []const u8, qt: QualType) !void {
     try w.print("#define __{s}_WIDTH__ {d}\n", .{ name, qt.sizeof(comp) * 8 });
 }
 
-fn generateIntMaxAndWidth(comp: *Compilation, w: *std.Io.Writer, name: []const u8, qt: QualType) !void {
+fn generateIntMaxAndWidth(comp: *Compilation, w: *Io.Writer, name: []const u8, qt: QualType) !void {
     try comp.generateIntMax(w, name, qt);
     try comp.generateIntWidth(w, name, qt);
 }
 
-fn generateSizeofType(comp: *Compilation, w: *std.Io.Writer, name: []const u8, qt: QualType) !void {
+fn generateSizeofType(comp: *Compilation, w: *Io.Writer, name: []const u8, qt: QualType) !void {
     try w.print("#define {s} {d}\n", .{ name, qt.sizeof(comp) });
 }
 
@@ -1805,7 +1808,7 @@ pub const IncludeType = enum {
     angle_brackets,
 };
 
-fn getPathContents(comp: *Compilation, path: []const u8, limit: std.Io.Limit) ![]u8 {
+fn getPathContents(comp: *Compilation, path: []const u8, limit: Io.Limit) ![]u8 {
     if (mem.indexOfScalar(u8, path, 0) != null) {
         return error.FileNotFound;
     }
@@ -1815,11 +1818,12 @@ fn getPathContents(comp: *Compilation, path: []const u8, limit: std.Io.Limit) ![
     return comp.getFileContents(file, limit);
 }
 
-fn getFileContents(comp: *Compilation, file: std.fs.File, limit: std.Io.Limit) ![]u8 {
+fn getFileContents(comp: *Compilation, file: std.fs.File, limit: Io.Limit) ![]u8 {
+    const io = comp.io;
     var file_buf: [4096]u8 = undefined;
-    var file_reader = file.reader(&file_buf);
+    var file_reader = file.reader(io, &file_buf);
 
-    var allocating: std.Io.Writer.Allocating = .init(comp.gpa);
+    var allocating: Io.Writer.Allocating = .init(comp.gpa);
     defer allocating.deinit();
     if (file_reader.getSize()) |size| {
         const limited_size = limit.minInt64(size);
@@ -1846,7 +1850,7 @@ pub fn findEmbed(
     includer_token_source: Source.Id,
     /// angle bracket vs quotes
     include_type: IncludeType,
-    limit: std.Io.Limit,
+    limit: Io.Limit,
     opt_dep_file: ?*DepFile,
 ) !?[]u8 {
     if (std.fs.path.isAbsolute(filename)) {
@@ -2010,8 +2014,7 @@ pub fn locSlice(comp: *const Compilation, loc: Source.Location) []const u8 {
 pub fn getSourceMTimeUncached(comp: *const Compilation, source_id: Source.Id) ?u64 {
     const source = comp.getSource(source_id);
     if (comp.cwd.statFile(source.path)) |stat| {
-        const mtime = @divTrunc(stat.mtime, std.time.ns_per_s);
-        return std.math.cast(u64, mtime);
+        return std.math.cast(u64, stat.mtime.toSeconds());
     } else |_| {
         return null;
     }
lib/std/http/Client.zig
@@ -377,17 +377,17 @@ pub const Connection = struct {
         }
     };
 
-    pub const ReadError = std.crypto.tls.Client.ReadError || Io.net.Stream.ReadError;
+    pub const ReadError = std.crypto.tls.Client.ReadError || Io.net.Stream.Reader.Error;
 
     pub fn getReadError(c: *const Connection) ?ReadError {
         return switch (c.protocol) {
             .tls => {
                 if (disable_tls) unreachable;
                 const tls: *const Tls = @alignCast(@fieldParentPtr("connection", c));
-                return tls.client.read_err orelse c.stream_reader.getError();
+                return tls.client.read_err orelse c.stream_reader.err.?;
             },
             .plain => {
-                return c.stream_reader.getError();
+                return c.stream_reader.err.?;
             },
         };
     }
lib/std/Io/Threaded.zig
@@ -1142,7 +1142,7 @@ fn dirOpenFile(
     }
     const fd: posix.fd_t = while (true) {
         try pool.checkCancel();
-        const rc = openat_sym(dir.handle, sub_path_posix, os_flags, 0);
+        const rc = openat_sym(dir.handle, sub_path_posix, os_flags, @as(posix.mode_t, 0));
         switch (posix.errno(rc)) {
             .SUCCESS => break @intCast(rc),
             .INTR => continue,
@@ -2259,10 +2259,11 @@ fn netSendMany(
         const rc = posix.system.sendmmsg(handle, clamped_msgs.ptr, @intCast(clamped_msgs.len), flags);
         switch (posix.errno(rc)) {
             .SUCCESS => {
-                for (clamped_messages[0..rc], clamped_msgs[0..rc]) |*message, *msg| {
+                const n: usize = @intCast(rc);
+                for (clamped_messages[0..n], clamped_msgs[0..n]) |*message, *msg| {
                     message.data_len = msg.len;
                 }
-                return rc;
+                return n;
             },
             .AGAIN => |err| return errnoBug(err),
             .ALREADY => return error.FastOpenAlreadyInProgress,
lib/std/tar/Writer.zig
@@ -39,6 +39,15 @@ pub fn writeDir(w: *Writer, sub_path: []const u8, options: Options) Error!void {
 
 pub const WriteFileError = Io.Writer.FileError || Error || Io.File.Reader.SizeError;
 
+pub fn writeFileTimestamp(
+    w: *Writer,
+    sub_path: []const u8,
+    file_reader: *Io.File.Reader,
+    mtime: Io.Timestamp,
+) WriteFileError!void {
+    return writeFile(w, sub_path, file_reader, @intCast(mtime.toSeconds()));
+}
+
 pub fn writeFile(
     w: *Writer,
     sub_path: []const u8,
lib/std/zig/ErrorBundle.zig
@@ -321,7 +321,6 @@ fn writeMsg(eb: ErrorBundle, err_msg: ErrorMessage, w: *Writer, indent: usize) !
 
 pub const Wip = struct {
     gpa: Allocator,
-    io: Io,
     string_bytes: std.ArrayListUnmanaged(u8),
     /// The first thing in this array is a ErrorMessageList.
     extra: std.ArrayListUnmanaged(u32),
lib/std/c.zig
@@ -4149,6 +4149,14 @@ const posix_msghdr_const = extern struct {
     flags: u32,
 };
 
+pub const mmsghdr = switch (native_os) {
+    .linux => linux.mmsghdr,
+    else => extern struct {
+        hdr: msghdr,
+        len: u32,
+    },
+};
+
 pub const cmsghdr = switch (native_os) {
     .linux => if (@bitSizeOf(usize) > @bitSizeOf(i32) and builtin.abi.isMusl()) posix_cmsghdr else linux.cmsghdr,
     // https://github.com/emscripten-core/emscripten/blob/96371ed7888fc78c040179f4d4faa82a6a07a116/system/lib/libc/musl/include/sys/socket.h#L44
@@ -10665,6 +10673,7 @@ pub extern "c" fn sendto(
     addrlen: socklen_t,
 ) isize;
 pub extern "c" fn sendmsg(sockfd: fd_t, msg: *const msghdr_const, flags: u32) isize;
+pub extern "c" fn sendmmsg(sockfd: fd_t, msgvec: [*]mmsghdr, n: c_uint, flags: u32) c_int;
 
 pub extern "c" fn recv(
     sockfd: fd_t,
lib/std/Io.zig
@@ -878,6 +878,10 @@ pub const Timestamp = struct {
         return @intCast(@divTrunc(t.nanoseconds, std.time.ns_per_s));
     }
 
+    pub fn toNanoseconds(t: Timestamp) i96 {
+        return t.nanoseconds;
+    }
+
     pub fn formatNumber(t: Timestamp, w: *std.Io.Writer, n: std.fmt.Number) std.Io.Writer.Error!void {
         return w.printInt(t.nanoseconds, n.mode.base() orelse 10, n.case, .{
             .precision = n.precision,
lib/std/posix.zig
@@ -5532,6 +5532,8 @@ pub const RealPathError = error{
     /// On Windows, the volume does not contain a recognized file system. File
     /// system drivers might not be loaded, or the volume may be corrupt.
     UnrecognizedVolume,
+
+    Canceled,
 } || UnexpectedError;
 
 /// Return the canonicalized absolute pathname.
@@ -5596,7 +5598,6 @@ pub fn realpathZ(pathname: [*:0]const u8, out_buffer: *[max_path_bytes]u8) RealP
             error.FileLocksNotSupported => unreachable,
             error.WouldBlock => unreachable,
             error.FileBusy => unreachable, // not asking for write permissions
-            error.InvalidUtf8 => unreachable, // WASI-only
             else => |e| return e,
         };
         defer close(fd);
lib/std/Uri.zig
@@ -39,7 +39,7 @@ pub const GetHostAllocError = GetHostError || error{OutOfMemory};
 ///
 /// See also:
 /// * `getHost`
-pub fn getHostAlloc(uri: Uri, arena: Allocator) GetHostAllocError![]const u8 {
+pub fn getHostAlloc(uri: Uri, arena: Allocator) GetHostAllocError!HostName {
     const component = uri.host orelse return error.UriMissingHost;
     const bytes = try component.toRawMaybeAlloc(arena);
     return .{ .bytes = bytes };
src/codegen/llvm.zig
@@ -794,10 +794,10 @@ pub const Object = struct {
     pub const EmitOptions = struct {
         pre_ir_path: ?[]const u8,
         pre_bc_path: ?[]const u8,
-        bin_path: ?[*:0]const u8,
-        asm_path: ?[*:0]const u8,
-        post_ir_path: ?[*:0]const u8,
-        post_bc_path: ?[*:0]const u8,
+        bin_path: ?[:0]const u8,
+        asm_path: ?[:0]const u8,
+        post_ir_path: ?[:0]const u8,
+        post_bc_path: ?[]const u8,
 
         is_debug: bool,
         is_small: bool,
@@ -1001,7 +1001,7 @@ pub const Object = struct {
                 options.post_ir_path == null and options.post_bc_path == null) return;
 
             if (options.post_bc_path) |path| {
-                var file = std.fs.cwd().createFileZ(path, .{}) catch |err|
+                var file = std.fs.cwd().createFile(path, .{}) catch |err|
                     return diags.fail("failed to create '{s}': {s}", .{ path, @errorName(err) });
                 defer file.close();
 
@@ -1110,8 +1110,8 @@ pub const Object = struct {
             // though it's clearly not ready and produces multiple miscompilations in our std tests.
             .allow_machine_outliner = !comp.root_mod.resolved_target.result.cpu.arch.isRISCV(),
             .asm_filename = null,
-            .bin_filename = options.bin_path,
-            .llvm_ir_filename = options.post_ir_path,
+            .bin_filename = if (options.bin_path) |x| x.ptr else null,
+            .llvm_ir_filename = if (options.post_ir_path) |x| x.ptr else null,
             .bitcode_filename = null,
 
             // `.coverage` value is only used when `.sancov` is enabled.
@@ -1158,7 +1158,7 @@ pub const Object = struct {
             lowered_options.time_report_out = &time_report_c_str;
         }
 
-        lowered_options.asm_filename = options.asm_path;
+        lowered_options.asm_filename = if (options.asm_path) |x| x.ptr else null;
         if (target_machine.emitToFile(module, &error_message, &lowered_options)) {
             defer llvm.disposeMessage(error_message);
             return diags.fail("LLVM failed to emit asm={s} bin={s} ir={s} bc={s}: {s}", .{
src/libs/freebsd.zig
@@ -426,6 +426,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
     }
 
     const gpa = comp.gpa;
+    const io = comp.io;
 
     var arena_allocator = std.heap.ArenaAllocator.init(gpa);
     defer arena_allocator.deinit();
@@ -438,6 +439,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
     // Use the global cache directory.
     var cache: Cache = .{
         .gpa = gpa,
+        .io = io,
         .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
     };
     cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
@@ -1017,6 +1019,7 @@ fn buildSharedLib(
     const tracy = trace(@src());
     defer tracy.end();
 
+    const io = comp.io;
     const basename = try std.fmt.allocPrint(arena, "lib{s}.so.{d}", .{ lib.name, lib.sover });
     const version: Version = .{ .major = lib.sover, .minor = 0, .patch = 0 };
     const ld_basename = path.basename(comp.getTarget().standardDynamicLinkerPath().get().?);
@@ -1071,7 +1074,7 @@ fn buildSharedLib(
     const misc_task: Compilation.MiscTask = .@"freebsd libc shared object";
 
     var sub_create_diag: Compilation.CreateDiagnostic = undefined;
-    const sub_compilation = Compilation.create(comp.gpa, arena, &sub_create_diag, .{
+    const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
         .dirs = comp.dirs.withoutLocalCache(),
         .thread_pool = comp.thread_pool,
         .self_exe_path = comp.self_exe_path,
src/libs/glibc.zig
@@ -666,6 +666,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
     }
 
     const gpa = comp.gpa;
+    const io = comp.io;
 
     var arena_allocator = std.heap.ArenaAllocator.init(gpa);
     defer arena_allocator.deinit();
@@ -677,6 +678,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
     // Use the global cache directory.
     var cache: Cache = .{
         .gpa = gpa,
+        .io = io,
         .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
     };
     cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
@@ -1175,6 +1177,7 @@ fn buildSharedLib(
     const tracy = trace(@src());
     defer tracy.end();
 
+    const io = comp.io;
     const basename = try std.fmt.allocPrint(arena, "lib{s}.so.{d}", .{ lib.name, lib.sover });
     const version: Version = .{ .major = lib.sover, .minor = 0, .patch = 0 };
     const ld_basename = path.basename(comp.getTarget().standardDynamicLinkerPath().get().?);
@@ -1229,7 +1232,7 @@ fn buildSharedLib(
     const misc_task: Compilation.MiscTask = .@"glibc shared object";
 
     var sub_create_diag: Compilation.CreateDiagnostic = undefined;
-    const sub_compilation = Compilation.create(comp.gpa, arena, &sub_create_diag, .{
+    const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
         .dirs = comp.dirs.withoutLocalCache(),
         .thread_pool = comp.thread_pool,
         .self_exe_path = comp.self_exe_path,
src/libs/libcxx.zig
@@ -123,6 +123,7 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();
 
+    const io = comp.io;
     const root_name = "c++";
     const output_mode = .Lib;
     const link_mode = .static;
@@ -263,7 +264,7 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
     const misc_task: Compilation.MiscTask = .libcxx;
 
     var sub_create_diag: Compilation.CreateDiagnostic = undefined;
-    const sub_compilation = Compilation.create(comp.gpa, arena, &sub_create_diag, .{
+    const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
         .dirs = comp.dirs.withoutLocalCache(),
         .self_exe_path = comp.self_exe_path,
         .cache_mode = .whole,
@@ -318,6 +319,7 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();
 
+    const io = comp.io;
     const root_name = "c++abi";
     const output_mode = .Lib;
     const link_mode = .static;
@@ -455,7 +457,7 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
     const misc_task: Compilation.MiscTask = .libcxxabi;
 
     var sub_create_diag: Compilation.CreateDiagnostic = undefined;
-    const sub_compilation = Compilation.create(comp.gpa, arena, &sub_create_diag, .{
+    const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
         .dirs = comp.dirs.withoutLocalCache(),
         .self_exe_path = comp.self_exe_path,
         .cache_mode = .whole,
src/libs/libtsan.zig
@@ -25,6 +25,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();
 
+    const io = comp.io;
     const target = comp.getTarget();
     const root_name = switch (target.os.tag) {
         // On Apple platforms, we use the same name as LLVM because the
@@ -277,7 +278,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
     const misc_task: Compilation.MiscTask = .libtsan;
 
     var sub_create_diag: Compilation.CreateDiagnostic = undefined;
-    const sub_compilation = Compilation.create(comp.gpa, arena, &sub_create_diag, .{
+    const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
         .dirs = comp.dirs.withoutLocalCache(),
         .thread_pool = comp.thread_pool,
         .self_exe_path = comp.self_exe_path,
src/libs/libunwind.zig
@@ -26,6 +26,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();
 
+    const io = comp.io;
     const output_mode = .Lib;
     const target = &comp.root_mod.resolved_target.result;
     const unwind_tables: std.builtin.UnwindTables =
@@ -143,7 +144,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
     const misc_task: Compilation.MiscTask = .libunwind;
 
     var sub_create_diag: Compilation.CreateDiagnostic = undefined;
-    const sub_compilation = Compilation.create(comp.gpa, arena, &sub_create_diag, .{
+    const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
         .dirs = comp.dirs.withoutLocalCache(),
         .self_exe_path = comp.self_exe_path,
         .config = config,
src/libs/mingw.zig
@@ -235,6 +235,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
     dev.check(.build_import_lib);
 
     const gpa = comp.gpa;
+    const io = comp.io;
 
     var arena_allocator = std.heap.ArenaAllocator.init(gpa);
     defer arena_allocator.deinit();
@@ -255,6 +256,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
     // Use the global cache directory.
     var cache: Cache = .{
         .gpa = gpa,
+        .io = io,
         .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
     };
     cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
@@ -302,7 +304,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
         .output = .{ .to_list = .{ .arena = .init(gpa) } },
     };
     defer diagnostics.deinit();
-    var aro_comp = aro.Compilation.init(gpa, arena, &diagnostics, std.fs.cwd());
+    var aro_comp = aro.Compilation.init(gpa, arena, io, &diagnostics, std.fs.cwd());
     defer aro_comp.deinit();
 
     aro_comp.target = target.*;
src/libs/musl.zig
@@ -26,6 +26,7 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
     var arena_allocator = std.heap.ArenaAllocator.init(gpa);
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();
+    const io = comp.io;
 
     switch (in_crt_file) {
         .crt1_o => {
@@ -246,7 +247,7 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
             const misc_task: Compilation.MiscTask = .@"musl libc.so";
 
             var sub_create_diag: Compilation.CreateDiagnostic = undefined;
-            const sub_compilation = Compilation.create(comp.gpa, arena, &sub_create_diag, .{
+            const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
                 .dirs = comp.dirs.withoutLocalCache(),
                 .self_exe_path = comp.self_exe_path,
                 .cache_mode = .whole,
src/libs/netbsd.zig
@@ -372,6 +372,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
     }
 
     const gpa = comp.gpa;
+    const io = comp.io;
 
     var arena_allocator = std.heap.ArenaAllocator.init(gpa);
     defer arena_allocator.deinit();
@@ -383,6 +384,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
     // Use the global cache directory.
     var cache: Cache = .{
         .gpa = gpa,
+        .io = io,
         .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
     };
     cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
@@ -680,6 +682,7 @@ fn buildSharedLib(
     const tracy = trace(@src());
     defer tracy.end();
 
+    const io = comp.io;
     const basename = try std.fmt.allocPrint(arena, "lib{s}.so.{d}", .{ lib.name, lib.sover });
     const version: Version = .{ .major = lib.sover, .minor = 0, .patch = 0 };
     const ld_basename = path.basename(comp.getTarget().standardDynamicLinkerPath().get().?);
@@ -733,7 +736,7 @@ fn buildSharedLib(
     const misc_task: Compilation.MiscTask = .@"netbsd libc shared object";
 
     var sub_create_diag: Compilation.CreateDiagnostic = undefined;
-    const sub_compilation = Compilation.create(comp.gpa, arena, &sub_create_diag, .{
+    const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
         .dirs = comp.dirs.withoutLocalCache(),
         .thread_pool = comp.thread_pool,
         .self_exe_path = comp.self_exe_path,
src/link/Wasm/Flush.zig
@@ -1064,9 +1064,14 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
     }
 
     // Finally, write the entire binary into the file.
-    const file = wasm.base.file.?;
-    try file.pwriteAll(binary_bytes.items, 0);
-    try file.setEndPos(binary_bytes.items.len);
+    var file_writer = wasm.base.file.?.writer(&.{});
+    file_writer.interface.writeAll(binary_bytes.items) catch |err| switch (err) {
+        error.WriteFailed => return file_writer.err.?,
+    };
+    file_writer.end() catch |err| switch (err) {
+        error.WriteFailed => return file_writer.err.?,
+        else => |e| return e,
+    };
 }
 
 const VirtualAddrs = struct {
src/link/Lld.zig
@@ -1614,11 +1614,9 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void {
     }
 }
 
-fn spawnLld(
-    comp: *Compilation,
-    arena: Allocator,
-    argv: []const []const u8,
-) !void {
+fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !void {
+    const io = comp.io;
+
     if (comp.verbose_link) {
         // Skip over our own name so that the LLD linker name is the first argv item.
         Compilation.dump_argv(argv[1..]);
@@ -1650,7 +1648,7 @@ fn spawnLld(
         child.stderr_behavior = .Pipe;
 
         child.spawn() catch |err| break :term err;
-        var stderr_reader = child.stderr.?.readerStreaming(&.{});
+        var stderr_reader = child.stderr.?.readerStreaming(io, &.{});
         stderr = try stderr_reader.interface.allocRemaining(comp.gpa, .unlimited);
         break :term child.wait();
     }) catch |first_err| term: {
@@ -1660,7 +1658,7 @@ fn spawnLld(
                 const rand_int = std.crypto.random.int(u64);
                 const rsp_path = "tmp" ++ s ++ std.fmt.hex(rand_int) ++ ".rsp";
 
-                const rsp_file = try comp.dirs.local_cache.handle.createFileZ(rsp_path, .{});
+                const rsp_file = try comp.dirs.local_cache.handle.createFile(rsp_path, .{});
                 defer comp.dirs.local_cache.handle.deleteFileZ(rsp_path) catch |err|
                     log.warn("failed to delete response file {s}: {s}", .{ rsp_path, @errorName(err) });
                 {
@@ -1700,7 +1698,7 @@ fn spawnLld(
                     rsp_child.stderr_behavior = .Pipe;
 
                     rsp_child.spawn() catch |err| break :err err;
-                    var stderr_reader = rsp_child.stderr.?.readerStreaming(&.{});
+                    var stderr_reader = rsp_child.stderr.?.readerStreaming(io, &.{});
                     stderr = try stderr_reader.interface.allocRemaining(comp.gpa, .unlimited);
                     break :term rsp_child.wait() catch |err| break :err err;
                 }
src/link/MachO.zig
@@ -915,7 +915,7 @@ pub fn readArMagic(file: std.fs.File, offset: usize, buffer: *[Archive.SARMAG]u8
     return buffer[0..Archive.SARMAG];
 }
 
-fn addObject(self: *MachO, path: Path, handle: File.HandleIndex, offset: u64) !void {
+fn addObject(self: *MachO, path: Path, handle_index: File.HandleIndex, offset: u64) !void {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -929,17 +929,15 @@ fn addObject(self: *MachO, path: Path, handle: File.HandleIndex, offset: u64) !v
     });
     errdefer gpa.free(abs_path);
 
-    const mtime: u64 = mtime: {
-        const file = self.getFileHandle(handle);
-        const stat = file.stat() catch break :mtime 0;
-        break :mtime @as(u64, @intCast(@divFloor(stat.mtime, 1_000_000_000)));
-    };
-    const index = @as(File.Index, @intCast(try self.files.addOne(gpa)));
+    const file = self.getFileHandle(handle_index);
+    const stat = try file.stat();
+    const mtime = stat.mtime.toSeconds();
+    const index: File.Index = @intCast(try self.files.addOne(gpa));
     self.files.set(index, .{ .object = .{
         .offset = offset,
         .path = abs_path,
-        .file_handle = handle,
-        .mtime = mtime,
+        .file_handle = handle_index,
+        .mtime = @intCast(mtime),
         .index = index,
     } });
     try self.objects.append(gpa, index);
src/link/MappedFile.zig
@@ -16,11 +16,13 @@ writers: std.SinglyLinkedList,
 
 pub const growth_factor = 4;
 
-pub const Error = std.posix.MMapError ||
-    std.posix.MRemapError ||
-    std.fs.File.SetEndPosError ||
-    std.fs.File.CopyRangeError ||
-    error{NotFile};
+pub const Error = std.posix.MMapError || std.posix.MRemapError || std.fs.File.SetEndPosError || error{
+    NotFile,
+    SystemResources,
+    IsDir,
+    Unseekable,
+    NoSpaceLeft,
+};
 
 pub fn init(file: std.fs.File, gpa: std.mem.Allocator) !MappedFile {
     var mf: MappedFile = .{
@@ -402,7 +404,7 @@ pub const Node = extern struct {
 
                     const w: *Writer = @fieldParentPtr("interface", interface);
                     const copy_size: usize = @intCast(w.mf.copyFileRange(
-                        file_reader.file,
+                        .adaptFromNewApi(file_reader.file),
                         file_reader.pos,
                         w.ni.fileLocation(w.mf, true).offset + interface.end,
                         limit.minInt(interface.unusedCapacityLen()),
src/link/Wasm.zig
@@ -3029,18 +3029,22 @@ fn openParseObjectReportingFailure(wasm: *Wasm, path: Path) void {
 fn parseObject(wasm: *Wasm, obj: link.Input.Object) !void {
     log.debug("parseObject {f}", .{obj.path});
     const gpa = wasm.base.comp.gpa;
+    const io = wasm.base.comp.io;
     const gc_sections = wasm.base.gc_sections;
 
     defer obj.file.close();
 
+    var file_reader = obj.file.reader(io, &.{});
+
     try wasm.objects.ensureUnusedCapacity(gpa, 1);
-    const stat = try obj.file.stat();
-    const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig;
+    const size = std.math.cast(usize, try file_reader.getSize()) orelse return error.FileTooBig;
 
     const file_contents = try gpa.alloc(u8, size);
     defer gpa.free(file_contents);
 
-    const n = try obj.file.preadAll(file_contents, 0);
+    const n = file_reader.interface.readSliceShort(file_contents) catch |err| switch (err) {
+        error.ReadFailed => return file_reader.err.?,
+    };
     if (n != file_contents.len) return error.UnexpectedEndOfFile;
 
     var ss: Object.ScratchSpace = .{};
@@ -3053,17 +3057,21 @@ fn parseObject(wasm: *Wasm, obj: link.Input.Object) !void {
 fn parseArchive(wasm: *Wasm, obj: link.Input.Object) !void {
     log.debug("parseArchive {f}", .{obj.path});
     const gpa = wasm.base.comp.gpa;
+    const io = wasm.base.comp.io;
     const gc_sections = wasm.base.gc_sections;
 
     defer obj.file.close();
 
-    const stat = try obj.file.stat();
-    const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig;
+    var file_reader = obj.file.reader(io, &.{});
+
+    const size = std.math.cast(usize, try file_reader.getSize()) orelse return error.FileTooBig;
 
     const file_contents = try gpa.alloc(u8, size);
     defer gpa.free(file_contents);
 
-    const n = try obj.file.preadAll(file_contents, 0);
+    const n = file_reader.interface.readSliceShort(file_contents) catch |err| switch (err) {
+        error.ReadFailed => return file_reader.err.?,
+    };
     if (n != file_contents.len) return error.UnexpectedEndOfFile;
 
     var archive = try Archive.parse(gpa, file_contents);
src/Package/Fetch.zig
@@ -26,9 +26,13 @@
 //!
 //! All of this must be done with only referring to the state inside this struct
 //! because this work will be done in a dedicated thread.
+const Fetch = @This();
 
 const builtin = @import("builtin");
+const native_os = builtin.os.tag;
+
 const std = @import("std");
+const Io = std.Io;
 const fs = std.fs;
 const assert = std.debug.assert;
 const ascii = std.ascii;
@@ -36,14 +40,13 @@ const Allocator = std.mem.Allocator;
 const Cache = std.Build.Cache;
 const ThreadPool = std.Thread.Pool;
 const WaitGroup = std.Thread.WaitGroup;
-const Fetch = @This();
 const git = @import("Fetch/git.zig");
 const Package = @import("../Package.zig");
 const Manifest = Package.Manifest;
 const ErrorBundle = std.zig.ErrorBundle;
-const native_os = builtin.os.tag;
 
 arena: std.heap.ArenaAllocator,
+io: Io,
 location: Location,
 location_tok: std.zig.Ast.TokenIndex,
 hash_tok: std.zig.Ast.OptionalTokenIndex,
@@ -323,6 +326,7 @@ pub const RunError = error{
 };
 
 pub fn run(f: *Fetch) RunError!void {
+    const io = f.io;
     const eb = &f.error_bundle;
     const arena = f.arena.allocator();
     const gpa = f.arena.child_allocator;
@@ -389,7 +393,7 @@ pub fn run(f: *Fetch) RunError!void {
 
                 const file_err = if (dir_err == error.NotDir) e: {
                     if (fs.cwd().openFile(path_or_url, .{})) |file| {
-                        var resource: Resource = .{ .file = file.reader(&server_header_buffer) };
+                        var resource: Resource = .{ .file = file.reader(io, &server_header_buffer) };
                         return f.runResource(path_or_url, &resource, null);
                     } else |err| break :e err;
                 } else dir_err;
@@ -484,7 +488,8 @@ fn runResource(
     resource: *Resource,
     remote_hash: ?Package.Hash,
 ) RunError!void {
-    defer resource.deinit();
+    const io = f.io;
+    defer resource.deinit(io);
     const arena = f.arena.allocator();
     const eb = &f.error_bundle;
     const s = fs.path.sep_str;
@@ -697,6 +702,7 @@ fn loadManifest(f: *Fetch, pkg_root: Cache.Path) RunError!void {
 }
 
 fn queueJobsForDeps(f: *Fetch) RunError!void {
+    const io = f.io;
     assert(f.job_queue.recursive);
 
     // If the package does not have a build.zig.zon file then there are no dependencies.
@@ -786,6 +792,7 @@ fn queueJobsForDeps(f: *Fetch) RunError!void {
                 f.job_queue.all_fetches.appendAssumeCapacity(new_fetch);
             }
             new_fetch.* = .{
+                .io = io,
                 .arena = std.heap.ArenaAllocator.init(gpa),
                 .location = location,
                 .location_tok = dep.location_tok,
@@ -897,9 +904,9 @@ const Resource = union(enum) {
         decompress_buffer: []u8,
     };
 
-    fn deinit(resource: *Resource) void {
+    fn deinit(resource: *Resource, io: Io) void {
         switch (resource.*) {
-            .file => |*file_reader| file_reader.file.close(),
+            .file => |*file_reader| file_reader.file.close(io),
             .http_request => |*http_request| http_request.request.deinit(),
             .git => |*git_resource| {
                 git_resource.fetch_stream.deinit();
@@ -909,7 +916,7 @@ const Resource = union(enum) {
         resource.* = undefined;
     }
 
-    fn reader(resource: *Resource) *std.Io.Reader {
+    fn reader(resource: *Resource) *Io.Reader {
         return switch (resource.*) {
             .file => |*file_reader| return &file_reader.interface,
             .http_request => |*http_request| return http_request.response.readerDecompressing(
@@ -985,6 +992,7 @@ const FileType = enum {
 const init_resource_buffer_size = git.Packet.max_data_length;
 
 fn initResource(f: *Fetch, uri: std.Uri, resource: *Resource, reader_buffer: []u8) RunError!void {
+    const io = f.io;
     const arena = f.arena.allocator();
     const eb = &f.error_bundle;
 
@@ -995,7 +1003,7 @@ fn initResource(f: *Fetch, uri: std.Uri, resource: *Resource, reader_buffer: []u
                 f.parent_package_root, path, err,
             }));
         };
-        resource.* = .{ .file = file.reader(reader_buffer) };
+        resource.* = .{ .file = file.reader(io, reader_buffer) };
         return;
     }
 
@@ -1242,7 +1250,7 @@ fn unpackResource(
     }
 }
 
-fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: *std.Io.Reader) RunError!UnpackResult {
+fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: *Io.Reader) RunError!UnpackResult {
     const eb = &f.error_bundle;
     const arena = f.arena.allocator();
 
@@ -1273,11 +1281,12 @@ fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: *std.Io.Reader) RunError!Un
     return res;
 }
 
-fn unzip(f: *Fetch, out_dir: fs.Dir, reader: *std.Io.Reader) error{ ReadFailed, OutOfMemory, FetchFailed }!UnpackResult {
+fn unzip(f: *Fetch, out_dir: fs.Dir, reader: *Io.Reader) error{ ReadFailed, OutOfMemory, FetchFailed }!UnpackResult {
     // We write the entire contents to a file first because zip files
     // must be processed back to front and they could be too large to
     // load into memory.
 
+    const io = f.io;
     const cache_root = f.job_queue.global_cache;
     const prefix = "tmp/";
     const suffix = ".zip";
@@ -1319,7 +1328,7 @@ fn unzip(f: *Fetch, out_dir: fs.Dir, reader: *std.Io.Reader) error{ ReadFailed,
             f.location_tok,
             try eb.printString("failed writing temporary zip file: {t}", .{err}),
         );
-        break :b zip_file_writer.moveToReader();
+        break :b zip_file_writer.moveToReader(io);
     };
 
     var diagnostics: std.zip.Diagnostics = .{ .allocator = f.arena.allocator() };
@@ -1339,7 +1348,10 @@ fn unzip(f: *Fetch, out_dir: fs.Dir, reader: *std.Io.Reader) error{ ReadFailed,
 }
 
 fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!UnpackResult {
+    const io = f.io;
     const arena = f.arena.allocator();
+    // TODO don't try to get a gpa from an arena. expose this dependency higher up
+    // because the backing of arena could be page allocator
     const gpa = f.arena.child_allocator;
     const object_format: git.Oid.Format = resource.want_oid;
 
@@ -1358,7 +1370,7 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!U
             const fetch_reader = &resource.fetch_stream.reader;
             _ = try fetch_reader.streamRemaining(&pack_file_writer.interface);
             try pack_file_writer.interface.flush();
-            break :b pack_file_writer.moveToReader();
+            break :b pack_file_writer.moveToReader(io);
         };
 
         var index_file = try pack_dir.createFile("pkg.idx", .{ .read = true });
@@ -1372,7 +1384,7 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!U
         }
 
         {
-            var index_file_reader = index_file.reader(&index_file_buffer);
+            var index_file_reader = index_file.reader(io, &index_file_buffer);
             const checkout_prog_node = f.prog_node.start("Checkout", 0);
             defer checkout_prog_node.end();
             var repository: git.Repository = undefined;
@@ -2029,7 +2041,7 @@ const UnpackResult = struct {
         // output errors to string
         var errors = try fetch.error_bundle.toOwnedBundle("");
         defer errors.deinit(gpa);
-        var aw: std.Io.Writer.Allocating = .init(gpa);
+        var aw: Io.Writer.Allocating = .init(gpa);
         defer aw.deinit();
         try errors.renderToWriter(.{ .ttyconf = .no_color }, &aw.writer);
         try std.testing.expectEqualStrings(
@@ -2338,7 +2350,7 @@ const TestFetchBuilder = struct {
         if (notes_len > 0) {
             try std.testing.expectEqual(notes_len, em.notes_len);
         }
-        var aw: std.Io.Writer.Allocating = .init(std.testing.allocator);
+        var aw: Io.Writer.Allocating = .init(std.testing.allocator);
         defer aw.deinit();
         try errors.renderToWriter(.{ .ttyconf = .no_color }, &aw.writer);
         try std.testing.expectEqualStrings(msg, aw.written());
src/Zcu/PerThread.zig
@@ -87,6 +87,7 @@ pub fn updateFile(
     const zcu = pt.zcu;
     const comp = zcu.comp;
     const gpa = zcu.gpa;
+    const io = comp.io;
 
     // In any case we need to examine the stat of the file to determine the course of action.
     var source_file = f: {
@@ -127,7 +128,7 @@ pub fn updateFile(
         .astgen_failure, .success => lock: {
             const unchanged_metadata =
                 stat.size == file.stat.size and
-                stat.mtime == file.stat.mtime and
+                stat.mtime.nanoseconds == file.stat.mtime.nanoseconds and
                 stat.inode == file.stat.inode;
 
             if (unchanged_metadata) {
@@ -173,8 +174,6 @@ pub fn updateFile(
             .lock = lock,
         }) catch |err| switch (err) {
             error.NotDir => unreachable, // no dir components
-            error.InvalidUtf8 => unreachable, // it's a hex encoded name
-            error.InvalidWtf8 => unreachable, // it's a hex encoded name
             error.BadPathName => unreachable, // it's a hex encoded name
             error.NameTooLong => unreachable, // it's a fixed size name
             error.PipeBusy => unreachable, // it's not a pipe
@@ -255,7 +254,7 @@ pub fn updateFile(
 
         const source = try gpa.allocSentinel(u8, @intCast(stat.size), 0);
         defer if (file.source == null) gpa.free(source);
-        var source_fr = source_file.reader(&.{});
+        var source_fr = source_file.reader(io, &.{});
         source_fr.size = stat.size;
         source_fr.interface.readSliceAll(source) catch |err| switch (err) {
             error.ReadFailed => return source_fr.err.?,
@@ -353,6 +352,7 @@ fn loadZirZoirCache(
     assert(file.getMode() == mode);
 
     const gpa = zcu.gpa;
+    const io = zcu.comp.io;
 
     const Header = switch (mode) {
         .zig => Zir.Header,
@@ -360,7 +360,7 @@ fn loadZirZoirCache(
     };
 
     var buffer: [2000]u8 = undefined;
-    var cache_fr = cache_file.reader(&buffer);
+    var cache_fr = cache_file.reader(io, &buffer);
     cache_fr.size = stat.size;
     const cache_br = &cache_fr.interface;
 
@@ -375,7 +375,7 @@ fn loadZirZoirCache(
 
     const unchanged_metadata =
         stat.size == header.stat_size and
-        stat.mtime == header.stat_mtime and
+        stat.mtime.nanoseconds == header.stat_mtime and
         stat.inode == header.stat_inode;
 
     if (!unchanged_metadata) {
@@ -2436,6 +2436,7 @@ fn updateEmbedFileInner(
     const tid = pt.tid;
     const zcu = pt.zcu;
     const gpa = zcu.gpa;
+    const io = zcu.comp.io;
     const ip = &zcu.intern_pool;
 
     var file = f: {
@@ -2450,7 +2451,7 @@ fn updateEmbedFileInner(
         const old_stat = ef.stat;
         const unchanged_metadata =
             stat.size == old_stat.size and
-            stat.mtime == old_stat.mtime and
+            stat.mtime.nanoseconds == old_stat.mtime.nanoseconds and
             stat.inode == old_stat.inode;
         if (unchanged_metadata) return;
     }
@@ -2464,7 +2465,7 @@ fn updateEmbedFileInner(
         const old_len = string_bytes.mutate.len;
         errdefer string_bytes.shrinkRetainingCapacity(old_len);
         const bytes = (try string_bytes.addManyAsSlice(size_plus_one))[0];
-        var fr = file.reader(&.{});
+        var fr = file.reader(io, &.{});
         fr.size = stat.size;
         fr.interface.readSliceAll(bytes[0..size]) catch |err| switch (err) {
             error.ReadFailed => return fr.err.?,
src/Builtin.zig
@@ -360,7 +360,7 @@ pub fn updateFileOnDisk(file: *File, comp: *Compilation) !void {
     file.stat = .{
         .size = file.source.?.len,
         .inode = 0, // dummy value
-        .mtime = 0, // dummy value
+        .mtime = .zero, // dummy value
     };
 }
 
src/Compilation.zig
@@ -55,6 +55,7 @@ gpa: Allocator,
 /// Not thread-safe - lock `mutex` if potentially accessing from multiple
 /// threads at once.
 arena: Allocator,
+io: Io,
 /// Not every Compilation compiles .zig code! For example you could do `zig build-exe foo.o`.
 zcu: ?*Zcu,
 /// Contains different state depending on the `CacheMode` used by this `Compilation`.
@@ -1077,26 +1078,26 @@ pub const CObject = struct {
             diag.* = undefined;
         }
 
-        pub fn count(diag: Diag) u32 {
+        pub fn count(diag: *const Diag) u32 {
             var total: u32 = 1;
             for (diag.sub_diags) |sub_diag| total += sub_diag.count();
             return total;
         }
 
-        pub fn addToErrorBundle(diag: Diag, eb: *ErrorBundle.Wip, bundle: Bundle, note: *u32) !void {
-            const err_msg = try eb.addErrorMessage(try diag.toErrorMessage(eb, bundle, 0));
+        pub fn addToErrorBundle(diag: *const Diag, io: Io, eb: *ErrorBundle.Wip, bundle: Bundle, note: *u32) !void {
+            const err_msg = try eb.addErrorMessage(try diag.toErrorMessage(io, eb, bundle, 0));
             eb.extra.items[note.*] = @intFromEnum(err_msg);
             note.* += 1;
-            for (diag.sub_diags) |sub_diag| try sub_diag.addToErrorBundle(eb, bundle, note);
+            for (diag.sub_diags) |sub_diag| try sub_diag.addToErrorBundle(io, eb, bundle, note);
         }
 
         pub fn toErrorMessage(
-            diag: Diag,
+            diag: *const Diag,
+            io: Io,
             eb: *ErrorBundle.Wip,
             bundle: Bundle,
             notes_len: u32,
         ) !ErrorBundle.ErrorMessage {
-            const io = eb.io;
             var start = diag.src_loc.offset;
             var end = diag.src_loc.offset;
             for (diag.src_ranges) |src_range| {
@@ -1307,14 +1308,14 @@ pub const CObject = struct {
                 return bundle;
             }
 
-            pub fn addToErrorBundle(bundle: Bundle, eb: *ErrorBundle.Wip) !void {
+            pub fn addToErrorBundle(bundle: Bundle, io: Io, eb: *ErrorBundle.Wip) !void {
                 for (bundle.diags) |diag| {
                     const notes_len = diag.count() - 1;
-                    try eb.addRootErrorMessage(try diag.toErrorMessage(eb, bundle, notes_len));
+                    try eb.addRootErrorMessage(try diag.toErrorMessage(io, eb, bundle, notes_len));
                     if (notes_len > 0) {
                         var note = try eb.reserveNotes(notes_len);
                         for (diag.sub_diags) |sub_diag|
-                            try sub_diag.addToErrorBundle(eb, bundle, &note);
+                            try sub_diag.addToErrorBundle(io, eb, bundle, &note);
                     }
                 }
             }
@@ -1906,7 +1907,7 @@ pub const CreateDiagnostic = union(enum) {
         return error.CreateFail;
     }
 };
-pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options: CreateOptions) error{
+pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, options: CreateOptions) error{
     OutOfMemory,
     Unexpected,
     CurrentWorkingDirectoryUnlinked,
@@ -2114,6 +2115,7 @@ pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options
         const cache = try arena.create(Cache);
         cache.* = .{
             .gpa = gpa,
+            .io = io,
             .manifest_dir = options.dirs.local_cache.handle.makeOpenPath("h", .{}) catch |err| {
                 return diag.fail(.{ .create_cache_path = .{ .which = .local, .sub = "h", .err = err } });
             },
@@ -2232,6 +2234,7 @@ pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options
         comp.* = .{
             .gpa = gpa,
             .arena = arena,
+            .io = io,
             .zcu = opt_zcu,
             .cache_use = undefined, // populated below
             .bin_file = null, // populated below if necessary
@@ -3919,13 +3922,14 @@ fn addBuf(list: *std.array_list.Managed([]const u8), buf: []const u8) void {
 /// This function is temporally single-threaded.
 pub fn getAllErrorsAlloc(comp: *Compilation) error{OutOfMemory}!ErrorBundle {
     const gpa = comp.gpa;
+    const io = comp.io;
 
     var bundle: ErrorBundle.Wip = undefined;
     try bundle.init(gpa);
     defer bundle.deinit();
 
     for (comp.failed_c_objects.values()) |diag_bundle| {
-        try diag_bundle.addToErrorBundle(&bundle);
+        try diag_bundle.addToErrorBundle(io, &bundle);
     }
 
     for (comp.failed_win32_resources.values()) |error_bundle| {
@@ -5310,6 +5314,7 @@ fn docsCopyModule(
     name: []const u8,
     tar_file_writer: *fs.File.Writer,
 ) !void {
+    const io = comp.io;
     const root = module.root;
     var mod_dir = d: {
         const root_dir, const sub_path = root.openInfo(comp.dirs);
@@ -5343,9 +5348,9 @@ fn docsCopyModule(
         };
         defer file.close();
         const stat = try file.stat();
-        var file_reader: fs.File.Reader = .initSize(file, &buffer, stat.size);
+        var file_reader: fs.File.Reader = .initSize(file.adaptToNewApi(), io, &buffer, stat.size);
 
-        archiver.writeFile(entry.path, &file_reader, stat.mtime) catch |err| {
+        archiver.writeFileTimestamp(entry.path, &file_reader, stat.mtime) catch |err| {
             return comp.lockAndSetMiscFailure(.docs_copy, "unable to archive {f}{s}: {t}", .{
                 root.fmt(comp), entry.path, err,
             });
@@ -5365,6 +5370,7 @@ fn workerDocsWasm(comp: *Compilation, parent_prog_node: std.Progress.Node) void
 
 fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubUpdateError!void {
     const gpa = comp.gpa;
+    const io = comp.io;
 
     var arena_allocator = std.heap.ArenaAllocator.init(gpa);
     defer arena_allocator.deinit();
@@ -5373,7 +5379,7 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU
     const optimize_mode = std.builtin.OptimizeMode.ReleaseSmall;
     const output_mode = std.builtin.OutputMode.Exe;
     const resolved_target: Package.Module.ResolvedTarget = .{
-        .result = std.zig.system.resolveTargetQuery(.{
+        .result = std.zig.system.resolveTargetQuery(io, .{
             .cpu_arch = .wasm32,
             .os_tag = .freestanding,
             .cpu_features_add = std.Target.wasm.featureSet(&.{
@@ -5449,7 +5455,7 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU
     try root_mod.deps.put(arena, "Walk", walk_mod);
 
     var sub_create_diag: CreateDiagnostic = undefined;
-    const sub_compilation = Compilation.create(gpa, arena, &sub_create_diag, .{
+    const sub_compilation = Compilation.create(gpa, arena, io, &sub_create_diag, .{
         .dirs = dirs,
         .self_exe_path = comp.self_exe_path,
         .config = config,
@@ -5667,6 +5673,8 @@ pub fn translateC(
 ) !CImportResult {
     dev.check(.translate_c_command);
 
+    const gpa = comp.gpa;
+    const io = comp.io;
     const tmp_basename = std.fmt.hex(std.crypto.random.int(u64));
     const tmp_sub_path = "tmp" ++ fs.path.sep_str ++ tmp_basename;
     const cache_dir = comp.dirs.local_cache.handle;
@@ -5706,9 +5714,9 @@ pub fn translateC(
 
         const mcpu = mcpu: {
             var buf: std.ArrayListUnmanaged(u8) = .empty;
-            defer buf.deinit(comp.gpa);
+            defer buf.deinit(gpa);
 
-            try buf.print(comp.gpa, "-mcpu={s}", .{target.cpu.model.name});
+            try buf.print(gpa, "-mcpu={s}", .{target.cpu.model.name});
 
             // TODO better serialization https://github.com/ziglang/zig/issues/4584
             const all_features_list = target.cpu.arch.allFeaturesList();
@@ -5718,7 +5726,7 @@ pub fn translateC(
                 const is_enabled = target.cpu.features.isEnabled(index);
 
                 const plus_or_minus = "-+"[@intFromBool(is_enabled)];
-                try buf.print(comp.gpa, "{c}{s}", .{ plus_or_minus, feature.name });
+                try buf.print(gpa, "{c}{s}", .{ plus_or_minus, feature.name });
             }
             break :mcpu try buf.toOwnedSlice(arena);
         };
@@ -5731,7 +5739,7 @@ pub fn translateC(
     }
 
     var stdout: []u8 = undefined;
-    try @import("main.zig").translateC(comp.gpa, arena, argv.items, prog_node, &stdout);
+    try @import("main.zig").translateC(gpa, arena, io, argv.items, prog_node, &stdout);
 
     if (out_dep_path) |dep_file_path| add_deps: {
         if (comp.verbose_cimport) log.info("processing dep file at {s}", .{dep_file_path});
@@ -5767,7 +5775,7 @@ pub fn translateC(
             fatal("unable to read {}-byte translate-c message body: {s}", .{ header.bytes_len, @errorName(err) });
         switch (header.tag) {
             .error_bundle => {
-                const error_bundle = try std.zig.Server.allocErrorBundle(comp.gpa, body);
+                const error_bundle = try std.zig.Server.allocErrorBundle(gpa, body);
                 return .{
                     .digest = undefined,
                     .cache_hit = false,
@@ -6154,6 +6162,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
     log.debug("updating C object: {s}", .{c_object.src.src_path});
 
     const gpa = comp.gpa;
+    const io = comp.io;
 
     if (c_object.clearStatus(gpa)) {
         // There was previous failure.
@@ -6353,7 +6362,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
 
                 try child.spawn();
 
-                var stderr_reader = child.stderr.?.readerStreaming(&.{});
+                var stderr_reader = child.stderr.?.readerStreaming(io, &.{});
                 const stderr = try stderr_reader.interface.allocRemaining(arena, .limited(std.math.maxInt(u32)));
 
                 const term = child.wait() catch |err| {
@@ -6362,7 +6371,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
 
                 switch (term) {
                     .Exited => |code| if (code != 0) if (out_diag_path) |diag_file_path| {
-                        const bundle = CObject.Diag.Bundle.parse(gpa, diag_file_path) catch |err| {
+                        const bundle = CObject.Diag.Bundle.parse(gpa, io, diag_file_path) catch |err| {
                             log.err("{}: failed to parse clang diagnostics: {s}", .{ err, stderr });
                             return comp.failCObj(c_object, "clang exited with code {d}", .{code});
                         };
@@ -7807,6 +7816,7 @@ fn buildOutputFromZig(
     defer tracy_trace.end();
 
     const gpa = comp.gpa;
+    const io = comp.io;
     var arena_allocator = std.heap.ArenaAllocator.init(gpa);
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();
@@ -7880,7 +7890,7 @@ fn buildOutputFromZig(
     };
 
     var sub_create_diag: CreateDiagnostic = undefined;
-    const sub_compilation = Compilation.create(gpa, arena, &sub_create_diag, .{
+    const sub_compilation = Compilation.create(gpa, arena, io, &sub_create_diag, .{
         .dirs = comp.dirs.withoutLocalCache(),
         .cache_mode = .whole,
         .parent_whole_cache = parent_whole_cache,
@@ -7948,6 +7958,7 @@ pub fn build_crt_file(
     defer tracy_trace.end();
 
     const gpa = comp.gpa;
+    const io = comp.io;
     var arena_allocator = std.heap.ArenaAllocator.init(gpa);
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();
@@ -8016,7 +8027,7 @@ pub fn build_crt_file(
     }
 
     var sub_create_diag: CreateDiagnostic = undefined;
-    const sub_compilation = Compilation.create(gpa, arena, &sub_create_diag, .{
+    const sub_compilation = Compilation.create(gpa, arena, io, &sub_create_diag, .{
         .dirs = comp.dirs.withoutLocalCache(),
         .self_exe_path = comp.self_exe_path,
         .cache_mode = .whole,
src/fmt.zig
@@ -1,4 +1,5 @@
 const std = @import("std");
+const Io = std.Io;
 const mem = std.mem;
 const fs = std.fs;
 const process = std.process;
@@ -34,13 +35,14 @@ const Fmt = struct {
     color: Color,
     gpa: Allocator,
     arena: Allocator,
+    io: Io,
     out_buffer: std.Io.Writer.Allocating,
     stdout_writer: *fs.File.Writer,
 
     const SeenMap = std.AutoHashMap(fs.File.INode, void);
 };
 
-pub fn run(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
+pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !void {
     var color: Color = .auto;
     var stdin_flag = false;
     var check_flag = false;
@@ -99,7 +101,7 @@ pub fn run(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
 
         const stdin: fs.File = .stdin();
         var stdio_buffer: [1024]u8 = undefined;
-        var file_reader: fs.File.Reader = stdin.reader(&stdio_buffer);
+        var file_reader: fs.File.Reader = stdin.reader(io, &stdio_buffer);
         const source_code = std.zig.readSourceFileToEndAlloc(gpa, &file_reader) catch |err| {
             fatal("unable to read stdin: {}", .{err});
         };
@@ -165,6 +167,7 @@ pub fn run(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
     var fmt: Fmt = .{
         .gpa = gpa,
         .arena = arena,
+        .io = io,
         .seen = .init(gpa),
         .any_error = false,
         .check_ast = check_ast_flag,
@@ -255,6 +258,8 @@ fn fmtPathFile(
     dir: fs.Dir,
     sub_path: []const u8,
 ) !void {
+    const io = fmt.io;
+
     const source_file = try dir.openFile(sub_path, .{});
     var file_closed = false;
     errdefer if (!file_closed) source_file.close();
@@ -265,7 +270,7 @@ fn fmtPathFile(
         return error.IsDir;
 
     var read_buffer: [1024]u8 = undefined;
-    var file_reader: fs.File.Reader = source_file.reader(&read_buffer);
+    var file_reader: fs.File.Reader = source_file.reader(io, &read_buffer);
     file_reader.size = stat.size;
 
     const gpa = fmt.gpa;
@@ -363,5 +368,8 @@ pub fn main() !void {
     var arena_instance = std.heap.ArenaAllocator.init(gpa);
     const arena = arena_instance.allocator();
     const args = try process.argsAlloc(arena);
-    return run(gpa, arena, args[1..]);
+    var threaded: std.Io.Threaded = .init(gpa);
+    defer threaded.deinit();
+    const io = threaded.io();
+    return run(gpa, arena, io, args[1..]);
 }
src/IncrementalDebugServer.zig
@@ -44,22 +44,24 @@ pub fn spawn(ids: *IncrementalDebugServer) void {
 }
 fn runThread(ids: *IncrementalDebugServer) void {
     const gpa = ids.zcu.gpa;
+    const io = ids.zcu.comp.io;
 
     var cmd_buf: [1024]u8 = undefined;
     var text_out: std.ArrayListUnmanaged(u8) = .empty;
     defer text_out.deinit(gpa);
 
-    const addr = std.net.Address.parseIp6("::", port) catch unreachable;
-    var server = addr.listen(.{}) catch @panic("IncrementalDebugServer: failed to listen");
-    defer server.deinit();
-    const conn = server.accept() catch @panic("IncrementalDebugServer: failed to accept");
-    defer conn.stream.close();
+    const addr: std.Io.net.IpAddress = .{ .ip6 = .loopback(port) };
+    var server = addr.listen(io, .{}) catch @panic("IncrementalDebugServer: failed to listen");
+    defer server.deinit(io);
+    var stream = server.accept(io) catch @panic("IncrementalDebugServer: failed to accept");
+    defer stream.close(io);
 
-    var stream_reader = conn.stream.reader(&cmd_buf);
+    var stream_reader = stream.reader(io, &cmd_buf);
+    var stream_writer = stream.writer(io, &.{});
 
     while (ids.running.load(.monotonic)) {
-        conn.stream.writeAll("zig> ") catch @panic("IncrementalDebugServer: failed to write");
-        const untrimmed = stream_reader.interface().takeSentinel('\n') catch |err| switch (err) {
+        stream_writer.interface.writeAll("zig> ") catch @panic("IncrementalDebugServer: failed to write");
+        const untrimmed = stream_reader.interface.takeSentinel('\n') catch |err| switch (err) {
             error.EndOfStream => break,
             else => @panic("IncrementalDebugServer: failed to read command"),
         };
@@ -72,7 +74,7 @@ fn runThread(ids: *IncrementalDebugServer) void {
         text_out.clearRetainingCapacity();
         {
             if (!ids.mutex.tryLock()) {
-                conn.stream.writeAll("waiting for in-progress update to finish...\n") catch @panic("IncrementalDebugServer: failed to write");
+                stream_writer.interface.writeAll("waiting for in-progress update to finish...\n") catch @panic("IncrementalDebugServer: failed to write");
                 ids.mutex.lock();
             }
             defer ids.mutex.unlock();
@@ -81,7 +83,7 @@ fn runThread(ids: *IncrementalDebugServer) void {
             handleCommand(ids.zcu, &allocating.writer, cmd, arg) catch @panic("IncrementalDebugServer: out of memory");
         }
         text_out.append(gpa, '\n') catch @panic("IncrementalDebugServer: out of memory");
-        conn.stream.writeAll(text_out.items) catch @panic("IncrementalDebugServer: failed to write");
+        stream_writer.interface.writeAll(text_out.items) catch @panic("IncrementalDebugServer: failed to write");
     }
     std.debug.print("closing incremental debug server\n", .{});
 }
@@ -373,6 +375,7 @@ fn printType(ty: Type, zcu: *const Zcu, w: anytype) !void {
 }
 
 const std = @import("std");
+const Io = std.Io;
 const Allocator = std.mem.Allocator;
 
 const Compilation = @import("Compilation.zig");
src/main.zig
@@ -312,7 +312,7 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
         });
     } else if (mem.eql(u8, cmd, "fmt")) {
         dev.check(.fmt_command);
-        return @import("fmt.zig").run(gpa, arena, cmd_args);
+        return @import("fmt.zig").run(gpa, arena, io, cmd_args);
     } else if (mem.eql(u8, cmd, "objcopy")) {
         return jitCmd(gpa, arena, io, cmd_args, .{
             .cmd_name = "objcopy",
@@ -376,7 +376,7 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
     } else if (build_options.enable_debug_extensions and mem.eql(u8, cmd, "changelist")) {
         return cmdChangelist(arena, io, cmd_args);
     } else if (build_options.enable_debug_extensions and mem.eql(u8, cmd, "dump-zir")) {
-        return cmdDumpZir(arena, cmd_args);
+        return cmdDumpZir(arena, io, cmd_args);
     } else if (build_options.enable_debug_extensions and mem.eql(u8, cmd, "llvm-ints")) {
         return cmdDumpLlvmInts(gpa, arena, cmd_args);
     } else {
@@ -3376,7 +3376,7 @@ fn buildOutputType(
     try create_module.rpath_list.appendSlice(arena, rpath_dedup.keys());
 
     var create_diag: Compilation.CreateDiagnostic = undefined;
-    const comp = Compilation.create(gpa, arena, &create_diag, .{
+    const comp = Compilation.create(gpa, arena, io, &create_diag, .{
         .dirs = dirs,
         .thread_pool = &thread_pool,
         .self_exe_path = switch (native_os) {
@@ -3554,7 +3554,6 @@ fn buildOutputType(
             var stdin_reader = fs.File.stdin().reader(io, &stdin_buffer);
             var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
             try serve(
-                io,
                 comp,
                 &stdin_reader.interface,
                 &stdout_writer.interface,
@@ -3581,7 +3580,6 @@ fn buildOutputType(
             var output = stream.writer(io, &stdout_buffer);
 
             try serve(
-                io,
                 comp,
                 &input.interface,
                 &output.interface,
@@ -4051,7 +4049,6 @@ fn saveState(comp: *Compilation, incremental: bool) void {
 }
 
 fn serve(
-    io: Io,
     comp: *Compilation,
     in: *Io.Reader,
     out: *Io.Writer,
@@ -4104,7 +4101,7 @@ fn serve(
                     defer arena_instance.deinit();
                     const arena = arena_instance.allocator();
                     var output: Compilation.CImportResult = undefined;
-                    try cmdTranslateC(io, comp, arena, &output, file_system_inputs, main_progress_node);
+                    try cmdTranslateC(comp, arena, &output, file_system_inputs, main_progress_node);
                     defer output.deinit(gpa);
 
                     if (file_system_inputs.items.len != 0) {
@@ -4537,6 +4534,8 @@ fn cmdTranslateC(
 ) !void {
     dev.check(.translate_c_command);
 
+    const io = comp.io;
+
     assert(comp.c_source_files.len == 1);
     const c_source_file = comp.c_source_files[0];
 
@@ -4600,7 +4599,7 @@ fn cmdTranslateC(
         };
         defer zig_file.close();
         var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
-        var file_reader = zig_file.reader(&.{});
+        var file_reader = zig_file.reader(io, &.{});
         _ = try stdout_writer.interface.sendFileAll(&file_reader, .unlimited);
         try stdout_writer.interface.flush();
         return cleanExit();
@@ -5156,6 +5155,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
 
                 var fetch: Package.Fetch = .{
                     .arena = std.heap.ArenaAllocator.init(gpa),
+                    .io = io,
                     .location = .{ .relative_path = phantom_package_root },
                     .location_tok = 0,
                     .hash_tok = .none,
@@ -5278,7 +5278,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8)
             try root_mod.deps.put(arena, "@build", build_mod);
 
             var create_diag: Compilation.CreateDiagnostic = undefined;
-            const comp = Compilation.create(gpa, arena, &create_diag, .{
+            const comp = Compilation.create(gpa, arena, io, &create_diag, .{
                 .libc_installation = libc_installation,
                 .dirs = dirs,
                 .root_name = "build",
@@ -5522,7 +5522,7 @@ fn jitCmd(
         }
 
         var create_diag: Compilation.CreateDiagnostic = undefined;
-        const comp = Compilation.create(gpa, arena, &create_diag, .{
+        const comp = Compilation.create(gpa, arena, io, &create_diag, .{
             .dirs = dirs,
             .root_name = options.cmd_name,
             .config = config,
@@ -6400,10 +6400,7 @@ fn cmdDumpLlvmInts(
 }
 
 /// This is only enabled for debug builds.
-fn cmdDumpZir(
-    arena: Allocator,
-    args: []const []const u8,
-) !void {
+fn cmdDumpZir(arena: Allocator, io: Io, args: []const []const u8) !void {
     dev.check(.dump_zir_command);
 
     const Zir = std.zig.Zir;
@@ -6415,7 +6412,7 @@ fn cmdDumpZir(
     };
     defer f.close();
 
-    const zir = try Zcu.loadZirCache(arena, f);
+    const zir = try Zcu.loadZirCache(arena, io, f);
     var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
     const stdout_bw = &stdout_writer.interface;
     {
@@ -6914,6 +6911,7 @@ fn cmdFetch(
 
     var fetch: Package.Fetch = .{
         .arena = std.heap.ArenaAllocator.init(gpa),
+        .io = io,
         .location = .{ .path_or_url = path_or_url },
         .location_tok = 0,
         .hash_tok = .none,
src/Zcu.zig
@@ -4,9 +4,12 @@
 //!
 //! Each `Compilation` has exactly one or zero `Zcu`, depending on whether
 //! there is or is not any zig source code, respectively.
+const Zcu = @This();
+const builtin = @import("builtin");
 
 const std = @import("std");
-const builtin = @import("builtin");
+const Io = std.Io;
+const Writer = std.Io.Writer;
 const mem = std.mem;
 const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;
@@ -15,9 +18,7 @@ const BigIntConst = std.math.big.int.Const;
 const BigIntMutable = std.math.big.int.Mutable;
 const Target = std.Target;
 const Ast = std.zig.Ast;
-const Writer = std.Io.Writer;
 
-const Zcu = @This();
 const Compilation = @import("Compilation.zig");
 const Cache = std.Build.Cache;
 pub const Value = @import("Value.zig");
@@ -1037,10 +1038,15 @@ pub const File = struct {
         stat: Cache.File.Stat,
     };
 
-    pub const GetSourceError = error{ OutOfMemory, FileTooBig } || std.fs.File.OpenError || std.fs.File.ReadError;
+    pub const GetSourceError = error{
+        OutOfMemory,
+        FileTooBig,
+        Streaming,
+    } || std.fs.File.OpenError || std.fs.File.ReadError;
 
     pub fn getSource(file: *File, zcu: *const Zcu) GetSourceError!Source {
         const gpa = zcu.gpa;
+        const io = zcu.comp.io;
 
         if (file.source) |source| return .{
             .bytes = source,
@@ -1061,7 +1067,7 @@ pub const File = struct {
         const source = try gpa.allocSentinel(u8, @intCast(stat.size), 0);
         errdefer gpa.free(source);
 
-        var file_reader = f.reader(&.{});
+        var file_reader = f.reader(io, &.{});
         file_reader.size = stat.size;
         file_reader.interface.readSliceAll(source) catch return file_reader.err.?;
 
@@ -2859,9 +2865,9 @@ comptime {
     }
 }
 
-pub fn loadZirCache(gpa: Allocator, cache_file: std.fs.File) !Zir {
+pub fn loadZirCache(gpa: Allocator, io: Io, cache_file: std.fs.File) !Zir {
     var buffer: [2000]u8 = undefined;
-    var file_reader = cache_file.reader(&buffer);
+    var file_reader = cache_file.reader(io, &buffer);
     return result: {
         const header = file_reader.interface.takeStructPointer(Zir.Header) catch |err| break :result err;
         break :result loadZirCacheBody(gpa, header.*, &file_reader.interface);
@@ -2871,7 +2877,7 @@ pub fn loadZirCache(gpa: Allocator, cache_file: std.fs.File) !Zir {
     };
 }
 
-pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *std.Io.Reader) !Zir {
+pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *Io.Reader) !Zir {
     var instructions: std.MultiArrayList(Zir.Inst) = .{};
     errdefer instructions.deinit(gpa);
 
@@ -2940,7 +2946,7 @@ pub fn saveZirCache(gpa: Allocator, cache_file: std.fs.File, stat: std.fs.File.S
 
         .stat_size = stat.size,
         .stat_inode = stat.inode,
-        .stat_mtime = stat.mtime,
+        .stat_mtime = stat.mtime.toNanoseconds(),
     };
     var vecs = [_][]const u8{
         @ptrCast((&header)[0..1]),
@@ -2969,7 +2975,7 @@ pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir
 
         .stat_size = stat.size,
         .stat_inode = stat.inode,
-        .stat_mtime = stat.mtime,
+        .stat_mtime = stat.mtime.toNanoseconds(),
     };
     var vecs = [_][]const u8{
         @ptrCast((&header)[0..1]),
@@ -2988,7 +2994,7 @@ pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir
     };
 }
 
-pub fn loadZoirCacheBody(gpa: Allocator, header: Zoir.Header, cache_br: *std.Io.Reader) !Zoir {
+pub fn loadZoirCacheBody(gpa: Allocator, header: Zoir.Header, cache_br: *Io.Reader) !Zoir {
     var zoir: Zoir = .{
         .nodes = .empty,
         .extra = &.{},
@@ -4283,7 +4289,7 @@ const FormatAnalUnit = struct {
     zcu: *Zcu,
 };
 
-fn formatAnalUnit(data: FormatAnalUnit, writer: *std.Io.Writer) std.Io.Writer.Error!void {
+fn formatAnalUnit(data: FormatAnalUnit, writer: *Io.Writer) Io.Writer.Error!void {
     const zcu = data.zcu;
     const ip = &zcu.intern_pool;
     switch (data.unit.unwrap()) {
@@ -4309,7 +4315,7 @@ fn formatAnalUnit(data: FormatAnalUnit, writer: *std.Io.Writer) std.Io.Writer.Er
 
 const FormatDependee = struct { dependee: InternPool.Dependee, zcu: *Zcu };
 
-fn formatDependee(data: FormatDependee, writer: *std.Io.Writer) std.Io.Writer.Error!void {
+fn formatDependee(data: FormatDependee, writer: *Io.Writer) Io.Writer.Error!void {
     const zcu = data.zcu;
     const ip = &zcu.intern_pool;
     switch (data.dependee) {
BRANCH_TODO
@@ -10,6 +10,7 @@
 * move max_iovecs_len to std.Io
 * address the cancelation race condition (signal received between checkCancel and syscall)
 * update signal values to be an enum
+* delete the deprecated fs.File functions
 * move fs.File.Writer to Io
 * add non-blocking flag to net and fs operations, handle EAGAIN
 * finish moving std.fs to Io