Commit 9a0970a12b

Andrew Kelley <andrew@ziglang.org>
2025-08-30 05:19:23
rework std.Io.Writer.Allocating to support runtime-known alignment
Also, breaking API changes to: * std.fs.Dir.readFileAlloc * std.fs.Dir.readFileAllocOptions
1 parent 79f267f
lib/compiler/reduce.zig
@@ -398,10 +398,9 @@ fn transformationsToFixups(
 
 fn parse(gpa: Allocator, file_path: []const u8) !Ast {
     const source_code = std.fs.cwd().readFileAllocOptions(
-        gpa,
         file_path,
-        std.math.maxInt(u32),
-        null,
+        gpa,
+        .limited(std.math.maxInt(u32)),
         .fromByteUnits(1),
         0,
     ) catch |err| {
lib/compiler/std-docs.zig
@@ -173,7 +173,7 @@ fn serveDocsFile(
     // The desired API is actually sendfile, which will require enhancing std.http.Server.
     // We load the file with every request so that the user can make changes to the file
     // and refresh the HTML page without restarting this server.
-    const file_contents = try context.lib_dir.readFileAlloc(gpa, name, 10 * 1024 * 1024);
+    const file_contents = try context.lib_dir.readFileAlloc(name, gpa, .limited(10 * 1024 * 1024));
     defer gpa.free(file_contents);
     try request.respond(file_contents, .{
         .extra_headers = &.{
@@ -263,7 +263,7 @@ fn serveWasm(
     });
     // std.http.Server does not have a sendfile API yet.
     const bin_path = try wasm_base_path.join(arena, bin_name);
-    const file_contents = try bin_path.root_dir.handle.readFileAlloc(gpa, bin_path.sub_path, 10 * 1024 * 1024);
+    const file_contents = try bin_path.root_dir.handle.readFileAlloc(bin_path.sub_path, gpa, .limited(10 * 1024 * 1024));
     defer gpa.free(file_contents);
     try request.respond(file_contents, .{
         .extra_headers = &.{
lib/std/Build/Step/CheckFile.zig
@@ -53,7 +53,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
     try step.singleUnchangingWatchInput(check_file.source);
 
     const src_path = check_file.source.getPath2(b, step);
-    const contents = fs.cwd().readFileAlloc(b.allocator, src_path, check_file.max_bytes) catch |err| {
+    const contents = fs.cwd().readFileAlloc(src_path, b.allocator, .limited(check_file.max_bytes)) catch |err| {
         return step.fail("unable to read '{s}': {s}", .{
             src_path, @errorName(err),
         });
lib/std/Build/Step/CheckObject.zig
@@ -553,14 +553,13 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void {
 
     const src_path = check_object.source.getPath3(b, step);
     const contents = src_path.root_dir.handle.readFileAllocOptions(
-        gpa,
         src_path.sub_path,
-        check_object.max_bytes,
-        null,
+        gpa,
+        .limited(check_object.max_bytes),
         .of(u64),
         null,
-    ) catch |err| return step.fail("unable to read '{f}': {s}", .{
-        std.fmt.alt(src_path, .formatEscapeChar), @errorName(err),
+    ) catch |err| return step.fail("unable to read '{f}': {t}", .{
+        std.fmt.alt(src_path, .formatEscapeChar), err,
     });
 
     var vars: std.StringHashMap(u64) = .init(gpa);
lib/std/Build/Step/ConfigHeader.zig
@@ -208,7 +208,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
         .autoconf_undef, .autoconf_at => |file_source| {
             try bw.writeAll(c_generated_line);
             const src_path = file_source.getPath2(b, step);
-            const contents = std.fs.cwd().readFileAlloc(arena, src_path, config_header.max_bytes) catch |err| {
+            const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| {
                 return step.fail("unable to read autoconf input file '{s}': {s}", .{
                     src_path, @errorName(err),
                 });
@@ -222,7 +222,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
         .cmake => |file_source| {
             try bw.writeAll(c_generated_line);
             const src_path = file_source.getPath2(b, step);
-            const contents = std.fs.cwd().readFileAlloc(arena, src_path, config_header.max_bytes) catch |err| {
+            const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| {
                 return step.fail("unable to read cmake input file '{s}': {s}", .{
                     src_path, @errorName(err),
                 });
lib/std/Build/Cache.zig
@@ -1056,7 +1056,7 @@ pub const Manifest = struct {
 
     fn addDepFileMaybePost(self: *Manifest, dir: fs.Dir, dep_file_basename: []const u8) !void {
         const gpa = self.cache.gpa;
-        const dep_file_contents = try dir.readFileAlloc(gpa, dep_file_basename, manifest_file_size_max);
+        const dep_file_contents = try dir.readFileAlloc(dep_file_basename, gpa, .limited(manifest_file_size_max));
         defer gpa.free(dep_file_contents);
 
         var error_buf: std.ArrayListUnmanaged(u8) = .empty;
lib/std/Build/WebServer.zig
@@ -446,7 +446,7 @@ pub fn serveFile(
     // The desired API is actually sendfile, which will require enhancing http.Server.
     // We load the file with every request so that the user can make changes to the file
     // and refresh the HTML page without restarting this server.
-    const file_contents = path.root_dir.handle.readFileAlloc(gpa, path.sub_path, 10 * 1024 * 1024) catch |err| {
+    const file_contents = path.root_dir.handle.readFileAlloc(path.sub_path, gpa, .limited(10 * 1024 * 1024)) catch |err| {
         log.err("failed to read '{f}': {s}", .{ path, @errorName(err) });
         return error.AlreadyReported;
     };
lib/std/debug/Dwarf/expression.zig
@@ -851,7 +851,7 @@ pub fn Builder(comptime options: Options) type {
                     },
                     .signed => {
                         try writer.writeByte(OP.consts);
-                        try leb.writeIleb128(writer, value);
+                        try writer.writeLeb128(value);
                     },
                 },
             }
@@ -885,19 +885,19 @@ pub fn Builder(comptime options: Options) type {
         // 2.5.1.2: Register Values
         pub fn writeFbreg(writer: *Writer, offset: anytype) !void {
             try writer.writeByte(OP.fbreg);
-            try leb.writeIleb128(writer, offset);
+            try writer.writeSleb128(offset);
         }
 
         pub fn writeBreg(writer: *Writer, register: u8, offset: anytype) !void {
             if (register > 31) return error.InvalidRegister;
             try writer.writeByte(OP.breg0 + register);
-            try leb.writeIleb128(writer, offset);
+            try writer.writeSleb128(offset);
         }
 
         pub fn writeBregx(writer: *Writer, register: anytype, offset: anytype) !void {
             try writer.writeByte(OP.bregx);
             try writer.writeUleb128(register);
-            try leb.writeIleb128(writer, offset);
+            try writer.writeSleb128(offset);
         }
 
         pub fn writeRegvalType(writer: *Writer, register: anytype, offset: anytype) !void {
lib/std/fs/Dir.zig
@@ -1977,41 +1977,59 @@ pub fn readFile(self: Dir, file_path: []const u8, buffer: []u8) ![]u8 {
     return buffer[0..end_index];
 }
 
-/// On success, caller owns returned buffer.
-/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
-/// On Windows, `file_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
-/// On WASI, `file_path` should be encoded as valid UTF-8.
-/// On other platforms, `file_path` is an opaque sequence of bytes with no particular encoding.
-pub fn readFileAlloc(self: Dir, allocator: mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
-    return self.readFileAllocOptions(allocator, file_path, max_bytes, null, .of(u8), null);
+pub const ReadFileAllocError = File.OpenError || File.ReadError || Allocator.Error || error{
+    /// File size reached or exceeded the provided limit.
+    StreamTooLong,
+};
+
+/// Reads all the bytes from the named file. On success, caller owns returned
+/// buffer.
+///
+/// If the file size is already known, a better alternative is to initialize a
+/// `File.Reader`.
+///
+/// If the file size cannot be obtained, an error is returned. If
+/// this is a realistic possibility, a better alternative is to initialize a
+/// `File.Reader` which handles this seamlessly.
+pub fn readFileAlloc(
+    dir: Dir,
+    /// On Windows, should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
+    /// On WASI, should be encoded as valid UTF-8.
+    /// On other platforms, an opaque sequence of bytes with no particular encoding.
+    sub_path: []const u8,
+    /// Used to allocate the result.
+    gpa: Allocator,
+    /// If reached or exceeded, `error.StreamTooLong` is returned instead.
+    limit: std.Io.Limit,
+) ReadFileAllocError![]u8 {
+    return readFileAllocOptions(dir, sub_path, gpa, limit, .of(u8), null);
 }
 
-/// On success, caller owns returned buffer.
-/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
-/// If `size_hint` is specified the initial buffer size is calculated using
-/// that value, otherwise the effective file size is used instead.
-/// Allows specifying alignment and a sentinel value.
-/// On Windows, `file_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
-/// On WASI, `file_path` should be encoded as valid UTF-8.
-/// On other platforms, `file_path` is an opaque sequence of bytes with no particular encoding.
+/// Reads all the bytes from the named file. On success, caller owns returned
+/// buffer.
+///
+/// If the file size is already known, a better alternative is to initialize a
+/// `File.Reader`.
 pub fn readFileAllocOptions(
-    self: Dir,
-    allocator: mem.Allocator,
-    file_path: []const u8,
-    max_bytes: usize,
-    size_hint: ?usize,
+    dir: Dir,
+    /// On Windows, should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
+    /// On WASI, should be encoded as valid UTF-8.
+    /// On other platforms, an opaque sequence of bytes with no particular encoding.
+    sub_path: []const u8,
+    /// Used to allocate the result.
+    gpa: Allocator,
+    /// If reached or exceeded, `error.StreamTooLong` is returned instead.
+    limit: std.Io.Limit,
     comptime alignment: std.mem.Alignment,
-    comptime optional_sentinel: ?u8,
-) !(if (optional_sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) {
-    var file = try self.openFile(file_path, .{});
+    comptime sentinel: ?u8,
+) ReadFileAllocError!(if (sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) {
+    var file = try dir.openFile(sub_path, .{});
     defer file.close();
-
-    // If the file size doesn't fit a usize it'll be certainly greater than
-    // `max_bytes`
-    const stat_size = size_hint orelse std.math.cast(usize, try file.getEndPos()) orelse
-        return error.FileTooBig;
-
-    return file.readToEndAllocOptions(allocator, max_bytes, stat_size, alignment, optional_sentinel);
+    var file_reader = file.reader(&.{});
+    return file_reader.interface.allocRemainingAlignedSentinel(gpa, limit, alignment, sentinel) catch |err| switch (err) {
+        error.ReadFailed => return file_reader.err.?,
+        error.OutOfMemory, error.StreamTooLong => |e| return e,
+    };
 }
 
 pub const DeleteTreeError = error{
lib/std/fs/test.zig
@@ -676,37 +676,47 @@ test "Dir.realpath smoke test" {
     }.impl);
 }
 
-test "readAllAlloc" {
+test "readFileAlloc" {
     var tmp_dir = tmpDir(.{});
     defer tmp_dir.cleanup();
 
     var file = try tmp_dir.dir.createFile("test_file", .{ .read = true });
     defer file.close();
 
-    const buf1 = try file.readToEndAlloc(testing.allocator, 1024);
+    const buf1 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(1024));
     defer testing.allocator.free(buf1);
-    try testing.expectEqual(@as(usize, 0), buf1.len);
+    try testing.expectEqualStrings("", buf1);
 
     const write_buf: []const u8 = "this is a test.\nthis is a test.\nthis is a test.\nthis is a test.\n";
     try file.writeAll(write_buf);
-    try file.seekTo(0);
-
-    // max_bytes > file_size
-    const buf2 = try file.readToEndAlloc(testing.allocator, 1024);
-    defer testing.allocator.free(buf2);
-    try testing.expectEqual(write_buf.len, buf2.len);
-    try testing.expectEqualStrings(write_buf, buf2);
-    try file.seekTo(0);
-
-    // max_bytes == file_size
-    const buf3 = try file.readToEndAlloc(testing.allocator, write_buf.len);
-    defer testing.allocator.free(buf3);
-    try testing.expectEqual(write_buf.len, buf3.len);
-    try testing.expectEqualStrings(write_buf, buf3);
-    try file.seekTo(0);
+
+    {
+        // max_bytes > file_size
+        const buf2 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(1024));
+        defer testing.allocator.free(buf2);
+        try testing.expectEqualStrings(write_buf, buf2);
+    }
+
+    {
+        // max_bytes == file_size
+        try testing.expectError(
+            error.StreamTooLong,
+            tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(write_buf.len)),
+        );
+    }
+
+    {
+        // max_bytes == file_size + 1
+        const buf2 = try tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(write_buf.len + 1));
+        defer testing.allocator.free(buf2);
+        try testing.expectEqualStrings(write_buf, buf2);
+    }
 
     // max_bytes < file_size
-    try testing.expectError(error.FileTooBig, file.readToEndAlloc(testing.allocator, write_buf.len - 1));
+    try testing.expectError(
+        error.StreamTooLong,
+        tmp_dir.dir.readFileAlloc("test_file", testing.allocator, .limited(write_buf.len - 1)),
+    );
 }
 
 test "Dir.statFile" {
@@ -778,16 +788,16 @@ test "file operations on directories" {
             switch (native_os) {
                 .dragonfly, .netbsd => {
                     // no error when reading a directory. See https://github.com/ziglang/zig/issues/5732
-                    const buf = try ctx.dir.readFileAlloc(testing.allocator, test_dir_name, std.math.maxInt(usize));
+                    const buf = try ctx.dir.readFileAlloc(test_dir_name, testing.allocator, .unlimited);
                     testing.allocator.free(buf);
                 },
                 .wasi => {
                     // WASI return EBADF, which gets mapped to NotOpenForReading.
                     // See https://github.com/bytecodealliance/wasmtime/issues/1935
-                    try testing.expectError(error.NotOpenForReading, ctx.dir.readFileAlloc(testing.allocator, test_dir_name, std.math.maxInt(usize)));
+                    try testing.expectError(error.NotOpenForReading, ctx.dir.readFileAlloc(test_dir_name, testing.allocator, .unlimited));
                 },
                 else => {
-                    try testing.expectError(error.IsDir, ctx.dir.readFileAlloc(testing.allocator, test_dir_name, std.math.maxInt(usize)));
+                    try testing.expectError(error.IsDir, ctx.dir.readFileAlloc(test_dir_name, testing.allocator, .unlimited));
                 },
             }
 
@@ -1564,7 +1574,7 @@ test "copyFile" {
 }
 
 fn expectFileContents(dir: Dir, file_path: []const u8, data: []const u8) !void {
-    const contents = try dir.readFileAlloc(testing.allocator, file_path, 1000);
+    const contents = try dir.readFileAlloc(file_path, testing.allocator, .limited(1000));
     defer testing.allocator.free(contents);
 
     try testing.expectEqualSlices(u8, data, contents);
@@ -1587,7 +1597,7 @@ test "AtomicFile" {
                 try af.file_writer.interface.writeAll(test_content);
                 try af.finish();
             }
-            const content = try ctx.dir.readFileAlloc(allocator, test_out_file, 9999);
+            const content = try ctx.dir.readFileAlloc(test_out_file, allocator, .limited(9999));
             try testing.expectEqualStrings(test_content, content);
 
             try ctx.dir.deleteFile(test_out_file);
@@ -2004,7 +2014,7 @@ test "invalid UTF-8/WTF-8 paths" {
             }
 
             try testing.expectError(expected_err, ctx.dir.readFile(invalid_path, &[_]u8{}));
-            try testing.expectError(expected_err, ctx.dir.readFileAlloc(testing.allocator, invalid_path, 0));
+            try testing.expectError(expected_err, ctx.dir.readFileAlloc(invalid_path, testing.allocator, .limited(0)));
 
             try testing.expectError(expected_err, ctx.dir.deleteTree(invalid_path));
             try testing.expectError(expected_err, ctx.dir.deleteTreeMinStackSize(invalid_path));
lib/std/Io/Reader.zig
@@ -292,6 +292,23 @@ pub fn allocRemaining(r: *Reader, gpa: Allocator, limit: Limit) LimitedAllocErro
     return buffer.toOwnedSlice(gpa);
 }
 
+pub fn allocRemainingAlignedSentinel(
+    r: *Reader,
+    gpa: Allocator,
+    limit: Limit,
+    comptime alignment: std.mem.Alignment,
+    comptime sentinel: ?u8,
+) LimitedAllocError!(if (sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) {
+    var buffer: std.array_list.Aligned(u8, alignment) = .empty;
+    defer buffer.deinit(gpa);
+    try appendRemainingAligned(r, gpa, alignment, &buffer, limit);
+    if (sentinel) |s| {
+        return buffer.toOwnedSliceSentinel(gpa, s);
+    } else {
+        return buffer.toOwnedSlice(gpa);
+    }
+}
+
 /// Transfers all bytes from the current position to the end of the stream, up
 /// to `limit`, appending them to `list`.
 ///
@@ -308,15 +325,30 @@ pub fn appendRemaining(
     list: *ArrayList(u8),
     limit: Limit,
 ) LimitedAllocError!void {
-    var a: std.Io.Writer.Allocating = .initOwnedSlice(gpa, list.allocatedSlice());
-    a.writer.end = list.items.len;
-    list.* = .empty;
-    defer {
-        list.* = .{
-            .items = a.writer.buffer[0..a.writer.end],
-            .capacity = a.writer.buffer.len,
-        };
-    }
+    return appendRemainingAligned(r, gpa, .of(u8), list, limit);
+}
+
+/// Transfers all bytes from the current position to the end of the stream, up
+/// to `limit`, appending them to `list`.
+///
+/// If `limit` is reached or exceeded, `error.StreamTooLong` is returned
+/// instead. In such a case, the next byte that would be read will be the first
+/// one to exceed `limit`, and all preceding bytes have been appended to
+/// `list`.
+///
+/// See also:
+/// * `appendRemaining`
+/// * `allocRemainingAligned`
+pub fn appendRemainingAligned(
+    r: *Reader,
+    gpa: Allocator,
+    comptime alignment: std.mem.Alignment,
+    list: *std.array_list.Aligned(u8, alignment),
+    limit: Limit,
+) LimitedAllocError!void {
+    var a = std.Io.Writer.Allocating.fromArrayListAligned(gpa, alignment, list);
+    defer list.* = a.toArrayListAligned(alignment);
+
     var remaining = limit;
     while (remaining.nonzero()) {
         const n = stream(r, &a.writer, remaining) catch |err| switch (err) {
lib/std/Io/Writer.zig
@@ -2531,13 +2531,14 @@ pub fn Hashing(comptime Hasher: type) type {
 /// Maintains `Writer` state such that it writes to the unused capacity of an
 /// array list, filling it up completely before making a call through the
 /// vtable, causing a resize. Consequently, the same, optimized, non-generic
-/// machine code that uses `std.Io.Reader`, such as formatted printing, takes
+/// machine code that uses `Writer`, such as formatted printing, takes
 /// the hot paths when using this API.
 ///
 /// When using this API, it is not necessary to call `flush`.
 pub const Allocating = struct {
     allocator: Allocator,
     writer: Writer,
+    alignment: std.mem.Alignment,
 
     pub fn init(allocator: Allocator) Allocating {
         return .{
@@ -2546,6 +2547,7 @@ pub const Allocating = struct {
                 .buffer = &.{},
                 .vtable = &vtable,
             },
+            .alignment = .of(u8),
         };
     }
 
@@ -2553,24 +2555,47 @@ pub const Allocating = struct {
         return .{
             .allocator = allocator,
             .writer = .{
-                .buffer = try allocator.alloc(u8, capacity),
+                .buffer = if (capacity == 0)
+                    &.{}
+                else
+                    (allocator.rawAlloc(capacity, .of(u8), @returnAddress()) orelse
+                        return error.OutOfMemory)[0..capacity],
                 .vtable = &vtable,
             },
+            .alignment = .of(u8),
         };
     }
 
     pub fn initOwnedSlice(allocator: Allocator, slice: []u8) Allocating {
+        return initOwnedSliceAligned(allocator, .of(u8), slice);
+    }
+
+    pub fn initOwnedSliceAligned(
+        allocator: Allocator,
+        comptime alignment: std.mem.Alignment,
+        slice: []align(alignment.toByteUnits()) u8,
+    ) Allocating {
         return .{
             .allocator = allocator,
             .writer = .{
                 .buffer = slice,
                 .vtable = &vtable,
             },
+            .alignment = alignment,
         };
     }
 
     /// Replaces `array_list` with empty, taking ownership of the memory.
     pub fn fromArrayList(allocator: Allocator, array_list: *ArrayList(u8)) Allocating {
+        return fromArrayListAligned(allocator, .of(u8), array_list);
+    }
+
+    /// Replaces `array_list` with empty, taking ownership of the memory.
+    pub fn fromArrayListAligned(
+        allocator: Allocator,
+        comptime alignment: std.mem.Alignment,
+        array_list: *std.array_list.Aligned(u8, alignment),
+    ) Allocating {
         defer array_list.* = .empty;
         return .{
             .allocator = allocator,
@@ -2579,6 +2604,7 @@ pub const Allocating = struct {
                 .buffer = array_list.allocatedSlice(),
                 .end = array_list.items.len,
             },
+            .alignment = alignment,
         };
     }
 
@@ -2590,15 +2616,26 @@ pub const Allocating = struct {
     };
 
     pub fn deinit(a: *Allocating) void {
-        a.allocator.free(a.writer.buffer);
+        if (a.writer.buffer.len == 0) return;
+        a.allocator.rawFree(a.writer.buffer, a.alignment, @returnAddress());
         a.* = undefined;
     }
 
     /// Returns an array list that takes ownership of the allocated memory.
     /// Resets the `Allocating` to an empty state.
     pub fn toArrayList(a: *Allocating) ArrayList(u8) {
+        return toArrayListAligned(a, .of(u8));
+    }
+
+    /// Returns an array list that takes ownership of the allocated memory.
+    /// Resets the `Allocating` to an empty state.
+    pub fn toArrayListAligned(
+        a: *Allocating,
+        comptime alignment: std.mem.Alignment,
+    ) std.array_list.Aligned(u8, alignment) {
+        assert(a.alignment == alignment); // Required for Allocator correctness.
         const w = &a.writer;
-        const result: ArrayList(u8) = .{
+        const result: std.array_list.Aligned(u8, alignment) = .{
             .items = w.buffer[0..w.end],
             .capacity = w.buffer.len,
         };
@@ -2608,28 +2645,71 @@ pub const Allocating = struct {
     }
 
     pub fn ensureUnusedCapacity(a: *Allocating, additional_count: usize) Allocator.Error!void {
-        var list = a.toArrayList();
-        defer a.setArrayList(list);
-        return list.ensureUnusedCapacity(a.allocator, additional_count);
+        const new_capacity = std.math.add(usize, a.writer.buffer.len, additional_count) catch return error.OutOfMemory;
+        return ensureTotalCapacity(a, new_capacity);
     }
 
     pub fn ensureTotalCapacity(a: *Allocating, new_capacity: usize) Allocator.Error!void {
-        var list = a.toArrayList();
-        defer a.setArrayList(list);
-        return list.ensureTotalCapacity(a.allocator, new_capacity);
+        return ensureTotalCapacityPrecise(a, ArrayList(u8).growCapacity(a.writer.buffer.len, new_capacity));
     }
 
-    pub fn toOwnedSlice(a: *Allocating) error{OutOfMemory}![]u8 {
-        var list = a.toArrayList();
-        defer a.setArrayList(list);
-        return list.toOwnedSlice(a.allocator);
+    pub fn ensureTotalCapacityPrecise(a: *Allocating, new_capacity: usize) Allocator.Error!void {
+        const old_memory = a.writer.buffer;
+        if (old_memory.len >= new_capacity) return;
+        assert(new_capacity != 0);
+        const alignment = a.alignment;
+        if (old_memory.len > 0) {
+            if (a.allocator.rawRemap(old_memory, alignment, new_capacity, @returnAddress())) |new| {
+                a.writer.buffer = new[0..new_capacity];
+                return;
+            }
+        }
+        const new_memory = (a.allocator.rawAlloc(new_capacity, alignment, @returnAddress()) orelse
+            return error.OutOfMemory)[0..new_capacity];
+        const saved = old_memory[0..a.writer.end];
+        @memcpy(new_memory[0..saved.len], saved);
+        if (old_memory.len != 0) a.allocator.rawFree(old_memory, alignment, @returnAddress());
+        a.writer.buffer = new_memory;
+    }
+
+    pub fn toOwnedSlice(a: *Allocating) Allocator.Error![]u8 {
+        const old_memory = a.writer.buffer;
+        const alignment = a.alignment;
+        const buffered_len = a.writer.end;
+
+        if (old_memory.len > 0) {
+            if (buffered_len == 0) {
+                a.allocator.rawFree(old_memory, alignment, @returnAddress());
+                a.writer.buffer = &.{};
+                a.writer.end = 0;
+                return old_memory[0..0];
+            } else if (a.allocator.rawRemap(old_memory, alignment, buffered_len, @returnAddress())) |new| {
+                a.writer.buffer = &.{};
+                a.writer.end = 0;
+                return new[0..buffered_len];
+            }
+        }
+
+        if (buffered_len == 0)
+            return a.writer.buffer[0..0];
+
+        const new_memory = (a.allocator.rawAlloc(buffered_len, alignment, @returnAddress()) orelse
+            return error.OutOfMemory)[0..buffered_len];
+        @memcpy(new_memory, old_memory[0..buffered_len]);
+        if (old_memory.len != 0) a.allocator.rawFree(old_memory, alignment, @returnAddress());
+        a.writer.buffer = &.{};
+        a.writer.end = 0;
+        return new_memory;
     }
 
-    pub fn toOwnedSliceSentinel(a: *Allocating, comptime sentinel: u8) error{OutOfMemory}![:sentinel]u8 {
-        const gpa = a.allocator;
-        var list = @This().toArrayList(a);
-        defer a.setArrayList(list);
-        return list.toOwnedSliceSentinel(gpa, sentinel);
+    pub fn toOwnedSliceSentinel(a: *Allocating, comptime sentinel: u8) Allocator.Error![:sentinel]u8 {
+        // This addition can never overflow because `a.writer.buffer` can never occupy the whole address space.
+        try ensureTotalCapacityPrecise(a, a.writer.end + 1);
+        a.writer.buffer[a.writer.end] = sentinel;
+        a.writer.end += 1;
+        errdefer a.writer.end -= 1;
+        const result = try toOwnedSlice(a);
+        return result[0 .. result.len - 1 :sentinel];
     }
 
     pub fn written(a: *Allocating) []u8 {
@@ -2646,57 +2726,50 @@ pub const Allocating = struct {
 
     fn drain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
         const a: *Allocating = @fieldParentPtr("writer", w);
-        const gpa = a.allocator;
         const pattern = data[data.len - 1];
         const splat_len = pattern.len * splat;
-        var list = a.toArrayList();
-        defer setArrayList(a, list);
-        const start_len = list.items.len;
+        const start_len = a.writer.end;
         assert(data.len != 0);
         for (data) |bytes| {
-            list.ensureUnusedCapacity(gpa, bytes.len + splat_len + 1) catch return error.WriteFailed;
-            list.appendSliceAssumeCapacity(bytes);
+            a.ensureUnusedCapacity(bytes.len + splat_len + 1) catch return error.WriteFailed;
+            @memcpy(a.writer.buffer[a.writer.end..][0..bytes.len], bytes);
+            a.writer.end += bytes.len;
         }
         if (splat == 0) {
-            list.items.len -= pattern.len;
+            a.writer.end -= pattern.len;
         } else switch (pattern.len) {
             0 => {},
-            1 => list.appendNTimesAssumeCapacity(pattern[0], splat - 1),
-            else => for (0..splat - 1) |_| list.appendSliceAssumeCapacity(pattern),
+            1 => {
+                @memset(a.writer.buffer[a.writer.end..][0 .. splat - 1], pattern[0]);
+                a.writer.end += splat - 1;
+            },
+            else => for (0..splat - 1) |_| {
+                @memcpy(a.writer.buffer[a.writer.end..][0..pattern.len], pattern);
+                a.writer.end += pattern.len;
+            },
         }
-        return list.items.len - start_len;
+        return a.writer.end - start_len;
     }
 
     fn sendFile(w: *Writer, file_reader: *File.Reader, limit: Limit) FileError!usize {
         if (File.Handle == void) return error.Unimplemented;
         if (limit == .nothing) return 0;
         const a: *Allocating = @fieldParentPtr("writer", w);
-        const gpa = a.allocator;
-        var list = a.toArrayList();
-        defer setArrayList(a, list);
         const pos = file_reader.logicalPos();
         const additional = if (file_reader.getSize()) |size| size - pos else |_| std.atomic.cache_line;
         if (additional == 0) return error.EndOfStream;
-        list.ensureUnusedCapacity(gpa, limit.minInt64(additional)) catch return error.WriteFailed;
-        const dest = limit.slice(list.unusedCapacitySlice());
+        a.ensureUnusedCapacity(limit.minInt64(additional)) catch return error.WriteFailed;
+        const dest = limit.slice(a.writer.buffer[a.writer.end..]);
         const n = try file_reader.read(dest);
-        list.items.len += n;
+        a.writer.end += n;
         return n;
     }
 
     fn growingRebase(w: *Writer, preserve: usize, minimum_len: usize) Error!void {
         const a: *Allocating = @fieldParentPtr("writer", w);
-        const gpa = a.allocator;
-        var list = a.toArrayList();
-        defer setArrayList(a, list);
         const total = std.math.add(usize, preserve, minimum_len) catch return error.WriteFailed;
-        list.ensureTotalCapacity(gpa, total) catch return error.WriteFailed;
-        list.ensureUnusedCapacity(gpa, minimum_len) catch return error.WriteFailed;
-    }
-
-    fn setArrayList(a: *Allocating, list: ArrayList(u8)) void {
-        a.writer.buffer = list.allocatedSlice();
-        a.writer.end = list.items.len;
+        a.ensureTotalCapacity(total) catch return error.WriteFailed;
+        a.ensureUnusedCapacity(minimum_len) catch return error.WriteFailed;
     }
 
     test Allocating {
lib/std/zig/LibCInstallation.zig
@@ -43,7 +43,7 @@ pub fn parse(
         }
     }
 
-    const contents = try std.fs.cwd().readFileAlloc(allocator, libc_file, std.math.maxInt(usize));
+    const contents = try std.fs.cwd().readFileAlloc(libc_file, allocator, .limited(std.math.maxInt(usize)));
     defer allocator.free(contents);
 
     var it = std.mem.tokenizeScalar(u8, contents, '\n');
lib/std/zig/WindowsSdk.zig
@@ -766,7 +766,7 @@ const MsvcLibDir = struct {
             writer.writeByte(std.fs.path.sep) catch unreachable;
             writer.writeAll("state.json") catch unreachable;
 
-            const json_contents = instances_dir.readFileAlloc(allocator, writer.buffered(), std.math.maxInt(usize)) catch continue;
+            const json_contents = instances_dir.readFileAlloc(writer.buffered(), allocator, .limited(std.math.maxInt(usize))) catch continue;
             defer allocator.free(json_contents);
 
             var parsed = std.json.parseFromSlice(std.json.Value, allocator, json_contents, .{}) catch continue;
lib/std/array_list.zig
@@ -664,9 +664,10 @@ pub fn Aligned(comptime T: type, comptime alignment: ?mem.Alignment) type {
 
         /// The caller owns the returned memory. ArrayList becomes empty.
         pub fn toOwnedSliceSentinel(self: *Self, gpa: Allocator, comptime sentinel: T) Allocator.Error!SentinelSlice(sentinel) {
-            // This addition can never overflow because `self.items` can never occupy the whole address space
+            // This addition can never overflow because `self.items` can never occupy the whole address space.
             try self.ensureTotalCapacityPrecise(gpa, self.items.len + 1);
             self.appendAssumeCapacity(sentinel);
+            errdefer self.items.len -= 1;
             const result = try self.toOwnedSlice(gpa);
             return result[0 .. result.len - 1 :sentinel];
         }
@@ -1361,7 +1362,7 @@ pub fn Aligned(comptime T: type, comptime alignment: ?mem.Alignment) type {
 
         /// Called when memory growth is necessary. Returns a capacity larger than
         /// minimum that grows super-linearly.
-        fn growCapacity(current: usize, minimum: usize) usize {
+        pub fn growCapacity(current: usize, minimum: usize) usize {
             var new = current;
             while (true) {
                 new +|= new / 2 + init_capacity;
lib/std/Thread.zig
@@ -282,7 +282,7 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co
             defer file.close();
 
             var file_reader = file.readerStreaming(&.{});
-            const data_len = file_reader.readSliceShort(buffer_ptr[0 .. max_name_len + 1]) catch |err| switch (err) {
+            const data_len = file_reader.interface.readSliceShort(buffer_ptr[0 .. max_name_len + 1]) catch |err| switch (err) {
                 error.ReadFailed => return file_reader.err.?,
             };
             return if (data_len >= 1) buffer[0 .. data_len - 1] else null;
lib/fuzzer.zig
@@ -220,7 +220,7 @@ const Fuzzer = struct {
             const i = f.corpus.items.len;
             var buf: [30]u8 = undefined;
             const input_sub_path = std.fmt.bufPrint(&buf, "{d}", .{i}) catch unreachable;
-            const input = f.corpus_directory.handle.readFileAlloc(gpa, input_sub_path, 1 << 31) catch |err| switch (err) {
+            const input = f.corpus_directory.handle.readFileAlloc(input_sub_path, gpa, .limited(1 << 31)) catch |err| switch (err) {
                 error.FileNotFound => {
                     // Make this one the next input.
                     const input_file = f.corpus_directory.handle.createFile(input_sub_path, .{
src/link/MachO/dyld_info/bind.zig
@@ -647,7 +647,7 @@ fn setDylibOrdinal(ordinal: i16, writer: *std.Io.Writer) !void {
 fn setAddend(addend: i64, writer: *std.Io.Writer) !void {
     log.debug(">>> set addend: {x}", .{addend});
     try writer.writeByte(macho.BIND_OPCODE_SET_ADDEND_SLEB);
-    try std.leb.writeIleb128(writer, addend);
+    try writer.writeSleb128(addend);
 }
 
 fn doBind(writer: *std.Io.Writer) !void {
src/link/MachO.zig
@@ -4361,7 +4361,7 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi
 // The file/property is also available with vendored libc.
 fn readSdkVersionFromSettings(arena: Allocator, dir: []const u8) ![]const u8 {
     const sdk_path = try fs.path.join(arena, &.{ dir, "SDKSettings.json" });
-    const contents = try fs.cwd().readFileAlloc(arena, sdk_path, std.math.maxInt(u16));
+    const contents = try fs.cwd().readFileAlloc(sdk_path, arena, .limited(std.math.maxInt(u16)));
     const parsed = try std.json.parseFromSlice(std.json.Value, arena, contents, .{});
     if (parsed.value.object.get("MinimalDisplayName")) |ver| return ver.string;
     return error.SdkVersionFailure;
src/Package/Fetch/git.zig
@@ -1599,7 +1599,7 @@ fn runRepositoryTest(comptime format: Oid.Format, head_commit: []const u8) !void
     const max_file_size = 8192;
 
     if (!skip_checksums) {
-        const index_file_data = try git_dir.dir.readFileAlloc(testing.allocator, "testrepo.idx", max_file_size);
+        const index_file_data = try git_dir.dir.readFileAlloc("testrepo.idx", testing.allocator, .limited(max_file_size));
         defer testing.allocator.free(index_file_data);
         // testrepo.idx is generated by Git. The index created by this file should
         // match it exactly. Running `git verify-pack -v testrepo.pack` can verify
@@ -1675,7 +1675,7 @@ fn runRepositoryTest(comptime format: Oid.Format, head_commit: []const u8) !void
         \\revision 19
         \\
     ;
-    const actual_file_contents = try worktree.dir.readFileAlloc(testing.allocator, "file", max_file_size);
+    const actual_file_contents = try worktree.dir.readFileAlloc("file", testing.allocator, .limited(max_file_size));
     defer testing.allocator.free(actual_file_contents);
     try testing.expectEqualStrings(expected_file_contents, actual_file_contents);
 }
src/Package/Fetch.zig
@@ -655,10 +655,9 @@ fn loadManifest(f: *Fetch, pkg_root: Cache.Path) RunError!void {
     const eb = &f.error_bundle;
     const arena = f.arena.allocator();
     const manifest_bytes = pkg_root.root_dir.handle.readFileAllocOptions(
-        arena,
         try fs.path.join(arena, &.{ pkg_root.sub_path, Manifest.basename }),
-        Manifest.max_bytes,
-        null,
+        arena,
+        .limited(Manifest.max_bytes),
         .@"1",
         0,
     ) catch |err| switch (err) {
src/Compilation.zig
@@ -6576,7 +6576,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
         // Read depfile and update cache manifest
         {
             const dep_basename = fs.path.basename(out_dep_path);
-            const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(arena, dep_basename, 50 * 1024 * 1024);
+            const dep_file_contents = try zig_cache_tmp_dir.readFileAlloc(dep_basename, arena, .limited(50 * 1024 * 1024));
             defer arena.free(dep_file_contents);
 
             const value = try std.json.parseFromSliceLeaky(std.json.Value, arena, dep_file_contents, .{});
src/main.zig
@@ -5443,7 +5443,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
                         // that are missing.
                         const s = fs.path.sep_str;
                         const tmp_sub_path = "tmp" ++ s ++ results_tmp_file_nonce;
-                        const stdout = dirs.local_cache.handle.readFileAlloc(arena, tmp_sub_path, 50 * 1024 * 1024) catch |err| {
+                        const stdout = dirs.local_cache.handle.readFileAlloc(tmp_sub_path, arena, .limited(50 * 1024 * 1024)) catch |err| {
                             fatal("unable to read results of configure phase from '{f}{s}': {s}", .{
                                 dirs.local_cache, tmp_sub_path, @errorName(err),
                             });
@@ -5826,7 +5826,7 @@ const ArgIteratorResponseFile = process.ArgIteratorGeneral(.{ .comments = true,
 /// Initialize the arguments from a Response File. "*.rsp"
 fn initArgIteratorResponseFile(allocator: Allocator, resp_file_path: []const u8) !ArgIteratorResponseFile {
     const max_bytes = 10 * 1024 * 1024; // 10 MiB of command line arguments is a reasonable limit
-    const cmd_line = try fs.cwd().readFileAlloc(allocator, resp_file_path, max_bytes);
+    const cmd_line = try fs.cwd().readFileAlloc(resp_file_path, allocator, .limited(max_bytes));
     errdefer allocator.free(cmd_line);
 
     return ArgIteratorResponseFile.initTakeOwnership(allocator, cmd_line);
@@ -7350,10 +7350,9 @@ fn loadManifest(
 ) !struct { Package.Manifest, Ast } {
     const manifest_bytes = while (true) {
         break options.dir.readFileAllocOptions(
-            arena,
             Package.Manifest.basename,
-            Package.Manifest.max_bytes,
-            null,
+            arena,
+            .limited(Package.Manifest.max_bytes),
             .@"1",
             0,
         ) catch |err| switch (err) {
@@ -7435,7 +7434,7 @@ const Templates = struct {
         }
 
         const max_bytes = 10 * 1024 * 1024;
-        const contents = templates.dir.readFileAlloc(arena, template_path, max_bytes) catch |err| {
+        const contents = templates.dir.readFileAlloc(template_path, arena, .limited(max_bytes)) catch |err| {
             fatal("unable to read template file '{s}': {s}", .{ template_path, @errorName(err) });
         };
         templates.buffer.clearRetainingCapacity();
src/print_targets.zig
@@ -24,9 +24,9 @@ pub fn cmdTargets(
     defer allocator.free(zig_lib_directory.path.?);
 
     const abilists_contents = zig_lib_directory.handle.readFileAlloc(
-        allocator,
         glibc.abilists_path,
-        glibc.abilists_max_size,
+        allocator,
+        .limited(glibc.abilists_max_size),
     ) catch |err| switch (err) {
         error.OutOfMemory => return error.OutOfMemory,
         else => fatal("unable to read " ++ glibc.abilists_path ++ ": {s}", .{@errorName(err)}),