Commit 9f27d770a1

Andrew Kelley <andrew@ziglang.org>
2025-06-28 04:33:03
std.io: deprecated Reader/Writer; introduce new API
1 parent fc2c188
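The bulk of this commit is a mechanical rename: the old generic stream adapters
`std.io.Reader(Context, Error, readFn)` and `std.io.Writer(Context, Error, writeFn)`
become `std.io.GenericReader` and `std.io.GenericWriter`, freeing the
`std.io.Reader`/`std.io.Writer` names for the new vtable-based interfaces added
further down. A minimal sketch of the renamed adapter in use; the `ZeroReader`
type is hypothetical, only the `GenericReader` name and its `.context`
initialization come from this commit:

    const std = @import("std");

    // Hypothetical stream type showing the rename applied throughout this
    // commit: the generic adapter formerly spelled `std.io.Reader(Context,
    // Error, readFn)` is now `std.io.GenericReader`; usage is unchanged.
    const ZeroReader = struct {
        pub const Error = error{};
        // Before this commit: std.io.Reader(*ZeroReader, Error, read)
        pub const Reader = std.io.GenericReader(*ZeroReader, Error, read);

        pub fn read(self: *ZeroReader, buf: []u8) Error!usize {
            _ = self;
            @memset(buf, 0);
            return buf.len;
        }

        pub fn reader(self: *ZeroReader) Reader {
            return .{ .context = self };
        }
    };

    test "ZeroReader" {
        var zr: ZeroReader = .{};
        var buf: [4]u8 = undefined;
        try zr.reader().readNoEof(&buf);
        try std.testing.expectEqualSlices(u8, &[_]u8{ 0, 0, 0, 0 }, &buf);
    }
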
lib/compiler/resinator/compile.zig
@@ -2949,7 +2949,7 @@ pub fn HeaderSlurpingReader(comptime size: usize, comptime ReaderType: anytype)
         slurped_header: [size]u8 = [_]u8{0x00} ** size,
 
         pub const Error = ReaderType.Error;
-        pub const Reader = std.io.Reader(*@This(), Error, read);
+        pub const Reader = std.io.GenericReader(*@This(), Error, read);
 
         pub fn read(self: *@This(), buf: []u8) Error!usize {
             const amt = try self.child_reader.read(buf);
@@ -2983,7 +2983,7 @@ pub fn LimitedWriter(comptime WriterType: type) type {
         bytes_left: u64,
 
         pub const Error = error{NoSpaceLeft} || WriterType.Error;
-        pub const Writer = std.io.Writer(*Self, Error, write);
+        pub const Writer = std.io.GenericWriter(*Self, Error, write);
 
         const Self = @This();
 
lib/compiler/resinator/main.zig
@@ -471,7 +471,7 @@ const IoStream = struct {
             allocator: std.mem.Allocator,
         };
         pub const WriteError = std.mem.Allocator.Error || std.fs.File.WriteError;
-        pub const Writer = std.io.Writer(WriterContext, WriteError, write);
+        pub const Writer = std.io.GenericWriter(WriterContext, WriteError, write);
 
         pub fn write(ctx: WriterContext, bytes: []const u8) WriteError!usize {
             switch (ctx.self.*) {
lib/std/compress/flate/deflate.zig
@@ -355,7 +355,7 @@ fn Deflate(comptime container: Container, comptime WriterType: type, comptime Bl
 
         // Writer interface
 
-        pub const Writer = io.Writer(*Self, Error, write);
+        pub const Writer = io.GenericWriter(*Self, Error, write);
         pub const Error = BlockWriterType.Error;
 
         /// Write `input` of uncompressed data.
@@ -512,7 +512,7 @@ fn SimpleCompressor(
 
         // Writer interface
 
-        pub const Writer = io.Writer(*Self, Error, write);
+        pub const Writer = io.GenericWriter(*Self, Error, write);
         pub const Error = BlockWriterType.Error;
 
         // Write `input` of uncompressed data.
lib/std/compress/flate/inflate.zig
@@ -341,7 +341,7 @@ pub fn Inflate(comptime container: Container, comptime LookaheadType: type, comp
 
         // Reader interface
 
-        pub const Reader = std.io.Reader(*Self, Error, read);
+        pub const Reader = std.io.GenericReader(*Self, Error, read);
 
         /// Returns the number of bytes read. It may be less than buffer.len.
         /// If the number of bytes read is 0, it means end of stream.
lib/std/compress/xz/block.zig
@@ -27,7 +27,7 @@ pub fn Decoder(comptime ReaderType: type) type {
             ReaderType.Error ||
             DecodeError ||
             Allocator.Error;
-        pub const Reader = std.io.Reader(*Self, Error, read);
+        pub const Reader = std.io.GenericReader(*Self, Error, read);
 
         allocator: Allocator,
         inner_reader: ReaderType,
lib/std/compress/zstandard/readers.zig
@@ -4,7 +4,7 @@ pub const ReversedByteReader = struct {
     remaining_bytes: usize,
     bytes: []const u8,
 
-    const Reader = std.io.Reader(*ReversedByteReader, error{}, readFn);
+    const Reader = std.io.GenericReader(*ReversedByteReader, error{}, readFn);
 
     pub fn init(bytes: []const u8) ReversedByteReader {
         return .{
lib/std/compress/lzma.zig
@@ -30,7 +30,7 @@ pub fn Decompress(comptime ReaderType: type) type {
             Allocator.Error ||
             error{ CorruptInput, EndOfStream, Overflow };
 
-        pub const Reader = std.io.Reader(*Self, Error, read);
+        pub const Reader = std.io.GenericReader(*Self, Error, read);
 
         allocator: Allocator,
         in_reader: ReaderType,
lib/std/compress/xz.zig
@@ -34,7 +34,7 @@ pub fn Decompress(comptime ReaderType: type) type {
         const Self = @This();
 
         pub const Error = ReaderType.Error || block.Decoder(ReaderType).Error;
-        pub const Reader = std.io.Reader(*Self, Error, read);
+        pub const Reader = std.io.GenericReader(*Self, Error, read);
 
         allocator: Allocator,
         block_decoder: block.Decoder(ReaderType),
lib/std/compress/zstandard.zig
@@ -50,7 +50,7 @@ pub fn Decompressor(comptime ReaderType: type) type {
             OutOfMemory,
         };
 
-        pub const Reader = std.io.Reader(*Self, Error, read);
+        pub const Reader = std.io.GenericReader(*Self, Error, read);
 
         pub fn init(source: ReaderType, options: DecompressorOptions) Self {
             return .{
lib/std/crypto/codecs/asn1/der/ArrayListReverse.zig
@@ -45,7 +45,7 @@ pub fn prependSlice(self: *ArrayListReverse, data: []const u8) Error!void {
     self.data.ptr = begin;
 }
 
-pub const Writer = std.io.Writer(*ArrayListReverse, Error, prependSliceSize);
+pub const Writer = std.io.GenericWriter(*ArrayListReverse, Error, prependSliceSize);
 /// Warning: This writer writes backwards. `fn print` will NOT work as expected.
 pub fn writer(self: *ArrayListReverse) Writer {
     return .{ .context = self };
lib/std/crypto/aegis.zig
@@ -803,7 +803,7 @@ fn AegisMac(comptime T: type) type {
         }
 
         pub const Error = error{};
-        pub const Writer = std.io.Writer(*Mac, Error, write);
+        pub const Writer = std.io.GenericWriter(*Mac, Error, write);
 
         fn write(self: *Mac, bytes: []const u8) Error!usize {
             self.update(bytes);
lib/std/crypto/blake2.zig
@@ -187,7 +187,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
         }
 
         pub const Error = error{};
-        pub const Writer = std.io.Writer(*Self, Error, write);
+        pub const Writer = std.io.GenericWriter(*Self, Error, write);
 
         fn write(self: *Self, bytes: []const u8) Error!usize {
             self.update(bytes);
lib/std/crypto/blake3.zig
@@ -476,7 +476,7 @@ pub const Blake3 = struct {
     }
 
     pub const Error = error{};
-    pub const Writer = std.io.Writer(*Blake3, Error, write);
+    pub const Writer = std.io.GenericWriter(*Blake3, Error, write);
 
     fn write(self: *Blake3, bytes: []const u8) Error!usize {
         self.update(bytes);
lib/std/crypto/sha1.zig
@@ -269,7 +269,7 @@ pub const Sha1 = struct {
     }
 
     pub const Error = error{};
-    pub const Writer = std.io.Writer(*Self, Error, write);
+    pub const Writer = std.io.GenericWriter(*Self, Error, write);
 
     fn write(self: *Self, bytes: []const u8) Error!usize {
         self.update(bytes);
lib/std/crypto/sha2.zig
@@ -376,7 +376,7 @@ fn Sha2x32(comptime iv: Iv32, digest_bits: comptime_int) type {
         }
 
         pub const Error = error{};
-        pub const Writer = std.io.Writer(*Self, Error, write);
+        pub const Writer = std.io.GenericWriter(*Self, Error, write);
 
         fn write(self: *Self, bytes: []const u8) Error!usize {
             self.update(bytes);
lib/std/crypto/sha3.zig
@@ -82,7 +82,7 @@ pub fn Keccak(comptime f: u11, comptime output_bits: u11, comptime default_delim
         }
 
         pub const Error = error{};
-        pub const Writer = std.io.Writer(*Self, Error, write);
+        pub const Writer = std.io.GenericWriter(*Self, Error, write);
 
         fn write(self: *Self, bytes: []const u8) Error!usize {
             self.update(bytes);
@@ -193,7 +193,7 @@ fn ShakeLike(comptime security_level: u11, comptime default_delim: u8, comptime
         }
 
         pub const Error = error{};
-        pub const Writer = std.io.Writer(*Self, Error, write);
+        pub const Writer = std.io.GenericWriter(*Self, Error, write);
 
         fn write(self: *Self, bytes: []const u8) Error!usize {
             self.update(bytes);
@@ -286,7 +286,7 @@ fn CShakeLike(comptime security_level: u11, comptime default_delim: u8, comptime
         }
 
         pub const Error = error{};
-        pub const Writer = std.io.Writer(*Self, Error, write);
+        pub const Writer = std.io.GenericWriter(*Self, Error, write);
 
         fn write(self: *Self, bytes: []const u8) Error!usize {
             self.update(bytes);
@@ -392,7 +392,7 @@ fn KMacLike(comptime security_level: u11, comptime default_delim: u8, comptime r
         }
 
         pub const Error = error{};
-        pub const Writer = std.io.Writer(*Self, Error, write);
+        pub const Writer = std.io.GenericWriter(*Self, Error, write);
 
         fn write(self: *Self, bytes: []const u8) Error!usize {
             self.update(bytes);
@@ -484,7 +484,7 @@ fn TupleHashLike(comptime security_level: u11, comptime default_delim: u8, compt
         }
 
         pub const Error = error{};
-        pub const Writer = std.io.Writer(*Self, Error, write);
+        pub const Writer = std.io.GenericWriter(*Self, Error, write);
 
         fn write(self: *Self, bytes: []const u8) Error!usize {
             self.update(bytes);
lib/std/crypto/siphash.zig
@@ -240,7 +240,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
         }
 
         pub const Error = error{};
-        pub const Writer = std.io.Writer(*Self, Error, write);
+        pub const Writer = std.io.GenericWriter(*Self, Error, write);
 
         fn write(self: *Self, bytes: []const u8) Error!usize {
             self.update(bytes);
lib/std/debug/Pdb.zig
@@ -562,7 +562,7 @@ const MsfStream = struct {
         return block * self.block_size + offset;
     }
 
-    pub fn reader(self: *MsfStream) std.io.Reader(*MsfStream, Error, read) {
+    pub fn reader(self: *MsfStream) std.io.GenericReader(*MsfStream, Error, read) {
         return .{ .context = self };
     }
 };
lib/std/fs/File.zig
@@ -1581,13 +1581,13 @@ fn writeFileAllSendfile(self: File, in_file: File, args: WriteFileOptions) posix
     }
 }
 
-pub const Reader = io.Reader(File, ReadError, read);
+pub const Reader = io.GenericReader(File, ReadError, read);
 
 pub fn reader(file: File) Reader {
     return .{ .context = file };
 }
 
-pub const Writer = io.Writer(File, WriteError, write);
+pub const Writer = io.GenericWriter(File, WriteError, write);
 
 pub fn writer(file: File) Writer {
     return .{ .context = file };
lib/std/http/Client.zig
@@ -311,7 +311,7 @@ pub const Connection = struct {
         EndOfStream,
     };
 
-    pub const Reader = std.io.Reader(*Connection, ReadError, read);
+    pub const Reader = std.io.GenericReader(*Connection, ReadError, read);
 
     pub fn reader(conn: *Connection) Reader {
         return Reader{ .context = conn };
@@ -374,7 +374,7 @@ pub const Connection = struct {
         UnexpectedWriteFailure,
     };
 
-    pub const Writer = std.io.Writer(*Connection, WriteError, write);
+    pub const Writer = std.io.GenericWriter(*Connection, WriteError, write);
 
     pub fn writer(conn: *Connection) Writer {
         return Writer{ .context = conn };
@@ -934,7 +934,7 @@ pub const Request = struct {
 
     const TransferReadError = Connection.ReadError || proto.HeadersParser.ReadError;
 
-    const TransferReader = std.io.Reader(*Request, TransferReadError, transferRead);
+    const TransferReader = std.io.GenericReader(*Request, TransferReadError, transferRead);
 
     fn transferReader(req: *Request) TransferReader {
         return .{ .context = req };
@@ -1094,7 +1094,7 @@ pub const Request = struct {
     pub const ReadError = TransferReadError || proto.HeadersParser.CheckCompleteHeadError ||
         error{ DecompressionFailure, InvalidTrailers };
 
-    pub const Reader = std.io.Reader(*Request, ReadError, read);
+    pub const Reader = std.io.GenericReader(*Request, ReadError, read);
 
     pub fn reader(req: *Request) Reader {
         return .{ .context = req };
@@ -1134,7 +1134,7 @@ pub const Request = struct {
 
     pub const WriteError = Connection.WriteError || error{ NotWriteable, MessageTooLong };
 
-    pub const Writer = std.io.Writer(*Request, WriteError, write);
+    pub const Writer = std.io.GenericWriter(*Request, WriteError, write);
 
     pub fn writer(req: *Request) Writer {
         return .{ .context = req };
lib/std/http/protocol.zig
@@ -344,7 +344,7 @@ const MockBufferedConnection = struct {
     }
 
     pub const ReadError = std.io.FixedBufferStream([]const u8).ReadError || error{EndOfStream};
-    pub const Reader = std.io.Reader(*MockBufferedConnection, ReadError, read);
+    pub const Reader = std.io.GenericReader(*MockBufferedConnection, ReadError, read);
 
     pub fn reader(conn: *MockBufferedConnection) Reader {
         return Reader{ .context = conn };
@@ -359,7 +359,7 @@ const MockBufferedConnection = struct {
     }
 
     pub const WriteError = std.io.FixedBufferStream([]const u8).WriteError;
-    pub const Writer = std.io.Writer(*MockBufferedConnection, WriteError, write);
+    pub const Writer = std.io.GenericWriter(*MockBufferedConnection, WriteError, write);
 
     pub fn writer(conn: *MockBufferedConnection) Writer {
         return Writer{ .context = conn };
lib/std/io/Reader/Limited.zig
@@ -0,0 +1,42 @@
+const Limited = @This();
+
+const std = @import("../../std.zig");
+const Reader = std.io.Reader;
+const Writer = std.io.Writer;
+const Limit = std.io.Limit;
+
+unlimited: *Reader,
+remaining: Limit,
+interface: Reader,
+
+pub fn init(reader: *Reader, limit: Limit, buffer: []u8) Limited {
+    return .{
+        .unlimited = reader,
+        .remaining = limit,
+        .interface = .{
+            .vtable = &.{
+                .stream = stream,
+                .discard = discard,
+            },
+            .buffer = buffer,
+            .seek = 0,
+            .end = 0,
+        },
+    };
+}
+
+fn stream(r: *Reader, w: *Writer, limit: Limit) Reader.StreamError!usize {
+    const l: *Limited = @fieldParentPtr("interface", r);
+    const combined_limit = limit.min(l.remaining);
+    const n = try l.unlimited.stream(w, combined_limit);
+    l.remaining = l.remaining.subtract(n).?;
+    return n;
+}
+
+fn discard(r: *Reader, limit: Limit) Reader.Error!usize {
+    const l: *Limited = @fieldParentPtr("interface", r);
+    const combined_limit = limit.min(l.remaining);
+    const n = try l.unlimited.discard(combined_limit);
+    l.remaining = l.remaining.subtract(n).?;
+    return n;
+}
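
A possible use of the new `Limited` adapter, combining it with the `Reader.fixed`
and `Reader.limited` helpers from the rewritten `Reader.zig` further down; the
buffer size and test data are illustrative only:

    const std = @import("std");

    test "cap a reader at five bytes" {
        var src: std.io.Reader = .fixed("hello, world");
        var limit_buffer: [8]u8 = undefined;
        // Only the first five bytes of `src` are reachable through `capped.interface`.
        var capped = src.limited(.limited(5), &limit_buffer);
        try std.testing.expectEqualStrings("hello", try capped.interface.take(5));
    }
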
lib/std/io/buffered_atomic_file.zig
@@ -11,7 +11,7 @@ pub const BufferedAtomicFile = struct {
 
     pub const buffer_size = 4096;
     pub const BufferedWriter = std.io.BufferedWriter(buffer_size, File.Writer);
-    pub const Writer = std.io.Writer(*BufferedWriter, BufferedWriter.Error, BufferedWriter.write);
+    pub const Writer = std.io.GenericWriter(*BufferedWriter, BufferedWriter.Error, BufferedWriter.write);
 
     /// TODO when https://github.com/ziglang/zig/issues/2761 is solved
     /// this API will not need an allocator
lib/std/io/buffered_reader.zig
@@ -12,7 +12,7 @@ pub fn BufferedReader(comptime buffer_size: usize, comptime ReaderType: type) ty
         end: usize = 0,
 
         pub const Error = ReaderType.Error;
-        pub const Reader = io.Reader(*Self, Error, read);
+        pub const Reader = io.GenericReader(*Self, Error, read);
 
         const Self = @This();
 
@@ -61,7 +61,7 @@ test "OneByte" {
 
         const Error = error{NoError};
         const Self = @This();
-        const Reader = io.Reader(*Self, Error, read);
+        const Reader = io.GenericReader(*Self, Error, read);
 
         fn init(str: []const u8) Self {
             return Self{
@@ -105,7 +105,7 @@ test "Block" {
 
         const Error = error{NoError};
         const Self = @This();
-        const Reader = io.Reader(*Self, Error, read);
+        const Reader = io.GenericReader(*Self, Error, read);
 
         fn init(block: []const u8, reads_allowed: usize) Self {
             return Self{
lib/std/io/buffered_writer.zig
@@ -10,7 +10,7 @@ pub fn BufferedWriter(comptime buffer_size: usize, comptime WriterType: type) ty
         end: usize = 0,
 
         pub const Error = WriterType.Error;
-        pub const Writer = io.Writer(*Self, Error, write);
+        pub const Writer = io.GenericWriter(*Self, Error, write);
 
         const Self = @This();
 
lib/std/io/c_writer.zig
@@ -3,7 +3,7 @@ const builtin = @import("builtin");
 const io = std.io;
 const testing = std.testing;
 
-pub const CWriter = io.Writer(*std.c.FILE, std.fs.File.WriteError, cWriterWrite);
+pub const CWriter = io.GenericWriter(*std.c.FILE, std.fs.File.WriteError, cWriterWrite);
 
 pub fn cWriter(c_file: *std.c.FILE) CWriter {
     return .{ .context = c_file };
lib/std/io/change_detection_stream.zig
@@ -8,7 +8,7 @@ pub fn ChangeDetectionStream(comptime WriterType: type) type {
     return struct {
         const Self = @This();
         pub const Error = WriterType.Error;
-        pub const Writer = io.Writer(*Self, Error, write);
+        pub const Writer = io.GenericWriter(*Self, Error, write);
 
         anything_changed: bool,
         underlying_writer: WriterType,
lib/std/io/counting_reader.zig
@@ -9,7 +9,7 @@ pub fn CountingReader(comptime ReaderType: anytype) type {
         bytes_read: u64 = 0,
 
         pub const Error = ReaderType.Error;
-        pub const Reader = io.Reader(*@This(), Error, read);
+        pub const Reader = io.GenericReader(*@This(), Error, read);
 
         pub fn read(self: *@This(), buf: []u8) Error!usize {
             const amt = try self.child_reader.read(buf);
lib/std/io/counting_writer.zig
@@ -9,7 +9,7 @@ pub fn CountingWriter(comptime WriterType: type) type {
         child_stream: WriterType,
 
         pub const Error = WriterType.Error;
-        pub const Writer = io.Writer(*Self, Error, write);
+        pub const Writer = io.GenericWriter(*Self, Error, write);
 
         const Self = @This();
 
lib/std/io/DeprecatedReader.zig
@@ -0,0 +1,386 @@
+context: *const anyopaque,
+readFn: *const fn (context: *const anyopaque, buffer: []u8) anyerror!usize,
+
+pub const Error = anyerror;
+
+/// Returns the number of bytes read. It may be less than buffer.len.
+/// If the number of bytes read is 0, it means end of stream.
+/// End of stream is not an error condition.
+pub fn read(self: Self, buffer: []u8) anyerror!usize {
+    return self.readFn(self.context, buffer);
+}
+
+/// Returns the number of bytes read. If the number read is smaller than `buffer.len`, it
+/// means the stream reached the end. Reaching the end of a stream is not an error
+/// condition.
+pub fn readAll(self: Self, buffer: []u8) anyerror!usize {
+    return readAtLeast(self, buffer, buffer.len);
+}
+
+/// Returns the number of bytes read, calling the underlying read
+/// function the minimal number of times until the buffer has at least
+/// `len` bytes filled. If the number read is less than `len` it means
+/// the stream reached the end. Reaching the end of the stream is not
+/// an error condition.
+pub fn readAtLeast(self: Self, buffer: []u8, len: usize) anyerror!usize {
+    assert(len <= buffer.len);
+    var index: usize = 0;
+    while (index < len) {
+        const amt = try self.read(buffer[index..]);
+        if (amt == 0) break;
+        index += amt;
+    }
+    return index;
+}
+
+/// If the number read would be smaller than `buf.len`, `error.EndOfStream` is returned instead.
+pub fn readNoEof(self: Self, buf: []u8) anyerror!void {
+    const amt_read = try self.readAll(buf);
+    if (amt_read < buf.len) return error.EndOfStream;
+}
+
+/// Appends to the `std.ArrayList` contents by reading from the stream
+/// until end of stream is found.
+/// If the number of bytes appended would exceed `max_append_size`,
+/// `error.StreamTooLong` is returned
+/// and the `std.ArrayList` has exactly `max_append_size` bytes appended.
+pub fn readAllArrayList(
+    self: Self,
+    array_list: *std.ArrayList(u8),
+    max_append_size: usize,
+) anyerror!void {
+    return self.readAllArrayListAligned(null, array_list, max_append_size);
+}
+
+pub fn readAllArrayListAligned(
+    self: Self,
+    comptime alignment: ?Alignment,
+    array_list: *std.ArrayListAligned(u8, alignment),
+    max_append_size: usize,
+) anyerror!void {
+    try array_list.ensureTotalCapacity(@min(max_append_size, 4096));
+    const original_len = array_list.items.len;
+    var start_index: usize = original_len;
+    while (true) {
+        array_list.expandToCapacity();
+        const dest_slice = array_list.items[start_index..];
+        const bytes_read = try self.readAll(dest_slice);
+        start_index += bytes_read;
+
+        if (start_index - original_len > max_append_size) {
+            array_list.shrinkAndFree(original_len + max_append_size);
+            return error.StreamTooLong;
+        }
+
+        if (bytes_read != dest_slice.len) {
+            array_list.shrinkAndFree(start_index);
+            return;
+        }
+
+        // This will trigger ArrayList to expand superlinearly at whatever its growth rate is.
+        try array_list.ensureTotalCapacity(start_index + 1);
+    }
+}
+
+/// Allocates enough memory to hold all the contents of the stream. If the allocated
+/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
+/// Caller owns returned memory.
+/// If this function returns an error, the contents from the stream read so far are lost.
+pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) anyerror![]u8 {
+    var array_list = std.ArrayList(u8).init(allocator);
+    defer array_list.deinit();
+    try self.readAllArrayList(&array_list, max_size);
+    return try array_list.toOwnedSlice();
+}
+
+/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
+/// Replaces the `std.ArrayList` contents by reading from the stream until `delimiter` is found.
+/// Does not include the delimiter in the result.
+/// If the `std.ArrayList` length would exceed `max_size`, `error.StreamTooLong` is returned and the
+/// `std.ArrayList` is populated with `max_size` bytes from the stream.
+pub fn readUntilDelimiterArrayList(
+    self: Self,
+    array_list: *std.ArrayList(u8),
+    delimiter: u8,
+    max_size: usize,
+) anyerror!void {
+    array_list.shrinkRetainingCapacity(0);
+    try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
+}
+
+/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
+/// Allocates enough memory to read until `delimiter`. If the allocated
+/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
+/// Caller owns returned memory.
+/// If this function returns an error, the contents from the stream read so far are lost.
+pub fn readUntilDelimiterAlloc(
+    self: Self,
+    allocator: mem.Allocator,
+    delimiter: u8,
+    max_size: usize,
+) anyerror![]u8 {
+    var array_list = std.ArrayList(u8).init(allocator);
+    defer array_list.deinit();
+    try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
+    return try array_list.toOwnedSlice();
+}
+
+/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
+/// Reads from the stream until specified byte is found. If the buffer is not
+/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
+/// If end-of-stream is found, `error.EndOfStream` is returned.
+/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
+/// delimiter byte is written to the output buffer but is not included
+/// in the returned slice.
+pub fn readUntilDelimiter(self: Self, buf: []u8, delimiter: u8) anyerror![]u8 {
+    var fbs = std.io.fixedBufferStream(buf);
+    try self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len);
+    const output = fbs.getWritten();
+    buf[output.len] = delimiter; // emulating old behaviour
+    return output;
+}
+
+/// Deprecated: use `streamUntilDelimiter` with ArrayList's (or any other's) writer instead.
+/// Allocates enough memory to read until `delimiter` or end-of-stream.
+/// If the allocated memory would be greater than `max_size`, returns
+/// `error.StreamTooLong`. If end-of-stream is found, returns the rest
+/// of the stream. If this function is called again after that, returns
+/// null.
+/// Caller owns returned memory.
+/// If this function returns an error, the contents from the stream read so far are lost.
+pub fn readUntilDelimiterOrEofAlloc(
+    self: Self,
+    allocator: mem.Allocator,
+    delimiter: u8,
+    max_size: usize,
+) anyerror!?[]u8 {
+    var array_list = std.ArrayList(u8).init(allocator);
+    defer array_list.deinit();
+    self.streamUntilDelimiter(array_list.writer(), delimiter, max_size) catch |err| switch (err) {
+        error.EndOfStream => if (array_list.items.len == 0) {
+            return null;
+        },
+        else => |e| return e,
+    };
+    return try array_list.toOwnedSlice();
+}
+
+/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
+/// Reads from the stream until specified byte is found. If the buffer is not
+/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
+/// If end-of-stream is found, returns the rest of the stream. If this
+/// function is called again after that, returns null.
+/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
+/// delimiter byte is written to the output buffer but is not included
+/// in the returned slice.
+pub fn readUntilDelimiterOrEof(self: Self, buf: []u8, delimiter: u8) anyerror!?[]u8 {
+    var fbs = std.io.fixedBufferStream(buf);
+    self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len) catch |err| switch (err) {
+        error.EndOfStream => if (fbs.getWritten().len == 0) {
+            return null;
+        },
+
+        else => |e| return e,
+    };
+    const output = fbs.getWritten();
+    buf[output.len] = delimiter; // emulating old behaviour
+    return output;
+}
+
+/// Appends to the `writer` contents by reading from the stream until `delimiter` is found.
+/// Does not write the delimiter itself.
+/// If `optional_max_size` is not null and amount of written bytes exceeds `optional_max_size`,
+/// returns `error.StreamTooLong` and finishes appending.
+/// If `optional_max_size` is null, appending is unbounded.
+pub fn streamUntilDelimiter(
+    self: Self,
+    writer: anytype,
+    delimiter: u8,
+    optional_max_size: ?usize,
+) anyerror!void {
+    if (optional_max_size) |max_size| {
+        for (0..max_size) |_| {
+            const byte: u8 = try self.readByte();
+            if (byte == delimiter) return;
+            try writer.writeByte(byte);
+        }
+        return error.StreamTooLong;
+    } else {
+        while (true) {
+            const byte: u8 = try self.readByte();
+            if (byte == delimiter) return;
+            try writer.writeByte(byte);
+        }
+        // Cannot return `error.StreamTooLong` since there is no boundary.
+    }
+}
+
+/// Reads from the stream until specified byte is found, discarding all data,
+/// including the delimiter.
+/// If end-of-stream is found, this function succeeds.
+pub fn skipUntilDelimiterOrEof(self: Self, delimiter: u8) anyerror!void {
+    while (true) {
+        const byte = self.readByte() catch |err| switch (err) {
+            error.EndOfStream => return,
+            else => |e| return e,
+        };
+        if (byte == delimiter) return;
+    }
+}
+
+/// Reads 1 byte from the stream or returns `error.EndOfStream`.
+pub fn readByte(self: Self) anyerror!u8 {
+    var result: [1]u8 = undefined;
+    const amt_read = try self.read(result[0..]);
+    if (amt_read < 1) return error.EndOfStream;
+    return result[0];
+}
+
+/// Same as `readByte` except the returned byte is signed.
+pub fn readByteSigned(self: Self) anyerror!i8 {
+    return @as(i8, @bitCast(try self.readByte()));
+}
+
+/// Reads exactly `num_bytes` bytes and returns as an array.
+/// `num_bytes` must be comptime-known
+pub fn readBytesNoEof(self: Self, comptime num_bytes: usize) anyerror![num_bytes]u8 {
+    var bytes: [num_bytes]u8 = undefined;
+    try self.readNoEof(&bytes);
+    return bytes;
+}
+
+/// Reads bytes until `bounded.len` is equal to `num_bytes`,
+/// or the stream ends.
+///
+/// * it is assumed that `num_bytes` will not exceed `bounded.capacity()`
+pub fn readIntoBoundedBytes(
+    self: Self,
+    comptime num_bytes: usize,
+    bounded: *std.BoundedArray(u8, num_bytes),
+) anyerror!void {
+    while (bounded.len < num_bytes) {
+        // get at most the number of bytes free in the bounded array
+        const bytes_read = try self.read(bounded.unusedCapacitySlice());
+        if (bytes_read == 0) return;
+
+        // bytes_read will never overflow @TypeOf(bounded.len)
+        // because `self.read` is bounded by `bounded.unusedCapacitySlice()`
+        bounded.len += @as(@TypeOf(bounded.len), @intCast(bytes_read));
+    }
+}
+
+/// Reads at most `num_bytes` and returns as a bounded array.
+pub fn readBoundedBytes(self: Self, comptime num_bytes: usize) anyerror!std.BoundedArray(u8, num_bytes) {
+    var result = std.BoundedArray(u8, num_bytes){};
+    try self.readIntoBoundedBytes(num_bytes, &result);
+    return result;
+}
+
+pub inline fn readInt(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
+    const bytes = try self.readBytesNoEof(@divExact(@typeInfo(T).int.bits, 8));
+    return mem.readInt(T, &bytes, endian);
+}
+
+pub fn readVarInt(
+    self: Self,
+    comptime ReturnType: type,
+    endian: std.builtin.Endian,
+    size: usize,
+) anyerror!ReturnType {
+    assert(size <= @sizeOf(ReturnType));
+    var bytes_buf: [@sizeOf(ReturnType)]u8 = undefined;
+    const bytes = bytes_buf[0..size];
+    try self.readNoEof(bytes);
+    return mem.readVarInt(ReturnType, bytes, endian);
+}
+
+/// Optional parameters for `skipBytes`
+pub const SkipBytesOptions = struct {
+    buf_size: usize = 512,
+};
+
+// `num_bytes` is a `u64` to match `off_t`
+/// Reads `num_bytes` bytes from the stream and discards them
+pub fn skipBytes(self: Self, num_bytes: u64, comptime options: SkipBytesOptions) anyerror!void {
+    var buf: [options.buf_size]u8 = undefined;
+    var remaining = num_bytes;
+
+    while (remaining > 0) {
+        const amt = @min(remaining, options.buf_size);
+        try self.readNoEof(buf[0..amt]);
+        remaining -= amt;
+    }
+}
+
+/// Reads `slice.len` bytes from the stream and returns if they are the same as the passed slice
+pub fn isBytes(self: Self, slice: []const u8) anyerror!bool {
+    var i: usize = 0;
+    var matches = true;
+    while (i < slice.len) : (i += 1) {
+        if (slice[i] != try self.readByte()) {
+            matches = false;
+        }
+    }
+    return matches;
+}
+
+pub fn readStruct(self: Self, comptime T: type) anyerror!T {
+    // Only extern and packed structs have defined in-memory layout.
+    comptime assert(@typeInfo(T).@"struct".layout != .auto);
+    var res: [1]T = undefined;
+    try self.readNoEof(mem.sliceAsBytes(res[0..]));
+    return res[0];
+}
+
+pub fn readStructEndian(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
+    var res = try self.readStruct(T);
+    if (native_endian != endian) {
+        mem.byteSwapAllFields(T, &res);
+    }
+    return res;
+}
+
+/// Reads an integer with the same size as the given enum's tag type. If the integer matches
+/// an enum tag, casts the integer to the enum tag and returns it. Otherwise, returns `error.InvalidValue`.
+/// TODO optimization taking advantage of most fields being in order
+pub fn readEnum(self: Self, comptime Enum: type, endian: std.builtin.Endian) anyerror!Enum {
+    const E = error{
+        /// An integer was read, but it did not match any of the tags in the supplied enum.
+        InvalidValue,
+    };
+    const type_info = @typeInfo(Enum).@"enum";
+    const tag = try self.readInt(type_info.tag_type, endian);
+
+    inline for (std.meta.fields(Enum)) |field| {
+        if (tag == field.value) {
+            return @field(Enum, field.name);
+        }
+    }
+
+    return E.InvalidValue;
+}
+
+/// Reads the stream until the end, ignoring all the data.
+/// Returns the number of bytes discarded.
+pub fn discard(self: Self) anyerror!u64 {
+    var trash: [4096]u8 = undefined;
+    var index: u64 = 0;
+    while (true) {
+        const n = try self.read(&trash);
+        if (n == 0) return index;
+        index += n;
+    }
+}
+
+const std = @import("../std.zig");
+const Self = @This();
+const math = std.math;
+const assert = std.debug.assert;
+const mem = std.mem;
+const testing = std.testing;
+const native_endian = @import("builtin").target.cpu.arch.endian();
+const Alignment = std.mem.Alignment;
+
+test {
+    _ = @import("Reader/test.zig");
+}
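
`DeprecatedReader` carries the old type-erased reader forward under a new name:
a context pointer plus a `readFn`, with every helper above implemented on top of
`read`. A sketch of constructing one by hand, assuming the new file is exposed
as `std.io.DeprecatedReader`; the slice-backed context is hypothetical:

    const std = @import("std");

    // Hypothetical read function: drains a caller-owned slice, then reports
    // end of stream by returning 0.
    fn sliceRead(context: *const anyopaque, buffer: []u8) anyerror!usize {
        const state: *[]const u8 = @constCast(@alignCast(@ptrCast(context)));
        const src = state.*;
        const n = @min(buffer.len, src.len);
        @memcpy(buffer[0..n], src[0..n]);
        state.* = src[n..];
        return n;
    }

    test "hand-rolled DeprecatedReader" {
        var data: []const u8 = "abc";
        const r: std.io.DeprecatedReader = .{
            .context = @ptrCast(&data),
            .readFn = sliceRead,
        };
        var buf: [8]u8 = undefined;
        try std.testing.expectEqual(@as(usize, 3), try r.readAll(&buf));
        try std.testing.expectEqualStrings("abc", buf[0..3]);
    }
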
lib/std/io/DeprecatedWriter.zig
@@ -0,0 +1,83 @@
+const std = @import("../std.zig");
+const assert = std.debug.assert;
+const mem = std.mem;
+const native_endian = @import("builtin").target.cpu.arch.endian();
+
+context: *const anyopaque,
+writeFn: *const fn (context: *const anyopaque, bytes: []const u8) anyerror!usize,
+
+const Self = @This();
+pub const Error = anyerror;
+
+pub fn write(self: Self, bytes: []const u8) anyerror!usize {
+    return self.writeFn(self.context, bytes);
+}
+
+pub fn writeAll(self: Self, bytes: []const u8) anyerror!void {
+    var index: usize = 0;
+    while (index != bytes.len) {
+        index += try self.write(bytes[index..]);
+    }
+}
+
+pub fn print(self: Self, comptime format: []const u8, args: anytype) anyerror!void {
+    return std.fmt.format(self, format, args);
+}
+
+pub fn writeByte(self: Self, byte: u8) anyerror!void {
+    const array = [1]u8{byte};
+    return self.writeAll(&array);
+}
+
+pub fn writeByteNTimes(self: Self, byte: u8, n: usize) anyerror!void {
+    var bytes: [256]u8 = undefined;
+    @memset(bytes[0..], byte);
+
+    var remaining: usize = n;
+    while (remaining > 0) {
+        const to_write = @min(remaining, bytes.len);
+        try self.writeAll(bytes[0..to_write]);
+        remaining -= to_write;
+    }
+}
+
+pub fn writeBytesNTimes(self: Self, bytes: []const u8, n: usize) anyerror!void {
+    var i: usize = 0;
+    while (i < n) : (i += 1) {
+        try self.writeAll(bytes);
+    }
+}
+
+pub inline fn writeInt(self: Self, comptime T: type, value: T, endian: std.builtin.Endian) anyerror!void {
+    var bytes: [@divExact(@typeInfo(T).int.bits, 8)]u8 = undefined;
+    mem.writeInt(std.math.ByteAlignedInt(@TypeOf(value)), &bytes, value, endian);
+    return self.writeAll(&bytes);
+}
+
+pub fn writeStruct(self: Self, value: anytype) anyerror!void {
+    // Only extern and packed structs have defined in-memory layout.
+    comptime assert(@typeInfo(@TypeOf(value)).@"struct".layout != .auto);
+    return self.writeAll(mem.asBytes(&value));
+}
+
+pub fn writeStructEndian(self: Self, value: anytype, endian: std.builtin.Endian) anyerror!void {
+    // TODO: make sure this value is not a reference type
+    if (native_endian == endian) {
+        return self.writeStruct(value);
+    } else {
+        var copy = value;
+        mem.byteSwapAllFields(@TypeOf(value), &copy);
+        return self.writeStruct(copy);
+    }
+}
+
+pub fn writeFile(self: Self, file: std.fs.File) anyerror!void {
+    // TODO: figure out how to adjust std lib abstractions so that this ends up
+    // doing sendfile or maybe even copy_file_range under the right conditions.
+    var buf: [4000]u8 = undefined;
+    while (true) {
+        const n = try file.readAll(&buf);
+        try self.writeAll(buf[0..n]);
+        if (n < buf.len) return;
+    }
+}
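
`DeprecatedWriter` is the same treatment for the writer side: a context pointer
plus a `writeFn`, with `print`, `writeAll`, and the integer/struct helpers
layered on top. A sketch assuming the new file is exposed as
`std.io.DeprecatedWriter`; the byte-counting context is hypothetical:

    const std = @import("std");

    // Hypothetical write function: discards the bytes but counts them.
    fn countingWrite(context: *const anyopaque, bytes: []const u8) anyerror!usize {
        const count: *usize = @constCast(@alignCast(@ptrCast(context)));
        count.* += bytes.len;
        return bytes.len;
    }

    test "hand-rolled DeprecatedWriter" {
        var count: usize = 0;
        const w: std.io.DeprecatedWriter = .{
            .context = @ptrCast(&count),
            .writeFn = countingWrite,
        };
        try w.print("{d} + {d}", .{ 2, 2 });
        try w.writeByte('!');
        try std.testing.expectEqual(@as(usize, 6), count);
    }
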
lib/std/io/find_byte_writer.zig
@@ -8,7 +8,7 @@ pub fn FindByteWriter(comptime UnderlyingWriter: type) type {
     return struct {
         const Self = @This();
         pub const Error = UnderlyingWriter.Error;
-        pub const Writer = io.Writer(*Self, Error, write);
+        pub const Writer = io.GenericWriter(*Self, Error, write);
 
         underlying_writer: UnderlyingWriter,
         byte_found: bool,
lib/std/io/fixed_buffer_stream.zig
@@ -4,8 +4,8 @@ const testing = std.testing;
 const mem = std.mem;
 const assert = std.debug.assert;
 
-/// This turns a byte buffer into an `io.Writer`, `io.Reader`, or `io.SeekableStream`.
-/// If the supplied byte buffer is const, then `io.Writer` is not available.
+/// This turns a byte buffer into an `io.GenericWriter`, `io.GenericReader`, or `io.SeekableStream`.
+/// If the supplied byte buffer is const, then `io.GenericWriter` is not available.
 pub fn FixedBufferStream(comptime Buffer: type) type {
     return struct {
         /// `Buffer` is either a `[]u8` or `[]const u8`.
@@ -17,8 +17,8 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
         pub const SeekError = error{};
         pub const GetSeekPosError = error{};
 
-        pub const Reader = io.Reader(*Self, ReadError, read);
-        pub const Writer = io.Writer(*Self, WriteError, write);
+        pub const Reader = io.GenericReader(*Self, ReadError, read);
+        pub const Writer = io.GenericWriter(*Self, WriteError, write);
 
         pub const SeekableStream = io.SeekableStream(
             *Self,
lib/std/io/limited_reader.zig
@@ -9,7 +9,7 @@ pub fn LimitedReader(comptime ReaderType: type) type {
         bytes_left: u64,
 
         pub const Error = ReaderType.Error;
-        pub const Reader = io.Reader(*Self, Error, read);
+        pub const Reader = io.GenericReader(*Self, Error, read);
 
         const Self = @This();
 
lib/std/io/multi_writer.zig
@@ -15,7 +15,7 @@ pub fn MultiWriter(comptime Writers: type) type {
         streams: Writers,
 
         pub const Error = ErrSet;
-        pub const Writer = io.Writer(*Self, Error, write);
+        pub const Writer = io.GenericWriter(*Self, Error, write);
 
         pub fn writer(self: *Self) Writer {
             return .{ .context = self };
lib/std/io/Reader.zig
@@ -1,386 +1,1471 @@
-context: *const anyopaque,
-readFn: *const fn (context: *const anyopaque, buffer: []u8) anyerror!usize,
+const Reader = @This();
 
-pub const Error = anyerror;
+const builtin = @import("builtin");
+const native_endian = builtin.target.cpu.arch.endian();
 
-/// Returns the number of bytes read. It may be less than buffer.len.
-/// If the number of bytes read is 0, it means end of stream.
-/// End of stream is not an error condition.
-pub fn read(self: Self, buffer: []u8) anyerror!usize {
-    return self.readFn(self.context, buffer);
+const std = @import("../std.zig");
+const Writer = std.io.Writer;
+const assert = std.debug.assert;
+const testing = std.testing;
+const Allocator = std.mem.Allocator;
+const ArrayList = std.ArrayListUnmanaged;
+const Limit = std.io.Limit;
+
+pub const Limited = @import("Reader/Limited.zig");
+
+vtable: *const VTable,
+buffer: []u8,
+/// Number of bytes which have been consumed from `buffer`.
+seek: usize,
+/// Bytes in `buffer` before this index are buffered data; everything from this index onward is `undefined`.
+end: usize,
+
+pub const VTable = struct {
+    /// Writes bytes from the internally tracked logical position to `w`.
+    ///
+    /// Returns the number of bytes written, which will be at minimum `0` and
+    /// at most `limit`. The number returned, including zero, does not indicate
+    /// end of stream. `limit` is guaranteed to be at least as large as the
+    /// buffer capacity of `w`.
+    ///
+    /// The reader's internal logical seek position moves forward in accordance
+    /// with the number of bytes returned from this function.
+    ///
+    /// Implementations are encouraged to utilize mandatory minimum buffer
+    /// sizes combined with short reads (returning a value less than `limit`)
+    /// in order to minimize complexity.
+    ///
+    /// When this function is called, `buffer` is guaranteed to be empty.
+    stream: *const fn (r: *Reader, w: *Writer, limit: Limit) StreamError!usize,
+
+    /// Consumes bytes from the internally tracked stream position without
+    /// providing access to them.
+    ///
+    /// Returns the number of bytes discarded, which will be at minimum `0` and
+    /// at most `limit`. The number of bytes returned, including zero, does not
+    /// indicate end of stream.
+    ///
+    /// The reader's internal logical seek position moves forward in accordance
+    /// with the number of bytes returned from this function.
+    ///
+    /// Implementations are encouraged to utilize mandatory minimum buffer
+    /// sizes combined with short reads (returning a value less than `limit`)
+    /// in order to minimize complexity.
+    ///
+    /// The default implementation is based on calling `stream`, borrowing
+    /// `buffer` to construct a temporary `Writer` and ignoring the written
+    /// data.
+    discard: *const fn (r: *Reader, limit: Limit) Error!usize = defaultDiscard,
+};
+
+pub const StreamError = error{
+    /// See the `Reader` implementation for detailed diagnostics.
+    ReadFailed,
+    /// See the `Writer` implementation for detailed diagnostics.
+    WriteFailed,
+    /// End of stream indicated from the `Reader`. This error cannot originate
+    /// from the `Writer`.
+    EndOfStream,
+};
+
+pub const Error = error{
+    /// See the `Reader` implementation for detailed diagnostics.
+    ReadFailed,
+    EndOfStream,
+};
+
+pub const StreamRemainingError = error{
+    /// See the `Reader` implementation for detailed diagnostics.
+    ReadFailed,
+    /// See the `Writer` implementation for detailed diagnostics.
+    WriteFailed,
+};
+
+pub const ShortError = error{
+    /// See the `Reader` implementation for detailed diagnostics.
+    ReadFailed,
+};
+
+pub const failing: Reader = .{
+    .vtable = &.{
+        .stream = failingStream,
+        .discard = failingDiscard,
+    },
+    .buffer = &.{},
+    .seek = 0,
+    .end = 0,
+};
+
+/// This is generally safe to `@constCast` because it has an empty buffer, so
+/// there is not really a way to accidentally attempt mutation of these fields.
+const ending_state: Reader = .fixed(&.{});
+pub const ending: *Reader = @constCast(&ending_state);
+
+pub fn limited(r: *Reader, limit: Limit, buffer: []u8) Limited {
+    return Limited.init(r, limit, buffer);
 }
 
-/// Returns the number of bytes read. If the number read is smaller than `buffer.len`, it
-/// means the stream reached the end. Reaching the end of a stream is not an error
-/// condition.
-pub fn readAll(self: Self, buffer: []u8) anyerror!usize {
-    return readAtLeast(self, buffer, buffer.len);
+/// Constructs a `Reader` such that it will read from `buffer` and then end.
+pub fn fixed(buffer: []const u8) Reader {
+    return .{
+        .vtable = &.{
+            .stream = endingStream,
+            .discard = endingDiscard,
+        },
+        // This cast is safe because all potential writes to it will instead
+        // return `error.EndOfStream`.
+        .buffer = @constCast(buffer),
+        .end = buffer.len,
+        .seek = 0,
+    };
 }
 
-/// Returns the number of bytes read, calling the underlying read
-/// function the minimal number of times until the buffer has at least
-/// `len` bytes filled. If the number read is less than `len` it means
-/// the stream reached the end. Reaching the end of the stream is not
-/// an error condition.
-pub fn readAtLeast(self: Self, buffer: []u8, len: usize) anyerror!usize {
-    assert(len <= buffer.len);
-    var index: usize = 0;
-    while (index < len) {
-        const amt = try self.read(buffer[index..]);
-        if (amt == 0) break;
-        index += amt;
+pub fn stream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
+    const buffer = limit.slice(r.buffer[r.seek..r.end]);
+    if (buffer.len > 0) {
+        @branchHint(.likely);
+        const n = try w.write(buffer);
+        r.seek += n;
+        return n;
+    }
+    const before = w.count;
+    const n = try r.vtable.stream(r, w, limit);
+    assert(n <= @intFromEnum(limit));
+    assert(w.count == before + n);
+    return n;
+}
+
+pub fn discard(r: *Reader, limit: Limit) Error!usize {
+    const buffered_len = r.end - r.seek;
+    const remaining: Limit = if (limit.toInt()) |n| l: {
+        if (buffered_len >= n) {
+            r.seek += n;
+            return n;
+        }
+        break :l .limited(n - buffered_len);
+    } else .unlimited;
+    r.seek = 0;
+    r.end = 0;
+    const n = try r.vtable.discard(r, remaining);
+    assert(n <= @intFromEnum(remaining));
+    return buffered_len + n;
+}
+
+pub fn defaultDiscard(r: *Reader, limit: Limit) Error!usize {
+    assert(r.seek == 0);
+    assert(r.end == 0);
+    var w: Writer = .discarding(r.buffer);
+    const n = r.stream(&w, limit) catch |err| switch (err) {
+        error.WriteFailed => unreachable,
+        error.ReadFailed => return error.ReadFailed,
+        error.EndOfStream => return error.EndOfStream,
+    };
+    if (n > @intFromEnum(limit)) {
+        const over_amt = n - @intFromEnum(limit);
+        r.seek = w.end - over_amt;
+        r.end = w.end;
+        assert(r.end <= w.buffer.len); // limit may be exceeded only by an amount within buffer capacity.
+        return @intFromEnum(limit);
     }
-    return index;
-}
-
-/// If the number read would be smaller than `buf.len`, `error.EndOfStream` is returned instead.
-pub fn readNoEof(self: Self, buf: []u8) anyerror!void {
-    const amt_read = try self.readAll(buf);
-    if (amt_read < buf.len) return error.EndOfStream;
-}
-
-/// Appends to the `std.ArrayList` contents by reading from the stream
-/// until end of stream is found.
-/// If the number of bytes appended would exceed `max_append_size`,
-/// `error.StreamTooLong` is returned
-/// and the `std.ArrayList` has exactly `max_append_size` bytes appended.
-pub fn readAllArrayList(
-    self: Self,
-    array_list: *std.ArrayList(u8),
-    max_append_size: usize,
-) anyerror!void {
-    return self.readAllArrayListAligned(null, array_list, max_append_size);
-}
-
-pub fn readAllArrayListAligned(
-    self: Self,
-    comptime alignment: ?Alignment,
-    array_list: *std.ArrayListAligned(u8, alignment),
-    max_append_size: usize,
-) anyerror!void {
-    try array_list.ensureTotalCapacity(@min(max_append_size, 4096));
-    const original_len = array_list.items.len;
-    var start_index: usize = original_len;
+    return n;
+}
+
+/// "Pump" exactly `n` bytes from the reader to the writer.
+pub fn streamExact(r: *Reader, w: *Writer, n: usize) StreamError!void {
+    var remaining = n;
+    while (remaining != 0) remaining -= try r.stream(w, .limited(remaining));
+}
+
+/// "Pump" data from the reader to the writer, handling `error.EndOfStream` as
+/// a success case.
+///
+/// Returns total number of bytes written to `w`.
+pub fn streamRemaining(r: *Reader, w: *Writer) StreamRemainingError!usize {
+    var offset: usize = 0;
     while (true) {
-        array_list.expandToCapacity();
-        const dest_slice = array_list.items[start_index..];
-        const bytes_read = try self.readAll(dest_slice);
-        start_index += bytes_read;
+        offset += r.stream(w, .unlimited) catch |err| switch (err) {
+            error.EndOfStream => return offset,
+            else => |e| return e,
+        };
+    }
+}
+
+/// Consumes the stream until the end, ignoring all the data, returning the
+/// number of bytes discarded.
+pub fn discardRemaining(r: *Reader) ShortError!usize {
+    var offset: usize = r.end - r.seek;
+    r.seek = 0;
+    r.end = 0;
+    while (true) {
+        offset += r.vtable.discard(r, .unlimited) catch |err| switch (err) {
+            error.EndOfStream => return offset,
+            else => |e| return e,
+        };
+    }
+}
+
+pub const LimitedAllocError = Allocator.Error || ShortError || error{StreamTooLong};
+
+/// Transfers all bytes from the current position to the end of the stream, up
+/// to `limit`, returning them as a caller-owned allocated slice.
+///
+/// If `limit` would be exceeded, `error.StreamTooLong` is returned instead. In
+/// such case, the next byte that would be read will be the first one to exceed
+/// `limit`, and all preceding bytes have been discarded.
+///
+/// Asserts `r.buffer` has nonzero capacity.
+///
+/// See also:
+/// * `appendRemaining`
+pub fn allocRemaining(r: *Reader, gpa: Allocator, limit: Limit) LimitedAllocError![]u8 {
+    var buffer: ArrayList(u8) = .empty;
+    defer buffer.deinit(gpa);
+    try appendRemaining(r, gpa, null, &buffer, limit);
+    return buffer.toOwnedSlice(gpa);
+}
 
-        if (start_index - original_len > max_append_size) {
-            array_list.shrinkAndFree(original_len + max_append_size);
+/// Transfers all bytes from the current position to the end of the stream, up
+/// to `limit`, appending them to `list`.
+///
+/// If `limit` would be exceeded, `error.StreamTooLong` is returned instead. In
+/// such case, the next byte that would be read will be the first one to exceed
+/// `limit`, and all preceding bytes have been appended to `list`.
+///
+/// Asserts `r.buffer` has nonzero capacity.
+///
+/// See also:
+/// * `allocRemaining`
+pub fn appendRemaining(
+    r: *Reader,
+    gpa: Allocator,
+    comptime alignment: ?std.mem.Alignment,
+    list: *std.ArrayListAlignedUnmanaged(u8, alignment),
+    limit: Limit,
+) LimitedAllocError!void {
+    const buffer = r.buffer;
+    const buffer_contents = buffer[r.seek..r.end];
+    const copy_len = limit.minInt(buffer_contents.len);
+    try list.ensureUnusedCapacity(gpa, copy_len);
+    @memcpy(list.unusedCapacitySlice()[0..copy_len], buffer_contents[0..copy_len]);
+    list.items.len += copy_len;
+    r.seek += copy_len;
+    if (copy_len == buffer_contents.len) {
+        r.seek = 0;
+        r.end = 0;
+    }
+    var remaining = limit.subtract(copy_len).?;
+    while (true) {
+        try list.ensureUnusedCapacity(gpa, 1);
+        const dest = remaining.slice(list.unusedCapacitySlice());
+        const additional_buffer: []u8 = if (@intFromEnum(remaining) == dest.len) buffer else &.{};
+        const n = readVec(r, &.{ dest, additional_buffer }) catch |err| switch (err) {
+            error.EndOfStream => break,
+            error.ReadFailed => return error.ReadFailed,
+        };
+        if (n >= dest.len) {
+            r.end = n - dest.len;
+            list.items.len += dest.len;
+            if (n == dest.len) return;
             return error.StreamTooLong;
         }
+        list.items.len += n;
+        remaining = remaining.subtract(n).?;
+    }
+}
+
+/// Writes bytes from the internally tracked stream position to `data`.
+///
+/// Returns the number of bytes written, which will be at minimum `0` and
+/// at most the sum of each data slice length. The number of bytes read,
+/// including zero, does not indicate end of stream.
+///
+/// The reader's internal logical seek position moves forward in accordance
+/// with the number of bytes returned from this function.
+pub fn readVec(r: *Reader, data: []const []u8) Error!usize {
+    return readVecLimit(r, data, .unlimited);
+}
 
-        if (bytes_read != dest_slice.len) {
-            array_list.shrinkAndFree(start_index);
-            return;
+/// Equivalent to `readVec` but reads at most `limit` bytes.
+///
+/// This ultimately lowers to a call to `stream`, but it must ensure that the
+/// `Writer` it passes has at least as much buffer capacity as `r.buffer`, in
+/// case the `stream` implementation depends on a minimum buffer capacity. It
+/// also ensures that if the `stream` implementation calls
+/// `Writer.writableVector`, it will get these data slices along with the
+/// buffer at the end.
+pub fn readVecLimit(r: *Reader, data: []const []u8, limit: Limit) Error!usize {
+    comptime assert(@intFromEnum(Limit.unlimited) == std.math.maxInt(usize));
+    var remaining = @intFromEnum(limit);
+    for (data, 0..) |buf, i| {
+        const buffer_contents = r.buffer[r.seek..r.end];
+        const copy_len = @min(buffer_contents.len, buf.len, remaining);
+        @memcpy(buf[0..copy_len], buffer_contents[0..copy_len]);
+        r.seek += copy_len;
+        remaining -= copy_len;
+        if (remaining == 0) break;
+        if (buf.len - copy_len == 0) continue;
+
+        // All buffered bytes have been copied to `data`. We now set up a
+        // structure that enables the `Writer.writableVector` API, while also
+        // ensuring that API which operates directly on `Writer.buffer` has its
+        // minimum buffer capacity requirements met.
+        r.seek = 0;
+        r.end = 0;
+        const first = buf[copy_len..];
+        const middle = data[i + 1 ..];
+        var wrapper: Writer.VectorWrapper = .{
+            .it = .{
+                .first = first,
+                .middle = middle,
+                .last = r.buffer,
+            },
+            .writer = .{
+                .buffer = if (first.len >= r.buffer.len) first else r.buffer,
+                .vtable = &Writer.VectorWrapper.vtable,
+            },
+        };
+        var n = r.vtable.stream(r, &wrapper.writer, .limited(remaining)) catch |err| switch (err) {
+            error.WriteFailed => {
+                if (wrapper.writer.buffer.ptr == first.ptr) {
+                    remaining -= wrapper.writer.end;
+                } else {
+                    r.end = wrapper.writer.end;
+                }
+                break;
+            },
+            else => |e| return e,
+        };
+        if (wrapper.writer.buffer.ptr != first.ptr) {
+            r.end = n;
+            break;
+        }
+        if (n < first.len) {
+            remaining -= n;
+            break;
         }
+        remaining -= first.len;
+        n -= first.len;
+        for (middle) |mid| {
+            if (n < mid.len) {
+                remaining -= n;
+                break;
+            }
+            remaining -= mid.len;
+            n -= mid.len;
+        }
+        r.end = n;
+        break;
+    }
+    return @intFromEnum(limit) - remaining;
+}
+
+pub fn buffered(r: *Reader) []u8 {
+    return r.buffer[r.seek..r.end];
+}
+
+pub fn bufferedLen(r: *const Reader) usize {
+    return r.end - r.seek;
+}
+
+pub fn hashed(r: *Reader, hasher: anytype) Hashed(@TypeOf(hasher)) {
+    return .{ .in = r, .hasher = hasher };
+}
 
-        // This will trigger ArrayList to expand superlinearly at whatever its growth rate is.
-        try array_list.ensureTotalCapacity(start_index + 1);
+pub fn readVecAll(r: *Reader, data: [][]u8) Error!void {
+    var index: usize = 0;
+    var truncate: usize = 0;
+    while (index < data.len) {
+        {
+            const untruncated = data[index];
+            data[index] = untruncated[truncate..];
+            defer data[index] = untruncated;
+            truncate += try r.readVec(data[index..]);
+        }
+        while (index < data.len and truncate >= data[index].len) {
+            truncate -= data[index].len;
+            index += 1;
+        }
     }
 }
 
-/// Allocates enough memory to hold all the contents of the stream. If the allocated
-/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
-/// Caller owns returned memory.
-/// If this function returns an error, the contents from the stream read so far are lost.
-pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) anyerror![]u8 {
-    var array_list = std.ArrayList(u8).init(allocator);
-    defer array_list.deinit();
-    try self.readAllArrayList(&array_list, max_size);
-    return try array_list.toOwnedSlice();
-}
-
-/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
-/// Replaces the `std.ArrayList` contents by reading from the stream until `delimiter` is found.
-/// Does not include the delimiter in the result.
-/// If the `std.ArrayList` length would exceed `max_size`, `error.StreamTooLong` is returned and the
-/// `std.ArrayList` is populated with `max_size` bytes from the stream.
-pub fn readUntilDelimiterArrayList(
-    self: Self,
-    array_list: *std.ArrayList(u8),
-    delimiter: u8,
-    max_size: usize,
-) anyerror!void {
-    array_list.shrinkRetainingCapacity(0);
-    try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
-}
-
-/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
-/// Allocates enough memory to read until `delimiter`. If the allocated
-/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
-/// Caller owns returned memory.
-/// If this function returns an error, the contents from the stream read so far are lost.
-pub fn readUntilDelimiterAlloc(
-    self: Self,
-    allocator: mem.Allocator,
-    delimiter: u8,
-    max_size: usize,
-) anyerror![]u8 {
-    var array_list = std.ArrayList(u8).init(allocator);
-    defer array_list.deinit();
-    try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
-    return try array_list.toOwnedSlice();
-}
-
-/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
-/// Reads from the stream until specified byte is found. If the buffer is not
-/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
-/// If end-of-stream is found, `error.EndOfStream` is returned.
-/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
-/// delimiter byte is written to the output buffer but is not included
-/// in the returned slice.
-pub fn readUntilDelimiter(self: Self, buf: []u8, delimiter: u8) anyerror![]u8 {
-    var fbs = std.io.fixedBufferStream(buf);
-    try self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len);
-    const output = fbs.getWritten();
-    buf[output.len] = delimiter; // emulating old behaviour
-    return output;
-}
-
-/// Deprecated: use `streamUntilDelimiter` with ArrayList's (or any other's) writer instead.
-/// Allocates enough memory to read until `delimiter` or end-of-stream.
-/// If the allocated memory would be greater than `max_size`, returns
-/// `error.StreamTooLong`. If end-of-stream is found, returns the rest
-/// of the stream. If this function is called again after that, returns
-/// null.
-/// Caller owns returned memory.
-/// If this function returns an error, the contents from the stream read so far are lost.
-pub fn readUntilDelimiterOrEofAlloc(
-    self: Self,
-    allocator: mem.Allocator,
-    delimiter: u8,
-    max_size: usize,
-) anyerror!?[]u8 {
-    var array_list = std.ArrayList(u8).init(allocator);
-    defer array_list.deinit();
-    self.streamUntilDelimiter(array_list.writer(), delimiter, max_size) catch |err| switch (err) {
-        error.EndOfStream => if (array_list.items.len == 0) {
-            return null;
+/// Returns the next `n` bytes from the stream, filling the buffer as
+/// necessary.
+///
+/// Invalidates previously returned values from `peek`.
+///
+/// Asserts that the `Reader` was initialized with a buffer capacity at
+/// least as big as `n`.
+///
+/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
+/// is returned instead.
+///
+/// See also:
+/// * `peekGreedy`
+/// * `toss`
+/// * `take`
+pub fn peek(r: *Reader, n: usize) Error![]u8 {
+    try r.fill(n);
+    return r.buffer[r.seek..][0..n];
+}
+
+/// Returns all the next buffered bytes, after filling the buffer to ensure it
+/// contains at least `n` bytes.
+///
+/// Invalidates previously returned values from `peek` and `peekGreedy`.
+///
+/// Asserts that the `Reader` was initialized with a buffer capacity at
+/// least as big as `n`.
+///
+/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
+/// is returned instead.
+///
+/// See also:
+/// * `peek`
+/// * `toss`
+pub fn peekGreedy(r: *Reader, n: usize) Error![]u8 {
+    try r.fill(n);
+    return r.buffer[r.seek..r.end];
+}
+
+/// Skips the next `n` bytes from the stream, advancing the seek position.
+/// This is typically used after `peek` to consume bytes that have already
+/// been examined.
+///
+/// Asserts that at least `n` bytes are currently buffered.
+///
+/// The "tossed" memory remains alive until the next "peek" operation occurs.
+///
+/// See also:
+/// * `peek`
+/// * `discard`
+pub fn toss(r: *Reader, n: usize) void {
+    r.seek += n;
+    assert(r.seek <= r.end);
+}
+
+/// Equivalent to `toss(r.bufferedLen())`.
+pub fn tossAll(r: *Reader) void {
+    r.seek = 0;
+    r.end = 0;
+}
+
+/// Equivalent to `peek` followed by `toss`.
+///
+/// The data returned is invalidated by the next call to `take`, `peek`,
+/// `fill`, and functions with those prefixes.
+pub fn take(r: *Reader, n: usize) Error![]u8 {
+    const result = try r.peek(n);
+    r.toss(n);
+    return result;
+}
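+
+// A minimal usage sketch of `peek`, `toss`, and `take` over a fixed reader;
+// the test name and data are illustrative only.
+test "peek, toss, take usage sketch" {
+    var r: Reader = .fixed("abc");
+    try testing.expectEqualStrings("ab", try r.peek(2));
+    r.toss(2);
+    try testing.expectEqualStrings("c", try r.take(1));
+    try testing.expectError(error.EndOfStream, r.takeByte());
+}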
+
+/// Returns the next `n` bytes from the stream as an array, filling the buffer
+/// as necessary and advancing the seek position `n` bytes.
+///
+/// Asserts that the `Reader` was initialized with a buffer capacity at
+/// least as big as `n`.
+///
+/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
+/// is returned instead.
+///
+/// See also:
+/// * `take`
+pub fn takeArray(r: *Reader, comptime n: usize) Error!*[n]u8 {
+    return (try r.take(n))[0..n];
+}
+
+/// Returns the next `n` bytes from the stream as an array, filling the buffer
+/// as necessary, without advancing the seek position.
+///
+/// Asserts that the `Reader` was initialized with a buffer capacity at
+/// least as big as `n`.
+///
+/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
+/// is returned instead.
+///
+/// See also:
+/// * `peek`
+/// * `takeArray`
+pub fn peekArray(r: *Reader, comptime n: usize) Error!*[n]u8 {
+    return (try r.peek(n))[0..n];
+}
+
+/// Skips the next `n` bytes from the stream, advancing the seek position.
+///
+/// Unlike `toss`, which is infallible, `n` may exceed the number of buffered bytes.
+///
+/// Returns `error.EndOfStream` if fewer than `n` bytes could be discarded.
+///
+/// See also:
+/// * `toss`
+/// * `discardRemaining`
+/// * `discardShort`
+/// * `discard`
+pub fn discardAll(r: *Reader, n: usize) Error!void {
+    if ((try r.discardShort(n)) != n) return error.EndOfStream;
+}
+
+pub fn discardAll64(r: *Reader, n: u64) Error!void {
+    var remaining: u64 = n;
+    while (remaining > 0) {
+        const limited_remaining = std.math.cast(usize, remaining) orelse std.math.maxInt(usize);
+        try discardAll(r, limited_remaining);
+        remaining -= limited_remaining;
+    }
+}
+
+/// Skips the next `n` bytes from the stream, advancing the seek position.
+///
+/// Unlike `toss`, which is infallible, `n` may exceed the number of buffered bytes.
+///
+/// Returns the number of bytes discarded, which is less than `n` if and only
+/// if the stream reached the end.
+///
+/// See also:
+/// * `discardAll`
+/// * `discardRemaining`
+/// * `discard`
+pub fn discardShort(r: *Reader, n: usize) ShortError!usize {
+    const proposed_seek = r.seek + n;
+    if (proposed_seek <= r.end) {
+        @branchHint(.likely);
+        r.seek = proposed_seek;
+        return n;
+    }
+    var remaining = n - (r.end - r.seek);
+    r.end = 0;
+    r.seek = 0;
+    while (true) {
+        const discard_len = r.vtable.discard(r, .limited(remaining)) catch |err| switch (err) {
+            error.EndOfStream => return n - remaining,
+            error.ReadFailed => return error.ReadFailed,
+        };
+        remaining -= discard_len;
+        if (remaining == 0) return n;
+    }
+}
+
+/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing
+/// the seek position.
+///
+/// Invalidates previously returned values from `peek`.
+///
+/// If the provided buffer cannot be filled completely, `error.EndOfStream` is
+/// returned instead.
+///
+/// See also:
+/// * `peek`
+/// * `readSliceShort`
+pub fn readSlice(r: *Reader, buffer: []u8) Error!void {
+    const n = try readSliceShort(r, buffer);
+    if (n != buffer.len) return error.EndOfStream;
+}
+
+/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing
+/// the seek position.
+///
+/// Invalidates previously returned values from `peek`.
+///
+/// Returns the number of bytes read, which is less than `buffer.len` if and
+/// only if the stream reached the end.
+///
+/// See also:
+/// * `readSlice`
+pub fn readSliceShort(r: *Reader, buffer: []u8) ShortError!usize {
+    const in_buffer = r.buffer[r.seek..r.end];
+    const copy_len = @min(buffer.len, in_buffer.len);
+    @memcpy(buffer[0..copy_len], in_buffer[0..copy_len]);
+    if (buffer.len - copy_len == 0) {
+        r.seek += copy_len;
+        return buffer.len;
+    }
+    var i: usize = copy_len;
+    r.end = 0;
+    r.seek = 0;
+    while (true) {
+        const remaining = buffer[i..];
+        var wrapper: Writer.VectorWrapper = .{
+            .it = .{
+                .first = remaining,
+                .last = r.buffer,
+            },
+            .writer = .{
+                .buffer = if (remaining.len >= r.buffer.len) remaining else r.buffer,
+                .vtable = &Writer.VectorWrapper.vtable,
+            },
+        };
+        const n = r.vtable.stream(r, &wrapper.writer, .unlimited) catch |err| switch (err) {
+            error.WriteFailed => {
+                if (wrapper.writer.buffer.ptr != remaining.ptr) {
+                    assert(r.seek == 0);
+                    r.seek = remaining.len;
+                    r.end = wrapper.writer.end;
+                    @memcpy(remaining, r.buffer[0..remaining.len]);
+                    return buffer.len;
+                }
+                return buffer.len;
+            },
+            error.EndOfStream => return i,
+            error.ReadFailed => return error.ReadFailed,
+        };
+        if (n < remaining.len) {
+            i += n;
+            continue;
+        }
+        r.end = n - remaining.len;
+        return buffer.len;
+    }
+}
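+
+// A minimal sketch of `readSlice` and `readSliceShort` over fixed readers;
+// the test name and data are illustrative only.
+test "readSlice usage sketch" {
+    var buf: [5]u8 = undefined;
+
+    var r: Reader = .fixed("hello");
+    try r.readSlice(&buf);
+    try testing.expectEqualStrings("hello", &buf);
+
+    var short: Reader = .fixed("hi");
+    try testing.expectEqual(@as(usize, 2), try short.readSliceShort(&buf));
+    try testing.expectEqualStrings("hi", buf[0..2]);
+}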
+
+/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing
+/// the seek position.
+///
+/// Invalidates previously returned values from `peek`.
+///
+/// If the provided buffer cannot be filled completely, `error.EndOfStream` is
+/// returned instead.
+///
+/// This function is inline to avoid generating dead code when `endian` is
+/// comptime-known and matches the host endianness.
+///
+/// See also:
+/// * `readSlice`
+/// * `readSliceEndianAlloc`
+pub inline fn readSliceEndian(
+    r: *Reader,
+    comptime Elem: type,
+    buffer: []Elem,
+    endian: std.builtin.Endian,
+) Error!void {
+    try readSlice(r, @ptrCast(buffer));
+    if (native_endian != endian) for (buffer) |*elem| std.mem.byteSwapAllFields(Elem, elem);
+}
+
+pub const ReadAllocError = Error || Allocator.Error;
+
+/// This function is inline to avoid generating dead code when `endian` is
+/// comptime-known and matches the host endianness.
+pub inline fn readSliceEndianAlloc(
+    r: *Reader,
+    allocator: Allocator,
+    comptime Elem: type,
+    len: usize,
+    endian: std.builtin.Endian,
+) ReadAllocError![]Elem {
+    const dest = try allocator.alloc(Elem, len);
+    errdefer allocator.free(dest);
+    try readSlice(r, @ptrCast(dest));
+    if (native_endian != endian) for (dest) |*elem| std.mem.byteSwapAllFields(Elem, elem);
+    return dest;
+}
+
+pub fn readSliceAlloc(r: *Reader, allocator: Allocator, len: usize) ReadAllocError![]u8 {
+    const dest = try allocator.alloc(u8, len);
+    errdefer allocator.free(dest);
+    try readSlice(r, dest);
+    return dest;
+}
+
+pub const DelimiterError = error{
+    /// See the `Reader` implementation for detailed diagnostics.
+    ReadFailed,
+    /// For "inclusive" functions, stream ended before the delimiter was found.
+    /// For "exclusive" functions, stream ended and there are no more bytes to
+    /// return.
+    EndOfStream,
+    /// The delimiter was not found within a number of bytes matching the
+    /// capacity of the `Reader`.
+    StreamTooLong,
+};
+
+/// Returns a slice of the next bytes of buffered data from the stream until
+/// `sentinel` is found, advancing the seek position.
+///
+/// Returned slice has a sentinel.
+///
+/// Invalidates previously returned values from `peek`.
+///
+/// See also:
+/// * `peekSentinel`
+/// * `takeDelimiterExclusive`
+/// * `takeDelimiterInclusive`
+pub fn takeSentinel(r: *Reader, comptime sentinel: u8) DelimiterError![:sentinel]u8 {
+    const result = try r.peekSentinel(sentinel);
+    r.toss(result.len + 1);
+    return result;
+}
+
+pub fn peekSentinel(r: *Reader, comptime sentinel: u8) DelimiterError![:sentinel]u8 {
+    const result = try r.peekDelimiterInclusive(sentinel);
+    return result[0 .. result.len - 1 :sentinel];
+}
+
+/// Returns a slice of the next bytes of buffered data from the stream until
+/// `delimiter` is found, advancing the seek position.
+///
+/// Returned slice includes the delimiter as the last byte.
+///
+/// Invalidates previously returned values from `peek`.
+///
+/// See also:
+/// * `takeSentinel`
+/// * `takeDelimiterExclusive`
+/// * `peekDelimiterInclusive`
+pub fn takeDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
+    const result = try r.peekDelimiterInclusive(delimiter);
+    r.toss(result.len);
+    return result;
+}
+
+/// Returns a slice of the next bytes of buffered data from the stream until
+/// `delimiter` is found, without advancing the seek position.
+///
+/// Returned slice includes the delimiter as the last byte.
+///
+/// Invalidates previously returned values from `peek`.
+///
+/// See also:
+/// * `peekSentinel`
+/// * `peekDelimiterExclusive`
+/// * `takeDelimiterInclusive`
+pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
+    const buffer = r.buffer[0..r.end];
+    const seek = r.seek;
+    if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |end| {
+        @branchHint(.likely);
+        return buffer[seek .. end + 1];
+    }
+    if (seek > 0) {
+        const remainder = buffer[seek..];
+        @memmove(buffer[0..remainder.len], remainder);
+        r.end = remainder.len;
+        r.seek = 0;
+    }
+    var writer: Writer = .{
+        .buffer = r.buffer,
+        .vtable = &.{ .drain = Writer.fixedDrain },
+    };
+    while (r.end < r.buffer.len) {
+        writer.end = r.end;
+        const n = r.vtable.stream(r, &writer, .limited(r.buffer.len - r.end)) catch |err| switch (err) {
+            error.WriteFailed => unreachable,
+            else => |e| return e,
+        };
+        const prev_end = r.end;
+        r.end = prev_end + n;
+        if (std.mem.indexOfScalarPos(u8, r.buffer[0..r.end], prev_end, delimiter)) |end| {
+            return r.buffer[0 .. end + 1];
+        }
+    }
+    return error.StreamTooLong;
+}
+
+/// Returns a slice of the next bytes of buffered data from the stream until
+/// `delimiter` is found, advancing the seek position.
+///
+/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
+/// to a delimiter, unless it would result in a length 0 return value, in which
+/// case `error.EndOfStream` is returned instead.
+///
+/// If the delimiter is not found within a number of bytes matching the
+/// capacity of this `Reader`, `error.StreamTooLong` is returned. In
+/// such case, the stream state is unmodified as if this function was never
+/// called.
+///
+/// Invalidates previously returned values from `peek`.
+///
+/// See also:
+/// * `takeDelimiterInclusive`
+/// * `peekDelimiterExclusive`
+pub fn takeDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
+    const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
+        error.EndOfStream => {
+            if (r.end == 0) return error.EndOfStream;
+            r.toss(r.end);
+            return r.buffer[0..r.end];
         },
         else => |e| return e,
     };
-    return try array_list.toOwnedSlice();
-}
-
-/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
-/// Reads from the stream until specified byte is found. If the buffer is not
-/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
-/// If end-of-stream is found, returns the rest of the stream. If this
-/// function is called again after that, returns null.
-/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
-/// delimiter byte is written to the output buffer but is not included
-/// in the returned slice.
-pub fn readUntilDelimiterOrEof(self: Self, buf: []u8, delimiter: u8) anyerror!?[]u8 {
-    var fbs = std.io.fixedBufferStream(buf);
-    self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len) catch |err| switch (err) {
-        error.EndOfStream => if (fbs.getWritten().len == 0) {
-            return null;
-        },
+    r.toss(result.len);
+    return result[0 .. result.len - 1];
+}
 
+/// Returns a slice of the next bytes of buffered data from the stream until
+/// `delimiter` is found, without advancing the seek position.
+///
+/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
+/// to a delimiter, unless it would result in a length 0 return value, in which
+/// case `error.EndOfStream` is returned instead.
+///
+/// If the delimiter is not found within a number of bytes matching the
+/// capacity of this `Reader`, `error.StreamTooLong` is returned. In
+/// such case, the stream state is unmodified as if this function was never
+/// called.
+///
+/// Invalidates previously returned values from `peek`.
+///
+/// See also:
+/// * `peekDelimiterInclusive`
+/// * `takeDelimiterExclusive`
+pub fn peekDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
+    const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
+        error.EndOfStream => {
+            if (r.end == 0) return error.EndOfStream;
+            return r.buffer[0..r.end];
+        },
         else => |e| return e,
     };
-    const output = fbs.getWritten();
-    buf[output.len] = delimiter; // emulating old behaviour
-    return output;
+    return result[0 .. result.len - 1];
 }
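+
+// A minimal sketch of iterating "lines" with `takeDelimiterExclusive`, which
+// treats end of stream as a final delimiter; names and data are illustrative.
+test "takeDelimiterExclusive usage sketch" {
+    var r: Reader = .fixed("ab\ncd");
+    try testing.expectEqualStrings("ab", try r.takeDelimiterExclusive('\n'));
+    try testing.expectEqualStrings("cd", try r.takeDelimiterExclusive('\n'));
+    try testing.expectError(error.EndOfStream, r.takeDelimiterExclusive('\n'));
+}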
 
-/// Appends to the `writer` contents by reading from the stream until `delimiter` is found.
+/// Appends to `w` contents by reading from the stream until `delimiter` is
+/// found. Does not write the delimiter itself.
+///
+/// Returns number of bytes streamed.
+pub fn readDelimiter(r: *Reader, w: *Writer, delimiter: u8) StreamError!usize {
+    const amount, const to = try r.readAny(w, delimiter, .unlimited);
+    return switch (to) {
+        .delimiter => amount,
+        .limit => unreachable,
+        .end => error.EndOfStream,
+    };
+}
+
+/// Appends to `w` contents by reading from the stream until `delimiter` is found.
 /// Does not write the delimiter itself.
-/// If `optional_max_size` is not null and amount of written bytes exceeds `optional_max_size`,
-/// returns `error.StreamTooLong` and finishes appending.
-/// If `optional_max_size` is null, appending is unbounded.
-pub fn streamUntilDelimiter(
-    self: Self,
-    writer: anytype,
+///
+/// Succeeds if the stream ends before the delimiter is found.
+///
+/// Returns number of bytes streamed. The end is not signaled to the writer.
+pub fn readDelimiterEnding(
+    r: *Reader,
+    w: *Writer,
     delimiter: u8,
-    optional_max_size: ?usize,
-) anyerror!void {
-    if (optional_max_size) |max_size| {
-        for (0..max_size) |_| {
-            const byte: u8 = try self.readByte();
-            if (byte == delimiter) return;
-            try writer.writeByte(byte);
-        }
-        return error.StreamTooLong;
-    } else {
-        while (true) {
-            const byte: u8 = try self.readByte();
-            if (byte == delimiter) return;
-            try writer.writeByte(byte);
-        }
-        // Can not throw `error.StreamTooLong` since there are no boundary.
+) StreamRemainingError!usize {
+    const amount, const to = try r.readAny(w, delimiter, .unlimited);
+    return switch (to) {
+        .delimiter, .end => amount,
+        .limit => unreachable,
+    };
+}
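+
+// A minimal sketch of streaming into a `Writer` with `readDelimiterEnding`;
+// a fixed writer stands in for any sink. Names and data are illustrative.
+test "readDelimiterEnding usage sketch" {
+    var out: [16]u8 = undefined;
+    var w: Writer = .fixed(&out);
+    var r: Reader = .fixed("abc\ndef");
+    try testing.expectEqual(@as(usize, 3), try r.readDelimiterEnding(&w, '\n'));
+    try testing.expectEqual(@as(usize, 3), try r.readDelimiterEnding(&w, '\n'));
+    try testing.expectEqualStrings("abcdef", w.buffered());
+}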
+
+pub const StreamDelimiterLimitedError = StreamRemainingError || error{
+    /// Stream ended before the delimiter was found.
+    EndOfStream,
+    /// The delimiter was not found within the limit.
+    StreamTooLong,
+};
+
+/// Appends to `w` contents by reading from the stream until `delimiter` is found.
+/// Does not write the delimiter itself.
+///
+/// Returns number of bytes streamed.
+pub fn readDelimiterLimit(
+    r: *Reader,
+    w: *Writer,
+    delimiter: u8,
+    limit: Limit,
+) StreamDelimiterLimitedError!usize {
+    const amount, const to = try r.readAny(w, delimiter, limit);
+    return switch (to) {
+        .delimiter => amount,
+        .limit => error.StreamTooLong,
+        .end => error.EndOfStream,
+    };
+}
+
+fn readAny(
+    r: *Reader,
+    w: *Writer,
+    delimiter: ?u8,
+    limit: Limit,
+) StreamRemainingError!struct { usize, enum { delimiter, limit, end } } {
+    var amount: usize = 0;
+    var remaining = limit;
+    while (remaining.nonzero()) {
+        const available = remaining.slice(r.peekGreedy(1) catch |err| switch (err) {
+            error.ReadFailed => |e| return e,
+            error.EndOfStream => return .{ amount, .end },
+        });
+        if (delimiter) |d| if (std.mem.indexOfScalar(u8, available, d)) |delimiter_index| {
+            try w.writeAll(available[0..delimiter_index]);
+            r.toss(delimiter_index + 1);
+            return .{ amount + delimiter_index, .delimiter };
+        };
+        try w.writeAll(available);
+        r.toss(available.len);
+        amount += available.len;
+        remaining = remaining.subtract(available.len).?;
     }
+    return .{ amount, .limit };
 }
 
 /// Reads from the stream until specified byte is found, discarding all data,
 /// including the delimiter.
-/// If end-of-stream is found, this function succeeds.
-pub fn skipUntilDelimiterOrEof(self: Self, delimiter: u8) anyerror!void {
-    while (true) {
-        const byte = self.readByte() catch |err| switch (err) {
-            error.EndOfStream => return,
-            else => |e| return e,
+///
+/// If end of stream is found, this function succeeds.
+pub fn discardDelimiterInclusive(r: *Reader, delimiter: u8) Error!void {
+    _ = r;
+    _ = delimiter;
+    @panic("TODO");
+}
+
+/// Reads from the stream until specified byte is found, discarding all data,
+/// excluding the delimiter.
+///
+/// Succeeds if the stream ends before the delimiter is found.
+pub fn discardDelimiterExclusive(r: *Reader, delimiter: u8) ShortError!void {
+    _ = r;
+    _ = delimiter;
+    @panic("TODO");
+}
+
+/// Fills the buffer such that it contains at least `n` bytes, without
+/// advancing the seek position.
+///
+/// Returns `error.EndOfStream` if and only if there are fewer than `n` bytes
+/// remaining.
+///
+/// Asserts buffer capacity is at least `n`.
+pub fn fill(r: *Reader, n: usize) Error!void {
+    assert(n <= r.buffer.len);
+    if (r.seek + n <= r.end) {
+        @branchHint(.likely);
+        return;
+    }
+    rebaseCapacity(r, n);
+    var writer: Writer = .{
+        .buffer = r.buffer,
+        .vtable = &.{ .drain = Writer.fixedDrain },
+    };
+    while (r.end < r.seek + n) {
+        writer.end = r.end;
+        r.end += r.vtable.stream(r, &writer, .limited(r.buffer.len - r.end)) catch |err| switch (err) {
+            error.WriteFailed => unreachable,
+            error.ReadFailed, error.EndOfStream => |e| return e,
         };
-        if (byte == delimiter) return;
     }
 }
 
-/// Reads 1 byte from the stream or returns `error.EndOfStream`.
-pub fn readByte(self: Self) anyerror!u8 {
-    var result: [1]u8 = undefined;
-    const amt_read = try self.read(result[0..]);
-    if (amt_read < 1) return error.EndOfStream;
-    return result[0];
-}
-
-/// Same as `readByte` except the returned byte is signed.
-pub fn readByteSigned(self: Self) anyerror!i8 {
-    return @as(i8, @bitCast(try self.readByte()));
-}
-
-/// Reads exactly `num_bytes` bytes and returns as an array.
-/// `num_bytes` must be comptime-known
-pub fn readBytesNoEof(self: Self, comptime num_bytes: usize) anyerror![num_bytes]u8 {
-    var bytes: [num_bytes]u8 = undefined;
-    try self.readNoEof(&bytes);
-    return bytes;
-}
-
-/// Reads bytes until `bounded.len` is equal to `num_bytes`,
-/// or the stream ends.
-///
-/// * it is assumed that `num_bytes` will not exceed `bounded.capacity()`
-pub fn readIntoBoundedBytes(
-    self: Self,
-    comptime num_bytes: usize,
-    bounded: *std.BoundedArray(u8, num_bytes),
-) anyerror!void {
-    while (bounded.len < num_bytes) {
-        // get at most the number of bytes free in the bounded array
-        const bytes_read = try self.read(bounded.unusedCapacitySlice());
-        if (bytes_read == 0) return;
-
-        // bytes_read will never be larger than @TypeOf(bounded.len)
-        // due to `self.read` being bounded by `bounded.unusedCapacitySlice()`
-        bounded.len += @as(@TypeOf(bounded.len), @intCast(bytes_read));
+/// Without advancing the seek position, does exactly one underlying read, filling the buffer as
+/// much as possible. This may result in zero bytes added to the buffer, which is not an end of
+/// stream condition. End of stream is communicated via returning `error.EndOfStream`.
+///
+/// Asserts buffer capacity is at least 1.
+pub fn fillMore(r: *Reader) Error!void {
+    rebaseCapacity(r, 1);
+    var writer: Writer = .{
+        .buffer = r.buffer,
+        .end = r.end,
+        .vtable = &.{ .drain = Writer.fixedDrain },
+    };
+    r.end += r.vtable.stream(r, &writer, .limited(r.buffer.len - r.end)) catch |err| switch (err) {
+        error.WriteFailed => unreachable,
+        else => |e| return e,
+    };
+}
+
+/// Returns the next byte from the stream or returns `error.EndOfStream`.
+///
+/// Does not advance the seek position.
+///
+/// Asserts the buffer capacity is nonzero.
+pub fn peekByte(r: *Reader) Error!u8 {
+    if (r.seek >= r.end) {
+        @branchHint(.unlikely);
+        try fill(r, 1);
+    }
+    return r.buffer[r.seek];
+}
 
-/// Reads at most `num_bytes` and returns as a bounded array.
-pub fn readBoundedBytes(self: Self, comptime num_bytes: usize) anyerror!std.BoundedArray(u8, num_bytes) {
-    var result = std.BoundedArray(u8, num_bytes){};
-    try self.readIntoBoundedBytes(num_bytes, &result);
+/// Reads 1 byte from the stream or returns `error.EndOfStream`.
+///
+/// Asserts the buffer capacity is nonzero.
+pub fn takeByte(r: *Reader) Error!u8 {
+    const result = try peekByte(r);
+    r.seek += 1;
     return result;
 }
 
-pub inline fn readInt(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
-    const bytes = try self.readBytesNoEof(@divExact(@typeInfo(T).int.bits, 8));
-    return mem.readInt(T, &bytes, endian);
+/// Same as `takeByte` except the returned byte is signed.
+pub fn takeByteSigned(r: *Reader) Error!i8 {
+    return @bitCast(try r.takeByte());
 }
 
-pub fn readVarInt(
-    self: Self,
-    comptime ReturnType: type,
-    endian: std.builtin.Endian,
-    size: usize,
-) anyerror!ReturnType {
-    assert(size <= @sizeOf(ReturnType));
-    var bytes_buf: [@sizeOf(ReturnType)]u8 = undefined;
-    const bytes = bytes_buf[0..size];
-    try self.readNoEof(bytes);
-    return mem.readVarInt(ReturnType, bytes, endian);
-}
-
-/// Optional parameters for `skipBytes`
-pub const SkipBytesOptions = struct {
-    buf_size: usize = 512,
-};
-
-// `num_bytes` is a `u64` to match `off_t`
-/// Reads `num_bytes` bytes from the stream and discards them
-pub fn skipBytes(self: Self, num_bytes: u64, comptime options: SkipBytesOptions) anyerror!void {
-    var buf: [options.buf_size]u8 = undefined;
-    var remaining = num_bytes;
+/// Asserts the buffer was initialized with a capacity at least `@bitSizeOf(T) / 8`.
+pub inline fn takeInt(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T {
+    const n = @divExact(@typeInfo(T).int.bits, 8);
+    return std.mem.readInt(T, try r.takeArray(n), endian);
+}
 
-    while (remaining > 0) {
-        const amt = @min(remaining, options.buf_size);
-        try self.readNoEof(buf[0..amt]);
-        remaining -= amt;
-    }
+/// Asserts the buffer was initialized with a capacity at least `n`.
+pub fn takeVarInt(r: *Reader, comptime Int: type, endian: std.builtin.Endian, n: usize) Error!Int {
+    assert(n <= @sizeOf(Int));
+    return std.mem.readVarInt(Int, try r.take(n), endian);
 }
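+
+// A minimal sketch of `takeInt` and `takeVarInt` decoding big-endian bytes
+// from a fixed reader; the test name and data are illustrative only.
+test "takeInt and takeVarInt usage sketch" {
+    var r: Reader = .fixed("\x01\x02\x03");
+    try testing.expectEqual(@as(u16, 0x0102), try r.takeInt(u16, .big));
+    try testing.expectEqual(@as(u32, 0x03), try r.takeVarInt(u32, .big, 1));
+    try testing.expectError(error.EndOfStream, r.takeByte());
+}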
 
-/// Reads `slice.len` bytes from the stream and returns if they are the same as the passed slice
-pub fn isBytes(self: Self, slice: []const u8) anyerror!bool {
-    var i: usize = 0;
-    var matches = true;
-    while (i < slice.len) : (i += 1) {
-        if (slice[i] != try self.readByte()) {
-            matches = false;
-        }
-    }
-    return matches;
+/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
+///
+/// Advances the seek position.
+///
+/// See also:
+/// * `peekStruct`
+pub fn takeStruct(r: *Reader, comptime T: type) Error!*align(1) T {
+    // Only extern and packed structs have defined in-memory layout.
+    comptime assert(@typeInfo(T).@"struct".layout != .auto);
+    return @ptrCast(try r.takeArray(@sizeOf(T)));
 }
 
-pub fn readStruct(self: Self, comptime T: type) anyerror!T {
+/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
+///
+/// Does not advance the seek position.
+///
+/// See also:
+/// * `takeStruct`
+pub fn peekStruct(r: *Reader, comptime T: type) Error!*align(1) T {
     // Only extern and packed structs have defined in-memory layout.
     comptime assert(@typeInfo(T).@"struct".layout != .auto);
-    var res: [1]T = undefined;
-    try self.readNoEof(mem.sliceAsBytes(res[0..]));
-    return res[0];
+    return @ptrCast(try r.peekArray(@sizeOf(T)));
 }
 
-pub fn readStructEndian(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
-    var res = try self.readStruct(T);
-    if (native_endian != endian) {
-        mem.byteSwapAllFields(T, &res);
-    }
+/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
+///
+/// This function is inline to avoid referencing `std.mem.byteSwapAllFields`
+/// when `endian` is comptime-known and matches the host endianness.
+pub inline fn takeStructEndian(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T {
+    var res = (try r.takeStruct(T)).*;
+    if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
+    return res;
+}
+
+/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
+///
+/// This function is inline to avoid referencing `std.mem.byteSwapAllFields`
+/// when `endian` is comptime-known and matches the host endianness.
+pub inline fn peekStructEndian(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T {
+    var res = (try r.peekStruct(T)).*;
+    if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
     return res;
 }
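+
+// A minimal sketch of `takeStructEndian` reading an extern struct with a
+// defined layout; the struct and data here are illustrative only.
+test "takeStructEndian usage sketch" {
+    const Header = extern struct { a: u16, b: u16 };
+    var r: Reader = .fixed("\x01\x00\x02\x00");
+    const h = try r.takeStructEndian(Header, .little);
+    try testing.expectEqual(@as(u16, 1), h.a);
+    try testing.expectEqual(@as(u16, 2), h.b);
+}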
 
-/// Reads an integer with the same size as the given enum's tag type. If the integer matches
-/// an enum tag, casts the integer to the enum tag and returns it. Otherwise, returns an `error.InvalidValue`.
-/// TODO optimization taking advantage of most fields being in order
-pub fn readEnum(self: Self, comptime Enum: type, endian: std.builtin.Endian) anyerror!Enum {
-    const E = error{
-        /// An integer was read, but it did not match any of the tags in the supplied enum.
-        InvalidValue,
+pub const TakeEnumError = Error || error{InvalidEnumTag};
+
+/// Reads an integer with the same size as the given enum's tag type. If the
+/// integer matches an enum tag, casts the integer to the enum tag and returns
+/// it. Otherwise, returns `error.InvalidEnumTag`.
+///
+/// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`.
+pub fn takeEnum(r: *Reader, comptime Enum: type, endian: std.builtin.Endian) TakeEnumError!Enum {
+    const Tag = @typeInfo(Enum).@"enum".tag_type;
+    const int = try r.takeInt(Tag, endian);
+    return std.meta.intToEnum(Enum, int);
+}
+
+/// Reads an integer with the same size as the given nonexhaustive enum's tag type.
+///
+/// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`.
+pub fn takeEnumNonexhaustive(r: *Reader, comptime Enum: type, endian: std.builtin.Endian) Error!Enum {
+    const info = @typeInfo(Enum).@"enum";
+    comptime assert(!info.is_exhaustive);
+    comptime assert(@bitSizeOf(info.tag_type) == @sizeOf(info.tag_type) * 8);
+    return takeEnum(r, Enum, endian) catch |err| switch (err) {
+        error.InvalidEnumTag => unreachable,
+        else => |e| return e,
     };
-    const type_info = @typeInfo(Enum).@"enum";
-    const tag = try self.readInt(type_info.tag_type, endian);
+}
 
-    inline for (std.meta.fields(Enum)) |field| {
-        if (tag == field.value) {
-            return @field(Enum, field.name);
-        }
+pub const TakeLeb128Error = Error || error{Overflow};
+
+/// Read a single LEB128 value as type T, or `error.Overflow` if the value cannot fit.
+pub fn takeLeb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result {
+    const result_info = @typeInfo(Result).int;
+    return std.math.cast(Result, try r.takeMultipleOf7Leb128(@Type(.{ .int = .{
+        .signedness = result_info.signedness,
+        .bits = std.mem.alignForwardAnyAlign(u16, result_info.bits, 7),
+    } }))) orelse error.Overflow;
+}
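+
+// A minimal sketch of `takeLeb128` decoding the classic DWARF example
+// encoding of 624485 (0xe5 0x8e 0x26); the test name is illustrative.
+test "takeLeb128 usage sketch" {
+    var r: Reader = .fixed("\xe5\x8e\x26");
+    try testing.expectEqual(@as(u32, 624485), try r.takeLeb128(u32));
+    try testing.expectError(error.EndOfStream, r.takeByte());
+}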
+
+pub fn expandTotalCapacity(r: *Reader, allocator: Allocator, n: usize) Allocator.Error!void {
+    if (n <= r.buffer.len) return;
+    if (r.seek > 0) rebase(r);
+    var list: ArrayList(u8) = .{
+        .items = r.buffer[0..r.end],
+        .capacity = r.buffer.len,
+    };
+    defer r.buffer = list.allocatedSlice();
+    try list.ensureTotalCapacity(allocator, n);
+}
+
+pub const FillAllocError = Error || Allocator.Error;
+
+pub fn fillAlloc(r: *Reader, allocator: Allocator, n: usize) FillAllocError!void {
+    try expandTotalCapacity(r, allocator, n);
+    return fill(r, n);
+}
+
+/// Returns a slice into the unused capacity of `buffer` with at least
+/// `min_len` bytes, extending `buffer` by resizing it with `gpa` as necessary.
+///
+/// After calling this function, typically the caller will follow up with a
+/// call to `advanceBufferEnd` to report the actual number of bytes buffered.
+pub fn writableSliceGreedyAlloc(r: *Reader, allocator: Allocator, min_len: usize) Allocator.Error![]u8 {
+    {
+        const unused = r.buffer[r.end..];
+        if (unused.len >= min_len) return unused;
+    }
+    if (r.seek > 0) rebase(r);
+    {
+        var list: ArrayList(u8) = .{
+            .items = r.buffer[0..r.end],
+            .capacity = r.buffer.len,
+        };
+        defer r.buffer = list.allocatedSlice();
+        try list.ensureUnusedCapacity(allocator, min_len);
     }
+    const unused = r.buffer[r.end..];
+    assert(unused.len >= min_len);
+    return unused;
+}
 
-    return E.InvalidValue;
+/// After writing directly into the unused capacity of `buffer`, this function
+/// updates `end` so that users of `Reader` can receive the data.
+pub fn advanceBufferEnd(r: *Reader, n: usize) void {
+    assert(n <= r.buffer.len - r.end);
+    r.end += n;
 }
 
-/// Reads the stream until the end, ignoring all the data.
-/// Returns the number of bytes discarded.
-pub fn discard(self: Self) anyerror!u64 {
-    var trash: [4096]u8 = undefined;
-    var index: u64 = 0;
+fn takeMultipleOf7Leb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result {
+    const result_info = @typeInfo(Result).int;
+    comptime assert(result_info.bits % 7 == 0);
+    var remaining_bits: std.math.Log2IntCeil(Result) = result_info.bits;
+    const UnsignedResult = @Type(.{ .int = .{
+        .signedness = .unsigned,
+        .bits = result_info.bits,
+    } });
+    var result: UnsignedResult = 0;
+    var fits = true;
     while (true) {
-        const n = try self.read(&trash);
-        if (n == 0) return index;
-        index += n;
+        const buffer: []const packed struct(u8) { bits: u7, more: bool } = @ptrCast(try r.peekGreedy(1));
+        for (buffer, 1..) |byte, len| {
+            if (remaining_bits > 0) {
+                result = @shlExact(@as(UnsignedResult, byte.bits), result_info.bits - 7) |
+                    if (result_info.bits > 7) @shrExact(result, 7) else 0;
+                remaining_bits -= 7;
+            } else if (fits) fits = switch (result_info.signedness) {
+                .signed => @as(i7, @bitCast(byte.bits)) ==
+                    @as(i7, @truncate(@as(Result, @bitCast(result)) >> (result_info.bits - 1))),
+                .unsigned => byte.bits == 0,
+            };
+            if (byte.more) continue;
+            r.toss(len);
+            return if (fits) @as(Result, @bitCast(result)) >> remaining_bits else error.Overflow;
+        }
+        r.toss(buffer.len);
     }
 }
 
-const std = @import("../std.zig");
-const Self = @This();
-const math = std.math;
-const assert = std.debug.assert;
-const mem = std.mem;
-const testing = std.testing;
-const native_endian = @import("builtin").target.cpu.arch.endian();
-const Alignment = std.mem.Alignment;
+/// Left-aligns data such that `r.seek` becomes zero.
+pub fn rebase(r: *Reader) void {
+    if (r.seek == 0) return;
+    const data = r.buffer[r.seek..r.end];
+    @memmove(r.buffer[0..data.len], data);
+    r.seek = 0;
+    r.end = data.len;
+}
+
+/// Ensures `capacity` more data can be buffered without rebasing, by rebasing
+/// if necessary.
+///
+/// Asserts `capacity` is within the buffer capacity.
+pub fn rebaseCapacity(r: *Reader, capacity: usize) void {
+    if (r.end > r.buffer.len - capacity) rebase(r);
+}
+
+/// Advances the stream and decreases the size of the storage buffer by `n`,
+/// returning the range of bytes no longer accessible by `r`.
+///
+/// This action can be undone by `restitute`.
+///
+/// Asserts there are at least `n` buffered bytes already.
+///
+/// Asserts that `r.seek` is zero, i.e. the buffer is in a rebased state.
+pub fn steal(r: *Reader, n: usize) []u8 {
+    assert(r.seek == 0);
+    assert(n <= r.end);
+    const stolen = r.buffer[0..n];
+    r.buffer = r.buffer[n..];
+    r.end -= n;
+    return stolen;
+}
+
+/// Expands the storage buffer, undoing the effects of `steal`.
+/// Assumes that `n` does not exceed the total number of stolen bytes.
+pub fn restitute(r: *Reader, n: usize) void {
+    r.buffer = (r.buffer.ptr - n)[0 .. r.buffer.len + n];
+    r.end += n;
+    r.seek += n;
+}
+
+test fixed {
+    var r: Reader = .fixed("a\x02");
+    try testing.expect((try r.takeByte()) == 'a');
+    try testing.expect((try r.takeEnum(enum(u8) {
+        a = 0,
+        b = 99,
+        c = 2,
+        d = 3,
+    }, builtin.cpu.arch.endian())) == .c);
+    try testing.expectError(error.EndOfStream, r.takeByte());
+}
+
+test peek {
+    return error.Unimplemented;
+}
+
+test peekGreedy {
+    return error.Unimplemented;
+}
+
+test toss {
+    return error.Unimplemented;
+}
+
+test take {
+    return error.Unimplemented;
+}
+
+test takeArray {
+    return error.Unimplemented;
+}
+
+test peekArray {
+    return error.Unimplemented;
+}
+
+test discardAll {
+    var r: Reader = .fixed("foobar");
+    try r.discardAll(3);
+    try testing.expectEqualStrings("bar", try r.take(3));
+    try r.discardAll(0);
+    try testing.expectError(error.EndOfStream, r.discardAll(1));
+}
+
+test discardRemaining {
+    return error.Unimplemented;
+}
+
+test stream {
+    return error.Unimplemented;
+}
 
-test {
-    _ = @import("Reader/test.zig");
+test takeSentinel {
+    return error.Unimplemented;
+}
+
+test peekSentinel {
+    return error.Unimplemented;
+}
+
+test takeDelimiterInclusive {
+    return error.Unimplemented;
+}
+
+test peekDelimiterInclusive {
+    return error.Unimplemented;
+}
+
+test takeDelimiterExclusive {
+    return error.Unimplemented;
+}
+
+test peekDelimiterExclusive {
+    return error.Unimplemented;
+}
+
+test readDelimiter {
+    return error.Unimplemented;
+}
+
+test readDelimiterEnding {
+    return error.Unimplemented;
+}
+
+test readDelimiterLimit {
+    return error.Unimplemented;
+}
+
+test discardDelimiterExclusive {
+    return error.Unimplemented;
+}
+
+test discardDelimiterInclusive {
+    return error.Unimplemented;
+}
+
+test fill {
+    return error.Unimplemented;
+}
+
+test takeByte {
+    return error.Unimplemented;
+}
+
+test takeByteSigned {
+    return error.Unimplemented;
+}
+
+test takeInt {
+    return error.Unimplemented;
+}
+
+test takeVarInt {
+    return error.Unimplemented;
+}
+
+test takeStruct {
+    return error.Unimplemented;
+}
+
+test peekStruct {
+    return error.Unimplemented;
+}
+
+test takeStructEndian {
+    return error.Unimplemented;
+}
+
+test peekStructEndian {
+    return error.Unimplemented;
+}
+
+test takeEnum {
+    return error.Unimplemented;
+}
+
+test takeLeb128 {
+    return error.Unimplemented;
+}
+
+test readSliceShort {
+    return error.Unimplemented;
+}
+
+test readVec {
+    return error.Unimplemented;
+}
+
+test "expected error.EndOfStream" {
+    // Unit test inspired by https://github.com/ziglang/zig/issues/17733
+    var r: std.io.Reader = .fixed("");
+    try std.testing.expectError(error.EndOfStream, r.readEnum(enum(u8) { a, b }, .little));
+    try std.testing.expectError(error.EndOfStream, r.isBytes("foo"));
+}
+
+fn endingStream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
+    _ = r;
+    _ = w;
+    _ = limit;
+    return error.EndOfStream;
+}
+
+fn endingDiscard(r: *Reader, limit: Limit) Error!usize {
+    _ = r;
+    _ = limit;
+    return error.EndOfStream;
+}
+
+fn failingStream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
+    _ = r;
+    _ = w;
+    _ = limit;
+    return error.ReadFailed;
+}
+
+fn failingDiscard(r: *Reader, limit: Limit) Error!usize {
+    _ = r;
+    _ = limit;
+    return error.ReadFailed;
+}
+
+test "readAlloc when the backing reader provides one byte at a time" {
+    const OneByteReader = struct {
+        str: []const u8,
+        curr: usize,
+
+        fn read(self: *@This(), dest: []u8) usize {
+            if (self.str.len <= self.curr or dest.len == 0)
+                return 0;
+
+            dest[0] = self.str[self.curr];
+            self.curr += 1;
+            return 1;
+        }
+    };
+
+    const str = "This is a test";
+    var one_byte_stream: OneByteReader = .init(str);
+    const res = try one_byte_stream.reader().streamReadAlloc(std.testing.allocator, str.len + 1);
+    defer std.testing.allocator.free(res);
+    try std.testing.expectEqualStrings(str, res);
+}
+
+/// Provides a `Reader` implementation by passing data from an underlying
+/// reader through `Hasher.update`.
+///
+/// The underlying reader is best unbuffered.
+///
+/// This implementation makes suboptimal buffering decisions due to being
+/// generic. A better solution will involve creating a reader for each hash
+/// function, where the discard buffer can be tailored to the hash
+/// implementation details.
+pub fn Hashed(comptime Hasher: type) type {
+    return struct {
+        in: *Reader,
+        hasher: Hasher,
+        interface: Reader,
+
+        pub fn init(in: *Reader, hasher: Hasher, buffer: []u8) @This() {
+            return .{
+                .in = in,
+                .hasher = hasher,
+                .interface = .{
+                    .vtable = &.{
+                        .read = @This().read,
+                        .discard = @This().discard,
+                    },
+                    .buffer = buffer,
+                    .end = 0,
+                    .seek = 0,
+                },
+            };
+        }
+
+        fn read(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
+            const this: *@This() = @alignCast(@fieldParentPtr("interface", r));
+            const data = w.writableVector(limit);
+            const n = try this.in.readVec(data);
+            const result = w.advanceVector(n);
+            var remaining: usize = n;
+            for (data) |slice| {
+                if (remaining < slice.len) {
+                    this.hasher.update(slice[0..remaining]);
+                    return result;
+                } else {
+                    remaining -= slice.len;
+                    this.hasher.update(slice);
+                }
+            }
+            assert(remaining == 0);
+            return result;
+        }
+
+        fn discard(r: *Reader, limit: Limit) Error!usize {
+            const this: *@This() = @alignCast(@fieldParentPtr("interface", r));
+            var w = this.hasher.writer(&.{});
+            const n = this.in.stream(&w, limit) catch |err| switch (err) {
+                error.WriteFailed => unreachable,
+                else => |e| return e,
+            };
+            return n;
+        }
+    };
 }
lib/std/io/stream_source.zig
@@ -2,9 +2,9 @@ const std = @import("../std.zig");
 const builtin = @import("builtin");
 const io = std.io;
 
-/// Provides `io.Reader`, `io.Writer`, and `io.SeekableStream` for in-memory buffers as
+/// Provides `io.GenericReader`, `io.GenericWriter`, and `io.SeekableStream` for in-memory buffers as
 /// well as files.
-/// For memory sources, if the supplied byte buffer is const, then `io.Writer` is not available.
+/// For memory sources, if the supplied byte buffer is const, then `io.GenericWriter` is not available.
 /// The error set of the stream functions is the error set of the corresponding file functions.
 pub const StreamSource = union(enum) {
     // TODO: expose UEFI files to std.os in a way that allows this to be true
@@ -26,8 +26,8 @@ pub const StreamSource = union(enum) {
     pub const SeekError = io.FixedBufferStream([]u8).SeekError || (if (has_file) std.fs.File.SeekError else error{});
     pub const GetSeekPosError = io.FixedBufferStream([]u8).GetSeekPosError || (if (has_file) std.fs.File.GetSeekPosError else error{});
 
-    pub const Reader = io.Reader(*StreamSource, ReadError, read);
-    pub const Writer = io.Writer(*StreamSource, WriteError, write);
+    pub const Reader = io.GenericReader(*StreamSource, ReadError, read);
+    pub const Writer = io.GenericWriter(*StreamSource, WriteError, write);
     pub const SeekableStream = io.SeekableStream(
         *StreamSource,
         SeekError,
lib/std/io/Writer.zig
@@ -1,83 +1,2206 @@
+const builtin = @import("builtin");
+const native_endian = builtin.target.cpu.arch.endian();
+
+const Writer = @This();
 const std = @import("../std.zig");
 const assert = std.debug.assert;
-const mem = std.mem;
-const native_endian = @import("builtin").target.cpu.arch.endian();
+const Limit = std.io.Limit;
+const File = std.fs.File;
+const testing = std.testing;
+const Allocator = std.mem.Allocator;
+
+vtable: *const VTable,
+/// If this has length zero, the writer is unbuffered, and `flush` is a no-op.
+buffer: []u8,
+/// In `buffer` before this are buffered bytes, after this is `undefined`.
+end: usize = 0,
+/// Tracks total number of bytes written to this `Writer`. This value
+/// only increases. In the case of fixed mode, this value always equals `end`.
+///
+/// This value is maintained by the interface; `VTable` function
+/// implementations need not modify it.
+count: usize = 0,
+
+pub const VTable = struct {
+    /// Sends bytes to the logical sink. A write will only be sent here if it
+    /// could not fit into `buffer`, or during a `flush` operation.
+    ///
+    /// `buffer[0..end]` is consumed first, followed by each slice of `data` in
+    /// order. Elements of `data` may alias each other but may not alias
+    /// `buffer`.
+    ///
+    /// This function modifies `Writer.end` and `Writer.buffer` in an
+    /// implementation-defined manner.
+    ///
+    /// `data.len` must be nonzero.
+    ///
+    /// The last element of `data` is repeated as necessary so that it is
+    /// written `splat` number of times, which may be zero.
+    ///
+    /// Number of bytes consumed from `data` is returned, excluding bytes from
+    /// `buffer`.
+    ///
+    /// Number of bytes returned may be zero, which does not indicate stream
+    /// end. A subsequent call may return nonzero, or signal end of stream via
+    /// `error.WriteFailed`.
+    drain: *const fn (w: *Writer, data: []const []const u8, splat: usize) Error!usize,
+
+    /// Copies contents from an open file to the logical sink. `buffer[0..end]`
+    /// is consumed first, followed by `limit` bytes from `file_reader`.
+    ///
+    /// Number of bytes logically written is returned. This excludes bytes from
+    /// `buffer` because they have already been logically written. Number of
+    /// bytes consumed from `buffer` are tracked by modifying `end`.
+    ///
+    /// Number of bytes returned may be zero, which does not indicate stream
+    /// end. A subsequent call may return nonzero, or signal end of stream via
+    /// `error.WriteFailed`. Caller may check `file_reader` state
+    /// (`File.Reader.atEnd`) to disambiguate between a zero-length read or
+    /// write, and whether the file reached the end.
+    ///
+    /// `error.Unimplemented` indicates the callee cannot offer a more
+    /// efficient implementation than the caller performing its own reads.
+    sendFile: *const fn (
+        w: *Writer,
+        file_reader: *File.Reader,
+        /// Maximum amount of bytes to read from the file. Implementations may
+        /// assume that the file size does not exceed this amount. Data from
+        /// `buffer` does not count towards this limit.
+        limit: Limit,
+    ) FileError!usize = unimplementedSendFile,
+
+    /// Consumes all remaining buffer.
+    ///
+    /// The default flush implementation calls drain repeatedly until `end` is
+    /// zero, however it is legal for implementations to manage `end`
+    /// differently. For instance, `Allocating` flush is a no-op.
+    ///
+    /// There may be subsequent calls to `drain` and `sendFile` after a `flush`
+    /// operation.
+    flush: *const fn (w: *Writer) Error!void = defaultFlush,
+};
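+
+// A minimal sketch of the `drain` contract described above: buffered bytes
+// are consumed first, then everything in `data` (with the last element
+// splatted). `CountingSink` and its test name are illustrative only.
+test "custom drain sketch" {
+    const CountingSink = struct {
+        writer: Writer,
+        total: usize = 0,
+
+        fn drain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
+            const sink: *@This() = @alignCast(@fieldParentPtr("writer", w));
+            // Buffered bytes are always consumed by a drain call.
+            sink.total += w.end;
+            w.end = 0;
+            // Consume all of `data`; only bytes from `data` count toward the return value.
+            const n = countSplat(data, splat);
+            sink.total += n;
+            return n;
+        }
+    };
+    var buf: [4]u8 = undefined;
+    var sink: CountingSink = .{ .writer = .{
+        .vtable = &.{ .drain = CountingSink.drain },
+        .buffer = &buf,
+    } };
+    try sink.writer.writeAll("hello world");
+    try sink.writer.flush();
+    try testing.expectEqual(@as(usize, 11), sink.total);
+}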
+
+pub const Error = error{
+    /// See the `Writer` implementation for detailed diagnostics.
+    WriteFailed,
+};
 
-context: *const anyopaque,
-writeFn: *const fn (context: *const anyopaque, bytes: []const u8) anyerror!usize,
+pub const FileAllError = error{
+    /// Detailed diagnostics are found on the `File.Reader` struct.
+    ReadFailed,
+    /// See the `Writer` implementation for detailed diagnostics.
+    WriteFailed,
+};
 
-const Self = @This();
-pub const Error = anyerror;
+pub const FileReadingError = error{
+    /// Detailed diagnostics are found on the `File.Reader` struct.
+    ReadFailed,
+    /// See the `Writer` implementation for detailed diagnostics.
+    WriteFailed,
+    /// Reached the end of the file being read.
+    EndOfStream,
+};
 
-pub fn write(self: Self, bytes: []const u8) anyerror!usize {
-    return self.writeFn(self.context, bytes);
+pub const FileError = error{
+    /// Detailed diagnostics are found on the `File.Reader` struct.
+    ReadFailed,
+    /// See the `Writer` implementation for detailed diagnostics.
+    WriteFailed,
+    /// Reached the end of the file being read.
+    EndOfStream,
+    /// Indicates the caller should do its own file reading; the callee cannot
+    /// offer a more efficient implementation.
+    Unimplemented,
+};
+
+/// Writes to `buffer` and returns `error.WriteFailed` when it is full. Unless
+/// modified externally, `count` will always equal `end`.
+pub fn fixed(buffer: []u8) Writer {
+    return .{
+        .vtable = &.{ .drain = fixedDrain },
+        .buffer = buffer,
+    };
 }
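+
+// A minimal sketch of a fixed writer: writes are buffered until the buffer
+// is full, after which `error.WriteFailed` is returned. Illustrative only.
+test "fixed writer sketch" {
+    var buf: [3]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.writeAll("abc");
+    try testing.expectError(error.WriteFailed, w.writeAll("d"));
+    try testing.expectEqualStrings("abc", w.buffered());
+}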
 
-pub fn writeAll(self: Self, bytes: []const u8) anyerror!void {
-    var index: usize = 0;
-    while (index != bytes.len) {
-        index += try self.write(bytes[index..]);
+pub fn hashed(w: *Writer, hasher: anytype) Hashed(@TypeOf(hasher)) {
+    return .{ .out = w, .hasher = hasher };
+}
+
+pub const failing: Writer = .{
+    .vtable = &.{
+        .drain = failingDrain,
+        .sendFile = failingSendFile,
+    },
+};
+
+pub fn discarding(buffer: []u8) Writer {
+    return .{
+        .vtable = &.{
+            .drain = discardingDrain,
+            .sendFile = discardingSendFile,
+        },
+        .buffer = buffer,
+    };
+}
+
+/// Returns the contents not yet drained.
+pub fn buffered(w: *const Writer) []u8 {
+    return w.buffer[0..w.end];
+}
+
+pub fn countSplat(data: []const []const u8, splat: usize) usize {
+    var total: usize = 0;
+    for (data[0 .. data.len - 1]) |buf| total += buf.len;
+    total += data[data.len - 1].len * splat;
+    return total;
+}
+
+pub fn countSendFileLowerBound(n: usize, file_reader: *File.Reader, limit: Limit) ?usize {
+    const total: u64 = @min(@intFromEnum(limit), file_reader.getSize() catch return null);
+    return std.math.lossyCast(usize, total + n);
+}
+
+/// If the total number of bytes of `data` fits inside `unusedCapacitySlice`,
+/// this function is guaranteed to not fail, not call into `VTable`, and return
+/// the total bytes inside `data`.
+pub fn writeVec(w: *Writer, data: []const []const u8) Error!usize {
+    return writeSplat(w, data, 1);
+}
+
+/// If the number of bytes to write based on `data` and `splat` fits inside
+/// `unusedCapacitySlice`, this function is guaranteed to not fail, not call
+/// into `VTable`, and return the full number of bytes.
+pub fn writeSplat(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
+    assert(data.len > 0);
+    const buffer = w.buffer;
+    const count = countSplat(data, splat);
+    if (w.end + count > buffer.len) {
+        const n = try w.vtable.drain(w, data, splat);
+        w.count += n;
+        return n;
+    }
+    w.count += count;
+    for (data) |bytes| {
+        @memcpy(buffer[w.end..][0..bytes.len], bytes);
+        w.end += bytes.len;
     }
+    const pattern = data[data.len - 1];
+    if (splat == 0) {
+        @branchHint(.unlikely);
+        w.end -= pattern.len;
+        return count;
+    }
+    const remaining_splat = splat - 1;
+    switch (pattern.len) {
+        0 => {},
+        1 => {
+            @memset(buffer[w.end..][0..remaining_splat], pattern[0]);
+            w.end += remaining_splat;
+        },
+        else => {
+            const new_end = w.end + pattern.len * remaining_splat;
+            while (w.end < new_end) : (w.end += pattern.len) {
+                @memcpy(buffer[w.end..][0..pattern.len], pattern);
+            }
+        },
+    }
+    return count;
 }
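+
+// A minimal sketch of `writeSplat`: the last element of `data` is repeated
+// `splat` times. The buffer is sized so everything stays buffered.
+test "writeSplat sketch" {
+    var buf: [6]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try testing.expectEqual(@as(usize, 6), try w.writeSplat(&.{"ab"}, 3));
+    try testing.expectEqualStrings("ababab", w.buffered());
+}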
 
-pub fn print(self: Self, comptime format: []const u8, args: anytype) anyerror!void {
-    return std.fmt.format(self, format, args);
+/// Equivalent to `writeSplat` but writes at most `limit` bytes.
+pub fn writeSplatLimit(
+    w: *Writer,
+    data: []const []const u8,
+    splat: usize,
+    limit: Limit,
+) Error!usize {
+    _ = w;
+    _ = data;
+    _ = splat;
+    _ = limit;
+    @panic("TODO");
 }
 
-pub fn writeByte(self: Self, byte: u8) anyerror!void {
-    const array = [1]u8{byte};
-    return self.writeAll(&array);
+/// Returns how many bytes were consumed from `header` and `data`.
+pub fn writeSplatHeader(
+    w: *Writer,
+    header: []const u8,
+    data: []const []const u8,
+    splat: usize,
+) Error!usize {
+    const new_end = w.end + header.len;
+    if (new_end <= w.buffer.len) {
+        @memcpy(w.buffer[w.end..][0..header.len], header);
+        w.end = new_end;
+        w.count += header.len;
+        return header.len + try writeSplat(w, data, splat);
+    }
+    var vecs: [8][]const u8 = undefined; // Arbitrarily chosen size.
+    var i: usize = 1;
+    vecs[0] = header;
+    for (data) |buf| {
+        if (buf.len == 0) continue;
+        vecs[i] = buf;
+        i += 1;
+        if (vecs.len - i == 0) break;
+    }
+    const new_splat = if (vecs[i - 1].ptr == data[data.len - 1].ptr) splat else 1;
+    const n = try w.vtable.drain(w, vecs[0..i], new_splat);
+    w.count += n;
+    return n;
 }
 
-pub fn writeByteNTimes(self: Self, byte: u8, n: usize) anyerror!void {
-    var bytes: [256]u8 = undefined;
-    @memset(bytes[0..], byte);
+/// Equivalent to `writeSplatHeader` but writes at most `limit` bytes.
+pub fn writeSplatHeaderLimit(
+    w: *Writer,
+    header: []const u8,
+    data: []const []const u8,
+    splat: usize,
+    limit: Limit,
+) Error!usize {
+    _ = w;
+    _ = header;
+    _ = data;
+    _ = splat;
+    _ = limit;
+    @panic("TODO");
+}
 
-    var remaining: usize = n;
-    while (remaining > 0) {
-        const to_write = @min(remaining, bytes.len);
-        try self.writeAll(bytes[0..to_write]);
-        remaining -= to_write;
+/// Drains all remaining buffered data.
+pub fn flush(w: *Writer) Error!void {
+    return w.vtable.flush(w);
+}
+
+/// Repeatedly calls `VTable.drain` until `end` is zero.
+pub fn defaultFlush(w: *Writer) Error!void {
+    const drainFn = w.vtable.drain;
+    while (w.end != 0) _ = try drainFn(w, &.{""}, 1);
+}
+
+/// Does nothing.
+pub fn noopFlush(w: *Writer) Error!void {
+    _ = w;
+}
+
+/// Calls `VTable.drain` but hides the last `preserve_length` bytes from the
+/// implementation, keeping them buffered.
+pub fn drainPreserve(w: *Writer, preserve_length: usize) Error!void {
+    const temp_end = w.end -| preserve_length;
+    const preserved = w.buffer[temp_end..w.end];
+    w.end = temp_end;
+    defer w.end += preserved.len;
+    assert(0 == try w.vtable.drain(w, &.{""}, 1));
+    assert(w.end <= temp_end + preserved.len);
+    @memmove(w.buffer[w.end..][0..preserved.len], preserved);
+}
+
+pub fn unusedCapacitySlice(w: *const Writer) []u8 {
+    return w.buffer[w.end..];
+}
+
+pub fn unusedCapacityLen(w: *const Writer) usize {
+    return w.buffer.len - w.end;
+}
+
+/// Asserts the provided buffer has total capacity enough for `len`.
+///
+/// Advances the buffer end position by `len`.
+pub fn writableArray(w: *Writer, comptime len: usize) Error!*[len]u8 {
+    const big_slice = try w.writableSliceGreedy(len);
+    advance(w, len);
+    return big_slice[0..len];
+}
+
+/// Asserts the provided buffer has total capacity enough for `len`.
+///
+/// Advances the buffer end position by `len`.
+pub fn writableSlice(w: *Writer, len: usize) Error![]u8 {
+    const big_slice = try w.writableSliceGreedy(len);
+    advance(w, len);
+    return big_slice[0..len];
+}
+
+/// Asserts the provided buffer has total capacity enough for `minimum_length`.
+///
+/// Does not `advance` the buffer end position.
+///
+/// If `minimum_length` is zero, this is equivalent to `unusedCapacitySlice`.
+pub fn writableSliceGreedy(w: *Writer, minimum_length: usize) Error![]u8 {
+    assert(w.buffer.len >= minimum_length);
+    while (w.buffer.len - w.end < minimum_length) {
+        assert(0 == try w.vtable.drain(w, &.{""}, 1));
+    } else {
+        @branchHint(.likely);
+        return w.buffer[w.end..];
     }
 }
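+
+// Illustrative sketch: `writableSliceGreedy` hands out unused capacity and
+// `advance` records how much of it was filled, assuming a fixed-buffer
+// `Writer` so no drain is needed.
+test "writableSliceGreedy example" {
+    var buf: [8]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    const dest = try w.writableSliceGreedy(5);
+    @memcpy(dest[0..5], "hello");
+    w.advance(5);
+    try testing.expectEqualStrings("hello", w.buffered());
+}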
 
-pub fn writeBytesNTimes(self: Self, bytes: []const u8, n: usize) anyerror!void {
+/// Asserts the provided buffer has total capacity enough for `minimum_length`
+/// and `preserve_length` combined.
+///
+/// Does not `advance` the buffer end position.
+///
+/// When draining the buffer, ensures that at least `preserve_length` bytes
+/// remain buffered.
+///
+/// If `preserve_length` is zero, this is equivalent to `writableSliceGreedy`.
+pub fn writableSliceGreedyPreserve(w: *Writer, preserve_length: usize, minimum_length: usize) Error![]u8 {
+    assert(w.buffer.len >= preserve_length + minimum_length);
+    while (w.buffer.len - w.end < minimum_length) {
+        try drainPreserve(w, preserve_length);
+    } else {
+        @branchHint(.likely);
+        return w.buffer[w.end..];
+    }
+}
+
+pub const WritableVectorIterator = struct {
+    first: []u8,
+    middle: []const []u8 = &.{},
+    last: []u8 = &.{},
+    index: usize = 0,
+
+    pub fn next(it: *WritableVectorIterator) ?[]u8 {
+        while (true) {
+            const i = it.index;
+            it.index += 1;
+            if (i == 0) {
+                if (it.first.len == 0) continue;
+                return it.first;
+            }
+            const middle_index = i - 1;
+            if (middle_index < it.middle.len) {
+                const middle = it.middle[middle_index];
+                if (middle.len == 0) continue;
+                return middle;
+            }
+            if (middle_index == it.middle.len) {
+                if (it.last.len == 0) continue;
+                return it.last;
+            }
+            return null;
+        }
+    }
+};
+
+pub const VectorWrapper = struct {
+    writer: Writer,
+    it: WritableVectorIterator,
+    pub const vtable: VTable = .{ .drain = fixedDrain };
+};
+
+pub fn writableVectorIterator(w: *Writer) Error!WritableVectorIterator {
+    if (w.vtable == &VectorWrapper.vtable) {
+        const wrapper: *VectorWrapper = @fieldParentPtr("writer", w);
+        return wrapper.it;
+    }
+    return .{ .first = try writableSliceGreedy(w, 1) };
+}
+
+pub fn writableVectorPosix(w: *Writer, buffer: []std.posix.iovec, limit: Limit) Error![]std.posix.iovec {
+    var it = try writableVectorIterator(w);
     var i: usize = 0;
-    while (i < n) : (i += 1) {
-        try self.writeAll(bytes);
+    var remaining = limit;
+    while (it.next()) |full_buffer| {
+        if (!remaining.nonzero()) break;
+        if (buffer.len - i == 0) break;
+        const buf = remaining.slice(full_buffer);
+        if (buf.len == 0) continue;
+        buffer[i] = .{ .base = buf.ptr, .len = buf.len };
+        i += 1;
+        remaining = remaining.subtract(buf.len).?;
+    }
+    return buffer[0..i];
+}
+
+pub fn ensureUnusedCapacity(w: *Writer, n: usize) Error!void {
+    _ = try writableSliceGreedy(w, n);
+}
+
+pub fn undo(w: *Writer, n: usize) void {
+    w.end -= n;
+    w.count -= n;
+}
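+
+// Illustrative sketch: `undo` discards the most recently buffered bytes,
+// assuming they have not been drained yet.
+test "undo example" {
+    var buf: [8]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.writeAll("abcd");
+    w.undo(2);
+    try testing.expectEqualStrings("ab", w.buffered());
+}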
+
+/// After calling `writableSliceGreedy`, this function tracks how many bytes
+/// were written to it.
+///
+/// This is not needed when using `writableSlice` or `writableArray`.
+pub fn advance(w: *Writer, n: usize) void {
+    const new_end = w.end + n;
+    assert(new_end <= w.buffer.len);
+    w.end = new_end;
+    w.count += n;
+}
+
+/// After calling `writableVector`, this function tracks how many bytes were
+/// written to it.
+pub fn advanceVector(w: *Writer, n: usize) usize {
+    w.count += n;
+    return consume(w, n);
+}
+
+/// The `data` parameter is mutable because this function needs to mutate the
+/// fields in order to handle partial writes from `VTable.writeSplat`.
+pub fn writeVecAll(w: *Writer, data: [][]const u8) Error!void {
+    var index: usize = 0;
+    var truncate: usize = 0;
+    while (index < data.len) {
+        {
+            const untruncated = data[index];
+            data[index] = untruncated[truncate..];
+            defer data[index] = untruncated;
+            truncate += try w.writeVec(data[index..]);
+        }
+        while (index < data.len and truncate >= data[index].len) {
+            truncate -= data[index].len;
+            index += 1;
+        }
     }
 }
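+
+// Illustrative sketch: `writeVecAll` transfers every vector, here assuming a
+// fixed-buffer `Writer` large enough that no short writes occur.
+test "writeVecAll example" {
+    var buf: [16]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    var parts: [3][]const u8 = .{ "ab", "cd", "ef" };
+    try w.writeVecAll(&parts);
+    try testing.expectEqualStrings("abcdef", w.buffered());
+}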
 
-pub inline fn writeInt(self: Self, comptime T: type, value: T, endian: std.builtin.Endian) anyerror!void {
+/// The `data` parameter is mutable because this function needs to mutate the
+/// fields in order to handle partial writes from `VTable.writeSplat`.
+pub fn writeSplatAll(w: *Writer, data: [][]const u8, splat: usize) Error!void {
+    var index: usize = 0;
+    var truncate: usize = 0;
+    var remaining_splat = splat;
+    while (index + 1 < data.len) {
+        {
+            const untruncated = data[index];
+            data[index] = untruncated[truncate..];
+            defer data[index] = untruncated;
+            truncate += try w.writeSplat(data[index..], remaining_splat);
+        }
+        while (truncate >= data[index].len) {
+            if (index + 1 < data.len) {
+                truncate -= data[index].len;
+                index += 1;
+            } else {
+                const last = data[data.len - 1];
+                remaining_splat -= @divExact(truncate, last.len);
+                while (remaining_splat > 0) {
+                    const n = try w.writeSplat(data[data.len - 1 ..][0..1], remaining_splat);
+                    remaining_splat -= @divExact(n, last.len);
+                }
+                return;
+            }
+        }
+    }
+}
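+
+// Illustrative sketch: with `writeSplatAll` the last vector is repeated
+// `splat` times, here assuming a fixed-buffer `Writer` with spare capacity.
+test "writeSplatAll example" {
+    var buf: [16]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    var parts: [2][]const u8 = .{ "ab", "-" };
+    try w.writeSplatAll(&parts, 3);
+    try testing.expectEqualStrings("ab---", w.buffered());
+}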
+
+pub fn write(w: *Writer, bytes: []const u8) Error!usize {
+    if (w.end + bytes.len <= w.buffer.len) {
+        @branchHint(.likely);
+        @memcpy(w.buffer[w.end..][0..bytes.len], bytes);
+        w.end += bytes.len;
+        w.count += bytes.len;
+        return bytes.len;
+    }
+    const n = try w.vtable.drain(w, &.{bytes}, 1);
+    w.count += n;
+    return n;
+}
+
+/// Asserts `buffer` capacity exceeds `preserve_length`.
+pub fn writePreserve(w: *Writer, preserve_length: usize, bytes: []const u8) Error!usize {
+    assert(preserve_length <= w.buffer.len);
+    if (w.end + bytes.len <= w.buffer.len) {
+        @branchHint(.likely);
+        @memcpy(w.buffer[w.end..][0..bytes.len], bytes);
+        w.end += bytes.len;
+        w.count += bytes.len;
+        return bytes.len;
+    }
+    const temp_end = w.end -| preserve_length;
+    const preserved = w.buffer[temp_end..w.end];
+    w.end = temp_end;
+    defer w.end += preserved.len;
+    const n = try w.vtable.drain(w, &.{bytes}, 1);
+    w.count += n;
+    assert(w.end <= temp_end + preserved.len);
+    @memmove(w.buffer[w.end..][0..preserved.len], preserved);
+    return n;
+}
+
+/// Calls `drain` as many times as necessary such that all of `bytes` are
+/// transferred.
+pub fn writeAll(w: *Writer, bytes: []const u8) Error!void {
+    var index: usize = 0;
+    while (index < bytes.len) index += try w.write(bytes[index..]);
+}
+
+/// Calls `drain` as many times as necessary such that all of `bytes` are
+/// transferred.
+///
+/// When draining the buffer, ensures that at least `preserve_length` bytes
+/// remain buffered.
+///
+/// Asserts `buffer` capacity exceeds `preserve_length`.
+pub fn writeAllPreserve(w: *Writer, preserve_length: usize, bytes: []const u8) Error!void {
+    var index: usize = 0;
+    while (index < bytes.len) index += try w.writePreserve(preserve_length, bytes[index..]);
+}
+
+pub fn print(w: *Writer, comptime format: []const u8, args: anytype) Error!void {
+    try std.fmt.format(w, format, args);
+}
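+
+// Illustrative sketch: `print` formats into the buffer via `std.fmt`,
+// assuming a fixed-buffer `Writer` large enough to hold the result.
+test "print example" {
+    var buf: [32]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.print("{d} + {d} = {d}", .{ 2, 3, 5 });
+    try testing.expectEqualStrings("2 + 3 = 5", w.buffered());
+}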
+
+/// Calls `drain` as many times as necessary such that `byte` is transferred.
+pub fn writeByte(w: *Writer, byte: u8) Error!void {
+    while (w.buffer.len - w.end == 0) {
+        const n = try w.vtable.drain(w, &.{&.{byte}}, 1);
+        if (n > 0) {
+            w.count += 1;
+            return;
+        }
+    } else {
+        @branchHint(.likely);
+        w.buffer[w.end] = byte;
+        w.end += 1;
+        w.count += 1;
+    }
+}
+
+/// When draining the buffer, ensures that at least `preserve_length` bytes
+/// remain buffered.
+pub fn writeBytePreserve(w: *Writer, preserve_length: usize, byte: u8) Error!void {
+    while (w.buffer.len - w.end == 0) {
+        try drainPreserve(w, preserve_length);
+    } else {
+        @branchHint(.likely);
+        w.buffer[w.end] = byte;
+        w.end += 1;
+        w.count += 1;
+    }
+}
+
+/// Writes the same byte many times, performing the underlying write call as
+/// many times as necessary.
+pub fn splatByteAll(w: *Writer, byte: u8, n: usize) Error!void {
+    var remaining: usize = n;
+    while (remaining > 0) remaining -= try w.splatByte(byte, remaining);
+}
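+
+// Illustrative sketch: `splatByteAll` repeats one byte, combined here with
+// `writeByte`, assuming a fixed-buffer `Writer` with enough room.
+test "splatByteAll example" {
+    var buf: [16]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.writeByte('[');
+    try w.splatByteAll('-', 5);
+    try w.writeByte(']');
+    try testing.expectEqualStrings("[-----]", w.buffered());
+}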
+
+/// Writes the same byte many times, allowing short writes.
+///
+/// Makes at most one underlying `VTable.drain` call.
+pub fn splatByte(w: *Writer, byte: u8, n: usize) Error!usize {
+    return writeSplat(w, &.{&.{byte}}, n);
+}
+
+/// Writes the same slice many times, performing the underlying write call as
+/// many times as necessary.
+pub fn splatBytesAll(w: *Writer, bytes: []const u8, splat: usize) Error!void {
+    var remaining_bytes: usize = bytes.len * splat;
+    remaining_bytes -= try w.splatBytes(bytes, splat);
+    while (remaining_bytes > 0) {
+        const leftover = remaining_bytes % bytes.len;
+        const buffers: [2][]const u8 = .{ bytes[bytes.len - leftover ..], bytes };
+        remaining_bytes -= try w.writeSplat(&buffers, splat);
+    }
+}
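+
+// Illustrative sketch: `splatBytesAll` repeats a whole slice, assuming a
+// fixed-buffer `Writer` so the entire repetition is buffered in one call.
+test "splatBytesAll example" {
+    var buf: [16]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.splatBytesAll("ab", 3);
+    try testing.expectEqualStrings("ababab", w.buffered());
+}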
+
+/// Writes the same slice many times, allowing short writes.
+///
+/// Makes at most one underlying `VTable.writeSplat` call.
+pub fn splatBytes(w: *Writer, bytes: []const u8, n: usize) Error!usize {
+    return writeSplat(w, &.{bytes}, n);
+}
+
+/// Asserts the `buffer` was initialized with a capacity of at least `@sizeOf(T)` bytes.
+pub inline fn writeInt(w: *Writer, comptime T: type, value: T, endian: std.builtin.Endian) Error!void {
     var bytes: [@divExact(@typeInfo(T).int.bits, 8)]u8 = undefined;
-    mem.writeInt(std.math.ByteAlignedInt(@TypeOf(value)), &bytes, value, endian);
-    return self.writeAll(&bytes);
+    std.mem.writeInt(std.math.ByteAlignedInt(@TypeOf(value)), &bytes, value, endian);
+    return w.writeAll(&bytes);
 }
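+
+// Illustrative sketch: `writeInt` serializes an integer with the requested
+// endianness, assuming a fixed-buffer `Writer` of at least `@sizeOf(T)` bytes.
+test "writeInt example" {
+    var buf: [4]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.writeInt(u32, 0xDEADBEEF, .big);
+    try testing.expectEqualSlices(u8, &.{ 0xDE, 0xAD, 0xBE, 0xEF }, w.buffered());
+}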
 
-pub fn writeStruct(self: Self, value: anytype) anyerror!void {
+pub fn writeStruct(w: *Writer, value: anytype) Error!void {
     // Only extern and packed structs have defined in-memory layout.
     comptime assert(@typeInfo(@TypeOf(value)).@"struct".layout != .auto);
-    return self.writeAll(mem.asBytes(&value));
+    return w.writeAll(std.mem.asBytes(&value));
 }
 
-pub fn writeStructEndian(self: Self, value: anytype, endian: std.builtin.Endian) anyerror!void {
-    // TODO: make sure this value is not a reference type
+/// This function is inline to avoid generating dead code when `endian` is
+/// comptime-known and matches host endianness.
+/// TODO: make sure this value is not a reference type
+pub inline fn writeStructEndian(w: *Writer, value: anytype, endian: std.builtin.Endian) Error!void {
     if (native_endian == endian) {
-        return self.writeStruct(value);
+        return w.writeStruct(value);
     } else {
         var copy = value;
-        mem.byteSwapAllFields(@TypeOf(value), &copy);
-        return self.writeStruct(copy);
+        std.mem.byteSwapAllFields(@TypeOf(value), &copy);
+        return w.writeStruct(copy);
+    }
+}
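+
+// Illustrative sketch: `writeStructEndian` byte-swaps only when the requested
+// endianness differs from the host, so the bytes below come out the same on
+// little- and big-endian targets. The `Header` type is made up for the example.
+test "writeStructEndian example" {
+    const Header = extern struct { magic: u16, len: u16 };
+    var buf: [4]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.writeStructEndian(Header{ .magic = 0xABCD, .len = 2 }, .little);
+    try testing.expectEqualSlices(u8, &.{ 0xCD, 0xAB, 0x02, 0x00 }, w.buffered());
+}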
+
+pub inline fn writeSliceEndian(
+    w: *Writer,
+    Elem: type,
+    slice: []const Elem,
+    endian: std.builtin.Endian,
+) Error!void {
+    if (native_endian == endian) {
+        return writeAll(w, @ptrCast(slice));
+    } else {
+        return writeSliceSwap(w, Elem, slice);
+    }
+}
+
+/// Asserts that the buffer storage capacity is at least `@sizeOf(Elem)` bytes.
+pub fn writeSliceSwap(w: *Writer, Elem: type, slice: []const Elem) Error!void {
+    // copy to storage first, then swap in place
+    _ = w;
+    _ = slice;
+    @panic("TODO");
+}
+
+/// Unlike `writeSplat` and `writeVec`, this function will call into `VTable`
+/// even if there is enough buffer capacity for the file contents.
+///
+/// Although it would be possible to eliminate `error.Unimplemented` from the
+/// error set by reading directly into the buffer in such a case, this is not
+/// done because it is more efficient to do it higher up the call stack so that
+/// the error does not occur with each write.
+///
+/// See `sendFileReading` for an alternative that does not have
+/// `error.Unimplemented` in the error set.
+pub fn sendFile(w: *Writer, file_reader: *File.Reader, limit: Limit) FileError!usize {
+    return w.vtable.sendFile(w, file_reader, limit);
+}
+
+/// Returns how many bytes from `header` and `file_reader` were consumed.
+pub fn sendFileHeader(
+    w: *Writer,
+    header: []const u8,
+    file_reader: *File.Reader,
+    limit: Limit,
+) FileError!usize {
+    const new_end = w.end + header.len;
+    if (new_end <= w.buffer.len) {
+        @memcpy(w.buffer[w.end..][0..header.len], header);
+        w.end = new_end;
+        w.count += header.len;
+        return header.len + try w.vtable.sendFile(w, file_reader, limit);
+    }
+    const buffered_contents = limit.slice(file_reader.interface.buffered());
+    const n = try w.vtable.drain(w, &.{ header, buffered_contents }, 1);
+    w.count += n;
+    file_reader.interface.toss(n - header.len);
+    return n;
+}
+
+/// Asserts nonzero buffer capacity.
+pub fn sendFileReading(w: *Writer, file_reader: *File.Reader, limit: Limit) FileReadingError!usize {
+    const dest = limit.slice(try w.writableSliceGreedy(1));
+    const n = try file_reader.read(dest);
+    w.advance(n);
+    return n;
+}
+
+pub fn sendFileAll(w: *Writer, file_reader: *File.Reader, limit: Limit) FileAllError!usize {
+    var remaining = @intFromEnum(limit);
+    while (remaining > 0) {
+        const n = sendFile(w, file_reader, .limited(remaining)) catch |err| switch (err) {
+            error.EndOfStream => break,
+            error.Unimplemented => {
+                file_reader.mode = file_reader.mode.toReading();
+                remaining -= try w.sendFileReadingAll(file_reader, .limited(remaining));
+                break;
+            },
+            else => |e| return e,
+        };
+        remaining -= n;
+    }
+    return @intFromEnum(limit) - remaining;
+}
+
+/// Equivalent to `sendFileAll` but uses direct `pread` and `read` calls on
+/// `file` rather than `sendFile`. This is generally used as a fallback when
+/// the underlying implementation returns `error.Unimplemented`, which is why
+/// that error code does not appear in this function's error set.
+///
+/// Asserts nonzero buffer capacity.
+pub fn sendFileReadingAll(w: *Writer, file_reader: *File.Reader, limit: Limit) FileAllError!usize {
+    var remaining = @intFromEnum(limit);
+    while (remaining > 0) {
+        remaining -= sendFileReading(w, file_reader, .limited(remaining)) catch |err| switch (err) {
+            error.EndOfStream => break,
+            else => |e| return e,
+        };
     }
+    return @intFromEnum(limit) - remaining;
 }
 
-pub fn writeFile(self: Self, file: std.fs.File) anyerror!void {
-    // TODO: figure out how to adjust std lib abstractions so that this ends up
-    // doing sendfile or maybe even copy_file_range under the right conditions.
-    var buf: [4000]u8 = undefined;
+pub fn alignBuffer(
+    w: *Writer,
+    buffer: []const u8,
+    width: usize,
+    alignment: std.fmt.Alignment,
+    fill: u8,
+) Error!void {
+    const padding = if (buffer.len < width) width - buffer.len else 0;
+    if (padding == 0) {
+        @branchHint(.likely);
+        return w.writeAll(buffer);
+    }
+    switch (alignment) {
+        .left => {
+            try w.writeAll(buffer);
+            try w.splatByteAll(fill, padding);
+        },
+        .center => {
+            const left_padding = padding / 2;
+            const right_padding = (padding + 1) / 2;
+            try w.splatByteAll(fill, left_padding);
+            try w.writeAll(buffer);
+            try w.splatByteAll(fill, right_padding);
+        },
+        .right => {
+            try w.splatByteAll(fill, padding);
+            try w.writeAll(buffer);
+        },
+    }
+}
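+
+// Illustrative sketch: `alignBuffer` pads with the fill byte on either side of
+// the text, assuming a fixed-buffer `Writer`.
+test "alignBuffer example" {
+    var buf: [16]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.alignBuffer("zig", 9, .center, '*');
+    try testing.expectEqualStrings("***zig***", w.buffered());
+}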
+
+pub fn alignBufferOptions(w: *Writer, buffer: []const u8, options: std.fmt.Options) Error!void {
+    return w.alignBuffer(buffer, options.width orelse buffer.len, options.alignment, options.fill);
+}
+
+pub fn printAddress(w: *Writer, value: anytype) Error!void {
+    const T = @TypeOf(value);
+    switch (@typeInfo(T)) {
+        .pointer => |info| {
+            try w.writeAll(@typeName(info.child) ++ "@");
+            if (info.size == .slice)
+                try w.printIntOptions(@intFromPtr(value.ptr), 16, .lower, .{})
+            else
+                try w.printIntOptions(@intFromPtr(value), 16, .lower, .{});
+            return;
+        },
+        .optional => |info| {
+            if (@typeInfo(info.child) == .pointer) {
+                try w.writeAll(@typeName(info.child) ++ "@");
+                try w.printIntOptions(@intFromPtr(value), 16, .lower, .{});
+                return;
+            }
+        },
+        else => {},
+    }
+
+    @compileError("cannot format non-pointer type " ++ @typeName(T) ++ " with * specifier");
+}
+
+pub fn printValue(
+    w: *Writer,
+    comptime fmt: []const u8,
+    options: std.fmt.Options,
+    value: anytype,
+    max_depth: usize,
+) Error!void {
+    const T = @TypeOf(value);
+
+    if (comptime std.mem.eql(u8, fmt, "*")) {
+        return w.printAddress(value);
+    }
+
+    const is_any = comptime std.mem.eql(u8, fmt, ANY);
+    if (!is_any and std.meta.hasMethod(T, "format")) {
+        if (fmt.len > 0 and fmt[0] == 'f') {
+            return value.format(w, fmt[1..]);
+        } else if (fmt.len == 0) {
+            // after 0.15.0 is tagged, delete the hasMethod condition and this compile error
+            @compileError("ambiguous format string; specify {f} to call format method, or {any} to skip it");
+        }
+    }
+
+    switch (@typeInfo(T)) {
+        .float, .comptime_float => return w.printFloat(if (is_any) "d" else fmt, options, value),
+        .int, .comptime_int => return w.printInt(if (is_any) "d" else fmt, options, value),
+        .bool => {
+            if (!is_any and fmt.len != 0) invalidFmtError(fmt, value);
+            return w.alignBufferOptions(if (value) "true" else "false", options);
+        },
+        .void => {
+            if (!is_any and fmt.len != 0) invalidFmtError(fmt, value);
+            return w.alignBufferOptions("void", options);
+        },
+        .optional => {
+            const remaining_fmt = comptime if (fmt.len > 0 and fmt[0] == '?')
+                stripOptionalOrErrorUnionSpec(fmt)
+            else if (is_any)
+                ANY
+            else
+                @compileError("cannot print optional without a specifier (i.e. {?} or {any})");
+            if (value) |payload| {
+                return w.printValue(remaining_fmt, options, payload, max_depth);
+            } else {
+                return w.alignBufferOptions("null", options);
+            }
+        },
+        .error_union => {
+            const remaining_fmt = comptime if (fmt.len > 0 and fmt[0] == '!')
+                stripOptionalOrErrorUnionSpec(fmt)
+            else if (is_any)
+                ANY
+            else
+                @compileError("cannot print error union without a specifier (i.e. {!} or {any})");
+            if (value) |payload| {
+                return w.printValue(remaining_fmt, options, payload, max_depth);
+            } else |err| {
+                return w.printValue("", options, err, max_depth);
+            }
+        },
+        .error_set => {
+            if (fmt.len == 1 and fmt[0] == 's') return w.writeAll(@errorName(value));
+            if (!is_any and fmt.len != 0) invalidFmtError(fmt, value);
+            try printErrorSet(w, value);
+        },
+        .@"enum" => {
+            if (fmt.len == 1 and fmt[0] == 's') {
+                try w.writeAll(@tagName(value));
+                return;
+            }
+            if (!is_any) {
+                if (fmt.len != 0) return printValue(w, fmt, options, @intFromEnum(value), max_depth);
+                return printValue(w, ANY, options, value, max_depth);
+            }
+            const enum_info = @typeInfo(T).@"enum";
+            if (enum_info.is_exhaustive) {
+                var vecs: [3][]const u8 = .{ @typeName(T), ".", @tagName(value) };
+                try w.writeVecAll(&vecs);
+                return;
+            }
+            try w.writeAll(@typeName(T));
+            @setEvalBranchQuota(3 * enum_info.fields.len);
+            inline for (enum_info.fields) |field| {
+                if (@intFromEnum(value) == field.value) {
+                    try w.writeAll(".");
+                    try w.writeAll(@tagName(value));
+                    return;
+                }
+            }
+            try w.writeByte('(');
+            try w.printValue(ANY, options, @intFromEnum(value), max_depth);
+            try w.writeByte(')');
+        },
+        .@"union" => |info| {
+            if (!is_any) {
+                if (fmt.len != 0) invalidFmtError(fmt, value);
+                return printValue(w, ANY, options, value, max_depth);
+            }
+            try w.writeAll(@typeName(T));
+            if (max_depth == 0) {
+                try w.writeAll("{ ... }");
+                return;
+            }
+            if (info.tag_type) |UnionTagType| {
+                try w.writeAll("{ .");
+                try w.writeAll(@tagName(@as(UnionTagType, value)));
+                try w.writeAll(" = ");
+                inline for (info.fields) |u_field| {
+                    if (value == @field(UnionTagType, u_field.name)) {
+                        try w.printValue(ANY, options, @field(value, u_field.name), max_depth - 1);
+                    }
+                }
+                try w.writeAll(" }");
+            } else {
+                try w.writeByte('@');
+                try w.printIntOptions(@intFromPtr(&value), 16, .lower, options);
+            }
+        },
+        .@"struct" => |info| {
+            if (!is_any) {
+                if (fmt.len != 0) invalidFmtError(fmt, value);
+                return printValue(w, ANY, options, value, max_depth);
+            }
+            if (info.is_tuple) {
+                // Skip the type and field names when formatting tuples.
+                if (max_depth == 0) {
+                    try w.writeAll("{ ... }");
+                    return;
+                }
+                try w.writeAll("{");
+                inline for (info.fields, 0..) |f, i| {
+                    if (i == 0) {
+                        try w.writeAll(" ");
+                    } else {
+                        try w.writeAll(", ");
+                    }
+                    try w.printValue(ANY, options, @field(value, f.name), max_depth - 1);
+                }
+                try w.writeAll(" }");
+                return;
+            }
+            try w.writeAll(@typeName(T));
+            if (max_depth == 0) {
+                try w.writeAll("{ ... }");
+                return;
+            }
+            try w.writeAll("{");
+            inline for (info.fields, 0..) |f, i| {
+                if (i == 0) {
+                    try w.writeAll(" .");
+                } else {
+                    try w.writeAll(", .");
+                }
+                try w.writeAll(f.name);
+                try w.writeAll(" = ");
+                try w.printValue(ANY, options, @field(value, f.name), max_depth - 1);
+            }
+            try w.writeAll(" }");
+        },
+        .pointer => |ptr_info| switch (ptr_info.size) {
+            .one => switch (@typeInfo(ptr_info.child)) {
+                .array, .@"enum", .@"union", .@"struct" => {
+                    return w.printValue(fmt, options, value.*, max_depth);
+                },
+                else => {
+                    var buffers: [2][]const u8 = .{ @typeName(ptr_info.child), "@" };
+                    try w.writeVecAll(&buffers);
+                    try w.printIntOptions(@intFromPtr(value), 16, .lower, options);
+                    return;
+                },
+            },
+            .many, .c => {
+                if (ptr_info.sentinel() != null)
+                    return w.printValue(fmt, options, std.mem.span(value), max_depth);
+                if (fmt.len == 1 and fmt[0] == 's' and ptr_info.child == u8)
+                    return w.alignBufferOptions(std.mem.span(value), options);
+                if (!is_any and fmt.len == 0)
+                    @compileError("cannot format pointer without a specifier (i.e. {s} or {*})");
+                if (!is_any and fmt.len != 0)
+                    invalidFmtError(fmt, value);
+                try w.printAddress(value);
+            },
+            .slice => {
+                if (!is_any and fmt.len == 0)
+                    @compileError("cannot format slice without a specifier (i.e. {s}, {x}, {b64}, or {any})");
+                if (max_depth == 0)
+                    return w.writeAll("{ ... }");
+                if (ptr_info.child == u8) switch (fmt.len) {
+                    1 => switch (fmt[0]) {
+                        's' => return w.alignBufferOptions(value, options),
+                        'x' => return w.printHex(value, .lower),
+                        'X' => return w.printHex(value, .upper),
+                        else => {},
+                    },
+                    3 => if (fmt[0] == 'b' and fmt[1] == '6' and fmt[2] == '4') {
+                        return w.printBase64(value);
+                    },
+                    else => {},
+                };
+                try w.writeAll("{ ");
+                for (value, 0..) |elem, i| {
+                    try w.printValue(fmt, options, elem, max_depth - 1);
+                    if (i != value.len - 1) {
+                        try w.writeAll(", ");
+                    }
+                }
+                try w.writeAll(" }");
+            },
+        },
+        .array => |info| {
+            if (fmt.len == 0)
+                @compileError("cannot format array without a specifier (i.e. {s} or {any})");
+            if (max_depth == 0) {
+                return w.writeAll("{ ... }");
+            }
+            if (info.child == u8) {
+                if (fmt[0] == 's') {
+                    return w.alignBufferOptions(&value, options);
+                } else if (fmt[0] == 'x') {
+                    return w.printHex(&value, .lower);
+                } else if (fmt[0] == 'X') {
+                    return w.printHex(&value, .upper);
+                }
+            }
+            try w.writeAll("{ ");
+            for (value, 0..) |elem, i| {
+                try w.printValue(fmt, options, elem, max_depth - 1);
+                if (i < value.len - 1) {
+                    try w.writeAll(", ");
+                }
+            }
+            try w.writeAll(" }");
+        },
+        .vector => |info| {
+            if (max_depth == 0) {
+                return w.writeAll("{ ... }");
+            }
+            try w.writeAll("{ ");
+            var i: usize = 0;
+            while (i < info.len) : (i += 1) {
+                try w.printValue(fmt, options, value[i], max_depth - 1);
+                if (i < info.len - 1) {
+                    try w.writeAll(", ");
+                }
+            }
+            try w.writeAll(" }");
+        },
+        .@"fn" => @compileError("unable to format function body type, use '*const " ++ @typeName(T) ++ "' for a function pointer type"),
+        .type => {
+            if (!is_any and fmt.len != 0) invalidFmtError(fmt, value);
+            return w.alignBufferOptions(@typeName(value), options);
+        },
+        .enum_literal => {
+            if (!is_any and fmt.len != 0) invalidFmtError(fmt, value);
+            const buffer = [_]u8{'.'} ++ @tagName(value);
+            return w.alignBufferOptions(buffer, options);
+        },
+        .null => {
+            if (!is_any and fmt.len != 0) invalidFmtError(fmt, value);
+            return w.alignBufferOptions("null", options);
+        },
+        else => @compileError("unable to format type '" ++ @typeName(T) ++ "'"),
+    }
+}
+
+fn printErrorSet(w: *Writer, error_set: anyerror) Error!void {
+    var vecs: [2][]const u8 = .{ "error.", @errorName(error_set) };
+    try w.writeVecAll(&vecs);
+}
+
+pub fn printInt(
+    w: *Writer,
+    comptime fmt: []const u8,
+    options: std.fmt.Options,
+    value: anytype,
+) Error!void {
+    const int_value = if (@TypeOf(value) == comptime_int) blk: {
+        const Int = std.math.IntFittingRange(value, value);
+        break :blk @as(Int, value);
+    } else value;
+
+    switch (fmt.len) {
+        0 => return w.printIntOptions(int_value, 10, .lower, options),
+        1 => switch (fmt[0]) {
+            'd' => return w.printIntOptions(int_value, 10, .lower, options),
+            'c' => {
+                if (@typeInfo(@TypeOf(int_value)).int.bits <= 8) {
+                    return w.printAsciiChar(@as(u8, int_value), options);
+                } else {
+                    @compileError("cannot print integer that is larger than 8 bits as an ASCII character");
+                }
+            },
+            'u' => {
+                if (@typeInfo(@TypeOf(int_value)).int.bits <= 21) {
+                    return w.printUnicodeCodepoint(@as(u21, int_value), options);
+                } else {
+                    @compileError("cannot print integer that is larger than 21 bits as a UTF-8 sequence");
+                }
+            },
+            'b' => return w.printIntOptions(int_value, 2, .lower, options),
+            'x' => return w.printIntOptions(int_value, 16, .lower, options),
+            'X' => return w.printIntOptions(int_value, 16, .upper, options),
+            'o' => return w.printIntOptions(int_value, 8, .lower, options),
+            'B' => return w.printByteSize(int_value, .decimal, options),
+            'D' => return w.printDuration(int_value, options),
+            else => invalidFmtError(fmt, value),
+        },
+        2 => {
+            if (fmt[0] == 'B' and fmt[1] == 'i') {
+                return w.printByteSize(int_value, .binary, options);
+            } else {
+                invalidFmtError(fmt, value);
+            }
+        },
+        else => invalidFmtError(fmt, value),
+    }
+    comptime unreachable;
+}
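+
+// Illustrative sketch: the `x` specifier routes through `printIntOptions`
+// with base 16, assuming a fixed-buffer `Writer`.
+test "printInt hex example" {
+    var buf: [8]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.printInt("x", .{}, @as(u32, 48879));
+    try testing.expectEqualStrings("beef", w.buffered());
+}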
+
+pub fn printAsciiChar(w: *Writer, c: u8, options: std.fmt.Options) Error!void {
+    return w.alignBufferOptions(@as(*const [1]u8, &c), options);
+}
+
+pub fn printAscii(w: *Writer, bytes: []const u8, options: std.fmt.Options) Error!void {
+    return w.alignBufferOptions(bytes, options);
+}
+
+pub fn printUnicodeCodepoint(w: *Writer, c: u21, options: std.fmt.Options) Error!void {
+    var buf: [4]u8 = undefined;
+    const len = try std.unicode.utf8Encode(c, &buf);
+    return w.alignBufferOptions(buf[0..len], options);
+}
+
+pub fn printIntOptions(
+    w: *Writer,
+    value: anytype,
+    base: u8,
+    case: std.fmt.Case,
+    options: std.fmt.Options,
+) Error!void {
+    assert(base >= 2);
+
+    const int_value = if (@TypeOf(value) == comptime_int) blk: {
+        const Int = std.math.IntFittingRange(value, value);
+        break :blk @as(Int, value);
+    } else value;
+
+    const value_info = @typeInfo(@TypeOf(int_value)).int;
+
+    // The type must have the same size as `base` or be wider in order for the
+    // division to work
+    const min_int_bits = comptime @max(value_info.bits, 8);
+    const MinInt = std.meta.Int(.unsigned, min_int_bits);
+
+    const abs_value = @abs(int_value);
+    // The worst case in terms of space needed is base 2, plus 1 for the sign
+    var buf: [1 + @max(@as(comptime_int, value_info.bits), 1)]u8 = undefined;
+
+    var a: MinInt = abs_value;
+    var index: usize = buf.len;
+
+    if (base == 10) {
+        while (a >= 100) : (a = @divTrunc(a, 100)) {
+            index -= 2;
+            buf[index..][0..2].* = std.fmt.digits2(@intCast(a % 100));
+        }
+
+        if (a < 10) {
+            index -= 1;
+            buf[index] = '0' + @as(u8, @intCast(a));
+        } else {
+            index -= 2;
+            buf[index..][0..2].* = std.fmt.digits2(@intCast(a));
+        }
+    } else {
+        while (true) {
+            const digit = a % base;
+            index -= 1;
+            buf[index] = std.fmt.digitToChar(@intCast(digit), case);
+            a /= base;
+            if (a == 0) break;
+        }
+    }
+
+    if (value_info.signedness == .signed) {
+        if (value < 0) {
+            // Negative integer
+            index -= 1;
+            buf[index] = '-';
+        } else if (options.width == null or options.width.? == 0) {
+            // Positive integer, omit the plus sign
+        } else {
+            // Positive integer
+            index -= 1;
+            buf[index] = '+';
+        }
+    }
+
+    return w.alignBufferOptions(buf[index..], options);
+}
+
+pub fn printFloat(
+    w: *Writer,
+    comptime fmt: []const u8,
+    options: std.fmt.Options,
+    value: anytype,
+) Error!void {
+    var buf: [std.fmt.float.bufferSize(.decimal, f64)]u8 = undefined;
+
+    if (fmt.len > 1) invalidFmtError(fmt, value);
+    switch (if (fmt.len == 0) 'e' else fmt[0]) {
+        'e' => {
+            const s = std.fmt.float.render(&buf, value, .{ .mode = .scientific, .precision = options.precision }) catch |err| switch (err) {
+                error.BufferTooSmall => "(float)",
+            };
+            return w.alignBufferOptions(s, options);
+        },
+        'd' => {
+            const s = std.fmt.float.render(&buf, value, .{ .mode = .decimal, .precision = options.precision }) catch |err| switch (err) {
+                error.BufferTooSmall => "(float)",
+            };
+            return w.alignBufferOptions(s, options);
+        },
+        'x' => {
+            var sub_bw: Writer = .fixed(&buf);
+            sub_bw.printFloatHexadecimal(value, options.precision) catch unreachable;
+            return w.alignBufferOptions(sub_bw.buffered(), options);
+        },
+        else => invalidFmtError(fmt, value),
+    }
+}
+
+pub fn printFloatHexadecimal(w: *Writer, value: anytype, opt_precision: ?usize) Error!void {
+    if (std.math.signbit(value)) try w.writeByte('-');
+    if (std.math.isNan(value)) return w.writeAll("nan");
+    if (std.math.isInf(value)) return w.writeAll("inf");
+
+    const T = @TypeOf(value);
+    const TU = std.meta.Int(.unsigned, @bitSizeOf(T));
+
+    const mantissa_bits = std.math.floatMantissaBits(T);
+    const fractional_bits = std.math.floatFractionalBits(T);
+    const exponent_bits = std.math.floatExponentBits(T);
+    const mantissa_mask = (1 << mantissa_bits) - 1;
+    const exponent_mask = (1 << exponent_bits) - 1;
+    const exponent_bias = (1 << (exponent_bits - 1)) - 1;
+
+    const as_bits: TU = @bitCast(value);
+    var mantissa = as_bits & mantissa_mask;
+    var exponent: i32 = @as(u16, @truncate((as_bits >> mantissa_bits) & exponent_mask));
+
+    const is_denormal = exponent == 0 and mantissa != 0;
+    const is_zero = exponent == 0 and mantissa == 0;
+
+    if (is_zero) {
+        // Handle this case here to simplify the logic below.
+        try w.writeAll("0x0");
+        if (opt_precision) |precision| {
+            if (precision > 0) {
+                try w.writeAll(".");
+                try w.splatByteAll('0', precision);
+            }
+        } else {
+            try w.writeAll(".0");
+        }
+        try w.writeAll("p0");
+        return;
+    }
+
+    if (is_denormal) {
+        // Adjust the exponent for printing.
+        exponent += 1;
+    } else {
+        if (fractional_bits == mantissa_bits)
+            mantissa |= 1 << fractional_bits; // Add the implicit integer bit.
+    }
+
+    const mantissa_digits = (fractional_bits + 3) / 4;
+    // Fill in zeroes to round the fraction width to a multiple of 4.
+    mantissa <<= mantissa_digits * 4 - fractional_bits;
+
+    if (opt_precision) |precision| {
+        // Round if needed.
+        if (precision < mantissa_digits) {
+            // We always have at least 4 extra bits.
+            var extra_bits = (mantissa_digits - precision) * 4;
+            // The result LSB is the Guard bit, we need two more (Round and
+            // Sticky) to round the value.
+            while (extra_bits > 2) {
+                mantissa = (mantissa >> 1) | (mantissa & 1);
+                extra_bits -= 1;
+            }
+            // Round to nearest, tie to even.
+            mantissa |= @intFromBool(mantissa & 0b100 != 0);
+            mantissa += 1;
+            // Drop the excess bits.
+            mantissa >>= 2;
+            // Restore the alignment.
+            mantissa <<= @as(std.math.Log2Int(TU), @intCast((mantissa_digits - precision) * 4));
+
+            const overflow = mantissa & (1 << 1 + mantissa_digits * 4) != 0;
+            // Prefer a normalized result in case of overflow.
+            if (overflow) {
+                mantissa >>= 1;
+                exponent += 1;
+            }
+        }
+    }
+
+    // +1 for the decimal part.
+    var buf: [1 + mantissa_digits]u8 = undefined;
+    assert(std.fmt.printInt(&buf, mantissa, 16, .lower, .{ .fill = '0', .width = 1 + mantissa_digits }) == buf.len);
+
+    try w.writeAll("0x");
+    try w.writeByte(buf[0]);
+    const trimmed = std.mem.trimRight(u8, buf[1..], "0");
+    if (opt_precision) |precision| {
+        if (precision > 0) try w.writeAll(".");
+    } else if (trimmed.len > 0) {
+        try w.writeAll(".");
+    }
+    try w.writeAll(trimmed);
+    // Add trailing zeros if explicitly requested.
+    if (opt_precision) |precision| if (precision > 0) {
+        if (precision > trimmed.len)
+            try w.splatByteAll('0', precision - trimmed.len);
+    };
+    try w.writeAll("p");
+    try w.printIntOptions(exponent - exponent_bias, 10, .lower, .{});
+}
+
+pub const ByteSizeUnits = enum {
+    /// This formatter represents the number as a multiple of 1000 and uses the SI
+    /// measurement units (kB, MB, GB, ...).
+    decimal,
+    /// This formatter represents the number as a multiple of 1024 and uses the IEC
+    /// measurement units (KiB, MiB, GiB, ...).
+    binary,
+};
+
+/// Format option `precision` is ignored when `value` is less than 1kB
+pub fn printByteSize(
+    w: *std.io.Writer,
+    value: u64,
+    comptime units: ByteSizeUnits,
+    options: std.fmt.Options,
+) Error!void {
+    if (value == 0) return w.alignBufferOptions("0B", options);
+    // The worst case in terms of space needed is 32 bytes + 3 for the suffix.
+    var buf: [std.fmt.float.min_buffer_size + 3]u8 = undefined;
+
+    const mags_si = " kMGTPEZY";
+    const mags_iec = " KMGTPEZY";
+
+    const log2 = std.math.log2(value);
+    const base = switch (units) {
+        .decimal => 1000,
+        .binary => 1024,
+    };
+    const magnitude = switch (units) {
+        .decimal => @min(log2 / comptime std.math.log2(1000), mags_si.len - 1),
+        .binary => @min(log2 / 10, mags_iec.len - 1),
+    };
+    const new_value = std.math.lossyCast(f64, value) / std.math.pow(f64, std.math.lossyCast(f64, base), std.math.lossyCast(f64, magnitude));
+    const suffix = switch (units) {
+        .decimal => mags_si[magnitude],
+        .binary => mags_iec[magnitude],
+    };
+
+    const s = switch (magnitude) {
+        0 => buf[0..std.fmt.printInt(&buf, value, 10, .lower, .{})],
+        else => std.fmt.float.render(&buf, new_value, .{ .mode = .decimal, .precision = options.precision }) catch |err| switch (err) {
+            error.BufferTooSmall => unreachable,
+        },
+    };
+
+    var i: usize = s.len;
+    if (suffix == ' ') {
+        buf[i] = 'B';
+        i += 1;
+    } else switch (units) {
+        .decimal => {
+            buf[i..][0..2].* = [_]u8{ suffix, 'B' };
+            i += 2;
+        },
+        .binary => {
+            buf[i..][0..3].* = [_]u8{ suffix, 'i', 'B' };
+            i += 3;
+        },
+    }
+
+    return w.alignBufferOptions(buf[0..i], options);
+}
+
+// This ANY const is a workaround for: https://github.com/ziglang/zig/issues/7948
+const ANY = "any";
+
+fn stripOptionalOrErrorUnionSpec(comptime fmt: []const u8) []const u8 {
+    return if (std.mem.eql(u8, fmt[1..], ANY))
+        ANY
+    else
+        fmt[1..];
+}
+
+pub fn invalidFmtError(comptime fmt: []const u8, value: anytype) noreturn {
+    @compileError("invalid format string '" ++ fmt ++ "' for type '" ++ @typeName(@TypeOf(value)) ++ "'");
+}
+
+pub fn printDurationSigned(w: *Writer, ns: i64) Error!void {
+    if (ns < 0) try w.writeByte('-');
+    return w.printDurationUnsigned(@abs(ns));
+}
+
+pub fn printDurationUnsigned(w: *Writer, ns: u64) Error!void {
+    var ns_remaining = ns;
+    inline for (.{
+        .{ .ns = 365 * std.time.ns_per_day, .sep = 'y' },
+        .{ .ns = std.time.ns_per_week, .sep = 'w' },
+        .{ .ns = std.time.ns_per_day, .sep = 'd' },
+        .{ .ns = std.time.ns_per_hour, .sep = 'h' },
+        .{ .ns = std.time.ns_per_min, .sep = 'm' },
+    }) |unit| {
+        if (ns_remaining >= unit.ns) {
+            const units = ns_remaining / unit.ns;
+            try w.printIntOptions(units, 10, .lower, .{});
+            try w.writeByte(unit.sep);
+            ns_remaining -= units * unit.ns;
+            if (ns_remaining == 0) return;
+        }
+    }
+
+    inline for (.{
+        .{ .ns = std.time.ns_per_s, .sep = "s" },
+        .{ .ns = std.time.ns_per_ms, .sep = "ms" },
+        .{ .ns = std.time.ns_per_us, .sep = "us" },
+    }) |unit| {
+        const kunits = ns_remaining * 1000 / unit.ns;
+        if (kunits >= 1000) {
+            try w.printIntOptions(kunits / 1000, 10, .lower, .{});
+            const frac = kunits % 1000;
+            if (frac > 0) {
+                // Write up to 3 decimal places
+                var decimal_buf = [_]u8{ '.', 0, 0, 0 };
+                var inner: Writer = .fixed(decimal_buf[1..]);
+                inner.printIntOptions(frac, 10, .lower, .{ .fill = '0', .width = 3 }) catch unreachable;
+                var end: usize = 4;
+                while (end > 1) : (end -= 1) {
+                    if (decimal_buf[end - 1] != '0') break;
+                }
+                try w.writeAll(decimal_buf[0..end]);
+            }
+            return w.writeAll(unit.sep);
+        }
+    }
+
+    try w.printIntOptions(ns_remaining, 10, .lower, .{});
+    try w.writeAll("ns");
+}
+
+/// Writes number of nanoseconds according to its signed magnitude:
+/// `[#y][#w][#d][#h][#m]#[.###][n|u|m]s`
+/// `nanoseconds` must be an integer that coerces into `u64` or `i64`.
+pub fn printDuration(w: *Writer, nanoseconds: anytype, options: std.fmt.Options) Error!void {
+    // worst case: "-XXXyXXwXXdXXhXXmXX.XXXs".len = 24
+    var buf: [24]u8 = undefined;
+    var sub_bw: Writer = .fixed(&buf);
+    switch (@typeInfo(@TypeOf(nanoseconds)).int.signedness) {
+        .signed => sub_bw.printDurationSigned(nanoseconds) catch unreachable,
+        .unsigned => sub_bw.printDurationUnsigned(nanoseconds) catch unreachable,
+    }
+    return w.alignBufferOptions(sub_bw.buffered(), options);
+}
+
+pub fn printHex(w: *Writer, bytes: []const u8, case: std.fmt.Case) Error!void {
+    const charset = switch (case) {
+        .upper => "0123456789ABCDEF",
+        .lower => "0123456789abcdef",
+    };
+    for (bytes) |c| {
+        try w.writeByte(charset[c >> 4]);
+        try w.writeByte(charset[c & 15]);
+    }
+}
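+
+// Illustrative sketch: `printHex` emits two hex digits per input byte,
+// assuming a fixed-buffer `Writer`.
+test "printHex example" {
+    var buf: [8]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.printHex(&.{ 0xDE, 0xAD }, .lower);
+    try testing.expectEqualStrings("dead", w.buffered());
+}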
+
+pub fn printBase64(w: *Writer, bytes: []const u8) Error!void {
+    var chunker = std.mem.window(u8, bytes, 3, 3);
+    var temp: [5]u8 = undefined;
+    while (chunker.next()) |chunk| {
+        try w.writeAll(std.base64.standard.Encoder.encode(&temp, chunk));
+    }
+}
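+
+// Illustrative sketch: `printBase64` encodes the input in 3-byte chunks using
+// the standard alphabet, assuming a fixed-buffer `Writer`.
+test "printBase64 example" {
+    var buf: [8]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.printBase64("zig");
+    try testing.expectEqualStrings("emln", w.buffered());
+}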
+
+/// Write a single unsigned integer as LEB128 to the given writer.
+pub fn writeUleb128(w: *Writer, value: anytype) Error!void {
+    try w.writeLeb128(switch (@typeInfo(@TypeOf(value))) {
+        .comptime_int => @as(std.math.IntFittingRange(0, @abs(value)), value),
+        .int => |value_info| switch (value_info.signedness) {
+            .signed => @as(@Type(.{ .int = .{ .signedness = .unsigned, .bits = value_info.bits -| 1 } }), @intCast(value)),
+            .unsigned => value,
+        },
+        else => comptime unreachable,
+    });
+}
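+
+// Illustrative sketch: the classic ULEB128 encoding of 624485 is
+// 0xE5 0x8E 0x26, assuming a fixed-buffer `Writer`.
+test "writeUleb128 example" {
+    var buf: [8]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.writeUleb128(@as(u32, 624485));
+    try testing.expectEqualSlices(u8, &.{ 0xE5, 0x8E, 0x26 }, w.buffered());
+}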
+
+/// Write a single signed integer as LEB128 to the given writer.
+pub fn writeSleb128(w: *Writer, value: anytype) Error!void {
+    try w.writeLeb128(switch (@typeInfo(@TypeOf(value))) {
+        .comptime_int => @as(std.math.IntFittingRange(@min(value, -1), @max(0, value)), value),
+        .int => |value_info| switch (value_info.signedness) {
+            .signed => value,
+            .unsigned => @as(@Type(.{ .int = .{ .signedness = .signed, .bits = value_info.bits + 1 } }), value),
+        },
+        else => comptime unreachable,
+    });
+}
+
+/// Write a single integer as LEB128 to the given writer.
+pub fn writeLeb128(w: *Writer, value: anytype) Error!void {
+    const value_info = @typeInfo(@TypeOf(value)).int;
+    try w.writeMultipleOf7Leb128(@as(@Type(.{ .int = .{
+        .signedness = value_info.signedness,
+        .bits = std.mem.alignForwardAnyAlign(u16, value_info.bits, 7),
+    } }), value));
+}
+
+fn writeMultipleOf7Leb128(w: *Writer, value: anytype) Error!void {
+    const value_info = @typeInfo(@TypeOf(value)).int;
+    comptime assert(value_info.bits % 7 == 0);
+    var remaining = value;
     while (true) {
-        const n = try file.readAll(&buf);
-        try self.writeAll(buf[0..n]);
-        if (n < buf.len) return;
+        const buffer: []packed struct(u8) { bits: u7, more: bool } = @ptrCast(try w.writableSliceGreedy(1));
+        for (buffer, 1..) |*byte, len| {
+            const more = switch (value_info.signedness) {
+                .signed => remaining >> 6 != remaining >> (value_info.bits - 1),
+                .unsigned => remaining > std.math.maxInt(u7),
+            };
+            byte.* = if (@inComptime()) @typeInfo(@TypeOf(buffer)).pointer.child{
+                .bits = @bitCast(@as(@Type(.{ .int = .{
+                    .signedness = value_info.signedness,
+                    .bits = 7,
+                } }), @truncate(remaining))),
+                .more = more,
+            } else .{
+                .bits = @bitCast(@as(@Type(.{ .int = .{
+                    .signedness = value_info.signedness,
+                    .bits = 7,
+                } }), @truncate(remaining))),
+                .more = more,
+            };
+            if (value_info.bits > 7) remaining >>= 7;
+            if (!more) return w.advance(len);
+        }
+        w.advance(buffer.len);
+    }
+}
+
+test "printValue max_depth" {
+    const Vec2 = struct {
+        const SelfType = @This();
+        x: f32,
+        y: f32,
+
+        pub fn format(
+            self: SelfType,
+            comptime fmt: []const u8,
+            options: std.fmt.Options,
+            w: *Writer,
+        ) Error!void {
+            _ = options;
+            if (fmt.len == 0) {
+                return w.print("({d:.3},{d:.3})", .{ self.x, self.y });
+            } else {
+                @compileError("unknown format string: '" ++ fmt ++ "'");
+            }
+        }
+    };
+    const E = enum {
+        One,
+        Two,
+        Three,
+    };
+    const TU = union(enum) {
+        const SelfType = @This();
+        float: f32,
+        int: u32,
+        ptr: ?*SelfType,
+    };
+    const S = struct {
+        const SelfType = @This();
+        a: ?*SelfType,
+        tu: TU,
+        e: E,
+        vec: Vec2,
+    };
+
+    var inst = S{
+        .a = null,
+        .tu = TU{ .ptr = null },
+        .e = E.Two,
+        .vec = Vec2{ .x = 10.2, .y = 2.22 },
+    };
+    inst.a = &inst;
+    inst.tu.ptr = &inst.tu;
+
+    var buf: [1000]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.printValue("", .{}, inst, 0);
+    try testing.expectEqualStrings("io.Writer.test.printValue max_depth.S{ ... }", w.buffered());
+
+    w.reset();
+    try w.printValue("", .{}, inst, 1);
+    try testing.expectEqualStrings("io.Writer.test.printValue max_depth.S{ .a = io.Writer.test.printValue max_depth.S{ ... }, .tu = io.Writer.test.printValue max_depth.TU{ ... }, .e = io.Writer.test.printValue max_depth.E.Two, .vec = (10.200,2.220) }", w.buffered());
+
+    w.reset();
+    try w.printValue("", .{}, inst, 2);
+    try testing.expectEqualStrings("io.Writer.test.printValue max_depth.S{ .a = io.Writer.test.printValue max_depth.S{ .a = io.Writer.test.printValue max_depth.S{ ... }, .tu = io.Writer.test.printValue max_depth.TU{ ... }, .e = io.Writer.test.printValue max_depth.E.Two, .vec = (10.200,2.220) }, .tu = io.Writer.test.printValue max_depth.TU{ .ptr = io.Writer.test.printValue max_depth.TU{ ... } }, .e = io.Writer.test.printValue max_depth.E.Two, .vec = (10.200,2.220) }", w.buffered());
+
+    w.reset();
+    try w.printValue("", .{}, inst, 3);
+    try testing.expectEqualStrings("io.Writer.test.printValue max_depth.S{ .a = io.Writer.test.printValue max_depth.S{ .a = io.Writer.test.printValue max_depth.S{ .a = io.Writer.test.printValue max_depth.S{ ... }, .tu = io.Writer.test.printValue max_depth.TU{ ... }, .e = io.Writer.test.printValue max_depth.E.Two, .vec = (10.200,2.220) }, .tu = io.Writer.test.printValue max_depth.TU{ .ptr = io.Writer.test.printValue max_depth.TU{ ... } }, .e = io.Writer.test.printValue max_depth.E.Two, .vec = (10.200,2.220) }, .tu = io.Writer.test.printValue max_depth.TU{ .ptr = io.Writer.test.printValue max_depth.TU{ .ptr = io.Writer.test.printValue max_depth.TU{ ... } } }, .e = io.Writer.test.printValue max_depth.E.Two, .vec = (10.200,2.220) }", w.buffered());
+
+    const vec: @Vector(4, i32) = .{ 1, 2, 3, 4 };
+    w.reset();
+    try w.printValue("", .{}, vec, 0);
+    try testing.expectEqualStrings("{ ... }", w.buffered());
+
+    w.reset();
+    try w.printValue("", .{}, vec, 1);
+    try testing.expectEqualStrings("{ 1, 2, 3, 4 }", w.buffered());
+}
+
+test printDuration {
+    try testDurationCase("0ns", 0);
+    try testDurationCase("1ns", 1);
+    try testDurationCase("999ns", std.time.ns_per_us - 1);
+    try testDurationCase("1us", std.time.ns_per_us);
+    try testDurationCase("1.45us", 1450);
+    try testDurationCase("1.5us", 3 * std.time.ns_per_us / 2);
+    try testDurationCase("14.5us", 14500);
+    try testDurationCase("145us", 145000);
+    try testDurationCase("999.999us", std.time.ns_per_ms - 1);
+    try testDurationCase("1ms", std.time.ns_per_ms + 1);
+    try testDurationCase("1.5ms", 3 * std.time.ns_per_ms / 2);
+    try testDurationCase("1.11ms", 1110000);
+    try testDurationCase("1.111ms", 1111000);
+    try testDurationCase("1.111ms", 1111100);
+    try testDurationCase("999.999ms", std.time.ns_per_s - 1);
+    try testDurationCase("1s", std.time.ns_per_s);
+    try testDurationCase("59.999s", std.time.ns_per_min - 1);
+    try testDurationCase("1m", std.time.ns_per_min);
+    try testDurationCase("1h", std.time.ns_per_hour);
+    try testDurationCase("1d", std.time.ns_per_day);
+    try testDurationCase("1w", std.time.ns_per_week);
+    try testDurationCase("1y", 365 * std.time.ns_per_day);
+    try testDurationCase("1y52w23h59m59.999s", 730 * std.time.ns_per_day - 1); // 365d = 52w1d
+    try testDurationCase("1y1h1.001s", 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_s + std.time.ns_per_ms);
+    try testDurationCase("1y1h1s", 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_s + 999 * std.time.ns_per_us);
+    try testDurationCase("1y1h999.999us", 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms - 1);
+    try testDurationCase("1y1h1ms", 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms);
+    try testDurationCase("1y1h1ms", 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms + 1);
+    try testDurationCase("1y1m999ns", 365 * std.time.ns_per_day + std.time.ns_per_min + 999);
+    try testDurationCase("584y49w23h34m33.709s", std.math.maxInt(u64));
+
+    try testing.expectFmt("=======0ns", "{D:=>10}", .{0});
+    try testing.expectFmt("1ns=======", "{D:=<10}", .{1});
+    try testing.expectFmt("  999ns   ", "{D:^10}", .{std.time.ns_per_us - 1});
+}
+
+test printDurationSigned {
+    try testDurationCaseSigned("0ns", 0);
+    try testDurationCaseSigned("1ns", 1);
+    try testDurationCaseSigned("-1ns", -(1));
+    try testDurationCaseSigned("999ns", std.time.ns_per_us - 1);
+    try testDurationCaseSigned("-999ns", -(std.time.ns_per_us - 1));
+    try testDurationCaseSigned("1us", std.time.ns_per_us);
+    try testDurationCaseSigned("-1us", -(std.time.ns_per_us));
+    try testDurationCaseSigned("1.45us", 1450);
+    try testDurationCaseSigned("-1.45us", -(1450));
+    try testDurationCaseSigned("1.5us", 3 * std.time.ns_per_us / 2);
+    try testDurationCaseSigned("-1.5us", -(3 * std.time.ns_per_us / 2));
+    try testDurationCaseSigned("14.5us", 14500);
+    try testDurationCaseSigned("-14.5us", -(14500));
+    try testDurationCaseSigned("145us", 145000);
+    try testDurationCaseSigned("-145us", -(145000));
+    try testDurationCaseSigned("999.999us", std.time.ns_per_ms - 1);
+    try testDurationCaseSigned("-999.999us", -(std.time.ns_per_ms - 1));
+    try testDurationCaseSigned("1ms", std.time.ns_per_ms + 1);
+    try testDurationCaseSigned("-1ms", -(std.time.ns_per_ms + 1));
+    try testDurationCaseSigned("1.5ms", 3 * std.time.ns_per_ms / 2);
+    try testDurationCaseSigned("-1.5ms", -(3 * std.time.ns_per_ms / 2));
+    try testDurationCaseSigned("1.11ms", 1110000);
+    try testDurationCaseSigned("-1.11ms", -(1110000));
+    try testDurationCaseSigned("1.111ms", 1111000);
+    try testDurationCaseSigned("-1.111ms", -(1111000));
+    try testDurationCaseSigned("1.111ms", 1111100);
+    try testDurationCaseSigned("-1.111ms", -(1111100));
+    try testDurationCaseSigned("999.999ms", std.time.ns_per_s - 1);
+    try testDurationCaseSigned("-999.999ms", -(std.time.ns_per_s - 1));
+    try testDurationCaseSigned("1s", std.time.ns_per_s);
+    try testDurationCaseSigned("-1s", -(std.time.ns_per_s));
+    try testDurationCaseSigned("59.999s", std.time.ns_per_min - 1);
+    try testDurationCaseSigned("-59.999s", -(std.time.ns_per_min - 1));
+    try testDurationCaseSigned("1m", std.time.ns_per_min);
+    try testDurationCaseSigned("-1m", -(std.time.ns_per_min));
+    try testDurationCaseSigned("1h", std.time.ns_per_hour);
+    try testDurationCaseSigned("-1h", -(std.time.ns_per_hour));
+    try testDurationCaseSigned("1d", std.time.ns_per_day);
+    try testDurationCaseSigned("-1d", -(std.time.ns_per_day));
+    try testDurationCaseSigned("1w", std.time.ns_per_week);
+    try testDurationCaseSigned("-1w", -(std.time.ns_per_week));
+    try testDurationCaseSigned("1y", 365 * std.time.ns_per_day);
+    try testDurationCaseSigned("-1y", -(365 * std.time.ns_per_day));
+    try testDurationCaseSigned("1y52w23h59m59.999s", 730 * std.time.ns_per_day - 1); // 365d = 52w1d
+    try testDurationCaseSigned("-1y52w23h59m59.999s", -(730 * std.time.ns_per_day - 1)); // 365d = 52w1d
+    try testDurationCaseSigned("1y1h1.001s", 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_s + std.time.ns_per_ms);
+    try testDurationCaseSigned("-1y1h1.001s", -(365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_s + std.time.ns_per_ms));
+    try testDurationCaseSigned("1y1h1s", 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_s + 999 * std.time.ns_per_us);
+    try testDurationCaseSigned("-1y1h1s", -(365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_s + 999 * std.time.ns_per_us));
+    try testDurationCaseSigned("1y1h999.999us", 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms - 1);
+    try testDurationCaseSigned("-1y1h999.999us", -(365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms - 1));
+    try testDurationCaseSigned("1y1h1ms", 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms);
+    try testDurationCaseSigned("-1y1h1ms", -(365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms));
+    try testDurationCaseSigned("1y1h1ms", 365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms + 1);
+    try testDurationCaseSigned("-1y1h1ms", -(365 * std.time.ns_per_day + std.time.ns_per_hour + std.time.ns_per_ms + 1));
+    try testDurationCaseSigned("1y1m999ns", 365 * std.time.ns_per_day + std.time.ns_per_min + 999);
+    try testDurationCaseSigned("-1y1m999ns", -(365 * std.time.ns_per_day + std.time.ns_per_min + 999));
+    try testDurationCaseSigned("292y24w3d23h47m16.854s", std.math.maxInt(i64));
+    try testDurationCaseSigned("-292y24w3d23h47m16.854s", std.math.minInt(i64) + 1);
+    try testDurationCaseSigned("-292y24w3d23h47m16.854s", std.math.minInt(i64));
+
+    testing.expectFmt("=======0ns", "{s:=>10}", .{0});
+    testing.expectFmt("1ns=======", "{s:=<10}", .{1});
+    testing.expectFmt("-1ns======", "{s:=<10}", .{-(1)});
+    testing.expectFmt("  -999ns  ", "{s:^10}", .{-(std.time.ns_per_us - 1)});
+}
+
+fn testDurationCase(expected: []const u8, input: u64) !void {
+    var buf: [24]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.printDurationUnsigned(input);
+    try testing.expectEqualStrings(expected, w.buffered());
+}
+
+fn testDurationCaseSigned(expected: []const u8, input: i64) !void {
+    var buf: [24]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.printDurationSigned(input);
+    try testing.expectEqualStrings(expected, w.buffered());
+}
+
+test printIntOptions {
+    try testPrintIntCase("-1", @as(i1, -1), 10, .lower, .{});
+
+    try testPrintIntCase("-101111000110000101001110", @as(i32, -12345678), 2, .lower, .{});
+    try testPrintIntCase("-12345678", @as(i32, -12345678), 10, .lower, .{});
+    try testPrintIntCase("-bc614e", @as(i32, -12345678), 16, .lower, .{});
+    try testPrintIntCase("-BC614E", @as(i32, -12345678), 16, .upper, .{});
+
+    try testPrintIntCase("12345678", @as(u32, 12345678), 10, .upper, .{});
+
+    try testPrintIntCase("   666", @as(u32, 666), 10, .lower, .{ .width = 6 });
+    try testPrintIntCase("  1234", @as(u32, 0x1234), 16, .lower, .{ .width = 6 });
+    try testPrintIntCase("1234", @as(u32, 0x1234), 16, .lower, .{ .width = 1 });
+
+    try testPrintIntCase("+42", @as(i32, 42), 10, .lower, .{ .width = 3 });
+    try testPrintIntCase("-42", @as(i32, -42), 10, .lower, .{ .width = 3 });
+}
+
+test "printInt with comptime_int" {
+    var buf: [20]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.printInt(@as(comptime_int, 123456789123456789), "", .{});
+    try std.testing.expectEqualStrings("123456789123456789", w.buffered());
+}
+
+test "printFloat with comptime_float" {
+    var buf: [20]u8 = undefined;
+    var w: Writer = .fixed(&buf);
+    try w.printFloat("", .{}, @as(comptime_float, 1.0));
+    try std.testing.expectEqualStrings(w.buffered(), "1e0");
+    try std.testing.expectFmt("1e0", "{}", .{1.0});
+}
+
+fn testPrintIntCase(expected: []const u8, value: anytype, base: u8, case: std.fmt.Case, options: std.fmt.Options) !void {
+    var buffer: [100]u8 = undefined;
+    var w: Writer = .fixed(&buffer);
+    try w.printIntOptions(value, base, case, options);
+    try testing.expectEqualStrings(expected, w.buffered());
+}
+
+test printByteSize {
+    try testing.expectFmt("file size: 42B\n", "file size: {B}\n", .{42});
+    try testing.expectFmt("file size: 42B\n", "file size: {Bi}\n", .{42});
+    try testing.expectFmt("file size: 63MB\n", "file size: {B}\n", .{63 * 1000 * 1000});
+    try testing.expectFmt("file size: 63MiB\n", "file size: {Bi}\n", .{63 * 1024 * 1024});
+    try testing.expectFmt("file size: 42B\n", "file size: {B:.2}\n", .{42});
+    try testing.expectFmt("file size:       42B\n", "file size: {B:>9.2}\n", .{42});
+    try testing.expectFmt("file size: 66.06MB\n", "file size: {B:.2}\n", .{63 * 1024 * 1024});
+    try testing.expectFmt("file size: 60.08MiB\n", "file size: {Bi:.2}\n", .{63 * 1000 * 1000});
+    try testing.expectFmt("file size: =66.06MB=\n", "file size: {B:=^9.2}\n", .{63 * 1024 * 1024});
+    try testing.expectFmt("file size:   66.06MB\n", "file size: {B: >9.2}\n", .{63 * 1024 * 1024});
+    try testing.expectFmt("file size: 66.06MB  \n", "file size: {B: <9.2}\n", .{63 * 1024 * 1024});
+    try testing.expectFmt("file size: 0.01844674407370955ZB\n", "file size: {B}\n", .{std.math.maxInt(u64)});
+}
+
+test "bytes.hex" {
+    const some_bytes = "\xCA\xFE\xBA\xBE";
+    try std.testing.expectFmt("lowercase: cafebabe\n", "lowercase: {x}\n", .{some_bytes});
+    try std.testing.expectFmt("uppercase: CAFEBABE\n", "uppercase: {X}\n", .{some_bytes});
+    try std.testing.expectFmt("uppercase: CAFE\n", "uppercase: {X}\n", .{some_bytes[0..2]});
+    try std.testing.expectFmt("lowercase: babe\n", "lowercase: {x}\n", .{some_bytes[2..]});
+    const bytes_with_zeros = "\x00\x0E\xBA\xBE";
+    try std.testing.expectFmt("lowercase: 000ebabe\n", "lowercase: {x}\n", .{bytes_with_zeros});
+}
+
+test fixed {
+    {
+        var buf: [255]u8 = undefined;
+        var w: Writer = .fixed(&buf);
+        try w.print("{s}{s}!", .{ "Hello", "World" });
+        try testing.expectEqualStrings("HelloWorld!", w.buffered());
+    }
+
+    comptime {
+        var buf: [255]u8 = undefined;
+        var w: Writer = .fixed(&buf);
+        try w.print("{s}{s}!", .{ "Hello", "World" });
+        try testing.expectEqualStrings("HelloWorld!", w.buffered());
+    }
+}
+
+test "fixed output" {
+    var buffer: [10]u8 = undefined;
+    var w: Writer = .fixed(&buffer);
+
+    try w.writeAll("Hello");
+    try testing.expect(std.mem.eql(u8, w.buffered(), "Hello"));
+
+    try w.writeAll("world");
+    try testing.expect(std.mem.eql(u8, w.buffered(), "Helloworld"));
+
+    try testing.expectError(error.WriteStreamEnd, w.writeAll("!"));
+    try testing.expect(std.mem.eql(u8, w.buffered(), "Helloworld"));
+
+    w.reset();
+    try testing.expect(w.buffered().len == 0);
+
+    try testing.expectError(error.WriteStreamEnd, w.writeAll("Hello world!"));
+    try testing.expect(std.mem.eql(u8, w.buffered(), "Hello worl"));
+
+    try w.seekTo((try w.getEndPos()) + 1);
+    try testing.expectError(error.WriteStreamEnd, w.writeAll("H"));
+}
+
+pub fn failingDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
+    _ = w;
+    _ = data;
+    _ = splat;
+    return error.WriteFailed;
+}
+
+pub fn failingSendFile(w: *Writer, file_reader: *File.Reader, limit: Limit) FileError!usize {
+    _ = w;
+    _ = file_reader;
+    _ = limit;
+    return error.WriteFailed;
+}
+
+pub fn discardingDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
+    const slice = data[0 .. data.len - 1];
+    const pattern = data[data.len - 1];
+    var written: usize = pattern.len * splat;
+    for (slice) |bytes| written += bytes.len;
+    w.end = 0;
+    return written;
+}
+
+pub fn discardingSendFile(w: *Writer, file_reader: *File.Reader, limit: Limit) FileError!usize {
+    if (File.Handle == void) return error.Unimplemented;
+    w.end = 0;
+    if (file_reader.getSize()) |size| {
+        const n = limit.minInt(size - file_reader.pos);
+        file_reader.seekBy(@intCast(n)) catch return error.Unimplemented;
+        w.end = 0;
+        return n;
+    } else |_| {
+        // Error is observable on `file_reader` instance, and it is better to
+        // treat the file as a pipe.
+        return error.Unimplemented;
+    }
+}
+
+/// Removes the first `n` bytes from `buffer` by shifting buffer contents,
+/// returning how many of the `n` bytes remain after the entire buffer
+/// contents have been consumed, or zero if the entire buffer was not
+/// consumed.
+///
+/// Useful for `VTable.drain` function implementations to implement partial
+/// drains.
+pub fn consume(w: *Writer, n: usize) usize {
+    if (n < w.end) {
+        const remaining = w.buffer[n..w.end];
+        @memmove(w.buffer[0..remaining.len], remaining);
+        w.end = remaining.len;
+        return 0;
+    }
+    defer w.end = 0;
+    return n - w.end;
+}
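+
+// For illustration only (editor's sketch, not part of the original commit): a
+// partial `VTable.drain` implementation could forward the buffered bytes to
+// some sink and let `consume` translate the sink's result into the value that
+// `drain` must return. `sendToSink` is a hypothetical function.
+//
+//     fn exampleDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
+//         _ = data; // handling of `data` and `splat` omitted from this sketch
+//         _ = splat;
+//         const n = sendToSink(w.buffered()) catch return error.WriteFailed;
+//         // Shifts any unwritten buffered bytes; reports 0 bytes of `data` consumed.
+//         return w.consume(n);
+//     }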
+
+/// Shortcut for setting `end` to zero and returning zero. Equivalent to
+/// calling `consume` with `end`.
+pub fn consumeAll(w: *Writer) usize {
+    w.end = 0;
+    return 0;
+}
+
+/// For use when the `Writer` implementation cannot offer a more efficient
+/// implementation than a basic read/write loop on the file.
+pub fn unimplementedSendFile(w: *Writer, file_reader: *File.Reader, limit: Limit) FileError!usize {
+    _ = w;
+    _ = file_reader;
+    _ = limit;
+    return error.Unimplemented;
+}
+
+/// When this function is called it usually means the buffer got full, so it's
+/// time to return an error. However, we still need to make sure all of the
+/// available buffer has been filled. Also, it may be called from `flush` in
+/// which case it should return successfully.
+pub fn fixedDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
+    if (data.len == 0) return 0;
+    for (data[0 .. data.len - 1]) |bytes| {
+        const dest = w.buffer[w.end..];
+        const len = @min(bytes.len, dest.len);
+        @memcpy(dest[0..len], bytes[0..len]);
+        w.end += len;
+        if (bytes.len > dest.len) return error.WriteFailed;
+    }
+    const pattern = data[data.len - 1];
+    const dest = w.buffer[w.end..];
+    switch (pattern.len) {
+        0 => return w.end,
+        1 => {
+            assert(splat >= dest.len);
+            @memset(dest, pattern[0]);
+            w.end += dest.len;
+            return error.WriteFailed;
+        },
+        else => {
+            for (0..splat) |i| {
+                const remaining = dest[i * pattern.len ..];
+                const len = @min(pattern.len, remaining.len);
+                @memcpy(remaining[0..len], pattern[0..len]);
+                w.end += len;
+                if (pattern.len > remaining.len) return error.WriteFailed;
+            }
+            unreachable;
+        },
     }
 }
+
+/// Provides a `Writer` implementation based on calling `Hasher.update`, while
+/// also sending all data to an underlying `Writer`.
+///
+/// When using this, the underlying writer is best unbuffered because all
+/// writes are passed on directly to it.
+///
+/// This implementation makes suboptimal buffering decisions due to being
+/// generic. A better solution will involve creating a writer for each hash
+/// function, where the splat buffer can be tailored to the hash implementation
+/// details.
+pub fn Hashed(comptime Hasher: type) type {
+    return struct {
+        out: *Writer,
+        hasher: Hasher,
+        interface: Writer,
+
+        pub fn init(out: *Writer) @This() {
+            return .{
+                .out = out,
+                .hasher = .{},
+                .interface = .{
+                    .vtable = &.{ .drain = @This().drain },
+                },
+            };
+        }
+
+        fn drain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
+            const this: *@This() = @alignCast(@fieldParentPtr("interface", w));
+            if (data.len == 0) {
+                const buf = w.buffered();
+                try this.out.writeAll(buf);
+                this.hasher.update(buf);
+                w.end = 0;
+                return buf.len;
+            }
+            const aux_n = try this.out.writeSplatAux(w.buffered(), data, splat);
+            if (aux_n < w.end) {
+                this.hasher.update(w.buffer[0..aux_n]);
+                const remaining = w.buffer[aux_n..w.end];
+                @memmove(w.buffer[0..remaining.len], remaining);
+                w.end = remaining.len;
+                return 0;
+            }
+            this.hasher.update(w.buffered());
+            const n = aux_n - w.end;
+            w.end = 0;
+            var remaining: usize = n;
+            const short_data = data[0 .. data.len - @intFromBool(splat == 0)];
+            for (short_data) |slice| {
+                if (remaining < slice.len) {
+                    this.hasher.update(slice[0..remaining]);
+                    return n;
+                } else {
+                    remaining -= slice.len;
+                    this.hasher.update(slice);
+                }
+            }
+            const remaining_splat = switch (splat) {
+                0, 1 => {
+                    assert(remaining == 0);
+                    return n;
+                },
+                else => splat - 1,
+            };
+            const pattern = data[data.len - 1];
+            assert(remaining == remaining_splat * pattern.len);
+            switch (pattern.len) {
+                0 => {
+                    assert(remaining == 0);
+                },
+                1 => {
+                    var buffer: [64]u8 = undefined;
+                    @memset(&buffer, pattern[0]);
+                    while (remaining > 0) {
+                        const update_len = @min(remaining, buffer.len);
+                        this.hasher.update(buffer[0..update_len]);
+                        remaining -= update_len;
+                    }
+                },
+                else => {
+                    while (remaining > 0) {
+                        const update_len = @min(remaining, pattern.len);
+                        this.hasher.update(pattern[0..update_len]);
+                        remaining -= update_len;
+                    }
+                },
+            }
+            return n;
+        }
+    };
+}
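+
+// Usage sketch (editor's illustration, not part of the original commit):
+// `SomeHasher` stands for any hasher type with default-initializable state
+// and an `update([]const u8)` method; `out` is an existing `*Writer`.
+//
+//     var hashing: Hashed(SomeHasher) = .init(out);
+//     try hashing.interface.writeAll("bytes are hashed and forwarded to out");
+//     try hashing.interface.flush();
+//     // hashing.hasher now reflects everything that was written.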
+
+/// Maintains `Writer` state such that it writes to the unused capacity of an
+/// array list, filling it up completely before making a call through the
+/// vtable, causing a resize. Consequently, the same, optimized, non-generic
+/// machine code that uses `std.io.Writer`, such as formatted printing, takes
+/// the hot paths when using this API.
+///
+/// When using this API, it is not necessary to call `flush`.
+pub const Allocating = struct {
+    allocator: Allocator,
+    interface: Writer,
+
+    pub fn init(allocator: Allocator) Allocating {
+        return .{
+            .allocator = allocator,
+            .interface = .{
+                .buffer = &.{},
+                .vtable = &vtable,
+            },
+        };
+    }
+
+    pub fn initCapacity(allocator: Allocator, capacity: usize) error{OutOfMemory}!Allocating {
+        return .{
+            .allocator = allocator,
+            .interface = .{
+                .buffer = try allocator.alloc(u8, capacity),
+                .vtable = &vtable,
+            },
+        };
+    }
+
+    pub fn initOwnedSlice(allocator: Allocator, slice: []u8) Allocating {
+        return .{
+            .allocator = allocator,
+            .interface = .{
+                .buffer = slice,
+                .vtable = &vtable,
+            },
+        };
+    }
+
+    /// Replaces `array_list` with empty, taking ownership of the memory.
+    pub fn fromArrayList(allocator: Allocator, array_list: *std.ArrayListUnmanaged(u8)) Allocating {
+        defer array_list.* = .empty;
+        return .{
+            .allocator = allocator,
+            .interface = .{
+                .vtable = &vtable,
+                .buffer = array_list.allocatedSlice(),
+                .end = array_list.items.len,
+            },
+        };
+    }
+
+    const vtable: VTable = .{
+        .drain = Allocating.drain,
+        .sendFile = Allocating.sendFile,
+        .flush = noopFlush,
+    };
+
+    pub fn deinit(a: *Allocating) void {
+        a.allocator.free(a.interface.buffer);
+        a.* = undefined;
+    }
+
+    /// Returns an array list that takes ownership of the allocated memory.
+    /// Resets the `Allocating` to an empty state.
+    pub fn toArrayList(a: *Allocating) std.ArrayListUnmanaged(u8) {
+        const w = &a.interface;
+        const result: std.ArrayListUnmanaged(u8) = .{
+            .items = w.buffer[0..w.end],
+            .capacity = w.buffer.len,
+        };
+        w.buffer = &.{};
+        w.end = 0;
+        return result;
+    }
+
+    pub fn toOwnedSlice(a: *Allocating) error{OutOfMemory}![]u8 {
+        var list = a.toArrayList();
+        return list.toOwnedSlice(a.allocator);
+    }
+
+    pub fn toOwnedSliceSentinel(a: *Allocating, comptime sentinel: u8) error{OutOfMemory}![:sentinel]u8 {
+        const gpa = a.allocator;
+        var list = toArrayList(a);
+        return list.toOwnedSliceSentinel(gpa, sentinel);
+    }
+
+    pub fn getWritten(a: *Allocating) []u8 {
+        return a.interface.buffered();
+    }
+
+    pub fn shrinkRetainingCapacity(a: *Allocating, new_len: usize) void {
+        const shrink_by = a.interface.end - new_len;
+        a.interface.end = new_len;
+        a.interface.count -= shrink_by;
+    }
+
+    pub fn clearRetainingCapacity(a: *Allocating) void {
+        a.shrinkRetainingCapacity(0);
+    }
+
+    fn drain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
+        const a: *Allocating = @fieldParentPtr("interface", w);
+        const gpa = a.allocator;
+        const pattern = data[data.len - 1];
+        const splat_len = pattern.len * splat;
+        var list = a.toArrayList();
+        defer setArrayList(a, list);
+        const start_len = list.items.len;
+        for (data) |bytes| {
+            list.ensureUnusedCapacity(gpa, bytes.len + splat_len) catch return error.WriteFailed;
+            list.appendSliceAssumeCapacity(bytes);
+        }
+        if (splat == 0) {
+            list.items.len -= pattern.len;
+        } else switch (pattern.len) {
+            0 => {},
+            1 => list.appendNTimesAssumeCapacity(pattern[0], splat - 1),
+            else => for (0..splat - 1) |_| list.appendSliceAssumeCapacity(pattern),
+        }
+        return list.items.len - start_len;
+    }
+
+    fn sendFile(w: *Writer, file_reader: *File.Reader, limit: std.io.Limit) FileError!usize {
+        if (File.Handle == void) return error.Unimplemented;
+        const a: *Allocating = @fieldParentPtr("interface", w);
+        const gpa = a.allocator;
+        var list = a.toArrayList();
+        defer setArrayList(a, list);
+        const pos = file_reader.pos;
+        const additional = if (file_reader.getSize()) |size| size - pos else |_| std.atomic.cache_line;
+        list.ensureUnusedCapacity(gpa, limit.minInt(additional)) catch return error.WriteFailed;
+        const dest = limit.slice(list.unusedCapacitySlice());
+        const n = file_reader.read(dest) catch |err| switch (err) {
+            error.ReadFailed => return error.ReadFailed,
+            error.EndOfStream => 0,
+        };
+        list.items.len += n;
+        return n;
+    }
+
+    fn setArrayList(a: *Allocating, list: std.ArrayListUnmanaged(u8)) void {
+        a.interface.buffer = list.allocatedSlice();
+        a.interface.end = list.items.len;
+    }
+
+    test Allocating {
+        var a: Allocating = .init(std.testing.allocator);
+        defer a.deinit();
+        const w = &a.interface;
+
+        const x: i32 = 42;
+        const y: i32 = 1234;
+        try w.print("x: {}\ny: {}\n", .{ x, y });
+
+        try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", a.getWritten());
+    }
+};
lib/std/json/scanner.zig
@@ -219,7 +219,7 @@ pub const AllocWhen = enum { alloc_if_needed, alloc_always };
 /// This limit can be specified by calling `nextAllocMax()` instead of `nextAlloc()`.
 pub const default_max_value_len = 4 * 1024 * 1024;
 
-/// Connects a `std.io.Reader` to a `std.json.Scanner`.
+/// Connects a `std.io.GenericReader` to a `std.json.Scanner`.
 /// All `next*()` methods here handle `error.BufferUnderrun` from `std.json.Scanner`, and then read from the reader.
 pub fn Reader(comptime buffer_size: usize, comptime ReaderType: type) type {
     return struct {
lib/std/json/stringify.zig
@@ -38,7 +38,7 @@ pub const StringifyOptions = struct {
     emit_nonportable_numbers_as_strings: bool = false,
 };
 
-/// Writes the given value to the `std.io.Writer` stream.
+/// Writes the given value to the `std.io.GenericWriter` stream.
 /// See `WriteStream` for how the given value is serialized into JSON.
 /// The maximum nesting depth of the output JSON document is 256.
 /// See also `stringifyMaxDepth` and `stringifyArbitraryDepth`.
@@ -81,7 +81,7 @@ pub fn stringifyArbitraryDepth(
 }
 
 /// Calls `stringifyArbitraryDepth` and stores the result in dynamically allocated memory
-/// instead of taking a `std.io.Writer`.
+/// instead of taking a `std.io.GenericWriter`.
 ///
 /// Caller owns returned memory.
 pub fn stringifyAlloc(
lib/std/json/stringify_test.zig
@@ -307,7 +307,7 @@ test "stringify tuple" {
 fn testStringify(expected: []const u8, value: anytype, options: StringifyOptions) !void {
     const ValidationWriter = struct {
         const Self = @This();
-        pub const Writer = std.io.Writer(*Self, Error, write);
+        pub const Writer = std.io.GenericWriter(*Self, Error, write);
         pub const Error = error{
             TooMuchData,
             DifferentData,
lib/std/os/uefi/protocol/file.zig
@@ -88,8 +88,8 @@ pub const File = extern struct {
         getPosition,
         getEndPos,
     );
-    pub const Reader = io.Reader(*File, ReadError, read);
-    pub const Writer = io.Writer(*File, WriteError, write);
+    pub const Reader = io.GenericReader(*File, ReadError, read);
+    pub const Writer = io.GenericWriter(*File, WriteError, write);
 
     pub fn seekableStream(self: *File) SeekableStream {
         return .{ .context = self };
lib/std/zig/llvm/Builder.zig
@@ -9520,7 +9520,7 @@ fn WriterWithErrors(comptime BackingWriter: type, comptime ExtraErrors: type) ty
         backing_writer: BackingWriter,
 
         pub const Error = BackingWriter.Error || ExtraErrors;
-        pub const Writer = std.io.Writer(*const Self, Error, write);
+        pub const Writer = std.io.GenericWriter(*const Self, Error, write);
 
         const Self = @This();
 
lib/std/zig/render.zig
@@ -3245,7 +3245,7 @@ fn AutoIndentingStream(comptime UnderlyingWriter: type) type {
     return struct {
         const Self = @This();
         pub const WriteError = UnderlyingWriter.Error;
-        pub const Writer = std.io.Writer(*Self, WriteError, write);
+        pub const Writer = std.io.GenericWriter(*Self, WriteError, write);
 
         pub const IndentType = enum {
             normal,
lib/std/zig/string_literal.zig
@@ -322,7 +322,7 @@ test parseCharLiteral {
     );
 }
 
-/// Parses `bytes` as a Zig string literal and writes the result to the std.io.Writer type.
+/// Parses `bytes` as a Zig string literal and writes the result to the `std.io.GenericWriter` type.
 /// Asserts `bytes` has '"' at beginning and end.
 pub fn parseWrite(writer: anytype, bytes: []const u8) error{OutOfMemory}!Result {
     assert(bytes.len >= 2 and bytes[0] == '"' and bytes[bytes.len - 1] == '"');
lib/std/array_list.zig
@@ -338,11 +338,14 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
             @memcpy(self.items[old_len..][0..items.len], items);
         }
 
-        pub const Writer = if (T != u8)
-            @compileError("The Writer interface is only defined for ArrayList(u8) " ++
-                "but the given type is ArrayList(" ++ @typeName(T) ++ ")")
-        else
-            std.io.Writer(*Self, Allocator.Error, appendWrite);
+        pub fn print(self: *Self, comptime fmt: []const u8, args: anytype) error{OutOfMemory}!void {
+            const gpa = self.allocator;
+            var unmanaged = self.moveToUnmanaged();
+            defer self.* = unmanaged.toManaged(gpa);
+            try unmanaged.print(gpa, fmt, args);
+        }
+
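+        // Example (editor's sketch, not part of the original commit); `gpa`
+        // is assumed to be a `std.mem.Allocator`:
+        //
+        //     var list = std.ArrayList(u8).init(gpa);
+        //     defer list.deinit();
+        //     try list.print("{d} items", .{3});
+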
+        pub const Writer = if (T != u8) void else std.io.GenericWriter(*Self, Allocator.Error, appendWrite);
 
         /// Initializes a Writer which will append to the list.
         pub fn writer(self: *Self) Writer {
@@ -350,14 +353,14 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
         }
 
         /// Same as `append` except it returns the number of bytes written, which is always the same
-        /// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API.
+        /// as `m.len`. The purpose of this function existing is to match `std.io.GenericWriter` API.
         /// Invalidates element pointers if additional memory is needed.
         fn appendWrite(self: *Self, m: []const u8) Allocator.Error!usize {
             try self.appendSlice(m);
             return m.len;
         }
 
-        pub const FixedWriter = std.io.Writer(*Self, Allocator.Error, appendWriteFixed);
+        pub const FixedWriter = std.io.GenericWriter(*Self, Allocator.Error, appendWriteFixed);
 
         /// Initializes a Writer which will append to the list but will return
         /// `error.OutOfMemory` rather than increasing capacity.
@@ -365,7 +368,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
             return .{ .context = self };
         }
 
-        /// The purpose of this function existing is to match `std.io.Writer` API.
+        /// The purpose of this function existing is to match `std.io.GenericWriter` API.
         fn appendWriteFixed(self: *Self, m: []const u8) error{OutOfMemory}!usize {
             const available_capacity = self.capacity - self.items.len;
             if (m.len > available_capacity)
@@ -933,40 +936,56 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
             @memcpy(self.items[old_len..][0..items.len], items);
         }
 
+        pub fn print(self: *Self, gpa: Allocator, comptime fmt: []const u8, args: anytype) error{OutOfMemory}!void {
+            comptime assert(T == u8);
+            try self.ensureUnusedCapacity(gpa, fmt.len);
+            var aw: std.io.Writer.Allocating = .fromArrayList(gpa, self);
+            defer self.* = aw.toArrayList();
+            return aw.interface.print(fmt, args) catch |err| switch (err) {
+                error.WriteFailed => return error.OutOfMemory,
+            };
+        }
+
+        pub fn printAssumeCapacity(self: *Self, comptime fmt: []const u8, args: anytype) void {
+            comptime assert(T == u8);
+            var w: std.io.Writer = .fixed(self.unusedCapacitySlice());
+            w.print(fmt, args) catch unreachable;
+            self.items.len += w.end;
+        }
+
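+        // Example (editor's sketch, not part of the original commit); `gpa`
+        // is assumed to be a `std.mem.Allocator`:
+        //
+        //     var list: std.ArrayListUnmanaged(u8) = .empty;
+        //     defer list.deinit(gpa);
+        //     try list.print(gpa, "{d} items", .{3});
+        //     // list.items now equals "3 items"
+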
+        /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
         pub const WriterContext = struct {
             self: *Self,
             allocator: Allocator,
         };
 
+        /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
         pub const Writer = if (T != u8)
             @compileError("The Writer interface is only defined for ArrayList(u8) " ++
                 "but the given type is ArrayList(" ++ @typeName(T) ++ ")")
         else
-            std.io.Writer(WriterContext, Allocator.Error, appendWrite);
+            std.io.GenericWriter(WriterContext, Allocator.Error, appendWrite);
 
-        /// Initializes a Writer which will append to the list.
+        /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
         pub fn writer(self: *Self, gpa: Allocator) Writer {
             return .{ .context = .{ .self = self, .allocator = gpa } };
         }
 
-        /// Same as `append` except it returns the number of bytes written,
-        /// which is always the same as `m.len`. The purpose of this function
-        /// existing is to match `std.io.Writer` API.
-        /// Invalidates element pointers if additional memory is needed.
+        /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
         fn appendWrite(context: WriterContext, m: []const u8) Allocator.Error!usize {
             try context.self.appendSlice(context.allocator, m);
             return m.len;
         }
 
-        pub const FixedWriter = std.io.Writer(*Self, Allocator.Error, appendWriteFixed);
+        /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
+        pub const FixedWriter = std.io.GenericWriter(*Self, Allocator.Error, appendWriteFixed);
 
-        /// Initializes a Writer which will append to the list but will return
-        /// `error.OutOfMemory` rather than increasing capacity.
+        /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
         pub fn fixedWriter(self: *Self) FixedWriter {
             return .{ .context = self };
         }
 
-        /// The purpose of this function existing is to match `std.io.Writer` API.
+        /// Deprecated in favor of `print` or `std.io.Writer.Allocating`.
         fn appendWriteFixed(self: *Self, m: []const u8) error{OutOfMemory}!usize {
             const available_capacity = self.capacity - self.items.len;
             if (m.len > available_capacity)
lib/std/base64.zig
@@ -108,7 +108,7 @@ pub const Base64Encoder = struct {
         }
     }
 
-    // dest must be compatible with std.io.Writer's writeAll interface
+    // dest must be compatible with std.io.GenericWriter's writeAll interface
     pub fn encodeWriter(encoder: *const Base64Encoder, dest: anytype, source: []const u8) !void {
         var chunker = window(u8, source, 3, 3);
         while (chunker.next()) |chunk| {
@@ -118,8 +118,8 @@ pub const Base64Encoder = struct {
         }
     }
 
-    // destWriter must be compatible with std.io.Writer's writeAll interface
-    // sourceReader must be compatible with std.io.Reader's read interface
+    // destWriter must be compatible with std.io.GenericWriter's writeAll interface
+    // sourceReader must be compatible with std.io.GenericReader's read interface
     pub fn encodeFromReaderToWriter(encoder: *const Base64Encoder, destWriter: anytype, sourceReader: anytype) !void {
         while (true) {
             var tempSource: [3]u8 = undefined;
lib/std/bounded_array.zig
@@ -277,7 +277,7 @@ pub fn BoundedArrayAligned(
             @compileError("The Writer interface is only defined for BoundedArray(u8, ...) " ++
                 "but the given type is BoundedArray(" ++ @typeName(T) ++ ", ...)")
         else
-            std.io.Writer(*Self, error{Overflow}, appendWrite);
+            std.io.GenericWriter(*Self, error{Overflow}, appendWrite);
 
         /// Initializes a writer which will write into the array.
         pub fn writer(self: *Self) Writer {
@@ -285,7 +285,7 @@ pub fn BoundedArrayAligned(
         }
 
         /// Same as `appendSlice` except it returns the number of bytes written, which is always the same
-        /// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API.
+        /// as `m.len`. The purpose of this function existing is to match `std.io.GenericWriter` API.
         fn appendWrite(self: *Self, m: []const u8) error{Overflow}!usize {
             try self.appendSlice(m);
             return m.len;
lib/std/compress.zig
@@ -16,7 +16,7 @@ pub fn HashedReader(ReaderType: type, HasherType: type) type {
         hasher: HasherType,
 
         pub const Error = ReaderType.Error;
-        pub const Reader = std.io.Reader(*@This(), Error, read);
+        pub const Reader = std.io.GenericReader(*@This(), Error, read);
 
         pub fn read(self: *@This(), buf: []u8) Error!usize {
             const amt = try self.child_reader.read(buf);
@@ -43,7 +43,7 @@ pub fn HashedWriter(WriterType: type, HasherType: type) type {
         hasher: HasherType,
 
         pub const Error = WriterType.Error;
-        pub const Writer = std.io.Writer(*@This(), Error, write);
+        pub const Writer = std.io.GenericWriter(*@This(), Error, write);
 
         pub fn write(self: *@This(), buf: []const u8) Error!usize {
             const amt = try self.child_writer.write(buf);
lib/std/fifo.zig
@@ -38,8 +38,8 @@ pub fn LinearFifo(
         count: usize,
 
         const Self = @This();
-        pub const Reader = std.io.Reader(*Self, error{}, readFn);
-        pub const Writer = std.io.Writer(*Self, error{OutOfMemory}, appendWrite);
+        pub const Reader = std.io.GenericReader(*Self, error{}, readFn);
+        pub const Writer = std.io.GenericWriter(*Self, error{OutOfMemory}, appendWrite);
 
         // Type of Self argument for slice operations.
         // If buffer is inline (Static) then we need to ensure we haven't
@@ -231,7 +231,7 @@ pub fn LinearFifo(
         }
 
         /// Same as `read` except it returns an error union
-        /// The purpose of this function existing is to match `std.io.Reader` API.
+        /// The purpose of this function existing is to match `std.io.GenericReader` API.
         fn readFn(self: *Self, dest: []u8) error{}!usize {
             return self.read(dest);
         }
@@ -320,7 +320,7 @@ pub fn LinearFifo(
         }
 
         /// Same as `write` except it returns the number of bytes written, which is always the same
-        /// as `bytes.len`. The purpose of this function existing is to match `std.io.Writer` API.
+        /// as `bytes.len`. The purpose of this function existing is to match `std.io.GenericWriter` API.
         fn appendWrite(self: *Self, bytes: []const u8) error{OutOfMemory}!usize {
             try self.write(bytes);
             return bytes.len;
lib/std/io.zig
@@ -14,6 +14,69 @@ const File = std.fs.File;
 const Allocator = std.mem.Allocator;
 const Alignment = std.mem.Alignment;
 
+pub const Limit = enum(usize) {
+    nothing = 0,
+    unlimited = std.math.maxInt(usize),
+    _,
+
+    /// `std.math.maxInt(usize)` is interpreted to mean `.unlimited`.
+    pub fn limited(n: usize) Limit {
+        return @enumFromInt(n);
+    }
+
+    pub fn countVec(data: []const []const u8) Limit {
+        var total: usize = 0;
+        for (data) |d| total += d.len;
+        return .limited(total);
+    }
+
+    pub fn min(a: Limit, b: Limit) Limit {
+        return @enumFromInt(@min(@intFromEnum(a), @intFromEnum(b)));
+    }
+
+    pub fn minInt(l: Limit, n: usize) usize {
+        return @min(n, @intFromEnum(l));
+    }
+
+    pub fn slice(l: Limit, s: []u8) []u8 {
+        return s[0..l.minInt(s.len)];
+    }
+
+    pub fn sliceConst(l: Limit, s: []const u8) []const u8 {
+        return s[0..l.minInt(s.len)];
+    }
+
+    pub fn toInt(l: Limit) ?usize {
+        return switch (l) {
+            else => @intFromEnum(l),
+            .unlimited => null,
+        };
+    }
+
+    /// Reduces a slice to account for the limit, leaving room for one extra
+    /// byte above the limit, allowing for the use case of differentiating
+    /// between end-of-stream and reaching the limit.
+    pub fn slice1(l: Limit, non_empty_buffer: []u8) []u8 {
+        assert(non_empty_buffer.len >= 1);
+        return non_empty_buffer[0..@min(@intFromEnum(l) +| 1, non_empty_buffer.len)];
+    }
+
+    pub fn nonzero(l: Limit) bool {
+        return @intFromEnum(l) > 0;
+    }
+
+    /// Return a new limit reduced by `amount` or return `null` indicating
+    /// limit would be exceeded.
+    pub fn subtract(l: Limit, amount: usize) ?Limit {
+        if (l == .unlimited) return .unlimited;
+        if (amount > @intFromEnum(l)) return null;
+        return @enumFromInt(@intFromEnum(l) - amount);
+    }
+};
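+
+// Example (editor's sketch, not part of the original commit): clamping a
+// destination buffer and tracking the remaining budget with `Limit`.
+//
+//     var buf: [32]u8 = undefined;
+//     const limit: Limit = .limited(8);
+//     const dest = limit.slice(&buf); // dest.len == 8
+//     const dest1 = limit.slice1(&buf); // dest1.len == 9: one byte past the limit
+//     const rest = limit.subtract(dest.len).?; // rest == .nothing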
+
+pub const Reader = @import("io/Reader.zig");
+pub const Writer = @import("io/Writer.zig");
+
 fn getStdOutHandle() posix.fd_t {
     if (is_windows) {
         return windows.peb().ProcessParameters.hStdOutput;
@@ -62,6 +125,7 @@ pub fn getStdIn() File {
     return .{ .handle = getStdInHandle() };
 }
 
+/// Deprecated in favor of `Reader`.
 pub fn GenericReader(
     comptime Context: type,
     comptime ReadError: type,
@@ -289,6 +353,7 @@ pub fn GenericReader(
     };
 }
 
+/// Deprecated in favor of `Writer`.
 pub fn GenericWriter(
     comptime Context: type,
     comptime WriteError: type,
@@ -350,15 +415,10 @@ pub fn GenericWriter(
     };
 }
 
-/// Deprecated; consider switching to `AnyReader` or use `GenericReader`
-/// to use previous API.
-pub const Reader = GenericReader;
-/// Deprecated; consider switching to `AnyWriter` or use `GenericWriter`
-/// to use previous API.
-pub const Writer = GenericWriter;
-
-pub const AnyReader = @import("io/Reader.zig");
-pub const AnyWriter = @import("io/Writer.zig");
+/// Deprecated in favor of `Reader`.
+pub const AnyReader = @import("io/DeprecatedReader.zig");
+/// Deprecated in favor of `Writer`.
+pub const AnyWriter = @import("io/DeprecatedWriter.zig");
 
 pub const SeekableStream = @import("io/seekable_stream.zig").SeekableStream;
 
@@ -819,8 +879,8 @@ pub fn PollFiles(comptime StreamEnum: type) type {
 }
 
 test {
-    _ = AnyReader;
-    _ = AnyWriter;
+    _ = Reader;
+    _ = Writer;
     _ = @import("io/bit_reader.zig");
     _ = @import("io/bit_writer.zig");
     _ = @import("io/buffered_atomic_file.zig");
lib/std/json.zig
@@ -1,12 +1,12 @@
 //! JSON parsing and stringification conforming to RFC 8259. https://datatracker.ietf.org/doc/html/rfc8259
 //!
 //! The low-level `Scanner` API produces `Token`s from an input slice or successive slices of inputs,
-//! The `Reader` API connects a `std.io.Reader` to a `Scanner`.
+//! The `Reader` API connects a `std.io.GenericReader` to a `Scanner`.
 //!
 //! The high-level `parseFromSlice` and `parseFromTokenSource` deserialize a JSON document into a Zig type.
 //! Parse into a dynamically-typed `Value` to load any JSON value for runtime inspection.
 //!
-//! The low-level `writeStream` emits syntax-conformant JSON tokens to a `std.io.Writer`.
+//! The low-level `writeStream` emits syntax-conformant JSON tokens to a `std.io.GenericWriter`.
 //! The high-level `stringify` serializes a Zig or `Value` type into JSON.
 
 const builtin = @import("builtin");
lib/std/net.zig
@@ -1845,8 +1845,8 @@ pub const Stream = struct {
     pub const ReadError = posix.ReadError;
     pub const WriteError = posix.WriteError;
 
-    pub const Reader = io.Reader(Stream, ReadError, read);
-    pub const Writer = io.Writer(Stream, WriteError, write);
+    pub const Reader = io.GenericReader(Stream, ReadError, read);
+    pub const Writer = io.GenericWriter(Stream, WriteError, write);
 
     pub fn reader(self: Stream) Reader {
         return .{ .context = self };
lib/std/tar.zig
@@ -348,7 +348,7 @@ pub fn Iterator(comptime ReaderType: type) type {
             unread_bytes: *u64,
             parent_reader: ReaderType,
 
-            pub const Reader = std.io.Reader(File, ReaderType.Error, File.read);
+            pub const Reader = std.io.GenericReader(File, ReaderType.Error, File.read);
 
             pub fn reader(self: File) Reader {
                 return .{ .context = self };
lib/std/zip.zig
@@ -106,7 +106,7 @@ pub const EndRecord = extern struct {
 /// Find and return the end record for the given seekable zip stream.
 /// Note that `seekable_stream` must be an instance of `std.io.SeekableStream` and
 /// its context must also have a `.reader()` method that returns an instance of
-/// `std.io.Reader`.
+/// `std.io.GenericReader`.
 pub fn findEndRecord(seekable_stream: anytype, stream_len: u64) !EndRecord {
     var buf: [@sizeOf(EndRecord) + std.math.maxInt(u16)]u8 = undefined;
     const record_len_max = @min(stream_len, buf.len);
@@ -617,7 +617,7 @@ pub const ExtractOptions = struct {
 /// Extract the zipped files inside `seekable_stream` to the given `dest` directory.
 /// Note that `seekable_stream` must be an instance of `std.io.SeekableStream` and
 /// its context must also have a `.reader()` method that returns an instance of
-/// `std.io.Reader`.
+/// `std.io.GenericReader`.
 pub fn extract(dest: std.fs.Dir, seekable_stream: anytype, options: ExtractOptions) !void {
     const SeekableStream = @TypeOf(seekable_stream);
     var iter = try Iterator(SeekableStream).init(seekable_stream);
src/Package/Fetch/git.zig
@@ -1026,7 +1026,7 @@ pub const Session = struct {
             ProtocolError,
             UnexpectedPacket,
         };
-        pub const Reader = std.io.Reader(*FetchStream, ReadError, read);
+        pub const Reader = std.io.GenericReader(*FetchStream, ReadError, read);
 
         const StreamCode = enum(u8) {
             pack_data = 1,