Commit ba0e3be5cf

Andrew Kelley <andrew@ziglang.org>
2020-03-10 20:27:45
(breaking) rework stream abstractions
The main goal here is to make the function pointers comptime, so that we don't have to do the crazy stuff with async function frames. Since InStream, OutStream, and SeekableStream are already generic across error sets, it's not really worse to make them generic across the vtable as well.

See #764 for the open issue acknowledging that using generics for these abstractions is a design flaw. See #130 for the efforts to make these abstractions non-generic.

This commit also changes the OutStream API so that `write` returns the number of bytes written, and `writeAll` is the one that loops until the whole buffer is written.
1 parent 1ad831a
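
For reference, a minimal sketch (not part of the commit) of what implementing a stream looks like against the new comptime-vtable API introduced here; the DiscardingStream type and its field names are hypothetical:

    const std = @import("std");

    /// Hypothetical stream that discards its input while counting bytes.
    const DiscardingStream = struct {
        bytes_discarded: u64 = 0,

        // The write function is baked in at comptime rather than stored as a
        // runtime function pointer in a vtable field.
        pub const OutStream = std.io.OutStream(*DiscardingStream, error{}, write);

        /// Per the new API, `write` returns the number of bytes consumed,
        /// which may be less than `bytes.len`; `writeAll` loops until done.
        pub fn write(self: *DiscardingStream, bytes: []const u8) error{}!usize {
            self.bytes_discarded += bytes.len;
            return bytes.len;
        }

        pub fn outStream(self: *DiscardingStream) OutStream {
            return .{ .context = self };
        }
    };

    test "DiscardingStream example" {
        var ds = DiscardingStream{};
        try ds.outStream().writeAll("hello");
        std.testing.expect(ds.bytes_discarded == 5);
    }

Because the context and write function are comptime parameters of the returned type, calls such as writeAll resolve to direct calls, which is what removes the need for the async-frame machinery mentioned above.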
lib/std/fs/file.zig
@@ -71,7 +71,7 @@ pub const File = struct {
         if (need_async_thread and self.io_mode == .blocking and !self.async_block_allowed) {
             std.event.Loop.instance.?.close(self.handle);
         } else {
-            return os.close(self.handle);
+            os.close(self.handle);
         }
     }
 
@@ -496,85 +496,29 @@ pub const File = struct {
         }
     }
 
-    pub fn inStream(file: File) InStream {
-        return InStream{
-            .file = file,
-            .stream = InStream.Stream{ .readFn = InStream.readFn },
-        };
+    pub const InStream = io.InStream(File, ReadError, read);
+
+    pub fn inStream(file: File) io.InStream(File, ReadError, read) {
+        return .{ .context = file };
     }
 
+    pub const OutStream = io.OutStream(File, WriteError, write);
+
     pub fn outStream(file: File) OutStream {
-        return OutStream{
-            .file = file,
-            .stream = OutStream.Stream{ .writeFn = OutStream.writeFn },
-        };
+        return .{ .context = file };
     }
 
+    pub const SeekableStream = io.SeekableStream(
+        File,
+        SeekError,
+        GetPosError,
+        seekTo,
+        seekBy,
+        getPos,
+        getEndPos,
+    );
+
     pub fn seekableStream(file: File) SeekableStream {
-        return SeekableStream{
-            .file = file,
-            .stream = SeekableStream.Stream{
-                .seekToFn = SeekableStream.seekToFn,
-                .seekByFn = SeekableStream.seekByFn,
-                .getPosFn = SeekableStream.getPosFn,
-                .getEndPosFn = SeekableStream.getEndPosFn,
-            },
-        };
+        return .{ .context = file };
     }
-
-    /// Implementation of io.InStream trait for File
-    pub const InStream = struct {
-        file: File,
-        stream: Stream,
-
-        pub const Error = ReadError;
-        pub const Stream = io.InStream(Error);
-
-        fn readFn(in_stream: *Stream, buffer: []u8) Error!usize {
-            const self = @fieldParentPtr(InStream, "stream", in_stream);
-            return self.file.read(buffer);
-        }
-    };
-
-    /// Implementation of io.OutStream trait for File
-    pub const OutStream = struct {
-        file: File,
-        stream: Stream,
-
-        pub const Error = WriteError;
-        pub const Stream = io.OutStream(Error);
-
-        fn writeFn(out_stream: *Stream, bytes: []const u8) Error!usize {
-            const self = @fieldParentPtr(OutStream, "stream", out_stream);
-            return self.file.write(bytes);
-        }
-    };
-
-    /// Implementation of io.SeekableStream trait for File
-    pub const SeekableStream = struct {
-        file: File,
-        stream: Stream,
-
-        pub const Stream = io.SeekableStream(SeekError, GetPosError);
-
-        pub fn seekToFn(seekable_stream: *Stream, pos: u64) SeekError!void {
-            const self = @fieldParentPtr(SeekableStream, "stream", seekable_stream);
-            return self.file.seekTo(pos);
-        }
-
-        pub fn seekByFn(seekable_stream: *Stream, amt: i64) SeekError!void {
-            const self = @fieldParentPtr(SeekableStream, "stream", seekable_stream);
-            return self.file.seekBy(amt);
-        }
-
-        pub fn getEndPosFn(seekable_stream: *Stream) GetPosError!u64 {
-            const self = @fieldParentPtr(SeekableStream, "stream", seekable_stream);
-            return self.file.getEndPos();
-        }
-
-        pub fn getPosFn(seekable_stream: *Stream) GetPosError!u64 {
-            const self = @fieldParentPtr(SeekableStream, "stream", seekable_stream);
-            return self.file.getPos();
-        }
-    };
 };
lib/std/io/buffered_atomic_file.zig
@@ -0,0 +1,50 @@
+const std = @import("../std.zig");
+const mem = std.mem;
+const fs = std.fs;
+const File = std.fs.File;
+
+pub const BufferedAtomicFile = struct {
+    atomic_file: fs.AtomicFile,
+    file_stream: File.OutStream,
+    buffered_stream: BufferedOutStream,
+    allocator: *mem.Allocator,
+
+    pub const buffer_size = 4096;
+    pub const BufferedOutStream = std.io.BufferedOutStreamCustom(buffer_size, File.OutStream);
+    pub const OutStream = std.io.OutStream(*BufferedOutStream, BufferedOutStream.Error, BufferedOutStream.write);
+
+    /// TODO when https://github.com/ziglang/zig/issues/2761 is solved
+    /// this API will not need an allocator
+    pub fn create(allocator: *mem.Allocator, dest_path: []const u8) !*BufferedAtomicFile {
+        var self = try allocator.create(BufferedAtomicFile);
+        self.* = BufferedAtomicFile{
+            .atomic_file = undefined,
+            .file_stream = undefined,
+            .buffered_stream = undefined,
+            .allocator = allocator,
+        };
+        errdefer allocator.destroy(self);
+
+        self.atomic_file = try fs.AtomicFile.init(dest_path, File.default_mode);
+        errdefer self.atomic_file.deinit();
+
+        self.file_stream = self.atomic_file.file.outStream();
+        self.buffered_stream = std.io.bufferedOutStream(buffer_size, self.file_stream);
+        return self;
+    }
+
+    /// always call destroy, even after successful finish()
+    pub fn destroy(self: *BufferedAtomicFile) void {
+        self.atomic_file.deinit();
+        self.allocator.destroy(self);
+    }
+
+    pub fn finish(self: *BufferedAtomicFile) !void {
+        try self.buffered_stream.flush();
+        try self.atomic_file.finish();
+    }
+
+    pub fn stream(self: *BufferedAtomicFile) OutStream {
+        return .{ .context = &self.buffered_stream };
+    }
+};
lib/std/io/buffered_out_stream.zig
@@ -0,0 +1,56 @@
+const std = @import("../std.zig");
+const io = std.io;
+
+pub fn BufferedOutStream(comptime OutStreamType: type) type {
+    return BufferedOutStreamCustom(4096, OutStreamType);
+}
+
+pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamType: type) type {
+    return struct {
+        unbuffered_out_stream: OutStreamType,
+        fifo: FifoType,
+
+        pub const Error = OutStreamType.Error;
+        pub const OutStream = io.OutStream(*Self, Error, write);
+
+        const Self = @This();
+        const FifoType = std.fifo.LinearFifo(u8, std.fifo.LinearFifoBufferType{ .Static = buffer_size });
+
+        pub fn init(unbuffered_out_stream: OutStreamType) Self {
+            return Self{
+                .unbuffered_out_stream = unbuffered_out_stream,
+                .fifo = FifoType.init(),
+            };
+        }
+
+        pub fn flush(self: *Self) !void {
+            while (true) {
+                const slice = self.fifo.readableSlice(0);
+                if (slice.len == 0) break;
+                try self.unbuffered_out_stream.writeAll(slice);
+                self.fifo.discard(slice.len);
+            }
+        }
+
+        pub fn outStream(self: *Self) OutStream {
+            return .{ .context = self };
+        }
+
+        pub fn write(self: *Self, bytes: []const u8) Error!usize {
+            if (bytes.len >= self.fifo.writableLength()) {
+                try self.flush();
+                return self.unbuffered_out_stream.write(bytes);
+            }
+            self.fifo.writeAssumeCapacity(bytes);
+            return bytes.len;
+        }
+    };
+}
+
+pub fn bufferedOutStream(
+    comptime buffer_size: usize,
+    underlying_stream: var,
+) BufferedOutStreamCustom(buffer_size, @TypeOf(underlying_stream)) {
+    return BufferedOutStreamCustom(buffer_size, @TypeOf(underlying_stream)).init(underlying_stream);
+}
+
lib/std/io/c_out_stream.zig
@@ -1,43 +0,0 @@
-const std = @import("../std.zig");
-const os = std.os;
-const OutStream = std.io.OutStream;
-const builtin = @import("builtin");
-
-/// TODO make a proposal to make `std.fs.File` use *FILE when linking libc and this just becomes
-/// std.io.FileOutStream because std.fs.File.write would do this when linking
-/// libc.
-pub const COutStream = struct {
-    pub const Error = std.fs.File.WriteError;
-    pub const Stream = OutStream(Error);
-
-    stream: Stream,
-    c_file: *std.c.FILE,
-
-    pub fn init(c_file: *std.c.FILE) COutStream {
-        return COutStream{
-            .c_file = c_file,
-            .stream = Stream{ .writeFn = writeFn },
-        };
-    }
-
-    fn writeFn(out_stream: *Stream, bytes: []const u8) Error!usize {
-        const self = @fieldParentPtr(COutStream, "stream", out_stream);
-        const amt_written = std.c.fwrite(bytes.ptr, 1, bytes.len, self.c_file);
-        if (amt_written >= 0) return amt_written;
-        switch (std.c._errno().*) {
-            0 => unreachable,
-            os.EINVAL => unreachable,
-            os.EFAULT => unreachable,
-            os.EAGAIN => unreachable, // this is a blocking API
-            os.EBADF => unreachable, // always a race condition
-            os.EDESTADDRREQ => unreachable, // connect was never called
-            os.EDQUOT => return error.DiskQuota,
-            os.EFBIG => return error.FileTooBig,
-            os.EIO => return error.InputOutput,
-            os.ENOSPC => return error.NoSpaceLeft,
-            os.EPERM => return error.AccessDenied,
-            os.EPIPE => return error.BrokenPipe,
-            else => |err| return os.unexpectedErrno(@intCast(usize, err)),
-        }
-    }
-};
lib/std/io/counting_out_stream.zig
@@ -0,0 +1,42 @@
+const std = @import("../std.zig");
+const io = std.io;
+
+/// An OutStream that counts how many bytes have been written to it.
+pub fn CountingOutStream(comptime OutStreamType: type) type {
+    return struct {
+        bytes_written: u64,
+        child_stream: OutStreamType,
+
+        pub const Error = OutStreamType.Error;
+        pub const OutStream = io.OutStream(*Self, Error, write);
+
+        const Self = @This();
+
+        pub fn init(child_stream: OutStreamType) Self {
+            return Self{
+                .bytes_written = 0,
+                .child_stream = child_stream,
+            };
+        }
+
+        pub fn write(self: *Self, bytes: []const u8) Error!usize {
+            const amt = try self.child_stream.write(bytes);
+            self.bytes_written += amt;
+            return amt;
+        }
+
+        pub fn outStream(self: *Self) OutStream {
+            return .{ .context = self };
+        }
+    };
+}
+
+test "io.CountingOutStream" {
+    var counting_stream = CountingOutStream(@TypeOf(std.io.null_out_stream)).init(std.io.null_out_stream);
+    const stream = counting_stream.outStream();
+
+    const bytes = "yay" ** 10000;
+    stream.writeAll(bytes) catch unreachable;
+    std.testing.expect(counting_stream.bytes_written == bytes.len);
+}
+
lib/std/io/fixed_buffer_stream.zig
@@ -0,0 +1,66 @@
+const std = @import("../std.zig");
+const io = std.io;
+
+pub const FixedBufferInStream = struct {
+    bytes: []const u8,
+    pos: usize,
+
+    pub const SeekError = error{EndOfStream};
+    pub const GetSeekPosError = error{};
+
+    pub const InStream = io.InStream(*FixedBufferInStream, error{}, read);
+
+    pub fn inStream(self: *FixedBufferInStream) InStream {
+        return .{ .context = self };
+    }
+
+    pub const SeekableStream = io.SeekableStream(
+        *FixedBufferInStream,
+        SeekError,
+        GetSeekPosError,
+        seekTo,
+        seekBy,
+        getPos,
+        getEndPos,
+    );
+
+    pub fn seekableStream(self: *FixedBufferInStream) SeekableStream {
+        return .{ .context = self };
+    }
+
+    pub fn read(self: *FixedBufferInStream, dest: []u8) error{}!usize {
+        const size = std.math.min(dest.len, self.bytes.len - self.pos);
+        const end = self.pos + size;
+
+        std.mem.copy(u8, dest[0..size], self.bytes[self.pos..end]);
+        self.pos = end;
+
+        return size;
+    }
+
+    pub fn seekTo(self: *FixedBufferInStream, pos: u64) SeekError!void {
+        const usize_pos = std.math.cast(usize, pos) catch return error.EndOfStream;
+        if (usize_pos > self.bytes.len) return error.EndOfStream;
+        self.pos = usize_pos;
+    }
+
+    pub fn seekBy(self: *FixedBufferInStream, amt: i64) SeekError!void {
+        if (amt < 0) {
+            const abs_amt = std.math.cast(usize, -amt) catch return error.EndOfStream;
+            if (abs_amt > self.pos) return error.EndOfStream;
+            self.pos -= abs_amt;
+        } else {
+            const usize_amt = std.math.cast(usize, amt) catch return error.EndOfStream;
+            if (self.pos + usize_amt > self.bytes.len) return error.EndOfStream;
+            self.pos += usize_amt;
+        }
+    }
+
+    pub fn getEndPos(self: *FixedBufferInStream) GetSeekPosError!u64 {
+        return self.bytes.len;
+    }
+
+    pub fn getPos(self: *FixedBufferInStream) GetSeekPosError!u64 {
+        return self.pos;
+    }
+};
lib/std/io/in_stream.zig
@@ -1,44 +1,31 @@
 const std = @import("../std.zig");
-const builtin = @import("builtin");
-const root = @import("root");
+const builtin = std.builtin;
 const math = std.math;
 const assert = std.debug.assert;
 const mem = std.mem;
 const Buffer = std.Buffer;
 const testing = std.testing;
 
-pub const default_stack_size = 1 * 1024 * 1024;
-pub const stack_size: usize = if (@hasDecl(root, "stack_size_std_io_InStream"))
-    root.stack_size_std_io_InStream
-else
-    default_stack_size;
-
-pub fn InStream(comptime ReadError: type) type {
+pub fn InStream(
+    comptime Context: type,
+    comptime ReadError: type,
+    /// Returns the number of bytes read. It may be less than buffer.len.
+    /// If the number of bytes read is 0, it means end of stream.
+    /// End of stream is not an error condition.
+    comptime readFn: fn (context: Context, buffer: []u8) ReadError!usize,
+) type {
     return struct {
-        const Self = @This();
         pub const Error = ReadError;
-        pub const ReadFn = if (std.io.is_async)
-            async fn (self: *Self, buffer: []u8) Error!usize
-        else
-            fn (self: *Self, buffer: []u8) Error!usize;
 
-        /// Returns the number of bytes read. It may be less than buffer.len.
-        /// If the number of bytes read is 0, it means end of stream.
-        /// End of stream is not an error condition.
-        readFn: ReadFn,
+        context: Context,
+
+        const Self = @This();
 
         /// Returns the number of bytes read. It may be less than buffer.len.
         /// If the number of bytes read is 0, it means end of stream.
         /// End of stream is not an error condition.
-        pub fn read(self: *Self, buffer: []u8) Error!usize {
-            if (std.io.is_async) {
-                // Let's not be writing 0xaa in safe modes for upwards of 4 MiB for every stream read.
-                @setRuntimeSafety(false);
-                var stack_frame: [stack_size]u8 align(std.Target.stack_align) = undefined;
-                return await @asyncCall(&stack_frame, {}, self.readFn, self, buffer);
-            } else {
-                return self.readFn(self, buffer);
-            }
+        pub fn read(self: Self, buffer: []u8) Error!usize {
+            return readFn(self.context, buffer);
         }
 
         /// Deprecated: use `readAll`.
@@ -47,7 +34,7 @@ pub fn InStream(comptime ReadError: type) type {
         /// Returns the number of bytes read. If the number read is smaller than buf.len, it
         /// means the stream reached the end. Reaching the end of a stream is not an error
         /// condition.
-        pub fn readAll(self: *Self, buffer: []u8) Error!usize {
+        pub fn readAll(self: Self, buffer: []u8) Error!usize {
             var index: usize = 0;
             while (index != buffer.len) {
                 const amt = try self.read(buffer[index..]);
@@ -59,13 +46,13 @@ pub fn InStream(comptime ReadError: type) type {
 
         /// Returns the number of bytes read. If the number read would be smaller than buf.len,
         /// error.EndOfStream is returned instead.
-        pub fn readNoEof(self: *Self, buf: []u8) !void {
+        pub fn readNoEof(self: Self, buf: []u8) !void {
             const amt_read = try self.readAll(buf);
             if (amt_read < buf.len) return error.EndOfStream;
         }
 
         /// Deprecated: use `readAllArrayList`.
-        pub fn readAllBuffer(self: *Self, buffer: *Buffer, max_size: usize) !void {
+        pub fn readAllBuffer(self: Self, buffer: *Buffer, max_size: usize) !void {
             buffer.list.shrink(0);
             try self.readAllArrayList(&buffer.list, max_size);
             errdefer buffer.shrink(0);
@@ -75,7 +62,7 @@ pub fn InStream(comptime ReadError: type) type {
         /// Appends to the `std.ArrayList` contents by reading from the stream until end of stream is found.
         /// If the number of bytes appended would exceed `max_append_size`, `error.StreamTooLong` is returned
         /// and the `std.ArrayList` has exactly `max_append_size` bytes appended.
-        pub fn readAllArrayList(self: *Self, array_list: *std.ArrayList(u8), max_append_size: usize) !void {
+        pub fn readAllArrayList(self: Self, array_list: *std.ArrayList(u8), max_append_size: usize) !void {
             try array_list.ensureCapacity(math.min(max_append_size, 4096));
             const original_len = array_list.len;
             var start_index: usize = original_len;
@@ -104,7 +91,7 @@ pub fn InStream(comptime ReadError: type) type {
         /// memory would be greater than `max_size`, returns `error.StreamTooLong`.
         /// Caller owns returned memory.
         /// If this function returns an error, the contents from the stream read so far are lost.
-        pub fn readAllAlloc(self: *Self, allocator: *mem.Allocator, max_size: usize) ![]u8 {
+        pub fn readAllAlloc(self: Self, allocator: *mem.Allocator, max_size: usize) ![]u8 {
             var array_list = std.ArrayList(u8).init(allocator);
             defer array_list.deinit();
             try self.readAllArrayList(&array_list, max_size);
@@ -116,7 +103,7 @@ pub fn InStream(comptime ReadError: type) type {
         /// If the `std.ArrayList` length would exceed `max_size`, `error.StreamTooLong` is returned and the
         /// `std.ArrayList` is populated with `max_size` bytes from the stream.
         pub fn readUntilDelimiterArrayList(
-            self: *Self,
+            self: Self,
             array_list: *std.ArrayList(u8),
             delimiter: u8,
             max_size: usize,
@@ -142,7 +129,7 @@ pub fn InStream(comptime ReadError: type) type {
         /// Caller owns returned memory.
         /// If this function returns an error, the contents from the stream read so far are lost.
         pub fn readUntilDelimiterAlloc(
-            self: *Self,
+            self: Self,
             allocator: *mem.Allocator,
             delimiter: u8,
             max_size: usize,
@@ -159,7 +146,7 @@ pub fn InStream(comptime ReadError: type) type {
         /// function is called again after that, returns null.
         /// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
         /// delimiter byte is not included in the returned slice.
-        pub fn readUntilDelimiterOrEof(self: *Self, buf: []u8, delimiter: u8) !?[]u8 {
+        pub fn readUntilDelimiterOrEof(self: Self, buf: []u8, delimiter: u8) !?[]u8 {
             var index: usize = 0;
             while (true) {
                 const byte = self.readByte() catch |err| switch (err) {
@@ -184,7 +171,7 @@ pub fn InStream(comptime ReadError: type) type {
         /// Reads from the stream until specified byte is found, discarding all data,
         /// including the delimiter.
         /// If end-of-stream is found, this function succeeds.
-        pub fn skipUntilDelimiterOrEof(self: *Self, delimiter: u8) !void {
+        pub fn skipUntilDelimiterOrEof(self: Self, delimiter: u8) !void {
             while (true) {
                 const byte = self.readByte() catch |err| switch (err) {
                     error.EndOfStream => return,
@@ -195,7 +182,7 @@ pub fn InStream(comptime ReadError: type) type {
         }
 
         /// Reads 1 byte from the stream or returns `error.EndOfStream`.
-        pub fn readByte(self: *Self) !u8 {
+        pub fn readByte(self: Self) !u8 {
             var result: [1]u8 = undefined;
             const amt_read = try self.read(result[0..]);
             if (amt_read < 1) return error.EndOfStream;
@@ -203,43 +190,43 @@ pub fn InStream(comptime ReadError: type) type {
         }
 
         /// Same as `readByte` except the returned byte is signed.
-        pub fn readByteSigned(self: *Self) !i8 {
+        pub fn readByteSigned(self: Self) !i8 {
             return @bitCast(i8, try self.readByte());
         }
 
         /// Reads a native-endian integer
-        pub fn readIntNative(self: *Self, comptime T: type) !T {
+        pub fn readIntNative(self: Self, comptime T: type) !T {
             var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
             try self.readNoEof(bytes[0..]);
             return mem.readIntNative(T, &bytes);
         }
 
         /// Reads a foreign-endian integer
-        pub fn readIntForeign(self: *Self, comptime T: type) !T {
+        pub fn readIntForeign(self: Self, comptime T: type) !T {
             var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
             try self.readNoEof(bytes[0..]);
             return mem.readIntForeign(T, &bytes);
         }
 
-        pub fn readIntLittle(self: *Self, comptime T: type) !T {
+        pub fn readIntLittle(self: Self, comptime T: type) !T {
             var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
             try self.readNoEof(bytes[0..]);
             return mem.readIntLittle(T, &bytes);
         }
 
-        pub fn readIntBig(self: *Self, comptime T: type) !T {
+        pub fn readIntBig(self: Self, comptime T: type) !T {
             var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
             try self.readNoEof(bytes[0..]);
             return mem.readIntBig(T, &bytes);
         }
 
-        pub fn readInt(self: *Self, comptime T: type, endian: builtin.Endian) !T {
+        pub fn readInt(self: Self, comptime T: type, endian: builtin.Endian) !T {
             var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
             try self.readNoEof(bytes[0..]);
             return mem.readInt(T, &bytes, endian);
         }
 
-        pub fn readVarInt(self: *Self, comptime ReturnType: type, endian: builtin.Endian, size: usize) !ReturnType {
+        pub fn readVarInt(self: Self, comptime ReturnType: type, endian: builtin.Endian, size: usize) !ReturnType {
             assert(size <= @sizeOf(ReturnType));
             var bytes_buf: [@sizeOf(ReturnType)]u8 = undefined;
             const bytes = bytes_buf[0..size];
@@ -247,14 +234,14 @@ pub fn InStream(comptime ReadError: type) type {
             return mem.readVarInt(ReturnType, bytes, endian);
         }
 
-        pub fn skipBytes(self: *Self, num_bytes: u64) !void {
+        pub fn skipBytes(self: Self, num_bytes: u64) !void {
             var i: u64 = 0;
             while (i < num_bytes) : (i += 1) {
                 _ = try self.readByte();
             }
         }
 
-        pub fn readStruct(self: *Self, comptime T: type) !T {
+        pub fn readStruct(self: Self, comptime T: type) !T {
             // Only extern and packed structs have defined in-memory layout.
             comptime assert(@typeInfo(T).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto);
             var res: [1]T = undefined;
@@ -265,7 +252,7 @@ pub fn InStream(comptime ReadError: type) type {
         /// Reads an integer with the same size as the given enum's tag type. If the integer matches
         /// an enum tag, casts the integer to the enum tag and returns it. Otherwise, returns an error.
         /// TODO optimization taking advantage of most fields being in order
-        pub fn readEnum(self: *Self, comptime Enum: type, endian: builtin.Endian) !Enum {
+        pub fn readEnum(self: Self, comptime Enum: type, endian: builtin.Endian) !Enum {
             const E = error{
                 /// An integer was read, but it did not match any of the tags in the supplied enum.
                 InvalidValue,
lib/std/io/out_stream.zig
@@ -1,94 +1,85 @@
 const std = @import("../std.zig");
-const builtin = @import("builtin");
-const root = @import("root");
+const builtin = std.builtin;
 const mem = std.mem;
 
-pub const default_stack_size = 1 * 1024 * 1024;
-pub const stack_size: usize = if (@hasDecl(root, "stack_size_std_io_OutStream"))
-    root.stack_size_std_io_OutStream
-else
-    default_stack_size;
-
-pub fn OutStream(comptime WriteError: type) type {
+pub fn OutStream(
+    comptime Context: type,
+    comptime WriteError: type,
+    comptime writeFn: fn (context: Context, bytes: []const u8) WriteError!usize,
+) type {
     return struct {
+        context: Context,
+
         const Self = @This();
         pub const Error = WriteError;
-        pub const WriteFn = if (std.io.is_async)
-            async fn (self: *Self, bytes: []const u8) Error!usize
-        else
-            fn (self: *Self, bytes: []const u8) Error!usize;
 
-        writeFn: WriteFn,
-
-        pub fn writeOnce(self: *Self, bytes: []const u8) Error!usize {
-            if (std.io.is_async) {
-                // Let's not be writing 0xaa in safe modes for upwards of 4 MiB for every stream write.
-                @setRuntimeSafety(false);
-                var stack_frame: [stack_size]u8 align(std.Target.stack_align) = undefined;
-                return await @asyncCall(&stack_frame, {}, self.writeFn, self, bytes);
-            } else {
-                return self.writeFn(self, bytes);
-            }
+        pub fn write(self: Self, bytes: []const u8) Error!usize {
+            return writeFn(self.context, bytes);
         }
 
-        pub fn write(self: *Self, bytes: []const u8) Error!void {
+        pub fn writeAll(self: Self, bytes: []const u8) Error!void {
             var index: usize = 0;
             while (index != bytes.len) {
-                index += try self.writeOnce(bytes[index..]);
+                index += try self.write(bytes[index..]);
             }
         }
 
-        pub fn print(self: *Self, comptime format: []const u8, args: var) Error!void {
-            return std.fmt.format(self, Error, write, format, args);
+        pub fn print(self: Self, comptime format: []const u8, args: var) Error!void {
+            return std.fmt.format(self, Error, writeAll, format, args);
         }
 
-        pub fn writeByte(self: *Self, byte: u8) Error!void {
+        pub fn writeByte(self: Self, byte: u8) Error!void {
             const array = [1]u8{byte};
-            return self.write(&array);
+            return self.writeAll(&array);
         }
 
-        pub fn writeByteNTimes(self: *Self, byte: u8, n: usize) Error!void {
+        pub fn writeByteNTimes(self: Self, byte: u8, n: usize) Error!void {
             var bytes: [256]u8 = undefined;
             mem.set(u8, bytes[0..], byte);
 
             var remaining: usize = n;
             while (remaining > 0) {
                 const to_write = std.math.min(remaining, bytes.len);
-                try self.write(bytes[0..to_write]);
+                try self.writeAll(bytes[0..to_write]);
                 remaining -= to_write;
             }
         }
 
         /// Write a native-endian integer.
-        pub fn writeIntNative(self: *Self, comptime T: type, value: T) Error!void {
+        /// TODO audit non-power-of-two int sizes
+        pub fn writeIntNative(self: Self, comptime T: type, value: T) Error!void {
             var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
             mem.writeIntNative(T, &bytes, value);
-            return self.write(&bytes);
+            return self.writeAll(&bytes);
         }
 
         /// Write a foreign-endian integer.
-        pub fn writeIntForeign(self: *Self, comptime T: type, value: T) Error!void {
+        /// TODO audit non-power-of-two int sizes
+        pub fn writeIntForeign(self: Self, comptime T: type, value: T) Error!void {
             var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
             mem.writeIntForeign(T, &bytes, value);
-            return self.write(&bytes);
+            return self.writeAll(&bytes);
         }
 
-        pub fn writeIntLittle(self: *Self, comptime T: type, value: T) Error!void {
+        /// TODO audit non-power-of-two int sizes
+        pub fn writeIntLittle(self: Self, comptime T: type, value: T) Error!void {
             var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
             mem.writeIntLittle(T, &bytes, value);
-            return self.write(&bytes);
+            return self.writeAll(&bytes);
         }
 
-        pub fn writeIntBig(self: *Self, comptime T: type, value: T) Error!void {
+        /// TODO audit non-power-of-two int sizes
+        pub fn writeIntBig(self: Self, comptime T: type, value: T) Error!void {
             var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
             mem.writeIntBig(T, &bytes, value);
-            return self.write(&bytes);
+            return self.writeAll(&bytes);
         }
 
-        pub fn writeInt(self: *Self, comptime T: type, value: T, endian: builtin.Endian) Error!void {
+        /// TODO audit non-power-of-two int sizes
+        pub fn writeInt(self: Self, comptime T: type, value: T, endian: builtin.Endian) Error!void {
             var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
             mem.writeInt(T, &bytes, value, endian);
-            return self.write(&bytes);
+            return self.writeAll(&bytes);
         }
     };
 }
lib/std/io/seekable_stream.zig
@@ -1,103 +1,36 @@
 const std = @import("../std.zig");
 const InStream = std.io.InStream;
 
-pub fn SeekableStream(comptime SeekErrorType: type, comptime GetSeekPosErrorType: type) type {
+pub fn SeekableStream(
+    comptime Context: type,
+    comptime SeekErrorType: type,
+    comptime GetSeekPosErrorType: type,
+    comptime seekToFn: fn (context: Context, pos: u64) SeekErrorType!void,
+    comptime seekByFn: fn (context: Context, pos: i64) SeekErrorType!void,
+    comptime getPosFn: fn (context: Context) GetSeekPosErrorType!u64,
+    comptime getEndPosFn: fn (context: Context) GetSeekPosErrorType!u64,
+) type {
     return struct {
+        context: Context,
+
         const Self = @This();
         pub const SeekError = SeekErrorType;
         pub const GetSeekPosError = GetSeekPosErrorType;
 
-        seekToFn: fn (self: *Self, pos: u64) SeekError!void,
-        seekByFn: fn (self: *Self, pos: i64) SeekError!void,
-
-        getPosFn: fn (self: *Self) GetSeekPosError!u64,
-        getEndPosFn: fn (self: *Self) GetSeekPosError!u64,
-
-        pub fn seekTo(self: *Self, pos: u64) SeekError!void {
-            return self.seekToFn(self, pos);
+        pub fn seekTo(self: Self, pos: u64) SeekError!void {
+            return seekToFn(self.context, pos);
         }
 
-        pub fn seekBy(self: *Self, amt: i64) SeekError!void {
-            return self.seekByFn(self, amt);
+        pub fn seekBy(self: Self, amt: i64) SeekError!void {
+            return seekByFn(self.context, amt);
         }
 
-        pub fn getEndPos(self: *Self) GetSeekPosError!u64 {
-            return self.getEndPosFn(self);
+        pub fn getEndPos(self: Self) GetSeekPosError!u64 {
+            return getEndPosFn(self.context);
         }
 
-        pub fn getPos(self: *Self) GetSeekPosError!u64 {
-            return self.getPosFn(self);
+        pub fn getPos(self: Self) GetSeekPosError!u64 {
+            return getPosFn(self.context);
         }
     };
 }
-
-pub const SliceSeekableInStream = struct {
-    const Self = @This();
-    pub const Error = error{};
-    pub const SeekError = error{EndOfStream};
-    pub const GetSeekPosError = error{};
-    pub const Stream = InStream(Error);
-    pub const SeekableInStream = SeekableStream(SeekError, GetSeekPosError);
-
-    stream: Stream,
-    seekable_stream: SeekableInStream,
-
-    pos: usize,
-    slice: []const u8,
-
-    pub fn init(slice: []const u8) Self {
-        return Self{
-            .slice = slice,
-            .pos = 0,
-            .stream = Stream{ .readFn = readFn },
-            .seekable_stream = SeekableInStream{
-                .seekToFn = seekToFn,
-                .seekByFn = seekByFn,
-                .getEndPosFn = getEndPosFn,
-                .getPosFn = getPosFn,
-            },
-        };
-    }
-
-    fn readFn(in_stream: *Stream, dest: []u8) Error!usize {
-        const self = @fieldParentPtr(Self, "stream", in_stream);
-        const size = std.math.min(dest.len, self.slice.len - self.pos);
-        const end = self.pos + size;
-
-        std.mem.copy(u8, dest[0..size], self.slice[self.pos..end]);
-        self.pos = end;
-
-        return size;
-    }
-
-    fn seekToFn(in_stream: *SeekableInStream, pos: u64) SeekError!void {
-        const self = @fieldParentPtr(Self, "seekable_stream", in_stream);
-        const usize_pos = @intCast(usize, pos);
-        if (usize_pos > self.slice.len) return error.EndOfStream;
-        self.pos = usize_pos;
-    }
-
-    fn seekByFn(in_stream: *SeekableInStream, amt: i64) SeekError!void {
-        const self = @fieldParentPtr(Self, "seekable_stream", in_stream);
-
-        if (amt < 0) {
-            const abs_amt = @intCast(usize, -amt);
-            if (abs_amt > self.pos) return error.EndOfStream;
-            self.pos -= abs_amt;
-        } else {
-            const usize_amt = @intCast(usize, amt);
-            if (self.pos + usize_amt > self.slice.len) return error.EndOfStream;
-            self.pos += usize_amt;
-        }
-    }
-
-    fn getEndPosFn(in_stream: *SeekableInStream) GetSeekPosError!u64 {
-        const self = @fieldParentPtr(Self, "seekable_stream", in_stream);
-        return @intCast(u64, self.slice.len);
-    }
-
-    fn getPosFn(in_stream: *SeekableInStream) GetSeekPosError!u64 {
-        const self = @fieldParentPtr(Self, "seekable_stream", in_stream);
-        return @intCast(u64, self.pos);
-    }
-};
lib/std/json/write_stream.zig
@@ -30,11 +30,11 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type {
         /// The string used as spacing.
         space: []const u8 = " ",
 
-        stream: *OutStream,
+        stream: OutStream,
         state_index: usize,
         state: [max_depth]State,
 
-        pub fn init(stream: *OutStream) Self {
+        pub fn init(stream: OutStream) Self {
             var self = Self{
                 .stream = stream,
                 .state_index = 1,
@@ -90,8 +90,8 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type {
                     self.pushState(.Value);
                     try self.indent();
                     try self.writeEscapedString(name);
-                    try self.stream.write(":");
-                    try self.stream.write(self.space);
+                    try self.stream.writeAll(":");
+                    try self.stream.writeAll(self.space);
                 },
             }
         }
@@ -134,16 +134,16 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type {
 
         pub fn emitNull(self: *Self) !void {
             assert(self.state[self.state_index] == State.Value);
-            try self.stream.write("null");
+            try self.stream.writeAll("null");
             self.popState();
         }
 
         pub fn emitBool(self: *Self, value: bool) !void {
             assert(self.state[self.state_index] == State.Value);
             if (value) {
-                try self.stream.write("true");
+                try self.stream.writeAll("true");
             } else {
-                try self.stream.write("false");
+                try self.stream.writeAll("false");
             }
             self.popState();
         }
@@ -188,13 +188,13 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type {
             try self.stream.writeByte('"');
             for (string) |s| {
                 switch (s) {
-                    '"' => try self.stream.write("\\\""),
-                    '\t' => try self.stream.write("\\t"),
-                    '\r' => try self.stream.write("\\r"),
-                    '\n' => try self.stream.write("\\n"),
-                    8 => try self.stream.write("\\b"),
-                    12 => try self.stream.write("\\f"),
-                    '\\' => try self.stream.write("\\\\"),
+                    '"' => try self.stream.writeAll("\\\""),
+                    '\t' => try self.stream.writeAll("\\t"),
+                    '\r' => try self.stream.writeAll("\\r"),
+                    '\n' => try self.stream.writeAll("\\n"),
+                    8 => try self.stream.writeAll("\\b"),
+                    12 => try self.stream.writeAll("\\f"),
+                    '\\' => try self.stream.writeAll("\\\\"),
                     else => try self.stream.writeByte(s),
                 }
             }
@@ -231,10 +231,10 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type {
 
         fn indent(self: *Self) !void {
             assert(self.state_index >= 1);
-            try self.stream.write(self.newline);
+            try self.stream.writeAll(self.newline);
             var i: usize = 0;
             while (i < self.state_index - 1) : (i += 1) {
-                try self.stream.write(self.one_indent);
+                try self.stream.writeAll(self.one_indent);
             }
         }
 
lib/std/zig/ast.zig
@@ -375,7 +375,7 @@ pub const Error = union(enum) {
             token: TokenIndex,
 
             pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: var) !void {
-                return stream.write(msg);
+                return stream.writeAll(msg);
             }
         };
     }
lib/std/zig/render.zig
@@ -12,64 +12,58 @@ pub const Error = error{
 };
 
 /// Returns whether anything changed
-pub fn render(allocator: *mem.Allocator, stream: var, tree: *ast.Tree) (@TypeOf(stream).Child.Error || Error)!bool {
-    comptime assert(@typeInfo(@TypeOf(stream)) == .Pointer);
-
-    var anything_changed: bool = false;
-
+pub fn render(allocator: *mem.Allocator, stream: var, tree: *ast.Tree) (@TypeOf(stream).Error || Error)!bool {
     // make a passthrough stream that checks whether something changed
     const MyStream = struct {
         const MyStream = @This();
-        const StreamError = @TypeOf(stream).Child.Error;
-        const Stream = std.io.OutStream(StreamError);
+        const StreamError = @TypeOf(stream).Error;
 
-        anything_changed_ptr: *bool,
         child_stream: @TypeOf(stream),
-        stream: Stream,
+        anything_changed: bool,
         source_index: usize,
         source: []const u8,
 
-        fn write(iface_stream: *Stream, bytes: []const u8) StreamError!usize {
-            const self = @fieldParentPtr(MyStream, "stream", iface_stream);
-
-            if (!self.anything_changed_ptr.*) {
+        fn write(self: *MyStream, bytes: []const u8) StreamError!usize {
+            if (!self.anything_changed) {
                 const end = self.source_index + bytes.len;
                 if (end > self.source.len) {
-                    self.anything_changed_ptr.* = true;
+                    self.anything_changed = true;
                 } else {
                     const src_slice = self.source[self.source_index..end];
                     self.source_index += bytes.len;
                     if (!mem.eql(u8, bytes, src_slice)) {
-                        self.anything_changed_ptr.* = true;
+                        self.anything_changed = true;
                     }
                 }
             }
 
-            return self.child_stream.writeOnce(bytes);
+            return self.child_stream.write(bytes);
         }
     };
     var my_stream = MyStream{
-        .stream = MyStream.Stream{ .writeFn = MyStream.write },
         .child_stream = stream,
-        .anything_changed_ptr = &anything_changed,
+        .anything_changed = false,
         .source_index = 0,
         .source = tree.source,
     };
+    const my_stream_stream: std.io.OutStream(*MyStream, MyStream.StreamError, MyStream.write) = .{
+        .context = &my_stream,
+    };
 
-    try renderRoot(allocator, &my_stream.stream, tree);
+    try renderRoot(allocator, my_stream_stream, tree);
 
-    if (!anything_changed and my_stream.source_index != my_stream.source.len) {
-        anything_changed = true;
+    if (my_stream.source_index != my_stream.source.len) {
+        my_stream.anything_changed = true;
     }
 
-    return anything_changed;
+    return my_stream.anything_changed;
 }
 
 fn renderRoot(
     allocator: *mem.Allocator,
     stream: var,
     tree: *ast.Tree,
-) (@TypeOf(stream).Child.Error || Error)!void {
+) (@TypeOf(stream).Error || Error)!void {
     var tok_it = tree.tokens.iterator(0);
 
     // render all the line comments at the beginning of the file
@@ -189,7 +183,7 @@ fn renderRoot(
     }
 }
 
-fn renderExtraNewline(tree: *ast.Tree, stream: var, start_col: *usize, node: *ast.Node) @TypeOf(stream).Child.Error!void {
+fn renderExtraNewline(tree: *ast.Tree, stream: var, start_col: *usize, node: *ast.Node) @TypeOf(stream).Error!void {
     const first_token = node.firstToken();
     var prev_token = first_token;
     if (prev_token == 0) return;
@@ -204,11 +198,11 @@ fn renderExtraNewline(tree: *ast.Tree, stream: var, start_col: *usize, node: *as
     }
 }
 
-fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node) (@TypeOf(stream).Child.Error || Error)!void {
+fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node) (@TypeOf(stream).Error || Error)!void {
     try renderContainerDecl(allocator, stream, tree, indent, start_col, decl, .Newline);
 }
 
-fn renderContainerDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node, space: Space) (@TypeOf(stream).Child.Error || Error)!void {
+fn renderContainerDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node, space: Space) (@TypeOf(stream).Error || Error)!void {
     switch (decl.id) {
         .FnProto => {
             const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
@@ -343,7 +337,7 @@ fn renderExpression(
     start_col: *usize,
     base: *ast.Node,
     space: Space,
-) (@TypeOf(stream).Child.Error || Error)!void {
+) (@TypeOf(stream).Error || Error)!void {
     switch (base.id) {
         .Identifier => {
             const identifier = @fieldParentPtr(ast.Node.Identifier, "base", base);
@@ -449,9 +443,9 @@ fn renderExpression(
                     switch (op_tok_id) {
                         .Asterisk, .AsteriskAsterisk => try stream.writeByte('*'),
                         .LBracket => if (tree.tokens.at(prefix_op_node.op_token + 2).id == .Identifier)
-                            try stream.write("[*c")
+                            try stream.writeAll("[*c")
                         else
-                            try stream.write("[*"),
+                            try stream.writeAll("[*"),
                         else => unreachable,
                     }
                     if (ptr_info.sentinel) |sentinel| {
@@ -757,7 +751,7 @@ fn renderExpression(
                         while (it.next()) |field_init| {
                             var find_stream = FindByteOutStream.init('\n');
                             var dummy_col: usize = 0;
-                            try renderExpression(allocator, &find_stream.stream, tree, 0, &dummy_col, field_init.*, Space.None);
+                            try renderExpression(allocator, find_stream.outStream(), tree, 0, &dummy_col, field_init.*, Space.None);
                             if (find_stream.byte_found) break :blk false;
                         }
                         break :blk true;
@@ -909,8 +903,7 @@ fn renderExpression(
                         var column_widths = widths[widths.len - row_size ..];
 
                         // Null stream for counting the printed length of each expression
-                        var null_stream = std.io.NullOutStream.init();
-                        var counting_stream = std.io.CountingOutStream(std.io.NullOutStream.Error).init(&null_stream.stream);
+                        var counting_stream = std.io.CountingOutStream(@TypeOf(std.io.null_out_stream)).init(std.io.null_out_stream);
 
                         var it = exprs.iterator(0);
                         var i: usize = 0;
@@ -918,7 +911,7 @@ fn renderExpression(
                         while (it.next()) |expr| : (i += 1) {
                             counting_stream.bytes_written = 0;
                             var dummy_col: usize = 0;
-                            try renderExpression(allocator, &counting_stream.stream, tree, indent, &dummy_col, expr.*, Space.None);
+                            try renderExpression(allocator, counting_stream.outStream(), tree, indent, &dummy_col, expr.*, Space.None);
                             const width = @intCast(usize, counting_stream.bytes_written);
                             const col = i % row_size;
                             column_widths[col] = std.math.max(column_widths[col], width);
@@ -1336,7 +1329,7 @@ fn renderExpression(
 
             // TODO: Remove condition after deprecating 'typeOf'. See https://github.com/ziglang/zig/issues/1348
             if (mem.eql(u8, tree.tokenSlicePtr(tree.tokens.at(builtin_call.builtin_token)), "@typeOf")) {
-                try stream.write("@TypeOf");
+                try stream.writeAll("@TypeOf");
             } else {
                 try renderToken(tree, stream, builtin_call.builtin_token, indent, start_col, Space.None); // @name
             }
@@ -1505,9 +1498,9 @@ fn renderExpression(
                 try renderExpression(allocator, stream, tree, indent, start_col, callconv_expr, Space.None);
                 try renderToken(tree, stream, callconv_rparen, indent, start_col, Space.Space); // )
             } else if (cc_rewrite_str) |str| {
-                try stream.write("callconv(");
-                try stream.write(mem.toSliceConst(u8, str));
-                try stream.write(") ");
+                try stream.writeAll("callconv(");
+                try stream.writeAll(mem.toSliceConst(u8, str));
+                try stream.writeAll(") ");
             }
 
             switch (fn_proto.return_type) {
@@ -1997,11 +1990,11 @@ fn renderExpression(
         .AsmInput => {
             const asm_input = @fieldParentPtr(ast.Node.AsmInput, "base", base);
 
-            try stream.write("[");
+            try stream.writeAll("[");
             try renderExpression(allocator, stream, tree, indent, start_col, asm_input.symbolic_name, Space.None);
-            try stream.write("] ");
+            try stream.writeAll("] ");
             try renderExpression(allocator, stream, tree, indent, start_col, asm_input.constraint, Space.None);
-            try stream.write(" (");
+            try stream.writeAll(" (");
             try renderExpression(allocator, stream, tree, indent, start_col, asm_input.expr, Space.None);
             return renderToken(tree, stream, asm_input.lastToken(), indent, start_col, space); // )
         },
@@ -2009,18 +2002,18 @@ fn renderExpression(
         .AsmOutput => {
             const asm_output = @fieldParentPtr(ast.Node.AsmOutput, "base", base);
 
-            try stream.write("[");
+            try stream.writeAll("[");
             try renderExpression(allocator, stream, tree, indent, start_col, asm_output.symbolic_name, Space.None);
-            try stream.write("] ");
+            try stream.writeAll("] ");
             try renderExpression(allocator, stream, tree, indent, start_col, asm_output.constraint, Space.None);
-            try stream.write(" (");
+            try stream.writeAll(" (");
 
             switch (asm_output.kind) {
                 ast.Node.AsmOutput.Kind.Variable => |variable_name| {
                     try renderExpression(allocator, stream, tree, indent, start_col, &variable_name.base, Space.None);
                 },
                 ast.Node.AsmOutput.Kind.Return => |return_type| {
-                    try stream.write("-> ");
+                    try stream.writeAll("-> ");
                     try renderExpression(allocator, stream, tree, indent, start_col, return_type, Space.None);
                 },
             }
@@ -2052,7 +2045,7 @@ fn renderVarDecl(
     indent: usize,
     start_col: *usize,
     var_decl: *ast.Node.VarDecl,
-) (@TypeOf(stream).Child.Error || Error)!void {
+) (@TypeOf(stream).Error || Error)!void {
     if (var_decl.visib_token) |visib_token| {
         try renderToken(tree, stream, visib_token, indent, start_col, Space.Space); // pub
     }
@@ -2125,7 +2118,7 @@ fn renderParamDecl(
     start_col: *usize,
     base: *ast.Node,
     space: Space,
-) (@TypeOf(stream).Child.Error || Error)!void {
+) (@TypeOf(stream).Error || Error)!void {
     const param_decl = @fieldParentPtr(ast.Node.ParamDecl, "base", base);
 
     try renderDocComments(tree, stream, param_decl, indent, start_col);
@@ -2154,7 +2147,7 @@ fn renderStatement(
     indent: usize,
     start_col: *usize,
     base: *ast.Node,
-) (@TypeOf(stream).Child.Error || Error)!void {
+) (@TypeOf(stream).Error || Error)!void {
     switch (base.id) {
         .VarDecl => {
             const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", base);
@@ -2193,7 +2186,7 @@ fn renderTokenOffset(
     start_col: *usize,
     space: Space,
     token_skip_bytes: usize,
-) (@TypeOf(stream).Child.Error || Error)!void {
+) (@TypeOf(stream).Error || Error)!void {
     if (space == Space.BlockStart) {
         if (start_col.* < indent + indent_delta)
             return renderToken(tree, stream, token_index, indent, start_col, Space.Space);
@@ -2204,7 +2197,7 @@ fn renderTokenOffset(
     }
 
     var token = tree.tokens.at(token_index);
-    try stream.write(mem.trimRight(u8, tree.tokenSlicePtr(token)[token_skip_bytes..], " "));
+    try stream.writeAll(mem.trimRight(u8, tree.tokenSlicePtr(token)[token_skip_bytes..], " "));
 
     if (space == Space.NoComment)
         return;
@@ -2214,15 +2207,15 @@ fn renderTokenOffset(
     if (space == Space.Comma) switch (next_token.id) {
         .Comma => return renderToken(tree, stream, token_index + 1, indent, start_col, Space.Newline),
         .LineComment => {
-            try stream.write(", ");
+            try stream.writeAll(", ");
             return renderToken(tree, stream, token_index + 1, indent, start_col, Space.Newline);
         },
         else => {
             if (token_index + 2 < tree.tokens.len and tree.tokens.at(token_index + 2).id == .MultilineStringLiteralLine) {
-                try stream.write(",");
+                try stream.writeAll(",");
                 return;
             } else {
-                try stream.write(",\n");
+                try stream.writeAll(",\n");
                 start_col.* = 0;
                 return;
             }
@@ -2246,7 +2239,7 @@ fn renderTokenOffset(
                 if (next_token.id == .MultilineStringLiteralLine) {
                     return;
                 } else {
-                    try stream.write("\n");
+                    try stream.writeAll("\n");
                     start_col.* = 0;
                     return;
                 }
@@ -2309,7 +2302,7 @@ fn renderTokenOffset(
                     if (next_token.id == .MultilineStringLiteralLine) {
                         return;
                     } else {
-                        try stream.write("\n");
+                        try stream.writeAll("\n");
                         start_col.* = 0;
                         return;
                     }
@@ -2327,7 +2320,7 @@ fn renderTokenOffset(
         const newline_count = if (loc.line == 1) @as(u8, 1) else @as(u8, 2);
         try stream.writeByteNTimes('\n', newline_count);
         try stream.writeByteNTimes(' ', indent);
-        try stream.write(mem.trimRight(u8, tree.tokenSlicePtr(next_token), " "));
+        try stream.writeAll(mem.trimRight(u8, tree.tokenSlicePtr(next_token), " "));
 
         offset += 1;
         token = next_token;
@@ -2338,7 +2331,7 @@ fn renderTokenOffset(
                     if (next_token.id == .MultilineStringLiteralLine) {
                         return;
                     } else {
-                        try stream.write("\n");
+                        try stream.writeAll("\n");
                         start_col.* = 0;
                         return;
                     }
@@ -2381,7 +2374,7 @@ fn renderToken(
     indent: usize,
     start_col: *usize,
     space: Space,
-) (@TypeOf(stream).Child.Error || Error)!void {
+) (@TypeOf(stream).Error || Error)!void {
     return renderTokenOffset(tree, stream, token_index, indent, start_col, space, 0);
 }
 
@@ -2391,7 +2384,7 @@ fn renderDocComments(
     node: var,
     indent: usize,
     start_col: *usize,
-) (@TypeOf(stream).Child.Error || Error)!void {
+) (@TypeOf(stream).Error || Error)!void {
     const comment = node.doc_comments orelse return;
     var it = comment.lines.iterator(0);
     const first_token = node.firstToken();
@@ -2401,7 +2394,7 @@ fn renderDocComments(
             try stream.writeByteNTimes(' ', indent);
         } else {
             try renderToken(tree, stream, line_token_index.*, indent, start_col, Space.NoComment);
-            try stream.write("\n");
+            try stream.writeAll("\n");
             try stream.writeByteNTimes(' ', indent);
         }
     }
@@ -2427,27 +2420,23 @@ fn nodeCausesSliceOpSpace(base: *ast.Node) bool {
     };
 }
 
-// An OutStream that returns whether the given character has been written to it.
-// The contents are not written to anything.
+/// A `std.io.OutStream` that returns whether the given character has been written to it.
+/// The contents are not written to anything.
 const FindByteOutStream = struct {
-    const Self = FindByteOutStream;
-    pub const Error = error{};
-    pub const Stream = std.io.OutStream(Error);
-
-    stream: Stream,
     byte_found: bool,
     byte: u8,
 
-    pub fn init(byte: u8) Self {
-        return Self{
-            .stream = Stream{ .writeFn = writeFn },
+    pub const Error = error{};
+    pub const OutStream = std.io.OutStream(*FindByteOutStream, Error, write);
+
+    pub fn init(byte: u8) FindByteOutStream {
+        return FindByteOutStream{
             .byte = byte,
             .byte_found = false,
         };
     }
 
-    fn writeFn(out_stream: *Stream, bytes: []const u8) Error!usize {
-        const self = @fieldParentPtr(Self, "stream", out_stream);
+    pub fn write(self: *FindByteOutStream, bytes: []const u8) Error!usize {
         if (self.byte_found) return bytes.len;
         self.byte_found = blk: {
             for (bytes) |b|
@@ -2456,11 +2445,15 @@ const FindByteOutStream = struct {
         };
         return bytes.len;
     }
+
+    pub fn outStream(self: *FindByteOutStream) OutStream {
+        return .{ .context = self };
+    }
 };
 
-fn copyFixingWhitespace(stream: var, slice: []const u8) @TypeOf(stream).Child.Error!void {
+fn copyFixingWhitespace(stream: var, slice: []const u8) @TypeOf(stream).Error!void {
     for (slice) |byte| switch (byte) {
-        '\t' => try stream.write("    "),
+        '\t' => try stream.writeAll("    "),
         '\r' => {},
         else => try stream.writeByte(byte),
     };
lib/std/buffer.zig
@@ -157,6 +157,17 @@ pub const Buffer = struct {
     pub fn print(self: *Buffer, comptime fmt: []const u8, args: var) !void {
         return std.fmt.format(self, error{OutOfMemory}, Buffer.append, fmt, args);
     }
+
+    pub fn outStream(self: *Buffer) std.io.OutStream(*Buffer, error{OutOfMemory}, appendWrite) {
+        return .{ .context = self };
+    }
+
+    /// Same as `append` except it returns the number of bytes written, which is always the same
+    /// as `m.len`. The purpose of this function existing is to match `std.io.OutStream` API.
+    pub fn appendWrite(self: *Buffer, m: []const u8) !usize {
+        try self.append(m);
+        return m.len;
+    }
 };
 
 test "simple Buffer" {
lib/std/child_process.zig
@@ -221,9 +221,9 @@ pub const ChildProcess = struct {
         var stderr_file_in_stream = child.stderr.?.inStream();
 
         // TODO need to poll to read these streams to prevent a deadlock (or rely on evented I/O).
-        const stdout = try stdout_file_in_stream.stream.readAllAlloc(args.allocator, args.max_output_bytes);
+        const stdout = try stdout_file_in_stream.readAllAlloc(args.allocator, args.max_output_bytes);
         errdefer args.allocator.free(stdout);
-        const stderr = try stderr_file_in_stream.stream.readAllAlloc(args.allocator, args.max_output_bytes);
+        const stderr = try stderr_file_in_stream.readAllAlloc(args.allocator, args.max_output_bytes);
         errdefer args.allocator.free(stderr);
 
         return ExecResult{
@@ -857,8 +857,7 @@ fn writeIntFd(fd: i32, value: ErrInt) !void {
         .io_mode = .blocking,
         .async_block_allowed = File.async_block_allowed_yes,
     };
-    const stream = &file.outStream().stream;
-    stream.writeIntNative(u64, @intCast(u64, value)) catch return error.SystemResources;
+    file.outStream().writeIntNative(u64, @intCast(u64, value)) catch return error.SystemResources;
 }
 
 fn readIntFd(fd: i32) !ErrInt {
@@ -867,8 +866,7 @@ fn readIntFd(fd: i32) !ErrInt {
         .io_mode = .blocking,
         .async_block_allowed = File.async_block_allowed_yes,
     };
-    const stream = &file.inStream().stream;
-    return @intCast(ErrInt, stream.readIntNative(u64) catch return error.SystemResources);
+    return @intCast(ErrInt, file.inStream().readIntNative(u64) catch return error.SystemResources);
 }
 
 /// Caller must free result.
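
A rough sketch of the calling-convention change (illustrative; `readAllOutput` is a made-up helper, not part of the commit):

    fn readAllOutput(allocator: *std.mem.Allocator, child: *ChildProcess) ![]u8 {
        // The InStream adapter is now a value with methods called directly,
        // instead of going through a `.stream` field.
        return child.stdout.?.inStream().readAllAlloc(allocator, 50 * 1024);
    }
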
lib/std/debug.zig
@@ -55,7 +55,7 @@ pub const LineInfo = struct {
 var stderr_file: File = undefined;
 var stderr_file_out_stream: File.OutStream = undefined;
 
-var stderr_stream: ?*io.OutStream(File.WriteError) = null;
+var stderr_stream: ?*File.OutStream = null;
 var stderr_mutex = std.Mutex.init();
 
 pub fn warn(comptime fmt: []const u8, args: var) void {
@@ -65,13 +65,13 @@ pub fn warn(comptime fmt: []const u8, args: var) void {
     noasync stderr.print(fmt, args) catch return;
 }
 
-pub fn getStderrStream() *io.OutStream(File.WriteError) {
+pub fn getStderrStream() *File.OutStream {
     if (stderr_stream) |st| {
         return st;
     } else {
         stderr_file = io.getStdErr();
         stderr_file_out_stream = stderr_file.outStream();
-        const st = &stderr_file_out_stream.stream;
+        const st = &stderr_file_out_stream;
         stderr_stream = st;
         return st;
     }
@@ -408,15 +408,15 @@ pub const TTY = struct {
         windows_api,
 
         fn setColor(conf: Config, out_stream: var, color: Color) void {
-            switch (conf) {
+            noasync switch (conf) {
                 .no_color => return,
                 .escape_codes => switch (color) {
-                    .Red => noasync out_stream.write(RED) catch return,
-                    .Green => noasync out_stream.write(GREEN) catch return,
-                    .Cyan => noasync out_stream.write(CYAN) catch return,
-                    .White, .Bold => noasync out_stream.write(WHITE) catch return,
-                    .Dim => noasync out_stream.write(DIM) catch return,
-                    .Reset => noasync out_stream.write(RESET) catch return,
+                    .Red => out_stream.writeAll(RED) catch return,
+                    .Green => out_stream.writeAll(GREEN) catch return,
+                    .Cyan => out_stream.writeAll(CYAN) catch return,
+                    .White, .Bold => out_stream.writeAll(WHITE) catch return,
+                    .Dim => out_stream.writeAll(DIM) catch return,
+                    .Reset => out_stream.writeAll(RESET) catch return,
                 },
                 .windows_api => if (builtin.os.tag == .windows) {
                     const S = struct {
@@ -455,7 +455,7 @@ pub const TTY = struct {
                 } else {
                     unreachable;
                 },
-            }
+            };
         }
     };
 };
@@ -565,38 +565,40 @@ fn printLineInfo(
     tty_config: TTY.Config,
     comptime printLineFromFile: var,
 ) !void {
-    tty_config.setColor(out_stream, .White);
+    noasync {
+        tty_config.setColor(out_stream, .White);
 
-    if (line_info) |*li| {
-        try noasync out_stream.print("{}:{}:{}", .{ li.file_name, li.line, li.column });
-    } else {
-        try noasync out_stream.write("???:?:?");
-    }
+        if (line_info) |*li| {
+            try out_stream.print("{}:{}:{}", .{ li.file_name, li.line, li.column });
+        } else {
+            try out_stream.writeAll("???:?:?");
+        }
 
-    tty_config.setColor(out_stream, .Reset);
-    try noasync out_stream.write(": ");
-    tty_config.setColor(out_stream, .Dim);
-    try noasync out_stream.print("0x{x} in {} ({})", .{ address, symbol_name, compile_unit_name });
-    tty_config.setColor(out_stream, .Reset);
-    try noasync out_stream.write("\n");
-
-    // Show the matching source code line if possible
-    if (line_info) |li| {
-        if (noasync printLineFromFile(out_stream, li)) {
-            if (li.column > 0) {
-                // The caret already takes one char
-                const space_needed = @intCast(usize, li.column - 1);
-
-                try noasync out_stream.writeByteNTimes(' ', space_needed);
-                tty_config.setColor(out_stream, .Green);
-                try noasync out_stream.write("^");
-                tty_config.setColor(out_stream, .Reset);
+        tty_config.setColor(out_stream, .Reset);
+        try out_stream.writeAll(": ");
+        tty_config.setColor(out_stream, .Dim);
+        try out_stream.print("0x{x} in {} ({})", .{ address, symbol_name, compile_unit_name });
+        tty_config.setColor(out_stream, .Reset);
+        try out_stream.writeAll("\n");
+
+        // Show the matching source code line if possible
+        if (line_info) |li| {
+            if (printLineFromFile(out_stream, li)) {
+                if (li.column > 0) {
+                    // The caret already takes one char
+                    const space_needed = @intCast(usize, li.column - 1);
+
+                    try out_stream.writeByteNTimes(' ', space_needed);
+                    tty_config.setColor(out_stream, .Green);
+                    try out_stream.writeAll("^");
+                    tty_config.setColor(out_stream, .Reset);
+                }
+                try out_stream.writeAll("\n");
+            } else |err| switch (err) {
+                error.EndOfFile, error.FileNotFound => {},
+                error.BadPathName => {},
+                else => return err,
             }
-            try noasync out_stream.write("\n");
-        } else |err| switch (err) {
-            error.EndOfFile, error.FileNotFound => {},
-            error.BadPathName => {},
-            else => return err,
         }
     }
 }
@@ -609,21 +611,21 @@ pub const OpenSelfDebugInfoError = error{
 };
 
 /// TODO resources https://github.com/ziglang/zig/issues/4353
-/// TODO once https://github.com/ziglang/zig/issues/3157 is fully implemented,
-/// make this `noasync fn` and remove the individual noasync calls.
 pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo {
-    if (builtin.strip_debug_info)
-        return error.MissingDebugInfo;
-    if (@hasDecl(root, "os") and @hasDecl(root.os, "debug") and @hasDecl(root.os.debug, "openSelfDebugInfo")) {
-        return noasync root.os.debug.openSelfDebugInfo(allocator);
-    }
-    switch (builtin.os.tag) {
-        .linux,
-        .freebsd,
-        .macosx,
-        .windows,
-        => return DebugInfo.init(allocator),
-        else => @compileError("openSelfDebugInfo unsupported for this platform"),
+    noasync {
+        if (builtin.strip_debug_info)
+            return error.MissingDebugInfo;
+        if (@hasDecl(root, "os") and @hasDecl(root.os, "debug") and @hasDecl(root.os.debug, "openSelfDebugInfo")) {
+            return root.os.debug.openSelfDebugInfo(allocator);
+        }
+        switch (builtin.os.tag) {
+            .linux,
+            .freebsd,
+            .macosx,
+            .windows,
+            => return DebugInfo.init(allocator),
+            else => @compileError("openSelfDebugInfo unsupported for this platform"),
+        }
     }
 }
 
@@ -808,45 +810,64 @@ fn chopSlice(ptr: []const u8, offset: u64, size: u64) ![]const u8 {
 
 /// TODO resources https://github.com/ziglang/zig/issues/4353
 pub fn openElfDebugInfo(allocator: *mem.Allocator, elf_file_path: []const u8) !ModuleDebugInfo {
-    const mapped_mem = try mapWholeFile(elf_file_path);
-
-    var seekable_stream = io.SliceSeekableInStream.init(mapped_mem);
-    var efile = try noasync elf.Elf.openStream(
-        allocator,
-        @ptrCast(*DW.DwarfSeekableStream, &seekable_stream.seekable_stream),
-        @ptrCast(*DW.DwarfInStream, &seekable_stream.stream),
-    );
-    defer noasync efile.close();
+    noasync {
+        const mapped_mem = try mapWholeFile(elf_file_path);
+        const hdr = @ptrCast(*const elf.Ehdr, &mapped_mem[0]);
+        if (!mem.eql(u8, hdr.e_ident[0..4], "\x7fELF")) return error.InvalidElfMagic;
+        if (hdr.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion;
+
+        const endian: builtin.Endian = switch (hdr.e_ident[elf.EI_DATA]) {
+            elf.ELFDATA2LSB => .Little,
+            elf.ELFDATA2MSB => .Big,
+            else => return error.InvalidElfEndian,
+        };
+        assert(endian == std.builtin.endian); // this is our own debug info
+
+        const shoff = hdr.e_shoff;
+        const str_section_off = shoff + @as(u64, hdr.e_shentsize) * @as(u64, hdr.e_shstrndx);
+        const str_shdr = @ptrCast(
+            *const elf.Shdr,
+            @alignCast(@alignOf(elf.Shdr), &mapped_mem[try math.cast(usize, str_section_off)]),
+        );
+        const header_strings = mapped_mem[str_shdr.sh_offset .. str_shdr.sh_offset + str_shdr.sh_size];
+        const shdrs = @ptrCast([*]const elf.Shdr, @alignCast(@alignOf(elf.Shdr), &mapped_mem[shoff]))[0..hdr.e_shnum];
+
+        var opt_debug_info: ?[]const u8 = null;
+        var opt_debug_abbrev: ?[]const u8 = null;
+        var opt_debug_str: ?[]const u8 = null;
+        var opt_debug_line: ?[]const u8 = null;
+        var opt_debug_ranges: ?[]const u8 = null;
+
+        for (shdrs) |*shdr| {
+            if (shdr.sh_type == elf.SHT_NULL) continue;
+
+            const name = std.mem.span(@ptrCast([*:0]const u8, header_strings[shdr.sh_name..].ptr));
+            if (mem.eql(u8, name, ".debug_info")) {
+                opt_debug_info = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
+            } else if (mem.eql(u8, name, ".debug_abbrev")) {
+                opt_debug_abbrev = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
+            } else if (mem.eql(u8, name, ".debug_str")) {
+                opt_debug_str = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
+            } else if (mem.eql(u8, name, ".debug_line")) {
+                opt_debug_line = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
+            } else if (mem.eql(u8, name, ".debug_ranges")) {
+                opt_debug_ranges = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
+            }
+        }
 
-    const debug_info = (try noasync efile.findSection(".debug_info")) orelse
-        return error.MissingDebugInfo;
-    const debug_abbrev = (try noasync efile.findSection(".debug_abbrev")) orelse
-        return error.MissingDebugInfo;
-    const debug_str = (try noasync efile.findSection(".debug_str")) orelse
-        return error.MissingDebugInfo;
-    const debug_line = (try noasync efile.findSection(".debug_line")) orelse
-        return error.MissingDebugInfo;
-    const opt_debug_ranges = try noasync efile.findSection(".debug_ranges");
-
-    var di = DW.DwarfInfo{
-        .endian = efile.endian,
-        .debug_info = try chopSlice(mapped_mem, debug_info.sh_offset, debug_info.sh_size),
-        .debug_abbrev = try chopSlice(mapped_mem, debug_abbrev.sh_offset, debug_abbrev.sh_size),
-        .debug_str = try chopSlice(mapped_mem, debug_str.sh_offset, debug_str.sh_size),
-        .debug_line = try chopSlice(mapped_mem, debug_line.sh_offset, debug_line.sh_size),
-        .debug_ranges = if (opt_debug_ranges) |debug_ranges|
-            try chopSlice(mapped_mem, debug_ranges.sh_offset, debug_ranges.sh_size)
-        else
-            null,
-    };
+        var di = DW.DwarfInfo{
+            .endian = endian,
+            .debug_info = opt_debug_info orelse return error.MissingDebugInfo,
+            .debug_abbrev = opt_debug_abbrev orelse return error.MissingDebugInfo,
+            .debug_str = opt_debug_str orelse return error.MissingDebugInfo,
+            .debug_line = opt_debug_line orelse return error.MissingDebugInfo,
+            .debug_ranges = opt_debug_ranges,
+        };
 
-    try noasync DW.openDwarfDebugInfo(&di, allocator);
+        try DW.openDwarfDebugInfo(&di, allocator);
 
-    return ModuleDebugInfo{
-        .base_address = undefined,
-        .dwarf = di,
-        .mapped_memory = mapped_mem,
-    };
+        return ModuleDebugInfo{
+            .base_address = undefined,
+            .dwarf = di,
+            .mapped_memory = mapped_mem,
+        };
+    }
 }
 
 /// TODO resources https://github.com/ziglang/zig/issues/4353
@@ -982,22 +1003,24 @@ const MachoSymbol = struct {
     }
 };
 
-fn mapWholeFile(path: []const u8) ![]const u8 {
-    const file = try noasync fs.openFileAbsolute(path, .{ .always_blocking = true });
-    defer noasync file.close();
-
-    const file_len = try math.cast(usize, try file.getEndPos());
-    const mapped_mem = try os.mmap(
-        null,
-        file_len,
-        os.PROT_READ,
-        os.MAP_SHARED,
-        file.handle,
-        0,
-    );
-    errdefer os.munmap(mapped_mem);
+fn mapWholeFile(path: []const u8) ![]align(mem.page_size) const u8 {
+    noasync {
+        const file = try fs.openFileAbsolute(path, .{ .always_blocking = true });
+        defer file.close();
 
-    return mapped_mem;
+        const file_len = try math.cast(usize, try file.getEndPos());
+        const mapped_mem = try os.mmap(
+            null,
+            file_len,
+            os.PROT_READ,
+            os.MAP_SHARED,
+            file.handle,
+            0,
+        );
+        errdefer os.munmap(mapped_mem);
+
+        return mapped_mem;
+    }
 }
 
 pub const DebugInfo = struct {
lib/std/dwarf.zig
@@ -11,9 +11,6 @@ const ArrayList = std.ArrayList;
 
 usingnamespace @import("dwarf_bits.zig");
 
-pub const DwarfSeekableStream = io.SeekableStream(anyerror, anyerror);
-pub const DwarfInStream = io.InStream(anyerror);
-
 const PcRange = struct {
     start: u64,
     end: u64,
@@ -239,7 +236,7 @@ const LineNumberProgram = struct {
     }
 };
 
-fn readInitialLength(comptime E: type, in_stream: *io.InStream(E), is_64: *bool) !u64 {
+fn readInitialLength(in_stream: var, is_64: *bool) !u64 {
     const first_32_bits = try in_stream.readIntLittle(u32);
     is_64.* = (first_32_bits == 0xffffffff);
     if (is_64.*) {
@@ -414,40 +411,42 @@ pub const DwarfInfo = struct {
     }
 
     fn scanAllFunctions(di: *DwarfInfo) !void {
-        var s = io.SliceSeekableInStream.init(di.debug_info);
+        var stream = io.fixedBufferStream(di.debug_info);
+        const in = &stream.inStream();
+        const seekable = &stream.seekableStream();
         var this_unit_offset: u64 = 0;
 
-        while (this_unit_offset < try s.seekable_stream.getEndPos()) {
-            s.seekable_stream.seekTo(this_unit_offset) catch |err| switch (err) {
+        while (this_unit_offset < try seekable.getEndPos()) {
+            seekable.seekTo(this_unit_offset) catch |err| switch (err) {
                 error.EndOfStream => unreachable,
                 else => return err,
             };
 
             var is_64: bool = undefined;
-            const unit_length = try readInitialLength(@TypeOf(s.stream.readFn).ReturnType.ErrorSet, &s.stream, &is_64);
+            const unit_length = try readInitialLength(in, &is_64);
             if (unit_length == 0) return;
             const next_offset = unit_length + (if (is_64) @as(usize, 12) else @as(usize, 4));
 
-            const version = try s.stream.readInt(u16, di.endian);
+            const version = try in.readInt(u16, di.endian);
             if (version < 2 or version > 5) return error.InvalidDebugInfo;
 
-            const debug_abbrev_offset = if (is_64) try s.stream.readInt(u64, di.endian) else try s.stream.readInt(u32, di.endian);
+            const debug_abbrev_offset = if (is_64) try in.readInt(u64, di.endian) else try in.readInt(u32, di.endian);
 
-            const address_size = try s.stream.readByte();
+            const address_size = try in.readByte();
             if (address_size != @sizeOf(usize)) return error.InvalidDebugInfo;
 
-            const compile_unit_pos = try s.seekable_stream.getPos();
+            const compile_unit_pos = try seekable.getPos();
             const abbrev_table = try di.getAbbrevTable(debug_abbrev_offset);
 
-            try s.seekable_stream.seekTo(compile_unit_pos);
+            try seekable.seekTo(compile_unit_pos);
 
             const next_unit_pos = this_unit_offset + next_offset;
 
-            while ((try s.seekable_stream.getPos()) < next_unit_pos) {
-                const die_obj = (try di.parseDie(&s.stream, abbrev_table, is_64)) orelse continue;
+            while ((try seekable.getPos()) < next_unit_pos) {
+                const die_obj = (try di.parseDie(in, abbrev_table, is_64)) orelse continue;
                 defer die_obj.attrs.deinit();
 
-                const after_die_offset = try s.seekable_stream.getPos();
+                const after_die_offset = try seekable.getPos();
 
                 switch (die_obj.tag_id) {
                     TAG_subprogram, TAG_inlined_subroutine, TAG_subroutine, TAG_entry_point => {
@@ -463,14 +462,14 @@ pub const DwarfInfo = struct {
                                     // Follow the DIE it points to and repeat
                                     const ref_offset = try this_die_obj.getAttrRef(AT_abstract_origin);
                                     if (ref_offset > next_offset) return error.InvalidDebugInfo;
-                                    try s.seekable_stream.seekTo(this_unit_offset + ref_offset);
-                                    this_die_obj = (try di.parseDie(&s.stream, abbrev_table, is_64)) orelse return error.InvalidDebugInfo;
+                                    try seekable.seekTo(this_unit_offset + ref_offset);
+                                    this_die_obj = (try di.parseDie(in, abbrev_table, is_64)) orelse return error.InvalidDebugInfo;
                                 } else if (this_die_obj.getAttr(AT_specification)) |ref| {
                                     // Follow the DIE it points to and repeat
                                     const ref_offset = try this_die_obj.getAttrRef(AT_specification);
                                     if (ref_offset > next_offset) return error.InvalidDebugInfo;
-                                    try s.seekable_stream.seekTo(this_unit_offset + ref_offset);
-                                    this_die_obj = (try di.parseDie(&s.stream, abbrev_table, is_64)) orelse return error.InvalidDebugInfo;
+                                    try seekable.seekTo(this_unit_offset + ref_offset);
+                                    this_die_obj = (try di.parseDie(in, abbrev_table, is_64)) orelse return error.InvalidDebugInfo;
                                 } else {
                                     break :x null;
                                 }
@@ -511,7 +510,7 @@ pub const DwarfInfo = struct {
                     else => {},
                 }
 
-                try s.seekable_stream.seekTo(after_die_offset);
+                try seekable.seekTo(after_die_offset);
             }
 
             this_unit_offset += next_offset;
@@ -519,35 +518,37 @@ pub const DwarfInfo = struct {
     }
 
     fn scanAllCompileUnits(di: *DwarfInfo) !void {
-        var s = io.SliceSeekableInStream.init(di.debug_info);
+        var stream = io.fixedBufferStream(di.debug_info);
+        const in = &stream.inStream();
+        const seekable = &stream.seekableStream();
         var this_unit_offset: u64 = 0;
 
-        while (this_unit_offset < try s.seekable_stream.getEndPos()) {
-            s.seekable_stream.seekTo(this_unit_offset) catch |err| switch (err) {
+        while (this_unit_offset < try seekable.getEndPos()) {
+            seekable.seekTo(this_unit_offset) catch |err| switch (err) {
                 error.EndOfStream => unreachable,
                 else => return err,
             };
 
             var is_64: bool = undefined;
-            const unit_length = try readInitialLength(@TypeOf(s.stream.readFn).ReturnType.ErrorSet, &s.stream, &is_64);
+            const unit_length = try readInitialLength(in, &is_64);
             if (unit_length == 0) return;
             const next_offset = unit_length + (if (is_64) @as(usize, 12) else @as(usize, 4));
 
-            const version = try s.stream.readInt(u16, di.endian);
+            const version = try in.readInt(u16, di.endian);
             if (version < 2 or version > 5) return error.InvalidDebugInfo;
 
-            const debug_abbrev_offset = if (is_64) try s.stream.readInt(u64, di.endian) else try s.stream.readInt(u32, di.endian);
+            const debug_abbrev_offset = if (is_64) try in.readInt(u64, di.endian) else try in.readInt(u32, di.endian);
 
-            const address_size = try s.stream.readByte();
+            const address_size = try in.readByte();
             if (address_size != @sizeOf(usize)) return error.InvalidDebugInfo;
 
-            const compile_unit_pos = try s.seekable_stream.getPos();
+            const compile_unit_pos = try seekable.getPos();
             const abbrev_table = try di.getAbbrevTable(debug_abbrev_offset);
 
-            try s.seekable_stream.seekTo(compile_unit_pos);
+            try seekable.seekTo(compile_unit_pos);
 
             const compile_unit_die = try di.allocator().create(Die);
-            compile_unit_die.* = (try di.parseDie(&s.stream, abbrev_table, is_64)) orelse return error.InvalidDebugInfo;
+            compile_unit_die.* = (try di.parseDie(in, abbrev_table, is_64)) orelse return error.InvalidDebugInfo;
 
             if (compile_unit_die.tag_id != TAG_compile_unit) return error.InvalidDebugInfo;
 
@@ -593,7 +594,9 @@ pub const DwarfInfo = struct {
             }
             if (di.debug_ranges) |debug_ranges| {
                 if (compile_unit.die.getAttrSecOffset(AT_ranges)) |ranges_offset| {
-                    var s = io.SliceSeekableInStream.init(debug_ranges);
+                    var stream = io.fixedBufferStream(debug_ranges);
+                    const in = &stream.inStream();
+                    const seekable = &stream.seekableStream();
 
                     // All the addresses in the list are relative to the value
                     // specified by DW_AT_low_pc or to some other value encoded
@@ -604,11 +607,11 @@ pub const DwarfInfo = struct {
                         else => return err,
                     };
 
-                    try s.seekable_stream.seekTo(ranges_offset);
+                    try seekable.seekTo(ranges_offset);
 
                     while (true) {
-                        const begin_addr = try s.stream.readIntLittle(usize);
-                        const end_addr = try s.stream.readIntLittle(usize);
+                        const begin_addr = try in.readIntLittle(usize);
+                        const end_addr = try in.readIntLittle(usize);
                         if (begin_addr == 0 and end_addr == 0) {
                             break;
                         }
@@ -646,25 +649,27 @@ pub const DwarfInfo = struct {
     }
 
     fn parseAbbrevTable(di: *DwarfInfo, offset: u64) !AbbrevTable {
-        var s = io.SliceSeekableInStream.init(di.debug_abbrev);
+        var stream = io.fixedBufferStream(di.debug_abbrev);
+        const in = &stream.inStream();
+        const seekable = &stream.seekableStream();
 
-        try s.seekable_stream.seekTo(offset);
+        try seekable.seekTo(offset);
         var result = AbbrevTable.init(di.allocator());
         errdefer result.deinit();
         while (true) {
-            const abbrev_code = try leb.readULEB128(u64, &s.stream);
+            const abbrev_code = try leb.readULEB128(u64, in);
             if (abbrev_code == 0) return result;
             try result.append(AbbrevTableEntry{
                 .abbrev_code = abbrev_code,
-                .tag_id = try leb.readULEB128(u64, &s.stream),
-                .has_children = (try s.stream.readByte()) == CHILDREN_yes,
+                .tag_id = try leb.readULEB128(u64, in),
+                .has_children = (try in.readByte()) == CHILDREN_yes,
                 .attrs = ArrayList(AbbrevAttr).init(di.allocator()),
             });
             const attrs = &result.items[result.len - 1].attrs;
 
             while (true) {
-                const attr_id = try leb.readULEB128(u64, &s.stream);
-                const form_id = try leb.readULEB128(u64, &s.stream);
+                const attr_id = try leb.readULEB128(u64, in);
+                const form_id = try leb.readULEB128(u64, in);
                 if (attr_id == 0 and form_id == 0) break;
                 try attrs.append(AbbrevAttr{
                     .attr_id = attr_id,
@@ -695,42 +700,44 @@ pub const DwarfInfo = struct {
     }
 
     fn getLineNumberInfo(di: *DwarfInfo, compile_unit: CompileUnit, target_address: usize) !debug.LineInfo {
-        var s = io.SliceSeekableInStream.init(di.debug_line);
+        var stream = io.fixedBufferStream(di.debug_line);
+        const in = &stream.inStream();
+        const seekable = &stream.seekableStream();
 
         const compile_unit_cwd = try compile_unit.die.getAttrString(di, AT_comp_dir);
         const line_info_offset = try compile_unit.die.getAttrSecOffset(AT_stmt_list);
 
-        try s.seekable_stream.seekTo(line_info_offset);
+        try seekable.seekTo(line_info_offset);
 
         var is_64: bool = undefined;
-        const unit_length = try readInitialLength(@TypeOf(s.stream.readFn).ReturnType.ErrorSet, &s.stream, &is_64);
+        const unit_length = try readInitialLength(in, &is_64);
         if (unit_length == 0) {
             return error.MissingDebugInfo;
         }
         const next_offset = unit_length + (if (is_64) @as(usize, 12) else @as(usize, 4));
 
-        const version = try s.stream.readInt(u16, di.endian);
+        const version = try in.readInt(u16, di.endian);
         // TODO support 3 and 5
         if (version != 2 and version != 4) return error.InvalidDebugInfo;
 
-        const prologue_length = if (is_64) try s.stream.readInt(u64, di.endian) else try s.stream.readInt(u32, di.endian);
-        const prog_start_offset = (try s.seekable_stream.getPos()) + prologue_length;
+        const prologue_length = if (is_64) try in.readInt(u64, di.endian) else try in.readInt(u32, di.endian);
+        const prog_start_offset = (try seekable.getPos()) + prologue_length;
 
-        const minimum_instruction_length = try s.stream.readByte();
+        const minimum_instruction_length = try in.readByte();
         if (minimum_instruction_length == 0) return error.InvalidDebugInfo;
 
         if (version >= 4) {
             // maximum_operations_per_instruction
-            _ = try s.stream.readByte();
+            _ = try in.readByte();
         }
 
-        const default_is_stmt = (try s.stream.readByte()) != 0;
-        const line_base = try s.stream.readByteSigned();
+        const default_is_stmt = (try in.readByte()) != 0;
+        const line_base = try in.readByteSigned();
 
-        const line_range = try s.stream.readByte();
+        const line_range = try in.readByte();
         if (line_range == 0) return error.InvalidDebugInfo;
 
-        const opcode_base = try s.stream.readByte();
+        const opcode_base = try in.readByte();
 
         const standard_opcode_lengths = try di.allocator().alloc(u8, opcode_base - 1);
         defer di.allocator().free(standard_opcode_lengths);
@@ -738,14 +745,14 @@ pub const DwarfInfo = struct {
         {
             var i: usize = 0;
             while (i < opcode_base - 1) : (i += 1) {
-                standard_opcode_lengths[i] = try s.stream.readByte();
+                standard_opcode_lengths[i] = try in.readByte();
             }
         }
 
         var include_directories = ArrayList([]const u8).init(di.allocator());
         try include_directories.append(compile_unit_cwd);
         while (true) {
-            const dir = try s.stream.readUntilDelimiterAlloc(di.allocator(), 0, math.maxInt(usize));
+            const dir = try in.readUntilDelimiterAlloc(di.allocator(), 0, math.maxInt(usize));
             if (dir.len == 0) break;
             try include_directories.append(dir);
         }
@@ -754,11 +761,11 @@ pub const DwarfInfo = struct {
         var prog = LineNumberProgram.init(default_is_stmt, include_directories.toSliceConst(), &file_entries, target_address);
 
         while (true) {
-            const file_name = try s.stream.readUntilDelimiterAlloc(di.allocator(), 0, math.maxInt(usize));
+            const file_name = try in.readUntilDelimiterAlloc(di.allocator(), 0, math.maxInt(usize));
             if (file_name.len == 0) break;
-            const dir_index = try leb.readULEB128(usize, &s.stream);
-            const mtime = try leb.readULEB128(usize, &s.stream);
-            const len_bytes = try leb.readULEB128(usize, &s.stream);
+            const dir_index = try leb.readULEB128(usize, in);
+            const mtime = try leb.readULEB128(usize, in);
+            const len_bytes = try leb.readULEB128(usize, in);
             try file_entries.append(FileEntry{
                 .file_name = file_name,
                 .dir_index = dir_index,
@@ -767,17 +774,17 @@ pub const DwarfInfo = struct {
             });
         }
 
-        try s.seekable_stream.seekTo(prog_start_offset);
+        try seekable.seekTo(prog_start_offset);
 
         const next_unit_pos = line_info_offset + next_offset;
 
-        while ((try s.seekable_stream.getPos()) < next_unit_pos) {
-            const opcode = try s.stream.readByte();
+        while ((try seekable.getPos()) < next_unit_pos) {
+            const opcode = try in.readByte();
 
             if (opcode == LNS_extended_op) {
-                const op_size = try leb.readULEB128(u64, &s.stream);
+                const op_size = try leb.readULEB128(u64, in);
                 if (op_size < 1) return error.InvalidDebugInfo;
-                var sub_op = try s.stream.readByte();
+                var sub_op = try in.readByte();
                 switch (sub_op) {
                     LNE_end_sequence => {
                         prog.end_sequence = true;
@@ -785,14 +792,14 @@ pub const DwarfInfo = struct {
                         prog.reset();
                     },
                     LNE_set_address => {
-                        const addr = try s.stream.readInt(usize, di.endian);
+                        const addr = try in.readInt(usize, di.endian);
                         prog.address = addr;
                     },
                     LNE_define_file => {
-                        const file_name = try s.stream.readUntilDelimiterAlloc(di.allocator(), 0, math.maxInt(usize));
-                        const dir_index = try leb.readULEB128(usize, &s.stream);
-                        const mtime = try leb.readULEB128(usize, &s.stream);
-                        const len_bytes = try leb.readULEB128(usize, &s.stream);
+                        const file_name = try in.readUntilDelimiterAlloc(di.allocator(), 0, math.maxInt(usize));
+                        const dir_index = try leb.readULEB128(usize, in);
+                        const mtime = try leb.readULEB128(usize, in);
+                        const len_bytes = try leb.readULEB128(usize, in);
                         try file_entries.append(FileEntry{
                             .file_name = file_name,
                             .dir_index = dir_index,
@@ -802,7 +809,7 @@ pub const DwarfInfo = struct {
                     },
                     else => {
                         const fwd_amt = math.cast(isize, op_size - 1) catch return error.InvalidDebugInfo;
-                        try s.seekable_stream.seekBy(fwd_amt);
+                        try seekable.seekBy(fwd_amt);
                     },
                 }
             } else if (opcode >= opcode_base) {
@@ -821,19 +828,19 @@ pub const DwarfInfo = struct {
                         prog.basic_block = false;
                     },
                     LNS_advance_pc => {
-                        const arg = try leb.readULEB128(usize, &s.stream);
+                        const arg = try leb.readULEB128(usize, in);
                         prog.address += arg * minimum_instruction_length;
                     },
                     LNS_advance_line => {
-                        const arg = try leb.readILEB128(i64, &s.stream);
+                        const arg = try leb.readILEB128(i64, in);
                         prog.line += arg;
                     },
                     LNS_set_file => {
-                        const arg = try leb.readULEB128(usize, &s.stream);
+                        const arg = try leb.readULEB128(usize, in);
                         prog.file = arg;
                     },
                     LNS_set_column => {
-                        const arg = try leb.readULEB128(u64, &s.stream);
+                        const arg = try leb.readULEB128(u64, in);
                         prog.column = arg;
                     },
                     LNS_negate_stmt => {
@@ -847,14 +854,14 @@ pub const DwarfInfo = struct {
                         prog.address += inc_addr;
                     },
                     LNS_fixed_advance_pc => {
-                        const arg = try s.stream.readInt(u16, di.endian);
+                        const arg = try in.readInt(u16, di.endian);
                         prog.address += arg;
                     },
                     LNS_set_prologue_end => {},
                     else => {
                         if (opcode - 1 >= standard_opcode_lengths.len) return error.InvalidDebugInfo;
                         const len_bytes = standard_opcode_lengths[opcode - 1];
-                        try s.seekable_stream.seekBy(len_bytes);
+                        try seekable.seekBy(len_bytes);
                     },
                 }
             }
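
The `io.fixedBufferStream` pattern used throughout this file looks roughly like this in isolation (a sketch, not part of the commit):

    test "fixedBufferStream sketch" {
        var stream = io.fixedBufferStream("\x01\x02\x03\x04");
        const in = &stream.inStream();
        const seekable = &stream.seekableStream();
        try seekable.seekTo(2);
        std.testing.expect((try in.readByte()) == 0x03);
    }
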
lib/std/elf.zig
@@ -333,6 +333,54 @@ pub const ET = extern enum(u16) {
 pub const SectionHeader = Elf64_Shdr;
 pub const ProgramHeader = Elf64_Phdr;
 
+const Header = struct {
+    endian: builtin.Endian,
+    is_64: bool,
+    entry: u64,
+    phoff: u64,
+    shoff: u64,
+    phentsize: u16,
+    phnum: u16,
+    shentsize: u16,
+    shnum: u16,
+    shstrndx: u16,
+};
+
+pub fn readHeader(in_stream: var) !Header {
+    var hdr_buf: [@sizeOf(Elf64_Ehdr)]u8 align(@alignOf(Elf64_Ehdr)) = undefined;
+    try in_stream.readNoEof(&hdr_buf);
+    const hdr32 = @ptrCast(*const Elf32_Ehdr, &hdr_buf);
+    const hdr64 = @ptrCast(*const Elf64_Ehdr, &hdr_buf);
+    if (!mem.eql(u8, hdr32.e_ident[0..4], "\x7fELF")) return error.InvalidElfMagic;
+    if (hdr32.e_ident[EI_VERSION] != 1) return error.InvalidElfVersion;
+
+    const endian: builtin.Endian = switch (hdr32.e_ident[EI_DATA]) {
+        ELFDATA2LSB => .Little,
+        ELFDATA2MSB => .Big,
+        else => return error.InvalidElfEndian,
+    };
+    const need_bswap = endian != std.builtin.endian;
+
+    const is_64 = switch (hdr32.e_ident[EI_CLASS]) {
+        ELFCLASS32 => false,
+        ELFCLASS64 => true,
+        else => return error.InvalidElfClass,
+    };
+
+    return @as(Header, .{
+        .endian = endian,
+        .is_64 = is_64,
+        .entry = int(is_64, need_bswap, hdr32.e_entry, hdr64.e_entry),
+        .phoff = int(is_64, need_bswap, hdr32.e_phoff, hdr64.e_phoff),
+        .shoff = int(is_64, need_bswap, hdr32.e_shoff, hdr64.e_shoff),
+        .phentsize = int(is_64, need_bswap, hdr32.e_phentsize, hdr64.e_phentsize),
+        .phnum = int(is_64, need_bswap, hdr32.e_phnum, hdr64.e_phnum),
+        .shentsize = int(is_64, need_bswap, hdr32.e_shentsize, hdr64.e_shentsize),
+        .shnum = int(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum),
+        .shstrndx = int(is_64, need_bswap, hdr32.e_shstrndx, hdr64.e_shstrndx),
+    });
+}
+
 pub const Elf = struct {
     seekable_stream: *io.SeekableStream(anyerror, anyerror),
     in_stream: *io.InStream(anyerror),
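
A hedged sketch of how the new `readHeader` might be called (`printEntryPoint` and `file` are assumptions, not part of the commit):

    fn printEntryPoint(file: std.fs.File) !void {
        const hdr = try readHeader(file.inStream());
        std.debug.warn("entry: 0x{x}, {} section headers\n", .{ hdr.entry, hdr.shnum });
    }
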
lib/std/fs.zig
@@ -1150,7 +1150,7 @@ pub const Dir = struct {
         const buf = try allocator.alignedAlloc(u8, A, size);
         errdefer allocator.free(buf);
 
-        try file.inStream().stream.readNoEof(buf);
+        try file.inStream().readNoEof(buf);
         return buf;
     }
 
lib/std/io.zig
@@ -93,10 +93,46 @@ pub fn getStdIn() File {
 }
 
 pub const SeekableStream = @import("io/seekable_stream.zig").SeekableStream;
-pub const SliceSeekableInStream = @import("io/seekable_stream.zig").SliceSeekableInStream;
-pub const COutStream = @import("io/c_out_stream.zig").COutStream;
 pub const InStream = @import("io/in_stream.zig").InStream;
 pub const OutStream = @import("io/out_stream.zig").OutStream;
+pub const FixedBufferInStream = @import("io/fixed_buffer_stream.zig").FixedBufferInStream;
+pub const BufferedAtomicFile = @import("io/buffered_atomic_file.zig").BufferedAtomicFile;
+
+pub const BufferedOutStream = @import("io/buffered_out_stream.zig").BufferedOutStream;
+pub const BufferedOutStreamCustom = @import("io/buffered_out_stream.zig").BufferedOutStreamCustom;
+pub const bufferedOutStream = @import("io/buffered_out_stream.zig").bufferedOutStream;
+
+pub const CountingOutStream = @import("io/counting_out_stream.zig").CountingOutStream;
+
+pub fn fixedBufferStream(bytes: []const u8) FixedBufferInStream {
+    return FixedBufferInStream{ .bytes = bytes, .pos = 0 };
+}
+
+pub fn cOutStream(c_file: *std.c.FILE) COutStream {
+    return .{ .context = c_file };
+}
+
+pub const COutStream = OutStream(*std.c.FILE, std.fs.File.WriteError, cOutStreamWrite);
+
+pub fn cOutStreamWrite(c_file: *std.c.FILE, bytes: []const u8) std.fs.File.WriteError!usize {
+    const amt_written = std.c.fwrite(bytes.ptr, 1, bytes.len, c_file);
+    if (amt_written >= 0) return amt_written;
+    switch (std.c._errno().*) {
+        0 => unreachable,
+        os.EINVAL => unreachable,
+        os.EFAULT => unreachable,
+        os.EAGAIN => unreachable, // this is a blocking API
+        os.EBADF => unreachable, // always a race condition
+        os.EDESTADDRREQ => unreachable, // connect was never called
+        os.EDQUOT => return error.DiskQuota,
+        os.EFBIG => return error.FileTooBig,
+        os.EIO => return error.InputOutput,
+        os.ENOSPC => return error.NoSpaceLeft,
+        os.EPERM => return error.AccessDenied,
+        os.EPIPE => return error.BrokenPipe,
+        else => |err| return os.unexpectedErrno(@intCast(usize, err)),
+    }
+}
 
 /// Deprecated; use `std.fs.Dir.writeFile`.
 pub fn writeFile(path: []const u8, data: []const u8) !void {
@@ -495,139 +531,18 @@ test "io.SliceOutStream" {
     testing.expectEqualSlices(u8, "HelloWorld!", slice_stream.getWritten());
 }
 
-var null_out_stream_state = NullOutStream.init();
-pub const null_out_stream = &null_out_stream_state.stream;
-
 /// An OutStream that doesn't write to anything.
-pub const NullOutStream = struct {
-    pub const Error = error{};
-    pub const Stream = OutStream(Error);
-
-    stream: Stream,
-
-    pub fn init() NullOutStream {
-        return NullOutStream{
-            .stream = Stream{ .writeFn = writeFn },
-        };
-    }
-
-    fn writeFn(out_stream: *Stream, bytes: []const u8) Error!usize {
-        return bytes.len;
-    }
-};
+pub const null_out_stream = @as(NullOutStream, .{ .context = {} });
 
-test "io.NullOutStream" {
-    var null_stream = NullOutStream.init();
-    const stream = &null_stream.stream;
-    stream.write("yay" ** 10000) catch unreachable;
+const NullOutStream = OutStream(void, error{}, dummyWrite);
+fn dummyWrite(context: void, data: []const u8) error{}!usize {
+    return data.len;
 }
 
-/// An OutStream that counts how many bytes has been written to it.
-pub fn CountingOutStream(comptime OutStreamError: type) type {
-    return struct {
-        const Self = @This();
-        pub const Stream = OutStream(Error);
-        pub const Error = OutStreamError;
-
-        stream: Stream,
-        bytes_written: u64,
-        child_stream: *Stream,
-
-        pub fn init(child_stream: *Stream) Self {
-            return Self{
-                .stream = Stream{ .writeFn = writeFn },
-                .bytes_written = 0,
-                .child_stream = child_stream,
-            };
-        }
-
-        fn writeFn(out_stream: *Stream, bytes: []const u8) OutStreamError!usize {
-            const self = @fieldParentPtr(Self, "stream", out_stream);
-            try self.child_stream.write(bytes);
-            self.bytes_written += bytes.len;
-            return bytes.len;
-        }
-    };
-}
-
-test "io.CountingOutStream" {
-    var null_stream = NullOutStream.init();
-    var counting_stream = CountingOutStream(NullOutStream.Error).init(&null_stream.stream);
-    const stream = &counting_stream.stream;
-
-    const bytes = "yay" ** 10000;
-    stream.write(bytes) catch unreachable;
-    testing.expect(counting_stream.bytes_written == bytes.len);
-}
-
-pub fn BufferedOutStream(comptime Error: type) type {
-    return BufferedOutStreamCustom(mem.page_size, Error);
-}
-
-pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamError: type) type {
-    return struct {
-        const Self = @This();
-        pub const Stream = OutStream(Error);
-        pub const Error = OutStreamError;
-
-        stream: Stream,
-
-        unbuffered_out_stream: *Stream,
-
-        const FifoType = std.fifo.LinearFifo(u8, std.fifo.LinearFifoBufferType{ .Static = buffer_size });
-        fifo: FifoType,
-
-        pub fn init(unbuffered_out_stream: *Stream) Self {
-            return Self{
-                .unbuffered_out_stream = unbuffered_out_stream,
-                .fifo = FifoType.init(),
-                .stream = Stream{ .writeFn = writeFn },
-            };
-        }
-
-        pub fn flush(self: *Self) !void {
-            while (true) {
-                const slice = self.fifo.readableSlice(0);
-                if (slice.len == 0) break;
-                try self.unbuffered_out_stream.write(slice);
-                self.fifo.discard(slice.len);
-            }
-        }
-
-        fn writeFn(out_stream: *Stream, bytes: []const u8) Error!usize {
-            const self = @fieldParentPtr(Self, "stream", out_stream);
-            if (bytes.len >= self.fifo.writableLength()) {
-                try self.flush();
-                return self.unbuffered_out_stream.writeOnce(bytes);
-            }
-            self.fifo.writeAssumeCapacity(bytes);
-            return bytes.len;
-        }
-    };
+test "null_out_stream" {
+    null_out_stream.writeAll("yay" ** 1000) catch |err| switch (err) {};
 }
 
-/// Implementation of OutStream trait for Buffer
-pub const BufferOutStream = struct {
-    buffer: *Buffer,
-    stream: Stream,
-
-    pub const Error = error{OutOfMemory};
-    pub const Stream = OutStream(Error);
-
-    pub fn init(buffer: *Buffer) BufferOutStream {
-        return BufferOutStream{
-            .buffer = buffer,
-            .stream = Stream{ .writeFn = writeFn },
-        };
-    }
-
-    fn writeFn(out_stream: *Stream, bytes: []const u8) !usize {
-        const self = @fieldParentPtr(BufferOutStream, "stream", out_stream);
-        try self.buffer.append(bytes);
-        return bytes.len;
-    }
-};
-
 /// Creates a stream which allows for writing bit fields to another stream
 pub fn BitOutStream(endian: builtin.Endian, comptime Error: type) type {
     return struct {
@@ -752,52 +667,11 @@ pub fn BitOutStream(endian: builtin.Endian, comptime Error: type) type {
                 return buffer.len;
             }
 
-            return self.out_stream.writeOnce(buffer);
+            return self.out_stream.write(buffer);
         }
     };
 }
 
-pub const BufferedAtomicFile = struct {
-    atomic_file: fs.AtomicFile,
-    file_stream: File.OutStream,
-    buffered_stream: BufferedOutStream(File.WriteError),
-    allocator: *mem.Allocator,
-
-    pub fn create(allocator: *mem.Allocator, dest_path: []const u8) !*BufferedAtomicFile {
-        // TODO with well defined copy elision we don't need this allocation
-        var self = try allocator.create(BufferedAtomicFile);
-        self.* = BufferedAtomicFile{
-            .atomic_file = undefined,
-            .file_stream = undefined,
-            .buffered_stream = undefined,
-            .allocator = allocator,
-        };
-        errdefer allocator.destroy(self);
-
-        self.atomic_file = try fs.AtomicFile.init(dest_path, File.default_mode);
-        errdefer self.atomic_file.deinit();
-
-        self.file_stream = self.atomic_file.file.outStream();
-        self.buffered_stream = BufferedOutStream(File.WriteError).init(&self.file_stream.stream);
-        return self;
-    }
-
-    /// always call destroy, even after successful finish()
-    pub fn destroy(self: *BufferedAtomicFile) void {
-        self.atomic_file.deinit();
-        self.allocator.destroy(self);
-    }
-
-    pub fn finish(self: *BufferedAtomicFile) !void {
-        try self.buffered_stream.flush();
-        try self.atomic_file.finish();
-    }
-
-    pub fn stream(self: *BufferedAtomicFile) *OutStream(File.WriteError) {
-        return &self.buffered_stream.stream;
-    }
-};
-
 pub const Packing = enum {
     /// Pack data to byte alignment
     Byte,
@@ -1129,8 +1003,9 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
     };
 }
 
-test "import io tests" {
+test "" {
     comptime {
         _ = @import("io/test.zig");
     }
+    std.meta.refAllDecls(@This());
 }
lib/std/os.zig
@@ -176,7 +176,7 @@ fn getRandomBytesDevURandom(buf: []u8) !void {
         .io_mode = .blocking,
         .async_block_allowed = std.fs.File.async_block_allowed_yes,
     };
-    const stream = &file.inStream().stream;
+    const stream = file.inStream();
     stream.readNoEof(buf) catch return error.Unexpected;
 }
 
lib/std/pdb.zig
@@ -632,11 +632,7 @@ const MsfStream = struct {
     blocks: []u32 = undefined,
     block_size: u32 = undefined,
 
-    /// Implementation of InStream trait for Pdb.MsfStream
-    stream: Stream = undefined,
-
     pub const Error = @TypeOf(read).ReturnType.ErrorSet;
-    pub const Stream = io.InStream(Error);
 
     fn init(block_size: u32, file: File, blocks: []u32) MsfStream {
         const stream = MsfStream{
@@ -644,7 +640,6 @@ const MsfStream = struct {
             .pos = 0,
             .blocks = blocks,
             .block_size = block_size,
-            .stream = Stream{ .readFn = readFn },
         };
 
         return stream;
@@ -715,8 +710,7 @@ const MsfStream = struct {
         return block * self.block_size + offset;
     }
 
-    fn readFn(in_stream: *Stream, buffer: []u8) Error!usize {
-        const self = @fieldParentPtr(MsfStream, "stream", in_stream);
-        return self.read(buffer);
+    fn inStream(self: *MsfStream) std.io.InStream(*MsfStream, Error, read) {
+        return .{ .context = self };
     }
 };
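
Usage within pdb.zig now goes through the returned adapter directly, roughly like this (sketch; `readStreamSignature` is a made-up name):

    fn readStreamSignature(msf: *MsfStream) !u32 {
        return msf.inStream().readIntLittle(u32);
    }
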
src-self-hosted/libc_installation.zig
@@ -38,7 +38,7 @@ pub const LibCInstallation = struct {
     pub fn parse(
         allocator: *Allocator,
         libc_file: []const u8,
-        stderr: *std.io.OutStream(fs.File.WriteError),
+        stderr: var,
     ) !LibCInstallation {
         var self: LibCInstallation = .{};
 
@@ -123,7 +123,7 @@ pub const LibCInstallation = struct {
         return self;
     }
 
-    pub fn render(self: LibCInstallation, out: *std.io.OutStream(fs.File.WriteError)) !void {
+    pub fn render(self: LibCInstallation, out: var) !void {
         @setEvalBranchQuota(4000);
         const include_dir = self.include_dir orelse "";
         const sys_include_dir = self.sys_include_dir orelse "";
src-self-hosted/print_targets.zig
@@ -52,7 +52,7 @@ const available_libcs = [_][]const u8{
     "sparc-linux-gnu",
     "sparcv9-linux-gnu",
     "wasm32-freestanding-musl",
-    "x86_64-linux-gnu (native)",
+    "x86_64-linux-gnu",
     "x86_64-linux-gnux32",
     "x86_64-linux-musl",
     "x86_64-windows-gnu",
@@ -61,7 +61,8 @@ const available_libcs = [_][]const u8{
 pub fn cmdTargets(
     allocator: *Allocator,
     args: []const []const u8,
-    stdout: *io.OutStream(fs.File.WriteError),
+    /// Output stream
+    stdout: var,
     native_target: Target,
 ) !void {
     const available_glibcs = blk: {
@@ -92,9 +93,9 @@ pub fn cmdTargets(
     };
     defer allocator.free(available_glibcs);
 
-    const BOS = io.BufferedOutStream(fs.File.WriteError);
-    var bos = BOS.init(stdout);
-    var jws = std.json.WriteStream(BOS.Stream, 6).init(&bos.stream);
+    var bos = io.bufferedOutStream(4096, stdout);
+    const bos_stream = bos.outStream();
+    var jws = std.json.WriteStream(@TypeOf(bos_stream), 6).init(bos_stream);
 
     try jws.beginObject();
 
@@ -219,6 +220,6 @@ pub fn cmdTargets(
 
     try jws.endObject();
 
-    try bos.stream.writeByte('\n');
+    try bos_stream.writeByte('\n');
     return bos.flush();
 }
src-self-hosted/stage2.zig
@@ -18,8 +18,8 @@ const assert = std.debug.assert;
 const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
 
 var stderr_file: fs.File = undefined;
-var stderr: *io.OutStream(fs.File.WriteError) = undefined;
-var stdout: *io.OutStream(fs.File.WriteError) = undefined;
+var stderr: fs.File.OutStream = undefined;
+var stdout: fs.File.OutStream = undefined;
 
 comptime {
     _ = @import("dep_tokenizer.zig");
@@ -146,7 +146,7 @@ export fn stage2_free_clang_errors(errors_ptr: [*]translate_c.ClangErrMsg, error
 }
 
 export fn stage2_render_ast(tree: *ast.Tree, output_file: *FILE) Error {
-    const c_out_stream = &std.io.COutStream.init(output_file).stream;
+    const c_out_stream = std.io.cOutStream(output_file);
     _ = std.zig.render(std.heap.c_allocator, c_out_stream, tree) catch |e| switch (e) {
         error.WouldBlock => unreachable, // stage1 opens stuff in exclusively blocking mode
         error.SystemResources => return .SystemResources,
@@ -186,9 +186,9 @@ fn fmtMain(argc: c_int, argv: [*]const [*:0]const u8) !void {
         try args_list.append(mem.toSliceConst(u8, argv[arg_i]));
     }
 
-    stdout = &std.io.getStdOut().outStream().stream;
+    stdout = std.io.getStdOut().outStream();
     stderr_file = std.io.getStdErr();
-    stderr = &stderr_file.outStream().stream;
+    stderr = stderr_file.outStream();
 
     const args = args_list.toSliceConst()[2..];
 
@@ -203,11 +203,11 @@ fn fmtMain(argc: c_int, argv: [*]const [*:0]const u8) !void {
             const arg = args[i];
             if (mem.startsWith(u8, arg, "-")) {
                 if (mem.eql(u8, arg, "--help")) {
-                    try stdout.write(self_hosted_main.usage_fmt);
+                    try stdout.writeAll(self_hosted_main.usage_fmt);
                     process.exit(0);
                 } else if (mem.eql(u8, arg, "--color")) {
                     if (i + 1 >= args.len) {
-                        try stderr.write("expected [auto|on|off] after --color\n");
+                        try stderr.writeAll("expected [auto|on|off] after --color\n");
                         process.exit(1);
                     }
                     i += 1;
@@ -238,14 +238,14 @@ fn fmtMain(argc: c_int, argv: [*]const [*:0]const u8) !void {
 
     if (stdin_flag) {
         if (input_files.len != 0) {
-            try stderr.write("cannot use --stdin with positional arguments\n");
+            try stderr.writeAll("cannot use --stdin with positional arguments\n");
             process.exit(1);
         }
 
         const stdin_file = io.getStdIn();
         var stdin = stdin_file.inStream();
 
-        const source_code = try stdin.stream.readAllAlloc(allocator, self_hosted_main.max_src_size);
+        const source_code = try stdin.readAllAlloc(allocator, self_hosted_main.max_src_size);
         defer allocator.free(source_code);
 
         const tree = std.zig.parse(allocator, source_code) catch |err| {
@@ -272,7 +272,7 @@ fn fmtMain(argc: c_int, argv: [*]const [*:0]const u8) !void {
     }
 
     if (input_files.len == 0) {
-        try stderr.write("expected at least one source file argument\n");
+        try stderr.writeAll("expected at least one source file argument\n");
         process.exit(1);
     }
 
@@ -409,11 +409,11 @@ fn printErrMsgToFile(
     const end_loc = tree.tokenLocationPtr(first_token.end, last_token);
 
     var text_buf = try std.Buffer.initSize(allocator, 0);
-    var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
+    const out_stream = &text_buf.outStream();
     try parse_error.render(&tree.tokens, out_stream);
     const text = text_buf.toOwnedSlice();
 
-    const stream = &file.outStream().stream;
+    const stream = &file.outStream();
     try stream.print("{}:{}:{}: error: {}\n", .{ path, start_loc.line + 1, start_loc.column + 1, text });
 
     if (!color_on) return;
@@ -641,7 +641,7 @@ fn cmdTargets(zig_triple: [*:0]const u8) !void {
     return @import("print_targets.zig").cmdTargets(
         std.heap.c_allocator,
         &[0][]u8{},
-        &std.io.getStdOut().outStream().stream,
+        std.io.getStdOut().outStream(),
         target,
     );
 }
@@ -808,7 +808,7 @@ const Stage2LibCInstallation = extern struct {
 // ABI warning
 export fn stage2_libc_parse(stage1_libc: *Stage2LibCInstallation, libc_file_z: [*:0]const u8) Error {
     stderr_file = std.io.getStdErr();
-    stderr = &stderr_file.outStream().stream;
+    stderr = stderr_file.outStream();
     const libc_file = mem.toSliceConst(u8, libc_file_z);
     var libc = LibCInstallation.parse(std.heap.c_allocator, libc_file, stderr) catch |err| switch (err) {
         error.ParseError => return .SemanticAnalyzeFail,
@@ -870,7 +870,7 @@ export fn stage2_libc_find_native(stage1_libc: *Stage2LibCInstallation) Error {
 // ABI warning
 export fn stage2_libc_render(stage1_libc: *Stage2LibCInstallation, output_file: *FILE) Error {
     var libc = stage1_libc.toStage2();
-    const c_out_stream = &std.io.COutStream.init(output_file).stream;
+    const c_out_stream = std.io.cOutStream(output_file);
     libc.render(c_out_stream) catch |err| switch (err) {
         error.WouldBlock => unreachable, // stage1 opens stuff in exclusively blocking mode
         error.SystemResources => return .SystemResources,