Commit 196e36bbb2

Andrew Kelley <andrew@ziglang.org>
2025-08-05 08:51:45
std: remove BoundedArray
This use case is handled by ArrayListUnmanaged via the "...Bounded" method variants. Sharing machine code is also more efficient than generating multiple versions of each function for differing array lengths.
1 parent c47ec4f
doc/langref/test_switch_dispatch_loop.zig
@@ -8,20 +8,22 @@ const Instruction = enum {
 };
 
 fn evaluate(initial_stack: []const i32, code: []const Instruction) !i32 {
-    var stack = try std.BoundedArray(i32, 8).fromSlice(initial_stack);
+    var buffer: [8]i32 = undefined;
+    var stack = std.ArrayListUnmanaged(i32).initBuffer(&buffer);
+    try stack.appendSliceBounded(initial_stack);
     var ip: usize = 0;
 
     return vm: switch (code[ip]) {
         // Because all code after `continue` is unreachable, this branch does
         // not provide a result.
         .add => {
-            try stack.append(stack.pop().? + stack.pop().?);
+            try stack.appendBounded(stack.pop().? + stack.pop().?);
 
             ip += 1;
             continue :vm code[ip];
         },
         .mul => {
-            try stack.append(stack.pop().? * stack.pop().?);
+            try stack.appendBounded(stack.pop().? * stack.pop().?);
 
             ip += 1;
             continue :vm code[ip];
lib/docs/wasm/markdown/Parser.zig
@@ -29,13 +29,14 @@ const Node = Document.Node;
 const ExtraIndex = Document.ExtraIndex;
 const ExtraData = Document.ExtraData;
 const StringIndex = Document.StringIndex;
+const ArrayList = std.ArrayListUnmanaged;
 
 nodes: Node.List = .{},
-extra: std.ArrayListUnmanaged(u32) = .empty,
-scratch_extra: std.ArrayListUnmanaged(u32) = .empty,
-string_bytes: std.ArrayListUnmanaged(u8) = .empty,
-scratch_string: std.ArrayListUnmanaged(u8) = .empty,
-pending_blocks: std.ArrayListUnmanaged(Block) = .empty,
+extra: ArrayList(u32) = .empty,
+scratch_extra: ArrayList(u32) = .empty,
+string_bytes: ArrayList(u8) = .empty,
+scratch_string: ArrayList(u8) = .empty,
+pending_blocks: ArrayList(Block) = .empty,
 allocator: Allocator,
 
 const Parser = @This();
@@ -86,7 +87,8 @@ const Block = struct {
             continuation_indent: usize,
         },
         table: struct {
-            column_alignments: std.BoundedArray(Node.TableCellAlignment, max_table_columns) = .{},
+            column_alignments_buffer: [max_table_columns]Node.TableCellAlignment,
+            column_alignments_len: usize,
         },
         heading: struct {
             /// Between 1 and 6, inclusive.
@@ -354,7 +356,8 @@ const BlockStart = struct {
             continuation_indent: usize,
         },
         table_row: struct {
-            cells: std.BoundedArray([]const u8, max_table_columns),
+            cells_buffer: [max_table_columns][]const u8,
+            cells_len: usize,
         },
         heading: struct {
             /// Between 1 and 6, inclusive.
@@ -422,7 +425,8 @@ fn appendBlockStart(p: *Parser, block_start: BlockStart) !void {
             try p.pending_blocks.append(p.allocator, .{
                 .tag = .table,
                 .data = .{ .table = .{
-                    .column_alignments = .{},
+                    .column_alignments_buffer = undefined,
+                    .column_alignments_len = 0,
                 } },
                 .string_start = p.scratch_string.items.len,
                 .extra_start = p.scratch_extra.items.len,
@@ -431,15 +435,19 @@ fn appendBlockStart(p: *Parser, block_start: BlockStart) !void {
 
         const current_row = p.scratch_extra.items.len - p.pending_blocks.getLast().extra_start;
         if (current_row <= 1) {
-            if (parseTableHeaderDelimiter(block_start.data.table_row.cells)) |alignments| {
-                p.pending_blocks.items[p.pending_blocks.items.len - 1].data.table.column_alignments = alignments;
+            var buffer: [max_table_columns]Node.TableCellAlignment = undefined;
+            const table_row = &block_start.data.table_row;
+            if (parseTableHeaderDelimiter(table_row.cells_buffer[0..table_row.cells_len], &buffer)) |alignments| {
+                const table = &p.pending_blocks.items[p.pending_blocks.items.len - 1].data.table;
+                @memcpy(table.column_alignments_buffer[0..alignments.len], alignments);
+                table.column_alignments_len = alignments.len;
                 if (current_row == 1) {
                     // We need to go back and mark the header row and its column
                     // alignments.
                     const datas = p.nodes.items(.data);
                     const header_data = datas[p.scratch_extra.getLast()];
                     for (p.extraChildren(header_data.container.children), 0..) |header_cell, i| {
-                        const alignment = if (i < alignments.len) alignments.buffer[i] else .unset;
+                        const alignment = if (i < alignments.len) alignments[i] else .unset;
                         const cell_data = &datas[@intFromEnum(header_cell)].table_cell;
                         cell_data.info.alignment = alignment;
                         cell_data.info.header = true;
@@ -480,8 +488,10 @@ fn appendBlockStart(p: *Parser, block_start: BlockStart) !void {
         // available in the BlockStart. We can immediately parse and append
         // these children now.
         const containing_table = p.pending_blocks.items[p.pending_blocks.items.len - 2];
-        const column_alignments = containing_table.data.table.column_alignments.slice();
-        for (block_start.data.table_row.cells.slice(), 0..) |cell_content, i| {
+        const table = &containing_table.data.table;
+        const column_alignments = table.column_alignments_buffer[0..table.column_alignments_len];
+        const table_row = &block_start.data.table_row;
+        for (table_row.cells_buffer[0..table_row.cells_len], 0..) |cell_content, i| {
             const cell_children = try p.parseInlines(cell_content);
             const alignment = if (i < column_alignments.len) column_alignments[i] else .unset;
             const cell = try p.addNode(.{
@@ -523,7 +533,8 @@ fn startBlock(p: *Parser, line: []const u8) !?BlockStart {
         return .{
             .tag = .table_row,
             .data = .{ .table_row = .{
-                .cells = table_row.cells,
+                .cells_buffer = table_row.cells_buffer,
+                .cells_len = table_row.cells_len,
             } },
             .rest = "",
         };
@@ -606,7 +617,8 @@ fn startListItem(unindented_line: []const u8) ?ListItemStart {
 }
 
 const TableRowStart = struct {
-    cells: std.BoundedArray([]const u8, max_table_columns),
+    cells_buffer: [max_table_columns][]const u8,
+    cells_len: usize,
 };
 
 fn startTableRow(unindented_line: []const u8) ?TableRowStart {
@@ -615,7 +627,8 @@ fn startTableRow(unindented_line: []const u8) ?TableRowStart {
         mem.endsWith(u8, unindented_line, "\\|") or
         !mem.endsWith(u8, unindented_line, "|")) return null;
 
-    var cells: std.BoundedArray([]const u8, max_table_columns) = .{};
+    var cells_buffer: [max_table_columns][]const u8 = undefined;
+    var cells: ArrayList([]const u8) = .initBuffer(&cells_buffer);
     const table_row_content = unindented_line[1 .. unindented_line.len - 1];
     var cell_start: usize = 0;
     var i: usize = 0;
@@ -623,7 +636,7 @@ fn startTableRow(unindented_line: []const u8) ?TableRowStart {
         switch (table_row_content[i]) {
             '\\' => i += 1,
             '|' => {
-                cells.append(table_row_content[cell_start..i]) catch return null;
+                cells.appendBounded(table_row_content[cell_start..i]) catch return null;
                 cell_start = i + 1;
             },
             '`' => {
@@ -641,20 +654,21 @@ fn startTableRow(unindented_line: []const u8) ?TableRowStart {
             else => {},
         }
     }
-    cells.append(table_row_content[cell_start..]) catch return null;
+    cells.appendBounded(table_row_content[cell_start..]) catch return null;
 
-    return .{ .cells = cells };
+    return .{ .cells_buffer = cells_buffer, .cells_len = cells.items.len };
 }
 
 fn parseTableHeaderDelimiter(
-    row_cells: std.BoundedArray([]const u8, max_table_columns),
-) ?std.BoundedArray(Node.TableCellAlignment, max_table_columns) {
-    var alignments: std.BoundedArray(Node.TableCellAlignment, max_table_columns) = .{};
-    for (row_cells.slice()) |content| {
+    row_cells: []const []const u8,
+    buffer: []Node.TableCellAlignment,
+) ?[]Node.TableCellAlignment {
+    var alignments: ArrayList(Node.TableCellAlignment) = .initBuffer(buffer);
+    for (row_cells) |content| {
         const alignment = parseTableHeaderDelimiterCell(content) orelse return null;
         alignments.appendAssumeCapacity(alignment);
     }
-    return alignments;
+    return alignments.items;
 }
 
 fn parseTableHeaderDelimiterCell(content: []const u8) ?Node.TableCellAlignment {
@@ -928,8 +942,8 @@ const InlineParser = struct {
     parent: *Parser,
     content: []const u8,
     pos: usize = 0,
-    pending_inlines: std.ArrayListUnmanaged(PendingInline) = .empty,
-    completed_inlines: std.ArrayListUnmanaged(CompletedInline) = .empty,
+    pending_inlines: ArrayList(PendingInline) = .empty,
+    completed_inlines: ArrayList(CompletedInline) = .empty,
 
     const PendingInline = struct {
         tag: Tag,
lib/std/Io/Reader/test.zig
@@ -349,24 +349,3 @@ test "streamUntilDelimiter writes all bytes without delimiter to the output" {
 
     try std.testing.expectError(error.StreamTooLong, reader.streamUntilDelimiter(writer, '!', 5));
 }
-
-test "readBoundedBytes correctly reads into a new bounded array" {
-    const test_string = "abcdefg";
-    var fis = std.io.fixedBufferStream(test_string);
-    const reader = fis.reader();
-
-    var array = try reader.readBoundedBytes(10000);
-    try testing.expectEqualStrings(array.slice(), test_string);
-}
-
-test "readIntoBoundedBytes correctly reads into a provided bounded array" {
-    const test_string = "abcdefg";
-    var fis = std.io.fixedBufferStream(test_string);
-    const reader = fis.reader();
-
-    var bounded_array = std.BoundedArray(u8, 10000){};
-
-    // compile time error if the size is not the same at the provided `bounded.capacity()`
-    try reader.readIntoBoundedBytes(10000, &bounded_array);
-    try testing.expectEqualStrings(bounded_array.slice(), test_string);
-}
lib/std/Io/DeprecatedReader.zig
@@ -249,33 +249,6 @@ pub fn readBytesNoEof(self: Self, comptime num_bytes: usize) anyerror![num_bytes
     return bytes;
 }
 
-/// Reads bytes until `bounded.len` is equal to `num_bytes`,
-/// or the stream ends.
-///
-/// * it is assumed that `num_bytes` will not exceed `bounded.capacity()`
-pub fn readIntoBoundedBytes(
-    self: Self,
-    comptime num_bytes: usize,
-    bounded: *std.BoundedArray(u8, num_bytes),
-) anyerror!void {
-    while (bounded.len < num_bytes) {
-        // get at most the number of bytes free in the bounded array
-        const bytes_read = try self.read(bounded.unusedCapacitySlice());
-        if (bytes_read == 0) return;
-
-        // bytes_read will never be larger than @TypeOf(bounded.len)
-        // due to `self.read` being bounded by `bounded.unusedCapacitySlice()`
-        bounded.len += @as(@TypeOf(bounded.len), @intCast(bytes_read));
-    }
-}
-
-/// Reads at most `num_bytes` and returns as a bounded array.
-pub fn readBoundedBytes(self: Self, comptime num_bytes: usize) anyerror!std.BoundedArray(u8, num_bytes) {
-    var result = std.BoundedArray(u8, num_bytes){};
-    try self.readIntoBoundedBytes(num_bytes, &result);
-    return result;
-}
-
 pub inline fn readInt(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
     const bytes = try self.readBytesNoEof(@divExact(@typeInfo(T).int.bits, 8));
     return mem.readInt(T, &bytes, endian);
lib/std/array_list.zig
@@ -657,6 +657,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
 
         /// Initialize with externally-managed memory. The buffer determines the
         /// capacity, and the length is set to zero.
+        ///
         /// When initialized this way, all functions that accept an Allocator
         /// argument cause illegal behavior.
         pub fn initBuffer(buffer: Slice) Self {
lib/std/base64.zig
@@ -517,17 +517,21 @@ fn testAllApis(codecs: Codecs, expected_decoded: []const u8, expected_encoded: [
         var buffer: [0x100]u8 = undefined;
         const encoded = codecs.Encoder.encode(&buffer, expected_decoded);
         try testing.expectEqualSlices(u8, expected_encoded, encoded);
-
+    }
+    {
         // stream encode
-        var list = try std.BoundedArray(u8, 0x100).init(0);
-        try codecs.Encoder.encodeWriter(list.writer(), expected_decoded);
-        try testing.expectEqualSlices(u8, expected_encoded, list.slice());
-
+        var buffer: [0x100]u8 = undefined;
+        var writer: std.Io.Writer = .fixed(&buffer);
+        try codecs.Encoder.encodeWriter(&writer, expected_decoded);
+        try testing.expectEqualSlices(u8, expected_encoded, writer.buffered());
+    }
+    {
         // reader to writer encode
-        var stream = std.io.fixedBufferStream(expected_decoded);
-        list = try std.BoundedArray(u8, 0x100).init(0);
-        try codecs.Encoder.encodeFromReaderToWriter(list.writer(), stream.reader());
-        try testing.expectEqualSlices(u8, expected_encoded, list.slice());
+        var stream: std.Io.Reader = .fixed(expected_decoded);
+        var buffer: [0x100]u8 = undefined;
+        var writer: std.Io.Writer = .fixed(&buffer);
+        try codecs.Encoder.encodeFromReaderToWriter(&writer, &stream);
+        try testing.expectEqualSlices(u8, expected_encoded, writer.buffered());
     }
 
     // Base64Decoder
lib/std/bounded_array.zig
@@ -1,412 +0,0 @@
-const std = @import("std.zig");
-const assert = std.debug.assert;
-const mem = std.mem;
-const testing = std.testing;
-const Alignment = std.mem.Alignment;
-
-/// A structure with an array and a length, that can be used as a slice.
-///
-/// Useful to pass around small arrays whose exact size is only known at
-/// runtime, but whose maximum size is known at comptime, without requiring
-/// an `Allocator`.
-///
-/// ```zig
-/// var actual_size = 32;
-/// var a = try BoundedArray(u8, 64).init(actual_size);
-/// var slice = a.slice(); // a slice of the 64-byte array
-/// var a_clone = a; // creates a copy - the structure doesn't use any internal pointers
-/// ```
-pub fn BoundedArray(comptime T: type, comptime buffer_capacity: usize) type {
-    return BoundedArrayAligned(T, .of(T), buffer_capacity);
-}
-
-/// A structure with an array, length and alignment, that can be used as a
-/// slice.
-///
-/// Useful to pass around small explicitly-aligned arrays whose exact size is
-/// only known at runtime, but whose maximum size is known at comptime, without
-/// requiring an `Allocator`.
-/// ```zig
-//  var a = try BoundedArrayAligned(u8, 16, 2).init(0);
-//  try a.append(255);
-//  try a.append(255);
-//  const b = @ptrCast(*const [1]u16, a.constSlice().ptr);
-//  try testing.expectEqual(@as(u16, 65535), b[0]);
-/// ```
-pub fn BoundedArrayAligned(
-    comptime T: type,
-    comptime alignment: Alignment,
-    comptime buffer_capacity: usize,
-) type {
-    return struct {
-        const Self = @This();
-        buffer: [buffer_capacity]T align(alignment.toByteUnits()) = undefined,
-        len: usize = 0,
-
-        /// Set the actual length of the slice.
-        /// Returns error.Overflow if it exceeds the length of the backing array.
-        pub fn init(len: usize) error{Overflow}!Self {
-            if (len > buffer_capacity) return error.Overflow;
-            return Self{ .len = len };
-        }
-
-        /// View the internal array as a slice whose size was previously set.
-        pub fn slice(self: anytype) switch (@TypeOf(&self.buffer)) {
-            *align(alignment.toByteUnits()) [buffer_capacity]T => []align(alignment.toByteUnits()) T,
-            *align(alignment.toByteUnits()) const [buffer_capacity]T => []align(alignment.toByteUnits()) const T,
-            else => unreachable,
-        } {
-            return self.buffer[0..self.len];
-        }
-
-        /// View the internal array as a constant slice whose size was previously set.
-        pub fn constSlice(self: *const Self) []align(alignment.toByteUnits()) const T {
-            return self.slice();
-        }
-
-        /// Adjust the slice's length to `len`.
-        /// Does not initialize added items if any.
-        pub fn resize(self: *Self, len: usize) error{Overflow}!void {
-            if (len > buffer_capacity) return error.Overflow;
-            self.len = len;
-        }
-
-        /// Remove all elements from the slice.
-        pub fn clear(self: *Self) void {
-            self.len = 0;
-        }
-
-        /// Copy the content of an existing slice.
-        pub fn fromSlice(m: []const T) error{Overflow}!Self {
-            var list = try init(m.len);
-            @memcpy(list.slice(), m);
-            return list;
-        }
-
-        /// Return the element at index `i` of the slice.
-        pub fn get(self: Self, i: usize) T {
-            return self.constSlice()[i];
-        }
-
-        /// Set the value of the element at index `i` of the slice.
-        pub fn set(self: *Self, i: usize, item: T) void {
-            self.slice()[i] = item;
-        }
-
-        /// Return the maximum length of a slice.
-        pub fn capacity(self: Self) usize {
-            return self.buffer.len;
-        }
-
-        /// Check that the slice can hold at least `additional_count` items.
-        pub fn ensureUnusedCapacity(self: Self, additional_count: usize) error{Overflow}!void {
-            if (self.len + additional_count > buffer_capacity) {
-                return error.Overflow;
-            }
-        }
-
-        /// Increase length by 1, returning a pointer to the new item.
-        pub fn addOne(self: *Self) error{Overflow}!*T {
-            try self.ensureUnusedCapacity(1);
-            return self.addOneAssumeCapacity();
-        }
-
-        /// Increase length by 1, returning pointer to the new item.
-        /// Asserts that there is space for the new item.
-        pub fn addOneAssumeCapacity(self: *Self) *T {
-            assert(self.len < buffer_capacity);
-            self.len += 1;
-            return &self.slice()[self.len - 1];
-        }
-
-        /// Resize the slice, adding `n` new elements, which have `undefined` values.
-        /// The return value is a pointer to the array of uninitialized elements.
-        pub fn addManyAsArray(self: *Self, comptime n: usize) error{Overflow}!*align(alignment.toByteUnits()) [n]T {
-            const prev_len = self.len;
-            try self.resize(self.len + n);
-            return self.slice()[prev_len..][0..n];
-        }
-
-        /// Resize the slice, adding `n` new elements, which have `undefined` values.
-        /// The return value is a slice pointing to the uninitialized elements.
-        pub fn addManyAsSlice(self: *Self, n: usize) error{Overflow}![]align(alignment.toByteUnits()) T {
-            const prev_len = self.len;
-            try self.resize(self.len + n);
-            return self.slice()[prev_len..][0..n];
-        }
-
-        /// Remove and return the last element from the slice, or return `null` if the slice is empty.
-        pub fn pop(self: *Self) ?T {
-            if (self.len == 0) return null;
-            const item = self.get(self.len - 1);
-            self.len -= 1;
-            return item;
-        }
-
-        /// Return a slice of only the extra capacity after items.
-        /// This can be useful for writing directly into it.
-        /// Note that such an operation must be followed up with a
-        /// call to `resize()`
-        pub fn unusedCapacitySlice(self: *Self) []align(alignment.toByteUnits()) T {
-            return self.buffer[self.len..];
-        }
-
-        /// Insert `item` at index `i` by moving `slice[n .. slice.len]` to make room.
-        /// This operation is O(N).
-        pub fn insert(
-            self: *Self,
-            i: usize,
-            item: T,
-        ) error{Overflow}!void {
-            if (i > self.len) {
-                return error.Overflow;
-            }
-            _ = try self.addOne();
-            var s = self.slice();
-            mem.copyBackwards(T, s[i + 1 .. s.len], s[i .. s.len - 1]);
-            self.buffer[i] = item;
-        }
-
-        /// Insert slice `items` at index `i` by moving `slice[i .. slice.len]` to make room.
-        /// This operation is O(N).
-        pub fn insertSlice(self: *Self, i: usize, items: []const T) error{Overflow}!void {
-            try self.ensureUnusedCapacity(items.len);
-            self.len += items.len;
-            mem.copyBackwards(T, self.slice()[i + items.len .. self.len], self.constSlice()[i .. self.len - items.len]);
-            @memcpy(self.slice()[i..][0..items.len], items);
-        }
-
-        /// Replace range of elements `slice[start..][0..len]` with `new_items`.
-        /// Grows slice if `len < new_items.len`.
-        /// Shrinks slice if `len > new_items.len`.
-        pub fn replaceRange(
-            self: *Self,
-            start: usize,
-            len: usize,
-            new_items: []const T,
-        ) error{Overflow}!void {
-            const after_range = start + len;
-            var range = self.slice()[start..after_range];
-
-            if (range.len == new_items.len) {
-                @memcpy(range[0..new_items.len], new_items);
-            } else if (range.len < new_items.len) {
-                const first = new_items[0..range.len];
-                const rest = new_items[range.len..];
-                @memcpy(range[0..first.len], first);
-                try self.insertSlice(after_range, rest);
-            } else {
-                @memcpy(range[0..new_items.len], new_items);
-                const after_subrange = start + new_items.len;
-                for (self.constSlice()[after_range..], 0..) |item, i| {
-                    self.slice()[after_subrange..][i] = item;
-                }
-                self.len -= len - new_items.len;
-            }
-        }
-
-        /// Extend the slice by 1 element.
-        pub fn append(self: *Self, item: T) error{Overflow}!void {
-            const new_item_ptr = try self.addOne();
-            new_item_ptr.* = item;
-        }
-
-        /// Extend the slice by 1 element, asserting the capacity is already
-        /// enough to store the new item.
-        pub fn appendAssumeCapacity(self: *Self, item: T) void {
-            const new_item_ptr = self.addOneAssumeCapacity();
-            new_item_ptr.* = item;
-        }
-
-        /// Remove the element at index `i`, shift elements after index
-        /// `i` forward, and return the removed element.
-        /// Asserts the slice has at least one item.
-        /// This operation is O(N).
-        pub fn orderedRemove(self: *Self, i: usize) T {
-            const newlen = self.len - 1;
-            if (newlen == i) return self.pop().?;
-            const old_item = self.get(i);
-            for (self.slice()[i..newlen], 0..) |*b, j| b.* = self.get(i + 1 + j);
-            self.set(newlen, undefined);
-            self.len = newlen;
-            return old_item;
-        }
-
-        /// Remove the element at the specified index and return it.
-        /// The empty slot is filled from the end of the slice.
-        /// This operation is O(1).
-        pub fn swapRemove(self: *Self, i: usize) T {
-            if (self.len - 1 == i) return self.pop().?;
-            const old_item = self.get(i);
-            self.set(i, self.pop().?);
-            return old_item;
-        }
-
-        /// Append the slice of items to the slice.
-        pub fn appendSlice(self: *Self, items: []const T) error{Overflow}!void {
-            try self.ensureUnusedCapacity(items.len);
-            self.appendSliceAssumeCapacity(items);
-        }
-
-        /// Append the slice of items to the slice, asserting the capacity is already
-        /// enough to store the new items.
-        pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void {
-            const old_len = self.len;
-            self.len += items.len;
-            @memcpy(self.slice()[old_len..][0..items.len], items);
-        }
-
-        /// Append a value to the slice `n` times.
-        /// Allocates more memory as necessary.
-        pub fn appendNTimes(self: *Self, value: T, n: usize) error{Overflow}!void {
-            const old_len = self.len;
-            try self.resize(old_len + n);
-            @memset(self.slice()[old_len..self.len], value);
-        }
-
-        /// Append a value to the slice `n` times.
-        /// Asserts the capacity is enough.
-        pub fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
-            const old_len = self.len;
-            self.len += n;
-            assert(self.len <= buffer_capacity);
-            @memset(self.slice()[old_len..self.len], value);
-        }
-
-        pub const Writer = if (T != u8)
-            @compileError("The Writer interface is only defined for BoundedArray(u8, ...) " ++
-                "but the given type is BoundedArray(" ++ @typeName(T) ++ ", ...)")
-        else
-            std.io.GenericWriter(*Self, error{Overflow}, appendWrite);
-
-        /// Initializes a writer which will write into the array.
-        pub fn writer(self: *Self) Writer {
-            return .{ .context = self };
-        }
-
-        /// Same as `appendSlice` except it returns the number of bytes written, which is always the same
-        /// as `m.len`. The purpose of this function existing is to match `std.io.GenericWriter` API.
-        fn appendWrite(self: *Self, m: []const u8) error{Overflow}!usize {
-            try self.appendSlice(m);
-            return m.len;
-        }
-    };
-}
-
-test BoundedArray {
-    var a = try BoundedArray(u8, 64).init(32);
-
-    try testing.expectEqual(a.capacity(), 64);
-    try testing.expectEqual(a.slice().len, 32);
-    try testing.expectEqual(a.constSlice().len, 32);
-
-    try a.resize(48);
-    try testing.expectEqual(a.len, 48);
-
-    const x = [_]u8{1} ** 10;
-    a = try BoundedArray(u8, 64).fromSlice(&x);
-    try testing.expectEqualSlices(u8, &x, a.constSlice());
-
-    var a2 = a;
-    try testing.expectEqualSlices(u8, a.constSlice(), a2.constSlice());
-    a2.set(0, 0);
-    try testing.expect(a.get(0) != a2.get(0));
-
-    try testing.expectError(error.Overflow, a.resize(100));
-    try testing.expectError(error.Overflow, BoundedArray(u8, x.len - 1).fromSlice(&x));
-
-    try a.resize(0);
-    try a.ensureUnusedCapacity(a.capacity());
-    (try a.addOne()).* = 0;
-    try a.ensureUnusedCapacity(a.capacity() - 1);
-    try testing.expectEqual(a.len, 1);
-
-    const uninitialized = try a.addManyAsArray(4);
-    try testing.expectEqual(uninitialized.len, 4);
-    try testing.expectEqual(a.len, 5);
-
-    try a.append(0xff);
-    try testing.expectEqual(a.len, 6);
-    try testing.expectEqual(a.pop(), 0xff);
-
-    a.appendAssumeCapacity(0xff);
-    try testing.expectEqual(a.len, 6);
-    try testing.expectEqual(a.pop(), 0xff);
-
-    try a.resize(1);
-    try testing.expectEqual(a.pop(), 0);
-    try testing.expectEqual(a.pop(), null);
-    var unused = a.unusedCapacitySlice();
-    @memset(unused[0..8], 2);
-    unused[8] = 3;
-    unused[9] = 4;
-    try testing.expectEqual(unused.len, a.capacity());
-    try a.resize(10);
-
-    try a.insert(5, 0xaa);
-    try testing.expectEqual(a.len, 11);
-    try testing.expectEqual(a.get(5), 0xaa);
-    try testing.expectEqual(a.get(9), 3);
-    try testing.expectEqual(a.get(10), 4);
-
-    try a.insert(11, 0xbb);
-    try testing.expectEqual(a.len, 12);
-    try testing.expectEqual(a.pop(), 0xbb);
-
-    try a.appendSlice(&x);
-    try testing.expectEqual(a.len, 11 + x.len);
-
-    try a.appendNTimes(0xbb, 5);
-    try testing.expectEqual(a.len, 11 + x.len + 5);
-    try testing.expectEqual(a.pop(), 0xbb);
-
-    a.appendNTimesAssumeCapacity(0xcc, 5);
-    try testing.expectEqual(a.len, 11 + x.len + 5 - 1 + 5);
-    try testing.expectEqual(a.pop(), 0xcc);
-
-    try testing.expectEqual(a.len, 29);
-    try a.replaceRange(1, 20, &x);
-    try testing.expectEqual(a.len, 29 + x.len - 20);
-
-    try a.insertSlice(0, &x);
-    try testing.expectEqual(a.len, 29 + x.len - 20 + x.len);
-
-    try a.replaceRange(1, 5, &x);
-    try testing.expectEqual(a.len, 29 + x.len - 20 + x.len + x.len - 5);
-
-    try a.append(10);
-    try testing.expectEqual(a.pop(), 10);
-
-    try a.append(20);
-    const removed = a.orderedRemove(5);
-    try testing.expectEqual(removed, 1);
-    try testing.expectEqual(a.len, 34);
-
-    a.set(0, 0xdd);
-    a.set(a.len - 1, 0xee);
-    const swapped = a.swapRemove(0);
-    try testing.expectEqual(swapped, 0xdd);
-    try testing.expectEqual(a.get(0), 0xee);
-
-    const added_slice = try a.addManyAsSlice(3);
-    try testing.expectEqual(added_slice.len, 3);
-    try testing.expectEqual(a.len, 36);
-
-    while (a.pop()) |_| {}
-    const w = a.writer();
-    const s = "hello, this is a test string";
-    try w.writeAll(s);
-    try testing.expectEqualStrings(s, a.constSlice());
-}
-
-test "BoundedArrayAligned" {
-    var a = try BoundedArrayAligned(u8, .@"16", 4).init(0);
-    try a.append(0);
-    try a.append(0);
-    try a.append(255);
-    try a.append(255);
-
-    const b = @as(*const [2]u16, @ptrCast(a.constSlice().ptr));
-    try testing.expectEqual(@as(u16, 0), b[0]);
-    try testing.expectEqual(@as(u16, 65535), b[1]);
-}
lib/std/Io.zig
@@ -231,21 +231,6 @@ pub fn GenericReader(
             return @errorCast(self.any().readBytesNoEof(num_bytes));
         }
 
-        pub inline fn readIntoBoundedBytes(
-            self: Self,
-            comptime num_bytes: usize,
-            bounded: *std.BoundedArray(u8, num_bytes),
-        ) Error!void {
-            return @errorCast(self.any().readIntoBoundedBytes(num_bytes, bounded));
-        }
-
-        pub inline fn readBoundedBytes(
-            self: Self,
-            comptime num_bytes: usize,
-        ) Error!std.BoundedArray(u8, num_bytes) {
-            return @errorCast(self.any().readBoundedBytes(num_bytes));
-        }
-
         pub inline fn readInt(self: Self, comptime T: type, endian: std.builtin.Endian) NoEofError!T {
             return @errorCast(self.any().readInt(T, endian));
         }
lib/std/std.zig
@@ -9,8 +9,6 @@ pub const AutoArrayHashMapUnmanaged = array_hash_map.AutoArrayHashMapUnmanaged;
 pub const AutoHashMap = hash_map.AutoHashMap;
 pub const AutoHashMapUnmanaged = hash_map.AutoHashMapUnmanaged;
 pub const BitStack = @import("BitStack.zig");
-pub const BoundedArray = @import("bounded_array.zig").BoundedArray;
-pub const BoundedArrayAligned = @import("bounded_array.zig").BoundedArrayAligned;
 pub const Build = @import("Build.zig");
 pub const BufMap = @import("buf_map.zig").BufMap;
 pub const BufSet = @import("buf_set.zig").BufSet;
src/Zcu/PerThread.zig
@@ -2116,8 +2116,9 @@ pub fn computeAliveFiles(pt: Zcu.PerThread) Allocator.Error!bool {
     // multi-threaded environment (where things like file indices could differ between compiler runs).
 
     // The roots of our file liveness analysis will be the analysis roots.
-    try zcu.alive_files.ensureTotalCapacity(gpa, zcu.analysis_roots.len);
-    for (zcu.analysis_roots.slice()) |mod| {
+    const analysis_roots = zcu.analysisRoots();
+    try zcu.alive_files.ensureTotalCapacity(gpa, analysis_roots.len);
+    for (analysis_roots) |mod| {
         const file_index = zcu.module_roots.get(mod).?.unwrap() orelse continue;
         const file = zcu.fileByIndex(file_index);
 
src/Compilation.zig
@@ -2103,6 +2103,8 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
                 .local_zir_cache = local_zir_cache,
                 .error_limit = error_limit,
                 .llvm_object = null,
+                .analysis_roots_buffer = undefined,
+                .analysis_roots_len = 0,
             };
             try zcu.init(options.thread_pool.getIdCount());
             break :blk zcu;
@@ -2933,22 +2935,26 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
             try comp.appendFileSystemInput(embed_file.path);
         }
 
-        zcu.analysis_roots.clear();
+        zcu.analysis_roots_len = 0;
 
-        zcu.analysis_roots.appendAssumeCapacity(zcu.std_mod);
+        zcu.analysis_roots_buffer[zcu.analysis_roots_len] = zcu.std_mod;
+        zcu.analysis_roots_len += 1;
 
         // Normally we rely on importing std to in turn import the root source file in the start code.
         // However, the main module is distinct from the root module in tests, so that won't happen there.
         if (comp.config.is_test and zcu.main_mod != zcu.std_mod) {
-            zcu.analysis_roots.appendAssumeCapacity(zcu.main_mod);
+            zcu.analysis_roots_buffer[zcu.analysis_roots_len] = zcu.main_mod;
+            zcu.analysis_roots_len += 1;
         }
 
         if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| {
-            zcu.analysis_roots.appendAssumeCapacity(compiler_rt_mod);
+            zcu.analysis_roots_buffer[zcu.analysis_roots_len] = compiler_rt_mod;
+            zcu.analysis_roots_len += 1;
         }
 
         if (zcu.root_mod.deps.get("ubsan_rt")) |ubsan_rt_mod| {
-            zcu.analysis_roots.appendAssumeCapacity(ubsan_rt_mod);
+            zcu.analysis_roots_buffer[zcu.analysis_roots_len] = ubsan_rt_mod;
+            zcu.analysis_roots_len += 1;
         }
     }
 
@@ -4745,7 +4751,7 @@ fn performAllTheWork(
         try zcu.flushRetryableFailures();
 
         // It's analysis time! Queue up our initial analysis.
-        for (zcu.analysis_roots.slice()) |mod| {
+        for (zcu.analysisRoots()) |mod| {
             try comp.queueJob(.{ .analyze_mod = mod });
         }
 
src/Zcu.zig
@@ -268,7 +268,8 @@ nav_val_analysis_queued: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, voi
 
 /// These are the modules which we initially queue for analysis in `Compilation.update`.
 /// `resolveReferences` will use these as the root of its reachability traversal.
-analysis_roots: std.BoundedArray(*Package.Module, 4) = .{},
+analysis_roots_buffer: [4]*Package.Module,
+analysis_roots_len: usize = 0,
 /// This is the cached result of `Zcu.resolveReferences`. It is computed on-demand, and
 /// reset to `null` when any semantic analysis occurs (since this invalidates the data).
 /// Allocated into `gpa`.
@@ -4013,8 +4014,8 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
     // This is not a sufficient size, but a lower bound.
     try result.ensureTotalCapacity(gpa, @intCast(zcu.reference_table.count()));
 
-    try type_queue.ensureTotalCapacity(gpa, zcu.analysis_roots.len);
-    for (zcu.analysis_roots.slice()) |mod| {
+    try type_queue.ensureTotalCapacity(gpa, zcu.analysis_roots_len);
+    for (zcu.analysisRoots()) |mod| {
         const file = zcu.module_roots.get(mod).?.unwrap() orelse continue;
         const root_ty = zcu.fileRootType(file);
         if (root_ty == .none) continue;
@@ -4202,6 +4203,10 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
     return result;
 }
 
+pub fn analysisRoots(zcu: *Zcu) []*Package.Module {
+    return zcu.analysis_roots_buffer[0..zcu.analysis_roots_len];
+}
+
 pub fn fileByIndex(zcu: *const Zcu, file_index: File.Index) *File {
     return zcu.intern_pool.filePtr(file_index);
 }