Commit 749f10af49
Changed files (161)
  doc/
  lib/compiler/aro (aro, backend/Object)
  lib/compiler/aro_translate_c
  lib/compiler/reduce
  lib/compiler/resinator
  lib/docs/wasm
  lib/init/src
  lib/std (Build, compress, crypto, debug, http, Io, math/big, process, Target, zig)
  src (arch, codegen, link/MachO, link/Wasm, Package, Sema)
  test/behavior
  tools
doc/langref/testing_detect_leak.zig
@@ -1,7 +1,7 @@
const std = @import("std");
test "detect leak" {
- var list = std.ArrayList(u21).init(std.testing.allocator);
+ var list = std.array_list.Managed(u21).init(std.testing.allocator);
// missing `defer list.deinit();`
try list.append('โ');
doc/langref.html.in
@@ -6241,9 +6241,8 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
C has a default allocator - <code>malloc</code>, <code>realloc</code>, and <code>free</code>.
When linking against libc, Zig exposes this allocator with {#syntax#}std.heap.c_allocator{#endsyntax#}.
However, by convention, there is no default allocator in Zig. Instead, functions which need to
- allocate accept an {#syntax#}Allocator{#endsyntax#} parameter. Likewise, data structures such as
- {#syntax#}std.ArrayList{#endsyntax#} accept an {#syntax#}Allocator{#endsyntax#} parameter in
- their initialization functions:
+ allocate accept an {#syntax#}Allocator{#endsyntax#} parameter. Likewise, some data structures
+ accept an {#syntax#}Allocator{#endsyntax#} parameter in their initialization functions:
</p>
{#code|test_allocator.zig#}
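Context for the wording change above (not part of the commit): under the split this commit applies, the stored-allocator list lives at std.array_list.Managed, while plain std.ArrayList takes the allocator on each call. A minimal sketch of both styles, using only the API shapes visible in these diffs:

    const std = @import("std");

    test "allocator parameter styles" {
        const gpa = std.testing.allocator;

        // Managed list: the allocator is handed to the init function and stored.
        var managed = std.array_list.Managed(u8).init(gpa);
        defer managed.deinit();
        try managed.append('a');

        // Default std.ArrayList (unmanaged): the allocator is passed to each call.
        var list: std.ArrayList(u8) = .empty;
        defer list.deinit(gpa);
        try list.append(gpa, 'b');
    }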
lib/compiler/aro/aro/toolchains/Linux.zig
@@ -162,7 +162,7 @@ pub fn getDefaultLinker(self: *const Linux, target: std.Target) []const u8 {
return "ld";
}
-pub fn buildLinkerArgs(self: *const Linux, tc: *const Toolchain, argv: *std.ArrayList([]const u8)) Compilation.Error!void {
+pub fn buildLinkerArgs(self: *const Linux, tc: *const Toolchain, argv: *std.array_list.Managed([]const u8)) Compilation.Error!void {
const d = tc.driver;
const target = tc.getTarget();
@@ -465,7 +465,7 @@ test Linux {
try toolchain.discover();
- var argv = std.ArrayList([]const u8).init(driver.comp.gpa);
+ var argv = std.array_list.Managed([]const u8).init(driver.comp.gpa);
defer argv.deinit();
var linker_path_buf: [std.fs.max_path_bytes]u8 = undefined;
lib/compiler/aro/aro/Compilation.zig
@@ -533,7 +533,7 @@ fn generateSystemDefines(comp: *Compilation, w: anytype) !void {
pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefinesMode) !Source {
try comp.generateBuiltinTypes();
- var buf = std.ArrayList(u8).init(comp.gpa);
+ var buf = std.array_list.Managed(u8).init(comp.gpa);
defer buf.deinit();
if (system_defines_mode == .include_system_defines) {
@@ -1143,7 +1143,7 @@ pub fn addSourceFromOwnedBuffer(comp: *Compilation, buf: []u8, path: []const u8,
const duped_path = try comp.gpa.dupe(u8, path);
errdefer comp.gpa.free(duped_path);
- var splice_list = std.ArrayList(u32).init(comp.gpa);
+ var splice_list = std.array_list.Managed(u32).init(comp.gpa);
defer splice_list.deinit();
const source_id: Source.Id = @enumFromInt(comp.sources.count() + 2);
@@ -1428,7 +1428,7 @@ fn getFileContents(comp: *Compilation, path: []const u8, limit: ?u32) ![]const u
const file = try comp.cwd.openFile(path, .{});
defer file.close();
- var buf = std.ArrayList(u8).init(comp.gpa);
+ var buf = std.array_list.Managed(u8).init(comp.gpa);
defer buf.deinit();
const max = limit orelse std.math.maxInt(u32);
lib/compiler/aro/aro/Driver.zig
@@ -590,7 +590,7 @@ var stdout_buffer: [4096]u8 = undefined;
/// The entry point of the Aro compiler.
/// **MAY call `exit` if `fast_exit` is set.**
pub fn main(d: *Driver, tc: *Toolchain, args: []const []const u8, comptime fast_exit: bool) !void {
- var macro_buf = std.ArrayList(u8).init(d.comp.gpa);
+ var macro_buf = std.array_list.Managed(u8).init(d.comp.gpa);
defer macro_buf.deinit();
const std_out = std.fs.File.stdout().deprecatedWriter();
@@ -817,7 +817,7 @@ fn dumpLinkerArgs(items: []const []const u8) !void {
/// The entry point of the Aro compiler.
/// **MAY call `exit` if `fast_exit` is set.**
pub fn invokeLinker(d: *Driver, tc: *Toolchain, comptime fast_exit: bool) !void {
- var argv = std.ArrayList([]const u8).init(d.comp.gpa);
+ var argv = std.array_list.Managed([]const u8).init(d.comp.gpa);
defer argv.deinit();
var linker_path_buf: [std.fs.max_path_bytes]u8 = undefined;
lib/compiler/aro/aro/InitList.zig
@@ -9,7 +9,7 @@ const TokenIndex = Tree.TokenIndex;
const NodeIndex = Tree.NodeIndex;
const Type = @import("Type.zig");
const Diagnostics = @import("Diagnostics.zig");
-const NodeList = std.ArrayList(NodeIndex);
+const NodeList = std.array_list.Managed(NodeIndex);
const Parser = @import("Parser.zig");
const Item = struct {
lib/compiler/aro/aro/Parser.zig
@@ -15,7 +15,7 @@ const TokenIndex = Tree.TokenIndex;
const NodeIndex = Tree.NodeIndex;
const Type = @import("Type.zig");
const Diagnostics = @import("Diagnostics.zig");
-const NodeList = std.ArrayList(NodeIndex);
+const NodeList = std.array_list.Managed(NodeIndex);
const InitList = @import("InitList.zig");
const Attribute = @import("Attribute.zig");
const char_info = @import("char_info.zig");
@@ -33,7 +33,7 @@ const target_util = @import("target.zig");
const Switch = struct {
default: ?TokenIndex = null,
- ranges: std.ArrayList(Range),
+ ranges: std.array_list.Managed(Range),
ty: Type,
comp: *Compilation,
@@ -101,16 +101,16 @@ value_map: Tree.ValueMap,
// buffers used during compilation
syms: SymbolStack = .{},
-strings: std.ArrayListAligned(u8, .@"4"),
-labels: std.ArrayList(Label),
+strings: std.array_list.AlignedManaged(u8, .@"4"),
+labels: std.array_list.Managed(Label),
list_buf: NodeList,
decl_buf: NodeList,
-param_buf: std.ArrayList(Type.Func.Param),
-enum_buf: std.ArrayList(Type.Enum.Field),
-record_buf: std.ArrayList(Type.Record.Field),
+param_buf: std.array_list.Managed(Type.Func.Param),
+enum_buf: std.array_list.Managed(Type.Enum.Field),
+record_buf: std.array_list.Managed(Type.Record.Field),
attr_buf: std.MultiArrayList(TentativeAttribute) = .{},
attr_application_buf: std.ArrayListUnmanaged(Attribute) = .empty,
-field_attr_buf: std.ArrayList([]const Attribute),
+field_attr_buf: std.array_list.Managed([]const Attribute),
/// type name -> variable name location for tentative definitions (top-level defs with thus-far-incomplete types)
/// e.g. `struct Foo bar;` where `struct Foo` is not defined yet.
/// The key is the StringId of `Foo` and the value is the TokenIndex of `bar`
@@ -693,16 +693,16 @@ pub fn parse(pp: *Preprocessor) Compilation.Error!Tree {
.gpa = pp.comp.gpa,
.arena = arena.allocator(),
.tok_ids = pp.tokens.items(.id),
- .strings = std.ArrayListAligned(u8, .@"4").init(pp.comp.gpa),
+ .strings = std.array_list.AlignedManaged(u8, .@"4").init(pp.comp.gpa),
.value_map = Tree.ValueMap.init(pp.comp.gpa),
.data = NodeList.init(pp.comp.gpa),
- .labels = std.ArrayList(Label).init(pp.comp.gpa),
+ .labels = std.array_list.Managed(Label).init(pp.comp.gpa),
.list_buf = NodeList.init(pp.comp.gpa),
.decl_buf = NodeList.init(pp.comp.gpa),
- .param_buf = std.ArrayList(Type.Func.Param).init(pp.comp.gpa),
- .enum_buf = std.ArrayList(Type.Enum.Field).init(pp.comp.gpa),
- .record_buf = std.ArrayList(Type.Record.Field).init(pp.comp.gpa),
- .field_attr_buf = std.ArrayList([]const Attribute).init(pp.comp.gpa),
+ .param_buf = std.array_list.Managed(Type.Func.Param).init(pp.comp.gpa),
+ .enum_buf = std.array_list.Managed(Type.Enum.Field).init(pp.comp.gpa),
+ .record_buf = std.array_list.Managed(Type.Record.Field).init(pp.comp.gpa),
+ .field_attr_buf = std.array_list.Managed([]const Attribute).init(pp.comp.gpa),
.string_ids = .{
.declspec_id = try StrInt.intern(pp.comp, "__declspec"),
.main_id = try StrInt.intern(pp.comp, "main"),
@@ -1222,7 +1222,7 @@ fn staticAssertMessage(p: *Parser, cond_node: NodeIndex, message: Result) !?[]co
const cond_tag = p.nodes.items(.tag)[@intFromEnum(cond_node)];
if (cond_tag != .builtin_types_compatible_p and message.node == .none) return null;
- var buf = std.ArrayList(u8).init(p.gpa);
+ var buf = std.array_list.Managed(u8).init(p.gpa);
defer buf.deinit();
if (cond_tag == .builtin_types_compatible_p) {
@@ -3994,7 +3994,7 @@ fn msvcAsmStmt(p: *Parser) Error!?NodeIndex {
}
/// asmOperand : ('[' IDENTIFIER ']')? asmStr '(' expr ')'
-fn asmOperand(p: *Parser, names: *std.ArrayList(?TokenIndex), constraints: *NodeList, exprs: *NodeList) Error!void {
+fn asmOperand(p: *Parser, names: *std.array_list.Managed(?TokenIndex), constraints: *NodeList, exprs: *NodeList) Error!void {
if (p.eatToken(.l_bracket)) |l_bracket| {
const ident = (try p.eatIdentifier()) orelse {
try p.err(.expected_identifier);
@@ -4044,7 +4044,7 @@ fn gnuAsmStmt(p: *Parser, quals: Tree.GNUAssemblyQualifiers, asm_tok: TokenIndex
const allocator = stack_fallback.get();
// TODO: Consider using a TokenIndex of 0 instead of null if we need to store the names in the tree
- var names = std.ArrayList(?TokenIndex).initCapacity(allocator, expected_items) catch unreachable; // stack allocation already succeeded
+ var names = std.array_list.Managed(?TokenIndex).initCapacity(allocator, expected_items) catch unreachable; // stack allocation already succeeded
defer names.deinit();
var constraints = NodeList.initCapacity(allocator, expected_items) catch unreachable; // stack allocation already succeeded
defer constraints.deinit();
@@ -4317,7 +4317,7 @@ fn stmt(p: *Parser) Error!NodeIndex {
const old_switch = p.@"switch";
var @"switch" = Switch{
- .ranges = std.ArrayList(Switch.Range).init(p.gpa),
+ .ranges = std.array_list.Managed(Switch.Range).init(p.gpa),
.ty = cond.ty,
.comp = p.comp,
};
@@ -8268,7 +8268,7 @@ fn charLiteral(p: *Parser) Error!Result {
const max_chars_expected = 4;
var stack_fallback = std.heap.stackFallback(max_chars_expected * @sizeOf(u32), p.comp.gpa);
- var chars = std.ArrayList(u32).initCapacity(stack_fallback.get(), max_chars_expected) catch unreachable; // stack allocation already succeeded
+ var chars = std.array_list.Managed(u32).initCapacity(stack_fallback.get(), max_chars_expected) catch unreachable; // stack allocation already succeeded
defer chars.deinit();
while (char_literal_parser.next()) |item| switch (item) {
lib/compiler/aro/aro/Preprocessor.zig
@@ -17,7 +17,7 @@ const features = @import("features.zig");
const Hideset = @import("Hideset.zig");
const DefineMap = std.StringHashMapUnmanaged(Macro);
-const RawTokenList = std.ArrayList(RawToken);
+const RawTokenList = std.array_list.Managed(RawToken);
const max_include_depth = 200;
/// Errors that can be returned when expanding a macro.
@@ -84,7 +84,7 @@ tokens: Token.List = .{},
/// Do not directly mutate this; must be kept in sync with `tokens`
expansion_entries: std.MultiArrayList(ExpansionEntry) = .{},
token_buf: RawTokenList,
-char_buf: std.ArrayList(u8),
+char_buf: std.array_list.Managed(u8),
/// Counter that is incremented each time preprocess() is called
/// Can be used to distinguish multiple preprocessings of the same file
preprocess_count: u32 = 0,
@@ -131,7 +131,7 @@ pub fn init(comp: *Compilation) Preprocessor {
.gpa = comp.gpa,
.arena = std.heap.ArenaAllocator.init(comp.gpa),
.token_buf = RawTokenList.init(comp.gpa),
- .char_buf = std.ArrayList(u8).init(comp.gpa),
+ .char_buf = std.array_list.Managed(u8).init(comp.gpa),
.poisoned_identifiers = std.StringHashMap(void).init(comp.gpa),
.top_expansion_buf = ExpandBuf.init(comp.gpa),
.hideset = .{ .comp = comp },
@@ -982,7 +982,7 @@ fn expr(pp: *Preprocessor, tokenizer: *Tokenizer) MacroError!bool {
.tok_i = @intCast(token_state.tokens_len),
.arena = pp.arena.allocator(),
.in_macro = true,
- .strings = std.ArrayListAligned(u8, .@"4").init(pp.comp.gpa),
+ .strings = std.array_list.AlignedManaged(u8, .@"4").init(pp.comp.gpa),
.data = undefined,
.value_map = undefined,
@@ -1140,7 +1140,7 @@ fn skipToNl(tokenizer: *Tokenizer) void {
}
}
-const ExpandBuf = std.ArrayList(TokenWithExpansionLocs);
+const ExpandBuf = std.array_list.Managed(TokenWithExpansionLocs);
fn removePlacemarkers(buf: *ExpandBuf) void {
var i: usize = buf.items.len -% 1;
while (i < buf.items.len) : (i -%= 1) {
@@ -1151,7 +1151,7 @@ fn removePlacemarkers(buf: *ExpandBuf) void {
}
}
-const MacroArguments = std.ArrayList([]const TokenWithExpansionLocs);
+const MacroArguments = std.array_list.Managed([]const TokenWithExpansionLocs);
fn deinitMacroArguments(allocator: Allocator, args: *const MacroArguments) void {
for (args.items) |item| {
for (item) |tok| TokenWithExpansionLocs.free(tok.expansion_locs, allocator);
@@ -2075,7 +2075,7 @@ fn collectMacroFuncArguments(
var parens: u32 = 0;
var args = MacroArguments.init(pp.gpa);
errdefer deinitMacroArguments(pp.gpa, &args);
- var curArgument = std.ArrayList(TokenWithExpansionLocs).init(pp.gpa);
+ var curArgument = std.array_list.Managed(TokenWithExpansionLocs).init(pp.gpa);
defer curArgument.deinit();
while (true) {
var tok = try nextBufToken(pp, tokenizer, buf, start_idx, end_idx, extend_buf);
@@ -2645,7 +2645,7 @@ fn define(pp: *Preprocessor, tokenizer: *Tokenizer, define_tok: RawToken) Error!
/// Handle a function like #define directive.
fn defineFn(pp: *Preprocessor, tokenizer: *Tokenizer, define_tok: RawToken, macro_name: RawToken, l_paren: RawToken) Error!void {
assert(macro_name.id.isMacroIdentifier());
- var params = std.ArrayList([]const u8).init(pp.gpa);
+ var params = std.array_list.Managed([]const u8).init(pp.gpa);
defer params.deinit();
// Parse the parameter list.
@@ -3471,7 +3471,7 @@ test "Preserve pragma tokens sometimes" {
const allocator = std.testing.allocator;
const Test = struct {
fn runPreprocessor(source_text: []const u8) ![]const u8 {
- var buf = std.ArrayList(u8).init(allocator);
+ var buf = std.array_list.Managed(u8).init(allocator);
defer buf.deinit();
var comp = Compilation.init(allocator, std.fs.cwd());
@@ -3602,7 +3602,7 @@ test "Include guards" {
_ = try comp.addSourceFromBuffer(path, "int bar = 5;\n");
- var buf = std.ArrayList(u8).init(allocator);
+ var buf = std.array_list.Managed(u8).init(allocator);
defer buf.deinit();
var writer = buf.writer();
lib/compiler/aro/aro/Toolchain.zig
@@ -157,7 +157,7 @@ pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
return use_linker;
}
} else {
- var linker_name = try std.ArrayList(u8).initCapacity(tc.driver.comp.gpa, 5 + use_linker.len); // "ld64." ++ use_linker
+ var linker_name = try std.array_list.Managed(u8).initCapacity(tc.driver.comp.gpa, 5 + use_linker.len); // "ld64." ++ use_linker
defer linker_name.deinit();
if (tc.getTarget().os.tag.isDarwin()) {
linker_name.appendSliceAssumeCapacity("ld64.");
@@ -198,7 +198,7 @@ fn possibleProgramNames(raw_triple: ?[]const u8, name: []const u8, buf: *[64]u8)
}
/// Add toolchain `file_paths` to argv as `-L` arguments
-pub fn addFilePathLibArgs(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void {
+pub fn addFilePathLibArgs(tc: *const Toolchain, argv: *std.array_list.Managed([]const u8)) !void {
try argv.ensureUnusedCapacity(tc.file_paths.items.len);
var bytes_needed: usize = 0;
@@ -332,7 +332,7 @@ pub fn addPathFromComponents(tc: *Toolchain, components: []const []const u8, des
/// Add linker args to `argv`. Does not add path to linker executable as first item; that must be handled separately
/// Items added to `argv` will be string literals or owned by `tc.arena` so they must not be individually freed
-pub fn buildLinkerArgs(tc: *Toolchain, argv: *std.ArrayList([]const u8)) !void {
+pub fn buildLinkerArgs(tc: *Toolchain, argv: *std.array_list.Managed([]const u8)) !void {
return switch (tc.inner) {
.uninitialized => unreachable,
.linux => |*linux| linux.buildLinkerArgs(tc, argv),
@@ -412,7 +412,7 @@ fn getAsNeededOption(is_solaris: bool, needed: bool) []const u8 {
}
}
-fn addUnwindLibrary(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void {
+fn addUnwindLibrary(tc: *const Toolchain, argv: *std.array_list.Managed([]const u8)) !void {
const unw = try tc.getUnwindLibKind();
const target = tc.getTarget();
if ((target.abi.isAndroid() and unw == .libgcc) or
@@ -450,7 +450,7 @@ fn addUnwindLibrary(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !voi
}
}
-fn addLibGCC(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void {
+fn addLibGCC(tc: *const Toolchain, argv: *std.array_list.Managed([]const u8)) !void {
const libgcc_kind = tc.getLibGCCKind();
if (libgcc_kind == .static or libgcc_kind == .unspecified) {
try argv.append("-lgcc");
@@ -461,7 +461,7 @@ fn addLibGCC(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void {
}
}
-pub fn addRuntimeLibs(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void {
+pub fn addRuntimeLibs(tc: *const Toolchain, argv: *std.array_list.Managed([]const u8)) !void {
const target = tc.getTarget();
const rlt = tc.getRuntimeLibKind();
switch (rlt) {
lib/compiler/aro/aro/Tree.zig
@@ -41,7 +41,7 @@ pub const TokenWithExpansionLocs = struct {
pub fn addExpansionLocation(tok: *TokenWithExpansionLocs, gpa: std.mem.Allocator, new: []const Source.Location) !void {
if (new.len == 0 or tok.id == .whitespace or tok.id == .macro_ws or tok.id == .placemarker) return;
- var list = std.ArrayList(Source.Location).init(gpa);
+ var list = std.array_list.Managed(Source.Location).init(gpa);
defer {
@memset(list.items.ptr[list.items.len..list.capacity], .{});
// Add a sentinel to indicate the end of the list since
lib/compiler/aro/backend/Object/Elf.zig
@@ -4,7 +4,7 @@ const Target = std.Target;
const Object = @import("../Object.zig");
const Section = struct {
- data: std.ArrayList(u8),
+ data: std.array_list.Managed(u8),
relocations: std.ArrayListUnmanaged(Relocation) = .empty,
flags: u64,
type: u32,
@@ -80,12 +80,12 @@ fn sectionString(sec: Object.Section) []const u8 {
};
}
-pub fn getSection(elf: *Elf, section_kind: Object.Section) !*std.ArrayList(u8) {
+pub fn getSection(elf: *Elf, section_kind: Object.Section) !*std.array_list.Managed(u8) {
const section_name = sectionString(section_kind);
const section = elf.sections.get(section_name) orelse blk: {
const section = try elf.arena.allocator().create(Section);
section.* = .{
- .data = std.ArrayList(u8).init(elf.arena.child_allocator),
+ .data = std.array_list.Managed(u8).init(elf.arena.child_allocator),
.type = std.elf.SHT_PROGBITS,
.flags = switch (section_kind) {
.func, .custom => std.elf.SHF_ALLOC + std.elf.SHF_EXECINSTR,
lib/compiler/aro/backend/Object.zig
@@ -30,7 +30,7 @@ pub const Section = union(enum) {
custom: []const u8,
};
-pub fn getSection(obj: *Object, section: Section) !*std.ArrayList(u8) {
+pub fn getSection(obj: *Object, section: Section) !*std.array_list.Managed(u8) {
switch (obj.format) {
.elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).getSection(section),
else => unreachable,
lib/compiler/aro_translate_c/ast.zig
@@ -763,7 +763,7 @@ pub const Payload = struct {
pub fn render(gpa: Allocator, nodes: []const Node) !std.zig.Ast {
var ctx = Context{
.gpa = gpa,
- .buf = std.ArrayList(u8).init(gpa),
+ .buf = std.array_list.Managed(u8).init(gpa),
};
defer ctx.buf.deinit();
defer ctx.nodes.deinit(gpa);
@@ -787,7 +787,7 @@ pub fn render(gpa: Allocator, nodes: []const Node) !std.zig.Ast {
});
const root_members = blk: {
- var result = std.ArrayList(NodeIndex).init(gpa);
+ var result = std.array_list.Managed(NodeIndex).init(gpa);
defer result.deinit();
for (nodes) |node| {
@@ -825,7 +825,7 @@ const ExtraIndex = std.zig.Ast.ExtraIndex;
const Context = struct {
gpa: Allocator,
- buf: std.ArrayList(u8),
+ buf: std.array_list.Managed(u8),
nodes: std.zig.Ast.NodeList = .{},
extra_data: std.ArrayListUnmanaged(u32) = .empty,
tokens: std.zig.Ast.TokenList = .{},
@@ -886,7 +886,7 @@ const Context = struct {
};
fn renderNodes(c: *Context, nodes: []const Node) Allocator.Error!NodeSubRange {
- var result = std.ArrayList(NodeIndex).init(c.gpa);
+ var result = std.array_list.Managed(NodeIndex).init(c.gpa);
defer result.deinit();
for (nodes) |node| {
@@ -1622,7 +1622,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
}
const l_brace = try c.addToken(.l_brace, "{");
- var stmts = std.ArrayList(NodeIndex).init(c.gpa);
+ var stmts = std.array_list.Managed(NodeIndex).init(c.gpa);
defer stmts.deinit();
for (payload.stmts) |stmt| {
const res = try renderNode(c, stmt);
@@ -2954,9 +2954,9 @@ fn renderMacroFunc(c: *Context, node: Node) !NodeIndex {
});
}
-fn renderParams(c: *Context, params: []Payload.Param, is_var_args: bool) !std.ArrayList(NodeIndex) {
+fn renderParams(c: *Context, params: []Payload.Param, is_var_args: bool) !std.array_list.Managed(NodeIndex) {
_ = try c.addToken(.l_paren, "(");
- var rendered = try std.ArrayList(NodeIndex).initCapacity(c.gpa, params.len);
+ var rendered = try std.array_list.Managed(NodeIndex).initCapacity(c.gpa, params.len);
errdefer rendered.deinit();
for (params, 0..) |param, i| {
lib/compiler/reduce/Walk.zig
@@ -5,7 +5,7 @@ const assert = std.debug.assert;
const BuiltinFn = std.zig.BuiltinFn;
ast: *const Ast,
-transformations: *std.ArrayList(Transformation),
+transformations: *std.array_list.Managed(Transformation),
unreferenced_globals: std.StringArrayHashMapUnmanaged(Ast.Node.Index),
in_scope_names: std.StringArrayHashMapUnmanaged(u32),
replace_names: std.StringArrayHashMapUnmanaged(u32),
@@ -54,7 +54,7 @@ pub const Error = error{OutOfMemory};
pub fn findTransformations(
arena: std.mem.Allocator,
ast: *const Ast,
- transformations: *std.ArrayList(Transformation),
+ transformations: *std.array_list.Managed(Transformation),
) !void {
transformations.clearRetainingCapacity();
lib/compiler/resinator/cli.zig
@@ -1291,7 +1291,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
}
pub fn filepathWithExtension(allocator: Allocator, path: []const u8, ext: []const u8) ![]const u8 {
- var buf = std.ArrayList(u8).init(allocator);
+ var buf = std.array_list.Managed(u8).init(allocator);
errdefer buf.deinit();
if (std.fs.path.dirname(path)) |dirname| {
var end_pos = dirname.len;
lib/compiler/resinator/compile.zig
@@ -38,7 +38,7 @@ pub const CompileOptions = struct {
/// Items within the list will be allocated using the allocator of the ArrayList and must be
/// freed by the caller.
/// TODO: Maybe a dedicated struct for this purpose so that it's a bit nicer to work with.
- dependencies_list: ?*std.ArrayList([]const u8) = null,
+ dependencies_list: ?*std.array_list.Managed([]const u8) = null,
default_code_page: SupportedCodePage = .windows1252,
/// If true, the first #pragma code_page directive only sets the input code page, but not the output code page.
/// This check must be done before comments are removed from the file.
@@ -74,7 +74,7 @@ pub fn compile(allocator: Allocator, source: []const u8, writer: anytype, option
var tree = try parser.parse(allocator, options.diagnostics);
defer tree.deinit();
- var search_dirs = std.ArrayList(SearchDir).init(allocator);
+ var search_dirs = std.array_list.Managed(SearchDir).init(allocator);
defer {
for (search_dirs.items) |*search_dir| {
search_dir.deinit(allocator);
@@ -178,7 +178,7 @@ pub const Compiler = struct {
cwd: std.fs.Dir,
state: State = .{},
diagnostics: *Diagnostics,
- dependencies_list: ?*std.ArrayList([]const u8),
+ dependencies_list: ?*std.array_list.Managed([]const u8),
input_code_pages: *const CodePageLookup,
output_code_pages: *const CodePageLookup,
search_dirs: []SearchDir,
@@ -279,7 +279,7 @@ pub const Compiler = struct {
.literal, .number => {
const slice = literal_node.token.slice(self.source);
const code_page = self.input_code_pages.getForToken(literal_node.token);
- var buf = try std.ArrayList(u8).initCapacity(self.allocator, slice.len);
+ var buf = try std.array_list.Managed(u8).initCapacity(self.allocator, slice.len);
errdefer buf.deinit();
var index: usize = 0;
@@ -303,7 +303,7 @@ pub const Compiler = struct {
const column = literal_node.token.calculateColumn(self.source, 8, null);
const bytes = SourceBytes{ .slice = slice, .code_page = self.input_code_pages.getForToken(literal_node.token) };
- var buf = std.ArrayList(u8).init(self.allocator);
+ var buf = std.array_list.Managed(u8).init(self.allocator);
errdefer buf.deinit();
// Filenames are sort-of parsed as if they were wide strings, but the max escape width of
@@ -421,7 +421,7 @@ pub const Compiler = struct {
const bytes = self.sourceBytesForToken(token);
const output_code_page = self.output_code_pages.getForToken(token);
- var buf = try std.ArrayList(u8).initCapacity(self.allocator, bytes.slice.len);
+ var buf = try std.array_list.Managed(u8).initCapacity(self.allocator, bytes.slice.len);
errdefer buf.deinit();
var iterative_parser = literals.IterativeStringParser.init(bytes, .{
@@ -1226,7 +1226,7 @@ pub const Compiler = struct {
}
pub fn writeResourceRawData(self: *Compiler, node: *Node.ResourceRawData, writer: anytype) !void {
- var data_buffer = std.ArrayList(u8).init(self.allocator);
+ var data_buffer = std.array_list.Managed(u8).init(self.allocator);
defer data_buffer.deinit();
// The header's data length field is a u32 so limit the resource's data size so that
// we know we can always specify the real size.
@@ -1306,7 +1306,7 @@ pub const Compiler = struct {
}
pub fn writeAccelerators(self: *Compiler, node: *Node.Accelerators, writer: anytype) !void {
- var data_buffer = std.ArrayList(u8).init(self.allocator);
+ var data_buffer = std.array_list.Managed(u8).init(self.allocator);
defer data_buffer.deinit();
// The header's data length field is a u32 so limit the resource's data size so that
@@ -1405,7 +1405,7 @@ pub const Compiler = struct {
};
pub fn writeDialog(self: *Compiler, node: *Node.Dialog, writer: anytype) !void {
- var data_buffer = std.ArrayList(u8).init(self.allocator);
+ var data_buffer = std.array_list.Managed(u8).init(self.allocator);
defer data_buffer.deinit();
// The header's data length field is a u32 so limit the resource's data size so that
// we know we can always specify the real size.
@@ -1973,7 +1973,7 @@ pub const Compiler = struct {
try NameOrOrdinal.writeEmpty(data_writer);
}
- var extra_data_buf = std.ArrayList(u8).init(self.allocator);
+ var extra_data_buf = std.array_list.Managed(u8).init(self.allocator);
defer extra_data_buf.deinit();
// The extra data byte length must be able to fit within a u16.
var limited_extra_data_writer = limitedWriter(extra_data_buf.writer(), std.math.maxInt(u16));
@@ -2004,7 +2004,7 @@ pub const Compiler = struct {
}
pub fn writeToolbar(self: *Compiler, node: *Node.Toolbar, writer: anytype) !void {
- var data_buffer = std.ArrayList(u8).init(self.allocator);
+ var data_buffer = std.array_list.Managed(u8).init(self.allocator);
defer data_buffer.deinit();
const data_writer = data_buffer.writer();
@@ -2082,7 +2082,7 @@ pub const Compiler = struct {
}
pub fn writeMenu(self: *Compiler, node: *Node.Menu, writer: anytype) !void {
- var data_buffer = std.ArrayList(u8).init(self.allocator);
+ var data_buffer = std.array_list.Managed(u8).init(self.allocator);
defer data_buffer.deinit();
// The header's data length field is a u32 so limit the resource's data size so that
// we know we can always specify the real size.
@@ -2265,7 +2265,7 @@ pub const Compiler = struct {
}
pub fn writeVersionInfo(self: *Compiler, node: *Node.VersionInfo, writer: anytype) !void {
- var data_buffer = std.ArrayList(u8).init(self.allocator);
+ var data_buffer = std.array_list.Managed(u8).init(self.allocator);
defer data_buffer.deinit();
// The node's length field (which is inclusive of the length of all of its children) is a u16
// so limit the node's data size so that we know we can always specify the real size.
@@ -2394,7 +2394,7 @@ pub const Compiler = struct {
/// Expects writer to be a LimitedWriter limited to u16, meaning all writes to
/// the writer within this function could return error.NoSpaceLeft, and that buf.items.len
/// will never be able to exceed maxInt(u16).
- pub fn writeVersionNode(self: *Compiler, node: *Node, writer: *std.Io.Writer, buf: *std.ArrayList(u8)) !void {
+ pub fn writeVersionNode(self: *Compiler, node: *Node, writer: *std.Io.Writer, buf: *std.array_list.Managed(u8)) !void {
// We can assume that buf.items.len will never be able to exceed the limits of a u16
try writeDataPadding(writer, @as(u16, @intCast(buf.items.len)));
@@ -3246,7 +3246,7 @@ pub const StringTable = struct {
}
pub fn writeResData(self: *Block, compiler: *Compiler, language: res.Language, block_id: u16, writer: anytype) !void {
- var data_buffer = std.ArrayList(u8).init(compiler.allocator);
+ var data_buffer = std.array_list.Managed(u8).init(compiler.allocator);
defer data_buffer.deinit();
const data_writer = data_buffer.writer();
lib/compiler/resinator/ico.zig
@@ -56,7 +56,7 @@ pub fn readAnyError(allocator: std.mem.Allocator, reader: anytype, max_size: u64
// entries than it actually does, we use an ArrayList with a conservatively
// limited initial capacity instead of allocating the entire slice at once.
const initial_capacity = @min(num_images, 8);
- var entries = try std.ArrayList(Entry).initCapacity(allocator, initial_capacity);
+ var entries = try std.array_list.Managed(Entry).initCapacity(allocator, initial_capacity);
errdefer entries.deinit();
var i: usize = 0;
lib/compiler/resinator/literals.zig
@@ -469,7 +469,7 @@ pub fn parseQuotedString(
const T = if (literal_type == .ascii) u8 else u16;
std.debug.assert(bytes.slice.len >= 2); // must at least have 2 double quote chars
- var buf = try std.ArrayList(T).initCapacity(allocator, bytes.slice.len);
+ var buf = try std.array_list.Managed(T).initCapacity(allocator, bytes.slice.len);
errdefer buf.deinit();
var iterative_parser = IterativeStringParser.init(bytes, options);
@@ -564,7 +564,7 @@ pub fn parseQuotedStringAsWideString(allocator: std.mem.Allocator, bytes: Source
// Note: We're only handling the case of parsing an ASCII string into a wide string from here on out.
// TODO: The logic below is similar to that in AcceleratorKeyCodepointTranslator, might be worth merging the two
- var buf = try std.ArrayList(u16).initCapacity(allocator, bytes.slice.len);
+ var buf = try std.array_list.Managed(u16).initCapacity(allocator, bytes.slice.len);
errdefer buf.deinit();
var iterative_parser = IterativeStringParser.init(bytes, options);
lib/compiler/resinator/main.zig
@@ -97,14 +97,14 @@ pub fn main() !void {
try stdout_writer.writeByte('\n');
}
- var dependencies_list = std.ArrayList([]const u8).init(allocator);
+ var dependencies_list = std.array_list.Managed([]const u8).init(allocator);
defer {
for (dependencies_list.items) |item| {
allocator.free(item);
}
dependencies_list.deinit();
}
- const maybe_dependencies_list: ?*std.ArrayList([]const u8) = if (options.depfile_path != null) &dependencies_list else null;
+ const maybe_dependencies_list: ?*std.array_list.Managed([]const u8) = if (options.depfile_path != null) &dependencies_list else null;
var include_paths = LazyIncludePaths{
.arena = arena,
@@ -115,7 +115,7 @@ pub fn main() !void {
const full_input = full_input: {
if (options.input_format == .rc and options.preprocess != .no) {
- var preprocessed_buf = std.ArrayList(u8).init(allocator);
+ var preprocessed_buf = std.array_list.Managed(u8).init(allocator);
errdefer preprocessed_buf.deinit();
// We're going to throw away everything except the final preprocessed output anyway,
@@ -127,7 +127,7 @@ pub fn main() !void {
var comp = aro.Compilation.init(aro_arena, std.fs.cwd());
defer comp.deinit();
- var argv = std.ArrayList([]const u8).init(comp.gpa);
+ var argv = std.array_list.Managed([]const u8).init(comp.gpa);
defer argv.deinit();
try argv.append("arocc"); // dummy command name
@@ -946,7 +946,7 @@ fn aroDiagnosticsToErrorBundle(
// - Only prints the message itself (no location, source line, error: prefix, etc)
// - Keeps track of source path/line/col instead
const MsgWriter = struct {
- buf: std.ArrayList(u8),
+ buf: std.array_list.Managed(u8),
path: ?[]const u8 = null,
// 1-indexed
line: u32 = undefined,
@@ -956,7 +956,7 @@ const MsgWriter = struct {
fn init(allocator: std.mem.Allocator) MsgWriter {
return .{
- .buf = std.ArrayList(u8).init(allocator),
+ .buf = std.array_list.Managed(u8).init(allocator),
};
}
lib/compiler/resinator/parse.zig
@@ -82,7 +82,7 @@ pub const Parser = struct {
}
fn parseRoot(self: *Self) Error!*Node {
- var statements = std.ArrayList(*Node).init(self.state.allocator);
+ var statements = std.array_list.Managed(*Node).init(self.state.allocator);
defer statements.deinit();
try self.parseStatements(&statements);
@@ -95,7 +95,7 @@ pub const Parser = struct {
return &node.base;
}
- fn parseStatements(self: *Self, statements: *std.ArrayList(*Node)) Error!void {
+ fn parseStatements(self: *Self, statements: *std.array_list.Managed(*Node)) Error!void {
while (true) {
try self.nextToken(.whitespace_delimiter_only);
if (self.state.token.id == .eof) break;
@@ -355,7 +355,7 @@ pub const Parser = struct {
const begin_token = self.state.token;
try self.check(.begin);
- var strings = std.ArrayList(*Node).init(self.state.allocator);
+ var strings = std.array_list.Managed(*Node).init(self.state.allocator);
defer strings.deinit();
while (true) {
const maybe_end_token = try self.lookaheadToken(.normal);
@@ -852,7 +852,7 @@ pub const Parser = struct {
/// Expects the current token to be a begin token.
/// After return, the current token will be the end token.
fn parseRawDataBlock(self: *Self) Error![]*Node {
- var raw_data = std.ArrayList(*Node).init(self.state.allocator);
+ var raw_data = std.array_list.Managed(*Node).init(self.state.allocator);
defer raw_data.deinit();
while (true) {
const maybe_end_token = try self.lookaheadToken(.normal);
lib/compiler/resinator/preprocess.zig
@@ -11,14 +11,14 @@ pub fn preprocess(
writer: anytype,
/// Expects argv[0] to be the command name
argv: []const []const u8,
- maybe_dependencies_list: ?*std.ArrayList([]const u8),
+ maybe_dependencies_list: ?*std.array_list.Managed([]const u8),
) PreprocessError!void {
try comp.addDefaultPragmaHandlers();
var driver: aro.Driver = .{ .comp = comp, .aro_name = "arocc" };
defer driver.deinit();
- var macro_buf = std.ArrayList(u8).init(comp.gpa);
+ var macro_buf = std.array_list.Managed(u8).init(comp.gpa);
defer macro_buf.deinit();
_ = driver.parseArgs(std.io.null_writer, macro_buf.writer(), argv) catch |err| switch (err) {
@@ -87,7 +87,7 @@ fn hasAnyErrors(comp: *aro.Compilation) bool {
/// `arena` is used for temporary -D argument strings and the INCLUDE environment variable.
/// The arena should be kept alive at least as long as `argv`.
-pub fn appendAroArgs(arena: Allocator, argv: *std.ArrayList([]const u8), options: cli.Options, system_include_paths: []const []const u8) !void {
+pub fn appendAroArgs(arena: Allocator, argv: *std.array_list.Managed([]const u8), options: cli.Options, system_include_paths: []const []const u8) !void {
try argv.appendSlice(&.{
"-E",
"--comments",
lib/compiler/resinator/res.zig
@@ -283,7 +283,7 @@ pub const NameOrOrdinal = union(enum) {
pub fn nameFromString(allocator: Allocator, bytes: SourceBytes) !NameOrOrdinal {
// Names have a limit of 256 UTF-16 code units + null terminator
- var buf = try std.ArrayList(u16).initCapacity(allocator, @min(257, bytes.slice.len));
+ var buf = try std.array_list.Managed(u16).initCapacity(allocator, @min(257, bytes.slice.len));
errdefer buf.deinit();
var i: usize = 0;
lib/compiler/resinator/source_mapping.zig
@@ -574,7 +574,7 @@ fn parseFilename(allocator: Allocator, str: []const u8) error{ OutOfMemory, Inva
escape_u,
};
- var filename = try std.ArrayList(u8).initCapacity(allocator, str.len);
+ var filename = try std.array_list.Managed(u8).initCapacity(allocator, str.len);
errdefer filename.deinit();
var state: State = .string;
var index: usize = 0;
lib/compiler/resinator/windows1252.zig
@@ -574,7 +574,7 @@ pub fn bestFitFromCodepoint(codepoint: u21) ?u8 {
}
test "windows-1252 to utf8" {
- var buf = std.ArrayList(u8).init(std.testing.allocator);
+ var buf = std.array_list.Managed(u8).init(std.testing.allocator);
defer buf.deinit();
const input_windows1252 = "\x81pqrstuvwxyz{|}~\x80\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8e\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9e\x9f\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff";
lib/compiler/aro_translate_c.zig
@@ -116,7 +116,7 @@ pub fn translate(
var driver: aro.Driver = .{ .comp = comp };
defer driver.deinit();
- var macro_buf = std.ArrayList(u8).init(gpa);
+ var macro_buf = std.array_list.Managed(u8).init(gpa);
defer macro_buf.deinit();
assert(!try driver.parseArgs(std.io.null_writer, macro_buf.writer(), args));
@@ -413,11 +413,11 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_ty: Type) Error!void {
break :blk ZigTag.opaque_literal.init();
}
- var fields = try std.ArrayList(ast.Payload.Record.Field).initCapacity(c.gpa, record_decl.fields.len);
+ var fields = try std.array_list.Managed(ast.Payload.Record.Field).initCapacity(c.gpa, record_decl.fields.len);
defer fields.deinit();
// TODO: Add support for flexible array field functions
- var functions = std.ArrayList(ZigNode).init(c.gpa);
+ var functions = std.array_list.Managed(ZigNode).init(c.gpa);
defer functions.deinit();
var unnamed_field_count: u32 = 0;
@@ -1234,7 +1234,7 @@ pub const PatternList = struct {
const source = template[0];
const impl = template[1];
- var tok_list = std.ArrayList(CToken).init(allocator);
+ var tok_list = std.array_list.Managed(CToken).init(allocator);
defer tok_list.deinit();
try tokenizeMacro(source, &tok_list);
const tokens = try allocator.dupe(CToken, tok_list.items);
@@ -1349,7 +1349,7 @@ pub const TypeError = Error || error{UnsupportedType};
pub const TransError = TypeError || error{UnsupportedTranslation};
pub const SymbolTable = std.StringArrayHashMap(ast.Node);
-pub const AliasList = std.ArrayList(struct {
+pub const AliasList = std.array_list.Managed(struct {
alias: []const u8,
name: []const u8,
});
@@ -1397,7 +1397,7 @@ pub fn ScopeExtra(comptime ScopeExtraContext: type, comptime ScopeExtraType: typ
/// into the main arena.
pub const Block = struct {
base: ScopeExtraScope,
- statements: std.ArrayList(ast.Node),
+ statements: std.array_list.Managed(ast.Node),
variables: AliasList,
mangle_count: u32 = 0,
label: ?[]const u8 = null,
@@ -1429,7 +1429,7 @@ pub fn ScopeExtra(comptime ScopeExtraContext: type, comptime ScopeExtraType: typ
.id = .block,
.parent = parent,
},
- .statements = std.ArrayList(ast.Node).init(c.gpa),
+ .statements = std.array_list.Managed(ast.Node).init(c.gpa),
.variables = AliasList.init(c.gpa),
.variable_discards = std.StringArrayHashMap(*ast.Payload.Discard).init(c.gpa),
};
@@ -1557,7 +1557,7 @@ pub fn ScopeExtra(comptime ScopeExtraContext: type, comptime ScopeExtraType: typ
sym_table: SymbolTable,
blank_macros: std.StringArrayHashMap(void),
context: *ScopeExtraContext,
- nodes: std.ArrayList(ast.Node),
+ nodes: std.array_list.Managed(ast.Node),
pub fn init(c: *ScopeExtraContext) Root {
return .{
@@ -1568,7 +1568,7 @@ pub fn ScopeExtra(comptime ScopeExtraContext: type, comptime ScopeExtraType: typ
.sym_table = SymbolTable.init(c.gpa),
.blank_macros = std.StringArrayHashMap(void).init(c.gpa),
.context = c,
- .nodes = std.ArrayList(ast.Node).init(c.gpa),
+ .nodes = std.array_list.Managed(ast.Node).init(c.gpa),
};
}
@@ -1705,7 +1705,7 @@ pub fn ScopeExtra(comptime ScopeExtraContext: type, comptime ScopeExtraType: typ
};
}
-pub fn tokenizeMacro(source: []const u8, tok_list: *std.ArrayList(CToken)) Error!void {
+pub fn tokenizeMacro(source: []const u8, tok_list: *std.array_list.Managed(CToken)) Error!void {
var tokenizer: aro.Tokenizer = .{
.buf = source,
.source = .unused,
@@ -1732,7 +1732,7 @@ test "Macro matching" {
const helper = struct {
const MacroFunctions = std.zig.c_translation.Macros;
fn checkMacro(allocator: mem.Allocator, pattern_list: PatternList, source: []const u8, comptime expected_match: ?[]const u8) !void {
- var tok_list = std.ArrayList(CToken).init(allocator);
+ var tok_list = std.array_list.Managed(CToken).init(allocator);
defer tok_list.deinit();
try tokenizeMacro(source, &tok_list);
const macro_slicer: MacroSlicer = .{ .source = source, .tokens = tok_list.items };
lib/compiler/build_runner.zig
@@ -5,7 +5,6 @@ const io = std.io;
const fmt = std.fmt;
const mem = std.mem;
const process = std.process;
-const ArrayList = std.ArrayList;
const File = std.fs.File;
const Step = std.Build.Step;
const Watch = std.Build.Watch;
@@ -98,8 +97,8 @@ pub fn main() !void {
dependencies.root_deps,
);
- var targets = ArrayList([]const u8).init(arena);
- var debug_log_scopes = ArrayList([]const u8).init(arena);
+ var targets = std.array_list.Managed([]const u8).init(arena);
+ var debug_log_scopes = std.array_list.Managed([]const u8).init(arena);
var thread_pool_options: std.Thread.Pool.Options = .{ .allocator = arena };
var install_prefix: ?[]const u8 = null;
lib/compiler/reduce.zig
@@ -114,10 +114,10 @@ pub fn main() !void {
interestingness_argv.appendAssumeCapacity(checker_path);
interestingness_argv.appendSliceAssumeCapacity(argv);
- var rendered = std.ArrayList(u8).init(gpa);
+ var rendered = std.array_list.Managed(u8).init(gpa);
defer rendered.deinit();
- var astgen_input = std.ArrayList(u8).init(gpa);
+ var astgen_input = std.array_list.Managed(u8).init(gpa);
defer astgen_input.deinit();
var tree = try parse(gpa, root_source_file_path);
@@ -161,7 +161,7 @@ pub fn main() !void {
// result, restart the whole process, reparsing the AST and re-generating the list
// of all possible transformations and shuffling it again.
- var transformations = std.ArrayList(Walk.Transformation).init(gpa);
+ var transformations = std.array_list.Managed(Walk.Transformation).init(gpa);
defer transformations.deinit();
try Walk.findTransformations(arena, &tree, &transformations);
sortTransformations(transformations.items, rng.random());
@@ -382,7 +382,7 @@ fn transformationsToFixups(
}
}
- var other_source = std.ArrayList(u8).init(gpa);
+ var other_source = std.array_list.Managed(u8).init(gpa);
defer other_source.deinit();
try other_source.appendSlice("struct {\n");
try other_file_ast.renderToArrayList(&other_source, inlined_fixups);
lib/docs/wasm/markdown.zig
@@ -1119,7 +1119,7 @@ fn testRender(input: []const u8, expected: []const u8) !void {
var doc = try parser.endInput();
defer doc.deinit(testing.allocator);
- var actual = std.ArrayList(u8).init(testing.allocator);
+ var actual = std.array_list.Managed(u8).init(testing.allocator);
defer actual.deinit();
try doc.render(actual.writer());
lib/init/src/main.zig
@@ -8,9 +8,10 @@ pub fn main() !void {
}
test "simple test" {
- var list = std.ArrayList(i32).init(std.testing.allocator);
- defer list.deinit(); // Try commenting this out and see if zig detects the memory leak!
- try list.append(42);
+ const gpa = std.testing.allocator;
+ var list: std.ArrayList(i32) = .empty;
+ defer list.deinit(gpa); // Try commenting this out and see if zig detects the memory leak!
+ try list.append(gpa, 42);
try std.testing.expectEqual(@as(i32, 42), list.pop());
}
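For comparison (not what the template uses): a purely mechanical migration, like the one applied to most files in this commit, would have kept the stored-allocator wrapper. A sketch of how this test would read in that style:

    const std = @import("std");

    test "simple test (managed wrapper, for comparison)" {
        var list = std.array_list.Managed(i32).init(std.testing.allocator);
        defer list.deinit(); // the allocator is stored in the list, so deinit takes no argument
        try list.append(42);
        try std.testing.expectEqual(@as(i32, 42), list.pop());
    }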
lib/std/Build/Step/CheckObject.zig
@@ -18,7 +18,7 @@ pub const base_id: Step.Id = .check_object;
step: Step,
source: std.Build.LazyPath,
max_bytes: usize = 20 * 1024 * 1024,
-checks: std.ArrayList(Check),
+checks: std.array_list.Managed(Check),
obj_format: std.Target.ObjectFormat,
pub fn create(
@@ -36,7 +36,7 @@ pub fn create(
.makeFn = make,
}),
.source = source.dupe(owner),
- .checks = std.ArrayList(Check).init(gpa),
+ .checks = std.array_list.Managed(Check).init(gpa),
.obj_format = obj_format,
};
check_object.source.addStepDependencies(&check_object.step);
@@ -81,7 +81,7 @@ const Action = struct {
const hay = mem.trim(u8, haystack, " ");
const phrase = mem.trim(u8, act.phrase.resolve(b, step), " ");
- var candidate_vars: std.ArrayList(struct { name: []const u8, value: u64 }) = .init(b.allocator);
+ var candidate_vars: std.array_list.Managed(struct { name: []const u8, value: u64 }) = .init(b.allocator);
var hay_it = mem.tokenizeScalar(u8, hay, ' ');
var needle_it = mem.tokenizeScalar(u8, phrase, ' ');
@@ -157,8 +157,8 @@ const Action = struct {
fn computeCmp(act: Action, b: *std.Build, step: *Step, global_vars: anytype) !bool {
const gpa = step.owner.allocator;
const phrase = act.phrase.resolve(b, step);
- var op_stack = std.ArrayList(enum { add, sub, mod, mul }).init(gpa);
- var values = std.ArrayList(u64).init(gpa);
+ var op_stack = std.array_list.Managed(enum { add, sub, mod, mul }).init(gpa);
+ var values = std.array_list.Managed(u64).init(gpa);
var it = mem.tokenizeScalar(u8, phrase, ' ');
while (it.next()) |next| {
@@ -242,15 +242,15 @@ const ComputeCompareExpected = struct {
const Check = struct {
kind: Kind,
payload: Payload,
- data: std.ArrayList(u8),
- actions: std.ArrayList(Action),
+ data: std.array_list.Managed(u8),
+ actions: std.array_list.Managed(Action),
fn create(allocator: Allocator, kind: Kind) Check {
return .{
.kind = kind,
.payload = .{ .none = {} },
- .data = std.ArrayList(u8).init(allocator),
- .actions = std.ArrayList(Action).init(allocator),
+ .data = std.array_list.Managed(u8).init(allocator),
+ .actions = std.array_list.Managed(Action).init(allocator),
};
}
@@ -1214,7 +1214,7 @@ const MachODumper = struct {
}
fn dumpRebaseInfo(ctx: ObjectContext, data: []const u8, writer: anytype) !void {
- var rebases = std.ArrayList(u64).init(ctx.gpa);
+ var rebases = std.array_list.Managed(u64).init(ctx.gpa);
defer rebases.deinit();
try ctx.parseRebaseInfo(data, &rebases);
mem.sort(u64, rebases.items, {}, std.sort.asc(u64));
@@ -1223,7 +1223,7 @@ const MachODumper = struct {
}
}
- fn parseRebaseInfo(ctx: ObjectContext, data: []const u8, rebases: *std.ArrayList(u64)) !void {
+ fn parseRebaseInfo(ctx: ObjectContext, data: []const u8, rebases: *std.array_list.Managed(u64)) !void {
var stream = std.io.fixedBufferStream(data);
var creader = std.io.countingReader(stream.reader());
const reader = creader.reader();
@@ -1313,7 +1313,7 @@ const MachODumper = struct {
};
fn dumpBindInfo(ctx: ObjectContext, data: []const u8, writer: anytype) !void {
- var bindings = std.ArrayList(Binding).init(ctx.gpa);
+ var bindings = std.array_list.Managed(Binding).init(ctx.gpa);
defer {
for (bindings.items) |*b| {
b.deinit(ctx.gpa);
@@ -1335,7 +1335,7 @@ const MachODumper = struct {
}
}
- fn parseBindInfo(ctx: ObjectContext, data: []const u8, bindings: *std.ArrayList(Binding)) !void {
+ fn parseBindInfo(ctx: ObjectContext, data: []const u8, bindings: *std.array_list.Managed(Binding)) !void {
var stream = std.io.fixedBufferStream(data);
var creader = std.io.countingReader(stream.reader());
const reader = creader.reader();
@@ -1346,7 +1346,7 @@ const MachODumper = struct {
var offset: u64 = 0;
var addend: i64 = 0;
- var name_buf = std.ArrayList(u8).init(ctx.gpa);
+ var name_buf = std.array_list.Managed(u8).init(ctx.gpa);
defer name_buf.deinit();
while (true) {
@@ -1434,7 +1434,7 @@ const MachODumper = struct {
var arena = std.heap.ArenaAllocator.init(ctx.gpa);
defer arena.deinit();
- var exports = std.ArrayList(Export).init(arena.allocator());
+ var exports = std.array_list.Managed(Export).init(arena.allocator());
var it = TrieIterator{ .data = data };
try parseTrieNode(arena.allocator(), &it, "", &exports);
@@ -1546,7 +1546,7 @@ const MachODumper = struct {
arena: Allocator,
it: *TrieIterator,
prefix: []const u8,
- exports: *std.ArrayList(Export),
+ exports: *std.array_list.Managed(Export),
) !void {
const size = try it.readUleb128();
if (size > 0) {
@@ -1621,7 +1621,7 @@ const MachODumper = struct {
var ctx = ObjectContext{ .gpa = gpa, .data = bytes, .header = hdr };
try ctx.parse();
- var output = std.ArrayList(u8).init(gpa);
+ var output = std.array_list.Managed(u8).init(gpa);
const writer = output.writer();
switch (check.kind) {
@@ -1787,7 +1787,7 @@ const ElfDumper = struct {
try ctx.objects.append(gpa, .{ .name = name, .off = stream.pos, .len = size });
}
- var output = std.ArrayList(u8).init(gpa);
+ var output = std.array_list.Managed(u8).init(gpa);
const writer = output.writer();
switch (check.kind) {
@@ -1848,7 +1848,7 @@ const ElfDumper = struct {
files.putAssumeCapacityNoClobber(object.off - @sizeOf(elf.ar_hdr), object.name);
}
- var symbols = std.AutoArrayHashMap(usize, std.ArrayList([]const u8)).init(ctx.gpa);
+ var symbols = std.AutoArrayHashMap(usize, std.array_list.Managed([]const u8)).init(ctx.gpa);
defer {
for (symbols.values()) |*value| {
value.deinit();
@@ -1859,7 +1859,7 @@ const ElfDumper = struct {
for (ctx.symtab.items) |entry| {
const gop = try symbols.getOrPut(@intCast(entry.off));
if (!gop.found_existing) {
- gop.value_ptr.* = std.ArrayList([]const u8).init(ctx.gpa);
+ gop.value_ptr.* = std.array_list.Managed([]const u8).init(ctx.gpa);
}
try gop.value_ptr.append(entry.name);
}
@@ -1944,7 +1944,7 @@ const ElfDumper = struct {
else => {},
};
- var output = std.ArrayList(u8).init(gpa);
+ var output = std.array_list.Managed(u8).init(gpa);
const writer = output.writer();
switch (check.kind) {
@@ -2398,7 +2398,7 @@ const WasmDumper = struct {
return error.UnsupportedWasmVersion;
}
- var output = std.ArrayList(u8).init(gpa);
+ var output = std.array_list.Managed(u8).init(gpa);
defer output.deinit();
parseAndDumpInner(step, check, bytes, &fbs, &output) catch |err| switch (err) {
error.EndOfStream => try output.appendSlice("\n<UnexpectedEndOfStream>"),
@@ -2412,7 +2412,7 @@ const WasmDumper = struct {
check: Check,
bytes: []const u8,
fbs: *std.io.FixedBufferStream([]const u8),
- output: *std.ArrayList(u8),
+ output: *std.array_list.Managed(u8),
) !void {
const reader = fbs.reader();
const writer = output.writer();
lib/std/Build/Step/Compile.zig
@@ -4,7 +4,6 @@ const mem = std.mem;
const fs = std.fs;
const assert = std.debug.assert;
const panic = std.debug.panic;
-const ArrayList = std.ArrayList;
const StringHashMap = std.StringHashMap;
const Sha256 = std.crypto.hash.sha2.Sha256;
const Allocator = mem.Allocator;
@@ -60,7 +59,7 @@ filters: []const []const u8,
test_runner: ?TestRunner,
wasi_exec_model: ?std.builtin.WasiExecModel = null,
-installed_headers: ArrayList(HeaderInstallation),
+installed_headers: std.array_list.Managed(HeaderInstallation),
/// This step is used to create an include tree that dependent modules can add to their include
/// search paths. Installed headers are copied to this step.
@@ -421,7 +420,7 @@ pub fn create(owner: *std.Build, options: Options) *Compile {
.out_lib_filename = undefined,
.major_only_filename = null,
.name_only_filename = null,
- .installed_headers = ArrayList(HeaderInstallation).init(owner.allocator),
+ .installed_headers = std.array_list.Managed(HeaderInstallation).init(owner.allocator),
.zig_lib_dir = null,
.exec_cmd_args = null,
.filters = options.filters,
@@ -766,9 +765,9 @@ fn runPkgConfig(compile: *Compile, lib_name: []const u8) !PkgConfigResult {
else => return err,
};
- var zig_cflags = ArrayList([]const u8).init(b.allocator);
+ var zig_cflags = std.array_list.Managed([]const u8).init(b.allocator);
defer zig_cflags.deinit();
- var zig_libs = ArrayList([]const u8).init(b.allocator);
+ var zig_libs = std.array_list.Managed([]const u8).init(b.allocator);
defer zig_libs.deinit();
var arg_it = mem.tokenizeAny(u8, stdout, " \r\n\t");
@@ -1076,7 +1075,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
const b = step.owner;
const arena = b.allocator;
- var zig_args = ArrayList([]const u8).init(arena);
+ var zig_args = std.array_list.Managed([]const u8).init(arena);
defer zig_args.deinit();
try zig_args.append(b.graph.zig_exe);
@@ -1798,7 +1797,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
try b.cache_root.handle.makePath("args");
const args_to_escape = zig_args.items[2..];
- var escaped_args = try ArrayList([]const u8).initCapacity(arena, args_to_escape.len);
+ var escaped_args = try std.array_list.Managed([]const u8).initCapacity(arena, args_to_escape.len);
arg_blk: for (args_to_escape) |arg| {
for (arg, 0..) |c, arg_idx| {
if (c == '\\' or c == '"') {
@@ -1948,7 +1947,7 @@ pub fn doAtomicSymLinks(
fn execPkgConfigList(b: *std.Build, out_code: *u8) (PkgConfigError || RunError)![]const PkgConfigPkg {
const pkg_config_exe = b.graph.env_map.get("PKG_CONFIG") orelse "pkg-config";
const stdout = try b.runAllowFail(&[_][]const u8{ pkg_config_exe, "--list-all" }, out_code, .Ignore);
- var list = ArrayList(PkgConfigPkg).init(b.allocator);
+ var list = std.array_list.Managed(PkgConfigPkg).init(b.allocator);
errdefer list.deinit();
var line_it = mem.tokenizeAny(u8, stdout, "\r\n");
while (line_it.next()) |line| {
@@ -1985,7 +1984,7 @@ fn getPkgConfigList(b: *std.Build) ![]const PkgConfigPkg {
}
}
-fn addFlag(args: *ArrayList([]const u8), comptime name: []const u8, opt: ?bool) !void {
+fn addFlag(args: *std.array_list.Managed([]const u8), comptime name: []const u8, opt: ?bool) !void {
const cond = opt orelse return;
try args.ensureUnusedCapacity(1);
if (cond) {
lib/std/Build/Step/ConfigHeader.zig
@@ -621,7 +621,7 @@ fn expand_variables_cmake(
contents: []const u8,
values: std.StringArrayHashMap(Value),
) ![]const u8 {
- var result: std.ArrayList(u8) = .init(allocator);
+ var result: std.array_list.Managed(u8) = .init(allocator);
errdefer result.deinit();
const valid_varname_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789/_.+-";
@@ -633,7 +633,7 @@ fn expand_variables_cmake(
source: usize,
target: usize,
};
- var var_stack: std.ArrayList(Position) = .init(allocator);
+ var var_stack: std.array_list.Managed(Position) = .init(allocator);
defer var_stack.deinit();
loop: while (curr < contents.len) : (curr += 1) {
switch (contents[curr]) {
lib/std/Build/Step/ObjCopy.zig
@@ -182,7 +182,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
return step.fail("unable to make path {s}: {s}", .{ cache_path, @errorName(err) });
};
- var argv = std.ArrayList([]const u8).init(b.allocator);
+ var argv = std.array_list.Managed([]const u8).init(b.allocator);
try argv.appendSlice(&.{ b.graph.zig_exe, "objcopy" });
if (objcopy.only_section) |only_section| {
lib/std/Build/Step/Run.zig
@@ -679,15 +679,15 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const run: *Run = @fieldParentPtr("step", step);
const has_side_effects = run.hasSideEffects();
- var argv_list = std.ArrayList([]const u8).init(arena);
- var output_placeholders = std.ArrayList(IndexedOutput).init(arena);
+ var argv_list = std.array_list.Managed([]const u8).init(arena);
+ var output_placeholders = std.array_list.Managed(IndexedOutput).init(arena);
var man = b.graph.cache.obtain();
defer man.deinit();
if (run.env_map) |env_map| {
const KV = struct { []const u8, []const u8 };
- var kv_pairs = try std.ArrayList(KV).initCapacity(arena, env_map.count());
+ var kv_pairs = try std.array_list.Managed(KV).initCapacity(arena, env_map.count());
var iter = env_map.iterator();
while (iter.next()) |entry| {
kv_pairs.appendAssumeCapacity(.{ entry.key_ptr.*, entry.value_ptr.* });
@@ -1080,7 +1080,7 @@ fn runCommand(
else => false,
};
- var interp_argv = std.ArrayList([]const u8).init(b.allocator);
+ var interp_argv = std.array_list.Managed([]const u8).init(b.allocator);
defer interp_argv.deinit();
var env_map = run.env_map orelse &b.graph.env_map;
lib/std/Build/Step/TranslateC.zig
@@ -10,8 +10,8 @@ pub const base_id: Step.Id = .translate_c;
step: Step,
source: std.Build.LazyPath,
-include_dirs: std.ArrayList(std.Build.Module.IncludeDir),
-c_macros: std.ArrayList([]const u8),
+include_dirs: std.array_list.Managed(std.Build.Module.IncludeDir),
+c_macros: std.array_list.Managed([]const u8),
out_basename: []const u8,
target: std.Build.ResolvedTarget,
optimize: std.builtin.OptimizeMode,
@@ -38,8 +38,8 @@ pub fn create(owner: *std.Build, options: Options) *TranslateC {
.makeFn = make,
}),
.source = source,
- .include_dirs = std.ArrayList(std.Build.Module.IncludeDir).init(owner.allocator),
- .c_macros = std.ArrayList([]const u8).init(owner.allocator),
+ .include_dirs = std.array_list.Managed(std.Build.Module.IncludeDir).init(owner.allocator),
+ .c_macros = std.array_list.Managed([]const u8).init(owner.allocator),
.out_basename = undefined,
.target = options.target,
.optimize = options.optimize,
@@ -153,7 +153,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const b = step.owner;
const translate_c: *TranslateC = @fieldParentPtr("step", step);
- var argv_list = std.ArrayList([]const u8).init(b.allocator);
+ var argv_list = std.array_list.Managed([]const u8).init(b.allocator);
try argv_list.append(b.graph.zig_exe);
try argv_list.append("translate-c");
if (translate_c.link_libc) {
lib/std/Build/Module.zig
@@ -10,12 +10,12 @@ resolved_target: ?std.Build.ResolvedTarget = null,
optimize: ?std.builtin.OptimizeMode = null,
dwarf_format: ?std.dwarf.Format,
-c_macros: std.ArrayListUnmanaged([]const u8),
-include_dirs: std.ArrayListUnmanaged(IncludeDir),
-lib_paths: std.ArrayListUnmanaged(LazyPath),
-rpaths: std.ArrayListUnmanaged(RPath),
+c_macros: ArrayList([]const u8),
+include_dirs: ArrayList(IncludeDir),
+lib_paths: ArrayList(LazyPath),
+rpaths: ArrayList(RPath),
frameworks: std.StringArrayHashMapUnmanaged(LinkFrameworkOptions),
-link_objects: std.ArrayListUnmanaged(LinkObject),
+link_objects: ArrayList(LinkObject),
strip: ?bool,
unwind_tables: ?std.builtin.UnwindTables,
@@ -170,7 +170,7 @@ pub const IncludeDir = union(enum) {
pub fn appendZigProcessFlags(
include_dir: IncludeDir,
b: *std.Build,
- zig_args: *std.ArrayList([]const u8),
+ zig_args: *std.array_list.Managed([]const u8),
asking_step: ?*Step,
) !void {
const flag: []const u8, const lazy_path: LazyPath = switch (include_dir) {
@@ -537,7 +537,7 @@ pub fn addCMacro(m: *Module, name: []const u8, value: []const u8) void {
pub fn appendZigProcessFlags(
m: *Module,
- zig_args: *std.ArrayList([]const u8),
+ zig_args: *std.array_list.Managed([]const u8),
asking_step: ?*Step,
) !void {
const b = m.owner;
@@ -634,7 +634,7 @@ pub fn appendZigProcessFlags(
}
fn addFlag(
- args: *std.ArrayList([]const u8),
+ args: *std.array_list.Managed([]const u8),
opt: ?bool,
then_name: []const u8,
else_name: []const u8,
@@ -706,3 +706,4 @@ const std = @import("std");
const assert = std.debug.assert;
const LazyPath = std.Build.LazyPath;
const Step = std.Build.Step;
+const ArrayList = std.ArrayList;
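Note: with `const ArrayList = std.ArrayList;` added at the bottom of Module.zig, fields formerly typed `std.ArrayListUnmanaged(T)` keep their allocator-per-call semantics under the shorter name; `std.ArrayList` itself is now the unmanaged variant. A sketch of that calling convention, assuming a caller-supplied `gpa` (the function and names are illustrative only):

const std = @import("std");

fn collectSquares(gpa: std.mem.Allocator, n: usize) ![]usize {
    // Unmanaged list: starts from the `.empty` decl literal and takes the
    // allocator explicitly on every call that may allocate or free.
    var list: std.ArrayList(usize) = .empty;
    errdefer list.deinit(gpa);
    for (0..n) |i| try list.append(gpa, i * i);
    return list.toOwnedSlice(gpa);
}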
lib/std/Build/Step.zig
@@ -1,12 +1,22 @@
+const Step = @This();
+const std = @import("../std.zig");
+const Build = std.Build;
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const builtin = @import("builtin");
+const Cache = Build.Cache;
+const Path = Cache.Path;
+const ArrayList = std.ArrayList;
+
id: Id,
name: []const u8,
owner: *Build,
makeFn: MakeFn,
-dependencies: std.ArrayList(*Step),
+dependencies: std.array_list.Managed(*Step),
/// This field is empty during execution of the user's build script, and
/// then populated during dependency loop checking in the build runner.
-dependants: std.ArrayListUnmanaged(*Step),
+dependants: ArrayList(*Step),
/// Collects the set of files that retrigger this step to run.
///
/// This is used by the build system's implementation of `--watch` but it can
@@ -39,7 +49,7 @@ state: State,
/// total system memory available.
max_rss: usize,
-result_error_msgs: std.ArrayListUnmanaged([]const u8),
+result_error_msgs: ArrayList([]const u8),
result_error_bundle: std.zig.ErrorBundle,
result_stderr: []const u8,
result_cached: bool,
@@ -175,7 +185,7 @@ pub const Inputs = struct {
pub const Table = std.ArrayHashMapUnmanaged(Build.Cache.Path, Files, Build.Cache.Path.TableAdapter, false);
/// The special file name "." means any changes inside the directory.
- pub const Files = std.ArrayListUnmanaged([]const u8);
+ pub const Files = ArrayList([]const u8);
pub fn populated(inputs: *Inputs) bool {
return inputs.table.count() != 0;
@@ -204,8 +214,8 @@ pub fn init(options: StepOptions) Step {
.name = arena.dupe(u8, options.name) catch @panic("OOM"),
.owner = options.owner,
.makeFn = options.makeFn,
- .dependencies = std.ArrayList(*Step).init(arena),
- .dependants = .{},
+ .dependencies = std.array_list.Managed(*Step).init(arena),
+ .dependants = .empty,
.inputs = Inputs.init,
.state = .precheck_unstarted,
.max_rss = options.max_rss,
@@ -326,15 +336,6 @@ pub fn dump(step: *Step, w: *std.Io.Writer, tty_config: std.Io.tty.Config) void
}
}
-const Step = @This();
-const std = @import("../std.zig");
-const Build = std.Build;
-const Allocator = std.mem.Allocator;
-const assert = std.debug.assert;
-const builtin = @import("builtin");
-const Cache = Build.Cache;
-const Path = Cache.Path;
-
pub fn evalChildProcess(s: *Step, argv: []const []const u8) ![]u8 {
const run_result = try captureChildProcess(s, std.Progress.Node.none, argv);
try handleChildProcessTerm(s, run_result.term, null, argv);
@@ -980,7 +981,7 @@ fn addDirectoryWatchInputFromBuilder(step: *Step, builder: *Build, sub_path: []c
fn addWatchInputFromPath(step: *Step, path: Build.Cache.Path, basename: []const u8) !void {
const gpa = step.owner.allocator;
const gop = try step.inputs.table.getOrPut(gpa, path);
- if (!gop.found_existing) gop.value_ptr.* = .{};
+ if (!gop.found_existing) gop.value_ptr.* = .empty;
try gop.value_ptr.append(gpa, basename);
}
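Note: the map-entry initializer changes from `.{}` to `.empty`, following the convention (also noted in array_list.zig below) that default initialization of unmanaged containers is deprecated in favour of the explicit `empty` declaration. A sketch of the same getOrPut shape with a hypothetical string-keyed table of lists, assuming a `gpa` allocator:

const std = @import("std");

fn addTag(
    gpa: std.mem.Allocator,
    table: *std.StringHashMapUnmanaged(std.ArrayList([]const u8)),
    key: []const u8,
    tag: []const u8,
) !void {
    const gop = try table.getOrPut(gpa, key);
    // New entries start from the canonical empty list rather than `.{}`.
    if (!gop.found_existing) gop.value_ptr.* = .empty;
    try gop.value_ptr.append(gpa, tag);
}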
lib/std/compress/lzma2.zig
@@ -18,7 +18,7 @@ test {
const compressed = &[_]u8{ 0x01, 0x00, 0x05, 0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x0A, 0x02, 0x00, 0x06, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x21, 0x0A, 0x00 };
const allocator = std.testing.allocator;
- var decomp = std.ArrayList(u8).init(allocator);
+ var decomp = std.array_list.Managed(u8).init(allocator);
defer decomp.deinit();
var stream = std.io.fixedBufferStream(compressed);
try decompress(allocator, stream.reader(), decomp.writer());
lib/std/crypto/argon2.zig
@@ -14,7 +14,7 @@ const pwhash = crypto.pwhash;
const Thread = std.Thread;
const Blake2b512 = blake2.Blake2b512;
-const Blocks = std.ArrayListAligned([block_length]u64, .@"16");
+const Blocks = std.array_list.AlignedManaged([block_length]u64, .@"16");
const H0 = [Blake2b512.digest_length + 8]u8;
const EncodingError = crypto.errors.EncodingError;
@@ -252,7 +252,7 @@ fn processBlocksMt(
lanes: u32,
segments: u32,
) KdfError!void {
- var threads_list = try std.ArrayList(Thread).initCapacity(allocator, threads);
+ var threads_list = try std.array_list.Managed(Thread).initCapacity(allocator, threads);
defer threads_list.deinit();
var n: u32 = 0;
@@ -507,7 +507,7 @@ pub fn kdf(
var blocks = try Blocks.initCapacity(allocator, memory);
defer blocks.deinit();
- blocks.appendNTimesAssumeCapacity([_]u64{0} ** block_length, memory);
+ blocks.appendNTimesAssumeCapacity(@splat(0), memory);
initBlocks(&blocks, &h0, memory, params.p);
try processBlocks(allocator, &blocks, params.t, memory, params.p, mode);
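Note: the zero block is now written with `@splat(0)`, which infers the `[block_length]u64` array type from the parameter it is passed to, instead of spelling out `[_]u64{0} ** block_length`. A tiny illustration; the `block_length` below is a stand-in, not argon2's real constant:

const std = @import("std");

test "@splat fills a fixed-size array" {
    const block_length = 4; // stand-in; argon2's block is much larger
    const zero_block: [block_length]u64 = @splat(0);
    try std.testing.expectEqualSlices(u64, &[_]u64{ 0, 0, 0, 0 }, &zero_block);
}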
lib/std/debug/Dwarf/expression.zig
@@ -1064,7 +1064,7 @@ test "DWARF expressions" {
const b = Builder(options);
- var program = std.ArrayList(u8).init(allocator);
+ var program = std.array_list.Managed(u8).init(allocator);
defer program.deinit();
const writer = program.writer();
@@ -1120,7 +1120,7 @@ test "DWARF expressions" {
var mock_compile_unit: std.debug.Dwarf.CompileUnit = undefined;
mock_compile_unit.addr_base = 1;
- var mock_debug_addr = std.ArrayList(u8).init(allocator);
+ var mock_debug_addr = std.array_list.Managed(u8).init(allocator);
defer mock_debug_addr.deinit();
try mock_debug_addr.writer().writeInt(u16, 0, native_endian);
@@ -1590,7 +1590,7 @@ test "DWARF expressions" {
// Sub-expression
{
- var sub_program = std.ArrayList(u8).init(allocator);
+ var sub_program = std.array_list.Managed(u8).init(allocator);
defer sub_program.deinit();
const sub_writer = sub_program.writer();
try b.writeLiteral(sub_writer, 3);
@@ -1617,7 +1617,7 @@ test "DWARF expressions" {
if (abi.regBytes(&thread_context, 0, reg_context)) |reg_bytes| {
mem.writeInt(usize, reg_bytes[0..@sizeOf(usize)], 0xee, native_endian);
- var sub_program = std.ArrayList(u8).init(allocator);
+ var sub_program = std.array_list.Managed(u8).init(allocator);
defer sub_program.deinit();
const sub_writer = sub_program.writer();
try b.writeReg(sub_writer, 0);
lib/std/debug/Dwarf.zig
@@ -27,6 +27,7 @@ const maxInt = std.math.maxInt;
const MemoryAccessor = std.debug.MemoryAccessor;
const Path = std.Build.Cache.Path;
const FixedBufferReader = std.debug.FixedBufferReader;
+const ArrayList = std.ArrayList;
const Dwarf = @This();
@@ -42,11 +43,11 @@ sections: SectionArray = null_section_array,
is_macho: bool,
/// Filled later by the initializer
-abbrev_table_list: std.ArrayListUnmanaged(Abbrev.Table) = .empty,
+abbrev_table_list: ArrayList(Abbrev.Table) = .empty,
/// Filled later by the initializer
-compile_unit_list: std.ArrayListUnmanaged(CompileUnit) = .empty,
+compile_unit_list: ArrayList(CompileUnit) = .empty,
/// Filled later by the initializer
-func_list: std.ArrayListUnmanaged(Func) = .empty,
+func_list: ArrayList(Func) = .empty,
/// Starts out non-`null` if the `.eh_frame_hdr` section is present. May become `null` later if we
/// find that `.eh_frame_hdr` is incomplete.
@@ -54,10 +55,10 @@ eh_frame_hdr: ?ExceptionFrameHeader = null,
/// These lookup tables are only used if `eh_frame_hdr` is null
cie_map: std.AutoArrayHashMapUnmanaged(u64, CommonInformationEntry) = .empty,
/// Sorted by start_pc
-fde_list: std.ArrayListUnmanaged(FrameDescriptionEntry) = .empty,
+fde_list: ArrayList(FrameDescriptionEntry) = .empty,
/// Populated by `populateRanges`.
-ranges: std.ArrayListUnmanaged(Range) = .empty,
+ranges: ArrayList(Range) = .empty,
pub const Range = struct {
start: u64,
@@ -1038,7 +1039,7 @@ fn scanAllCompileUnits(di: *Dwarf, allocator: Allocator) ScanError!void {
var fbr: FixedBufferReader = .{ .buf = di.section(.debug_info).?, .endian = di.endian };
var this_unit_offset: u64 = 0;
- var attrs_buf = std.ArrayList(Die.Attr).init(allocator);
+ var attrs_buf = std.array_list.Managed(Die.Attr).init(allocator);
defer attrs_buf.deinit();
while (this_unit_offset < fbr.buf.len) {
@@ -1343,7 +1344,7 @@ fn parseAbbrevTable(di: *Dwarf, allocator: Allocator, offset: u64) !Abbrev.Table
.endian = di.endian,
};
- var abbrevs = std.ArrayList(Abbrev).init(allocator);
+ var abbrevs = std.array_list.Managed(Abbrev).init(allocator);
defer {
for (abbrevs.items) |*abbrev| {
abbrev.deinit(allocator);
@@ -1351,7 +1352,7 @@ fn parseAbbrevTable(di: *Dwarf, allocator: Allocator, offset: u64) !Abbrev.Table
abbrevs.deinit();
}
- var attrs = std.ArrayList(Abbrev.Attr).init(allocator);
+ var attrs = std.array_list.Managed(Abbrev.Attr).init(allocator);
defer attrs.deinit();
while (true) {
@@ -1468,9 +1469,9 @@ fn runLineNumberProgram(d: *Dwarf, gpa: Allocator, compile_unit: *CompileUnit) !
const standard_opcode_lengths = try fbr.readBytes(opcode_base - 1);
- var directories: std.ArrayListUnmanaged(FileEntry) = .empty;
+ var directories: ArrayList(FileEntry) = .empty;
defer directories.deinit(gpa);
- var file_entries: std.ArrayListUnmanaged(FileEntry) = .empty;
+ var file_entries: ArrayList(FileEntry) = .empty;
defer file_entries.deinit(gpa);
if (version < 5) {
@@ -2244,7 +2245,7 @@ pub const ElfModule = struct {
if (chdr.ch_type != .ZLIB) continue;
var decompress: std.compress.flate.Decompress = .init(&section_reader, .zlib, &.{});
- var decompressed_section: std.ArrayListUnmanaged(u8) = .empty;
+ var decompressed_section: ArrayList(u8) = .empty;
defer decompressed_section.deinit(gpa);
decompress.reader.appendRemainingUnlimited(gpa, null, &decompressed_section, std.compress.flate.history_len) catch {
invalidDebugInfoDetected();
lib/std/debug/Pdb.zig
@@ -76,7 +76,7 @@ pub fn parseDbiStream(self: *Pdb) !void {
const mod_info_size = header.mod_info_size;
const section_contrib_size = header.section_contribution_size;
- var modules = std.ArrayList(Module).init(self.allocator);
+ var modules = std.array_list.Managed(Module).init(self.allocator);
errdefer modules.deinit();
// Module Info Substream
@@ -117,7 +117,7 @@ pub fn parseDbiStream(self: *Pdb) !void {
}
// Section Contribution Substream
- var sect_contribs = std.ArrayList(pdb.SectionContribEntry).init(self.allocator);
+ var sect_contribs = std.array_list.Managed(pdb.SectionContribEntry).init(self.allocator);
errdefer sect_contribs.deinit();
var sect_cont_offset: usize = 0;
@@ -569,7 +569,7 @@ const MsfStream = struct {
fn readSparseBitVector(stream: anytype, allocator: Allocator) ![]u32 {
const num_words = try stream.readInt(u32, .little);
- var list = std.ArrayList(u32).init(allocator);
+ var list = std.array_list.Managed(u32).init(allocator);
errdefer list.deinit();
var word_i: u32 = 0;
while (word_i != num_words) : (word_i += 1) {
lib/std/fs/File.zig
@@ -826,7 +826,7 @@ pub fn readToEndAllocOptions(
// size. If the reported size is zero, as it happens on Linux for files
// in /proc, a small buffer is allocated instead.
const initial_cap = @min((if (size > 0) size else 1024), max_bytes) + @intFromBool(optional_sentinel != null);
- var array_list = try std.ArrayListAligned(u8, alignment).initCapacity(allocator, initial_cap);
+ var array_list = try std.array_list.AlignedManaged(u8, alignment).initCapacity(allocator, initial_cap);
defer array_list.deinit();
self.deprecatedReader().readAllArrayListAligned(alignment, &array_list, max_bytes) catch |err| switch (err) {
lib/std/fs/path.zig
@@ -577,7 +577,7 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
}
// Allocate result and fill in the disk designator.
- var result = std.ArrayList(u8).init(allocator);
+ var result = std.array_list.Managed(u8).init(allocator);
defer result.deinit();
const disk_designator_len: usize = l: {
@@ -698,7 +698,7 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
pub fn resolvePosix(allocator: Allocator, paths: []const []const u8) Allocator.Error![]u8 {
assert(paths.len > 0);
- var result = std.ArrayList(u8).init(allocator);
+ var result = std.array_list.Managed(u8).init(allocator);
defer result.deinit();
var negative_count: usize = 0;
lib/std/fs/test.zig
@@ -464,7 +464,7 @@ test "Dir.Iterator" {
defer arena.deinit();
const allocator = arena.allocator();
- var entries = std.ArrayList(Dir.Entry).init(allocator);
+ var entries = std.array_list.Managed(Dir.Entry).init(allocator);
// Create iterator.
var iter = tmp_dir.dir.iterate();
@@ -497,7 +497,7 @@ test "Dir.Iterator many entries" {
defer arena.deinit();
const allocator = arena.allocator();
- var entries = std.ArrayList(Dir.Entry).init(allocator);
+ var entries = std.array_list.Managed(Dir.Entry).init(allocator);
// Create iterator.
var iter = tmp_dir.dir.iterate();
@@ -531,7 +531,7 @@ test "Dir.Iterator twice" {
var i: u8 = 0;
while (i < 2) : (i += 1) {
- var entries = std.ArrayList(Dir.Entry).init(allocator);
+ var entries = std.array_list.Managed(Dir.Entry).init(allocator);
// Create iterator.
var iter = tmp_dir.dir.iterate();
@@ -567,7 +567,7 @@ test "Dir.Iterator reset" {
var i: u8 = 0;
while (i < 2) : (i += 1) {
- var entries = std.ArrayList(Dir.Entry).init(allocator);
+ var entries = std.array_list.Managed(Dir.Entry).init(allocator);
while (try iter.next()) |entry| {
// We cannot just store `entry` as on Windows, we're re-using the name buffer
@@ -617,7 +617,7 @@ fn entryEql(lhs: Dir.Entry, rhs: Dir.Entry) bool {
return mem.eql(u8, lhs.name, rhs.name) and lhs.kind == rhs.kind;
}
-fn contains(entries: *const std.ArrayList(Dir.Entry), el: Dir.Entry) bool {
+fn contains(entries: *const std.array_list.Managed(Dir.Entry), el: Dir.Entry) bool {
for (entries.items) |entry| {
if (entryEql(entry, el)) return true;
}
lib/std/heap/debug_allocator.zig
@@ -1061,7 +1061,7 @@ test "small allocations - free in same order" {
defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
const allocator = gpa.allocator();
- var list = std.ArrayList(*u64).init(std.testing.allocator);
+ var list = std.array_list.Managed(*u64).init(std.testing.allocator);
defer list.deinit();
var i: usize = 0;
@@ -1080,7 +1080,7 @@ test "small allocations - free in reverse order" {
defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
const allocator = gpa.allocator();
- var list = std.ArrayList(*u64).init(std.testing.allocator);
+ var list = std.array_list.Managed(*u64).init(std.testing.allocator);
defer list.deinit();
var i: usize = 0;
@@ -1241,7 +1241,7 @@ test "shrink large object to large object with larger alignment" {
// This loop allocates until we find a page that is not aligned to the big
// alignment. Then we shrink the allocation after the loop, but increase the
// alignment to the higher one, that we know will force it to realloc.
- var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
+ var stuff_to_free = std.array_list.Managed([]align(16) u8).init(debug_allocator);
while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
try stuff_to_free.append(slice);
slice = try allocator.alignedAlloc(u8, .@"16", alloc_size);
@@ -1313,7 +1313,7 @@ test "realloc large object to larger alignment" {
const big_alignment: usize = default_page_size * 2;
// This loop allocates until we find a page that is not aligned to the big alignment.
- var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
+ var stuff_to_free = std.array_list.Managed([]align(16) u8).init(debug_allocator);
while (mem.isAligned(@intFromPtr(slice.ptr), big_alignment)) {
try stuff_to_free.append(slice);
slice = try allocator.alignedAlloc(u8, .@"16", default_page_size * 2 + 50);
lib/std/http/test.zig
@@ -298,7 +298,7 @@ test "Server.Request.respondStreaming non-chunked, unknown content-length" {
const response = try stream_reader.interface().allocRemaining(gpa, .unlimited);
defer gpa.free(response);
- var expected_response = std.ArrayList(u8).init(gpa);
+ var expected_response = std.array_list.Managed(u8).init(gpa);
defer expected_response.deinit();
try expected_response.appendSlice("HTTP/1.1 200 OK\r\nconnection: close\r\n\r\n");
@@ -369,7 +369,7 @@ test "receiving arbitrary http headers from the client" {
const response = try stream_reader.interface().allocRemaining(gpa, .unlimited);
defer gpa.free(response);
- var expected_response = std.ArrayList(u8).init(gpa);
+ var expected_response = std.array_list.Managed(u8).init(gpa);
defer expected_response.deinit();
try expected_response.appendSlice("HTTP/1.1 200 OK\r\n");
lib/std/Io/Reader/test.zig
@@ -34,7 +34,7 @@ test "skipBytes" {
test "readUntilDelimiterArrayList returns ArrayLists with bytes read until the delimiter, then EndOfStream" {
const a = std.testing.allocator;
- var list = std.ArrayList(u8).init(a);
+ var list = std.array_list.Managed(u8).init(a);
defer list.deinit();
var fis = std.io.fixedBufferStream("0000\n1234\n");
@@ -49,7 +49,7 @@ test "readUntilDelimiterArrayList returns ArrayLists with bytes read until the d
test "readUntilDelimiterArrayList returns an empty ArrayList" {
const a = std.testing.allocator;
- var list = std.ArrayList(u8).init(a);
+ var list = std.array_list.Managed(u8).init(a);
defer list.deinit();
var fis = std.io.fixedBufferStream("\n");
@@ -61,7 +61,7 @@ test "readUntilDelimiterArrayList returns an empty ArrayList" {
test "readUntilDelimiterArrayList returns StreamTooLong, then an ArrayList with bytes read until the delimiter" {
const a = std.testing.allocator;
- var list = std.ArrayList(u8).init(a);
+ var list = std.array_list.Managed(u8).init(a);
defer list.deinit();
var fis = std.io.fixedBufferStream("1234567\n");
@@ -75,7 +75,7 @@ test "readUntilDelimiterArrayList returns StreamTooLong, then an ArrayList with
test "readUntilDelimiterArrayList returns EndOfStream" {
const a = std.testing.allocator;
- var list = std.ArrayList(u8).init(a);
+ var list = std.array_list.Managed(u8).init(a);
defer list.deinit();
var fis = std.io.fixedBufferStream("1234");
lib/std/Io/DeprecatedReader.zig
@@ -39,14 +39,14 @@ pub fn readNoEof(self: Self, buf: []u8) anyerror!void {
if (amt_read < buf.len) return error.EndOfStream;
}
-/// Appends to the `std.ArrayList` contents by reading from the stream
+/// Appends to the `std.array_list.Managed` contents by reading from the stream
/// until end of stream is found.
/// If the number of bytes appended would exceed `max_append_size`,
/// `error.StreamTooLong` is returned
-/// and the `std.ArrayList` has exactly `max_append_size` bytes appended.
+/// and the `std.array_list.Managed` has exactly `max_append_size` bytes appended.
pub fn readAllArrayList(
self: Self,
- array_list: *std.ArrayList(u8),
+ array_list: *std.array_list.Managed(u8),
max_append_size: usize,
) anyerror!void {
return self.readAllArrayListAligned(null, array_list, max_append_size);
@@ -55,7 +55,7 @@ pub fn readAllArrayList(
pub fn readAllArrayListAligned(
self: Self,
comptime alignment: ?Alignment,
- array_list: *std.ArrayListAligned(u8, alignment),
+ array_list: *std.array_list.AlignedManaged(u8, alignment),
max_append_size: usize,
) anyerror!void {
try array_list.ensureTotalCapacity(@min(max_append_size, 4096));
@@ -87,20 +87,20 @@ pub fn readAllArrayListAligned(
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) anyerror![]u8 {
- var array_list = std.ArrayList(u8).init(allocator);
+ var array_list = std.array_list.Managed(u8).init(allocator);
defer array_list.deinit();
try self.readAllArrayList(&array_list, max_size);
return try array_list.toOwnedSlice();
}
/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
-/// Replaces the `std.ArrayList` contents by reading from the stream until `delimiter` is found.
+/// Replaces the `std.array_list.Managed` contents by reading from the stream until `delimiter` is found.
/// Does not include the delimiter in the result.
-/// If the `std.ArrayList` length would exceed `max_size`, `error.StreamTooLong` is returned and the
-/// `std.ArrayList` is populated with `max_size` bytes from the stream.
+/// If the `std.array_list.Managed` length would exceed `max_size`, `error.StreamTooLong` is returned and the
+/// `std.array_list.Managed` is populated with `max_size` bytes from the stream.
pub fn readUntilDelimiterArrayList(
self: Self,
- array_list: *std.ArrayList(u8),
+ array_list: *std.array_list.Managed(u8),
delimiter: u8,
max_size: usize,
) anyerror!void {
@@ -119,7 +119,7 @@ pub fn readUntilDelimiterAlloc(
delimiter: u8,
max_size: usize,
) anyerror![]u8 {
- var array_list = std.ArrayList(u8).init(allocator);
+ var array_list = std.array_list.Managed(u8).init(allocator);
defer array_list.deinit();
try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
return try array_list.toOwnedSlice();
@@ -154,7 +154,7 @@ pub fn readUntilDelimiterOrEofAlloc(
delimiter: u8,
max_size: usize,
) anyerror!?[]u8 {
- var array_list = std.ArrayList(u8).init(allocator);
+ var array_list = std.array_list.Managed(u8).init(allocator);
defer array_list.deinit();
self.streamUntilDelimiter(array_list.writer(), delimiter, max_size) catch |err| switch (err) {
error.EndOfStream => if (array_list.items.len == 0) {
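Note: the deprecated reader helpers keep taking a managed byte list, only under the `std.array_list.Managed(u8)` spelling, and the list's `writer()` remains the usual sink for `streamUntilDelimiter`. A rough usage sketch under those assumptions, using a fixed buffer stream as input (names are illustrative):

const std = @import("std");

test "collect one line into a managed byte list" {
    var line = std.array_list.Managed(u8).init(std.testing.allocator);
    defer line.deinit();

    var fbs = std.io.fixedBufferStream("first\nsecond\n");
    // Stream bytes into the list's writer until the '\n' delimiter is consumed.
    try fbs.reader().streamUntilDelimiter(line.writer(), '\n', null);
    try std.testing.expectEqualStrings("first", line.items);
}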
lib/std/json/dynamic.zig
@@ -1,7 +1,6 @@
const std = @import("std");
const debug = std.debug;
const ArenaAllocator = std.heap.ArenaAllocator;
-const ArrayList = std.ArrayList;
const StringArrayHashMap = std.StringArrayHashMap;
const Allocator = std.mem.Allocator;
const json = std.json;
@@ -12,7 +11,7 @@ const ParseError = @import("./static.zig").ParseError;
const isNumberFormattedLikeAnInteger = @import("Scanner.zig").isNumberFormattedLikeAnInteger;
pub const ObjectMap = StringArrayHashMap(Value);
-pub const Array = ArrayList(Value);
+pub const Array = std.array_list.Managed(Value);
/// Represents any JSON value, potentially containing other JSON values.
/// A .float value may be an approximation of the original value.
lib/std/json/Scanner.zig
@@ -46,7 +46,6 @@ const Scanner = @This();
const std = @import("std");
const Allocator = std.mem.Allocator;
-const ArrayList = std.ArrayList;
const assert = std.debug.assert;
const BitStack = std.BitStack;
@@ -136,7 +135,7 @@ pub fn nextAllocMax(self: *@This(), allocator: Allocator, when: AllocWhen, max_v
};
switch (token_type) {
.number, .string => {
- var value_list = ArrayList(u8).init(allocator);
+ var value_list = std.array_list.Managed(u8).init(allocator);
errdefer {
value_list.deinit();
}
@@ -173,7 +172,7 @@ pub fn nextAllocMax(self: *@This(), allocator: Allocator, when: AllocWhen, max_v
}
/// Equivalent to `allocNextIntoArrayListMax(value_list, when, default_max_value_len);`
-pub fn allocNextIntoArrayList(self: *@This(), value_list: *ArrayList(u8), when: AllocWhen) AllocIntoArrayListError!?[]const u8 {
+pub fn allocNextIntoArrayList(self: *@This(), value_list: *std.array_list.Managed(u8), when: AllocWhen) AllocIntoArrayListError!?[]const u8 {
return self.allocNextIntoArrayListMax(value_list, when, default_max_value_len);
}
/// The next token type must be either `.number` or `.string`. See `peekNextTokenType()`.
@@ -186,7 +185,7 @@ pub fn allocNextIntoArrayList(self: *@This(), value_list: *ArrayList(u8), when:
/// can be resumed by passing the same array list in again.
/// This method does not indicate whether the token content being returned is for a `.number` or `.string` token type;
/// the caller of this method is expected to know which type of token is being processed.
-pub fn allocNextIntoArrayListMax(self: *@This(), value_list: *ArrayList(u8), when: AllocWhen, max_value_len: usize) AllocIntoArrayListError!?[]const u8 {
+pub fn allocNextIntoArrayListMax(self: *@This(), value_list: *std.array_list.Managed(u8), when: AllocWhen, max_value_len: usize) AllocIntoArrayListError!?[]const u8 {
while (true) {
const token = try self.next();
switch (token) {
@@ -1608,7 +1607,7 @@ pub const Reader = struct {
const token_type = try self.peekNextTokenType();
switch (token_type) {
.number, .string => {
- var value_list = ArrayList(u8).init(allocator);
+ var value_list = std.array_list.Managed(u8).init(allocator);
errdefer {
value_list.deinit();
}
@@ -1639,11 +1638,11 @@ pub const Reader = struct {
}
/// Equivalent to `allocNextIntoArrayListMax(value_list, when, default_max_value_len);`
- pub fn allocNextIntoArrayList(self: *@This(), value_list: *ArrayList(u8), when: AllocWhen) Reader.AllocError!?[]const u8 {
+ pub fn allocNextIntoArrayList(self: *@This(), value_list: *std.array_list.Managed(u8), when: AllocWhen) Reader.AllocError!?[]const u8 {
return self.allocNextIntoArrayListMax(value_list, when, default_max_value_len);
}
/// Calls `std.json.Scanner.allocNextIntoArrayListMax` and handles `error.BufferUnderrun`.
- pub fn allocNextIntoArrayListMax(self: *@This(), value_list: *ArrayList(u8), when: AllocWhen, max_value_len: usize) Reader.AllocError!?[]const u8 {
+ pub fn allocNextIntoArrayListMax(self: *@This(), value_list: *std.array_list.Managed(u8), when: AllocWhen, max_value_len: usize) Reader.AllocError!?[]const u8 {
while (true) {
return self.scanner.allocNextIntoArrayListMax(value_list, when, max_value_len) catch |err| switch (err) {
error.BufferUnderrun => {
@@ -1746,7 +1745,7 @@ pub const Reader = struct {
const OBJECT_MODE = 0;
const ARRAY_MODE = 1;
-fn appendSlice(list: *std.ArrayList(u8), buf: []const u8, max_value_len: usize) !void {
+fn appendSlice(list: *std.array_list.Managed(u8), buf: []const u8, max_value_len: usize) !void {
const new_len = std.math.add(usize, list.items.len, buf.len) catch return error.ValueTooLong;
if (new_len > max_value_len) return error.ValueTooLong;
try list.appendSlice(buf);
lib/std/json/static.zig
@@ -2,7 +2,7 @@ const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
-const ArrayList = std.ArrayList;
+const ArrayList = std.array_list.Managed;
const Scanner = @import("Scanner.zig");
const Token = Scanner.Token;
lib/std/math/big/int.zig
@@ -1412,7 +1412,7 @@ pub const Mutable = struct {
///
/// `limbs_buffer` is used for temporary storage during the operation. When this function returns,
/// it will have the same length as it had when the function was called.
- pub fn gcd(rma: *Mutable, x: Const, y: Const, limbs_buffer: *std.ArrayList(Limb)) !void {
+ pub fn gcd(rma: *Mutable, x: Const, y: Const, limbs_buffer: *std.array_list.Managed(Limb)) !void {
const prev_len = limbs_buffer.items.len;
defer limbs_buffer.shrinkRetainingCapacity(prev_len);
const x_copy = if (rma.limbs.ptr == x.limbs.ptr) blk: {
@@ -1538,13 +1538,13 @@ pub const Mutable = struct {
/// Asserts that `rma` has enough limbs to store the result. Upper bound is given by `calcGcdNoAliasLimbLen`.
///
/// `limbs_buffer` is used for temporary storage during the operation.
- pub fn gcdNoAlias(rma: *Mutable, x: Const, y: Const, limbs_buffer: *std.ArrayList(Limb)) !void {
+ pub fn gcdNoAlias(rma: *Mutable, x: Const, y: Const, limbs_buffer: *std.array_list.Managed(Limb)) !void {
assert(rma.limbs.ptr != x.limbs.ptr); // illegal aliasing
assert(rma.limbs.ptr != y.limbs.ptr); // illegal aliasing
return gcdLehmer(rma, x, y, limbs_buffer);
}
- fn gcdLehmer(result: *Mutable, xa: Const, ya: Const, limbs_buffer: *std.ArrayList(Limb)) !void {
+ fn gcdLehmer(result: *Mutable, xa: Const, ya: Const, limbs_buffer: *std.array_list.Managed(Limb)) !void {
var x = try xa.toManaged(limbs_buffer.allocator);
defer x.deinit();
x.abs();
@@ -3267,7 +3267,7 @@ pub const Managed = struct {
pub fn gcd(rma: *Managed, x: *const Managed, y: *const Managed) !void {
try rma.ensureCapacity(@min(x.len(), y.len()));
var m = rma.toMutable();
- var limbs_buffer = std.ArrayList(Limb).init(rma.allocator);
+ var limbs_buffer = std.array_list.Managed(Limb).init(rma.allocator);
defer limbs_buffer.deinit();
try m.gcd(x.toConst(), y.toConst(), &limbs_buffer);
rma.setMetadata(m.positive, m.len);
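Note: `Mutable.gcd` still takes its limb scratch space as a managed list, and the high-level `Managed.gcd` above now builds that buffer with `std.array_list.Managed(Limb)`. A rough caller-side sketch of the high-level API, assuming the testing allocator:

const std = @import("std");
const big = std.math.big;

test "gcd via big.int.Managed" {
    const a = std.testing.allocator;
    var x = try big.int.Managed.initSet(a, 48);
    defer x.deinit();
    var y = try big.int.Managed.initSet(a, 36);
    defer y.deinit();
    var r = try big.int.Managed.init(a);
    defer r.deinit();
    var expected = try big.int.Managed.initSet(a, 12);
    defer expected.deinit();

    try r.gcd(&x, &y); // limb scratch space is allocated internally
    try std.testing.expect(r.eql(expected));
}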
lib/std/process/Child.zig
@@ -14,7 +14,7 @@ const assert = std.debug.assert;
const native_os = builtin.os.tag;
const Allocator = std.mem.Allocator;
const ChildProcess = @This();
-const ArrayList = std.ArrayListUnmanaged;
+const ArrayList = std.ArrayList;
pub const Id = switch (native_os) {
.windows => windows.HANDLE,
@@ -1545,7 +1545,7 @@ fn argvToCommandLineWindows(
allocator: mem.Allocator,
argv: []const []const u8,
) ArgvToCommandLineError![:0]u16 {
- var buf = std.ArrayList(u8).init(allocator);
+ var buf = std.array_list.Managed(u8).init(allocator);
defer buf.deinit();
if (argv.len != 0) {
@@ -1725,7 +1725,7 @@ fn argvToScriptCommandLineWindows(
/// Arguments, not including the script name itself. Expected to be encoded as WTF-8.
script_args: []const []const u8,
) ArgvToScriptCommandLineError![:0]u16 {
- var buf = try std.ArrayList(u8).initCapacity(allocator, 64);
+ var buf = try std.array_list.Managed(u8).initCapacity(allocator, 64);
defer buf.deinit();
// `/d` disables execution of AutoRun commands.
lib/std/Target/Query.zig
@@ -3,6 +3,15 @@
//! provide meaningful and unsurprising defaults. This struct does not reference
//! any resources and it is copyable.
+const Query = @This();
+const std = @import("../std.zig");
+const builtin = @import("builtin");
+const assert = std.debug.assert;
+const Target = std.Target;
+const mem = std.mem;
+const Allocator = std.mem.Allocator;
+const ArrayList = std.ArrayList;
+
/// `null` means native.
cpu_arch: ?Target.Cpu.Arch = null,
@@ -394,7 +403,7 @@ pub fn canDetectLibC(self: Query) bool {
/// Formats a version with the patch component omitted if it is zero,
/// unlike SemanticVersion.format which formats all its version components regardless.
-fn formatVersion(version: SemanticVersion, gpa: Allocator, list: *std.ArrayListUnmanaged(u8)) !void {
+fn formatVersion(version: SemanticVersion, gpa: Allocator, list: *ArrayList(u8)) !void {
if (version.patch == 0) {
try list.print(gpa, "{d}.{d}", .{ version.major, version.minor });
} else {
@@ -408,7 +417,7 @@ pub fn zigTriple(self: Query, gpa: Allocator) Allocator.Error![]u8 {
const arch_name = if (self.cpu_arch) |arch| @tagName(arch) else "native";
const os_name = if (self.os_tag) |os_tag| @tagName(os_tag) else "native";
- var result: std.ArrayListUnmanaged(u8) = .empty;
+ var result: ArrayList(u8) = .empty;
defer result.deinit(gpa);
try result.print(gpa, "{s}-{s}", .{ arch_name, os_name });
@@ -469,7 +478,7 @@ pub fn zigTriple(self: Query, gpa: Allocator) Allocator.Error![]u8 {
/// Renders the query into a textual representation that can be parsed via the
/// `-mcpu` flag passed to the Zig compiler.
/// Appends the result to `buffer`.
-pub fn serializeCpu(q: Query, buffer: *std.ArrayList(u8)) Allocator.Error!void {
+pub fn serializeCpu(q: Query, buffer: *std.array_list.Managed(u8)) Allocator.Error!void {
try buffer.ensureUnusedCapacity(8);
switch (q.cpu_model) {
.native => {
@@ -512,7 +521,7 @@ pub fn serializeCpu(q: Query, buffer: *std.ArrayList(u8)) Allocator.Error!void {
}
pub fn serializeCpuAlloc(q: Query, ally: Allocator) Allocator.Error![]u8 {
- var buffer = std.ArrayList(u8).init(ally);
+ var buffer = std.array_list.Managed(u8).init(ally);
try serializeCpu(q, &buffer);
return buffer.toOwnedSlice();
}
@@ -596,14 +605,6 @@ fn versionEqualOpt(a: ?SemanticVersion, b: ?SemanticVersion) bool {
return SemanticVersion.order(a.?, b.?) == .eq;
}
-const Query = @This();
-const std = @import("../std.zig");
-const builtin = @import("builtin");
-const assert = std.debug.assert;
-const Target = std.Target;
-const mem = std.mem;
-const Allocator = std.mem.Allocator;
-
test parse {
if (builtin.target.isGnuLibC()) {
var query = try Query.parse(.{});
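Note: `zigTriple` and `formatVersion` now accumulate their output in an unmanaged `ArrayList(u8)`, threading `gpa` through `print` and `toOwnedSlice` instead of storing it. A sketch of that shape, assuming the `print` helper used in the hunk above (function and names are illustrative):

const std = @import("std");

fn renderVersion(gpa: std.mem.Allocator, major: u32, minor: u32) ![]u8 {
    var out: std.ArrayList(u8) = .empty;
    errdefer out.deinit(gpa);
    // `print` formats straight into the unmanaged list, growing it as needed.
    try out.print(gpa, "{d}.{d}", .{ major, minor });
    return out.toOwnedSlice(gpa);
}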
lib/std/zig/Ast/Render.zig
@@ -3456,8 +3456,8 @@ const AutoIndentingStream = struct {
indent_count: usize = 0,
indent_delta: usize,
- indent_stack: std.ArrayList(StackElem),
- space_stack: std.ArrayList(SpaceElem),
+ indent_stack: std.array_list.Managed(StackElem),
+ space_stack: std.array_list.Managed(SpaceElem),
space_mode: ?usize = null,
disable_indent_committing: usize = 0,
current_line_empty: bool = true,
lib/std/zig/llvm/bitcode_writer.zig
@@ -19,7 +19,7 @@ pub fn BitcodeWriter(comptime types: []const type) type {
return struct {
const BcWriter = @This();
- buffer: std.ArrayList(u32),
+ buffer: std.array_list.Managed(u32),
bit_buffer: u32 = 0,
bit_count: u5 = 0,
@@ -31,7 +31,7 @@ pub fn BitcodeWriter(comptime types: []const type) type {
pub fn init(allocator: std.mem.Allocator, widths: [types.len]u16) BcWriter {
return .{
- .buffer = std.ArrayList(u32).init(allocator),
+ .buffer = std.array_list.Managed(u32).init(allocator),
.widths = widths,
};
}
lib/std/zig/llvm/BitcodeReader.zig
@@ -60,7 +60,7 @@ pub const Record = struct {
blob: []const u8,
fn toOwnedAbbrev(record: Record, allocator: std.mem.Allocator) !Abbrev {
- var operands = std.ArrayList(Abbrev.Operand).init(allocator);
+ var operands = std.array_list.Managed(Abbrev.Operand).init(allocator);
defer operands.deinit();
assert(record.id == Abbrev.Builtin.define_abbrev.toRecordId());
@@ -194,8 +194,8 @@ fn nextRecord(bc: *BitcodeReader) !?Record {
defer bc.record_arena = record_arena.state;
_ = record_arena.reset(.retain_capacity);
- var operands = try std.ArrayList(u64).initCapacity(record_arena.allocator(), abbrev.operands.len);
- var blob = std.ArrayList(u8).init(record_arena.allocator());
+ var operands = try std.array_list.Managed(u64).initCapacity(record_arena.allocator(), abbrev.operands.len);
+ var blob = std.array_list.Managed(u8).init(record_arena.allocator());
for (abbrev.operands, 0..) |abbrev_operand, abbrev_operand_i| switch (abbrev_operand) {
.literal => |value| operands.appendAssumeCapacity(value),
.encoding => |abbrev_encoding| switch (abbrev_encoding) {
lib/std/zig/llvm/Builder.zig
@@ -9107,7 +9107,7 @@ pub fn getIntrinsic(
var attributes: struct {
builder: *Builder,
- list: std.ArrayList(Attribute.Index),
+ list: std.array_list.Managed(Attribute.Index),
fn deinit(state: *@This()) void {
state.list.deinit();
@@ -9120,7 +9120,7 @@ pub fn getIntrinsic(
item.* = try state.builder.attr(attribute);
return state.builder.attrs(state.list.items);
}
- } = .{ .builder = self, .list = std.ArrayList(Attribute.Index).init(allocator) };
+ } = .{ .builder = self, .list = std.array_list.Managed(Attribute.Index).init(allocator) };
defer attributes.deinit();
var overload_index: usize = 0;
lib/std/zig/AstGen.zig
@@ -1784,7 +1784,7 @@ fn structInitExpr(
while (it.next()) |entry| {
const record = entry.value_ptr.*;
if (record.items.len > 1) {
- var error_notes = std.ArrayList(u32).init(astgen.arena);
+ var error_notes = std.array_list.Managed(u32).init(astgen.arena);
for (record.items[1..]) |duplicate| {
try error_notes.append(try astgen.errNoteTok(duplicate, "duplicate name here", .{}));
lib/std/zig/LibCDirs.zig
@@ -89,8 +89,8 @@ pub fn detect(
}
fn detectFromInstallation(arena: Allocator, target: *const std.Target, lci: *const LibCInstallation) !LibCDirs {
- var list = try std.ArrayList([]const u8).initCapacity(arena, 5);
- var framework_list = std.ArrayList([]const u8).init(arena);
+ var list = try std.array_list.Managed([]const u8).initCapacity(arena, 5);
+ var framework_list = std.array_list.Managed([]const u8).init(arena);
list.appendAssumeCapacity(lci.include_dir.?);
lib/std/zig/LibCInstallation.zig
@@ -250,7 +250,7 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F
const dev_null = if (is_windows) "nul" else "/dev/null";
- var argv = std.ArrayList([]const u8).init(allocator);
+ var argv = std.array_list.Managed([]const u8).init(allocator);
defer argv.deinit();
try appendCcExe(&argv, skip_cc_env_var);
@@ -294,7 +294,7 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F
}
var it = std.mem.tokenizeAny(u8, run_res.stderr, "\n\r");
- var search_paths = std.ArrayList([]const u8).init(allocator);
+ var search_paths = std.array_list.Managed([]const u8).init(allocator);
defer search_paths.deinit();
while (it.next()) |line| {
if (line.len != 0 and line[0] == ' ') {
@@ -365,7 +365,7 @@ fn findNativeIncludeDirWindows(
var install_buf: [2]std.zig.WindowsSdk.Installation = undefined;
const installs = fillInstallations(&install_buf, sdk);
- var result_buf = std.ArrayList(u8).init(allocator);
+ var result_buf = std.array_list.Managed(u8).init(allocator);
defer result_buf.deinit();
for (installs) |install| {
@@ -404,7 +404,7 @@ fn findNativeCrtDirWindows(
var install_buf: [2]std.zig.WindowsSdk.Installation = undefined;
const installs = fillInstallations(&install_buf, sdk);
- var result_buf = std.ArrayList(u8).init(allocator);
+ var result_buf = std.array_list.Managed(u8).init(allocator);
defer result_buf.deinit();
const arch_sub_dir = switch (args.target.cpu.arch) {
@@ -471,7 +471,7 @@ fn findNativeKernel32LibDir(
var install_buf: [2]std.zig.WindowsSdk.Installation = undefined;
const installs = fillInstallations(&install_buf, sdk);
- var result_buf = std.ArrayList(u8).init(allocator);
+ var result_buf = std.array_list.Managed(u8).init(allocator);
defer result_buf.deinit();
const arch_sub_dir = switch (args.target.cpu.arch) {
@@ -578,7 +578,7 @@ fn ccPrintFileName(args: CCPrintFileNameOptions) ![:0]u8 {
break :blk false;
};
- var argv = std.ArrayList([]const u8).init(allocator);
+ var argv = std.array_list.Managed([]const u8).init(allocator);
defer argv.deinit();
const arg1 = try std.fmt.allocPrint(allocator, "-print-file-name={s}", .{args.search_basename});
@@ -671,7 +671,7 @@ fn fillInstallations(
const inf_loop_env_key = "ZIG_IS_DETECTING_LIBC_PATHS";
-fn appendCcExe(args: *std.ArrayList([]const u8), skip_cc_env_var: bool) !void {
+fn appendCcExe(args: *std.array_list.Managed([]const u8), skip_cc_env_var: bool) !void {
const default_cc_exe = if (is_windows) "cc.exe" else "cc";
try args.ensureUnusedCapacity(1);
if (skip_cc_env_var) {
lib/std/zig/WindowsSdk.zig
@@ -92,8 +92,8 @@ fn iterateAndFilterByVersion(
std.mem.order(u8, lhs.build, rhs.build);
}
};
- var versions = std.ArrayList(Version).init(allocator);
- var dirs = std.ArrayList([]const u8).init(allocator);
+ var versions = std.array_list.Managed(Version).init(allocator);
+ var dirs = std.array_list.Managed([]const u8).init(allocator);
defer {
versions.deinit();
for (dirs.items) |filtered_dir| allocator.free(filtered_dir);
@@ -450,7 +450,7 @@ pub const Installation = struct {
return error.PathTooLong;
}
- var path = std.ArrayList(u8).fromOwnedSlice(allocator, path_maybe_with_trailing_slash);
+ var path = std.array_list.Managed(u8).fromOwnedSlice(allocator, path_maybe_with_trailing_slash);
errdefer path.deinit();
// String might contain trailing slash, so trim it here
@@ -522,7 +522,7 @@ pub const Installation = struct {
return error.PathTooLong;
}
- var path = std.ArrayList(u8).fromOwnedSlice(allocator, path_maybe_with_trailing_slash);
+ var path = std.array_list.Managed(u8).fromOwnedSlice(allocator, path_maybe_with_trailing_slash);
errdefer path.deinit();
// String might contain trailing slash, so trim it here
@@ -548,7 +548,7 @@ pub const Installation = struct {
return error.VersionTooLong;
}
- var version = std.ArrayList(u8).fromOwnedSlice(allocator, version_without_0);
+ var version = std.array_list.Managed(u8).fromOwnedSlice(allocator, version_without_0);
errdefer version.deinit();
try version.appendSlice(".0");
@@ -802,7 +802,7 @@ const MsvcLibDir = struct {
}
fn libDirFromInstallationPath(allocator: std.mem.Allocator, installation_path: []const u8, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
- var lib_dir_buf = try std.ArrayList(u8).initCapacity(allocator, installation_path.len + 64);
+ var lib_dir_buf = try std.array_list.Managed(u8).initCapacity(allocator, installation_path.len + 64);
errdefer lib_dir_buf.deinit();
lib_dir_buf.appendSliceAssumeCapacity(installation_path);
@@ -897,7 +897,7 @@ const MsvcLibDir = struct {
return error.PathNotFound;
}
- var msvc_dir = std.ArrayList(u8).fromOwnedSlice(allocator, msvc_include_dir_maybe_with_trailing_slash);
+ var msvc_dir = std.array_list.Managed(u8).fromOwnedSlice(allocator, msvc_include_dir_maybe_with_trailing_slash);
errdefer msvc_dir.deinit();
// String might contain trailing slash, so trim it here
@@ -929,7 +929,7 @@ const MsvcLibDir = struct {
}
fn findViaVs7Key(allocator: std.mem.Allocator, arch: std.Target.Cpu.Arch) error{ OutOfMemory, PathNotFound }![]const u8 {
- var base_path: std.ArrayList(u8) = base_path: {
+ var base_path: std.array_list.Managed(u8) = base_path: {
try_env: {
var env_map = std.process.getEnvMap(allocator) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
@@ -940,7 +940,7 @@ const MsvcLibDir = struct {
if (env_map.get("VS140COMNTOOLS")) |VS140COMNTOOLS| {
if (VS140COMNTOOLS.len < "C:\\Common7\\Tools".len) break :try_env;
if (!std.fs.path.isAbsolute(VS140COMNTOOLS)) break :try_env;
- var list = std.ArrayList(u8).init(allocator);
+ var list = std.array_list.Managed(u8).init(allocator);
errdefer list.deinit();
try list.appendSlice(VS140COMNTOOLS); // C:\Program Files (x86)\Microsoft Visual Studio 14.0\Common7\Tools
@@ -964,7 +964,7 @@ const MsvcLibDir = struct {
break :try_vs7_key;
}
- var path = std.ArrayList(u8).fromOwnedSlice(allocator, path_maybe_with_trailing_slash);
+ var path = std.array_list.Managed(u8).fromOwnedSlice(allocator, path_maybe_with_trailing_slash);
errdefer path.deinit();
// String might contain trailing slash, so trim it here
lib/std/array_list.zig
@@ -5,27 +5,18 @@ const testing = std.testing;
const mem = std.mem;
const math = std.math;
const Allocator = mem.Allocator;
+const ArrayList = std.ArrayList;
-/// A contiguous, growable list of items in memory.
-/// This is a wrapper around an array of T values. Initialize with `init`.
-///
-/// This struct internally stores a `std.mem.Allocator` for memory management.
-/// To manually specify an allocator with each function call see `ArrayListUnmanaged`.
-pub fn ArrayList(comptime T: type) type {
- return ArrayListAligned(T, null);
+/// Deprecated.
+pub fn Managed(comptime T: type) type {
+ return AlignedManaged(T, null);
}
-/// A contiguous, growable list of arbitrarily aligned items in memory.
-/// This is a wrapper around an array of T values aligned to `alignment`-byte
-/// addresses. If the specified alignment is `null`, then `@alignOf(T)` is used.
-/// Initialize with `init`.
-///
-/// This struct internally stores a `std.mem.Allocator` for memory management.
-/// To manually specify an allocator with each function call see `ArrayListAlignedUnmanaged`.
-pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) type {
+/// Deprecated.
+pub fn AlignedManaged(comptime T: type, comptime alignment: ?mem.Alignment) type {
if (alignment) |a| {
if (a.toByteUnits() == @alignOf(T)) {
- return ArrayListAligned(T, null);
+ return AlignedManaged(T, null);
}
}
return struct {
@@ -96,11 +87,11 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
};
}
- /// Initializes an ArrayListUnmanaged with the `items` and `capacity` fields
+ /// Initializes an ArrayList with the `items` and `capacity` fields
/// of this ArrayList. Empties this ArrayList.
- pub fn moveToUnmanaged(self: *Self) ArrayListAlignedUnmanaged(T, alignment) {
+ pub fn moveToUnmanaged(self: *Self) Aligned(T, alignment) {
const allocator = self.allocator;
- const result: ArrayListAlignedUnmanaged(T, alignment) = .{ .items = self.items, .capacity = self.capacity };
+ const result: Aligned(T, alignment) = .{ .items = self.items, .capacity = self.capacity };
self.* = init(allocator);
return result;
}
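Note: after these renames the naming scheme is: `std.ArrayList` (and `std.array_list.Aligned`) is the allocator-per-call list, while `std.array_list.Managed` / `AlignedManaged` are the deprecated allocator-storing wrappers, with `moveToUnmanaged` and `toManaged` converting between the two. A small round-trip sketch, assuming the testing allocator:

const std = @import("std");

test "convert between managed and unmanaged lists" {
    const a = std.testing.allocator;

    var managed = std.array_list.Managed(u8).init(a);
    defer managed.deinit(); // emptied by moveToUnmanaged, so this frees nothing
    try managed.appendSlice("zig");

    // Hand the buffer over to the unmanaged form; `managed` is reset to empty.
    var unmanaged = managed.moveToUnmanaged();
    defer unmanaged.deinit(a);
    try unmanaged.appendSlice(a, " std");

    try std.testing.expectEqualStrings("zig std", unmanaged.items);
}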
@@ -181,7 +172,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
// a new buffer and doing our own copy. With a realloc() call,
// the allocator implementation would pointlessly copy our
// extra capacity.
- const new_capacity = ArrayListAlignedUnmanaged(T, alignment).growCapacity(self.capacity, new_len);
+ const new_capacity = Aligned(T, alignment).growCapacity(self.capacity, new_len);
const old_memory = self.allocatedSlice();
if (self.allocator.remap(old_memory, new_capacity)) |new_memory| {
self.items.ptr = new_memory.ptr;
@@ -449,7 +440,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
if (self.capacity >= new_capacity) return;
- const better_capacity = ArrayListAlignedUnmanaged(T, alignment).growCapacity(self.capacity, new_capacity);
+ const better_capacity = Aligned(T, alignment).growCapacity(self.capacity, new_capacity);
return self.ensureTotalCapacityPrecise(better_capacity);
}
@@ -597,14 +588,6 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
};
}
-/// An ArrayList, but the allocator is passed as a parameter to the relevant functions
-/// rather than stored in the struct itself. The same allocator must be used throughout
-/// the entire lifetime of an ArrayListUnmanaged. Initialize directly or with
-/// `initCapacity`, and deinitialize with `deinit` or use `toOwnedSlice`.
-pub fn ArrayListUnmanaged(comptime T: type) type {
- return ArrayListAlignedUnmanaged(T, null);
-}
-
/// A contiguous, growable list of arbitrarily aligned items in memory.
/// This is a wrapper around an array of T values aligned to `alignment`-byte
/// addresses. If the specified alignment is `null`, then `@alignOf(T)` is used.
@@ -614,10 +597,10 @@ pub fn ArrayListUnmanaged(comptime T: type) type {
/// or use `toOwnedSlice`.
///
/// Default initialization of this struct is deprecated; use `.empty` instead.
-pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alignment) type {
+pub fn Aligned(comptime T: type, comptime alignment: ?mem.Alignment) type {
if (alignment) |a| {
if (a.toByteUnits() == @alignOf(T)) {
- return ArrayListAlignedUnmanaged(T, null);
+ return Aligned(T, null);
}
}
return struct {
@@ -675,11 +658,11 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
/// Convert this list into an analogous memory-managed one.
/// The returned list has ownership of the underlying memory.
- pub fn toManaged(self: *Self, gpa: Allocator) ArrayListAligned(T, alignment) {
+ pub fn toManaged(self: *Self, gpa: Allocator) AlignedManaged(T, alignment) {
return .{ .items = self.items, .capacity = self.capacity, .allocator = gpa };
}
- /// ArrayListUnmanaged takes ownership of the passed in slice.
+ /// ArrayList takes ownership of the passed in slice.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn fromOwnedSlice(slice: Slice) Self {
return Self{
@@ -688,7 +671,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
};
}
- /// ArrayListUnmanaged takes ownership of the passed in slice.
+ /// ArrayList takes ownership of the passed in slice.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn fromOwnedSliceSentinel(comptime sentinel: T, slice: [:sentinel]T) Self {
return Self{
@@ -1444,7 +1427,7 @@ fn addOrOom(a: usize, b: usize) error{OutOfMemory}!usize {
test "init" {
{
- var list = ArrayList(i32).init(testing.allocator);
+ var list = Managed(i32).init(testing.allocator);
defer list.deinit();
try testing.expect(list.items.len == 0);
@@ -1452,7 +1435,7 @@ test "init" {
}
{
- const list: ArrayListUnmanaged(i32) = .empty;
+ const list: ArrayList(i32) = .empty;
try testing.expect(list.items.len == 0);
try testing.expect(list.capacity == 0);
@@ -1462,13 +1445,13 @@ test "init" {
test "initCapacity" {
const a = testing.allocator;
{
- var list = try ArrayList(i8).initCapacity(a, 200);
+ var list = try Managed(i8).initCapacity(a, 200);
defer list.deinit();
try testing.expect(list.items.len == 0);
try testing.expect(list.capacity >= 200);
}
{
- var list = try ArrayListUnmanaged(i8).initCapacity(a, 200);
+ var list = try ArrayList(i8).initCapacity(a, 200);
defer list.deinit(a);
try testing.expect(list.items.len == 0);
try testing.expect(list.capacity >= 200);
@@ -1478,7 +1461,7 @@ test "initCapacity" {
test "clone" {
const a = testing.allocator;
{
- var array = ArrayList(i32).init(a);
+ var array = Managed(i32).init(a);
try array.append(-1);
try array.append(3);
try array.append(5);
@@ -1497,7 +1480,7 @@ test "clone" {
try testing.expectEqual(@as(i32, 5), cloned.items[2]);
}
{
- var array: ArrayListUnmanaged(i32) = .empty;
+ var array: ArrayList(i32) = .empty;
try array.append(a, -1);
try array.append(a, 3);
try array.append(a, 5);
@@ -1519,7 +1502,7 @@ test "clone" {
test "basic" {
const a = testing.allocator;
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
{
@@ -1569,7 +1552,7 @@ test "basic" {
try testing.expect(list.pop() == 33);
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
{
@@ -1623,7 +1606,7 @@ test "basic" {
test "appendNTimes" {
const a = testing.allocator;
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.appendNTimes(2, 10);
@@ -1633,7 +1616,7 @@ test "appendNTimes" {
}
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.appendNTimes(a, 2, 10);
@@ -1647,12 +1630,12 @@ test "appendNTimes" {
test "appendNTimes with failing allocator" {
const a = testing.failing_allocator;
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try testing.expectError(error.OutOfMemory, list.appendNTimes(2, 10));
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try testing.expectError(error.OutOfMemory, list.appendNTimes(a, 2, 10));
}
@@ -1661,7 +1644,7 @@ test "appendNTimes with failing allocator" {
test "orderedRemove" {
const a = testing.allocator;
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.append(1);
@@ -1687,7 +1670,7 @@ test "orderedRemove" {
try testing.expectEqual(@as(usize, 4), list.items.len);
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.append(a, 1);
@@ -1714,7 +1697,7 @@ test "orderedRemove" {
}
{
// remove last item
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.append(1);
try testing.expectEqual(@as(i32, 1), list.orderedRemove(0));
@@ -1722,7 +1705,7 @@ test "orderedRemove" {
}
{
// remove last item
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.append(a, 1);
try testing.expectEqual(@as(i32, 1), list.orderedRemove(0));
@@ -1733,7 +1716,7 @@ test "orderedRemove" {
test "swapRemove" {
const a = testing.allocator;
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.append(1);
@@ -1759,7 +1742,7 @@ test "swapRemove" {
try testing.expect(list.items.len == 4);
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.append(a, 1);
@@ -1789,7 +1772,7 @@ test "swapRemove" {
test "insert" {
const a = testing.allocator;
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.insert(0, 1);
@@ -1802,7 +1785,7 @@ test "insert" {
try testing.expect(list.items[3] == 3);
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.insert(a, 0, 1);
@@ -1819,7 +1802,7 @@ test "insert" {
test "insertSlice" {
const a = testing.allocator;
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.append(1);
@@ -1840,7 +1823,7 @@ test "insertSlice" {
try testing.expect(list.items[0] == 1);
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.append(a, 1);
@@ -1862,11 +1845,11 @@ test "insertSlice" {
}
}
-test "ArrayList.replaceRange" {
+test "Managed.replaceRange" {
const a = testing.allocator;
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
@@ -1875,7 +1858,7 @@ test "ArrayList.replaceRange" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 2, 3, 4, 5 }, list.items);
}
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
@@ -1888,7 +1871,7 @@ test "ArrayList.replaceRange" {
);
}
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
@@ -1897,7 +1880,7 @@ test "ArrayList.replaceRange" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 4, 5 }, list.items);
}
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
@@ -1906,7 +1889,7 @@ test "ArrayList.replaceRange" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 5 }, list.items);
}
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
@@ -1916,11 +1899,11 @@ test "ArrayList.replaceRange" {
}
}
-test "ArrayList.replaceRangeAssumeCapacity" {
+test "Managed.replaceRangeAssumeCapacity" {
const a = testing.allocator;
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
@@ -1929,7 +1912,7 @@ test "ArrayList.replaceRangeAssumeCapacity" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 2, 3, 4, 5 }, list.items);
}
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
@@ -1942,7 +1925,7 @@ test "ArrayList.replaceRangeAssumeCapacity" {
);
}
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
@@ -1951,7 +1934,7 @@ test "ArrayList.replaceRangeAssumeCapacity" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 4, 5 }, list.items);
}
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
@@ -1960,7 +1943,7 @@ test "ArrayList.replaceRangeAssumeCapacity" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 5 }, list.items);
}
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
@@ -1970,11 +1953,11 @@ test "ArrayList.replaceRangeAssumeCapacity" {
}
}
-test "ArrayListUnmanaged.replaceRange" {
+test "ArrayList.replaceRange" {
const a = testing.allocator;
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
@@ -1983,7 +1966,7 @@ test "ArrayListUnmanaged.replaceRange" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 2, 3, 4, 5 }, list.items);
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
@@ -1996,7 +1979,7 @@ test "ArrayListUnmanaged.replaceRange" {
);
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
@@ -2005,7 +1988,7 @@ test "ArrayListUnmanaged.replaceRange" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 4, 5 }, list.items);
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
@@ -2014,7 +1997,7 @@ test "ArrayListUnmanaged.replaceRange" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 5 }, list.items);
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
@@ -2024,11 +2007,11 @@ test "ArrayListUnmanaged.replaceRange" {
}
}
-test "ArrayListUnmanaged.replaceRangeAssumeCapacity" {
+test "ArrayList.replaceRangeAssumeCapacity" {
const a = testing.allocator;
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
@@ -2037,7 +2020,7 @@ test "ArrayListUnmanaged.replaceRangeAssumeCapacity" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 2, 3, 4, 5 }, list.items);
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
@@ -2050,7 +2033,7 @@ test "ArrayListUnmanaged.replaceRangeAssumeCapacity" {
);
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
@@ -2059,7 +2042,7 @@ test "ArrayListUnmanaged.replaceRangeAssumeCapacity" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 4, 5 }, list.items);
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
@@ -2068,7 +2051,7 @@ test "ArrayListUnmanaged.replaceRangeAssumeCapacity" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 5 }, list.items);
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
@@ -2080,15 +2063,15 @@ test "ArrayListUnmanaged.replaceRangeAssumeCapacity" {
const Item = struct {
integer: i32,
- sub_items: ArrayList(Item),
+ sub_items: Managed(Item),
};
const ItemUnmanaged = struct {
integer: i32,
- sub_items: ArrayListUnmanaged(ItemUnmanaged),
+ sub_items: ArrayList(ItemUnmanaged),
};
-test "ArrayList(T) of struct T" {
+test "Managed(T) of struct T" {
const a = std.testing.allocator;
{
var root = Item{ .integer = 1, .sub_items = .init(a) };
@@ -2104,11 +2087,11 @@ test "ArrayList(T) of struct T" {
}
}
-test "ArrayList(u8) implements writer" {
+test "Managed(u8) implements writer" {
const a = testing.allocator;
{
- var buffer = ArrayList(u8).init(a);
+ var buffer = Managed(u8).init(a);
defer buffer.deinit();
const x: i32 = 42;
@@ -2118,7 +2101,7 @@ test "ArrayList(u8) implements writer" {
try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
}
{
- var list = ArrayListAligned(u8, .@"2").init(a);
+ var list = AlignedManaged(u8, .@"2").init(a);
defer list.deinit();
const writer = list.writer();
@@ -2131,11 +2114,11 @@ test "ArrayList(u8) implements writer" {
}
}
-test "ArrayListUnmanaged(u8) implements writer" {
+test "ArrayList(u8) implements writer" {
const a = testing.allocator;
{
- var buffer: ArrayListUnmanaged(u8) = .empty;
+ var buffer: ArrayList(u8) = .empty;
defer buffer.deinit(a);
const x: i32 = 42;
@@ -2145,7 +2128,7 @@ test "ArrayListUnmanaged(u8) implements writer" {
try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
}
{
- var list: ArrayListAlignedUnmanaged(u8, .@"2") = .empty;
+ var list: Aligned(u8, .@"2") = .empty;
defer list.deinit(a);
const writer = list.writer(a);
@@ -2163,7 +2146,7 @@ test "shrink still sets length when resizing is disabled" {
const a = failing_allocator.allocator();
{
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.append(1);
@@ -2174,7 +2157,7 @@ test "shrink still sets length when resizing is disabled" {
try testing.expect(list.items.len == 1);
}
{
- var list: ArrayListUnmanaged(i32) = .empty;
+ var list: ArrayList(i32) = .empty;
defer list.deinit(a);
try list.append(a, 1);
@@ -2190,7 +2173,7 @@ test "shrinkAndFree with a copy" {
var failing_allocator = testing.FailingAllocator.init(testing.allocator, .{ .resize_fail_index = 0 });
const a = failing_allocator.allocator();
- var list = ArrayList(i32).init(a);
+ var list = Managed(i32).init(a);
defer list.deinit();
try list.appendNTimes(3, 16);
@@ -2201,7 +2184,7 @@ test "shrinkAndFree with a copy" {
test "addManyAsArray" {
const a = std.testing.allocator;
{
- var list = ArrayList(u8).init(a);
+ var list = Managed(u8).init(a);
defer list.deinit();
(try list.addManyAsArray(4)).* = "aoeu".*;
@@ -2211,7 +2194,7 @@ test "addManyAsArray" {
try testing.expectEqualSlices(u8, list.items, "aoeuasdf");
}
{
- var list: ArrayListUnmanaged(u8) = .empty;
+ var list: ArrayList(u8) = .empty;
defer list.deinit(a);
(try list.addManyAsArray(a, 4)).* = "aoeu".*;
@@ -2227,7 +2210,7 @@ test "growing memory preserves contents" {
// will be triggered in the next operation.
const a = std.testing.allocator;
{
- var list = ArrayList(u8).init(a);
+ var list = Managed(u8).init(a);
defer list.deinit();
(try list.addManyAsArray(4)).* = "abcd".*;
@@ -2241,7 +2224,7 @@ test "growing memory preserves contents" {
try testing.expectEqualSlices(u8, list.items, "abcdijklefgh");
}
{
- var list: ArrayListUnmanaged(u8) = .empty;
+ var list: ArrayList(u8) = .empty;
defer list.deinit(a);
(try list.addManyAsArray(a, 4)).* = "abcd".*;
@@ -2259,22 +2242,22 @@ test "growing memory preserves contents" {
test "fromOwnedSlice" {
const a = testing.allocator;
{
- var orig_list = ArrayList(u8).init(a);
+ var orig_list = Managed(u8).init(a);
defer orig_list.deinit();
try orig_list.appendSlice("foobar");
const slice = try orig_list.toOwnedSlice();
- var list = ArrayList(u8).fromOwnedSlice(a, slice);
+ var list = Managed(u8).fromOwnedSlice(a, slice);
defer list.deinit();
try testing.expectEqualStrings(list.items, "foobar");
}
{
- var list = ArrayList(u8).init(a);
+ var list = Managed(u8).init(a);
defer list.deinit();
try list.appendSlice("foobar");
const slice = try list.toOwnedSlice();
- var unmanaged = ArrayListUnmanaged(u8).fromOwnedSlice(slice);
+ var unmanaged = ArrayList(u8).fromOwnedSlice(slice);
defer unmanaged.deinit(a);
try testing.expectEqualStrings(unmanaged.items, "foobar");
}
@@ -2283,22 +2266,22 @@ test "fromOwnedSlice" {
test "fromOwnedSliceSentinel" {
const a = testing.allocator;
{
- var orig_list = ArrayList(u8).init(a);
+ var orig_list = Managed(u8).init(a);
defer orig_list.deinit();
try orig_list.appendSlice("foobar");
const sentinel_slice = try orig_list.toOwnedSliceSentinel(0);
- var list = ArrayList(u8).fromOwnedSliceSentinel(a, 0, sentinel_slice);
+ var list = Managed(u8).fromOwnedSliceSentinel(a, 0, sentinel_slice);
defer list.deinit();
try testing.expectEqualStrings(list.items, "foobar");
}
{
- var list = ArrayList(u8).init(a);
+ var list = Managed(u8).init(a);
defer list.deinit();
try list.appendSlice("foobar");
const sentinel_slice = try list.toOwnedSliceSentinel(0);
- var unmanaged = ArrayListUnmanaged(u8).fromOwnedSliceSentinel(0, sentinel_slice);
+ var unmanaged = ArrayList(u8).fromOwnedSliceSentinel(0, sentinel_slice);
defer unmanaged.deinit(a);
try testing.expectEqualStrings(unmanaged.items, "foobar");
}
@@ -2307,7 +2290,7 @@ test "fromOwnedSliceSentinel" {
test "toOwnedSliceSentinel" {
const a = testing.allocator;
{
- var list = ArrayList(u8).init(a);
+ var list = Managed(u8).init(a);
defer list.deinit();
try list.appendSlice("foobar");
@@ -2317,7 +2300,7 @@ test "toOwnedSliceSentinel" {
try testing.expectEqualStrings(result, mem.sliceTo(result.ptr, 0));
}
{
- var list: ArrayListUnmanaged(u8) = .empty;
+ var list: ArrayList(u8) = .empty;
defer list.deinit(a);
try list.appendSlice(a, "foobar");
@@ -2331,7 +2314,7 @@ test "toOwnedSliceSentinel" {
test "accepts unaligned slices" {
const a = testing.allocator;
{
- var list = std.ArrayListAligned(u8, .@"8").init(a);
+ var list = AlignedManaged(u8, .@"8").init(a);
defer list.deinit();
try list.appendSlice(&.{ 0, 1, 2, 3 });
@@ -2341,7 +2324,7 @@ test "accepts unaligned slices" {
try testing.expectEqualSlices(u8, list.items, &.{ 0, 8, 9, 6, 7, 2, 3 });
}
{
- var list: std.ArrayListAlignedUnmanaged(u8, .@"8") = .empty;
+ var list: Aligned(u8, .@"8") = .empty;
defer list.deinit(a);
try list.appendSlice(a, &.{ 0, 1, 2, 3 });
@@ -2352,11 +2335,11 @@ test "accepts unaligned slices" {
}
}
-test "ArrayList(u0)" {
- // An ArrayList on zero-sized types should not need to allocate
+test "Managed(u0)" {
+ // A Managed list of zero-sized items should not need to allocate
const a = testing.failing_allocator;
- var list = ArrayList(u0).init(a);
+ var list = Managed(u0).init(a);
defer list.deinit();
try list.append(0);
@@ -2372,10 +2355,10 @@ test "ArrayList(u0)" {
try testing.expectEqual(count, 3);
}
-test "ArrayList(?u32).pop()" {
+test "Managed(?u32).pop()" {
const a = testing.allocator;
- var list = ArrayList(?u32).init(a);
+ var list = Managed(?u32).init(a);
defer list.deinit();
try list.append(null);
@@ -2389,10 +2372,10 @@ test "ArrayList(?u32).pop()" {
try testing.expect(list.pop() == null);
}
-test "ArrayList(u32).getLast()" {
+test "Managed(u32).getLast()" {
const a = testing.allocator;
- var list = ArrayList(u32).init(a);
+ var list = Managed(u32).init(a);
defer list.deinit();
try list.append(2);
@@ -2400,10 +2383,10 @@ test "ArrayList(u32).getLast()" {
try testing.expectEqual(const_list.getLast(), 2);
}
-test "ArrayList(u32).getLastOrNull()" {
+test "Managed(u32).getLastOrNull()" {
const a = testing.allocator;
- var list = ArrayList(u32).init(a);
+ var list = Managed(u32).init(a);
defer list.deinit();
try testing.expectEqual(list.getLastOrNull(), null);
@@ -2419,7 +2402,7 @@ test "return OutOfMemory when capacity would exceed maximum usize integer value"
const items = &.{ 42, 43 };
{
- var list: ArrayListUnmanaged(u32) = .{
+ var list: ArrayList(u32) = .{
.items = undefined,
.capacity = math.maxInt(usize) - 1,
};
@@ -2436,7 +2419,7 @@ test "return OutOfMemory when capacity would exceed maximum usize integer value"
}
{
- var list: ArrayList(u32) = .{
+ var list: Managed(u32) = .{
.items = undefined,
.capacity = math.maxInt(usize) - 1,
.allocator = a,
@@ -2457,7 +2440,7 @@ test "return OutOfMemory when capacity would exceed maximum usize integer value"
test "orderedRemoveMany" {
const gpa = testing.allocator;
- var list: ArrayListUnmanaged(usize) = .empty;
+ var list: Aligned(usize, null) = .empty;
defer list.deinit(gpa);
for (0..10) |n| {
lib/std/BitStack.zig
@@ -4,14 +4,14 @@ const BitStack = @This();
const std = @import("std");
const Allocator = std.mem.Allocator;
-const ArrayList = std.ArrayList;
+const ArrayList = std.array_list.Managed;
-bytes: std.ArrayList(u8),
+bytes: std.array_list.Managed(u8),
bit_len: usize = 0,
pub fn init(allocator: Allocator) @This() {
return .{
- .bytes = std.ArrayList(u8).init(allocator),
+ .bytes = std.array_list.Managed(u8).init(allocator),
};
}
lib/std/Build.zig
@@ -7,7 +7,6 @@ const debug = std.debug;
const panic = std.debug.panic;
const assert = debug.assert;
const log = std.log;
-const ArrayList = std.ArrayList;
const StringHashMap = std.StringHashMap;
const Allocator = mem.Allocator;
const Target = std.Target;
@@ -16,6 +15,7 @@ const EnvMap = std.process.EnvMap;
const File = fs.File;
const Sha256 = std.crypto.hash.sha2.Sha256;
const Build = @This();
+const ArrayList = std.ArrayList;
pub const Cache = @import("Build/Cache.zig");
pub const Step = @import("Build/Step.zig");
@@ -32,7 +32,7 @@ uninstall_tls: TopLevelStep,
allocator: Allocator,
user_input_options: UserInputOptionsMap,
available_options_map: AvailableOptionsMap,
-available_options_list: ArrayList(AvailableOption),
+available_options_list: std.array_list.Managed(AvailableOption),
verbose: bool,
verbose_link: bool,
verbose_cc: bool,
@@ -52,7 +52,7 @@ exe_dir: []const u8,
h_dir: []const u8,
install_path: []const u8,
sysroot: ?[]const u8 = null,
-search_prefixes: std.ArrayListUnmanaged([]const u8),
+search_prefixes: ArrayList([]const u8),
libc_file: ?[]const u8 = null,
/// Path to the directory containing build.zig.
build_root: Cache.Directory,
@@ -220,10 +220,10 @@ const UserInputOption = struct {
const UserValue = union(enum) {
flag: void,
scalar: []const u8,
- list: ArrayList([]const u8),
+ list: std.array_list.Managed([]const u8),
map: StringHashMap(*const UserValue),
lazy_path: LazyPath,
- lazy_path_list: ArrayList(LazyPath),
+ lazy_path_list: std.array_list.Managed(LazyPath),
};
const TypeId = enum {
@@ -277,10 +277,10 @@ pub fn create(
.allocator = arena,
.user_input_options = UserInputOptionsMap.init(arena),
.available_options_map = AvailableOptionsMap.init(arena),
- .available_options_list = ArrayList(AvailableOption).init(arena),
+ .available_options_list = std.array_list.Managed(AvailableOption).init(arena),
.top_level_steps = .{},
.default_step = undefined,
- .search_prefixes = .{},
+ .search_prefixes = .empty,
.install_prefix = undefined,
.lib_dir = undefined,
.exe_dir = undefined,
@@ -363,7 +363,7 @@ fn createChildOnly(
},
.user_input_options = user_input_options,
.available_options_map = AvailableOptionsMap.init(allocator),
- .available_options_list = ArrayList(AvailableOption).init(allocator),
+ .available_options_list = std.array_list.Managed(AvailableOption).init(allocator),
.verbose = parent.verbose,
.verbose_link = parent.verbose_link,
.verbose_cc = parent.verbose_cc,
@@ -468,7 +468,7 @@ fn addUserInputOptionFromArg(
}) catch @panic("OOM");
},
[]const LazyPath => return if (maybe_value) |v| {
- var list = ArrayList(LazyPath).initCapacity(arena, v.len) catch @panic("OOM");
+ var list = std.array_list.Managed(LazyPath).initCapacity(arena, v.len) catch @panic("OOM");
for (v) |lp| list.appendAssumeCapacity(lp.dupeInner(arena));
map.put(field.name, .{
.name = field.name,
@@ -484,7 +484,7 @@ fn addUserInputOptionFromArg(
}) catch @panic("OOM");
},
[]const []const u8 => return if (maybe_value) |v| {
- var list = ArrayList([]const u8).initCapacity(arena, v.len) catch @panic("OOM");
+ var list = std.array_list.Managed([]const u8).initCapacity(arena, v.len) catch @panic("OOM");
for (v) |s| list.appendAssumeCapacity(arena.dupe(u8, s) catch @panic("OOM"));
map.put(field.name, .{
.name = field.name,
@@ -542,7 +542,7 @@ fn addUserInputOptionFromArg(
},
.slice => switch (@typeInfo(ptr_info.child)) {
.@"enum" => return if (maybe_value) |v| {
- var list = ArrayList([]const u8).initCapacity(arena, v.len) catch @panic("OOM");
+ var list = std.array_list.Managed([]const u8).initCapacity(arena, v.len) catch @panic("OOM");
for (v) |tag| list.appendAssumeCapacity(@tagName(tag));
map.put(field.name, .{
.name = field.name,
@@ -589,10 +589,10 @@ fn addUserInputOptionFromArg(
const OrderedUserValue = union(enum) {
flag: void,
scalar: []const u8,
- list: ArrayList([]const u8),
- map: ArrayList(Pair),
+ list: std.array_list.Managed([]const u8),
+ map: std.array_list.Managed(Pair),
lazy_path: LazyPath,
- lazy_path_list: ArrayList(LazyPath),
+ lazy_path_list: std.array_list.Managed(LazyPath),
const Pair = struct {
name: []const u8,
@@ -642,8 +642,8 @@ const OrderedUserValue = union(enum) {
}
}
- fn mapFromUnordered(allocator: Allocator, unordered: std.StringHashMap(*const UserValue)) ArrayList(Pair) {
- var ordered = ArrayList(Pair).init(allocator);
+ fn mapFromUnordered(allocator: Allocator, unordered: std.StringHashMap(*const UserValue)) std.array_list.Managed(Pair) {
+ var ordered = std.array_list.Managed(Pair).init(allocator);
var it = unordered.iterator();
while (it.next()) |entry| {
ordered.append(.{
@@ -694,7 +694,7 @@ const OrderedUserInputOption = struct {
// The hash should be consistent with the same values given a different order.
// This function takes a user input map, orders it, then hashes the contents.
fn hashUserInputOptionsMap(allocator: Allocator, user_input_options: UserInputOptionsMap, hasher: *std.hash.Wyhash) void {
- var ordered = ArrayList(OrderedUserInputOption).init(allocator);
+ var ordered = std.array_list.Managed(OrderedUserInputOption).init(allocator);
var it = user_input_options.iterator();
while (it.next()) |entry|
ordered.append(OrderedUserInputOption.fromUnordered(allocator, entry.value_ptr.*)) catch @panic("OOM");
@@ -1086,7 +1086,7 @@ pub fn option(b: *Build, comptime T: type, name_raw: []const u8, description_raw
const enum_options = if (type_id == .@"enum" or type_id == .enum_list) blk: {
const EnumType = if (type_id == .enum_list) @typeInfo(T).pointer.child else T;
const fields = comptime std.meta.fields(EnumType);
- var options = ArrayList([]const u8).initCapacity(b.allocator, fields.len) catch @panic("OOM");
+ var options = std.array_list.Managed([]const u8).initCapacity(b.allocator, fields.len) catch @panic("OOM");
inline for (fields) |field| {
options.appendAssumeCapacity(field.name);
@@ -1488,7 +1488,7 @@ pub fn addUserInputOption(b: *Build, name_raw: []const u8, value_raw: []const u8
switch (gop.value_ptr.value) {
.scalar => |s| {
// turn it into a list
- var list = ArrayList([]const u8).init(b.allocator);
+ var list = std.array_list.Managed([]const u8).init(b.allocator);
try list.append(s);
try list.append(value);
try b.user_input_options.put(name, .{
@@ -1596,7 +1596,7 @@ pub fn validateUserInputDidItFail(b: *Build) bool {
}
fn allocPrintCmd(gpa: Allocator, opt_cwd: ?[]const u8, argv: []const []const u8) error{OutOfMemory}![]u8 {
- var buf: std.ArrayListUnmanaged(u8) = .empty;
+ var buf: ArrayList(u8) = .empty;
if (opt_cwd) |cwd| try buf.print(gpa, "cd {s} && ", .{cwd});
for (argv) |arg| {
try buf.print(gpa, "{s} ", .{arg});
lib/std/hash_map.zig
@@ -1800,7 +1800,7 @@ test "put and remove loop in random order" {
var map = AutoHashMap(u32, u32).init(std.testing.allocator);
defer map.deinit();
- var keys = std.ArrayList(u32).init(std.testing.allocator);
+ var keys = std.array_list.Managed(u32).init(std.testing.allocator);
defer keys.deinit();
const size = 32;
@@ -1834,7 +1834,7 @@ test "remove one million elements in random order" {
var map = Map.init(std.heap.page_allocator);
defer map.deinit();
- var keys = std.ArrayList(u32).init(std.heap.page_allocator);
+ var keys = std.array_list.Managed(u32).init(std.heap.page_allocator);
defer keys.deinit();
var i: u32 = 0;
lib/std/heap.zig
@@ -673,7 +673,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
var slice = try allocator.alignedAlloc(u8, .@"16", alloc_size);
defer allocator.free(slice);
- var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
+ var stuff_to_free = std.array_list.Managed([]align(16) u8).init(debug_allocator);
// On Windows, VirtualAlloc returns addresses aligned to a 64K boundary,
// which is 16 pages, hence the 32. This test may require to increase
// the size of the allocations feeding the `allocator` parameter if they
lib/std/Io.zig
@@ -117,7 +117,7 @@ pub fn GenericReader(
pub inline fn readAllArrayList(
self: Self,
- array_list: *std.ArrayList(u8),
+ array_list: *std.array_list.Managed(u8),
max_append_size: usize,
) (error{StreamTooLong} || Allocator.Error || Error)!void {
return @errorCast(self.any().readAllArrayList(array_list, max_append_size));
@@ -126,7 +126,7 @@ pub fn GenericReader(
pub inline fn readAllArrayListAligned(
self: Self,
comptime alignment: ?Alignment,
- array_list: *std.ArrayListAligned(u8, alignment),
+ array_list: *std.array_list.AlignedManaged(u8, alignment),
max_append_size: usize,
) (error{StreamTooLong} || Allocator.Error || Error)!void {
return @errorCast(self.any().readAllArrayListAligned(
@@ -146,7 +146,7 @@ pub fn GenericReader(
pub inline fn readUntilDelimiterArrayList(
self: Self,
- array_list: *std.ArrayList(u8),
+ array_list: *std.array_list.Managed(u8),
delimiter: u8,
max_size: usize,
) (NoEofError || Allocator.Error || error{StreamTooLong})!void {
lib/std/priority_dequeue.zig
@@ -964,7 +964,7 @@ fn fuzzTestMinMax(rng: std.Random, queue_size: usize) !void {
}
fn generateRandomSlice(allocator: std.mem.Allocator, rng: std.Random, size: usize) ![]u32 {
- var array = std.ArrayList(u32).init(allocator);
+ var array = std.array_list.Managed(u32).init(allocator);
try array.ensureTotalCapacity(size);
var i: usize = 0;
lib/std/process.zig
@@ -1241,10 +1241,10 @@ pub fn argsAlloc(allocator: Allocator) ![][:0]u8 {
var it = try argsWithAllocator(allocator);
defer it.deinit();
- var contents = std.ArrayList(u8).init(allocator);
+ var contents = std.array_list.Managed(u8).init(allocator);
defer contents.deinit();
- var slice_list = std.ArrayList(usize).init(allocator);
+ var slice_list = std.array_list.Managed(usize).init(allocator);
defer slice_list.deinit();
while (it.next()) |arg| {
lib/std/std.zig
@@ -1,9 +1,5 @@
pub const ArrayHashMap = array_hash_map.ArrayHashMap;
pub const ArrayHashMapUnmanaged = array_hash_map.ArrayHashMapUnmanaged;
-pub const ArrayList = @import("array_list.zig").ArrayList;
-pub const ArrayListAligned = @import("array_list.zig").ArrayListAligned;
-pub const ArrayListAlignedUnmanaged = @import("array_list.zig").ArrayListAlignedUnmanaged;
-pub const ArrayListUnmanaged = @import("array_list.zig").ArrayListUnmanaged;
pub const AutoArrayHashMap = array_hash_map.AutoArrayHashMap;
pub const AutoArrayHashMapUnmanaged = array_hash_map.AutoArrayHashMapUnmanaged;
pub const AutoHashMap = hash_map.AutoHashMap;
@@ -43,6 +39,24 @@ pub const Treap = @import("treap.zig").Treap;
pub const Tz = tz.Tz;
pub const Uri = @import("Uri.zig");
+/// A contiguous, growable list of items in memory. This is a wrapper around a
+/// slice of `T` values.
+///
+/// The same allocator must be used throughout its entire lifetime. Initialize
+/// directly with `empty` or `initCapacity`, and deinitialize with `deinit` or
+/// `toOwnedSlice`.
+pub fn ArrayList(comptime T: type) type {
+ return array_list.Aligned(T, null);
+}
+pub const array_list = @import("array_list.zig");
+
+/// Deprecated; use `array_list.Aligned`.
+pub const ArrayListAligned = array_list.Aligned;
+/// Deprecated; use `array_list.Aligned`.
+pub const ArrayListAlignedUnmanaged = array_list.Aligned;
+/// Deprecated; use `ArrayList`.
+pub const ArrayListUnmanaged = ArrayList;
+
pub const array_hash_map = @import("array_hash_map.zig");
pub const atomic = @import("atomic.zig");
pub const base64 = @import("base64.zig");
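For reference, a minimal sketch of how the two APIs compare after this change, based on the patterns visible in the hunks above (the test name and the `gpa` binding are illustrative, not part of the commit):

const std = @import("std");

test "unmanaged ArrayList vs array_list.Managed (illustrative)" {
    const gpa = std.testing.allocator;

    // New default: std.ArrayList is unmanaged; every allocating call takes the allocator.
    var list: std.ArrayList(u8) = .empty;
    defer list.deinit(gpa);
    try list.append(gpa, 'a');
    try list.appendSlice(gpa, "bc");

    // The previous managed behavior remains available as std.array_list.Managed.
    var managed = std.array_list.Managed(u8).init(gpa);
    defer managed.deinit();
    try managed.appendSlice("abc");

    try std.testing.expectEqualSlices(u8, list.items, managed.items);
}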
lib/std/treap.zig
@@ -641,7 +641,7 @@ test "node.{prev(),next()} with random data" {
var treap = TestTreap{};
// A slow, stupid but correct reference. Ordered.
- var golden = std.ArrayList(u64).init(std.testing.allocator);
+ var golden = std.array_list.Managed(u64).init(std.testing.allocator);
defer golden.deinit();
// Insert.
lib/std/unicode.zig
@@ -916,7 +916,7 @@ test fmtUtf8 {
}
fn utf16LeToUtf8ArrayListImpl(
- result: *std.ArrayList(u8),
+ result: *std.array_list.Managed(u8),
utf16le: []const u16,
comptime surrogates: Surrogates,
) (switch (surrogates) {
@@ -967,7 +967,7 @@ fn utf16LeToUtf8ArrayListImpl(
pub const Utf16LeToUtf8AllocError = mem.Allocator.Error || Utf16LeToUtf8Error;
-pub fn utf16LeToUtf8ArrayList(result: *std.ArrayList(u8), utf16le: []const u16) Utf16LeToUtf8AllocError!void {
+pub fn utf16LeToUtf8ArrayList(result: *std.array_list.Managed(u8), utf16le: []const u16) Utf16LeToUtf8AllocError!void {
try result.ensureUnusedCapacity(utf16le.len);
return utf16LeToUtf8ArrayListImpl(result, utf16le, .cannot_encode_surrogate_half);
}
@@ -975,7 +975,7 @@ pub fn utf16LeToUtf8ArrayList(result: *std.ArrayList(u8), utf16le: []const u16)
/// Caller must free returned memory.
pub fn utf16LeToUtf8Alloc(allocator: mem.Allocator, utf16le: []const u16) Utf16LeToUtf8AllocError![]u8 {
// optimistically guess that it will all be ascii.
- var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len);
+ var result = try std.array_list.Managed(u8).initCapacity(allocator, utf16le.len);
errdefer result.deinit();
try utf16LeToUtf8ArrayListImpl(&result, utf16le, .cannot_encode_surrogate_half);
@@ -985,7 +985,7 @@ pub fn utf16LeToUtf8Alloc(allocator: mem.Allocator, utf16le: []const u16) Utf16L
/// Caller must free returned memory.
pub fn utf16LeToUtf8AllocZ(allocator: mem.Allocator, utf16le: []const u16) Utf16LeToUtf8AllocError![:0]u8 {
// optimistically guess that it will all be ascii (and allocate space for the null terminator)
- var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len + 1);
+ var result = try std.array_list.Managed(u8).initCapacity(allocator, utf16le.len + 1);
errdefer result.deinit();
try utf16LeToUtf8ArrayListImpl(&result, utf16le, .cannot_encode_surrogate_half);
@@ -1117,7 +1117,7 @@ test utf16LeToUtf8 {
}
}
-fn utf8ToUtf16LeArrayListImpl(result: *std.ArrayList(u16), utf8: []const u8, comptime surrogates: Surrogates) !void {
+fn utf8ToUtf16LeArrayListImpl(result: *std.array_list.Managed(u16), utf8: []const u8, comptime surrogates: Surrogates) !void {
assert(result.unusedCapacitySlice().len >= utf8.len);
var remaining = utf8;
@@ -1155,14 +1155,14 @@ fn utf8ToUtf16LeArrayListImpl(result: *std.ArrayList(u16), utf8: []const u8, com
}
}
-pub fn utf8ToUtf16LeArrayList(result: *std.ArrayList(u16), utf8: []const u8) error{ InvalidUtf8, OutOfMemory }!void {
+pub fn utf8ToUtf16LeArrayList(result: *std.array_list.Managed(u16), utf8: []const u8) error{ InvalidUtf8, OutOfMemory }!void {
try result.ensureUnusedCapacity(utf8.len);
return utf8ToUtf16LeArrayListImpl(result, utf8, .cannot_encode_surrogate_half);
}
pub fn utf8ToUtf16LeAlloc(allocator: mem.Allocator, utf8: []const u8) error{ InvalidUtf8, OutOfMemory }![]u16 {
// optimistically guess that it will not require surrogate pairs
- var result = try std.ArrayList(u16).initCapacity(allocator, utf8.len);
+ var result = try std.array_list.Managed(u16).initCapacity(allocator, utf8.len);
errdefer result.deinit();
try utf8ToUtf16LeArrayListImpl(&result, utf8, .cannot_encode_surrogate_half);
@@ -1171,7 +1171,7 @@ pub fn utf8ToUtf16LeAlloc(allocator: mem.Allocator, utf8: []const u8) error{ Inv
pub fn utf8ToUtf16LeAllocZ(allocator: mem.Allocator, utf8: []const u8) error{ InvalidUtf8, OutOfMemory }![:0]u16 {
// optimistically guess that it will not require surrogate pairs
- var result = try std.ArrayList(u16).initCapacity(allocator, utf8.len + 1);
+ var result = try std.array_list.Managed(u16).initCapacity(allocator, utf8.len + 1);
errdefer result.deinit();
try utf8ToUtf16LeArrayListImpl(&result, utf8, .cannot_encode_surrogate_half);
@@ -1258,19 +1258,19 @@ test utf8ToUtf16Le {
test utf8ToUtf16LeArrayList {
{
- var list = std.ArrayList(u16).init(testing.allocator);
+ var list = std.array_list.Managed(u16).init(testing.allocator);
defer list.deinit();
 try utf8ToUtf16LeArrayList(&list, "𐐷");
try testing.expectEqualSlices(u8, "\x01\xd8\x37\xdc", mem.sliceAsBytes(list.items));
}
{
- var list = std.ArrayList(u16).init(testing.allocator);
+ var list = std.array_list.Managed(u16).init(testing.allocator);
defer list.deinit();
try utf8ToUtf16LeArrayList(&list, "\u{10FFFF}");
try testing.expectEqualSlices(u8, "\xff\xdb\xff\xdf", mem.sliceAsBytes(list.items));
}
{
- var list = std.ArrayList(u16).init(testing.allocator);
+ var list = std.array_list.Managed(u16).init(testing.allocator);
defer list.deinit();
const result = utf8ToUtf16LeArrayList(&list, "\xf4\x90\x80\x80");
try testing.expectError(error.InvalidUtf8, result);
@@ -1331,7 +1331,7 @@ test utf8ToUtf16LeAllocZ {
test "ArrayList functions on a re-used list" {
// utf8ToUtf16LeArrayList
{
- var list = std.ArrayList(u16).init(testing.allocator);
+ var list = std.array_list.Managed(u16).init(testing.allocator);
defer list.deinit();
const init_slice = utf8ToUtf16LeStringLiteral("abcdefg");
@@ -1345,7 +1345,7 @@ test "ArrayList functions on a re-used list" {
// utf16LeToUtf8ArrayList
{
- var list = std.ArrayList(u8).init(testing.allocator);
+ var list = std.array_list.Managed(u8).init(testing.allocator);
defer list.deinit();
const init_slice = "abcdefg";
@@ -1359,7 +1359,7 @@ test "ArrayList functions on a re-used list" {
// wtf8ToWtf16LeArrayList
{
- var list = std.ArrayList(u16).init(testing.allocator);
+ var list = std.array_list.Managed(u16).init(testing.allocator);
defer list.deinit();
const init_slice = utf8ToUtf16LeStringLiteral("abcdefg");
@@ -1373,7 +1373,7 @@ test "ArrayList functions on a re-used list" {
// wtf16LeToWtf8ArrayList
{
- var list = std.ArrayList(u8).init(testing.allocator);
+ var list = std.array_list.Managed(u8).init(testing.allocator);
defer list.deinit();
const init_slice = "abcdefg";
@@ -1750,7 +1750,7 @@ pub const Wtf8Iterator = struct {
}
};
-pub fn wtf16LeToWtf8ArrayList(result: *std.ArrayList(u8), utf16le: []const u16) mem.Allocator.Error!void {
+pub fn wtf16LeToWtf8ArrayList(result: *std.array_list.Managed(u8), utf16le: []const u16) mem.Allocator.Error!void {
try result.ensureUnusedCapacity(utf16le.len);
return utf16LeToUtf8ArrayListImpl(result, utf16le, .can_encode_surrogate_half);
}
@@ -1758,7 +1758,7 @@ pub fn wtf16LeToWtf8ArrayList(result: *std.ArrayList(u8), utf16le: []const u16)
/// Caller must free returned memory.
pub fn wtf16LeToWtf8Alloc(allocator: mem.Allocator, wtf16le: []const u16) mem.Allocator.Error![]u8 {
// optimistically guess that it will all be ascii.
- var result = try std.ArrayList(u8).initCapacity(allocator, wtf16le.len);
+ var result = try std.array_list.Managed(u8).initCapacity(allocator, wtf16le.len);
errdefer result.deinit();
try utf16LeToUtf8ArrayListImpl(&result, wtf16le, .can_encode_surrogate_half);
@@ -1768,7 +1768,7 @@ pub fn wtf16LeToWtf8Alloc(allocator: mem.Allocator, wtf16le: []const u16) mem.Al
/// Caller must free returned memory.
pub fn wtf16LeToWtf8AllocZ(allocator: mem.Allocator, wtf16le: []const u16) mem.Allocator.Error![:0]u8 {
// optimistically guess that it will all be ascii (and allocate space for the null terminator)
- var result = try std.ArrayList(u8).initCapacity(allocator, wtf16le.len + 1);
+ var result = try std.array_list.Managed(u8).initCapacity(allocator, wtf16le.len + 1);
errdefer result.deinit();
try utf16LeToUtf8ArrayListImpl(&result, wtf16le, .can_encode_surrogate_half);
@@ -1779,14 +1779,14 @@ pub fn wtf16LeToWtf8(wtf8: []u8, wtf16le: []const u16) usize {
return utf16LeToUtf8Impl(wtf8, wtf16le, .can_encode_surrogate_half) catch |err| switch (err) {};
}
-pub fn wtf8ToWtf16LeArrayList(result: *std.ArrayList(u16), wtf8: []const u8) error{ InvalidWtf8, OutOfMemory }!void {
+pub fn wtf8ToWtf16LeArrayList(result: *std.array_list.Managed(u16), wtf8: []const u8) error{ InvalidWtf8, OutOfMemory }!void {
try result.ensureUnusedCapacity(wtf8.len);
return utf8ToUtf16LeArrayListImpl(result, wtf8, .can_encode_surrogate_half);
}
pub fn wtf8ToWtf16LeAlloc(allocator: mem.Allocator, wtf8: []const u8) error{ InvalidWtf8, OutOfMemory }![]u16 {
// optimistically guess that it will not require surrogate pairs
- var result = try std.ArrayList(u16).initCapacity(allocator, wtf8.len);
+ var result = try std.array_list.Managed(u16).initCapacity(allocator, wtf8.len);
errdefer result.deinit();
try utf8ToUtf16LeArrayListImpl(&result, wtf8, .can_encode_surrogate_half);
@@ -1795,7 +1795,7 @@ pub fn wtf8ToWtf16LeAlloc(allocator: mem.Allocator, wtf8: []const u8) error{ Inv
pub fn wtf8ToWtf16LeAllocZ(allocator: mem.Allocator, wtf8: []const u8) error{ InvalidWtf8, OutOfMemory }![:0]u16 {
// optimistically guess that it will not require surrogate pairs
- var result = try std.ArrayList(u16).initCapacity(allocator, wtf8.len + 1);
+ var result = try std.array_list.Managed(u16).initCapacity(allocator, wtf8.len + 1);
errdefer result.deinit();
try utf8ToUtf16LeArrayListImpl(&result, wtf8, .can_encode_surrogate_half);
lib/std/zig.zig
@@ -349,7 +349,7 @@ pub const LtoMode = enum { none, full, thin };
/// Renders a `std.Target.Cpu` value into a textual representation that can be parsed
/// via the `-mcpu` flag passed to the Zig compiler.
/// Appends the result to `buffer`.
-pub fn serializeCpu(buffer: *std.ArrayList(u8), cpu: std.Target.Cpu) Allocator.Error!void {
+pub fn serializeCpu(buffer: *std.array_list.Managed(u8), cpu: std.Target.Cpu) Allocator.Error!void {
const all_features = cpu.arch.allFeaturesList();
var populated_cpu_features = cpu.model.features;
populated_cpu_features.populateDependencies(all_features);
@@ -377,7 +377,7 @@ pub fn serializeCpu(buffer: *std.ArrayList(u8), cpu: std.Target.Cpu) Allocator.E
}
pub fn serializeCpuAlloc(ally: Allocator, cpu: std.Target.Cpu) Allocator.Error![]u8 {
- var buffer = std.ArrayList(u8).init(ally);
+ var buffer = std.array_list.Managed(u8).init(ally);
try serializeCpu(&buffer, cpu);
return buffer.toOwnedSlice();
}
@@ -633,7 +633,7 @@ pub fn parseTargetQueryOrReportFatalError(
return std.Target.Query.parse(opts_with_diags) catch |err| switch (err) {
error.UnknownCpuModel => {
help: {
- var help_text = std.ArrayList(u8).init(allocator);
+ var help_text = std.array_list.Managed(u8).init(allocator);
defer help_text.deinit();
for (diags.arch.?.allCpuModels()) |cpu| {
help_text.print(" {s}\n", .{cpu.name}) catch break :help;
@@ -646,7 +646,7 @@ pub fn parseTargetQueryOrReportFatalError(
},
error.UnknownCpuFeature => {
help: {
- var help_text = std.ArrayList(u8).init(allocator);
+ var help_text = std.array_list.Managed(u8).init(allocator);
defer help_text.deinit();
for (diags.arch.?.allFeaturesList()) |feature| {
help_text.print(" {s}: {s}\n", .{ feature.name, feature.description }) catch break :help;
@@ -659,7 +659,7 @@ pub fn parseTargetQueryOrReportFatalError(
},
error.UnknownObjectFormat => {
help: {
- var help_text = std.ArrayList(u8).init(allocator);
+ var help_text = std.array_list.Managed(u8).init(allocator);
defer help_text.deinit();
inline for (@typeInfo(std.Target.ObjectFormat).@"enum".fields) |field| {
help_text.print(" {s}\n", .{field.name}) catch break :help;
@@ -670,7 +670,7 @@ pub fn parseTargetQueryOrReportFatalError(
},
error.UnknownArchitecture => {
help: {
- var help_text = std.ArrayList(u8).init(allocator);
+ var help_text = std.array_list.Managed(u8).init(allocator);
defer help_text.deinit();
inline for (@typeInfo(std.Target.Cpu.Arch).@"enum".fields) |field| {
help_text.print(" {s}\n", .{field.name}) catch break :help;
src/arch/riscv64/CodeGen.zig
@@ -101,7 +101,7 @@ reused_operands: std.StaticBitSet(Air.Liveness.bpi - 1) = undefined,
/// within different branches. Special consideration is needed when a branch
/// joins with its parent, to make sure all instructions have the same MCValue
/// across each runtime branch upon joining.
-branch_stack: *std.ArrayList(Branch),
+branch_stack: *std.array_list.Managed(Branch),
// Currently set vector properties, null means they haven't been set yet in the function.
avl: ?u64,
@@ -674,7 +674,7 @@ fn restoreState(func: *Func, state: State, deaths: []const Air.Inst.Index, compt
var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) =
if (opts.update_tracking) {} else std.heap.stackFallback(@sizeOf(ExpectedContents), func.gpa);
- var reg_locks = if (opts.update_tracking) {} else try std.ArrayList(RegisterLock).initCapacity(
+ var reg_locks = if (opts.update_tracking) {} else try std.array_list.Managed(RegisterLock).initCapacity(
stack.get(),
@typeInfo(ExpectedContents).array.len,
);
@@ -753,7 +753,7 @@ pub fn generate(
const fn_type = Type.fromInterned(func.ty);
const mod = zcu.navFileScope(func.owner_nav).mod.?;
- var branch_stack = std.ArrayList(Branch).init(gpa);
+ var branch_stack = std.array_list.Managed(Branch).init(gpa);
defer {
assert(branch_stack.items.len == 1);
branch_stack.items[0].deinit(gpa);
@@ -4883,7 +4883,7 @@ fn genCall(
stack_frame_align.* = stack_frame_align.max(needed_call_frame.abi_align);
}
- var reg_locks = std.ArrayList(?RegisterLock).init(allocator);
+ var reg_locks = std.array_list.Managed(?RegisterLock).init(allocator);
defer reg_locks.deinit();
try reg_locks.ensureTotalCapacity(8);
defer for (reg_locks.items) |reg_lock| if (reg_lock) |lock| func.register_manager.unlockReg(lock);
@@ -6056,7 +6056,7 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
extra_i += inputs.len;
var result: MCValue = .none;
- var args = std.ArrayList(MCValue).init(func.gpa);
+ var args = std.array_list.Managed(MCValue).init(func.gpa);
try args.ensureTotalCapacity(outputs.len + inputs.len);
defer {
for (args.items) |arg| if (arg.getReg()) |reg| func.register_manager.unlockReg(.{
src/arch/sparc64/CodeGen.zig
@@ -88,7 +88,7 @@ reused_operands: std.StaticBitSet(Air.Liveness.bpi - 1) = undefined,
/// within different branches. Special consideration is needed when a branch
/// joins with its parent, to make sure all instructions have the same MCValue
/// across each runtime branch upon joining.
-branch_stack: *std.ArrayList(Branch),
+branch_stack: *std.array_list.Managed(Branch),
// Key is the block instruction
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty,
@@ -276,7 +276,7 @@ pub fn generate(
const file_scope = zcu.navFileScope(func.owner_nav);
const target = &file_scope.mod.?.resolved_target.result;
- var branch_stack = std.ArrayList(Branch).init(gpa);
+ var branch_stack = std.array_list.Managed(Branch).init(gpa);
defer {
assert(branch_stack.items.len == 1);
branch_stack.items[0].deinit(gpa);
src/arch/wasm/CodeGen.zig
@@ -1301,7 +1301,7 @@ fn resolveCallingConventionValues(
};
if (cc == .naked) return result;
- var args = std.ArrayList(WValue).init(gpa);
+ var args = std.array_list.Managed(WValue).init(gpa);
defer args.deinit();
// Check if we store the result as a pointer to the stack rather than
@@ -7132,7 +7132,7 @@ fn airErrorSetHasValue(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result = try cg.allocLocal(Type.bool);
const names = error_set_ty.errorSetNames(zcu);
- var values = try std.ArrayList(u32).initCapacity(cg.gpa, names.len);
+ var values = try std.array_list.Managed(u32).initCapacity(cg.gpa, names.len);
defer values.deinit();
var lowest: ?u32 = null;
src/arch/x86_64/CodeGen.zig
@@ -169359,7 +169359,7 @@ fn restoreState(self: *CodeGen, state: State, deaths: []const Air.Inst.Index, co
var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) =
if (opts.update_tracking) {} else std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
- var reg_locks = if (opts.update_tracking) {} else try std.ArrayList(RegisterLock).initCapacity(
+ var reg_locks = if (opts.update_tracking) {} else try std.array_list.Managed(RegisterLock).initCapacity(
stack.get(),
@typeInfo(ExpectedContents).array.len,
);
@@ -178175,7 +178175,7 @@ fn genCall(self: *CodeGen, info: union(enum) {
const frame_indices = try allocator.alloc(FrameIndex, args.len);
defer allocator.free(frame_indices);
- var reg_locks: std.ArrayList(?RegisterLock) = .init(allocator);
+ var reg_locks: std.array_list.Managed(?RegisterLock) = .init(allocator);
defer reg_locks.deinit();
try reg_locks.ensureTotalCapacity(16);
defer for (reg_locks.items) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);
@@ -179786,7 +179786,7 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
extra_i += inputs.len;
var result: MCValue = .none;
- var args: std.ArrayList(MCValue) = .init(self.gpa);
+ var args: std.array_list.Managed(MCValue) = .init(self.gpa);
try args.ensureTotalCapacity(outputs.len + inputs.len);
defer {
for (args.items) |arg| if (arg.getReg()) |reg| self.register_manager.unlockReg(.{
src/arch/x86_64/encoder.zig
@@ -1216,7 +1216,7 @@ const TestEncode = struct {
};
test "encode" {
- var buf = std.ArrayList(u8).init(testing.allocator);
+ var buf = std.array_list.Managed(u8).init(testing.allocator);
defer buf.deinit();
const inst: Instruction = try .new(.none, .mov, &.{
@@ -2647,7 +2647,7 @@ test "assemble" {
// zig fmt: on
var as = Assembler.init(input);
- var output = std.ArrayList(u8).init(testing.allocator);
+ var output = std.array_list.Managed(u8).init(testing.allocator);
defer output.deinit();
try as.assemble(output.writer());
try expectEqualHexStrings(expected, output.items, input);
@@ -2691,7 +2691,7 @@ test "assemble - Jcc" {
const input = @tagName(mnemonic[0]) ++ " 0x0";
const expected = [_]u8{ 0x0f, mnemonic[1], 0x0, 0x0, 0x0, 0x0 };
var as = Assembler.init(input);
- var output = std.ArrayList(u8).init(testing.allocator);
+ var output = std.array_list.Managed(u8).init(testing.allocator);
defer output.deinit();
try as.assemble(output.writer());
try expectEqualHexStrings(&expected, output.items, input);
@@ -2736,7 +2736,7 @@ test "assemble - SETcc" {
const input = @tagName(mnemonic[0]) ++ " al";
const expected = [_]u8{ 0x0f, mnemonic[1], 0xC0 };
var as = Assembler.init(input);
- var output = std.ArrayList(u8).init(testing.allocator);
+ var output = std.array_list.Managed(u8).init(testing.allocator);
defer output.deinit();
try as.assemble(output.writer());
try expectEqualHexStrings(&expected, output.items, input);
@@ -2781,7 +2781,7 @@ test "assemble - CMOVcc" {
const input = @tagName(mnemonic[0]) ++ " rax, rbx";
const expected = [_]u8{ 0x48, 0x0f, mnemonic[1], 0xC3 };
var as = Assembler.init(input);
- var output = std.ArrayList(u8).init(testing.allocator);
+ var output = std.array_list.Managed(u8).init(testing.allocator);
defer output.deinit();
try as.assemble(output.writer());
try expectEqualHexStrings(&expected, output.items, input);
src/codegen/spirv/CodeGen.zig
@@ -970,10 +970,10 @@ fn constant(cg: *CodeGen, ty: Type, val: Value, repr: Repr) Error!Id {
return try cg.constInt(backing_ty, @as(u64, @bitCast(limbs)));
}
- var types = std.ArrayList(Type).init(gpa);
+ var types = std.array_list.Managed(Type).init(gpa);
defer types.deinit();
- var constituents = std.ArrayList(Id).init(gpa);
+ var constituents = std.array_list.Managed(Id).init(gpa);
defer constituents.deinit();
var it = struct_type.iterateRuntimeOrder(ip);
@@ -1519,13 +1519,13 @@ fn resolveType(cg: *CodeGen, ty: Type, repr: Repr) Error!Id {
return try cg.resolveType(.fromInterned(struct_type.backingIntTypeUnordered(ip)), .direct);
}
- var member_types = std.ArrayList(Id).init(gpa);
+ var member_types = std.array_list.Managed(Id).init(gpa);
defer member_types.deinit();
- var member_names = std.ArrayList([]const u8).init(gpa);
+ var member_names = std.array_list.Managed([]const u8).init(gpa);
defer member_names.deinit();
- var member_offsets = std.ArrayList(u32).init(gpa);
+ var member_offsets = std.array_list.Managed(u32).init(gpa);
defer member_offsets.deinit();
var it = struct_type.iterateRuntimeOrder(ip);
src/codegen/spirv/Module.zig
@@ -281,7 +281,7 @@ pub fn addEntryPointDeps(
module: *Module,
decl_index: Decl.Index,
seen: *std.DynamicBitSetUnmanaged,
- interface: *std.ArrayList(Id),
+ interface: *std.array_list.Managed(Id),
) !void {
const decl = module.declPtr(decl_index);
const deps = module.decl_deps.items[decl.begin_dep..decl.end_dep];
@@ -307,7 +307,7 @@ fn entryPoints(module: *Module) !Section {
var entry_points = Section{};
errdefer entry_points.deinit(module.gpa);
- var interface = std.ArrayList(Id).init(module.gpa);
+ var interface = std.array_list.Managed(Id).init(module.gpa);
defer interface.deinit();
var seen = try std.DynamicBitSetUnmanaged.initEmpty(module.gpa, module.decls.items.len);
src/codegen/llvm.zig
@@ -53,7 +53,7 @@ fn subArchName(target: *const std.Target, comptime family: std.Target.Cpu.Arch.F
}
pub fn targetTriple(allocator: Allocator, target: *const std.Target) ![]const u8 {
- var llvm_triple = std.ArrayList(u8).init(allocator);
+ var llvm_triple = std.array_list.Managed(u8).init(allocator);
defer llvm_triple.deinit();
const llvm_arch = switch (target.cpu.arch) {
@@ -820,7 +820,7 @@ pub const Object = struct {
}
{
- var module_flags = try std.ArrayList(Builder.Metadata).initCapacity(o.gpa, 8);
+ var module_flags = try std.array_list.Managed(Builder.Metadata).initCapacity(o.gpa, 8);
defer module_flags.deinit();
const behavior_error = try o.builder.metadataConstant(try o.builder.intConst(.i32, 1));
@@ -2583,7 +2583,7 @@ pub const Object = struct {
.@"fn" => {
const fn_info = zcu.typeToFunc(ty).?;
- var debug_param_types = std.ArrayList(Builder.Metadata).init(gpa);
+ var debug_param_types = std.array_list.Managed(Builder.Metadata).init(gpa);
defer debug_param_types.deinit();
try debug_param_types.ensureUnusedCapacity(3 + fn_info.param_types.len);
@@ -5254,7 +5254,7 @@ pub const FuncGen = struct {
const target = zcu.getTarget();
const sret = firstParamSRet(fn_info, zcu, target);
- var llvm_args = std.ArrayList(Builder.Value).init(self.gpa);
+ var llvm_args = std.array_list.Managed(Builder.Value).init(self.gpa);
defer llvm_args.deinit();
var attributes: Builder.FunctionAttributes.Wip = .{};
@@ -7536,7 +7536,7 @@ pub const FuncGen = struct {
const asm_source = std.mem.sliceAsBytes(self.air.extra.items[extra_i..])[0..extra.data.source_len];
// hackety hacks until stage2 has proper inline asm in the frontend.
- var rendered_template = std.ArrayList(u8).init(gpa);
+ var rendered_template = std.array_list.Managed(u8).init(gpa);
defer rendered_template.deinit();
const State = enum { start, percent, input, modifier };
src/libs/freebsd.zig
@@ -76,7 +76,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
switch (crt_file) {
.scrt1_o => {
- var cflags = std.ArrayList([]const u8).init(arena);
+ var cflags = std.array_list.Managed([]const u8).init(arena);
try cflags.appendSlice(&.{
"-O2",
"-fno-common",
@@ -89,7 +89,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
try cflags.append("-mlongcall");
}
- var acflags = std.ArrayList([]const u8).init(arena);
+ var acflags = std.array_list.Managed([]const u8).init(arena);
try acflags.appendSlice(&.{
"-DLOCORE",
// See `Compilation.addCCArgs`.
@@ -510,7 +510,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
};
{
- var map_contents = std.ArrayList(u8).init(arena);
+ var map_contents = std.array_list.Managed(u8).init(arena);
for (metadata.all_versions[0 .. target_ver_index + 1]) |ver| {
try map_contents.writer().print("FBSD_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
}
@@ -518,7 +518,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
map_contents.deinit();
}
- var stubs_asm = std.ArrayList(u8).init(gpa);
+ var stubs_asm = std.array_list.Managed(u8).init(gpa);
defer stubs_asm.deinit();
for (libs, 0..) |lib, lib_i| {
@@ -529,7 +529,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
try stubs_writer.writeAll(".text\n");
var sym_i: usize = 0;
- var sym_name_buf = std.ArrayList(u8).init(arena);
+ var sym_name_buf = std.array_list.Managed(u8).init(arena);
var opt_symbol_name: ?[]const u8 = null;
var versions = try std.DynamicBitSetUnmanaged.initEmpty(arena, metadata.all_versions.len);
var weak_linkages = try std.DynamicBitSetUnmanaged.initEmpty(arena, metadata.all_versions.len);
src/libs/glibc.zig
@@ -186,7 +186,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
switch (crt_file) {
.scrt1_o => {
const start_o: Compilation.CSourceFile = blk: {
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try add_include_dirs(comp, arena, &args);
try args.appendSlice(&[_][]const u8{
"-D_LIBC_REENTRANT",
@@ -210,7 +210,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
};
};
const abi_note_o: Compilation.CSourceFile = blk: {
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try args.appendSlice(&[_][]const u8{
"-I",
try lib_path(comp, arena, lib_libc_glibc ++ "csu"),
@@ -306,7 +306,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
for (deps) |dep| {
if (!dep.include) continue;
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try args.appendSlice(&[_][]const u8{
"-std=gnu11",
"-fgnu89-inline",
@@ -364,7 +364,7 @@ fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![
const s = path.sep_str;
- var result = std.ArrayList(u8).init(arena);
+ var result = std.array_list.Managed(u8).init(arena);
try result.appendSlice(comp.dirs.zig_lib.path orelse ".");
try result.appendSlice(s ++ "libc" ++ s ++ "glibc" ++ s ++ "sysdeps" ++ s);
if (is_sparc) {
@@ -408,7 +408,7 @@ fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![
return result.items;
}
-fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void {
+fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.array_list.Managed([]const u8)) error{OutOfMemory}!void {
const target = comp.getTarget();
const opt_nptl: ?[]const u8 = if (target.os.tag == .linux) "nptl" else "htl";
@@ -484,7 +484,7 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([
fn add_include_dirs_arch(
arena: Allocator,
- args: *std.ArrayList([]const u8),
+ args: *std.array_list.Managed([]const u8),
target: *const std.Target,
opt_nptl: ?[]const u8,
dir: []const u8,
@@ -749,7 +749,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
};
{
- var map_contents = std.ArrayList(u8).init(arena);
+ var map_contents = std.array_list.Managed(u8).init(arena);
for (metadata.all_versions[0 .. target_ver_index + 1]) |ver| {
if (ver.patch == 0) {
try map_contents.writer().print("GLIBC_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
@@ -761,7 +761,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
map_contents.deinit(); // The most recent allocation of an arena can be freed :)
}
- var stubs_asm = std.ArrayList(u8).init(gpa);
+ var stubs_asm = std.array_list.Managed(u8).init(gpa);
defer stubs_asm.deinit();
for (libs, 0..) |lib, lib_i| {
@@ -773,7 +773,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
try stubs_asm.appendSlice(".text\n");
var sym_i: usize = 0;
- var sym_name_buf = std.ArrayList(u8).init(arena);
+ var sym_name_buf = std.array_list.Managed(u8).init(arena);
var opt_symbol_name: ?[]const u8 = null;
var versions_buffer: [32]u8 = undefined;
var versions_len: usize = undefined;
src/libs/libcxx.zig
@@ -190,7 +190,7 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
else
&libcxx_base_files;
- var c_source_files = try std.ArrayList(Compilation.CSourceFile).initCapacity(arena, libcxx_files.len);
+ var c_source_files = try std.array_list.Managed(Compilation.CSourceFile).initCapacity(arena, libcxx_files.len);
for (libcxx_files) |cxx_src| {
// These don't compile on WASI due to e.g. `fchmod` usage.
@@ -201,7 +201,7 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
if (std.mem.startsWith(u8, cxx_src, "src/support/ibm/") and target.os.tag != .zos)
continue;
- var cflags = std.ArrayList([]const u8).init(arena);
+ var cflags = std.array_list.Managed([]const u8).init(arena);
try addCxxArgs(comp, arena, &cflags);
@@ -233,7 +233,7 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
// These depend on only the zig lib directory file path, which is
// purposefully either in the cache or not in the cache. The decision
// should not be overridden here.
- var cache_exempt_flags = std.ArrayList([]const u8).init(arena);
+ var cache_exempt_flags = std.array_list.Managed([]const u8).init(arena);
try cache_exempt_flags.append("-I");
try cache_exempt_flags.append(cxx_include_path);
@@ -385,7 +385,7 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
return error.AlreadyReported;
};
- var c_source_files = try std.ArrayList(Compilation.CSourceFile).initCapacity(arena, libcxxabi_files.len);
+ var c_source_files = try std.array_list.Managed(Compilation.CSourceFile).initCapacity(arena, libcxxabi_files.len);
for (libcxxabi_files) |cxxabi_src| {
if (!comp.config.any_non_single_threaded and std.mem.startsWith(u8, cxxabi_src, "src/cxa_thread_atexit.cpp"))
@@ -394,7 +394,7 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
(std.mem.eql(u8, cxxabi_src, "src/cxa_exception.cpp") or std.mem.eql(u8, cxxabi_src, "src/cxa_personality.cpp")))
continue;
- var cflags = std.ArrayList([]const u8).init(arena);
+ var cflags = std.array_list.Managed([]const u8).init(arena);
try addCxxArgs(comp, arena, &cflags);
@@ -425,7 +425,7 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
// These depend on only the zig lib directory file path, which is
// purposefully either in the cache or not in the cache. The decision
// should not be overridden here.
- var cache_exempt_flags = std.ArrayList([]const u8).init(arena);
+ var cache_exempt_flags = std.array_list.Managed([]const u8).init(arena);
try cache_exempt_flags.append("-I");
try cache_exempt_flags.append(cxxabi_include_path);
@@ -497,7 +497,7 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
pub fn addCxxArgs(
comp: *const Compilation,
arena: std.mem.Allocator,
- cflags: *std.ArrayList([]const u8),
+ cflags: *std.array_list.Managed([]const u8),
) error{OutOfMemory}!void {
const target = comp.getTarget();
const optimize_mode = comp.compilerRtOptMode();
src/libs/libtsan.zig
@@ -113,12 +113,12 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
return error.AlreadyReported;
};
- var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
+ var c_source_files = std.array_list.Managed(Compilation.CSourceFile).init(arena);
try c_source_files.ensureUnusedCapacity(tsan_sources.len);
const tsan_include_path = try comp.dirs.zig_lib.join(arena, &.{"libtsan"});
for (tsan_sources) |tsan_src| {
- var cflags = std.ArrayList([]const u8).init(arena);
+ var cflags = std.array_list.Managed([]const u8).init(arena);
try cflags.append("-I");
try cflags.append(tsan_include_path);
@@ -139,7 +139,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
};
try c_source_files.ensureUnusedCapacity(platform_tsan_sources.len);
for (platform_tsan_sources) |tsan_src| {
- var cflags = std.ArrayList([]const u8).init(arena);
+ var cflags = std.array_list.Managed([]const u8).init(arena);
try cflags.append("-I");
try cflags.append(tsan_include_path);
@@ -163,7 +163,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
.x86_64 => "tsan_rtl_amd64.S",
else => return error.TSANUnsupportedCPUArchitecture,
};
- var cflags = std.ArrayList([]const u8).init(arena);
+ var cflags = std.array_list.Managed([]const u8).init(arena);
try cflags.append("-I");
try cflags.append(tsan_include_path);
@@ -182,7 +182,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
"libtsan", "sanitizer_common",
});
for (sanitizer_common_sources) |common_src| {
- var cflags = std.ArrayList([]const u8).init(arena);
+ var cflags = std.array_list.Managed([]const u8).init(arena);
try cflags.append("-I");
try cflags.append(sanitizer_common_include_path);
@@ -206,7 +206,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
&sanitizer_nolibc_sources;
try c_source_files.ensureUnusedCapacity(to_c_or_not_to_c_sources.len);
for (to_c_or_not_to_c_sources) |c_src| {
- var cflags = std.ArrayList([]const u8).init(arena);
+ var cflags = std.array_list.Managed([]const u8).init(arena);
try cflags.append("-I");
try cflags.append(sanitizer_common_include_path);
@@ -226,7 +226,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
try c_source_files.ensureUnusedCapacity(sanitizer_symbolizer_sources.len);
for (sanitizer_symbolizer_sources) |c_src| {
- var cflags = std.ArrayList([]const u8).init(arena);
+ var cflags = std.array_list.Managed([]const u8).init(arena);
try cflags.append("-I");
try cflags.append(tsan_include_path);
@@ -246,7 +246,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
try c_source_files.ensureUnusedCapacity(interception_sources.len);
for (interception_sources) |c_src| {
- var cflags = std.ArrayList([]const u8).init(arena);
+ var cflags = std.array_list.Managed([]const u8).init(arena);
try cflags.append("-I");
try cflags.append(interception_include_path);
@@ -323,7 +323,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
comp.tsan_lib = crt_file;
}
-fn addCcArgs(target: *const std.Target, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void {
+fn addCcArgs(target: *const std.Target, args: *std.array_list.Managed([]const u8)) error{OutOfMemory}!void {
try args.appendSlice(&[_][]const u8{
"-nostdinc++",
"-fvisibility=hidden",
src/libs/libunwind.zig
@@ -87,7 +87,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
const root_name = "unwind";
var c_source_files: [unwind_src_list.len]Compilation.CSourceFile = undefined;
for (unwind_src_list, 0..) |unwind_src, i| {
- var cflags = std.ArrayList([]const u8).init(arena);
+ var cflags = std.array_list.Managed([]const u8).init(arena);
switch (Compilation.classifyFileExt(unwind_src)) {
.c => {
src/libs/mingw.zig
@@ -33,7 +33,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
switch (crt_file) {
.crt2_o => {
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try addCrtCcArgs(comp, arena, &args);
if (comp.mingw_unicode_entry_point) {
try args.appendSlice(&.{ "-DUNICODE", "-D_UNICODE" });
@@ -53,7 +53,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
},
.dllcrt2_o => {
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try addCrtCcArgs(comp, arena, &args);
var files = [_]Compilation.CSourceFile{
.{
@@ -70,10 +70,10 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
},
.libmingw32_lib => {
- var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
+ var c_source_files = std.array_list.Managed(Compilation.CSourceFile).init(arena);
{
- var crt_args = std.ArrayList([]const u8).init(arena);
+ var crt_args = std.array_list.Managed([]const u8).init(arena);
try addCrtCcArgs(comp, arena, &crt_args);
for (mingw32_generic_src) |dep| {
@@ -150,7 +150,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
}
{
- var winpthreads_args = std.ArrayList([]const u8).init(arena);
+ var winpthreads_args = std.array_list.Managed([]const u8).init(arena);
try addCcArgs(comp, arena, &winpthreads_args);
try winpthreads_args.appendSlice(&[_][]const u8{
"-DIN_WINPTHREAD",
@@ -186,7 +186,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
fn addCcArgs(
comp: *Compilation,
arena: Allocator,
- args: *std.ArrayList([]const u8),
+ args: *std.array_list.Managed([]const u8),
) error{OutOfMemory}!void {
try args.appendSlice(&[_][]const u8{
"-std=gnu11",
@@ -200,7 +200,7 @@ fn addCcArgs(
fn addCrtCcArgs(
comp: *Compilation,
arena: Allocator,
- args: *std.ArrayList([]const u8),
+ args: *std.array_list.Managed([]const u8),
) error{OutOfMemory}!void {
try addCcArgs(comp, arena, args);
@@ -401,7 +401,7 @@ fn findDef(
else => unreachable,
};
- var override_path = std.ArrayList(u8).init(allocator);
+ var override_path = std.array_list.Managed(u8).init(allocator);
defer override_path.deinit();
const s = path.sep_str;
src/libs/musl.zig
@@ -29,7 +29,7 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
switch (in_crt_file) {
.crt1_o => {
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try addCcArgs(comp, arena, &args, false);
try args.append("-DCRT");
var files = [_]Compilation.CSourceFile{
@@ -49,7 +49,7 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
});
},
.rcrt1_o => {
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try addCcArgs(comp, arena, &args, false);
try args.append("-DCRT");
var files = [_]Compilation.CSourceFile{
@@ -70,7 +70,7 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
});
},
.scrt1_o => {
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try addCcArgs(comp, arena, &args, false);
try args.append("-DCRT");
var files = [_]Compilation.CSourceFile{
@@ -112,10 +112,10 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
}
}
- var c_source_files = std.ArrayList(Compilation.CSourceFile).init(comp.gpa);
+ var c_source_files = std.array_list.Managed(Compilation.CSourceFile).init(comp.gpa);
defer c_source_files.deinit();
- var override_path = std.ArrayList(u8).init(comp.gpa);
+ var override_path = std.array_list.Managed(u8).init(comp.gpa);
defer override_path.deinit();
const s = path.sep_str;
@@ -161,7 +161,7 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
continue;
}
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try addCcArgs(comp, arena, &args, ext == .o3);
const c_source_file = try c_source_files.addOne();
c_source_file.* = .{
@@ -390,7 +390,7 @@ fn addSrcFile(arena: Allocator, source_table: *std.StringArrayHashMap(Ext), file
fn addCcArgs(
comp: *Compilation,
arena: Allocator,
- args: *std.ArrayList([]const u8),
+ args: *std.array_list.Managed([]const u8),
want_O3: bool,
) error{OutOfMemory}!void {
const target = comp.getTarget();
src/libs/netbsd.zig
@@ -69,13 +69,13 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
switch (crt_file) {
.scrt0_o => {
- var cflags = std.ArrayList([]const u8).init(arena);
+ var cflags = std.array_list.Managed([]const u8).init(arena);
try cflags.appendSlice(&.{
"-DHAVE_INITFINI_ARRAY",
"-w", // Disable all warnings.
});
- var acflags = std.ArrayList([]const u8).init(arena);
+ var acflags = std.array_list.Managed([]const u8).init(arena);
try acflags.appendSlice(&.{
// See `Compilation.addCCArgs`.
try std.fmt.allocPrint(arena, "-D__NetBSD_Version__={d}", .{(target_version.major * 100_000_000) + (target_version.minor * 1_000_000)}),
@@ -454,7 +454,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
break :blk latest_index;
};
- var stubs_asm = std.ArrayList(u8).init(gpa);
+ var stubs_asm = std.array_list.Managed(u8).init(gpa);
defer stubs_asm.deinit();
for (libs, 0..) |lib, lib_i| {
@@ -465,7 +465,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
try stubs_writer.writeAll(".text\n");
var sym_i: usize = 0;
- var sym_name_buf = std.ArrayList(u8).init(arena);
+ var sym_name_buf = std.array_list.Managed(u8).init(arena);
var opt_symbol_name: ?[]const u8 = null;
var inc_fbs = std.io.fixedBufferStream(metadata.inclusions);
src/libs/wasi_libc.zig
@@ -41,7 +41,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
switch (crt_file) {
.crt1_reactor_o => {
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try addCCArgs(comp, arena, &args, .{});
try addLibcBottomHalfIncludes(comp, arena, &args);
var files = [_]Compilation.CSourceFile{
@@ -56,7 +56,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
return comp.build_crt_file("crt1-reactor", .Obj, .@"wasi crt1-reactor.o", prog_node, &files, .{});
},
.crt1_command_o => {
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try addCCArgs(comp, arena, &args, .{});
try addLibcBottomHalfIncludes(comp, arena, &args);
var files = [_]Compilation.CSourceFile{
@@ -71,11 +71,11 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
return comp.build_crt_file("crt1-command", .Obj, .@"wasi crt1-command.o", prog_node, &files, .{});
},
.libc_a => {
- var libc_sources = std.ArrayList(Compilation.CSourceFile).init(arena);
+ var libc_sources = std.array_list.Managed(Compilation.CSourceFile).init(arena);
{
// Compile emmalloc.
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try addCCArgs(comp, arena, &args, .{ .want_O3 = true, .no_strict_aliasing = true });
for (emmalloc_src_files) |file_path| {
try libc_sources.append(.{
@@ -90,7 +90,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
{
// Compile libc-bottom-half.
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try addCCArgs(comp, arena, &args, .{ .want_O3 = true });
try addLibcBottomHalfIncludes(comp, arena, &args);
@@ -107,7 +107,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
{
// Compile libc-top-half.
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try addCCArgs(comp, arena, &args, .{ .want_O3 = true });
try addLibcTopHalfIncludes(comp, arena, &args);
@@ -124,7 +124,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
{
// Compile libdl.
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try addCCArgs(comp, arena, &args, .{ .want_O3 = true });
try addLibcBottomHalfIncludes(comp, arena, &args);
@@ -141,7 +141,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
{
// Compile libwasi-emulated-process-clocks.
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try addCCArgs(comp, arena, &args, .{ .want_O3 = true });
try args.appendSlice(&.{
"-I",
@@ -167,7 +167,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
{
// Compile libwasi-emulated-getpid.
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try addCCArgs(comp, arena, &args, .{ .want_O3 = true });
try addLibcBottomHalfIncludes(comp, arena, &args);
@@ -184,7 +184,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
{
// Compile libwasi-emulated-mman.
- var args = std.ArrayList([]const u8).init(arena);
+ var args = std.array_list.Managed([]const u8).init(arena);
try addCCArgs(comp, arena, &args, .{ .want_O3 = true });
try addLibcBottomHalfIncludes(comp, arena, &args);
@@ -201,7 +201,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
{
// Compile libwasi-emulated-signal.
- var bottom_args = std.ArrayList([]const u8).init(arena);
+ var bottom_args = std.array_list.Managed([]const u8).init(arena);
try addCCArgs(comp, arena, &bottom_args, .{ .want_O3 = true });
for (emulated_signal_bottom_half_src_files) |file_path| {
@@ -214,7 +214,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
});
}
- var top_args = std.ArrayList([]const u8).init(arena);
+ var top_args = std.array_list.Managed([]const u8).init(arena);
try addCCArgs(comp, arena, &top_args, .{ .want_O3 = true });
try addLibcTopHalfIncludes(comp, arena, &top_args);
try top_args.append("-D_WASI_EMULATED_SIGNAL");
@@ -259,7 +259,7 @@ const CCOptions = struct {
fn addCCArgs(
comp: *Compilation,
arena: Allocator,
- args: *std.ArrayList([]const u8),
+ args: *std.array_list.Managed([]const u8),
options: CCOptions,
) error{OutOfMemory}!void {
const target = comp.getTarget();
@@ -298,7 +298,7 @@ fn addCCArgs(
fn addLibcBottomHalfIncludes(
comp: *Compilation,
arena: Allocator,
- args: *std.ArrayList([]const u8),
+ args: *std.array_list.Managed([]const u8),
) error{OutOfMemory}!void {
try args.appendSlice(&[_][]const u8{
"-I",
@@ -370,7 +370,7 @@ fn addLibcBottomHalfIncludes(
fn addLibcTopHalfIncludes(
comp: *Compilation,
arena: Allocator,
- args: *std.ArrayList([]const u8),
+ args: *std.array_list.Managed([]const u8),
) error{OutOfMemory}!void {
try args.appendSlice(&[_][]const u8{
"-I",
src/link/Elf/Atom.zig
@@ -221,7 +221,7 @@ pub fn relocs(self: Atom, elf_file: *Elf) []const elf.Elf64_Rela {
}
}
-pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.Elf64_Rela)) !void {
+pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.array_list.Managed(elf.Elf64_Rela)) !void {
relocs_log.debug("0x{x}: {s}", .{ self.address(elf_file), self.name(elf_file) });
const cpu_arch = elf_file.getTarget().cpu.arch;
@@ -607,7 +607,7 @@ fn reportUndefined(
};
const gop = try undefs.getOrPut(idx);
if (!gop.found_existing) {
- gop.value_ptr.* = std.ArrayList(Elf.Ref).init(gpa);
+ gop.value_ptr.* = std.array_list.Managed(Elf.Ref).init(gpa);
}
try gop.value_ptr.append(.{ .index = self.atom_index, .file = self.file_index });
return true;
src/link/Elf/AtomList.zig
@@ -89,7 +89,7 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void {
list.dirty = false;
}
-pub fn write(list: AtomList, buffer: *std.ArrayList(u8), undefs: anytype, elf_file: *Elf) !void {
+pub fn write(list: AtomList, buffer: *std.array_list.Managed(u8), undefs: anytype, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const osec = elf_file.sections.items(.shdr)[list.output_section_index];
assert(osec.sh_type != elf.SHT_NOBITS);
@@ -126,7 +126,7 @@ pub fn write(list: AtomList, buffer: *std.ArrayList(u8), undefs: anytype, elf_fi
buffer.clearRetainingCapacity();
}
-pub fn writeRelocatable(list: AtomList, buffer: *std.ArrayList(u8), elf_file: *Elf) !void {
+pub fn writeRelocatable(list: AtomList, buffer: *std.array_list.Managed(u8), elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const osec = elf_file.sections.items(.shdr)[list.output_section_index];
assert(osec.sh_type != elf.SHT_NOBITS);
src/link/Elf/eh_frame.zig
@@ -195,7 +195,7 @@ pub fn calcEhFrameSize(elf_file: *Elf) !usize {
break :blk math.cast(usize, sym.atom(elf_file).?.size) orelse return error.Overflow;
} else 0;
- var cies = std.ArrayList(Cie).init(gpa);
+ var cies = std.array_list.Managed(Cie).init(gpa);
defer cies.deinit();
for (elf_file.objects.items) |index| {
@@ -413,7 +413,7 @@ fn emitReloc(elf_file: *Elf, r_offset: u64, sym: *const Symbol, rel: elf.Elf64_R
};
}
-pub fn writeEhFrameRelocs(elf_file: *Elf, relocs: *std.ArrayList(elf.Elf64_Rela)) !void {
+pub fn writeEhFrameRelocs(elf_file: *Elf, relocs: *std.array_list.Managed(elf.Elf64_Rela)) !void {
relocs_log.debug("{x}: .eh_frame", .{
elf_file.sections.items(.shdr)[elf_file.section_indexes.eh_frame.?].sh_addr,
});
@@ -493,7 +493,7 @@ pub fn writeEhFrameHdr(elf_file: *Elf, writer: anytype) !void {
}
};
- var entries = std.ArrayList(Entry).init(gpa);
+ var entries = std.array_list.Managed(Entry).init(gpa);
defer entries.deinit();
try entries.ensureTotalCapacityPrecise(num_fdes);
src/link/Elf/gc.zig
@@ -1,14 +1,14 @@
pub fn gcAtoms(elf_file: *Elf) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
- var roots = std.ArrayList(*Atom).init(gpa);
+ var roots = std.array_list.Managed(*Atom).init(gpa);
defer roots.deinit();
try collectRoots(&roots, elf_file);
mark(roots, elf_file);
prune(elf_file);
}
-fn collectRoots(roots: *std.ArrayList(*Atom), elf_file: *Elf) !void {
+fn collectRoots(roots: *std.array_list.Managed(*Atom), elf_file: *Elf) !void {
if (elf_file.linkerDefinedPtr()) |obj| {
if (obj.entrySymbol(elf_file)) |sym| {
try markSymbol(sym, roots, elf_file);
@@ -82,7 +82,7 @@ fn collectRoots(roots: *std.ArrayList(*Atom), elf_file: *Elf) !void {
}
}
-fn markSymbol(sym: *Symbol, roots: *std.ArrayList(*Atom), elf_file: *Elf) !void {
+fn markSymbol(sym: *Symbol, roots: *std.array_list.Managed(*Atom), elf_file: *Elf) !void {
if (sym.mergeSubsection(elf_file)) |msub| {
msub.alive = true;
return;
@@ -133,7 +133,7 @@ fn markLive(atom: *Atom, elf_file: *Elf) void {
}
}
-fn mark(roots: std.ArrayList(*Atom), elf_file: *Elf) void {
+fn mark(roots: std.array_list.Managed(*Atom), elf_file: *Elf) void {
for (roots.items) |root| {
gc_track_live_log.debug("root atom({d})", .{root.atom_index});
markLive(root, elf_file);
src/link/Elf/relocatable.zig
@@ -44,7 +44,7 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
try zig_object.readFileContents(elf_file);
}
- var files = std.ArrayList(File.Index).init(gpa);
+ var files = std.array_list.Managed(File.Index).init(gpa);
defer files.deinit();
try files.ensureTotalCapacityPrecise(elf_file.objects.items.len + 1);
if (elf_file.zigObjectPtr()) |zig_object| files.appendAssumeCapacity(zig_object.index);
@@ -100,7 +100,7 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
state_log.debug("ar_strtab\n{f}\n", .{ar_strtab});
}
- var buffer = std.ArrayList(u8).init(gpa);
+ var buffer = std.array_list.Managed(u8).init(gpa);
defer buffer.deinit();
try buffer.ensureTotalCapacityPrecise(total_size);
@@ -347,7 +347,7 @@ fn allocateAllocSections(elf_file: *Elf) !void {
fn writeAtoms(elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
- var buffer = std.ArrayList(u8).init(gpa);
+ var buffer = std.array_list.Managed(u8).init(gpa);
defer buffer.deinit();
const slice = elf_file.sections.slice();
@@ -377,7 +377,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
const num_relocs = math.cast(usize, @divExact(shdr.sh_size, shdr.sh_entsize)) orelse
return error.Overflow;
- var relocs = try std.ArrayList(elf.Elf64_Rela).initCapacity(gpa, num_relocs);
+ var relocs = try std.array_list.Managed(elf.Elf64_Rela).initCapacity(gpa, num_relocs);
defer relocs.deinit();
for (atom_list.items) |ref| {
@@ -407,7 +407,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
};
const shdr = slice.items(.shdr)[shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
- var buffer = try std.ArrayList(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
+ var buffer = try std.array_list.Managed(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
defer buffer.deinit();
try eh_frame.writeEhFrameRelocatable(elf_file, buffer.writer());
log.debug("writing .eh_frame from 0x{x} to 0x{x}", .{
@@ -421,7 +421,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
const shdr = slice.items(.shdr)[shndx];
const num_relocs = math.cast(usize, @divExact(shdr.sh_size, shdr.sh_entsize)) orelse
return error.Overflow;
- var relocs = try std.ArrayList(elf.Elf64_Rela).initCapacity(gpa, num_relocs);
+ var relocs = try std.array_list.Managed(elf.Elf64_Rela).initCapacity(gpa, num_relocs);
defer relocs.deinit();
try eh_frame.writeEhFrameRelocs(elf_file, &relocs);
assert(relocs.items.len == num_relocs);
@@ -446,7 +446,7 @@ fn writeGroups(elf_file: *Elf) !void {
for (elf_file.group_sections.items) |cgs| {
const shdr = elf_file.sections.items(.shdr)[cgs.shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
- var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
+ var buffer = try std.array_list.Managed(u8).initCapacity(gpa, sh_size);
defer buffer.deinit();
try cgs.write(elf_file, buffer.writer());
assert(buffer.items.len == sh_size);
src/link/Elf/synthetic_sections.zig
@@ -18,7 +18,7 @@ pub const DynamicSection = struct {
if (rpath_list.len == 0) return;
const comp = elf_file.base.comp;
const gpa = comp.gpa;
- var rpath = std.ArrayList(u8).init(gpa);
+ var rpath = std.array_list.Managed(u8).init(gpa);
defer rpath.deinit();
for (rpath_list, 0..) |path, i| {
if (i > 0) try rpath.append(':');
@@ -1350,7 +1350,7 @@ pub const VerneedSection = struct {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
- var verneed = std.ArrayList(VersionedSymbol).init(gpa);
+ var verneed = std.array_list.Managed(VersionedSymbol).init(gpa);
defer verneed.deinit();
try verneed.ensureTotalCapacity(dynsyms.len);
src/link/MachO/dyld_info/bind.zig
@@ -34,7 +34,7 @@ pub const Bind = struct {
const gpa = macho_file.base.comp.gpa;
const cpu_arch = macho_file.getTarget().cpu.arch;
- var objects = try std.ArrayList(File.Index).initCapacity(gpa, macho_file.objects.items.len + 2);
+ var objects = try std.array_list.Managed(File.Index).initCapacity(gpa, macho_file.objects.items.len + 2);
defer objects.deinit();
objects.appendSliceAssumeCapacity(macho_file.objects.items);
if (macho_file.getZigObject()) |obj| objects.appendAssumeCapacity(obj.index);
@@ -286,7 +286,7 @@ pub const WeakBind = struct {
const gpa = macho_file.base.comp.gpa;
const cpu_arch = macho_file.getTarget().cpu.arch;
- var objects = try std.ArrayList(File.Index).initCapacity(gpa, macho_file.objects.items.len + 2);
+ var objects = try std.array_list.Managed(File.Index).initCapacity(gpa, macho_file.objects.items.len + 2);
defer objects.deinit();
objects.appendSliceAssumeCapacity(macho_file.objects.items);
if (macho_file.getZigObject()) |obj| objects.appendAssumeCapacity(obj.index);
src/link/MachO/dyld_info/Rebase.zig
@@ -25,7 +25,7 @@ pub fn updateSize(rebase: *Rebase, macho_file: *MachO) !void {
const gpa = macho_file.base.comp.gpa;
- var objects = try std.ArrayList(File.Index).initCapacity(gpa, macho_file.objects.items.len + 2);
+ var objects = try std.array_list.Managed(File.Index).initCapacity(gpa, macho_file.objects.items.len + 2);
defer objects.deinit();
objects.appendSliceAssumeCapacity(macho_file.objects.items);
if (macho_file.getZigObject()) |obj| objects.appendAssumeCapacity(obj.index);
src/link/MachO/dyld_info/Trie.zig
@@ -134,7 +134,7 @@ fn finalize(self: *Trie, allocator: Allocator) !void {
const tracy = trace(@src());
defer tracy.end();
- var ordered_nodes = std.ArrayList(Node.Index).init(allocator);
+ var ordered_nodes = std.array_list.Managed(Node.Index).init(allocator);
defer ordered_nodes.deinit();
try ordered_nodes.ensureTotalCapacityPrecise(self.nodes.items(.is_terminal).len);
src/link/MachO/CodeSignature.zig
@@ -276,7 +276,7 @@ pub fn writeAdhocSignature(
.count = 0,
};
- var blobs = std.ArrayList(Blob).init(allocator);
+ var blobs = std.array_list.Managed(Blob).init(allocator);
defer blobs.deinit();
self.code_directory.inner.execSegBase = opts.exec_seg_base;
@@ -304,7 +304,7 @@ pub fn writeAdhocSignature(
var hash: [hash_size]u8 = undefined;
if (self.requirements) |*req| {
- var buf = std.ArrayList(u8).init(allocator);
+ var buf = std.array_list.Managed(u8).init(allocator);
defer buf.deinit();
try req.write(buf.writer());
Sha256.hash(buf.items, &hash, .{});
@@ -316,7 +316,7 @@ pub fn writeAdhocSignature(
}
if (self.entitlements) |*ents| {
- var buf = std.ArrayList(u8).init(allocator);
+ var buf = std.array_list.Managed(u8).init(allocator);
defer buf.deinit();
try ents.write(buf.writer());
Sha256.hash(buf.items, &hash, .{});
src/link/MachO/dead_strip.zig
@@ -1,12 +1,12 @@
pub fn gcAtoms(macho_file: *MachO) !void {
const gpa = macho_file.base.comp.gpa;
- var objects = try std.ArrayList(File.Index).initCapacity(gpa, macho_file.objects.items.len + 1);
+ var objects = try std.array_list.Managed(File.Index).initCapacity(gpa, macho_file.objects.items.len + 1);
defer objects.deinit();
for (macho_file.objects.items) |index| objects.appendAssumeCapacity(index);
if (macho_file.internal_object) |index| objects.appendAssumeCapacity(index);
- var roots = std.ArrayList(*Atom).init(gpa);
+ var roots = std.array_list.Managed(*Atom).init(gpa);
defer roots.deinit();
try collectRoots(&roots, objects.items, macho_file);
@@ -14,7 +14,7 @@ pub fn gcAtoms(macho_file: *MachO) !void {
prune(objects.items, macho_file);
}
-fn collectRoots(roots: *std.ArrayList(*Atom), objects: []const File.Index, macho_file: *MachO) !void {
+fn collectRoots(roots: *std.array_list.Managed(*Atom), objects: []const File.Index, macho_file: *MachO) !void {
for (objects) |index| {
const object = macho_file.getFile(index).?;
for (object.getSymbols(), 0..) |*sym, i| {
@@ -76,7 +76,7 @@ fn collectRoots(roots: *std.ArrayList(*Atom), objects: []const File.Index, macho
}
}
-fn markSymbol(sym: *Symbol, roots: *std.ArrayList(*Atom), macho_file: *MachO) !void {
+fn markSymbol(sym: *Symbol, roots: *std.array_list.Managed(*Atom), macho_file: *MachO) !void {
const atom = sym.getAtom(macho_file) orelse return;
if (markAtom(atom)) try roots.append(atom);
}
src/link/MachO/Dylib.zig
@@ -814,7 +814,7 @@ pub const TargetMatcher = struct {
const targets = switch (tbd) {
.v3 => |v3| blk: {
- var targets = std.ArrayList([]const u8).init(arena.allocator());
+ var targets = std.array_list.Managed([]const u8).init(arena.allocator());
for (v3.archs) |arch| {
if (mem.eql(u8, v3.platform, "zippered")) {
// From Xcode 10.3 - 11.3.1, macos SDK .tbd files specify platform as 'zippered'
src/link/MachO/eh_frame.zig
@@ -275,7 +275,7 @@ pub fn calcSize(macho_file: *MachO) !u32 {
var offset: u32 = 0;
- var cies = std.ArrayList(Cie).init(macho_file.base.comp.gpa);
+ var cies = std.array_list.Managed(Cie).init(macho_file.base.comp.gpa);
defer cies.deinit();
for (macho_file.objects.items) |index| {
src/link/MachO/InternalObject.zig
@@ -402,7 +402,7 @@ pub fn resolveLiterals(self: *InternalObject, lp: *MachO.LiteralPool, macho_file
const gpa = macho_file.base.comp.gpa;
- var buffer = std.ArrayList(u8).init(gpa);
+ var buffer = std.array_list.Managed(u8).init(gpa);
defer buffer.deinit();
const slice = self.sections.slice();
src/link/MachO/Object.zig
@@ -199,7 +199,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
}
};
- var nlists = try std.ArrayList(NlistIdx).initCapacity(gpa, self.symtab.items(.nlist).len);
+ var nlists = try std.array_list.Managed(NlistIdx).initCapacity(gpa, self.symtab.items(.nlist).len);
defer nlists.deinit();
for (self.symtab.items(.nlist), 0..) |nlist, i| {
if (nlist.stab() or !nlist.sect()) continue;
@@ -633,7 +633,7 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
const gpa = macho_file.base.comp.gpa;
const file = macho_file.getFileHandle(self.file_handle);
- var buffer = std.ArrayList(u8).init(gpa);
+ var buffer = std.array_list.Managed(u8).init(gpa);
defer buffer.deinit();
var sections_data = std.AutoHashMap(u32, []const u8).init(gpa);
src/link/MachO/relocatable.zig
@@ -3,7 +3,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
const diags = &macho_file.base.comp.link_diags;
// TODO: "positional arguments" is a CLI concept, not a linker concept. Delete this unnecessary array list.
- var positionals = std.ArrayList(link.Input).init(gpa);
+ var positionals = std.array_list.Managed(link.Input).init(gpa);
defer positionals.deinit();
try positionals.ensureUnusedCapacity(comp.link_inputs.len);
positionals.appendSliceAssumeCapacity(comp.link_inputs);
@@ -81,7 +81,7 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
const gpa = comp.gpa;
const diags = &macho_file.base.comp.link_diags;
- var positionals = std.ArrayList(link.Input).init(gpa);
+ var positionals = std.array_list.Managed(link.Input).init(gpa);
defer positionals.deinit();
try positionals.ensureUnusedCapacity(comp.link_inputs.len);
@@ -143,7 +143,7 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
try zo.readFileContents(macho_file);
}
- var files = std.ArrayList(File.Index).init(gpa);
+ var files = std.array_list.Managed(File.Index).init(gpa);
defer files.deinit();
try files.ensureTotalCapacityPrecise(macho_file.objects.items.len + 1);
if (macho_file.getZigObject()) |zo| files.appendAssumeCapacity(zo.index);
@@ -205,7 +205,7 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
state_log.debug("ar_symtab\n{f}\n", .{ar_symtab.fmt(macho_file)});
}
- var buffer = std.ArrayList(u8).init(gpa);
+ var buffer = std.array_list.Managed(u8).init(gpa);
defer buffer.deinit();
try buffer.ensureTotalCapacityPrecise(total_size);
const writer = buffer.writer();
@@ -417,7 +417,7 @@ fn calcSymtabSize(macho_file: *MachO) error{OutOfMemory}!void {
var nimports: u32 = 0;
var strsize: u32 = 1;
- var objects = try std.ArrayList(File.Index).initCapacity(gpa, macho_file.objects.items.len + 1);
+ var objects = try std.array_list.Managed(File.Index).initCapacity(gpa, macho_file.objects.items.len + 1);
defer objects.deinit();
if (macho_file.getZigObject()) |zo| objects.appendAssumeCapacity(zo.index);
objects.appendSliceAssumeCapacity(macho_file.objects.items);
src/link/SpirV/BinaryModule.zig
@@ -280,7 +280,7 @@ pub const Parser = struct {
self: *Parser,
binary: BinaryModule,
inst: Instruction,
- offsets: *std.ArrayList(u16),
+ offsets: *std.array_list.Managed(u16),
) !void {
const index = self.opcode_table.get(mapSetAndOpcode(.core, @intFromEnum(inst.opcode))).?;
const operands = InstructionSet.core.instructions()[index].operands;
@@ -333,7 +333,7 @@ pub const Parser = struct {
inst: Instruction,
operands: []const spec.Operand,
start_offset: usize,
- offsets: *std.ArrayList(u16),
+ offsets: *std.array_list.Managed(u16),
) !usize {
var offset = start_offset;
for (operands) |operand| {
@@ -348,7 +348,7 @@ pub const Parser = struct {
inst: Instruction,
operand: spec.Operand,
start_offset: usize,
- offsets: *std.ArrayList(u16),
+ offsets: *std.array_list.Managed(u16),
) !usize {
var offset = start_offset;
switch (operand.quantifier) {
@@ -371,7 +371,7 @@ pub const Parser = struct {
inst: Instruction,
kind: spec.OperandKind,
start_offset: usize,
- offsets: *std.ArrayList(u16),
+ offsets: *std.array_list.Managed(u16),
) !usize {
var offset = start_offset;
if (offset >= inst.operands.len) return error.InvalidPhysicalFormat;
src/link/SpirV/lower_invocation_globals.zig
@@ -74,9 +74,9 @@ const ModuleInfo = struct {
param_types: []const ResultId,
}).init(arena);
var calls = std.AutoArrayHashMap(ResultId, void).init(arena);
- var callee_store = std.ArrayList(ResultId).init(arena);
+ var callee_store = std.array_list.Managed(ResultId).init(arena);
var function_invocation_globals = std.AutoArrayHashMap(ResultId, void).init(arena);
- var result_id_offsets = std.ArrayList(u16).init(arena);
+ var result_id_offsets = std.array_list.Managed(u16).init(arena);
var invocation_globals = std.AutoArrayHashMap(ResultId, InvocationGlobal).init(arena);
var maybe_current_function: ?ResultId = null;
@@ -498,8 +498,8 @@ const ModuleBuilder = struct {
binary: BinaryModule,
info: ModuleInfo,
) !void {
- var result_id_offsets = std.ArrayList(u16).init(self.arena);
- var operands = std.ArrayList(u32).init(self.arena);
+ var result_id_offsets = std.array_list.Managed(u16).init(self.arena);
+ var operands = std.array_list.Managed(u32).init(self.arena);
var maybe_current_function: ?ResultId = null;
var it = binary.iterateInstructionsFrom(binary.sections.functions);
src/link/tapi/yaml/test.zig
@@ -407,7 +407,7 @@ test "duplicate map keys" {
}
fn testStringify(expected: []const u8, input: anytype) !void {
- var output = std.ArrayList(u8).init(testing.allocator);
+ var output = std.array_list.Managed(u8).init(testing.allocator);
defer output.deinit();
try yaml_mod.stringify(testing.allocator, input, output.writer());
src/link/tapi/parse.zig
@@ -231,7 +231,7 @@ pub const Tree = struct {
pub fn parse(self: *Tree, source: []const u8) !void {
var tokenizer = Tokenizer{ .buffer = source };
- var tokens = std.ArrayList(Token).init(self.allocator);
+ var tokens = std.array_list.Managed(Token).init(self.allocator);
defer tokens.deinit();
var line: usize = 0;
src/link/tapi/Tokenizer.zig
@@ -297,7 +297,7 @@ fn testExpected(source: []const u8, expected: []const Token.Id) !void {
.buffer = source,
};
- var given = std.ArrayList(Token.Id).init(testing.allocator);
+ var given = std.array_list.Managed(Token.Id).init(testing.allocator);
defer given.deinit();
while (true) {
src/link/tapi/yaml.zig
@@ -172,7 +172,7 @@ pub const Value = union(enum) {
return Value{ .map = out_map };
} else if (node.cast(Node.List)) |list| {
- var out_list = std.ArrayList(Value).init(arena);
+ var out_list = std.array_list.Managed(Value).init(arena);
try out_list.ensureUnusedCapacity(list.values.items.len);
for (list.values.items) |elem| {
@@ -211,7 +211,7 @@ pub const Value = union(enum) {
.float => return Value{ .float = math.lossyCast(f64, input) },
.@"struct" => |info| if (info.is_tuple) {
- var list = std.ArrayList(Value).init(arena);
+ var list = std.array_list.Managed(Value).init(arena);
errdefer list.deinit();
try list.ensureTotalCapacityPrecise(info.fields.len);
@@ -262,7 +262,7 @@ pub const Value = union(enum) {
return Value{ .string = try arena.dupe(u8, input) };
}
- var list = std.ArrayList(Value).init(arena);
+ var list = std.array_list.Managed(Value).init(arena);
errdefer list.deinit();
try list.ensureTotalCapacityPrecise(input.len);
@@ -298,7 +298,7 @@ pub const Value = union(enum) {
pub const Yaml = struct {
arena: ArenaAllocator,
tree: ?Tree = null,
- docs: std.ArrayList(Value),
+ docs: std.array_list.Managed(Value),
pub fn deinit(self: *Yaml) void {
self.arena.deinit();
@@ -311,7 +311,7 @@ pub const Yaml = struct {
var tree = Tree.init(arena.allocator());
try tree.parse(source);
- var docs = std.ArrayList(Value).init(arena.allocator());
+ var docs = std.array_list.Managed(Value).init(arena.allocator());
try docs.ensureTotalCapacityPrecise(tree.docs.items.len);
for (tree.docs.items) |node| {
src/link/Wasm/Flush.zig
@@ -1051,7 +1051,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
},
}
- var debug_bytes = std.ArrayList(u8).init(gpa);
+ var debug_bytes = std.array_list.Managed(u8).init(gpa);
defer debug_bytes.deinit();
try emitProducerSection(gpa, binary_bytes);
@@ -1396,7 +1396,7 @@ pub fn emitExpr(wasm: *const Wasm, binary_bytes: *std.ArrayListUnmanaged(u8), ex
try binary_bytes.appendSlice(gpa, slice[0 .. slice.len + 1]); // +1 to include end opcode
}
-fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *std.ArrayList(u8)) !void {
+fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *std.array_list.Managed(u8)) !void {
const gpa = wasm.base.comp.gpa;
const writer = binary_bytes.writer(gpa);
try leb.writeUleb128(writer, @intFromEnum(Wasm.SubsectionType.segment_info));
src/link/C.zig
@@ -766,7 +766,7 @@ pub fn flushEmitH(zcu: *Zcu) !void {
// We collect a list of buffers to write, and write them all at once with pwritev
const num_buffers = emit_h.decl_table.count() + 1;
- var all_buffers = try std.ArrayList(std.posix.iovec_const).initCapacity(zcu.gpa, num_buffers);
+ var all_buffers = try std.array_list.Managed(std.posix.iovec_const).initCapacity(zcu.gpa, num_buffers);
defer all_buffers.deinit();
var file_size: u64 = zig_h.len;
src/link/Coff.zig
@@ -771,7 +771,7 @@ fn writeAtom(coff: *Coff, atom_index: Atom.Index, code: []u8, resolve_relocs: bo
// if we are running in hot-code swapping mode or not.
// TODO: how crazy would it be to try and apply the actual image base of the loaded
// process for the in-file values rather than the Windows defaults?
- var relocs = std.ArrayList(*Relocation).init(gpa);
+ var relocs = std.array_list.Managed(*Relocation).init(gpa);
defer relocs.deinit();
if (resolve_relocs) {
@@ -1680,7 +1680,7 @@ fn flushInner(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id) !void {
const section = coff.sections.get(@intFromEnum(sym.section_number) - 1).header;
const file_offset = section.pointer_to_raw_data + sym.value - section.virtual_address;
- var code = std.ArrayList(u8).init(gpa);
+ var code = std.array_list.Managed(u8).init(gpa);
defer code.deinit();
try code.resize(math.cast(usize, atom.size) orelse return error.Overflow);
assert(atom.size > 0);
@@ -1893,7 +1893,7 @@ pub fn updateLineNumber(coff: *Coff, pt: Zcu.PerThread, ti_id: InternPool.Tracke
fn writeBaseRelocations(coff: *Coff) !void {
const gpa = coff.base.comp.gpa;
- var page_table = std.AutoHashMap(u32, std.ArrayList(coff_util.BaseRelocation)).init(gpa);
+ var page_table = std.AutoHashMap(u32, std.array_list.Managed(coff_util.BaseRelocation)).init(gpa);
defer {
var it = page_table.valueIterator();
while (it.next()) |inner| {
@@ -1915,7 +1915,7 @@ fn writeBaseRelocations(coff: *Coff) !void {
const page = mem.alignBackward(u32, rva, coff.page_size);
const gop = try page_table.getOrPut(page);
if (!gop.found_existing) {
- gop.value_ptr.* = std.ArrayList(coff_util.BaseRelocation).init(gpa);
+ gop.value_ptr.* = std.array_list.Managed(coff_util.BaseRelocation).init(gpa);
}
try gop.value_ptr.append(.{
.offset = @as(u12, @intCast(rva - page)),
@@ -1936,7 +1936,7 @@ fn writeBaseRelocations(coff: *Coff) !void {
const page = mem.alignBackward(u32, rva, coff.page_size);
const gop = try page_table.getOrPut(page);
if (!gop.found_existing) {
- gop.value_ptr.* = std.ArrayList(coff_util.BaseRelocation).init(gpa);
+ gop.value_ptr.* = std.array_list.Managed(coff_util.BaseRelocation).init(gpa);
}
try gop.value_ptr.append(.{
.offset = @as(u12, @intCast(rva - page)),
@@ -1947,7 +1947,7 @@ fn writeBaseRelocations(coff: *Coff) !void {
}
// Sort pages by address.
- var pages = try std.ArrayList(u32).initCapacity(gpa, page_table.count());
+ var pages = try std.array_list.Managed(u32).initCapacity(gpa, page_table.count());
defer pages.deinit();
{
var it = page_table.keyIterator();
@@ -1957,7 +1957,7 @@ fn writeBaseRelocations(coff: *Coff) !void {
}
mem.sort(u32, pages.items, {}, std.sort.asc(u32));
- var buffer = std.ArrayList(u8).init(gpa);
+ var buffer = std.array_list.Managed(u8).init(gpa);
defer buffer.deinit();
for (pages.items) |page| {
@@ -2030,7 +2030,7 @@ fn writeImportTables(coff: *Coff) !void {
try coff.growSection(coff.idata_section_index.?, needed_size);
// Do the actual writes
- var buffer = std.ArrayList(u8).init(gpa);
+ var buffer = std.array_list.Managed(u8).init(gpa);
defer buffer.deinit();
try buffer.ensureTotalCapacityPrecise(needed_size);
buffer.resize(needed_size) catch unreachable;
@@ -2153,7 +2153,7 @@ fn writeStrtab(coff: *Coff) !void {
log.debug("writing strtab from 0x{x} to 0x{x}", .{ coff.strtab_offset.?, coff.strtab_offset.? + needed_size });
- var buffer = std.ArrayList(u8).init(gpa);
+ var buffer = std.array_list.Managed(u8).init(gpa);
defer buffer.deinit();
try buffer.ensureTotalCapacityPrecise(needed_size);
buffer.appendSliceAssumeCapacity(coff.strtab.buffer.items);
@@ -2179,7 +2179,7 @@ fn writeDataDirectoriesHeaders(coff: *Coff) !void {
fn writeHeader(coff: *Coff) !void {
const target = &coff.base.comp.root_mod.resolved_target.result;
const gpa = coff.base.comp.gpa;
- var buffer = std.ArrayList(u8).init(gpa);
+ var buffer = std.array_list.Managed(u8).init(gpa);
defer buffer.deinit();
const writer = buffer.writer();
src/link/Elf.zig
@@ -882,7 +882,7 @@ fn flushInner(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id) !void {
self.rela_plt.clearRetainingCapacity();
if (self.zigObjectPtr()) |zo| {
- var undefs: std.AutoArrayHashMap(SymbolResolver.Index, std.ArrayList(Ref)) = .init(gpa);
+ var undefs: std.AutoArrayHashMap(SymbolResolver.Index, std.array_list.Managed(Ref)) = .init(gpa);
defer {
for (undefs.values()) |*refs| refs.deinit();
undefs.deinit();
@@ -1326,7 +1326,7 @@ fn scanRelocs(self: *Elf) !void {
const gpa = self.base.comp.gpa;
const shared_objects = self.shared_objects.values();
- var undefs: std.AutoArrayHashMap(SymbolResolver.Index, std.ArrayList(Ref)) = .init(gpa);
+ var undefs: std.AutoArrayHashMap(SymbolResolver.Index, std.array_list.Managed(Ref)) = .init(gpa);
defer {
for (undefs.values()) |*refs| refs.deinit();
undefs.deinit();
@@ -1849,7 +1849,7 @@ pub fn updateMergeSectionSizes(self: *Elf) !void {
pub fn writeMergeSections(self: *Elf) !void {
const gpa = self.base.comp.gpa;
- var buffer = std.ArrayList(u8).init(gpa);
+ var buffer = std.array_list.Managed(u8).init(gpa);
defer buffer.deinit();
for (self.merge_sections.items) |*msec| {
@@ -2214,7 +2214,7 @@ fn sortInitFini(self: *Elf) !void {
}
if (!is_init_fini and !is_ctor_dtor) continue;
- var entries = std.ArrayList(Entry).init(gpa);
+ var entries = std.array_list.Managed(Entry).init(gpa);
try entries.ensureTotalCapacityPrecise(atom_list.atoms.keys().len);
defer entries.deinit();
@@ -2771,7 +2771,7 @@ pub fn allocateAllocSections(self: *Elf) !void {
// virtual and file offsets. However, the simple one will do for one
// as we are more interested in quick turnaround and compatibility
// with `findFreeSpace` mechanics than anything else.
- const Cover = std.ArrayList(u32);
+ const Cover = std.array_list.Managed(u32);
const gpa = self.base.comp.gpa;
var covers: [max_number_of_object_segments]Cover = undefined;
for (&covers) |*cover| {
@@ -2999,13 +2999,13 @@ fn allocateSpecialPhdrs(self: *Elf) void {
fn writeAtoms(self: *Elf) !void {
const gpa = self.base.comp.gpa;
- var undefs: std.AutoArrayHashMap(SymbolResolver.Index, std.ArrayList(Ref)) = .init(gpa);
+ var undefs: std.AutoArrayHashMap(SymbolResolver.Index, std.array_list.Managed(Ref)) = .init(gpa);
defer {
for (undefs.values()) |*refs| refs.deinit();
undefs.deinit();
}
- var buffer = std.ArrayList(u8).init(gpa);
+ var buffer = std.array_list.Managed(u8).init(gpa);
defer buffer.deinit();
const slice = self.sections.slice();
@@ -3048,7 +3048,7 @@ pub fn updateSymtabSize(self: *Elf) !void {
const gpa = self.base.comp.gpa;
const shared_objects = self.shared_objects.values();
- var files = std.ArrayList(File.Index).init(gpa);
+ var files = std.array_list.Managed(File.Index).init(gpa);
defer files.deinit();
try files.ensureTotalCapacityPrecise(self.objects.items.len + shared_objects.len + 2);
@@ -3166,7 +3166,7 @@ fn writeSyntheticSections(self: *Elf) !void {
if (self.section_indexes.verneed) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- var buffer = try std.ArrayList(u8).initCapacity(gpa, self.verneed.size());
+ var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.verneed.size());
defer buffer.deinit();
try self.verneed.write(buffer.writer());
try self.pwriteAll(buffer.items, shdr.sh_offset);
@@ -3174,7 +3174,7 @@ fn writeSyntheticSections(self: *Elf) !void {
if (self.section_indexes.dynamic) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- var buffer = try std.ArrayList(u8).initCapacity(gpa, self.dynamic.size(self));
+ var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.dynamic.size(self));
defer buffer.deinit();
try self.dynamic.write(self, buffer.writer());
try self.pwriteAll(buffer.items, shdr.sh_offset);
@@ -3182,7 +3182,7 @@ fn writeSyntheticSections(self: *Elf) !void {
if (self.section_indexes.dynsymtab) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- var buffer = try std.ArrayList(u8).initCapacity(gpa, self.dynsym.size());
+ var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.dynsym.size());
defer buffer.deinit();
try self.dynsym.write(self, buffer.writer());
try self.pwriteAll(buffer.items, shdr.sh_offset);
@@ -3201,7 +3201,7 @@ fn writeSyntheticSections(self: *Elf) !void {
};
const shdr = slice.items(.shdr)[shndx];
const sh_size = try self.cast(usize, shdr.sh_size);
- var buffer = try std.ArrayList(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
+ var buffer = try std.array_list.Managed(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
defer buffer.deinit();
try eh_frame.writeEhFrame(self, buffer.writer());
assert(buffer.items.len == sh_size - existing_size);
@@ -3211,7 +3211,7 @@ fn writeSyntheticSections(self: *Elf) !void {
if (self.section_indexes.eh_frame_hdr) |shndx| {
const shdr = slice.items(.shdr)[shndx];
const sh_size = try self.cast(usize, shdr.sh_size);
- var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
+ var buffer = try std.array_list.Managed(u8).initCapacity(gpa, sh_size);
defer buffer.deinit();
try eh_frame.writeEhFrameHdr(self, buffer.writer());
try self.pwriteAll(buffer.items, shdr.sh_offset);
@@ -3219,7 +3219,7 @@ fn writeSyntheticSections(self: *Elf) !void {
if (self.section_indexes.got) |index| {
const shdr = slice.items(.shdr)[index];
- var buffer = try std.ArrayList(u8).initCapacity(gpa, self.got.size(self));
+ var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.got.size(self));
defer buffer.deinit();
try self.got.write(self, buffer.writer());
try self.pwriteAll(buffer.items, shdr.sh_offset);
@@ -3235,7 +3235,7 @@ fn writeSyntheticSections(self: *Elf) !void {
if (self.section_indexes.plt) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- var buffer = try std.ArrayList(u8).initCapacity(gpa, self.plt.size(self));
+ var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.plt.size(self));
defer buffer.deinit();
try self.plt.write(self, buffer.writer());
try self.pwriteAll(buffer.items, shdr.sh_offset);
@@ -3243,7 +3243,7 @@ fn writeSyntheticSections(self: *Elf) !void {
if (self.section_indexes.got_plt) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- var buffer = try std.ArrayList(u8).initCapacity(gpa, self.got_plt.size(self));
+ var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.got_plt.size(self));
defer buffer.deinit();
try self.got_plt.write(self, buffer.writer());
try self.pwriteAll(buffer.items, shdr.sh_offset);
@@ -3251,7 +3251,7 @@ fn writeSyntheticSections(self: *Elf) !void {
if (self.section_indexes.plt_got) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- var buffer = try std.ArrayList(u8).initCapacity(gpa, self.plt_got.size(self));
+ var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.plt_got.size(self));
defer buffer.deinit();
try self.plt_got.write(self, buffer.writer());
try self.pwriteAll(buffer.items, shdr.sh_offset);
src/link/Lld.zig
@@ -409,7 +409,7 @@ fn coffLink(lld: *Lld, arena: Allocator) !void {
);
} else {
// Create an LLD command line and invoke it.
- var argv = std.ArrayList([]const u8).init(gpa);
+ var argv = std.array_list.Managed([]const u8).init(gpa);
defer argv.deinit();
// We will invoke ourselves as a child process to gain access to LLD.
// This is necessary because LLD does not behave properly as a library -
@@ -863,7 +863,7 @@ fn elfLink(lld: *Lld, arena: Allocator) !void {
);
} else {
// Create an LLD command line and invoke it.
- var argv = std.ArrayList([]const u8).init(gpa);
+ var argv = std.array_list.Managed([]const u8).init(gpa);
defer argv.deinit();
// We will invoke ourselves as a child process to gain access to LLD.
// This is necessary because LLD does not behave properly as a library -
@@ -1412,7 +1412,7 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void {
);
} else {
// Create an LLD command line and invoke it.
- var argv = std.ArrayList([]const u8).init(gpa);
+ var argv = std.array_list.Managed([]const u8).init(gpa);
defer argv.deinit();
// We will invoke ourselves as a child process to gain access to LLD.
// This is necessary because LLD does not behave properly as a library -
src/link/MachO.zig
@@ -359,7 +359,7 @@ pub fn flush(
if (self.base.isStaticLib()) return relocatable.flushStaticLib(self, comp, zcu_obj_path);
if (self.base.isObject()) return relocatable.flushObject(self, comp, zcu_obj_path);
- var positionals = std.ArrayList(link.Input).init(gpa);
+ var positionals = std.array_list.Managed(link.Input).init(gpa);
defer positionals.deinit();
try positionals.ensureUnusedCapacity(comp.link_inputs.len);
@@ -404,7 +404,7 @@ pub fn flush(
diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});
}
- var system_libs = std.ArrayList(SystemLib).init(gpa);
+ var system_libs = std.array_list.Managed(SystemLib).init(gpa);
defer system_libs.deinit();
// frameworks
@@ -632,7 +632,7 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
break :p try p.toString(arena);
} else null;
- var argv = std.ArrayList([]const u8).init(arena);
+ var argv = std.array_list.Managed([]const u8).init(arena);
try argv.append("zig");
@@ -827,8 +827,8 @@ pub fn resolveLibSystem(
) !void {
const diags = &self.base.comp.link_diags;
- var test_path = std.ArrayList(u8).init(arena);
- var checked_paths = std.ArrayList([]const u8).init(arena);
+ var test_path = std.array_list.Managed(u8).init(arena);
+ var checked_paths = std.array_list.Managed([]const u8).init(arena);
success: {
if (self.sdk_layout) |sdk_layout| switch (sdk_layout) {
@@ -1065,8 +1065,8 @@ fn isHoisted(self: *MachO, install_name: []const u8) bool {
/// TODO delete this, libraries must be instead resolved when instantiating the compilation pipeline
fn accessLibPath(
arena: Allocator,
- test_path: *std.ArrayList(u8),
- checked_paths: *std.ArrayList([]const u8),
+ test_path: *std.array_list.Managed(u8),
+ checked_paths: *std.array_list.Managed([]const u8),
search_dir: []const u8,
name: []const u8,
) !bool {
@@ -1088,8 +1088,8 @@ fn accessLibPath(
fn accessFrameworkPath(
arena: Allocator,
- test_path: *std.ArrayList(u8),
- checked_paths: *std.ArrayList([]const u8),
+ test_path: *std.array_list.Managed(u8),
+ checked_paths: *std.array_list.Managed([]const u8),
search_dir: []const u8,
name: []const u8,
) !bool {
@@ -1138,7 +1138,7 @@ fn parseDependentDylibs(self: *MachO) !void {
while (index < self.dylibs.items.len) : (index += 1) {
const dylib_index = self.dylibs.items[index];
- var dependents = std.ArrayList(File.Index).init(gpa);
+ var dependents = std.array_list.Managed(File.Index).init(gpa);
defer dependents.deinit();
try dependents.ensureTotalCapacityPrecise(self.getFile(dylib_index).?.dylib.dependents.items.len);
@@ -1151,8 +1151,8 @@ fn parseDependentDylibs(self: *MachO) !void {
// 3. If name is a relative path, substitute @rpath, @loader_path, @executable_path with
// dependees list of rpaths, and search there.
// 4. Finally, just search the provided relative path directly in CWD.
- var test_path = std.ArrayList(u8).init(arena);
- var checked_paths = std.ArrayList([]const u8).init(arena);
+ var test_path = std.array_list.Managed(u8).init(arena);
+ var checked_paths = std.array_list.Managed([]const u8).init(arena);
const full_path = full_path: {
{
@@ -1550,7 +1550,7 @@ fn reportUndefs(self: *MachO) !void {
const max_notes = 4;
// We will sort by name, and then by file to ensure deterministic output.
- var keys = try std.ArrayList(SymbolResolver.Index).initCapacity(gpa, self.undefs.keys().len);
+ var keys = try std.array_list.Managed(SymbolResolver.Index).initCapacity(gpa, self.undefs.keys().len);
defer keys.deinit();
keys.appendSliceAssumeCapacity(self.undefs.keys());
self.sortGlobalSymbolsByName(keys.items);
@@ -1813,7 +1813,7 @@ pub fn sortSections(self: *MachO) !void {
const gpa = self.base.comp.gpa;
- var entries = try std.ArrayList(Entry).initCapacity(gpa, self.sections.slice().len);
+ var entries = try std.array_list.Managed(Entry).initCapacity(gpa, self.sections.slice().len);
defer entries.deinit();
for (0..self.sections.slice().len) |index| {
entries.appendAssumeCapacity(.{ .index = @intCast(index) });
@@ -2123,7 +2123,7 @@ fn initSegments(self: *MachO) !void {
}
};
- var entries = try std.ArrayList(Entry).initCapacity(gpa, self.segments.items.len);
+ var entries = try std.array_list.Managed(Entry).initCapacity(gpa, self.segments.items.len);
defer entries.deinit();
for (0..self.segments.items.len) |index| {
entries.appendAssumeCapacity(.{ .index = @intCast(index) });
@@ -2689,7 +2689,7 @@ pub fn writeDataInCode(self: *MachO) !void {
defer tracy.end();
const gpa = self.base.comp.gpa;
const cmd = self.data_in_code_cmd;
- var buffer = try std.ArrayList(u8).initCapacity(gpa, self.data_in_code.size());
+ var buffer = try std.array_list.Managed(u8).initCapacity(gpa, self.data_in_code.size());
defer buffer.deinit();
try self.data_in_code.write(self, buffer.writer());
try self.pwriteAll(buffer.items, cmd.dataoff);
@@ -2701,7 +2701,7 @@ fn writeIndsymtab(self: *MachO) !void {
const gpa = self.base.comp.gpa;
const cmd = self.dysymtab_cmd;
const needed_size = cmd.nindirectsyms * @sizeOf(u32);
- var buffer = try std.ArrayList(u8).initCapacity(gpa, needed_size);
+ var buffer = try std.array_list.Managed(u8).initCapacity(gpa, needed_size);
defer buffer.deinit();
try self.indsymtab.write(self, buffer.writer());
try self.pwriteAll(buffer.items, cmd.indirectsymoff);
@@ -2746,7 +2746,7 @@ fn calcSymtabSize(self: *MachO) !void {
const gpa = self.base.comp.gpa;
- var files = std.ArrayList(File.Index).init(gpa);
+ var files = std.array_list.Managed(File.Index).init(gpa);
defer files.deinit();
try files.ensureTotalCapacityPrecise(self.objects.items.len + self.dylibs.items.len + 2);
if (self.zig_object) |index| files.appendAssumeCapacity(index);
@@ -3015,7 +3015,7 @@ pub fn writeCodeSignature(self: *MachO, code_sig: *CodeSignature) !void {
const seg = self.getTextSegment();
const offset = self.codesig_cmd.dataoff;
- var buffer = std.ArrayList(u8).init(self.base.comp.gpa);
+ var buffer = std.array_list.Managed(u8).init(self.base.comp.gpa);
defer buffer.deinit();
try buffer.ensureTotalCapacityPrecise(code_sig.size());
try code_sig.writeAdhocSignature(self, .{
@@ -3837,7 +3837,7 @@ fn reportDuplicates(self: *MachO) error{ HasDuplicates, OutOfMemory }!void {
const max_notes = 3;
// We will sort by name, and then by file to ensure deterministic output.
- var keys = try std.ArrayList(SymbolResolver.Index).initCapacity(gpa, self.dupes.keys().len);
+ var keys = try std.array_list.Managed(SymbolResolver.Index).initCapacity(gpa, self.dupes.keys().len);
defer keys.deinit();
keys.appendSliceAssumeCapacity(self.dupes.keys());
self.sortGlobalSymbolsByName(keys.items);
@@ -4269,7 +4269,7 @@ pub const Platform = struct {
/// Caller owns the memory.
pub fn allocPrintTarget(plat: Platform, gpa: Allocator, cpu_arch: std.Target.Cpu.Arch) error{OutOfMemory}![]u8 {
- var buffer = std.ArrayList(u8).init(gpa);
+ var buffer = std.array_list.Managed(u8).init(gpa);
defer buffer.deinit();
try buffer.writer().print("{f}", .{plat.fmtTarget(cpu_arch)});
return buffer.toOwnedSlice();
src/link/tapi.zig
@@ -83,7 +83,7 @@ pub const Tbd = union(enum) {
/// Caller owns memory.
pub fn targets(self: Tbd, gpa: Allocator) error{OutOfMemory}![]const []const u8 {
- var out = std.ArrayList([]const u8).init(gpa);
+ var out = std.array_list.Managed([]const u8).init(gpa);
defer out.deinit();
switch (self) {
src/Package/Fetch.zig
@@ -173,7 +173,7 @@ pub const JobQueue = struct {
/// Creates the dependencies.zig source code for the build runner to obtain
/// via `@import("@dependencies")`.
- pub fn createDependenciesSource(jq: *JobQueue, buf: *std.ArrayList(u8)) Allocator.Error!void {
+ pub fn createDependenciesSource(jq: *JobQueue, buf: *std.array_list.Managed(u8)) Allocator.Error!void {
const keys = jq.table.keys();
assert(keys.len != 0); // caller should have added the first one
@@ -285,7 +285,7 @@ pub const JobQueue = struct {
try buf.appendSlice("};\n");
}
- pub fn createEmptyDependenciesSource(buf: *std.ArrayList(u8)) Allocator.Error!void {
+ pub fn createEmptyDependenciesSource(buf: *std.array_list.Managed(u8)) Allocator.Error!void {
try buf.appendSlice(
\\pub const packages = struct {};
\\pub const root_deps: []const struct { []const u8, []const u8 } = &.{};
@@ -1474,10 +1474,10 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
const root_dir = pkg_path.root_dir.handle;
// Collect all files, recursively, then sort.
- var all_files = std.ArrayList(*HashedFile).init(gpa);
+ var all_files = std.array_list.Managed(*HashedFile).init(gpa);
defer all_files.deinit();
- var deleted_files = std.ArrayList(*DeletedFile).init(gpa);
+ var deleted_files = std.array_list.Managed(*DeletedFile).init(gpa);
defer deleted_files.deinit();
// Track directories which had any files deleted from them so that empty directories
src/Package/Module.zig
@@ -336,8 +336,8 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
if (resolved_target.llvm_cpu_features) |x| break :b x;
if (!options.global.use_llvm) break :b null;
- var buf = std.ArrayList(u8).init(arena);
- var disabled_features = std.ArrayList(u8).init(arena);
+ var buf = std.array_list.Managed(u8).init(arena);
+ var disabled_features = std.array_list.Managed(u8).init(arena);
defer disabled_features.deinit();
// Append disabled features after enabled ones, so that their effects aren't overwritten.
src/Sema/bitcast.zig
@@ -102,7 +102,7 @@ fn bitCastInner(
.arena = sema.arena,
.skip_bits = skip_bits,
.remaining_bits = dest_ty.bitSize(zcu),
- .unpacked = std.ArrayList(InternPool.Index).init(sema.arena),
+ .unpacked = std.array_list.Managed(InternPool.Index).init(sema.arena),
};
switch (endian) {
.little => {
@@ -163,7 +163,7 @@ fn bitCastSpliceInner(
.arena = sema.arena,
.skip_bits = 0,
.remaining_bits = splice_offset,
- .unpacked = std.ArrayList(InternPool.Index).init(sema.arena),
+ .unpacked = std.array_list.Managed(InternPool.Index).init(sema.arena),
};
switch (endian) {
.little => {
@@ -216,7 +216,7 @@ const UnpackValueBits = struct {
skip_bits: u64,
remaining_bits: u64,
extra_bits: u64 = undefined,
- unpacked: std.ArrayList(InternPool.Index),
+ unpacked: std.array_list.Managed(InternPool.Index),
fn add(unpack: *UnpackValueBits, val: Value) BitCastError!void {
const pt = unpack.pt;
src/Zcu/PerThread.zig
@@ -727,7 +727,7 @@ fn analyzeMemoizedState(pt: Zcu.PerThread, stage: InternPool.MemoizedStateStage)
var analysis_arena: std.heap.ArenaAllocator = .init(gpa);
defer analysis_arena.deinit();
- var comptime_err_ret_trace: std.ArrayList(Zcu.LazySrcLoc) = .init(gpa);
+ var comptime_err_ret_trace: std.array_list.Managed(Zcu.LazySrcLoc) = .init(gpa);
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
@@ -870,7 +870,7 @@ fn analyzeComptimeUnit(pt: Zcu.PerThread, cu_id: InternPool.ComptimeUnit.Id) Zcu
var analysis_arena: std.heap.ArenaAllocator = .init(gpa);
defer analysis_arena.deinit();
- var comptime_err_ret_trace: std.ArrayList(Zcu.LazySrcLoc) = .init(gpa);
+ var comptime_err_ret_trace: std.array_list.Managed(Zcu.LazySrcLoc) = .init(gpa);
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
@@ -1097,7 +1097,7 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr
var analysis_arena: std.heap.ArenaAllocator = .init(gpa);
defer analysis_arena.deinit();
- var comptime_err_ret_trace: std.ArrayList(Zcu.LazySrcLoc) = .init(gpa);
+ var comptime_err_ret_trace: std.array_list.Managed(Zcu.LazySrcLoc) = .init(gpa);
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
@@ -1471,7 +1471,7 @@ fn analyzeNavType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileEr
var analysis_arena: std.heap.ArenaAllocator = .init(gpa);
defer analysis_arena.deinit();
- var comptime_err_ret_trace: std.ArrayList(Zcu.LazySrcLoc) = .init(gpa);
+ var comptime_err_ret_trace: std.array_list.Managed(Zcu.LazySrcLoc) = .init(gpa);
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
@@ -2807,7 +2807,7 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
- var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
+ var comptime_err_ret_trace = std.array_list.Managed(Zcu.LazySrcLoc).init(gpa);
defer comptime_err_ret_trace.deinit();
// In the case of a generic function instance, this is the type of the
src/Builtin.zig
@@ -40,12 +40,12 @@ pub fn hash(opts: @This()) [std.Build.Cache.bin_digest_len]u8 {
}
pub fn generate(opts: @This(), allocator: Allocator) Allocator.Error![:0]u8 {
- var buffer = std.ArrayList(u8).init(allocator);
+ var buffer = std.array_list.Managed(u8).init(allocator);
try append(opts, &buffer);
return buffer.toOwnedSliceSentinel(0);
}
-pub fn append(opts: @This(), buffer: *std.ArrayList(u8)) Allocator.Error!void {
+pub fn append(opts: @This(), buffer: *std.array_list.Managed(u8)) Allocator.Error!void {
const target = opts.target;
const arch_family_name = @tagName(target.cpu.arch.family());
const zig_backend = opts.zig_backend;
src/Compilation.zig
@@ -3644,10 +3644,10 @@ pub fn saveState(comp: *Compilation) !void {
const gpa = comp.gpa;
- var bufs = std.ArrayList([]const u8).init(gpa);
+ var bufs = std.array_list.Managed([]const u8).init(gpa);
defer bufs.deinit();
- var pt_headers = std.ArrayList(Header.PerThread).init(gpa);
+ var pt_headers = std.array_list.Managed(Header.PerThread).init(gpa);
defer pt_headers.deinit();
if (comp.zcu) |zcu| {
@@ -3865,7 +3865,7 @@ pub fn saveState(comp: *Compilation) !void {
try af.finish();
}
-fn addBuf(list: *std.ArrayList([]const u8), buf: []const u8) void {
+fn addBuf(list: *std.array_list.Managed([]const u8), buf: []const u8) void {
if (buf.len == 0) return;
list.appendAssumeCapacity(buf);
}
@@ -5657,7 +5657,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8, owner_mod: *Package.Module
log.info("C import source: {s}", .{out_h_path});
}
- var argv = std.ArrayList([]const u8).init(comp.gpa);
+ var argv = std.array_list.Managed([]const u8).init(comp.gpa);
defer argv.deinit();
try argv.append(@tagName(comp.config.c_frontend)); // argv[0] is program name, actual args start at [1]
@@ -6113,7 +6113,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
const target = comp.getTarget();
const o_ext = target.ofmt.fileExt(target.cpu.arch);
const digest = if (!comp.disable_c_depfile and try man.hit()) man.final() else blk: {
- var argv = std.ArrayList([]const u8).init(gpa);
+ var argv = std.array_list.Managed([]const u8).init(gpa);
defer argv.deinit();
// In case we are doing passthrough mode, we need to detect -S and -emit-llvm.
@@ -6458,7 +6458,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
try o_dir.writeFile(.{ .sub_path = rc_basename, .data = input });
- var argv = std.ArrayList([]const u8).init(comp.gpa);
+ var argv = std.array_list.Managed([]const u8).init(comp.gpa);
defer argv.deinit();
try argv.appendSlice(&.{
@@ -6515,7 +6515,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
// so we need a temporary filename.
const out_res_path = try comp.tmpFilePath(arena, res_filename);
- var argv = std.ArrayList([]const u8).init(comp.gpa);
+ var argv = std.array_list.Managed([]const u8).init(comp.gpa);
defer argv.deinit();
const depfile_filename = try std.fmt.allocPrint(arena, "{s}.d.json", .{rc_basename_noext});
@@ -6698,7 +6698,7 @@ pub fn tmpFilePath(comp: Compilation, ally: Allocator, suffix: []const u8) error
pub fn addTranslateCCArgs(
comp: *Compilation,
arena: Allocator,
- argv: *std.ArrayList([]const u8),
+ argv: *std.array_list.Managed([]const u8),
ext: FileExt,
out_dep_path: ?[]const u8,
owner_mod: *Package.Module,
@@ -6713,7 +6713,7 @@ pub fn addTranslateCCArgs(
pub fn addCCArgs(
comp: *const Compilation,
arena: Allocator,
- argv: *std.ArrayList([]const u8),
+ argv: *std.array_list.Managed([]const u8),
ext: FileExt,
out_dep_path: ?[]const u8,
mod: *Package.Module,
src/fmt.zig
@@ -46,9 +46,9 @@ pub fn run(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var check_flag = false;
var check_ast_flag = false;
var force_zon = false;
- var input_files = std.ArrayList([]const u8).init(gpa);
+ var input_files = std.array_list.Managed([]const u8).init(gpa);
defer input_files.deinit();
- var excluded_files = std.ArrayList([]const u8).init(gpa);
+ var excluded_files = std.array_list.Managed([]const u8).init(gpa);
defer excluded_files.deinit();
{
src/link.zig
@@ -166,7 +166,7 @@ pub const Diags = struct {
) Allocator.Error!void {
const gpa = diags.gpa;
- var context_lines = std.ArrayList([]const u8).init(gpa);
+ var context_lines = std.array_list.Managed([]const u8).init(gpa);
defer context_lines.deinit();
var current_err: ?*Lld = null;
src/main.zig
@@ -6,7 +6,6 @@ const fs = std.fs;
const mem = std.mem;
const process = std.process;
const Allocator = mem.Allocator;
-const ArrayList = std.ArrayList;
const Ast = std.zig.Ast;
const Color = std.zig.Color;
const warn = std.log.warn;
@@ -1876,8 +1875,8 @@ fn buildOutputType(
var c_out_mode: ?COutMode = null;
var out_path: ?[]const u8 = null;
var is_shared_lib = false;
- var preprocessor_args = std.ArrayList([]const u8).init(arena);
- var linker_args = std.ArrayList([]const u8).init(arena);
+ var preprocessor_args = std.array_list.Managed([]const u8).init(arena);
+ var linker_args = std.array_list.Managed([]const u8).init(arena);
var it = ClangArgIterator.init(arena, all_args);
var emit_llvm = false;
var needed = false;
@@ -3136,16 +3135,16 @@ fn buildOutputType(
}
}
- var resolved_frameworks = std.ArrayList(Compilation.Framework).init(arena);
+ var resolved_frameworks = std.array_list.Managed(Compilation.Framework).init(arena);
if (create_module.frameworks.keys().len > 0) {
- var test_path = std.ArrayList(u8).init(gpa);
+ var test_path = std.array_list.Managed(u8).init(gpa);
defer test_path.deinit();
- var checked_paths = std.ArrayList(u8).init(gpa);
+ var checked_paths = std.array_list.Managed(u8).init(gpa);
defer checked_paths.deinit();
- var failed_frameworks = std.ArrayList(struct {
+ var failed_frameworks = std.array_list.Managed(struct {
name: []const u8,
checked_paths: []const u8,
}).init(arena);
@@ -3774,7 +3773,7 @@ fn createModule(
try llvm_to_zig_name.put(llvm_name, feature.name);
}
- var mcpu_buffer = std.ArrayList(u8).init(gpa);
+ var mcpu_buffer = std.array_list.Managed(u8).init(gpa);
defer mcpu_buffer.deinit();
try mcpu_buffer.appendSlice(cli_mod.target_mcpu orelse "baseline");
@@ -4332,7 +4331,7 @@ fn runOrTest(
}),
};
- var argv = std.ArrayList([]const u8).init(gpa);
+ var argv = std.array_list.Managed([]const u8).init(gpa);
defer argv.deinit();
if (test_exec_args.len == 0) {
@@ -4453,7 +4452,7 @@ fn runOrTestHotSwap(
};
defer gpa.free(exe_path);
- var argv = std.ArrayList([]const u8).init(gpa);
+ var argv = std.array_list.Managed([]const u8).init(gpa);
defer argv.deinit();
if (test_exec_args.len == 0) {
@@ -4570,7 +4569,7 @@ fn cmdTranslateC(
break :digest .{ bin_digest, hex_digest };
} else digest: {
if (fancy_output) |p| p.cache_hit = false;
- var argv = std.ArrayList([]const u8).init(arena);
+ var argv = std.array_list.Managed([]const u8).init(arena);
switch (comp.config.c_frontend) {
.aro => {},
.clang => {
@@ -4877,7 +4876,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var override_global_cache_dir: ?[]const u8 = try EnvVar.ZIG_GLOBAL_CACHE_DIR.get(arena);
var override_local_cache_dir: ?[]const u8 = try EnvVar.ZIG_LOCAL_CACHE_DIR.get(arena);
var override_build_runner: ?[]const u8 = try EnvVar.ZIG_BUILD_RUNNER.get(arena);
- var child_argv = std.ArrayList([]const u8).init(arena);
+ var child_argv = std.array_list.Managed([]const u8).init(arena);
var reference_trace: ?u32 = null;
var debug_compile_errors = false;
var verbose_link = (native_os != .wasi or builtin.link_libc) and
@@ -5304,7 +5303,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
if (fetch_only) return cleanExit();
- var source_buf = std.ArrayList(u8).init(gpa);
+ var source_buf = std.array_list.Managed(u8).init(gpa);
defer source_buf.deinit();
try job_queue.createDependenciesSource(&source_buf);
const deps_mod = try createDependenciesModule(
@@ -5986,7 +5985,7 @@ pub const ClangArgIterator = struct {
// NOTE: The ArgIteratorResponseFile returns tokens from next() that are slices of an
// internal buffer. This internal buffer is arena allocated, so it is not cleaned up here.
- var resp_arg_list = std.ArrayList([]const u8).init(arena);
+ var resp_arg_list = std.array_list.Managed([]const u8).init(arena);
defer resp_arg_list.deinit();
{
while (self.arg_iterator_response_file.next()) |token| {
@@ -6868,8 +6867,8 @@ const ClangSearchSanitizer = struct {
};
fn accessFrameworkPath(
- test_path: *std.ArrayList(u8),
- checked_paths: *std.ArrayList(u8),
+ test_path: *std.array_list.Managed(u8),
+ checked_paths: *std.array_list.Managed(u8),
framework_dir_path: []const u8,
framework_name: []const u8,
) !bool {
@@ -7214,7 +7213,7 @@ fn createEmptyDependenciesModule(
dirs: Compilation.Directories,
global_options: Compilation.Config,
) !void {
- var source = std.ArrayList(u8).init(arena);
+ var source = std.array_list.Managed(u8).init(arena);
try Package.Fetch.JobQueue.createEmptyDependenciesSource(&source);
_ = try createDependenciesModule(
arena,
@@ -7418,7 +7417,7 @@ fn loadManifest(
const Templates = struct {
zig_lib_directory: Cache.Directory,
dir: fs.Dir,
- buffer: std.ArrayList(u8),
+ buffer: std.array_list.Managed(u8),
fn deinit(templates: *Templates) void {
templates.zig_lib_directory.handle.close();
@@ -7510,7 +7509,7 @@ fn findTemplates(gpa: Allocator, arena: Allocator) Templates {
return .{
.zig_lib_directory = zig_lib_directory,
.dir = template_dir,
- .buffer = std.ArrayList(u8).init(gpa),
+ .buffer = std.array_list.Managed(u8).init(gpa),
};
}
src/RangeSet.zig
@@ -10,7 +10,7 @@ const RangeSet = @This();
const LazySrcLoc = Zcu.LazySrcLoc;
zcu: *Zcu,
-ranges: std.ArrayList(Range),
+ranges: std.array_list.Managed(Range),
pub const Range = struct {
first: InternPool.Index,
@@ -21,7 +21,7 @@ pub const Range = struct {
pub fn init(allocator: std.mem.Allocator, zcu: *Zcu) RangeSet {
return .{
.zcu = zcu,
- .ranges = std.ArrayList(Range).init(allocator),
+ .ranges = std.array_list.Managed(Range).init(allocator),
};
}
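
For reference, a minimal sketch of the pattern these hunks follow, assuming the post-rename std APIs behave as the surrounding changes suggest: std.array_list.Managed keeps the old allocator-storing init/append/deinit, while std.ArrayList now names the unmanaged variant that starts as .empty and takes the allocator per call (as in the filenames change in test/src/Cases.zig below).

const std = @import("std");

test "managed vs unmanaged list after the rename" {
    const gpa = std.testing.allocator;

    // Old managed API, as used throughout this commit: the allocator is
    // stored inside the list, so append/deinit take no allocator argument.
    var managed = std.array_list.Managed(u32).init(gpa);
    defer managed.deinit();
    try managed.append(1);

    // std.ArrayList is now the unmanaged variant: it starts as .empty and
    // every allocating method receives the allocator explicitly.
    var unmanaged: std.ArrayList(u32) = .empty;
    defer unmanaged.deinit(gpa);
    try unmanaged.append(gpa, 2);

    try std.testing.expectEqual(@as(u32, 1), managed.items[0]);
    try std.testing.expectEqual(@as(u32, 2), unmanaged.items[0]);
}
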
src/Sema.zig
@@ -63,7 +63,7 @@ func_index: InternPool.Index,
func_is_naked: bool,
/// Used to restore the error return trace when returning a non-error from a function.
error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
-comptime_err_ret_trace: *std.ArrayList(LazySrcLoc),
+comptime_err_ret_trace: *std.array_list.Managed(LazySrcLoc),
/// When semantic analysis needs to know the return type of the function whose body
/// is being analyzed, this `Type` should be used instead of going through `func`.
/// This will correctly handle the case of a comptime/inline function call of a
@@ -376,7 +376,7 @@ pub const Block = struct {
/// What mode to generate float operations in, set by @setFloatMode
float_mode: std.builtin.FloatMode = .strict,
- c_import_buf: ?*std.ArrayList(u8) = null,
+ c_import_buf: ?*std.array_list.Managed(u8) = null,
/// If not `null`, this boolean is set when a `dbg_var_ptr`, `dbg_var_val`, or `dbg_arg_inline`.
/// instruction is emitted. It signals that the innermost lexically
@@ -3931,7 +3931,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
// Whilst constructing our mapping, we will also initialize optional and error union payloads when
// we encounter the corresponding pointers. For this reason, the ordering of `to_map` matters.
- var to_map = try std.ArrayList(Air.Inst.Index).initCapacity(sema.arena, stores.len);
+ var to_map = try std.array_list.Managed(Air.Inst.Index).initCapacity(sema.arena, stores.len);
for (stores) |store_inst_idx| {
const store_inst = sema.air_instructions.get(@intFromEnum(store_inst_idx));
@@ -5665,7 +5665,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
if (!build_options.have_llvm)
return sema.fail(parent_block, src, "C import unavailable; Zig compiler built without LLVM extensions", .{});
- var c_import_buf = std.ArrayList(u8).init(gpa);
+ var c_import_buf = std.array_list.Managed(u8).init(gpa);
defer c_import_buf.deinit();
var child_block: Block = .{
@@ -10701,7 +10701,7 @@ const SwitchProngAnalysis = struct {
const prong_count = field_indices.len - in_mem_coercible.count();
const estimated_extra = prong_count * 6 + (prong_count / 10); // 2 for Case, 1 item, probably 3 insts; plus hints
- var cases_extra = try std.ArrayList(u32).initCapacity(sema.gpa, estimated_extra);
+ var cases_extra = try std.array_list.Managed(u32).initCapacity(sema.gpa, estimated_extra);
defer cases_extra.deinit();
{
@@ -17603,7 +17603,7 @@ fn typeInfoDecls(
const declaration_ty = try sema.getBuiltinType(src, .@"Type.Declaration");
- var decl_vals = std.ArrayList(InternPool.Index).init(gpa);
+ var decl_vals = std.array_list.Managed(InternPool.Index).init(gpa);
defer decl_vals.deinit();
var seen_namespaces = std.AutoHashMap(*Namespace, void).init(gpa);
@@ -17645,7 +17645,7 @@ fn typeInfoNamespaceDecls(
sema: *Sema,
opt_namespace_index: InternPool.OptionalNamespaceIndex,
declaration_ty: Type,
- decl_vals: *std.ArrayList(InternPool.Index),
+ decl_vals: *std.array_list.Managed(InternPool.Index),
seen_namespaces: *std.AutoHashMap(*Namespace, void),
) !void {
const pt = sema.pt;
@@ -29670,7 +29670,7 @@ fn coerceInMemoryAllowedErrorSets(
}
}
- var missing_error_buf = std.ArrayList(InternPool.NullTerminatedString).init(gpa);
+ var missing_error_buf = std.array_list.Managed(InternPool.NullTerminatedString).init(gpa);
defer missing_error_buf.deinit();
switch (src_ty.toIntern()) {
@@ -37151,7 +37151,7 @@ pub fn resolveDeclaredEnum(
var arena: std.heap.ArenaAllocator = .init(gpa);
defer arena.deinit();
- var comptime_err_ret_trace: std.ArrayList(Zcu.LazySrcLoc) = .init(gpa);
+ var comptime_err_ret_trace: std.array_list.Managed(Zcu.LazySrcLoc) = .init(gpa);
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
src/translate_c.zig
@@ -924,10 +924,10 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordD
break :blk Tag.opaque_literal.init();
};
- var fields = std.ArrayList(ast.Payload.Record.Field).init(c.gpa);
+ var fields = std.array_list.Managed(ast.Payload.Record.Field).init(c.gpa);
defer fields.deinit();
- var functions = std.ArrayList(Node).init(c.gpa);
+ var functions = std.array_list.Managed(Node).init(c.gpa);
defer functions.deinit();
const flexible_field = flexibleArrayField(c, record_def);
@@ -2606,7 +2606,7 @@ fn transInitListExprRecord(
const ty_node = try transType(c, scope, ty, loc);
const init_count = expr.getNumInits();
- var field_inits = std.ArrayList(ast.Payload.ContainerInit.Initializer).init(c.gpa);
+ var field_inits = std.array_list.Managed(ast.Payload.ContainerInit.Initializer).init(c.gpa);
defer field_inits.deinit();
if (init_count == 0) {
@@ -3116,7 +3116,7 @@ fn transSwitch(
defer cond_scope.deinit();
const switch_expr = try transExpr(c, &cond_scope.base, stmt.getCond(), .used);
- var cases = std.ArrayList(Node).init(c.gpa);
+ var cases = std.array_list.Managed(Node).init(c.gpa);
defer cases.deinit();
var has_default = false;
@@ -3130,7 +3130,7 @@ fn transSwitch(
while (it != end_it) : (it += 1) {
switch (it[0].getStmtClass()) {
.CaseStmtClass => {
- var items = std.ArrayList(Node).init(c.gpa);
+ var items = std.array_list.Managed(Node).init(c.gpa);
defer items.deinit();
const sub = try transCaseStmt(c, base_scope, it[0], &items);
const res = try transSwitchProngStmt(c, base_scope, sub, it, end_it);
@@ -3185,7 +3185,7 @@ fn transSwitch(
/// Collects all items for this case, returns the first statement after the labels.
/// If items ends up empty, the prong should be translated as an else.
-fn transCaseStmt(c: *Context, scope: *Scope, stmt: *const clang.Stmt, items: *std.ArrayList(Node)) TransError!*const clang.Stmt {
+fn transCaseStmt(c: *Context, scope: *Scope, stmt: *const clang.Stmt, items: *std.array_list.Managed(Node)) TransError!*const clang.Stmt {
var sub = stmt;
var seen_default = false;
while (true) {
@@ -4716,7 +4716,7 @@ fn transCreateNodeNumber(c: *Context, num: anytype, num_kind: enum { int, float
}
fn transCreateNodeMacroFn(c: *Context, name: []const u8, ref: Node, proto_alias: *ast.Payload.Func) !Node {
- var fn_params = std.ArrayList(ast.Payload.Param).init(c.gpa);
+ var fn_params = std.array_list.Managed(ast.Payload.Param).init(c.gpa);
defer fn_params.deinit();
for (proto_alias.data.params) |param| {
@@ -5115,7 +5115,7 @@ fn finishTransFnProto(
const scope = &c.global_scope.base;
const param_count: usize = if (fn_proto_ty != null) fn_proto_ty.?.getNumParams() else 0;
- var fn_params = try std.ArrayList(ast.Payload.Param).initCapacity(c.gpa, param_count);
+ var fn_params = try std.array_list.Managed(ast.Payload.Param).initCapacity(c.gpa, param_count);
defer fn_params.deinit();
var i: usize = 0;
@@ -5333,7 +5333,7 @@ fn transPreprocessorEntities(c: *Context, unit: *clang.ASTUnit) Error!void {
// TODO if we see #undef, delete it from the table
var it = unit.getLocalPreprocessingEntities_begin();
const it_end = unit.getLocalPreprocessingEntities_end();
- var tok_list = std.ArrayList(CToken).init(c.gpa);
+ var tok_list = std.array_list.Managed(CToken).init(c.gpa);
defer tok_list.deinit();
const scope = c.global_scope;
@@ -5484,7 +5484,7 @@ fn transMacroFnDefine(c: *Context, m: *MacroCtx) ParseError!void {
try m.skip(c, .l_paren);
- var fn_params = std.ArrayList(ast.Payload.Param).init(c.gpa);
+ var fn_params = std.array_list.Managed(ast.Payload.Param).init(c.gpa);
defer fn_params.deinit();
while (true) {
@@ -6459,7 +6459,7 @@ fn parseCPostfixExprInner(c: *Context, m: *MacroCtx, scope: *Scope, type_name: ?
m.i += 1;
node = try Tag.call.create(c.arena, .{ .lhs = node, .args = &[0]Node{} });
} else {
- var args = std.ArrayList(Node).init(c.gpa);
+ var args = std.array_list.Managed(Node).init(c.gpa);
defer args.deinit();
while (true) {
const arg = try parseCCondExpr(c, m, scope);
@@ -6480,7 +6480,7 @@ fn parseCPostfixExprInner(c: *Context, m: *MacroCtx, scope: *Scope, type_name: ?
.l_brace => {
// Check for designated field initializers
if (m.peek().? == .period) {
- var init_vals = std.ArrayList(ast.Payload.ContainerInitDot.Initializer).init(c.gpa);
+ var init_vals = std.array_list.Managed(ast.Payload.ContainerInitDot.Initializer).init(c.gpa);
defer init_vals.deinit();
while (true) {
@@ -6506,7 +6506,7 @@ fn parseCPostfixExprInner(c: *Context, m: *MacroCtx, scope: *Scope, type_name: ?
continue;
}
- var init_vals = std.ArrayList(Node).init(c.gpa);
+ var init_vals = std.array_list.Managed(Node).init(c.gpa);
defer init_vals.deinit();
while (true) {
src/Type.zig
@@ -3805,7 +3805,7 @@ fn resolveStructInner(
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
- var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
+ var comptime_err_ret_trace = std.array_list.Managed(Zcu.LazySrcLoc).init(gpa);
defer comptime_err_ret_trace.deinit();
const zir = zcu.namespacePtr(struct_obj.namespace).fileScope(zcu).zir.?;
@@ -3864,7 +3864,7 @@ fn resolveUnionInner(
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
- var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
+ var comptime_err_ret_trace = std.array_list.Managed(Zcu.LazySrcLoc).init(gpa);
defer comptime_err_ret_trace.deinit();
const zir = zcu.namespacePtr(union_obj.namespace).fileScope(zcu).zir.?;
test/behavior/struct.zig
@@ -1826,7 +1826,7 @@ test "assign to slice.len of global variable" {
const S = struct {
const allocator = std.testing.allocator;
- var list = std.ArrayList(u32).init(allocator);
+ var list = std.array_list.Managed(u32).init(allocator);
};
S.list.items.len = 0;
test/behavior/var_args.zig
@@ -200,14 +200,14 @@ test "variadic functions" {
if (builtin.cpu.arch.isSPARC() and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23718
const S = struct {
- fn printf(list_ptr: *std.ArrayList(u8), format: [*:0]const u8, ...) callconv(.c) void {
+ fn printf(list_ptr: *std.array_list.Managed(u8), format: [*:0]const u8, ...) callconv(.c) void {
var ap = @cVaStart();
defer @cVaEnd(&ap);
vprintf(list_ptr, format, &ap);
}
fn vprintf(
- list: *std.ArrayList(u8),
+ list: *std.array_list.Managed(u8),
format: [*:0]const u8,
ap: *std.builtin.VaList,
) callconv(.c) void {
@@ -225,7 +225,7 @@ test "variadic functions" {
}
};
- var list = std.ArrayList(u8).init(std.testing.allocator);
+ var list = std.array_list.Managed(u8).init(std.testing.allocator);
defer list.deinit();
S.printf(&list, "dsd", @as(c_int, 1), @as([*:0]const u8, "hello"), @as(c_int, 5));
try std.testing.expectEqualStrings("1hello5", list.items);
test/src/Cases.zig
@@ -1,7 +1,15 @@
+const Cases = @This();
+const builtin = @import("builtin");
+const std = @import("std");
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+const getExternalExecutor = std.zig.system.getExternalExecutor;
+const ArrayList = std.ArrayList;
+
gpa: Allocator,
arena: Allocator,
-cases: std.ArrayList(Case),
-translate: std.ArrayList(Translate),
+cases: std.array_list.Managed(Case),
+translate: std.array_list.Managed(Translate),
pub const IncrementalCase = struct {
base_path: []const u8,
@@ -40,7 +48,7 @@ pub const Case = struct {
output_mode: std.builtin.OutputMode,
optimize_mode: std.builtin.OptimizeMode = .Debug,
- files: std.ArrayList(File),
+ files: std.array_list.Managed(File),
case: ?union(enum) {
/// Check that it compiles with no errors.
Compile: void,
@@ -77,7 +85,7 @@ pub const Case = struct {
/// `lower_to_build_steps`. If null, file imports will assert.
import_path: ?[]const u8 = null,
- deps: std.ArrayList(DepModule),
+ deps: std.array_list.Managed(DepModule),
pub fn addSourceFile(case: *Case, name: []const u8, src: [:0]const u8) void {
case.files.append(.{ .path = name, .src = src }) catch @panic("OOM");
@@ -148,7 +156,7 @@ pub fn addExe(
.files = .init(ctx.arena),
.case = null,
.output_mode = .Exe,
- .deps = std.ArrayList(DepModule).init(ctx.arena),
+ .deps = std.array_list.Managed(DepModule).init(ctx.arena),
}) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1];
}
@@ -167,7 +175,7 @@ pub fn exeFromCompiledC(ctx: *Cases, name: []const u8, target_query: std.Target.
.files = .init(ctx.arena),
.case = null,
.output_mode = .Exe,
- .deps = std.ArrayList(DepModule).init(ctx.arena),
+ .deps = std.array_list.Managed(DepModule).init(ctx.arena),
.link_libc = true,
}) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1];
@@ -197,7 +205,7 @@ pub fn addObjLlvm(ctx: *Cases, name: []const u8, target: std.Build.ResolvedTarge
.files = .init(ctx.arena),
.case = null,
.output_mode = .Obj,
- .deps = std.ArrayList(DepModule).init(ctx.arena),
+ .deps = std.array_list.Managed(DepModule).init(ctx.arena),
.backend = .llvm,
.emit_bin = can_emit_bin,
.emit_asm = can_emit_asm,
@@ -216,7 +224,7 @@ pub fn addObj(
.files = .init(ctx.arena),
.case = null,
.output_mode = .Obj,
- .deps = std.ArrayList(DepModule).init(ctx.arena),
+ .deps = std.array_list.Managed(DepModule).init(ctx.arena),
}) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1];
}
@@ -233,7 +241,7 @@ pub fn addTest(
.case = null,
.output_mode = .Exe,
.is_test = true,
- .deps = std.ArrayList(DepModule).init(ctx.arena),
+ .deps = std.array_list.Managed(DepModule).init(ctx.arena),
}) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1];
}
@@ -258,7 +266,7 @@ pub fn addC(ctx: *Cases, name: []const u8, target: std.Build.ResolvedTarget) *Ca
.files = .init(ctx.arena),
.case = null,
.output_mode = .Obj,
- .deps = std.ArrayList(DepModule).init(ctx.arena),
+ .deps = std.array_list.Managed(DepModule).init(ctx.arena),
}) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1];
}
@@ -364,7 +372,7 @@ fn addFromDirInner(
b: *std.Build,
) !void {
var it = try iterable_dir.walk(ctx.arena);
- var filenames: std.ArrayListUnmanaged([]const u8) = .empty;
+ var filenames: ArrayList([]const u8) = .empty;
while (try it.next()) |entry| {
if (entry.kind != .file) continue;
@@ -428,7 +436,7 @@ fn addFromDirInner(
continue;
}
- var cases = std.ArrayList(usize).init(ctx.arena);
+ var cases = std.array_list.Managed(usize).init(ctx.arena);
// Cross-product to get all possible test combinations
for (targets) |target_query| {
@@ -462,7 +470,7 @@ fn addFromDirInner(
.link_libc = link_libc,
.pic = pic,
.pie = pie,
- .deps = std.ArrayList(DepModule).init(ctx.cases.allocator),
+ .deps = std.array_list.Managed(DepModule).init(ctx.cases.allocator),
.imports = imports,
.target = resolved_target,
});
@@ -495,8 +503,8 @@ fn addFromDirInner(
pub fn init(gpa: Allocator, arena: Allocator) Cases {
return .{
.gpa = gpa,
- .cases = std.ArrayList(Case).init(gpa),
- .translate = std.ArrayList(Translate).init(gpa),
+ .cases = std.array_list.Managed(Case).init(gpa),
+ .translate = std.array_list.Managed(Translate).init(gpa),
.arena = arena,
};
}
@@ -995,7 +1003,7 @@ const TestManifest = struct {
key: []const u8,
comptime T: type,
) ![]const T {
- var out = std.ArrayList(T).init(allocator);
+ var out = std.array_list.Managed(T).init(allocator);
defer out.deinit();
var it = self.getConfigForKey(key, T);
while (try it.next()) |item| {
@@ -1018,7 +1026,7 @@ const TestManifest = struct {
}
fn trailingSplit(self: TestManifest, allocator: Allocator) error{OutOfMemory}![]const u8 {
- var out = std.ArrayList(u8).init(allocator);
+ var out = std.array_list.Managed(u8).init(allocator);
defer out.deinit();
var trailing_it = self.trailing();
while (trailing_it.next()) |line| {
@@ -1032,7 +1040,7 @@ const TestManifest = struct {
}
fn trailingLines(self: TestManifest, allocator: Allocator) error{OutOfMemory}![]const []const u8 {
- var out = std.ArrayList([]const u8).init(allocator);
+ var out = std.array_list.Managed([]const u8).init(allocator);
defer out.deinit();
var it = self.trailing();
while (it.next()) |line| {
@@ -1043,9 +1051,9 @@ const TestManifest = struct {
fn trailingLinesSplit(self: TestManifest, allocator: Allocator) error{OutOfMemory}![]const []const u8 {
// Collect output lines split by empty lines
- var out = std.ArrayList([]const u8).init(allocator);
+ var out = std.array_list.Managed([]const u8).init(allocator);
defer out.deinit();
- var buf = std.ArrayList(u8).init(allocator);
+ var buf = std.array_list.Managed(u8).init(allocator);
defer buf.deinit();
var it = self.trailing();
while (it.next()) |line| {
@@ -1119,13 +1127,6 @@ const TestManifest = struct {
}
};
-const Cases = @This();
-const builtin = @import("builtin");
-const std = @import("std");
-const assert = std.debug.assert;
-const Allocator = std.mem.Allocator;
-const getExternalExecutor = std.zig.system.getExternalExecutor;
-
fn resolveTargetQuery(query: std.Target.Query) std.Build.ResolvedTarget {
return .{
.query = query,
test/src/check-stack-trace.zig
@@ -24,7 +24,7 @@ pub fn main() !void {
// - replace function name with symbolic string when optimize_mode != .Debug
// - skip empty lines
const got: []const u8 = got_result: {
- var buf = std.ArrayList(u8).init(arena);
+ var buf = std.array_list.Managed(u8).init(arena);
defer buf.deinit();
if (stderr.len != 0 and stderr[stderr.len - 1] == '\n') stderr = stderr[0 .. stderr.len - 1];
var it = mem.splitScalar(u8, stderr, '\n');
test/src/CompareOutput.zig
@@ -15,7 +15,7 @@ const Special = enum {
const TestCase = struct {
name: []const u8,
- sources: ArrayList(SourceFile),
+ sources: std.array_list.Managed(SourceFile),
expected_output: []const u8,
link_libc: bool,
special: Special,
@@ -41,7 +41,7 @@ const TestCase = struct {
pub fn createExtra(self: *CompareOutput, name: []const u8, source: []const u8, expected_output: []const u8, special: Special) TestCase {
var tc = TestCase{
.name = name,
- .sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
+ .sources = std.array_list.Managed(TestCase.SourceFile).init(self.b.allocator),
.expected_output = expected_output,
.link_libc = false,
.special = special,
@@ -170,7 +170,6 @@ pub fn addCase(self: *CompareOutput, case: TestCase) void {
const CompareOutput = @This();
const std = @import("std");
-const ArrayList = std.ArrayList;
const mem = std.mem;
const fs = std.fs;
const OptimizeMode = std.builtin.OptimizeMode;
test/src/RunTranslatedC.zig
@@ -6,7 +6,7 @@ target: std.Build.ResolvedTarget,
const TestCase = struct {
name: []const u8,
- sources: ArrayList(SourceFile),
+ sources: std.array_list.Managed(SourceFile),
expected_stdout: []const u8,
allow_warnings: bool,
@@ -34,7 +34,7 @@ pub fn create(
const tc = self.b.allocator.create(TestCase) catch unreachable;
tc.* = TestCase{
.name = name,
- .sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
+ .sources = std.array_list.Managed(TestCase.SourceFile).init(self.b.allocator),
.expected_stdout = expected_stdout,
.allow_warnings = allow_warnings,
};
@@ -103,7 +103,6 @@ pub fn addCase(self: *RunTranslatedCContext, case: *const TestCase) void {
const RunTranslatedCContext = @This();
const std = @import("std");
-const ArrayList = std.ArrayList;
const fmt = std.fmt;
const mem = std.mem;
const fs = std.fs;
test/src/TranslateC.zig
@@ -6,8 +6,8 @@ test_target_filters: []const []const u8,
const TestCase = struct {
name: []const u8,
- sources: ArrayList(SourceFile),
- expected_lines: ArrayList([]const u8),
+ sources: std.array_list.Managed(SourceFile),
+ expected_lines: std.array_list.Managed([]const u8),
allow_warnings: bool,
target: std.Target.Query = .{},
@@ -39,8 +39,8 @@ pub fn create(
const tc = self.b.allocator.create(TestCase) catch unreachable;
tc.* = TestCase{
.name = name,
- .sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
- .expected_lines = ArrayList([]const u8).init(self.b.allocator),
+ .sources = std.array_list.Managed(TestCase.SourceFile).init(self.b.allocator),
+ .expected_lines = std.array_list.Managed([]const u8).init(self.b.allocator),
.allow_warnings = allow_warnings,
};
@@ -125,7 +125,6 @@ pub fn addCase(self: *TranslateCContext, case: *const TestCase) void {
const TranslateCContext = @This();
const std = @import("std");
-const ArrayList = std.ArrayList;
const fmt = std.fmt;
const mem = std.mem;
const fs = std.fs;
test/standalone/windows_argv/fuzz.zig
@@ -41,7 +41,7 @@ pub fn main() !void {
std.debug.print("rand seed: {}\n", .{seed});
}
- var cmd_line_w_buf = std.ArrayList(u16).init(allocator);
+ var cmd_line_w_buf = std.array_list.Managed(u16).init(allocator);
defer cmd_line_w_buf.deinit();
var i: u64 = 0;
@@ -84,7 +84,7 @@ fn randomCommandLineW(allocator: Allocator, rand: std.Random) ![:0]const u16 {
};
const choices = rand.uintAtMostBiased(u16, 256);
- var buf = try std.ArrayList(u16).initCapacity(allocator, choices);
+ var buf = try std.array_list.Managed(u16).initCapacity(allocator, choices);
errdefer buf.deinit();
for (0..choices) |_| {
test/standalone/windows_argv/lib.zig
@@ -17,7 +17,7 @@ fn testArgv(expected_args: []const [*:0]const u16) !void {
const allocator = arena_state.allocator();
const args = try std.process.argsAlloc(allocator);
- var wtf8_buf = std.ArrayList(u8).init(allocator);
+ var wtf8_buf = std.array_list.Managed(u8).init(allocator);
var eql = true;
if (args.len != expected_args.len) eql = false;
test/standalone/windows_bat_args/fuzz.zig
@@ -42,7 +42,7 @@ pub fn main() anyerror!void {
try tmp.dir.setAsCwd();
defer tmp.parent_dir.setAsCwd() catch {};
- var buf = try std.ArrayList(u8).initCapacity(allocator, 128);
+ var buf = try std.array_list.Managed(u8).initCapacity(allocator, 128);
defer buf.deinit();
try buf.appendSlice("@echo off\n");
try buf.append('"');
@@ -80,7 +80,7 @@ fn testExec(allocator: std.mem.Allocator, args: []const []const u8, env: ?*std.p
}
fn testExecBat(allocator: std.mem.Allocator, bat: []const u8, args: []const []const u8, env: ?*std.process.EnvMap) !void {
- var argv = try std.ArrayList([]const u8).initCapacity(allocator, 1 + args.len);
+ var argv = try std.array_list.Managed([]const u8).initCapacity(allocator, 1 + args.len);
defer argv.deinit();
argv.appendAssumeCapacity(bat);
argv.appendSliceAssumeCapacity(args);
@@ -121,7 +121,7 @@ fn randomArg(allocator: Allocator, rand: std.Random) ![]const u8 {
};
const choices = rand.uintAtMostBiased(u16, 256);
- var buf = try std.ArrayList(u8).initCapacity(allocator, choices);
+ var buf = try std.array_list.Managed(u8).initCapacity(allocator, choices);
errdefer buf.deinit();
var last_codepoint: u21 = 0;
test/standalone/windows_bat_args/test.zig
@@ -16,7 +16,7 @@ pub fn main() anyerror!void {
try tmp.dir.setAsCwd();
defer tmp.parent_dir.setAsCwd() catch {};
- var buf = try std.ArrayList(u8).initCapacity(allocator, 128);
+ var buf = try std.array_list.Managed(u8).initCapacity(allocator, 128);
defer buf.deinit();
try buf.appendSlice("@echo off\n");
try buf.append('"');
@@ -127,7 +127,7 @@ fn testExec(allocator: std.mem.Allocator, args: []const []const u8, env: ?*std.p
}
fn testExecBat(allocator: std.mem.Allocator, bat: []const u8, args: []const []const u8, env: ?*std.process.EnvMap) !void {
- var argv = try std.ArrayList([]const u8).initCapacity(allocator, 1 + args.len);
+ var argv = try std.array_list.Managed([]const u8).initCapacity(allocator, 1 + args.len);
defer argv.deinit();
argv.appendAssumeCapacity(bat);
argv.appendSliceAssumeCapacity(args);
tools/docgen.zig
@@ -344,12 +344,12 @@ fn genToc(allocator: Allocator, tokenizer: *Tokenizer) !Toc {
var last_action: Action = .open;
var last_columns: ?u8 = null;
- var toc_buf = std.ArrayList(u8).init(allocator);
+ var toc_buf = std.array_list.Managed(u8).init(allocator);
defer toc_buf.deinit();
var toc = toc_buf.writer();
- var nodes = std.ArrayList(Node).init(allocator);
+ var nodes = std.array_list.Managed(Node).init(allocator);
defer nodes.deinit();
try toc.writeByte('\n');
@@ -449,7 +449,7 @@ fn genToc(allocator: Allocator, tokenizer: *Tokenizer) !Toc {
last_action = .close;
}
} else if (mem.eql(u8, tag_name, "see_also")) {
- var list = std.ArrayList(SeeAlsoItem).init(allocator);
+ var list = std.array_list.Managed(SeeAlsoItem).init(allocator);
errdefer list.deinit();
while (true) {
@@ -599,7 +599,7 @@ fn genToc(allocator: Allocator, tokenizer: *Tokenizer) !Toc {
}
fn urlize(allocator: Allocator, input: []const u8) ![]u8 {
- var buf = std.ArrayList(u8).init(allocator);
+ var buf = std.array_list.Managed(u8).init(allocator);
defer buf.deinit();
const out = buf.writer();
@@ -618,7 +618,7 @@ fn urlize(allocator: Allocator, input: []const u8) ![]u8 {
}
fn escapeHtml(allocator: Allocator, input: []const u8) ![]u8 {
- var buf = std.ArrayList(u8).init(allocator);
+ var buf = std.array_list.Managed(u8).init(allocator);
defer buf.deinit();
const out = buf.writer();
tools/doctest.zig
@@ -126,7 +126,7 @@ fn printOutput(
const obj_ext = builtin.object_format.fileExt(builtin.cpu.arch);
const print = std.debug.print;
- var shell_buffer = std.ArrayList(u8).init(arena);
+ var shell_buffer = std.array_list.Managed(u8).init(arena);
defer shell_buffer.deinit();
var shell_out = shell_buffer.writer();
@@ -134,7 +134,7 @@ fn printOutput(
switch (code.id) {
.exe => |expected_outcome| code_block: {
- var build_args = std.ArrayList([]const u8).init(arena);
+ var build_args = std.array_list.Managed([]const u8).init(arena);
defer build_args.deinit();
try build_args.appendSlice(&[_][]const u8{
zig_exe, "build-exe",
@@ -284,7 +284,7 @@ fn printOutput(
try shell_out.writeAll("\n");
},
.@"test" => {
- var test_args = std.ArrayList([]const u8).init(arena);
+ var test_args = std.array_list.Managed([]const u8).init(arena);
defer test_args.deinit();
try test_args.appendSlice(&[_][]const u8{
@@ -345,7 +345,7 @@ fn printOutput(
try shell_out.print("\n{s}{s}\n", .{ escaped_stderr, escaped_stdout });
},
.test_error => |error_match| {
- var test_args = std.ArrayList([]const u8).init(arena);
+ var test_args = std.array_list.Managed([]const u8).init(arena);
defer test_args.deinit();
try test_args.appendSlice(&[_][]const u8{
@@ -399,7 +399,7 @@ fn printOutput(
try shell_out.print("\n{s}\n", .{colored_stderr});
},
.test_safety => |error_match| {
- var test_args = std.ArrayList([]const u8).init(arena);
+ var test_args = std.array_list.Managed([]const u8).init(arena);
defer test_args.deinit();
try test_args.appendSlice(&[_][]const u8{
@@ -461,7 +461,7 @@ fn printOutput(
},
.obj => |maybe_error_match| {
const name_plus_obj_ext = try std.fmt.allocPrint(arena, "{s}{s}", .{ code_name, obj_ext });
- var build_args = std.ArrayList([]const u8).init(arena);
+ var build_args = std.array_list.Managed([]const u8).init(arena);
defer build_args.deinit();
try build_args.appendSlice(&[_][]const u8{
@@ -543,7 +543,7 @@ fn printOutput(
.output_mode = .Lib,
});
- var test_args = std.ArrayList([]const u8).init(arena);
+ var test_args = std.array_list.Managed([]const u8).init(arena);
defer test_args.deinit();
try test_args.appendSlice(&[_][]const u8{
@@ -975,7 +975,7 @@ fn skipPrefix(line: []const u8) []const u8 {
}
fn escapeHtml(allocator: Allocator, input: []const u8) ![]u8 {
- var buf = std.ArrayList(u8).init(allocator);
+ var buf = std.array_list.Managed(u8).init(allocator);
defer buf.deinit();
const out = buf.writer();
@@ -1011,7 +1011,7 @@ fn termColor(allocator: Allocator, input: []const u8) ![]u8 {
const supported_sgr_colors = [_]u8{ 31, 32, 36 };
const supported_sgr_numbers = [_]u8{ 0, 1, 2 };
- var buf = std.ArrayList(u8).init(allocator);
+ var buf = std.array_list.Managed(u8).init(allocator);
defer buf.deinit();
var out = buf.writer();
@@ -1401,7 +1401,7 @@ test "printShell" {
\\</samp></pre></figure>
;
- var buffer = std.ArrayList(u8).init(test_allocator);
+ var buffer = std.array_list.Managed(u8).init(test_allocator);
defer buffer.deinit();
try printShell(buffer.writer(), shell_out, false);
@@ -1418,7 +1418,7 @@ test "printShell" {
\\</samp></pre></figure>
;
- var buffer = std.ArrayList(u8).init(test_allocator);
+ var buffer = std.array_list.Managed(u8).init(test_allocator);
defer buffer.deinit();
try printShell(buffer.writer(), shell_out, false);
@@ -1432,7 +1432,7 @@ test "printShell" {
\\</samp></pre></figure>
;
- var buffer = std.ArrayList(u8).init(test_allocator);
+ var buffer = std.array_list.Managed(u8).init(test_allocator);
defer buffer.deinit();
try printShell(buffer.writer(), shell_out, false);
@@ -1451,7 +1451,7 @@ test "printShell" {
\\</samp></pre></figure>
;
- var buffer = std.ArrayList(u8).init(test_allocator);
+ var buffer = std.array_list.Managed(u8).init(test_allocator);
defer buffer.deinit();
try printShell(buffer.writer(), shell_out, false);
@@ -1472,7 +1472,7 @@ test "printShell" {
\\</samp></pre></figure>
;
- var buffer = std.ArrayList(u8).init(test_allocator);
+ var buffer = std.array_list.Managed(u8).init(test_allocator);
defer buffer.deinit();
try printShell(buffer.writer(), shell_out, false);
@@ -1491,7 +1491,7 @@ test "printShell" {
\\</samp></pre></figure>
;
- var buffer = std.ArrayList(u8).init(test_allocator);
+ var buffer = std.array_list.Managed(u8).init(test_allocator);
defer buffer.deinit();
try printShell(buffer.writer(), shell_out, false);
@@ -1514,7 +1514,7 @@ test "printShell" {
\\</samp></pre></figure>
;
- var buffer = std.ArrayList(u8).init(test_allocator);
+ var buffer = std.array_list.Managed(u8).init(test_allocator);
defer buffer.deinit();
try printShell(buffer.writer(), shell_out, false);
@@ -1536,7 +1536,7 @@ test "printShell" {
\\</samp></pre></figure>
;
- var buffer = std.ArrayList(u8).init(test_allocator);
+ var buffer = std.array_list.Managed(u8).init(test_allocator);
defer buffer.deinit();
try printShell(buffer.writer(), shell_out, false);
@@ -1553,7 +1553,7 @@ test "printShell" {
\\</samp></pre></figure>
;
- var buffer = std.ArrayList(u8).init(test_allocator);
+ var buffer = std.array_list.Managed(u8).init(test_allocator);
defer buffer.deinit();
try printShell(buffer.writer(), shell_out, false);
@@ -1572,7 +1572,7 @@ test "printShell" {
\\</samp></pre></figure>
;
- var buffer = std.ArrayList(u8).init(test_allocator);
+ var buffer = std.array_list.Managed(u8).init(test_allocator);
defer buffer.deinit();
try printShell(buffer.writer(), shell_out, false);
@@ -1587,7 +1587,7 @@ test "printShell" {
\\</samp></pre></figure>
;
- var buffer = std.ArrayList(u8).init(test_allocator);
+ var buffer = std.array_list.Managed(u8).init(test_allocator);
defer buffer.deinit();
try printShell(buffer.writer(), shell_out, false);
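
The docgen and doctest changes above keep calling buf.writer() on the renamed buffer type; a minimal sketch of that pattern, assuming the managed list's writer() adapter still works the way those unchanged context lines imply:

const std = @import("std");

test "managed byte buffer as a writer" {
    // Assumption: std.array_list.Managed(u8) still exposes writer(), as the
    // toc_buf/shell_buffer context lines in docgen.zig and doctest.zig use it.
    var buf = std.array_list.Managed(u8).init(std.testing.allocator);
    defer buf.deinit();

    const w = buf.writer();
    try w.print("{d}-{s}", .{ 42, "ok" });

    try std.testing.expectEqualStrings("42-ok", buf.items);
}
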
tools/fetch_them_macos_headers.zig
@@ -73,7 +73,7 @@ pub fn main() anyerror!void {
const args = try std.process.argsAlloc(allocator);
- var argv = std.ArrayList([]const u8).init(allocator);
+ var argv = std.array_list.Managed([]const u8).init(allocator);
var sysroot: ?[]const u8 = null;
var args_iter = ArgsIterator{ .args = args[1..] };
@@ -145,7 +145,7 @@ fn fetchTarget(
ver.minor,
});
- var cc_argv = std.ArrayList([]const u8).init(arena);
+ var cc_argv = std.array_list.Managed([]const u8).init(arena);
try cc_argv.appendSlice(&[_][]const u8{
"cc",
"-arch",
tools/gen_macos_headers_c.zig
@@ -23,7 +23,7 @@ pub fn main() anyerror!void {
const args = try std.process.argsAlloc(arena);
if (args.len == 1) fatal("no command or option specified", .{});
- var positionals = std.ArrayList([]const u8).init(arena);
+ var positionals = std.array_list.Managed([]const u8).init(arena);
for (args[1..]) |arg| {
if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
@@ -35,7 +35,7 @@ pub fn main() anyerror!void {
var dir = try std.fs.cwd().openDir(positionals.items[0], .{ .no_follow = true });
defer dir.close();
- var paths = std.ArrayList([]const u8).init(arena);
+ var paths = std.array_list.Managed([]const u8).init(arena);
try findHeaders(arena, dir, "", &paths);
const SortFn = struct {
@@ -66,7 +66,7 @@ fn findHeaders(
arena: Allocator,
dir: std.fs.Dir,
prefix: []const u8,
- paths: *std.ArrayList([]const u8),
+ paths: *std.array_list.Managed([]const u8),
) anyerror!void {
var it = dir.iterate();
while (try it.next()) |entry| {
tools/gen_outline_atomics.zig
@@ -37,7 +37,7 @@ pub fn main() !void {
\\
);
- var footer = std.ArrayList(u8).init(arena);
+ var footer = std.array_list.Managed(u8).init(arena);
try footer.appendSlice("\ncomptime {\n");
for ([_]N{ .one, .two, .four, .eight, .sixteen }) |n| {
tools/gen_spirv_spec.zig
@@ -69,7 +69,7 @@ pub fn main() !void {
const core_spec = try readRegistry(CoreRegistry, dir, "spirv.core.grammar.json");
std.mem.sortUnstable(Instruction, core_spec.instructions, CmpInst{}, CmpInst.lt);
- var exts = std.ArrayList(Extension).init(allocator);
+ var exts = std.array_list.Managed(Extension).init(allocator);
var it = dir.iterate();
while (try it.next()) |entry| {
@@ -113,7 +113,7 @@ pub fn main() !void {
_ = try std.fs.File.stdout().write(formatted_output);
}
-fn readExtRegistry(exts: *std.ArrayList(Extension), dir: std.fs.Dir, sub_path: []const u8) !void {
+fn readExtRegistry(exts: *std.array_list.Managed(Extension), dir: std.fs.Dir, sub_path: []const u8) !void {
const filename = std.fs.path.basename(sub_path);
if (!std.mem.startsWith(u8, filename, "extinst.")) {
return;
@@ -296,8 +296,6 @@ fn render(
);
// Merge the operand kinds from all extensions together.
- // var all_operand_kinds = std.ArrayList(OperandKind).init(a);
- // try all_operand_kinds.appendSlice(registry.operand_kinds);
var all_operand_kinds = OperandKindMap.init(allocator);
for (registry.operand_kinds) |kind| {
try all_operand_kinds.putNoClobber(.{ "core", kind.kind }, kind);
@@ -544,7 +542,7 @@ fn renderOpcodes(
var inst_map = std.AutoArrayHashMap(u32, usize).init(allocator);
try inst_map.ensureTotalCapacity(instructions.len);
- var aliases = std.ArrayList(struct { inst: usize, alias: usize }).init(allocator);
+ var aliases = std.array_list.Managed(struct { inst: usize, alias: usize }).init(allocator);
try aliases.ensureTotalCapacity(instructions.len);
for (instructions, 0..) |inst, i| {
@@ -657,7 +655,7 @@ fn renderValueEnum(
var enum_map = std.AutoArrayHashMap(u32, usize).init(allocator);
try enum_map.ensureTotalCapacity(enumerants.len);
- var aliases = std.ArrayList(struct { enumerant: usize, alias: usize }).init(allocator);
+ var aliases = std.array_list.Managed(struct { enumerant: usize, alias: usize }).init(allocator);
try aliases.ensureTotalCapacity(enumerants.len);
for (enumerants, 0..) |enumerant, i| {
@@ -735,7 +733,7 @@ fn renderBitEnum(
var flags_by_bitpos = [_]?usize{null} ** 32;
const enumerants = enumeration.enumerants orelse return error.InvalidRegistry;
- var aliases = std.ArrayList(struct { flag: usize, alias: u5 }).init(allocator);
+ var aliases = std.array_list.Managed(struct { flag: usize, alias: u5 }).init(allocator);
try aliases.ensureTotalCapacity(enumerants.len);
for (enumerants, 0..) |enumerant, i| {
tools/generate_JSONTestSuite.zig
@@ -19,7 +19,7 @@ pub fn main() !void {
\\
);
- var names = std.ArrayList([]const u8).init(allocator);
+ var names = std.array_list.Managed([]const u8).init(allocator);
var cwd = try std.fs.cwd().openDir(".", .{ .iterate = true });
var it = cwd.iterate();
while (try it.next()) |entry| {
tools/generate_linux_syscalls.zig
@@ -591,7 +591,7 @@ fn generateSyscallsFromTable(
const table = try linux_dir.readFile(arch_info.file_path, buf);
- var optional_array_list: ?std.ArrayList(u8) = if (arch_info.additional_enum) |_| std.ArrayList(u8).init(allocator) else null;
+ var optional_array_list: ?std.array_list.Managed(u8) = if (arch_info.additional_enum) |_| std.array_list.Managed(u8).init(allocator) else null;
const optional_writer = if (optional_array_list) |_| optional_array_list.?.writer() else null;
try writer.print("pub const {s} = enum(usize) {{\n", .{arch_info.enum_name});
tools/migrate_langref.zig
@@ -319,13 +319,13 @@ fn walk(arena: Allocator, tokenizer: *Tokenizer, out_dir: std.fs.Dir, w: anytype
}
var mode: std.builtin.OptimizeMode = .Debug;
- var link_objects = std.ArrayList([]const u8).init(arena);
+ var link_objects = std.array_list.Managed([]const u8).init(arena);
var target_str: ?[]const u8 = null;
var link_libc = false;
var link_mode: ?std.builtin.LinkMode = null;
var disable_cache = false;
var verbose_cimport = false;
- var additional_options = std.ArrayList([]const u8).init(arena);
+ var additional_options = std.array_list.Managed([]const u8).init(arena);
const source_token = while (true) {
const content_tok = try eatToken(tokenizer, .content);
@@ -437,7 +437,7 @@ fn walk(arena: Allocator, tokenizer: *Tokenizer, out_dir: std.fs.Dir, w: anytype
}
fn urlize(allocator: Allocator, input: []const u8) ![]u8 {
- var buf = std.ArrayList(u8).init(allocator);
+ var buf = std.array_list.Managed(u8).init(allocator);
defer buf.deinit();
const out = buf.writer();
tools/process_headers.zig
@@ -130,7 +130,7 @@ pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const allocator = arena.allocator();
const args = try std.process.argsAlloc(allocator);
- var search_paths = std.ArrayList([]const u8).init(allocator);
+ var search_paths = std.array_list.Managed([]const u8).init(allocator);
var opt_out_dir: ?[]const u8 = null;
var opt_abi: ?[]const u8 = null;
@@ -234,7 +234,7 @@ pub fn main() !void {
.musl => &[_][]const u8{ search_path, libc_dir, "usr", "local", "musl", "include" },
};
const target_include_dir = try std.fs.path.join(allocator, sub_path);
- var dir_stack = std.ArrayList([]const u8).init(allocator);
+ var dir_stack = std.array_list.Managed([]const u8).init(allocator);
try dir_stack.append(target_include_dir);
while (dir_stack.pop()) |full_dir_name| {
@@ -323,7 +323,7 @@ pub fn main() !void {
// gets their header in a separate arch directory.
var path_it = path_table.iterator();
while (path_it.next()) |path_kv| {
- var contents_list = std.ArrayList(*Contents).init(allocator);
+ var contents_list = std.array_list.Managed(*Contents).init(allocator);
{
var hash_it = path_kv.value_ptr.*.iterator();
while (hash_it.next()) |hash_kv| {
tools/update-linux-headers.zig
@@ -143,7 +143,7 @@ pub fn main() !void {
var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const arena = arena_state.allocator();
const args = try std.process.argsAlloc(arena);
- var search_paths = std.ArrayList([]const u8).init(arena);
+ var search_paths = std.array_list.Managed([]const u8).init(arena);
var opt_out_dir: ?[]const u8 = null;
var arg_i: usize = 1;
@@ -186,7 +186,7 @@ pub fn main() !void {
const target_include_dir = try std.fs.path.join(arena, &.{
search_path, linux_target.name, "include",
});
- var dir_stack = std.ArrayList([]const u8).init(arena);
+ var dir_stack = std.array_list.Managed([]const u8).init(arena);
try dir_stack.append(target_include_dir);
while (dir_stack.pop()) |full_dir_name| {
@@ -261,7 +261,7 @@ pub fn main() !void {
// gets their header in a separate arch directory.
var path_it = path_table.iterator();
while (path_it.next()) |path_kv| {
- var contents_list = std.ArrayList(*Contents).init(arena);
+ var contents_list = std.array_list.Managed(*Contents).init(arena);
{
var hash_it = path_kv.value_ptr.*.iterator();
while (hash_it.next()) |hash_kv| {
tools/update_clang_options.zig
@@ -699,7 +699,7 @@ pub fn main() anyerror!void {
defer parsed.deinit();
const root_map = &parsed.value.object;
- var all_objects = std.ArrayList(*json.ObjectMap).init(allocator);
+ var all_objects = std.array_list.Managed(*json.ObjectMap).init(allocator);
{
var it = root_map.iterator();
it_map: while (it.next()) |kv| {
tools/update_cpu_features.zig
@@ -1634,8 +1634,8 @@ fn processOneTarget(job: Job) void {
defer progress_node.end();
var features_table = std.StringHashMap(Feature).init(arena);
- var all_features = std.ArrayList(Feature).init(arena);
- var all_cpus = std.ArrayList(Cpu).init(arena);
+ var all_features = std.array_list.Managed(Feature).init(arena);
+ var all_cpus = std.array_list.Managed(Cpu).init(arena);
if (target.llvm) |llvm| {
const tblgen_progress = progress_node.start("running llvm-tblgen", 0);
@@ -1726,7 +1726,7 @@ fn processOneTarget(job: Job) void {
var zig_name = try llvmNameToZigName(arena, llvm_name);
var desc = kv.value_ptr.object.get("Desc").?.string;
- var deps = std.ArrayList([]const u8).init(arena);
+ var deps = std.array_list.Managed([]const u8).init(arena);
var omit = false;
var flatten = false;
var omit_deps: []const []const u8 = &.{};
@@ -1810,7 +1810,7 @@ fn processOneTarget(job: Job) void {
if (omitted) continue;
var zig_name = try llvmNameToZigName(arena, llvm_name);
- var deps = std.ArrayList([]const u8).init(arena);
+ var deps = std.array_list.Managed([]const u8).init(arena);
var omit_deps: []const []const u8 = &.{};
var extra_deps: []const []const u8 = &.{};
for (target.feature_overrides) |feature_override| {
@@ -1979,7 +1979,7 @@ fn processOneTarget(job: Job) void {
try putDep(&deps_set, features_table, dep);
}
try pruneFeatures(arena, features_table, &deps_set);
- var dependencies = std.ArrayList([]const u8).init(arena);
+ var dependencies = std.array_list.Managed([]const u8).init(arena);
{
var it = deps_set.keyIterator();
while (it.next()) |key| {
@@ -2024,7 +2024,7 @@ fn processOneTarget(job: Job) void {
try putDep(&deps_set, features_table, feature_zig_name);
}
try pruneFeatures(arena, features_table, &deps_set);
- var cpu_features = std.ArrayList([]const u8).init(arena);
+ var cpu_features = std.array_list.Managed([]const u8).init(arena);
{
var it = deps_set.keyIterator();
while (it.next()) |key| {
tools/update_crc_catalog.zig
@@ -139,7 +139,7 @@ pub fn main() anyerror!void {
_ = mem.replace(u8, snakecase, "-", "_", snakecase);
_ = mem.replace(u8, snakecase, "/", "_", snakecase);
- var buf = try std.ArrayList(u8).initCapacity(arena, snakecase.len);
+ var buf = try std.array_list.Managed(u8).initCapacity(arena, snakecase.len);
defer buf.deinit();
var prev: u8 = 0;
build.zig
@@ -3,7 +3,6 @@ const builtin = std.builtin;
const tests = @import("test/tests.zig");
const BufMap = std.BufMap;
const mem = std.mem;
-const ArrayList = std.ArrayList;
const io = std.io;
const fs = std.fs;
const InstallDirectoryOptions = std.Build.InstallDirectoryOptions;
@@ -925,7 +924,7 @@ fn addCxxKnownPath(
return error.RequiredLibraryNotFound;
const path_padded = run: {
- var args = std.ArrayList([]const u8).init(b.allocator);
+ var args = std.array_list.Managed([]const u8).init(b.allocator);
try args.append(ctx.cxx_compiler);
var it = std.mem.tokenizeAny(u8, ctx.cxx_compiler_arg1, &std.ascii.whitespace);
while (it.next()) |arg| try args.append(arg);