Commit 7a28c644aa
Changed files (18)
lib/std/build.zig
lib/std/build/emit_raw.zig
lib/std/coff.zig
lib/std/debug.zig
lib/std/fs.zig
lib/std/http/headers.zig
lib/std/io/in_stream.zig
lib/std/json.zig
lib/std/math/big/rational.zig
lib/std/net.zig
lib/std/special/build_runner.zig
lib/std/unicode.zig
src-self-hosted/libc_installation.zig
src-self-hosted/stage2.zig
src-self-hosted/translate_c.zig
test/standalone/brace_expansion/main.zig
test/tests.zig
tools/merge_anal_dumps.zig
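
The pattern behind every hunk below is the std.ArrayList API change: the list no longer exposes its length as a separate top-level len field, so call sites now read the length through the items slice (list.items.len), and truncate by assigning to it, while capacity is tracked separately. A minimal before/after sketch, with hypothetical names not taken from this commit:

    const std = @import("std");

    test "ArrayList length now lives on items" {
        // hypothetical example, not part of this commit
        var list = std.ArrayList(u8).init(std.heap.page_allocator);
        defer list.deinit();

        try list.append('a');
        try list.append('b');

        // before this change: list.len == 2
        // after this change: the active length is the length of the items slice
        std.testing.expect(list.items.len == 2);

        // truncating by assigning to items.len keeps the capacity available,
        // mirroring the "dex.items.len = 0;" pattern in lib/std/http/headers.zig
        list.items.len = 0;
        std.testing.expect(list.items.len == 0);
    }
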
lib/std/build/emit_raw.zig
@@ -94,7 +94,7 @@ const BinaryElfOutput = struct {
sort.sort(*BinaryElfSegment, self.segments.span(), segmentSortCompare);
- if (self.segments.len > 0) {
+ if (self.segments.items.len > 0) {
const firstSegment = self.segments.at(0);
if (firstSegment.firstSection) |firstSection| {
const diff = firstSection.elfOffset - firstSegment.elfOffset;
lib/std/http/headers.zig
@@ -139,7 +139,7 @@ pub const Headers = struct {
pub fn clone(self: Self, allocator: *Allocator) !Self {
var other = Headers.init(allocator);
errdefer other.deinit();
- try other.data.ensureCapacity(self.data.len);
+ try other.data.ensureCapacity(self.data.items.len);
try other.index.initCapacity(self.index.entries.len);
for (self.data.span()) |entry| {
try other.append(entry.name, entry.value, entry.never_index);
@@ -152,7 +152,7 @@ pub const Headers = struct {
}
pub fn append(self: *Self, name: []const u8, value: []const u8, never_index: ?bool) !void {
- const n = self.data.len + 1;
+ const n = self.data.items.len + 1;
try self.data.ensureCapacity(n);
var entry: HeaderEntry = undefined;
if (self.index.get(name)) |kv| {
@@ -197,7 +197,7 @@ pub const Headers = struct {
if (self.index.remove(name)) |kv| {
var dex = &kv.value;
// iterate backwards
- var i = dex.len;
+ var i = dex.items.len;
while (i > 0) {
i -= 1;
const data_index = dex.at(i);
@@ -220,18 +220,18 @@ pub const Headers = struct {
const removed = self.data.orderedRemove(i);
const kv = self.index.get(removed.name).?;
var dex = &kv.value;
- if (dex.len == 1) {
+ if (dex.items.len == 1) {
// was last item; delete the index
_ = self.index.remove(kv.key);
dex.deinit();
removed.deinit();
self.allocator.free(kv.key);
} else {
- dex.shrink(dex.len - 1);
+ dex.shrink(dex.items.len - 1);
removed.deinit();
}
// if it was the last item; no need to rebuild index
- if (i != self.data.len) {
+ if (i != self.data.items.len) {
self.rebuild_index();
}
}
@@ -242,18 +242,18 @@ pub const Headers = struct {
const removed = self.data.swapRemove(i);
const kv = self.index.get(removed.name).?;
var dex = &kv.value;
- if (dex.len == 1) {
+ if (dex.items.len == 1) {
// was last item; delete the index
_ = self.index.remove(kv.key);
dex.deinit();
removed.deinit();
self.allocator.free(kv.key);
} else {
- dex.shrink(dex.len - 1);
+ dex.shrink(dex.items.len - 1);
removed.deinit();
}
// if it was the last item; no need to rebuild index
- if (i != self.data.len) {
+ if (i != self.data.items.len) {
self.rebuild_index();
}
}
@@ -277,7 +277,7 @@ pub const Headers = struct {
pub fn get(self: Self, allocator: *Allocator, name: []const u8) !?[]const HeaderEntry {
const dex = self.getIndices(name) orelse return null;
- const buf = try allocator.alloc(HeaderEntry, dex.len);
+ const buf = try allocator.alloc(HeaderEntry, dex.items.len);
var n: usize = 0;
for (dex.span()) |idx| {
buf[n] = self.data.at(idx);
@@ -301,7 +301,7 @@ pub const Headers = struct {
// adapted from mem.join
const total_len = blk: {
- var sum: usize = dex.len - 1; // space for separator(s)
+ var sum: usize = dex.items.len - 1; // space for separator(s)
for (dex.span()) |idx|
sum += self.data.at(idx).value.len;
break :blk sum;
@@ -330,7 +330,7 @@ pub const Headers = struct {
var it = self.index.iterator();
while (it.next()) |kv| {
var dex = &kv.value;
- dex.len = 0; // keeps capacity available
+ dex.items.len = 0; // keeps capacity available
}
}
{ // fill up indexes again; we know capacity is fine from before
lib/std/io/in_stream.zig
@@ -54,7 +54,7 @@ pub fn InStream(
/// and the `std.ArrayList` has exactly `max_append_size` bytes appended.
pub fn readAllArrayList(self: Self, array_list: *std.ArrayList(u8), max_append_size: usize) !void {
try array_list.ensureCapacity(math.min(max_append_size, 4096));
- const original_len = array_list.len;
+ const original_len = array_list.items.len;
var start_index: usize = original_len;
while (true) {
array_list.expandToCapacity();
lib/std/math/big/rational.zig
@@ -4,7 +4,6 @@ const math = std.math;
const mem = std.mem;
const testing = std.testing;
const Allocator = mem.Allocator;
-const ArrayList = std.ArrayList;
const bn = @import("int.zig");
const Limb = bn.Limb;
lib/std/special/build_runner.zig
@@ -171,7 +171,7 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: var) !void {
\\
);
- if (builder.available_options_list.len == 0) {
+ if (builder.available_options_list.items.len == 0) {
try out_stream.print(" (none)\n", .{});
} else {
for (builder.available_options_list.span()) |option| {
lib/std/build.zig
@@ -1779,7 +1779,7 @@ pub const LibExeObjStep = struct {
const self = @fieldParentPtr(LibExeObjStep, "step", step);
const builder = self.builder;
- if (self.root_src == null and self.link_objects.len == 0) {
+ if (self.root_src == null and self.link_objects.items.len == 0) {
warn("{}: linker needs 1 or more objects to link\n", .{self.step.name});
return error.NeedAnObject;
}
@@ -1847,7 +1847,7 @@ pub const LibExeObjStep = struct {
}
}
- if (self.build_options_contents.len > 0) {
+ if (self.build_options_contents.items.len > 0) {
const build_options_file = try fs.path.join(
builder.allocator,
&[_][]const u8{ builder.cache_root, builder.fmt("{}_build_options.zig", .{self.name}) },
lib/std/coff.zig
@@ -181,7 +181,7 @@ pub const Coff = struct {
}
pub fn loadSections(self: *Coff) !void {
- if (self.sections.len == self.coff_header.number_of_sections)
+ if (self.sections.items.len == self.coff_header.number_of_sections)
return;
try self.sections.ensureCapacity(self.coff_header.number_of_sections);
lib/std/debug.zig
@@ -1478,7 +1478,7 @@ pub const ModuleDebugInfo = switch (builtin.os.tag) {
var coff_section: *coff.Section = undefined;
const mod_index = for (self.sect_contribs) |sect_contrib| {
- if (sect_contrib.Section > self.coff.sections.len) continue;
+ if (sect_contrib.Section > self.coff.sections.items.len) continue;
// Remember that SectionContribEntry.Section is 1-based.
coff_section = &self.coff.sections.span()[sect_contrib.Section - 1];
lib/std/fs.zig
@@ -1440,9 +1440,9 @@ pub const Walker = struct {
/// a reference to the path.
pub fn next(self: *Walker) !?Entry {
while (true) {
- if (self.stack.len == 0) return null;
+ if (self.stack.items.len == 0) return null;
// `top` becomes invalid after appending to `self.stack`.
- const top = &self.stack.span()[self.stack.len - 1];
+ const top = &self.stack.span()[self.stack.items.len - 1];
const dirname_len = top.dirname_len;
if (try top.dir_it.next()) |base| {
self.name_buffer.shrink(dirname_len);
@@ -1457,7 +1457,7 @@ pub const Walker = struct {
errdefer new_dir.close();
try self.stack.append(StackItem{
.dir_it = new_dir.iterate(),
- .dirname_len = self.name_buffer.len,
+ .dirname_len = self.name_buffer.items.len,
});
}
}
lib/std/json.zig
@@ -1556,7 +1556,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
else => {},
}
- try arraylist.ensureCapacity(arraylist.len + 1);
+ try arraylist.ensureCapacity(arraylist.items.len + 1);
const v = try parseInternal(ptrInfo.child, tok, tokens, options);
arraylist.appendAssumeCapacity(v);
}
@@ -1874,7 +1874,7 @@ pub const Parser = struct {
try p.transition(&arena.allocator, input, s.i - 1, token);
}
- debug.assert(p.stack.len == 1);
+ debug.assert(p.stack.items.len == 1);
return ValueTree{
.arena = arena,
@@ -1888,7 +1888,7 @@ pub const Parser = struct {
switch (p.state) {
.ObjectKey => switch (token) {
.ObjectEnd => {
- if (p.stack.len == 1) {
+ if (p.stack.items.len == 1) {
return;
}
@@ -1907,8 +1907,8 @@ pub const Parser = struct {
},
},
.ObjectValue => {
- var object = &p.stack.items[p.stack.len - 2].Object;
- var key = p.stack.items[p.stack.len - 1].String;
+ var object = &p.stack.items[p.stack.items.len - 2].Object;
+ var key = p.stack.items[p.stack.items.len - 1].String;
switch (token) {
.ObjectBegin => {
@@ -1950,11 +1950,11 @@ pub const Parser = struct {
}
},
.ArrayValue => {
- var array = &p.stack.items[p.stack.len - 1].Array;
+ var array = &p.stack.items[p.stack.items.len - 1].Array;
switch (token) {
.ArrayEnd => {
- if (p.stack.len == 1) {
+ if (p.stack.items.len == 1) {
return;
}
@@ -2021,12 +2021,12 @@ pub const Parser = struct {
}
fn pushToParent(p: *Parser, value: *const Value) !void {
- switch (p.stack.span()[p.stack.len - 1]) {
+ switch (p.stack.span()[p.stack.items.len - 1]) {
// Object Parent -> [ ..., object, <key>, value ]
Value.String => |key| {
_ = p.stack.pop();
- var object = &p.stack.items[p.stack.len - 1].Object;
+ var object = &p.stack.items[p.stack.items.len - 1].Object;
_ = try object.put(key, value.*);
p.state = .ObjectKey;
},
@@ -2165,7 +2165,7 @@ test "json.parser.dynamic" {
testing.expect(animated.Bool == false);
const array_of_object = image.Object.get("ArrayOfObject").?.value;
- testing.expect(array_of_object.Array.len == 1);
+ testing.expect(array_of_object.Array.items.len == 1);
const obj0 = array_of_object.Array.at(0).Object.get("n").?.value;
testing.expect(mem.eql(u8, obj0.String, "m"));
lib/std/net.zig
@@ -509,7 +509,7 @@ pub fn getAddressList(allocator: *mem.Allocator, name: []const u8, port: u16) !*
try linuxLookupName(&lookup_addrs, &canon, name, family, flags, port);
- result.addrs = try arena.alloc(Address, lookup_addrs.len);
+ result.addrs = try arena.alloc(Address, lookup_addrs.items.len);
if (!canon.isNull()) {
result.canon_name = canon.toOwnedSlice();
}
@@ -554,7 +554,7 @@ fn linuxLookupName(
return name_err;
} else {
try linuxLookupNameFromHosts(addrs, canon, name, family, port);
- if (addrs.len == 0) {
+ if (addrs.items.len == 0) {
try linuxLookupNameFromDnsSearch(addrs, canon, name, family, port);
}
}
@@ -562,11 +562,11 @@ fn linuxLookupName(
try canon.resize(0);
try linuxLookupNameFromNull(addrs, family, flags, port);
}
- if (addrs.len == 0) return error.UnknownHostName;
+ if (addrs.items.len == 0) return error.UnknownHostName;
// No further processing is needed if there are fewer than 2
// results or if there are only IPv4 results.
- if (addrs.len == 1 or family == os.AF_INET) return;
+ if (addrs.items.len == 1 or family == os.AF_INET) return;
const all_ip4 = for (addrs.span()) |addr| {
if (addr.addr.any.family != os.AF_INET) break false;
} else true;
@@ -908,7 +908,7 @@ fn linuxLookupNameFromDnsSearch(
canon.shrink(canon_name.len + 1);
try canon.appendSlice(tok);
try linuxLookupNameFromDns(addrs, canon, canon.span(), family, rc, port);
- if (addrs.len != 0) return;
+ if (addrs.items.len != 0) return;
}
canon.shrink(canon_name.len);
@@ -967,7 +967,7 @@ fn linuxLookupNameFromDns(
dnsParse(ap[i], ctx, dnsParseCallback) catch {};
}
- if (addrs.len != 0) return;
+ if (addrs.items.len != 0) return;
if (ap[0].len < 4 or (ap[0][3] & 15) == 2) return error.TemporaryNameServerFailure;
if ((ap[0][3] & 15) == 0) return error.UnknownHostName;
if ((ap[0][3] & 15) == 3) return;
@@ -1049,7 +1049,7 @@ fn getResolvConf(allocator: *mem.Allocator, rc: *ResolvConf) !void {
}
}
- if (rc.ns.len == 0) {
+ if (rc.ns.items.len == 0) {
return linuxLookupNameFromNumericUnspec(&rc.ns, "127.0.0.1", 53);
}
}
@@ -1078,7 +1078,7 @@ fn resMSendRc(
var ns_list = std.ArrayList(Address).init(rc.ns.allocator);
defer ns_list.deinit();
- try ns_list.resize(rc.ns.len);
+ try ns_list.resize(rc.ns.items.len);
const ns = ns_list.span();
for (rc.ns.span()) |iplit, i| {
lib/std/unicode.zig
@@ -475,7 +475,7 @@ pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8
var it = Utf16LeIterator.init(utf16le);
while (try it.nextCodepoint()) |codepoint| {
const utf8_len = utf8CodepointSequenceLength(codepoint) catch unreachable;
- try result.resize(result.len + utf8_len);
+ try result.resize(result.items.len + utf8_len);
assert((utf8Encode(codepoint, result.items[out_index..]) catch unreachable) == utf8_len);
out_index += utf8_len;
}
@@ -571,7 +571,7 @@ pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![:0]u
}
}
- const len = result.len;
+ const len = result.items.len;
try result.append(0);
return result.toOwnedSlice()[0..len :0];
}
src-self-hosted/libc_installation.zig
@@ -268,7 +268,7 @@ pub const LibCInstallation = struct {
try search_paths.append(line);
}
}
- if (search_paths.len == 0) {
+ if (search_paths.items.len == 0) {
return error.CCompilerCannotFindHeaders;
}
@@ -276,9 +276,9 @@ pub const LibCInstallation = struct {
const sys_include_dir_example_file = if (is_windows) "sys\\types.h" else "sys/errno.h";
var path_i: usize = 0;
- while (path_i < search_paths.len) : (path_i += 1) {
+ while (path_i < search_paths.items.len) : (path_i += 1) {
// search in reverse order
- const search_path_untrimmed = search_paths.at(search_paths.len - path_i - 1);
+ const search_path_untrimmed = search_paths.at(search_paths.items.len - path_i - 1);
const search_path = std.mem.trimLeft(u8, search_path_untrimmed, " ");
var search_dir = fs.cwd().openDir(search_path, .{}) catch |err| switch (err) {
error.FileNotFound,
src-self-hosted/stage2.zig
@@ -239,7 +239,7 @@ fn fmtMain(argc: c_int, argv: [*]const [*:0]const u8) !void {
}
if (stdin_flag) {
- if (input_files.len != 0) {
+ if (input_files.items.len != 0) {
try stderr.writeAll("cannot use --stdin with positional arguments\n");
process.exit(1);
}
@@ -273,7 +273,7 @@ fn fmtMain(argc: c_int, argv: [*]const [*:0]const u8) !void {
return;
}
- if (input_files.len == 0) {
+ if (input_files.items.len == 0) {
try stderr.writeAll("expected at least one source file argument\n");
process.exit(1);
}
src-self-hosted/translate_c.zig
@@ -4309,7 +4309,7 @@ fn makeRestorePoint(c: *Context) RestorePoint {
return RestorePoint{
.c = c,
.token_index = c.tree.tokens.len,
- .src_buf_index = c.source_buffer.len,
+ .src_buf_index = c.source_buffer.items.len,
};
}
@@ -4771,11 +4771,11 @@ fn appendToken(c: *Context, token_id: Token.Id, bytes: []const u8) !ast.TokenInd
fn appendTokenFmt(c: *Context, token_id: Token.Id, comptime format: []const u8, args: var) !ast.TokenIndex {
assert(token_id != .Invalid);
- const start_index = c.source_buffer.len;
+ const start_index = c.source_buffer.items.len;
errdefer c.source_buffer.shrink(start_index);
try c.source_buffer.outStream().print(format, args);
- const end_index = c.source_buffer.len;
+ const end_index = c.source_buffer.items.len;
const token_index = c.tree.tokens.len;
const new_token = try c.tree.tokens.addOne();
errdefer c.tree.tokens.shrink(token_index);
test/standalone/brace_expansion/main.zig
@@ -113,7 +113,7 @@ fn parse(tokens: *const ArrayList(Token), token_index: *usize) ParseError!Node {
fn expandString(input: []const u8, output: *ArrayListSentineled(u8, 0)) !void {
const tokens = try tokenize(input);
- if (tokens.len == 1) {
+ if (tokens.items.len == 1) {
return output.resize(0);
}
@@ -142,7 +142,7 @@ fn expandString(input: []const u8, output: *ArrayListSentineled(u8, 0)) !void {
const ExpandNodeError = error{OutOfMemory};
fn expandNode(node: Node, output: *ArrayList(ArrayListSentineled(u8, 0))) ExpandNodeError!void {
- assert(output.len == 0);
+ assert(output.items.len == 0);
switch (node) {
Node.Scalar => |scalar| {
try output.append(try ArrayListSentineled(u8, 0).init(global_allocator, scalar));
test/tests.zig
@@ -864,13 +864,13 @@ pub const CompileErrorContext = struct {
var err_iter = ErrLineIter.init(stderr);
var i: usize = 0;
ok = while (err_iter.next()) |line| : (i += 1) {
- if (i >= self.case.expected_errors.len) break false;
+ if (i >= self.case.expected_errors.items.len) break false;
const expected = self.case.expected_errors.at(i);
if (mem.indexOf(u8, line, expected) == null) break false;
continue;
} else true;
- ok = ok and i == self.case.expected_errors.len;
+ ok = ok and i == self.case.expected_errors.items.len;
if (!ok) {
warn("\n======== Expected these compile errors: ========\n", .{});
tools/merge_anal_dumps.zig
@@ -194,7 +194,7 @@ const Dump = struct {
for (other_files) |other_file, i| {
const gop = try self.file_map.getOrPut(other_file.String);
if (!gop.found_existing) {
- gop.kv.value = self.file_list.len;
+ gop.kv.value = self.file_list.items.len;
try self.file_list.append(other_file.String);
}
try other_file_to_mine.putNoClobber(i, gop.kv.value);
@@ -213,7 +213,7 @@ const Dump = struct {
};
const gop = try self.node_map.getOrPut(other_node);
if (!gop.found_existing) {
- gop.kv.value = self.node_list.len;
+ gop.kv.value = self.node_list.items.len;
try self.node_list.append(other_node);
}
try other_ast_node_to_mine.putNoClobber(i, gop.kv.value);
@@ -243,7 +243,7 @@ const Dump = struct {
};
const gop = try self.error_map.getOrPut(other_error);
if (!gop.found_existing) {
- gop.kv.value = self.error_list.len;
+ gop.kv.value = self.error_list.items.len;
try self.error_list.append(other_error);
}
try other_error_to_mine.putNoClobber(i, gop.kv.value);
@@ -304,7 +304,7 @@ const Dump = struct {
) !void {
const gop = try self.type_map.getOrPut(other_type);
if (!gop.found_existing) {
- gop.kv.value = self.type_list.len;
+ gop.kv.value = self.type_list.items.len;
try self.type_list.append(other_type);
}
try other_types_to_mine.putNoClobber(other_type_index, gop.kv.value);