Commit aeaef8c0ff
Changed files (216)
lib/std/compress
lib/std/crypto/pcurves
lib/std/event
lib/std/fmt/parse_float
lib/std/json
lib/std/math
lib/std/net
lib/std/os
lib/std/rand
lib/std/target
lib/std/zig
src/arch/aarch64
src/arch/riscv64
src/arch/sparc64
src/link
src/translate_c
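Every hunk below applies one of two mechanical rewrites for Zig's new multi-object for-loop syntax. The old implicit index capture `for (slice) |x, i|` becomes an explicit counter range iterated in lockstep, `for (slice, 0..) |x, i|`; and loops that go through a `|*x|` pointer capture now take a direct array operand by reference, `for (&array) |*x|` (slices are already references, so slice operands like `self.buf[self.pos..]` are unchanged). A minimal sketch of both patterns, as a hypothetical test that is not part of this commit:

    const std = @import("std");

    test "new multi-object for syntax" {
        var buf = [_]u32{ 1, 2, 3, 4 };
        // The 0.. counter range is iterated in lockstep with the operand,
        // replacing the old implicit index capture `for (buf) |b, i|`.
        for (buf, 0..) |b, i| {
            try std.testing.expectEqual(i + 1, b);
        }
        // Mutating through a pointer capture now requires passing the
        // array by reference rather than by value.
        for (&buf) |*b| b.* *= 2;
        try std.testing.expectEqual(@as(u32, 8), buf[3]);
    }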
lib/compiler_rt/atomics.zig
@@ -151,7 +151,7 @@ fn __atomic_compare_exchange(
_ = failure;
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
- for (ptr[0..size]) |b, i| {
+ for (ptr[0..size], 0..) |b, i| {
if (expected[i] != b) break;
} else {
// The two objects, ptr and expected, are equal
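Aside on the hunk above: a Zig for loop's `else` arm runs only when the loop completes without hitting `break`, and the new counter syntax leaves that behavior untouched. A standalone sketch of the same compare-then-else shape, with hypothetical data:

    const std = @import("std");

    test "for-else runs only without break" {
        const expected = [_]u8{ 1, 2, 3 };
        const actual = [_]u8{ 1, 2, 3 };
        var equal = false;
        for (actual, 0..) |b, i| {
            if (expected[i] != b) break;
        } else {
            // Reached only because no element differed.
            equal = true;
        }
        try std.testing.expect(equal);
    }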
lib/compiler_rt/comparedf2_test.zig
@@ -94,8 +94,8 @@ fn generateVector(comptime a: f64, comptime b: f64) TestVector {
const test_vectors = init: {
@setEvalBranchQuota(10000);
var vectors: [arguments.len * arguments.len]TestVector = undefined;
- for (arguments[0..]) |arg_i, i| {
- for (arguments[0..]) |arg_j, j| {
+ for (arguments[0..], 0..) |arg_i, i| {
+ for (arguments[0..], 0..) |arg_j, j| {
vectors[(i * arguments.len) + j] = generateVector(arg_i, arg_j);
}
}
lib/compiler_rt/comparesf2_test.zig
@@ -94,8 +94,8 @@ fn generateVector(comptime a: f32, comptime b: f32) TestVector {
const test_vectors = init: {
@setEvalBranchQuota(10000);
var vectors: [arguments.len * arguments.len]TestVector = undefined;
- for (arguments[0..]) |arg_i, i| {
- for (arguments[0..]) |arg_j, j| {
+ for (arguments[0..], 0..) |arg_i, i| {
+ for (arguments[0..], 0..) |arg_j, j| {
vectors[(i * arguments.len) + j] = generateVector(arg_i, arg_j);
}
}
lib/std/atomic/Atomic.zig
@@ -548,7 +548,7 @@ test "Atomic.bitSet" {
var x = Atomic(Int).init(0);
const bit_array = @as([@bitSizeOf(Int)]void, undefined);
- for (bit_array) |_, bit_index| {
+ for (bit_array, 0..) |_, bit_index| {
const bit = @intCast(std.math.Log2Int(Int), bit_index);
const mask = @as(Int, 1) << bit;
@@ -562,7 +562,7 @@ test "Atomic.bitSet" {
try testing.expect(x.load(.SeqCst) & mask != 0);
// all the previous bits should have not changed (still be set)
- for (bit_array[0..bit_index]) |_, prev_bit_index| {
+ for (bit_array[0..bit_index], 0..) |_, prev_bit_index| {
const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
const prev_mask = @as(Int, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask != 0);
@@ -578,7 +578,7 @@ test "Atomic.bitReset" {
var x = Atomic(Int).init(0);
const bit_array = @as([@bitSizeOf(Int)]void, undefined);
- for (bit_array) |_, bit_index| {
+ for (bit_array, 0..) |_, bit_index| {
const bit = @intCast(std.math.Log2Int(Int), bit_index);
const mask = @as(Int, 1) << bit;
x.storeUnchecked(x.loadUnchecked() | mask);
@@ -593,7 +593,7 @@ test "Atomic.bitReset" {
try testing.expect(x.load(.SeqCst) & mask == 0);
// all the previous bits should have not changed (still be reset)
- for (bit_array[0..bit_index]) |_, prev_bit_index| {
+ for (bit_array[0..bit_index], 0..) |_, prev_bit_index| {
const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
const prev_mask = @as(Int, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask == 0);
@@ -609,7 +609,7 @@ test "Atomic.bitToggle" {
var x = Atomic(Int).init(0);
const bit_array = @as([@bitSizeOf(Int)]void, undefined);
- for (bit_array) |_, bit_index| {
+ for (bit_array, 0..) |_, bit_index| {
const bit = @intCast(std.math.Log2Int(Int), bit_index);
const mask = @as(Int, 1) << bit;
@@ -623,7 +623,7 @@ test "Atomic.bitToggle" {
try testing.expect(x.load(.SeqCst) & mask == 0);
// all the previous bits should have not changed (still be toggled back)
- for (bit_array[0..bit_index]) |_, prev_bit_index| {
+ for (bit_array[0..bit_index], 0..) |_, prev_bit_index| {
const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
const prev_mask = @as(Int, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask == 0);
lib/std/atomic/queue.zig
@@ -212,11 +212,11 @@ test "std.atomic.Queue" {
try expect(context.queue.isEmpty());
var putters: [put_thread_count]std.Thread = undefined;
- for (putters) |*t| {
+ for (&putters) |*t| {
t.* = try std.Thread.spawn(.{}, startPuts, .{&context});
}
var getters: [put_thread_count]std.Thread = undefined;
- for (getters) |*t| {
+ for (&getters) |*t| {
t.* = try std.Thread.spawn(.{}, startGets, .{&context});
}
lib/std/atomic/stack.zig
@@ -117,11 +117,11 @@ test "std.atomic.stack" {
}
} else {
var putters: [put_thread_count]std.Thread = undefined;
- for (putters) |*t| {
+ for (&putters) |*t| {
t.* = try std.Thread.spawn(.{}, startPuts, .{&context});
}
var getters: [put_thread_count]std.Thread = undefined;
- for (getters) |*t| {
+ for (&getters) |*t| {
t.* = try std.Thread.spawn(.{}, startGets, .{&context});
}
lib/std/Build/CompileStep.zig
@@ -1016,7 +1016,7 @@ pub fn addVcpkgPaths(self: *CompileStep, linkage: CompileStep.Linkage) !void {
pub fn setExecCmd(self: *CompileStep, args: []const ?[]const u8) void {
assert(self.kind == .@"test");
const duped_args = self.builder.allocator.alloc(?[]u8, args.len) catch @panic("OOM");
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
duped_args[i] = if (arg) |a| self.builder.dupe(a) else null;
}
self.exec_cmd_args = duped_args;
@@ -1040,7 +1040,7 @@ fn appendModuleArgs(
{
const keys = module.dependencies.keys();
- for (module.dependencies.values()) |sub_module, i| {
+ for (module.dependencies.values(), 0..) |sub_module, i| {
const sub_name = keys[i];
try cs.appendModuleArgs(zig_args, sub_name, sub_module);
}
@@ -1575,7 +1575,7 @@ fn make(step: *Step) !void {
{
const keys = self.modules.keys();
- for (self.modules.values()) |module, i| {
+ for (self.modules.values(), 0..) |module, i| {
const name = keys[i];
try self.appendModuleArgs(&zig_args, name, module);
}
@@ -1750,7 +1750,7 @@ fn make(step: *Step) !void {
const args_to_escape = zig_args.items[2..];
var escaped_args = try ArrayList([]const u8).initCapacity(builder.allocator, args_to_escape.len);
arg_blk: for (args_to_escape) |arg| {
- for (arg) |c, arg_idx| {
+ for (arg, 0..) |c, arg_idx| {
if (c == '\\' or c == '"') {
// Slow path for arguments that need to be escaped. We'll need to allocate and copy
var escaped = try ArrayList(u8).initCapacity(builder.allocator, arg.len + 1);
lib/std/Build/ConfigHeaderStep.zig
@@ -350,7 +350,7 @@ fn render_blank(
try output.appendSlice("\n");
const values = defines.values();
- for (defines.keys()) |name, i| {
+ for (defines.keys(), 0..) |name, i| {
try renderValueC(output, name, values[i]);
}
@@ -361,7 +361,7 @@ fn render_blank(
fn render_nasm(output: *std.ArrayList(u8), defines: std.StringArrayHashMap(Value)) !void {
const values = defines.values();
- for (defines.keys()) |name, i| {
+ for (defines.keys(), 0..) |name, i| {
try renderValueNasm(output, name, values[i]);
}
}
lib/std/Build/FmtStep.zig
@@ -19,7 +19,7 @@ pub fn create(builder: *std.Build, paths: []const []const u8) *FmtStep {
self.argv[0] = builder.zig_exe;
self.argv[1] = "fmt";
- for (paths) |path, i| {
+ for (paths, 0..) |path, i| {
self.argv[2 + i] = builder.pathFromRoot(path);
}
return self;
lib/std/compress/deflate/compressor.zig
@@ -159,7 +159,7 @@ fn levels(compression: Compression) CompressionLevel {
fn matchLen(a: []u8, b: []u8, max: u32) u32 {
var bounded_a = a[0..max];
var bounded_b = b[0..max];
- for (bounded_a) |av, i| {
+ for (bounded_a, 0..) |av, i| {
if (bounded_b[i] != av) {
return @intCast(u32, i);
}
@@ -312,14 +312,14 @@ pub fn Compressor(comptime WriterType: anytype) type {
// Iterate over slices instead of arrays to avoid copying
// the entire table onto the stack (https://golang.org/issue/18625).
- for (self.hash_prev) |v, i| {
+ for (self.hash_prev, 0..) |v, i| {
if (v > delta) {
self.hash_prev[i] = @intCast(u32, v - delta);
} else {
self.hash_prev[i] = 0;
}
}
- for (self.hash_head) |v, i| {
+ for (self.hash_head, 0..) |v, i| {
if (v > delta) {
self.hash_head[i] = @intCast(u32, v - delta);
} else {
@@ -391,7 +391,7 @@ pub fn Compressor(comptime WriterType: anytype) type {
var dst = self.hash_match[0..dst_size];
_ = self.bulk_hasher(to_check, dst);
var new_h: u32 = 0;
- for (dst) |val, i| {
+ for (dst, 0..) |val, i| {
var di = i + index;
new_h = val;
var hh = &self.hash_head[new_h & hash_mask];
@@ -1102,7 +1102,7 @@ test "bulkHash4" {
defer testing.allocator.free(dst);
_ = bulkHash4(y, dst);
- for (dst) |got, i| {
+ for (dst, 0..) |got, i| {
var want = hash4(y[i..]);
try testing.expectEqual(want, got);
}
lib/std/compress/deflate/compressor_test.zig
@@ -171,7 +171,7 @@ test "deflate/inflate" {
var large_data_chunk = try testing.allocator.alloc(u8, 100_000);
defer testing.allocator.free(large_data_chunk);
// fill with random data
- for (large_data_chunk) |_, i| {
+ for (large_data_chunk, 0..) |_, i| {
large_data_chunk[i] = @truncate(u8, i) *% @truncate(u8, i);
}
try testToFromWithLimit(large_data_chunk, limits);
@@ -205,7 +205,7 @@ test "very long sparse chunk" {
n -= cur - s.l;
cur = s.l;
}
- for (b[0..n]) |_, i| {
+ for (b[0..n], 0..) |_, i| {
if (s.cur + i >= s.l -| (1 << 16)) {
b[i] = 1;
} else {
@@ -451,7 +451,7 @@ test "inflate reset" {
defer compressed_strings[0].deinit();
defer compressed_strings[1].deinit();
- for (strings) |s, i| {
+ for (strings, 0..) |s, i| {
var comp = try compressor(
testing.allocator,
compressed_strings[i].writer(),
@@ -498,7 +498,7 @@ test "inflate reset dictionary" {
defer compressed_strings[0].deinit();
defer compressed_strings[1].deinit();
- for (strings) |s, i| {
+ for (strings, 0..) |s, i| {
var comp = try compressor(
testing.allocator,
compressed_strings[i].writer(),
lib/std/compress/deflate/decompressor.zig
@@ -165,7 +165,7 @@ const HuffmanDecoder = struct {
}
}
- for (lengths) |n, li| {
+ for (lengths, 0..) |n, li| {
if (n == 0) {
continue;
}
@@ -213,7 +213,7 @@ const HuffmanDecoder = struct {
// Above we've sanity checked that we never overwrote
// an existing entry. Here we additionally check that
// we filled the tables completely.
- for (self.chunks) |chunk, i| {
+ for (self.chunks, 0..) |chunk, i| {
// As an exception, in the degenerate
// single-code case, we allow odd
// chunks to be missing.
lib/std/compress/deflate/deflate_fast.zig
@@ -264,7 +264,7 @@ pub const DeflateFast = struct {
var a = src[@intCast(usize, s)..@intCast(usize, s1)];
b = b[0..a.len];
// Extend the match to be as long as possible.
- for (a) |_, i| {
+ for (a, 0..) |_, i| {
if (a[i] != b[i]) {
return @intCast(i32, i);
}
@@ -285,7 +285,7 @@ pub const DeflateFast = struct {
b = b[0..a.len];
}
a = a[0..b.len];
- for (b) |_, i| {
+ for (b, 0..) |_, i| {
if (a[i] != b[i]) {
return @intCast(i32, i);
}
@@ -301,7 +301,7 @@ pub const DeflateFast = struct {
// Continue looking for more matches in the current block.
a = src[@intCast(usize, s + n)..@intCast(usize, s1)];
b = src[0..a.len];
- for (a) |_, i| {
+ for (a, 0..) |_, i| {
if (a[i] != b[i]) {
return @intCast(i32, i) + n;
}
@@ -330,7 +330,7 @@ pub const DeflateFast = struct {
fn shiftOffsets(self: *Self) void {
if (self.prev_len == 0) {
// We have no history; just clear the table.
- for (self.table) |_, i| {
+ for (self.table, 0..) |_, i| {
self.table[i] = TableEntry{ .val = 0, .offset = 0 };
}
self.cur = max_match_offset + 1;
@@ -338,7 +338,7 @@ pub const DeflateFast = struct {
}
// Shift down everything in the table that isn't already too far away.
- for (self.table) |_, i| {
+ for (self.table, 0..) |_, i| {
var v = self.table[i].offset - self.cur + max_match_offset + 1;
if (v < 0) {
// We want to reset self.cur to max_match_offset + 1, so we need to shift
lib/std/compress/deflate/deflate_fast_test.zig
@@ -18,7 +18,7 @@ test "best speed" {
var abcabc = try testing.allocator.alloc(u8, 131_072);
defer testing.allocator.free(abcabc);
- for (abcabc) |_, i| {
+ for (abcabc, 0..) |_, i| {
abcabc[i] = @intCast(u8, i % 128);
}
lib/std/compress/deflate/dict_decoder.zig
@@ -378,7 +378,7 @@ test "dictionary decoder" {
_ = try want.write(".");
var str = poem;
- for (poem_refs) |ref, i| {
+ for (poem_refs, 0..) |ref, i| {
_ = i;
if (ref.dist == 0) {
try util.writeString(&dd, got, str[0..ref.length]);
lib/std/compress/deflate/huffman_bit_writer.zig
@@ -197,7 +197,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
lit_enc: *hm_code.HuffmanEncoder,
off_enc: *hm_code.HuffmanEncoder,
) void {
- for (self.codegen_freq) |_, i| {
+ for (self.codegen_freq, 0..) |_, i| {
self.codegen_freq[i] = 0;
}
@@ -208,12 +208,12 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
var codegen = self.codegen; // cache
// Copy the concatenated code sizes to codegen. Put a marker at the end.
var cgnl = codegen[0..num_literals];
- for (cgnl) |_, i| {
+ for (cgnl, 0..) |_, i| {
cgnl[i] = @intCast(u8, lit_enc.codes[i].len);
}
cgnl = codegen[num_literals .. num_literals + num_offsets];
- for (cgnl) |_, i| {
+ for (cgnl, 0..) |_, i| {
cgnl[i] = @intCast(u8, off_enc.codes[i].len);
}
codegen[num_literals + num_offsets] = bad_code;
@@ -600,10 +600,10 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
var num_literals: u32 = 0;
var num_offsets: u32 = 0;
- for (self.literal_freq) |_, i| {
+ for (self.literal_freq, 0..) |_, i| {
self.literal_freq[i] = 0;
}
- for (self.offset_freq) |_, i| {
+ for (self.offset_freq, 0..) |_, i| {
self.offset_freq[i] = 0;
}
@@ -691,7 +691,7 @@ pub fn HuffmanBitWriter(comptime WriterType: type) type {
}
// Clear histogram
- for (self.literal_freq) |_, i| {
+ for (self.literal_freq, 0..) |_, i| {
self.literal_freq[i] = 0;
}
lib/std/compress/deflate/huffman_code.zig
@@ -71,7 +71,7 @@ pub const HuffmanEncoder = struct {
// Number of non-zero literals
var count: u32 = 0;
// Set list to be the set of all non-zero literals and their frequencies
- for (freq) |f, i| {
+ for (freq, 0..) |f, i| {
if (f != 0) {
list[count] = LiteralNode{ .literal = @intCast(u16, i), .freq = f };
count += 1;
@@ -86,7 +86,7 @@ pub const HuffmanEncoder = struct {
if (count <= 2) {
// Handle the small cases here, because they are awkward for the general case code. With
// two or fewer literals, everything has bit length 1.
- for (list) |node, i| {
+ for (list, 0..) |node, i| {
// "list" is in order of increasing literal value.
self.codes[node.literal].set(@intCast(u16, i), 1);
}
@@ -103,7 +103,7 @@ pub const HuffmanEncoder = struct {
pub fn bitLength(self: *HuffmanEncoder, freq: []u16) u32 {
var total: u32 = 0;
- for (freq) |f, i| {
+ for (freq, 0..) |f, i| {
if (f != 0) {
total += @intCast(u32, f) * @intCast(u32, self.codes[i].len);
}
@@ -258,7 +258,7 @@ pub const HuffmanEncoder = struct {
var code = @as(u16, 0);
var list = list_arg;
- for (bit_count) |bits, n| {
+ for (bit_count, 0..) |bits, n| {
code <<= 1;
if (n == 0 or bits == 0) {
continue;
@@ -340,7 +340,7 @@ pub fn generateFixedLiteralEncoding(allocator: Allocator) !HuffmanEncoder {
pub fn generateFixedOffsetEncoding(allocator: Allocator) !HuffmanEncoder {
var h = try newHuffmanEncoder(allocator, 30);
var codes = h.codes;
- for (codes) |_, ch| {
+ for (codes, 0..) |_, ch| {
codes[ch] = HuffCode{ .code = bu.bitReverse(u16, @intCast(u16, ch), 5), .len = 5 };
}
return h;
lib/std/compress/lzma/decode/rangecoder.zig
@@ -174,8 +174,8 @@ pub const LenDecoder = struct {
pub fn reset(self: *LenDecoder) void {
self.choice = 0x400;
self.choice2 = 0x400;
- for (self.low_coder) |*t| t.reset();
- for (self.mid_coder) |*t| t.reset();
+ for (&self.low_coder) |*t| t.reset();
+ for (&self.mid_coder) |*t| t.reset();
self.high_coder.reset();
}
};
lib/std/compress/lzma/decode.zig
@@ -143,7 +143,7 @@ pub const DecoderState = struct {
}
self.lzma_props = new_props;
- for (self.pos_slot_decoder) |*t| t.reset();
+ for (&self.pos_slot_decoder) |*t| t.reset();
self.align_decoder.reset();
self.pos_decoders = .{0x400} ** 115;
self.is_match = .{0x400} ** 192;
lib/std/crypto/25519/ed25519.zig
@@ -344,7 +344,7 @@ pub const Ed25519 = struct {
var a_batch: [count]Curve = undefined;
var expected_r_batch: [count]Curve = undefined;
- for (signature_batch) |signature, i| {
+ for (signature_batch, 0..) |signature, i| {
const r = signature.sig.r;
const s = signature.sig.s;
try Curve.scalar.rejectNonCanonical(s);
@@ -360,7 +360,7 @@ pub const Ed25519 = struct {
}
var hram_batch: [count]Curve.scalar.CompressedScalar = undefined;
- for (signature_batch) |signature, i| {
+ for (signature_batch, 0..) |signature, i| {
var h = Sha512.init(.{});
h.update(&r_batch[i]);
h.update(&signature.public_key.bytes);
@@ -371,20 +371,20 @@ pub const Ed25519 = struct {
}
var z_batch: [count]Curve.scalar.CompressedScalar = undefined;
- for (z_batch) |*z| {
+ for (&z_batch) |*z| {
crypto.random.bytes(z[0..16]);
mem.set(u8, z[16..], 0);
}
var zs_sum = Curve.scalar.zero;
- for (z_batch) |z, i| {
+ for (z_batch, 0..) |z, i| {
const zs = Curve.scalar.mul(z, s_batch[i]);
zs_sum = Curve.scalar.add(zs_sum, zs);
}
zs_sum = Curve.scalar.mul8(zs_sum);
var zhs: [count]Curve.scalar.CompressedScalar = undefined;
- for (z_batch) |z, i| {
+ for (z_batch, 0..) |z, i| {
zhs[i] = Curve.scalar.mul(z, hram_batch[i]);
}
lib/std/crypto/25519/edwards25519.zig
@@ -161,7 +161,7 @@ pub const Edwards25519 = struct {
fn slide(s: [32]u8) [2 * 32]i8 {
const reduced = if ((s[s.len - 1] & 0x80) == 0) s else scalar.reduce(s);
var e: [2 * 32]i8 = undefined;
- for (reduced) |x, i| {
+ for (reduced, 0..) |x, i| {
e[i * 2 + 0] = @as(i8, @truncate(u4, x));
e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
}
@@ -308,7 +308,7 @@ pub const Edwards25519 = struct {
var bpc: [9]Edwards25519 = undefined;
mem.copy(Edwards25519, bpc[0..], basePointPc[0..bpc.len]);
- for (ps) |p, i| {
+ for (ps, 0..) |p, i| {
if (p.is_base) {
pcs[i] = bpc;
} else {
@@ -317,13 +317,13 @@ pub const Edwards25519 = struct {
}
}
var es: [count][2 * 32]i8 = undefined;
- for (ss) |s, i| {
+ for (ss, 0..) |s, i| {
es[i] = slide(s);
}
var q = Edwards25519.identityElement;
var pos: usize = 2 * 32 - 1;
while (true) : (pos -= 1) {
- for (es) |e, i| {
+ for (es, 0..) |e, i| {
const slot = e[pos];
if (slot > 0) {
q = q.add(pcs[i][@intCast(usize, slot)]);
lib/std/crypto/aes/aesni.zig
@@ -200,7 +200,7 @@ fn KeySchedule(comptime Aes: type) type {
fn expand128(t1: *Block) Self {
var round_keys: [11]Block = undefined;
const rcs = [_]u8{ 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
- inline for (rcs) |rc, round| {
+ inline for (rcs, 0..) |rc, round| {
round_keys[round] = t1.*;
t1.repr = drc(false, rc, t1.repr, t1.repr);
}
@@ -212,7 +212,7 @@ fn KeySchedule(comptime Aes: type) type {
var round_keys: [15]Block = undefined;
const rcs = [_]u8{ 1, 2, 4, 8, 16, 32 };
round_keys[0] = t1.*;
- inline for (rcs) |rc, round| {
+ inline for (rcs, 0..) |rc, round| {
round_keys[round * 2 + 1] = t2.*;
t1.repr = drc(false, rc, t2.repr, t1.repr);
round_keys[round * 2 + 2] = t1.*;
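`inline for` takes the same counter: the loop is unrolled at compile time, so each round constant above stamps out its own key-expansion step with `round` as a comptime-known index. A reduced sketch with hypothetical values, not the actual AES schedule:

    const std = @import("std");

    test "inline for with counter" {
        const rcs = [_]u8{ 1, 2, 4 };
        var round_keys: [rcs.len]u32 = undefined;
        // Unrolled at compile time; the index capture behaves as in a
        // runtime for, but is comptime-known in each unrolled copy.
        inline for (rcs, 0..) |rc, i| {
            round_keys[i] = rc;
        }
        try std.testing.expectEqual(@as(u32, 4), round_keys[2]);
    }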
lib/std/crypto/aes/armcrypto.zig
@@ -250,7 +250,7 @@ fn KeySchedule(comptime Aes: type) type {
fn expand128(t1: *Block) Self {
var round_keys: [11]Block = undefined;
const rcs = [_]u8{ 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
- inline for (rcs) |rc, round| {
+ inline for (rcs, 0..) |rc, round| {
round_keys[round] = t1.*;
t1.repr = drc128(rc, t1.repr);
}
@@ -262,7 +262,7 @@ fn KeySchedule(comptime Aes: type) type {
var round_keys: [15]Block = undefined;
const rcs = [_]u8{ 1, 2, 4, 8, 16, 32 };
round_keys[0] = t1.*;
- inline for (rcs) |rc, round| {
+ inline for (rcs, 0..) |rc, round| {
round_keys[round * 2 + 1] = t2.*;
t1.repr = drc256(false, rc, t2.repr, t1.repr);
round_keys[round * 2 + 2] = t1.*;
lib/std/crypto/aes/soft.zig
@@ -471,7 +471,7 @@ fn generateSbox(invert: bool) [256]u8 {
fn generateTable(invert: bool) [4][256]u32 {
var table: [4][256]u32 = undefined;
- for (generateSbox(invert)) |value, index| {
+ for (generateSbox(invert), 0..) |value, index| {
table[0][index] = mul(value, if (invert) 0xb else 0x3);
table[0][index] |= math.shl(u32, mul(value, if (invert) 0xd else 0x1), 8);
table[0][index] |= math.shl(u32, mul(value, if (invert) 0x9 else 0x1), 16);
lib/std/crypto/pcurves/p256/scalar.zig
@@ -187,7 +187,7 @@ const ScalarDouble = struct {
var s = s_;
if (endian == .Big) {
- for (s_) |x, i| s[s.len - 1 - i] = x;
+ for (s_, 0..) |x, i| s[s.len - 1 - i] = x;
}
var t = ScalarDouble{ .x1 = undefined, .x2 = Fe.zero, .x3 = Fe.zero };
{
lib/std/crypto/pcurves/p384/scalar.zig
@@ -175,7 +175,7 @@ const ScalarDouble = struct {
var s = s_;
if (endian == .Big) {
- for (s_) |x, i| s[s.len - 1 - i] = x;
+ for (s_, 0..) |x, i| s[s.len - 1 - i] = x;
}
var t = ScalarDouble{ .x1 = undefined, .x2 = Fe.zero };
{
lib/std/crypto/pcurves/secp256k1/scalar.zig
@@ -187,7 +187,7 @@ const ScalarDouble = struct {
var s = s_;
if (endian == .Big) {
- for (s_) |x, i| s[s.len - 1 - i] = x;
+ for (s_, 0..) |x, i| s[s.len - 1 - i] = x;
}
var t = ScalarDouble{ .x1 = undefined, .x2 = Fe.zero, .x3 = Fe.zero };
{
lib/std/crypto/pcurves/common.zig
@@ -65,7 +65,7 @@ pub fn Field(comptime params: FieldParams) type {
/// Swap the endianness of an encoded element.
pub fn orderSwap(s: [encoded_length]u8) [encoded_length]u8 {
var t = s;
- for (s) |x, i| t[t.len - 1 - i] = x;
+ for (s, 0..) |x, i| t[t.len - 1 - i] = x;
return t;
}
lib/std/crypto/pcurves/p256.zig
@@ -321,7 +321,7 @@ pub const P256 = struct {
fn slide(s: [32]u8) [2 * 32 + 1]i8 {
var e: [2 * 32 + 1]i8 = undefined;
- for (s) |x, i| {
+ for (s, 0..) |x, i| {
e[i * 2 + 0] = @as(i8, @truncate(u4, x));
e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
}
lib/std/crypto/pcurves/p384.zig
@@ -321,7 +321,7 @@ pub const P384 = struct {
fn slide(s: [48]u8) [2 * 48 + 1]i8 {
var e: [2 * 48 + 1]i8 = undefined;
- for (s) |x, i| {
+ for (s, 0..) |x, i| {
e[i * 2 + 0] = @as(i8, @truncate(u4, x));
e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
}
lib/std/crypto/pcurves/secp256k1.zig
@@ -349,7 +349,7 @@ pub const Secp256k1 = struct {
fn slide(s: [32]u8) [2 * 32 + 1]i8 {
var e: [2 * 32 + 1]i8 = undefined;
- for (s) |x, i| {
+ for (s, 0..) |x, i| {
e[i * 2 + 0] = @as(i8, @truncate(u4, x));
e[i * 2 + 1] = @as(i8, @truncate(u4, x >> 4));
}
lib/std/crypto/aegis.zig
@@ -170,7 +170,7 @@ pub const Aegis128L = struct {
}
const computed_tag = state.mac(ad.len, m.len);
var acc: u8 = 0;
- for (computed_tag) |_, j| {
+ for (computed_tag, 0..) |_, j| {
acc |= (computed_tag[j] ^ tag[j]);
}
if (acc != 0) {
@@ -339,7 +339,7 @@ pub const Aegis256 = struct {
}
const computed_tag = state.mac(ad.len, m.len);
var acc: u8 = 0;
- for (computed_tag) |_, j| {
+ for (computed_tag, 0..) |_, j| {
acc |= (computed_tag[j] ^ tag[j]);
}
if (acc != 0) {
@@ -562,7 +562,7 @@ test "Aegis256 test vector 3" {
test "Aegis MAC" {
const key = [_]u8{0x00} ** Aegis128LMac.key_length;
var msg: [64]u8 = undefined;
- for (msg) |*m, i| {
+ for (&msg, 0..) |*m, i| {
m.* = @truncate(u8, i);
}
const st_init = Aegis128LMac.init(&key);
lib/std/crypto/aes.zig
@@ -115,11 +115,11 @@ test "expand 128-bit key" {
const dec = Aes128.initDec(key);
var exp: [16]u8 = undefined;
- for (enc.key_schedule.round_keys) |round_key, i| {
+ for (enc.key_schedule.round_keys, 0..) |round_key, i| {
_ = try std.fmt.hexToBytes(&exp, exp_enc[i]);
try testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
}
- for (dec.key_schedule.round_keys) |round_key, i| {
+ for (dec.key_schedule.round_keys, 0..) |round_key, i| {
_ = try std.fmt.hexToBytes(&exp, exp_dec[i]);
try testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
}
@@ -154,11 +154,11 @@ test "expand 256-bit key" {
const dec = Aes256.initDec(key);
var exp: [16]u8 = undefined;
- for (enc.key_schedule.round_keys) |round_key, i| {
+ for (enc.key_schedule.round_keys, 0..) |round_key, i| {
_ = try std.fmt.hexToBytes(&exp, exp_enc[i]);
try testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
}
- for (dec.key_schedule.round_keys) |round_key, i| {
+ for (dec.key_schedule.round_keys, 0..) |round_key, i| {
_ = try std.fmt.hexToBytes(&exp, exp_dec[i]);
try testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
}
lib/std/crypto/aes_gcm.zig
@@ -50,7 +50,7 @@ fn AesGcm(comptime Aes: anytype) type {
mem.writeIntBig(u64, final_block[8..16], m.len * 8);
mac.update(&final_block);
mac.final(tag);
- for (t) |x, i| {
+ for (t, 0..) |x, i| {
tag[i] ^= x;
}
}
@@ -82,12 +82,12 @@ fn AesGcm(comptime Aes: anytype) type {
mac.update(&final_block);
var computed_tag: [Ghash.mac_length]u8 = undefined;
mac.final(&computed_tag);
- for (t) |x, i| {
+ for (t, 0..) |x, i| {
computed_tag[i] ^= x;
}
var acc: u8 = 0;
- for (computed_tag) |_, p| {
+ for (computed_tag, 0..) |_, p| {
acc |= (computed_tag[p] ^ tag[p]);
}
if (acc != 0) {
lib/std/crypto/aes_ocb.zig
@@ -155,7 +155,7 @@ fn AesOcb(comptime Aes: anytype) type {
xorWith(&offset, lx.star);
var pad = offset;
aes_enc_ctx.encrypt(&pad, &pad);
- for (m[i * 16 ..]) |x, j| {
+ for (m[i * 16 ..], 0..) |x, j| {
c[i * 16 + j] = pad[j] ^ x;
}
var e = [_]u8{0} ** 16;
@@ -220,7 +220,7 @@ fn AesOcb(comptime Aes: anytype) type {
xorWith(&offset, lx.star);
var pad = offset;
aes_enc_ctx.encrypt(&pad, &pad);
- for (c[i * 16 ..]) |x, j| {
+ for (c[i * 16 ..], 0..) |x, j| {
m[i * 16 + j] = pad[j] ^ x;
}
var e = [_]u8{0} ** 16;
@@ -242,14 +242,14 @@ fn AesOcb(comptime Aes: anytype) type {
inline fn xorBlocks(x: Block, y: Block) Block {
var z: Block = x;
- for (z) |*v, i| {
+ for (&z, 0..) |*v, i| {
v.* = x[i] ^ y[i];
}
return z;
}
inline fn xorWith(x: *Block, y: Block) void {
- for (x) |*v, i| {
+ for (x, 0..) |*v, i| {
v.* ^= y[i];
}
}
lib/std/crypto/argon2.zig
@@ -188,13 +188,13 @@ fn initBlocks(
mem.writeIntLittle(u32, h0[Blake2b512.digest_length..][0..4], 0);
blake2bLong(&block0, h0);
- for (blocks.items[j + 0]) |*v, i| {
+ for (&blocks.items[j + 0], 0..) |*v, i| {
v.* = mem.readIntLittle(u64, block0[i * 8 ..][0..8]);
}
mem.writeIntLittle(u32, h0[Blake2b512.digest_length..][0..4], 1);
blake2bLong(&block0, h0);
- for (blocks.items[j + 1]) |*v, i| {
+ for (&blocks.items[j + 1], 0..) |*v, i| {
v.* = mem.readIntLittle(u64, block0[i * 8 ..][0..8]);
}
}
@@ -352,7 +352,7 @@ fn processBlockGeneric(
comptime xor: bool,
) void {
var t: [block_length]u64 = undefined;
- for (t) |*v, i| {
+ for (&t, 0..) |*v, i| {
v.* = in1[i] ^ in2[i];
}
var i: usize = 0;
@@ -375,11 +375,11 @@ fn processBlockGeneric(
}
}
if (xor) {
- for (t) |v, j| {
+ for (t, 0..) |v, j| {
out[j] ^= in1[j] ^ in2[j] ^ v;
}
} else {
- for (t) |v, j| {
+ for (t, 0..) |v, j| {
out[j] = in1[j] ^ in2[j] ^ v;
}
}
@@ -428,12 +428,12 @@ fn finalize(
const lanes = memory / threads;
var lane: u24 = 0;
while (lane < threads - 1) : (lane += 1) {
- for (blocks.items[(lane * lanes) + lanes - 1]) |v, i| {
+ for (blocks.items[(lane * lanes) + lanes - 1], 0..) |v, i| {
blocks.items[memory - 1][i] ^= v;
}
}
var block: [1024]u8 = undefined;
- for (blocks.items[memory - 1]) |v, i| {
+ for (blocks.items[memory - 1], 0..) |v, i| {
mem.writeIntLittle(u64, block[i * 8 ..][0..8], v);
}
blake2bLong(out, &block);
lib/std/crypto/ascon.zig
@@ -74,7 +74,7 @@ pub fn State(comptime endian: builtin.Endian) type {
/// Byte-swap the entire state if the architecture doesn't match the required endianness.
pub fn endianSwap(self: *Self) void {
- for (self.st) |*w| {
+ for (&self.st) |*w| {
w.* = mem.toNative(u64, w.*, endian);
}
}
lib/std/crypto/bcrypt.zig
@@ -437,7 +437,7 @@ pub fn bcrypt(
}
var ct: [ct_length]u8 = undefined;
- for (cdata) |c, i| {
+ for (cdata, 0..) |c, i| {
mem.writeIntBig(u32, ct[i * 4 ..][0..4], c);
}
return ct[0..dk_length].*;
@@ -505,7 +505,7 @@ const pbkdf_prf = struct {
// copy out
var out: [32]u8 = undefined;
- for (cdata) |v, i| {
+ for (cdata, 0..) |v, i| {
std.mem.writeIntLittle(u32, out[4 * i ..][0..4], v);
}
lib/std/crypto/blake2.zig
@@ -133,7 +133,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
mem.set(u8, d.buf[d.buf_len..], 0);
d.t += d.buf_len;
d.round(d.buf[0..], true);
- for (d.h) |*x| x.* = mem.nativeToLittle(u32, x.*);
+ for (&d.h) |*x| x.* = mem.nativeToLittle(u32, x.*);
mem.copy(u8, out[0..], @ptrCast(*[digest_length]u8, &d.h));
}
@@ -141,7 +141,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
var m: [16]u32 = undefined;
var v: [16]u32 = undefined;
- for (m) |*r, i| {
+ for (&m, 0..) |*r, i| {
r.* = mem.readIntLittle(u32, b[4 * i ..][0..4]);
}
@@ -180,7 +180,7 @@ pub fn Blake2s(comptime out_bits: usize) type {
}
}
- for (d.h) |*r, i| {
+ for (&d.h, 0..) |*r, i| {
r.* ^= v[i] ^ v[i + 8];
}
}
@@ -568,7 +568,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
mem.set(u8, d.buf[d.buf_len..], 0);
d.t += d.buf_len;
d.round(d.buf[0..], true);
- for (d.h) |*x| x.* = mem.nativeToLittle(u64, x.*);
+ for (&d.h) |*x| x.* = mem.nativeToLittle(u64, x.*);
mem.copy(u8, out[0..], @ptrCast(*[digest_length]u8, &d.h));
}
@@ -576,7 +576,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
var m: [16]u64 = undefined;
var v: [16]u64 = undefined;
- for (m) |*r, i| {
+ for (&m, 0..) |*r, i| {
r.* = mem.readIntLittle(u64, b[8 * i ..][0..8]);
}
@@ -615,7 +615,7 @@ pub fn Blake2b(comptime out_bits: usize) type {
}
}
- for (d.h) |*r, i| {
+ for (&d.h, 0..) |*r, i| {
r.* ^= v[i] ^ v[i + 8];
}
}
lib/std/crypto/blake3.zig
@@ -192,7 +192,7 @@ const CompressGeneric = struct {
for (MSG_SCHEDULE) |schedule| {
round(&state, block_words, schedule);
}
- for (chaining_value) |_, i| {
+ for (chaining_value, 0..) |_, i| {
state[i] ^= state[i + 8];
state[i + 8] ^= chaining_value[i];
}
@@ -211,7 +211,7 @@ fn first8Words(words: [16]u32) [8]u32 {
fn wordsFromLittleEndianBytes(comptime count: usize, bytes: [count * 4]u8) [count]u32 {
var words: [count]u32 = undefined;
- for (&words) |*word, i| {
+ for (&words, 0..) |*word, i| {
word.* = mem.readIntSliceLittle(u32, bytes[4 * i ..]);
}
return words;
@@ -658,7 +658,7 @@ fn testBlake3(hasher: *Blake3, input_len: usize, expected_hex: [262]u8) !void {
// Setup input pattern
var input_pattern: [251]u8 = undefined;
- for (input_pattern) |*e, i| e.* = @truncate(u8, i);
+ for (&input_pattern, 0..) |*e, i| e.* = @truncate(u8, i);
// Write repeating input pattern to hasher
var input_counter = input_len;
lib/std/crypto/Certificate.zig
@@ -1092,7 +1092,7 @@ pub const rsa = struct {
if (exponent_elem.identifier.tag != .integer) return error.CertificateFieldHasWrongDataType;
// Skip over meaningless zeroes in the modulus.
const modulus_raw = pub_key[modulus_elem.slice.start..modulus_elem.slice.end];
- const modulus_offset = for (modulus_raw) |byte, i| {
+ const modulus_offset = for (modulus_raw, 0..) |byte, i| {
if (byte != 0) break i;
} else modulus_raw.len;
return .{
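The Certificate.zig hunk above also shows `for` as an expression: `break i` yields a value, and the `else` arm supplies the result when the loop runs to completion, exactly as before the syntax change. A reduced sketch with hypothetical data:

    const std = @import("std");

    test "for as an expression" {
        const raw = [_]u8{ 0, 0, 7, 9 };
        // Index of the first nonzero byte, or raw.len when all are zero.
        const offset = for (raw, 0..) |byte, i| {
            if (byte != 0) break i;
        } else raw.len;
        try std.testing.expectEqual(@as(usize, 2), offset);
    }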
lib/std/crypto/chacha20.zig
@@ -197,7 +197,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize) type {
fn hchacha20(input: [16]u8, key: [32]u8) [32]u8 {
var c: [4]u32 = undefined;
- for (c) |_, i| {
+ for (c, 0..) |_, i| {
c[i] = mem.readIntLittle(u32, input[4 * i ..][0..4]);
}
const ctx = initContext(keyToWords(key), c);
@@ -338,7 +338,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
fn hchacha20(input: [16]u8, key: [32]u8) [32]u8 {
var c: [4]u32 = undefined;
- for (c) |_, i| {
+ for (c, 0..) |_, i| {
c[i] = mem.readIntLittle(u32, input[4 * i ..][0..4]);
}
const ctx = initContext(keyToWords(key), c);
@@ -543,7 +543,7 @@ fn ChaChaPoly1305(comptime rounds_nb: usize) type {
mac.final(computedTag[0..]);
var acc: u8 = 0;
- for (computedTag) |_, i| {
+ for (computedTag, 0..) |_, i| {
acc |= computedTag[i] ^ tag[i];
}
if (acc != 0) {
lib/std/crypto/cmac.zig
@@ -46,19 +46,19 @@ pub fn Cmac(comptime BlockCipher: type) type {
const left = block_length - self.pos;
var m = msg;
if (m.len > left) {
- for (self.buf[self.pos..]) |*b, i| b.* ^= m[i];
+ for (self.buf[self.pos..], 0..) |*b, i| b.* ^= m[i];
m = m[left..];
self.cipher_ctx.encrypt(&self.buf, &self.buf);
self.pos = 0;
}
while (m.len > block_length) {
- for (self.buf[0..block_length]) |*b, i| b.* ^= m[i];
+ for (self.buf[0..block_length], 0..) |*b, i| b.* ^= m[i];
m = m[block_length..];
self.cipher_ctx.encrypt(&self.buf, &self.buf);
self.pos = 0;
}
if (m.len > 0) {
- for (self.buf[self.pos..][0..m.len]) |*b, i| b.* ^= m[i];
+ for (self.buf[self.pos..][0..m.len], 0..) |*b, i| b.* ^= m[i];
self.pos += m.len;
}
}
@@ -69,7 +69,7 @@ pub fn Cmac(comptime BlockCipher: type) type {
mac = self.k2;
mac[self.pos] ^= 0x80;
}
- for (mac) |*b, i| b.* ^= self.buf[i];
+ for (&mac, 0..) |*b, i| b.* ^= self.buf[i];
self.cipher_ctx.encrypt(out, &mac);
}
lib/std/crypto/ghash_polyval.zig
@@ -320,7 +320,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
if (st.leftover > 0) {
const want = math.min(block_length - st.leftover, mb.len);
const mc = mb[0..want];
- for (mc) |x, i| {
+ for (mc, 0..) |x, i| {
st.buf[st.leftover + i] = x;
}
mb = mb[want..];
@@ -337,7 +337,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
mb = mb[want..];
}
if (mb.len > 0) {
- for (mb) |x, i| {
+ for (mb, 0..) |x, i| {
st.buf[st.leftover + i] = x;
}
st.leftover += mb.len;
lib/std/crypto/gimli.zig
@@ -45,7 +45,7 @@ pub const State = struct {
}
inline fn endianSwap(self: *Self) void {
- for (self.data) |*w| {
+ for (&self.data) |*w| {
w.* = mem.littleToNative(u32, w.*);
}
}
@@ -228,7 +228,7 @@ pub const Hash = struct {
while (in.len > 0) {
const left = State.RATE - self.buf_off;
const ps = math.min(in.len, left);
- for (buf[self.buf_off .. self.buf_off + ps]) |*p, i| {
+ for (buf[self.buf_off .. self.buf_off + ps], 0..) |*p, i| {
p.* ^= in[i];
}
self.buf_off += ps;
@@ -329,12 +329,12 @@ pub const Aead = struct {
// exactly one final non-full block, in the same way as Gimli-Hash.
var data = ad;
while (data.len >= State.RATE) : (data = data[State.RATE..]) {
- for (buf[0..State.RATE]) |*p, i| {
+ for (buf[0..State.RATE], 0..) |*p, i| {
p.* ^= data[i];
}
state.permute();
}
- for (buf[0..data.len]) |*p, i| {
+ for (buf[0..data.len], 0..) |*p, i| {
p.* ^= data[i];
}
@@ -371,13 +371,13 @@ pub const Aead = struct {
in = in[State.RATE..];
out = out[State.RATE..];
}) {
- for (in[0..State.RATE]) |v, i| {
+ for (in[0..State.RATE], 0..) |v, i| {
buf[i] ^= v;
}
mem.copy(u8, out[0..State.RATE], buf[0..State.RATE]);
state.permute();
}
- for (in[0..]) |v, i| {
+ for (in[0..], 0..) |v, i| {
buf[i] ^= v;
out[i] = buf[i];
}
@@ -414,13 +414,13 @@ pub const Aead = struct {
out = out[State.RATE..];
}) {
const d = in[0..State.RATE].*;
- for (d) |v, i| {
+ for (d, 0..) |v, i| {
out[i] = buf[i] ^ v;
}
mem.copy(u8, buf[0..State.RATE], d[0..State.RATE]);
state.permute();
}
- for (buf[0..in.len]) |*p, i| {
+ for (buf[0..in.len], 0..) |*p, i| {
const d = in[i];
out[i] = p.* ^ d;
p.* = d;
lib/std/crypto/hmac.zig
@@ -46,11 +46,11 @@ pub fn Hmac(comptime Hash: type) type {
mem.copy(u8, scratch[0..], key);
}
- for (ctx.o_key_pad) |*b, i| {
+ for (&ctx.o_key_pad, 0..) |*b, i| {
b.* = scratch[i] ^ 0x5c;
}
- for (i_key_pad) |*b, i| {
+ for (&i_key_pad, 0..) |*b, i| {
b.* = scratch[i] ^ 0x36;
}
lib/std/crypto/md5.zig
@@ -110,7 +110,7 @@ pub const Md5 = struct {
d.round(d.buf[0..]);
- for (d.s) |s, j| {
+ for (d.s, 0..) |s, j| {
mem.writeIntLittle(u32, out[4 * j ..][0..4], s);
}
}
lib/std/crypto/pbkdf2.zig
@@ -138,7 +138,7 @@ pub fn pbkdf2(dk: []u8, password: []const u8, salt: []const u8, rounds: u32, com
mem.copy(u8, prev_block[0..], new_block[0..]);
// F (P, S, c, i) = U_1 \xor U_2 \xor ... \xor U_c
- for (dk_block) |_, j| {
+ for (dk_block, 0..) |_, j| {
dk_block[j] ^= new_block[j];
}
}
lib/std/crypto/poly1305.zig
@@ -82,7 +82,7 @@ pub const Poly1305 = struct {
if (st.leftover > 0) {
const want = std.math.min(block_length - st.leftover, mb.len);
const mc = mb[0..want];
- for (mc) |x, i| {
+ for (mc, 0..) |x, i| {
st.buf[st.leftover + i] = x;
}
mb = mb[want..];
@@ -103,7 +103,7 @@ pub const Poly1305 = struct {
// store leftover
if (mb.len > 0) {
- for (mb) |x, i| {
+ for (mb, 0..) |x, i| {
st.buf[st.leftover + i] = x;
}
st.leftover += mb.len;
lib/std/crypto/salsa20.zig
@@ -157,7 +157,7 @@ fn SalsaVecImpl(comptime rounds: comptime_int) type {
fn hsalsa(input: [16]u8, key: [32]u8) [32]u8 {
var c: [4]u32 = undefined;
- for (c) |_, i| {
+ for (c, 0..) |_, i| {
c[i] = mem.readIntLittle(u32, input[4 * i ..][0..4]);
}
const ctx = initContext(keyToWords(key), c);
@@ -240,7 +240,7 @@ fn SalsaNonVecImpl(comptime rounds: comptime_int) type {
}
fn hashToBytes(out: *[64]u8, x: BlockVec) void {
- for (x) |w, i| {
+ for (x, 0..) |w, i| {
mem.writeIntLittle(u32, out[i * 4 ..][0..4], w);
}
}
@@ -282,7 +282,7 @@ fn SalsaNonVecImpl(comptime rounds: comptime_int) type {
fn hsalsa(input: [16]u8, key: [32]u8) [32]u8 {
var c: [4]u32 = undefined;
- for (c) |_, i| {
+ for (c, 0..) |_, i| {
c[i] = mem.readIntLittle(u32, input[4 * i ..][0..4]);
}
const ctx = initContext(keyToWords(key), c);
@@ -413,7 +413,7 @@ pub const XSalsa20Poly1305 = struct {
var computedTag: [tag_length]u8 = undefined;
mac.final(&computedTag);
var acc: u8 = 0;
- for (computedTag) |_, i| {
+ for (computedTag, 0..) |_, i| {
acc |= computedTag[i] ^ tag[i];
}
if (acc != 0) {
lib/std/crypto/scrypt.zig
@@ -31,7 +31,7 @@ fn blockCopy(dst: []align(16) u32, src: []align(16) const u32, n: usize) void {
}
fn blockXor(dst: []align(16) u32, src: []align(16) const u32, n: usize) void {
- for (src[0 .. n * 16]) |v, i| {
+ for (src[0 .. n * 16], 0..) |v, i| {
dst[i] ^= v;
}
}
@@ -90,7 +90,7 @@ fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16)
var x = @alignCast(16, xy[0 .. 32 * r]);
var y = @alignCast(16, xy[32 * r ..]);
- for (x) |*v1, j| {
+ for (x, 0..) |*v1, j| {
v1.* = mem.readIntSliceLittle(u32, b[4 * j ..]);
}
@@ -115,7 +115,7 @@ fn smix(b: []align(16) u8, r: u30, n: usize, v: []align(16) u32, xy: []align(16)
blockMix(&tmp, y, x, r);
}
- for (x) |v1, j| {
+ for (x, 0..) |v1, j| {
mem.writeIntLittle(u32, b[4 * j ..][0..4], v1);
}
}
@@ -350,7 +350,7 @@ const crypt_format = struct {
fn intDecode(comptime T: type, src: *const [(@bitSizeOf(T) + 5) / 6]u8) !T {
var v: T = 0;
- for (src) |x, i| {
+ for (src, 0..) |x, i| {
const vi = mem.indexOfScalar(u8, &map64, x) orelse return EncodingError.InvalidEncoding;
v |= @intCast(T, vi) << @intCast(math.Log2Int(T), i * 6);
}
@@ -365,10 +365,10 @@ const crypt_format = struct {
}
const leftover = src[i * 4 ..];
var v: u24 = 0;
- for (leftover) |_, j| {
+ for (leftover, 0..) |_, j| {
v |= @as(u24, try intDecode(u6, leftover[j..][0..1])) << @intCast(u5, j * 6);
}
- for (dst[i * 3 ..]) |*x, j| {
+ for (dst[i * 3 ..], 0..) |*x, j| {
x.* = @truncate(u8, v >> @intCast(u5, j * 8));
}
}
@@ -381,7 +381,7 @@ const crypt_format = struct {
}
const leftover = src[i * 3 ..];
var v: u24 = 0;
- for (leftover) |x, j| {
+ for (leftover, 0..) |x, j| {
v |= @as(u24, x) << @intCast(u5, j * 8);
}
intEncode(dst[i * 4 ..], v);
lib/std/crypto/sha1.zig
@@ -105,7 +105,7 @@ pub const Sha1 = struct {
d.round(d.buf[0..]);
- for (d.s) |s, j| {
+ for (d.s, 0..) |s, j| {
mem.writeIntBig(u32, out[4 * j ..][0..4], s);
}
}
lib/std/crypto/sha2.zig
@@ -175,7 +175,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
// May truncate for possible 224 output
const rr = d.s[0 .. params.digest_bits / 32];
- for (rr) |s, j| {
+ for (rr, 0..) |s, j| {
mem.writeIntBig(u32, out[4 * j ..][0..4], s);
}
}
@@ -199,7 +199,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
fn round(d: *Self, b: *const [64]u8) void {
var s: [64]u32 align(16) = undefined;
- for (@ptrCast(*align(1) const [16]u32, b)) |*elem, i| {
+ for (@ptrCast(*align(1) const [16]u32, b), 0..) |*elem, i| {
s[i] = mem.readIntBig(u32, mem.asBytes(elem));
}
@@ -665,7 +665,7 @@ fn Sha2x64(comptime params: Sha2Params64) type {
// May truncate for possible 384 output
const rr = d.s[0 .. params.digest_bits / 64];
- for (rr) |s, j| {
+ for (rr, 0..) |s, j| {
mem.writeIntBig(u64, out[8 * j ..][0..8], s);
}
}
lib/std/crypto/sha3.zig
@@ -43,7 +43,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
// absorb
while (len >= rate) {
- for (d.s[offset .. offset + rate]) |*r, i|
+ for (d.s[offset .. offset + rate], 0..) |*r, i|
r.* ^= b[ip..][i];
keccakF(1600, &d.s);
@@ -54,7 +54,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
offset = 0;
}
- for (d.s[offset .. offset + len]) |*r, i|
+ for (d.s[offset .. offset + len], 0..) |*r, i|
r.* ^= b[ip..][i];
d.offset = offset + len;
@@ -126,7 +126,7 @@ fn keccakF(comptime F: usize, d: *[F / 8]u8) void {
var t = [_]u64{0} ** 1;
var c = [_]u64{0} ** 5;
- for (s) |*r, i| {
+ for (&s, 0..) |*r, i| {
r.* = mem.readIntLittle(u64, d[8 * i ..][0..8]);
}
@@ -171,7 +171,7 @@ fn keccakF(comptime F: usize, d: *[F / 8]u8) void {
s[0] ^= round;
}
- for (s) |r, i| {
+ for (s, 0..) |r, i| {
mem.writeIntLittle(u64, d[8 * i ..][0..8], r);
}
}
lib/std/crypto/siphash.zig
@@ -339,7 +339,7 @@ test "siphash64-2-4 sanity" {
const siphash = SipHash64(2, 4);
var buffer: [64]u8 = undefined;
- for (vectors) |vector, i| {
+ for (vectors, 0..) |vector, i| {
buffer[i] = @intCast(u8, i);
var out: [siphash.mac_length]u8 = undefined;
@@ -419,7 +419,7 @@ test "siphash128-2-4 sanity" {
const siphash = SipHash128(2, 4);
var buffer: [64]u8 = undefined;
- for (vectors) |vector, i| {
+ for (vectors, 0..) |vector, i| {
buffer[i] = @intCast(u8, i);
var out: [siphash.mac_length]u8 = undefined;
@@ -430,7 +430,7 @@ test "siphash128-2-4 sanity" {
test "iterative non-divisible update" {
var buf: [1024]u8 = undefined;
- for (buf) |*e, i| {
+ for (&buf, 0..) |*e, i| {
e.* = @truncate(u8, i);
}
lib/std/crypto/test.zig
@@ -13,7 +13,7 @@ pub fn assertEqualHash(comptime Hasher: anytype, comptime expected_hex: *const [
// Assert `expected` == hex(`input`) where `input` is a bytestring
pub fn assertEqual(comptime expected_hex: [:0]const u8, input: []const u8) !void {
var expected_bytes: [expected_hex.len / 2]u8 = undefined;
- for (expected_bytes) |*r, i| {
+ for (&expected_bytes, 0..) |*r, i| {
r.* = fmt.parseInt(u8, expected_hex[2 * i .. 2 * i + 2], 16) catch unreachable;
}
lib/std/crypto/tls.zig
@@ -344,7 +344,7 @@ pub inline fn array(comptime elem_size: comptime_int, bytes: anytype) [2 + bytes
pub inline fn enum_array(comptime E: type, comptime tags: []const E) [2 + @sizeOf(E) * tags.len]u8 {
assert(@sizeOf(E) == 2);
var result: [tags.len * 2]u8 = undefined;
- for (tags) |elem, i| {
+ for (tags, 0..) |elem, i| {
result[i * 2] = @truncate(u8, @enumToInt(elem) >> 8);
result[i * 2 + 1] = @truncate(u8, @enumToInt(elem));
}
lib/std/crypto/utils.zig
@@ -18,7 +18,7 @@ pub fn timingSafeEql(comptime T: type, a: T, b: T) bool {
@compileError("Elements to be compared must be integers");
}
var acc = @as(C, 0);
- for (a) |x, i| {
+ for (a, 0..) |x, i| {
acc |= x ^ b[i];
}
const s = @typeInfo(C).Int.bits;
@@ -64,7 +64,7 @@ pub fn timingSafeCompare(comptime T: type, a: []const T, b: []const T, endian: E
eq &= @truncate(T, (@as(Cext, (x2 ^ x1)) -% 1) >> bits);
}
} else {
- for (a) |x1, i| {
+ for (a, 0..) |x1, i| {
const x2 = b[i];
gt |= @truncate(T, (@as(Cext, x2) -% @as(Cext, x1)) >> bits) & eq;
eq &= @truncate(T, (@as(Cext, (x2 ^ x1)) -% 1) >> bits);
lib/std/crypto/xoodoo.zig
@@ -66,7 +66,7 @@ pub const State = struct {
/// XOR bytes into the beginning of the state.
pub fn addBytes(self: *State, bytes: []const u8) void {
self.endianSwap();
- for (self.asBytes()[0..bytes.len]) |*byte, i| {
+ for (self.asBytes()[0..bytes.len], 0..) |*byte, i| {
byte.* ^= bytes[i];
}
self.endianSwap();
lib/std/event/loop.zig
@@ -278,7 +278,7 @@ pub const Loop = struct {
const empty_kevs = &[0]os.Kevent{};
- for (self.eventfd_resume_nodes) |*eventfd_node, i| {
+ for (self.eventfd_resume_nodes, 0..) |*eventfd_node, i| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
.data = ResumeNode.EventFd{
.base = ResumeNode{
@@ -343,7 +343,7 @@ pub const Loop = struct {
const empty_kevs = &[0]os.Kevent{};
- for (self.eventfd_resume_nodes) |*eventfd_node, i| {
+ for (self.eventfd_resume_nodes, 0..) |*eventfd_node, i| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
.data = ResumeNode.EventFd{
.base = ResumeNode{
lib/std/fmt/parse_float/decimal.zig
@@ -475,7 +475,7 @@ pub fn Decimal(comptime T: type) type {
const x = pow2_to_pow5_table[shift];
// Compare leading digits of current to check if lexicographically less than cutoff.
- for (x.cutoff) |p5, i| {
+ for (x.cutoff, 0..) |p5, i| {
if (i >= self.num_digits) {
return x.delta - 1;
} else if (self.digits[i] == p5 - '0') { // digits are stored as integers
lib/std/fs/path.zig
@@ -48,7 +48,7 @@ fn joinSepMaybeZ(allocator: Allocator, separator: u8, comptime sepPredicate: fn
// Find first non-empty path index.
const first_path_index = blk: {
- for (paths) |path, index| {
+ for (paths, 0..) |path, index| {
if (path.len == 0) continue else break :blk index;
}
@@ -476,7 +476,7 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
var drive_kind = WindowsPath.Kind.None;
var have_abs_path = false;
var first_index: usize = 0;
- for (paths) |p, i| {
+ for (paths, 0..) |p, i| {
const parsed = windowsParsePath(p);
if (parsed.is_abs) {
have_abs_path = true;
@@ -504,7 +504,7 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
first_index = 0;
var correct_disk_designator = false;
- for (paths) |p, i| {
+ for (paths, 0..) |p, i| {
const parsed = windowsParsePath(p);
if (parsed.kind != WindowsPath.Kind.None) {
if (parsed.kind == drive_kind) {
lib/std/fs/wasi.zig
@@ -15,7 +15,7 @@ pub const Preopens = struct {
names: []const []const u8,
pub fn find(p: Preopens, name: []const u8) ?os.fd_t {
- for (p.names) |elem_name, i| {
+ for (p.names, 0..) |elem_name, i| {
if (mem.eql(u8, elem_name, name)) {
return @intCast(os.fd_t, i);
}
lib/std/hash/crc.zig
@@ -35,7 +35,7 @@ pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type {
@as(I, algorithm.polynomial) << (@bitSizeOf(I) - @bitSizeOf(W));
var table: [256]I = undefined;
- for (table) |*e, i| {
+ for (&table, 0..) |*e, i| {
var crc: I = i;
if (algorithm.reflect_input) {
var j: usize = 0;
@@ -124,7 +124,7 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type {
@setEvalBranchQuota(20000);
var tables: [8][256]u32 = undefined;
- for (tables[0]) |*e, i| {
+ for (&tables[0], 0..) |*e, i| {
var crc = @intCast(u32, i);
var j: usize = 0;
while (j < 8) : (j += 1) {
@@ -217,7 +217,7 @@ pub fn Crc32SmallWithPoly(comptime poly: Polynomial) type {
const lookup_table = block: {
var table: [16]u32 = undefined;
- for (table) |*e, i| {
+ for (&table, 0..) |*e, i| {
var crc = @intCast(u32, i * 16);
var j: usize = 0;
while (j < 8) : (j += 1) {
lib/std/hash/wyhash.zig
@@ -207,7 +207,7 @@ test "test vectors streaming" {
test "iterative non-divisible update" {
var buf: [8192]u8 = undefined;
- for (buf) |*e, i| {
+ for (&buf, 0..) |*e, i| {
e.* = @truncate(u8, i);
}
lib/std/heap/general_purpose_allocator.zig
@@ -349,7 +349,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
/// Emits log messages for leaks and then returns whether there were any leaks.
pub fn detectLeaks(self: *Self) bool {
var leaks = false;
- for (self.buckets) |optional_bucket, bucket_i| {
+ for (self.buckets, 0..) |optional_bucket, bucket_i| {
const first_bucket = optional_bucket orelse continue;
const size_class = @as(usize, 1) << @intCast(math.Log2Int(usize), bucket_i);
const used_bits_count = usedBitsCount(size_class);
lib/std/heap/WasmPageAllocator.zig
@@ -62,7 +62,7 @@ const FreeBlock = struct {
fn useRecycled(self: FreeBlock, num_pages: usize, log2_align: u8) usize {
@setCold(true);
- for (self.data) |segment, i| {
+ for (self.data, 0..) |segment, i| {
const spills_into_next = @bitCast(i128, segment) < 0;
const has_enough_bits = @popCount(segment) >= num_pages;
lib/std/json/test.zig
@@ -2717,7 +2717,7 @@ test "string copy option" {
const copy_addr = &obj_copy.get("noescape").?.String[0];
var found_nocopy = false;
- for (input) |_, index| {
+ for (input, 0..) |_, index| {
try testing.expect(copy_addr != &input[index]);
if (nocopy_addr == &input[index]) {
found_nocopy = true;
lib/std/math/big/int.zig
@@ -1478,11 +1478,11 @@ pub const Mutable = struct {
// const x_trailing = std.mem.indexOfScalar(Limb, x.limbs[0..x.len], 0).?;
// const y_trailing = std.mem.indexOfScalar(Limb, y.limbs[0..y.len], 0).?;
- const x_trailing = for (x.limbs[0..x.len]) |xi, i| {
+ const x_trailing = for (x.limbs[0..x.len], 0..) |xi, i| {
if (xi != 0) break i;
} else unreachable;
- const y_trailing = for (y.limbs[0..y.len]) |yi, i| {
+ const y_trailing = for (y.limbs[0..y.len], 0..) |yi, i| {
if (yi != 0) break i;
} else unreachable;
@@ -2108,7 +2108,7 @@ pub const Const = struct {
if (@sizeOf(UT) <= @sizeOf(Limb)) {
r = @intCast(UT, self.limbs[0]);
} else {
- for (self.limbs[0..self.limbs.len]) |_, ri| {
+ for (self.limbs[0..self.limbs.len], 0..) |_, ri| {
const limb = self.limbs[self.limbs.len - ri - 1];
r <<= limb_bits;
r |= limb;
@@ -3594,7 +3594,7 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
assert(quo.len >= a.len);
rem.* = 0;
- for (a) |_, ri| {
+ for (a, 0..) |_, ri| {
const i = a.len - ri - 1;
const pdiv = ((@as(DoubleLimb, rem.*) << limb_bits) | a[i]);
@@ -3620,7 +3620,7 @@ fn lldiv0p5(quo: []Limb, rem: *Limb, a: []const Limb, b: HalfLimb) void {
assert(quo.len >= a.len);
rem.* = 0;
- for (a) |_, ri| {
+ for (a, 0..) |_, ri| {
const i = a.len - ri - 1;
const ai_high = a[i] >> half_limb_bits;
const ai_low = a[i] & ((1 << half_limb_bits) - 1);
@@ -4028,7 +4028,7 @@ fn llsquareBasecase(r: []Limb, x: []const Limb) void {
// - Each mixed-product term appears twice for each column,
// - Squares are always in the 2k (0 <= k < N) column
- for (x_norm) |v, i| {
+ for (x_norm, 0..) |v, i| {
// Accumulate all the x[i]*x[j] (with x!=j) products
const overflow = llmulLimb(.add, r[2 * i + 1 ..], x_norm[i + 1 ..], v);
assert(!overflow);
@@ -4037,7 +4037,7 @@ fn llsquareBasecase(r: []Limb, x: []const Limb) void {
// Each product appears twice, multiply by 2
llshl(r, r[0 .. 2 * x_norm.len], 1);
- for (x_norm) |v, i| {
+ for (x_norm, 0..) |v, i| {
// Compute and add the squares
const overflow = llmulLimb(.add, r[2 * i ..], x[i .. i + 1], v);
assert(!overflow);
lib/std/math/big/rational.zig
@@ -70,7 +70,7 @@ pub const Rational = struct {
start += 1;
}
- for (str) |c, i| {
+ for (str, 0..) |c, i| {
switch (state) {
State.Integer => {
switch (c) {
lib/std/meta/trailer_flags.zig
@@ -21,7 +21,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
pub const ActiveFields = std.enums.EnumFieldStruct(FieldEnum, bool, false);
pub const FieldValues = blk: {
comptime var fields: [bit_count]Type.StructField = undefined;
- inline for (@typeInfo(Fields).Struct.fields) |struct_field, i| {
+ inline for (@typeInfo(Fields).Struct.fields, 0..) |struct_field, i| {
fields[i] = Type.StructField{
.name = struct_field.name,
.type = ?struct_field.type,
@@ -61,7 +61,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
/// `fields` is a boolean struct where each active field is set to `true`
pub fn init(fields: ActiveFields) Self {
var self: Self = .{ .bits = 0 };
- inline for (@typeInfo(Fields).Struct.fields) |field, i| {
+ inline for (@typeInfo(Fields).Struct.fields, 0..) |field, i| {
if (@field(fields, field.name))
self.bits |= 1 << i;
}
@@ -70,7 +70,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
/// `fields` is a struct with each field set to an optional value
pub fn setMany(self: Self, p: [*]align(@alignOf(Fields)) u8, fields: FieldValues) void {
- inline for (@typeInfo(Fields).Struct.fields) |field, i| {
+ inline for (@typeInfo(Fields).Struct.fields, 0..) |field, i| {
if (@field(fields, field.name)) |value|
self.set(p, @intToEnum(FieldEnum, i), value);
}
@@ -101,7 +101,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
pub fn offset(self: Self, comptime field: FieldEnum) usize {
var off: usize = 0;
- inline for (@typeInfo(Fields).Struct.fields) |field_info, i| {
+ inline for (@typeInfo(Fields).Struct.fields, 0..) |field_info, i| {
const active = (self.bits & (1 << i)) != 0;
if (i == @enumToInt(field)) {
assert(active);
@@ -119,7 +119,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
pub fn sizeInBytes(self: Self) usize {
var off: usize = 0;
- inline for (@typeInfo(Fields).Struct.fields) |field, i| {
+ inline for (@typeInfo(Fields).Struct.fields, 0..) |field, i| {
if (@sizeOf(field.type) == 0)
continue;
if ((self.bits & (1 << i)) != 0) {
lib/std/net/test.zig
@@ -30,7 +30,7 @@ test "parse and render IPv6 addresses" {
"ff01::fb",
"::ffff:123.5.123.5",
};
- for (ips) |ip, i| {
+ for (ips, 0..) |ip, i| {
var addr = net.Address.parseIp6(ip, 0) catch unreachable;
var newIp = std.fmt.bufPrint(buffer[0..], "{}", .{addr}) catch unreachable;
try std.testing.expect(std.mem.eql(u8, printed[i], newIp[1 .. newIp.len - 3]));
lib/std/os/uefi/protocols/device_path_protocol.zig
@@ -61,7 +61,7 @@ pub const DevicePathProtocol = extern struct {
// The same as new.getPath(), but not const as we're filling it in.
var ptr = @ptrCast([*:0]align(1) u16, @ptrCast([*]u8, new) + @sizeOf(MediaDevicePath.FilePathDevicePath));
- for (path) |s, i|
+ for (path, 0..) |s, i|
ptr[i] = s;
ptr[path.len] = 0;
lib/std/os/linux.zig
@@ -1245,7 +1245,7 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize
// see https://www.openwall.com/lists/musl/2014/06/07/5
const kvlen = if (vlen > IOV_MAX) IOV_MAX else vlen; // matches kernel
var next_unsent: usize = 0;
- for (msgvec[0..kvlen]) |*msg, i| {
+ for (msgvec[0..kvlen], 0..) |*msg, i| {
var size: i32 = 0;
const msg_iovlen = @intCast(usize, msg.msg_hdr.msg_iovlen); // kernel side this is treated as unsigned
for (msg.msg_hdr.msg_iov[0..msg_iovlen]) |iov| {
lib/std/os/windows.zig
@@ -2858,7 +2858,7 @@ pub const GUID = extern struct {
assert(s[18] == '-');
assert(s[23] == '-');
var bytes: [16]u8 = undefined;
- for (hex_offsets) |hex_offset, i| {
+ for (hex_offsets, 0..) |hex_offset, i| {
bytes[i] = (try std.fmt.charToDigit(s[hex_offset], 16)) << 4 |
try std.fmt.charToDigit(s[hex_offset + 1], 16);
}
lib/std/rand/ziggurat.zig
@@ -83,13 +83,13 @@ fn ZigTableGen(
tables.x[0] = v / f(r);
tables.x[1] = r;
- for (tables.x[2..256]) |*entry, i| {
+ for (tables.x[2..256], 0..) |*entry, i| {
const last = tables.x[2 + i - 1];
entry.* = f_inv(v / last + f(last));
}
tables.x[256] = 0;
- for (tables.f[0..]) |*entry, i| {
+ for (tables.f[0..], 0..) |*entry, i| {
entry.* = f(tables.x[i]);
}
lib/std/target/aarch64.zig
@@ -1269,7 +1269,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/amdgpu.zig
@@ -1033,7 +1033,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/arc.zig
@@ -23,7 +23,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/arm.zig
@@ -1631,7 +1631,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/avr.zig
@@ -329,7 +329,7 @@ pub const all_features = blk: {
}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/bpf.zig
@@ -35,7 +35,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/csky.zig
@@ -416,7 +416,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/hexagon.zig
@@ -268,7 +268,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/m68k.zig
@@ -153,7 +153,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/mips.zig
@@ -387,7 +387,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/msp430.zig
@@ -41,7 +41,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/nvptx.zig
@@ -221,7 +221,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/powerpc.zig
@@ -592,7 +592,7 @@ pub const all_features = blk: {
}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/riscv.zig
@@ -660,7 +660,7 @@ pub const all_features = blk: {
}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/s390x.zig
@@ -263,7 +263,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/sparc.zig
@@ -131,7 +131,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/spirv.zig
@@ -2075,7 +2075,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/ve.zig
@@ -23,7 +23,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/wasm.zig
@@ -89,7 +89,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
lib/std/target/x86.zig
@@ -1045,7 +1045,7 @@ pub const all_features = blk: {
}),
};
const ti = @typeInfo(Feature);
- for (result) |*elem, i| {
+ for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
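
Note: every target/*.zig hunk above is the same two-part change: the loop gains an explicit 0.. counter, and result is now taken by address so the *elem capture can mutate the array in place. A minimal self-contained sketch of the pattern (the Elem struct and test values are illustrative, not taken from the commit):

const std = @import("std");

test "index capture over a mutable array" {
    const Elem = struct {
        index: usize,
        name: []const u8,
    };
    const names = [_][]const u8{ "a", "b" };
    var result: [2]Elem = undefined;
    // &result makes the array addressable so *elem writes into it;
    // 0.. supplies the index that the old syntax produced implicitly.
    for (&result, 0..) |*elem, i| {
        elem.index = i;
        elem.name = names[i];
    }
    try std.testing.expect(result[1].index == 1);
    try std.testing.expectEqualStrings("b", result[1].name);
}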
lib/std/Thread/Condition.zig
@@ -341,7 +341,7 @@ test "Condition - wait and signal" {
};
var multi_wait = MultiWait{};
- for (multi_wait.threads) |*t| {
+ for (&multi_wait.threads) |*t| {
t.* = try std.Thread.spawn(.{}, MultiWait.run, .{&multi_wait});
}
@@ -389,7 +389,7 @@ test "Condition - signal" {
};
var signal_test = SignalTest{};
- for (signal_test.threads) |*t| {
+ for (&signal_test.threads) |*t| {
t.* = try std.Thread.spawn(.{}, SignalTest.run, .{&signal_test});
}
@@ -457,7 +457,7 @@ test "Condition - multi signal" {
var threads = [_]std.Thread{undefined} ** num_threads;
// Create a circle of paddles which hit each other
- for (threads) |*t, i| {
+ for (&threads, 0..) |*t, i| {
const paddle = &paddles[i];
const hit_to = &paddles[(i + 1) % paddles.len];
t.* = try std.Thread.spawn(.{}, Paddle.run, .{ paddle, hit_to });
@@ -468,7 +468,7 @@ test "Condition - multi signal" {
for (threads) |t| t.join();
// The first paddle will be hit one last time by the last paddle.
- for (paddles) |p, i| {
+ for (paddles, 0..) |p, i| {
const expected = @as(u32, num_iterations) + @boolToInt(i == 0);
try testing.expectEqual(p.value, expected);
}
@@ -513,7 +513,7 @@ test "Condition - broadcasting" {
};
var broadcast_test = BroadcastTest{};
- for (broadcast_test.threads) |*t| {
+ for (&broadcast_test.threads) |*t| {
t.* = try std.Thread.spawn(.{}, BroadcastTest.run, .{&broadcast_test});
}
@@ -584,7 +584,7 @@ test "Condition - broadcasting - wake all threads" {
var broadcast_test = BroadcastTest{};
var thread_id: usize = 1;
- for (broadcast_test.threads) |*t| {
+ for (&broadcast_test.threads) |*t| {
t.* = try std.Thread.spawn(.{}, BroadcastTest.run, .{ &broadcast_test, thread_id });
thread_id += 1;
}
lib/std/Thread/Futex.zig
@@ -895,7 +895,7 @@ test "Futex - signaling" {
var threads = [_]std.Thread{undefined} ** num_threads;
// Create a circle of paddles which hit each other
- for (threads) |*t, i| {
+ for (&threads, 0..) |*t, i| {
const paddle = &paddles[i];
const hit_to = &paddles[(i + 1) % paddles.len];
t.* = try std.Thread.spawn(.{}, Paddle.run, .{ paddle, hit_to });
@@ -950,14 +950,14 @@ test "Futex - broadcasting" {
threads: [num_threads]std.Thread = undefined,
fn run(self: *@This()) !void {
- for (self.barriers) |*barrier| {
+ for (&self.barriers) |*barrier| {
try barrier.wait();
}
}
};
var broadcast = Broadcast{};
- for (broadcast.threads) |*t| t.* = try std.Thread.spawn(.{}, Broadcast.run, .{&broadcast});
+ for (&broadcast.threads) |*t| t.* = try std.Thread.spawn(.{}, Broadcast.run, .{&broadcast});
for (broadcast.threads) |t| t.join();
}
lib/std/Thread/Mutex.zig
@@ -245,7 +245,7 @@ const NonAtomicCounter = struct {
}
fn inc(self: *NonAtomicCounter) void {
- for (@bitCast([2]u64, self.get() + 1)) |v, i| {
+ for (@bitCast([2]u64, self.get() + 1), 0..) |v, i| {
@ptrCast(*volatile u64, &self.value[i]).* = v;
}
}
@@ -277,7 +277,7 @@ test "Mutex - many uncontended" {
};
var runners = [_]Runner{.{}} ** num_threads;
- for (runners) |*r| r.thread = try Thread.spawn(.{}, Runner.run, .{r});
+ for (&runners) |*r| r.thread = try Thread.spawn(.{}, Runner.run, .{r});
for (runners) |r| r.thread.join();
for (runners) |r| try testing.expectEqual(r.counter.get(), num_increments);
}
@@ -312,7 +312,7 @@ test "Mutex - many contended" {
var runner = Runner{};
var threads: [num_threads]Thread = undefined;
- for (threads) |*t| t.* = try Thread.spawn(.{}, Runner.run, .{&runner});
+ for (&threads) |*t| t.* = try Thread.spawn(.{}, Runner.run, .{&runner});
for (threads) |t| t.join();
try testing.expectEqual(runner.counter.get(), num_increments * num_threads);
lib/std/Thread/ResetEvent.zig
@@ -274,7 +274,7 @@ test "ResetEvent - broadcast" {
var ctx = Context{};
var threads: [num_threads - 1]std.Thread = undefined;
- for (threads) |*t| t.* = try std.Thread.spawn(.{}, Context.run, .{&ctx});
+ for (&threads) |*t| t.* = try std.Thread.spawn(.{}, Context.run, .{&ctx});
defer for (threads) |t| t.join();
ctx.run();
lib/std/Thread/RwLock.zig
@@ -364,7 +364,7 @@ test "RwLock - concurrent access" {
var runner = Runner{};
var threads: [num_writers + num_readers]std.Thread = undefined;
- for (threads[0..num_writers]) |*t, i| t.* = try std.Thread.spawn(.{}, Runner.writer, .{ &runner, i });
+ for (threads[0..num_writers], 0..) |*t, i| t.* = try std.Thread.spawn(.{}, Runner.writer, .{ &runner, i });
for (threads[num_writers..]) |*t| t.* = try std.Thread.spawn(.{}, Runner.reader, .{&runner});
for (threads) |t| t.join();
lib/std/Thread/Semaphore.zig
@@ -54,7 +54,7 @@ test "Thread.Semaphore" {
var n: i32 = 0;
var ctx = TestContext{ .sem = &sem, .n = &n };
- for (threads) |*t| t.* = try std.Thread.spawn(.{}, TestContext.worker, .{&ctx});
+ for (&threads) |*t| t.* = try std.Thread.spawn(.{}, TestContext.worker, .{&ctx});
for (threads) |t| t.join();
sem.wait();
try testing.expect(n == num_threads);
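
Note: the Thread test changes all reduce to one shape: spawn through pointer captures over &threads (the array must be addressable to store the handles), then join by value. A condensed sketch, with a hypothetical no-op worker standing in for the tests' run functions:

const std = @import("std");

fn worker() void {}

test "spawn into a thread array, join by value" {
    var threads: [4]std.Thread = undefined;
    // Storing the handles needs &threads; joining reads them by value.
    for (&threads) |*t| t.* = try std.Thread.spawn(.{}, worker, .{});
    for (threads) |t| t.join();
}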
lib/std/zig/system/linux.zig
@@ -223,7 +223,7 @@ const ArmCpuinfoImpl = struct {
};
var known_models: [self.cores.len]?*const Target.Cpu.Model = undefined;
- for (self.cores[0..self.core_no]) |core, i| {
+ for (self.cores[0..self.core_no], 0..) |core, i| {
known_models[i] = cpu_models.isKnown(.{
.architecture = core.architecture,
.implementer = core.implementer,
lib/std/zig/system/NativeTargetInfo.zig
@@ -273,7 +273,7 @@ fn detectAbiAndDynamicLinker(
assert(@enumToInt(Target.Abi.none) == 0);
const fields = std.meta.fields(Target.Abi)[1..];
var array: [fields.len]Target.Abi = undefined;
- inline for (fields) |field, i| {
+ inline for (fields, 0..) |field, i| {
array[i] = @field(Target.Abi, field.name);
}
break :blk array;
lib/std/zig/system/windows.zig
@@ -34,7 +34,7 @@ pub fn detectRuntimeVersion() WindowsVersion {
// checking the build number against a known set of
// values
var last_idx: usize = 0;
- for (WindowsVersion.known_win10_build_numbers) |build, i| {
+ for (WindowsVersion.known_win10_build_numbers, 0..) |build, i| {
if (version_info.dwBuildNumber >= build)
last_idx = i;
}
@@ -92,7 +92,7 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void {
var tmp_bufs: [fields_info.len][max_value_len]u8 align(@alignOf(std.os.windows.UNICODE_STRING)) = undefined;
- inline for (fields_info) |field, i| {
+ inline for (fields_info, 0..) |field, i| {
const ctx: *anyopaque = blk: {
switch (@field(args, field.name).value_type) {
REG.SZ,
@@ -153,7 +153,7 @@ fn getCpuInfoFromRegistry(core: usize, args: anytype) !void {
);
switch (res) {
.SUCCESS => {
- inline for (fields_info) |field, i| switch (@field(args, field.name).value_type) {
+ inline for (fields_info, 0..) |field, i| switch (@field(args, field.name).value_type) {
REG.SZ,
REG.EXPAND_SZ,
REG.MULTI_SZ,
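
Note: inline for takes the same 0.. counter as runtime loops; since the loop unrolls at compile time, the index is comptime-known in each iteration. A sketch with a hypothetical two-field struct (not the registry code above):

const std = @import("std");

test "inline for over struct fields with an index" {
    const S = struct { a: u8, b: u16 };
    var sizes: [2]usize = undefined;
    // Unrolled at comptime: field and i are comptime-known each iteration.
    inline for (std.meta.fields(S), 0..) |field, i| {
        sizes[i] = @sizeOf(field.type);
    }
    try std.testing.expect(sizes[0] == 1 and sizes[1] == 2);
}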
lib/std/zig/Ast.zig
@@ -136,7 +136,7 @@ pub fn tokenLocation(self: Ast, start_offset: ByteOffset, token_index: TokenInde
.line_end = self.source.len,
};
const token_start = self.tokens.items(.start)[token_index];
- for (self.source[start_offset..]) |c, i| {
+ for (self.source[start_offset..], 0..) |c, i| {
if (i + start_offset == token_start) {
loc.line_end = i + start_offset;
while (loc.line_end < self.source.len and self.source[loc.line_end] != '\n') {
@@ -179,7 +179,7 @@ pub fn tokenSlice(tree: Ast, token_index: TokenIndex) []const u8 {
pub fn extraData(tree: Ast, index: usize, comptime T: type) T {
const fields = std.meta.fields(T);
var result: T = undefined;
- inline for (fields) |field, i| {
+ inline for (fields, 0..) |field, i| {
comptime assert(field.type == Node.Index);
@field(result, field.name) = tree.extra_data[index + i];
}
@@ -2183,7 +2183,7 @@ fn fullAsmComponents(tree: Ast, info: full.Asm.Components) full.Asm {
if (token_tags[info.asm_token + 1] == .keyword_volatile) {
result.volatile_token = info.asm_token + 1;
}
- const outputs_end: usize = for (info.items) |item, i| {
+ const outputs_end: usize = for (info.items, 0..) |item, i| {
switch (node_tags[item]) {
.asm_output => continue,
else => break i,
lib/std/zig/CrossTarget.zig
@@ -317,7 +317,7 @@ pub fn parse(args: ParseOptions) !CrossTarget {
index += 1;
}
const feature_name = cpu_features[start..index];
- for (all_features) |feature, feat_index_usize| {
+ for (all_features, 0..) |feature, feat_index_usize| {
const feat_index = @intCast(Target.Cpu.Feature.Set.Index, feat_index_usize);
if (mem.eql(u8, feature_name, feature.name)) {
set.addFeature(feat_index);
lib/std/zig/fmt.zig
@@ -25,7 +25,7 @@ pub fn fmtId(bytes: []const u8) std.fmt.Formatter(formatId) {
pub fn isValidId(bytes: []const u8) bool {
if (bytes.len == 0) return false;
if (mem.eql(u8, bytes, "_")) return false;
- for (bytes) |c, i| {
+ for (bytes, 0..) |c, i| {
switch (c) {
'_', 'a'...'z', 'A'...'Z' => {},
'0'...'9' => if (i == 0) return false,
lib/std/zig/parser_test.zig
@@ -6158,7 +6158,7 @@ fn testError(source: [:0]const u8, expected_errors: []const Error) !void {
std.debug.print("errors found: {any}\n", .{tree.errors});
return err;
};
- for (expected_errors) |expected, i| {
+ for (expected_errors, 0..) |expected, i| {
try std.testing.expectEqual(expected, tree.errors[i].tag);
}
}
lib/std/zig/render.zig
@@ -1407,7 +1407,7 @@ fn renderBuiltinCall(
// Render all on one line, no trailing comma.
try renderToken(ais, tree, builtin_token + 1, .none); // (
- for (params) |param_node, i| {
+ for (params, 0..) |param_node, i| {
const first_param_token = tree.firstToken(param_node);
if (token_tags[first_param_token] == .multiline_string_literal_line or
hasSameLineComment(tree, first_param_token - 1))
@@ -1739,7 +1739,7 @@ fn renderBlock(
try renderToken(ais, tree, lbrace, .none);
} else {
try renderToken(ais, tree, lbrace, .newline);
- for (statements) |stmt, i| {
+ for (statements, 0..) |stmt, i| {
if (i != 0) try renderExtraNewline(ais, tree, stmt);
switch (node_tags[stmt]) {
.global_var_decl,
@@ -1902,7 +1902,7 @@ fn renderArrayInit(
const section_end = sec_end: {
var this_line_first_expr: usize = 0;
var this_line_size = rowSize(tree, row_exprs, rbrace);
- for (row_exprs) |expr, i| {
+ for (row_exprs, 0..) |expr, i| {
// Ignore comment on first line of this section.
if (i == 0) continue;
const expr_last_token = tree.lastToken(expr);
@@ -1941,7 +1941,7 @@ fn renderArrayInit(
var column_counter: usize = 0;
var single_line = true;
var contains_newline = false;
- for (section_exprs) |expr, i| {
+ for (section_exprs, 0..) |expr, i| {
const start = sub_expr_buffer.items.len;
sub_expr_buffer_starts[i] = start;
@@ -1983,7 +1983,7 @@ fn renderArrayInit(
// Render exprs in current section.
column_counter = 0;
- for (section_exprs) |expr, i| {
+ for (section_exprs, 0..) |expr, i| {
const start = sub_expr_buffer_starts[i];
const end = sub_expr_buffer_starts[i + 1];
const expr_text = sub_expr_buffer.items[start..end];
@@ -2140,7 +2140,7 @@ fn renderContainerDecl(
if (token_tags[lbrace + 1] == .container_doc_comment) {
try renderContainerDocComments(ais, tree, lbrace + 1);
}
- for (container_decl.ast.members) |member, i| {
+ for (container_decl.ast.members, 0..) |member, i| {
if (i != 0) try renderExtraNewline(ais, tree, member);
switch (tree.nodes.items(.tag)[member]) {
// For container fields, ensure a trailing comma is added if necessary.
@@ -2226,7 +2226,7 @@ fn renderAsm(
try renderToken(ais, tree, colon1, .space); // :
ais.pushIndent();
- for (asm_node.outputs) |asm_output, i| {
+ for (asm_node.outputs, 0..) |asm_output, i| {
if (i + 1 < asm_node.outputs.len) {
const next_asm_output = asm_node.outputs[i + 1];
try renderAsmOutput(gpa, ais, tree, asm_output, .none);
@@ -2258,7 +2258,7 @@ fn renderAsm(
} else colon3: {
try renderToken(ais, tree, colon2, .space); // :
ais.pushIndent();
- for (asm_node.inputs) |asm_input, i| {
+ for (asm_node.inputs, 0..) |asm_input, i| {
if (i + 1 < asm_node.inputs.len) {
const next_asm_input = asm_node.inputs[i + 1];
try renderAsmInput(gpa, ais, tree, asm_input, .none);
@@ -2352,7 +2352,7 @@ fn renderParamList(
if (token_tags[after_last_param_tok] == .comma) {
ais.pushIndentNextLine();
try renderToken(ais, tree, lparen, .newline); // (
- for (params) |param_node, i| {
+ for (params, 0..) |param_node, i| {
if (i + 1 < params.len) {
try renderExpression(gpa, ais, tree, param_node, .none);
@@ -2377,7 +2377,7 @@ fn renderParamList(
try renderToken(ais, tree, lparen, .none); // (
- for (params) |param_node, i| {
+ for (params, 0..) |param_node, i| {
const first_param_token = tree.firstToken(param_node);
if (token_tags[first_param_token] == .multiline_string_literal_line or
hasSameLineComment(tree, first_param_token - 1))
@@ -3015,7 +3015,7 @@ fn rowSize(tree: Ast, exprs: []const Ast.Node.Index, rtoken: Ast.TokenIndex) usi
}
var count: usize = 1;
- for (exprs) |expr, i| {
+ for (exprs, 0..) |expr, i| {
if (i + 1 < exprs.len) {
const expr_last_token = tree.lastToken(expr) + 1;
if (!tree.tokensOnSameLine(expr_last_token, tree.firstToken(exprs[i + 1]))) return count;
lib/std/array_hash_map.zig
@@ -715,7 +715,7 @@ pub fn ArrayHashMapUnmanaged(
const slice = self.entries.slice();
const hashes_array = slice.items(.hash);
const keys_array = slice.items(.key);
- for (keys_array) |*item_key, i| {
+ for (keys_array, 0..) |*item_key, i| {
if (hashes_array[i] == h and checkedEql(ctx, key, item_key.*, i)) {
return GetOrPutResult{
.key_ptr = item_key,
@@ -946,7 +946,7 @@ pub fn ArrayHashMapUnmanaged(
const slice = self.entries.slice();
const hashes_array = slice.items(.hash);
const keys_array = slice.items(.key);
- for (keys_array) |*item_key, i| {
+ for (keys_array, 0..) |*item_key, i| {
if (hashes_array[i] == h and checkedEql(ctx, key, item_key.*, i)) {
return i;
}
@@ -1285,7 +1285,7 @@ pub fn ArrayHashMapUnmanaged(
const slice = self.entries.slice();
const hashes_array = if (store_hash) slice.items(.hash) else {};
const keys_array = slice.items(.key);
- for (keys_array) |*item_key, i| {
+ for (keys_array, 0..) |*item_key, i| {
const hash_match = if (store_hash) hashes_array[i] == key_hash else true;
if (hash_match and key_ctx.eql(key, item_key.*, i)) {
const removed_entry: KV = .{
@@ -1326,7 +1326,7 @@ pub fn ArrayHashMapUnmanaged(
const slice = self.entries.slice();
const hashes_array = if (store_hash) slice.items(.hash) else {};
const keys_array = slice.items(.key);
- for (keys_array) |*item_key, i| {
+ for (keys_array, 0..) |*item_key, i| {
const hash_match = if (store_hash) hashes_array[i] == key_hash else true;
if (hash_match and key_ctx.eql(key, item_key.*, i)) {
switch (removal_type) {
@@ -1634,7 +1634,7 @@ pub fn ArrayHashMapUnmanaged(
const items = if (store_hash) slice.items(.hash) else slice.items(.key);
const indexes = header.indexes(I);
- entry_loop: for (items) |key, i| {
+ entry_loop: for (items, 0..) |key, i| {
const h = if (store_hash) key else checkedHash(ctx, key);
const start_index = safeTruncate(usize, h);
const end_index = start_index +% indexes.len;
@@ -1730,7 +1730,7 @@ pub fn ArrayHashMapUnmanaged(
const indexes = header.indexes(I);
if (indexes.len == 0) return;
var is_empty = false;
- for (indexes) |idx, i| {
+ for (indexes, 0..) |idx, i| {
if (idx.isEmpty()) {
is_empty = true;
} else {
@@ -1826,7 +1826,7 @@ const min_bit_index = 5;
const max_capacity = (1 << max_bit_index) - 1;
const index_capacities = blk: {
var caps: [max_bit_index + 1]u32 = undefined;
- for (caps[0..max_bit_index]) |*item, i| {
+ for (caps[0..max_bit_index], 0..) |*item, i| {
item.* = (1 << i) * 3 / 5;
}
caps[max_bit_index] = max_capacity;
@@ -2025,7 +2025,7 @@ test "iterator hash map" {
try testing.expect(count == 3);
try testing.expect(it.next() == null);
- for (buffer) |_, i| {
+ for (buffer, 0..) |_, i| {
try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]);
}
@@ -2037,7 +2037,7 @@ test "iterator hash map" {
if (count >= 2) break;
}
- for (buffer[0..2]) |_, i| {
+ for (buffer[0..2], 0..) |_, i| {
try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]);
}
@@ -2299,7 +2299,7 @@ test "sort" {
map.sort(C{ .keys = map.keys() });
var x: i32 = 1;
- for (map.keys()) |key, i| {
+ for (map.keys(), 0..) |key, i| {
try testing.expect(key == x);
try testing.expect(map.values()[i] == x * 3);
x += 1;
lib/std/array_list.zig
@@ -183,7 +183,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
mem.copy(T, range, new_items);
const after_subrange = start + new_items.len;
- for (self.items[after_range..]) |item, i| {
+ for (self.items[after_range..], 0..) |item, i| {
self.items[after_subrange..][i] = item;
}
@@ -216,7 +216,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
if (newlen == i) return self.pop();
const old_item = self.items[i];
- for (self.items[i..newlen]) |*b, j| b.* = self.items[i + 1 + j];
+ for (self.items[i..newlen], 0..) |*b, j| b.* = self.items[i + 1 + j];
self.items[newlen] = undefined;
self.items.len = newlen;
return old_item;
@@ -666,7 +666,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
if (newlen == i) return self.pop();
const old_item = self.items[i];
- for (self.items[i..newlen]) |*b, j| b.* = self.items[i + 1 + j];
+ for (self.items[i..newlen], 0..) |*b, j| b.* = self.items[i + 1 + j];
self.items[newlen] = undefined;
self.items.len = newlen;
return old_item;
@@ -1069,7 +1069,7 @@ test "std.ArrayList/ArrayListUnmanaged.basic" {
}
}
- for (list.items) |v, i| {
+ for (list.items, 0..) |v, i| {
try testing.expect(v == @intCast(i32, i + 1));
}
@@ -1119,7 +1119,7 @@ test "std.ArrayList/ArrayListUnmanaged.basic" {
}
}
- for (list.items) |v, i| {
+ for (list.items, 0..) |v, i| {
try testing.expect(v == @intCast(i32, i + 1));
}
lib/std/ascii.zig
@@ -272,7 +272,7 @@ test "ASCII character classes" {
/// Asserts `output.len >= ascii_string.len`.
pub fn lowerString(output: []u8, ascii_string: []const u8) []u8 {
std.debug.assert(output.len >= ascii_string.len);
- for (ascii_string) |c, i| {
+ for (ascii_string, 0..) |c, i| {
output[i] = toLower(c);
}
return output[0..ascii_string.len];
@@ -301,7 +301,7 @@ test "allocLowerString" {
/// Asserts `output.len >= ascii_string.len`.
pub fn upperString(output: []u8, ascii_string: []const u8) []u8 {
std.debug.assert(output.len >= ascii_string.len);
- for (ascii_string) |c, i| {
+ for (ascii_string, 0..) |c, i| {
output[i] = toUpper(c);
}
return output[0..ascii_string.len];
@@ -329,7 +329,7 @@ test "allocUpperString" {
/// Compares strings `a` and `b` case-insensitively and returns whether they are equal.
pub fn eqlIgnoreCase(a: []const u8, b: []const u8) bool {
if (a.len != b.len) return false;
- for (a) |a_c, i| {
+ for (a, 0..) |a_c, i| {
if (toLower(a_c) != toLower(b[i])) return false;
}
return true;
lib/std/base64.zig
@@ -140,7 +140,7 @@ pub const Base64Decoder = struct {
};
var char_in_alphabet = [_]bool{false} ** 256;
- for (alphabet_chars) |c, i| {
+ for (alphabet_chars, 0..) |c, i| {
assert(!char_in_alphabet[c]);
assert(pad_char == null or c != pad_char.?);
@@ -185,7 +185,7 @@ pub const Base64Decoder = struct {
var acc_len: u4 = 0;
var dest_idx: usize = 0;
var leftover_idx: ?usize = null;
- for (source) |c, src_idx| {
+ for (source, 0..) |c, src_idx| {
const d = decoder.char_to_index[c];
if (d == invalid_char) {
if (decoder.pad_char == null or c != decoder.pad_char.?) return error.InvalidCharacter;
@@ -258,7 +258,7 @@ pub const Base64DecoderWithIgnore = struct {
var acc_len: u4 = 0;
var dest_idx: usize = 0;
var leftover_idx: ?usize = null;
- for (source) |c, src_idx| {
+ for (source, 0..) |c, src_idx| {
if (decoder_with_ignore.char_is_ignored[c]) continue;
const d = decoder.char_to_index[c];
if (d == Base64Decoder.invalid_char) {
lib/std/bit_set.zig
@@ -494,14 +494,14 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
/// Flips all bits in this bit set which are present
/// in the toggles bit set.
pub fn toggleSet(self: *Self, toggles: Self) void {
- for (self.masks) |*mask, i| {
+ for (&self.masks, 0..) |*mask, i| {
mask.* ^= toggles.masks[i];
}
}
/// Flips every bit in the bit set.
pub fn toggleAll(self: *Self) void {
- for (self.masks) |*mask| {
+ for (&self.masks) |*mask| {
mask.* = ~mask.*;
}
@@ -515,7 +515,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
/// result in the first one. Bits in the result are
/// set if the corresponding bits were set in either input.
pub fn setUnion(self: *Self, other: Self) void {
- for (self.masks) |*mask, i| {
+ for (&self.masks, 0..) |*mask, i| {
mask.* |= other.masks[i];
}
}
@@ -524,7 +524,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
/// the result in the first one. Bits in the result are
/// set if the corresponding bits were set in both inputs.
pub fn setIntersection(self: *Self, other: Self) void {
- for (self.masks) |*mask, i| {
+ for (&self.masks, 0..) |*mask, i| {
mask.* &= other.masks[i];
}
}
@@ -544,7 +544,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
/// If no bits are set, returns null.
pub fn toggleFirstSet(self: *Self) ?usize {
var offset: usize = 0;
- const mask = for (self.masks) |*mask| {
+ const mask = for (&self.masks) |*mask| {
if (mask.* != 0) break mask;
offset += @bitSizeOf(MaskInt);
} else return null;
@@ -869,7 +869,7 @@ pub const DynamicBitSetUnmanaged = struct {
pub fn toggleSet(self: *Self, toggles: Self) void {
assert(toggles.bit_length == self.bit_length);
const num_masks = numMasks(self.bit_length);
- for (self.masks[0..num_masks]) |*mask, i| {
+ for (self.masks[0..num_masks], 0..) |*mask, i| {
mask.* ^= toggles.masks[i];
}
}
@@ -897,7 +897,7 @@ pub const DynamicBitSetUnmanaged = struct {
pub fn setUnion(self: *Self, other: Self) void {
assert(other.bit_length == self.bit_length);
const num_masks = numMasks(self.bit_length);
- for (self.masks[0..num_masks]) |*mask, i| {
+ for (self.masks[0..num_masks], 0..) |*mask, i| {
mask.* |= other.masks[i];
}
}
@@ -909,7 +909,7 @@ pub const DynamicBitSetUnmanaged = struct {
pub fn setIntersection(self: *Self, other: Self) void {
assert(other.bit_length == self.bit_length);
const num_masks = numMasks(self.bit_length);
- for (self.masks[0..num_masks]) |*mask, i| {
+ for (self.masks[0..num_masks], 0..) |*mask, i| {
mask.* &= other.masks[i];
}
}
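
Note: the bit-set loops pair a mutable pass over &self.masks with an index into the other set's words. A reduced sketch of the setUnion step over two plain arrays (values are illustrative):

const std = @import("std");

test "element-wise union of two word arrays" {
    var a = [_]u8{ 0b0001, 0b0100 };
    const b = [_]u8{ 0b0010, 0b0001 };
    // The counter pairs each mutable word of a with the matching word of b.
    for (&a, 0..) |*mask, i| {
        mask.* |= b[i];
    }
    try std.testing.expect(a[0] == 0b0011 and a[1] == 0b0101);
}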
lib/std/bounded_array.zig
@@ -169,7 +169,7 @@ pub fn BoundedArray(comptime T: type, comptime buffer_capacity: usize) type {
} else {
mem.copy(T, range, new_items);
const after_subrange = start + new_items.len;
- for (self.constSlice()[after_range..]) |item, i| {
+ for (self.constSlice()[after_range..], 0..) |item, i| {
self.slice()[after_subrange..][i] = item;
}
self.len -= len - new_items.len;
@@ -197,7 +197,7 @@ pub fn BoundedArray(comptime T: type, comptime buffer_capacity: usize) type {
const newlen = self.len - 1;
if (newlen == i) return self.pop();
const old_item = self.get(i);
- for (self.slice()[i..newlen]) |*b, j| b.* = self.get(i + 1 + j);
+ for (self.slice()[i..newlen], 0..) |*b, j| b.* = self.get(i + 1 + j);
self.set(newlen, undefined);
self.len = newlen;
return old_item;
lib/std/Build.zig
@@ -650,7 +650,7 @@ pub fn dupe(self: *Build, bytes: []const u8) []u8 {
/// Duplicates an array of strings without the need to handle out of memory.
pub fn dupeStrings(self: *Build, strings: []const []const u8) [][]u8 {
const array = self.allocator.alloc([]u8, strings.len) catch @panic("OOM");
- for (strings) |s, i| {
+ for (strings, 0..) |s, i| {
array[i] = self.dupe(s);
}
return array;
@@ -1051,7 +1051,7 @@ pub fn standardTargetOptions(self: *Build, args: StandardTargetOptionsArgs) Cros
const all_features = whitelist_cpu.arch.allFeaturesList();
var populated_cpu_features = whitelist_cpu.model.features;
populated_cpu_features.populateDependencies(all_features);
- for (all_features) |feature, i_usize| {
+ for (all_features, 0..) |feature, i_usize| {
const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
const in_cpu_set = populated_cpu_features.isEnabled(i);
if (in_cpu_set) {
@@ -1059,7 +1059,7 @@ pub fn standardTargetOptions(self: *Build, args: StandardTargetOptionsArgs) Cros
}
}
log.err(" Remove: ", .{});
- for (all_features) |feature, i_usize| {
+ for (all_features, 0..) |feature, i_usize| {
const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
const in_cpu_set = populated_cpu_features.isEnabled(i);
const in_actual_set = selected_cpu.features.isEnabled(i);
@@ -1748,7 +1748,7 @@ pub fn serializeCpu(allocator: Allocator, cpu: std.Target.Cpu) ![]const u8 {
var mcpu_buffer = ArrayList(u8).init(allocator);
try mcpu_buffer.appendSlice(cpu.model.name);
- for (all_features) |feature, i_usize| {
+ for (all_features, 0..) |feature, i_usize| {
const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
const in_cpu_set = populated_cpu_features.isEnabled(i);
const in_actual_set = cpu.features.isEnabled(i);
lib/std/child_process.zig
@@ -604,7 +604,7 @@ pub const ChildProcess = struct {
const arena = arena_allocator.allocator();
const argv_buf = try arena.allocSentinel(?[*:0]u8, self.argv.len, null);
- for (self.argv) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
+ for (self.argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
const envp = if (self.env_map) |env_map| m: {
const envp_buf = try createNullDelimitedEnvMap(arena, env_map);
@@ -712,7 +712,7 @@ pub const ChildProcess = struct {
// Therefore, we do all the allocation for the execve() before the fork().
// This means we must do the null-termination of argv and env vars here.
const argv_buf = try arena.allocSentinel(?[*:0]u8, self.argv.len, null);
- for (self.argv) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
+ for (self.argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
const envp = m: {
if (self.env_map) |env_map| {
@@ -1424,7 +1424,7 @@ fn windowsCreateCommandLine(allocator: mem.Allocator, argv: []const []const u8)
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();
- for (argv) |arg, arg_i| {
+ for (argv, 0..) |arg, arg_i| {
if (arg_i != 0) try buf.append(' ');
if (mem.indexOfAny(u8, arg, " \t\n\"") == null) {
try buf.appendSlice(arg);
lib/std/coff.zig
@@ -1223,7 +1223,7 @@ pub const Coff = struct {
pub fn getSectionHeadersAlloc(self: *const Coff, allocator: mem.Allocator) ![]SectionHeader {
const section_headers = self.getSectionHeaders();
const out_buff = try allocator.alloc(SectionHeader, section_headers.len);
- for (out_buff) |*section_header, i| {
+ for (out_buff, 0..) |*section_header, i| {
section_header.* = section_headers[i];
}
lib/std/comptime_string_map.zig
@@ -21,7 +21,7 @@ pub fn ComptimeStringMap(comptime V: type, comptime kvs_list: anytype) type {
return a.key.len < b.key.len;
}
}).lenAsc;
- for (kvs_list) |kv, i| {
+ for (kvs_list, 0..) |kv, i| {
if (V != void) {
sorted_kvs[i] = .{ .key = kv.@"0", .value = kv.@"1" };
} else {
lib/std/debug.zig
@@ -213,7 +213,7 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *std.builtin.StackT
var addr_buf_stack: [32]usize = undefined;
const addr_buf = if (addr_buf_stack.len > addrs.len) addr_buf_stack[0..] else addrs;
const n = walkStackWindows(addr_buf[0..]);
- const first_index = for (addr_buf[0..n]) |addr, i| {
+ const first_index = for (addr_buf[0..n], 0..) |addr, i| {
if (addr == first_addr) {
break i;
}
@@ -224,13 +224,13 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *std.builtin.StackT
const end_index = math.min(first_index + addrs.len, n);
const slice = addr_buf[first_index..end_index];
// We use a for loop here because slice and addrs may alias.
- for (slice) |addr, i| {
+ for (slice, 0..) |addr, i| {
addrs[i] = addr;
}
stack_trace.index = slice.len;
} else {
var it = StackIterator.init(first_address, null);
- for (stack_trace.instruction_addresses) |*addr, i| {
+ for (stack_trace.instruction_addresses, 0..) |*addr, i| {
addr.* = it.next() orelse {
stack_trace.index = i;
return;
@@ -621,7 +621,7 @@ pub fn writeCurrentStackTraceWindows(
const n = walkStackWindows(addr_buf[0..]);
const addrs = addr_buf[0..n];
var start_i: usize = if (start_addr) |saddr| blk: {
- for (addrs) |addr, i| {
+ for (addrs, 0..) |addr, i| {
if (addr == saddr) break :blk i;
}
return;
@@ -2138,7 +2138,7 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
) catch return;
return;
};
- for (t.addrs[0..end]) |frames_array, i| {
+ for (t.addrs[0..end], 0..) |frames_array, i| {
stderr.print("{s}:\n", .{t.notes[i]}) catch return;
var frames_array_mutable = frames_array;
const frames = mem.sliceTo(frames_array_mutable[0..], 0);
lib/std/dwarf.zig
@@ -1064,7 +1064,7 @@ pub const DwarfInfo = struct {
.has_children = table_entry.has_children,
};
try result.attrs.resize(allocator, table_entry.attrs.items.len);
- for (table_entry.attrs.items) |attr, i| {
+ for (table_entry.attrs.items, 0..) |attr, i| {
result.attrs.items[i] = Die.Attr{
.id = attr.attr_id,
.value = try parseFormValue(
lib/std/enums.zig
@@ -35,7 +35,7 @@ pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_def
pub fn valuesFromFields(comptime E: type, comptime fields: []const EnumField) []const E {
comptime {
var result: [fields.len]E = undefined;
- for (fields) |f, i| {
+ for (fields, 0..) |f, i| {
result[i] = @field(E, f.name);
}
return &result;
@@ -1331,7 +1331,7 @@ pub fn EnumIndexer(comptime E: type) type {
pub const Key = E;
pub const count = fields_len;
pub fn indexOf(e: E) usize {
- for (keys) |k, i| {
+ for (keys, 0..) |k, i| {
if (k == e) return i;
}
unreachable;
lib/std/fmt.zig
@@ -570,7 +570,7 @@ pub fn formatType(
return writer.writeAll("{ ... }");
}
try writer.writeAll("{");
- inline for (info.fields) |f, i| {
+ inline for (info.fields, 0..) |f, i| {
if (i == 0) {
try writer.writeAll(" ");
} else {
@@ -585,7 +585,7 @@ pub fn formatType(
return writer.writeAll("{ ... }");
}
try writer.writeAll("{");
- inline for (info.fields) |f, i| {
+ inline for (info.fields, 0..) |f, i| {
if (i == 0) {
try writer.writeAll(" .");
} else {
@@ -612,7 +612,7 @@ pub fn formatType(
}
}
if (comptime std.meta.trait.isZigString(info.child)) {
- for (value) |item, i| {
+ for (value, 0..) |item, i| {
comptime checkTextFmt(actual_fmt);
if (i != 0) try formatBuf(", ", options, writer);
try formatBuf(item, options, writer);
@@ -659,7 +659,7 @@ pub fn formatType(
}
}
try writer.writeAll("{ ");
- for (value) |elem, i| {
+ for (value, 0..) |elem, i| {
try formatType(elem, actual_fmt, options, writer, max_depth - 1);
if (i != value.len - 1) {
try writer.writeAll(", ");
@@ -684,7 +684,7 @@ pub fn formatType(
}
}
try writer.writeAll("{ ");
- for (value) |elem, i| {
+ for (value, 0..) |elem, i| {
try formatType(elem, actual_fmt, options, writer, max_depth - 1);
if (i < value.len - 1) {
try writer.writeAll(", ");
lib/std/hash_map.zig
@@ -2119,7 +2119,7 @@ test "std.hash_map getOrPutAdapted" {
var real_keys: [keys.len]u64 = undefined;
- inline for (keys) |key_str, i| {
+ inline for (keys, 0..) |key_str, i| {
const result = try map.getOrPutAdapted(key_str, AdaptedContext{});
try testing.expect(!result.found_existing);
real_keys[i] = std.fmt.parseInt(u64, key_str, 10) catch unreachable;
@@ -2129,7 +2129,7 @@ test "std.hash_map getOrPutAdapted" {
try testing.expectEqual(map.count(), keys.len);
- inline for (keys) |key_str, i| {
+ inline for (keys, 0..) |key_str, i| {
const result = try map.getOrPutAdapted(key_str, AdaptedContext{});
try testing.expect(result.found_existing);
try testing.expectEqual(real_keys[i], result.key_ptr.*);
lib/std/heap.zig
@@ -724,7 +724,7 @@ pub fn testAllocator(base_allocator: mem.Allocator) !void {
var slice = try allocator.alloc(*i32, 100);
try testing.expect(slice.len == 100);
- for (slice) |*item, i| {
+ for (slice, 0..) |*item, i| {
item.* = try allocator.create(i32);
item.*.* = @intCast(i32, i);
}
@@ -732,7 +732,7 @@ pub fn testAllocator(base_allocator: mem.Allocator) !void {
slice = try allocator.realloc(slice, 20000);
try testing.expect(slice.len == 20000);
- for (slice[0..100]) |item, i| {
+ for (slice[0..100], 0..) |item, i| {
try testing.expect(item.* == @intCast(i32, i));
allocator.destroy(item);
}
lib/std/json.zig
@@ -1280,7 +1280,7 @@ fn parsedEqual(a: anytype, b: @TypeOf(a)) bool {
}
},
.Array => {
- for (a) |e, i|
+ for (a, 0..) |e, i|
if (!parsedEqual(e, b[i])) return false;
return true;
},
@@ -1294,7 +1294,7 @@ fn parsedEqual(a: anytype, b: @TypeOf(a)) bool {
.One => return parsedEqual(a.*, b.*),
.Slice => {
if (a.len != b.len) return false;
- for (a) |e, i|
+ for (a, 0..) |e, i|
if (!parsedEqual(e, b[i])) return false;
return true;
},
@@ -1518,7 +1518,7 @@ fn parseInternal(
var r: T = undefined;
var fields_seen = [_]bool{false} ** structInfo.fields.len;
errdefer {
- inline for (structInfo.fields) |field, i| {
+ inline for (structInfo.fields, 0..) |field, i| {
if (fields_seen[i] and !field.is_comptime) {
parseFree(field.type, @field(r, field.name), options);
}
@@ -1533,7 +1533,7 @@ fn parseInternal(
var child_options = options;
child_options.allow_trailing_data = true;
var found = false;
- inline for (structInfo.fields) |field, i| {
+ inline for (structInfo.fields, 0..) |field, i| {
// TODO: using switches here segfaults the compiler (#2727?)
if ((stringToken.escapes == .None and mem.eql(u8, field.name, key_source_slice)) or (stringToken.escapes == .Some and (field.name.len == stringToken.decodedLength() and encodesTo(field.name, key_source_slice)))) {
// if (switch (stringToken.escapes) {
@@ -1584,7 +1584,7 @@ fn parseInternal(
else => return error.UnexpectedToken,
}
}
- inline for (structInfo.fields) |field, i| {
+ inline for (structInfo.fields, 0..) |field, i| {
if (!fields_seen[i]) {
if (field.default_value) |default_ptr| {
if (!field.is_comptime) {
@@ -2367,7 +2367,7 @@ pub fn stringify(
if (child_options.whitespace) |*whitespace| {
whitespace.indent_level += 1;
}
- for (value) |x, i| {
+ for (value, 0..) |x, i| {
if (i != 0) {
try out_stream.writeByte(',');
}
lib/std/mem.zig
@@ -169,7 +169,7 @@ test "Allocator.resize" {
var values = try testing.allocator.alloc(T, 100);
defer testing.allocator.free(values);
- for (values) |*v, i| v.* = @intCast(T, i);
+ for (values, 0..) |*v, i| v.* = @intCast(T, i);
if (!testing.allocator.resize(values, values.len + 10)) return error.OutOfMemory;
values = values.ptr[0 .. values.len + 10];
try testing.expect(values.len == 110);
@@ -185,7 +185,7 @@ test "Allocator.resize" {
var values = try testing.allocator.alloc(T, 100);
defer testing.allocator.free(values);
- for (values) |*v, i| v.* = @intToFloat(T, i);
+ for (values, 0..) |*v, i| v.* = @intToFloat(T, i);
if (!testing.allocator.resize(values, values.len + 10)) return error.OutOfMemory;
values = values.ptr[0 .. values.len + 10];
try testing.expect(values.len == 110);
@@ -201,7 +201,7 @@ pub fn copy(comptime T: type, dest: []T, source: []const T) void {
// this and automatically omit safety checks for loops
@setRuntimeSafety(false);
assert(dest.len >= source.len);
- for (source) |s, i|
+ for (source, 0..) |s, i|
dest[i] = s;
}
@@ -445,7 +445,7 @@ pub fn zeroInit(comptime T: type, init: anytype) T {
var value: T = undefined;
- inline for (struct_info.fields) |field, i| {
+ inline for (struct_info.fields, 0..) |field, i| {
if (field.is_comptime) {
continue;
}
@@ -611,7 +611,7 @@ test "lessThan" {
pub fn eql(comptime T: type, a: []const T, b: []const T) bool {
if (a.len != b.len) return false;
if (a.ptr == b.ptr) return true;
- for (a) |item, index| {
+ for (a, 0..) |item, index| {
if (b[index] != item) return false;
}
return true;
@@ -1261,7 +1261,7 @@ pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: Endian)
},
.Little => {
const ShiftType = math.Log2Int(ReturnType);
- for (bytes) |b, index| {
+ for (bytes, 0..) |b, index| {
result = result | (@as(ReturnType, b) << @intCast(ShiftType, index * 8));
}
},
@@ -1328,7 +1328,7 @@ pub fn readVarPackedInt(
},
.Little => {
int = read_bytes[0] >> bit_shift;
- for (read_bytes[1..]) |elem, i| {
+ for (read_bytes[1..], 0..) |elem, i| {
int |= (@as(uN, elem) << @intCast(Log2N, (8 * (i + 1) - bit_shift)));
}
},
@@ -2907,7 +2907,7 @@ pub fn indexOfMin(comptime T: type, slice: []const T) usize {
assert(slice.len > 0);
var best = slice[0];
var index: usize = 0;
- for (slice[1..]) |item, i| {
+ for (slice[1..], 0..) |item, i| {
if (item < best) {
best = item;
index = i + 1;
@@ -2928,7 +2928,7 @@ pub fn indexOfMax(comptime T: type, slice: []const T) usize {
assert(slice.len > 0);
var best = slice[0];
var index: usize = 0;
- for (slice[1..]) |item, i| {
+ for (slice[1..], 0..) |item, i| {
if (item > best) {
best = item;
index = i + 1;
@@ -2952,7 +2952,7 @@ pub fn indexOfMinMax(comptime T: type, slice: []const T) struct { index_min: usi
var maxVal = slice[0];
var minIdx: usize = 0;
var maxIdx: usize = 0;
- for (slice[1..]) |item, i| {
+ for (slice[1..], 0..) |item, i| {
if (item < minVal) {
minVal = item;
minIdx = i + 1;
@@ -3117,7 +3117,7 @@ test "replace" {
/// Replace all occurrences of `needle` with `replacement`.
pub fn replaceScalar(comptime T: type, slice: []T, needle: T, replacement: T) void {
- for (slice) |e, i| {
+ for (slice, 0..) |e, i| {
if (e == needle) {
slice[i] = replacement;
}
@@ -3372,7 +3372,7 @@ test "asBytes" {
try testing.expect(eql(u8, asBytes(&deadbeef), deadbeef_bytes));
var codeface = @as(u32, 0xC0DEFACE);
- for (asBytes(&codeface).*) |*b|
+ for (asBytes(&codeface)) |*b|
b.* = 0;
try testing.expect(codeface == 0);
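
Note: the asBytes hunk is the one change here that is not about counters: the new for accepts a pointer to an array directly, so the explicit .* dereference is dropped. Restated as a standalone test:

const std = @import("std");

test "for over a pointer to an array" {
    var codeface = @as(u32, 0xC0DEFACE);
    // asBytes returns *[4]u8; for iterates the pointer directly, and
    // |*b| still yields mutable pointers into codeface itself.
    for (std.mem.asBytes(&codeface)) |*b| b.* = 0;
    try std.testing.expect(codeface == 0);
}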
lib/std/meta.zig
@@ -117,7 +117,7 @@ pub fn stringToEnum(comptime T: type, str: []const u8) ?T {
const kvs = comptime build_kvs: {
const EnumKV = struct { []const u8, T };
var kvs_array: [@typeInfo(T).Enum.fields.len]EnumKV = undefined;
- inline for (@typeInfo(T).Enum.fields) |enumField, i| {
+ inline for (@typeInfo(T).Enum.fields, 0..) |enumField, i| {
kvs_array[i] = .{ enumField.name, @field(T, enumField.name) };
}
break :build_kvs kvs_array[0..];
@@ -552,7 +552,7 @@ pub fn fieldNames(comptime T: type) *const [fields(T).len][]const u8 {
comptime {
const fieldInfos = fields(T);
var names: [fieldInfos.len][]const u8 = undefined;
- for (fieldInfos) |field, i| {
+ for (fieldInfos, 0..) |field, i| {
names[i] = field.name;
}
return &names;
@@ -593,7 +593,7 @@ pub fn tags(comptime T: type) *const [fields(T).len]T {
comptime {
const fieldInfos = fields(T);
var res: [fieldInfos.len]T = undefined;
- for (fieldInfos) |field, i| {
+ for (fieldInfos, 0..) |field, i| {
res[i] = @field(T, field.name);
}
return &res;
@@ -631,7 +631,7 @@ pub fn FieldEnum(comptime T: type) type {
if (@typeInfo(T) == .Union) {
if (@typeInfo(T).Union.tag_type) |tag_type| {
- for (std.enums.values(tag_type)) |v, i| {
+ for (std.enums.values(tag_type), 0..) |v, i| {
if (@enumToInt(v) != i) break; // enum values not consecutive
if (!std.mem.eql(u8, @tagName(v), field_infos[i].name)) break; // fields out of order
} else {
@@ -642,7 +642,7 @@ pub fn FieldEnum(comptime T: type) type {
var enumFields: [field_infos.len]std.builtin.Type.EnumField = undefined;
var decls = [_]std.builtin.Type.Declaration{};
- inline for (field_infos) |field, i| {
+ inline for (field_infos, 0..) |field, i| {
enumFields[i] = .{
.name = field.name,
.value = i,
@@ -672,7 +672,7 @@ fn expectEqualEnum(expected: anytype, actual: @TypeOf(expected)) !void {
const expected_fields = @typeInfo(expected).Enum.fields;
const actual_fields = @typeInfo(actual).Enum.fields;
if (expected_fields.len != actual_fields.len) return error.FailedTest;
- for (expected_fields) |expected_field, i| {
+ for (expected_fields, 0..) |expected_field, i| {
const actual_field = actual_fields[i];
try testing.expectEqual(expected_field.value, actual_field.value);
try testing.expectEqualStrings(expected_field.name, actual_field.name);
@@ -682,7 +682,7 @@ fn expectEqualEnum(expected: anytype, actual: @TypeOf(expected)) !void {
const expected_decls = @typeInfo(expected).Enum.decls;
const actual_decls = @typeInfo(actual).Enum.decls;
if (expected_decls.len != actual_decls.len) return error.FailedTest;
- for (expected_decls) |expected_decl, i| {
+ for (expected_decls, 0..) |expected_decl, i| {
const actual_decl = actual_decls[i];
try testing.expectEqual(expected_decl.is_pub, actual_decl.is_pub);
try testing.expectEqualStrings(expected_decl.name, actual_decl.name);
@@ -716,7 +716,7 @@ pub fn DeclEnum(comptime T: type) type {
const fieldInfos = std.meta.declarations(T);
var enumDecls: [fieldInfos.len]std.builtin.Type.EnumField = undefined;
var decls = [_]std.builtin.Type.Declaration{};
- inline for (fieldInfos) |field, i| {
+ inline for (fieldInfos, 0..) |field, i| {
enumDecls[i] = .{ .name = field.name, .value = i };
}
return @Type(.{
@@ -870,7 +870,7 @@ pub fn eql(a: anytype, b: @TypeOf(a)) bool {
},
.Array => {
if (a.len != b.len) return false;
- for (a) |e, i|
+ for (a, 0..) |e, i|
if (!eql(e, b[i])) return false;
return true;
},
@@ -988,7 +988,7 @@ pub fn intToEnum(comptime EnumTag: type, tag_int: anytype) IntToEnumError!EnumTa
/// Given a type and a name, return the field index according to source order.
/// Returns `null` if the field is not found.
pub fn fieldIndex(comptime T: type, comptime name: []const u8) ?comptime_int {
- inline for (fields(T)) |field, i| {
+ inline for (fields(T), 0..) |field, i| {
if (mem.eql(u8, field.name, name))
return i;
}
@@ -1008,7 +1008,7 @@ pub fn declList(comptime Namespace: type, comptime Decl: type) []const *const De
comptime {
const decls = declarations(Namespace);
var array: [decls.len]*const Decl = undefined;
- for (decls) |decl, i| {
+ for (decls, 0..) |decl, i| {
array[i] = &@field(Namespace, decl.name);
}
std.sort.sort(*const Decl, &array, {}, S.declNameLessThan);
@@ -1069,7 +1069,7 @@ pub fn ArgsTuple(comptime Function: type) type {
@compileError("Cannot create ArgsTuple for variadic function");
var argument_field_list: [function_info.params.len]type = undefined;
- inline for (function_info.params) |arg, i| {
+ inline for (function_info.params, 0..) |arg, i| {
const T = arg.type.?;
argument_field_list[i] = T;
}
@@ -1090,7 +1090,7 @@ pub fn Tuple(comptime types: []const type) type {
fn CreateUniqueTuple(comptime N: comptime_int, comptime types: [N]type) type {
var tuple_fields: [types.len]std.builtin.Type.StructField = undefined;
- inline for (types) |T, i| {
+ inline for (types, 0..) |T, i| {
@setEvalBranchQuota(10_000);
var num_buf: [128]u8 = undefined;
tuple_fields[i] = .{
@@ -1129,7 +1129,7 @@ const TupleTester = struct {
if (expected.len != fields_list.len)
@compileError("Argument count mismatch");
- inline for (fields_list) |fld, i| {
+ inline for (fields_list, 0..) |fld, i| {
if (expected[i] != fld.type) {
@compileError("Field " ++ fld.name ++ " expected to be type " ++ @typeName(expected[i]) ++ ", but was type " ++ @typeName(fld.type));
}
lib/std/multi_array_list.zig
@@ -82,7 +82,7 @@ pub fn MultiArrayList(comptime S: type) type {
alignment: usize,
};
var data: [fields.len]Data = undefined;
- for (fields) |field_info, i| {
+ for (fields, 0..) |field_info, i| {
data[i] = .{
.size = @sizeOf(field_info.type),
.size_index = i,
@@ -98,7 +98,7 @@ pub fn MultiArrayList(comptime S: type) type {
std.sort.sort(Data, &data, {}, Sort.lessThan);
var sizes_bytes: [fields.len]usize = undefined;
var field_indexes: [fields.len]usize = undefined;
- for (data) |elem, i| {
+ for (data, 0..) |elem, i| {
sizes_bytes[i] = elem.size;
field_indexes[i] = elem.size_index;
}
@@ -131,7 +131,7 @@ pub fn MultiArrayList(comptime S: type) type {
.capacity = self.capacity,
};
var ptr: [*]u8 = self.bytes;
- for (sizes.bytes) |field_size, i| {
+ for (sizes.bytes, 0..) |field_size, i| {
result.ptrs[sizes.fields[i]] = ptr;
ptr += field_size * self.capacity;
}
@@ -148,7 +148,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// Overwrite one array element with new data.
pub fn set(self: *Self, index: usize, elem: S) void {
const slices = self.slice();
- inline for (fields) |field_info, i| {
+ inline for (fields, 0..) |field_info, i| {
slices.items(@intToEnum(Field, i))[index] = @field(elem, field_info.name);
}
}
@@ -157,7 +157,7 @@ pub fn MultiArrayList(comptime S: type) type {
pub fn get(self: Self, index: usize) S {
const slices = self.slice();
var result: S = undefined;
- inline for (fields) |field_info, i| {
+ inline for (fields, 0..) |field_info, i| {
@field(result, field_info.name) = slices.items(@intToEnum(Field, i))[index];
}
return result;
@@ -230,7 +230,7 @@ pub fn MultiArrayList(comptime S: type) type {
assert(index <= self.len);
self.len += 1;
const slices = self.slice();
- inline for (fields) |field_info, field_index| {
+ inline for (fields, 0..) |field_info, field_index| {
const field_slice = slices.items(@intToEnum(Field, field_index));
var i: usize = self.len - 1;
while (i > index) : (i -= 1) {
@@ -245,7 +245,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// retain list ordering.
pub fn swapRemove(self: *Self, index: usize) void {
const slices = self.slice();
- inline for (fields) |_, i| {
+ inline for (fields, 0..) |_, i| {
const field_slice = slices.items(@intToEnum(Field, i));
field_slice[index] = field_slice[self.len - 1];
field_slice[self.len - 1] = undefined;
@@ -257,7 +257,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// after it to preserve order.
pub fn orderedRemove(self: *Self, index: usize) void {
const slices = self.slice();
- inline for (fields) |_, field_index| {
+ inline for (fields, 0..) |_, field_index| {
const field_slice = slices.items(@intToEnum(Field, field_index));
var i = index;
while (i < self.len - 1) : (i += 1) {
@@ -293,7 +293,7 @@ pub fn MultiArrayList(comptime S: type) type {
capacityInBytes(new_len),
) catch {
const self_slice = self.slice();
- inline for (fields) |field_info, i| {
+ inline for (fields, 0..) |field_info, i| {
if (@sizeOf(field_info.type) != 0) {
const field = @intToEnum(Field, i);
const dest_slice = self_slice.items(field)[new_len..];
@@ -315,7 +315,7 @@ pub fn MultiArrayList(comptime S: type) type {
self.len = new_len;
const self_slice = self.slice();
const other_slice = other.slice();
- inline for (fields) |field_info, i| {
+ inline for (fields, 0..) |field_info, i| {
if (@sizeOf(field_info.type) != 0) {
const field = @intToEnum(Field, i);
mem.copy(field_info.type, other_slice.items(field), self_slice.items(field));
@@ -376,7 +376,7 @@ pub fn MultiArrayList(comptime S: type) type {
};
const self_slice = self.slice();
const other_slice = other.slice();
- inline for (fields) |field_info, i| {
+ inline for (fields, 0..) |field_info, i| {
if (@sizeOf(field_info.type) != 0) {
const field = @intToEnum(Field, i);
mem.copy(field_info.type, other_slice.items(field), self_slice.items(field));
@@ -395,7 +395,7 @@ pub fn MultiArrayList(comptime S: type) type {
result.len = self.len;
const self_slice = self.slice();
const result_slice = result.slice();
- inline for (fields) |field_info, i| {
+ inline for (fields, 0..) |field_info, i| {
if (@sizeOf(field_info.type) != 0) {
const field = @intToEnum(Field, i);
mem.copy(field_info.type, result_slice.items(field), self_slice.items(field));
@@ -412,7 +412,7 @@ pub fn MultiArrayList(comptime S: type) type {
slice: Slice,
pub fn swap(sc: @This(), a_index: usize, b_index: usize) void {
- inline for (fields) |field_info, i| {
+ inline for (fields, 0..) |field_info, i| {
if (@sizeOf(field_info.type) != 0) {
const field = @intToEnum(Field, i);
const ptr = sc.slice.items(field);
lib/std/net.zig
@@ -325,7 +325,7 @@ pub const Ip6Address = extern struct {
var index: u8 = 0;
var scope_id = false;
var abbrv = false;
- for (buf) |c, i| {
+ for (buf, 0..) |c, i| {
if (scope_id) {
if (c >= '0' and c <= '9') {
const digit = c - '0';
@@ -444,7 +444,7 @@ pub const Ip6Address = extern struct {
var scope_id_value: [os.IFNAMESIZE - 1]u8 = undefined;
var scope_id_index: usize = 0;
- for (buf) |c, i| {
+ for (buf, 0..) |c, i| {
if (scope_id) {
// Handling of percent-encoding should be for a URI library.
if ((c >= '0' and c <= '9') or
@@ -602,7 +602,7 @@ pub const Ip6Address = extern struct {
.Big => big_endian_parts.*,
.Little => blk: {
var buf: [8]u16 = undefined;
- for (big_endian_parts) |part, i| {
+ for (big_endian_parts, 0..) |part, i| {
buf[i] = mem.bigToNative(u16, part);
}
break :blk buf;
@@ -909,7 +909,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) !*A
result.canon_name = try canon.toOwnedSlice();
}
- for (lookup_addrs.items) |lookup_addr, i| {
+ for (lookup_addrs.items, 0..) |lookup_addr, i| {
result.addrs[i] = lookup_addr.addr;
assert(result.addrs[i].getPort() == port);
}
@@ -989,7 +989,7 @@ fn linuxLookupName(
// So far the label/precedence table cannot be customized.
// This implementation is ported from musl libc.
// A more idiomatic "ziggy" implementation would be welcome.
- for (addrs.items) |*addr, i| {
+ for (addrs.items, 0..) |*addr, i| {
var key: i32 = 0;
var sa6: os.sockaddr.in6 = undefined;
@memset(@ptrCast([*]u8, &sa6), 0, @sizeOf(os.sockaddr.in6));
@@ -1118,7 +1118,7 @@ const defined_policies = [_]Policy{
};
fn policyOf(a: [16]u8) *const Policy {
- for (defined_policies) |*policy| {
+ for (&defined_policies) |*policy| {
if (!mem.eql(u8, a[0..policy.len], policy.addr[0..policy.len])) continue;
if ((a[policy.len] & policy.mask) != policy.addr[policy.len]) continue;
return policy;
@@ -1502,7 +1502,7 @@ fn resMSendRc(
try ns_list.resize(rc.ns.items.len);
const ns = ns_list.items;
- for (rc.ns.items) |iplit, i| {
+ for (rc.ns.items, 0..) |iplit, i| {
ns[i] = iplit.addr;
assert(ns[i].getPort() == 53);
if (iplit.addr.any.family != os.AF.INET) {
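
Note: the policyOf hunk shows that & is required even for read-only pointer captures over a const array, since |*policy| must point into the array's actual storage. A sketch with hypothetical data:

const std = @import("std");

const Policy = struct { len: usize };
const policies = [_]Policy{ .{ .len = 1 }, .{ .len = 2 } };

test "const-pointer capture over a const array" {
    var total: usize = 0;
    // &policies yields *const [2]Policy; each capture is a *const Policy.
    for (&policies) |*policy| total += policy.len;
    try std.testing.expect(total == 3);
}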
lib/std/once.zig
@@ -53,7 +53,7 @@ test "Once executes its function just once" {
var threads: [10]std.Thread = undefined;
defer for (threads) |handle| handle.join();
- for (threads) |*handle| {
+ for (&threads) |*handle| {
handle.* = try std.Thread.spawn(.{}, struct {
fn thread_fn(x: u8) void {
_ = x;
lib/std/packed_int_array.zig
@@ -215,7 +215,7 @@ pub fn PackedIntArrayEndian(comptime Int: type, comptime endian: Endian, comptim
/// or, more likely, an array literal.
pub fn init(ints: [int_count]Int) Self {
var self = @as(Self, undefined);
- for (ints) |int, i| self.set(i, int);
+ for (ints, 0..) |int, i| self.set(i, int);
return self;
}
lib/std/pdb.zig
@@ -922,7 +922,7 @@ const Msf = struct {
}
const streams = try allocator.alloc(MsfStream, stream_count);
- for (streams) |*stream, i| {
+ for (streams, 0..) |*stream, i| {
const size = stream_sizes[i];
if (size == 0) {
stream.* = MsfStream{
lib/std/priority_dequeue.zig
@@ -430,7 +430,7 @@ pub fn PriorityDequeue(comptime T: type, comptime Context: type, comptime compar
const print = std.debug.print;
print("{{ ", .{});
print("items: ", .{});
- for (self.items) |e, i| {
+ for (self.items, 0..) |e, i| {
if (i >= self.len) break;
print("{}, ", .{e});
}
lib/std/priority_queue.zig
@@ -263,7 +263,7 @@ pub fn PriorityQueue(comptime T: type, comptime Context: type, comptime compareF
const print = std.debug.print;
print("{{ ", .{});
print("items: ", .{});
- for (self.items) |e, i| {
+ for (self.items, 0..) |e, i| {
if (i >= self.len) break;
print("{}, ", .{e});
}
lib/std/process.zig
@@ -874,7 +874,7 @@ pub fn argsAlloc(allocator: Allocator) ![][:0]u8 {
mem.copy(u8, result_contents, contents_slice);
var contents_index: usize = 0;
- for (slice_sizes) |len, i| {
+ for (slice_sizes, 0..) |len, i| {
const new_index = contents_index + len;
result_slice_list[i] = result_contents[contents_index..new_index :0];
contents_index = new_index + 1;
@@ -1148,7 +1148,7 @@ pub fn execve(
const arena = arena_allocator.allocator();
const argv_buf = try arena.allocSentinel(?[*:0]u8, argv.len, null);
- for (argv) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
+ for (argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
const envp = m: {
if (env_map) |m| {
lib/std/rand.zig
@@ -414,7 +414,7 @@ pub const Random = struct {
std.debug.assert(point < sum);
var accumulator: T = 0;
- for (proportions) |p, index| {
+ for (proportions, 0..) |p, index| {
accumulator += p;
if (point < accumulator) return index;
}
lib/std/simd.zig
@@ -89,7 +89,7 @@ pub fn VectorCount(comptime VectorType: type) type {
pub inline fn iota(comptime T: type, comptime len: usize) @Vector(len, T) {
comptime {
var out: [len]T = undefined;
- for (out) |*element, i| {
+ for (&out, 0..) |*element, i| {
element.* = switch (@typeInfo(T)) {
.Int => @intCast(T, i),
.Float => @intToFloat(T, i),
lib/std/sort.zig
@@ -1219,9 +1219,9 @@ fn testStableSort() !void {
IdAndValue{ .id = 2, .value = 0 },
},
};
- for (cases) |*case| {
+ for (&cases) |*case| {
insertionSort(IdAndValue, (case.*)[0..], {}, cmpByValue);
- for (case.*) |item, i| {
+ for (case.*, 0..) |item, i| {
try testing.expect(item.id == expected[i].id);
try testing.expect(item.value == expected[i].value);
}
@@ -1373,7 +1373,7 @@ fn fuzzTest(rng: std.rand.Random) !void {
var array = try testing.allocator.alloc(IdAndValue, array_size);
defer testing.allocator.free(array);
// populate with random data
- for (array) |*item, index| {
+ for (array, 0..) |*item, index| {
item.id = index;
item.value = rng.intRangeLessThan(i32, 0, 100);
}
@@ -1401,7 +1401,7 @@ pub fn argMin(
var smallest = items[0];
var smallest_index: usize = 0;
- for (items[1..]) |item, i| {
+ for (items[1..], 0..) |item, i| {
if (lessThan(context, item, smallest)) {
smallest = item;
smallest_index = i + 1;
@@ -1453,7 +1453,7 @@ pub fn argMax(
var biggest = items[0];
var biggest_index: usize = 0;
- for (items[1..]) |item, i| {
+ for (items[1..], 0..) |item, i| {
if (lessThan(context, biggest, item)) {
biggest = item;
biggest_index = i + 1;
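
Note: in argMin/argMax (and indexOfMin/indexOfMax in mem.zig above), the 0.. counter restarts at zero for the items[1..] view, so the index into the full slice remains i + 1; the migration keeps that offset intact. A worked sketch:

const std = @import("std");

test "counter over a sliced view starts at zero" {
    const items = [_]i32{ 5, 3, 9, 1 };
    var smallest = items[0];
    var smallest_index: usize = 0;
    // i counts positions within items[1..], so the full-slice index is i + 1.
    for (items[1..], 0..) |item, i| {
        if (item < smallest) {
            smallest = item;
            smallest_index = i + 1;
        }
    }
    try std.testing.expect(smallest == 1 and smallest_index == 3);
}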
lib/std/target.zig
@@ -720,7 +720,7 @@ pub const Target = struct {
/// Adds the specified feature set but not its dependencies.
pub fn addFeatureSet(set: *Set, other_set: Set) void {
if (builtin.zig_backend == .stage2_c) {
- for (set.ints) |*int, i| int.* |= other_set.ints[i];
+ for (&set.ints, 0..) |*int, i| int.* |= other_set.ints[i];
} else {
set.ints = @as(@Vector(usize_count, usize), set.ints) | @as(@Vector(usize_count, usize), other_set.ints);
}
@@ -736,7 +736,7 @@ pub const Target = struct {
/// Removes the specified feature but not its dependents.
pub fn removeFeatureSet(set: *Set, other_set: Set) void {
if (builtin.zig_backend == .stage2_c) {
- for (set.ints) |*int, i| int.* &= ~other_set.ints[i];
+ for (&set.ints, 0..) |*int, i| int.* &= ~other_set.ints[i];
} else {
set.ints = @as(@Vector(usize_count, usize), set.ints) & ~@as(@Vector(usize_count, usize), other_set.ints);
}
@@ -747,7 +747,7 @@ pub const Target = struct {
var old = set.ints;
while (true) {
- for (all_features_list) |feature, index_usize| {
+ for (all_features_list, 0..) |feature, index_usize| {
const index = @intCast(Index, index_usize);
if (set.isEnabled(index)) {
set.addFeatureSet(feature.dependencies);
@@ -1330,7 +1330,7 @@ pub const Target = struct {
fn allCpusFromDecls(comptime cpus: type) []const *const Cpu.Model {
const decls = @typeInfo(cpus).Struct.decls;
var array: [decls.len]*const Cpu.Model = undefined;
- for (decls) |decl, i| {
+ for (decls, 0..) |decl, i| {
array[i] = &@field(cpus, decl.name);
}
return &array;
lib/std/testing.zig
@@ -384,7 +384,7 @@ fn SliceDiffer(comptime T: type) type {
const Self = @This();
pub fn write(self: Self, writer: anytype) !void {
- for (self.expected) |value, i| {
+ for (self.expected, 0..) |value, i| {
var full_index = self.start_index + i;
const diff = if (i < self.actual.len) !std.meta.eql(self.actual[i], value) else true;
if (diff) try self.ttyconf.setColor(writer, .Red);
@@ -405,7 +405,7 @@ const BytesDiffer = struct {
while (expected_iterator.next()) |chunk| {
// to avoid having to calculate diffs twice per chunk
var diffs: std.bit_set.IntegerBitSet(16) = .{ .mask = 0 };
- for (chunk) |byte, i| {
+ for (chunk, 0..) |byte, i| {
var absolute_byte_index = (expected_iterator.index - chunk.len) + i;
const diff = if (absolute_byte_index < self.actual.len) self.actual[absolute_byte_index] != byte else true;
if (diff) diffs.set(i);
@@ -418,7 +418,7 @@ const BytesDiffer = struct {
if (chunk.len < 8) missing_columns += 1;
try writer.writeByteNTimes(' ', missing_columns);
}
- for (chunk) |byte, i| {
+ for (chunk, 0..) |byte, i| {
const byte_to_print = if (std.ascii.isPrint(byte)) byte else '.';
try self.writeByteDiff(writer, "{c}", byte_to_print, diffs.isSet(i));
}
@@ -1059,7 +1059,7 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime
// Setup the tuple that will actually be used with @call (we'll need to insert
// the failing allocator in field @"0" before each @call)
var args: ArgsTuple = undefined;
- inline for (@typeInfo(@TypeOf(extra_args)).Struct.fields) |field, i| {
+ inline for (@typeInfo(@TypeOf(extra_args)).Struct.fields, 0..) |field, i| {
const arg_i_str = comptime str: {
var str_buf: [100]u8 = undefined;
const args_i = i + 1;
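
The checkAllAllocationFailures hunk shows that inline for follows the same rule: a comptime-known aggregate such as a struct's field list still unrolls, with the counter supplied by 0... A minimal sketch (type and test name illustrative):

    const std = @import("std");

    test "inline for over struct fields with an explicit counter" {
        const Point = struct { x: i32, y: i32 };
        var names: [2][]const u8 = undefined;
        inline for (@typeInfo(Point).Struct.fields, 0..) |field, i| {
            names[i] = field.name;
        }
        try std.testing.expectEqualStrings("x", names[0]);
        try std.testing.expectEqualStrings("y", names[1]);
    }
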
lib/std/wasm.zig
@@ -636,7 +636,7 @@ pub const Type = struct {
if (fmt.len != 0) std.fmt.invalidFmtError(fmt, self);
_ = opt;
try writer.writeByte('(');
- for (self.params) |param, i| {
+ for (self.params, 0..) |param, i| {
try writer.print("{s}", .{@tagName(param)});
if (i + 1 != self.params.len) {
try writer.writeAll(", ");
@@ -646,7 +646,7 @@ pub const Type = struct {
if (self.returns.len == 0) {
try writer.writeAll("nil");
} else {
- for (self.returns) |return_ty, i| {
+ for (self.returns, 0..) |return_ty, i| {
try writer.print("{s}", .{@tagName(return_ty)});
if (i + 1 != self.returns.len) {
try writer.writeAll(", ");
lib/test_runner.zig
@@ -33,7 +33,7 @@ pub fn main() void {
async_frame_buffer = &[_]u8{};
var leaks: usize = 0;
- for (test_fn_list) |test_fn, i| {
+ for (test_fn_list, 0..) |test_fn, i| {
std.testing.allocator_instance = .{};
defer {
if (std.testing.allocator_instance.deinit()) {
src/arch/aarch64/CodeGen.zig
@@ -515,7 +515,7 @@ fn gen(self: *Self) !void {
self.ret_mcv = MCValue{ .stack_offset = stack_offset };
}
- for (self.args) |*arg, arg_index| {
+ for (self.args, 0..) |*arg, arg_index| {
// Copy register arguments to the stack
switch (arg.*) {
.register => |reg| {
@@ -1633,14 +1633,14 @@ fn allocRegs(
var reused_read_arg: ?usize = null;
// Lock all args which are already allocated to registers
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
const mcv = try arg.bind.resolveToMcv(self);
if (mcv == .register) {
read_locks[i] = self.register_manager.lockReg(mcv.register);
}
}
- for (write_args) |arg, i| {
+ for (write_args, 0..) |arg, i| {
if (arg.bind == .reg) {
write_locks[i] = self.register_manager.lockReg(arg.bind.reg);
}
@@ -1648,7 +1648,7 @@ fn allocRegs(
// Allocate registers for all args which aren't allocated to
// registers yet
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
const mcv = try arg.bind.resolveToMcv(self);
if (mcv == .register) {
const raw_reg = mcv.register;
@@ -1672,7 +1672,7 @@ fn allocRegs(
const raw_reg = arg.bind.reg;
arg.reg.* = self.registerAlias(raw_reg, arg.ty);
} else {
- reuse_operand: for (read_args) |read_arg, i| {
+ reuse_operand: for (read_args, 0..) |read_arg, i| {
if (read_arg.bind == .inst) {
const operand = read_arg.bind.inst;
const mcv = try self.resolveInst(operand);
@@ -1694,7 +1694,7 @@ fn allocRegs(
}
}
} else {
- for (write_args) |arg, i| {
+ for (write_args, 0..) |arg, i| {
if (arg.bind == .reg) {
const raw_reg = arg.bind.reg;
arg.reg.* = self.registerAlias(raw_reg, arg.ty);
@@ -1708,7 +1708,7 @@ fn allocRegs(
// For all read_args which need to be moved from non-register to
// register, perform the move
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
if (reused_read_arg) |j| {
// Check whether this read_arg was reused
if (i == j) continue;
@@ -4267,7 +4267,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Make space for the arguments passed via the stack
self.max_end_stack += info.stack_byte_count;
- for (info.args) |mc_arg, arg_i| {
+ for (info.args, 0..) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_mcv = try self.resolveInst(args[arg_i]);
@@ -4757,7 +4757,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const else_slice = else_branch.inst_table.entries.slice();
const else_keys = else_slice.items(.key);
const else_values = else_slice.items(.value);
- for (else_keys) |else_key, else_idx| {
+ for (else_keys, 0..) |else_key, else_idx| {
const else_value = else_values[else_idx];
const canon_mcv = if (saved_then_branch.inst_table.fetchSwapRemove(else_key)) |then_entry| blk: {
// The instruction's MCValue is overridden in both branches.
@@ -4790,7 +4790,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const then_slice = saved_then_branch.inst_table.entries.slice();
const then_keys = then_slice.items(.key);
const then_values = then_slice.items(.value);
- for (then_keys) |then_key, then_idx| {
+ for (then_keys, 0..) |then_key, then_idx| {
const then_value = then_values[then_idx];
// We already deleted the items from this table that matched the else_branch.
// So these are all instructions that are only overridden in the then branch.
@@ -5069,7 +5069,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
const branch_into_prong_relocs = try self.gpa.alloc(u32, items.len);
defer self.gpa.free(branch_into_prong_relocs);
- for (items) |item, idx| {
+ for (items, 0..) |item, idx| {
const cmp_result = try self.cmp(.{ .inst = pl_op.operand }, .{ .inst = item }, condition_ty, .neq);
branch_into_prong_relocs[idx] = try self.condBr(cmp_result);
}
@@ -6373,7 +6373,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
}
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
if (param_size == 0) {
result.args[i] = .{ .none = {} };
@@ -6438,7 +6438,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
if (ty.abiSize(self.target.*) > 0) {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
const param_alignment = ty.abiAlignment(self.target.*);
src/arch/aarch64/Emit.zig
@@ -80,7 +80,7 @@ pub fn emitMir(
try emit.lowerBranches();
// Emit machine code
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
switch (tag) {
.add_immediate => try emit.mirAddSubtractImmediate(inst),
@@ -323,7 +323,7 @@ fn lowerBranches(emit: *Emit) !void {
//
// TODO optimization opportunity: do this in codegen while
// generating MIR
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
if (isBranch(tag)) {
const target_inst = emit.branchTarget(inst);
@@ -368,7 +368,7 @@ fn lowerBranches(emit: *Emit) !void {
all_branches_lowered = true;
var current_code_offset: usize = 0;
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
// If this instruction is contained in the code offset
src/arch/arm/bits.zig
@@ -452,11 +452,11 @@ pub const Instruction = union(enum) {
const masks = comptime blk: {
const base_mask: u32 = std.math.maxInt(u8);
var result = [_]u32{0} ** 16;
- for (result) |*mask, i| mask.* = std.math.rotr(u32, base_mask, 2 * i);
+ for (&result, 0..) |*mask, i| mask.* = std.math.rotr(u32, base_mask, 2 * i);
break :blk result;
};
- return for (masks) |mask, i| {
+ return for (masks, 0..) |mask, i| {
if (x & mask == x) {
break Operand{
.immediate = .{
src/arch/arm/CodeGen.zig
@@ -513,7 +513,7 @@ fn gen(self: *Self) !void {
self.ret_mcv = MCValue{ .stack_offset = stack_offset };
}
- for (self.args) |*arg, arg_index| {
+ for (self.args, 0..) |*arg, arg_index| {
// Copy register arguments to the stack
switch (arg.*) {
.register => |reg| {
@@ -3105,14 +3105,14 @@ fn allocRegs(
var reused_read_arg: ?usize = null;
// Lock all args which are already allocated to registers
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
const mcv = try arg.bind.resolveToMcv(self);
if (mcv == .register) {
read_locks[i] = self.register_manager.lockReg(mcv.register);
}
}
- for (write_args) |arg, i| {
+ for (write_args, 0..) |arg, i| {
if (arg.bind == .reg) {
write_locks[i] = self.register_manager.lockReg(arg.bind.reg);
}
@@ -3120,7 +3120,7 @@ fn allocRegs(
// Allocate registers for all args which aren't allocated to
// registers yet
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
const mcv = try arg.bind.resolveToMcv(self);
if (mcv == .register) {
arg.reg.* = mcv.register;
@@ -3141,7 +3141,7 @@ fn allocRegs(
if (arg.bind == .reg) {
arg.reg.* = arg.bind.reg;
} else {
- reuse_operand: for (read_args) |read_arg, i| {
+ reuse_operand: for (read_args, 0..) |read_arg, i| {
if (read_arg.bind == .inst) {
const operand = read_arg.bind.inst;
const mcv = try self.resolveInst(operand);
@@ -3161,7 +3161,7 @@ fn allocRegs(
}
}
} else {
- for (write_args) |arg, i| {
+ for (write_args, 0..) |arg, i| {
if (arg.bind == .reg) {
arg.reg.* = arg.bind.reg;
} else {
@@ -3173,7 +3173,7 @@ fn allocRegs(
// For all read_args which need to be moved from non-register to
// register, perform the move
- for (read_args) |arg, i| {
+ for (read_args, 0..) |arg, i| {
if (reused_read_arg) |j| {
// Check whether this read_arg was reused
if (i == j) continue;
@@ -4217,7 +4217,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Make space for the arguments passed via the stack
self.max_end_stack += info.stack_byte_count;
- for (info.args) |mc_arg, arg_i| {
+ for (info.args, 0..) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_mcv = try self.resolveInst(args[arg_i]);
@@ -4669,7 +4669,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const else_slice = else_branch.inst_table.entries.slice();
const else_keys = else_slice.items(.key);
const else_values = else_slice.items(.value);
- for (else_keys) |else_key, else_idx| {
+ for (else_keys, 0..) |else_key, else_idx| {
const else_value = else_values[else_idx];
const canon_mcv = if (saved_then_branch.inst_table.fetchSwapRemove(else_key)) |then_entry| blk: {
// The instruction's MCValue is overridden in both branches.
@@ -4702,7 +4702,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const then_slice = saved_then_branch.inst_table.entries.slice();
const then_keys = then_slice.items(.key);
const then_values = then_slice.items(.value);
- for (then_keys) |then_key, then_idx| {
+ for (then_keys, 0..) |then_key, then_idx| {
const then_value = then_values[then_idx];
// We already deleted the items from this table that matched the else_branch.
// So these are all instructions that are only overridden in the then branch.
@@ -4991,7 +4991,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
const branch_into_prong_relocs = try self.gpa.alloc(u32, items.len);
defer self.gpa.free(branch_into_prong_relocs);
- for (items) |item, idx| {
+ for (items, 0..) |item, idx| {
const cmp_result = try self.cmp(.{ .inst = pl_op.operand }, .{ .inst = item }, condition_ty, .neq);
branch_into_prong_relocs[idx] = try self.condBr(cmp_result);
}
@@ -6296,7 +6296,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
}
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
if (ty.abiAlignment(self.target.*) == 8)
ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2);
@@ -6346,7 +6346,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
if (ty.abiSize(self.target.*) > 0) {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
const param_alignment = ty.abiAlignment(self.target.*);
src/arch/arm/Emit.zig
@@ -77,7 +77,7 @@ pub fn emitMir(
try emit.lowerBranches();
// Emit machine code
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
switch (tag) {
.add => try emit.mirDataProcessing(inst),
@@ -239,7 +239,7 @@ fn lowerBranches(emit: *Emit) !void {
//
// TODO optimization opportunity: do this in codegen while
// generating MIR
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
if (isBranch(tag)) {
const target_inst = emit.branchTarget(inst);
@@ -284,7 +284,7 @@ fn lowerBranches(emit: *Emit) !void {
all_branches_lowered = true;
var current_code_offset: usize = 0;
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
// If this instruction is contained in the code offset
src/arch/riscv64/CodeGen.zig
@@ -1689,7 +1689,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- for (info.args) |mc_arg, arg_i| {
+ for (info.args, 0..) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_mcv = try self.resolveInst(args[arg_i]);
@@ -2727,7 +2727,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var next_stack_offset: u32 = 0;
const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 };
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
src/arch/riscv64/Emit.zig
@@ -38,7 +38,7 @@ pub fn emitMir(
const mir_tags = emit.mir.instructions.items(.tag);
// Emit machine code
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
switch (tag) {
.add => try emit.mirRType(inst),
src/arch/sparc64/CodeGen.zig
@@ -1189,7 +1189,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
try self.register_manager.getReg(reg, null);
}
- for (info.args) |mc_arg, arg_i| {
+ for (info.args, 0..) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_mcv = try self.resolveInst(arg);
@@ -1450,7 +1450,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const else_slice = else_branch.inst_table.entries.slice();
const else_keys = else_slice.items(.key);
const else_values = else_slice.items(.value);
- for (else_keys) |else_key, else_idx| {
+ for (else_keys, 0..) |else_key, else_idx| {
const else_value = else_values[else_idx];
const canon_mcv = if (saved_then_branch.inst_table.fetchSwapRemove(else_key)) |then_entry| blk: {
// The instruction's MCValue is overridden in both branches.
@@ -1484,7 +1484,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const then_slice = saved_then_branch.inst_table.entries.slice();
const then_keys = then_slice.items(.key);
const then_values = then_slice.items(.value);
- for (then_keys) |then_key, then_idx| {
+ for (then_keys, 0..) |then_key, then_idx| {
const then_value = then_values[then_idx];
// We already deleted the items from this table that matched the else_branch.
// So these are all instructions that are only overridden in the then branch.
@@ -4363,7 +4363,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
.callee => abi.c_abi_int_param_regs_callee_view,
};
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
src/arch/sparc64/Emit.zig
@@ -69,7 +69,7 @@ pub fn emitMir(
try emit.lowerBranches();
// Emit machine code
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
switch (tag) {
.dbg_line => try emit.mirDbgLine(inst),
@@ -513,7 +513,7 @@ fn lowerBranches(emit: *Emit) !void {
//
// TODO optimization opportunity: do this in codegen while
// generating MIR
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
if (isBranch(tag)) {
const target_inst = emit.branchTarget(inst);
@@ -558,7 +558,7 @@ fn lowerBranches(emit: *Emit) !void {
all_branches_lowered = true;
var current_code_offset: usize = 0;
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
// If this instruction is contained in the code offset
src/arch/wasm/CodeGen.zig
@@ -1255,7 +1255,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
// reserve space and insert all prologue instructions at the front of the instruction list
// We insert them in reverse order as there is no insertSlice in multiArrayList.
try func.mir_instructions.ensureUnusedCapacity(func.gpa, prologue.items.len);
- for (prologue.items) |_, index| {
+ for (prologue.items, 0..) |_, index| {
const inst = prologue.items[prologue.items.len - 1 - index];
func.mir_instructions.insertAssumeCapacity(0, inst);
}
@@ -3117,7 +3117,7 @@ fn mergeBranch(func: *CodeGen, branch: *const Branch) !void {
const target_values = target_slice.items(.value);
try parent.values.ensureUnusedCapacity(func.gpa, branch.values.count());
- for (target_keys) |key, index| {
+ for (target_keys, 0..) |key, index| {
// TODO: process deaths from branches
parent.values.putAssumeCapacity(key, target_values[index]);
}
@@ -3501,7 +3501,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const values = try func.gpa.alloc(CaseValue, items.len);
errdefer func.gpa.free(values);
- for (items) |ref, i| {
+ for (items, 0..) |ref, i| {
const item_val = func.air.value(ref).?;
const int_val = func.valueAsI32(item_val, target_ty);
if (lowest_maybe == null or int_val < lowest_maybe.?) {
@@ -3561,7 +3561,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
while (value <= highest) : (value += 1) {
// idx represents the branch we jump to
const idx = blk: {
- for (case_list.items) |case, idx| {
+ for (case_list.items, 0..) |case, idx| {
for (case.values) |case_value| {
if (case_value.integer == value) break :blk @intCast(u32, idx);
}
@@ -3588,7 +3588,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @boolToInt(has_else_body));
- for (case_list.items) |case, index| {
+ for (case_list.items, 0..) |case, index| {
// when sparse, we use if/else-chain, so emit conditional checks
if (is_sparse) {
// for single value prong we can emit a simple if
@@ -4558,7 +4558,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// copy stack pointer into a temporary local, which is
// moved for each element to store each value in the right position.
const offset = try func.buildPointerOffset(result, 0, .new);
- for (elements) |elem, elem_index| {
+ for (elements, 0..) |elem, elem_index| {
const elem_val = try func.resolveInst(elem);
try func.store(offset, elem_val, elem_ty, 0);
@@ -4587,7 +4587,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// we ensure a new local is created so it's zero-initialized
const result = try func.ensureAllocLocal(backing_type);
var current_bit: u16 = 0;
- for (elements) |elem, elem_index| {
+ for (elements, 0..) |elem, elem_index| {
const field = fields[elem_index];
if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -4623,7 +4623,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => {
const result = try func.allocStack(result_ty);
const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset
- for (elements) |elem, elem_index| {
+ for (elements, 0..) |elem, elem_index| {
if (result_ty.structFieldValueComptime(elem_index) != null) continue;
const elem_ty = result_ty.structFieldType(elem_index);
@@ -6149,7 +6149,7 @@ fn callIntrinsic(
} else WValue{ .none = {} };
// Lower all arguments to the stack before we call our function
- for (args) |arg, arg_i| {
+ for (args, 0..) |arg, arg_i| {
assert(!(want_sret_param and arg == .stack));
assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime());
try func.lowerArg(.C, param_types[arg_i], arg);
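
The genFunc hunk near the top of this file keeps only the index (|_, index|) in order to walk the prologue backwards; discarding the element capture works unchanged, with only the counter operand added. A minimal sketch of that shape (data illustrative):

    const std = @import("std");

    test "counter-only iteration when the element is discarded" {
        const prologue = [_]u8{ 1, 2, 3 };
        var out: [3]u8 = undefined;
        for (prologue, 0..) |_, index| {
            // Walk the source backwards, as the prologue insertion above does.
            out[index] = prologue[prologue.len - 1 - index];
        }
        try std.testing.expectEqualSlices(u8, &[_]u8{ 3, 2, 1 }, &out);
    }
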
src/arch/wasm/Emit.zig
@@ -44,7 +44,7 @@ pub fn emitMir(emit: *Emit) InnerError!void {
// before we emit the function body when lowering MIR
try emit.emitLocals();
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
switch (tag) {
// block instructions
src/arch/x86_64/abi.zig
@@ -335,7 +335,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
// "If one of the classes is MEMORY, the whole argument is passed in memory"
// "If X87UP is not preceded by X87, the whole argument is passed in memory."
var found_sseup = false;
- for (result) |item, i| switch (item) {
+ for (result, 0..) |item, i| switch (item) {
.memory => return memory_class,
.x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
.sseup => found_sseup = true,
@@ -347,7 +347,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
if (ty_size > 16 and (result[0] != .sse or !found_sseup)) return memory_class;
// "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
- for (result) |*item, i| {
+ for (&result, 0..) |*item, i| {
if (item.* == .sseup) switch (result[i - 1]) {
.sse, .sseup => continue,
else => item.* = .sse,
@@ -379,7 +379,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
}
// Combine this field with the previous one.
const field_class = classifySystemV(field.ty, target, .other);
- for (result) |*result_item, i| {
+ for (&result, 0..) |*result_item, i| {
const field_item = field_class[i];
// "If both classes are equal, this is the resulting class."
if (result_item.* == field_item) {
@@ -431,7 +431,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
// "If one of the classes is MEMORY, the whole argument is passed in memory"
// "If X87UP is not preceded by X87, the whole argument is passed in memory."
var found_sseup = false;
- for (result) |item, i| switch (item) {
+ for (result, 0..) |item, i| switch (item) {
.memory => return memory_class,
.x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
.sseup => found_sseup = true,
@@ -443,7 +443,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
if (ty_size > 16 and (result[0] != .sse or !found_sseup)) return memory_class;
// "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
- for (result) |*item, i| {
+ for (&result, 0..) |*item, i| {
if (item.* == .sseup) switch (result[i - 1]) {
.sse, .sseup => continue,
else => item.* = .sse,
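
classifySystemV shows both sides of the reference rule: the read-only passes keep for (result, 0..) |item, i| with the array by value, while the mutating passes become for (&result, 0..) |*item, i|. Only pointer captures force the &. A minimal sketch (names and data illustrative):

    const std = @import("std");

    test "value capture needs no reference, pointer capture does" {
        var classes = [_]u8{ 0, 1, 1, 0 };
        var last_one: ?usize = null;
        for (classes, 0..) |item, i| { // reading: the array may be passed by value
            if (item == 1) last_one = i;
        }
        for (&classes) |*item| { // writing through *item: pass the array by reference
            item.* = 0;
        }
        try std.testing.expectEqual(@as(?usize, 2), last_one);
        try std.testing.expectEqualSlices(u8, &[_]u8{ 0, 0, 0, 0 }, &classes);
    }
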
src/arch/x86_64/CodeGen.zig
@@ -186,7 +186,7 @@ const Branch = struct {
_ = options;
comptime assert(unused_format_string.len == 0);
try writer.writeAll("Branch {\n");
- for (ctx.insts) |inst, i| {
+ for (ctx.insts, 0..) |inst, i| {
const mcv = ctx.mcvs[i];
try writer.print(" %{d} => {}\n", .{ inst, mcv });
}
@@ -3951,7 +3951,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
};
defer if (ret_reg_lock) |lock| self.register_manager.unlockReg(lock);
- for (args) |arg, arg_i| {
+ for (args, 0..) |arg, arg_i| {
const mc_arg = info.args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_mcv = try self.resolveInst(args[arg_i]);
@@ -4912,7 +4912,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
var relocs = try self.gpa.alloc(u32, items.len);
defer self.gpa.free(relocs);
- for (items) |item, item_i| {
+ for (items, 0..) |item, item_i| {
const item_mcv = try self.resolveInst(item);
relocs[item_i] = try self.genCondSwitchMir(condition_ty, condition, item_mcv);
}
@@ -4974,7 +4974,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
for (self.branch_stack.items) |bs| {
log.debug("{}", .{bs.fmtDebug()});
}
- for (branch_stack.items) |bs, i| {
+ for (branch_stack.items, 0..) |bs, i| {
log.debug("Case-{d} branch: {}", .{ i, bs.fmtDebug() });
}
@@ -4999,7 +4999,7 @@ fn canonicaliseBranches(self: *Self, parent_branch: *Branch, canon_branch: *Bran
const target_keys = target_slice.items(.key);
const target_values = target_slice.items(.value);
- for (target_keys) |target_key, target_idx| {
+ for (target_keys, 0..) |target_key, target_idx| {
const target_value = target_values[target_idx];
const canon_mcv = if (canon_branch.inst_table.fetchSwapRemove(target_key)) |canon_entry| blk: {
// The instruction's MCValue is overridden in both branches.
@@ -5032,7 +5032,7 @@ fn canonicaliseBranches(self: *Self, parent_branch: *Branch, canon_branch: *Bran
const canon_slice = canon_branch.inst_table.entries.slice();
const canon_keys = canon_slice.items(.key);
const canon_values = canon_slice.items(.value);
- for (canon_keys) |canon_key, canon_idx| {
+ for (canon_keys, 0..) |canon_key, canon_idx| {
const canon_value = canon_values[canon_idx];
// We already deleted the items from this table that matched the target_branch.
// So these are all instructions that are only overridden in the canon branch.
@@ -6571,7 +6571,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
switch (result_ty.zigTypeTag()) {
.Struct => {
const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align));
- for (elements) |elem, elem_i| {
+ for (elements, 0..) |elem, elem_i| {
if (result_ty.structFieldValueComptime(elem_i) != null) continue; // comptime elem
const elem_ty = result_ty.structFieldType(elem_i);
@@ -6586,7 +6586,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const elem_ty = result_ty.childType();
const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
- for (elements) |elem, elem_i| {
+ for (elements, 0..) |elem, elem_i| {
const elem_mcv = try self.resolveInst(elem);
const elem_off = @intCast(i32, elem_size * elem_i);
try self.genSetStack(elem_ty, stack_offset - elem_off, elem_mcv, .{});
@@ -6963,7 +6963,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
else => 0,
};
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
assert(ty.hasRuntimeBits());
const classes: []const abi.Class = switch (self.target.os.tag) {
@@ -7039,7 +7039,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
else => 0,
};
- for (param_types) |ty, i| {
+ for (param_types, 0..) |ty, i| {
if (!ty.hasRuntimeBits()) {
result.args[i] = .{ .none = {} };
continue;
src/arch/x86_64/Emit.zig
@@ -61,7 +61,7 @@ const Reloc = struct {
pub fn lowerMir(emit: *Emit) InnerError!void {
const mir_tags = emit.mir.instructions.items(.tag);
- for (mir_tags) |tag, index| {
+ for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
try emit.code_offset_mapping.putNoClobber(emit.bin_file.allocator, inst, emit.code.items.len);
switch (tag) {
@@ -1544,7 +1544,7 @@ const OpCode = struct {
fn init(comptime in_bytes: []const u8) OpCode {
comptime assert(in_bytes.len <= 3);
comptime var bytes: [3]u8 = undefined;
- inline for (in_bytes) |x, i| {
+ inline for (in_bytes, 0..) |x, i| {
bytes[i] = x;
}
return .{ .bytes = bytes, .count = in_bytes.len };
src/arch/x86_64/Mir.zig
@@ -535,7 +535,7 @@ pub const RegisterList = struct {
const Self = @This();
fn getIndexForReg(registers: []const Register, reg: Register) BitSet.MaskInt {
- for (registers) |cpreg, i| {
+ for (registers, 0..) |cpreg, i| {
if (reg.id() == cpreg.id()) return @intCast(u32, i);
}
unreachable; // register not in input register list!
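
getIndexForReg relies on for being an expression: break yields the loop's result and the else arm runs only when no break fires. That behavior is untouched; the migration merely adds the counter operand. A minimal sketch (function name illustrative):

    const std = @import("std");

    fn indexOfByte(bytes: []const u8, wanted: u8) ?usize {
        return for (bytes, 0..) |b, i| {
            if (b == wanted) break i;
        } else null; // reached only when the loop completes without a break
    }

    test "for as an expression with an else arm" {
        try std.testing.expectEqual(@as(?usize, 2), indexOfByte("abc", 'c'));
        try std.testing.expectEqual(@as(?usize, null), indexOfByte("abc", 'z'));
    }
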
src/codegen/spirv/Assembler.zig
@@ -392,7 +392,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
.OpTypeFunction => blk: {
const param_operands = operands[2..];
const param_types = try self.spv.arena.alloc(SpvType.Ref, param_operands.len);
- for (param_types) |*param, i| {
+ for (param_types, 0..) |*param, i| {
param.* = try self.resolveTypeRef(param_operands[i].ref_id);
}
const payload = try self.spv.arena.create(SpvType.Payload.Function);
src/codegen/spirv/Module.zig
@@ -161,7 +161,7 @@ pub fn flush(self: Module, file: std.fs.File) !void {
var iovc_buffers: [buffers.len]std.os.iovec_const = undefined;
var file_size: u64 = 0;
- for (iovc_buffers) |*iovc, i| {
+ for (&iovc_buffers, 0..) |*iovc, i| {
// Note, since spir-v supports both little and big endian we can ignore byte order here and
// just treat the words as a sequence of bytes.
const bytes = std.mem.sliceAsBytes(buffers[i]);
@@ -389,7 +389,7 @@ fn decorateStruct(self: *Module, target: IdRef, info: *const Type.Payload.Struct
// Decorations for the struct members.
const extra = info.member_decoration_extra;
var extra_i: u32 = 0;
- for (info.members) |member, i| {
+ for (info.members, 0..) |member, i| {
const d = member.decorations;
const index = @intCast(Word, i);
switch (d.matrix_layout) {
src/codegen/spirv/Section.zig
@@ -195,7 +195,7 @@ fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDe
fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand) void {
var mask: Word = 0;
- inline for (@typeInfo(Operand).Struct.fields) |field, bit| {
+ inline for (@typeInfo(Operand).Struct.fields, 0..) |field, bit| {
switch (@typeInfo(field.type)) {
.Optional => if (@field(operand, field.name) != null) {
mask |= 1 << @intCast(u5, bit);
src/codegen/spirv/type.zig
@@ -98,7 +98,7 @@ pub const Type = extern union {
const struct_b = b.payload(.@"struct");
if (struct_a.members.len != struct_b.members.len)
return false;
- for (struct_a.members) |mem_a, i| {
+ for (struct_a.members, 0..) |mem_a, i| {
if (!std.meta.eql(mem_a, struct_b.members[i]))
return false;
}
src/codegen/c.zig
@@ -253,7 +253,7 @@ fn formatIdent(
if (solo and isReservedIdent(ident)) {
try writer.writeAll("zig_e_");
}
- for (ident) |c, i| {
+ for (ident, 0..) |c, i| {
switch (c) {
'a'...'z', 'A'...'Z', '_' => try writer.writeByte(c),
'.' => try writer.writeByte('_'),
@@ -361,7 +361,7 @@ pub const Function = struct {
_ = mutability;
if (f.getFreeLocals().getPtrContext(ty, f.tyHashCtx())) |locals_list| {
- for (locals_list.items) |local_index, i| {
+ for (locals_list.items, 0..) |local_index, i| {
const local = &f.locals.items[local_index];
if (local.alignment >= alignment) {
local.loop_depth = @intCast(LoopDepth, f.free_locals_stack.items.len - 1);
@@ -1283,7 +1283,7 @@ pub const DeclGen = struct {
try writer.writeByte('{');
var empty = true;
- for (field_vals) |field_val, field_index| {
+ for (field_vals, 0..) |field_val, field_index| {
const field_ty = ty.structFieldType(field_index);
if (!field_ty.hasRuntimeBits()) continue;
@@ -1309,7 +1309,7 @@ pub const DeclGen = struct {
const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
var eff_num_fields: usize = 0;
- for (field_vals) |_, index| {
+ for (field_vals, 0..) |_, index| {
const field_ty = ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -1331,7 +1331,7 @@ pub const DeclGen = struct {
var eff_index: usize = 0;
var needs_closing_paren = false;
- for (field_vals) |field_val, index| {
+ for (field_vals, 0..) |field_val, index| {
const field_ty = ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -1359,7 +1359,7 @@ pub const DeclGen = struct {
try writer.writeByte('(');
// a << a_off | b << b_off | c << c_off
var empty = true;
- for (field_vals) |field_val, index| {
+ for (field_vals, 0..) |field_val, index| {
const field_ty = ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -1719,7 +1719,7 @@ pub const DeclGen = struct {
{
const fields = t.tupleFields();
var field_id: usize = 0;
- for (fields.types) |field_ty, i| {
+ for (fields.types, 0..) |field_ty, i| {
if (!field_ty.hasRuntimeBits() or fields.values[i].tag() != .unreachable_value) continue;
try buffer.append(' ');
@@ -2130,7 +2130,7 @@ pub const DeclGen = struct {
try tuple_storage.ensureTotalCapacity(allocator, t.structFieldCount());
const fields = t.tupleFields();
- for (fields.values) |value, index|
+ for (fields.values, 0..) |value, index|
if (value.tag() == .unreachable_value)
tuple_storage.appendAssumeCapacity(.{
.type = fields.types[index],
@@ -2415,7 +2415,7 @@ pub const DeclGen = struct {
const name_end = buffer.items.len - "(".len;
try dg.renderTypeAndName(bw, enum_ty, .{ .identifier = "tag" }, .Const, 0, .Complete);
try buffer.appendSlice(") {\n switch (tag) {\n");
- for (enum_ty.enumFields().keys()) |name, index| {
+ for (enum_ty.enumFields().keys(), 0..) |name, index| {
const name_z = try dg.typedefs.allocator.dupeZ(u8, name);
defer dg.typedefs.allocator.free(name_z);
const name_bytes = name_z[0 .. name_z.len + 1];
@@ -2681,7 +2681,7 @@ pub fn genErrDecls(o: *Object) !void {
try writer.writeAll("enum {\n");
o.indent_writer.pushIndent();
var max_name_len: usize = 0;
- for (o.dg.module.error_name_list.items) |name, value| {
+ for (o.dg.module.error_name_list.items, 0..) |name, value| {
max_name_len = std.math.max(name.len, max_name_len);
var err_pl = Value.Payload.Error{ .data = .{ .name = name } };
try o.dg.renderValue(writer, Type.anyerror, Value.initPayload(&err_pl.base), .Other);
@@ -2724,7 +2724,7 @@ pub fn genErrDecls(o: *Object) !void {
try writer.writeAll("static ");
try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = name_prefix }, .Const, 0, .Complete);
try writer.writeAll(" = {");
- for (o.dg.module.error_name_list.items) |name, value| {
+ for (o.dg.module.error_name_list.items, 0..) |name, value| {
if (value != 0) try writer.writeByte(',');
var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len };
@@ -2742,7 +2742,7 @@ fn genExports(o: *Object) !void {
defer tracy.end();
const fwd_decl_writer = o.dg.fwd_decl.writer();
- if (o.dg.module.decl_exports.get(o.dg.decl_index)) |exports| for (exports.items[1..]) |@"export", i| {
+ if (o.dg.module.decl_exports.get(o.dg.decl_index)) |exports| for (exports.items[1..], 0..) |@"export", i| {
try fwd_decl_writer.writeAll("zig_export(");
try o.dg.renderFunctionSignature(fwd_decl_writer, .Forward, @intCast(u32, 1 + i));
try fwd_decl_writer.print(", {s}, {s});\n", .{
@@ -2800,7 +2800,7 @@ pub fn genFunc(f: *Function) !void {
// alignment, descending.
const free_locals = f.getFreeLocals();
const values = f.allocs.values();
- for (f.allocs.keys()) |local_index, i| {
+ for (f.allocs.keys(), 0..) |local_index, i| {
if (values[i]) continue; // static
const local = f.locals.items[local_index];
log.debug("inserting local {d} into free_locals", .{local_index});
@@ -4238,7 +4238,7 @@ fn airCall(
const resolved_args = try gpa.alloc(CValue, args.len);
defer gpa.free(resolved_args);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
resolved_args[i] = try f.resolveInst(arg);
}
@@ -4303,7 +4303,7 @@ fn airCall(
try writer.writeByte('(');
var args_written: usize = 0;
- for (args) |arg, arg_i| {
+ for (args, 0..) |arg, arg_i| {
const ty = f.air.typeOf(arg);
if (!ty.hasRuntimeBitsIgnoreComptime()) continue;
if (args_written != 0) {
@@ -5043,7 +5043,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
extra_i = constraints_extra_begin;
var locals_index = locals_begin;
try writer.writeByte(':');
- for (outputs) |output, index| {
+ for (outputs, 0..) |output, index| {
const extra_bytes = std.mem.sliceAsBytes(f.air.extra[extra_i..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
@@ -5067,7 +5067,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(')');
}
try writer.writeByte(':');
- for (inputs) |input, index| {
+ for (inputs, 0..) |input, index| {
const extra_bytes = std.mem.sliceAsBytes(f.air.extra[extra_i..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
@@ -5426,7 +5426,7 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc
};
const field_loc = switch (struct_ty.tag()) {
.@"struct" => switch (struct_ty.containerLayout()) {
- .Auto, .Extern => for (struct_ty.structFields().values()[index..]) |field, offset| {
+ .Auto, .Extern => for (struct_ty.structFields().values()[index..], 0..) |field, offset| {
if (field.ty.hasRuntimeBitsIgnoreComptime()) break FieldLoc{ .field = .{
.identifier = struct_ty.structFieldName(index + offset),
} };
@@ -5469,7 +5469,7 @@ fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struc
if (tuple.values[index].tag() != .unreachable_value) return CValue.none;
var id: usize = 0;
- break :field_name for (tuple.values) |value, i| {
+ break :field_name for (tuple.values, 0..) |value, i| {
if (value.tag() != .unreachable_value) continue;
if (!tuple.types[i].hasRuntimeBitsIgnoreComptime()) continue;
if (i >= index) break FieldLoc{ .field = .{ .field = id } };
@@ -6687,7 +6687,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
const gpa = f.object.dg.gpa;
const resolved_elements = try gpa.alloc(CValue, elements.len);
defer gpa.free(resolved_elements);
- for (elements) |element, i| {
+ for (elements, 0..) |element, i| {
resolved_elements[i] = try f.resolveInst(element);
}
{
@@ -6706,7 +6706,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
switch (inst_ty.zigTypeTag()) {
.Array, .Vector => {
const elem_ty = inst_ty.childType();
- for (resolved_elements) |element, i| {
+ for (resolved_elements, 0..) |element, i| {
try f.writeCValue(writer, local, .Other);
try writer.print("[{d}] = ", .{i});
try f.writeCValue(writer, element, .Other);
@@ -6727,7 +6727,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(")");
try writer.writeByte('{');
var empty = true;
- for (elements) |element, index| {
+ for (elements, 0..) |element, index| {
if (inst_ty.structFieldValueComptime(index)) |_| continue;
if (!empty) try writer.writeAll(", ");
@@ -6746,7 +6746,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("};\n");
var field_id: usize = 0;
- for (elements) |element, index| {
+ for (elements, 0..) |element, index| {
if (inst_ty.structFieldValueComptime(index)) |_| continue;
const element_ty = f.air.typeOf(element);
@@ -6784,7 +6784,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
var empty = true;
- for (elements) |_, index| {
+ for (elements, 0..) |_, index| {
const field_ty = inst_ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -6796,7 +6796,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
empty = false;
}
empty = true;
- for (resolved_elements) |element, index| {
+ for (resolved_elements, 0..) |element, index| {
const field_ty = inst_ty.structFieldType(index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -7608,7 +7608,7 @@ fn deinitFreeLocalsMap(gpa: mem.Allocator, map: *LocalsMap) void {
}
fn noticeBranchFrees(f: *Function, pre_locals_len: LocalIndex, inst: Air.Inst.Index) !void {
- for (f.locals.items[pre_locals_len..]) |*local, local_offset| {
+ for (f.locals.items[pre_locals_len..], 0..) |*local, local_offset| {
const local_index = pre_locals_len + @intCast(LocalIndex, local_offset);
if (f.allocs.contains(local_index)) continue; // allocs are not freeable
src/codegen/llvm.zig
@@ -600,7 +600,7 @@ pub const Object = struct {
defer mod.gpa.free(llvm_errors);
llvm_errors[0] = llvm_slice_ty.getUndef();
- for (llvm_errors[1..]) |*llvm_error, i| {
+ for (llvm_errors[1..], 0..) |*llvm_error, i| {
const name = error_name_list[1..][i];
const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
const str_global = self.llvm_module.addGlobal(str_init.typeOf(), "");
@@ -691,7 +691,7 @@ pub const Object = struct {
object.extern_collisions.clearRetainingCapacity();
const export_keys = mod.decl_exports.keys();
- for (mod.decl_exports.values()) |export_list, i| {
+ for (mod.decl_exports.values(), 0..) |export_list, i| {
const decl_index = export_keys[i];
const llvm_global = object.decl_map.get(decl_index) orelse continue;
for (export_list.items) |exp| {
@@ -1076,7 +1076,7 @@ pub const Object = struct {
const param_alignment = param_ty.abiAlignment(target);
const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);
const llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
- for (field_types) |_, field_i_usize| {
+ for (field_types, 0..) |_, field_i_usize| {
const field_i = @intCast(c_uint, field_i_usize);
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
@@ -1495,7 +1495,7 @@ pub const Object = struct {
const int_info = ty.intInfo(target);
assert(int_info.bits != 0);
- for (field_names) |field_name, i| {
+ for (field_names, 0..) |field_name, i| {
const field_name_z = try gpa.dupeZ(u8, field_name);
defer gpa.free(field_name_z);
@@ -1992,7 +1992,7 @@ pub const Object = struct {
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
@@ -2921,7 +2921,7 @@ pub const DeclGen = struct {
var offset: u64 = 0;
var big_align: u32 = 0;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
@@ -3432,7 +3432,7 @@ pub const DeclGen = struct {
const llvm_elems = try gpa.alloc(*llvm.Value, len);
defer gpa.free(llvm_elems);
var need_unnamed = false;
- for (elem_vals[0..len]) |elem_val, i| {
+ for (elem_vals[0..len], 0..) |elem_val, i| {
llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val });
need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]);
}
@@ -3618,7 +3618,7 @@ pub const DeclGen = struct {
var big_align: u32 = 0;
var need_unnamed = false;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
if (tuple.values[i].tag() != .unreachable_value) continue;
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -3680,7 +3680,7 @@ pub const DeclGen = struct {
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *llvm.Value = int_llvm_ty.constNull();
var running_bits: u16 = 0;
- for (field_vals) |field_val, i| {
+ for (field_vals, 0..) |field_val, i| {
const field = fields[i];
if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -3855,7 +3855,7 @@ pub const DeclGen = struct {
const elem_ty = tv.ty.elemType();
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
- for (llvm_elems) |*elem, i| {
+ for (llvm_elems, 0..) |*elem, i| {
var byte_payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = bytes[i],
@@ -3880,7 +3880,7 @@ pub const DeclGen = struct {
const elem_ty = tv.ty.elemType();
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
- for (llvm_elems) |*elem, i| {
+ for (llvm_elems, 0..) |*elem, i| {
elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_vals[i] });
}
return llvm.constVector(
@@ -3913,7 +3913,7 @@ pub const DeclGen = struct {
const elem_ty = tv.ty.elemType();
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
- for (llvm_elems) |*elem, i| {
+ for (llvm_elems, 0..) |*elem, i| {
var byte_payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = bytes[i],
@@ -4479,7 +4479,7 @@ pub const FuncGen = struct {
fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
const air_tags = self.air.instructions.items(.tag);
- for (body) |inst, i| {
+ for (body, 0..) |inst, i| {
const opt_value: ?*llvm.Value = switch (air_tags[inst]) {
// zig fmt: off
.add => try self.airAdd(inst, false),
@@ -4852,7 +4852,7 @@ pub const FuncGen = struct {
const llvm_ty = self.context.structType(llvm_types.ptr, @intCast(c_uint, llvm_types.len), .False);
try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
- for (llvm_types) |field_ty, i_usize| {
+ for (llvm_types, 0..) |field_ty, i_usize| {
const i = @intCast(c_uint, i_usize);
const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, "");
const load_inst = self.builder.buildLoad(field_ty, field_ptr, "");
@@ -6250,7 +6250,7 @@ pub const FuncGen = struct {
var name_map: std.StringArrayHashMapUnmanaged(u16) = .{};
try name_map.ensureUnusedCapacity(arena, max_param_count);
- for (outputs) |output, i| {
+ for (outputs, 0..) |output, i| {
const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
@@ -6435,7 +6435,7 @@ pub const FuncGen = struct {
var name_start: usize = undefined;
var modifier_start: usize = undefined;
- for (asm_source) |byte, i| {
+ for (asm_source, 0..) |byte, i| {
switch (state) {
.start => switch (byte) {
'%' => state = .percent,
@@ -6526,7 +6526,7 @@ pub const FuncGen = struct {
.Auto,
"",
);
- for (llvm_param_attrs[0..param_count]) |llvm_elem_ty, i| {
+ for (llvm_param_attrs[0..param_count], 0..) |llvm_elem_ty, i| {
if (llvm_elem_ty) |llvm_ty| {
llvm.setCallElemTypeAttr(call, i, llvm_ty);
}
@@ -6534,7 +6534,7 @@ pub const FuncGen = struct {
var ret_val = call;
llvm_ret_i = 0;
- for (outputs) |output, i| {
+ for (outputs, 0..) |output, i| {
if (llvm_ret_indirect[i]) continue;
const output_value = if (return_count > 1) b: {
@@ -7416,7 +7416,7 @@ pub const FuncGen = struct {
const index_i32 = llvm_i32.constInt(i, .False);
var args: [3]*llvm.Value = undefined;
- for (args_vectors) |arg_vector, k| {
+ for (args_vectors, 0..) |arg_vector, k| {
args[k] = self.builder.buildExtractElement(arg_vector, index_i32, "");
}
const result_elem = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args_len, .C, .Auto, "");
@@ -8785,7 +8785,7 @@ pub const FuncGen = struct {
const tag_int_value = fn_val.getParam(0);
const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, fields.count()));
- for (fields.keys()) |_, field_index| {
+ for (fields.keys(), 0..) |_, field_index| {
const this_tag_int_value = int: {
var tag_val_payload: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
@@ -8874,7 +8874,7 @@ pub const FuncGen = struct {
usize_llvm_ty.constNull(), usize_llvm_ty.constNull(),
};
- for (fields.keys()) |name, field_index| {
+ for (fields.keys(), 0..) |name, field_index| {
const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
const str_init_llvm_ty = str_init.typeOf();
const str_global = self.dg.object.llvm_module.addGlobal(str_init_llvm_ty, "");
@@ -8998,7 +8998,7 @@ pub const FuncGen = struct {
const llvm_i32 = self.context.intType(32);
- for (values) |*val, i| {
+ for (values, 0..) |*val, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem = mask.elemValueBuffer(self.dg.module, i, &buf);
if (elem.isUndef()) {
@@ -9180,7 +9180,7 @@ pub const FuncGen = struct {
const llvm_u32 = self.context.intType(32);
var vector = llvm_result_ty.getUndef();
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
const index_u32 = llvm_u32.constInt(i, .False);
const llvm_elem = try self.resolveInst(elem);
vector = self.builder.buildInsertElement(vector, llvm_elem, index_u32, "");
@@ -9197,7 +9197,7 @@ pub const FuncGen = struct {
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *llvm.Value = int_llvm_ty.constNull();
var running_bits: u16 = 0;
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
const field = fields[i];
if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
@@ -9229,7 +9229,7 @@ pub const FuncGen = struct {
const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target));
var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined };
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
if (result_ty.structFieldValueComptime(i) != null) continue;
const llvm_elem = try self.resolveInst(elem);
@@ -9250,7 +9250,7 @@ pub const FuncGen = struct {
return alloca_inst;
} else {
var result = llvm_result_ty.getUndef();
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
if (result_ty.structFieldValueComptime(i) != null) continue;
const llvm_elem = try self.resolveInst(elem);
@@ -9275,7 +9275,7 @@ pub const FuncGen = struct {
};
const elem_ptr_ty = Type.initPayload(&elem_ptr_payload.base);
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
const indices: [2]*llvm.Value = .{
llvm_usize.constNull(),
llvm_usize.constInt(@intCast(c_uint, i), .False),
@@ -9914,7 +9914,7 @@ pub const FuncGen = struct {
};
const array_elements = [_]*llvm.Value{ request, a1, a2, a3, a4, a5 };
const zero = usize_llvm_ty.constInt(0, .False);
- for (array_elements) |elem, i| {
+ for (array_elements, 0..) |elem, i| {
const indexes = [_]*llvm.Value{
zero, usize_llvm_ty.constInt(@intCast(c_uint, i), .False),
};
@@ -10327,7 +10327,7 @@ fn llvmFieldIndex(
if (ty.isSimpleTupleOrAnonStruct()) {
const tuple = ty.tupleFields();
var llvm_field_index: c_uint = 0;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
const field_align = field_ty.abiAlignment(target);
@@ -10938,7 +10938,7 @@ fn isByRef(ty: Type) bool {
if (ty.isSimpleTupleOrAnonStruct()) {
const tuple = ty.tupleFields();
var count: usize = 0;
- for (tuple.values) |field_val, i| {
+ for (tuple.values, 0..) |field_val, i| {
if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits()) continue;
count += 1;
src/codegen/spirv.zig
@@ -418,7 +418,7 @@ pub const DeclGen = struct {
const elem_refs = try self.gpa.alloc(IdRef, vector_len);
defer self.gpa.free(elem_refs);
- for (elem_refs) |*elem, i| {
+ for (elem_refs, 0..) |*elem, i| {
elem.* = try self.genConstant(elem_ty, elem_vals[i]);
}
try section.emit(self.spv.gpa, .OpConstantComposite, .{
@@ -498,7 +498,7 @@ pub const DeclGen = struct {
return self.fail("VarArgs functions are unsupported for SPIR-V", .{});
const param_types = try self.spv.arena.alloc(SpvType.Ref, ty.fnParamLen());
- for (param_types) |*param, i| {
+ for (param_types, 0..) |*param, i| {
param.* = try self.resolveType(ty.fnParamType(i));
}
src/link/MachO/dyld_info/bind.zig
@@ -51,7 +51,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type {
var start: usize = 0;
var seg_id: ?u8 = null;
- for (self.entries.items) |entry, i| {
+ for (self.entries.items, 0..) |entry, i| {
if (seg_id != null and seg_id.? == entry.segment_id) continue;
try finalizeSegment(self.entries.items[start..i], ctx, writer);
seg_id = entry.segment_id;
src/link/MachO/dyld_info/Rebase.zig
@@ -45,7 +45,7 @@ pub fn finalize(rebase: *Rebase, gpa: Allocator) !void {
var start: usize = 0;
var seg_id: ?u8 = null;
- for (rebase.entries.items) |entry, i| {
+ for (rebase.entries.items, 0..) |entry, i| {
if (seg_id != null and seg_id.? == entry.segment_id) continue;
try finalizeSegment(rebase.entries.items[start..i], writer);
seg_id = entry.segment_id;
src/link/MachO/dead_strip.zig
@@ -238,7 +238,7 @@ fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable) !void {
}
}
- for (zld.objects.items) |_, object_id| {
+ for (zld.objects.items, 0..) |_, object_id| {
// Traverse unwind and eh_frame records noting if the source symbol has been marked, and if so,
// marking all references as live.
try markUnwindRecords(zld, @intCast(u32, object_id), alive);
src/link/MachO/DebugSymbols.zig
@@ -383,7 +383,7 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void {
fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype) !void {
// Write segment/section headers from the binary file first.
const end = macho_file.linkedit_segment_cmd_index.?;
- for (macho_file.segments.items[0..end]) |seg, i| {
+ for (macho_file.segments.items[0..end], 0..) |seg, i| {
const indexes = macho_file.getSectionIndexes(@intCast(u8, i));
var out_seg = seg;
out_seg.fileoff = 0;
@@ -412,7 +412,7 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype)
}
}
// Next, commit DSYM's __LINKEDIT and __DWARF segments headers.
- for (self.segments.items) |seg, i| {
+ for (self.segments.items, 0..) |seg, i| {
const indexes = self.getSectionIndexes(@intCast(u8, i));
try writer.writeStruct(seg);
for (self.sections.items[indexes.start..indexes.end]) |header| {
@@ -477,7 +477,7 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void {
var locals = std.ArrayList(macho.nlist_64).init(gpa);
defer locals.deinit();
- for (macho_file.locals.items) |sym, sym_id| {
+ for (macho_file.locals.items, 0..) |sym, sym_id| {
if (sym.n_strx == 0) continue; // no name, skip
const sym_loc = MachO.SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null };
if (macho_file.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip
@@ -547,7 +547,7 @@ fn writeStrtab(self: *DebugSymbols) !void {
pub fn getSectionIndexes(self: *DebugSymbols, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
- const nsects = for (self.segments.items) |seg, i| {
+ const nsects = for (self.segments.items, 0..) |seg, i| {
if (i == segment_index) break @intCast(u8, seg.nsects);
start += @intCast(u8, seg.nsects);
} else 0;
src/link/MachO/Dylib.zig
@@ -347,7 +347,7 @@ pub fn parseFromStub(
});
defer matcher.deinit();
- for (lib_stub.inner) |elem, stub_index| {
+ for (lib_stub.inner, 0..) |elem, stub_index| {
const is_match = switch (elem) {
.v3 => |stub| matcher.matchesArch(stub.archs),
.v4 => |stub| matcher.matchesTarget(stub.targets),
src/link/MachO/eh_frame.zig
@@ -16,7 +16,7 @@ const Zld = @import("zld.zig").Zld;
pub fn scanRelocs(zld: *Zld) !void {
const gpa = zld.gpa;
- for (zld.objects.items) |*object, object_id| {
+ for (zld.objects.items, 0..) |*object, object_id| {
var cies = std.AutoHashMap(u32, void).init(gpa);
defer cies.deinit();
@@ -108,7 +108,7 @@ pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
var eh_frame_offset: u32 = 0;
- for (zld.objects.items) |*object, object_id| {
+ for (zld.objects.items, 0..) |*object, object_id| {
try eh_records.ensureUnusedCapacity(2 * @intCast(u32, object.exec_atoms.items.len));
var cies = std.AutoHashMap(u32, u32).init(gpa);
@@ -407,7 +407,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
var creader = std.io.countingReader(stream.reader());
const reader = creader.reader();
- for (aug_str) |ch, i| switch (ch) {
+ for (aug_str, 0..) |ch, i| switch (ch) {
'z' => if (i > 0) {
return error.BadDwarfCfi;
} else {
@@ -467,7 +467,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
var creader = std.io.countingReader(stream.reader());
const reader = creader.reader();
- for (aug_str) |ch, i| switch (ch) {
+ for (aug_str, 0..) |ch, i| switch (ch) {
'z' => if (i > 0) {
return error.BadDwarfCfi;
} else {
src/link/MachO/Object.zig
@@ -201,7 +201,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
var sorted_all_syms = try std.ArrayList(SymbolAtIndex).initCapacity(allocator, self.in_symtab.?.len);
defer sorted_all_syms.deinit();
- for (self.in_symtab.?) |_, index| {
+ for (self.in_symtab.?, 0..) |_, index| {
sorted_all_syms.appendAssumeCapacity(.{ .index = @intCast(u32, index) });
}
@@ -211,7 +211,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
// is kind enough to specify the symbols in the correct order.
sort.sort(SymbolAtIndex, sorted_all_syms.items, self, SymbolAtIndex.lessThan);
- for (sorted_all_syms.items) |sym_id, i| {
+ for (sorted_all_syms.items, 0..) |sym_id, i| {
const sym = sym_id.getSymbol(self);
if (sym.sect() and self.source_section_index_lookup[sym.n_sect - 1] == -1) {
@@ -380,7 +380,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
const gpa = zld.gpa;
const sections = self.getSourceSections();
- for (sections) |sect, id| {
+ for (sections, 0..) |sect, id| {
if (sect.isDebug()) continue;
const out_sect_id = (try zld.getOutputSection(sect)) orelse {
log.debug(" unhandled section '{s},{s}'", .{ sect.segName(), sect.sectName() });
@@ -400,7 +400,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
}
if (self.in_symtab == null) {
- for (sections) |sect, id| {
+ for (sections, 0..) |sect, id| {
if (sect.isDebug()) continue;
const out_sect_id = (try zld.getOutputSection(sect)) orelse continue;
if (sect.size == 0) continue;
@@ -446,7 +446,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
var sorted_sections = try gpa.alloc(SortedSection, sections.len);
defer gpa.free(sorted_sections);
- for (sections) |sect, id| {
+ for (sections, 0..) |sect, id| {
sorted_sections[id] = .{ .header = sect, .id = @intCast(u8, id) };
}
@@ -804,7 +804,7 @@ fn parseUnwindInfo(self: *Object, zld: *Zld, object_id: u32) !void {
try self.parseRelocs(gpa, sect_id);
const relocs = self.getRelocs(sect_id);
- for (unwind_records) |record, record_id| {
+ for (unwind_records, 0..) |record, record_id| {
const offset = record_id * @sizeOf(macho.compact_unwind_entry);
const rel_pos = filterRelocs(
relocs,
@@ -857,7 +857,7 @@ pub fn getSourceSectionByName(self: Object, segname: []const u8, sectname: []con
pub fn getSourceSectionIndexByName(self: Object, segname: []const u8, sectname: []const u8) ?u8 {
const sections = self.getSourceSections();
- for (sections) |sect, i| {
+ for (sections, 0..) |sect, i| {
if (mem.eql(u8, segname, sect.segName()) and mem.eql(u8, sectname, sect.sectName()))
return @intCast(u8, i);
} else return null;
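
Object.zig shows the index-only case: the element capture is discarded as _. The same language change also allows a range-only loop with no slice operand at all, an equivalent and arguably more direct spelling when only the counter is wanted. Both forms, in a sketch:

    const std = @import("std");

    fn logIndexes(symtab: []const u64) void {
        // element ignored, counter kept
        for (symtab, 0..) |_, index| {
            std.debug.print("sym {d}\n", .{index});
        }
        // equivalent range-only form introduced by the same syntax change
        for (0..symtab.len) |index| {
            std.debug.print("sym {d}\n", .{index});
        }
    }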
src/link/MachO/thunks.zig
@@ -329,7 +329,7 @@ fn createThunkAtom(zld: *Zld) !AtomIndex {
fn getThunkIndex(zld: *Zld, atom_index: AtomIndex) ?ThunkIndex {
const atom = zld.getAtom(atom_index);
const sym = zld.getSymbol(atom.getSymbolWithLoc());
- for (zld.thunks.items) |thunk, i| {
+ for (zld.thunks.items, 0..) |thunk, i| {
if (thunk.len == 0) continue;
const thunk_atom_index = thunk.getStartAtomIndex();
src/link/MachO/UnwindInfo.zig
@@ -126,7 +126,7 @@ const Page = struct {
ctx.page.start + ctx.page.count,
});
try writer.print(" encodings (count = {d})\n", .{ctx.page.page_encodings_count});
- for (ctx.page.page_encodings[0..ctx.page.page_encodings_count]) |record_id, i| {
+ for (ctx.page.page_encodings[0..ctx.page.page_encodings_count], 0..) |record_id, i| {
const record = ctx.info.records.items[record_id];
const enc = record.compactUnwindEncoding;
try writer.print(" {d}: 0x{x:0>8}\n", .{ ctx.info.common_encodings_count + i, enc });
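
One subtlety visible in this hunk: when the operand is a subslice, the counter is relative to it, so the absolute position must be re-added by hand (common_encodings_count + i above). The counter range need not begin at 0, so the offset can also be folded into the range operand. A sketch with an illustrative base parameter:

    const std = @import("std");

    fn logEncodings(encodings: []const u32, base: usize) void {
        // counter relative to the slice; offset re-added per iteration...
        for (encodings, 0..) |enc, i| {
            std.debug.print("{d}: 0x{x:0>8}\n", .{ base + i, enc });
        }
        // ...or folded into the range operand, which may start anywhere
        for (encodings, base..) |enc, i| {
            std.debug.print("{d}: 0x{x:0>8}\n", .{ i, enc });
        }
    }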
@@ -205,7 +205,7 @@ pub fn scanRelocs(zld: *Zld) !void {
if (zld.getSectionByName("__TEXT", "__unwind_info") == null) return;
const cpu_arch = zld.options.target.cpu.arch;
- for (zld.objects.items) |*object, object_id| {
+ for (zld.objects.items, 0..) |*object, object_id| {
const unwind_records = object.getUnwindRecords();
for (object.exec_atoms.items) |atom_index| {
const record_id = object.unwind_records_lookup.get(atom_index) orelse continue;
@@ -244,7 +244,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
defer atom_indexes.deinit();
// TODO handle dead stripping
- for (zld.objects.items) |*object, object_id| {
+ for (zld.objects.items, 0..) |*object, object_id| {
log.debug("collecting unwind records in {s} ({d})", .{ object.name, object_id });
const unwind_records = object.getUnwindRecords();
@@ -335,7 +335,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
try info.records_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, atom_indexes.items.len));
var maybe_prev: ?macho.compact_unwind_entry = null;
- for (records.items) |record, i| {
+ for (records.items, 0..) |record, i| {
const record_id = blk: {
if (maybe_prev) |prev| {
const is_dwarf = UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch);
@@ -483,7 +483,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
// Save indices of records requiring LSDA relocation
try info.lsdas_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, info.records.items.len));
- for (info.records.items) |rec, i| {
+ for (info.records.items, 0..) |rec, i| {
info.lsdas_lookup.putAssumeCapacityNoClobber(@intCast(RecordIndex, i), @intCast(u32, info.lsdas.items.len));
if (rec.lsda == 0) continue;
try info.lsdas.append(info.gpa, @intCast(RecordIndex, i));
@@ -556,7 +556,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
const cpu_arch = zld.options.target.cpu.arch;
log.debug("Personalities:", .{});
- for (info.personalities[0..info.personalities_count]) |target, i| {
+ for (info.personalities[0..info.personalities_count], 0..) |target, i| {
const atom_index = zld.getGotAtomIndexForSymbol(target).?;
const atom = zld.getAtom(atom_index);
const sym = zld.getSymbol(atom.getSymbolWithLoc());
@@ -581,7 +581,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
}
}
- for (info.records.items) |record, i| {
+ for (info.records.items, 0..) |record, i| {
log.debug("Unwind record at offset 0x{x}", .{i * @sizeOf(macho.compact_unwind_entry)});
log.debug(" start: 0x{x}", .{record.rangeStart});
log.debug(" length: 0x{x}", .{record.rangeLength});
@@ -621,7 +621,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
const pages_base_offset = @intCast(u32, size - (info.pages.items.len * second_level_page_bytes));
const lsda_base_offset = @intCast(u32, pages_base_offset -
(info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry)));
- for (info.pages.items) |page, i| {
+ for (info.pages.items, 0..) |page, i| {
assert(page.count > 0);
const first_entry = info.records.items[page.start];
try writer.writeStruct(macho.unwind_info_section_header_index_entry{
src/link/MachO/zld.zig
@@ -321,7 +321,7 @@ pub const Zld = struct {
syslibroot: ?[]const u8,
dependent_libs: anytype,
) !void {
- for (lib_names) |lib, i| {
+ for (lib_names, 0..) |lib, i| {
const lib_info = lib_infos[i];
log.debug("parsing lib path '{s}'", .{lib});
if (try self.parseDylib(lib, dependent_libs, .{
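
parseLibs still indexes lib_infos manually inside the loop. The multi-object form could walk both slices in lockstep instead, dropping the counter entirely; the operands must have equal length, which is enforced by a safety check. A hedged sketch, not what the commit does, with a toy LibInfo type:

    const std = @import("std");

    const LibInfo = struct { needed: bool };

    fn logLibs(lib_names: []const []const u8, lib_infos: []const LibInfo) void {
        // two slice operands iterate in lockstep; no index needed
        for (lib_names, lib_infos) |lib, info| {
            std.debug.print("parsing lib path '{s}' (needed={})\n", .{ lib, info.needed });
        }
    }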
@@ -1092,7 +1092,7 @@ pub const Zld = struct {
const sym = self.getSymbolPtr(global);
const sym_name = self.getSymbolName(global);
- for (self.dylibs.items) |dylib, id| {
+ for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
const dylib_id = @intCast(u16, id);
@@ -1223,7 +1223,7 @@ pub const Zld = struct {
const global = SymbolWithLoc{ .sym_index = sym_index };
try self.globals.append(gpa, global);
- for (self.dylibs.items) |dylib, id| {
+ for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
const dylib_id = @intCast(u16, id);
@@ -1311,7 +1311,7 @@ pub const Zld = struct {
});
}
- for (self.sections.items(.header)) |header, sect_id| {
+ for (self.sections.items(.header), 0..) |header, sect_id| {
if (header.size == 0) continue; // empty section
const segname = header.segName();
@@ -1385,7 +1385,7 @@ pub const Zld = struct {
const gpa = self.gpa;
const slice = self.sections.slice();
- for (slice.items(.first_atom_index)) |first_atom_index, sect_id| {
+ for (slice.items(.first_atom_index), 0..) |first_atom_index, sect_id| {
const header = slice.items(.header)[sect_id];
var atom_index = first_atom_index;
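
self.sections here is a std.MultiArrayList, and slice.items(.field) returns one column of it; the counter recovers the row so sibling columns can be indexed. Since columns of one list always have equal length, the multi-object form can also capture two columns at once. A sketch with a toy Section struct:

    const std = @import("std");

    const Section = struct { header: u32, first_atom_index: u32 };

    fn walkSections(sections: *std.MultiArrayList(Section)) void {
        const slice = sections.slice();
        // two columns of the same MultiArrayList, captured in lockstep
        for (slice.items(.first_atom_index), slice.items(.header)) |first_atom_index, header| {
            std.debug.print("header {d}: first atom {d}\n", .{ header, first_atom_index });
        }
    }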
@@ -1525,7 +1525,7 @@ pub const Zld = struct {
fn calcSectionSizes(self: *Zld) !void {
const slice = self.sections.slice();
- for (slice.items(.header)) |*header, sect_id| {
+ for (slice.items(.header), 0..) |*header, sect_id| {
if (header.size == 0) continue;
if (self.requiresThunks()) {
if (header.isCode() and !(header.type() == macho.S_SYMBOL_STUBS) and !mem.eql(u8, header.sectName(), "__stub_helper")) continue;
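
calcSectionSizes uses a pointer capture: |*header, sect_id| yields a mutable pointer into the slice, and the trailing 0.. operand does not change the capture kinds, it simply appends one more. A small sketch of in-place mutation alongside a counter, with invented names:

    const std = @import("std");

    fn zeroSmallSizes(sizes: []u64) void {
        for (sizes, 0..) |*size, i| {
            // pointer capture writes through to the slice element
            if (size.* < 16) {
                std.debug.print("zeroing entry {d}\n", .{i});
                size.* = 0;
            }
        }
    }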
@@ -1556,7 +1556,7 @@ pub const Zld = struct {
}
if (self.requiresThunks()) {
- for (slice.items(.header)) |header, sect_id| {
+ for (slice.items(.header), 0..) |header, sect_id| {
if (!header.isCode()) continue;
if (header.type() == macho.S_SYMBOL_STUBS) continue;
if (mem.eql(u8, header.sectName(), "__stub_helper")) continue;
@@ -1568,7 +1568,7 @@ pub const Zld = struct {
}
fn allocateSegments(self: *Zld) !void {
- for (self.segments.items) |*segment, segment_index| {
+ for (self.segments.items, 0..) |*segment, segment_index| {
const is_text_segment = mem.eql(u8, segment.segName(), "__TEXT");
const base_size = if (is_text_segment) try load_commands.calcMinHeaderPad(self.gpa, self.options, .{
.segments = self.segments.items,
@@ -1606,7 +1606,7 @@ pub const Zld = struct {
var start = init_size;
const slice = self.sections.slice();
- for (slice.items(.header)[indexes.start..indexes.end]) |*header, sect_id| {
+ for (slice.items(.header)[indexes.start..indexes.end], 0..) |*header, sect_id| {
const alignment = try math.powi(u32, 2, header.@"align");
const start_aligned = mem.alignForwardGeneric(u64, start, alignment);
const n_sect = @intCast(u8, indexes.start + sect_id + 1);
@@ -1750,7 +1750,7 @@ pub const Zld = struct {
}
fn writeSegmentHeaders(self: *Zld, writer: anytype) !void {
- for (self.segments.items) |seg, i| {
+ for (self.segments.items, 0..) |seg, i| {
const indexes = self.getSectionIndexes(@intCast(u8, i));
var out_seg = seg;
out_seg.cmdsize = @sizeOf(macho.segment_command_64);
@@ -1852,7 +1852,7 @@ pub const Zld = struct {
}
// Finally, unpack the rest.
- for (slice.items(.header)) |header, sect_id| {
+ for (slice.items(.header), 0..) |header, sect_id| {
switch (header.type()) {
macho.S_LITERAL_POINTERS,
macho.S_REGULAR,
@@ -1989,7 +1989,7 @@ pub const Zld = struct {
// Finally, unpack the rest.
const slice = self.sections.slice();
- for (slice.items(.header)) |header, sect_id| {
+ for (slice.items(.header), 0..) |header, sect_id| {
switch (header.type()) {
macho.S_LITERAL_POINTERS,
macho.S_REGULAR,
@@ -2710,7 +2710,7 @@ pub const Zld = struct {
const amt = try self.file.preadAll(locals_buf, self.symtab_cmd.symoff);
if (amt != locals_buf.len) return error.InputOutput;
- const istab: usize = for (locals) |local, i| {
+ const istab: usize = for (locals, 0..) |local, i| {
if (local.stab()) break i;
} else locals.len;
const nstabs = locals.len - istab;
@@ -2897,7 +2897,7 @@ pub const Zld = struct {
}
fn getSegmentByName(self: Zld, segname: []const u8) ?u8 {
- for (self.segments.items) |seg, i| {
+ for (self.segments.items, 0..) |seg, i| {
if (mem.eql(u8, segname, seg.segName())) return @intCast(u8, i);
} else return null;
}
@@ -2921,7 +2921,7 @@ pub const Zld = struct {
pub fn getSectionByName(self: Zld, segname: []const u8, sectname: []const u8) ?u8 {
// TODO investigate caching with a hashmap
- for (self.sections.items(.header)) |header, i| {
+ for (self.sections.items(.header), 0..) |header, i| {
if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname))
return @intCast(u8, i);
} else return null;
@@ -2929,7 +2929,7 @@ pub const Zld = struct {
pub fn getSectionIndexes(self: Zld, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
- const nsects = for (self.segments.items) |seg, i| {
+ const nsects = for (self.segments.items, 0..) |seg, i| {
if (i == segment_index) break @intCast(u8, seg.nsects);
start += @intCast(u8, seg.nsects);
} else 0;
@@ -3220,7 +3220,7 @@ pub const Zld = struct {
fn logSegments(self: *Zld) void {
log.debug("segments:", .{});
- for (self.segments.items) |segment, i| {
+ for (self.segments.items, 0..) |segment, i| {
log.debug(" segment({d}): {s} @{x} ({x}), sizeof({x})", .{
i,
segment.segName(),
@@ -3233,7 +3233,7 @@ pub const Zld = struct {
fn logSections(self: *Zld) void {
log.debug("sections:", .{});
- for (self.sections.items(.header)) |header, i| {
+ for (self.sections.items(.header), 0..) |header, i| {
log.debug(" sect({d}): {s},{s} @{x} ({x}), sizeof({x})", .{
i + 1,
header.segName(),
@@ -3271,10 +3271,10 @@ pub const Zld = struct {
const scoped_log = std.log.scoped(.symtab);
scoped_log.debug("locals:", .{});
- for (self.objects.items) |object, id| {
+ for (self.objects.items, 0..) |object, id| {
scoped_log.debug(" object({d}): {s}", .{ id, object.name });
if (object.in_symtab == null) continue;
- for (object.symtab) |sym, sym_id| {
+ for (object.symtab, 0..) |sym, sym_id| {
mem.set(u8, &buf, '_');
scoped_log.debug(" %{d}: {s} @{x} in sect({d}), {s}", .{
sym_id,
@@ -3286,7 +3286,7 @@ pub const Zld = struct {
}
}
scoped_log.debug(" object(-1)", .{});
- for (self.locals.items) |sym, sym_id| {
+ for (self.locals.items, 0..) |sym, sym_id| {
if (sym.undf()) continue;
scoped_log.debug(" %{d}: {s} @{x} in sect({d}), {s}", .{
sym_id,
@@ -3298,7 +3298,7 @@ pub const Zld = struct {
}
scoped_log.debug("exports:", .{});
- for (self.globals.items) |global, i| {
+ for (self.globals.items, 0..) |global, i| {
const sym = self.getSymbol(global);
if (sym.undf()) continue;
if (sym.n_desc == N_DEAD) continue;
@@ -3313,7 +3313,7 @@ pub const Zld = struct {
}
scoped_log.debug("imports:", .{});
- for (self.globals.items) |global, i| {
+ for (self.globals.items, 0..) |global, i| {
const sym = self.getSymbol(global);
if (!sym.undf()) continue;
if (sym.n_desc == N_DEAD) continue;
@@ -3328,7 +3328,7 @@ pub const Zld = struct {
}
scoped_log.debug("GOT entries:", .{});
- for (self.got_entries.items) |entry, i| {
+ for (self.got_entries.items, 0..) |entry, i| {
const atom_sym = entry.getAtomSymbol(self);
const target_sym = entry.getTargetSymbol(self);
const target_sym_name = entry.getTargetSymbolName(self);
@@ -3350,7 +3350,7 @@ pub const Zld = struct {
}
scoped_log.debug("__thread_ptrs entries:", .{});
- for (self.tlv_ptr_entries.items) |entry, i| {
+ for (self.tlv_ptr_entries.items, 0..) |entry, i| {
const atom_sym = entry.getAtomSymbol(self);
const target_sym = entry.getTargetSymbol(self);
const target_sym_name = entry.getTargetSymbolName(self);
@@ -3363,7 +3363,7 @@ pub const Zld = struct {
}
scoped_log.debug("stubs entries:", .{});
- for (self.stubs.items) |entry, i| {
+ for (self.stubs.items, 0..) |entry, i| {
const atom_sym = entry.getAtomSymbol(self);
const target_sym = entry.getTargetSymbol(self);
const target_sym_name = entry.getTargetSymbolName(self);
@@ -3376,9 +3376,9 @@ pub const Zld = struct {
}
scoped_log.debug("thunks:", .{});
- for (self.thunks.items) |thunk, i| {
+ for (self.thunks.items, 0..) |thunk, i| {
scoped_log.debug(" thunk({d})", .{i});
- for (thunk.lookup.keys()) |target, j| {
+ for (thunk.lookup.keys(), 0..) |target, j| {
const target_sym = self.getSymbol(target);
const atom = self.getAtom(thunk.lookup.get(target).?);
const atom_sym = self.getSymbol(atom.getSymbolWithLoc());
@@ -3395,7 +3395,7 @@ pub const Zld = struct {
fn logAtoms(self: *Zld) void {
log.debug("atoms:", .{});
const slice = self.sections.slice();
- for (slice.items(.first_atom_index)) |first_atom_index, sect_id| {
+ for (slice.items(.first_atom_index), 0..) |first_atom_index, sect_id| {
var atom_index = first_atom_index;
if (atom_index == 0) continue;
@@ -3980,7 +3980,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
.unresolved = std.AutoArrayHashMap(u32, void).init(arena),
};
- for (zld.objects.items) |_, object_id| {
+ for (zld.objects.items, 0..) |_, object_id| {
try zld.resolveSymbolsInObject(@intCast(u32, object_id), &resolver);
}
@@ -4010,7 +4010,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
zld.entry_index = global_index;
}
- for (zld.objects.items) |*object, object_id| {
+ for (zld.objects.items, 0..) |*object, object_id| {
try object.splitIntoAtoms(&zld, @intCast(u32, object_id));
}
src/link/tapi/yaml.zig
@@ -84,7 +84,7 @@ pub const Value = union(ValueType) {
const first = list[0];
if (first.is_compound()) {
- for (list) |elem, i| {
+ for (list, 0..) |elem, i| {
try writer.writeByteNTimes(' ', args.indentation);
try writer.writeAll("- ");
try elem.stringify(writer, .{
@@ -99,7 +99,7 @@ pub const Value = union(ValueType) {
}
try writer.writeAll("[ ");
- for (list) |elem, i| {
+ for (list, 0..) |elem, i| {
try elem.stringify(writer, args);
if (i < len - 1) {
try writer.writeAll(", ");
@@ -112,7 +112,7 @@ pub const Value = union(ValueType) {
const len = keys.len;
if (len == 0) return;
- for (keys) |key, i| {
+ for (keys, 0..) |key, i| {
if (!args.should_inline_first_key or i != 0) {
try writer.writeByteNTimes(' ', args.indentation);
}
@@ -292,7 +292,7 @@ pub const Yaml = struct {
switch (@typeInfo(T)) {
.Array => |info| {
var parsed: T = undefined;
- for (self.docs.items) |doc, i| {
+ for (self.docs.items, 0..) |doc, i| {
parsed[i] = try self.parseValue(info.child, doc);
}
return parsed;
@@ -301,7 +301,7 @@ pub const Yaml = struct {
switch (info.size) {
.Slice => {
var parsed = try self.arena.allocator().alloc(info.child, self.docs.items.len);
- for (self.docs.items) |doc, i| {
+ for (self.docs.items, 0..) |doc, i| {
parsed[i] = try self.parseValue(info.child, doc);
}
return parsed;
@@ -393,7 +393,7 @@ pub const Yaml = struct {
}
var parsed = try arena.alloc(ptr_info.child, value.list.len);
- for (value.list) |elem, i| {
+ for (value.list, 0..) |elem, i| {
parsed[i] = try self.parseValue(ptr_info.child, elem);
}
return parsed;
@@ -407,7 +407,7 @@ pub const Yaml = struct {
if (array_info.len != list.len) return error.ArraySizeMismatch;
var parsed: T = undefined;
- for (list) |elem, i| {
+ for (list, 0..) |elem, i| {
parsed[i] = try self.parseValue(array_info.child, elem);
}
src/link/Wasm/Object.zig
@@ -882,7 +882,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
list.deinit();
} else symbol_for_segment.deinit();
- for (object.symtable) |symbol, symbol_index| {
+ for (object.symtable, 0..) |symbol, symbol_index| {
switch (symbol.tag) {
.function, .data, .section => if (!symbol.isUndefined()) {
const gop = try symbol_for_segment.getOrPut(.{ .kind = symbol.tag, .index = symbol.index });
@@ -896,7 +896,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
}
}
- for (object.relocatable_data) |relocatable_data, index| {
+ for (object.relocatable_data, 0..) |relocatable_data, index| {
const final_index = (try wasm_bin.getMatchingSegment(object_index, @intCast(u32, index))) orelse {
continue; // found unknown section, so skip parsing into atom as we do not know how to handle it.
};
src/link/Coff.zig
@@ -486,7 +486,7 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
// TODO: enforce order by increasing VM addresses in self.sections container.
// This is required by the loader anyhow as far as I can tell.
- for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
+ for (self.sections.items(.header)[sect_id + 1 ..], 0..) |*next_header, next_sect_id| {
const maybe_last_atom_index = self.sections.items(.last_atom_index)[sect_id + 1 + next_sect_id];
next_header.virtual_address += diff;
@@ -2191,7 +2191,7 @@ fn logSymtab(self: *Coff) void {
log.debug("symtab:", .{});
log.debug(" object(null)", .{});
- for (self.locals.items) |*sym, sym_id| {
+ for (self.locals.items, 0..) |*sym, sym_id| {
const where = if (sym.section_number == .UNDEFINED) "ord" else "sect";
const def_index: u16 = switch (sym.section_number) {
.UNDEFINED => 0, // TODO
@@ -2216,7 +2216,7 @@ fn logSymtab(self: *Coff) void {
}
log.debug("GOT entries:", .{});
- for (self.got_entries.items) |entry, i| {
+ for (self.got_entries.items, 0..) |entry, i| {
const got_sym = self.getSymbol(.{ .sym_index = entry.sym_index, .file = null });
const target_sym = self.getSymbol(entry.target);
if (target_sym.section_number == .UNDEFINED) {
src/link/Dwarf.zig
@@ -339,7 +339,7 @@ pub const DeclState = struct {
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
const fields = ty.tupleFields();
- for (fields.types) |field, field_index| {
+ for (fields.types, 0..) |field, field_index| {
// DW.AT.member
try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member));
// DW.AT.name, DW.FORM.string
@@ -367,7 +367,7 @@ pub const DeclState = struct {
}
const fields = ty.structFields();
- for (fields.keys()) |field_name, field_index| {
+ for (fields.keys(), 0..) |field_name, field_index| {
const field = fields.get(field_name).?;
if (!field.ty.hasRuntimeBits()) continue;
// DW.AT.member
@@ -409,7 +409,7 @@ pub const DeclState = struct {
.enum_numbered => ty.castTag(.enum_numbered).?.data.values,
else => unreachable,
};
- for (fields.keys()) |field_name, field_i| {
+ for (fields.keys(), 0..) |field_name, field_i| {
// DW.AT.enumerator
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2 + @sizeOf(u64));
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.enum_variant));
@@ -2252,14 +2252,14 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
1, // `DW.LNS.set_isa`
});
- for (paths.dirs) |dir, i| {
+ for (paths.dirs, 0..) |dir, i| {
log.debug("adding new include dir at {d} of '{s}'", .{ i + 1, dir });
di_buf.appendSliceAssumeCapacity(dir);
di_buf.appendAssumeCapacity(0);
}
di_buf.appendAssumeCapacity(0); // include directories sentinel
- for (paths.files) |file, i| {
+ for (paths.files, 0..) |file, i| {
const dir_index = paths.files_dirs_indexes[i];
log.debug("adding new file name at {d} of '{s}' referencing directory {d}", .{ i + 1, file, dir_index + 1 });
di_buf.appendSliceAssumeCapacity(file);
src/link/Elf.zig
@@ -1126,7 +1126,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const buf = try gpa.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
defer gpa.free(buf);
- for (buf) |*phdr, i| {
+ for (buf, 0..) |*phdr, i| {
phdr.* = progHeaderTo32(self.program_headers.items[i]);
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf32_Phdr, phdr);
@@ -1138,7 +1138,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const buf = try gpa.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
defer gpa.free(buf);
- for (buf) |*phdr, i| {
+ for (buf, 0..) |*phdr, i| {
phdr.* = self.program_headers.items[i];
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf64_Phdr, phdr);
@@ -1193,7 +1193,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const buf = try gpa.alloc(elf.Elf32_Shdr, slice.len);
defer gpa.free(buf);
- for (buf) |*shdr, i| {
+ for (buf, 0..) |*shdr, i| {
shdr.* = sectHeaderTo32(slice.items(.shdr)[i]);
log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
if (foreign_endian) {
@@ -1207,7 +1207,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const buf = try gpa.alloc(elf.Elf64_Shdr, slice.len);
defer gpa.free(buf);
- for (buf) |*shdr, i| {
+ for (buf, 0..) |*shdr, i| {
shdr.* = slice.items(.shdr)[i];
log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
if (foreign_endian) {
@@ -1732,7 +1732,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
argv.appendAssumeCapacity("--as-needed");
var as_needed = true;
- for (system_libs) |link_lib, i| {
+ for (system_libs, 0..) |link_lib, i| {
const lib_as_needed = !system_libs_values[i].needed;
switch ((@as(u2, @boolToInt(lib_as_needed)) << 1) | @boolToInt(as_needed)) {
0b00, 0b11 => {},
@@ -2909,7 +2909,7 @@ fn writeAllGlobalSymbols(self: *Elf) !void {
const buf = try self.base.allocator.alloc(elf.Elf32_Sym, self.global_symbols.items.len);
defer self.base.allocator.free(buf);
- for (buf) |*sym, i| {
+ for (buf, 0..) |*sym, i| {
const global = self.global_symbols.items[i];
sym.* = .{
.st_name = global.st_name,
@@ -2929,7 +2929,7 @@ fn writeAllGlobalSymbols(self: *Elf) !void {
const buf = try self.base.allocator.alloc(elf.Elf64_Sym, self.global_symbols.items.len);
defer self.base.allocator.free(buf);
- for (buf) |*sym, i| {
+ for (buf, 0..) |*sym, i| {
const global = self.global_symbols.items[i];
sym.* = .{
.st_name = global.st_name,
@@ -3238,11 +3238,11 @@ const CsuObjects = struct {
fn logSymtab(self: Elf) void {
log.debug("locals:", .{});
- for (self.local_symbols.items) |sym, id| {
+ for (self.local_symbols.items, 0..) |sym, id| {
log.debug(" {d}: {?s}: @{x} in {d}", .{ id, self.shstrtab.get(sym.st_name), sym.st_value, sym.st_shndx });
}
log.debug("globals:", .{});
- for (self.global_symbols.items) |sym, id| {
+ for (self.global_symbols.items, 0..) |sym, id| {
log.debug(" {d}: {?s}: @{x} in {d}", .{ id, self.shstrtab.get(sym.st_name), sym.st_value, sym.st_shndx });
}
}
src/link/MachO.zig
@@ -962,7 +962,7 @@ pub fn parseLibs(
syslibroot: ?[]const u8,
dependent_libs: anytype,
) !void {
- for (lib_names) |lib, i| {
+ for (lib_names, 0..) |lib, i| {
const lib_info = lib_infos[i];
log.debug("parsing lib path '{s}'", .{lib});
if (try self.parseDylib(lib, dependent_libs, .{
@@ -1584,7 +1584,7 @@ pub fn resolveSymbolsInDylibs(self: *MachO) !void {
const sym = self.getSymbolPtr(global);
const sym_name = self.getSymbolName(global);
- for (self.dylibs.items) |dylib, id| {
+ for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
const dylib_id = @intCast(u16, id);
@@ -1686,7 +1686,7 @@ pub fn resolveDyldStubBinder(self: *MachO) !void {
gop.value_ptr.* = sym_loc;
const global = gop.value_ptr.*;
- for (self.dylibs.items) |dylib, id| {
+ for (self.dylibs.items, 0..) |dylib, id| {
if (!dylib.symbols.contains(sym_name)) continue;
const dylib_id = @intCast(u16, id);
@@ -2852,7 +2852,7 @@ fn moveSectionInVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void
});
// TODO: enforce order by increasing VM addresses in self.sections container.
- for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
+ for (self.sections.items(.header)[sect_id + 1 ..], 0..) |*next_header, next_sect_id| {
const index = @intCast(u8, sect_id + 1 + next_sect_id);
const next_segment = self.getSegmentPtr(index);
next_header.addr += diff;
@@ -3082,7 +3082,7 @@ pub fn initSection(self: *MachO, segname: []const u8, sectname: []const u8, opts
fn insertSection(self: *MachO, segment_index: u8, header: macho.section_64) !u8 {
const precedence = getSectionPrecedence(header);
const indexes = self.getSectionIndexes(segment_index);
- const insertion_index = for (self.sections.items(.header)[indexes.start..indexes.end]) |hdr, i| {
+ const insertion_index = for (self.sections.items(.header)[indexes.start..indexes.end], 0..) |hdr, i| {
if (getSectionPrecedence(hdr) > precedence) break @intCast(u8, i + indexes.start);
} else indexes.end;
log.debug("inserting section '{s},{s}' at index {d}", .{
@@ -3133,7 +3133,7 @@ pub fn getGlobalSymbol(self: *MachO, name: []const u8) !u32 {
}
fn writeSegmentHeaders(self: *MachO, writer: anytype) !void {
- for (self.segments.items) |seg, i| {
+ for (self.segments.items, 0..) |seg, i| {
const indexes = self.getSectionIndexes(@intCast(u8, i));
try writer.writeStruct(seg);
for (self.sections.items(.header)[indexes.start..indexes.end]) |header| {
@@ -3147,7 +3147,7 @@ fn writeLinkeditSegmentData(self: *MachO) !void {
seg.filesize = 0;
seg.vmsize = 0;
- for (self.segments.items) |segment, id| {
+ for (self.segments.items, 0..) |segment, id| {
if (self.linkedit_segment_cmd_index.? == @intCast(u8, id)) continue;
if (seg.vmaddr < segment.vmaddr + segment.vmsize) {
seg.vmaddr = mem.alignForwardGeneric(u64, segment.vmaddr + segment.vmsize, self.page_size);
@@ -3167,7 +3167,7 @@ fn collectRebaseData(self: *MachO, rebase: *Rebase) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (self.rebases.keys()) |atom_index, i| {
+ for (self.rebases.keys(), 0..) |atom_index, i| {
const atom = self.getAtom(atom_index);
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
@@ -3197,7 +3197,7 @@ fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (raw_bindings.keys()) |atom_index, i| {
+ for (raw_bindings.keys(), 0..) |atom_index, i| {
const atom = self.getAtom(atom_index);
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
@@ -3417,7 +3417,7 @@ fn writeSymtab(self: *MachO) !SymtabCtx {
var locals = std.ArrayList(macho.nlist_64).init(gpa);
defer locals.deinit();
- for (self.locals.items) |sym, sym_id| {
+ for (self.locals.items, 0..) |sym, sym_id| {
if (sym.n_strx == 0) continue; // no name, skip
const sym_loc = SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null };
if (self.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip
@@ -3736,7 +3736,7 @@ pub fn makeStaticString(bytes: []const u8) [16]u8 {
}
fn getSegmentByName(self: MachO, segname: []const u8) ?u8 {
- for (self.segments.items) |seg, i| {
+ for (self.segments.items, 0..) |seg, i| {
if (mem.eql(u8, segname, seg.segName())) return @intCast(u8, i);
} else return null;
}
@@ -3758,7 +3758,7 @@ pub fn getLinkeditSegmentPtr(self: *MachO) *macho.segment_command_64 {
pub fn getSectionByName(self: MachO, segname: []const u8, sectname: []const u8) ?u8 {
// TODO investigate caching with a hashmap
- for (self.sections.items(.header)) |header, i| {
+ for (self.sections.items(.header), 0..) |header, i| {
if (mem.eql(u8, header.segName(), segname) and mem.eql(u8, header.sectName(), sectname))
return @intCast(u8, i);
} else return null;
@@ -3766,7 +3766,7 @@ pub fn getSectionByName(self: MachO, segname: []const u8, sectname: []const u8)
pub fn getSectionIndexes(self: MachO, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
- const nsects = for (self.segments.items) |seg, i| {
+ const nsects = for (self.segments.items, 0..) |seg, i| {
if (i == segment_index) break @intCast(u8, seg.nsects);
start += @intCast(u8, seg.nsects);
} else 0;
@@ -4160,7 +4160,7 @@ pub fn findFirst(comptime T: type, haystack: []align(1) const T, start: usize, p
pub fn logSections(self: *MachO) void {
log.debug("sections:", .{});
- for (self.sections.items(.header)) |header, i| {
+ for (self.sections.items(.header), 0..) |header, i| {
log.debug(" sect({d}): {s},{s} @{x}, sizeof({x})", .{
i + 1,
header.segName(),
@@ -4197,7 +4197,7 @@ pub fn logSymtab(self: *MachO) void {
var buf: [4]u8 = undefined;
log.debug("symtab:", .{});
- for (self.locals.items) |sym, sym_id| {
+ for (self.locals.items, 0..) |sym, sym_id| {
const where = if (sym.undf() and !sym.tentative()) "ord" else "sect";
const def_index = if (sym.undf() and !sym.tentative())
@divTrunc(sym.n_desc, macho.N_SYMBOL_RESOLVER)
@@ -4220,7 +4220,7 @@ pub fn logSymtab(self: *MachO) void {
}
log.debug("GOT entries:", .{});
- for (self.got_entries.items) |entry, i| {
+ for (self.got_entries.items, 0..) |entry, i| {
const atom_sym = entry.getSymbol(self);
const target_sym = self.getSymbol(entry.target);
if (target_sym.undf()) {
@@ -4241,7 +4241,7 @@ pub fn logSymtab(self: *MachO) void {
}
log.debug("stubs entries:", .{});
- for (self.stubs.items) |entry, i| {
+ for (self.stubs.items, 0..) |entry, i| {
const target_sym = self.getSymbol(entry.target);
const atom_sym = entry.getSymbol(self);
assert(target_sym.undf());
@@ -4257,7 +4257,7 @@ pub fn logAtoms(self: *MachO) void {
log.debug("atoms:", .{});
const slice = self.sections.slice();
- for (slice.items(.last_atom_index)) |last_atom_index, i| {
+ for (slice.items(.last_atom_index), 0..) |last_atom_index, i| {
var atom_index = last_atom_index orelse continue;
const header = slice.items(.header)[i];
src/link/SpirV.zig
@@ -298,7 +298,7 @@ fn cloneAir(air: Air, gpa: Allocator, air_arena: Allocator) !Air {
const values = try gpa.alloc(Value, air.values.len);
errdefer gpa.free(values);
- for (values) |*value, i| {
+ for (values, 0..) |*value, i| {
value.* = try air.values[i].copy(air_arena);
}
@@ -308,7 +308,7 @@ fn cloneAir(air: Air, gpa: Allocator, air_arena: Allocator) !Air {
const air_tags = instructions.items(.tag);
const air_datas = instructions.items(.data);
- for (air_tags) |tag, i| {
+ for (air_tags, 0..) |tag, i| {
switch (tag) {
.alloc, .ret_ptr, .const_ty => air_datas[i].ty = try air_datas[i].ty.copy(air_arena),
else => {},
src/link/tapi.zig
@@ -124,7 +124,7 @@ pub const LibStub = struct {
log.debug("trying to parse as []TbdV4", .{});
const inner = lib_stub.yaml.parse([]TbdV4) catch break :err;
var out = try lib_stub.yaml.arena.allocator().alloc(Tbd, inner.len);
- for (inner) |doc, i| {
+ for (inner, 0..) |doc, i| {
out[i] = .{ .v4 = doc };
}
break :blk out;
@@ -142,7 +142,7 @@ pub const LibStub = struct {
log.debug("trying to parse as []TbdV3", .{});
const inner = lib_stub.yaml.parse([]TbdV3) catch break :err;
var out = try lib_stub.yaml.arena.allocator().alloc(Tbd, inner.len);
- for (inner) |doc, i| {
+ for (inner, 0..) |doc, i| {
out[i] = .{ .v3 = doc };
}
break :blk out;
src/link/Wasm.zig
@@ -590,7 +590,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
const object: Object = wasm.objects.items[object_index];
log.debug("Resolving symbols in object: '{s}'", .{object.name});
- for (object.symtable) |symbol, i| {
+ for (object.symtable, 0..) |symbol, i| {
const sym_index = @intCast(u32, i);
const location: SymbolLoc = .{
.file = object_index,
@@ -794,7 +794,7 @@ fn validateFeatures(
// extract all the used, disallowed and required features from each
// linked object file so we can test them.
- for (wasm.objects.items) |object, object_index| {
+ for (wasm.objects.items, 0..) |object, object_index| {
for (object.features) |feature| {
const value = @intCast(u16, object_index) << 1 | @as(u1, 1);
switch (feature.prefix) {
@@ -815,7 +815,7 @@ fn validateFeatures(
// when we infer the features, we allow each feature found in the 'used' set
// and insert it into the 'allowed' set. When features are not inferred,
// we validate that a used feature is allowed.
- for (used) |used_set, used_index| {
+ for (used, 0..) |used_set, used_index| {
const is_enabled = @truncate(u1, used_set) != 0;
if (infer) {
allowed[used_index] = is_enabled;
@@ -849,7 +849,7 @@ fn validateFeatures(
}
// validate the linked object file has each required feature
- for (required) |required_feature, feature_index| {
+ for (required, 0..) |required_feature, feature_index| {
const is_required = @truncate(u1, required_feature) != 0;
if (is_required and !object_used_features[feature_index]) {
log.err("feature '{s}' is required but not used in linked object", .{(@intToEnum(types.Feature.Tag, feature_index)).toString()});
@@ -1818,7 +1818,7 @@ fn sortDataSegments(wasm: *Wasm) !void {
/// original functions and their types. We need to know the type to verify it doesn't
/// contain any parameters.
fn setupInitFunctions(wasm: *Wasm) !void {
- for (wasm.objects.items) |object, file_index| {
+ for (wasm.objects.items, 0..) |object, file_index| {
try wasm.init_funcs.ensureUnusedCapacity(wasm.base.allocator, object.init_funcs.len);
for (object.init_funcs) |init_func| {
const symbol = object.symtable[init_func.symbol_index];
@@ -2717,7 +2717,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
try wasm.parseInputFiles(positionals.items);
- for (wasm.objects.items) |_, object_index| {
+ for (wasm.objects.items, 0..) |_, object_index| {
try wasm.resolveSymbolsInObject(@intCast(u16, object_index));
}
@@ -2732,7 +2732,7 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
try wasm.setupStart();
try wasm.setupImports();
- for (wasm.objects.items) |*object, object_index| {
+ for (wasm.objects.items, 0..) |*object, object_index| {
try object.parseIntoAtoms(gpa, @intCast(u16, object_index), wasm);
}
@@ -2801,7 +2801,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
try wasm.parseInputFiles(positionals.items);
- for (wasm.objects.items) |_, object_index| {
+ for (wasm.objects.items, 0..) |_, object_index| {
try wasm.resolveSymbolsInObject(@intCast(u16, object_index));
}
@@ -2850,7 +2850,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
}
}
- for (wasm.objects.items) |*object, object_index| {
+ for (wasm.objects.items, 0..) |*object, object_index| {
try object.parseIntoAtoms(wasm.base.allocator, @intCast(u16, object_index), wasm);
}
@@ -3362,7 +3362,7 @@ fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []con
try writer.writeAll(target_features);
try leb.writeULEB128(writer, features_count);
- for (enabled_features) |enabled, feature_index| {
+ for (enabled_features, 0..) |enabled, feature_index| {
if (enabled) {
const feature: types.Feature = .{ .prefix = .used, .tag = @intToEnum(types.Feature.Tag, feature_index) };
try leb.writeULEB128(writer, @enumToInt(feature.prefix));
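
emitFeaturesSection iterates an array indexed by enum value, so the counter is converted back to a tag with @intToEnum. A sketch with a made-up three-member Tag enum:

    const std = @import("std");

    const Tag = enum(u8) { atomics, bulk_memory, simd128 };

    fn logEnabled(enabled_features: []const bool) void {
        for (enabled_features, 0..) |enabled, feature_index| {
            if (!enabled) continue;
            // the array position encodes the tag; convert the counter back
            const tag = @intToEnum(Tag, feature_index);
            std.debug.print("feature enabled: {s}\n", .{@tagName(tag)});
        }
    }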
src/translate_c/ast.zig
@@ -1765,7 +1765,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
_ = try c.addToken(.l_brace, "{");
var cases = try c.gpa.alloc(NodeIndex, payload.cases.len);
defer c.gpa.free(cases);
- for (payload.cases) |case, i| {
+ for (payload.cases, 0..) |case, i| {
cases[i] = try renderNode(c, case);
_ = try c.addToken(.comma, ",");
}
@@ -1800,7 +1800,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
var items = try c.gpa.alloc(NodeIndex, std.math.max(payload.cases.len, 1));
defer c.gpa.free(items);
items[0] = 0;
- for (payload.cases) |item, i| {
+ for (payload.cases, 0..) |item, i| {
if (i != 0) _ = try c.addToken(.comma, ",");
items[i] = try renderNode(c, item);
}
@@ -1950,7 +1950,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
defer c.gpa.free(inits);
inits[0] = 0;
inits[1] = 0;
- for (payload) |init, i| {
+ for (payload, 0..) |init, i| {
if (i != 0) _ = try c.addToken(.comma, ",");
inits[i] = try renderNode(c, init);
}
@@ -1984,7 +1984,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
defer c.gpa.free(inits);
inits[0] = 0;
inits[1] = 0;
- for (payload) |init, i| {
+ for (payload, 0..) |init, i| {
_ = try c.addToken(.period, ".");
_ = try c.addIdentifier(init.name);
_ = try c.addToken(.equal, "=");
@@ -2022,7 +2022,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.inits.len, 1));
defer c.gpa.free(inits);
inits[0] = 0;
- for (payload.inits) |init, i| {
+ for (payload.inits, 0..) |init, i| {
_ = try c.addToken(.period, ".");
_ = try c.addIdentifier(init.name);
_ = try c.addToken(.equal, "=");
@@ -2080,7 +2080,7 @@ fn renderRecord(c: *Context, node: Node) !NodeIndex {
members[0] = 0;
members[1] = 0;
- for (payload.fields) |field, i| {
+ for (payload.fields, 0..) |field, i| {
const name_tok = try c.addTokenFmt(.identifier, "{s}", .{std.zig.fmtId(field.name)});
_ = try c.addToken(.colon, ":");
const type_expr = try renderNode(c, field.type);
@@ -2116,10 +2116,10 @@ fn renderRecord(c: *Context, node: Node) !NodeIndex {
});
_ = try c.addToken(.comma, ",");
}
- for (payload.variables) |variable, i| {
+ for (payload.variables, 0..) |variable, i| {
members[payload.fields.len + i] = try renderNode(c, variable);
}
- for (payload.functions) |function, i| {
+ for (payload.functions, 0..) |function, i| {
members[payload.fields.len + num_vars + i] = try renderNode(c, function);
}
_ = try c.addToken(.r_brace, "}");
@@ -2171,7 +2171,7 @@ fn renderArrayInit(c: *Context, lhs: NodeIndex, inits: []const Node) !NodeIndex
var rendered = try c.gpa.alloc(NodeIndex, std.math.max(inits.len, 1));
defer c.gpa.free(rendered);
rendered[0] = 0;
- for (inits) |init, i| {
+ for (inits, 0..) |init, i| {
rendered[i] = try renderNode(c, init);
_ = try c.addToken(.comma, ",");
}
@@ -2539,7 +2539,7 @@ fn renderCall(c: *Context, lhs: NodeIndex, args: []const Node) !NodeIndex {
var rendered = try c.gpa.alloc(NodeIndex, args.len);
defer c.gpa.free(rendered);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (i != 0) _ = try c.addToken(.comma, ",");
rendered[i] = try renderNode(c, arg);
}
@@ -2879,7 +2879,7 @@ fn renderParams(c: *Context, params: []Payload.Param, is_var_args: bool) !std.Ar
var rendered = try std.ArrayList(NodeIndex).initCapacity(c.gpa, std.math.max(params.len, 1));
errdefer rendered.deinit();
- for (params) |param, i| {
+ for (params, 0..) |param, i| {
if (i != 0) _ = try c.addToken(.comma, ",");
if (param.is_noalias) _ = try c.addToken(.keyword_noalias, "noalias");
if (param.name) |some| {
src/AstGen.zig
@@ -1505,7 +1505,7 @@ fn arrayInitExprInner(
extra_index += 1;
}
- for (elements) |elem_init, i| {
+ for (elements, 0..) |elem_init, i| {
const ri = if (elem_ty != .none)
ResultInfo{ .rl = .{ .coerced_ty = elem_ty } }
else if (array_ty_inst != .none and nodeMayNeedMemoryLocation(astgen.tree, elem_init, true)) ri: {
@@ -1562,7 +1562,7 @@ fn arrayInitExprRlPtrInner(
});
var extra_index = try reserveExtra(astgen, elements.len);
- for (elements) |elem_init, i| {
+ for (elements, 0..) |elem_init, i| {
const elem_ptr = try gz.addPlNode(.elem_ptr_imm, elem_init, Zir.Inst.ElemPtrImm{
.ptr = result_ptr,
.index = @intCast(u32, i),
@@ -6342,7 +6342,7 @@ fn forExpr(
{
var capture_token = for_full.payload_token;
- for (for_full.ast.inputs) |input, i_usize| {
+ for (for_full.ast.inputs, 0..) |input, i_usize| {
const i = @intCast(u32, i_usize);
const capture_is_ref = token_tags[capture_token] == .asterisk;
const ident_tok = capture_token + @boolToInt(capture_is_ref);
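
AstGen names the capture i_usize for a reason: the counter produced by 0.. is a usize, and call sites that want a u32 narrow it explicitly with the two-argument @intCast of this compiler era. A sketch of the idiom:

    fn firstLongInput(inputs: []const []const u8) ?u32 {
        for (inputs, 0..) |input, i_usize| {
            // the counter is a usize; narrow it where a u32 is required
            const i = @intCast(u32, i_usize);
            if (input.len > 255) return i;
        }
        return null;
    }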
@@ -6464,7 +6464,7 @@ fn forExpr(
const then_sub_scope = blk: {
var capture_token = for_full.payload_token;
var capture_sub_scope: *Scope = &then_scope.base;
- for (for_full.ast.inputs) |input, i_usize| {
+ for (for_full.ast.inputs, 0..) |input, i_usize| {
const i = @intCast(u32, i_usize);
const capture_is_ref = token_tags[capture_token] == .asterisk;
const ident_tok = capture_token + @boolToInt(capture_is_ref);
@@ -6974,7 +6974,7 @@ fn switchExpr(
zir_datas[switch_block].pl_node.payload_index = payload_index;
const strat = ri.rl.strategy(&block_scope);
- for (payloads.items[case_table_start..case_table_end]) |start_index, i| {
+ for (payloads.items[case_table_start..case_table_end], 0..) |start_index, i| {
var body_len_index = start_index;
var end_index = start_index;
const table_index = case_table_start + i;
@@ -7638,7 +7638,7 @@ fn asmExpr(
var output_type_bits: u32 = 0;
- for (full.outputs) |output_node, i| {
+ for (full.outputs, 0..) |output_node, i| {
const symbolic_name = main_tokens[output_node];
const name = try astgen.identAsString(symbolic_name);
const constraint_token = symbolic_name + 2;
@@ -7675,7 +7675,7 @@ fn asmExpr(
var inputs_buffer: [32]Zir.Inst.Asm.Input = undefined;
const inputs = inputs_buffer[0..full.inputs.len];
- for (full.inputs) |input_node, i| {
+ for (full.inputs, 0..) |input_node, i| {
const symbolic_name = main_tokens[input_node];
const name = try astgen.identAsString(symbolic_name);
const constraint_token = symbolic_name + 2;
@@ -7848,7 +7848,7 @@ fn typeOf(
var typeof_scope = gz.makeSubBlock(scope);
typeof_scope.force_comptime = false;
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
const param_ref = try reachableExpr(&typeof_scope, &typeof_scope.base, .{ .rl = .none }, arg, node);
astgen.extra.items[args_index + i] = @enumToInt(param_ref);
}
src/Autodoc.zig
@@ -1647,7 +1647,7 @@ fn walkInstruction(
std.debug.assert(operands.len > 0);
var array_type = try self.walkRef(file, parent_scope, parent_src, operands[0], false);
- for (operands[1..]) |op, idx| {
+ for (operands[1..], 0..) |op, idx| {
const wr = try self.walkRef(file, parent_scope, parent_src, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
@@ -1665,7 +1665,7 @@ fn walkInstruction(
const operands = file.zir.refSlice(extra.end, extra.data.operands_len);
const array_data = try self.arena.alloc(usize, operands.len);
- for (operands) |op, idx| {
+ for (operands, 0..) |op, idx| {
const wr = try self.walkRef(file, parent_scope, parent_src, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
@@ -1686,7 +1686,7 @@ fn walkInstruction(
std.debug.assert(operands.len > 0);
var array_type = try self.walkRef(file, parent_scope, parent_src, operands[0], false);
- for (operands[1..]) |op, idx| {
+ for (operands[1..], 0..) |op, idx| {
const wr = try self.walkRef(file, parent_scope, parent_src, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
@@ -1715,7 +1715,7 @@ fn walkInstruction(
const operands = file.zir.refSlice(extra.end, extra.data.operands_len);
const array_data = try self.arena.alloc(usize, operands.len);
- for (operands) |op, idx| {
+ for (operands, 0..) |op, idx| {
const wr = try self.walkRef(file, parent_scope, parent_src, op, false);
const expr_index = self.exprs.items.len;
try self.exprs.append(self.arena, wr.expr);
@@ -2386,7 +2386,7 @@ fn walkInstruction(
const array_data = try self.arena.alloc(usize, args.len);
var array_type: ?DocData.Expr = null;
- for (args) |arg, idx| {
+ for (args, 0..) |arg, idx| {
const wr = try self.walkRef(file, parent_scope, parent_src, arg, idx == 0);
if (idx == 0) {
array_type = wr.typeRef;
@@ -3470,7 +3470,7 @@ fn tryResolveRefPath(
}
}
- for (self.ast_nodes.items[t_enum.src].fields.?) |ast_node, idx| {
+ for (self.ast_nodes.items[t_enum.src].fields.?, 0..) |ast_node, idx| {
const name = self.ast_nodes.items[ast_node].name.?;
if (std.mem.eql(u8, name, child_string)) {
// TODO: should we really create an artificial
@@ -3517,7 +3517,7 @@ fn tryResolveRefPath(
}
}
- for (self.ast_nodes.items[t_union.src].fields.?) |ast_node, idx| {
+ for (self.ast_nodes.items[t_union.src].fields.?, 0..) |ast_node, idx| {
const name = self.ast_nodes.items[ast_node].name.?;
if (std.mem.eql(u8, name, child_string)) {
// TODO: should we really create an artificial
@@ -3564,7 +3564,7 @@ fn tryResolveRefPath(
}
}
- for (self.ast_nodes.items[t_struct.src].fields.?) |ast_node, idx| {
+ for (self.ast_nodes.items[t_struct.src].fields.?, 0..) |ast_node, idx| {
const name = self.ast_nodes.items[ast_node].name.?;
if (std.mem.eql(u8, name, child_string)) {
// TODO: should we really create an artificial
src/codegen.zig
@@ -511,7 +511,7 @@ pub fn generateSymbol(
try code.resize(current_pos + abi_size);
var bits: u16 = 0;
- for (field_vals) |field_val, index| {
+ for (field_vals, 0..) |field_val, index| {
const field_ty = fields[index].ty;
// pointer may point to a decl which must be marked used
// but can also result in a relocation. Therefore we handle those separately.
@@ -537,7 +537,7 @@ pub fn generateSymbol(
const struct_begin = code.items.len;
const field_vals = typed_value.val.castTag(.aggregate).?.data;
- for (field_vals) |field_val, index| {
+ for (field_vals, 0..) |field_val, index| {
const field_ty = typed_value.ty.structFieldType(index);
if (!field_ty.hasRuntimeBits()) continue;
src/Compilation.zig
@@ -641,7 +641,7 @@ pub const AllErrors = struct {
}
const reference_trace = try allocator.alloc(Message, module_err_msg.reference_trace.len);
- for (reference_trace) |*reference, i| {
+ for (reference_trace, 0..) |*reference, i| {
const module_reference = module_err_msg.reference_trace[i];
if (module_reference.hidden != 0) {
reference.* = .{ .plain = .{ .msg = undefined, .count = module_reference.hidden } };
@@ -714,7 +714,7 @@ pub const AllErrors = struct {
const block = file.zir.extraData(Zir.Inst.Block, item.data.notes);
const body = file.zir.extra[block.end..][0..block.data.body_len];
notes = try arena.alloc(Message, body.len);
- for (notes) |*note, i| {
+ for (notes, 0..) |*note, i| {
const note_item = file.zir.extraData(Zir.Inst.CompileErrors.Item, body[i]);
const msg = file.zir.nullTerminatedString(note_item.data.msg);
const span = blk: {
@@ -786,7 +786,7 @@ pub const AllErrors = struct {
fn dupeList(list: []const Message, arena: Allocator) Allocator.Error![]Message {
const duped_list = try arena.alloc(Message, list.len);
- for (list) |item, i| {
+ for (list, 0..) |item, i| {
duped_list[i] = switch (item) {
.src => |src| .{ .src = .{
.msg = try arena.dupe(u8, src.msg),
@@ -1441,7 +1441,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
const llvm_cpu_features: ?[*:0]const u8 = if (build_options.have_llvm and use_llvm) blk: {
var buf = std.ArrayList(u8).init(arena);
- for (options.target.cpu.arch.allFeaturesList()) |feature, index_usize| {
+ for (options.target.cpu.arch.allFeaturesList(), 0..) |feature, index_usize| {
const index = @intCast(Target.Cpu.Feature.Set.Index, index_usize);
const is_enabled = options.target.cpu.features.isEnabled(index);
@@ -1818,7 +1818,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
var system_libs: std.StringArrayHashMapUnmanaged(SystemLib) = .{};
errdefer system_libs.deinit(gpa);
try system_libs.ensureTotalCapacity(gpa, options.system_lib_names.len);
- for (options.system_lib_names) |lib_name, i| {
+ for (options.system_lib_names, 0..) |lib_name, i| {
system_libs.putAssumeCapacity(lib_name, options.system_lib_infos[i]);
}
@@ -2880,7 +2880,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
}
for (self.lld_errors.items) |lld_error| {
const notes = try arena_allocator.alloc(AllErrors.Message, lld_error.context_lines.len);
- for (lld_error.context_lines) |context_line, i| {
+ for (lld_error.context_lines, 0..) |context_line, i| {
notes[i] = .{ .plain = .{
.msg = try arena_allocator.dupe(u8, context_line),
} };
@@ -3007,7 +3007,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
};
defer self.gpa.free(err_msg.notes);
- for (keys[1..]) |key, i| {
+ for (keys[1..], 0..) |key, i| {
const note_decl = module.declPtr(key);
err_msg.notes[i] = .{
.src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1]),
@@ -3104,7 +3104,7 @@ pub fn performAllTheWork(
const notes = try mod.gpa.alloc(Module.ErrorMsg, file.references.items.len);
errdefer mod.gpa.free(notes);
- for (notes) |*note, i| {
+ for (notes, 0..) |*note, i| {
errdefer for (notes[0..i]) |*n| n.deinit(mod.gpa);
note.* = switch (file.references.items[i]) {
.import => |loc| try Module.ErrorMsg.init(
@@ -3740,7 +3740,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, argv.items.len + 1);
new_argv_with_sentinel[argv.items.len] = null;
const new_argv = new_argv_with_sentinel[0..argv.items.len :null];
- for (argv.items) |arg, i| {
+ for (argv.items, 0..) |arg, i| {
new_argv[i] = try arena.dupeZ(u8, arg);
}
@@ -4375,7 +4375,7 @@ pub fn addCCArgs(
// It would be really nice if there was a more compact way to communicate this info to Clang.
const all_features_list = target.cpu.arch.allFeaturesList();
try argv.ensureUnusedCapacity(all_features_list.len * 4);
- for (all_features_list) |feature, index_usize| {
+ for (all_features_list, 0..) |feature, index_usize| {
const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize);
const is_enabled = target.cpu.features.isEnabled(index);
@@ -5203,7 +5203,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca
std.zig.fmtId(generic_arch_name),
});
- for (target.cpu.arch.allFeaturesList()) |feature, index_usize| {
+ for (target.cpu.arch.allFeaturesList(), 0..) |feature, index_usize| {
const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize);
const is_enabled = target.cpu.features.isEnabled(index);
if (is_enabled) {
src/glibc.zig
@@ -698,7 +698,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
const metadata = try loadMetaData(comp.gpa, abilists_contents);
defer metadata.destroy(comp.gpa);
- const target_targ_index = for (metadata.all_targets) |targ, i| {
+ const target_targ_index = for (metadata.all_targets, 0..) |targ, i| {
if (targ.arch == target.cpu.arch and
targ.os == target.os.tag and
targ.abi == target.abi)
@@ -709,7 +709,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
unreachable; // target_util.available_libcs prevents us from getting here
};
- const target_ver_index = for (metadata.all_versions) |ver, i| {
+ const target_ver_index = for (metadata.all_versions, 0..) |ver, i| {
switch (ver.order(target_version)) {
.eq => break i,
.lt => continue,
@@ -743,7 +743,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
var stubs_asm = std.ArrayList(u8).init(comp.gpa);
defer stubs_asm.deinit();
- for (libs) |lib, lib_i| {
+ for (libs, 0..) |lib, lib_i| {
stubs_asm.shrinkRetainingCapacity(0);
try stubs_asm.appendSlice(".text\n");
src/libc_installation.zig
@@ -66,7 +66,7 @@ pub const LibCInstallation = struct {
var line_it = std.mem.split(u8, line, "=");
const name = line_it.first();
const value = line_it.rest();
- inline for (fields) |field, i| {
+ inline for (fields, 0..) |field, i| {
if (std.mem.eql(u8, name, field.name)) {
found_keys[i].found = true;
if (value.len == 0) {
@@ -79,7 +79,7 @@ pub const LibCInstallation = struct {
}
}
}
- inline for (fields) |field, i| {
+ inline for (fields, 0..) |field, i| {
if (!found_keys[i].found) {
log.err("missing field: {s}\n", .{field.name});
return error.ParseError;
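
The same operand applies to inline for: the loop unrolls at comptime, here over struct fields, while the counter indexes a parallel runtime array. A sketch under the assumption that fields came from std.meta.fields, with a toy struct:

    const std = @import("std");

    const Paths = struct { include_dir: ?[]const u8, sys_include_dir: ?[]const u8 };

    fn checkAllFound(found: []const bool) !void {
        // unrolled at comptime; i is comptime-known in each iteration
        inline for (std.meta.fields(Paths), 0..) |field, i| {
            if (!found[i]) {
                std.debug.print("missing field: {s}\n", .{field.name});
                return error.ParseError;
            }
        }
    }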
@@ -640,7 +640,7 @@ fn printVerboseInvocation(
} else {
std.debug.print("Zig attempted to find the path to native system libc headers by executing this command:\n", .{});
}
- for (argv) |arg, i| {
+ for (argv, 0..) |arg, i| {
if (i != 0) std.debug.print(" ", .{});
std.debug.print("{s}", .{arg});
}
src/libunwind.zig
@@ -34,7 +34,7 @@ pub fn buildStaticLib(comp: *Compilation) !void {
.basename = basename,
};
var c_source_files: [unwind_src_list.len]Compilation.CSourceFile = undefined;
- for (unwind_src_list) |unwind_src, i| {
+ for (unwind_src_list, 0..) |unwind_src, i| {
var cflags = std.ArrayList([]const u8).init(arena);
switch (Compilation.classifyFileExt(unwind_src)) {
src/Liveness.zig
@@ -384,7 +384,7 @@ pub fn categorizeOperand(
const args = @ptrCast([]const Air.Inst.Ref, air.extra[extra.end..][0..extra.data.args_len]);
if (args.len + 1 <= bpi - 1) {
if (callee == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (arg == operand_ref) return matchOperandSmallIndex(l, inst, @intCast(OperandInt, i + 1), .write);
}
return .write;
@@ -436,7 +436,7 @@ pub fn categorizeOperand(
const elements = @ptrCast([]const Air.Inst.Ref, air.extra[ty_pl.payload..][0..len]);
if (elements.len <= bpi - 1) {
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
if (elem == operand_ref) return matchOperandSmallIndex(l, inst, @intCast(OperandInt, i), .none);
}
return .none;
@@ -1272,12 +1272,12 @@ fn analyzeInst(
defer for (case_deaths) |*cd| cd.deinit(gpa);
var total_deaths: u32 = 0;
- for (case_tables) |*ct, i| {
+ for (case_tables, 0..) |*ct, i| {
total_deaths += ct.count();
var it = ct.keyIterator();
while (it.next()) |key| {
const case_death = key.*;
- for (case_tables) |*ct_inner, j| {
+ for (case_tables, 0..) |*ct_inner, j| {
if (i == j) continue;
if (!ct_inner.contains(case_death)) {
// instruction is not referenced in this case
src/main.zig
@@ -3684,10 +3684,10 @@ fn cmdTranslateC(comp: *Compilation, arena: Allocator, enable_cache: bool) !void
const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, clang_args_len + 1);
new_argv_with_sentinel[clang_args_len] = null;
const new_argv = new_argv_with_sentinel[0..clang_args_len :null];
- for (argv.items) |arg, i| {
+ for (argv.items, 0..) |arg, i| {
new_argv[i] = try arena.dupeZ(u8, arg);
}
- for (c_source_file.extra_flags) |arg, i| {
+ for (c_source_file.extra_flags, 0..) |arg, i| {
new_argv[argv.items.len + i] = try arena.dupeZ(u8, arg);
}
@@ -4816,7 +4816,7 @@ extern "c" fn ZigLlvmAr_main(argc: c_int, argv: [*:null]?[*:0]u8) c_int;
fn argsCopyZ(alloc: Allocator, args: []const []const u8) ![:null]?[*:0]u8 {
var argv = try alloc.allocSentinel(?[*:0]u8, args.len, null);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
argv[i] = try alloc.dupeZ(u8, arg); // TODO If there was an argsAllocZ we could avoid this allocation.
}
return argv;
src/Manifest.zig
@@ -123,7 +123,7 @@ pub fn hexDigest(digest: [Hash.digest_length]u8) [multihash_len * 2]u8 {
result[2] = hex_charset[Hash.digest_length >> 4];
result[3] = hex_charset[Hash.digest_length & 15];
- for (digest) |byte, i| {
+ for (digest, 0..) |byte, i| {
result[4 + i * 2] = hex_charset[byte >> 4];
result[5 + i * 2] = hex_charset[byte & 15];
}
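
hexDigest uses the counter purely for address arithmetic into a parallel output buffer, two hex digits per input byte. The same shape in isolation:

    const hex_charset = "0123456789abcdef";

    fn hexEncode(comptime n: usize, digest: [n]u8) [n * 2]u8 {
        var result: [n * 2]u8 = undefined;
        for (digest, 0..) |byte, i| {
            // each input byte produces two output bytes at 2*i
            result[i * 2] = hex_charset[byte >> 4];
            result[i * 2 + 1] = hex_charset[byte & 15];
        }
        return result;
    }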
src/mingw.zig
@@ -72,7 +72,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.mingw32_lib => {
var c_source_files: [mingw32_lib_deps.len]Compilation.CSourceFile = undefined;
- for (mingw32_lib_deps) |dep, i| {
+ for (mingw32_lib_deps, 0..) |dep, i| {
var args = std.ArrayList([]const u8).init(arena);
try args.appendSlice(&[_][]const u8{
"-DHAVE_CONFIG_H",
@@ -236,7 +236,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
}),
});
var c_source_files: [uuid_src.len]Compilation.CSourceFile = undefined;
- for (uuid_src) |dep, i| {
+ for (uuid_src, 0..) |dep, i| {
c_source_files[i] = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"libc", "mingw", "libsrc", dep,
src/Module.zig
@@ -268,7 +268,7 @@ pub const MemoizedCall = struct {
if (a.func != b.func) return false;
assert(a.args.len == b.args.len);
- for (a.args) |a_arg, arg_i| {
+ for (a.args, 0..) |a_arg, arg_i| {
const b_arg = b.args[arg_i];
if (!a_arg.eql(b_arg, ctx.module)) {
return false;
@@ -1082,7 +1082,7 @@ pub const Struct = struct {
assert(s.layout == .Packed);
assert(s.haveLayout());
var bit_sum: u64 = 0;
- for (s.fields.values()) |field, i| {
+ for (s.fields.values(), 0..) |field, i| {
if (i == index) {
return @intCast(u16, bit_sum);
}
@@ -1341,7 +1341,7 @@ pub const Union = struct {
assert(u.haveFieldTypes());
var most_alignment: u32 = 0;
var most_index: usize = undefined;
- for (u.fields.values()) |field, i| {
+ for (u.fields.values(), 0..) |field, i| {
if (!field.ty.hasRuntimeBits()) continue;
const field_align = field.normalAlignment(target);
@@ -1405,7 +1405,7 @@ pub const Union = struct {
var payload_size: u64 = 0;
var payload_align: u32 = 0;
const fields = u.fields.values();
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
const field_align = a: {
@@ -3553,7 +3553,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
}
if (data_has_safety_tag) {
const tags = zir.instructions.items(.tag);
- for (zir.instructions.items(.data)) |*data, i| {
+ for (zir.instructions.items(.data), 0..) |*data, i| {
const union_tag = Zir.Inst.Tag.data_tags[@enumToInt(tags[i])];
const as_struct = @ptrCast(*HackDataLayout, data);
as_struct.* = .{
@@ -3740,7 +3740,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
@ptrCast([*]const u8, file.zir.instructions.items(.data).ptr);
if (data_has_safety_tag) {
// The `Data` union has a safety tag but in the file format we store it without.
- for (file.zir.instructions.items(.data)) |*data, i| {
+ for (file.zir.instructions.items(.data), 0..) |*data, i| {
const as_struct = @ptrCast(*const HackDataLayout, data);
safety_buffer[i] = as_struct.data;
}
@@ -6293,7 +6293,7 @@ pub fn populateTestFunctions(
// Add a dependency on each test name and function pointer.
try array_decl.dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2);
- for (mod.test_functions.keys()) |test_decl_index, i| {
+ for (mod.test_functions.keys(), 0..) |test_decl_index, i| {
const test_decl = mod.declPtr(test_decl_index);
const test_name_slice = mem.sliceTo(test_decl.name, 0);
const test_name_decl_index = n: {
src/objcopy.zig
@@ -312,7 +312,7 @@ const BinaryElfOutput = struct {
std.sort.sort(*BinaryElfSegment, self.segments.items, {}, segmentSortCompare);
- for (self.segments.items) |firstSegment, i| {
+ for (self.segments.items, 0..) |firstSegment, i| {
if (firstSegment.firstSection) |firstSection| {
const diff = firstSection.elfOffset - firstSegment.elfOffset;
src/Package.zig
@@ -207,7 +207,7 @@ pub fn fetchAndAddDependencies(
var any_error = false;
const deps_list = manifest.dependencies.values();
- for (manifest.dependencies.keys()) |name, i| {
+ for (manifest.dependencies.keys(), 0..) |name, i| {
const dep = deps_list[i];
const sub_prefix = try std.fmt.allocPrint(arena, "{s}{s}.", .{ name_prefix, name });
src/print_air.zig
@@ -68,7 +68,7 @@ const Writer = struct {
indent: usize,
fn writeAllConstants(w: *Writer, s: anytype) @TypeOf(s).Error!void {
- for (w.air.instructions.items(.tag)) |tag, i| {
+ for (w.air.instructions.items(.tag), 0..) |tag, i| {
const inst = @intCast(u32, i);
switch (tag) {
.constant, .const_ty => {
@@ -388,7 +388,7 @@ const Writer = struct {
try w.writeType(s, vector_ty);
try s.writeAll(", [");
- for (elements) |elem, i| {
+ for (elements, 0..) |elem, i| {
if (i != 0) try s.writeAll(", ");
try w.writeOperand(s, inst, i, elem);
}
@@ -682,7 +682,7 @@ const Writer = struct {
const args = @ptrCast([]const Air.Inst.Ref, w.air.extra[extra.end..][0..extra.data.args_len]);
try w.writeOperand(s, inst, 0, pl_op.operand);
try s.writeAll(", [");
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (i != 0) try s.writeAll(", ");
try w.writeOperand(s, inst, 1 + i, arg);
}
@@ -743,7 +743,7 @@ const Writer = struct {
if (liveness_condbr.then_deaths.len != 0) {
try s.writeByteNTimes(' ', w.indent);
- for (liveness_condbr.then_deaths) |operand, i| {
+ for (liveness_condbr.then_deaths, 0..) |operand, i| {
if (i != 0) try s.writeAll(" ");
try s.print("%{d}!", .{operand});
}
@@ -756,7 +756,7 @@ const Writer = struct {
if (liveness_condbr.else_deaths.len != 0) {
try s.writeByteNTimes(' ', w.indent);
- for (liveness_condbr.else_deaths) |operand, i| {
+ for (liveness_condbr.else_deaths, 0..) |operand, i| {
if (i != 0) try s.writeAll(" ");
try s.print("%{d}!", .{operand});
}
@@ -790,7 +790,7 @@ const Writer = struct {
extra_index = case.end + case.data.items_len + case_body.len;
try s.writeAll(", [");
- for (items) |item, item_i| {
+ for (items, 0..) |item, item_i| {
if (item_i != 0) try s.writeAll(", ");
try w.writeInstRef(s, item, false);
}
@@ -800,7 +800,7 @@ const Writer = struct {
const deaths = liveness.deaths[case_i];
if (deaths.len != 0) {
try s.writeByteNTimes(' ', w.indent);
- for (deaths) |operand, i| {
+ for (deaths, 0..) |operand, i| {
if (i != 0) try s.writeAll(" ");
try s.print("%{d}!", .{operand});
}
@@ -821,7 +821,7 @@ const Writer = struct {
const deaths = liveness.deaths[liveness.deaths.len - 1];
if (deaths.len != 0) {
try s.writeByteNTimes(' ', w.indent);
- for (deaths) |operand, i| {
+ for (deaths, 0..) |operand, i| {
if (i != 0) try s.writeAll(" ");
try s.print("%{d}!", .{operand});
}
src/print_targets.zig
@@ -99,7 +99,7 @@ pub fn cmdTargets(
for (arch.allCpuModels()) |model| {
try jws.objectField(model.name);
try jws.beginArray();
- for (arch.allFeaturesList()) |feature, i| {
+ for (arch.allFeaturesList(), 0..) |feature, i| {
if (model.features.isEnabled(@intCast(u8, i))) {
try jws.arrayElem();
try jws.emitString(feature.name);
@@ -145,7 +145,7 @@ pub fn cmdTargets(
{
try jws.objectField("features");
try jws.beginArray();
- for (native_target.cpu.arch.allFeaturesList()) |feature, i_usize| {
+ for (native_target.cpu.arch.allFeaturesList(), 0..) |feature, i_usize| {
const index = @intCast(Target.Cpu.Feature.Set.Index, i_usize);
if (cpu.features.isEnabled(index)) {
try jws.arrayElem();
src/print_zir.zig
@@ -875,7 +875,7 @@ const Writer = struct {
const extra = self.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
const args = self.code.refSlice(extra.end, extra.data.operands_len);
try stream.writeAll("{");
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, arg);
}
@@ -1068,7 +1068,7 @@ const Writer = struct {
const src = LazySrcLoc.nodeOffset(extra.data.src_node);
const operands = self.code.refSlice(extra.end, extended.small);
- for (operands) |operand, i| {
+ for (operands, 0..) |operand, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, operand);
}
@@ -1392,7 +1392,7 @@ const Writer = struct {
try stream.writeAll("{\n");
self.indent += 2;
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
try self.writeDocComment(stream, field.doc_comment_index);
try stream.writeByteNTimes(' ', self.indent);
try self.writeFlag(stream, "comptime ", field.is_comptime);
@@ -1959,7 +1959,7 @@ const Writer = struct {
try stream.writeByteNTimes(' ', self.indent);
if (is_inline) try stream.writeAll("inline ");
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
if (item_i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, item_ref);
}
@@ -2275,7 +2275,7 @@ const Writer = struct {
try self.writeBracedBody(stream, body);
try stream.writeAll(",[");
const args = self.code.refSlice(extra.end, extended.small);
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, arg);
}
@@ -2334,7 +2334,7 @@ const Writer = struct {
try self.writeInstRef(stream, args[0]);
try stream.writeAll("{");
- for (args[1..]) |arg, i| {
+ for (args[1..], 0..) |arg, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, arg);
}
@@ -2349,7 +2349,7 @@ const Writer = struct {
const args = self.code.refSlice(extra.end, extra.data.operands_len);
try stream.writeAll("{");
- for (args) |arg, i| {
+ for (args, 0..) |arg, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, arg);
}
@@ -2369,7 +2369,7 @@ const Writer = struct {
try stream.writeAll(", ");
try stream.writeAll(".{");
- for (elems) |elem, i| {
+ for (elems, 0..) |elem, i| {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, elem);
}
src/RangeSet.zig
@@ -79,7 +79,7 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
const target = self.module.getTarget();
// look for gaps
- for (self.ranges.items[1..]) |cur, i| {
+ for (self.ranges.items[1..], 0..) |cur, i| {
// i starts counting from the second item.
const prev = self.ranges.items[i];
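
The comment in this hunk is the subtle part: slicing with [1..] shifts what the loop sees, while the explicit 0.. still counts from zero, so items[i] is always the predecessor of cur. A small sketch of that pairing (stand-in data, not RangeSet itself):

const std = @import("std");

test "offset slice pairs each element with its predecessor" {
    const xs = [_]i32{ 1, 3, 6, 10 };
    for (xs[1..], 0..) |cur, i| {
        // cur is xs[i + 1]; xs[i] is the previous element.
        const prev = xs[i];
        try std.testing.expect(cur > prev);
    }
}
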
src/register_manager.zig
@@ -82,7 +82,7 @@ pub fn RegisterManager(
comptime registers: []const Register,
reg: Register,
) ?std.math.IntFittingRange(0, registers.len - 1) {
- inline for (tracked_registers) |cpreg, i| {
+ inline for (tracked_registers, 0..) |cpreg, i| {
if (reg.id() == cpreg.id()) return i;
}
return null;
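
inline for gets the same treatment as runtime loops: the unrolled index now comes from the explicit 0.. range. A sketch of the same lookup shape with stand-in data (indexOf is hypothetical; the real function compares machine-register ids):

const std = @import("std");

fn indexOf(comptime haystack: []const u8, needle: u8) ?usize {
    // Unrolled at comptime; the range supplies a comptime-known i per step.
    inline for (haystack, 0..) |c, i| {
        if (c == needle) return i;
    }
    return null;
}

test "inline for keeps the explicit index range" {
    try std.testing.expectEqual(@as(?usize, 1), indexOf("abc", 'b'));
    try std.testing.expectEqual(@as(?usize, null), indexOf("abc", 'z'));
}
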
@@ -153,7 +153,7 @@ pub fn RegisterManager(
regs: [count]Register,
) [count]RegisterLock {
var buf: [count]RegisterLock = undefined;
- for (regs) |reg, i| {
+ for (regs, 0..) |reg, i| {
buf[i] = self.lockRegAssumeUnused(reg);
}
return buf;
@@ -207,7 +207,7 @@ pub fn RegisterManager(
}
assert(i == count);
- for (regs) |reg, j| {
+ for (regs, 0..) |reg, j| {
self.markRegAllocated(reg);
if (insts[j]) |inst| {
src/Sema.zig
@@ -3801,7 +3801,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const dummy_ptr = try trash_block.addTy(.alloc, mut_final_ptr_ty);
const empty_trash_count = trash_block.instructions.items.len;
- for (placeholders) |bitcast_inst, i| {
+ for (placeholders, 0..) |bitcast_inst, i| {
const sub_ptr_ty = sema.typeOf(Air.indexToRef(bitcast_inst));
if (mut_final_ptr_ty.eql(sub_ptr_ty, sema.mod)) {
@@ -3917,7 +3917,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
defer gpa.free(runtime_arg_lens);
// First pass to look for comptime values.
- for (args) |zir_arg, i| {
+ for (args, 0..) |zir_arg, i| {
runtime_arg_lens[i] = .none;
if (zir_arg == .none) continue;
const object = try sema.resolveInst(zir_arg);
@@ -3957,7 +3957,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
// Now for the runtime checks.
if (any_runtime and block.wantSafety()) {
- for (runtime_arg_lens) |arg_len, i| {
+ for (runtime_arg_lens, 0..) |arg_len, i| {
if (arg_len == .none) continue;
if (i == len_idx) continue;
const ok = try block.addBinOp(.cmp_eq, len, arg_len);
@@ -4247,7 +4247,7 @@ fn validateStructInit(
// In this case the only thing we need to do is evaluate the implicit
// store instructions for default field values, and report any missing fields.
// Avoid the cost of the extra machinery for detecting a comptime struct init value.
- for (found_fields) |field_ptr, i| {
+ for (found_fields, 0..) |field_ptr, i| {
if (field_ptr != 0) continue;
const default_val = struct_ty.structFieldDefaultValue(i);
@@ -4313,7 +4313,7 @@ fn validateStructInit(
// ends up being comptime-known.
const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount());
- field: for (found_fields) |field_ptr, i| {
+ field: for (found_fields, 0..) |field_ptr, i| {
if (field_ptr != 0) {
// Determine whether the value stored to this pointer is comptime-known.
const field_ty = struct_ty.structFieldType(i);
@@ -4446,7 +4446,7 @@ fn validateStructInit(
try sema.resolveStructLayout(struct_ty);
// Our task is to insert `store` instructions for all the default field values.
- for (found_fields) |field_ptr, i| {
+ for (found_fields, 0..) |field_ptr, i| {
if (field_ptr != 0) continue;
const field_src = init_src; // TODO better source location
@@ -4540,7 +4540,7 @@ fn zirValidateArrayInit(
const air_tags = sema.air_instructions.items(.tag);
const air_datas = sema.air_instructions.items(.data);
- outer: for (instrs) |elem_ptr, i| {
+ outer: for (instrs, 0..) |elem_ptr, i| {
// Determine whether the value stored to this pointer is comptime-known.
if (array_ty.isTuple()) {
@@ -5059,7 +5059,7 @@ fn zirCompileLog(
const src_node = extra.data.src_node;
const args = sema.code.refSlice(extra.end, extended.small);
- for (args) |arg_ref, i| {
+ for (args, 0..) |arg_ref, i| {
if (i != 0) try writer.print(", ", .{});
const arg = try sema.resolveInst(arg_ref);
@@ -6277,7 +6277,7 @@ const GenericCallAdapter = struct {
if (ctx.generic_fn.owner_decl != other_key.generic_owner_decl.unwrap().?) return false;
const other_comptime_args = other_key.comptime_args.?;
- for (other_comptime_args[0..ctx.func_ty_info.param_types.len]) |other_arg, i| {
+ for (other_comptime_args[0..ctx.func_ty_info.param_types.len], 0..) |other_arg, i| {
const this_arg = ctx.args[i];
const this_is_comptime = this_arg.val.tag() != .generic_poison;
const other_is_comptime = other_arg.val.tag() != .generic_poison;
@@ -6793,7 +6793,7 @@ fn analyzeCall(
assert(!func_ty_info.is_generic);
const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len);
- for (uncasted_args) |uncasted_arg, i| {
+ for (uncasted_args, 0..) |uncasted_arg, i| {
if (i < fn_params_len) {
const opts: CoerceOpts = .{ .param_src = .{
.func_inst = func,
@@ -7568,7 +7568,7 @@ fn resolveGenericInstantiationType(
fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
if (!ty.isSimpleTupleOrAnonStruct()) return;
const tuple = ty.tupleFields();
- for (tuple.values) |field_val, i| {
+ for (tuple.values, 0..) |field_val, i| {
try sema.resolveTupleLazyValues(block, src, tuple.types[i]);
if (field_val.tag() == .unreachable_value) continue;
try sema.resolveLazyValue(field_val);
@@ -8642,7 +8642,7 @@ fn funcCommon(
const cc_resolved = cc orelse .Unspecified;
const param_types = try sema.arena.alloc(Type, block.params.items.len);
const comptime_params = try sema.arena.alloc(bool, block.params.items.len);
- for (block.params.items) |param, i| {
+ for (block.params.items, 0..) |param, i| {
const is_noalias = blk: {
const index = std.math.cast(u5, i) orelse break :blk false;
break :blk @truncate(u1, noalias_bits >> index) != 0;
@@ -8751,7 +8751,7 @@ fn funcCommon(
const tags = sema.code.instructions.items(.tag);
const data = sema.code.instructions.items(.data);
const param_body = sema.code.getParamBody(func_inst);
- for (block.params.items) |param, i| {
+ for (block.params.items, 0..) |param, i| {
if (!param.is_comptime) {
const param_index = param_body[i];
const param_src = switch (tags[param_index]) {
@@ -9850,7 +9850,7 @@ fn zirSwitchCapture(
const first_field_index = @intCast(u32, operand_ty.unionTagFieldIndex(first_item_val, sema.mod).?);
const first_field = union_obj.fields.values()[first_field_index];
- for (items[1..]) |item, i| {
+ for (items[1..], 0..) |item, i| {
const item_ref = try sema.resolveInst(item);
// Previous switch validation ensured this will succeed
const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable;
@@ -10180,7 +10180,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemEnum(
block,
seen_enum_fields,
@@ -10214,7 +10214,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
.{},
);
errdefer msg.destroy(sema.gpa);
- for (seen_enum_fields) |seen_src, i| {
+ for (seen_enum_fields, 0..) |seen_src, i| {
if (seen_src != null) continue;
const field_name = operand_ty.enumFieldName(i);
@@ -10276,7 +10276,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemError(
block,
&seen_errors,
@@ -10418,7 +10418,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len;
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItem(
block,
&range_set,
@@ -10513,7 +10513,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemBool(
block,
&true_count,
@@ -10597,7 +10597,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + body_len;
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
try sema.validateSwitchItemSparse(
block,
&seen_values,
@@ -10908,7 +10908,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
}
- for (items) |item_ref, item_i| {
+ for (items, 0..) |item_ref, item_i| {
cases_len += 1;
const item = try sema.resolveInst(item_ref);
@@ -11094,7 +11094,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
operand_ty.fmt(sema.mod),
});
}
- for (seen_enum_fields) |f, i| {
+ for (seen_enum_fields, 0..) |f, i| {
if (f != null) continue;
cases_len += 1;
@@ -11237,7 +11237,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
const analyze_body = if (union_originally and !special.is_inline)
- for (seen_enum_fields) |seen_field, index| {
+ for (seen_enum_fields, 0..) |seen_field, index| {
if (seen_field != null) continue;
const union_obj = maybe_union_ty.cast(Type.Payload.Union).?.data;
const field_ty = union_obj.fields.values()[index].ty;
@@ -12217,7 +12217,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen());
var elem_val_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_val_buf);
elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod);
}
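
Most Sema sites follow this shape: allocate a destination, then iterate it by pointer while the explicit index reads the source. A self-contained sketch under that assumption (scalar math stands in for the Value operations):

const std = @import("std");

test "pointer capture fills freshly allocated memory" {
    const allocator = std.testing.allocator;
    const src = [_]u32{ 1, 2, 3 };
    const out = try allocator.alloc(u32, src.len);
    defer allocator.free(out);
    // *elem points into out; the 0.. range supplies the matching read
    // index into src, like elemValueBuffer(sema.mod, i, ...) above.
    for (out, 0..) |*elem, i| {
        elem.* = src[i] * 10;
    }
    try std.testing.expectEqualSlices(u32, &[_]u32{ 10, 20, 30 }, out);
}
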
@@ -13661,7 +13661,7 @@ fn intRem(
) CompileError!Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -14771,7 +14771,7 @@ fn zirAsm(
const outputs = try sema.arena.alloc(ConstraintName, outputs_len);
var expr_ty = Air.Inst.Ref.void_type;
- for (out_args) |*arg, out_i| {
+ for (out_args, 0..) |*arg, out_i| {
const output = sema.code.extraData(Zir.Inst.Asm.Output, extra_i);
extra_i = output.end;
@@ -14798,7 +14798,7 @@ fn zirAsm(
const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len);
const inputs = try sema.arena.alloc(ConstraintName, inputs_len);
- for (args) |*arg, arg_i| {
+ for (args, 0..) |*arg, arg_i| {
const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i);
extra_i = input.end;
@@ -15522,7 +15522,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
defer params_anon_decl.deinit();
const param_vals = try params_anon_decl.arena().alloc(Value, info.param_types.len);
- for (param_vals) |*param_val, i| {
+ for (param_vals, 0..) |*param_val, i| {
const param_ty = info.param_types[i];
const is_generic = param_ty.tag() == .generic_poison;
const param_ty_val = if (is_generic)
@@ -15766,7 +15766,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const error_field_vals: ?[]Value = if (ty.isAnyError()) null else blk: {
const names = ty.errorSetNames();
const vals = try fields_anon_decl.arena().alloc(Value, names.len);
- for (vals) |*field_val, i| {
+ for (vals, 0..) |*field_val, i| {
const name = names[i];
const name_val = v: {
var anon_decl = try block.startAnonDecl();
@@ -15868,7 +15868,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const enum_fields = ty.enumFields();
const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_fields.count());
- for (enum_field_vals) |*field_val, i| {
+ for (enum_field_vals, 0..) |*field_val, i| {
var tag_val_payload: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
.data = @intCast(u32, i),
@@ -15965,7 +15965,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const union_fields = union_ty.unionFields();
const union_field_vals = try fields_anon_decl.arena().alloc(Value, union_fields.count());
- for (union_field_vals) |*field_val, i| {
+ for (union_field_vals, 0..) |*field_val, i| {
const field = union_fields.values()[i];
const name = union_fields.keys()[i];
const name_val = v: {
@@ -16074,7 +16074,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const tuple = struct_ty.tupleFields();
const field_types = tuple.types;
const struct_field_vals = try fields_anon_decl.arena().alloc(Value, field_types.len);
- for (struct_field_vals) |*struct_field_val, i| {
+ for (struct_field_vals, 0..) |*struct_field_val, i| {
const field_ty = field_types[i];
const name_val = v: {
var anon_decl = try block.startAnonDecl();
@@ -16118,7 +16118,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const struct_fields = struct_ty.structFields();
const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_fields.count());
- for (struct_field_vals) |*field_val, i| {
+ for (struct_field_vals, 0..) |*field_val, i| {
const field = struct_fields.values()[i];
const name = struct_fields.keys()[i];
const name_val = v: {
@@ -16457,7 +16457,7 @@ fn zirTypeofPeer(
const inst_list = try sema.gpa.alloc(Air.Inst.Ref, args.len);
defer sema.gpa.free(inst_list);
- for (args) |arg_ref, i| {
+ for (args, 0..) |arg_ref, i| {
inst_list[i] = try sema.resolveInst(arg_ref);
}
@@ -17568,7 +17568,7 @@ fn finishStructInit(
if (struct_ty.isAnonStruct()) {
const struct_obj = struct_ty.castTag(.anon_struct).?.data;
- for (struct_obj.values) |default_val, i| {
+ for (struct_obj.values, 0..) |default_val, i| {
if (field_inits[i] != .none) continue;
if (default_val.tag() == .unreachable_value) {
@@ -17604,7 +17604,7 @@ fn finishStructInit(
}
} else {
const struct_obj = struct_ty.castTag(.@"struct").?.data;
- for (struct_obj.fields.values()) |field, i| {
+ for (struct_obj.fields.values(), 0..) |field, i| {
if (field_inits[i] != .none) continue;
if (field.default_val.tag() == .unreachable_value) {
@@ -17645,7 +17645,7 @@ fn finishStructInit(
if (is_comptime) {
const values = try sema.arena.alloc(Value, field_inits.len);
- for (field_inits) |field_init, i| {
+ for (field_inits, 0..) |field_init, i| {
values[i] = (sema.resolveMaybeUndefVal(field_init) catch unreachable).?;
}
const struct_val = try Value.Tag.aggregate.create(sema.arena, values);
@@ -17660,7 +17660,7 @@ fn finishStructInit(
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
const alloc = try block.addTy(.alloc, alloc_ty);
- for (field_inits) |field_init, i_usize| {
+ for (field_inits, 0..) |field_init, i_usize| {
const i = @intCast(u32, i_usize);
const field_src = dest_src;
const field_ptr = try sema.structFieldPtrByIndex(block, dest_src, alloc, i, field_src, struct_ty, true);
@@ -17693,7 +17693,7 @@ fn zirStructInitAnon(
const opt_runtime_index = rs: {
var runtime_index: ?usize = null;
var extra_index = extra.end;
- for (types) |*field_ty, i| {
+ for (types, 0..) |*field_ty, i| {
const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
extra_index = item.end;
@@ -17767,7 +17767,7 @@ fn zirStructInitAnon(
});
const alloc = try block.addTy(.alloc, alloc_ty);
var extra_index = extra.end;
- for (types) |field_ty, i_usize| {
+ for (types, 0..) |field_ty, i_usize| {
const i = @intCast(u32, i_usize);
const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
extra_index = item.end;
@@ -17789,7 +17789,7 @@ fn zirStructInitAnon(
const element_refs = try sema.arena.alloc(Air.Inst.Ref, types.len);
var extra_index = extra.end;
- for (types) |_, i| {
+ for (types, 0..) |_, i| {
const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
extra_index = item.end;
element_refs[i] = try sema.resolveInst(item.data.init);
@@ -17817,7 +17817,7 @@ fn zirArrayInit(
const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @boolToInt(sentinel_val != null));
defer gpa.free(resolved_args);
- for (args[1..]) |arg, i| {
+ for (args[1..], 0..) |arg, i| {
const resolved_arg = try sema.resolveInst(arg);
const elem_ty = if (array_ty.zigTypeTag() == .Struct)
array_ty.structFieldType(i)
@@ -17838,7 +17838,7 @@ fn zirArrayInit(
resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(), some);
}
- const opt_runtime_index: ?u32 = for (resolved_args) |arg, i| {
+ const opt_runtime_index: ?u32 = for (resolved_args, 0..) |arg, i| {
const comptime_known = try sema.isComptimeKnown(arg);
if (!comptime_known) break @intCast(u32, i);
} else null;
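
The loop in this hunk is itself an expression: break yields a value and the else arm covers normal completion. A sketch of the same shape (the two-argument @intCast form matches the compiler version this commit targets):

const std = @import("std");

test "for expression with an else arm" {
    const vals = [_]u8{ 2, 4, 5, 8 };
    const first_odd: ?u32 = for (vals, 0..) |v, i| {
        // break makes the whole for evaluate to this index.
        if (v % 2 != 0) break @intCast(u32, i);
    } else null;
    try std.testing.expectEqual(@as(?u32, 2), first_odd);
}
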
@@ -17846,7 +17846,7 @@ fn zirArrayInit(
const runtime_index = opt_runtime_index orelse {
const elem_vals = try sema.arena.alloc(Value, resolved_args.len);
- for (resolved_args) |arg, i| {
+ for (resolved_args, 0..) |arg, i| {
// We checked that all args are comptime above.
elem_vals[i] = (sema.resolveMaybeUndefVal(arg) catch unreachable).?;
}
@@ -17875,7 +17875,7 @@ fn zirArrayInit(
const alloc = try block.addTy(.alloc, alloc_ty);
if (array_ty.isTuple()) {
- for (resolved_args) |arg, i| {
+ for (resolved_args, 0..) |arg, i| {
const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.mutable = true,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
@@ -17897,7 +17897,7 @@ fn zirArrayInit(
});
const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty);
- for (resolved_args) |arg, i| {
+ for (resolved_args, 0..) |arg, i| {
const index = try sema.addIntUnsigned(Type.usize, i);
const elem_ptr = try block.addPtrElemPtrTypeRef(alloc, index, elem_ptr_ty_ref);
_ = try block.addBinOp(.store, elem_ptr, arg);
@@ -17924,7 +17924,7 @@ fn zirArrayInitAnon(
const opt_runtime_src = rs: {
var runtime_src: ?LazySrcLoc = null;
- for (operands) |operand, i| {
+ for (operands, 0..) |operand, i| {
const operand_src = src; // TODO better source location
const elem = try sema.resolveInst(operand);
types[i] = sema.typeOf(elem);
@@ -17967,7 +17967,7 @@ fn zirArrayInitAnon(
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
const alloc = try block.addTy(.alloc, alloc_ty);
- for (operands) |operand, i_usize| {
+ for (operands, 0..) |operand, i_usize| {
const i = @intCast(u32, i_usize);
const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.mutable = true,
@@ -17984,7 +17984,7 @@ fn zirArrayInitAnon(
}
const element_refs = try sema.arena.alloc(Air.Inst.Ref, operands.len);
- for (operands) |operand, i| {
+ for (operands, 0..) |operand, i| {
element_refs[i] = try sema.resolveInst(operand);
}
@@ -18187,7 +18187,7 @@ fn zirUnaryMath(
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
elem.* = try eval(elem_val, scalar_ty, sema.arena, sema.mod);
}
@@ -19191,7 +19191,7 @@ fn reifyStruct(
if (layout == .Packed) {
struct_obj.status = .layout_wip;
- for (struct_obj.fields.values()) |field, index| {
+ for (struct_obj.fields.values(), 0..) |field, index| {
sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
@@ -19820,7 +19820,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, operand_ty.vectorLen());
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod);
}
@@ -19922,7 +19922,7 @@ fn zirBitCount(
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
const scalar_ty = operand_ty.scalarType();
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
const count = comptimeOp(elem_val, scalar_ty, target);
elem.* = try Value.Tag.int_u64.create(sema.arena, count);
@@ -19991,7 +19991,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const vec_len = operand_ty.vectorLen();
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
elem.* = try elem_val.byteSwap(operand_ty, target, sema.arena);
}
@@ -20040,7 +20040,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const vec_len = operand_ty.vectorLen();
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
elem.* = try elem_val.bitReverse(scalar_ty, target, sema.arena);
}
@@ -20109,7 +20109,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
.Packed => {
var bit_sum: u64 = 0;
const fields = ty.structFields();
- for (fields.values()) |field, i| {
+ for (fields.values(), 0..) |field, i| {
if (i == field_index) {
return bit_sum;
}
@@ -21046,7 +21046,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
var buf: Value.ElemValueBuffer = undefined;
const elems = try sema.gpa.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const pred_elem_val = pred_val.elemValueBuffer(sema.mod, i, &buf);
const should_choose_a = pred_elem_val.toBool();
if (should_choose_a) {
@@ -21396,12 +21396,12 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
func = bound_data.func_inst;
resolved_args = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount() + 1);
resolved_args[0] = bound_data.arg0_inst;
- for (resolved_args[1..]) |*resolved, i| {
+ for (resolved_args[1..], 0..) |*resolved, i| {
resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(u32, i), args_ty);
}
} else {
resolved_args = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount());
- for (resolved_args) |*resolved, i| {
+ for (resolved_args, 0..) |*resolved, i| {
resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(u32, i), args_ty);
}
}
@@ -21556,7 +21556,7 @@ fn analyzeMinMax(
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
const lhs_elem_val = lhs_val.elemValueBuffer(sema.mod, i, &lhs_buf);
const rhs_elem_val = rhs_val.elemValueBuffer(sema.mod, i, &rhs_buf);
elem.* = opFunc(lhs_elem_val, rhs_elem_val, target);
@@ -22453,7 +22453,7 @@ fn explainWhyTypeIsComptimeInner(
if (ty.castTag(.@"struct")) |payload| {
const struct_obj = payload.data;
- for (struct_obj.fields.values()) |field, i| {
+ for (struct_obj.fields.values(), 0..) |field, i| {
const field_src_loc = struct_obj.fieldSrcLoc(sema.mod, .{
.index = i,
.range = .type,
@@ -22473,7 +22473,7 @@ fn explainWhyTypeIsComptimeInner(
if (ty.cast(Type.Payload.Union)) |payload| {
const union_obj = payload.data;
- for (union_obj.fields.values()) |field, i| {
+ for (union_obj.fields.values(), 0..) |field, i| {
const field_src_loc = union_obj.fieldSrcLoc(sema.mod, .{
.index = i,
.range = .type,
@@ -23744,7 +23744,7 @@ fn structFieldPtrByIndex(
comptime assert(Type.packed_struct_layout_version == 2);
var running_bits: u16 = 0;
- for (struct_obj.fields.values()) |f, i| {
+ for (struct_obj.fields.values(), 0..) |f, i| {
if (!(try sema.typeHasRuntimeBits(f.ty))) continue;
if (i == field_index) {
@@ -26053,7 +26053,7 @@ fn coerceInMemoryAllowedFns(
} };
}
- for (dest_info.param_types) |dest_param_ty, i| {
+ for (dest_info.param_types, 0..) |dest_param_ty, i| {
const src_param_ty = src_info.param_types[i];
if (dest_info.comptime_params[i] != src_info.comptime_params[i]) {
@@ -26583,7 +26583,7 @@ fn beginComptimePtrMutation(
// assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted.
assert(bytes.len >= dest_len);
const elems = try arena.alloc(Value, @intCast(usize, dest_len));
- for (elems) |*elem, i| {
+ for (elems, 0..) |*elem, i| {
elem.* = try Value.Tag.int_u64.create(arena, bytes[i]);
}
@@ -26612,7 +26612,7 @@ fn beginComptimePtrMutation(
const dest_len = parent.ty.arrayLenIncludingSentinel();
const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
const elems = try arena.alloc(Value, @intCast(usize, dest_len));
- for (bytes) |byte, i| {
+ for (bytes, 0..) |byte, i| {
elems[i] = try Value.Tag.int_u64.create(arena, byte);
}
if (parent.ty.sentinel()) |sent_val| {
@@ -27583,7 +27583,7 @@ fn coerceEnumToUnion(
var msg: ?*Module.ErrorMsg = null;
errdefer if (msg) |some| some.destroy(sema.gpa);
- for (union_obj.fields.values()) |field, i| {
+ for (union_obj.fields.values(), 0..) |field, i| {
if (field.ty.zigTypeTag() == .NoReturn) {
const err_msg = msg orelse try sema.errMsg(
block,
@@ -27742,7 +27742,7 @@ fn coerceArrayLike(
const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_len);
var runtime_src: ?LazySrcLoc = null;
- for (element_vals) |*elem, i| {
+ for (element_vals, 0..) |*elem, i| {
const index_ref = try sema.addConstant(
Type.usize,
try Value.Tag.int_u64.create(sema.arena, i),
@@ -27804,7 +27804,7 @@ fn coerceTupleToArray(
const dest_elem_ty = dest_ty.childType();
var runtime_src: ?LazySrcLoc = null;
- for (element_vals) |*elem, i_usize| {
+ for (element_vals, 0..) |*elem, i_usize| {
const i = @intCast(u32, i_usize);
if (i_usize == inst_len) {
elem.* = dest_ty.sentinel().?;
@@ -27933,7 +27933,7 @@ fn coerceTupleToStruct(
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
- for (field_refs) |*field_ref, i| {
+ for (field_refs, 0..) |*field_ref, i| {
if (field_ref.* != .none) continue;
const field_name = fields.keys()[i];
@@ -28031,7 +28031,7 @@ fn coerceTupleToTuple(
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
- for (field_refs) |*field_ref, i| {
+ for (field_refs, 0..) |*field_ref, i| {
if (field_ref.* != .none) continue;
const default_val = tuple_ty.structFieldDefaultValue(i);
@@ -29407,7 +29407,7 @@ fn resolvePeerTypes(
var seen_const = false;
var convert_to_slice = false;
var chosen_i: usize = 0;
- for (instructions[1..]) |candidate, candidate_i| {
+ for (instructions[1..], 0..) |candidate, candidate_i| {
const candidate_ty = sema.typeOf(candidate);
const chosen_ty = sema.typeOf(chosen);
@@ -30066,7 +30066,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
};
struct_obj.status = .layout_wip;
- for (struct_obj.fields.values()) |field, i| {
+ for (struct_obj.fields.values(), 0..) |field, i| {
sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
@@ -30104,7 +30104,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
break :blk try decl_arena_allocator.alloc(u32, struct_obj.fields.count());
};
- for (struct_obj.fields.values()) |field, i| {
+ for (struct_obj.fields.values(), 0..) |field, i| {
optimized_order[i] = if (field.ty.hasRuntimeBits())
@intCast(u32, i)
else
@@ -30309,7 +30309,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
};
union_obj.status = .layout_wip;
- for (union_obj.fields.values()) |field, i| {
+ for (union_obj.fields.values(), 0..) |field, i| {
sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
@@ -30457,7 +30457,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const have_comptime_val = tuple.values[i].tag() != .unreachable_value;
if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty)) {
return true;
@@ -30972,7 +30972,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
// so that init values may depend on type layout.
const bodies_index = extra_index;
- for (fields) |zir_field, field_i| {
+ for (fields, 0..) |zir_field, field_i| {
const field_ty: Type = ty: {
if (zir_field.type_ref != .none) {
break :ty sema.resolveType(&block_scope, .unneeded, zir_field.type_ref) catch |err| switch (err) {
@@ -31094,7 +31094,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
if (any_inits) {
extra_index = bodies_index;
- for (fields) |zir_field, field_i| {
+ for (fields, 0..) |zir_field, field_i| {
extra_index += zir_field.type_body_len;
extra_index += zir_field.align_body_len;
if (zir_field.init_body_len > 0) {
@@ -31814,7 +31814,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.@"struct" => {
const resolved_ty = try sema.resolveTypeFields(ty);
const s = resolved_ty.castTag(.@"struct").?.data;
- for (s.fields.values()) |field, i| {
+ for (s.fields.values(), 0..) |field, i| {
if (field.is_comptime) continue;
if (field.ty.eql(resolved_ty, sema.mod)) {
const msg = try Module.ErrorMsg.create(
@@ -31835,7 +31835,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.values) |val, i| {
+ for (tuple.values, 0..) |val, i| {
const is_comptime = val.tag() != .unreachable_value;
if (is_comptime) continue;
if ((try sema.typeHasOnePossibleValue(tuple.types[i])) != null) continue;
@@ -32475,7 +32475,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const have_comptime_val = tuple.values[i].tag() != .unreachable_value;
if (!have_comptime_val and try sema.typeRequiresComptime(field_ty)) {
return true;
@@ -32635,7 +32635,7 @@ fn anonStructFieldIndex(
field_src: LazySrcLoc,
) !u32 {
const anon_struct = struct_ty.castTag(.anon_struct).?.data;
- for (anon_struct.names) |name, i| {
+ for (anon_struct.names, 0..) |name, i| {
if (mem.eql(u8, name, field_name)) {
return @intCast(u32, i);
}
@@ -32653,7 +32653,7 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void {
fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -32711,7 +32711,7 @@ fn intSub(
) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -32769,7 +32769,7 @@ fn floatAdd(
) !Value {
if (float_type.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -32826,7 +32826,7 @@ fn floatSub(
) !Value {
if (float_type.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -32884,7 +32884,7 @@ fn intSubWithOverflow(
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -32938,7 +32938,7 @@ fn floatToInt(
if (float_ty.zigTypeTag() == .Vector) {
const elem_ty = float_ty.childType();
const result_data = try sema.arena.alloc(Value, float_ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(sema.mod, i, &buf);
scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType());
@@ -33138,7 +33138,7 @@ fn intFitsInType(
.aggregate => {
assert(ty.zigTypeTag() == .Vector);
- for (val.castTag(.aggregate).?.data) |elem, i| {
+ for (val.castTag(.aggregate).?.data, 0..) |elem, i| {
if (!(try sema.intFitsInType(elem, ty.scalarType(), null))) {
if (vector_index) |some| some.* = i;
return false;
@@ -33235,7 +33235,7 @@ fn intAddWithOverflow(
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -33339,7 +33339,7 @@ fn compareVector(
) !Value {
assert(ty.zigTypeTag() == .Vector);
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
src/test.zig
@@ -664,7 +664,7 @@ pub const TestContext = struct {
errors: []const []const u8,
) void {
var array = self.updates.allocator.alloc(ErrorMsg, errors.len) catch @panic("out of memory");
- for (errors) |err_msg_line, i| {
+ for (errors, 0..) |err_msg_line, i| {
if (std.mem.startsWith(u8, err_msg_line, "error: ")) {
array[i] = .{
.plain = .{
@@ -1558,7 +1558,7 @@ pub const TestContext = struct {
});
defer comp.destroy();
- update: for (case.updates.items) |update, update_index| {
+ update: for (case.updates.items, 0..) |update, update_index| {
var update_node = root_node.start(update.name, 3);
update_node.activate();
defer update_node.end();
@@ -1631,7 +1631,7 @@ pub const TestContext = struct {
defer notes_to_check.deinit();
for (actual_errors.list) |actual_error| {
- for (case_error_list) |case_msg, i| {
+ for (case_error_list, 0..) |case_msg, i| {
if (handled_errors[i]) continue;
const ex_tag: std.meta.Tag(@TypeOf(case_msg)) = case_msg;
@@ -1702,7 +1702,7 @@ pub const TestContext = struct {
}
}
while (notes_to_check.popOrNull()) |note| {
- for (case_error_list) |case_msg, i| {
+ for (case_error_list, 0..) |case_msg, i| {
const ex_tag: std.meta.Tag(@TypeOf(case_msg)) = case_msg;
switch (note.*) {
.src => |actual_msg| {
@@ -1752,7 +1752,7 @@ pub const TestContext = struct {
}
}
- for (handled_errors) |handled, i| {
+ for (handled_errors, 0..) |handled, i| {
if (!handled) {
print(
"\nExpected error not found:\n{s}\n{}\n{s}",
src/translate_c.zig
@@ -1423,7 +1423,7 @@ fn transConvertVectorExpr(
}
const init_list = try c.arena.alloc(Node, num_elements);
- for (init_list) |*init, init_index| {
+ for (init_list, 0..) |*init, init_index| {
const tmp_decl = block_scope.statements.items[init_index];
const name = tmp_decl.castTag(.var_simple).?.data.name;
init.* = try Tag.identifier.create(c.arena, name);
@@ -1454,7 +1454,7 @@ fn makeShuffleMask(c: *Context, scope: *Scope, expr: *const clang.ShuffleVectorE
const init_list = try c.arena.alloc(Node, mask_len);
- for (init_list) |*init, i| {
+ for (init_list, 0..) |*init, i| {
const index_expr = try transExprCoercing(c, scope, expr.getExpr(@intCast(c_uint, i + 2)), .used);
const converted_index = try Tag.helpers_shuffle_vector_index.create(c.arena, .{ .lhs = index_expr, .rhs = vector_len });
init.* = converted_index;
@@ -2686,7 +2686,7 @@ fn transInitListExprArray(
const init_node = if (init_count != 0) blk: {
const init_list = try c.arena.alloc(Node, init_count);
- for (init_list) |*init, i| {
+ for (init_list, 0..) |*init, i| {
const elem_expr = expr.getInit(@intCast(c_uint, i));
init.* = try transExprCoercing(c, scope, elem_expr, .used);
}
@@ -2760,7 +2760,7 @@ fn transInitListExprVector(
}
const init_list = try c.arena.alloc(Node, num_elements);
- for (init_list) |*init, init_index| {
+ for (init_list, 0..) |*init, init_index| {
if (init_index < init_count) {
const tmp_decl = block_scope.statements.items[init_index];
const name = tmp_decl.castTag(.var_simple).?.data.name;
@@ -4649,7 +4649,7 @@ fn transCreateNodeMacroFn(c: *Context, name: []const u8, ref: Node, proto_alias:
const unwrap_expr = try Tag.unwrap.create(c.arena, init);
const args = try c.arena.alloc(Node, fn_params.items.len);
- for (fn_params.items) |param, i| {
+ for (fn_params.items, 0..) |param, i| {
args[i] = try Tag.identifier.create(c.arena, param.name.?);
}
const call_expr = try Tag.call.create(c.arena, .{
@@ -5293,7 +5293,7 @@ const PatternList = struct {
fn init(allocator: mem.Allocator) Error!PatternList {
const patterns = try allocator.alloc(Pattern, templates.len);
- for (templates) |template, i| {
+ for (templates, 0..) |template, i| {
try patterns[i].init(allocator, template);
}
return PatternList{ .patterns = patterns };
@@ -5778,7 +5778,7 @@ fn parseCNumLit(c: *Context, m: *MacroCtx) ParseError!Node {
fn zigifyEscapeSequences(ctx: *Context, m: *MacroCtx) ![]const u8 {
var source = m.slice();
- for (source) |c, i| {
+ for (source, 0..) |c, i| {
if (c == '\"' or c == '\'') {
source = source[i..];
break;
src/type.zig
@@ -628,7 +628,7 @@ pub const Type = extern union {
const a_set = a.errorSetNames();
const b_set = b.errorSetNames();
if (a_set.len != b_set.len) return false;
- for (a_set) |a_item, i| {
+ for (a_set, 0..) |a_item, i| {
const b_item = b_set[i];
if (!std.mem.eql(u8, a_item, b_item)) return false;
}
@@ -675,7 +675,7 @@ pub const Type = extern union {
if (a_info.param_types.len != b_info.param_types.len)
return false;
- for (a_info.param_types) |a_param_ty, i| {
+ for (a_info.param_types, 0..) |a_param_ty, i| {
const b_param_ty = b_info.param_types[i];
if (a_info.comptime_params[i] != b_info.comptime_params[i])
return false;
@@ -824,12 +824,12 @@ pub const Type = extern union {
if (a_tuple.types.len != b_tuple.types.len) return false;
- for (a_tuple.types) |a_ty, i| {
+ for (a_tuple.types, 0..) |a_ty, i| {
const b_ty = b_tuple.types[i];
if (!eql(a_ty, b_ty, mod)) return false;
}
- for (a_tuple.values) |a_val, i| {
+ for (a_tuple.values, 0..) |a_val, i| {
const ty = a_tuple.types[i];
const b_val = b_tuple.values[i];
if (a_val.tag() == .unreachable_value) {
@@ -855,17 +855,17 @@ pub const Type = extern union {
if (a_struct_obj.types.len != b_struct_obj.types.len) return false;
- for (a_struct_obj.names) |a_name, i| {
+ for (a_struct_obj.names, 0..) |a_name, i| {
const b_name = b_struct_obj.names[i];
if (!std.mem.eql(u8, a_name, b_name)) return false;
}
- for (a_struct_obj.types) |a_ty, i| {
+ for (a_struct_obj.types, 0..) |a_ty, i| {
const b_ty = b_struct_obj.types[i];
if (!eql(a_ty, b_ty, mod)) return false;
}
- for (a_struct_obj.values) |a_val, i| {
+ for (a_struct_obj.values, 0..) |a_val, i| {
const ty = a_struct_obj.types[i];
const b_val = b_struct_obj.values[i];
if (a_val.tag() == .unreachable_value) {
@@ -1073,7 +1073,7 @@ pub const Type = extern union {
std.hash.autoHash(hasher, fn_info.noalias_bits);
std.hash.autoHash(hasher, fn_info.param_types.len);
- for (fn_info.param_types) |param_ty, i| {
+ for (fn_info.param_types, 0..) |param_ty, i| {
std.hash.autoHash(hasher, fn_info.paramIsComptime(i));
if (param_ty.tag() == .generic_poison) continue;
hashWithHasher(param_ty, hasher, mod);
@@ -1175,7 +1175,7 @@ pub const Type = extern union {
const tuple = ty.tupleFields();
std.hash.autoHash(hasher, tuple.types.len);
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
hashWithHasher(field_ty, hasher, mod);
const field_val = tuple.values[i];
if (field_val.tag() == .unreachable_value) continue;
@@ -1187,7 +1187,7 @@ pub const Type = extern union {
std.hash.autoHash(hasher, std.builtin.TypeId.Struct);
std.hash.autoHash(hasher, struct_obj.types.len);
- for (struct_obj.types) |field_ty, i| {
+ for (struct_obj.types, 0..) |field_ty, i| {
const field_name = struct_obj.names[i];
const field_val = struct_obj.values[i];
hasher.update(field_name);
@@ -1403,10 +1403,10 @@ pub const Type = extern union {
const payload = self.castTag(.tuple).?.data;
const types = try allocator.alloc(Type, payload.types.len);
const values = try allocator.alloc(Value, payload.values.len);
- for (payload.types) |ty, i| {
+ for (payload.types, 0..) |ty, i| {
types[i] = try ty.copy(allocator);
}
- for (payload.values) |val, i| {
+ for (payload.values, 0..) |val, i| {
values[i] = try val.copy(allocator);
}
return Tag.tuple.create(allocator, .{
@@ -1419,13 +1419,13 @@ pub const Type = extern union {
const names = try allocator.alloc([]const u8, payload.names.len);
const types = try allocator.alloc(Type, payload.types.len);
const values = try allocator.alloc(Value, payload.values.len);
- for (payload.names) |name, i| {
+ for (payload.names, 0..) |name, i| {
names[i] = try allocator.dupe(u8, name);
}
- for (payload.types) |ty, i| {
+ for (payload.types, 0..) |ty, i| {
types[i] = try ty.copy(allocator);
}
- for (payload.values) |val, i| {
+ for (payload.values, 0..) |val, i| {
values[i] = try val.copy(allocator);
}
return Tag.anon_struct.create(allocator, .{
@@ -1437,7 +1437,7 @@ pub const Type = extern union {
.function => {
const payload = self.castTag(.function).?.data;
const param_types = try allocator.alloc(Type, payload.param_types.len);
- for (payload.param_types) |param_ty, i| {
+ for (payload.param_types, 0..) |param_ty, i| {
param_types[i] = try param_ty.copy(allocator);
}
const other_comptime_params = payload.comptime_params[0..payload.param_types.len];
@@ -1678,7 +1678,7 @@ pub const Type = extern union {
.function => {
const payload = ty.castTag(.function).?.data;
try writer.writeAll("fn(");
- for (payload.param_types) |param_type, i| {
+ for (payload.param_types, 0..) |param_type, i| {
if (i != 0) try writer.writeAll(", ");
try param_type.dump("", .{}, writer);
}
@@ -1739,7 +1739,7 @@ pub const Type = extern union {
.tuple => {
const tuple = ty.castTag(.tuple).?.data;
try writer.writeAll("tuple{");
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
if (i != 0) try writer.writeAll(", ");
const val = tuple.values[i];
if (val.tag() != .unreachable_value) {
@@ -1756,7 +1756,7 @@ pub const Type = extern union {
.anon_struct => {
const anon_struct = ty.castTag(.anon_struct).?.data;
try writer.writeAll("struct{");
- for (anon_struct.types) |field_ty, i| {
+ for (anon_struct.types, 0..) |field_ty, i| {
if (i != 0) try writer.writeAll(", ");
const val = anon_struct.values[i];
if (val.tag() != .unreachable_value) {
@@ -1892,7 +1892,7 @@ pub const Type = extern union {
.error_set => {
const names = ty.castTag(.error_set).?.data.names.keys();
try writer.writeAll("error{");
- for (names) |name, i| {
+ for (names, 0..) |name, i| {
if (i != 0) try writer.writeByte(',');
try writer.writeAll(name);
}
@@ -1908,7 +1908,7 @@ pub const Type = extern union {
.error_set_merged => {
const names = ty.castTag(.error_set_merged).?.data.keys();
try writer.writeAll("error{");
- for (names) |name, i| {
+ for (names, 0..) |name, i| {
if (i != 0) try writer.writeByte(',');
try writer.writeAll(name);
}
@@ -2063,7 +2063,7 @@ pub const Type = extern union {
.function => {
const fn_info = ty.fnInfo();
try writer.writeAll("fn(");
- for (fn_info.param_types) |param_ty, i| {
+ for (fn_info.param_types, 0..) |param_ty, i| {
if (i != 0) try writer.writeAll(", ");
if (fn_info.paramIsComptime(i)) {
try writer.writeAll("comptime ");
@@ -2137,7 +2137,7 @@ pub const Type = extern union {
const tuple = ty.castTag(.tuple).?.data;
try writer.writeAll("tuple{");
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
if (i != 0) try writer.writeAll(", ");
const val = tuple.values[i];
if (val.tag() != .unreachable_value) {
@@ -2154,7 +2154,7 @@ pub const Type = extern union {
const anon_struct = ty.castTag(.anon_struct).?.data;
try writer.writeAll("struct{");
- for (anon_struct.types) |field_ty, i| {
+ for (anon_struct.types, 0..) |field_ty, i| {
if (i != 0) try writer.writeAll(", ");
const val = anon_struct.values[i];
if (val.tag() != .unreachable_value) {
@@ -2253,7 +2253,7 @@ pub const Type = extern union {
.error_set => {
const names = ty.castTag(.error_set).?.data.names.keys();
try writer.writeAll("error{");
- for (names) |name, i| {
+ for (names, 0..) |name, i| {
if (i != 0) try writer.writeByte(',');
try writer.writeAll(name);
}
@@ -2266,7 +2266,7 @@ pub const Type = extern union {
.error_set_merged => {
const names = ty.castTag(.error_set_merged).?.data.keys();
try writer.writeAll("error{");
- for (names) |name, i| {
+ for (names, 0..) |name, i| {
if (i != 0) try writer.writeByte(',');
try writer.writeAll(name);
}
@@ -2568,7 +2568,7 @@ pub const Type = extern union {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
if (try field_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) return true;
@@ -3125,7 +3125,7 @@ pub const Type = extern union {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
var big_align: u32 = 0;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
if (!(field_ty.hasRuntimeBits())) continue;
@@ -5044,7 +5044,7 @@ pub const Type = extern union {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.values) |val, i| {
+ for (tuple.values, 0..) |val, i| {
const is_comptime = val.tag() != .unreachable_value;
if (is_comptime) continue;
if (tuple.types[i].onePossibleValue() != null) continue;
@@ -5256,7 +5256,7 @@ pub const Type = extern union {
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const have_comptime_val = tuple.values[i].tag() != .unreachable_value;
if (!have_comptime_val and field_ty.comptimeOnly()) return true;
}
@@ -5753,7 +5753,7 @@ pub const Type = extern union {
var bit_offset: u16 = undefined;
var elem_size_bits: u16 = undefined;
var running_bits: u16 = 0;
- for (struct_obj.fields.values()) |f, i| {
+ for (struct_obj.fields.values(), 0..) |f, i| {
if (!f.ty.hasRuntimeBits()) continue;
const field_bits = @intCast(u16, f.ty.bitSize(target));
@@ -5834,7 +5834,7 @@ pub const Type = extern union {
var offset: u64 = 0;
var big_align: u32 = 0;
- for (tuple.types) |field_ty, i| {
+ for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) {
// comptime field
src/value.zig
@@ -614,7 +614,7 @@ pub const Value = extern union {
.base = payload.base,
.data = try arena.alloc(Value, payload.data.len),
};
- for (new_payload.data) |*elem, i| {
+ for (new_payload.data, 0..) |*elem, i| {
elem.* = try payload.data[i].copy(arena);
}
return Value{ .ptr_otherwise = &new_payload.base };
@@ -891,7 +891,7 @@ pub const Value = extern union {
fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 {
const result = try allocator.alloc(u8, @intCast(usize, len));
var elem_value_buf: ElemValueBuffer = undefined;
- for (result) |*elem, i| {
+ for (result, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(mod, i, &elem_value_buf);
elem.* = @intCast(u8, elem_val.toUnsignedInt(mod.getTarget()));
}
@@ -1282,7 +1282,7 @@ pub const Value = extern union {
.int_i64 => @bitCast(u64, int_val.castTag(.int_i64).?.data),
else => unreachable,
};
- for (buffer[0..byte_count]) |_, i| switch (endian) {
+ for (buffer[0..byte_count], 0..) |_, i| switch (endian) {
.Little => buffer[i] = @truncate(u8, (int >> @intCast(u6, (8 * i)))),
.Big => buffer[byte_count - i - 1] = @truncate(u8, (int >> @intCast(u6, (8 * i)))),
};
@@ -1324,7 +1324,7 @@ pub const Value = extern union {
.Extern => {
const fields = ty.structFields().values();
const field_vals = val.castTag(.aggregate).?.data;
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
const off = @intCast(usize, ty.structFieldOffset(i, target));
writeToMemory(field_vals[i], field.ty, mod, buffer[off..]);
}
@@ -1431,7 +1431,7 @@ pub const Value = extern union {
var bits: u16 = 0;
const fields = ty.structFields().values();
const field_vals = val.castTag(.aggregate).?.data;
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
const field_bits = @intCast(u16, field.ty.bitSize(target));
field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
bits += field_bits;
@@ -1529,7 +1529,7 @@ pub const Value = extern union {
.Extern => {
const fields = ty.structFields().values();
const field_vals = try arena.alloc(Value, fields.len);
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
const off = @intCast(usize, ty.structFieldOffset(i, target));
const sz = @intCast(usize, ty.structFieldType(i).abiSize(target));
field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena);
@@ -1617,7 +1617,7 @@ pub const Value = extern union {
var bits: u16 = 0;
const elem_bit_size = @intCast(u16, elem_ty.bitSize(target));
- for (elems) |_, i| {
+ for (elems, 0..) |_, i| {
// On big-endian systems, LLVM reverses the element order of vectors by default
const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i;
elems[tgt_elem_i] = try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena);
@@ -1632,7 +1632,7 @@ pub const Value = extern union {
var bits: u16 = 0;
const fields = ty.structFields().values();
const field_vals = try arena.alloc(Value, fields.len);
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
const field_bits = @intCast(u16, field.ty.bitSize(target));
field_vals[i] = try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena);
bits += field_bits;
@@ -2259,7 +2259,7 @@ pub const Value = extern union {
if (ty.isSimpleTupleOrAnonStruct()) {
const types = ty.tupleFields().types;
assert(types.len == a_field_vals.len);
- for (types) |field_ty, i| {
+ for (types, 0..) |field_ty, i| {
if (!(try eqlAdvanced(a_field_vals[i], field_ty, b_field_vals[i], field_ty, mod, opt_sema))) {
return false;
}
@@ -2270,7 +2270,7 @@ pub const Value = extern union {
if (ty.zigTypeTag() == .Struct) {
const fields = ty.structFields().values();
assert(fields.len == a_field_vals.len);
- for (fields) |field, i| {
+ for (fields, 0..) |field, i| {
if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) {
return false;
}
@@ -2279,7 +2279,7 @@ pub const Value = extern union {
}
const elem_ty = ty.childType();
- for (a_field_vals) |a_elem, i| {
+ for (a_field_vals, 0..) |a_elem, i| {
const b_elem = b_field_vals[i];
if (!(try eqlAdvanced(a_elem, elem_ty, b_elem, elem_ty, mod, opt_sema))) {
@@ -2526,7 +2526,7 @@ pub const Value = extern union {
.empty_struct_value => {},
.aggregate => {
const field_values = val.castTag(.aggregate).?.data;
- for (field_values) |field_val, i| {
+ for (field_values, 0..) |field_val, i| {
const field_ty = ty.structFieldType(i);
field_val.hash(field_ty, hasher, mod);
}
@@ -3228,7 +3228,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (int_ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, int_ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(), target, opt_sema);
@@ -3341,7 +3341,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3390,7 +3390,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3439,7 +3439,7 @@ pub const Value = extern union {
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try arena.alloc(Value, ty.vectorLen());
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3501,7 +3501,7 @@ pub const Value = extern union {
) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3546,7 +3546,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3622,7 +3622,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(), arena, target);
@@ -3661,7 +3661,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3697,7 +3697,7 @@ pub const Value = extern union {
pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3728,7 +3728,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3764,7 +3764,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3800,7 +3800,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3841,7 +3841,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3882,7 +3882,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -3958,7 +3958,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4005,7 +4005,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4052,7 +4052,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4089,7 +4089,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, target);
@@ -4111,7 +4111,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
var bits_buf: Value.ElemValueBuffer = undefined;
@@ -4143,7 +4143,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4185,7 +4185,7 @@ pub const Value = extern union {
if (ty.zigTypeTag() == .Vector) {
const overflowed_data = try allocator.alloc(Value, ty.vectorLen());
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4243,7 +4243,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4291,7 +4291,7 @@ pub const Value = extern union {
) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4320,7 +4320,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4372,7 +4372,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try floatNegScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4408,7 +4408,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4467,7 +4467,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4526,7 +4526,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4585,7 +4585,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
@@ -4638,7 +4638,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try sqrtScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4678,7 +4678,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try sinScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4718,7 +4718,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try cosScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4758,7 +4758,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try tanScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4798,7 +4798,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try expScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4838,7 +4838,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try exp2Scalar(elem_val, float_type.scalarType(), arena, target);
@@ -4878,7 +4878,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try logScalar(elem_val, float_type.scalarType(), arena, target);
@@ -4918,7 +4918,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try log2Scalar(elem_val, float_type.scalarType(), arena, target);
@@ -4958,7 +4958,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try log10Scalar(elem_val, float_type.scalarType(), arena, target);
@@ -4998,7 +4998,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try fabsScalar(elem_val, float_type.scalarType(), arena, target);
@@ -5038,7 +5038,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try floorScalar(elem_val, float_type.scalarType(), arena, target);
@@ -5078,7 +5078,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try ceilScalar(elem_val, float_type.scalarType(), arena, target);
@@ -5118,7 +5118,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try roundScalar(elem_val, float_type.scalarType(), arena, target);
@@ -5158,7 +5158,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
scalar.* = try truncScalar(elem_val, float_type.scalarType(), arena, target);
@@ -5205,7 +5205,7 @@ pub const Value = extern union {
const target = mod.getTarget();
if (float_type.zigTypeTag() == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
- for (result_data) |*scalar, i| {
+ for (result_data, 0..) |*scalar, i| {
var mulend1_buf: Value.ElemValueBuffer = undefined;
const mulend1_elem = mulend1.elemValueBuffer(mod, i, &mulend1_buf);
var mulend2_buf: Value.ElemValueBuffer = undefined;
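
The src/value.zig hunks above all share one shape: allocate a `result_data` slice, then write each element while indexing the matching operand(s). A sketch with plain integers (hypothetical data; the real code maps `Value` scalars):

const std = @import("std");

test "elementwise map over an allocated slice" {
    const gpa = std.testing.allocator;
    const lhs = [_]u32{ 1, 2, 3 };
    const rhs = [_]u32{ 10, 20, 30 };
    const result = try gpa.alloc(u32, lhs.len);
    defer gpa.free(result);
    // A slice returned by alloc() is already a pointer, so the `*scalar`
    // capture needs no `&`; `0..` indexes the parallel operands.
    for (result, 0..) |*scalar, i| {
        scalar.* = lhs[i] + rhs[i];
    }
    try std.testing.expectEqual(@as(u32, 33), result[2]);
}

The new syntax would also allow zipping the operands directly (`for (result, lhs, rhs) |*scalar, l, r|`); the commit keeps the `0..` form as the smallest mechanical change.
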
tools/gen_spirv_spec.zig
@@ -251,7 +251,7 @@ fn renderEnumerant(writer: anytype, enumerant: g.Enumerant) !void {
.int => |int| try writer.print("{}", .{int}),
}
try writer.writeAll(", .parameters = &[_]OperandKind{");
- for (enumerant.parameters) |param, i| {
+ for (enumerant.parameters, 0..) |param, i| {
if (i != 0)
try writer.writeAll(", ");
// Note, param.quantifier will always be one.
@@ -272,7 +272,7 @@ fn renderOpcodes(
var aliases = std.ArrayList(struct { inst: usize, alias: usize }).init(allocator);
try aliases.ensureTotalCapacity(instructions.len);
- for (instructions) |inst, i| {
+ for (instructions, 0..) |inst, i| {
if (std.mem.eql(u8, inst.class.?, "@exclude")) {
continue;
}
@@ -397,7 +397,7 @@ fn renderValueEnum(
var aliases = std.ArrayList(struct { enumerant: usize, alias: usize }).init(allocator);
try aliases.ensureTotalCapacity(enumerants.len);
- for (enumerants) |enumerant, i| {
+ for (enumerants, 0..) |enumerant, i| {
const result = enum_map.getOrPutAssumeCapacity(enumerant.value.int);
if (!result.found_existing) {
result.value_ptr.* = i;
@@ -468,7 +468,7 @@ fn renderBitEnum(
var aliases = std.ArrayList(struct { flag: usize, alias: u5 }).init(allocator);
try aliases.ensureTotalCapacity(enumerants.len);
- for (enumerants) |enumerant, i| {
+ for (enumerants, 0..) |enumerant, i| {
if (enumerant.value != .bitflag) return error.InvalidRegistry;
const value = try parseHexInt(enumerant.value.bitflag);
if (value == 0) {
@@ -494,7 +494,7 @@ fn renderBitEnum(
}
}
- for (flags_by_bitpos) |maybe_flag_index, bitpos| {
+ for (flags_by_bitpos, 0..) |maybe_flag_index, bitpos| {
if (maybe_flag_index) |flag_index| {
try writer.print("{}", .{std.zig.fmtId(enumerants[flag_index].enumerant)});
} else {
@@ -521,7 +521,7 @@ fn renderBitEnum(
try writer.print("\npub const Extended = struct {{\n", .{});
- for (flags_by_bitpos) |maybe_flag_index, bitpos| {
+ for (flags_by_bitpos, 0..) |maybe_flag_index, bitpos| {
const flag_index = maybe_flag_index orelse {
try writer.print("_reserved_bit_{}: bool = false,\n", .{bitpos});
continue;
@@ -570,7 +570,7 @@ fn renderOperand(
try writer.writeAll("struct{");
- for (parameters) |param, j| {
+ for (parameters, 0..) |param, j| {
if (j != 0) {
try writer.writeAll(", ");
}
@@ -642,7 +642,7 @@ fn renderFieldName(writer: anytype, operands: []const g.Operand, field_index: us
// Translate to snake case.
name_buffer.len = 0;
- for (operand.kind) |c, i| {
+ for (operand.kind, 0..) |c, i| {
switch (c) {
'a'...'z', '0'...'9' => try name_buffer.append(c),
'A'...'Z' => if (i > 0 and std.ascii.isLower(operand.kind[i - 1])) {
@@ -658,7 +658,7 @@ fn renderFieldName(writer: anytype, operands: []const g.Operand, field_index: us
// For fields derived from type name, there could be any amount.
// Simply check against all other fields, and if another similar one exists, add a number.
- const need_extra_index = for (operands) |other_operand, i| {
+ const need_extra_index = for (operands, 0..) |other_operand, i| {
if (i != field_index and std.mem.eql(u8, operand.kind, other_operand.kind)) {
break true;
}
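
The `renderFieldName` hunk above uses the loop as an expression: `break true` yields a value and the trailing `else` provides the no-match result. A runnable sketch of that shape (made-up operand kinds):

const std = @import("std");

test "for as an expression with break/else" {
    const kinds = [_][]const u8{ "IdRef", "LiteralString", "IdRef" };
    const field_index: usize = 0;
    // The loop evaluates to `break`'s operand, or to the `else` branch
    // when no other element duplicates the kind at `field_index`.
    const need_extra_index = for (kinds, 0..) |other, i| {
        if (i != field_index and std.mem.eql(u8, kinds[field_index], other)) {
            break true;
        }
    } else false;
    try std.testing.expect(need_extra_index);
}
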
tools/gen_stubs.zig
@@ -45,7 +45,7 @@ const MultiSym = struct {
visib: elf.STV,
fn allPresent(ms: MultiSym) bool {
- for (arches) |_, i| {
+ for (arches, 0..) |_, i| {
if (!ms.present[i]) {
return false;
}
@@ -65,7 +65,7 @@ const MultiSym = struct {
fn commonSize(ms: MultiSym) ?u64 {
var size: ?u64 = null;
- for (arches) |_, i| {
+ for (arches, 0..) |_, i| {
if (!ms.present[i]) continue;
if (size) |s| {
if (ms.size[i] != s) {
@@ -80,7 +80,7 @@ const MultiSym = struct {
fn commonBinding(ms: MultiSym) ?u4 {
var binding: ?u4 = null;
- for (arches) |_, i| {
+ for (arches, 0..) |_, i| {
if (!ms.present[i]) continue;
if (binding) |b| {
if (ms.binding[i] != b) {
@@ -268,7 +268,7 @@ pub fn main() !void {
var prev_section: u16 = std.math.maxInt(u16);
var prev_pp_state: enum { none, ptr32, special } = .none;
- for (sym_table.values()) |multi_sym, sym_index| {
+ for (sym_table.values(), 0..) |multi_sym, sym_index| {
const name = sym_table.keys()[sym_index];
if (multi_sym.section != prev_section) {
@@ -309,7 +309,7 @@ pub fn main() !void {
var first = true;
try stdout.writeAll("#if ");
- for (arches) |arch, i| {
+ for (arches, 0..) |arch, i| {
if (multi_sym.present[i]) continue;
if (!first) try stdout.writeAll(" && ");
@@ -333,7 +333,7 @@ pub fn main() !void {
} else if (multi_sym.isWeak64()) {
try stdout.print("WEAK64 {s}\n", .{name});
} else {
- for (arches) |arch, i| {
+ for (arches, 0..) |arch, i| {
log.info("symbol '{s}' binding on {s}: {d}", .{
name, @tagName(arch), multi_sym.binding[i],
});
@@ -355,7 +355,7 @@ pub fn main() !void {
} else if (multi_sym.isPtr2Size()) {
try stdout.print(".size {s}, PTR2_SIZE_BYTES\n", .{name});
} else {
- for (arches) |arch, i| {
+ for (arches, 0..) |arch, i| {
log.info("symbol '{s}' size on {s}: {d}", .{
name, @tagName(arch), multi_sym.size[i],
});
@@ -415,7 +415,7 @@ fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: builtin.Endian)
// Find the offset of the dynamic symbol table.
var dynsym_index: u16 = 0;
- for (shdrs) |shdr, i| {
+ for (shdrs, 0..) |shdr, i| {
const sh_name = try arena.dupe(u8, mem.sliceTo(shstrtab[s(shdr.sh_name)..], 0));
log.debug("found section: {s}", .{sh_name});
if (mem.eql(u8, sh_name, ".dynsym")) {
@@ -566,7 +566,7 @@ fn archIndex(arch: std.Target.Cpu.Arch) u8 {
}
fn archSetName(arch_set: [arches.len]bool) []const u8 {
- for (arches) |arch, i| {
+ for (arches, 0..) |arch, i| {
if (arch_set[i]) {
return @tagName(arch);
}
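
The gen_stubs loops discard the element (`|_, i|`) and keep only the index, because `MultiSym` stores per-arch data in arrays parallel to `arches`. A sketch of that layout (names loosely mirror the tool's, data invented):

const std = @import("std");

const arches = [_][]const u8{ "aarch64", "riscv64", "x86_64" };

test "index-only iteration over parallel arrays" {
    const present = [_]bool{ true, false, true };
    var missing: usize = 0;
    // The element itself is unused; `0..` provides the index into the
    // parallel `present` array.
    for (arches, 0..) |_, i| {
        if (!present[i]) missing += 1;
    }
    try std.testing.expectEqual(@as(usize, 1), missing);
}
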
tools/update_clang_options.zig
@@ -573,7 +573,7 @@ pub fn main() anyerror!void {
const Feature = @field(cpu_targets, decl.name).Feature;
const all_features = @field(cpu_targets, decl.name).all_features;
- for (all_features) |feat, i| {
+ for (all_features, 0..) |feat, i| {
const llvm_name = feat.llvm_name orelse continue;
const zig_feat = @intToEnum(Feature, i);
const zig_name = @tagName(zig_feat);
tools/update_cpu_features.zig
@@ -899,7 +899,7 @@ pub fn main() anyerror!void {
}
} else {
var threads = try arena.alloc(std.Thread, llvm_targets.len);
- for (llvm_targets) |llvm_target, i| {
+ for (llvm_targets, 0..) |llvm_target, i| {
const job = Job{
.llvm_tblgen_exe = llvm_tblgen_exe,
.llvm_src_root = llvm_src_root,
@@ -1226,7 +1226,7 @@ fn processOneTarget(job: Job) anyerror!void {
}
try w.writeAll(
\\ const ti = @typeInfo(Feature);
- \\ for (result) |*elem, i| {
+ \\ for (&result, 0..) |*elem, i| {
\\ elem.index = i;
\\ elem.name = ti.Enum.fields[i].name;
\\ }
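
The one place the migration is not purely mechanical is inside this generated snippet: `result` there is an array, not a slice, and a mutable `*elem` capture over an array now requires passing its address, hence `&result`. A sketch under assumed element types (the real generated code fills in CPU feature metadata):

const std = @import("std");

test "pointer capture over an array needs &" {
    const Elem = struct { index: usize = 0 };
    var result: [3]Elem = .{ .{}, .{}, .{} };
    // `result` is an array value; `&result` makes the loop iterate the
    // array in place so `*elem` can mutate it. Slices (e.g. from an
    // allocator) are already pointers and need no `&`.
    for (&result, 0..) |*elem, i| {
        elem.index = i;
    }
    try std.testing.expectEqual(@as(usize, 2), result[2].index);
}
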
tools/update_crc_catalog.zig
@@ -116,7 +116,7 @@ pub fn main() anyerror!void {
defer buf.deinit();
var prev: u8 = 0;
- for (snakecase) |c, i| {
+ for (snakecase, 0..) |c, i| {
if (c == '_') {
// do nothing
} else if (i == 0) {
tools/update_spirv_features.zig
@@ -130,7 +130,7 @@ pub fn main() !void {
\\
);
- for (versions) |ver, i| {
+ for (versions, 0..) |ver, i| {
try w.print(
\\ result[@enumToInt(Feature.v{0}_{1})] = .{{
\\ .llvm_name = null,
@@ -203,7 +203,7 @@ pub fn main() !void {
try w.writeAll(
\\ const ti = @typeInfo(Feature);
- \\ for (result) |*elem, i| {
+ \\ for (&result, 0..) |*elem, i| {
\\ elem.index = i;
\\ elem.name = ti.Enum.fields[i].name;
\\ }