Commit c04be630d9
Changed files (34)
src/Liveness/Verify.zig → src/Air/Liveness/Verify.zig
@@ -334,7 +334,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
const ty_pl = data[@intFromEnum(inst)].ty_pl;
const aggregate_ty = ty_pl.ty.toType();
const len = @as(usize, @intCast(aggregate_ty.arrayLenIp(ip)));
- const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
+ const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra.items[ty_pl.payload..][0..len]));
var bt = self.liveness.iterateBigTomb(inst);
for (elements) |element| {
@@ -347,7 +347,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @as(
[]const Air.Inst.Ref,
- @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]),
+ @ptrCast(self.air.extra.items[extra.end..][0..extra.data.args_len]),
);
var bt = self.liveness.iterateBigTomb(inst);
@@ -363,12 +363,12 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
var extra_i = extra.end;
const outputs = @as(
[]const Air.Inst.Ref,
- @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]),
+ @ptrCast(self.air.extra.items[extra_i..][0..extra.data.outputs_len]),
);
extra_i += outputs.len;
const inputs = @as(
[]const Air.Inst.Ref,
- @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]),
+ @ptrCast(self.air.extra.items[extra_i..][0..extra.data.inputs_len]),
);
extra_i += inputs.len;
@@ -388,7 +388,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.@"try", .try_cold => {
const pl_op = data[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Try, pl_op.payload);
- const try_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
+ const try_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const cond_br_liveness = self.liveness.getCondBr(inst);
@@ -410,7 +410,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.try_ptr, .try_ptr_cold => {
const ty_pl = data[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
- const try_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
+ const try_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const cond_br_liveness = self.liveness.getCondBr(inst);
@@ -468,7 +468,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.dbg_inline_block => Air.DbgInlineBlock,
else => unreachable,
}, ty_pl.payload);
- break :body self.air.extra[extra.end..][0..extra.data.body_len];
+ break :body self.air.extra.items[extra.end..][0..extra.data.body_len];
},
else => unreachable,
});
@@ -501,7 +501,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.loop => {
const ty_pl = data[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
- const loop_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
+ const loop_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
// The same stuff should be alive after the loop as before it.
const gop = try self.loops.getOrPut(self.gpa, inst);
@@ -519,8 +519,8 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.cond_br => {
const pl_op = data[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
- const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.then_body_len]);
- const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
+ const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.then_body_len]);
+ const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const cond_br_liveness = self.liveness.getCondBr(inst);
try self.verifyOperand(inst, pl_op.operand, self.liveness.operandDies(inst, 0));
@@ -636,7 +636,7 @@ const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.liveness_verify);
-const Air = @import("../Air.zig");
+const Air = @import("../../Air.zig");
const Liveness = @import("../Liveness.zig");
-const InternPool = @import("../InternPool.zig");
+const InternPool = @import("../../InternPool.zig");
const Verify = @This();
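Editor's note: the recurring change in these hunks (and in the CodeGen files below) replaces direct `air.extra[...]` indexing with `air.extra.items[...]`, consistent with `Air.extra` now being backed by an array-list container whose `items` field holds the raw `u32`s. A minimal sketch of the access pattern under that assumption; the names mirror the hunks above and this is illustrative, not a definitive statement of the Air API:

    // Hedged sketch: read an instruction body out of Air "extra" data,
    // assuming `air.extra` exposes its raw u32 storage via `.items`.
    const extra = air.extraData(Air.Block, ty_pl.payload);
    const body: []const Air.Inst.Index =
        @ptrCast(air.extra.items[extra.end..][0..extra.data.body_len]);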
src/Air/Legalize.zig
@@ -0,0 +1,147 @@
+zcu: *const Zcu,
+air: Air,
+features: std.enums.EnumSet(Feature),
+
+pub const Feature = enum {
+ /// Legalize (shift lhs, (splat rhs)) -> (shift lhs, rhs)
+ remove_shift_vector_rhs_splat,
+ /// Legalize reduce of a one element vector to a bitcast
+ reduce_one_elem_to_bitcast,
+};
+
+pub const Features = std.enums.EnumFieldStruct(Feature, bool, false);
+
+pub fn legalize(air: *Air, backend: std.builtin.CompilerBackend, zcu: *const Zcu) std.mem.Allocator.Error!void {
+ var l: Legalize = .{
+ .zcu = zcu,
+ .air = air.*,
+ .features = features: switch (backend) {
+ .other, .stage1 => unreachable,
+ inline .stage2_llvm,
+ .stage2_c,
+ .stage2_wasm,
+ .stage2_arm,
+ .stage2_x86_64,
+ .stage2_aarch64,
+ .stage2_x86,
+ .stage2_riscv64,
+ .stage2_sparc64,
+ .stage2_spirv64,
+ .stage2_powerpc,
+ => |ct_backend| {
+ const Backend = codegen.importBackend(ct_backend) orelse break :features .initEmpty();
+ break :features if (@hasDecl(Backend, "legalize_features"))
+ .init(Backend.legalize_features)
+ else
+ .initEmpty();
+ },
+ _ => unreachable,
+ },
+ };
+ defer air.* = l.air;
+ if (!l.features.bits.eql(.initEmpty())) try l.legalizeBody(l.air.getMainBody());
+}
+
+fn legalizeBody(l: *Legalize, body: []const Air.Inst.Index) std.mem.Allocator.Error!void {
+ const zcu = l.zcu;
+ const ip = &zcu.intern_pool;
+ const tags = l.air.instructions.items(.tag);
+ const data = l.air.instructions.items(.data);
+ for (body) |inst| inst: switch (tags[@intFromEnum(inst)]) {
+ else => {},
+
+ .shl,
+ .shl_exact,
+ .shl_sat,
+ .shr,
+ .shr_exact,
+ => |air_tag| if (l.features.contains(.remove_shift_vector_rhs_splat)) done: {
+ const bin_op = data[@intFromEnum(inst)].bin_op;
+ const ty = l.air.typeOf(bin_op.rhs, ip);
+ if (!ty.isVector(zcu)) break :done;
+ if (bin_op.rhs.toInterned()) |rhs_ip_index| switch (ip.indexToKey(rhs_ip_index)) {
+ else => {},
+ .aggregate => |aggregate| switch (aggregate.storage) {
+ else => {},
+ .repeated_elem => |splat| continue :inst l.replaceInst(inst, air_tag, .{ .bin_op = .{
+ .lhs = bin_op.lhs,
+ .rhs = Air.internedToRef(splat),
+ } }),
+ },
+ } else {
+ const rhs_inst = bin_op.rhs.toIndex().?;
+ switch (tags[@intFromEnum(rhs_inst)]) {
+ else => {},
+ .splat => continue :inst l.replaceInst(inst, air_tag, .{ .bin_op = .{
+ .lhs = bin_op.lhs,
+ .rhs = data[@intFromEnum(rhs_inst)].ty_op.operand,
+ } }),
+ }
+ }
+ },
+
+ .reduce,
+ .reduce_optimized,
+ => if (l.features.contains(.reduce_one_elem_to_bitcast)) done: {
+ const reduce = data[@intFromEnum(inst)].reduce;
+ const vector_ty = l.air.typeOf(reduce.operand, ip);
+ switch (vector_ty.vectorLen(zcu)) {
+ 0 => unreachable,
+ 1 => continue :inst l.replaceInst(inst, .bitcast, .{ .ty_op = .{
+ .ty = Air.internedToRef(vector_ty.scalarType(zcu).toIntern()),
+ .operand = reduce.operand,
+ } }),
+ else => break :done,
+ }
+ },
+
+ .@"try", .try_cold => {
+ const pl_op = data[@intFromEnum(inst)].pl_op;
+ const extra = l.air.extraData(Air.Try, pl_op.payload);
+ try l.legalizeBody(@ptrCast(l.air.extra.items[extra.end..][0..extra.data.body_len]));
+ },
+ .try_ptr, .try_ptr_cold => {
+ const ty_pl = data[@intFromEnum(inst)].ty_pl;
+ const extra = l.air.extraData(Air.TryPtr, ty_pl.payload);
+ try l.legalizeBody(@ptrCast(l.air.extra.items[extra.end..][0..extra.data.body_len]));
+ },
+ .block, .loop => {
+ const ty_pl = data[@intFromEnum(inst)].ty_pl;
+ const extra = l.air.extraData(Air.Block, ty_pl.payload);
+ try l.legalizeBody(@ptrCast(l.air.extra.items[extra.end..][0..extra.data.body_len]));
+ },
+ .dbg_inline_block => {
+ const ty_pl = data[@intFromEnum(inst)].ty_pl;
+ const extra = l.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
+ try l.legalizeBody(@ptrCast(l.air.extra.items[extra.end..][0..extra.data.body_len]));
+ },
+ .cond_br => {
+ const pl_op = data[@intFromEnum(inst)].pl_op;
+ const extra = l.air.extraData(Air.CondBr, pl_op.payload);
+ try l.legalizeBody(@ptrCast(l.air.extra.items[extra.end..][0..extra.data.then_body_len]));
+ try l.legalizeBody(@ptrCast(l.air.extra.items[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]));
+ },
+ .switch_br, .loop_switch_br => {
+ const switch_br = l.air.unwrapSwitch(inst);
+ var it = switch_br.iterateCases();
+ while (it.next()) |case| try l.legalizeBody(case.body);
+ try l.legalizeBody(it.elseBody());
+ },
+ };
+}
+
+// inline to propagate comptime `tag`s
+inline fn replaceInst(l: *Legalize, inst: Air.Inst.Index, tag: Air.Inst.Tag, data: Air.Inst.Data) Air.Inst.Tag {
+ const ip = &l.zcu.intern_pool;
+ const orig_ty = if (std.debug.runtime_safety) l.air.typeOfIndex(inst, ip) else {};
+ l.air.instructions.items(.tag)[@intFromEnum(inst)] = tag;
+ l.air.instructions.items(.data)[@intFromEnum(inst)] = data;
+ if (std.debug.runtime_safety) std.debug.assert(l.air.typeOfIndex(inst, ip).toIntern() == orig_ty.toIntern());
+ return tag;
+}
+
+const Air = @import("../Air.zig");
+const codegen = @import("../codegen.zig");
+const Legalize = @This();
+const std = @import("std");
+const Zcu = @import("../Zcu.zig");
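Editor's note: `legalize` only enables features that a backend advertises: `codegen.importBackend` resolves the backend module and, if it declares `legalize_features`, that struct seeds the feature `EnumSet` (fields default to false via `std.enums.EnumFieldStruct`). A minimal sketch of such a declaration in a hypothetical backend file; the file path, the relative import, and whether Legalize is also re-exported through Air.zig are assumptions, not shown in this commit:

    // Hypothetical backend file, e.g. src/arch/foo/CodeGen.zig (illustrative only).
    const Legalize = @import("../../Air/Legalize.zig");

    // Opt this backend into specific legalizations; unset fields stay false.
    pub const legalize_features: Legalize.Features = .{
        .remove_shift_vector_rhs_splat = true,
        .reduce_one_elem_to_bitcast = true,
    };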
src/Liveness.zig → src/Air/Liveness.zig
@@ -12,9 +12,9 @@ const Allocator = std.mem.Allocator;
const Log2Int = std.math.Log2Int;
const Liveness = @This();
-const trace = @import("tracy.zig").trace;
-const Air = @import("Air.zig");
-const InternPool = @import("InternPool.zig");
+const trace = @import("../tracy.zig").trace;
+const Air = @import("../Air.zig");
+const InternPool = @import("../InternPool.zig");
pub const Verify = @import("Liveness/Verify.zig");
@@ -480,7 +480,7 @@ pub fn categorizeOperand(
const inst_data = air_datas[@intFromEnum(inst)].pl_op;
const callee = inst_data.operand;
const extra = air.extraData(Air.Call, inst_data.payload);
- const args = @as([]const Air.Inst.Ref, @ptrCast(air.extra[extra.end..][0..extra.data.args_len]));
+ const args = @as([]const Air.Inst.Ref, @ptrCast(air.extra.items[extra.end..][0..extra.data.args_len]));
if (args.len + 1 <= bpi - 1) {
if (callee == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
for (args, 0..) |arg, i| {
@@ -532,7 +532,7 @@ pub fn categorizeOperand(
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const aggregate_ty = ty_pl.ty.toType();
const len = @as(usize, @intCast(aggregate_ty.arrayLenIp(ip)));
- const elements = @as([]const Air.Inst.Ref, @ptrCast(air.extra[ty_pl.payload..][0..len]));
+ const elements = @as([]const Air.Inst.Ref, @ptrCast(air.extra.items[ty_pl.payload..][0..len]));
if (elements.len <= bpi - 1) {
for (elements, 0..) |elem, i| {
@@ -611,7 +611,7 @@ pub fn categorizeOperand(
.dbg_inline_block => Air.DbgInlineBlock,
else => unreachable,
}, ty_pl.payload);
- break :body air.extra[extra.end..][0..extra.data.body_len];
+ break :body air.extra.items[extra.end..][0..extra.data.body_len];
},
else => unreachable,
});
@@ -630,8 +630,8 @@ pub fn categorizeOperand(
if (cond_extra.data.then_body_len > 2 or cond_extra.data.else_body_len > 2)
return .complex;
- const then_body: []const Air.Inst.Index = @ptrCast(air.extra[cond_extra.end..][0..cond_extra.data.then_body_len]);
- const else_body: []const Air.Inst.Index = @ptrCast(air.extra[cond_extra.end + cond_extra.data.then_body_len ..][0..cond_extra.data.else_body_len]);
+ const then_body: []const Air.Inst.Index = @ptrCast(air.extra.items[cond_extra.end..][0..cond_extra.data.then_body_len]);
+ const else_body: []const Air.Inst.Index = @ptrCast(air.extra.items[cond_extra.end + cond_extra.data.then_body_len ..][0..cond_extra.data.else_body_len]);
if (then_body.len > 1 and air_tags[@intFromEnum(then_body[1])] != .unreach)
return .complex;
if (else_body.len > 1 and air_tags[@intFromEnum(else_body[1])] != .unreach)
@@ -1096,7 +1096,7 @@ fn analyzeInst(
const inst_data = inst_datas[@intFromEnum(inst)].pl_op;
const callee = inst_data.operand;
const extra = a.air.extraData(Air.Call, inst_data.payload);
- const args = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[extra.end..][0..extra.data.args_len]));
+ const args = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra.items[extra.end..][0..extra.data.args_len]));
if (args.len + 1 <= bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
buf[0] = callee;
@@ -1135,7 +1135,7 @@ fn analyzeInst(
const ty_pl = inst_datas[@intFromEnum(inst)].ty_pl;
const aggregate_ty = ty_pl.ty.toType();
const len = @as(usize, @intCast(aggregate_ty.arrayLenIp(ip)));
- const elements = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[ty_pl.payload..][0..len]));
+ const elements = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra.items[ty_pl.payload..][0..len]));
if (elements.len <= bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
@@ -1190,9 +1190,9 @@ fn analyzeInst(
.assembly => {
const extra = a.air.extraData(Air.Asm, inst_datas[@intFromEnum(inst)].ty_pl.payload);
var extra_i: usize = extra.end;
- const outputs = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[extra_i..][0..extra.data.outputs_len]));
+ const outputs = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra.items[extra_i..][0..extra.data.outputs_len]));
extra_i += outputs.len;
- const inputs = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra[extra_i..][0..extra.data.inputs_len]));
+ const inputs = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra.items[extra_i..][0..extra.data.inputs_len]));
extra_i += inputs.len;
const num_operands = simple: {
@@ -1235,7 +1235,7 @@ fn analyzeInst(
.dbg_inline_block => Air.DbgInlineBlock,
else => unreachable,
}, ty_pl.payload);
- return analyzeInstBlock(a, pass, data, inst, ty_pl.ty, @ptrCast(a.air.extra[extra.end..][0..extra.data.body_len]));
+ return analyzeInstBlock(a, pass, data, inst, ty_pl.ty, @ptrCast(a.air.extra.items[extra.end..][0..extra.data.body_len]));
},
.loop => return analyzeInstLoop(a, pass, data, inst),
@@ -1596,7 +1596,7 @@ fn analyzeInstLoop(
) !void {
const inst_datas = a.air.instructions.items(.data);
const extra = a.air.extraData(Air.Block, inst_datas[@intFromEnum(inst)].ty_pl.payload);
- const body: []const Air.Inst.Index = @ptrCast(a.air.extra[extra.end..][0..extra.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(a.air.extra.items[extra.end..][0..extra.data.body_len]);
const gpa = a.gpa;
try analyzeOperands(a, pass, data, inst, .{ .none, .none, .none });
@@ -1657,13 +1657,13 @@ fn analyzeInstCondBr(
};
const then_body: []const Air.Inst.Index = switch (inst_type) {
- .cond_br => @ptrCast(a.air.extra[extra.end..][0..extra.data.then_body_len]),
+ .cond_br => @ptrCast(a.air.extra.items[extra.end..][0..extra.data.then_body_len]),
else => &.{}, // we won't use this
};
const else_body: []const Air.Inst.Index = @ptrCast(switch (inst_type) {
- .cond_br => a.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len],
- .@"try", .try_ptr => a.air.extra[extra.end..][0..extra.data.body_len],
+ .cond_br => a.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len],
+ .@"try", .try_ptr => a.air.extra.items[extra.end..][0..extra.data.body_len],
});
switch (pass) {
src/Air/types_resolved.zig
@@ -171,7 +171,7 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
if (!checkBody(
air,
- @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+ @ptrCast(air.extra.items[extra.end..][0..extra.data.body_len]),
zcu,
)) return false;
},
@@ -181,7 +181,7 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
if (!checkBody(
air,
- @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+ @ptrCast(air.extra.items[extra.end..][0..extra.data.body_len]),
zcu,
)) return false;
},
@@ -270,7 +270,7 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
.aggregate_init => {
const ty = data.ty_pl.ty.toType();
const elems_len: usize = @intCast(ty.arrayLen(zcu));
- const elems: []const Air.Inst.Ref = @ptrCast(air.extra[data.ty_pl.payload..][0..elems_len]);
+ const elems: []const Air.Inst.Ref = @ptrCast(air.extra.items[data.ty_pl.payload..][0..elems_len]);
if (!checkType(ty, zcu)) return false;
if (ty.zigTypeTag(zcu) == .@"struct") {
for (elems, 0..) |elem, elem_idx| {
@@ -336,7 +336,7 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
.call_never_inline,
=> {
const extra = air.extraData(Air.Call, data.pl_op.payload);
- const args: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end..][0..extra.data.args_len]);
+ const args: []const Air.Inst.Ref = @ptrCast(air.extra.items[extra.end..][0..extra.data.args_len]);
if (!checkRef(data.pl_op.operand, zcu)) return false;
for (args) |arg| if (!checkRef(arg, zcu)) return false;
},
@@ -353,7 +353,7 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
if (!checkRef(data.pl_op.operand, zcu)) return false;
if (!checkBody(
air,
- @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+ @ptrCast(air.extra.items[extra.end..][0..extra.data.body_len]),
zcu,
)) return false;
},
@@ -364,7 +364,7 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
if (!checkRef(extra.data.ptr, zcu)) return false;
if (!checkBody(
air,
- @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+ @ptrCast(air.extra.items[extra.end..][0..extra.data.body_len]),
zcu,
)) return false;
},
@@ -374,12 +374,12 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
if (!checkRef(data.pl_op.operand, zcu)) return false;
if (!checkBody(
air,
- @ptrCast(air.extra[extra.end..][0..extra.data.then_body_len]),
+ @ptrCast(air.extra.items[extra.end..][0..extra.data.then_body_len]),
zcu,
)) return false;
if (!checkBody(
air,
- @ptrCast(air.extra[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]),
+ @ptrCast(air.extra.items[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]),
zcu,
)) return false;
},
@@ -404,8 +404,8 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
// Luckily, we only care about the inputs and outputs, so we don't have to do
// the whole null-terminated string dance.
- const outputs: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end..][0..extra.data.outputs_len]);
- const inputs: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end + extra.data.outputs_len ..][0..extra.data.inputs_len]);
+ const outputs: []const Air.Inst.Ref = @ptrCast(air.extra.items[extra.end..][0..extra.data.outputs_len]);
+ const inputs: []const Air.Inst.Ref = @ptrCast(air.extra.items[extra.end + extra.data.outputs_len ..][0..extra.data.inputs_len]);
for (outputs) |output| if (output != .none and !checkRef(output, zcu)) return false;
for (inputs) |input| if (input != .none and !checkRef(input, zcu)) return false;
},
src/arch/aarch64/CodeGen.zig
@@ -7,7 +7,6 @@ const codegen = @import("../../codegen.zig");
const Air = @import("../../Air.zig");
const Mir = @import("Mir.zig");
const Emit = @import("Emit.zig");
-const Liveness = @import("../../Liveness.zig");
const Type = @import("../../Type.zig");
const Value = @import("../../Value.zig");
const link = @import("../../link.zig");
@@ -44,7 +43,7 @@ const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
pt: Zcu.PerThread,
air: Air,
-liveness: Liveness,
+liveness: Air.Liveness,
bin_file: *link.File,
debug_output: link.File.DebugInfoOutput,
target: *const std.Target,
@@ -71,7 +70,7 @@ end_di_column: u32,
/// which is a relative jump, based on the address following the reloc.
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .empty,
-reused_operands: std.StaticBitSet(Liveness.bpi - 1) = undefined,
+reused_operands: std.StaticBitSet(Air.Liveness.bpi - 1) = undefined,
/// We postpone the creation of debug info for function args and locals
/// until after all Mir instructions have been generated. Only then we
@@ -273,7 +272,7 @@ const BlockData = struct {
const BigTomb = struct {
function: *Self,
inst: Air.Inst.Index,
- lbt: Liveness.BigTomb,
+ lbt: Air.Liveness.BigTomb,
fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void {
const dies = bt.lbt.feed();
@@ -324,7 +323,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
@@ -646,7 +645,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
continue;
const old_air_bookkeeping = self.air_bookkeeping;
- try self.ensureProcessDeathCapacity(Liveness.bpi);
+ try self.ensureProcessDeathCapacity(Air.Liveness.bpi);
self.reused_operands = @TypeOf(self.reused_operands).initEmpty();
switch (air_tags[@intFromEnum(inst)]) {
@@ -930,14 +929,14 @@ fn finishAirBookkeeping(self: *Self) void {
}
}
-fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
+fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Air.Liveness.bpi - 1]Air.Inst.Ref) void {
const tomb_bits = self.liveness.getTombBits(inst);
for (0.., operands) |op_index, op| {
- if (tomb_bits & @as(Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
+ if (tomb_bits & @as(Air.Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
if (self.reused_operands.isSet(op_index)) continue;
self.processDeath(op.toIndexAllowNone() orelse continue);
}
- if (tomb_bits & 1 << (Liveness.bpi - 1) == 0) {
+ if (tomb_bits & 1 << (Air.Liveness.bpi - 1) == 0) {
log.debug("%{d} => {}", .{ inst, result });
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
branch.inst_table.putAssumeCapacityNoClobber(inst, result);
@@ -1568,7 +1567,7 @@ const ReuseMetadata = struct {
/// inputs to the Air instruction are omitted (e.g. when they can
/// be represented as immediates to the Mir instruction),
/// operand_mapping should reflect that fact.
- operand_mapping: []const Liveness.OperandInt,
+ operand_mapping: []const Air.Liveness.OperandInt,
};
/// Allocate a set of registers for use as arguments for a Mir
@@ -1835,7 +1834,7 @@ fn binOpImmediate(
const write_args = [_]WriteArg{
.{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg },
};
- const operand_mapping: []const Liveness.OperandInt = if (lhs_and_rhs_swapped) &.{1} else &.{0};
+ const operand_mapping: []const Air.Liveness.OperandInt = if (lhs_and_rhs_swapped) &.{1} else &.{0};
try self.allocRegs(
&read_args,
&write_args,
@@ -3584,7 +3583,7 @@ fn reuseOperand(
self: *Self,
inst: Air.Inst.Index,
operand: Air.Inst.Ref,
- op_index: Liveness.OperandInt,
+ op_index: Air.Liveness.OperandInt,
mcv: MCValue,
) bool {
if (!self.liveness.operandDies(inst, op_index))
@@ -4250,7 +4249,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
- const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]));
+ const args: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.args_len]);
const ty = self.typeOf(callee);
const pt = self.pt;
const zcu = pt.zcu;
@@ -4389,8 +4388,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
break :result info.return_value;
};
- if (args.len + 1 <= Liveness.bpi - 1) {
- var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
+ if (args.len + 1 <= Air.Liveness.bpi - 1) {
+ var buf = [1]Air.Inst.Ref{.none} ** (Air.Liveness.bpi - 1);
buf[0] = callee;
@memcpy(buf[1..][0..args.len], args);
return self.finishAir(inst, result, buf);
@@ -4613,7 +4612,7 @@ fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) InnerError!void {
const func = zcu.funcInfo(extra.data.func);
// TODO emit debug info for function change
_ = func;
- try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
+ try self.lowerBlock(inst, @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn airDbgVar(self: *Self, inst: Air.Inst.Index) InnerError!void {
@@ -4671,8 +4670,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond = try self.resolveInst(pl_op.operand);
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
- const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.then_body_len]);
- const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
+ const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.then_body_len]);
+ const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const liveness_condbr = self.liveness.getCondBr(inst);
const reloc = try self.condBr(cond);
@@ -5016,7 +5015,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) InnerError!void {
// A loop is a setup to be able to jump back to the beginning.
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
- const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[loop.end..][0..loop.data.body_len]);
const start_index = @as(u32, @intCast(self.mir_instructions.len));
try self.genBody(body);
@@ -5036,7 +5035,7 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void {
fn airBlock(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
- try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
+ try self.lowerBlock(inst, @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) !void {
@@ -5255,9 +5254,9 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) InnerError!void {
const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
const clobbers_len = @as(u31, @truncate(extra.data.flags));
var extra_i: usize = extra.end;
- const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]));
+ const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
- const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]));
+ const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -5270,8 +5269,8 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) InnerError!void {
if (output != .none) {
return self.fail("TODO implement codegen for non-expr asm", .{});
}
- const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
- const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
+ const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
@@ -5281,7 +5280,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) InnerError!void {
} else null;
for (inputs) |input| {
- const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
+ const input_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(input_bytes, 0);
const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
@@ -5303,7 +5302,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) InnerError!void {
{
var clobber_i: u32 = 0;
while (clobber_i < clobbers_len) : (clobber_i += 1) {
- const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[extra_i..]), 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += clobber.len / 4 + 1;
@@ -5312,7 +5311,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) InnerError!void {
}
}
- const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];
+ const asm_source = std.mem.sliceAsBytes(self.air.extra.items[extra_i..])[0..extra.data.source_len];
if (mem.eql(u8, asm_source, "svc #0")) {
_ = try self.addInst(.{
@@ -5342,7 +5341,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) InnerError!void {
};
simple: {
- var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
+ var buf = [1]Air.Inst.Ref{.none} ** (Air.Liveness.bpi - 1);
var buf_index: usize = 0;
for (outputs) |output| {
if (output == .none) continue;
@@ -6052,14 +6051,14 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) InnerError!void {
const vector_ty = self.typeOfIndex(inst);
const len = vector_ty.vectorLen(zcu);
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
+ const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[ty_pl.payload..][0..len]);
const result: MCValue = res: {
if (self.liveness.isUnused(inst)) break :res MCValue.dead;
return self.fail("TODO implement airAggregateInit for {}", .{self.target.cpu.arch});
};
- if (elements.len <= Liveness.bpi - 1) {
- var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
+ if (elements.len <= Air.Liveness.bpi - 1) {
+ var buf = [1]Air.Inst.Ref{.none} ** (Air.Liveness.bpi - 1);
@memcpy(buf[0..elements.len], elements);
return self.finishAir(inst, result, buf);
}
@@ -6095,7 +6094,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) InnerError!void {
const pt = self.pt;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Try, pl_op.payload);
- const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const result: MCValue = result: {
const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
const error_union_ty = self.typeOf(pl_op.operand);
@@ -6122,7 +6121,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) InnerError!void {
fn airTryPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
- const body = self.air.extra[extra.end..][0..extra.data.body_len];
+ const body = self.air.extra.items[extra.end..][0..extra.data.body_len];
_ = body;
return self.fail("TODO implement airTryPtr for arm", .{});
// return self.finishAir(inst, result, .{ extra.data.ptr, .none, .none });
src/arch/arm/CodeGen.zig
@@ -7,7 +7,6 @@ const codegen = @import("../../codegen.zig");
const Air = @import("../../Air.zig");
const Mir = @import("Mir.zig");
const Emit = @import("Emit.zig");
-const Liveness = @import("../../Liveness.zig");
const Type = @import("../../Type.zig");
const Value = @import("../../Value.zig");
const link = @import("../../link.zig");
@@ -45,7 +44,7 @@ const InnerError = CodeGenError || error{OutOfRegisters};
gpa: Allocator,
pt: Zcu.PerThread,
air: Air,
-liveness: Liveness,
+liveness: Air.Liveness,
bin_file: *link.File,
debug_output: link.File.DebugInfoOutput,
target: *const std.Target,
@@ -72,7 +71,7 @@ end_di_column: u32,
/// which is a relative jump, based on the address following the reloc.
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .empty,
-reused_operands: std.StaticBitSet(Liveness.bpi - 1) = undefined,
+reused_operands: std.StaticBitSet(Air.Liveness.bpi - 1) = undefined,
/// We postpone the creation of debug info for function args and locals
/// until after all Mir instructions have been generated. Only then we
@@ -195,7 +194,7 @@ const BlockData = struct {
const BigTomb = struct {
function: *Self,
inst: Air.Inst.Index,
- lbt: Liveness.BigTomb,
+ lbt: Air.Liveness.BigTomb,
fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void {
const dies = bt.lbt.feed();
@@ -333,7 +332,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
@@ -635,7 +634,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
continue;
const old_air_bookkeeping = self.air_bookkeeping;
- try self.ensureProcessDeathCapacity(Liveness.bpi);
+ try self.ensureProcessDeathCapacity(Air.Liveness.bpi);
self.reused_operands = @TypeOf(self.reused_operands).initEmpty();
switch (air_tags[@intFromEnum(inst)]) {
@@ -921,14 +920,14 @@ fn finishAirBookkeeping(self: *Self) void {
}
}
-fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
+fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Air.Liveness.bpi - 1]Air.Inst.Ref) void {
const tomb_bits = self.liveness.getTombBits(inst);
for (0.., operands) |op_index, op| {
- if (tomb_bits & @as(Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
+ if (tomb_bits & @as(Air.Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
if (self.reused_operands.isSet(op_index)) continue;
self.processDeath(op.toIndexAllowNone() orelse continue);
}
- if (tomb_bits & 1 << (Liveness.bpi - 1) == 0) {
+ if (tomb_bits & 1 << (Air.Liveness.bpi - 1) == 0) {
log.debug("%{d} => {}", .{ inst, result });
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
branch.inst_table.putAssumeCapacityNoClobber(inst, result);
@@ -2617,7 +2616,7 @@ fn reuseOperand(
self: *Self,
inst: Air.Inst.Index,
operand: Air.Inst.Ref,
- op_index: Liveness.OperandInt,
+ op_index: Air.Liveness.OperandInt,
mcv: MCValue,
) bool {
if (!self.liveness.operandDies(inst, op_index))
@@ -3094,7 +3093,7 @@ const ReuseMetadata = struct {
/// inputs to the Air instruction are omitted (e.g. when they can
/// be represented as immediates to the Mir instruction),
/// operand_mapping should reflect that fact.
- operand_mapping: []const Liveness.OperandInt,
+ operand_mapping: []const Air.Liveness.OperandInt,
};
/// Allocate a set of registers for use as arguments for a Mir
@@ -3342,7 +3341,7 @@ fn binOpImmediate(
const write_args = [_]WriteArg{
.{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg },
};
- const operand_mapping: []const Liveness.OperandInt = if (lhs_and_rhs_swapped) &.{1} else &.{0};
+ const operand_mapping: []const Air.Liveness.OperandInt = if (lhs_and_rhs_swapped) &.{1} else &.{0};
try self.allocRegs(
&read_args,
&write_args,
@@ -4232,7 +4231,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
- const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
+ const args: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.args_len]);
const ty = self.typeOf(callee);
const pt = self.pt;
const zcu = pt.zcu;
@@ -4361,8 +4360,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
break :result info.return_value;
};
- if (args.len <= Liveness.bpi - 2) {
- var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
+ if (args.len <= Air.Liveness.bpi - 2) {
+ var buf = [1]Air.Inst.Ref{.none} ** (Air.Liveness.bpi - 1);
buf[0] = callee;
@memcpy(buf[1..][0..args.len], args);
return self.finishAir(inst, result, buf);
@@ -4585,7 +4584,7 @@ fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void {
const func = zcu.funcInfo(extra.data.func);
// TODO emit debug info for function change
_ = func;
- try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
+ try self.lowerBlock(inst, @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
@@ -4646,8 +4645,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond_inst = try self.resolveInst(pl_op.operand);
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
- const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.then_body_len]);
- const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
+ const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.then_body_len]);
+ const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const liveness_condbr = self.liveness.getCondBr(inst);
const reloc: Mir.Inst.Index = try self.condBr(cond_inst);
@@ -4966,7 +4965,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
// A loop is a setup to be able to jump back to the beginning.
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
- const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[loop.end..][0..loop.data.body_len]);
const start_index: Mir.Inst.Index = @intCast(self.mir_instructions.len);
try self.genBody(body);
@@ -4986,7 +4985,7 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void {
fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
- try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
+ try self.lowerBlock(inst, @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) !void {
@@ -5199,9 +5198,9 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
const clobbers_len: u31 = @truncate(extra.data.flags);
var extra_i: usize = extra.end;
- const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]);
+ const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
- const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]);
+ const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -5214,8 +5213,8 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
if (output != .none) {
return self.fail("TODO implement codegen for non-expr asm", .{});
}
- const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
- const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
+ const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
@@ -5225,7 +5224,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
} else null;
for (inputs) |input| {
- const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
+ const input_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(input_bytes, 0);
const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
@@ -5247,7 +5246,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
{
var clobber_i: u32 = 0;
while (clobber_i < clobbers_len) : (clobber_i += 1) {
- const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[extra_i..]), 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += clobber.len / 4 + 1;
@@ -5256,7 +5255,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
}
}
- const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];
+ const asm_source = std.mem.sliceAsBytes(self.air.extra.items[extra_i..])[0..extra.data.source_len];
if (mem.eql(u8, asm_source, "svc #0")) {
_ = try self.addInst(.{
@@ -5282,7 +5281,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
};
simple: {
- var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
+ var buf = [1]Air.Inst.Ref{.none} ** (Air.Liveness.bpi - 1);
var buf_index: usize = 0;
for (outputs) |output| {
if (output == .none) continue;
@@ -6021,14 +6020,14 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const vector_ty = self.typeOfIndex(inst);
const len = vector_ty.vectorLen(zcu);
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
+ const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[ty_pl.payload..][0..len]);
const result: MCValue = res: {
if (self.liveness.isUnused(inst)) break :res MCValue.dead;
return self.fail("TODO implement airAggregateInit for arm", .{});
};
- if (elements.len <= Liveness.bpi - 1) {
- var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
+ if (elements.len <= Air.Liveness.bpi - 1) {
+ var buf = [1]Air.Inst.Ref{.none} ** (Air.Liveness.bpi - 1);
@memcpy(buf[0..elements.len], elements);
return self.finishAir(inst, result, buf);
}
@@ -6065,7 +6064,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
const pt = self.pt;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Try, pl_op.payload);
- const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const result: MCValue = result: {
const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
const error_union_ty = self.typeOf(pl_op.operand);
@@ -6092,7 +6091,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
- const body = self.air.extra[extra.end..][0..extra.data.body_len];
+ const body = self.air.extra.items[extra.end..][0..extra.data.body_len];
_ = body;
return self.fail("TODO implement airTryPtr for arm", .{});
// return self.finishAir(inst, result, .{ extra.data.ptr, .none, .none });
src/arch/powerpc/CodeGen.zig
@@ -5,7 +5,6 @@ const Air = @import("../../Air.zig");
const codegen = @import("../../codegen.zig");
const InternPool = @import("../../InternPool.zig");
const link = @import("../../link.zig");
-const Liveness = @import("../../Liveness.zig");
const Zcu = @import("../../Zcu.zig");
const assert = std.debug.assert;
@@ -17,7 +16,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
src/arch/riscv64/CodeGen.zig
@@ -10,7 +10,6 @@ const Allocator = mem.Allocator;
const Air = @import("../../Air.zig");
const Mir = @import("Mir.zig");
const Emit = @import("Emit.zig");
-const Liveness = @import("../../Liveness.zig");
const Type = @import("../../Type.zig");
const Value = @import("../../Value.zig");
const link = @import("../../link.zig");
@@ -54,7 +53,7 @@ const InnerError = CodeGenError || error{OutOfRegisters};
pt: Zcu.PerThread,
air: Air,
-liveness: Liveness,
+liveness: Air.Liveness,
bin_file: *link.File,
gpa: Allocator,
@@ -82,7 +81,7 @@ scope_generation: u32,
/// which is a relative jump, based on the address following the reloc.
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .empty,
-reused_operands: std.StaticBitSet(Liveness.bpi - 1) = undefined,
+reused_operands: std.StaticBitSet(Air.Liveness.bpi - 1) = undefined,
/// Whenever there is a runtime branch, we push a Branch onto this stack,
/// and pop it off when the runtime branch joins. This provides an "overlay"
@@ -739,7 +738,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
@@ -1426,7 +1425,7 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
verbose_tracking_log.debug("{}", .{func.fmtTracking()});
const old_air_bookkeeping = func.air_bookkeeping;
- try func.ensureProcessDeathCapacity(Liveness.bpi);
+ try func.ensureProcessDeathCapacity(Air.Liveness.bpi);
func.reused_operands = @TypeOf(func.reused_operands).initEmpty();
try func.inst_tracking.ensureUnusedCapacity(func.gpa, 1);
@@ -1731,7 +1730,7 @@ fn freeValue(func: *Func, value: MCValue) !void {
}
}
-fn feed(func: *Func, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) !void {
+fn feed(func: *Func, bt: *Air.Liveness.BigTomb, operand: Air.Inst.Ref) !void {
if (bt.feed()) if (operand.toIndex()) |inst| {
log.debug("feed inst: %{}", .{inst});
try func.processDeath(inst);
@@ -1776,11 +1775,11 @@ fn finishAir(
func: *Func,
inst: Air.Inst.Index,
result: MCValue,
- operands: [Liveness.bpi - 1]Air.Inst.Ref,
+ operands: [Air.Liveness.bpi - 1]Air.Inst.Ref,
) !void {
const tomb_bits = func.liveness.getTombBits(inst);
for (0.., operands) |op_index, op| {
- if (tomb_bits & @as(Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
+ if (tomb_bits & @as(Air.Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
if (func.reused_operands.isSet(op_index)) continue;
try func.processDeath(op.toIndexAllowNone() orelse continue);
}
@@ -3651,7 +3650,7 @@ fn airTlvDllimportPtr(func: *Func, inst: Air.Inst.Index) !void {
fn airTry(func: *Func, inst: Air.Inst.Index) !void {
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = func.air.extraData(Air.Try, pl_op.payload);
- const body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(func.air.extra.items[extra.end..][0..extra.data.body_len]);
const operand_ty = func.typeOf(pl_op.operand);
const result = try func.genTry(inst, pl_op.operand, body, operand_ty, false);
return func.finishAir(inst, result, .{ .none, .none, .none });
@@ -4419,7 +4418,7 @@ fn reuseOperand(
func: *Func,
inst: Air.Inst.Index,
operand: Air.Inst.Ref,
- op_index: Liveness.OperandInt,
+ op_index: Air.Liveness.OperandInt,
mcv: MCValue,
) bool {
return func.reuseOperandAdvanced(inst, operand, op_index, mcv, inst);
@@ -4429,7 +4428,7 @@ fn reuseOperandAdvanced(
func: *Func,
inst: Air.Inst.Index,
operand: Air.Inst.Ref,
- op_index: Liveness.OperandInt,
+ op_index: Air.Liveness.OperandInt,
mcv: MCValue,
maybe_tracked_inst: ?Air.Inst.Index,
) bool {
@@ -4816,7 +4815,7 @@ fn airCall(func: *Func, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const callee = pl_op.operand;
const extra = func.air.extraData(Air.Call, pl_op.payload);
- const arg_refs: []const Air.Inst.Ref = @ptrCast(func.air.extra[extra.end..][0..extra.data.args_len]);
+ const arg_refs: []const Air.Inst.Ref = @ptrCast(func.air.extra.items[extra.end..][0..extra.data.args_len]);
const expected_num_args = 8;
const ExpectedContents = extern struct {
@@ -5232,7 +5231,7 @@ fn airDbgStmt(func: *Func, inst: Air.Inst.Index) !void {
fn airDbgInlineBlock(func: *Func, inst: Air.Inst.Index) !void {
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
- try func.lowerBlock(inst, @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]));
+ try func.lowerBlock(inst, @ptrCast(func.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn airDbgVar(func: *Func, inst: Air.Inst.Index) InnerError!void {
@@ -5284,8 +5283,8 @@ fn airCondBr(func: *Func, inst: Air.Inst.Index) !void {
const cond = try func.resolveInst(pl_op.operand);
const cond_ty = func.typeOf(pl_op.operand);
const extra = func.air.extraData(Air.CondBr, pl_op.payload);
- const then_body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.then_body_len]);
- const else_body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
+ const then_body: []const Air.Inst.Index = @ptrCast(func.air.extra.items[extra.end..][0..extra.data.then_body_len]);
+ const else_body: []const Air.Inst.Index = @ptrCast(func.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const liveness_cond_br = func.liveness.getCondBr(inst);
// If the condition dies here in this condbr instruction, process
@@ -5644,7 +5643,7 @@ fn airLoop(func: *Func, inst: Air.Inst.Index) !void {
// A loop is a setup to be able to jump back to the beginning.
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = func.air.extraData(Air.Block, ty_pl.payload);
- const body: []const Air.Inst.Index = @ptrCast(func.air.extra[loop.end..][0..loop.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(func.air.extra.items[loop.end..][0..loop.data.body_len]);
func.scope_generation += 1;
const state = try func.saveState();
@@ -5674,7 +5673,7 @@ fn jump(func: *Func, index: Mir.Inst.Index) !Mir.Inst.Index {
fn airBlock(func: *Func, inst: Air.Inst.Index) !void {
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Block, ty_pl.payload);
- try func.lowerBlock(inst, @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]));
+ try func.lowerBlock(inst, @ptrCast(func.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn lowerBlock(func: *Func, inst: Air.Inst.Index, body: []const Air.Inst.Index) !void {
@@ -6063,9 +6062,9 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
const clobbers_len: u31 = @truncate(extra.data.flags);
var extra_i: usize = extra.end;
const outputs: []const Air.Inst.Ref =
- @ptrCast(func.air.extra[extra_i..][0..extra.data.outputs_len]);
+ @ptrCast(func.air.extra.items[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
- const inputs: []const Air.Inst.Ref = @ptrCast(func.air.extra[extra_i..][0..extra.data.inputs_len]);
+ const inputs: []const Air.Inst.Ref = @ptrCast(func.air.extra.items[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
var result: MCValue = .none;
@@ -6083,8 +6082,8 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
var outputs_extra_i = extra_i;
for (outputs) |output| {
- const extra_bytes = mem.sliceAsBytes(func.air.extra[extra_i..]);
- const constraint = mem.sliceTo(mem.sliceAsBytes(func.air.extra[extra_i..]), 0);
+ const extra_bytes = mem.sliceAsBytes(func.air.extra.items[extra_i..]);
+ const constraint = mem.sliceTo(mem.sliceAsBytes(func.air.extra.items[extra_i..]), 0);
const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
@@ -6141,7 +6140,7 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
}
for (inputs) |input| {
- const input_bytes = mem.sliceAsBytes(func.air.extra[extra_i..]);
+ const input_bytes = mem.sliceAsBytes(func.air.extra.items[extra_i..]);
const constraint = mem.sliceTo(input_bytes, 0);
const name = mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
@@ -6177,7 +6176,7 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
{
var clobber_i: u32 = 0;
while (clobber_i < clobbers_len) : (clobber_i += 1) {
- const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(func.air.extra[extra_i..]), 0);
+ const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(func.air.extra.items[extra_i..]), 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += clobber.len / 4 + 1;
@@ -6224,7 +6223,7 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
labels.deinit(func.gpa);
}
- const asm_source = std.mem.sliceAsBytes(func.air.extra[extra_i..])[0..extra.data.source_len];
+ const asm_source = std.mem.sliceAsBytes(func.air.extra.items[extra_i..])[0..extra.data.source_len];
var line_it = mem.tokenizeAny(u8, asm_source, "\n\r;");
next_line: while (line_it.next()) |line| {
var mnem_it = mem.tokenizeAny(u8, line, " \t");
@@ -6493,9 +6492,9 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
return func.fail("undefined label: '{s}'", .{label.key_ptr.*});
for (outputs, args.items[0..outputs.len]) |output, arg_mcv| {
- const extra_bytes = mem.sliceAsBytes(func.air.extra[outputs_extra_i..]);
+ const extra_bytes = mem.sliceAsBytes(func.air.extra.items[outputs_extra_i..]);
const constraint =
- mem.sliceTo(mem.sliceAsBytes(func.air.extra[outputs_extra_i..]), 0);
+ mem.sliceTo(mem.sliceAsBytes(func.air.extra.items[outputs_extra_i..]), 0);
const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
@@ -6508,7 +6507,7 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
}
simple: {
- var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
+ var buf = [1]Air.Inst.Ref{.none} ** (Air.Liveness.bpi - 1);
var buf_index: usize = 0;
for (outputs) |output| {
if (output == .none) continue;
@@ -8027,7 +8026,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
const result_ty = func.typeOfIndex(inst);
const len: usize = @intCast(result_ty.arrayLen(zcu));
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const elements: []const Air.Inst.Ref = @ptrCast(func.air.extra[ty_pl.payload..][0..len]);
+ const elements: []const Air.Inst.Ref = @ptrCast(func.air.extra.items[ty_pl.payload..][0..len]);
const result: MCValue = result: {
switch (result_ty.zigTypeTag(zcu)) {
@@ -8113,8 +8112,8 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
}
};
- if (elements.len <= Liveness.bpi - 1) {
- var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
+ if (elements.len <= Air.Liveness.bpi - 1) {
+ var buf = [1]Air.Inst.Ref{.none} ** (Air.Liveness.bpi - 1);
@memcpy(buf[0..elements.len], elements);
return func.finishAir(inst, result, buf);
}
src/arch/sparc64/CodeGen.zig
@@ -18,7 +18,6 @@ const codegen = @import("../../codegen.zig");
const Air = @import("../../Air.zig");
const Mir = @import("Mir.zig");
const Emit = @import("Emit.zig");
-const Liveness = @import("../../Liveness.zig");
const Type = @import("../../Type.zig");
const CodeGenError = codegen.CodeGenError;
const Endian = std.builtin.Endian;
@@ -50,7 +49,7 @@ const RegisterView = enum(u1) {
gpa: Allocator,
pt: Zcu.PerThread,
air: Air,
-liveness: Liveness,
+liveness: Air.Liveness,
bin_file: *link.File,
target: *const std.Target,
func_index: InternPool.Index,
@@ -78,7 +77,7 @@ end_di_column: u32,
/// which is a relative jump, based on the address following the reloc.
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .empty,
-reused_operands: std.StaticBitSet(Liveness.bpi - 1) = undefined,
+reused_operands: std.StaticBitSet(Air.Liveness.bpi - 1) = undefined,
/// Whenever there is a runtime branch, we push a Branch onto this stack,
/// and pop it off when the runtime branch joins. This provides an "overlay"
@@ -240,7 +239,7 @@ const CallMCValues = struct {
const BigTomb = struct {
function: *Self,
inst: Air.Inst.Index,
- lbt: Liveness.BigTomb,
+ lbt: Air.Liveness.BigTomb,
fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void {
const dies = bt.lbt.feed();
@@ -266,7 +265,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
@@ -493,7 +492,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
continue;
const old_air_bookkeeping = self.air_bookkeeping;
- try self.ensureProcessDeathCapacity(Liveness.bpi);
+ try self.ensureProcessDeathCapacity(Air.Liveness.bpi);
self.reused_operands = @TypeOf(self.reused_operands).initEmpty();
switch (air_tags[@intFromEnum(inst)]) {
@@ -839,14 +838,14 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const vector_ty = self.typeOfIndex(inst);
const len = vector_ty.vectorLen(zcu);
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len]));
+ const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[ty_pl.payload..][0..len]);
const result: MCValue = res: {
if (self.liveness.isUnused(inst)) break :res MCValue.dead;
return self.fail("TODO implement airAggregateInit for {}", .{self.target.cpu.arch});
};
- if (elements.len <= Liveness.bpi - 1) {
- var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
+ if (elements.len <= Air.Liveness.bpi - 1) {
+ var buf = [1]Air.Inst.Ref{.none} ** (Air.Liveness.bpi - 1);
@memcpy(buf[0..elements.len], elements);
return self.finishAir(inst, result, buf);
}
@@ -876,7 +875,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const ptr_ty = self.typeOf(ty_op.operand);
const ptr = try self.resolveInst(ty_op.operand);
const array_ty = ptr_ty.childType(zcu);
- const array_len = @as(u32, @intCast(array_ty.arrayLen(zcu)));
+ const array_len: u32 = @intCast(array_ty.arrayLen(zcu));
const ptr_bytes = 8;
const stack_offset = try self.allocMem(inst, ptr_bytes * 2, .@"8");
try self.genSetStack(ptr_ty, stack_offset, ptr);
@@ -890,11 +889,11 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = (extra.data.flags & 0x80000000) != 0;
- const clobbers_len = @as(u31, @truncate(extra.data.flags));
+ const clobbers_len: u31 = @truncate(extra.data.flags);
var extra_i: usize = extra.end;
- const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i .. extra_i + extra.data.outputs_len]));
+ const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i .. extra_i + extra.data.outputs_len]);
extra_i += outputs.len;
- const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i .. extra_i + extra.data.inputs_len]));
+ const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i .. extra_i + extra.data.inputs_len]);
extra_i += inputs.len;
const dead = !is_volatile and self.liveness.isUnused(inst);
@@ -907,8 +906,8 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
if (output != .none) {
return self.fail("TODO implement codegen for non-expr asm", .{});
}
- const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
- const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
+ const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
@@ -918,7 +917,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
} else null;
for (inputs) |input| {
- const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
+ const input_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(input_bytes, 0);
const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
@@ -940,7 +939,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
{
var clobber_i: u32 = 0;
while (clobber_i < clobbers_len) : (clobber_i += 1) {
- const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[extra_i..]), 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += clobber.len / 4 + 1;
@@ -949,7 +948,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
}
}
- const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];
+ const asm_source = std.mem.sliceAsBytes(self.air.extra.items[extra_i..])[0..extra.data.source_len];
if (mem.eql(u8, asm_source, "ta 0x6d")) {
_ = try self.addInst(.{
@@ -980,7 +979,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
};
simple: {
- var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
+ var buf = [1]Air.Inst.Ref{.none} ** (Air.Liveness.bpi - 1);
var buf_index: usize = 0;
for (outputs) |output| {
if (output == .none) continue;
@@ -1124,7 +1123,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
- try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
+ try self.lowerBlock(inst, @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) !void {
@@ -1292,7 +1291,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
- const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end .. extra.end + extra.data.args_len]));
+ const args: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra.end .. extra.end + extra.data.args_len]);
const ty = self.typeOf(callee);
const pt = self.pt;
const zcu = pt.zcu;
@@ -1376,8 +1375,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const result = info.return_value;
- if (args.len + 1 <= Liveness.bpi - 1) {
- var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
+ if (args.len + 1 <= Air.Liveness.bpi - 1) {
+ var buf = [1]Air.Inst.Ref{.none} ** (Air.Liveness.bpi - 1);
buf[0] = callee;
@memcpy(buf[1..][0..args.len], args);
return self.finishAir(inst, result, buf);
@@ -1477,8 +1476,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const condition = try self.resolveInst(pl_op.operand);
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
- const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.then_body_len]);
- const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
+ const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.then_body_len]);
+ const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const liveness_condbr = self.liveness.getCondBr(inst);
// Here we emit a branch to the false section.
@@ -1629,7 +1628,7 @@ fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
// TODO emit debug info for function change
- try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
+ try self.lowerBlock(inst, @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
@@ -1795,8 +1794,8 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
// A loop is a setup to be able to jump back to the beginning.
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
- const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end .. loop.end + loop.data.body_len]);
- const start = @as(u32, @intCast(self.mir_instructions.len));
+ const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[loop.end .. loop.end + loop.data.body_len]);
+ const start: u32 = @intCast(self.mir_instructions.len);
try self.genBody(body);
try self.jump(start);
@@ -2514,7 +2513,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const zcu = self.pt.zcu;
const mcv = try self.resolveInst(operand);
const struct_ty = self.typeOf(operand);
- const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, zcu)));
+ const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, zcu));
switch (mcv) {
.dead, .unreach => unreachable,
@@ -2612,7 +2611,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
fn airTry(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Try, pl_op.payload);
- const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const result: MCValue = result: {
const error_union_ty = self.typeOf(pl_op.operand);
const error_union = try self.resolveInst(pl_op.operand);
@@ -3478,7 +3477,7 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type)
return MCValue.none;
}
- const payload_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu)));
+ const payload_offset: u32 = @intCast(errUnionPayloadOffset(payload_ty, zcu));
switch (error_union_mcv) {
.register => return self.fail("TODO errUnionPayload for registers", .{}),
.stack_offset => |off| {
@@ -3513,14 +3512,14 @@ fn finishAirBookkeeping(self: *Self) void {
}
}
-fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void {
+fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Air.Liveness.bpi - 1]Air.Inst.Ref) void {
const tomb_bits = self.liveness.getTombBits(inst);
for (0.., operands) |op_index, op| {
- if (tomb_bits & @as(Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
+ if (tomb_bits & @as(Air.Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
if (self.reused_operands.isSet(op_index)) continue;
self.processDeath(op.toIndexAllowNone() orelse continue);
}
- if (tomb_bits & 1 << (Liveness.bpi - 1) == 0) {
+ if (tomb_bits & 1 << (Air.Liveness.bpi - 1) == 0) {
log.debug("%{d} => {}", .{ inst, result });
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
branch.inst_table.putAssumeCapacityNoClobber(inst, result);
@@ -3944,7 +3943,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
const overflow_bit_ty = ty.fieldType(1, zcu);
- const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, zcu)));
+ const overflow_bit_offset: u32 = @intCast(ty.structFieldOffset(1, zcu));
const cond_reg = try self.register_manager.allocReg(null, gp);
// TODO handle floating point CCRs
@@ -4449,7 +4448,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
};
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
- const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(zcu)));
+ const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(zcu));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
result_arg.* = .{ .register = argument_registers[next_register] };
@@ -4534,7 +4533,7 @@ fn ret(self: *Self, mcv: MCValue) !void {
try self.exitlude_jump_relocs.append(self.gpa, index);
}
-fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool {
+fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Air.Liveness.OperandInt, mcv: MCValue) bool {
if (!self.liveness.operandDies(inst, op_index))
return false;
@@ -4664,7 +4663,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const mcv = try self.resolveInst(operand);
const ptr_ty = self.typeOf(operand);
const struct_ty = ptr_ty.childType(zcu);
- const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, zcu)));
+ const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, zcu));
switch (mcv) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
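Across these hunks the mechanical change is `air.extra[...]` becoming `air.extra.items[...]`, consistent with the `extra` payload buffer now being carried as an unmanaged array list whose backing slice is `.items`. A minimal standalone sketch of that access pattern, assuming `std.ArrayListUnmanaged(u32)` is the new representation (the test below is illustrative, not part of the commit):

    const std = @import("std");

    test "slice a payload buffer through .items" {
        const gpa = std.testing.allocator;
        // Stand-in for the `extra` field: an unmanaged list of u32 payload words.
        var extra: std.ArrayListUnmanaged(u32) = .empty;
        defer extra.deinit(gpa);
        try extra.appendSlice(gpa, &.{ 10, 20, 30, 40, 50 });
        // Call sites now go through `.items` to reach the backing slice,
        // then apply the usual `[start..][0..len]` window.
        const body = extra.items[1..][0..3];
        try std.testing.expectEqualSlices(u32, &.{ 20, 30, 40 }, body);
    }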
src/arch/sparc64/Emit.zig
@@ -7,7 +7,6 @@ const assert = std.debug.assert;
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
const ErrorMsg = Zcu.ErrorMsg;
-const Liveness = @import("../../Liveness.zig");
const log = std.log.scoped(.sparcv9_emit);
const Emit = @This();
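Emit.zig only drops its now-unused `Liveness` import; together with the `src/Liveness` → `src/Air/Liveness` move, the analysis is evidently reached as a declaration of `Air` rather than through a separate file import. A single-file sketch of that re-export shape, using toy definitions rather than the compiler's real ones:

    const std = @import("std");

    // Toy stand-in: the child analysis is exposed as a public declaration of
    // the parent namespace, so consumers write Air.Liveness.* directly.
    const Air = struct {
        pub const Liveness = struct {
            pub const bpi = 4;
        };
    };

    test "reach bpi through the Air namespace" {
        const buf = [1]u8{0} ** (Air.Liveness.bpi - 1);
        try std.testing.expectEqual(@as(usize, 3), buf.len);
    }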
src/arch/wasm/CodeGen.zig
@@ -17,7 +17,6 @@ const Value = @import("../../Value.zig");
const Compilation = @import("../../Compilation.zig");
const link = @import("../../link.zig");
const Air = @import("../../Air.zig");
-const Liveness = @import("../../Liveness.zig");
const Mir = @import("Mir.zig");
const Emit = @import("Emit.zig");
const abi = @import("abi.zig");
@@ -39,7 +38,7 @@ owner_nav: InternPool.Nav.Index,
/// and block
block_depth: u32 = 0,
air: Air,
-liveness: Liveness,
+liveness: Air.Liveness,
gpa: mem.Allocator,
func_index: InternPool.Index,
/// Contains a list of current branches.
@@ -771,7 +770,7 @@ fn resolveValue(cg: *CodeGen, val: Value) InnerError!WValue {
/// NOTE: if result == .stack, it will be stored in .local
fn finishAir(cg: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []const Air.Inst.Ref) InnerError!void {
- assert(operands.len <= Liveness.bpi - 1);
+ assert(operands.len <= Air.Liveness.bpi - 1);
var tomb_bits = cg.liveness.getTombBits(inst);
for (operands) |operand| {
const dies = @as(u1, @truncate(tomb_bits)) != 0;
@@ -811,7 +810,7 @@ inline fn currentBranch(cg: *CodeGen) *Branch {
const BigTomb = struct {
gen: *CodeGen,
inst: Air.Inst.Index,
- lbt: Liveness.BigTomb,
+ lbt: Air.Liveness.BigTomb,
fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void {
const dies = bt.lbt.feed();
@@ -1262,7 +1261,7 @@ pub fn function(
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
) Error!Function {
const zcu = pt.zcu;
const gpa = zcu.gpa;
@@ -2123,7 +2122,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
continue;
}
const old_bookkeeping_value = cg.air_bookkeeping;
- try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, Liveness.bpi);
+ try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, Air.Liveness.bpi);
try cg.genInst(inst);
if (std.debug.runtime_safety and cg.air_bookkeeping < old_bookkeeping_value + 1) {
@@ -2217,7 +2216,7 @@ fn airCall(cg: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifie
if (modifier == .always_tail) return cg.fail("TODO implement tail calls for wasm", .{});
const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = cg.air.extraData(Air.Call, pl_op.payload);
- const args: []const Air.Inst.Ref = @ptrCast(cg.air.extra[extra.end..][0..extra.data.args_len]);
+ const args: []const Air.Inst.Ref = @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.args_len]);
const ty = cg.typeOf(pl_op.operand);
const pt = cg.pt;
@@ -3410,7 +3409,7 @@ fn emitUndefined(cg: *CodeGen, ty: Type) InnerError!WValue {
fn airBlock(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = cg.air.extraData(Air.Block, ty_pl.payload);
- try cg.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]));
+ try cg.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn lowerBlock(cg: *CodeGen, inst: Air.Inst.Index, block_ty: Type, body: []const Air.Inst.Index) InnerError!void {
@@ -3456,7 +3455,7 @@ fn endBlock(cg: *CodeGen) !void {
fn airLoop(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = cg.air.extraData(Air.Block, ty_pl.payload);
- const body: []const Air.Inst.Index = @ptrCast(cg.air.extra[loop.end..][0..loop.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[loop.end..][0..loop.data.body_len]);
// result type of loop is always 'noreturn', meaning we can always
// emit the wasm type 'block_empty'.
@@ -3475,8 +3474,8 @@ fn airCondBr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const condition = try cg.resolveInst(pl_op.operand);
const extra = cg.air.extraData(Air.CondBr, pl_op.payload);
- const then_body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end..][0..extra.data.then_body_len]);
- const else_body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
+ const then_body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.then_body_len]);
+ const else_body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const liveness_condbr = cg.liveness.getCondBr(inst);
// result type is always noreturn, so use `block_empty` as type.
@@ -5238,7 +5237,7 @@ fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const result_ty = cg.typeOfIndex(inst);
const len = @as(usize, @intCast(result_ty.arrayLen(zcu)));
- const elements = @as([]const Air.Inst.Ref, @ptrCast(cg.air.extra[ty_pl.payload..][0..len]));
+ const elements: []const Air.Inst.Ref = @ptrCast(cg.air.extra.items[ty_pl.payload..][0..len]);
const result: WValue = result_value: {
switch (result_ty.zigTypeTag(zcu)) {
@@ -5352,8 +5351,8 @@ fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
};
- if (elements.len <= Liveness.bpi - 1) {
- var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
+ if (elements.len <= Air.Liveness.bpi - 1) {
+ var buf = [1]Air.Inst.Ref{.none} ** (Air.Liveness.bpi - 1);
@memcpy(buf[0..elements.len], elements);
return cg.finishAir(inst, result, &buf);
}
@@ -6454,7 +6453,7 @@ fn airDbgInlineBlock(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = cg.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
// TODO
- try cg.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]));
+ try cg.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn airDbgVar(
@@ -6472,7 +6471,7 @@ fn airTry(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const err_union = try cg.resolveInst(pl_op.operand);
const extra = cg.air.extraData(Air.Try, pl_op.payload);
- const body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.body_len]);
const err_union_ty = cg.typeOf(pl_op.operand);
const result = try lowerTry(cg, inst, err_union, body, err_union_ty, false);
return cg.finishAir(inst, result, &.{pl_op.operand});
@@ -6483,7 +6482,7 @@ fn airTryPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = cg.air.extraData(Air.TryPtr, ty_pl.payload);
const err_union_ptr = try cg.resolveInst(extra.data.ptr);
- const body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.body_len]);
const err_union_ty = cg.typeOf(extra.data.ptr).childType(zcu);
const result = try lowerTry(cg, inst, err_union_ptr, body, err_union_ty, true);
return cg.finishAir(inst, result, &.{extra.data.ptr});
src/arch/x86_64/CodeGen.zig
@@ -10,7 +10,6 @@ const wip_mir_log = std.log.scoped(.wip_mir);
const Air = @import("../../Air.zig");
const Allocator = std.mem.Allocator;
const Emit = @import("Emit.zig");
-const Liveness = @import("../../Liveness.zig");
const Lower = @import("Lower.zig");
const Mir = @import("Mir.zig");
const Zcu = @import("../../Zcu.zig");
@@ -33,6 +32,11 @@ const FrameIndex = bits.FrameIndex;
const InnerError = codegen.CodeGenError || error{OutOfRegisters};
+pub const legalize_features: Air.Legalize.Features = .{
+ .remove_shift_vector_rhs_splat = false,
+ .reduce_one_elem_to_bitcast = true,
+};
+
/// Set this to `false` to uncover Sema OPV bugs.
/// https://github.com/ziglang/zig/issues/22419
const hack_around_sema_opv_bugs = true;
@@ -42,7 +46,7 @@ const err_ret_trace_index: Air.Inst.Index = @enumFromInt(std.math.maxInt(u32));
gpa: Allocator,
pt: Zcu.PerThread,
air: Air,
-liveness: Liveness,
+liveness: Air.Liveness,
bin_file: *link.File,
debug_output: link.File.DebugInfoOutput,
target: *const std.Target,
@@ -78,7 +82,7 @@ mir_table: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty,
/// which is a relative jump, based on the address following the reloc.
epilogue_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty,
-reused_operands: std.StaticBitSet(Liveness.bpi - 1) = undefined,
+reused_operands: std.StaticBitSet(Air.Liveness.bpi - 1) = undefined,
inst_tracking: InstTrackingMap = .empty,
// Key is the block instruction
@@ -859,7 +863,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
@@ -63335,7 +63339,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const block = cg.air.extraData(Air.Block, ty_pl.payload);
if (cg.debug_output != .none) try cg.asmPseudo(.pseudo_dbg_enter_block_none);
- try cg.lowerBlock(inst, @ptrCast(cg.air.extra[block.end..][0..block.data.body_len]));
+ try cg.lowerBlock(inst, @ptrCast(cg.air.extra.items[block.end..][0..block.data.body_len]));
if (cg.debug_output != .none) try cg.asmPseudo(.pseudo_dbg_leave_block_none);
},
.loop => if (use_old) try cg.airLoop(inst) else {
@@ -63346,7 +63350,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.target = @intCast(cg.mir_instructions.len),
});
defer assert(cg.loops.remove(inst));
- try cg.genBodyBlock(@ptrCast(cg.air.extra[block.end..][0..block.data.body_len]));
+ try cg.genBodyBlock(@ptrCast(cg.air.extra.items[block.end..][0..block.data.body_len]));
},
.repeat => if (use_old) try cg.airRepeat(inst) else {
const repeat = air_datas[@intFromEnum(inst)].repeat;
@@ -84360,7 +84364,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.ops = .pseudo_dbg_enter_inline_func,
.data = .{ .func = dbg_inline_block.data.func },
});
- try cg.lowerBlock(inst, @ptrCast(cg.air.extra[dbg_inline_block.end..][0..dbg_inline_block.data.body_len]));
+ try cg.lowerBlock(inst, @ptrCast(cg.air.extra.items[dbg_inline_block.end..][0..dbg_inline_block.data.body_len]));
if (cg.debug_output != .none) _ = try cg.addInst(.{
.tag = .pseudo,
.ops = .pseudo_dbg_leave_inline_func,
@@ -160620,7 +160624,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
var bt = cg.liveness.iterateBigTomb(inst);
switch (ip.indexToKey(agg_ty.toIntern())) {
inline .array_type, .vector_type => |sequence_type| {
- const elems: []const Air.Inst.Ref = @ptrCast(cg.air.extra[ty_pl.payload..][0..@intCast(sequence_type.len)]);
+ const elems: []const Air.Inst.Ref = @ptrCast(cg.air.extra.items[ty_pl.payload..][0..@intCast(sequence_type.len)]);
const elem_size = Type.fromInterned(sequence_type.child).abiSize(zcu);
var elem_disp: u31 = 0;
for (elems) |elem_ref| {
@@ -160638,7 +160642,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
},
.struct_type => {
const loaded_struct = ip.loadStructType(agg_ty.toIntern());
- const elems: []const Air.Inst.Ref = @ptrCast(cg.air.extra[ty_pl.payload..][0..loaded_struct.field_types.len]);
+ const elems: []const Air.Inst.Ref = @ptrCast(cg.air.extra.items[ty_pl.payload..][0..loaded_struct.field_types.len]);
switch (loaded_struct.layout) {
.auto, .@"extern" => {
for (elems, 0..) |elem_ref, field_index| {
@@ -160657,7 +160661,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
}
},
.tuple_type => |tuple_type| {
- const elems: []const Air.Inst.Ref = @ptrCast(cg.air.extra[ty_pl.payload..][0..tuple_type.types.len]);
+ const elems: []const Air.Inst.Ref = @ptrCast(cg.air.extra.items[ty_pl.payload..][0..tuple_type.types.len]);
var elem_disp: u31 = 0;
for (elems, 0..) |elem_ref, field_index| {
const elem_dies = bt.feed();
@@ -162630,7 +162634,7 @@ fn freeValue(self: *CodeGen, value: MCValue) !void {
}
}
-fn feed(self: *CodeGen, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) !void {
+fn feed(self: *CodeGen, bt: *Air.Liveness.BigTomb, operand: Air.Inst.Ref) !void {
if (bt.feed()) if (operand.toIndex()) |inst| try self.processDeath(inst);
}
@@ -162657,11 +162661,11 @@ fn finishAir(
self: *CodeGen,
inst: Air.Inst.Index,
result: MCValue,
- operands: [Liveness.bpi - 1]Air.Inst.Ref,
+ operands: [Air.Liveness.bpi - 1]Air.Inst.Ref,
) !void {
const tomb_bits = self.liveness.getTombBits(inst);
for (0.., operands) |op_index, op| {
- if (tomb_bits & @as(Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
+ if (tomb_bits & @as(Air.Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
if (self.reused_operands.isSet(op_index)) continue;
try self.processDeath(op.toIndexAllowNone() orelse continue);
}
@@ -167965,7 +167969,7 @@ fn reuseOperand(
self: *CodeGen,
inst: Air.Inst.Index,
operand: Air.Inst.Ref,
- op_index: Liveness.OperandInt,
+ op_index: Air.Liveness.OperandInt,
mcv: MCValue,
) bool {
return self.reuseOperandAdvanced(inst, operand, op_index, mcv, inst);
@@ -167975,7 +167979,7 @@ fn reuseOperandAdvanced(
self: *CodeGen,
inst: Air.Inst.Index,
operand: Air.Inst.Ref,
- op_index: Liveness.OperandInt,
+ op_index: Air.Liveness.OperandInt,
mcv: MCValue,
maybe_tracked_inst: ?Air.Inst.Index,
) bool {
@@ -172435,7 +172439,7 @@ fn airCall(self: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const arg_refs: []const Air.Inst.Ref =
- @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
+ @ptrCast(self.air.extra.items[extra.end..][0..extra.data.args_len]);
const ExpectedContents = extern struct {
tys: [16][@sizeOf(Type)]u8 align(@alignOf(Type)),
@@ -173349,7 +173353,7 @@ fn airCmpLtErrorsLen(self: *CodeGen, inst: Air.Inst.Index) !void {
fn airTry(self: *CodeGen, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Try, pl_op.payload);
- const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const operand_ty = self.typeOf(pl_op.operand);
const result = try self.genTry(inst, pl_op.operand, body, operand_ty, false);
return self.finishAir(inst, result, .{ .none, .none, .none });
@@ -173358,7 +173362,7 @@ fn airTry(self: *CodeGen, inst: Air.Inst.Index) !void {
fn airTryPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
- const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const operand_ty = self.typeOf(extra.data.ptr);
const result = try self.genTry(inst, extra.data.ptr, body, operand_ty, true);
return self.finishAir(inst, result, .{ .none, .none, .none });
@@ -173449,9 +173453,9 @@ fn airCondBr(self: *CodeGen, inst: Air.Inst.Index) !void {
const cond_ty = self.typeOf(pl_op.operand);
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
const then_body: []const Air.Inst.Index =
- @ptrCast(self.air.extra[extra.end..][0..extra.data.then_body_len]);
+ @ptrCast(self.air.extra.items[extra.end..][0..extra.data.then_body_len]);
const else_body: []const Air.Inst.Index =
- @ptrCast(self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
+ @ptrCast(self.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const liveness_cond_br = self.liveness.getCondBr(inst);
// If the condition dies here in this condbr instruction, process
@@ -173838,7 +173842,7 @@ fn airLoop(self: *CodeGen, inst: Air.Inst.Index) !void {
// A loop is a setup to be able to jump back to the beginning.
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
- const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[loop.end..][0..loop.data.body_len]);
const state = try self.saveState();
@@ -174469,9 +174473,9 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
const clobbers_len: u31 = @truncate(extra.data.flags);
var extra_i: usize = extra.end;
- const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]);
+ const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
- const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]);
+ const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
var result: MCValue = .none;
@@ -174489,8 +174493,8 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
var outputs_extra_i = extra_i;
for (outputs) |output| {
- const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
- const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
+ const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
@@ -174575,7 +174579,7 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
}
for (inputs) |input| {
- const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
+ const input_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(input_bytes, 0);
const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
@@ -174663,7 +174667,7 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
{
var clobber_i: u32 = 0;
while (clobber_i < clobbers_len) : (clobber_i += 1) {
- const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[extra_i..]), 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += clobber.len / 4 + 1;
@@ -174719,7 +174723,7 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
labels.deinit(self.gpa);
}
- const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];
+ const asm_source = std.mem.sliceAsBytes(self.air.extra.items[extra_i..])[0..extra.data.source_len];
var line_it = std.mem.tokenizeAny(u8, asm_source, "\n\r;");
next_line: while (line_it.next()) |line| {
var mnem_it = std.mem.tokenizeAny(u8, line, " \t");
@@ -175131,9 +175135,9 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
return self.fail("undefined label: '{s}'", .{label.key_ptr.*});
for (outputs, args.items[0..outputs.len]) |output, arg_mcv| {
- const extra_bytes = std.mem.sliceAsBytes(self.air.extra[outputs_extra_i..]);
+ const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[outputs_extra_i..]);
const constraint =
- std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[outputs_extra_i..]), 0);
+ std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[outputs_extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
@@ -175146,7 +175150,7 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
}
simple: {
- var buf: [Liveness.bpi - 1]Air.Inst.Ref = @splat(.none);
+ var buf: [Air.Liveness.bpi - 1]Air.Inst.Ref = @splat(.none);
var buf_index: usize = 0;
for (outputs) |output| {
if (output == .none) continue;
@@ -179659,7 +179663,7 @@ fn airAggregateInit(self: *CodeGen, inst: Air.Inst.Index) !void {
const result_ty = self.typeOfIndex(inst);
const len: usize = @intCast(result_ty.arrayLen(zcu));
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
+ const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[ty_pl.payload..][0..len]);
const result: MCValue = result: {
switch (result_ty.zigTypeTag(zcu)) {
.@"struct" => {
@@ -179823,8 +179827,8 @@ fn airAggregateInit(self: *CodeGen, inst: Air.Inst.Index) !void {
}
};
- if (elements.len <= Liveness.bpi - 1) {
- var buf: [Liveness.bpi - 1]Air.Inst.Ref = @splat(.none);
+ if (elements.len <= Air.Liveness.bpi - 1) {
+ var buf: [Air.Liveness.bpi - 1]Air.Inst.Ref = @splat(.none);
@memcpy(buf[0..elements.len], elements);
return self.finishAir(inst, result, buf);
}
@@ -186387,7 +186391,7 @@ const Temp = struct {
for (0.., op_refs, op_temps) |op_index, op_ref, op_temp| {
if (op_temp.index == temp.index) continue;
if (op_temp.tracking(cg).short != .dead) try op_temp.die(cg);
- if (tomb_bits & @as(Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
+ if (tomb_bits & @as(Air.Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
if (cg.reused_operands.isSet(op_index)) continue;
try cg.processDeath(op_ref.toIndexAllowNone() orelse continue);
}
@@ -186407,7 +186411,7 @@ const Temp = struct {
}
for (0.., op_refs, op_temps) |op_index, op_ref, op_temp| {
if (op_temp.index != temp.index) continue;
- if (tomb_bits & @as(Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
+ if (tomb_bits & @as(Air.Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
if (cg.reused_operands.isSet(op_index)) continue;
try cg.processDeath(op_ref.toIndexAllowNone() orelse continue);
}
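The asm lowerings above all step through NUL-terminated constraint and clobber strings stored in the u32 `extra` array with the stride `len / 4 + 1`, which the repeated comment explains: even a string of exactly 4 bytes needs the next word for its terminator. A standalone arithmetic check of that stride, not part of the commit:

    const std = @import("std");

    // A NUL-terminated string of `len` bytes packed into u32 words occupies
    // len/4 + 1 words, i.e. ceil((len + 1) / 4): the terminator always claims
    // a byte, spilling into an extra word when len is a multiple of 4.
    fn wordsForString(len: usize) usize {
        return len / 4 + 1;
    }

    test "string word accounting" {
        for (0..32) |len| {
            const words = std.math.divCeil(usize, len + 1, 4) catch unreachable;
            try std.testing.expectEqual(words, wordsForString(len));
        }
    }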
src/codegen/c.zig
@@ -14,7 +14,6 @@ const C = link.File.C;
const Decl = Zcu.Decl;
const trace = @import("../tracy.zig").trace;
const Air = @import("../Air.zig");
-const Liveness = @import("../Liveness.zig");
const InternPool = @import("../InternPool.zig");
const Alignment = InternPool.Alignment;
@@ -356,7 +355,7 @@ pub fn isMangledIdent(ident: []const u8, solo: bool) bool {
/// It is not available when generating .h file.
pub const Function = struct {
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
value_map: CValueMap,
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty,
next_arg_index: u32 = 0,
@@ -2323,9 +2322,9 @@ pub const DeclGen = struct {
const pt = dg.pt;
const zcu = pt.zcu;
- const int_info = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else std.builtin.Type.Int{
+ const int_info: std.builtin.Type.Int = if (ty.isAbiInt(zcu)) ty.intInfo(zcu) else .{
.signedness = .unsigned,
- .bits = @as(u16, @intCast(ty.bitSize(zcu))),
+ .bits = @intCast(ty.bitSize(zcu)),
};
if (is_big) try writer.print(", {}", .{int_info.signedness == .signed});
@@ -3179,7 +3178,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con
// Remember how many locals there were before entering the body so that we can free any that
// were newly introduced. Any new locals must necessarily be logically free after the then
// branch is complete.
- const pre_locals_len = @as(LocalIndex, @intCast(f.locals.items.len));
+ const pre_locals_len: LocalIndex = @intCast(f.locals.items.len);
for (leading_deaths) |death| {
try die(f, inst, death.toRef());
@@ -4540,7 +4539,7 @@ fn airCall(
const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = f.air.extraData(Air.Call, pl_op.payload);
- const args = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra.end..][0..extra.data.args_len]));
+ const args: []const Air.Inst.Ref = @ptrCast(f.air.extra.items[extra.end..][0..extra.data.args_len]);
const resolved_args = try gpa.alloc(CValue, args.len);
defer gpa.free(resolved_args);
@@ -4708,7 +4707,7 @@ fn airDbgInlineBlock(f: *Function, inst: Air.Inst.Index) !CValue {
const owner_nav = ip.getNav(zcu.funcInfo(extra.data.func).owner_nav);
const writer = f.object.writer();
try writer.print("/* inline:{} */\n", .{owner_nav.fqn.fmt(&zcu.intern_pool)});
- return lowerBlock(f, inst, @ptrCast(f.air.extra[extra.end..][0..extra.data.body_len]));
+ return lowerBlock(f, inst, @ptrCast(f.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue {
@@ -4729,7 +4728,7 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue {
fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.Block, ty_pl.payload);
- return lowerBlock(f, inst, @ptrCast(f.air.extra[extra.end..][0..extra.data.body_len]));
+ return lowerBlock(f, inst, @ptrCast(f.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index) !CValue {
@@ -4781,7 +4780,7 @@ fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index)
fn airTry(f: *Function, inst: Air.Inst.Index) !CValue {
const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = f.air.extraData(Air.Try, pl_op.payload);
- const body: []const Air.Inst.Index = @ptrCast(f.air.extra[extra.end..][0..extra.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(f.air.extra.items[extra.end..][0..extra.data.body_len]);
const err_union_ty = f.typeOf(pl_op.operand);
return lowerTry(f, inst, pl_op.operand, body, err_union_ty, false);
}
@@ -4791,7 +4790,7 @@ fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.TryPtr, ty_pl.payload);
- const body: []const Air.Inst.Index = @ptrCast(f.air.extra[extra.end..][0..extra.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(f.air.extra.items[extra.end..][0..extra.data.body_len]);
const err_union_ty = f.typeOf(extra.data.ptr).childType(zcu);
return lowerTry(f, inst, extra.data.ptr, body, err_union_ty, true);
}
@@ -5100,7 +5099,7 @@ fn airUnreach(f: *Function) !void {
fn airLoop(f: *Function, inst: Air.Inst.Index) !void {
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = f.air.extraData(Air.Block, ty_pl.payload);
- const body: []const Air.Inst.Index = @ptrCast(f.air.extra[loop.end..][0..loop.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(f.air.extra.items[loop.end..][0..loop.data.body_len]);
const writer = f.object.writer();
// `repeat` instructions matching this loop will branch to
@@ -5116,8 +5115,8 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !void {
const cond = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{pl_op.operand});
const extra = f.air.extraData(Air.CondBr, pl_op.payload);
- const then_body: []const Air.Inst.Index = @ptrCast(f.air.extra[extra.end..][0..extra.data.then_body_len]);
- const else_body: []const Air.Inst.Index = @ptrCast(f.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
+ const then_body: []const Air.Inst.Index = @ptrCast(f.air.extra.items[extra.end..][0..extra.data.then_body_len]);
+ const else_body: []const Air.Inst.Index = @ptrCast(f.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const liveness_condbr = f.liveness.getCondBr(inst);
const writer = f.object.writer();
@@ -5322,12 +5321,12 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
- const clobbers_len = @as(u31, @truncate(extra.data.flags));
+ const clobbers_len: u31 = @truncate(extra.data.flags);
const gpa = f.object.dg.gpa;
var extra_i: usize = extra.end;
- const outputs = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra_i..][0..extra.data.outputs_len]));
+ const outputs: []const Air.Inst.Ref = @ptrCast(f.air.extra.items[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
- const inputs = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[extra_i..][0..extra.data.inputs_len]));
+ const inputs: []const Air.Inst.Ref = @ptrCast(f.air.extra.items[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
const result = result: {
@@ -5347,10 +5346,10 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
break :local inst_local;
} else .none;
- const locals_begin = @as(LocalIndex, @intCast(f.locals.items.len));
+ const locals_begin: LocalIndex = @intCast(f.locals.items.len);
const constraints_extra_begin = extra_i;
for (outputs) |output| {
- const extra_bytes = mem.sliceAsBytes(f.air.extra[extra_i..]);
+ const extra_bytes = mem.sliceAsBytes(f.air.extra.items[extra_i..]);
const constraint = mem.sliceTo(extra_bytes, 0);
const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
@@ -5384,7 +5383,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
}
}
for (inputs) |input| {
- const extra_bytes = mem.sliceAsBytes(f.air.extra[extra_i..]);
+ const extra_bytes = mem.sliceAsBytes(f.air.extra.items[extra_i..]);
const constraint = mem.sliceTo(extra_bytes, 0);
const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
@@ -5419,14 +5418,14 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
}
}
for (0..clobbers_len) |_| {
- const clobber = mem.sliceTo(mem.sliceAsBytes(f.air.extra[extra_i..]), 0);
+ const clobber = mem.sliceTo(mem.sliceAsBytes(f.air.extra.items[extra_i..]), 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += clobber.len / 4 + 1;
}
{
- const asm_source = mem.sliceAsBytes(f.air.extra[extra_i..])[0..extra.data.source_len];
+ const asm_source = mem.sliceAsBytes(f.air.extra.items[extra_i..])[0..extra.data.source_len];
var stack = std.heap.stackFallback(256, f.object.dg.gpa);
const allocator = stack.get();
@@ -5484,7 +5483,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
var locals_index = locals_begin;
try writer.writeByte(':');
for (outputs, 0..) |output, index| {
- const extra_bytes = mem.sliceAsBytes(f.air.extra[extra_i..]);
+ const extra_bytes = mem.sliceAsBytes(f.air.extra.items[extra_i..]);
const constraint = mem.sliceTo(extra_bytes, 0);
const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
@@ -5508,7 +5507,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
}
try writer.writeByte(':');
for (inputs, 0..) |input, index| {
- const extra_bytes = mem.sliceAsBytes(f.air.extra[extra_i..]);
+ const extra_bytes = mem.sliceAsBytes(f.air.extra.items[extra_i..]);
const constraint = mem.sliceTo(extra_bytes, 0);
const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
@@ -5531,7 +5530,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
}
try writer.writeByte(':');
for (0..clobbers_len) |clobber_i| {
- const clobber = mem.sliceTo(mem.sliceAsBytes(f.air.extra[extra_i..]), 0);
+ const clobber = mem.sliceTo(mem.sliceAsBytes(f.air.extra.items[extra_i..]), 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += clobber.len / 4 + 1;
@@ -5546,7 +5545,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
extra_i = constraints_extra_begin;
locals_index = locals_begin;
for (outputs) |output| {
- const extra_bytes = mem.sliceAsBytes(f.air.extra[extra_i..]);
+ const extra_bytes = mem.sliceAsBytes(f.air.extra.items[extra_i..]);
const constraint = mem.sliceTo(extra_bytes, 0);
const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
@@ -6725,7 +6724,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_mat = try Materialize.start(f, inst, ty, operand);
try reap(f, inst, &.{ pl_op.operand, extra.operand });
- const repr_bits = @as(u16, @intCast(ty.abiSize(zcu) * 8));
+ const repr_bits: u16 = @intCast(ty.abiSize(zcu) * 8);
const is_float = ty.isRuntimeFloat();
const is_128 = repr_bits == 128;
const repr_ty = if (is_float) pt.intType(.unsigned, repr_bits) catch unreachable else ty;
@@ -7325,8 +7324,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
const ip = &zcu.intern_pool;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const inst_ty = f.typeOfIndex(inst);
- const len = @as(usize, @intCast(inst_ty.arrayLen(zcu)));
- const elements = @as([]const Air.Inst.Ref, @ptrCast(f.air.extra[ty_pl.payload..][0..len]));
+ const len: usize = @intCast(inst_ty.arrayLen(zcu));
+ const elements: []const Air.Inst.Ref = @ptrCast(f.air.extra.items[ty_pl.payload..][0..len]);
const gpa = f.object.dg.gpa;
const resolved_elements = try gpa.alloc(CValue, elements.len);
defer gpa.free(resolved_elements);
@@ -7830,7 +7829,7 @@ fn IndentWriter(comptime UnderlyingWriter: type) type {
}
pub fn write(self: *Self, bytes: []const u8) Error!usize {
- if (bytes.len == 0) return @as(usize, 0);
+ if (bytes.len == 0) return 0;
const current_indent = self.indent_count * Self.indent_delta;
if (self.current_line_empty and current_indent > 0) {
@@ -7860,7 +7859,7 @@ fn IndentWriter(comptime UnderlyingWriter: type) type {
}
fn writeNoIndent(self: *Self, bytes: []const u8) Error!usize {
- if (bytes.len == 0) return @as(usize, 0);
+ if (bytes.len == 0) return 0;
try self.underlying_writer.writeAll(bytes);
if (bytes[bytes.len - 1] == '\n') {
@@ -8048,7 +8047,7 @@ fn fmtStringLiteral(str: []const u8, sentinel: ?u8) std.fmt.Formatter(formatStri
fn undefPattern(comptime IntType: type) IntType {
const int_info = @typeInfo(IntType).int;
const UnsignedType = std.meta.Int(.unsigned, int_info.bits);
- return @as(IntType, @bitCast(@as(UnsignedType, (1 << (int_info.bits | 1)) / 3)));
+ return @bitCast(@as(UnsignedType, (1 << (int_info.bits | 1)) / 3));
}
const FormatIntLiteralContext = struct {
@@ -8188,9 +8187,9 @@ fn formatIntLiteral(
wrap.len = wrap.limbs.len;
const limbs_per_c_limb = @divExact(wrap.len, c_limb_info.count);
- var c_limb_int_info = std.builtin.Type.Int{
+ var c_limb_int_info: std.builtin.Type.Int = .{
.signedness = undefined,
- .bits = @as(u16, @intCast(@divExact(c_bits, c_limb_info.count))),
+ .bits = @intCast(@divExact(c_bits, c_limb_info.count)),
};
var c_limb_ctype: CType = undefined;
@@ -8349,7 +8348,7 @@ fn lowersToArray(ty: Type, pt: Zcu.PerThread) bool {
}
fn reap(f: *Function, inst: Air.Inst.Index, operands: []const Air.Inst.Ref) !void {
- assert(operands.len <= Liveness.bpi - 1);
+ assert(operands.len <= Air.Liveness.bpi - 1);
var tomb_bits = f.liveness.getTombBits(inst);
for (operands) |operand| {
const dies = @as(u1, @truncate(tomb_bits)) != 0;
@@ -8400,7 +8399,7 @@ fn freeLocal(f: *Function, inst: ?Air.Inst.Index, local_index: LocalIndex, ref_i
const BigTomb = struct {
f: *Function,
inst: Air.Inst.Index,
- lbt: Liveness.BigTomb,
+ lbt: Air.Liveness.BigTomb,
fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) !void {
const dies = bt.lbt.feed();
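The `finishAir`/`reap` bookkeeping seen in these backends packs one death bit per operand plus a final bit for the instruction's own result; the `[bpi - 1]Air.Inst.Ref` buffers and the literal `.{ .none, .none, .none }` arguments imply `bpi` is 4. A standalone sketch of that bit layout, with the mask value and the local `Bpi` type invented for illustration:

    const std = @import("std");

    const bpi = 4; // three operand bits plus one bit for the result
    const Bpi = std.meta.Int(.unsigned, bpi);

    test "tomb bit layout" {
        // Invented example: operands 0 and 2 die, and the high bit marks the
        // instruction's own result as unused, mirroring the finishAir checks.
        const tomb_bits: Bpi = 0b1101;
        var dead_ops: usize = 0;
        for (0..bpi - 1) |op_index| {
            if (tomb_bits & @as(Bpi, 1) << @intCast(op_index) != 0) dead_ops += 1;
        }
        try std.testing.expectEqual(@as(usize, 2), dead_ops);
        try std.testing.expect(tomb_bits & 1 << (bpi - 1) != 0);
    }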
src/codegen/llvm.zig
@@ -18,7 +18,6 @@ const Zcu = @import("../Zcu.zig");
const InternPool = @import("../InternPool.zig");
const Package = @import("../Package.zig");
const Air = @import("../Air.zig");
-const Liveness = @import("../Liveness.zig");
const Value = @import("../Value.zig");
const Type = @import("../Type.zig");
const x86_64_abi = @import("../arch/x86_64/abi.zig");
@@ -1121,7 +1120,7 @@ pub const Object = struct {
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
) !void {
assert(std.meta.eql(pt, o.pt));
const zcu = pt.zcu;
@@ -4616,7 +4615,7 @@ pub const FuncGen = struct {
gpa: Allocator,
ng: *NavGen,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
wip: Builder.WipFunction,
is_naked: bool,
fuzz: ?Fuzz,
@@ -5183,7 +5182,7 @@ pub const FuncGen = struct {
fn airCall(self: *FuncGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !Builder.Value {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
- const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
+ const args: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.args_len]);
const o = self.ng.object;
const pt = o.pt;
const zcu = pt.zcu;
@@ -5856,7 +5855,7 @@ pub const FuncGen = struct {
fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
- return self.lowerBlock(inst, null, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
+ return self.lowerBlock(inst, null, @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn lowerBlock(
@@ -6140,8 +6139,8 @@ pub const FuncGen = struct {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond = try self.resolveInst(pl_op.operand);
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
- const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.then_body_len]);
- const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
+ const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.then_body_len]);
+ const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const Hint = enum {
none,
@@ -6205,7 +6204,7 @@ pub const FuncGen = struct {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const err_union = try self.resolveInst(pl_op.operand);
const extra = self.air.extraData(Air.Try, pl_op.payload);
- const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const err_union_ty = self.typeOf(pl_op.operand);
const payload_ty = self.typeOfIndex(inst);
const can_elide_load = if (isByRef(payload_ty, zcu)) self.canElideLoad(body_tail) else false;
@@ -6219,7 +6218,7 @@ pub const FuncGen = struct {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
const err_union_ptr = try self.resolveInst(extra.data.ptr);
- const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const err_union_ty = self.typeOf(extra.data.ptr).childType(zcu);
const is_unused = self.liveness.isUnused(inst);
@@ -6550,7 +6549,7 @@ pub const FuncGen = struct {
fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
- const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[loop.end..][0..loop.data.body_len]);
const loop_block = try self.wip.block(1, "Loop"); // `airRepeat` will increment incoming each time
_ = try self.wip.br(loop_block);
@@ -7076,7 +7075,7 @@ pub const FuncGen = struct {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
self.arg_inline_index = 0;
- return self.lowerBlock(inst, extra.data.func, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
+ return self.lowerBlock(inst, extra.data.func, @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
@@ -7201,9 +7200,9 @@ pub const FuncGen = struct {
const clobbers_len: u31 = @truncate(extra.data.flags);
var extra_i: usize = extra.end;
- const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]);
+ const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
- const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]);
+ const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
var llvm_constraints: std.ArrayListUnmanaged(u8) = .empty;
@@ -7239,8 +7238,8 @@ pub const FuncGen = struct {
var rw_extra_i = extra_i;
for (outputs, llvm_ret_indirect, llvm_rw_vals) |output, *is_indirect, *llvm_rw_val| {
- const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
- const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
+ const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
@@ -7320,7 +7319,7 @@ pub const FuncGen = struct {
}
for (inputs) |input| {
- const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
+ const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
@@ -7385,8 +7384,8 @@ pub const FuncGen = struct {
}
for (outputs, llvm_ret_indirect, llvm_rw_vals, 0..) |output, is_indirect, llvm_rw_val, output_index| {
- const extra_bytes = std.mem.sliceAsBytes(self.air.extra[rw_extra_i..]);
- const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[rw_extra_i..]), 0);
+ const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[rw_extra_i..]);
+ const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[rw_extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
@@ -7425,7 +7424,7 @@ pub const FuncGen = struct {
{
var clobber_i: u32 = 0;
while (clobber_i < clobbers_len) : (clobber_i += 1) {
- const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[extra_i..]), 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += clobber.len / 4 + 1;
@@ -7465,7 +7464,7 @@ pub const FuncGen = struct {
else => {},
}
- const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];
+ const asm_source = std.mem.sliceAsBytes(self.air.extra.items[extra_i..])[0..extra.data.source_len];
// hackety hacks until stage2 has proper inline asm in the frontend.
var rendered_template = std.ArrayList(u8).init(self.gpa);
@@ -10628,7 +10627,7 @@ pub const FuncGen = struct {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const result_ty = self.typeOfIndex(inst);
const len: usize = @intCast(result_ty.arrayLen(zcu));
- const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
+ const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[ty_pl.payload..][0..len]);
const llvm_result_ty = try o.lowerType(result_ty);
switch (result_ty.zigTypeTag(zcu)) {
src/codegen/spirv.zig
@@ -10,7 +10,6 @@ const Decl = Zcu.Decl;
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const Air = @import("../Air.zig");
-const Liveness = @import("../Liveness.zig");
const InternPool = @import("../InternPool.zig");
const spec = @import("spirv/spec.zig");
@@ -195,7 +194,7 @@ pub const Object = struct {
pt: Zcu.PerThread,
nav_index: InternPool.Nav.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
do_codegen: bool,
) !void {
const zcu = pt.zcu;
@@ -242,7 +241,7 @@ pub const Object = struct {
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
) !void {
const nav = pt.zcu.funcInfo(func_index).owner_nav;
// TODO: Separate types for generating decls and functions?
@@ -303,7 +302,7 @@ const NavGen = struct {
/// The liveness analysis of the intermediate code for the declaration we are currently generating.
/// Note: If the declaration is not a function, this value will be undefined!
- liveness: Liveness,
+ liveness: Air.Liveness,
/// An array of function argument result-ids. Each index corresponds with the
/// function argument of the same index.
@@ -4627,7 +4626,7 @@ const NavGen = struct {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const result_ty = self.typeOfIndex(inst);
const len: usize = @intCast(result_ty.arrayLen(zcu));
- const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
+ const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[ty_pl.payload..][0..len]);
switch (result_ty.zigTypeTag(zcu)) {
.@"struct" => {
@@ -5474,7 +5473,7 @@ const NavGen = struct {
fn airBlock(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
const inst_datas = self.air.instructions.items(.data);
const extra = self.air.extraData(Air.Block, inst_datas[@intFromEnum(inst)].ty_pl.payload);
- return self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
+ return self.lowerBlock(inst, @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn lowerBlock(self: *NavGen, inst: Air.Inst.Index, body: []const Air.Inst.Index) !?IdRef {
@@ -5657,8 +5656,8 @@ const NavGen = struct {
fn airCondBr(self: *NavGen, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond_br = self.air.extraData(Air.CondBr, pl_op.payload);
- const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra[cond_br.end..][0..cond_br.data.then_body_len]);
- const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra[cond_br.end + then_body.len ..][0..cond_br.data.else_body_len]);
+ const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[cond_br.end..][0..cond_br.data.then_body_len]);
+ const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[cond_br.end + then_body.len ..][0..cond_br.data.else_body_len]);
const condition_id = try self.resolve(pl_op.operand);
const then_label = self.spv.allocId();
@@ -5717,7 +5716,7 @@ const NavGen = struct {
fn airLoop(self: *NavGen, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
- const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[loop.end..][0..loop.data.body_len]);
const body_label = self.spv.allocId();
@@ -5837,7 +5836,7 @@ const NavGen = struct {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const err_union_id = try self.resolve(pl_op.operand);
const extra = self.air.extraData(Air.Try, pl_op.payload);
- const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const err_union_ty = self.typeOf(pl_op.operand);
const payload_ty = self.typeOfIndex(inst);
@@ -6344,7 +6343,7 @@ const NavGen = struct {
const old_base_line = self.base_line;
defer self.base_line = old_base_line;
self.base_line = zcu.navSrcLine(zcu.funcInfo(extra.data.func).owner_nav);
- return self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
+ return self.lowerBlock(inst, @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]));
}
fn airDbgVar(self: *NavGen, inst: Air.Inst.Index) !void {
@@ -6365,9 +6364,9 @@ const NavGen = struct {
if (!is_volatile and self.liveness.isUnused(inst)) return null;
var extra_i: usize = extra.end;
- const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]);
+ const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..extra.data.outputs_len]);
extra_i += outputs.len;
- const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]);
+ const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
if (outputs.len > 1) {
@@ -6386,15 +6385,15 @@ const NavGen = struct {
if (output != .none) {
return self.todo("implement inline asm with non-returned output", .{});
}
- const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
- const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
+ const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
// TODO: Record output and use it somewhere.
}
for (inputs) |input| {
- const extra_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]);
+ const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
@@ -6461,13 +6460,13 @@ const NavGen = struct {
{
var clobber_i: u32 = 0;
while (clobber_i < clobbers_len) : (clobber_i += 1) {
- const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0);
+ const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[extra_i..]), 0);
extra_i += clobber.len / 4 + 1;
// TODO: Record clobber and use it somewhere.
}
}
- const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];
+ const asm_source = std.mem.sliceAsBytes(self.air.extra.items[extra_i..])[0..extra.data.source_len];
as.assemble(asm_source) catch |err| switch (err) {
error.AssembleFail => {
@@ -6501,8 +6500,8 @@ const NavGen = struct {
for (outputs) |output| {
_ = output;
- const extra_bytes = std.mem.sliceAsBytes(self.air.extra[output_extra_i..]);
- const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[output_extra_i..]), 0);
+ const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[output_extra_i..]);
+ const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[output_extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
output_extra_i += (constraint.len + name.len + (2 + 3)) / 4;
@@ -6531,7 +6530,7 @@ const NavGen = struct {
const zcu = pt.zcu;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
- const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]);
+ const args: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.args_len]);
const callee_ty = self.typeOf(pl_op.operand);
const zig_fn_ty = switch (callee_ty.zigTypeTag(zcu)) {
.@"fn" => callee_ty,
src/link/Elf/ZigObject.zig
@@ -1416,7 +1416,7 @@ pub fn updateFunc(
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
) link.File.UpdateNavError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -2367,7 +2367,6 @@ const Dwarf = @import("../Dwarf.zig");
const Elf = @import("../Elf.zig");
const File = @import("file.zig").File;
const InternPool = @import("../../InternPool.zig");
-const Liveness = @import("../../Liveness.zig");
const Zcu = @import("../../Zcu.zig");
const Object = @import("Object.zig");
const Symbol = @import("Symbol.zig");
src/link/MachO/ZigObject.zig
@@ -778,7 +778,7 @@ pub fn updateFunc(
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
) link.File.UpdateNavError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -1820,7 +1820,6 @@ const Atom = @import("Atom.zig");
const Dwarf = @import("../Dwarf.zig");
const File = @import("file.zig").File;
const InternPool = @import("../../InternPool.zig");
-const Liveness = @import("../../Liveness.zig");
const MachO = @import("../MachO.zig");
const Nlist = Object.Nlist;
const Zcu = @import("../../Zcu.zig");
src/link/C.zig
@@ -18,7 +18,6 @@ const trace = @import("../tracy.zig").trace;
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const Air = @import("../Air.zig");
-const Liveness = @import("../Liveness.zig");
pub const zig_h = "#include \"zig.h\"\n";
@@ -180,7 +179,7 @@ pub fn updateFunc(
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
src/link/Coff.zig
@@ -1098,7 +1098,7 @@ pub fn updateFunc(
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .coff) {
@panic("Attempted to compile for object format that was disabled by build configuration");
@@ -3802,7 +3802,6 @@ const trace = @import("../tracy.zig").trace;
const Air = @import("../Air.zig");
const Compilation = @import("../Compilation.zig");
-const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Zcu = @import("../Zcu.zig");
const InternPool = @import("../InternPool.zig");
src/link/Elf.zig
@@ -2385,7 +2385,7 @@ pub fn updateFunc(
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
@@ -5323,7 +5323,6 @@ const GotSection = synthetic_sections.GotSection;
const GotPltSection = synthetic_sections.GotPltSection;
const HashSection = synthetic_sections.HashSection;
const LinkerDefined = @import("Elf/LinkerDefined.zig");
-const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Zcu = @import("../Zcu.zig");
const Object = @import("Elf/Object.zig");
src/link/Goff.zig
@@ -17,7 +17,6 @@ const link = @import("../link.zig");
const trace = @import("../tracy.zig").trace;
const build_options = @import("build_options");
const Air = @import("../Air.zig");
-const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
base: link.File,
@@ -79,7 +78,7 @@ pub fn updateFunc(
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .goff)
@panic("Attempted to compile for object format that was disabled by build configuration");
src/link/MachO.zig
@@ -3074,7 +3074,7 @@ pub fn updateFunc(
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
@@ -5496,7 +5496,6 @@ const ObjcStubsSection = synthetic.ObjcStubsSection;
const Object = @import("MachO/Object.zig");
const LazyBind = bind.LazyBind;
const LaSymbolPtrSection = synthetic.LaSymbolPtrSection;
-const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Md5 = std.crypto.hash.Md5;
const Zcu = @import("../Zcu.zig");
src/link/Plan9.zig
@@ -12,7 +12,6 @@ const trace = @import("../tracy.zig").trace;
const File = link.File;
const build_options = @import("build_options");
const Air = @import("../Air.zig");
-const Liveness = @import("../Liveness.zig");
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const AnalUnit = InternPool.AnalUnit;
@@ -389,7 +388,7 @@ pub fn updateFunc(
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .plan9) {
@panic("Attempted to compile for object format that was disabled by build configuration");
src/link/SpirV.zig
@@ -36,7 +36,6 @@ const codegen = @import("../codegen/spirv.zig");
const trace = @import("../tracy.zig").trace;
const build_options = @import("build_options");
const Air = @import("../Air.zig");
-const Liveness = @import("../Liveness.zig");
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
@@ -118,7 +117,7 @@ pub fn updateFunc(
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
src/link/Wasm.zig
@@ -36,7 +36,6 @@ const abi = @import("../arch/wasm/abi.zig");
const Compilation = @import("../Compilation.zig");
const Dwarf = @import("Dwarf.zig");
const InternPool = @import("../InternPool.zig");
-const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Zcu = @import("../Zcu.zig");
const codegen = @import("../codegen.zig");
@@ -3193,7 +3192,7 @@ pub fn deinit(wasm: *Wasm) void {
wasm.missing_exports.deinit(gpa);
}
-pub fn updateFunc(wasm: *Wasm, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(wasm: *Wasm, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Air.Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
src/link/Xcoff.zig
@@ -17,7 +17,6 @@ const link = @import("../link.zig");
const trace = @import("../tracy.zig").trace;
const build_options = @import("build_options");
const Air = @import("../Air.zig");
-const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
base: link.File,
@@ -79,7 +78,7 @@ pub fn updateFunc(
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .xcoff)
@panic("Attempted to compile for object format that was disabled by build configuration");
src/Zcu/PerThread.zig
@@ -16,7 +16,6 @@ const dev = @import("../dev.zig");
const InternPool = @import("../InternPool.zig");
const AnalUnit = InternPool.AnalUnit;
const introspect = @import("../introspect.zig");
-const Liveness = @import("../Liveness.zig");
const log = std.log.scoped(.zcu);
const Module = @import("../Package.zig").Module;
const Sema = @import("../Sema.zig");
@@ -1721,34 +1720,43 @@ fn analyzeFuncBody(
/// Takes ownership of `air`, even on error.
/// If any types referenced by `air` are unresolved, marks the codegen as failed.
-pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Air) Allocator.Error!void {
+pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) Allocator.Error!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const comp = zcu.comp;
- defer {
- var air_mut = air;
- air_mut.deinit(gpa);
- }
-
const func = zcu.funcInfo(func_index);
const nav_index = func.owner_nav;
const nav = ip.getNav(nav_index);
- var liveness = try Liveness.analyze(gpa, air, ip);
+ const codegen_prog_node = zcu.codegen_prog_node.start(nav.fqn.toSlice(ip), 0);
+ defer codegen_prog_node.end();
+
+ if (!air.typesFullyResolved(zcu)) {
+ // A type we depend on failed to resolve. This is a transitive failure.
+ // Correcting this failure will involve changing a type this function
+ // depends on, hence triggering re-analysis of this function, so this
+ // interacts correctly with incremental compilation.
+ return;
+ }
+
+ const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
+ try air.legalize(backend, zcu);
+
+ var liveness = try Air.Liveness.analyze(gpa, air.*, ip);
defer liveness.deinit(gpa);
if (build_options.enable_debug_extensions and comp.verbose_air) {
std.debug.print("# Begin Function AIR: {}:\n", .{nav.fqn.fmt(ip)});
- @import("../print_air.zig").dump(pt, air, liveness);
+ @import("../print_air.zig").dump(pt, air.*, liveness);
std.debug.print("# End Function AIR: {}\n\n", .{nav.fqn.fmt(ip)});
}
if (std.debug.runtime_safety) {
- var verify: Liveness.Verify = .{
+ var verify: Air.Liveness.Verify = .{
.gpa = gpa,
- .air = air,
+ .air = air.*,
.liveness = liveness,
.intern_pool = ip,
};
@@ -1768,16 +1776,8 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
};
}
- const codegen_prog_node = zcu.codegen_prog_node.start(nav.fqn.toSlice(ip), 0);
- defer codegen_prog_node.end();
-
- if (!air.typesFullyResolved(zcu)) {
- // A type we depend on failed to resolve. This is a transitive failure.
- // Correcting this failure will involve changing a type this function
- // depends on, hence triggering re-analysis of this function, so this
- // interacts correctly with incremental compilation.
- } else if (comp.bin_file) |lf| {
- lf.updateFunc(pt, func_index, air, liveness) catch |err| switch (err) {
+ if (comp.bin_file) |lf| {
+ lf.updateFunc(pt, func_index, air.*, liveness) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.CodegenFail => assert(zcu.failed_codegen.contains(nav_index)),
error.Overflow, error.RelocationNotByteAligned => {
@@ -1791,7 +1791,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
},
};
} else if (zcu.llvm_object) |llvm_object| {
- llvm_object.updateFunc(pt, func_index, air, liveness) catch |err| switch (err) {
+ llvm_object.updateFunc(pt, func_index, air.*, liveness) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
};
}
@@ -3080,9 +3080,13 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
try sema.flushExports();
+ defer {
+ sema.air_instructions = .empty;
+ sema.air_extra = .empty;
+ }
return .{
- .instructions = sema.air_instructions.toOwnedSlice(),
- .extra = try sema.air_extra.toOwnedSlice(gpa),
+ .instructions = sema.air_instructions.slice(),
+ .extra = sema.air_extra,
};
}
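On the `analyzeFnBodyInner` return change above: instead of `toOwnedSlice`, the instruction and extra buffers are handed to the caller whole and the `Sema` fields are reset to `.empty` in a `defer`, so the caller ends up owning the buffers (capacity included) and later stages can keep appending to them. A reduced sketch of that move-out pattern for the `ArrayListUnmanaged` half only, using a hypothetical `Builder` type rather than `Sema`:

```zig
const std = @import("std");

/// Hypothetical stand-in for a builder that accumulates a u32 buffer and then
/// hands ownership of it (items and capacity) to the caller.
const Builder = struct {
    extra: std.ArrayListUnmanaged(u32),

    fn finish(b: *Builder) std.ArrayListUnmanaged(u32) {
        // The return value is copied out before the defer runs, so resetting the
        // field afterwards leaves the caller with the only live copy.
        defer b.extra = .empty;
        return b.extra;
    }
};

test "finish moves ownership of the buffer" {
    const gpa = std.testing.allocator;
    var b: Builder = .{ .extra = .empty };
    try b.extra.append(gpa, 42);

    var result = b.finish();
    defer result.deinit(gpa);

    try std.testing.expectEqual(@as(usize, 1), result.items.len);
    try std.testing.expectEqual(@as(usize, 0), b.extra.items.len);
}
```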
src/Air.zig
@@ -9,16 +9,19 @@ const builtin = @import("builtin");
const assert = std.debug.assert;
const Air = @This();
-const Value = @import("Value.zig");
-const Type = @import("Type.zig");
const InternPool = @import("InternPool.zig");
+const Type = @import("Type.zig");
+const Value = @import("Value.zig");
const Zcu = @import("Zcu.zig");
const types_resolved = @import("Air/types_resolved.zig");
+pub const Legalize = @import("Air/Legalize.zig");
+pub const Liveness = @import("Air/Liveness.zig");
+
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
/// The first few indexes are reserved. See `ExtraIndex` for the values.
-extra: []const u32,
+extra: std.ArrayListUnmanaged(u32),
pub const ExtraIndex = enum(u32) {
/// Payload index of the main `Block` in the `extra` array.
@@ -244,22 +247,27 @@ pub const Inst = struct {
/// Uses the `bin_op` field.
bit_or,
/// Shift right. `>>`
+ /// The rhs type may be a scalar version of the lhs type.
/// Uses the `bin_op` field.
shr,
/// Shift right. The shift produces a poison value if it shifts out any non-zero bits.
+ /// The rhs type may be a scalar version of the lhs type.
/// Uses the `bin_op` field.
shr_exact,
/// Shift left. `<<`
+ /// The rhs type may be a scalar version of the lhs type.
/// Uses the `bin_op` field.
shl,
/// Shift left; For unsigned integers, the shift produces a poison value if it shifts
/// out any non-zero bits. For signed integers, the shift produces a poison value if
/// it shifts out any bits that disagree with the resultant sign bit.
+ /// The rhs type may be a scalar version of the lhs type.
/// Uses the `bin_op` field.
shl_exact,
/// Saturating integer shift left. `<<|`. The result is the same type as the `lhs`.
/// The `rhs` must have the same vector shape as the `lhs`, but with any unsigned
/// integer as the scalar type.
+ /// The rhs type may be a scalar version of the lhs type.
/// Uses the `bin_op` field.
shl_sat,
/// Bitwise XOR. `^`
@@ -1378,9 +1386,9 @@ pub const UnionInit = struct {
};
pub fn getMainBody(air: Air) []const Air.Inst.Index {
- const body_index = air.extra[@intFromEnum(ExtraIndex.main_block)];
+ const body_index = air.extra.items[@intFromEnum(ExtraIndex.main_block)];
const extra = air.extraData(Block, body_index);
- return @ptrCast(air.extra[extra.end..][0..extra.data.body_len]);
+ return @ptrCast(air.extra.items[extra.end..][0..extra.data.body_len]);
}
pub fn typeOf(air: *const Air, inst: Air.Inst.Ref, ip: *const InternPool) Type {
@@ -1656,9 +1664,9 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end
var result: T = undefined;
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
- u32 => air.extra[i],
- InternPool.Index, Inst.Ref => @enumFromInt(air.extra[i]),
- i32, CondBr.BranchHints => @bitCast(air.extra[i]),
+ u32 => air.extra.items[i],
+ InternPool.Index, Inst.Ref => @enumFromInt(air.extra.items[i]),
+ i32, CondBr.BranchHints => @bitCast(air.extra.items[i]),
else => @compileError("bad field type: " ++ @typeName(field.type)),
};
i += 1;
@@ -1671,7 +1679,7 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end
pub fn deinit(air: *Air, gpa: std.mem.Allocator) void {
air.instructions.deinit(gpa);
- gpa.free(air.extra);
+ air.extra.deinit(gpa);
air.* = undefined;
}
@@ -1700,7 +1708,7 @@ pub const NullTerminatedString = enum(u32) {
pub fn toSlice(nts: NullTerminatedString, air: Air) [:0]const u8 {
if (nts == .none) return "";
- const bytes = std.mem.sliceAsBytes(air.extra[@intFromEnum(nts)..]);
+ const bytes = std.mem.sliceAsBytes(air.extra.items[@intFromEnum(nts)..]);
return bytes[0..std.mem.indexOfScalar(u8, bytes, 0).? :0];
}
};
@@ -1943,7 +1951,7 @@ pub const UnwrappedSwitch = struct {
return us.getHintInner(us.cases_len);
}
fn getHintInner(us: UnwrappedSwitch, idx: u32) std.builtin.BranchHint {
- const bag = us.air.extra[us.branch_hints_start..][idx / 10];
+ const bag = us.air.extra.items[us.branch_hints_start..][idx / 10];
const bits: u3 = @truncate(bag >> @intCast(3 * (idx % 10)));
return @enumFromInt(bits);
}
@@ -1971,13 +1979,13 @@ pub const UnwrappedSwitch = struct {
const extra = it.air.extraData(SwitchBr.Case, it.extra_index);
var extra_index = extra.end;
- const items: []const Inst.Ref = @ptrCast(it.air.extra[extra_index..][0..extra.data.items_len]);
+ const items: []const Inst.Ref = @ptrCast(it.air.extra.items[extra_index..][0..extra.data.items_len]);
extra_index += items.len;
// TODO: ptrcast from []const Inst.Ref to []const [2]Inst.Ref when supported
- const ranges_ptr: [*]const [2]Inst.Ref = @ptrCast(it.air.extra[extra_index..]);
+ const ranges_ptr: [*]const [2]Inst.Ref = @ptrCast(it.air.extra.items[extra_index..]);
const ranges: []const [2]Inst.Ref = ranges_ptr[0..extra.data.ranges_len];
extra_index += ranges.len * 2;
- const body: []const Inst.Index = @ptrCast(it.air.extra[extra_index..][0..extra.data.body_len]);
+ const body: []const Inst.Index = @ptrCast(it.air.extra.items[extra_index..][0..extra.data.body_len]);
extra_index += body.len;
it.extra_index = @intCast(extra_index);
@@ -1992,7 +2000,7 @@ pub const UnwrappedSwitch = struct {
/// Returns the body of the "default" (`else`) case.
pub fn elseBody(it: *CaseIterator) []const Inst.Index {
assert(it.next_case == it.cases_len);
- return @ptrCast(it.air.extra[it.extra_index..][0..it.else_body_len]);
+ return @ptrCast(it.air.extra.items[it.extra_index..][0..it.else_body_len]);
}
pub const Case = struct {
idx: u32,
@@ -2025,6 +2033,7 @@ pub fn unwrapSwitch(air: *const Air, switch_inst: Inst.Index) UnwrappedSwitch {
pub const typesFullyResolved = types_resolved.typesFullyResolved;
pub const typeFullyResolved = types_resolved.checkType;
pub const valFullyResolved = types_resolved.checkVal;
+pub const legalize = Legalize.legalize;
pub const CoveragePoint = enum(u1) {
/// Indicates the block is not a place of interest corresponding to
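The mechanical `self.air.extra[...]` to `self.air.extra.items[...]` churn throughout this commit falls out of the field change above: `extra` is now a `std.ArrayListUnmanaged(u32)` instead of a bare `[]const u32`, so reads go through `.items`, while the slice-then-`@ptrCast` reinterpretation idiom stays the same. A toy stand-in (hypothetical `MiniAir`, not the real `Air`):

```zig
const std = @import("std");

/// Toy version of the field change: extra is an ArrayListUnmanaged(u32), and a
/// run of u32 words is reinterpreted as a slice of enum(u32) references.
const MiniAir = struct {
    extra: std.ArrayListUnmanaged(u32),

    const Ref = enum(u32) { none = 0, _ };

    fn refs(air: MiniAir, start: usize, len: usize) []const Ref {
        // Same shape as `@ptrCast(air.extra.items[i..][0..len])` in the diff.
        return @ptrCast(air.extra.items[start..][0..len]);
    }
};

test "slicing through .items behaves like the old bare slice" {
    const gpa = std.testing.allocator;
    var air: MiniAir = .{ .extra = .empty };
    defer air.extra.deinit(gpa);

    try air.extra.appendSlice(gpa, &.{ 0, 7, 8, 9 });
    const r = air.refs(1, 3);
    try std.testing.expectEqual(@as(u32, 8), @intFromEnum(r[1]));
}
```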
src/codegen.zig
@@ -14,7 +14,6 @@ const Allocator = mem.Allocator;
const Compilation = @import("Compilation.zig");
const ErrorMsg = Zcu.ErrorMsg;
const InternPool = @import("InternPool.zig");
-const Liveness = @import("Liveness.zig");
const Zcu = @import("Zcu.zig");
const Type = @import("Type.zig");
@@ -33,15 +32,18 @@ fn devFeatureForBackend(comptime backend: std.builtin.CompilerBackend) dev.Featu
return @field(dev.Feature, @tagName(backend)["stage2_".len..] ++ "_backend");
}
-fn importBackend(comptime backend: std.builtin.CompilerBackend) type {
+pub fn importBackend(comptime backend: std.builtin.CompilerBackend) ?type {
return switch (backend) {
.stage2_aarch64 => @import("arch/aarch64/CodeGen.zig"),
.stage2_arm => @import("arch/arm/CodeGen.zig"),
+ .stage2_c => @import("codegen/c.zig"),
+ .stage2_llvm => @import("codegen/llvm.zig"),
.stage2_powerpc => @import("arch/powerpc/CodeGen.zig"),
.stage2_riscv64 => @import("arch/riscv64/CodeGen.zig"),
.stage2_sparc64 => @import("arch/sparc64/CodeGen.zig"),
+ .stage2_spirv64 => @import("codegen/spirv.zig"),
.stage2_x86_64 => @import("arch/x86_64/CodeGen.zig"),
- else => unreachable,
+ else => null,
};
}
@@ -51,7 +53,7 @@ pub fn generateFunction(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) CodeGenError!void {
@@ -68,7 +70,7 @@ pub fn generateFunction(
.stage2_x86_64,
=> |backend| {
dev.check(devFeatureForBackend(backend));
- return importBackend(backend).generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output);
+ return importBackend(backend).?.generate(lf, pt, src_loc, func_index, air, liveness, code, debug_output);
},
}
}
@@ -93,7 +95,7 @@ pub fn generateLazyFunction(
.stage2_x86_64,
=> |backend| {
dev.check(devFeatureForBackend(backend));
- return importBackend(backend).generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output);
+ return importBackend(backend).?.generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output);
},
}
}
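`importBackend` above is now `pub` and returns `?type`, with `null` standing in for the old `unreachable` on tags that have no dedicated implementation; call sites that are only reached for covered tags unwrap with `.?` at comptime. A reduced sketch of that shape, with `pickImpl` and `Backend` being made-up names rather than the compiler's own:

```zig
const std = @import("std");

const Backend = enum { a, b, other };

/// Returns the implementation namespace for a backend, or null when the tag
/// has no dedicated implementation (instead of hitting unreachable).
fn pickImpl(comptime backend: Backend) ?type {
    return switch (backend) {
        .a => struct {
            pub fn generate() u32 {
                return 1;
            }
        },
        .b => struct {
            pub fn generate() u32 {
                return 2;
            }
        },
        else => null,
    };
}

test "covered tags unwrap with .? at comptime" {
    try std.testing.expectEqual(@as(u32, 2), pickImpl(.b).?.generate());
    try std.testing.expect(pickImpl(.other) == null);
}
```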
src/link.zig
@@ -15,7 +15,6 @@ const Path = std.Build.Cache.Path;
const Directory = std.Build.Cache.Directory;
const Compilation = @import("Compilation.zig");
const LibCInstallation = std.zig.LibCInstallation;
-const Liveness = @import("Liveness.zig");
const Zcu = @import("Zcu.zig");
const InternPool = @import("InternPool.zig");
const Type = @import("Type.zig");
@@ -738,7 +737,7 @@ pub const File = struct {
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: Air,
- liveness: Liveness,
+ liveness: Air.Liveness,
) UpdateNavError!void {
switch (base.tag) {
inline else => |tag| {
@@ -1601,8 +1600,9 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void {
if (comp.remaining_prelink_tasks == 0) {
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
defer pt.deactivate();
- // This call takes ownership of `func.air`.
- pt.linkerUpdateFunc(func.func, func.air) catch |err| switch (err) {
+ var air = func.air;
+ defer air.deinit(comp.gpa);
+ pt.linkerUpdateFunc(func.func, &air) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailure(),
};
} else {
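The caller side in `doTask` above mirrors the `linkerUpdateFunc` signature change: the task loop copies `func.air` into a local `var`, takes over the `deinit`, and passes `&air` so the update path can legalize it in place. A reduced sketch with a hypothetical `Ir` type standing in for `Air`:

```zig
const std = @import("std");

const Ir = struct {
    words: std.ArrayListUnmanaged(u32),

    fn deinit(ir: *Ir, gpa: std.mem.Allocator) void {
        ir.words.deinit(gpa);
    }
};

/// Stand-in for the update path: it receives a mutable pointer and may grow
/// or rewrite the buffer (as `air.legalize(...)` does in the real code).
fn updateInPlace(gpa: std.mem.Allocator, ir: *Ir) !void {
    try ir.words.append(gpa, 0xdead);
}

test "caller owns the buffer, callee mutates through the pointer" {
    const gpa = std.testing.allocator;
    var ir: Ir = .{ .words = .empty };
    defer ir.deinit(gpa);

    try updateInPlace(gpa, &ir);
    try std.testing.expectEqual(@as(usize, 1), ir.words.items.len);
}
```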
src/print_air.zig
@@ -6,20 +6,19 @@ const Zcu = @import("Zcu.zig");
const Value = @import("Value.zig");
const Type = @import("Type.zig");
const Air = @import("Air.zig");
-const Liveness = @import("Liveness.zig");
const InternPool = @import("InternPool.zig");
-pub fn write(stream: anytype, pt: Zcu.PerThread, air: Air, liveness: ?Liveness) void {
+pub fn write(stream: anytype, pt: Zcu.PerThread, air: Air, liveness: ?Air.Liveness) void {
const instruction_bytes = air.instructions.len *
// Here we don't use @sizeOf(Air.Inst.Data) because it would include
// the debug safety tag but we want to measure release size.
(@sizeOf(Air.Inst.Tag) + 8);
- const extra_bytes = air.extra.len * @sizeOf(u32);
+ const extra_bytes = air.extra.items.len * @sizeOf(u32);
const tomb_bytes = if (liveness) |l| l.tomb_bits.len * @sizeOf(usize) else 0;
const liveness_extra_bytes = if (liveness) |l| l.extra.len * @sizeOf(u32) else 0;
const liveness_special_bytes = if (liveness) |l| l.special.count() * 8 else 0;
const total_bytes = @sizeOf(Air) + instruction_bytes + extra_bytes +
- @sizeOf(Liveness) + liveness_extra_bytes +
+ @sizeOf(Air.Liveness) + liveness_extra_bytes +
liveness_special_bytes + tomb_bytes;
// zig fmt: off
@@ -34,7 +33,7 @@ pub fn write(stream: anytype, pt: Zcu.PerThread, air: Air, liveness: ?Liveness)
, .{
fmtIntSizeBin(total_bytes),
air.instructions.len, fmtIntSizeBin(instruction_bytes),
- air.extra.len, fmtIntSizeBin(extra_bytes),
+ air.extra.items.len, fmtIntSizeBin(extra_bytes),
fmtIntSizeBin(tomb_bytes),
if (liveness) |l| l.extra.len else 0, fmtIntSizeBin(liveness_extra_bytes),
if (liveness) |l| l.special.count() else 0, fmtIntSizeBin(liveness_special_bytes),
@@ -57,7 +56,7 @@ pub fn writeInst(
inst: Air.Inst.Index,
pt: Zcu.PerThread,
air: Air,
- liveness: ?Liveness,
+ liveness: ?Air.Liveness,
) void {
var writer: Writer = .{
.pt = pt,
@@ -70,11 +69,11 @@ pub fn writeInst(
writer.writeInst(stream, inst) catch return;
}
-pub fn dump(pt: Zcu.PerThread, air: Air, liveness: ?Liveness) void {
+pub fn dump(pt: Zcu.PerThread, air: Air, liveness: ?Air.Liveness) void {
write(std.io.getStdErr().writer(), pt, air, liveness);
}
-pub fn dumpInst(inst: Air.Inst.Index, pt: Zcu.PerThread, air: Air, liveness: ?Liveness) void {
+pub fn dumpInst(inst: Air.Inst.Index, pt: Zcu.PerThread, air: Air, liveness: ?Air.Liveness) void {
writeInst(std.io.getStdErr().writer(), inst, pt, air, liveness);
}
@@ -82,7 +81,7 @@ const Writer = struct {
pt: Zcu.PerThread,
gpa: Allocator,
air: Air,
- liveness: ?Liveness,
+ liveness: ?Air.Liveness,
indent: usize,
skip_body: bool,
@@ -391,15 +390,15 @@ const Writer = struct {
},
else => unreachable,
}
- break :body w.air.extra[extra.end..][0..extra.data.body_len];
+ break :body w.air.extra.items[extra.end..][0..extra.data.body_len];
},
else => unreachable,
});
if (w.skip_body) return s.writeAll(", ...");
- const liveness_block = if (w.liveness) |liveness|
+ const liveness_block: Air.Liveness.BlockSlices = if (w.liveness) |liveness|
liveness.getBlock(inst)
else
- Liveness.BlockSlices{ .deaths = &.{} };
+ .{ .deaths = &.{} };
try s.writeAll(", {\n");
const old_indent = w.indent;
@@ -417,7 +416,7 @@ const Writer = struct {
fn writeLoop(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = w.air.extraData(Air.Block, ty_pl.payload);
- const body: []const Air.Inst.Index = @ptrCast(w.air.extra[extra.end..][0..extra.data.body_len]);
+ const body: []const Air.Inst.Index = @ptrCast(w.air.extra.items[extra.end..][0..extra.data.body_len]);
try w.writeType(s, ty_pl.ty.toType());
if (w.skip_body) return s.writeAll(", ...");
@@ -435,7 +434,7 @@ const Writer = struct {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const vector_ty = ty_pl.ty.toType();
const len = @as(usize, @intCast(vector_ty.arrayLen(zcu)));
- const elements = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[ty_pl.payload..][0..len]));
+ const elements = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra.items[ty_pl.payload..][0..len]));
try w.writeType(s, vector_ty);
try s.writeAll(", [");
@@ -622,13 +621,13 @@ const Writer = struct {
try s.writeAll(", volatile");
}
- const outputs = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[extra_i..][0..extra.data.outputs_len]));
+ const outputs = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra.items[extra_i..][0..extra.data.outputs_len]));
extra_i += outputs.len;
- const inputs = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[extra_i..][0..extra.data.inputs_len]));
+ const inputs = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra.items[extra_i..][0..extra.data.inputs_len]));
extra_i += inputs.len;
for (outputs) |output| {
- const extra_bytes = std.mem.sliceAsBytes(w.air.extra[extra_i..]);
+ const extra_bytes = std.mem.sliceAsBytes(w.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
@@ -648,7 +647,7 @@ const Writer = struct {
}
for (inputs) |input| {
- const extra_bytes = std.mem.sliceAsBytes(w.air.extra[extra_i..]);
+ const extra_bytes = std.mem.sliceAsBytes(w.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
@@ -665,7 +664,7 @@ const Writer = struct {
{
var clobber_i: u32 = 0;
while (clobber_i < clobbers_len) : (clobber_i += 1) {
- const extra_bytes = std.mem.sliceAsBytes(w.air.extra[extra_i..]);
+ const extra_bytes = std.mem.sliceAsBytes(w.air.extra.items[extra_i..]);
const clobber = std.mem.sliceTo(extra_bytes, 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
@@ -676,7 +675,7 @@ const Writer = struct {
try s.writeAll("}");
}
}
- const asm_source = std.mem.sliceAsBytes(w.air.extra[extra_i..])[0..extra.data.source_len];
+ const asm_source = std.mem.sliceAsBytes(w.air.extra.items[extra_i..])[0..extra.data.source_len];
try s.print(", \"{}\"", .{std.zig.fmtEscapes(asm_source)});
}
@@ -695,7 +694,7 @@ const Writer = struct {
fn writeCall(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = w.air.extraData(Air.Call, pl_op.payload);
- const args = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra[extra.end..][0..extra.data.args_len]));
+ const args = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra.items[extra.end..][0..extra.data.args_len]));
try w.writeOperand(s, inst, 0, pl_op.operand);
try s.writeAll(", [");
for (args, 0..) |arg, i| {
@@ -720,11 +719,11 @@ const Writer = struct {
fn writeTry(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = w.air.extraData(Air.Try, pl_op.payload);
- const body: []const Air.Inst.Index = @ptrCast(w.air.extra[extra.end..][0..extra.data.body_len]);
- const liveness_condbr = if (w.liveness) |liveness|
+ const body: []const Air.Inst.Index = @ptrCast(w.air.extra.items[extra.end..][0..extra.data.body_len]);
+ const liveness_condbr: Air.Liveness.CondBrSlices = if (w.liveness) |liveness|
liveness.getCondBr(inst)
else
- Liveness.CondBrSlices{ .then_deaths = &.{}, .else_deaths = &.{} };
+ .{ .then_deaths = &.{}, .else_deaths = &.{} };
try w.writeOperand(s, inst, 0, pl_op.operand);
if (w.skip_body) return s.writeAll(", ...");
@@ -754,11 +753,11 @@ const Writer = struct {
fn writeTryPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = w.air.extraData(Air.TryPtr, ty_pl.payload);
- const body: []const Air.Inst.Index = @ptrCast(w.air.extra[extra.end..][0..extra.data.body_len]);
- const liveness_condbr = if (w.liveness) |liveness|
+ const body: []const Air.Inst.Index = @ptrCast(w.air.extra.items[extra.end..][0..extra.data.body_len]);
+ const liveness_condbr: Air.Liveness.CondBrSlices = if (w.liveness) |liveness|
liveness.getCondBr(inst)
else
- Liveness.CondBrSlices{ .then_deaths = &.{}, .else_deaths = &.{} };
+ .{ .then_deaths = &.{}, .else_deaths = &.{} };
try w.writeOperand(s, inst, 0, extra.data.ptr);
@@ -791,12 +790,12 @@ const Writer = struct {
fn writeCondBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = w.air.extraData(Air.CondBr, pl_op.payload);
- const then_body: []const Air.Inst.Index = @ptrCast(w.air.extra[extra.end..][0..extra.data.then_body_len]);
- const else_body: []const Air.Inst.Index = @ptrCast(w.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
- const liveness_condbr = if (w.liveness) |liveness|
+ const then_body: []const Air.Inst.Index = @ptrCast(w.air.extra.items[extra.end..][0..extra.data.then_body_len]);
+ const else_body: []const Air.Inst.Index = @ptrCast(w.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
+ const liveness_condbr: Air.Liveness.CondBrSlices = if (w.liveness) |liveness|
liveness.getCondBr(inst)
else
- Liveness.CondBrSlices{ .then_deaths = &.{}, .else_deaths = &.{} };
+ .{ .then_deaths = &.{}, .else_deaths = &.{} };
try w.writeOperand(s, inst, 0, pl_op.operand);
if (w.skip_body) return s.writeAll(", ...");
@@ -850,14 +849,14 @@ const Writer = struct {
fn writeSwitchBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const switch_br = w.air.unwrapSwitch(inst);
- const liveness = if (w.liveness) |liveness|
+ const liveness: Air.Liveness.SwitchBrTable = if (w.liveness) |liveness|
liveness.getSwitchBr(w.gpa, inst, switch_br.cases_len + 1) catch
@panic("out of memory")
else blk: {
const slice = w.gpa.alloc([]const Air.Inst.Index, switch_br.cases_len + 1) catch
@panic("out of memory");
@memset(slice, &.{});
- break :blk Liveness.SwitchBrTable{ .deaths = slice };
+ break :blk .{ .deaths = slice };
};
defer w.gpa.free(liveness.deaths);
@@ -956,10 +955,10 @@ const Writer = struct {
op_index: usize,
operand: Air.Inst.Ref,
) @TypeOf(s).Error!void {
- const small_tomb_bits = Liveness.bpi - 1;
+ const small_tomb_bits = Air.Liveness.bpi - 1;
const dies = if (w.liveness) |liveness| blk: {
if (op_index < small_tomb_bits)
- break :blk liveness.operandDies(inst, @as(Liveness.OperandInt, @intCast(op_index)));
+ break :blk liveness.operandDies(inst, @intCast(op_index));
var extra_index = liveness.special.get(inst).?;
var tomb_op_index: usize = small_tomb_bits;
while (true) {
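The `Liveness.CondBrSlices{ ... }` to `.{ ... }` edits in `print_air.zig` lean on result-type inference: once the binding is annotated with `Air.Liveness.BlockSlices` or `CondBrSlices`, both branches of the `if` can use anonymous literals. A tiny sketch with a made-up `Slices` type:

```zig
const std = @import("std");

const Slices = struct {
    deaths: []const u32 = &.{},
};

/// With the result type spelled out on the binding, both branches can use
/// anonymous `.{ ... }` initializers instead of repeating the type name.
fn pick(maybe: ?[]const u32) Slices {
    const result: Slices = if (maybe) |deaths|
        .{ .deaths = deaths }
    else
        .{};
    return result;
}

test "anonymous literals pick up the annotated result type" {
    try std.testing.expectEqual(@as(usize, 0), pick(null).deaths.len);
    try std.testing.expectEqual(@as(usize, 2), pick(&.{ 1, 2 }).deaths.len);
}
```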
src/Sema.zig
@@ -756,13 +756,7 @@ pub const Block = struct {
fn addReduce(block: *Block, operand: Air.Inst.Ref, operation: std.builtin.ReduceOp) !Air.Inst.Ref {
const sema = block.sema;
const zcu = sema.pt.zcu;
- const vector_ty = sema.typeOf(operand);
- switch (vector_ty.vectorLen(zcu)) {
- 0 => unreachable,
- 1 => return block.addBinOp(.array_elem_val, operand, .zero_usize),
- else => {},
- }
- const allow_optimized = switch (vector_ty.childType(zcu).zigTypeTag(zcu)) {
+ const allow_optimized = switch (sema.typeOf(operand).childType(zcu).zigTypeTag(zcu)) {
.float => true,
.bool, .int => false,
else => unreachable,
@@ -36849,7 +36843,7 @@ fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type {
pub fn getTmpAir(sema: Sema) Air {
return .{
.instructions = sema.air_instructions.slice(),
- .extra = sema.air_extra.items,
+ .extra = sema.air_extra,
};
}
src/Zcu.zig
@@ -30,7 +30,6 @@ const AstGen = std.zig.AstGen;
const Sema = @import("Sema.zig");
const target_util = @import("target.zig");
const build_options = @import("build_options");
-const Liveness = @import("Liveness.zig");
const isUpDir = @import("introspect.zig").isUpDir;
const clang = @import("clang.zig");
const InternPool = @import("InternPool.zig");
CMakeLists.txt
@@ -512,13 +512,15 @@ set(ZIG_STAGE2_SOURCES
lib/std/zig/llvm/bitcode_writer.zig
lib/std/zig/llvm/ir.zig
src/Air.zig
+ src/Air/Legalize.zig
+ src/Air/Liveness.zig
+ src/Air/Liveness/Verify.zig
+ src/Air/types_resolved.zig
src/Builtin.zig
src/Compilation.zig
src/Compilation/Config.zig
src/DarwinPosixSpawn.zig
src/InternPool.zig
- src/Liveness.zig
- src/Liveness/Verify.zig
src/Package.zig
src/Package/Fetch.zig
src/Package/Fetch/git.zig