Commit 9684947faa
Changed files (15)
src
arch
aarch64
arm
riscv64
sparc64
wasm
x86_64
Liveness
src/arch/aarch64/CodeGen.zig
@@ -669,11 +669,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
switch (air_tags[inst]) {
// zig fmt: off
.add => try self.airBinOp(inst, .add),
- .addwrap => try self.airBinOp(inst, .addwrap),
+ .add_wrap => try self.airBinOp(inst, .add_wrap),
.sub => try self.airBinOp(inst, .sub),
- .subwrap => try self.airBinOp(inst, .subwrap),
+ .sub_wrap => try self.airBinOp(inst, .sub_wrap),
.mul => try self.airBinOp(inst, .mul),
- .mulwrap => try self.airBinOp(inst, .mulwrap),
+ .mul_wrap => try self.airBinOp(inst, .mul_wrap),
.shl => try self.airBinOp(inst, .shl),
.shl_exact => try self.airBinOp(inst, .shl_exact),
.bool_and => try self.airBinOp(inst, .bool_and),
@@ -865,11 +865,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.wrap_errunion_err => try self.airWrapErrUnionErr(inst),
.add_optimized,
- .addwrap_optimized,
.sub_optimized,
- .subwrap_optimized,
.mul_optimized,
- .mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
@@ -888,6 +885,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.int_from_float_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),
+ .add_safe,
+ .sub_safe,
+ .mul_safe,
+ => return self.fail("TODO implement safety_checked_instructions", .{}),
+
.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
.error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
.vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}),
@@ -2216,9 +2218,9 @@ fn wrappingArithmetic(
if (int_info.bits <= 64) {
// Generate an add/sub/mul
const result: MCValue = switch (tag) {
- .addwrap => try self.addSub(.add, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst),
- .subwrap => try self.addSub(.sub, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst),
- .mulwrap => try self.mul(lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst),
+ .add_wrap => try self.addSub(.add, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst),
+ .sub_wrap => try self.addSub(.sub, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst),
+ .mul_wrap => try self.mul(lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst),
else => unreachable,
};
@@ -2458,9 +2460,9 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
.mod => try self.modulo(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
- .addwrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
- .subwrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
- .mulwrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+ .add_wrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+ .sub_wrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+ .mul_wrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
.bit_and => try self.bitwise(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
.bit_or => try self.bitwise(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
src/arch/arm/CodeGen.zig
@@ -653,11 +653,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
switch (air_tags[inst]) {
// zig fmt: off
.add, => try self.airBinOp(inst, .add),
- .addwrap => try self.airBinOp(inst, .addwrap),
+ .add_wrap => try self.airBinOp(inst, .add_wrap),
.sub, => try self.airBinOp(inst, .sub),
- .subwrap => try self.airBinOp(inst, .subwrap),
+ .sub_wrap => try self.airBinOp(inst, .sub_wrap),
.mul => try self.airBinOp(inst, .mul),
- .mulwrap => try self.airBinOp(inst, .mulwrap),
+ .mul_wrap => try self.airBinOp(inst, .mul_wrap),
.shl => try self.airBinOp(inst, .shl),
.shl_exact => try self.airBinOp(inst, .shl_exact),
.bool_and => try self.airBinOp(inst, .bool_and),
@@ -849,11 +849,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.wrap_errunion_err => try self.airWrapErrUnionErr(inst),
.add_optimized,
- .addwrap_optimized,
.sub_optimized,
- .subwrap_optimized,
.mul_optimized,
- .mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
@@ -872,6 +869,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.int_from_float_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),
+ .add_safe,
+ .sub_safe,
+ .mul_safe,
+ => return self.fail("TODO implement safety_checked_instructions", .{}),
+
.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
.error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
.vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}),
@@ -1523,9 +1525,9 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
.mod => try self.modulo(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
- .addwrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
- .subwrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
- .mulwrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+ .add_wrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+ .sub_wrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
+ .mul_wrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
.bit_and => try self.bitwise(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
.bit_or => try self.bitwise(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst),
@@ -3694,9 +3696,9 @@ fn wrappingArithmetic(
if (int_info.bits <= 32) {
// Generate an add/sub/mul
const result: MCValue = switch (tag) {
- .addwrap => try self.addSub(.add, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst),
- .subwrap => try self.addSub(.sub, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst),
- .mulwrap => try self.mul(lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst),
+ .add_wrap => try self.addSub(.add, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst),
+ .sub_wrap => try self.addSub(.sub, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst),
+ .mul_wrap => try self.mul(lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst),
else => unreachable,
};
src/arch/riscv64/CodeGen.zig
@@ -492,12 +492,17 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.add => try self.airBinOp(inst, .add),
.sub => try self.airBinOp(inst, .sub),
- .addwrap => try self.airAddWrap(inst),
+ .add_safe,
+ .sub_safe,
+ .mul_safe,
+ => return self.fail("TODO implement safety_checked_instructions", .{}),
+
+ .add_wrap => try self.airAddWrap(inst),
.add_sat => try self.airAddSat(inst),
- .subwrap => try self.airSubWrap(inst),
+ .sub_wrap => try self.airSubWrap(inst),
.sub_sat => try self.airSubSat(inst),
.mul => try self.airMul(inst),
- .mulwrap => try self.airMulWrap(inst),
+ .mul_wrap => try self.airMulWrap(inst),
.mul_sat => try self.airMulSat(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
@@ -679,11 +684,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.wrap_errunion_err => try self.airWrapErrUnionErr(inst),
.add_optimized,
- .addwrap_optimized,
.sub_optimized,
- .subwrap_optimized,
.mul_optimized,
- .mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
src/arch/sparc64/CodeGen.zig
@@ -508,11 +508,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),
.add => try self.airBinOp(inst, .add),
- .addwrap => try self.airBinOp(inst, .addwrap),
+ .add_wrap => try self.airBinOp(inst, .add_wrap),
.sub => try self.airBinOp(inst, .sub),
- .subwrap => try self.airBinOp(inst, .subwrap),
+ .sub_wrap => try self.airBinOp(inst, .sub_wrap),
.mul => try self.airBinOp(inst, .mul),
- .mulwrap => try self.airBinOp(inst, .mulwrap),
+ .mul_wrap => try self.airBinOp(inst, .mul_wrap),
.shl => try self.airBinOp(inst, .shl),
.shl_exact => try self.airBinOp(inst, .shl_exact),
.shr => try self.airBinOp(inst, .shr),
@@ -697,11 +697,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.wrap_errunion_err => try self.airWrapErrUnionErr(inst),
.add_optimized,
- .addwrap_optimized,
.sub_optimized,
- .subwrap_optimized,
.mul_optimized,
- .mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
@@ -720,6 +717,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.int_from_float_optimized,
=> @panic("TODO implement optimized float mode"),
+ .add_safe,
+ .sub_safe,
+ .mul_safe,
+ => @panic("TODO implement safety_checked_instructions"),
+
.is_named_enum_value => @panic("TODO implement is_named_enum_value"),
.error_set_has_value => @panic("TODO implement error_set_has_value"),
.vector_store_elem => @panic("TODO implement vector_store_elem"),
@@ -2931,14 +2933,14 @@ fn binOp(
}
},
- .addwrap,
- .subwrap,
- .mulwrap,
+ .add_wrap,
+ .sub_wrap,
+ .mul_wrap,
=> {
const base_tag: Air.Inst.Tag = switch (tag) {
- .addwrap => .add,
- .subwrap => .sub,
- .mulwrap => .mul,
+ .add_wrap => .add,
+ .sub_wrap => .sub,
+ .mul_wrap => .mul,
else => unreachable,
};
src/arch/wasm/CodeGen.zig
@@ -1836,12 +1836,12 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.add => func.airBinOp(inst, .add),
.add_sat => func.airSatBinOp(inst, .add),
- .addwrap => func.airWrapBinOp(inst, .add),
+ .add_wrap => func.airWrapBinOp(inst, .add),
.sub => func.airBinOp(inst, .sub),
.sub_sat => func.airSatBinOp(inst, .sub),
- .subwrap => func.airWrapBinOp(inst, .sub),
+ .sub_wrap => func.airWrapBinOp(inst, .sub),
.mul => func.airBinOp(inst, .mul),
- .mulwrap => func.airWrapBinOp(inst, .mul),
+ .mul_wrap => func.airWrapBinOp(inst, .mul),
.div_float, .div_exact => func.airDiv(inst),
.div_trunc => func.airDivTrunc(inst),
.div_floor => func.airDivFloor(inst),
@@ -2041,11 +2041,8 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.fence => func.airFence(inst),
.add_optimized,
- .addwrap_optimized,
.sub_optimized,
- .subwrap_optimized,
.mul_optimized,
- .mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
@@ -2064,6 +2061,11 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.int_from_float_optimized,
=> return func.fail("TODO implement optimized float mode", .{}),
+ .add_safe,
+ .sub_safe,
+ .mul_safe,
+ => return func.fail("TODO implement safety_checked_instructions", .{}),
+
.work_item_id,
.work_group_size,
.work_group_id,
src/arch/x86_64/CodeGen.zig
@@ -1755,9 +1755,9 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
=> |tag| try self.airUnOp(inst, tag),
.add,
- .addwrap,
+ .add_wrap,
.sub,
- .subwrap,
+ .sub_wrap,
.bool_and,
.bool_or,
.bit_and,
@@ -1773,7 +1773,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.shl, .shl_exact => try self.airShlShrBinOp(inst),
.mul => try self.airMulDivBinOp(inst),
- .mulwrap => try self.airMulDivBinOp(inst),
+ .mul_wrap => try self.airMulDivBinOp(inst),
.rem => try self.airMulDivBinOp(inst),
.mod => try self.airMulDivBinOp(inst),
@@ -1947,11 +1947,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.wrap_errunion_err => try self.airWrapErrUnionErr(inst),
.add_optimized,
- .addwrap_optimized,
.sub_optimized,
- .subwrap_optimized,
.mul_optimized,
- .mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
@@ -1970,6 +1967,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.int_from_float_optimized,
=> return self.fail("TODO implement optimized float mode", .{}),
+ .add_safe,
+ .sub_safe,
+ .mul_safe,
+ => return self.fail("TODO implement safety_checked_instructions", .{}),
+
.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
.error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
.vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}),
@@ -2912,7 +2914,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
const dst_info = dst_ty.intInfo(mod);
const src_ty = try mod.intType(dst_info.signedness, switch (tag) {
else => unreachable,
- .mul, .mulwrap => @max(
+ .mul, .mul_wrap => @max(
self.activeIntBits(bin_op.lhs),
self.activeIntBits(bin_op.rhs),
dst_info.bits / 2,
@@ -6155,7 +6157,7 @@ fn genMulDivBinOp(
const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod)));
if (switch (tag) {
else => unreachable,
- .mul, .mulwrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2,
+ .mul, .mul_wrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2,
.div_trunc, .div_floor, .div_exact, .rem, .mod => dst_abi_size != src_abi_size,
} or src_abi_size > 8) return self.fail("TODO implement genMulDivBinOp from {} to {}", .{
src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?),
@@ -6172,13 +6174,13 @@ fn genMulDivBinOp(
const signedness = ty.intInfo(mod).signedness;
switch (tag) {
.mul,
- .mulwrap,
+ .mul_wrap,
.rem,
.div_trunc,
.div_exact,
=> {
const track_inst_rax = switch (tag) {
- .mul, .mulwrap => if (dst_abi_size <= 8) maybe_inst else null,
+ .mul, .mul_wrap => if (dst_abi_size <= 8) maybe_inst else null,
.div_exact, .div_trunc => maybe_inst,
else => null,
};
@@ -6191,19 +6193,19 @@ fn genMulDivBinOp(
try self.genIntMulDivOpMir(switch (signedness) {
.signed => switch (tag) {
- .mul, .mulwrap => .{ .i_, .mul },
+ .mul, .mul_wrap => .{ .i_, .mul },
.div_trunc, .div_exact, .rem => .{ .i_, .div },
else => unreachable,
},
.unsigned => switch (tag) {
- .mul, .mulwrap => .{ ._, .mul },
+ .mul, .mul_wrap => .{ ._, .mul },
.div_trunc, .div_exact, .rem => .{ ._, .div },
else => unreachable,
},
}, ty, lhs, rhs);
if (dst_abi_size <= 8) return .{ .register = registerAlias(switch (tag) {
- .mul, .mulwrap, .div_trunc, .div_exact => .rax,
+ .mul, .mul_wrap, .div_trunc, .div_exact => .rax,
.rem => .rdx,
else => unreachable,
}, dst_abi_size) };
@@ -6347,7 +6349,7 @@ fn genBinOp(
switch (lhs_mcv) {
.immediate => |imm| switch (imm) {
0 => switch (air_tag) {
- .sub, .subwrap => return self.genUnOp(maybe_inst, .neg, rhs_air),
+ .sub, .sub_wrap => return self.genUnOp(maybe_inst, .neg, rhs_air),
else => {},
},
else => {},
@@ -6357,7 +6359,7 @@ fn genBinOp(
const is_commutative = switch (air_tag) {
.add,
- .addwrap,
+ .add_wrap,
.mul,
.bool_or,
.bit_or,
@@ -6427,11 +6429,11 @@ fn genBinOp(
if (!vec_op) {
switch (air_tag) {
.add,
- .addwrap,
+ .add_wrap,
=> try self.genBinOpMir(.{ ._, .add }, lhs_ty, dst_mcv, src_mcv),
.sub,
- .subwrap,
+ .sub_wrap,
=> try self.genBinOpMir(.{ ._, .sub }, lhs_ty, dst_mcv, src_mcv),
.ptr_add,
@@ -6649,10 +6651,10 @@ fn genBinOp(
8 => switch (lhs_ty.vectorLen(mod)) {
1...16 => switch (air_tag) {
.add,
- .addwrap,
+ .add_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_b, .add } else .{ .p_b, .add },
.sub,
- .subwrap,
+ .sub_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_b, .sub } else .{ .p_b, .sub },
.bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
@@ -6689,10 +6691,10 @@ fn genBinOp(
},
17...32 => switch (air_tag) {
.add,
- .addwrap,
+ .add_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_b, .add } else null,
.sub,
- .subwrap,
+ .sub_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_b, .sub } else null,
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
@@ -6712,13 +6714,13 @@ fn genBinOp(
16 => switch (lhs_ty.vectorLen(mod)) {
1...8 => switch (air_tag) {
.add,
- .addwrap,
+ .add_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_w, .add } else .{ .p_w, .add },
.sub,
- .subwrap,
+ .sub_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_w, .sub } else .{ .p_w, .sub },
.mul,
- .mulwrap,
+ .mul_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_w, .mull } else .{ .p_d, .mull },
.bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
@@ -6747,13 +6749,13 @@ fn genBinOp(
},
9...16 => switch (air_tag) {
.add,
- .addwrap,
+ .add_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_w, .add } else null,
.sub,
- .subwrap,
+ .sub_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_w, .sub } else null,
.mul,
- .mulwrap,
+ .mul_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_w, .mull } else null,
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
@@ -6773,13 +6775,13 @@ fn genBinOp(
32 => switch (lhs_ty.vectorLen(mod)) {
1...4 => switch (air_tag) {
.add,
- .addwrap,
+ .add_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_d, .add } else .{ .p_d, .add },
.sub,
- .subwrap,
+ .sub_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_d, .sub } else .{ .p_d, .sub },
.mul,
- .mulwrap,
+ .mul_wrap,
=> if (self.hasFeature(.avx))
.{ .vp_d, .mull }
else if (self.hasFeature(.sse4_1))
@@ -6821,13 +6823,13 @@ fn genBinOp(
},
5...8 => switch (air_tag) {
.add,
- .addwrap,
+ .add_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_d, .add } else null,
.sub,
- .subwrap,
+ .sub_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_d, .sub } else null,
.mul,
- .mulwrap,
+ .mul_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_d, .mull } else null,
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
@@ -6847,10 +6849,10 @@ fn genBinOp(
64 => switch (lhs_ty.vectorLen(mod)) {
1...2 => switch (air_tag) {
.add,
- .addwrap,
+ .add_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_q, .add } else .{ .p_q, .add },
.sub,
- .subwrap,
+ .sub_wrap,
=> if (self.hasFeature(.avx)) .{ .vp_q, .sub } else .{ .p_q, .sub },
.bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
@@ -6859,10 +6861,10 @@ fn genBinOp(
},
3...4 => switch (air_tag) {
.add,
- .addwrap,
+ .add_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_q, .add } else null,
.sub,
- .subwrap,
+ .sub_wrap,
=> if (self.hasFeature(.avx2)) .{ .vp_q, .sub } else null,
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
@@ -7174,7 +7176,7 @@ fn genBinOp(
}
switch (air_tag) {
- .add, .addwrap, .sub, .subwrap, .mul, .mulwrap, .div_float, .div_exact => {},
+ .add, .add_wrap, .sub, .sub_wrap, .mul, .mul_wrap, .div_float, .div_exact => {},
.div_trunc, .div_floor => if (self.hasFeature(.sse4_1)) try self.genRound(
lhs_ty,
dst_reg,
src/codegen/c.zig
@@ -2860,9 +2860,9 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.div_floor => try airBinBuiltinCall(f, inst, "div_floor", .none),
.mod => try airBinBuiltinCall(f, inst, "mod", .none),
- .addwrap => try airBinBuiltinCall(f, inst, "addw", .bits),
- .subwrap => try airBinBuiltinCall(f, inst, "subw", .bits),
- .mulwrap => try airBinBuiltinCall(f, inst, "mulw", .bits),
+ .add_wrap => try airBinBuiltinCall(f, inst, "addw", .bits),
+ .sub_wrap => try airBinBuiltinCall(f, inst, "subw", .bits),
+ .mul_wrap => try airBinBuiltinCall(f, inst, "mulw", .bits),
.add_sat => try airBinBuiltinCall(f, inst, "adds", .bits),
.sub_sat => try airBinBuiltinCall(f, inst, "subs", .bits),
@@ -3048,11 +3048,8 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.wasm_memory_grow => try airWasmMemoryGrow(f, inst),
.add_optimized,
- .addwrap_optimized,
.sub_optimized,
- .subwrap_optimized,
.mul_optimized,
- .mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
@@ -3071,6 +3068,11 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.int_from_float_optimized,
=> return f.fail("TODO implement optimized float mode", .{}),
+ .add_safe,
+ .sub_safe,
+ .mul_safe,
+ => return f.fail("TODO implement safety_checked_instructions", .{}),
+
.is_named_enum_value => return f.fail("TODO: C backend: implement is_named_enum_value", .{}),
.error_set_has_value => return f.fail("TODO: C backend: implement error_set_has_value", .{}),
.vector_store_elem => return f.fail("TODO: C backend: implement vector_store_elem", .{}),
src/codegen/llvm.zig
@@ -377,6 +377,9 @@ pub const Object = struct {
/// name collision.
extern_collisions: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void),
+ /// Memoizes a null `?usize` value.
+ null_opt_addr: ?*llvm.Value,
+
pub const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, *llvm.Type);
/// This is an ArrayHashMap as opposed to a HashMap because in `flushModule` we
@@ -532,6 +535,7 @@ pub const Object = struct {
.di_type_map = .{},
.error_name_table = null,
.extern_collisions = .{},
+ .null_opt_addr = null,
};
}
@@ -2416,6 +2420,35 @@ pub const Object = struct {
return buffer.toOwnedSliceSentinel(0);
}
+ fn getNullOptAddr(o: *Object) !*llvm.Value {
+ if (o.null_opt_addr) |global| return global;
+
+ const mod = o.module;
+ const target = mod.getTarget();
+ const ty = try mod.intern(.{ .opt_type = .usize_type });
+ const null_opt_usize = try mod.intern(.{ .opt = .{
+ .ty = ty,
+ .val = .none,
+ } });
+
+ const llvm_init = try o.lowerValue(.{
+ .ty = ty.toType(),
+ .val = null_opt_usize.toValue(),
+ });
+ const global = o.llvm_module.addGlobalInAddressSpace(
+ llvm_init.typeOf(),
+ "",
+ toLlvmGlobalAddressSpace(.generic, target),
+ );
+ global.setLinkage(.Internal);
+ global.setUnnamedAddr(.True);
+ global.setAlignment(ty.toType().abiAlignment(mod));
+ global.setInitializer(llvm_init);
+
+ o.null_opt_addr = global;
+ return global;
+ }
+
/// If the llvm function does not exist, create it.
/// Note that this can be called before the function's semantic analysis has
/// completed, so if any attributes rely on that, they must be done in updateFunc, not here.
@@ -3141,8 +3174,8 @@ pub const Object = struct {
.func => |func| mod.funcPtr(func.index).owner_decl,
else => unreachable,
};
- const fn_decl = o.module.declPtr(fn_decl_index);
- try o.module.markDeclAlive(fn_decl);
+ const fn_decl = mod.declPtr(fn_decl_index);
+ try mod.markDeclAlive(fn_decl);
return o.resolveLlvmFunction(fn_decl_index);
},
.int => {
@@ -3682,11 +3715,12 @@ pub const Object = struct {
}
fn lowerIntAsPtr(o: *Object, val: Value) Error!*llvm.Value {
- switch (o.module.intern_pool.indexToKey(val.toIntern())) {
+ const mod = o.module;
+ switch (mod.intern_pool.indexToKey(val.toIntern())) {
.undef => return o.context.pointerType(0).getUndef(),
.int => {
var bigint_space: Value.BigIntSpace = undefined;
- const bigint = val.toBigInt(&bigint_space, o.module);
+ const bigint = val.toBigInt(&bigint_space, mod);
const llvm_int = lowerBigInt(o, Type.usize, bigint);
return llvm_int.constIntToPtr(o.context.pointerType(0));
},
@@ -4306,15 +4340,25 @@ pub const FuncGen = struct {
const opt_value: ?*llvm.Value = switch (air_tags[inst]) {
// zig fmt: off
- .add => try self.airAdd(inst, false),
- .addwrap => try self.airAddWrap(inst, false),
- .add_sat => try self.airAddSat(inst),
- .sub => try self.airSub(inst, false),
- .subwrap => try self.airSubWrap(inst, false),
- .sub_sat => try self.airSubSat(inst),
- .mul => try self.airMul(inst, false),
- .mulwrap => try self.airMulWrap(inst, false),
- .mul_sat => try self.airMulSat(inst),
+ .add => try self.airAdd(inst, false),
+ .add_optimized => try self.airAdd(inst, true),
+ .add_wrap => try self.airAddWrap(inst),
+ .add_sat => try self.airAddSat(inst),
+
+ .sub => try self.airSub(inst, false),
+ .sub_optimized => try self.airSub(inst, true),
+ .sub_wrap => try self.airSubWrap(inst),
+ .sub_sat => try self.airSubSat(inst),
+
+ .mul => try self.airMul(inst, false),
+ .mul_optimized => try self.airMul(inst, true),
+ .mul_wrap => try self.airMulWrap(inst),
+ .mul_sat => try self.airMulSat(inst),
+
+ .add_safe => try self.airSafeArithmetic(inst, "llvm.sadd.with.overflow", "llvm.uadd.with.overflow"),
+ .sub_safe => try self.airSafeArithmetic(inst, "llvm.ssub.with.overflow", "llvm.usub.with.overflow"),
+ .mul_safe => try self.airSafeArithmetic(inst, "llvm.smul.with.overflow", "llvm.umul.with.overflow"),
+
.div_float => try self.airDivFloat(inst, false),
.div_trunc => try self.airDivTrunc(inst, false),
.div_floor => try self.airDivFloor(inst, false),
@@ -4331,12 +4375,6 @@ pub const FuncGen = struct {
.slice => try self.airSlice(inst),
.mul_add => try self.airMulAdd(inst),
- .add_optimized => try self.airAdd(inst, true),
- .addwrap_optimized => try self.airAddWrap(inst, true),
- .sub_optimized => try self.airSub(inst, true),
- .subwrap_optimized => try self.airSubWrap(inst, true),
- .mul_optimized => try self.airMul(inst, true),
- .mulwrap_optimized => try self.airMulWrap(inst, true),
.div_float_optimized => try self.airDivFloat(inst, true),
.div_trunc_optimized => try self.airDivTrunc(inst, true),
.div_floor_optimized => try self.airDivFloor(inst, true),
@@ -4859,6 +4897,48 @@ pub const FuncGen = struct {
}
}
+ fn buildSimplePanic(fg: *FuncGen, panic_id: Module.PanicId) !void {
+ const o = fg.dg.object;
+ const mod = o.module;
+ const msg_decl_index = mod.panic_messages[@intFromEnum(panic_id)].unwrap().?;
+ const msg_decl = mod.declPtr(msg_decl_index);
+ const msg_len = msg_decl.ty.childType(mod).arrayLen(mod);
+ const msg_ptr = try o.lowerValue(.{
+ .ty = msg_decl.ty,
+ .val = msg_decl.val,
+ });
+ const null_opt_addr_global = try o.getNullOptAddr();
+ const target = mod.getTarget();
+ const llvm_usize = fg.context.intType(target.ptrBitWidth());
+ // example:
+ // call fastcc void @test2.panic(
+ // ptr @builtin.panic_messages.integer_overflow__anon_987, ; msg.ptr
+ // i64 16, ; msg.len
+ // ptr null, ; stack trace
+ // ptr @2, ; addr (null ?usize)
+ // )
+ const args = [4]*llvm.Value{
+ msg_ptr,
+ llvm_usize.constInt(msg_len, .False),
+ fg.context.pointerType(0).constNull(),
+ null_opt_addr_global,
+ };
+ const panic_func = mod.funcPtrUnwrap(mod.panic_func_index).?;
+ const panic_decl = mod.declPtr(panic_func.owner_decl);
+ const fn_info = mod.typeToFunc(panic_decl.ty).?;
+ const panic_global = try o.resolveLlvmFunction(panic_func.owner_decl);
+ _ = fg.builder.buildCall(
+ try o.lowerType(panic_decl.ty),
+ panic_global,
+ &args,
+ args.len,
+ toLlvmCallConv(fn_info.cc, target),
+ .Auto,
+ "",
+ );
+ _ = fg.builder.buildUnreachable();
+ }
+
fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const o = self.dg.object;
const mod = o.module;
@@ -6945,9 +7025,55 @@ pub const FuncGen = struct {
return self.builder.buildNUWAdd(lhs, rhs, "");
}
- fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- self.builder.setFastMath(want_fast_math);
+ fn airSafeArithmetic(
+ fg: *FuncGen,
+ inst: Air.Inst.Index,
+ signed_intrinsic: []const u8,
+ unsigned_intrinsic: []const u8,
+ ) !?*llvm.Value {
+ const o = fg.dg.object;
+ const mod = o.module;
+ const bin_op = fg.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try fg.resolveInst(bin_op.lhs);
+ const rhs = try fg.resolveInst(bin_op.rhs);
+ const inst_ty = fg.typeOfIndex(inst);
+ const scalar_ty = inst_ty.scalarType(mod);
+ const is_scalar = scalar_ty.ip_index == inst_ty.ip_index;
+
+ const intrinsic_name = switch (scalar_ty.isSignedInt(mod)) {
+ true => signed_intrinsic,
+ false => unsigned_intrinsic,
+ };
+ const llvm_inst_ty = try o.lowerType(inst_ty);
+ const llvm_fn = fg.getIntrinsic(intrinsic_name, &.{llvm_inst_ty});
+ const result_struct = fg.builder.buildCall(
+ llvm_fn.globalGetValueType(),
+ llvm_fn,
+ &[_]*llvm.Value{ lhs, rhs },
+ 2,
+ .Fast,
+ .Auto,
+ "",
+ );
+ const overflow_bit = fg.builder.buildExtractValue(result_struct, 1, "");
+ const scalar_overflow_bit = switch (is_scalar) {
+ true => overflow_bit,
+ false => fg.builder.buildOrReduce(overflow_bit),
+ };
+
+ const fail_block = fg.context.appendBasicBlock(fg.llvm_func, "OverflowFail");
+ const ok_block = fg.context.appendBasicBlock(fg.llvm_func, "OverflowOk");
+ _ = fg.builder.buildCondBr(scalar_overflow_bit, fail_block, ok_block);
+
+ fg.builder.positionBuilderAtEnd(fail_block);
+ try fg.buildSimplePanic(.integer_overflow);
+
+ fg.builder.positionBuilderAtEnd(ok_block);
+ return fg.builder.buildExtractValue(result_struct, 0, "");
+ }
+
+ fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -6986,9 +7112,7 @@ pub const FuncGen = struct {
return self.builder.buildNUWSub(lhs, rhs, "");
}
- fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- self.builder.setFastMath(want_fast_math);
-
+ fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -7026,9 +7150,7 @@ pub const FuncGen = struct {
return self.builder.buildNUWMul(lhs, rhs, "");
}
- fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
- self.builder.setFastMath(want_fast_math);
-
+ fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
src/codegen/spirv.zig
@@ -1703,9 +1703,9 @@ pub const DeclGen = struct {
const air_tags = self.air.instructions.items(.tag);
const maybe_result_id: ?IdRef = switch (air_tags[inst]) {
// zig fmt: off
- .add, .addwrap => try self.airArithOp(inst, .OpFAdd, .OpIAdd, .OpIAdd, true),
- .sub, .subwrap => try self.airArithOp(inst, .OpFSub, .OpISub, .OpISub, true),
- .mul, .mulwrap => try self.airArithOp(inst, .OpFMul, .OpIMul, .OpIMul, true),
+ .add, .add_wrap => try self.airArithOp(inst, .OpFAdd, .OpIAdd, .OpIAdd, true),
+ .sub, .sub_wrap => try self.airArithOp(inst, .OpFSub, .OpISub, .OpISub, true),
+ .mul, .mul_wrap => try self.airArithOp(inst, .OpFMul, .OpIMul, .OpIMul, true),
.div_float,
.div_float_optimized,
src/Liveness/Verify.zig
@@ -198,19 +198,19 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
// binary
.add,
+ .add_safe,
.add_optimized,
- .addwrap,
- .addwrap_optimized,
+ .add_wrap,
.add_sat,
.sub,
+ .sub_safe,
.sub_optimized,
- .subwrap,
- .subwrap_optimized,
+ .sub_wrap,
.sub_sat,
.mul,
+ .mul_safe,
.mul_optimized,
- .mulwrap,
- .mulwrap_optimized,
+ .mul_wrap,
.mul_sat,
.div_float,
.div_float_optimized,
src/Air.zig
@@ -40,15 +40,25 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
add,
- /// Same as `add` with optimized float mode.
+ /// Integer addition. Wrapping is a safety panic.
+ /// Both operands are guaranteed to be the same type, and the result type
+ /// is the same as both operands.
+ /// The panic handler function must be populated before lowering AIR
+ /// that contains this instruction.
+ /// This instruction will only be emitted if the backend has the
+ /// feature `safety_checked_instructions`.
+ /// Uses the `bin_op` field.
+ add_safe,
+ /// Float addition. The instruction is allowed to have equal or more
+ /// mathematical accuracy than strict IEEE-754 float addition.
+ /// If either operand is NaN, the result value is undefined.
+ /// Uses the `bin_op` field.
add_optimized,
- /// Integer addition. Wrapping is defined to be twos complement wrapping.
+ /// Twos complement wrapping integer addition.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
- addwrap,
- /// Same as `addwrap` with optimized float mode.
- addwrap_optimized,
+ add_wrap,
/// Saturating integer addition.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
@@ -59,15 +69,25 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
sub,
- /// Same as `sub` with optimized float mode.
+ /// Integer subtraction. Wrapping is a safety panic.
+ /// Both operands are guaranteed to be the same type, and the result type
+ /// is the same as both operands.
+ /// The panic handler function must be populated before lowering AIR
+ /// that contains this instruction.
+ /// This instruction will only be emitted if the backend has the
+ /// feature `safety_checked_instructions`.
+ /// Uses the `bin_op` field.
+ sub_safe,
+ /// Float subtraction. The instruction is allowed to have equal or more
+ /// mathematical accuracy than strict IEEE-754 float subtraction.
+ /// If either operand is NaN, the result value is undefined.
+ /// Uses the `bin_op` field.
sub_optimized,
- /// Integer subtraction. Wrapping is defined to be twos complement wrapping.
+ /// Twos complement wrapping integer subtraction.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
- subwrap,
- /// Same as `sub` with optimized float mode.
- subwrap_optimized,
+ sub_wrap,
/// Saturating integer subtraction.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
@@ -78,15 +98,25 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
mul,
- /// Same as `mul` with optimized float mode.
+ /// Integer multiplication. Wrapping is a safety panic.
+ /// Both operands are guaranteed to be the same type, and the result type
+ /// is the same as both operands.
+ /// The panic handler function must be populated before lowering AIR
+ /// that contains this instruction.
+ /// This instruction will only be emitted if the backend has the
+ /// feature `safety_checked_instructions`.
+ /// Uses the `bin_op` field.
+ mul_safe,
+ /// Float multiplication. The instruction is allowed to have equal or more
+ /// mathematical accuracy than strict IEEE-754 float multiplication.
+ /// If either operand is NaN, the result value is undefined.
+ /// Uses the `bin_op` field.
mul_optimized,
- /// Integer multiplication. Wrapping is defined to be twos complement wrapping.
+ /// Twos complement wrapping integer multiplication.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
- mulwrap,
- /// Same as `mulwrap` with optimized float mode.
- mulwrap_optimized,
+ mul_wrap,
/// Saturating integer multiplication.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
@@ -1197,13 +1227,16 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
const datas = air.instructions.items(.data);
switch (air.instructions.items(.tag)[inst]) {
.add,
- .addwrap,
+ .add_safe,
+ .add_wrap,
.add_sat,
.sub,
- .subwrap,
+ .sub_safe,
+ .sub_wrap,
.sub_sat,
.mul,
- .mulwrap,
+ .mul_safe,
+ .mul_wrap,
.mul_sat,
.div_float,
.div_trunc,
@@ -1224,11 +1257,8 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.bool_and,
.bool_or,
.add_optimized,
- .addwrap_optimized,
.sub_optimized,
- .subwrap_optimized,
.mul_optimized,
- .mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
@@ -1594,19 +1624,19 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
=> true,
.add,
+ .add_safe,
.add_optimized,
- .addwrap,
- .addwrap_optimized,
+ .add_wrap,
.add_sat,
.sub,
+ .sub_safe,
.sub_optimized,
- .subwrap,
- .subwrap_optimized,
+ .sub_wrap,
.sub_sat,
.mul,
+ .mul_safe,
.mul_optimized,
- .mulwrap,
- .mulwrap_optimized,
+ .mul_wrap,
.mul_sat,
.div_float,
.div_float_optimized,
src/Liveness.zig
@@ -232,14 +232,20 @@ pub fn categorizeOperand(
const operand_ref = Air.indexToRef(operand);
switch (air_tags[inst]) {
.add,
- .addwrap,
+ .add_safe,
+ .add_wrap,
.add_sat,
+ .add_optimized,
.sub,
- .subwrap,
+ .sub_safe,
+ .sub_wrap,
.sub_sat,
+ .sub_optimized,
.mul,
- .mulwrap,
+ .mul_safe,
+ .mul_wrap,
.mul_sat,
+ .mul_optimized,
.div_float,
.div_trunc,
.div_floor,
@@ -267,12 +273,6 @@ pub fn categorizeOperand(
.shr_exact,
.min,
.max,
- .add_optimized,
- .addwrap_optimized,
- .sub_optimized,
- .subwrap_optimized,
- .mul_optimized,
- .mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
@@ -886,19 +886,19 @@ fn analyzeInst(
switch (inst_tags[inst]) {
.add,
+ .add_safe,
.add_optimized,
- .addwrap,
- .addwrap_optimized,
+ .add_wrap,
.add_sat,
.sub,
+ .sub_safe,
.sub_optimized,
- .subwrap,
- .subwrap_optimized,
+ .sub_wrap,
.sub_sat,
.mul,
+ .mul_safe,
.mul_optimized,
- .mulwrap,
- .mulwrap_optimized,
+ .mul_wrap,
.mul_sat,
.div_float,
.div_float_optimized,
src/Module.zig
@@ -187,6 +187,40 @@ reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct {
src: LazySrcLoc,
}) = .{},
+panic_messages: [PanicId.len]Decl.OptionalIndex = .{.none} ** PanicId.len,
+panic_func_index: Fn.OptionalIndex = .none,
+null_stack_trace: InternPool.Index = .none,
+
+pub const PanicId = enum {
+ unreach,
+ unwrap_null,
+ cast_to_null,
+ incorrect_alignment,
+ invalid_error_code,
+ cast_truncated_data,
+ negative_to_unsigned,
+ integer_overflow,
+ shl_overflow,
+ shr_overflow,
+ divide_by_zero,
+ exact_division_remainder,
+ inactive_union_field,
+ integer_part_out_of_bounds,
+ corrupt_switch,
+ shift_rhs_too_big,
+ invalid_enum_value,
+ sentinel_mismatch,
+ unwrap_error,
+ index_out_of_bounds,
+ start_index_greater_than_end,
+ for_len_mismatch,
+ memcpy_len_mismatch,
+ memcpy_alias,
+ noreturn_returned,
+
+ pub const len = @typeInfo(PanicId).Enum.fields.len;
+};
+
pub const GlobalErrorSet = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);
pub const CImportError = struct {
@@ -6651,6 +6685,14 @@ pub const Feature = enum {
is_named_enum_value,
error_set_has_value,
field_reordering,
+ /// When this feature is supported, the backend supports the following AIR instructions:
+ /// * `Air.Inst.Tag.add_safe`
+ /// * `Air.Inst.Tag.sub_safe`
+ /// * `Air.Inst.Tag.mul_safe`
+ /// The motivation for this feature is that it makes AIR smaller, and makes it easier
+ /// to generate better machine code in the backends. All backends should migrate to
+ /// enabling this feature.
+ safety_checked_instructions,
};
pub fn backendSupportsFeature(mod: Module, feature: Feature) bool {
@@ -6665,6 +6707,7 @@ pub fn backendSupportsFeature(mod: Module, feature: Feature) bool {
.is_named_enum_value => mod.comp.bin_file.options.use_llvm,
.error_set_has_value => mod.comp.bin_file.options.use_llvm or mod.comp.bin_file.options.target.isWasm(),
.field_reordering => mod.comp.bin_file.options.use_llvm,
+ .safety_checked_instructions => mod.comp.bin_file.options.use_llvm,
};
}
src/print_air.zig
@@ -114,13 +114,19 @@ const Writer = struct {
});
switch (tag) {
.add,
- .addwrap,
+ .add_optimized,
+ .add_safe,
+ .add_wrap,
.add_sat,
.sub,
- .subwrap,
+ .sub_optimized,
+ .sub_safe,
+ .sub_wrap,
.sub_sat,
.mul,
- .mulwrap,
+ .mul_optimized,
+ .mul_safe,
+ .mul_wrap,
.mul_sat,
.div_float,
.div_trunc,
@@ -152,12 +158,6 @@ const Writer = struct {
.set_union_tag,
.min,
.max,
- .add_optimized,
- .addwrap_optimized,
- .sub_optimized,
- .subwrap_optimized,
- .mul_optimized,
- .mulwrap_optimized,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
src/Sema.zig
@@ -9674,7 +9674,7 @@ fn intCast(
const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_scalar_ty);
const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar);
const dest_max = try sema.addConstant(dest_max_val);
- const diff = try block.addBinOp(.subwrap, dest_max, operand);
+ const diff = try block.addBinOp(.sub_wrap, dest_max, operand);
if (actual_info.signedness == .signed) {
// Reinterpret the sign-bit as part of the value. This will make
@@ -15113,7 +15113,11 @@ fn analyzeArithmetic(
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
- const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: {
+ const rs: struct {
+ src: LazySrcLoc,
+ air_tag: Air.Inst.Tag,
+ air_tag_safe: Air.Inst.Tag,
+ } = rs: {
switch (zir_tag) {
.add, .add_unsafe => {
// For integers:intAddSat
@@ -15162,8 +15166,8 @@ fn analyzeArithmetic(
try Value.floatAdd(lhs_val, rhs_val, resolved_type, sema.arena, mod),
);
}
- } else break :rs .{ .src = rhs_src, .air_tag = air_tag };
- } else break :rs .{ .src = lhs_src, .air_tag = air_tag };
+ } else break :rs .{ .src = rhs_src, .air_tag = air_tag, .air_tag_safe = .add_safe };
+ } else break :rs .{ .src = lhs_src, .air_tag = air_tag, .air_tag_safe = .add_safe };
},
.addwrap => {
// Integers only; floats are checked above.
@@ -15174,7 +15178,6 @@ fn analyzeArithmetic(
return casted_rhs;
}
}
- const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .addwrap_optimized else .addwrap;
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
@@ -15186,8 +15189,8 @@ fn analyzeArithmetic(
return sema.addConstant(
try sema.numberAddWrapScalar(lhs_val, rhs_val, resolved_type),
);
- } else break :rs .{ .src = lhs_src, .air_tag = air_tag };
- } else break :rs .{ .src = rhs_src, .air_tag = air_tag };
+ } else break :rs .{ .src = lhs_src, .air_tag = .add_wrap, .air_tag_safe = .add_wrap };
+ } else break :rs .{ .src = rhs_src, .air_tag = .add_wrap, .air_tag_safe = .add_wrap };
},
.add_sat => {
// Integers only; floats are checked above.
@@ -15212,8 +15215,16 @@ fn analyzeArithmetic(
try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, mod);
return sema.addConstant(val);
- } else break :rs .{ .src = lhs_src, .air_tag = .add_sat };
- } else break :rs .{ .src = rhs_src, .air_tag = .add_sat };
+ } else break :rs .{
+ .src = lhs_src,
+ .air_tag = .add_sat,
+ .air_tag_safe = .add_sat,
+ };
+ } else break :rs .{
+ .src = rhs_src,
+ .air_tag = .add_sat,
+ .air_tag_safe = .add_sat,
+ };
},
.sub => {
// For integers:
@@ -15257,8 +15268,8 @@ fn analyzeArithmetic(
try Value.floatSub(lhs_val, rhs_val, resolved_type, sema.arena, mod),
);
}
- } else break :rs .{ .src = rhs_src, .air_tag = air_tag };
- } else break :rs .{ .src = lhs_src, .air_tag = air_tag };
+ } else break :rs .{ .src = rhs_src, .air_tag = air_tag, .air_tag_safe = .sub_safe };
+ } else break :rs .{ .src = lhs_src, .air_tag = air_tag, .air_tag_safe = .sub_safe };
},
.subwrap => {
// Integers only; floats are checked above.
@@ -15272,7 +15283,6 @@ fn analyzeArithmetic(
return casted_lhs;
}
}
- const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .subwrap_optimized else .subwrap;
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
@@ -15281,8 +15291,8 @@ fn analyzeArithmetic(
return sema.addConstant(
try sema.numberSubWrapScalar(lhs_val, rhs_val, resolved_type),
);
- } else break :rs .{ .src = rhs_src, .air_tag = air_tag };
- } else break :rs .{ .src = lhs_src, .air_tag = air_tag };
+ } else break :rs .{ .src = rhs_src, .air_tag = .sub_wrap, .air_tag_safe = .sub_wrap };
+ } else break :rs .{ .src = lhs_src, .air_tag = .sub_wrap, .air_tag_safe = .sub_wrap };
},
.sub_sat => {
// Integers only; floats are checked above.
@@ -15307,8 +15317,8 @@ fn analyzeArithmetic(
try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, mod);
return sema.addConstant(val);
- } else break :rs .{ .src = rhs_src, .air_tag = .sub_sat };
- } else break :rs .{ .src = lhs_src, .air_tag = .sub_sat };
+ } else break :rs .{ .src = rhs_src, .air_tag = .sub_sat, .air_tag_safe = .sub_sat };
+ } else break :rs .{ .src = lhs_src, .air_tag = .sub_sat, .air_tag_safe = .sub_sat };
},
.mul => {
// For integers:
@@ -15406,8 +15416,8 @@ fn analyzeArithmetic(
try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, mod),
);
}
- } else break :rs .{ .src = lhs_src, .air_tag = air_tag };
- } else break :rs .{ .src = rhs_src, .air_tag = air_tag };
+ } else break :rs .{ .src = lhs_src, .air_tag = air_tag, .air_tag_safe = .mul_safe };
+ } else break :rs .{ .src = rhs_src, .air_tag = air_tag, .air_tag_safe = .mul_safe };
},
.mulwrap => {
// Integers only; floats are handled above.
@@ -15435,7 +15445,6 @@ fn analyzeArithmetic(
}
}
}
- const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mulwrap_optimized else .mulwrap;
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
@@ -15454,8 +15463,8 @@ fn analyzeArithmetic(
return sema.addConstant(
try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, mod),
);
- } else break :rs .{ .src = lhs_src, .air_tag = air_tag };
- } else break :rs .{ .src = rhs_src, .air_tag = air_tag };
+ } else break :rs .{ .src = lhs_src, .air_tag = .mul_wrap, .air_tag_safe = .mul_wrap };
+ } else break :rs .{ .src = rhs_src, .air_tag = .mul_wrap, .air_tag_safe = .mul_wrap };
},
.mul_sat => {
// Integers only; floats are checked above.
@@ -15505,16 +15514,19 @@ fn analyzeArithmetic(
try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, mod);
return sema.addConstant(val);
- } else break :rs .{ .src = lhs_src, .air_tag = .mul_sat };
- } else break :rs .{ .src = rhs_src, .air_tag = .mul_sat };
+ } else break :rs .{ .src = lhs_src, .air_tag = .mul_sat, .air_tag_safe = .mul_sat };
+ } else break :rs .{ .src = rhs_src, .air_tag = .mul_sat, .air_tag_safe = .mul_sat };
},
else => unreachable,
}
};
try sema.requireRuntimeBlock(block, src, rs.src);
- if (block.wantSafety() and want_safety) {
- if (scalar_tag == .Int) {
+ if (block.wantSafety() and want_safety and scalar_tag == .Int) {
+ if (mod.backendSupportsFeature(.safety_checked_instructions)) {
+ _ = try sema.preparePanicId(block, .integer_overflow);
+ return block.addBinOp(rs.air_tag_safe, casted_lhs, casted_rhs);
+ } else {
const maybe_op_ov: ?Air.Inst.Tag = switch (rs.air_tag) {
.add => .add_with_overflow,
.sub => .sub_with_overflow,
@@ -24743,39 +24755,67 @@ fn explainWhyTypeIsNotPacked(
}
}
-pub const PanicId = enum {
- unreach,
- unwrap_null,
- cast_to_null,
- incorrect_alignment,
- invalid_error_code,
- cast_truncated_data,
- negative_to_unsigned,
- integer_overflow,
- shl_overflow,
- shr_overflow,
- divide_by_zero,
- exact_division_remainder,
- inactive_union_field,
- integer_part_out_of_bounds,
- corrupt_switch,
- shift_rhs_too_big,
- invalid_enum_value,
- sentinel_mismatch,
- unwrap_error,
- index_out_of_bounds,
- start_index_greater_than_end,
- for_len_mismatch,
- memcpy_len_mismatch,
- memcpy_alias,
- noreturn_returned,
-};
+fn prepareSimplePanic(sema: *Sema, block: *Block) !void {
+ const mod = sema.mod;
+
+ if (mod.panic_func_index == .none) {
+ const decl_index = (try sema.getBuiltinDecl(block, "panic"));
+ // decl_index may be an alias; we must find the decl that actually
+ // owns the function.
+ try sema.ensureDeclAnalyzed(decl_index);
+ const tv = try mod.declPtr(decl_index).typedValue();
+ assert(tv.ty.zigTypeTag(mod) == .Fn);
+ assert(try sema.fnHasRuntimeBits(tv.ty));
+ const func_index = mod.intern_pool.indexToFunc(tv.val.toIntern()).unwrap().?;
+ try mod.ensureFuncBodyAnalysisQueued(func_index);
+ mod.panic_func_index = func_index.toOptional();
+ }
+
+ if (mod.null_stack_trace == .none) {
+ const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
+ const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
+ const target = mod.getTarget();
+ const ptr_stack_trace_ty = try mod.ptrType(.{
+ .child = stack_trace_ty.toIntern(),
+ .flags = .{
+ .address_space = target_util.defaultAddressSpace(target, .global_constant),
+ },
+ });
+ const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern());
+ mod.null_stack_trace = try mod.intern(.{ .opt = .{
+ .ty = opt_ptr_stack_trace_ty.toIntern(),
+ .val = .none,
+ } });
+ }
+}
+
+/// Backends depend on panic decls being available when lowering safety-checked
+/// instructions. This function ensures the panic function will be available to
+/// be called during that time.
+fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !Module.Decl.Index {
+ const mod = sema.mod;
+ const gpa = sema.gpa;
+ if (mod.panic_messages[@intFromEnum(panic_id)].unwrap()) |x| return x;
+
+ try sema.prepareSimplePanic(block);
+
+ const panic_messages_ty = try sema.getBuiltinType("panic_messages");
+ const msg_decl_index = (try sema.namespaceLookup(
+ block,
+ sema.src,
+ panic_messages_ty.getNamespaceIndex(mod).unwrap().?,
+ try mod.intern_pool.getOrPutString(gpa, @tagName(panic_id)),
+ )).?;
+ try sema.ensureDeclAnalyzed(msg_decl_index);
+ mod.panic_messages[@intFromEnum(panic_id)] = msg_decl_index.toOptional();
+ return msg_decl_index;
+}
fn addSafetyCheck(
sema: *Sema,
parent_block: *Block,
ok: Air.Inst.Ref,
- panic_id: PanicId,
+ panic_id: Module.PanicId,
) !void {
const gpa = sema.gpa;
assert(!parent_block.is_comptime);
@@ -24852,32 +24892,19 @@ fn addSafetyCheckExtra(
parent_block.instructions.appendAssumeCapacity(block_inst);
}
-fn panicWithMsg(
- sema: *Sema,
- block: *Block,
- msg_inst: Air.Inst.Ref,
-) !void {
+fn panicWithMsg(sema: *Sema, block: *Block, msg_inst: Air.Inst.Ref) !void {
const mod = sema.mod;
if (!mod.backendSupportsFeature(.panic_fn)) {
_ = try block.addNoOp(.trap);
return;
}
- const panic_fn = try sema.getBuiltin("panic");
- const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
- const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
- const target = mod.getTarget();
- const ptr_stack_trace_ty = try mod.ptrType(.{
- .child = stack_trace_ty.toIntern(),
- .flags = .{
- .address_space = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic
- },
- });
- const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern());
- const null_stack_trace = try sema.addConstant((try mod.intern(.{ .opt = .{
- .ty = opt_ptr_stack_trace_ty.toIntern(),
- .val = .none,
- } })).toValue());
+
+ try sema.prepareSimplePanic(block);
+
+ const panic_func = mod.funcPtrUnwrap(mod.panic_func_index).?;
+ const panic_fn = try sema.analyzeDeclVal(block, .unneeded, panic_func.owner_decl);
+ const null_stack_trace = try sema.addConstant(mod.null_stack_trace.toValue());
const opt_usize_ty = try mod.optionalType(.usize_type);
const null_ret_addr = try sema.addConstant((try mod.intern(.{ .opt = .{
@@ -25036,21 +25063,8 @@ fn safetyCheckFormatted(
try sema.addSafetyCheckExtra(parent_block, ok, &fail_block);
}
-fn safetyPanic(
- sema: *Sema,
- block: *Block,
- panic_id: PanicId,
-) CompileError!void {
- const mod = sema.mod;
- const gpa = sema.gpa;
- const panic_messages_ty = try sema.getBuiltinType("panic_messages");
- const msg_decl_index = (try sema.namespaceLookup(
- block,
- sema.src,
- panic_messages_ty.getNamespaceIndex(mod).unwrap().?,
- try mod.intern_pool.getOrPutString(gpa, @tagName(panic_id)),
- )).?;
-
+fn safetyPanic(sema: *Sema, block: *Block, panic_id: Module.PanicId) CompileError!void {
+ const msg_decl_index = try sema.preparePanicId(block, panic_id);
const msg_inst = try sema.analyzeDeclVal(block, sema.src, msg_decl_index);
try sema.panicWithMsg(block, msg_inst);
}
@@ -35022,6 +35036,7 @@ fn generateUnionTagTypeSimple(
fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref {
const gpa = sema.gpa;
+ const src = LazySrcLoc.nodeOffset(0);
var wip_captures = try WipCaptureScope.init(gpa, sema.owner_decl.src_scope);
defer wip_captures.deinit();
@@ -35040,6 +35055,14 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref {
block.instructions.deinit(gpa);
block.params.deinit(gpa);
}
+
+ const decl_index = try getBuiltinDecl(sema, &block, name);
+ return sema.analyzeDeclVal(&block, src, decl_index);
+}
+
+fn getBuiltinDecl(sema: *Sema, block: *Block, name: []const u8) CompileError!Module.Decl.Index {
+ const gpa = sema.gpa;
+
const src = LazySrcLoc.nodeOffset(0);
const mod = sema.mod;
@@ -35047,23 +35070,23 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref {
const std_pkg = mod.main_pkg.table.get("std").?;
const std_file = (mod.importPkg(std_pkg) catch unreachable).file;
const opt_builtin_inst = (try sema.namespaceLookupRef(
- &block,
+ block,
src,
mod.declPtr(std_file.root_decl.unwrap().?).src_namespace,
try ip.getOrPutString(gpa, "builtin"),
)) orelse @panic("lib/std.zig is corrupt and missing 'builtin'");
- const builtin_inst = try sema.analyzeLoad(&block, src, opt_builtin_inst, src);
- const builtin_ty = sema.analyzeAsType(&block, src, builtin_inst) catch |err| switch (err) {
+ const builtin_inst = try sema.analyzeLoad(block, src, opt_builtin_inst, src);
+ const builtin_ty = sema.analyzeAsType(block, src, builtin_inst) catch |err| switch (err) {
error.AnalysisFail => std.debug.panic("std.builtin is corrupt", .{}),
else => |e| return e,
};
- const opt_ty_decl = (try sema.namespaceLookup(
- &block,
+ const decl_index = (try sema.namespaceLookup(
+ block,
src,
builtin_ty.getNamespaceIndex(mod).unwrap().?,
try ip.getOrPutString(gpa, name),
)) orelse std.debug.panic("lib/std/builtin.zig is corrupt and missing '{s}'", .{name});
- return sema.analyzeDeclVal(&block, src, opt_ty_decl);
+ return decl_index;
}
fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type {