Commit 9aec2758cc
Changed files (38)
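The hunks below share one recurring pattern: ABI and codegen helpers that previously took a std.Target now take a *const Module, and recover the target from it only where a target-specific check remains. A minimal sketch of that new call-site shape, assuming it sits inside src/ next to the files below (the function name is illustrative, not part of the commit):

const Type = @import("type.zig").Type;
const Module = @import("Module.zig");

// Layout queries now go through the Module; the target is still reachable
// via mod.getTarget() for the few checks that genuinely need it.
fn exampleAbiSize(ty: Type, mod: *const Module) u64 {
    const target = mod.getTarget();
    _ = target; // only needed for target-specific branches such as isDarwin()
    return ty.abiSize(mod);
}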
src/arch/aarch64/abi.zig
@@ -4,6 +4,7 @@ const bits = @import("bits.zig");
const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../type.zig").Type;
+const Module = @import("../../Module.zig");
pub const Class = union(enum) {
memory,
@@ -14,40 +15,40 @@ pub const Class = union(enum) {
};
/// For `float_array` the second element will be the amount of floats.
-pub fn classifyType(ty: Type, target: std.Target) Class {
- std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());
+pub fn classifyType(ty: Type, mod: *const Module) Class {
+ std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));
var maybe_float_bits: ?u16 = null;
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Struct => {
if (ty.containerLayout() == .Packed) return .byval;
- const float_count = countFloats(ty, target, &maybe_float_bits);
+ const float_count = countFloats(ty, mod, &maybe_float_bits);
if (float_count <= sret_float_count) return .{ .float_array = float_count };
- const bit_size = ty.bitSize(target);
+ const bit_size = ty.bitSize(mod);
if (bit_size > 128) return .memory;
if (bit_size > 64) return .double_integer;
return .integer;
},
.Union => {
if (ty.containerLayout() == .Packed) return .byval;
- const float_count = countFloats(ty, target, &maybe_float_bits);
+ const float_count = countFloats(ty, mod, &maybe_float_bits);
if (float_count <= sret_float_count) return .{ .float_array = float_count };
- const bit_size = ty.bitSize(target);
+ const bit_size = ty.bitSize(mod);
if (bit_size > 128) return .memory;
if (bit_size > 64) return .double_integer;
return .integer;
},
.Int, .Enum, .ErrorSet, .Float, .Bool => return .byval,
.Vector => {
- const bit_size = ty.bitSize(target);
+ const bit_size = ty.bitSize(mod);
// TODO is this controlled by a cpu feature?
if (bit_size > 128) return .memory;
return .byval;
},
.Optional => {
- std.debug.assert(ty.isPtrLikeOptional());
+ std.debug.assert(ty.isPtrLikeOptional(mod));
return .byval;
},
.Pointer => {
@@ -73,14 +74,15 @@ pub fn classifyType(ty: Type, target: std.Target) Class {
}
const sret_float_count = 4;
-fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
+fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u8 {
+ const target = mod.getTarget();
const invalid = std.math.maxInt(u8);
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Union => {
const fields = ty.unionFields();
var max_count: u8 = 0;
for (fields.values()) |field| {
- const field_count = countFloats(field.ty, target, maybe_float_bits);
+ const field_count = countFloats(field.ty, mod, maybe_float_bits);
if (field_count == invalid) return invalid;
if (field_count > max_count) max_count = field_count;
if (max_count > sret_float_count) return invalid;
@@ -93,7 +95,7 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
var i: u32 = 0;
while (i < fields_len) : (i += 1) {
const field_ty = ty.structFieldType(i);
- const field_count = countFloats(field_ty, target, maybe_float_bits);
+ const field_count = countFloats(field_ty, mod, maybe_float_bits);
if (field_count == invalid) return invalid;
count += field_count;
if (count > sret_float_count) return invalid;
@@ -113,12 +115,12 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
}
}
-pub fn getFloatArrayType(ty: Type) ?Type {
- switch (ty.zigTypeTag()) {
+pub fn getFloatArrayType(ty: Type, mod: *const Module) ?Type {
+ switch (ty.zigTypeTag(mod)) {
.Union => {
const fields = ty.unionFields();
for (fields.values()) |field| {
- if (getFloatArrayType(field.ty)) |some| return some;
+ if (getFloatArrayType(field.ty, mod)) |some| return some;
}
return null;
},
@@ -127,7 +129,7 @@ pub fn getFloatArrayType(ty: Type) ?Type {
var i: u32 = 0;
while (i < fields_len) : (i += 1) {
const field_ty = ty.structFieldType(i);
- if (getFloatArrayType(field_ty)) |some| return some;
+ if (getFloatArrayType(field_ty, mod)) |some| return some;
}
return null;
},
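Callers of these classification helpers change in the same way; a hedged sketch of the new call shape (the wrapper function is illustrative and not part of the commit, with Type and Module imported as in the sketch above):

const abi = @import("arch/aarch64/abi.zig");

// classifyType and getFloatArrayType now both take the Module rather than the Target.
fn returnsAsFloatArray(ty: Type, mod: *const Module) bool {
    return switch (abi.classifyType(ty, mod)) {
        .float_array => abi.getFloatArrayType(ty, mod) != null,
        else => false,
    };
}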
src/arch/aarch64/CodeGen.zig
@@ -471,6 +471,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
}
fn gen(self: *Self) !void {
+ const mod = self.bin_file.options.module.?;
const cc = self.fn_type.fnCallingConvention();
if (cc != .Naked) {
// stp fp, lr, [sp, #-16]!
@@ -522,8 +523,8 @@ fn gen(self: *Self) !void {
const ty = self.air.typeOfIndex(inst);
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
- const abi_align = ty.abiAlignment(self.target.*);
+ const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_align = ty.abiAlignment(mod);
const stack_offset = try self.allocMem(abi_size, abi_align, inst);
try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -951,8 +952,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
tomb_bits >>= 1;
if (!dies) continue;
const op_int = @enumToInt(op);
- if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
- const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+ if (op_int < Air.ref_start_index) continue;
+ const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
self.processDeath(op_index);
}
const is_used = @truncate(u1, tomb_bits) == 0;
@@ -1026,31 +1027,31 @@ fn allocMem(
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
+ const mod = self.bin_file.options.module.?;
const elem_ty = self.air.typeOfIndex(inst).elemType();
- if (!elem_ty.hasRuntimeBits()) {
+ if (!elem_ty.hasRuntimeBits(mod)) {
// return the stack offset 0. Stack offset 0 will be where all
// zero-sized stack allocations live as non-zero-sized
// allocations will always have an offset > 0.
return @as(u32, 0);
}
- const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
- const mod = self.bin_file.options.module.?;
+ const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
// TODO swap this for inst.ty.ptrAlign
- const abi_align = elem_ty.abiAlignment(self.target.*);
+ const abi_align = elem_ty.abiAlignment(mod);
return self.allocMem(abi_size, abi_align, inst);
}
fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
- const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
- const mod = self.bin_file.options.module.?;
+ const mod = self.bin_file.options.module.?;
+ const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
- const abi_align = elem_ty.abiAlignment(self.target.*);
+ const abi_align = elem_ty.abiAlignment(mod);
if (reg_ok) {
// Make sure the type can fit in a register before we try to allocate one.
@@ -1177,13 +1178,14 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
+ const mod = self.bin_file.options.module.?;
const operand = ty_op.operand;
const operand_mcv = try self.resolveInst(operand);
const operand_ty = self.air.typeOf(operand);
- const operand_info = operand_ty.intInfo(self.target.*);
+ const operand_info = operand_ty.intInfo(mod);
const dest_ty = self.air.typeOfIndex(inst);
- const dest_info = dest_ty.intInfo(self.target.*);
+ const dest_info = dest_ty.intInfo(mod);
const result: MCValue = result: {
const operand_lock: ?RegisterLock = switch (operand_mcv) {
@@ -1257,8 +1259,9 @@ fn trunc(
operand_ty: Type,
dest_ty: Type,
) !MCValue {
- const info_a = operand_ty.intInfo(self.target.*);
- const info_b = dest_ty.intInfo(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const info_a = operand_ty.intInfo(mod);
+ const info_b = dest_ty.intInfo(mod);
if (info_b.bits <= 64) {
const operand_reg = switch (operand) {
@@ -1319,6 +1322,7 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
fn airNot(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const mod = self.bin_file.options.module.?;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
@@ -1327,7 +1331,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
.unreach => unreachable,
.compare_flags => |cond| break :result MCValue{ .compare_flags = cond.negate() },
else => {
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.Bool => {
// TODO convert this to mvn + and
const op_reg = switch (operand) {
@@ -1361,7 +1365,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
},
.Vector => return self.fail("TODO bitwise not for vectors", .{}),
.Int => {
- const int_info = operand_ty.intInfo(self.target.*);
+ const int_info = operand_ty.intInfo(mod);
if (int_info.bits <= 64) {
const op_reg = switch (operand) {
.register => |r| r,
@@ -1413,13 +1417,13 @@ fn minMax(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM min/max on floats", .{}),
.Vector => return self.fail("TODO ARM min/max on vectors", .{}),
.Int => {
- const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
var lhs_reg: Register = undefined;
var rhs_reg: Register = undefined;
@@ -1907,12 +1911,12 @@ fn addSub(
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO binary operations on floats", .{}),
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
const lhs_immediate = try lhs_bind.resolveToImmediate(self);
const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -1968,11 +1972,11 @@ fn mul(
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
// TODO add optimisations for multiplication
// with immediates, for example a * 2 can be
@@ -1999,7 +2003,8 @@ fn divFloat(
_ = rhs_ty;
_ = maybe_inst;
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO div_float", .{}),
.Vector => return self.fail("TODO div_float on vectors", .{}),
else => unreachable,
@@ -2015,12 +2020,12 @@ fn divTrunc(
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO div on floats", .{}),
.Vector => return self.fail("TODO div on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
switch (int_info.signedness) {
.signed => {
@@ -2049,12 +2054,12 @@ fn divFloor(
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO div on floats", .{}),
.Vector => return self.fail("TODO div on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
switch (int_info.signedness) {
.signed => {
@@ -2082,12 +2087,12 @@ fn divExact(
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO div on floats", .{}),
.Vector => return self.fail("TODO div on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
switch (int_info.signedness) {
.signed => {
@@ -2118,12 +2123,12 @@ fn rem(
_ = maybe_inst;
const mod = self.bin_file.options.module.?;
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO rem/mod on floats", .{}),
.Vector => return self.fail("TODO rem/mod on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
var lhs_reg: Register = undefined;
var rhs_reg: Register = undefined;
@@ -2188,7 +2193,8 @@ fn modulo(
_ = rhs_ty;
_ = maybe_inst;
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO mod on floats", .{}),
.Vector => return self.fail("TODO mod on vectors", .{}),
.Int => return self.fail("TODO mod on ints", .{}),
@@ -2205,10 +2211,11 @@ fn wrappingArithmetic(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
// Generate an add/sub/mul
const result: MCValue = switch (tag) {
@@ -2240,11 +2247,11 @@ fn bitwise(
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
// TODO implement bitwise operations with immediates
const mir_tag: Mir.Inst.Tag = switch (tag) {
@@ -2274,10 +2281,11 @@ fn shiftExact(
) InnerError!MCValue {
_ = rhs_ty;
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -2323,10 +2331,11 @@ fn shiftNormal(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
// Generate a shl_exact/shr_exact
const result: MCValue = switch (tag) {
@@ -2362,7 +2371,8 @@ fn booleanOp(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Bool => {
assert((try lhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema
assert((try rhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema
@@ -2388,9 +2398,9 @@ fn ptrArithmetic(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Pointer => {
- const mod = self.bin_file.options.module.?;
assert(rhs_ty.eql(Type.usize, mod));
const ptr_ty = lhs_ty;
@@ -2398,7 +2408,7 @@ fn ptrArithmetic(
.One => ptr_ty.childType().childType(), // ptr to array, so get array element type
else => ptr_ty.childType(),
};
- const elem_size = elem_ty.abiSize(self.target.*);
+ const elem_size = elem_ty.abiSize(mod);
const base_tag: Air.Inst.Tag = switch (tag) {
.ptr_add => .add,
@@ -2511,6 +2521,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[inst];
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const mod = self.bin_file.options.module.?;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -2518,16 +2529,15 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.air.typeOf(extra.rhs);
const tuple_ty = self.air.typeOfIndex(inst);
- const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
- const tuple_align = tuple_ty.abiAlignment(self.target.*);
- const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+ const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+ const tuple_align = tuple_ty.abiAlignment(mod);
+ const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
.Int => {
- const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
switch (int_info.bits) {
1...31, 33...63 => {
const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -2639,24 +2649,23 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+ const mod = self.bin_file.options.module.?;
const result: MCValue = result: {
- const mod = self.bin_file.options.module.?;
-
const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
const lhs_ty = self.air.typeOf(extra.lhs);
const rhs_ty = self.air.typeOf(extra.rhs);
const tuple_ty = self.air.typeOfIndex(inst);
- const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
- const tuple_align = tuple_ty.abiAlignment(self.target.*);
- const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+ const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+ const tuple_align = tuple_ty.abiAlignment(mod);
+ const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 32) {
const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -2864,6 +2873,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+ const mod = self.bin_file.options.module.?;
const result: MCValue = result: {
const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -2871,14 +2881,14 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.air.typeOf(extra.rhs);
const tuple_ty = self.air.typeOfIndex(inst);
- const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
- const tuple_align = tuple_ty.abiAlignment(self.target.*);
- const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+ const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+ const tuple_align = tuple_ty.abiAlignment(mod);
+ const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
.Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -3011,10 +3021,11 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
}
fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty: Type) !MCValue {
+ const mod = self.bin_file.options.module.?;
var opt_buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&opt_buf);
- if (!payload_ty.hasRuntimeBits()) return MCValue.none;
- if (optional_ty.isPtrLikeOptional()) {
+ if (!payload_ty.hasRuntimeBits(mod)) return MCValue.none;
+ if (optional_ty.isPtrLikeOptional(mod)) {
// TODO should we reuse the operand here?
const raw_reg = try self.register_manager.allocReg(inst, gp);
const reg = self.registerAlias(raw_reg, payload_ty);
@@ -3055,16 +3066,17 @@ fn errUnionErr(
error_union_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
+ const mod = self.bin_file.options.module.?;
const err_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
if (err_ty.errorSetIsEmpty()) {
return MCValue{ .immediate = 0 };
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return try error_union_bind.resolveToMcv(self);
}
- const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*));
+ const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod));
switch (try error_union_bind.resolveToMcv(self)) {
.register => {
var operand_reg: Register = undefined;
@@ -3086,7 +3098,7 @@ fn errUnionErr(
);
const err_bit_offset = err_offset * 8;
- const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8;
+ const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8;
_ = try self.addInst(.{
.tag = .ubfx, // errors are unsigned integers
@@ -3134,16 +3146,17 @@ fn errUnionPayload(
error_union_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
+ const mod = self.bin_file.options.module.?;
const err_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
if (err_ty.errorSetIsEmpty()) {
return try error_union_bind.resolveToMcv(self);
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return MCValue.none;
}
- const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*));
+ const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
switch (try error_union_bind.resolveToMcv(self)) {
.register => {
var operand_reg: Register = undefined;
@@ -3165,10 +3178,10 @@ fn errUnionPayload(
);
const payload_bit_offset = payload_offset * 8;
- const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8;
+ const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8;
_ = try self.addInst(.{
- .tag = if (payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx,
+ .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
.data = .{
.rr_lsb_width = .{
// Set both registers to the X variant to get the full width
@@ -3245,6 +3258,7 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
}
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
if (self.liveness.isUnused(inst)) {
@@ -3253,7 +3267,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = result: {
const payload_ty = self.air.typeOf(ty_op.operand);
- if (!payload_ty.hasRuntimeBits()) {
+ if (!payload_ty.hasRuntimeBits(mod)) {
break :result MCValue{ .immediate = 1 };
}
@@ -3265,7 +3279,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
};
defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
- if (optional_ty.isPtrLikeOptional()) {
+ if (optional_ty.isPtrLikeOptional(mod)) {
// TODO should we check if we can reuse the operand?
const raw_reg = try self.register_manager.allocReg(inst, gp);
const reg = self.registerAlias(raw_reg, payload_ty);
@@ -3273,9 +3287,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue{ .register = reg };
}
- const optional_abi_size = @intCast(u32, optional_ty.abiSize(self.target.*));
- const optional_abi_align = optional_ty.abiAlignment(self.target.*);
- const offset = @intCast(u32, payload_ty.abiSize(self.target.*));
+ const optional_abi_size = @intCast(u32, optional_ty.abiSize(mod));
+ const optional_abi_align = optional_ty.abiAlignment(mod);
+ const offset = @intCast(u32, payload_ty.abiSize(mod));
const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst);
try self.genSetStack(payload_ty, stack_offset, operand);
@@ -3289,19 +3303,20 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
/// T to E!T
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = self.air.getRefType(ty_op.ty);
const error_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
const operand = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
- const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
- const abi_align = error_union_ty.abiAlignment(self.target.*);
+ const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+ const abi_align = error_union_ty.abiAlignment(mod);
const stack_offset = try self.allocMem(abi_size, abi_align, inst);
- const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
- const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+ const payload_off = errUnionPayloadOffset(payload_ty, mod);
+ const err_off = errUnionErrorOffset(payload_ty, mod);
try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand);
try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });
@@ -3314,17 +3329,18 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const mod = self.bin_file.options.module.?;
const error_union_ty = self.air.getRefType(ty_op.ty);
const error_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
const operand = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
- const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
- const abi_align = error_union_ty.abiAlignment(self.target.*);
+ const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+ const abi_align = error_union_ty.abiAlignment(mod);
const stack_offset = try self.allocMem(abi_size, abi_align, inst);
- const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
- const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+ const payload_off = errUnionPayloadOffset(payload_ty, mod);
+ const err_off = errUnionErrorOffset(payload_ty, mod);
try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand);
try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef);
@@ -3440,8 +3456,9 @@ fn ptrElemVal(
ptr_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
+ const mod = self.bin_file.options.module.?;
const elem_ty = ptr_ty.childType();
- const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+ const elem_size = @intCast(u32, elem_ty.abiSize(mod));
// TODO optimize for elem_sizes of 1, 2, 4, 8
switch (elem_size) {
@@ -3597,8 +3614,9 @@ fn reuseOperand(
}
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
+ const mod = self.bin_file.options.module.?;
const elem_ty = ptr_ty.elemType();
- const elem_size = elem_ty.abiSize(self.target.*);
+ const elem_size = elem_ty.abiSize(mod);
switch (ptr) {
.none => unreachable,
@@ -3846,9 +3864,10 @@ fn genInlineMemsetCode(
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
- const elem_size = elem_ty.abiSize(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const elem_size = elem_ty.abiSize(mod);
const result: MCValue = result: {
- if (!elem_ty.hasRuntimeBits())
+ if (!elem_ty.hasRuntimeBits(mod))
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
@@ -3874,11 +3893,12 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
}
fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
- const abi_size = ty.abiSize(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const abi_size = ty.abiSize(mod);
const tag: Mir.Inst.Tag = switch (abi_size) {
- 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate,
- 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_immediate else .ldrh_immediate,
+ 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate,
+ 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_immediate else .ldrh_immediate,
4 => .ldr_immediate,
8 => .ldr_immediate,
3, 5, 6, 7 => return self.fail("TODO: genLdrRegister for more abi_sizes", .{}),
@@ -3896,7 +3916,8 @@ fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type
}
fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
- const abi_size = ty.abiSize(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const abi_size = ty.abiSize(mod);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb_immediate,
@@ -3917,8 +3938,9 @@ fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type
}
fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
+ const mod = self.bin_file.options.module.?;
log.debug("store: storing {} to {}", .{ value, ptr });
- const abi_size = value_ty.abiSize(self.target.*);
+ const abi_size = value_ty.abiSize(mod);
switch (ptr) {
.none => unreachable,
@@ -4069,10 +4091,11 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
return if (self.liveness.isUnused(inst)) .dead else result: {
+ const mod = self.bin_file.options.module.?;
const mcv = try self.resolveInst(operand);
const ptr_ty = self.air.typeOf(operand);
const struct_ty = ptr_ty.childType();
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+ const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
switch (mcv) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -4093,10 +4116,11 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const operand = extra.struct_operand;
const index = extra.field_index;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const mod = self.bin_file.options.module.?;
const mcv = try self.resolveInst(operand);
const struct_ty = self.air.typeOf(operand);
const struct_field_ty = struct_ty.structFieldType(index);
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+ const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
switch (mcv) {
.dead, .unreach => unreachable,
@@ -4142,12 +4166,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
}
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const field_ptr = try self.resolveInst(extra.field_ptr);
const struct_ty = self.air.getRefType(ty_pl.ty).childType();
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*));
+ const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod));
switch (field_ptr) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
@@ -4223,8 +4248,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const ty = self.air.typeOf(callee);
+ const mod = self.bin_file.options.module.?;
- const fn_ty = switch (ty.zigTypeTag()) {
+ const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
.Pointer => ty.childType(),
else => unreachable,
@@ -4246,8 +4272,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (info.return_value == .stack_offset) {
log.debug("airCall: return by reference", .{});
const ret_ty = fn_ty.fnReturnType();
- const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
- const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*));
+ const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod));
+ const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
@@ -4289,8 +4315,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
- const mod = self.bin_file.options.module.?;
- if (self.air.value(callee)) |func_value| {
+ if (self.air.value(callee, mod)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
@@ -4369,7 +4394,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else {
- assert(ty.zigTypeTag() == .Pointer);
+ assert(ty.zigTypeTag(mod) == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(ty, .x30, mcv);
@@ -4410,11 +4435,12 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const ret_ty = self.fn_type.fnReturnType();
+ const mod = self.bin_file.options.module.?;
switch (self.ret_mcv) {
.none => {},
.immediate => {
- assert(ret_ty.isError());
+ assert(ret_ty.isError(mod));
},
.register => |reg| {
// Return result by value
@@ -4465,8 +4491,9 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
// location.
const op_inst = Air.refToIndex(un_op).?;
if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
- const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
- const abi_align = ret_ty.abiAlignment(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const abi_size = @intCast(u32, ret_ty.abiSize(mod));
+ const abi_align = ret_ty.abiAlignment(mod);
const offset = try self.allocMem(abi_size, abi_align, null);
@@ -4501,21 +4528,21 @@ fn cmp(
lhs_ty: Type,
op: math.CompareOperator,
) !MCValue {
- var int_buffer: Type.Payload.Bits = undefined;
- const int_ty = switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
.Optional => blk: {
var opt_buffer: Type.Payload.ElemType = undefined;
const payload_ty = lhs_ty.optionalChild(&opt_buffer);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :blk Type.initTag(.u1);
- } else if (lhs_ty.isPtrLikeOptional()) {
+ } else if (lhs_ty.isPtrLikeOptional(mod)) {
break :blk Type.usize;
} else {
return self.fail("TODO ARM cmp non-pointer optionals", .{});
}
},
.Float => return self.fail("TODO ARM cmp floats", .{}),
- .Enum => lhs_ty.intTagType(&int_buffer),
+ .Enum => lhs_ty.intTagType(),
.Int => lhs_ty,
.Bool => Type.initTag(.u1),
.Pointer => Type.usize,
@@ -4523,7 +4550,7 @@ fn cmp(
else => unreachable,
};
- const int_info = int_ty.intInfo(self.target.*);
+ const int_info = int_ty.intInfo(mod);
if (int_info.bits <= 64) {
try self.spillCompareFlagsIfOccupied();
@@ -4687,8 +4714,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
// whether it needs to be spilled in the branches
if (self.liveness.operandDies(inst, 0)) {
const op_int = @enumToInt(pl_op.operand);
- if (op_int >= Air.Inst.Ref.typed_value_map.len) {
- const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+ if (op_int >= Air.ref_start_index) {
+ const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
self.processDeath(op_index);
}
}
@@ -4819,13 +4846,14 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
}
fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
- const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional()) blk: {
+ const mod = self.bin_file.options.module.?;
+ const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional(mod)) blk: {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = operand_ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime())
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
break :blk .{ .ty = operand_ty, .bind = operand_bind };
- const offset = @intCast(u32, payload_ty.abiSize(self.target.*));
+ const offset = @intCast(u32, payload_ty.abiSize(mod));
const operand_mcv = try operand_bind.resolveToMcv(self);
const new_mcv: MCValue = switch (operand_mcv) {
.register => |source_reg| new: {
@@ -4838,7 +4866,7 @@ fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
try self.genSetReg(payload_ty, dest_reg, operand_mcv);
} else {
_ = try self.addInst(.{
- .tag = if (payload_ty.isSignedInt())
+ .tag = if (payload_ty.isSignedInt(mod))
Mir.Inst.Tag.asr_immediate
else
Mir.Inst.Tag.lsr_immediate,
@@ -5210,9 +5238,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
}
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
+ const mod = self.bin_file.options.module.?;
const block_data = self.blocks.getPtr(block).?;
- if (self.air.typeOf(operand).hasRuntimeBits()) {
+ if (self.air.typeOf(operand).hasRuntimeBits(mod)) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
@@ -5386,7 +5415,8 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
}
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ const mod = self.bin_file.options.module.?;
+ const abi_size = @intCast(u32, ty.abiSize(mod));
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -5445,7 +5475,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
const overflow_bit_ty = ty.structFieldType(1);
- const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*));
+ const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
const raw_cond_reg = try self.register_manager.allocReg(null, gp);
const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty);
@@ -5559,6 +5589,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
}
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
+ const mod = self.bin_file.options.module.?;
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -5669,13 +5700,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
try self.genLdrRegister(reg, reg.toX(), ty);
},
.stack_offset => |off| {
- const abi_size = ty.abiSize(self.target.*);
+ const abi_size = ty.abiSize(mod);
switch (abi_size) {
1, 2, 4, 8 => {
const tag: Mir.Inst.Tag = switch (abi_size) {
- 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack else .ldrb_stack,
- 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack else .ldrh_stack,
+ 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack else .ldrb_stack,
+ 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack else .ldrh_stack,
4, 8 => .ldr_stack,
else => unreachable, // unexpected abi size
};
@@ -5693,13 +5724,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
}
},
.stack_argument_offset => |off| {
- const abi_size = ty.abiSize(self.target.*);
+ const abi_size = ty.abiSize(mod);
switch (abi_size) {
1, 2, 4, 8 => {
const tag: Mir.Inst.Tag = switch (abi_size) {
- 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
- 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
+ 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
+ 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
4, 8 => .ldr_stack_argument,
else => unreachable, // unexpected abi size
};
@@ -5720,7 +5751,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
}
fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ const mod = self.bin_file.options.module.?;
+ const abi_size = @intCast(u32, ty.abiSize(mod));
switch (mcv) {
.dead => unreachable,
.none, .unreach => return,
@@ -5728,7 +5760,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
if (!self.wantSafety())
return; // The already existing value will do just fine.
// TODO Upgrade this to a memset call when we have that available.
- switch (ty.abiSize(self.target.*)) {
+ switch (ty.abiSize(mod)) {
1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
@@ -6087,14 +6119,15 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
}
fn airTry(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Try, pl_op.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
const result: MCValue = result: {
const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
const error_union_ty = self.air.typeOf(pl_op.operand);
- const error_union_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
- const error_union_align = error_union_ty.abiAlignment(self.target.*);
+ const error_union_size = @intCast(u32, error_union_ty.abiSize(mod));
+ const error_union_align = error_union_ty.abiAlignment(mod);
// The error union will die in the body. However, we need the
// error union after the body in order to extract the payload
@@ -6123,22 +6156,18 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
- // First section of indexes correspond to a set number of constant values.
- const ref_int = @enumToInt(inst);
- if (ref_int < Air.Inst.Ref.typed_value_map.len) {
- const tv = Air.Inst.Ref.typed_value_map[ref_int];
- if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) {
- return MCValue{ .none = {} };
- }
- return self.genTypedValue(tv);
- }
+ const mod = self.bin_file.options.module.?;
// If the type has no codegen bits, no need to store it.
const inst_ty = self.air.typeOf(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError())
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod))
return MCValue{ .none = {} };
- const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
+ const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{
+ .ty = inst_ty,
+ .val = self.air.value(inst, mod).?,
+ });
+
switch (self.air.instructions.items(.tag)[inst_index]) {
.constant => {
// Constants have static lifetimes, so they are always memoized in the outer most table.
@@ -6222,6 +6251,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
errdefer self.gpa.free(result.args);
const ret_ty = fn_ty.fnReturnType();
+ const mod = self.bin_file.options.module.?;
switch (cc) {
.Naked => {
@@ -6236,14 +6266,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var ncrn: usize = 0; // Next Core Register Number
var nsaa: u32 = 0; // Next stacked argument address
- if (ret_ty.zigTypeTag() == .NoReturn) {
+ if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = .{ .unreach = {} };
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
result.return_value = .{ .none = {} };
} else {
- const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+ const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
if (ret_ty_size == 0) {
- assert(ret_ty.isError());
+ assert(ret_ty.isError(mod));
result.return_value = .{ .immediate = 0 };
} else if (ret_ty_size <= 8) {
result.return_value = .{ .register = self.registerAlias(c_abi_int_return_regs[0], ret_ty) };
@@ -6253,7 +6283,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
for (param_types, 0..) |ty, i| {
- const param_size = @intCast(u32, ty.abiSize(self.target.*));
+ const param_size = @intCast(u32, ty.abiSize(mod));
if (param_size == 0) {
result.args[i] = .{ .none = {} };
continue;
@@ -6261,7 +6291,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
// We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
// values to spread across odd-numbered registers.
- if (ty.abiAlignment(self.target.*) == 16 and !self.target.isDarwin()) {
+ if (ty.abiAlignment(mod) == 16 and !self.target.isDarwin()) {
// Round up NCRN to the next even number
ncrn += ncrn % 2;
}
@@ -6279,7 +6309,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
ncrn = 8;
// TODO Apple allows the arguments on the stack to be non-8-byte aligned provided
// that the entire stack space consumed by the arguments is 8-byte aligned.
- if (ty.abiAlignment(self.target.*) == 8) {
+ if (ty.abiAlignment(mod) == 8) {
if (nsaa % 8 != 0) {
nsaa += 8 - (nsaa % 8);
}
@@ -6294,14 +6324,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
result.stack_align = 16;
},
.Unspecified => {
- if (ret_ty.zigTypeTag() == .NoReturn) {
+ if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = .{ .unreach = {} };
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
result.return_value = .{ .none = {} };
} else {
- const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+ const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
if (ret_ty_size == 0) {
- assert(ret_ty.isError());
+ assert(ret_ty.isError(mod));
result.return_value = .{ .immediate = 0 };
} else if (ret_ty_size <= 8) {
result.return_value = .{ .register = self.registerAlias(.x0, ret_ty) };
@@ -6318,9 +6348,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
for (param_types, 0..) |ty, i| {
- if (ty.abiSize(self.target.*) > 0) {
- const param_size = @intCast(u32, ty.abiSize(self.target.*));
- const param_alignment = ty.abiAlignment(self.target.*);
+ if (ty.abiSize(mod) > 0) {
+ const param_size = @intCast(u32, ty.abiSize(mod));
+ const param_alignment = ty.abiAlignment(mod);
stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment);
result.args[i] = .{ .stack_argument_offset = stack_offset };
@@ -6371,7 +6401,8 @@ fn parseRegName(name: []const u8) ?Register {
}
fn registerAlias(self: *Self, reg: Register, ty: Type) Register {
- const abi_size = ty.abiSize(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const abi_size = ty.abiSize(mod);
switch (reg.class()) {
.general_purpose => {
src/arch/arm/abi.zig
@@ -1,8 +1,10 @@
const std = @import("std");
+const assert = std.debug.assert;
const bits = @import("bits.zig");
const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../type.zig").Type;
+const Module = @import("../../Module.zig");
pub const Class = union(enum) {
memory,
@@ -22,28 +24,28 @@ pub const Class = union(enum) {
pub const Context = enum { ret, arg };
-pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class {
- std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());
+pub fn classifyType(ty: Type, mod: *const Module, ctx: Context) Class {
+ assert(ty.hasRuntimeBitsIgnoreComptime(mod));
var maybe_float_bits: ?u16 = null;
const max_byval_size = 512;
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Struct => {
- const bit_size = ty.bitSize(target);
+ const bit_size = ty.bitSize(mod);
if (ty.containerLayout() == .Packed) {
if (bit_size > 64) return .memory;
return .byval;
}
if (bit_size > max_byval_size) return .memory;
- const float_count = countFloats(ty, target, &maybe_float_bits);
+ const float_count = countFloats(ty, mod, &maybe_float_bits);
if (float_count <= byval_float_count) return .byval;
const fields = ty.structFieldCount();
var i: u32 = 0;
while (i < fields) : (i += 1) {
const field_ty = ty.structFieldType(i);
- const field_alignment = ty.structFieldAlign(i, target);
- const field_size = field_ty.bitSize(target);
+ const field_alignment = ty.structFieldAlign(i, mod);
+ const field_size = field_ty.bitSize(mod);
if (field_size > 32 or field_alignment > 32) {
return Class.arrSize(bit_size, 64);
}
@@ -51,17 +53,17 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class {
return Class.arrSize(bit_size, 32);
},
.Union => {
- const bit_size = ty.bitSize(target);
+ const bit_size = ty.bitSize(mod);
if (ty.containerLayout() == .Packed) {
if (bit_size > 64) return .memory;
return .byval;
}
if (bit_size > max_byval_size) return .memory;
- const float_count = countFloats(ty, target, &maybe_float_bits);
+ const float_count = countFloats(ty, mod, &maybe_float_bits);
if (float_count <= byval_float_count) return .byval;
for (ty.unionFields().values()) |field| {
- if (field.ty.bitSize(target) > 32 or field.normalAlignment(target) > 32) {
+ if (field.ty.bitSize(mod) > 32 or field.normalAlignment(mod) > 32) {
return Class.arrSize(bit_size, 64);
}
}
@@ -71,28 +73,28 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class {
.Int => {
// TODO this is incorrect for _BitInt(128) but implementing
// this correctly makes implementing compiler-rt impossible.
- // const bit_size = ty.bitSize(target);
+ // const bit_size = ty.bitSize(mod);
// if (bit_size > 64) return .memory;
return .byval;
},
.Enum, .ErrorSet => {
- const bit_size = ty.bitSize(target);
+ const bit_size = ty.bitSize(mod);
if (bit_size > 64) return .memory;
return .byval;
},
.Vector => {
- const bit_size = ty.bitSize(target);
+ const bit_size = ty.bitSize(mod);
// TODO is this controlled by a cpu feature?
if (ctx == .ret and bit_size > 128) return .memory;
if (bit_size > 512) return .memory;
return .byval;
},
.Optional => {
- std.debug.assert(ty.isPtrLikeOptional());
+ assert(ty.isPtrLikeOptional(mod));
return .byval;
},
.Pointer => {
- std.debug.assert(!ty.isSlice());
+ assert(!ty.isSlice());
return .byval;
},
.ErrorUnion,
@@ -114,14 +116,15 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class {
}
const byval_float_count = 4;
-fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 {
+fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u32 {
+ const target = mod.getTarget();
const invalid = std.math.maxInt(u32);
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Union => {
const fields = ty.unionFields();
var max_count: u32 = 0;
for (fields.values()) |field| {
- const field_count = countFloats(field.ty, target, maybe_float_bits);
+ const field_count = countFloats(field.ty, mod, maybe_float_bits);
if (field_count == invalid) return invalid;
if (field_count > max_count) max_count = field_count;
if (max_count > byval_float_count) return invalid;
@@ -134,7 +137,7 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 {
var i: u32 = 0;
while (i < fields_len) : (i += 1) {
const field_ty = ty.structFieldType(i);
- const field_count = countFloats(field_ty, target, maybe_float_bits);
+ const field_count = countFloats(field_ty, mod, maybe_float_bits);
if (field_count == invalid) return invalid;
count += field_count;
if (count > byval_float_count) return invalid;
src/arch/arm/CodeGen.zig
@@ -520,8 +520,9 @@ fn gen(self: *Self) !void {
const ty = self.air.typeOfIndex(inst);
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
- const abi_align = ty.abiAlignment(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const abi_size = @intCast(u32, ty.abiSize(mod));
+ const abi_align = ty.abiAlignment(mod);
const stack_offset = try self.allocMem(abi_size, abi_align, inst);
try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -937,8 +938,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
tomb_bits >>= 1;
if (!dies) continue;
const op_int = @enumToInt(op);
- if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
- const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+ if (op_int < Air.ref_start_index) continue;
+ const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
self.processDeath(op_index);
}
const is_used = @truncate(u1, tomb_bits) == 0;
@@ -1006,9 +1007,10 @@ fn allocMem(
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
+ const mod = self.bin_file.options.module.?;
const elem_ty = self.air.typeOfIndex(inst).elemType();
- if (!elem_ty.hasRuntimeBits()) {
+ if (!elem_ty.hasRuntimeBits(mod)) {
// As this stack item will never be dereferenced at runtime,
// return the stack offset 0. Stack offset 0 will be where all
// zero-sized stack allocations live as non-zero-sized
@@ -1016,22 +1018,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return @as(u32, 0);
}
- const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
- const mod = self.bin_file.options.module.?;
+ const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
// TODO swap this for inst.ty.ptrAlign
- const abi_align = elem_ty.abiAlignment(self.target.*);
+ const abi_align = elem_ty.abiAlignment(mod);
return self.allocMem(abi_size, abi_align, inst);
}
fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
- const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
- const mod = self.bin_file.options.module.?;
+ const mod = self.bin_file.options.module.?;
+ const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
- const abi_align = elem_ty.abiAlignment(self.target.*);
+ const abi_align = elem_ty.abiAlignment(mod);
if (reg_ok) {
// Make sure the type can fit in a register before we try to allocate one.
@@ -1158,10 +1159,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
const operand_ty = self.air.typeOf(ty_op.operand);
const dest_ty = self.air.typeOfIndex(inst);
- const operand_abi_size = operand_ty.abiSize(self.target.*);
- const dest_abi_size = dest_ty.abiSize(self.target.*);
- const info_a = operand_ty.intInfo(self.target.*);
- const info_b = dest_ty.intInfo(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const operand_abi_size = operand_ty.abiSize(mod);
+ const dest_abi_size = dest_ty.abiSize(mod);
+ const info_a = operand_ty.intInfo(mod);
+ const info_b = dest_ty.intInfo(mod);
const dst_mcv: MCValue = blk: {
if (info_a.bits == info_b.bits) {
@@ -1215,8 +1217,9 @@ fn trunc(
operand_ty: Type,
dest_ty: Type,
) !MCValue {
- const info_a = operand_ty.intInfo(self.target.*);
- const info_b = dest_ty.intInfo(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const info_a = operand_ty.intInfo(mod);
+ const info_b = dest_ty.intInfo(mod);
if (info_b.bits <= 32) {
if (info_a.bits > 32) {
@@ -1278,6 +1281,7 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
fn airNot(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const mod = self.bin_file.options.module.?;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
const operand_ty = self.air.typeOf(ty_op.operand);
@@ -1286,7 +1290,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
.unreach => unreachable,
.cpsr_flags => |cond| break :result MCValue{ .cpsr_flags = cond.negate() },
else => {
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.Bool => {
var op_reg: Register = undefined;
var dest_reg: Register = undefined;
@@ -1319,7 +1323,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
},
.Vector => return self.fail("TODO bitwise not for vectors", .{}),
.Int => {
- const int_info = operand_ty.intInfo(self.target.*);
+ const int_info = operand_ty.intInfo(mod);
if (int_info.bits <= 32) {
var op_reg: Register = undefined;
var dest_reg: Register = undefined;
@@ -1373,13 +1377,13 @@ fn minMax(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM min/max on floats", .{}),
.Vector => return self.fail("TODO ARM min/max on vectors", .{}),
.Int => {
- const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 32) {
var lhs_reg: Register = undefined;
var rhs_reg: Register = undefined;
@@ -1582,6 +1586,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[inst];
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const mod = self.bin_file.options.module.?;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -1589,16 +1594,15 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.air.typeOf(extra.rhs);
const tuple_ty = self.air.typeOfIndex(inst);
- const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
- const tuple_align = tuple_ty.abiAlignment(self.target.*);
- const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+ const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+ const tuple_align = tuple_ty.abiAlignment(mod);
+ const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
.Int => {
- const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits < 32) {
const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -1695,6 +1699,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+ const mod = self.bin_file.options.module.?;
const result: MCValue = result: {
const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -1702,16 +1707,15 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const rhs_ty = self.air.typeOf(extra.rhs);
const tuple_ty = self.air.typeOfIndex(inst);
- const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
- const tuple_align = tuple_ty.abiAlignment(self.target.*);
- const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+ const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+ const tuple_align = tuple_ty.abiAlignment(mod);
+ const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
.Int => {
- const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 16) {
const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -1859,19 +1863,20 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+ const mod = self.bin_file.options.module.?;
const result: MCValue = result: {
const lhs_ty = self.air.typeOf(extra.lhs);
const rhs_ty = self.air.typeOf(extra.rhs);
const tuple_ty = self.air.typeOfIndex(inst);
- const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
- const tuple_align = tuple_ty.abiAlignment(self.target.*);
- const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+ const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+ const tuple_align = tuple_ty.abiAlignment(mod);
+ const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
.Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 32) {
const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -2017,7 +2022,8 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const optional_ty = self.air.typeOfIndex(inst);
- const abi_size = @intCast(u32, optional_ty.abiSize(self.target.*));
+ const mod = self.bin_file.options.module.?;
+ const abi_size = @intCast(u32, optional_ty.abiSize(mod));
// Optional with a zero-bit payload type is just a boolean true
if (abi_size == 1) {
@@ -2036,16 +2042,17 @@ fn errUnionErr(
error_union_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
+ const mod = self.bin_file.options.module.?;
const err_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
if (err_ty.errorSetIsEmpty()) {
return MCValue{ .immediate = 0 };
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return try error_union_bind.resolveToMcv(self);
}
- const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*));
+ const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod));
switch (try error_union_bind.resolveToMcv(self)) {
.register => {
var operand_reg: Register = undefined;
@@ -2067,7 +2074,7 @@ fn errUnionErr(
);
const err_bit_offset = err_offset * 8;
- const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8;
+ const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8;
_ = try self.addInst(.{
.tag = .ubfx, // errors are unsigned integers
@@ -2112,16 +2119,17 @@ fn errUnionPayload(
error_union_ty: Type,
maybe_inst: ?Air.Inst.Index,
) !MCValue {
+ const mod = self.bin_file.options.module.?;
const err_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
if (err_ty.errorSetIsEmpty()) {
return try error_union_bind.resolveToMcv(self);
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return MCValue.none;
}
- const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*));
+ const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
switch (try error_union_bind.resolveToMcv(self)) {
.register => {
var operand_reg: Register = undefined;
@@ -2143,10 +2151,10 @@ fn errUnionPayload(
);
const payload_bit_offset = payload_offset * 8;
- const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8;
+ const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8;
_ = try self.addInst(.{
- .tag = if (payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx,
+ .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
.data = .{ .rr_lsb_width = .{
.rd = dest_reg,
.rn = operand_reg,
@@ -2221,19 +2229,20 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
/// T to E!T
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = self.air.getRefType(ty_op.ty);
const error_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
const operand = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
- const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
- const abi_align = error_union_ty.abiAlignment(self.target.*);
+ const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+ const abi_align = error_union_ty.abiAlignment(mod);
const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst));
- const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
- const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+ const payload_off = errUnionPayloadOffset(payload_ty, mod);
+ const err_off = errUnionErrorOffset(payload_ty, mod);
try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand);
try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });
@@ -2244,19 +2253,20 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
/// E to E!T
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = self.air.getRefType(ty_op.ty);
const error_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
const operand = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
- const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
- const abi_align = error_union_ty.abiAlignment(self.target.*);
+ const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+ const abi_align = error_union_ty.abiAlignment(mod);
const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst));
- const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
- const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+ const payload_off = errUnionPayloadOffset(payload_ty, mod);
+ const err_off = errUnionErrorOffset(payload_ty, mod);
try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand);
try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef);
@@ -2361,7 +2371,8 @@ fn ptrElemVal(
maybe_inst: ?Air.Inst.Index,
) !MCValue {
const elem_ty = ptr_ty.childType();
- const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+ const mod = self.bin_file.options.module.?;
+ const elem_size = @intCast(u32, elem_ty.abiSize(mod));
switch (elem_size) {
1, 4 => {
@@ -2647,7 +2658,8 @@ fn reuseOperand(
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
const elem_ty = ptr_ty.elemType();
- const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+ const mod = self.bin_file.options.module.?;
+ const elem_size = @intCast(u32, elem_ty.abiSize(mod));
switch (ptr) {
.none => unreachable,
@@ -2722,10 +2734,11 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
}
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
const result: MCValue = result: {
- if (!elem_ty.hasRuntimeBits())
+ if (!elem_ty.hasRuntimeBits(mod))
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
@@ -2734,7 +2747,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue.dead;
const dest_mcv: MCValue = blk: {
- const ptr_fits_dest = elem_ty.abiSize(self.target.*) <= 4;
+ const ptr_fits_dest = elem_ty.abiSize(mod) <= 4;
if (ptr_fits_dest and self.reuseOperand(inst, ty_op.operand, 0, ptr)) {
// The MCValue that holds the pointer can be re-used as the value.
break :blk ptr;
@@ -2750,7 +2763,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
}
fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
- const elem_size = @intCast(u32, value_ty.abiSize(self.target.*));
+ const mod = self.bin_file.options.module.?;
+ const elem_size = @intCast(u32, value_ty.abiSize(mod));
switch (ptr) {
.none => unreachable,
@@ -2869,10 +2883,11 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
return if (self.liveness.isUnused(inst)) .dead else result: {
+ const mod = self.bin_file.options.module.?;
const mcv = try self.resolveInst(operand);
const ptr_ty = self.air.typeOf(operand);
const struct_ty = ptr_ty.childType();
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+ const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
switch (mcv) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -2892,10 +2907,11 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
const operand = extra.struct_operand;
const index = extra.field_index;
+ const mod = self.bin_file.options.module.?;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(operand);
const struct_ty = self.air.typeOf(operand);
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+ const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
const struct_field_ty = struct_ty.structFieldType(index);
switch (mcv) {
@@ -2959,10 +2975,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
);
const field_bit_offset = struct_field_offset * 8;
- const field_bit_size = @intCast(u32, struct_field_ty.abiSize(self.target.*)) * 8;
+ const field_bit_size = @intCast(u32, struct_field_ty.abiSize(mod)) * 8;
_ = try self.addInst(.{
- .tag = if (struct_field_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx,
+ .tag = if (struct_field_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
.data = .{ .rr_lsb_width = .{
.rd = dest_reg,
.rn = operand_reg,
@@ -2981,17 +2997,18 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
}
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const field_ptr = try self.resolveInst(extra.field_ptr);
const struct_ty = self.air.getRefType(ty_pl.ty).childType();
- if (struct_ty.zigTypeTag() == .Union) {
+ if (struct_ty.zigTypeTag(mod) == .Union) {
return self.fail("TODO implement @fieldParentPtr codegen for unions", .{});
}
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*));
+ const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod));
switch (field_ptr) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
@@ -3375,12 +3392,12 @@ fn addSub(
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 32) {
const lhs_immediate = try lhs_bind.resolveToImmediate(self);
const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -3431,12 +3448,12 @@ fn mul(
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 32) {
// TODO add optimisations for multiplication
// with immediates, for example a * 2 can be
@@ -3463,7 +3480,8 @@ fn divFloat(
_ = rhs_ty;
_ = maybe_inst;
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
else => unreachable,
@@ -3479,12 +3497,12 @@ fn divTrunc(
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 32) {
switch (int_info.signedness) {
.signed => {
@@ -3522,12 +3540,12 @@ fn divFloor(
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 32) {
switch (int_info.signedness) {
.signed => {
@@ -3569,7 +3587,8 @@ fn divExact(
_ = rhs_ty;
_ = maybe_inst;
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => return self.fail("TODO ARM div_exact", .{}),
@@ -3586,12 +3605,12 @@ fn rem(
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 32) {
switch (int_info.signedness) {
.signed => {
@@ -3654,7 +3673,8 @@ fn modulo(
_ = rhs_ty;
_ = maybe_inst;
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => return self.fail("TODO ARM mod", .{}),
@@ -3671,10 +3691,11 @@ fn wrappingArithmetic(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 32) {
// Generate an add/sub/mul
const result: MCValue = switch (tag) {
@@ -3708,12 +3729,12 @@ fn bitwise(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
- const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 32) {
const lhs_immediate = try lhs_bind.resolveToImmediate(self);
const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -3753,16 +3774,17 @@ fn shiftExact(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 32) {
const rhs_immediate = try rhs_bind.resolveToImmediate(self);
const mir_tag: Mir.Inst.Tag = switch (tag) {
.shl_exact => .lsl,
- .shr_exact => switch (lhs_ty.intInfo(self.target.*).signedness) {
+ .shr_exact => switch (lhs_ty.intInfo(mod).signedness) {
.signed => Mir.Inst.Tag.asr,
.unsigned => Mir.Inst.Tag.lsr,
},
@@ -3791,10 +3813,11 @@ fn shiftNormal(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 32) {
// Generate a shl_exact/shr_exact
const result: MCValue = switch (tag) {
@@ -3833,7 +3856,8 @@ fn booleanOp(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Bool => {
const lhs_immediate = try lhs_bind.resolveToImmediate(self);
const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -3866,9 +3890,9 @@ fn ptrArithmetic(
rhs_ty: Type,
maybe_inst: ?Air.Inst.Index,
) InnerError!MCValue {
- switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lhs_ty.zigTypeTag(mod)) {
.Pointer => {
- const mod = self.bin_file.options.module.?;
assert(rhs_ty.eql(Type.usize, mod));
const ptr_ty = lhs_ty;
@@ -3876,7 +3900,7 @@ fn ptrArithmetic(
.One => ptr_ty.childType().childType(), // ptr to array, so get array element type
else => ptr_ty.childType(),
};
- const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+ const elem_size = @intCast(u32, elem_ty.abiSize(mod));
const base_tag: Air.Inst.Tag = switch (tag) {
.ptr_add => .add,
@@ -3903,11 +3927,12 @@ fn ptrArithmetic(
}
fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void {
- const abi_size = ty.abiSize(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const abi_size = ty.abiSize(mod);
const tag: Mir.Inst.Tag = switch (abi_size) {
- 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb,
- 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh,
+ 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb,
+ 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh else .ldrh,
3, 4 => .ldr,
else => unreachable,
};
@@ -3924,7 +3949,7 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type)
} };
const data: Mir.Inst.Data = switch (abi_size) {
- 1 => if (ty.isSignedInt()) rr_extra_offset else rr_offset,
+ 1 => if (ty.isSignedInt(mod)) rr_extra_offset else rr_offset,
2 => rr_extra_offset,
3, 4 => rr_offset,
else => unreachable,
@@ -3937,7 +3962,8 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type)
}
fn genStrRegister(self: *Self, source_reg: Register, addr_reg: Register, ty: Type) !void {
- const abi_size = ty.abiSize(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const abi_size = ty.abiSize(mod);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb,
@@ -4197,8 +4223,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const ty = self.air.typeOf(callee);
+ const mod = self.bin_file.options.module.?;
- const fn_ty = switch (ty.zigTypeTag()) {
+ const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
.Pointer => ty.childType(),
else => unreachable,
@@ -4226,8 +4253,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: {
log.debug("airCall: return by reference", .{});
const ret_ty = fn_ty.fnReturnType();
- const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
- const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*));
+ const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod));
+ const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
var ptr_ty_payload: Type.Payload.ElemType = .{
@@ -4270,7 +4297,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
- if (self.air.value(callee)) |func_value| {
+ if (self.air.value(callee, mod)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
@@ -4294,7 +4321,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else {
- assert(ty.zigTypeTag() == .Pointer);
+ assert(ty.zigTypeTag(mod) == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(Type.initTag(.usize), .lr, mcv);
@@ -4356,11 +4383,12 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const ret_ty = self.fn_type.fnReturnType();
+ const mod = self.bin_file.options.module.?;
switch (self.ret_mcv) {
.none => {},
.immediate => {
- assert(ret_ty.isError());
+ assert(ret_ty.isError(mod));
},
.register => |reg| {
// Return result by value
@@ -4411,8 +4439,9 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
// location.
const op_inst = Air.refToIndex(un_op).?;
if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
- const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
- const abi_align = ret_ty.abiAlignment(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const abi_size = @intCast(u32, ret_ty.abiSize(mod));
+ const abi_align = ret_ty.abiAlignment(mod);
const offset = try self.allocMem(abi_size, abi_align, null);
@@ -4448,21 +4477,21 @@ fn cmp(
lhs_ty: Type,
op: math.CompareOperator,
) !MCValue {
- var int_buffer: Type.Payload.Bits = undefined;
- const int_ty = switch (lhs_ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
.Optional => blk: {
var opt_buffer: Type.Payload.ElemType = undefined;
const payload_ty = lhs_ty.optionalChild(&opt_buffer);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :blk Type.initTag(.u1);
- } else if (lhs_ty.isPtrLikeOptional()) {
+ } else if (lhs_ty.isPtrLikeOptional(mod)) {
break :blk Type.usize;
} else {
return self.fail("TODO ARM cmp non-pointer optionals", .{});
}
},
.Float => return self.fail("TODO ARM cmp floats", .{}),
- .Enum => lhs_ty.intTagType(&int_buffer),
+ .Enum => lhs_ty.intTagType(),
.Int => lhs_ty,
.Bool => Type.initTag(.u1),
.Pointer => Type.usize,
@@ -4470,7 +4499,7 @@ fn cmp(
else => unreachable,
};
- const int_info = int_ty.intInfo(self.target.*);
+ const int_info = int_ty.intInfo(mod);
if (int_info.bits <= 32) {
try self.spillCompareFlagsIfOccupied();
@@ -4636,8 +4665,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
// whether it needs to be spilled in the branches
if (self.liveness.operandDies(inst, 0)) {
const op_int = @enumToInt(pl_op.operand);
- if (op_int >= Air.Inst.Ref.typed_value_map.len) {
- const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+ if (op_int >= Air.ref_start_index) {
+ const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
self.processDeath(op_index);
}
}
@@ -4772,8 +4801,9 @@ fn isNull(
operand_bind: ReadArg.Bind,
operand_ty: Type,
) !MCValue {
- if (operand_ty.isPtrLikeOptional()) {
- assert(operand_ty.abiSize(self.target.*) == 4);
+ const mod = self.bin_file.options.module.?;
+ if (operand_ty.isPtrLikeOptional(mod)) {
+ assert(operand_ty.abiSize(mod) == 4);
const imm_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 0 } };
return self.cmp(operand_bind, imm_bind, Type.usize, .eq);
@@ -5131,9 +5161,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
}
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
+ const mod = self.bin_file.options.module.?;
const block_data = self.blocks.getPtr(block).?;
- if (self.air.typeOf(operand).hasRuntimeBits()) {
+ if (self.air.typeOf(operand).hasRuntimeBits(mod)) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
@@ -5301,7 +5332,8 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
}
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ const mod = self.bin_file.options.module.?;
+ const abi_size = @intCast(u32, ty.abiSize(mod));
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -5382,7 +5414,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg });
const overflow_bit_ty = ty.structFieldType(1);
- const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*));
+ const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
const cond_reg = try self.register_manager.allocReg(null, gp);
// C flag: movcs reg, #1
@@ -5466,6 +5498,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
}
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
+ const mod = self.bin_file.options.module.?;
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -5640,17 +5673,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
},
.stack_offset => |off| {
// TODO: maybe addressing from sp instead of fp
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ const abi_size = @intCast(u32, ty.abiSize(mod));
const tag: Mir.Inst.Tag = switch (abi_size) {
- 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb,
- 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh,
+ 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb,
+ 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh else .ldrh,
3, 4 => .ldr,
else => unreachable,
};
const extra_offset = switch (abi_size) {
- 1 => ty.isSignedInt(),
+ 1 => ty.isSignedInt(mod),
2 => true,
3, 4 => false,
else => unreachable,
@@ -5691,11 +5724,11 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
}
},
.stack_argument_offset => |off| {
- const abi_size = ty.abiSize(self.target.*);
+ const abi_size = ty.abiSize(mod);
const tag: Mir.Inst.Tag = switch (abi_size) {
- 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
- 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
+ 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
+ 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
3, 4 => .ldr_stack_argument,
else => unreachable,
};
@@ -5712,7 +5745,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
}
fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ const mod = self.bin_file.options.module.?;
+ const abi_size = @intCast(u32, ty.abiSize(mod));
switch (mcv) {
.dead => unreachable,
.none, .unreach => return,
@@ -6039,8 +6073,9 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = result: {
const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
const error_union_ty = self.air.typeOf(pl_op.operand);
- const error_union_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
- const error_union_align = error_union_ty.abiAlignment(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const error_union_size = @intCast(u32, error_union_ty.abiSize(mod));
+ const error_union_align = error_union_ty.abiAlignment(mod);
// The error union will die in the body. However, we need the
// error union after the body in order to extract the payload
@@ -6069,22 +6104,18 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
- // First section of indexes correspond to a set number of constant values.
- const ref_int = @enumToInt(inst);
- if (ref_int < Air.Inst.Ref.typed_value_map.len) {
- const tv = Air.Inst.Ref.typed_value_map[ref_int];
- if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) {
- return MCValue{ .none = {} };
- }
- return self.genTypedValue(tv);
- }
+ const mod = self.bin_file.options.module.?;
// If the type has no codegen bits, no need to store it.
const inst_ty = self.air.typeOf(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError())
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod))
return MCValue{ .none = {} };
- const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
+ const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{
+ .ty = inst_ty,
+ .val = self.air.value(inst, mod).?,
+ });
+
switch (self.air.instructions.items(.tag)[inst_index]) {
.constant => {
// Constants have static lifetimes, so they are always memoized in the outer most table.
@@ -6166,6 +6197,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
errdefer self.gpa.free(result.args);
const ret_ty = fn_ty.fnReturnType();
+ const mod = self.bin_file.options.module.?;
switch (cc) {
.Naked => {
@@ -6180,12 +6212,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var ncrn: usize = 0; // Next Core Register Number
var nsaa: u32 = 0; // Next stacked argument address
- if (ret_ty.zigTypeTag() == .NoReturn) {
+ if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = .{ .unreach = {} };
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
result.return_value = .{ .none = {} };
} else {
- const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+ const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
// TODO handle cases where multiple registers are used
if (ret_ty_size <= 4) {
result.return_value = .{ .register = c_abi_int_return_regs[0] };
@@ -6200,10 +6232,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
for (param_types, 0..) |ty, i| {
- if (ty.abiAlignment(self.target.*) == 8)
+ if (ty.abiAlignment(mod) == 8)
ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2);
- const param_size = @intCast(u32, ty.abiSize(self.target.*));
+ const param_size = @intCast(u32, ty.abiSize(mod));
if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) {
if (param_size <= 4) {
result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] };
@@ -6215,7 +6247,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
return self.fail("TODO MCValues split between registers and stack", .{});
} else {
ncrn = 4;
- if (ty.abiAlignment(self.target.*) == 8)
+ if (ty.abiAlignment(mod) == 8)
nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8);
result.args[i] = .{ .stack_argument_offset = nsaa };
@@ -6227,14 +6259,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
result.stack_align = 8;
},
.Unspecified => {
- if (ret_ty.zigTypeTag() == .NoReturn) {
+ if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = .{ .unreach = {} };
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
result.return_value = .{ .none = {} };
} else {
- const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+ const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
if (ret_ty_size == 0) {
- assert(ret_ty.isError());
+ assert(ret_ty.isError(mod));
result.return_value = .{ .immediate = 0 };
} else if (ret_ty_size <= 4) {
result.return_value = .{ .register = .r0 };
@@ -6250,9 +6282,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
for (param_types, 0..) |ty, i| {
- if (ty.abiSize(self.target.*) > 0) {
- const param_size = @intCast(u32, ty.abiSize(self.target.*));
- const param_alignment = ty.abiAlignment(self.target.*);
+ if (ty.abiSize(mod) > 0) {
+ const param_size = @intCast(u32, ty.abiSize(mod));
+ const param_alignment = ty.abiAlignment(mod);
stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment);
result.args[i] = .{ .stack_argument_offset = stack_offset };
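Aside (not part of the commit): the CodeGen hunks above also replace Air.Inst.Ref.typed_value_map.len with Air.ref_start_index when deciding whether an operand ref names an instruction. A hedged helper sketch of that split follows; the function name is illustrative, while ref_start_index and the cast style come from the diff.
    fn refToDeathIndex(op: Air.Inst.Ref) ?Air.Inst.Index {
        const op_int = @enumToInt(op);
        // Refs below the threshold are interned constants and are never marked dead;
        // refs at or above it map onto Air instruction indices.
        if (op_int < Air.ref_start_index) return null;
        return @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
    }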
src/arch/riscv64/abi.zig
@@ -3,16 +3,18 @@ const bits = @import("bits.zig");
const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../type.zig").Type;
+const Module = @import("../../Module.zig");
pub const Class = enum { memory, byval, integer, double_integer };
-pub fn classifyType(ty: Type, target: std.Target) Class {
- std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());
+pub fn classifyType(ty: Type, mod: *const Module) Class {
+ const target = mod.getTarget();
+ std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));
const max_byval_size = target.ptrBitWidth() * 2;
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Struct => {
- const bit_size = ty.bitSize(target);
+ const bit_size = ty.bitSize(mod);
if (ty.containerLayout() == .Packed) {
if (bit_size > max_byval_size) return .memory;
return .byval;
@@ -23,7 +25,7 @@ pub fn classifyType(ty: Type, target: std.Target) Class {
return .integer;
},
.Union => {
- const bit_size = ty.bitSize(target);
+ const bit_size = ty.bitSize(mod);
if (ty.containerLayout() == .Packed) {
if (bit_size > max_byval_size) return .memory;
return .byval;
@@ -36,17 +38,17 @@ pub fn classifyType(ty: Type, target: std.Target) Class {
.Bool => return .integer,
.Float => return .byval,
.Int, .Enum, .ErrorSet => {
- const bit_size = ty.bitSize(target);
+ const bit_size = ty.bitSize(mod);
if (bit_size > max_byval_size) return .memory;
return .byval;
},
.Vector => {
- const bit_size = ty.bitSize(target);
+ const bit_size = ty.bitSize(mod);
if (bit_size > max_byval_size) return .memory;
return .integer;
},
.Optional => {
- std.debug.assert(ty.isPtrLikeOptional());
+ std.debug.assert(ty.isPtrLikeOptional(mod));
return .byval;
},
.Pointer => {
src/arch/riscv64/CodeGen.zig
@@ -755,8 +755,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
tomb_bits >>= 1;
if (!dies) continue;
const op_int = @enumToInt(op);
- if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
- const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+ if (op_int < Air.ref_start_index) continue;
+ const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
self.processDeath(op_index);
}
const is_used = @truncate(u1, tomb_bits) == 0;
@@ -805,22 +805,22 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
const elem_ty = self.air.typeOfIndex(inst).elemType();
- const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
- const mod = self.bin_file.options.module.?;
+ const mod = self.bin_file.options.module.?;
+ const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
// TODO swap this for inst.ty.ptrAlign
- const abi_align = elem_ty.abiAlignment(self.target.*);
+ const abi_align = elem_ty.abiAlignment(mod);
return self.allocMem(inst, abi_size, abi_align);
}
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
- const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
- const mod = self.bin_file.options.module.?;
+ const mod = self.bin_file.options.module.?;
+ const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
- const abi_align = elem_ty.abiAlignment(self.target.*);
+ const abi_align = elem_ty.abiAlignment(mod);
if (abi_align > self.stack_align)
self.stack_align = abi_align;
@@ -893,10 +893,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
+ const mod = self.bin_file.options.module.?;
const operand_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
- const info_a = operand_ty.intInfo(self.target.*);
- const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*);
+ const info_a = operand_ty.intInfo(mod);
+ const info_b = self.air.typeOfIndex(inst).intInfo(mod);
if (info_a.signedness != info_b.signedness)
return self.fail("TODO gen intcast sign safety in semantic analysis", .{});
@@ -1068,18 +1069,18 @@ fn binOp(
lhs_ty: Type,
rhs_ty: Type,
) InnerError!MCValue {
+ const mod = self.bin_file.options.module.?;
switch (tag) {
// Arithmetic operations on integers and floats
.add,
.sub,
=> {
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO binary operations on floats", .{}),
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
- const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
// TODO immediate operands
return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
@@ -1093,14 +1094,14 @@ fn binOp(
.ptr_add,
.ptr_sub,
=> {
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Pointer => {
const ptr_ty = lhs_ty;
const elem_ty = switch (ptr_ty.ptrSize()) {
.One => ptr_ty.childType().childType(), // ptr to array, so get array element type
else => ptr_ty.childType(),
};
- const elem_size = elem_ty.abiSize(self.target.*);
+ const elem_size = elem_ty.abiSize(mod);
if (elem_size == 1) {
const base_tag: Air.Inst.Tag = switch (tag) {
@@ -1331,10 +1332,11 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const mod = self.bin_file.options.module.?;
const optional_ty = self.air.typeOfIndex(inst);
// Optional with a zero-bit payload type is just a boolean true
- if (optional_ty.abiSize(self.target.*) == 1)
+ if (optional_ty.abiSize(mod) == 1)
break :result MCValue{ .immediate = 1 };
return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch});
@@ -1526,7 +1528,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
const result: MCValue = result: {
- if (!elem_ty.hasRuntimeBits())
+ const mod = self.bin_file.options.module.?;
+ if (!elem_ty.hasRuntimeBits(mod))
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
@@ -1698,6 +1701,7 @@ fn airFence(self: *Self) !void {
}
fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void {
+ const mod = self.bin_file.options.module.?;
if (modifier == .always_tail) return self.fail("TODO implement tail calls for riscv64", .{});
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const fn_ty = self.air.typeOf(pl_op.operand);
@@ -1736,7 +1740,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}
}
- if (self.air.value(callee)) |func_value| {
+ if (self.air.value(callee, mod)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
@@ -1828,7 +1832,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const ty = self.air.typeOf(bin_op.lhs);
const mod = self.bin_file.options.module.?;
assert(ty.eql(self.air.typeOf(bin_op.rhs), mod));
- if (ty.zigTypeTag() == .ErrorSet)
+ if (ty.zigTypeTag(mod) == .ErrorSet)
return self.fail("TODO implement cmp for errors", .{});
const lhs = try self.resolveInst(bin_op.lhs);
@@ -2107,7 +2111,8 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?;
- if (self.air.typeOf(operand).hasRuntimeBits()) {
+ const mod = self.bin_file.options.module.?;
+ if (self.air.typeOf(operand).hasRuntimeBits(mod)) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
@@ -2533,22 +2538,18 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
}
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
- // First section of indexes correspond to a set number of constant values.
- const ref_int = @enumToInt(inst);
- if (ref_int < Air.Inst.Ref.typed_value_map.len) {
- const tv = Air.Inst.Ref.typed_value_map[ref_int];
- if (!tv.ty.hasRuntimeBits()) {
- return MCValue{ .none = {} };
- }
- return self.genTypedValue(tv);
- }
+ const mod = self.bin_file.options.module.?;
// If the type has no codegen bits, no need to store it.
const inst_ty = self.air.typeOf(inst);
- if (!inst_ty.hasRuntimeBits())
+ if (!inst_ty.hasRuntimeBits(mod))
return MCValue{ .none = {} };
- const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
+ const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{
+ .ty = inst_ty,
+ .val = self.air.value(inst, mod).?,
+ });
+
switch (self.air.instructions.items(.tag)[inst_index]) {
.constant => {
// Constants have static lifetimes, so they are always memoized in the outer most table.
@@ -2630,6 +2631,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
errdefer self.gpa.free(result.args);
const ret_ty = fn_ty.fnReturnType();
+ const mod = self.bin_file.options.module.?;
switch (cc) {
.Naked => {
@@ -2650,7 +2652,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 };
for (param_types, 0..) |ty, i| {
- const param_size = @intCast(u32, ty.abiSize(self.target.*));
+ const param_size = @intCast(u32, ty.abiSize(mod));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
result.args[i] = .{ .register = argument_registers[next_register] };
@@ -2680,14 +2682,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}),
}
- if (ret_ty.zigTypeTag() == .NoReturn) {
+ if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = .{ .unreach = {} };
- } else if (!ret_ty.hasRuntimeBits()) {
+ } else if (!ret_ty.hasRuntimeBits(mod)) {
result.return_value = .{ .none = {} };
} else switch (cc) {
.Naked => unreachable,
.Unspecified, .C => {
- const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+ const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
if (ret_ty_size <= 8) {
result.return_value = .{ .register = .a0 };
} else if (ret_ty_size <= 16) {
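Aside (not part of the commit): with typed_value_map gone from resolveInst, constant refs are materialized through the module-aware Air.value. A small sketch under that assumption; resolveConst is an illustrative name, and the field shapes mirror the hunks above.
    fn resolveConst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
        const mod = self.bin_file.options.module.?;
        // A ref with no instruction index encodes a constant; fetch its value via
        // the module-aware Air.value and lower it with genTypedValue as before.
        return self.genTypedValue(.{
            .ty = self.air.typeOf(ref),
            .val = self.air.value(ref, mod).?,
        });
    }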
src/arch/sparc64/CodeGen.zig
@@ -758,18 +758,18 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[inst];
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const mod = self.bin_file.options.module.?;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const lhs = try self.resolveInst(extra.lhs);
const rhs = try self.resolveInst(extra.rhs);
const lhs_ty = self.air.typeOf(extra.lhs);
const rhs_ty = self.air.typeOf(extra.rhs);
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
.Int => {
- const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
switch (int_info.bits) {
32, 64 => {
// Only say yes if the operation is
@@ -1018,7 +1018,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
switch (arg) {
.stack_offset => |off| {
const mod = self.bin_file.options.module.?;
- const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse {
+ const abi_size = math.cast(u32, ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)});
};
const offset = off + abi_size;
@@ -1203,6 +1203,7 @@ fn airBreakpoint(self: *Self) !void {
}
fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
// We have hardware byteswapper in SPARCv9, don't let mainstream compilers mislead you.
@@ -1218,14 +1219,14 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO byteswap for vectors", .{}),
.Int => {
- const int_info = operand_ty.intInfo(self.target.*);
+ const int_info = operand_ty.intInfo(mod);
if (int_info.bits == 8) break :result operand;
const abi_size = int_info.bits >> 3;
- const abi_align = operand_ty.abiAlignment(self.target.*);
+ const abi_align = operand_ty.abiAlignment(mod);
const opposite_endian_asi = switch (self.target.cpu.arch.endian()) {
Endian.Big => ASI.asi_primary_little,
Endian.Little => ASI.asi_primary,
@@ -1294,7 +1295,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end .. extra.end + extra.data.args_len]);
const ty = self.air.typeOf(callee);
- const fn_ty = switch (ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
.Pointer => ty.childType(),
else => unreachable,
@@ -1337,7 +1339,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
- if (self.air.value(callee)) |func_value| {
+ if (self.air.value(callee, mod)) |func_value| {
if (self.bin_file.tag == link.File.Elf.base_tag) {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
@@ -1374,7 +1376,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}
} else @panic("TODO SPARCv9 currently does not support non-ELF binaries");
} else {
- assert(ty.zigTypeTag() == .Pointer);
+ assert(ty.zigTypeTag(mod) == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(ty, .o7, mcv);
@@ -1422,15 +1424,15 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const mod = self.bin_file.options.module.?;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
- var int_buffer: Type.Payload.Bits = undefined;
- const int_ty = switch (lhs_ty.zigTypeTag()) {
+ const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
.Vector => unreachable, // Handled by cmp_vector.
- .Enum => lhs_ty.intTagType(&int_buffer),
+ .Enum => lhs_ty.intTagType(),
.Int => lhs_ty,
.Bool => Type.initTag(.u1),
.Pointer => Type.usize,
@@ -1438,9 +1440,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
.Optional => blk: {
var opt_buffer: Type.Payload.ElemType = undefined;
const payload_ty = lhs_ty.optionalChild(&opt_buffer);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :blk Type.initTag(.u1);
- } else if (lhs_ty.isPtrLikeOptional()) {
+ } else if (lhs_ty.isPtrLikeOptional(mod)) {
break :blk Type.usize;
} else {
return self.fail("TODO SPARCv9 cmp non-pointer optionals", .{});
@@ -1450,7 +1452,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
else => unreachable,
};
- const int_info = int_ty.intInfo(self.target.*);
+ const int_info = int_ty.intInfo(mod);
if (int_info.bits <= 64) {
_ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{
.lhs = bin_op.lhs,
@@ -1512,8 +1514,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
// whether it needs to be spilled in the branches
if (self.liveness.operandDies(inst, 0)) {
const op_int = @enumToInt(pl_op.operand);
- if (op_int >= Air.Inst.Ref.typed_value_map.len) {
- const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+ if (op_int >= Air.ref_start_index) {
+ const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
self.processDeath(op_index);
}
}
@@ -1752,10 +1754,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
+ const mod = self.bin_file.options.module.?;
const operand_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
- const info_a = operand_ty.intInfo(self.target.*);
- const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*);
+ const info_a = operand_ty.intInfo(mod);
+ const info_b = self.air.typeOfIndex(inst).intInfo(mod);
if (info_a.signedness != info_b.signedness)
return self.fail("TODO gen intcast sign safety in semantic analysis", .{});
@@ -1814,9 +1817,10 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
- const elem_size = elem_ty.abiSize(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const elem_size = elem_ty.abiSize(mod);
const result: MCValue = result: {
- if (!elem_ty.hasRuntimeBits())
+ if (!elem_ty.hasRuntimeBits(mod))
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
@@ -2037,18 +2041,18 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
//const tag = self.air.instructions.items(.tag)[inst];
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const mod = self.bin_file.options.module.?;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const lhs = try self.resolveInst(extra.lhs);
const rhs = try self.resolveInst(extra.rhs);
const lhs_ty = self.air.typeOf(extra.lhs);
const rhs_ty = self.air.typeOf(extra.rhs);
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
.Int => {
- const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
switch (int_info.bits) {
1...32 => {
try self.spillConditionFlagsIfOccupied();
@@ -2101,6 +2105,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
fn airNot(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const mod = self.bin_file.options.module.?;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
@@ -2116,7 +2121,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
};
},
else => {
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.Bool => {
const op_reg = switch (operand) {
.register => |r| r,
@@ -2150,7 +2155,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
},
.Vector => return self.fail("TODO bitwise not for vectors", .{}),
.Int => {
- const int_info = operand_ty.intInfo(self.target.*);
+ const int_info = operand_ty.intInfo(mod);
if (int_info.bits <= 64) {
const op_reg = switch (operand) {
.register => |r| r,
@@ -2332,16 +2337,17 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const mod = self.bin_file.options.module.?;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const lhs = try self.resolveInst(extra.lhs);
const rhs = try self.resolveInst(extra.rhs);
const lhs_ty = self.air.typeOf(extra.lhs);
const rhs_ty = self.air.typeOf(extra.rhs);
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
            .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
.Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
try self.spillConditionFlagsIfOccupied();
@@ -2449,7 +2455,8 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const slice_ty = self.air.typeOf(bin_op.lhs);
const elem_ty = slice_ty.childType();
- const elem_size = elem_ty.abiSize(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const elem_size = elem_ty.abiSize(mod);
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
@@ -2564,9 +2571,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const operand = extra.struct_operand;
const index = extra.field_index;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const mod = self.bin_file.options.module.?;
const mcv = try self.resolveInst(operand);
const struct_ty = self.air.typeOf(operand);
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+ const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
switch (mcv) {
.dead, .unreach => unreachable,
@@ -2701,7 +2709,8 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
const error_union_ty = self.air.typeOf(ty_op.operand);
const payload_ty = error_union_ty.errorUnionPayload();
const mcv = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBits()) break :result mcv;
+ const mod = self.bin_file.options.module.?;
+ if (!payload_ty.hasRuntimeBits(mod)) break :result mcv;
return self.fail("TODO implement unwrap error union error for non-empty payloads", .{});
};
@@ -2713,7 +2722,8 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = self.air.typeOf(ty_op.operand);
const payload_ty = error_union_ty.errorUnionPayload();
- if (!payload_ty.hasRuntimeBits()) break :result MCValue.none;
+ const mod = self.bin_file.options.module.?;
+ if (!payload_ty.hasRuntimeBits(mod)) break :result MCValue.none;
return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{});
};
@@ -2727,7 +2737,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const error_union_ty = self.air.getRefType(ty_op.ty);
const payload_ty = error_union_ty.errorUnionPayload();
const mcv = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBits()) break :result mcv;
+ const mod = self.bin_file.options.module.?;
+ if (!payload_ty.hasRuntimeBits(mod)) break :result mcv;
return self.fail("TODO implement wrap errunion error for non-empty payloads", .{});
};
@@ -2747,7 +2758,8 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
const optional_ty = self.air.typeOfIndex(inst);
// Optional with a zero-bit payload type is just a boolean true
- if (optional_ty.abiSize(self.target.*) == 1)
+ const mod = self.bin_file.options.module.?;
+ if (optional_ty.abiSize(mod) == 1)
break :result MCValue{ .immediate = 1 };
return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch});
@@ -2784,7 +2796,8 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
const elem_ty = self.air.typeOfIndex(inst).elemType();
- if (!elem_ty.hasRuntimeBits()) {
+ const mod = self.bin_file.options.module.?;
+ if (!elem_ty.hasRuntimeBits(mod)) {
// As this stack item will never be dereferenced at runtime,
// return the stack offset 0. Stack offset 0 will be where all
// zero-sized stack allocations live as non-zero-sized
@@ -2792,22 +2805,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return @as(u32, 0);
}
- const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
- const mod = self.bin_file.options.module.?;
+ const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
// TODO swap this for inst.ty.ptrAlign
- const abi_align = elem_ty.abiAlignment(self.target.*);
+ const abi_align = elem_ty.abiAlignment(mod);
return self.allocMem(inst, abi_size, abi_align);
}
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
- const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
- const mod = self.bin_file.options.module.?;
+ const mod = self.bin_file.options.module.?;
+ const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
- const abi_align = elem_ty.abiAlignment(self.target.*);
+ const abi_align = elem_ty.abiAlignment(mod);
if (abi_align > self.stack_align)
self.stack_align = abi_align;
@@ -2860,12 +2872,12 @@ fn binOp(
.xor,
.cmp_eq,
=> {
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO binary operations on floats", .{}),
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
// Only say yes if the operation is
// commutative, i.e. we can swap both of the
@@ -2934,10 +2946,10 @@ fn binOp(
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
// Truncate if necessary
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
const result_reg = result.register;
try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits);
@@ -2951,11 +2963,11 @@ fn binOp(
},
.div_trunc => {
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, mod));
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
const rhs_immediate_ok = switch (tag) {
.div_trunc => rhs == .immediate and rhs.immediate <= std.math.maxInt(u12),
@@ -2984,14 +2996,14 @@ fn binOp(
},
.ptr_add => {
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Pointer => {
const ptr_ty = lhs_ty;
const elem_ty = switch (ptr_ty.ptrSize()) {
.One => ptr_ty.childType().childType(), // ptr to array, so get array element type
else => ptr_ty.childType(),
};
- const elem_size = elem_ty.abiSize(self.target.*);
+ const elem_size = elem_ty.abiSize(mod);
if (elem_size == 1) {
const base_tag: Mir.Inst.Tag = switch (tag) {
@@ -3016,7 +3028,7 @@ fn binOp(
.bool_and,
.bool_or,
=> {
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Bool => {
assert(lhs != .immediate); // should have been handled by Sema
assert(rhs != .immediate); // should have been handled by Sema
@@ -3046,10 +3058,10 @@ fn binOp(
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
// Truncate if necessary
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
                        // 32 and 64 bit operands don't need truncating
if (int_info.bits == 32 or int_info.bits == 64) return result;
@@ -3068,10 +3080,10 @@ fn binOp(
.shl_exact,
.shr_exact,
=> {
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
const rhs_immediate_ok = rhs == .immediate;
@@ -3393,7 +3405,8 @@ fn binOpRegister(
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?;
- if (self.air.typeOf(operand).hasRuntimeBits()) {
+ const mod = self.bin_file.options.module.?;
+ if (self.air.typeOf(operand).hasRuntimeBits(mod)) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
@@ -3512,16 +3525,17 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
/// Given an error union, returns the payload
fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue {
+ const mod = self.bin_file.options.module.?;
const err_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
if (err_ty.errorSetIsEmpty()) {
return error_union_mcv;
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return MCValue.none;
}
- const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*));
+ const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
switch (error_union_mcv) {
.register => return self.fail("TODO errUnionPayload for registers", .{}),
.stack_offset => |off| {
@@ -3555,8 +3569,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
tomb_bits >>= 1;
if (!dies) continue;
const op_int = @enumToInt(op);
- if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
- const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+ if (op_int < Air.ref_start_index) continue;
+ const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
self.processDeath(op_index);
}
const is_used = @truncate(u1, tomb_bits) == 0;
@@ -3730,6 +3744,7 @@ fn genLoadASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Reg
}
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
+ const mod = self.bin_file.options.module.?;
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -3928,19 +3943,20 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// The value is in memory at a hard-coded address.
// If the type is a pointer, it means the pointer address is at this memory location.
try self.genSetReg(ty, reg, .{ .immediate = addr });
- try self.genLoad(reg, reg, i13, 0, ty.abiSize(self.target.*));
+ try self.genLoad(reg, reg, i13, 0, ty.abiSize(mod));
},
.stack_offset => |off| {
const real_offset = realStackOffset(off);
const simm13 = math.cast(i13, real_offset) orelse
return self.fail("TODO larger stack offsets: {}", .{real_offset});
- try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(self.target.*));
+ try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(mod));
},
}
}
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
- const abi_size = ty.abiSize(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const abi_size = ty.abiSize(mod);
switch (mcv) {
.dead => unreachable,
.unreach, .none => return, // Nothing to do.
@@ -3948,7 +3964,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
if (!self.wantSafety())
return; // The already existing value will do just fine.
// TODO Upgrade this to a memset call when we have that available.
- switch (ty.abiSize(self.target.*)) {
+ switch (ty.abiSize(mod)) {
1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
@@ -3978,7 +3994,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
const overflow_bit_ty = ty.structFieldType(1);
- const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*));
+ const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
const cond_reg = try self.register_manager.allocReg(null, gp);
// TODO handle floating point CCRs
@@ -4152,13 +4168,14 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
+ const mod = self.bin_file.options.module.?;
const error_type = ty.errorUnionSet();
const payload_type = ty.errorUnionPayload();
- if (!error_type.hasRuntimeBits()) {
+ if (!error_type.hasRuntimeBits(mod)) {
return MCValue{ .immediate = 0 }; // always false
- } else if (!payload_type.hasRuntimeBits()) {
- if (error_type.abiSize(self.target.*) <= 8) {
+ } else if (!payload_type.hasRuntimeBits(mod)) {
+ if (error_type.abiSize(mod) <= 8) {
const reg_mcv: MCValue = switch (operand) {
.register => operand,
else => .{ .register = try self.copyToTmpRegister(error_type, operand) },
@@ -4249,8 +4266,9 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void {
}
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
+ const mod = self.bin_file.options.module.?;
const elem_ty = ptr_ty.elemType();
- const elem_size = elem_ty.abiSize(self.target.*);
+ const elem_size = elem_ty.abiSize(mod);
switch (ptr) {
.none => unreachable,
@@ -4321,11 +4339,11 @@ fn minMax(
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO min/max on floats", .{}),
.Vector => return self.fail("TODO min/max on vectors", .{}),
.Int => {
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
// TODO skip register setting when one of the operands
// is a small (fits in i13) immediate.
@@ -4455,6 +4473,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
errdefer self.gpa.free(result.args);
const ret_ty = fn_ty.fnReturnType();
+ const mod = self.bin_file.options.module.?;
switch (cc) {
.Naked => {
@@ -4478,7 +4497,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
};
for (param_types, 0..) |ty, i| {
- const param_size = @intCast(u32, ty.abiSize(self.target.*));
+ const param_size = @intCast(u32, ty.abiSize(mod));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
result.args[i] = .{ .register = argument_registers[next_register] };
@@ -4505,12 +4524,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
result.stack_byte_count = next_stack_offset;
result.stack_align = 16;
- if (ret_ty.zigTypeTag() == .NoReturn) {
+ if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = .{ .unreach = {} };
- } else if (!ret_ty.hasRuntimeBits()) {
+ } else if (!ret_ty.hasRuntimeBits(mod)) {
result.return_value = .{ .none = {} };
} else {
- const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+ const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
// The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller.
if (ret_ty_size <= 8) {
result.return_value = switch (role) {
@@ -4528,40 +4547,37 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
return result;
}
-fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
- // First section of indexes correspond to a set number of constant values.
- const ref_int = @enumToInt(inst);
- if (ref_int < Air.Inst.Ref.typed_value_map.len) {
- const tv = Air.Inst.Ref.typed_value_map[ref_int];
- if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) {
- return MCValue{ .none = {} };
- }
- return self.genTypedValue(tv);
- }
+fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
+ const mod = self.bin_file.options.module.?;
+ const ty = self.air.typeOf(ref);
// If the type has no codegen bits, no need to store it.
- const inst_ty = self.air.typeOf(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError())
- return MCValue{ .none = {} };
-
- const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
- switch (self.air.instructions.items(.tag)[inst_index]) {
- .constant => {
- // Constants have static lifetimes, so they are always memoized in the outer most table.
- const branch = &self.branch_stack.items[0];
- const gop = try branch.inst_table.getOrPut(self.gpa, inst_index);
- if (!gop.found_existing) {
- const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl;
- gop.value_ptr.* = try self.genTypedValue(.{
- .ty = inst_ty,
- .val = self.air.values[ty_pl.payload],
- });
- }
- return gop.value_ptr.*;
- },
- .const_ty => unreachable,
- else => return self.getResolvedInstValue(inst_index),
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
+
+ if (Air.refToIndex(ref)) |inst| {
+ switch (self.air.instructions.items(.tag)[inst]) {
+ .constant => {
+                // Constants have static lifetimes, so they are always memoized in the outermost table.
+ const branch = &self.branch_stack.items[0];
+ const gop = try branch.inst_table.getOrPut(self.gpa, inst);
+ if (!gop.found_existing) {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ gop.value_ptr.* = try self.genTypedValue(.{
+ .ty = ty,
+ .val = self.air.values[ty_pl.payload],
+ });
+ }
+ return gop.value_ptr.*;
+ },
+ .const_ty => unreachable,
+ else => return self.getResolvedInstValue(inst),
+ }
}
+
+ return self.genTypedValue(.{
+ .ty = ty,
+ .val = self.air.value(ref, mod).?,
+ });
}
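
The rewritten resolveInst leans on Air.refToIndex and Air.ref_start_index: refs below ref_start_index name interned values with no backing instruction, while refs at or above it map to an instruction index. A minimal standalone sketch of that mapping (the constant here is illustrative; the real value lives in Air.zig and is not shown in this diff):

const std = @import("std");

const ref_start_index: u32 = 32; // illustrative value, not the compiler's actual constant

fn refToIndex(ref: u32) ?u32 {
    return if (ref >= ref_start_index) ref - ref_start_index else null;
}

fn indexToRef(index: u32) u32 {
    return index + ref_start_index;
}

test "refs below ref_start_index have no instruction index" {
    try std.testing.expect(refToIndex(3) == null);
    try std.testing.expectEqual(@as(?u32, 0), refToIndex(ref_start_index));
    try std.testing.expectEqual(ref_start_index + 5, indexToRef(5));
}
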
fn ret(self: *Self, mcv: MCValue) !void {
@@ -4666,7 +4682,8 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
}
fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
- const abi_size = value_ty.abiSize(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const abi_size = value_ty.abiSize(mod);
switch (ptr) {
.none => unreachable,
@@ -4707,10 +4724,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
return if (self.liveness.isUnused(inst)) .dead else result: {
+ const mod = self.bin_file.options.module.?;
const mcv = try self.resolveInst(operand);
const ptr_ty = self.air.typeOf(operand);
const struct_ty = ptr_ty.childType();
- const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+ const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
switch (mcv) {
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -4748,8 +4766,9 @@ fn trunc(
operand_ty: Type,
dest_ty: Type,
) !MCValue {
- const info_a = operand_ty.intInfo(self.target.*);
- const info_b = dest_ty.intInfo(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const info_a = operand_ty.intInfo(mod);
+ const info_b = dest_ty.intInfo(mod);
if (info_b.bits <= 64) {
const operand_reg = switch (operand) {
src/arch/wasm/abi.zig
@@ -5,9 +5,11 @@
//! Note: The above-mentioned document is not an official specification, and is therefore called a convention.
const std = @import("std");
-const Type = @import("../../type.zig").Type;
const Target = std.Target;
+const Type = @import("../../type.zig").Type;
+const Module = @import("../../Module.zig");
+
/// Defines how to pass a type as part of a function signature,
/// both for parameters as well as return values.
pub const Class = enum { direct, indirect, none };
@@ -19,12 +21,13 @@ const direct: [2]Class = .{ .direct, .none };
/// Classifies a given Zig type to determine how it must be passed
/// or returned as a value within a wasm function.
/// When all elements result in `.none`, no value must be passed in or returned.
-pub fn classifyType(ty: Type, target: Target) [2]Class {
- if (!ty.hasRuntimeBitsIgnoreComptime()) return none;
- switch (ty.zigTypeTag()) {
+pub fn classifyType(ty: Type, mod: *const Module) [2]Class {
+ const target = mod.getTarget();
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none;
+ switch (ty.zigTypeTag(mod)) {
.Struct => {
if (ty.containerLayout() == .Packed) {
- if (ty.bitSize(target) <= 64) return direct;
+ if (ty.bitSize(mod) <= 64) return direct;
return .{ .direct, .direct };
}
// When the struct type is non-scalar
@@ -32,14 +35,14 @@ pub fn classifyType(ty: Type, target: Target) [2]Class {
// When the struct's alignment is non-natural
const field = ty.structFields().values()[0];
if (field.abi_align != 0) {
- if (field.abi_align > field.ty.abiAlignment(target)) {
+ if (field.abi_align > field.ty.abiAlignment(mod)) {
return memory;
}
}
- return classifyType(field.ty, target);
+ return classifyType(field.ty, mod);
},
.Int, .Enum, .ErrorSet, .Vector => {
- const int_bits = ty.intInfo(target).bits;
+ const int_bits = ty.intInfo(mod).bits;
if (int_bits <= 64) return direct;
if (int_bits <= 128) return .{ .direct, .direct };
return memory;
@@ -53,7 +56,7 @@ pub fn classifyType(ty: Type, target: Target) [2]Class {
.Bool => return direct,
.Array => return memory,
.Optional => {
- std.debug.assert(ty.isPtrLikeOptional());
+ std.debug.assert(ty.isPtrLikeOptional(mod));
return direct;
},
.Pointer => {
@@ -62,13 +65,13 @@ pub fn classifyType(ty: Type, target: Target) [2]Class {
},
.Union => {
if (ty.containerLayout() == .Packed) {
- if (ty.bitSize(target) <= 64) return direct;
+ if (ty.bitSize(mod) <= 64) return direct;
return .{ .direct, .direct };
}
- const layout = ty.unionGetLayout(target);
+ const layout = ty.unionGetLayout(mod);
std.debug.assert(layout.tag_size == 0);
if (ty.unionFields().count() > 1) return memory;
- return classifyType(ty.unionFields().values()[0].ty, target);
+ return classifyType(ty.unionFields().values()[0].ty, mod);
},
.ErrorUnion,
.Frame,
@@ -90,29 +93,29 @@ pub fn classifyType(ty: Type, target: Target) [2]Class {
/// Returns the scalar type a given type can represent.
/// Asserts the given type can be represented as a scalar, such as
/// a struct with a single scalar field.
-pub fn scalarType(ty: Type, target: std.Target) Type {
- switch (ty.zigTypeTag()) {
+pub fn scalarType(ty: Type, mod: *const Module) Type {
+ switch (ty.zigTypeTag(mod)) {
.Struct => {
switch (ty.containerLayout()) {
.Packed => {
const struct_obj = ty.castTag(.@"struct").?.data;
- return scalarType(struct_obj.backing_int_ty, target);
+ return scalarType(struct_obj.backing_int_ty, mod);
},
else => {
std.debug.assert(ty.structFieldCount() == 1);
- return scalarType(ty.structFieldType(0), target);
+ return scalarType(ty.structFieldType(0), mod);
},
}
},
.Union => {
if (ty.containerLayout() != .Packed) {
- const layout = ty.unionGetLayout(target);
+ const layout = ty.unionGetLayout(mod);
if (layout.payload_size == 0 and layout.tag_size != 0) {
- return scalarType(ty.unionTagTypeSafety().?, target);
+ return scalarType(ty.unionTagTypeSafety().?, mod);
}
std.debug.assert(ty.unionFields().count() == 1);
}
- return scalarType(ty.unionFields().values()[0].ty, target);
+ return scalarType(ty.unionFields().values()[0].ty, mod);
},
else => return ty,
}
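
The classification above encodes the wasm C-ABI convention as a pair of slots: zero-bit types classify as none; for integers and other scalar-like types, values up to 64 bits travel directly in one slot, 65–128-bit values in two, and anything larger goes through memory; aggregates either recurse into their single field or fall back to memory. A size-only sketch of that rule (simplified: the real classifier also inspects layout, alignment, and field types, and the second slot of the file's memory constant is assumed here):

const std = @import("std");

const Class = enum { direct, indirect, none };

// Size-only approximation of classifyType; `.{ .indirect, .none }` stands in
// for the file's `memory` constant, whose definition is not shown in this diff.
fn classifyByBitSize(bit_size: u64) [2]Class {
    if (bit_size == 0) return .{ .none, .none };
    if (bit_size <= 64) return .{ .direct, .none };
    if (bit_size <= 128) return .{ .direct, .direct };
    return .{ .indirect, .none };
}

test "scalar classification by size" {
    try std.testing.expectEqual([2]Class{ .direct, .none }, classifyByBitSize(32));
    try std.testing.expectEqual([2]Class{ .direct, .direct }, classifyByBitSize(128));
    try std.testing.expectEqual([2]Class{ .indirect, .none }, classifyByBitSize(256));
}
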
src/arch/wasm/CodeGen.zig
@@ -788,9 +788,10 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
const gop = try func.branches.items[0].values.getOrPut(func.gpa, ref);
assert(!gop.found_existing);
- const val = func.air.value(ref).?;
+ const mod = func.bin_file.base.options.module.?;
+ const val = func.air.value(ref, mod).?;
const ty = func.air.typeOf(ref);
- if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isInt() and !ty.isError()) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) {
gop.value_ptr.* = WValue{ .none = {} };
return gop.value_ptr.*;
}
@@ -801,7 +802,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
//
// In the other cases, we will simply lower the constant to a value that fits
// into a single local (such as a pointer, integer, bool, etc).
- const result = if (isByRef(ty, func.target)) blk: {
+ const result = if (isByRef(ty, mod)) blk: {
const sym_index = try func.bin_file.lowerUnnamedConst(.{ .ty = ty, .val = val }, func.decl_index);
break :blk WValue{ .memory = sym_index };
} else try func.lowerConstant(val, ty);
@@ -987,8 +988,9 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
}
/// Using a given `Type`, returns the corresponding wasm value type
-fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype {
- return switch (ty.zigTypeTag()) {
+fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
+ const target = mod.getTarget();
+ return switch (ty.zigTypeTag(mod)) {
.Float => blk: {
const bits = ty.floatBits(target);
if (bits == 16) return wasm.Valtype.i32; // stored/loaded as u16
@@ -998,7 +1000,7 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype {
return wasm.Valtype.i32; // represented as pointer to stack
},
.Int, .Enum => blk: {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
if (info.bits <= 32) break :blk wasm.Valtype.i32;
if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64;
break :blk wasm.Valtype.i32; // represented as pointer to stack
@@ -1006,22 +1008,18 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype {
.Struct => switch (ty.containerLayout()) {
.Packed => {
const struct_obj = ty.castTag(.@"struct").?.data;
- return typeToValtype(struct_obj.backing_int_ty, target);
+ return typeToValtype(struct_obj.backing_int_ty, mod);
},
else => wasm.Valtype.i32,
},
- .Vector => switch (determineSimdStoreStrategy(ty, target)) {
+ .Vector => switch (determineSimdStoreStrategy(ty, mod)) {
.direct => wasm.Valtype.v128,
.unrolled => wasm.Valtype.i32,
},
.Union => switch (ty.containerLayout()) {
.Packed => {
- var int_ty_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, ty.bitSize(target)),
- };
- const int_ty = Type.initPayload(&int_ty_payload.base);
- return typeToValtype(int_ty, target);
+ const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory");
+ return typeToValtype(int_ty, mod);
},
else => wasm.Valtype.i32,
},
@@ -1030,17 +1028,17 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype {
}
/// Using a given `Type`, returns the byte representation of its wasm value type
-fn genValtype(ty: Type, target: std.Target) u8 {
- return wasm.valtype(typeToValtype(ty, target));
+fn genValtype(ty: Type, mod: *Module) u8 {
+ return wasm.valtype(typeToValtype(ty, mod));
}
/// Using a given `Type`, returns the corresponding wasm value type
/// Unlike `genValtype`, this also allows `void` to create a block
/// with no return type
-fn genBlockType(ty: Type, target: std.Target) u8 {
+fn genBlockType(ty: Type, mod: *Module) u8 {
return switch (ty.tag()) {
.void, .noreturn => wasm.block_empty,
- else => genValtype(ty, target),
+ else => genValtype(ty, mod),
};
}
@@ -1101,7 +1099,8 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue {
/// Creates one local for a given `Type`.
/// Returns a corresponding `WValue` with `local` as the active tag.
fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
- const valtype = typeToValtype(ty, func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const valtype = typeToValtype(ty, mod);
switch (valtype) {
.i32 => if (func.free_locals_i32.popOrNull()) |index| {
log.debug("reusing local ({d}) of type {}", .{ index, valtype });
@@ -1132,7 +1131,8 @@ fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
/// Ensures a new local will be created. This is useful when a
/// zero-initialized local is needed.
fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
- try func.locals.append(func.gpa, genValtype(ty, func.target));
+ const mod = func.bin_file.base.options.module.?;
+ try func.locals.append(func.gpa, genValtype(ty, mod));
const initial_index = func.local_index;
func.local_index += 1;
return WValue{ .local = .{ .value = initial_index, .references = 1 } };
@@ -1140,48 +1140,54 @@ fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
/// Generates a `wasm.Type` from a given function type.
/// Memory is owned by the caller.
-fn genFunctype(gpa: Allocator, cc: std.builtin.CallingConvention, params: []const Type, return_type: Type, target: std.Target) !wasm.Type {
+fn genFunctype(
+ gpa: Allocator,
+ cc: std.builtin.CallingConvention,
+ params: []const Type,
+ return_type: Type,
+ mod: *Module,
+) !wasm.Type {
var temp_params = std.ArrayList(wasm.Valtype).init(gpa);
defer temp_params.deinit();
var returns = std.ArrayList(wasm.Valtype).init(gpa);
defer returns.deinit();
- if (firstParamSRet(cc, return_type, target)) {
+ if (firstParamSRet(cc, return_type, mod)) {
try temp_params.append(.i32); // memory address is always a 32-bit handle
- } else if (return_type.hasRuntimeBitsIgnoreComptime()) {
+ } else if (return_type.hasRuntimeBitsIgnoreComptime(mod)) {
if (cc == .C) {
- const res_classes = abi.classifyType(return_type, target);
+ const res_classes = abi.classifyType(return_type, mod);
assert(res_classes[0] == .direct and res_classes[1] == .none);
- const scalar_type = abi.scalarType(return_type, target);
- try returns.append(typeToValtype(scalar_type, target));
+ const scalar_type = abi.scalarType(return_type, mod);
+ try returns.append(typeToValtype(scalar_type, mod));
} else {
- try returns.append(typeToValtype(return_type, target));
+ try returns.append(typeToValtype(return_type, mod));
}
- } else if (return_type.isError()) {
+ } else if (return_type.isError(mod)) {
try returns.append(.i32);
}
// param types
for (params) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
switch (cc) {
.C => {
- const param_classes = abi.classifyType(param_type, target);
+ const param_classes = abi.classifyType(param_type, mod);
for (param_classes) |class| {
if (class == .none) continue;
if (class == .direct) {
- const scalar_type = abi.scalarType(param_type, target);
- try temp_params.append(typeToValtype(scalar_type, target));
+ const scalar_type = abi.scalarType(param_type, mod);
+ try temp_params.append(typeToValtype(scalar_type, mod));
} else {
- try temp_params.append(typeToValtype(param_type, target));
+ try temp_params.append(typeToValtype(param_type, mod));
}
}
},
- else => if (isByRef(param_type, target))
+ else => if (isByRef(param_type, mod))
try temp_params.append(.i32)
else
- try temp_params.append(typeToValtype(param_type, target)),
+ try temp_params.append(typeToValtype(param_type, mod)),
}
}
@@ -1227,7 +1233,8 @@ pub fn generate(
fn genFunc(func: *CodeGen) InnerError!void {
const fn_info = func.decl.ty.fnInfo();
- var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target);
+ const mod = func.bin_file.base.options.module.?;
+ var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod);
defer func_type.deinit(func.gpa);
_ = try func.bin_file.storeDeclType(func.decl_index, func_type);
@@ -1254,7 +1261,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
if (func_type.returns.len != 0 and func.air.instructions.len > 0) {
const inst = @intCast(u32, func.air.instructions.len - 1);
const last_inst_ty = func.air.typeOfIndex(inst);
- if (!last_inst_ty.hasRuntimeBitsIgnoreComptime() or last_inst_ty.isNoReturn()) {
+ if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn()) {
try func.addTag(.@"unreachable");
}
}
@@ -1335,6 +1342,7 @@ const CallWValues = struct {
};
fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues {
+ const mod = func.bin_file.base.options.module.?;
const cc = fn_ty.fnCallingConvention();
const param_types = try func.gpa.alloc(Type, fn_ty.fnParamLen());
defer func.gpa.free(param_types);
@@ -1351,7 +1359,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
// Check if we store the result as a pointer to the stack rather than
// by value
const fn_info = fn_ty.fnInfo();
- if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) {
+ if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) {
// the sret arg will be passed as first argument, therefore we
// set the `return_value` before allocating locals for regular args.
result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } };
@@ -1361,7 +1369,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
switch (cc) {
.Unspecified => {
for (param_types) |ty| {
- if (!ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
continue;
}
@@ -1371,7 +1379,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
},
.C => {
for (param_types) |ty| {
- const ty_classes = abi.classifyType(ty, func.target);
+ const ty_classes = abi.classifyType(ty, mod);
for (ty_classes) |class| {
if (class == .none) continue;
try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } });
@@ -1385,11 +1393,11 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
return result;
}
-fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, target: std.Target) bool {
+fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *const Module) bool {
switch (cc) {
- .Unspecified, .Inline => return isByRef(return_type, target),
+ .Unspecified, .Inline => return isByRef(return_type, mod),
.C => {
- const ty_classes = abi.classifyType(return_type, target);
+ const ty_classes = abi.classifyType(return_type, mod);
if (ty_classes[0] == .indirect) return true;
if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true;
return false;
@@ -1405,16 +1413,17 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
return func.lowerToStack(value);
}
- const ty_classes = abi.classifyType(ty, func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const ty_classes = abi.classifyType(ty, mod);
assert(ty_classes[0] != .none);
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Struct, .Union => {
if (ty_classes[0] == .indirect) {
return func.lowerToStack(value);
}
assert(ty_classes[0] == .direct);
- const scalar_type = abi.scalarType(ty, func.target);
- const abi_size = scalar_type.abiSize(func.target);
+ const scalar_type = abi.scalarType(ty, mod);
+ const abi_size = scalar_type.abiSize(mod);
try func.emitWValue(value);
// When the value lives in the virtual stack, we must load it onto the actual stack
@@ -1422,12 +1431,12 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
const opcode = buildOpcode(.{
.op = .load,
.width = @intCast(u8, abi_size),
- .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned,
- .valtype1 = typeToValtype(scalar_type, func.target),
+ .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned,
+ .valtype1 = typeToValtype(scalar_type, mod),
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = value.offset(),
- .alignment = scalar_type.abiAlignment(func.target),
+ .alignment = scalar_type.abiAlignment(mod),
});
}
},
@@ -1436,7 +1445,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
return func.lowerToStack(value);
}
assert(ty_classes[0] == .direct and ty_classes[1] == .direct);
- assert(ty.abiSize(func.target) == 16);
+ assert(ty.abiSize(mod) == 16);
// in this case we have an integer or float that must be lowered as 2 i64's.
try func.emitWValue(value);
try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 });
@@ -1503,18 +1512,18 @@ fn restoreStackPointer(func: *CodeGen) !void {
///
/// Asserts `Type` has codegen bits.
fn allocStack(func: *CodeGen, ty: Type) !WValue {
- assert(ty.hasRuntimeBitsIgnoreComptime());
+ const mod = func.bin_file.base.options.module.?;
+ assert(ty.hasRuntimeBitsIgnoreComptime(mod));
if (func.initial_stack_value == .none) {
try func.initializeStack();
}
- const abi_size = std.math.cast(u32, ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
+ const abi_size = std.math.cast(u32, ty.abiSize(mod)) orelse {
return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
- ty.fmt(module), ty.abiSize(func.target),
+ ty.fmt(mod), ty.abiSize(mod),
});
};
- const abi_align = ty.abiAlignment(func.target);
+ const abi_align = ty.abiAlignment(mod);
if (abi_align > func.stack_alignment) {
func.stack_alignment = abi_align;
@@ -1531,6 +1540,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
/// This differs from allocStack in that it uses the pointer's alignment,
/// if set, to ensure the stack alignment is set correctly.
fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
+ const mod = func.bin_file.base.options.module.?;
const ptr_ty = func.air.typeOfIndex(inst);
const pointee_ty = ptr_ty.childType();
@@ -1538,15 +1548,14 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
try func.initializeStack();
}
- if (!pointee_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pointee_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.allocStack(Type.usize); // create a value containing just the stack pointer.
}
- const abi_alignment = ptr_ty.ptrAlignment(func.target);
- const abi_size = std.math.cast(u32, pointee_ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
+ const abi_alignment = ptr_ty.ptrAlignment(mod);
+ const abi_size = std.math.cast(u32, pointee_ty.abiSize(mod)) orelse {
return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
- pointee_ty.fmt(module), pointee_ty.abiSize(func.target),
+ pointee_ty.fmt(mod), pointee_ty.abiSize(mod),
});
};
if (abi_alignment > func.stack_alignment) {
@@ -1704,8 +1713,9 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch {
/// For a given `Type`, returns true when the type will be passed
/// by reference rather than by value.
-fn isByRef(ty: Type, target: std.Target) bool {
- switch (ty.zigTypeTag()) {
+fn isByRef(ty: Type, mod: *const Module) bool {
+ const target = mod.getTarget();
+ switch (ty.zigTypeTag(mod)) {
.Type,
.ComptimeInt,
.ComptimeFloat,
@@ -1726,40 +1736,40 @@ fn isByRef(ty: Type, target: std.Target) bool {
.Array,
.Frame,
- => return ty.hasRuntimeBitsIgnoreComptime(),
+ => return ty.hasRuntimeBitsIgnoreComptime(mod),
.Union => {
if (ty.castTag(.@"union")) |union_ty| {
if (union_ty.data.layout == .Packed) {
- return ty.abiSize(target) > 8;
+ return ty.abiSize(mod) > 8;
}
}
- return ty.hasRuntimeBitsIgnoreComptime();
+ return ty.hasRuntimeBitsIgnoreComptime(mod);
},
.Struct => {
if (ty.castTag(.@"struct")) |struct_ty| {
const struct_obj = struct_ty.data;
if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
- return isByRef(struct_obj.backing_int_ty, target);
+ return isByRef(struct_obj.backing_int_ty, mod);
}
}
- return ty.hasRuntimeBitsIgnoreComptime();
+ return ty.hasRuntimeBitsIgnoreComptime(mod);
},
- .Vector => return determineSimdStoreStrategy(ty, target) == .unrolled,
- .Int => return ty.intInfo(target).bits > 64,
+ .Vector => return determineSimdStoreStrategy(ty, mod) == .unrolled,
+ .Int => return ty.intInfo(mod).bits > 64,
.Float => return ty.floatBits(target) > 64,
.ErrorUnion => {
const pl_ty = ty.errorUnionPayload();
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return false;
}
return true;
},
.Optional => {
- if (ty.isPtrLikeOptional()) return false;
+ if (ty.isPtrLikeOptional(mod)) return false;
var buf: Type.Payload.ElemType = undefined;
const pl_type = ty.optionalChild(&buf);
- if (pl_type.zigTypeTag() == .ErrorSet) return false;
- return pl_type.hasRuntimeBitsIgnoreComptime();
+ if (pl_type.zigTypeTag(mod) == .ErrorSet) return false;
+ return pl_type.hasRuntimeBitsIgnoreComptime(mod);
},
.Pointer => {
// Slices act like struct and will be passed by reference
@@ -1778,10 +1788,11 @@ const SimdStoreStrategy = enum {
/// This means that when a given type is 128 bits wide and either the simd128 or relaxed-simd
/// features are enabled, the function will return `.direct`. This allows it to be stored
/// with a single instruction, rather than an unrolled version.
-fn determineSimdStoreStrategy(ty: Type, target: std.Target) SimdStoreStrategy {
- std.debug.assert(ty.zigTypeTag() == .Vector);
- if (ty.bitSize(target) != 128) return .unrolled;
+fn determineSimdStoreStrategy(ty: Type, mod: *const Module) SimdStoreStrategy {
+ std.debug.assert(ty.zigTypeTag(mod) == .Vector);
+ if (ty.bitSize(mod) != 128) return .unrolled;
const hasFeature = std.Target.wasm.featureSetHas;
+ const target = mod.getTarget();
const features = target.cpu.features;
if (hasFeature(features, .relaxed_simd) or hasFeature(features, .simd128)) {
return .direct;
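
In other words, only 128-bit vectors on targets with the simd128 or relaxed-simd feature get a single v128 store; everything else is stored unrolled. A standalone sketch of that decision (the real function queries the module's resolved target rather than taking booleans):

const std = @import("std");

const Strategy = enum { direct, unrolled };

// Simplified stand-in for determineSimdStoreStrategy above.
fn simdStoreStrategy(bit_size: u64, has_simd128_or_relaxed: bool) Strategy {
    if (bit_size != 128) return .unrolled;
    return if (has_simd128_or_relaxed) .direct else .unrolled;
}

test "only 128-bit vectors with SIMD support store directly" {
    try std.testing.expectEqual(Strategy.direct, simdStoreStrategy(128, true));
    try std.testing.expectEqual(Strategy.unrolled, simdStoreStrategy(128, false));
    try std.testing.expectEqual(Strategy.unrolled, simdStoreStrategy(256, true));
}
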
@@ -2084,32 +2095,33 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(un_op);
const fn_info = func.decl.ty.fnInfo();
const ret_ty = fn_info.return_type;
+ const mod = func.bin_file.base.options.module.?;
// result must be stored in the stack and we return a pointer
// to the stack instead
if (func.return_value != .none) {
try func.store(func.return_value, operand, ret_ty, 0);
- } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime()) {
- switch (ret_ty.zigTypeTag()) {
+ } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ switch (ret_ty.zigTypeTag(mod)) {
// Aggregate types can be lowered as a singular value
.Struct, .Union => {
- const scalar_type = abi.scalarType(ret_ty, func.target);
+ const scalar_type = abi.scalarType(ret_ty, mod);
try func.emitWValue(operand);
const opcode = buildOpcode(.{
.op = .load,
- .width = @intCast(u8, scalar_type.abiSize(func.target) * 8),
- .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned,
- .valtype1 = typeToValtype(scalar_type, func.target),
+ .width = @intCast(u8, scalar_type.abiSize(mod) * 8),
+ .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned,
+ .valtype1 = typeToValtype(scalar_type, mod),
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = operand.offset(),
- .alignment = scalar_type.abiAlignment(func.target),
+ .alignment = scalar_type.abiAlignment(mod),
});
},
else => try func.emitWValue(operand),
}
} else {
- if (!ret_ty.hasRuntimeBitsIgnoreComptime() and ret_ty.isError()) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and ret_ty.isError(mod)) {
try func.addImm32(0);
} else {
try func.emitWValue(operand);
@@ -2123,14 +2135,15 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const child_type = func.air.typeOfIndex(inst).childType();
+ const mod = func.bin_file.base.options.module.?;
var result = result: {
- if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime()) {
+ if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
break :result try func.allocStack(Type.usize); // create pointer to void
}
const fn_info = func.decl.ty.fnInfo();
- if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) {
+ if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) {
break :result func.return_value;
}
@@ -2141,16 +2154,17 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const un_op = func.air.instructions.items(.data)[inst].un_op;
const operand = try func.resolveInst(un_op);
const ret_ty = func.air.typeOf(un_op).childType();
const fn_info = func.decl.ty.fnInfo();
- if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
- if (ret_ty.isError()) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (ret_ty.isError(mod)) {
try func.addImm32(0);
}
- } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) {
+ } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, mod)) {
// leave on the stack
_ = try func.load(operand, ret_ty, 0);
}
@@ -2167,26 +2181,26 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]);
const ty = func.air.typeOf(pl_op.operand);
- const fn_ty = switch (ty.zigTypeTag()) {
+ const mod = func.bin_file.base.options.module.?;
+ const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
.Pointer => ty.childType(),
else => unreachable,
};
const ret_ty = fn_ty.fnReturnType();
const fn_info = fn_ty.fnInfo();
- const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target);
+ const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, mod);
const callee: ?Decl.Index = blk: {
- const func_val = func.air.value(pl_op.operand) orelse break :blk null;
- const module = func.bin_file.base.options.module.?;
+ const func_val = func.air.value(pl_op.operand, mod) orelse break :blk null;
if (func_val.castTag(.function)) |function| {
_ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl);
break :blk function.data.owner_decl;
} else if (func_val.castTag(.extern_fn)) |extern_fn| {
- const ext_decl = module.declPtr(extern_fn.data.owner_decl);
+ const ext_decl = mod.declPtr(extern_fn.data.owner_decl);
const ext_info = ext_decl.ty.fnInfo();
- var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target);
+ var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, mod);
defer func_type.deinit(func.gpa);
const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl);
const atom = func.bin_file.getAtomPtr(atom_index);
@@ -2215,7 +2229,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const arg_val = try func.resolveInst(arg);
const arg_ty = func.air.typeOf(arg);
- if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val);
}
@@ -2226,11 +2240,11 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
} else {
// in this case we call a function pointer
// so load its value onto the stack
- std.debug.assert(ty.zigTypeTag() == .Pointer);
+ std.debug.assert(ty.zigTypeTag(mod) == .Pointer);
const operand = try func.resolveInst(pl_op.operand);
try func.emitWValue(operand);
- var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target);
+ var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod);
defer fn_type.deinit(func.gpa);
const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type);
@@ -2238,7 +2252,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
}
const result_value = result_value: {
- if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
break :result_value WValue{ .none = {} };
} else if (ret_ty.isNoReturn()) {
try func.addTag(.@"unreachable");
@@ -2246,10 +2260,10 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
} else if (first_param_sret) {
break :result_value sret;
// TODO: Make this less fragile and optimize
- } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag() == .Struct or ret_ty.zigTypeTag() == .Union) {
+ } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) {
const result_local = try func.allocLocal(ret_ty);
try func.addLabel(.local_set, result_local.local.value);
- const scalar_type = abi.scalarType(ret_ty, func.target);
+ const scalar_type = abi.scalarType(ret_ty, mod);
const result = try func.allocStack(scalar_type);
try func.store(result, result_local, scalar_type, 0);
break :result_value result;
@@ -2272,6 +2286,7 @@ fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
} else {
@@ -2290,17 +2305,13 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
} else {
// at this point we have a non-natural alignment, we must
// load the value, and then shift+or the rhs into the result location.
- var int_ty_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = ptr_info.host_size * 8,
- };
- const int_elem_ty = Type.initPayload(&int_ty_payload.base);
+ const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8);
- if (isByRef(int_elem_ty, func.target)) {
+ if (isByRef(int_elem_ty, mod)) {
return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
}
- var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(func.target))) - 1);
+ var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(mod))) - 1);
mask <<= @intCast(u6, ptr_info.bit_offset);
mask ^= ~@as(u64, 0);
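            // Worked example (values illustrative): a u3 field at bit_offset 2:
            //   (1 << 3) - 1   == 0b00111   ones covering the field's width
            //   0b00111 << 2   == 0b11100   moved to the field's bit position
            //   0b11100 ^ ~0   == ~0b11100  zeros at the field, ones elsewhere
            // `mask` is now ready to clear the old field bits before the shifted
            // rhs is merged into the loaded host value.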
const shift_val = if (ptr_info.host_size <= 4)
@@ -2329,11 +2340,12 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void {
assert(!(lhs != .stack and rhs == .stack));
- const abi_size = ty.abiSize(func.target);
- switch (ty.zigTypeTag()) {
+ const mod = func.bin_file.base.options.module.?;
+ const abi_size = ty.abiSize(mod);
+ switch (ty.zigTypeTag(mod)) {
.ErrorUnion => {
const pl_ty = ty.errorUnionPayload();
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.store(lhs, rhs, Type.anyerror, 0);
}
@@ -2341,26 +2353,26 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
.Optional => {
- if (ty.isPtrLikeOptional()) {
+ if (ty.isPtrLikeOptional(mod)) {
return func.store(lhs, rhs, Type.usize, 0);
}
var buf: Type.Payload.ElemType = undefined;
const pl_ty = ty.optionalChild(&buf);
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.store(lhs, rhs, Type.u8, 0);
}
- if (pl_ty.zigTypeTag() == .ErrorSet) {
+ if (pl_ty.zigTypeTag(mod) == .ErrorSet) {
return func.store(lhs, rhs, Type.anyerror, 0);
}
const len = @intCast(u32, abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .Struct, .Array, .Union => if (isByRef(ty, func.target)) {
+ .Struct, .Array, .Union => if (isByRef(ty, mod)) {
const len = @intCast(u32, abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
- .Vector => switch (determineSimdStoreStrategy(ty, func.target)) {
+ .Vector => switch (determineSimdStoreStrategy(ty, mod)) {
.unrolled => {
const len = @intCast(u32, abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
@@ -2374,7 +2386,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_store),
offset + lhs.offset(),
- ty.abiAlignment(func.target),
+ ty.abiAlignment(mod),
});
return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
},
@@ -2404,7 +2416,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
try func.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset());
return;
} else if (abi_size > 16) {
- try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(func.target)) });
+ try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(mod)) });
},
else => if (abi_size > 8) {
return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{
@@ -2418,7 +2430,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
// into lhs, so we calculate that and emit that instead
try func.lowerToStack(rhs);
- const valtype = typeToValtype(ty, func.target);
+ const valtype = typeToValtype(ty, mod);
const opcode = buildOpcode(.{
.valtype1 = valtype,
.width = @intCast(u8, abi_size * 8),
@@ -2428,21 +2440,22 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
// store rhs value at stack pointer's location in memory
try func.addMemArg(
Mir.Inst.Tag.fromOpcode(opcode),
- .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(func.target) },
+ .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(mod) },
);
}
fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const ty = func.air.getRefType(ty_op.ty);
const ptr_ty = func.air.typeOf(ty_op.operand);
const ptr_info = ptr_ty.ptrInfo().data;
- if (!ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{ty_op.operand});
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand});
const result = result: {
- if (isByRef(ty, func.target)) {
+ if (isByRef(ty, mod)) {
const new_local = try func.allocStack(ty);
try func.store(new_local, operand, ty, 0);
break :result new_local;
@@ -2455,11 +2468,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// at this point we have a non-natural alignment, we must
// shift the value to obtain the correct bit.
- var int_ty_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = ptr_info.host_size * 8,
- };
- const int_elem_ty = Type.initPayload(&int_ty_payload.base);
+ const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8);
const shift_val = if (ptr_info.host_size <= 4)
WValue{ .imm32 = ptr_info.bit_offset }
else if (ptr_info.host_size <= 8)
@@ -2479,25 +2488,26 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Loads an operand from the linear memory section.
/// NOTE: Leaves the value on the stack.
fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
// load local's value from memory by its stack position
try func.emitWValue(operand);
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
// TODO: Add helper functions for simd opcodes
const extra_index = @intCast(u32, func.mir_extra.items.len);
// stores as := opcode, offset, alignment (opcode::memarg)
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_load),
offset + operand.offset(),
- ty.abiAlignment(func.target),
+ ty.abiAlignment(mod),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
return WValue{ .stack = {} };
}
- const abi_size = @intCast(u8, ty.abiSize(func.target));
+ const abi_size = @intCast(u8, ty.abiSize(mod));
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(ty, func.target),
+ .valtype1 = typeToValtype(ty, mod),
.width = abi_size * 8,
.op = .load,
.signedness = .unsigned,
@@ -2505,7 +2515,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
try func.addMemArg(
Mir.Inst.Tag.fromOpcode(opcode),
- .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(func.target) },
+ .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(mod) },
);
return WValue{ .stack = {} };
@@ -2516,8 +2526,9 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const arg = func.args[arg_index];
const cc = func.decl.ty.fnInfo().cc;
const arg_ty = func.air.typeOfIndex(inst);
+ const mod = func.bin_file.base.options.module.?;
if (cc == .C) {
- const arg_classes = abi.classifyType(arg_ty, func.target);
+ const arg_classes = abi.classifyType(arg_ty, mod);
for (arg_classes) |class| {
if (class != .none) {
func.arg_index += 1;
@@ -2527,7 +2538,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// When we have an argument that's passed using more than a single parameter,
// we combine them into a single stack value
if (arg_classes[0] == .direct and arg_classes[1] == .direct) {
- if (arg_ty.zigTypeTag() != .Int and arg_ty.zigTypeTag() != .Float) {
+ if (arg_ty.zigTypeTag(mod) != .Int and arg_ty.zigTypeTag(mod) != .Float) {
return func.fail(
"TODO: Implement C-ABI argument for type '{}'",
.{arg_ty.fmt(func.bin_file.base.options.module.?)},
@@ -2557,6 +2568,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
@@ -2570,10 +2582,10 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
// For big integers we can ignore this as we will call into compiler-rt which handles this.
const result = switch (op) {
.shr, .shl => res: {
- const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse {
+ const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse {
return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
};
- const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?;
+ const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?;
const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: {
const tmp = try func.intcast(rhs, rhs_ty, lhs_ty);
break :blk try tmp.toLocal(func, lhs_ty);
@@ -2593,6 +2605,7 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
/// Performs a binary operation on the given `WValue`s
/// NOTE: This leaves the value on top of the stack.
fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
assert(!(lhs != .stack and rhs == .stack));
if (ty.isAnyFloat()) {
@@ -2600,8 +2613,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
return func.floatOp(float_op, ty, &.{ lhs, rhs });
}
- if (isByRef(ty, func.target)) {
- if (ty.zigTypeTag() == .Int) {
+ if (isByRef(ty, mod)) {
+ if (ty.zigTypeTag(mod) == .Int) {
return func.binOpBigInt(lhs, rhs, ty, op);
} else {
return func.fail(
@@ -2613,8 +2626,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
const opcode: wasm.Opcode = buildOpcode(.{
.op = op,
- .valtype1 = typeToValtype(ty, func.target),
- .signedness = if (ty.isSignedInt()) .signed else .unsigned,
+ .valtype1 = typeToValtype(ty, mod),
+ .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned,
});
try func.emitWValue(lhs);
try func.emitWValue(rhs);
@@ -2625,7 +2638,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
}
fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
- if (ty.intInfo(func.target).bits > 128) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.intInfo(mod).bits > 128) {
return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{});
}
@@ -2763,7 +2777,8 @@ fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError
}
fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue {
- if (ty.zigTypeTag() == .Vector) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement floatOps for vectors", .{});
}
@@ -2773,7 +2788,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In
for (args) |operand| {
try func.emitWValue(operand);
}
- const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, func.target) });
+ const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, mod) });
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
return .stack;
}
@@ -2827,6 +2842,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In
}
fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const lhs = try func.resolveInst(bin_op.lhs);
@@ -2834,7 +2850,7 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
const lhs_ty = func.air.typeOf(bin_op.lhs);
const rhs_ty = func.air.typeOf(bin_op.rhs);
- if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) {
+ if (lhs_ty.zigTypeTag(mod) == .Vector or rhs_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement wrapping arithmetic for vectors", .{});
}
@@ -2845,10 +2861,10 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
// For big integers we can ignore this as we will call into compiler-rt which handles this.
const result = switch (op) {
.shr, .shl => res: {
- const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse {
+ const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse {
return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
};
- const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?;
+ const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?;
const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: {
const tmp = try func.intcast(rhs, rhs_ty, lhs_ty);
break :blk try tmp.toLocal(func, lhs_ty);
@@ -2877,8 +2893,9 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr
/// Asserts `Type` is <= 128 bits.
/// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack.
fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
- assert(ty.abiSize(func.target) <= 16);
- const bitsize = @intCast(u16, ty.bitSize(func.target));
+ const mod = func.bin_file.base.options.module.?;
+ assert(ty.abiSize(mod) <= 16);
+ const bitsize = @intCast(u16, ty.bitSize(mod));
const wasm_bits = toWasmBits(bitsize) orelse {
return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize});
};
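For readers unfamiliar with the helpers used here: `toWasmBits` rounds an integer width up to the nearest wasm value width (32, 64 or 128, and null beyond that), and wrapping then masks away the excess bits of the wider wasm scalar. A hedged standalone sketch of both ideas, limited to the 64-bit case:

const std = @import("std");

fn toWasmBitsSketch(bits: u16) ?u16 {
    // wasm only has 32- and 64-bit scalars plus the 128-bit v128 type
    if (bits <= 32) return 32;
    if (bits <= 64) return 64;
    if (bits <= 128) return 128;
    return null;
}

// Masks a value held in a wider wasm scalar down to `bits` significant bits.
fn wrapToBits(value: u64, bits: u6) u64 {
    return value & ((@as(u64, 1) << bits) - 1);
}

test "widths round up and wrapping discards overflown bits" {
    try std.testing.expectEqual(@as(?u16, 64), toWasmBitsSketch(33));
    try std.testing.expectEqual(@as(u64, 0x2a), wrapToBits(0xff2a, 8));
}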
@@ -2915,6 +2932,7 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
}
fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
switch (ptr_val.tag()) {
.decl_ref_mut => {
const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
@@ -2932,15 +2950,15 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
const field_ptr = ptr_val.castTag(.field_ptr).?.data;
const parent_ty = field_ptr.container_ty;
- const field_offset = switch (parent_ty.zigTypeTag()) {
+ const field_offset = switch (parent_ty.zigTypeTag(mod)) {
.Struct => switch (parent_ty.containerLayout()) {
- .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, func.target),
- else => parent_ty.structFieldOffset(field_ptr.field_index, func.target),
+ .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, mod),
+ else => parent_ty.structFieldOffset(field_ptr.field_index, mod),
},
.Union => switch (parent_ty.containerLayout()) {
.Packed => 0,
else => blk: {
- const layout: Module.Union.Layout = parent_ty.unionGetLayout(func.target);
+ const layout: Module.Union.Layout = parent_ty.unionGetLayout(mod);
if (layout.payload_size == 0) break :blk 0;
if (layout.payload_align > layout.tag_align) break :blk 0;
@@ -2964,7 +2982,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
.elem_ptr => {
const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
const index = elem_ptr.index;
- const elem_offset = index * elem_ptr.elem_ty.abiSize(func.target);
+ const elem_offset = index * elem_ptr.elem_ty.abiSize(mod);
return func.lowerParentPtr(elem_ptr.array_ptr, offset + @intCast(u32, elem_offset));
},
.opt_payload_ptr => {
@@ -2976,9 +2994,9 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
}
fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue {
- const module = func.bin_file.base.options.module.?;
- const decl = module.declPtr(decl_index);
- module.markDeclAlive(decl);
+ const mod = func.bin_file.base.options.module.?;
+ const decl = mod.declPtr(decl_index);
+ mod.markDeclAlive(decl);
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
.data = decl.ty,
@@ -2992,18 +3010,18 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind
return WValue{ .memory = try func.bin_file.lowerUnnamedConst(tv, decl_index) };
}
- const module = func.bin_file.base.options.module.?;
- const decl = module.declPtr(decl_index);
- if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) {
+ const mod = func.bin_file.base.options.module.?;
+ const decl = mod.declPtr(decl_index);
+ if (decl.ty.zigTypeTag(mod) != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime(mod)) {
return WValue{ .imm32 = 0xaaaaaaaa };
}
- module.markDeclAlive(decl);
+ mod.markDeclAlive(decl);
const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index);
const atom = func.bin_file.getAtom(atom_index);
const target_sym_index = atom.sym_index;
- if (decl.ty.zigTypeTag() == .Fn) {
+ if (decl.ty.zigTypeTag(mod) == .Fn) {
try func.bin_file.addTableFunction(target_sym_index);
return WValue{ .function_index = target_sym_index };
} else if (offset == 0) {
@@ -3041,31 +3059,31 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
const decl_index = decl_ref_mut.data.decl_index;
return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0);
}
- const target = func.target;
- switch (ty.zigTypeTag()) {
+ const mod = func.bin_file.base.options.module.?;
+ switch (ty.zigTypeTag(mod)) {
.Void => return WValue{ .none = {} },
.Int => {
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
switch (int_info.signedness) {
.signed => switch (int_info.bits) {
0...32 => return WValue{ .imm32 = @intCast(u32, toTwosComplement(
- val.toSignedInt(target),
+ val.toSignedInt(mod),
@intCast(u6, int_info.bits),
)) },
33...64 => return WValue{ .imm64 = toTwosComplement(
- val.toSignedInt(target),
+ val.toSignedInt(mod),
@intCast(u7, int_info.bits),
) },
else => unreachable,
},
.unsigned => switch (int_info.bits) {
- 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) },
- 33...64 => return WValue{ .imm64 = val.toUnsignedInt(target) },
+ 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) },
+ 33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) },
else => unreachable,
},
}
},
- .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) },
+ .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) },
.Float => switch (ty.floatBits(func.target)) {
16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16)) },
32 => return WValue{ .float32 = val.toFloat(f32) },
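For the signed branches above: the constant is emitted as its two's-complement bit pattern truncated to the type's width, which is what `toTwosComplement` is responsible for. A self-contained sketch of that encoding (the real helper may differ in details):

const std = @import("std");

fn twosComplement(value: i64, bits: u7) u64 {
    const all_ones = ~@as(u64, 0);
    const mask = if (bits >= 64) all_ones else (@as(u64, 1) << @intCast(u6, bits)) - 1;
    return @bitCast(u64, value) & mask;
}

test "two's complement encoding" {
    try std.testing.expectEqual(@as(u64, 0xff), twosComplement(-1, 8)); // fits an imm32
    try std.testing.expectEqual(@as(u64, 0xffff_ffff_ffff_fffe), twosComplement(-2, 64)); // imm64
}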
@@ -3074,7 +3092,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
},
.Pointer => switch (val.tag()) {
.field_ptr, .elem_ptr, .opt_payload_ptr => return func.lowerParentPtr(val, 0),
- .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) },
+ .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) },
.zero, .null_value => return WValue{ .imm32 = 0 },
else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}),
},
@@ -3100,8 +3118,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
else => return func.fail("TODO: lowerConstant for enum tag: {}", .{ty.tag()}),
}
} else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = ty.intTagType(&int_tag_buffer);
+ const int_tag_ty = ty.intTagType();
return func.lowerConstant(val, int_tag_ty);
}
},
@@ -3115,7 +3132,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
.ErrorUnion => {
const error_type = ty.errorUnionSet();
const payload_type = ty.errorUnionPayload();
- if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
// We use the error type directly as the type.
const is_pl = val.errorUnionIsPayload();
const err_val = if (!is_pl) val else Value.initTag(.zero);
@@ -3123,12 +3140,12 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
}
return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{});
},
- .Optional => if (ty.optionalReprIsPayload()) {
+ .Optional => if (ty.optionalReprIsPayload(mod)) {
var buf: Type.Payload.ElemType = undefined;
const pl_ty = ty.optionalChild(&buf);
if (val.castTag(.opt_payload)) |payload| {
return func.lowerConstant(payload.data, pl_ty);
- } else if (val.isNull()) {
+ } else if (val.isNull(mod)) {
return WValue{ .imm32 = 0 };
} else {
return func.lowerConstant(val, pl_ty);
@@ -3150,7 +3167,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
return func.lowerConstant(int_val, struct_obj.backing_int_ty);
},
.Vector => {
- assert(determineSimdStoreStrategy(ty, target) == .direct);
+ assert(determineSimdStoreStrategy(ty, mod) == .direct);
var buf: [16]u8 = undefined;
val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable;
return func.storeSimdImmd(buf);
@@ -3176,9 +3193,10 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue {
}
fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
- switch (ty.zigTypeTag()) {
+ const mod = func.bin_file.base.options.module.?;
+ switch (ty.zigTypeTag(mod)) {
.Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa },
- .Int, .Enum => switch (ty.intInfo(func.target).bits) {
+ .Int, .Enum => switch (ty.intInfo(mod).bits) {
0...32 => return WValue{ .imm32 = 0xaaaaaaaa },
33...64 => return WValue{ .imm64 = 0xaaaaaaaaaaaaaaaa },
else => unreachable,
@@ -3197,7 +3215,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
.Optional => {
var buf: Type.Payload.ElemType = undefined;
const pl_ty = ty.optionalChild(&buf);
- if (ty.optionalReprIsPayload()) {
+ if (ty.optionalReprIsPayload(mod)) {
return func.emitUndefined(pl_ty);
}
return WValue{ .imm32 = 0xaaaaaaaa };
@@ -3210,7 +3228,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
assert(struct_obj.layout == .Packed);
return func.emitUndefined(struct_obj.backing_int_ty);
},
- else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag()}),
+ else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}),
}
}
@@ -3218,8 +3236,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
/// It's illegal to provide a value with a type that cannot be represented
/// as an integer value.
fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
- const target = func.target;
- switch (ty.zigTypeTag()) {
+ const mod = func.bin_file.base.options.module.?;
+ switch (ty.zigTypeTag(mod)) {
.Enum => {
if (val.castTag(.enum_field_index)) |field_index| {
switch (ty.tag()) {
@@ -3239,35 +3257,35 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
else => unreachable,
}
} else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = ty.intTagType(&int_tag_buffer);
+ const int_tag_ty = ty.intTagType();
return func.valueAsI32(val, int_tag_ty);
}
},
- .Int => switch (ty.intInfo(func.target).signedness) {
- .signed => return @truncate(i32, val.toSignedInt(target)),
- .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(target))),
+ .Int => switch (ty.intInfo(mod).signedness) {
+ .signed => return @truncate(i32, val.toSignedInt(mod)),
+ .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(mod))),
},
.ErrorSet => {
const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function
return @bitCast(i32, kv.value);
},
- .Bool => return @intCast(i32, val.toSignedInt(target)),
- .Pointer => return @intCast(i32, val.toSignedInt(target)),
+ .Bool => return @intCast(i32, val.toSignedInt(mod)),
+ .Pointer => return @intCast(i32, val.toSignedInt(mod)),
else => unreachable, // Programmer called this function for an illegal type
}
}
fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const block_ty = func.air.getRefType(ty_pl.ty);
- const wasm_block_ty = genBlockType(block_ty, func.target);
+ const wasm_block_ty = genBlockType(block_ty, mod);
const extra = func.air.extraData(Air.Block, ty_pl.payload);
const body = func.air.extra[extra.end..][0..extra.data.body_len];
// if wasm_block_ty is non-empty, we create a register to store the temporary value
const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: {
- const ty: Type = if (isByRef(block_ty, func.target)) Type.u32 else block_ty;
+ const ty: Type = if (isByRef(block_ty, mod)) Type.u32 else block_ty;
break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten
} else WValue.none;
@@ -3379,16 +3397,17 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In
/// NOTE: This leaves the result on top of the stack, rather than a new local.
fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue {
assert(!(lhs != .stack and rhs == .stack));
- if (ty.zigTypeTag() == .Optional and !ty.optionalReprIsPayload()) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&buf);
- if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// When we hit this case, we must check the value of optionals
// that are not pointers. This means first checking against non-null for
// both lhs and rhs, as well as checking that the payloads of lhs and rhs match
return func.cmpOptionals(lhs, rhs, ty, op);
}
- } else if (isByRef(ty, func.target)) {
+ } else if (isByRef(ty, mod)) {
return func.cmpBigInt(lhs, rhs, ty, op);
} else if (ty.isAnyFloat() and ty.floatBits(func.target) == 16) {
return func.cmpFloat16(lhs, rhs, op);
@@ -3401,13 +3420,13 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
const signedness: std.builtin.Signedness = blk: {
// by default we assume the operand type is unsigned (i.e. bools and enum values)
- if (ty.zigTypeTag() != .Int) break :blk .unsigned;
+ if (ty.zigTypeTag(mod) != .Int) break :blk .unsigned;
// in case of an actual integer, we emit the correct signedness
- break :blk ty.intInfo(func.target).signedness;
+ break :blk ty.intInfo(mod).signedness;
};
const opcode: wasm.Opcode = buildOpcode(.{
- .valtype1 = typeToValtype(ty, func.target),
+ .valtype1 = typeToValtype(ty, mod),
.op = switch (op) {
.lt => .lt,
.lte => .le,
@@ -3464,11 +3483,12 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const br = func.air.instructions.items(.data)[inst].br;
const block = func.blocks.get(br.block_inst).?;
// if operand has codegen bits we should break with a value
- if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime()) {
+ if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) {
const operand = try func.resolveInst(br.operand);
try func.lowerToStack(operand);
@@ -3490,16 +3510,17 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const operand_ty = func.air.typeOf(ty_op.operand);
+ const mod = func.bin_file.base.options.module.?;
const result = result: {
- if (operand_ty.zigTypeTag() == .Bool) {
+ if (operand_ty.zigTypeTag(mod) == .Bool) {
try func.emitWValue(operand);
try func.addTag(.i32_eqz);
const not_tmp = try func.allocLocal(operand_ty);
try func.addLabel(.local_set, not_tmp.local.value);
break :result not_tmp;
} else {
- const operand_bits = operand_ty.intInfo(func.target).bits;
+ const operand_bits = operand_ty.intInfo(mod).bits;
const wasm_bits = toWasmBits(operand_bits) orelse {
return func.fail("TODO: Implement binary NOT for integer with bitsize '{d}'", .{operand_bits});
};
@@ -3566,16 +3587,17 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
// if we bitcast a float to or from an integer we must use the 'reinterpret' instruction
if (!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand;
if (wanted_ty.tag() == .f16 or given_ty.tag() == .f16) return operand;
- if (wanted_ty.bitSize(func.target) > 64) return operand;
- assert((wanted_ty.isInt() and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt()));
+ if (wanted_ty.bitSize(mod) > 64) return operand;
+ assert((wanted_ty.isInt(mod) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(mod)));
const opcode = buildOpcode(.{
.op = .reinterpret,
- .valtype1 = typeToValtype(wanted_ty, func.target),
- .valtype2 = typeToValtype(given_ty, func.target),
+ .valtype1 = typeToValtype(wanted_ty, mod),
+ .valtype2 = typeToValtype(given_ty, mod),
});
try func.emitWValue(operand);
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
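The reinterpret opcode chosen above performs a raw bit-pattern conversion between an integer and a float of the same width, the runtime counterpart of Zig's `@bitCast`; a tiny self-contained illustration:

const std = @import("std");

test "reinterpret keeps the bit pattern" {
    const bits: u32 = 0x3f80_0000; // IEEE-754 single-precision 1.0
    try std.testing.expectEqual(@as(f32, 1.0), @bitCast(f32, bits));
    try std.testing.expectEqual(bits, @bitCast(u32, @as(f32, 1.0)));
}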
@@ -3609,19 +3631,20 @@ fn structFieldPtr(
struct_ty: Type,
index: u32,
) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
const result_ty = func.air.typeOfIndex(inst);
const offset = switch (struct_ty.containerLayout()) {
- .Packed => switch (struct_ty.zigTypeTag()) {
+ .Packed => switch (struct_ty.zigTypeTag(mod)) {
.Struct => offset: {
if (result_ty.ptrInfo().data.host_size != 0) {
break :offset @as(u32, 0);
}
- break :offset struct_ty.packedStructFieldByteOffset(index, func.target);
+ break :offset struct_ty.packedStructFieldByteOffset(index, mod);
},
.Union => 0,
else => unreachable,
},
- else => struct_ty.structFieldOffset(index, func.target),
+ else => struct_ty.structFieldOffset(index, mod),
};
// save a load and store when we can simply reuse the operand
if (offset == 0) {
@@ -3636,6 +3659,7 @@ fn structFieldPtr(
}
fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -3643,15 +3667,15 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
const result = switch (struct_ty.containerLayout()) {
- .Packed => switch (struct_ty.zigTypeTag()) {
+ .Packed => switch (struct_ty.zigTypeTag(mod)) {
.Struct => result: {
const struct_obj = struct_ty.castTag(.@"struct").?.data;
- const offset = struct_obj.packedFieldBitOffset(func.target, field_index);
+ const offset = struct_obj.packedFieldBitOffset(mod, field_index);
const backing_ty = struct_obj.backing_int_ty;
- const wasm_bits = toWasmBits(backing_ty.intInfo(func.target).bits) orelse {
+ const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
};
const const_wvalue = if (wasm_bits == 32)
@@ -3667,25 +3691,17 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else
try func.binOp(operand, const_wvalue, backing_ty, .shr);
- if (field_ty.zigTypeTag() == .Float) {
- var payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, field_ty.bitSize(func.target)),
- };
- const int_type = Type.initPayload(&payload.base);
+ if (field_ty.zigTypeTag(mod) == .Float) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
const truncated = try func.trunc(shifted_value, int_type, backing_ty);
const bitcasted = try func.bitcast(field_ty, int_type, truncated);
break :result try bitcasted.toLocal(func, field_ty);
- } else if (field_ty.isPtrAtRuntime() and struct_obj.fields.count() == 1) {
+ } else if (field_ty.isPtrAtRuntime(mod) and struct_obj.fields.count() == 1) {
// In this case we do not have to perform any transformations,
// we can simply reuse the operand.
break :result func.reuseOperand(struct_field.struct_operand, operand);
- } else if (field_ty.isPtrAtRuntime()) {
- var payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, field_ty.bitSize(func.target)),
- };
- const int_type = Type.initPayload(&payload.base);
+ } else if (field_ty.isPtrAtRuntime(mod)) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
const truncated = try func.trunc(shifted_value, int_type, backing_ty);
break :result try truncated.toLocal(func, field_ty);
}
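The packed-field path above amounts to shifting the backing integer right by the field's bit offset, truncating to the field's bit count, and bitcasting the result when the field is a float. A minimal illustration with hypothetical widths, not the compiler's own helper:

const std = @import("std");

// Extracts `field_bits` bits at `bit_offset` from a packed backing integer.
fn extractPackedField(backing: u32, bit_offset: u5, field_bits: u5) u32 {
    const shifted = backing >> bit_offset;
    const mask = (@as(u32, 1) << field_bits) - 1;
    return shifted & mask;
}

test "extract an f16 stored at bit 8 of a packed u32" {
    const backing: u32 = (@as(u32, @bitCast(u16, @as(f16, 1.5))) << 8) | 0xaa;
    const raw = extractPackedField(backing, 8, 16);
    try std.testing.expectEqual(@as(f16, 1.5), @bitCast(f16, @intCast(u16, raw)));
}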
@@ -3693,8 +3709,8 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result try truncated.toLocal(func, field_ty);
},
.Union => result: {
- if (isByRef(struct_ty, func.target)) {
- if (!isByRef(field_ty, func.target)) {
+ if (isByRef(struct_ty, mod)) {
+ if (!isByRef(field_ty, mod)) {
const val = try func.load(operand, field_ty, 0);
break :result try val.toLocal(func, field_ty);
} else {
@@ -3704,26 +3720,14 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
}
- var payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, struct_ty.bitSize(func.target)),
- };
- const union_int_type = Type.initPayload(&payload.base);
- if (field_ty.zigTypeTag() == .Float) {
- var int_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, field_ty.bitSize(func.target)),
- };
- const int_type = Type.initPayload(&int_payload.base);
+ const union_int_type = try mod.intType(.unsigned, @intCast(u16, struct_ty.bitSize(mod)));
+ if (field_ty.zigTypeTag(mod) == .Float) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
const truncated = try func.trunc(operand, int_type, union_int_type);
const bitcasted = try func.bitcast(field_ty, int_type, truncated);
break :result try bitcasted.toLocal(func, field_ty);
- } else if (field_ty.isPtrAtRuntime()) {
- var int_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, field_ty.bitSize(func.target)),
- };
- const int_type = Type.initPayload(&int_payload.base);
+ } else if (field_ty.isPtrAtRuntime(mod)) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod)));
const truncated = try func.trunc(operand, int_type, union_int_type);
break :result try truncated.toLocal(func, field_ty);
}
@@ -3733,11 +3737,10 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
},
else => result: {
- const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
- return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(module)});
+ const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, mod)) orelse {
+ return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(mod)});
};
- if (isByRef(field_ty, func.target)) {
+ if (isByRef(field_ty, mod)) {
switch (operand) {
.stack_offset => |stack_offset| {
break :result WValue{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } };
@@ -3754,6 +3757,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
// result type is always 'noreturn'
const blocktype = wasm.block_empty;
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
@@ -3787,7 +3791,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
errdefer func.gpa.free(values);
for (items, 0..) |ref, i| {
- const item_val = func.air.value(ref).?;
+ const item_val = func.air.value(ref, mod).?;
const int_val = func.valueAsI32(item_val, target_ty);
if (lowest_maybe == null or int_val < lowest_maybe.?) {
lowest_maybe = int_val;
@@ -3810,7 +3814,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// When the target is an integer size larger than u32, we have no way to use the value
// as an index, therefore we also use an if/else-chain for those cases.
// TODO: Benchmark this to find a proper value, LLVM seems to draw the line at '40~45'.
- const is_sparse = highest - lowest > 50 or target_ty.bitSize(func.target) > 32;
+ const is_sparse = highest - lowest > 50 or target_ty.bitSize(mod) > 32;
const else_body = func.air.extra[extra_index..][0..switch_br.data.else_body_len];
const has_else_body = else_body.len != 0;
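A note on the `is_sparse` heuristic above: a br_table only pays off when the case values span a small range that can be indexed as `value - lowest` and fits a 32-bit index; otherwise an if/else chain is emitted. A hedged sketch of how such a dense table can be filled, with unmatched slots pointing at the else branch (names are illustrative, not the backend's code):

const std = @import("std");

fn buildJumpTable(allocator: std.mem.Allocator, case_values: []const i32, lowest: i32, highest: i32, else_target: u32) ![]u32 {
    const len = @intCast(usize, highest - lowest + 1);
    const table = try allocator.alloc(u32, len);
    // unmatched values branch to the else (or unreachable) block
    for (table) |*slot| slot.* = else_target;
    for (case_values, 0..) |value, case_idx| {
        table[@intCast(usize, value - lowest)] = @intCast(u32, case_idx);
    }
    return table;
}

test "dense table indexes by value - lowest" {
    const table = try buildJumpTable(std.testing.allocator, &.{ 3, 5, 6 }, 3, 6, 99);
    defer std.testing.allocator.free(table);
    try std.testing.expectEqualSlices(u32, &.{ 0, 99, 1, 2 }, table);
}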
@@ -3855,7 +3859,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// for errors that are not present in any branch. This is fine as this default
// case will never be hit for those cases but we do save runtime cost and size
// by using a jump table for this instead of if-else chains.
- break :blk if (has_else_body or target_ty.zigTypeTag() == .ErrorSet) case_i else unreachable;
+ break :blk if (has_else_body or target_ty.zigTypeTag(mod) == .ErrorSet) case_i else unreachable;
};
func.mir_extra.appendAssumeCapacity(idx);
} else if (has_else_body) {
@@ -3866,10 +3870,10 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const signedness: std.builtin.Signedness = blk: {
// by default we assume the operand type is unsigned (i.e. bools and enum values)
- if (target_ty.zigTypeTag() != .Int) break :blk .unsigned;
+ if (target_ty.zigTypeTag(mod) != .Int) break :blk .unsigned;
// in case of an actual integer, we emit the correct signedness
- break :blk target_ty.intInfo(func.target).signedness;
+ break :blk target_ty.intInfo(mod).signedness;
};
try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @boolToInt(has_else_body));
@@ -3882,7 +3886,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const val = try func.lowerConstant(case.values[0].value, target_ty);
try func.emitWValue(val);
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(target_ty, func.target),
+ .valtype1 = typeToValtype(target_ty, mod),
.op = .ne, // not equal, because we want to jump out of this block if it does not match the condition.
.signedness = signedness,
});
@@ -3896,7 +3900,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const val = try func.lowerConstant(value.value, target_ty);
try func.emitWValue(val);
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(target_ty, func.target),
+ .valtype1 = typeToValtype(target_ty, mod),
.op = .eq,
.signedness = signedness,
});
@@ -3933,6 +3937,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const un_op = func.air.instructions.items(.data)[inst].un_op;
const operand = try func.resolveInst(un_op);
const err_union_ty = func.air.typeOf(un_op);
@@ -3948,10 +3953,10 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
}
try func.emitWValue(operand);
- if (pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try func.addMemArg(.i32_load16_u, .{
- .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, func.target)),
- .alignment = Type.anyerror.abiAlignment(func.target),
+ .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, mod)),
+ .alignment = Type.anyerror.abiAlignment(mod),
});
}
@@ -3967,6 +3972,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
}
fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -3975,15 +3981,15 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
const payload_ty = err_ty.errorUnionPayload();
const result = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (op_is_ptr) {
break :result func.reuseOperand(ty_op.operand, operand);
}
break :result WValue{ .none = {} };
}
- const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, func.target));
- if (op_is_ptr or isByRef(payload_ty, func.target)) {
+ const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
+ if (op_is_ptr or isByRef(payload_ty, mod)) {
break :result try func.buildPointerOffset(operand, pl_offset, .new);
}
@@ -3994,6 +4000,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
}
fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -4006,17 +4013,18 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool)
break :result WValue{ .imm32 = 0 };
}
- if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
- const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, func.target)));
+ const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, mod)));
break :result try error_val.toLocal(func, Type.anyerror);
};
func.finishAir(inst, result, &.{ty_op.operand});
}
fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -4024,18 +4032,18 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
const pl_ty = func.air.typeOf(ty_op.operand);
const result = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
const err_union = try func.allocStack(err_ty);
- const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new);
+ const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new);
try func.store(payload_ptr, operand, pl_ty, 0);
// ensure we also write '0' to the error part, so any present stack value gets overwritten by it.
try func.emitWValue(err_union);
try func.addImm32(0);
- const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target));
+ const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod));
try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 });
break :result err_union;
};
@@ -4043,6 +4051,7 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
}
fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -4050,17 +4059,17 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pl_ty = err_ty.errorUnionPayload();
const result = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
const err_union = try func.allocStack(err_ty);
// store error value
- try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, func.target)));
+ try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, mod)));
// write 'undefined' to the payload
- const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new);
- const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(func.target));
+ const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new);
+ const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(mod));
try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa });
break :result err_union;
@@ -4074,15 +4083,16 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.air.getRefType(ty_op.ty);
const operand = try func.resolveInst(ty_op.operand);
const operand_ty = func.air.typeOf(ty_op.operand);
- if (ty.zigTypeTag() == .Vector or operand_ty.zigTypeTag() == .Vector) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Vector or operand_ty.zigTypeTag(mod) == .Vector) {
return func.fail("todo Wasm intcast for vectors", .{});
}
- if (ty.abiSize(func.target) > 16 or operand_ty.abiSize(func.target) > 16) {
+ if (ty.abiSize(mod) > 16 or operand_ty.abiSize(mod) > 16) {
return func.fail("todo Wasm intcast for bitsize > 128", .{});
}
- const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(func.target))).?;
- const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?;
+ const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(mod))).?;
+ const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
const result = if (op_bits == wanted_bits)
func.reuseOperand(ty_op.operand, operand)
else
@@ -4096,8 +4106,9 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Asserts type's bitsize <= 128
/// NOTE: May leave the result on the top of the stack.
fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
- const given_bitsize = @intCast(u16, given.bitSize(func.target));
- const wanted_bitsize = @intCast(u16, wanted.bitSize(func.target));
+ const mod = func.bin_file.base.options.module.?;
+ const given_bitsize = @intCast(u16, given.bitSize(mod));
+ const wanted_bitsize = @intCast(u16, wanted.bitSize(mod));
assert(given_bitsize <= 128);
assert(wanted_bitsize <= 128);
@@ -4110,7 +4121,7 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
try func.addTag(.i32_wrap_i64);
} else if (op_bits == 32 and wanted_bits > 32 and wanted_bits <= 64) {
try func.emitWValue(operand);
- try func.addTag(if (wanted.isSignedInt()) .i64_extend_i32_s else .i64_extend_i32_u);
+ try func.addTag(if (wanted.isSignedInt(mod)) .i64_extend_i32_s else .i64_extend_i32_u);
} else if (wanted_bits == 128) {
// for 128bit integers we store the integer in the virtual stack, rather than a local
const stack_ptr = try func.allocStack(wanted);
@@ -4119,14 +4130,14 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
// for 32 bit integers, we first coerce the value into a 64 bit integer before storing it
// meaning fewer store operations are required.
const lhs = if (op_bits == 32) blk: {
- break :blk try func.intcast(operand, given, if (wanted.isSignedInt()) Type.i64 else Type.u64);
+ break :blk try func.intcast(operand, given, if (wanted.isSignedInt(mod)) Type.i64 else Type.u64);
} else operand;
// store msb first
try func.store(.{ .stack = {} }, lhs, Type.u64, 0 + stack_ptr.offset());
// For signed integers we shift msb by 63 (64bit integer - 1 sign bit) and store remaining value
- if (wanted.isSignedInt()) {
+ if (wanted.isSignedInt(mod)) {
try func.emitWValue(stack_ptr);
const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr);
try func.store(.{ .stack = {} }, shr, Type.u64, 8 + stack_ptr.offset());
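In the 128-bit branch above the value is written as two 64-bit words; for signed integers the second word is the sign bit smeared across all 64 bits, which is exactly what the shift by 63 produces. A standalone sketch of that split (illustrative only, not the backend's store sequence):

const std = @import("std");

fn splitToWords(value: i64) struct { low: u64, high: u64 } {
    return .{
        .low = @bitCast(u64, value),
        // arithmetic shift by 63 yields 0 for non-negative values and all ones otherwise
        .high = @bitCast(u64, value >> 63),
    };
}

test "sign extension to 128 bits" {
    try std.testing.expectEqual(@as(u64, 0), splitToWords(42).high);
    try std.testing.expectEqual(~@as(u64, 0), splitToWords(-42).high);
}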
@@ -4154,16 +4165,16 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind:
/// For a given type and operand, checks if it's considered `null`.
/// NOTE: Leaves the result on the stack
fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
try func.emitWValue(operand);
var buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&buf);
- if (!optional_ty.optionalReprIsPayload()) {
+ if (!optional_ty.optionalReprIsPayload(mod)) {
// When payload is zero-bits, we can treat operand as a value, rather than
// a pointer to the stack value
- if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
- const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
- return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(module)});
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse {
+ return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(mod)});
};
try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 });
}
@@ -4183,18 +4194,19 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod
}
fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const opt_ty = func.air.typeOf(ty_op.operand);
const payload_ty = func.air.typeOfIndex(inst);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.finishAir(inst, .none, &.{ty_op.operand});
}
const result = result: {
const operand = try func.resolveInst(ty_op.operand);
- if (opt_ty.optionalReprIsPayload()) break :result func.reuseOperand(ty_op.operand, operand);
+ if (opt_ty.optionalReprIsPayload(mod)) break :result func.reuseOperand(ty_op.operand, operand);
- if (isByRef(payload_ty, func.target)) {
+ if (isByRef(payload_ty, mod)) {
break :result try func.buildPointerOffset(operand, 0, .new);
}
@@ -4209,10 +4221,11 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const opt_ty = func.air.typeOf(ty_op.operand).childType();
+ const mod = func.bin_file.base.options.module.?;
const result = result: {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime() or opt_ty.optionalReprIsPayload()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or opt_ty.optionalReprIsPayload(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
@@ -4222,22 +4235,22 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const opt_ty = func.air.typeOf(ty_op.operand).childType();
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()});
}
- if (opt_ty.optionalReprIsPayload()) {
+ if (opt_ty.optionalReprIsPayload(mod)) {
return func.finishAir(inst, operand, &.{ty_op.operand});
}
- const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
- return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(module)});
+ const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse {
+ return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(mod)});
};
try func.emitWValue(operand);
@@ -4251,9 +4264,10 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const payload_ty = func.air.typeOf(ty_op.operand);
+ const mod = func.bin_file.base.options.module.?;
const result = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const non_null_bit = try func.allocStack(Type.initTag(.u1));
try func.emitWValue(non_null_bit);
try func.addImm32(1);
@@ -4263,12 +4277,11 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.air.typeOfIndex(inst);
- if (op_ty.optionalReprIsPayload()) {
+ if (op_ty.optionalReprIsPayload(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
- const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse {
- const module = func.bin_file.base.options.module.?;
- return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(module)});
+ const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse {
+ return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(mod)});
};
// Create optional type, set the non-null bit, and store the operand inside the optional type
@@ -4314,7 +4327,8 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const slice = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
const elem_ty = slice_ty.childType();
- const elem_size = elem_ty.abiSize(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const elem_size = elem_ty.abiSize(mod);
// load pointer onto stack
_ = try func.load(slice, Type.usize, 0);
@@ -4328,7 +4342,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try func.allocLocal(Type.usize);
try func.addLabel(.local_set, result_ptr.local.value);
- const result = if (!isByRef(elem_ty, func.target)) result: {
+ const result = if (!isByRef(elem_ty, mod)) result: {
const elem_val = try func.load(result_ptr, elem_ty, 0);
break :result try elem_val.toLocal(func, elem_ty);
} else result_ptr;
@@ -4341,7 +4355,8 @@ fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
const elem_ty = func.air.getRefType(ty_pl.ty).childType();
- const elem_size = elem_ty.abiSize(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const elem_size = elem_ty.abiSize(mod);
const slice = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
@@ -4389,13 +4404,14 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Truncates a given operand to a given type, discarding any overflown bits.
/// NOTE: Resulting value is left on the stack.
fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue {
- const given_bits = @intCast(u16, given_ty.bitSize(func.target));
+ const mod = func.bin_file.base.options.module.?;
+ const given_bits = @intCast(u16, given_ty.bitSize(mod));
if (toWasmBits(given_bits) == null) {
return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits});
}
var result = try func.intcast(operand, given_ty, wanted_ty);
- const wanted_bits = @intCast(u16, wanted_ty.bitSize(func.target));
+ const wanted_bits = @intCast(u16, wanted_ty.bitSize(mod));
const wasm_bits = toWasmBits(wanted_bits).?;
if (wasm_bits != wanted_bits) {
result = try func.wrapOperand(result, wanted_ty);
@@ -4412,6 +4428,7 @@ fn airBoolToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
@@ -4422,7 +4439,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const slice_local = try func.allocStack(slice_ty);
// store the array ptr in the slice
- if (array_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try func.store(slice_local, operand, Type.usize, 0);
}
@@ -4454,7 +4471,8 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ptr = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
const elem_ty = ptr_ty.childType();
- const elem_size = elem_ty.abiSize(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const elem_size = elem_ty.abiSize(mod);
// load pointer onto the stack
if (ptr_ty.isSlice()) {
@@ -4472,7 +4490,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const elem_result = val: {
var result = try func.allocLocal(Type.usize);
try func.addLabel(.local_set, result.local.value);
- if (isByRef(elem_ty, func.target)) {
+ if (isByRef(elem_ty, mod)) {
break :val result;
}
defer result.free(func); // only free if it's not returned like above
@@ -4489,7 +4507,8 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ptr_ty = func.air.typeOf(bin_op.lhs);
const elem_ty = func.air.getRefType(ty_pl.ty).childType();
- const elem_size = elem_ty.abiSize(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const elem_size = elem_ty.abiSize(mod);
const ptr = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
@@ -4513,6 +4532,7 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -4524,13 +4544,13 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
else => ptr_ty.childType(),
};
- const valtype = typeToValtype(Type.usize, func.target);
+ const valtype = typeToValtype(Type.usize, mod);
const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul });
const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op });
try func.lowerToStack(ptr);
try func.emitWValue(offset);
- try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(func.target))));
+ try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(mod))));
try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
@@ -4572,7 +4592,8 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
/// this to wasm's memset instruction. When the feature is not present,
/// we implement it manually.
fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void {
- const abi_size = @intCast(u32, elem_ty.abiSize(func.target));
+ const mod = func.bin_file.base.options.module.?;
+ const abi_size = @intCast(u32, elem_ty.abiSize(mod));
// When bulk_memory is enabled, we lower it to wasm's memset instruction.
// If not, we lower it ourselves.
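As the comment above notes, without the bulk-memory feature the memset must be open-coded; conceptually it is just a bounded store loop. A trivial standalone sketch (the real lowering emits wasm instructions in a loop rather than executing this directly):

const std = @import("std");

fn memsetManual(dest: []u8, value: u8) void {
    var i: usize = 0;
    while (i < dest.len) : (i += 1) dest[i] = value;
}

test "manual memset" {
    var buf: [4]u8 = undefined;
    memsetManual(&buf, 0xaa);
    try std.testing.expectEqualSlices(u8, &.{ 0xaa, 0xaa, 0xaa, 0xaa }, &buf);
}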
@@ -4666,24 +4687,25 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const array = try func.resolveInst(bin_op.lhs);
const index = try func.resolveInst(bin_op.rhs);
const elem_ty = array_ty.childType();
- const elem_size = elem_ty.abiSize(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const elem_size = elem_ty.abiSize(mod);
- if (isByRef(array_ty, func.target)) {
+ if (isByRef(array_ty, mod)) {
try func.lowerToStack(array);
try func.emitWValue(index);
try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
} else {
- std.debug.assert(array_ty.zigTypeTag() == .Vector);
+ std.debug.assert(array_ty.zigTypeTag(mod) == .Vector);
switch (index) {
inline .imm32, .imm64 => |lane| {
- const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(func.target)) {
- 8 => if (elem_ty.isSignedInt()) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
- 16 => if (elem_ty.isSignedInt()) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
- 32 => if (elem_ty.isInt()) .i32x4_extract_lane else .f32x4_extract_lane,
- 64 => if (elem_ty.isInt()) .i64x2_extract_lane else .f64x2_extract_lane,
+ const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(mod)) {
+ 8 => if (elem_ty.isSignedInt(mod)) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
+ 16 => if (elem_ty.isSignedInt(mod)) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
+ 32 => if (elem_ty.isInt(mod)) .i32x4_extract_lane else .f32x4_extract_lane,
+ 64 => if (elem_ty.isInt(mod)) .i64x2_extract_lane else .f64x2_extract_lane,
else => unreachable,
};
@@ -4715,7 +4737,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var result = try func.allocLocal(Type.usize);
try func.addLabel(.local_set, result.local.value);
- if (isByRef(elem_ty, func.target)) {
+ if (isByRef(elem_ty, mod)) {
break :val result;
}
defer result.free(func); // only free if no longer needed and not returned like above
@@ -4733,17 +4755,18 @@ fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const dest_ty = func.air.typeOfIndex(inst);
const op_ty = func.air.typeOf(ty_op.operand);
+ const mod = func.bin_file.base.options.module.?;
- if (op_ty.abiSize(func.target) > 8) {
+ if (op_ty.abiSize(mod) > 8) {
return func.fail("TODO: floatToInt for integers/floats with bitsize larger than 64 bits", .{});
}
try func.emitWValue(operand);
const op = buildOpcode(.{
.op = .trunc,
- .valtype1 = typeToValtype(dest_ty, func.target),
- .valtype2 = typeToValtype(op_ty, func.target),
- .signedness = if (dest_ty.isSignedInt()) .signed else .unsigned,
+ .valtype1 = typeToValtype(dest_ty, mod),
+ .valtype2 = typeToValtype(op_ty, mod),
+ .signedness = if (dest_ty.isSignedInt(mod)) .signed else .unsigned,
});
try func.addTag(Mir.Inst.Tag.fromOpcode(op));
const wrapped = try func.wrapOperand(.{ .stack = {} }, dest_ty);
@@ -4757,17 +4780,18 @@ fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const dest_ty = func.air.typeOfIndex(inst);
const op_ty = func.air.typeOf(ty_op.operand);
+ const mod = func.bin_file.base.options.module.?;
- if (op_ty.abiSize(func.target) > 8) {
+ if (op_ty.abiSize(mod) > 8) {
return func.fail("TODO: intToFloat for integers/floats with bitsize larger than 64 bits", .{});
}
try func.emitWValue(operand);
const op = buildOpcode(.{
.op = .convert,
- .valtype1 = typeToValtype(dest_ty, func.target),
- .valtype2 = typeToValtype(op_ty, func.target),
- .signedness = if (op_ty.isSignedInt()) .signed else .unsigned,
+ .valtype1 = typeToValtype(dest_ty, mod),
+ .valtype2 = typeToValtype(op_ty, mod),
+ .signedness = if (op_ty.isSignedInt(mod)) .signed else .unsigned,
});
try func.addTag(Mir.Inst.Tag.fromOpcode(op));
@@ -4777,18 +4801,19 @@ fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const operand = try func.resolveInst(ty_op.operand);
const ty = func.air.typeOfIndex(inst);
const elem_ty = ty.childType();
- if (determineSimdStoreStrategy(ty, func.target) == .direct) blk: {
+ if (determineSimdStoreStrategy(ty, mod) == .direct) blk: {
switch (operand) {
// when the operand lives in the linear memory section, we can directly
// load and splat the value at once. Meaning we do not first have to load
// the scalar value onto the stack.
.stack_offset, .memory, .memory_offset => {
- const opcode = switch (elem_ty.bitSize(func.target)) {
+ const opcode = switch (elem_ty.bitSize(mod)) {
8 => std.wasm.simdOpcode(.v128_load8_splat),
16 => std.wasm.simdOpcode(.v128_load16_splat),
32 => std.wasm.simdOpcode(.v128_load32_splat),
@@ -4803,18 +4828,18 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
opcode,
operand.offset(),
- elem_ty.abiAlignment(func.target),
+ elem_ty.abiAlignment(mod),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
try func.addLabel(.local_set, result.local.value);
return func.finishAir(inst, result, &.{ty_op.operand});
},
.local => {
- const opcode = switch (elem_ty.bitSize(func.target)) {
+ const opcode = switch (elem_ty.bitSize(mod)) {
8 => std.wasm.simdOpcode(.i8x16_splat),
16 => std.wasm.simdOpcode(.i16x8_splat),
- 32 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
- 64 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
+ 32 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
+ 64 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
else => break :blk, // Cannot make use of simd-instructions
};
const result = try func.allocLocal(ty);
@@ -4828,14 +4853,14 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
}
}
- const elem_size = elem_ty.bitSize(func.target);
+ const elem_size = elem_ty.bitSize(mod);
const vector_len = @intCast(usize, ty.vectorLen());
if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
}
const result = try func.allocStack(ty);
- const elem_byte_size = @intCast(u32, elem_ty.abiSize(func.target));
+ const elem_byte_size = @intCast(u32, elem_ty.abiSize(mod));
var index: usize = 0;
var offset: u32 = 0;
while (index < vector_len) : (index += 1) {
@@ -4855,6 +4880,7 @@ fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const inst_ty = func.air.typeOfIndex(inst);
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data;
@@ -4865,16 +4891,15 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mask_len = extra.mask_len;
const child_ty = inst_ty.childType();
- const elem_size = child_ty.abiSize(func.target);
+ const elem_size = child_ty.abiSize(mod);
- const module = func.bin_file.base.options.module.?;
// TODO: One of them could be by ref; handle in loop
- if (isByRef(func.air.typeOf(extra.a), func.target) or isByRef(inst_ty, func.target)) {
+ if (isByRef(func.air.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) {
const result = try func.allocStack(inst_ty);
for (0..mask_len) |index| {
var buf: Value.ElemValueBuffer = undefined;
- const value = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target);
+ const value = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod);
try func.emitWValue(result);
@@ -4895,7 +4920,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var lanes = std.mem.asBytes(operands[1..]);
for (0..@intCast(usize, mask_len)) |index| {
var buf: Value.ElemValueBuffer = undefined;
- const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target);
+ const mask_elem = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod);
const base_index = if (mask_elem >= 0)
@intCast(u8, @intCast(i64, elem_size) * mask_elem)
else
@@ -4930,13 +4955,14 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ty = func.air.typeOfIndex(inst);
const len = @intCast(usize, result_ty.arrayLen());
const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]);
+ const mod = func.bin_file.base.options.module.?;
const result: WValue = result_value: {
- switch (result_ty.zigTypeTag()) {
+ switch (result_ty.zigTypeTag(mod)) {
.Array => {
const result = try func.allocStack(result_ty);
const elem_ty = result_ty.childType();
- const elem_size = @intCast(u32, elem_ty.abiSize(func.target));
+ const elem_size = @intCast(u32, elem_ty.abiSize(mod));
const sentinel = if (result_ty.sentinel()) |sent| blk: {
break :blk try func.lowerConstant(sent, elem_ty);
} else null;
@@ -4944,7 +4970,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// When the element type is by reference, we must copy the entire
// value. It is therefore safer to move the offset pointer and store
// each value individually, instead of using store offsets.
- if (isByRef(elem_ty, func.target)) {
+ if (isByRef(elem_ty, mod)) {
// copy stack pointer into a temporary local, which is
// moved for each element to store each value in the right position.
const offset = try func.buildPointerOffset(result, 0, .new);
@@ -4974,7 +5000,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.Struct => switch (result_ty.containerLayout()) {
.Packed => {
- if (isByRef(result_ty, func.target)) {
+ if (isByRef(result_ty, mod)) {
return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
}
const struct_obj = result_ty.castTag(.@"struct").?.data;
@@ -4983,7 +5009,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// ensure the result is zero'd
const result = try func.allocLocal(backing_type);
- if (struct_obj.backing_int_ty.bitSize(func.target) <= 32)
+ if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
try func.addImm32(0)
else
try func.addImm64(0);
@@ -4992,20 +5018,16 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var current_bit: u16 = 0;
for (elements, 0..) |elem, elem_index| {
const field = fields[elem_index];
- if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- const shift_val = if (struct_obj.backing_int_ty.bitSize(func.target) <= 32)
+ const shift_val = if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
WValue{ .imm32 = current_bit }
else
WValue{ .imm64 = current_bit };
const value = try func.resolveInst(elem);
- const value_bit_size = @intCast(u16, field.ty.bitSize(func.target));
- var int_ty_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = value_bit_size,
- };
- const int_ty = Type.initPayload(&int_ty_payload.base);
+ const value_bit_size = @intCast(u16, field.ty.bitSize(mod));
+ const int_ty = try mod.intType(.unsigned, value_bit_size);
// load our current result on stack so we can perform all transformations
// using only stack values. Saving the cost of loads and stores.
@@ -5027,10 +5049,10 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result = try func.allocStack(result_ty);
const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset
for (elements, 0..) |elem, elem_index| {
- if (result_ty.structFieldValueComptime(elem_index) != null) continue;
+ if (result_ty.structFieldValueComptime(mod, elem_index) != null) continue;
const elem_ty = result_ty.structFieldType(elem_index);
- const elem_size = @intCast(u32, elem_ty.abiSize(func.target));
+ const elem_size = @intCast(u32, elem_ty.abiSize(mod));
const value = try func.resolveInst(elem);
try func.store(offset, value, elem_ty, 0);
@@ -5058,12 +5080,13 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data;
const result = result: {
const union_ty = func.air.typeOfIndex(inst);
- const layout = union_ty.unionGetLayout(func.target);
+ const layout = union_ty.unionGetLayout(mod);
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
const field = union_obj.fields.values()[extra.field_index];
const field_name = union_obj.fields.keys()[extra.field_index];
@@ -5082,15 +5105,15 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (layout.tag_size == 0) {
break :result WValue{ .none = {} };
}
- assert(!isByRef(union_ty, func.target));
+ assert(!isByRef(union_ty, mod));
break :result tag_int;
}
- if (isByRef(union_ty, func.target)) {
+ if (isByRef(union_ty, mod)) {
const result_ptr = try func.allocStack(union_ty);
const payload = try func.resolveInst(extra.init);
if (layout.tag_align >= layout.payload_align) {
- if (isByRef(field.ty, func.target)) {
+ if (isByRef(field.ty, mod)) {
const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
try func.store(payload_ptr, payload, field.ty, 0);
} else {
@@ -5114,26 +5137,14 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result result_ptr;
} else {
const operand = try func.resolveInst(extra.init);
- var payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, union_ty.bitSize(func.target)),
- };
- const union_int_type = Type.initPayload(&payload.base);
- if (field.ty.zigTypeTag() == .Float) {
- var int_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, field.ty.bitSize(func.target)),
- };
- const int_type = Type.initPayload(&int_payload.base);
+ const union_int_type = try mod.intType(.unsigned, @intCast(u16, union_ty.bitSize(mod)));
+ if (field.ty.zigTypeTag(mod) == .Float) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
const bitcasted = try func.bitcast(field.ty, int_type, operand);
const casted = try func.trunc(bitcasted, int_type, union_int_type);
break :result try casted.toLocal(func, field.ty);
- } else if (field.ty.isPtrAtRuntime()) {
- var int_payload: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, field.ty.bitSize(func.target)),
- };
- const int_type = Type.initPayload(&int_payload.base);
+ } else if (field.ty.isPtrAtRuntime(mod)) {
+ const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
const casted = try func.intcast(operand, int_type, union_int_type);
break :result try casted.toLocal(func, field.ty);
}
@@ -5171,7 +5182,8 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
}
fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
- assert(operand_ty.hasRuntimeBitsIgnoreComptime());
+ const mod = func.bin_file.base.options.module.?;
+ assert(operand_ty.hasRuntimeBitsIgnoreComptime(mod));
assert(op == .eq or op == .neq);
var buf: Type.Payload.ElemType = undefined;
const payload_ty = operand_ty.optionalChild(&buf);
@@ -5189,7 +5201,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
_ = try func.load(lhs, payload_ty, 0);
_ = try func.load(rhs, payload_ty, 0);
- const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, func.target) });
+ const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, mod) });
try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
try func.addLabel(.br_if, 0);
@@ -5207,10 +5219,11 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
/// NOTE: Leaves the result of the comparison on top of the stack.
/// TODO: Lower this to compiler_rt call when bitsize > 128
fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
- assert(operand_ty.abiSize(func.target) >= 16);
+ const mod = func.bin_file.base.options.module.?;
+ assert(operand_ty.abiSize(mod) >= 16);
assert(!(lhs != .stack and rhs == .stack));
- if (operand_ty.bitSize(func.target) > 128) {
- return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(func.target)});
+ if (operand_ty.bitSize(mod) > 128) {
+ return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(mod)});
}
var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
@@ -5233,7 +5246,7 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
}
},
else => {
- const ty = if (operand_ty.isSignedInt()) Type.i64 else Type.u64;
+ const ty = if (operand_ty.isSignedInt(mod)) Type.i64 else Type.u64;
// leave those values on top of the stack for '.select'
const lhs_low_bit = try func.load(lhs, Type.u64, 8);
const rhs_low_bit = try func.load(rhs, Type.u64, 8);
@@ -5248,10 +5261,11 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
}
fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const un_ty = func.air.typeOf(bin_op.lhs).childType();
const tag_ty = func.air.typeOf(bin_op.rhs);
- const layout = un_ty.unionGetLayout(func.target);
+ const layout = un_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
const union_ptr = try func.resolveInst(bin_op.lhs);
@@ -5271,11 +5285,12 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const un_ty = func.air.typeOf(ty_op.operand);
const tag_ty = func.air.typeOfIndex(inst);
- const layout = un_ty.unionGetLayout(func.target);
+ const layout = un_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand});
const operand = try func.resolveInst(ty_op.operand);
@@ -5375,6 +5390,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
}
fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_op = func.air.instructions.items(.data)[inst].ty_op;
const err_set_ty = func.air.typeOf(ty_op.operand).childType();
@@ -5386,26 +5402,27 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
operand,
.{ .imm32 = 0 },
Type.anyerror,
- @intCast(u32, errUnionErrorOffset(payload_ty, func.target)),
+ @intCast(u32, errUnionErrorOffset(payload_ty, mod)),
);
const result = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :result func.reuseOperand(ty_op.operand, operand);
}
- break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, func.target)), .new);
+ break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, mod)), .new);
};
func.finishAir(inst, result, &.{ty_op.operand});
}
fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const field_ptr = try func.resolveInst(extra.field_ptr);
const parent_ty = func.air.getRefType(ty_pl.ty).childType();
- const field_offset = parent_ty.structFieldOffset(extra.field_index, func.target);
+ const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
const result = if (field_offset != 0) result: {
const base = try func.buildPointerOffset(field_ptr, 0, .new);
@@ -5428,6 +5445,7 @@ fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue
}
fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const dst = try func.resolveInst(bin_op.lhs);
const dst_ty = func.air.typeOf(bin_op.lhs);
@@ -5437,16 +5455,16 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const len = switch (dst_ty.ptrSize()) {
.Slice => blk: {
const slice_len = try func.sliceLen(dst);
- if (ptr_elem_ty.abiSize(func.target) != 1) {
+ if (ptr_elem_ty.abiSize(mod) != 1) {
try func.emitWValue(slice_len);
- try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(func.target)) });
+ try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(mod)) });
try func.addTag(.i32_mul);
try func.addLabel(.local_set, slice_len.local.value);
}
break :blk slice_len;
},
.One => @as(WValue, .{
- .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(func.target)),
+ .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(mod)),
}),
.C, .Many => unreachable,
};
@@ -5472,12 +5490,13 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const op_ty = func.air.typeOf(ty_op.operand);
const result_ty = func.air.typeOfIndex(inst);
+ const mod = func.bin_file.base.options.module.?;
- if (op_ty.zigTypeTag() == .Vector) {
+ if (op_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement @popCount for vectors", .{});
}
- const int_info = op_ty.intInfo(func.target);
+ const int_info = op_ty.intInfo(mod);
const bits = int_info.bits;
const wasm_bits = toWasmBits(bits) orelse {
return func.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits});
@@ -5527,7 +5546,8 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// to make a copy of the ptr+value but can point towards them directly.
const error_table_symbol = try func.bin_file.getErrorTableSymbol();
const name_ty = Type.initTag(.const_slice_u8_sentinel_0);
- const abi_size = name_ty.abiSize(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const abi_size = name_ty.abiSize(mod);
const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation
try func.emitWValue(error_name_value);
@@ -5566,12 +5586,13 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
const lhs_op = try func.resolveInst(extra.lhs);
const rhs_op = try func.resolveInst(extra.rhs);
const lhs_ty = func.air.typeOf(extra.lhs);
+ const mod = func.bin_file.base.options.module.?;
- if (lhs_ty.zigTypeTag() == .Vector) {
+ if (lhs_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
- const int_info = lhs_ty.intInfo(func.target);
+ const int_info = lhs_ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits});
@@ -5630,15 +5651,16 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
try func.store(result_ptr, result, lhs_ty, 0);
- const offset = @intCast(u32, lhs_ty.abiSize(func.target));
+ const offset = @intCast(u32, lhs_ty.abiSize(mod));
try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
}
fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, result_ty: Type, op: Op) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
assert(op == .add or op == .sub);
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
if (int_info.bits != 128) {
return func.fail("TODO: Implement @{{add/sub}}WithOverflow for integer bitsize '{d}'", .{int_info.bits});
@@ -5701,6 +5723,7 @@ fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type,
}
fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -5709,11 +5732,11 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const lhs_ty = func.air.typeOf(extra.lhs);
const rhs_ty = func.air.typeOf(extra.rhs);
- if (lhs_ty.zigTypeTag() == .Vector) {
+ if (lhs_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
- const int_info = lhs_ty.intInfo(func.target);
+ const int_info = lhs_ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
@@ -5721,7 +5744,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// Ensure rhs is coerced to lhs as they must have the same WebAssembly types
// before we can perform any binary operation.
- const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(func.target).bits).?;
+ const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(mod).bits).?;
const rhs_final = if (wasm_bits != rhs_wasm_bits) blk: {
const rhs_casted = try func.intcast(rhs, rhs_ty, lhs_ty);
break :blk try rhs_casted.toLocal(func, lhs_ty);
@@ -5750,7 +5773,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
try func.store(result_ptr, result, lhs_ty, 0);
- const offset = @intCast(u32, lhs_ty.abiSize(func.target));
+ const offset = @intCast(u32, lhs_ty.abiSize(mod));
try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -5763,8 +5786,9 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const lhs = try func.resolveInst(extra.lhs);
const rhs = try func.resolveInst(extra.rhs);
const lhs_ty = func.air.typeOf(extra.lhs);
+ const mod = func.bin_file.base.options.module.?;
- if (lhs_ty.zigTypeTag() == .Vector) {
+ if (lhs_ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
}
@@ -5773,7 +5797,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var overflow_bit = try func.ensureAllocLocal(Type.initTag(.u1));
defer overflow_bit.free(func);
- const int_info = lhs_ty.intInfo(func.target);
+ const int_info = lhs_ty.intInfo(mod);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits});
};
@@ -5924,7 +5948,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
try func.store(result_ptr, bin_op_local, lhs_ty, 0);
- const offset = @intCast(u32, lhs_ty.abiSize(func.target));
+ const offset = @intCast(u32, lhs_ty.abiSize(mod));
try func.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);
func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -5934,11 +5958,12 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const ty = func.air.typeOfIndex(inst);
- if (ty.zigTypeTag() == .Vector) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
}
- if (ty.abiSize(func.target) > 16) {
+ if (ty.abiSize(mod) > 16) {
return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
}
@@ -5954,7 +5979,7 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE
try func.addTag(.select);
// store result in local
- const result_ty = if (isByRef(ty, func.target)) Type.u32 else ty;
+ const result_ty = if (isByRef(ty, mod)) Type.u32 else ty;
const result = try func.allocLocal(result_ty);
try func.addLabel(.local_set, result.local.value);
func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
@@ -5965,7 +5990,8 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
const ty = func.air.typeOfIndex(inst);
- if (ty.zigTypeTag() == .Vector) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: `@mulAdd` for vectors", .{});
}
@@ -5998,12 +6024,13 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.air.typeOf(ty_op.operand);
const result_ty = func.air.typeOfIndex(inst);
- if (ty.zigTypeTag() == .Vector) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: `@clz` for vectors", .{});
}
const operand = try func.resolveInst(ty_op.operand);
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
};
@@ -6051,12 +6078,13 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.air.typeOf(ty_op.operand);
const result_ty = func.air.typeOfIndex(inst);
- if (ty.zigTypeTag() == .Vector) {
+ const mod = func.bin_file.base.options.module.?;
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: `@ctz` for vectors", .{});
}
const operand = try func.resolveInst(ty_op.operand);
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
const wasm_bits = toWasmBits(int_info.bits) orelse {
return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
};
@@ -6174,12 +6202,13 @@ fn lowerTry(
err_union_ty: Type,
operand_is_ptr: bool,
) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
if (operand_is_ptr) {
return func.fail("TODO: lowerTry for pointers", .{});
}
const pl_ty = err_union_ty.errorUnionPayload();
- const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime();
+ const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(mod);
if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
// Block we can jump out of when error is not set
@@ -6188,10 +6217,10 @@ fn lowerTry(
// check if the error tag is set for the error union.
try func.emitWValue(err_union);
if (pl_has_bits) {
- const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target));
+ const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod));
try func.addMemArg(.i32_load16_u, .{
.offset = err_union.offset() + err_offset,
- .alignment = Type.anyerror.abiAlignment(func.target),
+ .alignment = Type.anyerror.abiAlignment(mod),
});
}
try func.addTag(.i32_eqz);
@@ -6213,8 +6242,8 @@ fn lowerTry(
return WValue{ .none = {} };
}
- const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, func.target));
- if (isByRef(pl_ty, func.target)) {
+ const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, mod));
+ if (isByRef(pl_ty, mod)) {
return buildPointerOffset(func, err_union, pl_offset, .new);
}
const payload = try func.load(err_union, pl_ty, pl_offset);
@@ -6226,11 +6255,12 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = func.air.typeOfIndex(inst);
const operand = try func.resolveInst(ty_op.operand);
+ const mod = func.bin_file.base.options.module.?;
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
return func.fail("TODO: @byteSwap for vectors", .{});
}
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
// bytes are no-op
if (int_info.bits == 8) {
@@ -6292,13 +6322,14 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const ty = func.air.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- const result = if (ty.isSignedInt())
+ const result = if (ty.isSignedInt(mod))
try func.divSigned(lhs, rhs, ty)
else
try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
@@ -6306,13 +6337,14 @@ fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const ty = func.air.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- const div_result = if (ty.isSignedInt())
+ const div_result = if (ty.isSignedInt(mod))
try func.divSigned(lhs, rhs, ty)
else
try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
@@ -6328,15 +6360,16 @@ fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+ const mod = func.bin_file.base.options.module.?;
const ty = func.air.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- if (ty.isUnsignedInt()) {
+ if (ty.isUnsignedInt(mod)) {
const result = try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
- } else if (ty.isSignedInt()) {
- const int_bits = ty.intInfo(func.target).bits;
+ } else if (ty.isSignedInt(mod)) {
+ const int_bits = ty.intInfo(mod).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
return func.fail("TODO: `@divFloor` for signed integers larger than '{d}' bits", .{int_bits});
};
@@ -6414,7 +6447,8 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WValue {
- const int_bits = ty.intInfo(func.target).bits;
+ const mod = func.bin_file.base.options.module.?;
+ const int_bits = ty.intInfo(mod).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
return func.fail("TODO: Implement signed division for integers with bitsize '{d}'", .{int_bits});
};
@@ -6441,7 +6475,8 @@ fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WVal
/// Retrieves the absolute value of a signed integer
/// NOTE: Leaves the result value on the stack.
fn signAbsValue(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
- const int_bits = ty.intInfo(func.target).bits;
+ const mod = func.bin_file.base.options.module.?;
+ const int_bits = ty.intInfo(mod).bits;
const wasm_bits = toWasmBits(int_bits) orelse {
return func.fail("TODO: signAbsValue for signed integers larger than '{d}' bits", .{int_bits});
};
@@ -6476,11 +6511,12 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
assert(op == .add or op == .sub);
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+ const mod = func.bin_file.base.options.module.?;
const ty = func.air.typeOfIndex(inst);
const lhs = try func.resolveInst(bin_op.lhs);
const rhs = try func.resolveInst(bin_op.rhs);
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
if (int_info.bits > 64) {
@@ -6523,7 +6559,8 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
}
fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op: Op) InnerError!WValue {
- const int_info = ty.intInfo(func.target);
+ const mod = func.bin_file.base.options.module.?;
+ const int_info = ty.intInfo(mod);
const wasm_bits = toWasmBits(int_info.bits).?;
const is_wasm_bits = wasm_bits == int_info.bits;
@@ -6588,8 +6625,9 @@ fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type,
fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+ const mod = func.bin_file.base.options.module.?;
const ty = func.air.typeOfIndex(inst);
- const int_info = ty.intInfo(func.target);
+ const int_info = ty.intInfo(mod);
const is_signed = int_info.signedness == .signed;
if (int_info.bits > 64) {
return func.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits});
@@ -6707,12 +6745,13 @@ fn callIntrinsic(
};
// Always pass over C-ABI
- var func_type = try genFunctype(func.gpa, .C, param_types, return_type, func.target);
+ const mod = func.bin_file.base.options.module.?;
+ var func_type = try genFunctype(func.gpa, .C, param_types, return_type, mod);
defer func_type.deinit(func.gpa);
const func_type_index = try func.bin_file.putOrGetFuncType(func_type);
try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index);
- const want_sret_param = firstParamSRet(.C, return_type, func.target);
+ const want_sret_param = firstParamSRet(.C, return_type, mod);
// if we want return as first param, we allocate a pointer to stack,
// and emit it as our first argument
const sret = if (want_sret_param) blk: {
@@ -6724,14 +6763,14 @@ fn callIntrinsic(
// Lower all arguments to the stack before we call our function
for (args, 0..) |arg, arg_i| {
assert(!(want_sret_param and arg == .stack));
- assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime());
+ assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime(mod));
try func.lowerArg(.C, param_types[arg_i], arg);
}
// Actually call our intrinsic
try func.addLabel(.call, symbol_index);
- if (!return_type.hasRuntimeBitsIgnoreComptime()) {
+ if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
return WValue.none;
} else if (return_type.isNoReturn()) {
try func.addTag(.@"unreachable");
@@ -6759,15 +6798,15 @@ fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
+ const mod = func.bin_file.base.options.module.?;
const enum_decl_index = enum_ty.getOwnerDecl();
- const module = func.bin_file.base.options.module.?;
var arena_allocator = std.heap.ArenaAllocator.init(func.gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- const fqn = try module.declPtr(enum_decl_index).getFullyQualifiedName(module);
- defer module.gpa.free(fqn);
+ const fqn = try mod.declPtr(enum_decl_index).getFullyQualifiedName(mod);
+ defer mod.gpa.free(fqn);
const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn});
// check if we already generated code for this.
@@ -6775,10 +6814,9 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
return loc.index;
}
- var int_tag_type_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
+ const int_tag_ty = enum_ty.intTagType();
- if (int_tag_ty.bitSize(func.target) > 64) {
+ if (int_tag_ty.bitSize(mod) > 64) {
return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{});
}
@@ -6806,9 +6844,9 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
.data = @intCast(u64, tag_name.len),
};
const name_ty = Type.initPayload(&name_ty_payload.base);
- const string_bytes = &module.string_literal_bytes;
- try string_bytes.ensureUnusedCapacity(module.gpa, tag_name.len);
- const gop = try module.string_literal_table.getOrPutContextAdapted(module.gpa, tag_name, Module.StringLiteralAdapter{
+ const string_bytes = &mod.string_literal_bytes;
+ try string_bytes.ensureUnusedCapacity(mod.gpa, tag_name.len);
+ const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, tag_name, Module.StringLiteralAdapter{
.bytes = string_bytes,
}, Module.StringLiteralContext{
.bytes = string_bytes,
@@ -6929,7 +6967,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
try writer.writeByte(std.wasm.opcode(.end));
const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
- const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, func.target);
+ const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, mod);
return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs);
}
@@ -6944,11 +6982,11 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var values = try std.ArrayList(u32).initCapacity(func.gpa, names.len);
defer values.deinit();
- const module = func.bin_file.base.options.module.?;
+ const mod = func.bin_file.base.options.module.?;
var lowest: ?u32 = null;
var highest: ?u32 = null;
for (names) |name| {
- const err_int = module.global_error_set.get(name).?;
+ const err_int = mod.global_error_set.get(name).?;
if (lowest) |*l| {
if (err_int < l.*) {
l.* = err_int;
@@ -7019,6 +7057,7 @@ inline fn useAtomicFeature(func: *const CodeGen) bool {
}
fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
@@ -7037,7 +7076,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr_operand);
try func.lowerToStack(expected_val);
try func.lowerToStack(new_val);
- try func.addAtomicMemArg(switch (ty.abiSize(func.target)) {
+ try func.addAtomicMemArg(switch (ty.abiSize(mod)) {
1 => .i32_atomic_rmw8_cmpxchg_u,
2 => .i32_atomic_rmw16_cmpxchg_u,
4 => .i32_atomic_rmw_cmpxchg,
@@ -7045,14 +7084,14 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
}, .{
.offset = ptr_operand.offset(),
- .alignment = ty.abiAlignment(func.target),
+ .alignment = ty.abiAlignment(mod),
});
try func.addLabel(.local_tee, val_local.local.value);
_ = try func.cmp(.stack, expected_val, ty, .eq);
try func.addLabel(.local_set, cmp_result.local.value);
break :val val_local;
} else val: {
- if (ty.abiSize(func.target) > 8) {
+ if (ty.abiSize(mod) > 8) {
return func.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{});
}
const ptr_val = try WValue.toLocal(try func.load(ptr_operand, ty, 0), func, ty);
@@ -7068,7 +7107,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :val ptr_val;
};
- const result_ptr = if (isByRef(result_ty, func.target)) val: {
+ const result_ptr = if (isByRef(result_ty, mod)) val: {
try func.emitWValue(cmp_result);
try func.addImm32(-1);
try func.addTag(.i32_xor);
@@ -7076,7 +7115,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.i32_and);
const and_result = try WValue.toLocal(.stack, func, Type.bool);
const result_ptr = try func.allocStack(result_ty);
- try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(func.target)));
+ try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(mod)));
try func.store(result_ptr, ptr_val, ty, 0);
break :val result_ptr;
} else val: {
@@ -7091,12 +7130,13 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const atomic_load = func.air.instructions.items(.data)[inst].atomic_load;
const ptr = try func.resolveInst(atomic_load.ptr);
const ty = func.air.typeOfIndex(inst);
if (func.useAtomicFeature()) {
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) {
+ const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
1 => .i32_atomic_load8_u,
2 => .i32_atomic_load16_u,
4 => .i32_atomic_load,
@@ -7106,7 +7146,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(func.target),
+ .alignment = ty.abiAlignment(mod),
});
} else {
_ = try func.load(ptr, ty, 0);
@@ -7117,6 +7157,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const pl_op = func.air.instructions.items(.data)[inst].pl_op;
const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data;
@@ -7140,7 +7181,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
try func.emitWValue(value);
if (op == .Nand) {
- const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?;
+ const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
const and_res = try func.binOp(value, operand, ty, .@"and");
if (wasm_bits == 32)
@@ -7157,7 +7198,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addTag(.select);
}
try func.addAtomicMemArg(
- switch (ty.abiSize(func.target)) {
+ switch (ty.abiSize(mod)) {
1 => .i32_atomic_rmw8_cmpxchg_u,
2 => .i32_atomic_rmw16_cmpxchg_u,
4 => .i32_atomic_rmw_cmpxchg,
@@ -7166,7 +7207,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(func.target),
+ .alignment = ty.abiAlignment(mod),
},
);
const select_res = try func.allocLocal(ty);
@@ -7185,7 +7226,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => {
try func.emitWValue(ptr);
try func.emitWValue(operand);
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) {
+ const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
1 => switch (op) {
.Xchg => .i32_atomic_rmw8_xchg_u,
.Add => .i32_atomic_rmw8_add_u,
@@ -7226,7 +7267,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(func.target),
+ .alignment = ty.abiAlignment(mod),
});
const result = try WValue.toLocal(.stack, func, ty);
return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
@@ -7255,7 +7296,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.Xor => .xor,
else => unreachable,
});
- if (ty.isInt() and (op == .Add or op == .Sub)) {
+ if (ty.isInt(mod) and (op == .Add or op == .Sub)) {
_ = try func.wrapOperand(.stack, ty);
}
try func.store(.stack, .stack, ty, ptr.offset());
@@ -7271,7 +7312,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.store(.stack, .stack, ty, ptr.offset());
},
.Nand => {
- const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?;
+ const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
try func.emitWValue(ptr);
const and_res = try func.binOp(result, operand, ty, .@"and");
@@ -7302,6 +7343,7 @@ fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const bin_op = func.air.instructions.items(.data)[inst].bin_op;
const ptr = try func.resolveInst(bin_op.lhs);
@@ -7310,7 +7352,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = ptr_ty.childType();
if (func.useAtomicFeature()) {
- const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) {
+ const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
1 => .i32_atomic_store8,
2 => .i32_atomic_store16,
4 => .i32_atomic_store,
@@ -7321,7 +7363,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.lowerToStack(operand);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
- .alignment = ty.abiAlignment(func.target),
+ .alignment = ty.abiAlignment(mod),
});
} else {
try func.store(ptr, operand, ty, 0);
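The change running through this file swaps `func.target` for the owning `Module` handle and replaces hand-built `Type.Payload.Bits` integers with `mod.intType`. A minimal sketch of the new pattern, assuming the relative import paths this backend already uses; the helper itself is hypothetical and not part of the commit:

```zig
const Module = @import("../../Module.zig");
const Type = @import("../../type.zig").Type;

/// Hypothetical helper illustrating the new pattern: the unsigned backing
/// integer type is requested from the Module (which may allocate, hence the
/// error union) instead of being built from a stack-allocated
/// Type.Payload.Bits.
fn backingIntType(mod: *Module, ty: Type) !Type {
    const bits = @intCast(u16, ty.bitSize(mod));
    return mod.intType(.unsigned, bits);
}
```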
src/arch/x86_64/abi.zig
@@ -1,10 +1,3 @@
-const std = @import("std");
-const Type = @import("../../type.zig").Type;
-const Target = std.Target;
-const assert = std.debug.assert;
-const Register = @import("bits.zig").Register;
-const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
-
pub const Class = enum {
integer,
sse,
@@ -19,7 +12,7 @@ pub const Class = enum {
float_combine,
};
-pub fn classifyWindows(ty: Type, target: Target) Class {
+pub fn classifyWindows(ty: Type, mod: *const Module) Class {
// https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
// "There's a strict one-to-one correspondence between a function call's arguments
// and the registers used for those arguments. Any argument that doesn't fit in 8
@@ -28,7 +21,7 @@ pub fn classifyWindows(ty: Type, target: Target) Class {
// "All floating point operations are done using the 16 XMM registers."
// "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
// as if they were integers of the same size."
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Pointer,
.Int,
.Bool,
@@ -43,10 +36,10 @@ pub fn classifyWindows(ty: Type, target: Target) Class {
.ErrorUnion,
.AnyFrame,
.Frame,
- => switch (ty.abiSize(target)) {
+ => switch (ty.abiSize(mod)) {
0 => unreachable,
1, 2, 4, 8 => return .integer,
- else => switch (ty.zigTypeTag()) {
+ else => switch (ty.zigTypeTag(mod)) {
.Int => return .win_i128,
.Struct, .Union => if (ty.containerLayout() == .Packed) {
return .win_i128;
@@ -75,13 +68,14 @@ pub const Context = enum { ret, arg, other };
/// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
-pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
+pub fn classifySystemV(ty: Type, mod: *const Module, ctx: Context) [8]Class {
+ const target = mod.getTarget();
const memory_class = [_]Class{
.memory, .none, .none, .none,
.none, .none, .none, .none,
};
var result = [1]Class{.none} ** 8;
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Pointer => switch (ty.ptrSize()) {
.Slice => {
result[0] = .integer;
@@ -94,7 +88,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
},
},
.Int, .Enum, .ErrorSet => {
- const bits = ty.intInfo(target).bits;
+ const bits = ty.intInfo(mod).bits;
if (bits <= 64) {
result[0] = .integer;
return result;
@@ -165,7 +159,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
},
.Vector => {
const elem_ty = ty.childType();
- const bits = elem_ty.bitSize(target) * ty.arrayLen();
+ const bits = elem_ty.bitSize(mod) * ty.arrayLen();
if (bits <= 64) return .{
.sse, .none, .none, .none,
.none, .none, .none, .none,
@@ -204,7 +198,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
return memory_class;
},
.Optional => {
- if (ty.isPtrLikeOptional()) {
+ if (ty.isPtrLikeOptional(mod)) {
result[0] = .integer;
return result;
}
@@ -215,7 +209,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
// it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.".
- const ty_size = ty.abiSize(target);
+ const ty_size = ty.abiSize(mod);
if (ty.containerLayout() == .Packed) {
assert(ty_size <= 128);
result[0] = .integer;
@@ -230,12 +224,12 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
const fields = ty.structFields();
for (fields.values()) |field| {
if (field.abi_align != 0) {
- if (field.abi_align < field.ty.abiAlignment(target)) {
+ if (field.abi_align < field.ty.abiAlignment(mod)) {
return memory_class;
}
}
- const field_size = field.ty.abiSize(target);
- const field_class_array = classifySystemV(field.ty, target, .other);
+ const field_size = field.ty.abiSize(mod);
+ const field_class_array = classifySystemV(field.ty, mod, .other);
const field_class = std.mem.sliceTo(&field_class_array, .none);
if (byte_i + field_size <= 8) {
// Combine this field with the previous one.
@@ -334,7 +328,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
// it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.".
- const ty_size = ty.abiSize(target);
+ const ty_size = ty.abiSize(mod);
if (ty.containerLayout() == .Packed) {
assert(ty_size <= 128);
result[0] = .integer;
@@ -347,12 +341,12 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
const fields = ty.unionFields();
for (fields.values()) |field| {
if (field.abi_align != 0) {
- if (field.abi_align < field.ty.abiAlignment(target)) {
+ if (field.abi_align < field.ty.abiAlignment(mod)) {
return memory_class;
}
}
// Combine this field with the previous one.
- const field_class = classifySystemV(field.ty, target, .other);
+ const field_class = classifySystemV(field.ty, mod, .other);
for (&result, 0..) |*result_item, i| {
const field_item = field_class[i];
// "If both classes are equal, this is the resulting class."
@@ -426,7 +420,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
return result;
},
.Array => {
- const ty_size = ty.abiSize(target);
+ const ty_size = ty.abiSize(mod);
if (ty_size <= 64) {
result[0] = .integer;
return result;
@@ -527,10 +521,17 @@ pub const RegisterClass = struct {
};
};
+const builtin = @import("builtin");
+const std = @import("std");
+const Target = std.Target;
+const assert = std.debug.assert;
const testing = std.testing;
+
const Module = @import("../../Module.zig");
+const Register = @import("bits.zig").Register;
+const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
+const Type = @import("../../type.zig").Type;
const Value = @import("../../value.zig").Value;
-const builtin = @import("builtin");
fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field {
return .{
@@ -541,34 +542,3 @@ fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field {
.is_comptime = false,
};
}
-
-test "C_C_D" {
- var fields = Module.Struct.Fields{};
- // const C_C_D = extern struct { v1: i8, v2: i8, v3: f64 };
- try fields.ensureTotalCapacity(testing.allocator, 3);
- defer fields.deinit(testing.allocator);
- fields.putAssumeCapacity("v1", _field(.i8, 0));
- fields.putAssumeCapacity("v2", _field(.i8, 1));
- fields.putAssumeCapacity("v3", _field(.f64, 4));
-
- var C_C_D_struct = Module.Struct{
- .fields = fields,
- .namespace = undefined,
- .owner_decl = undefined,
- .zir_index = undefined,
- .layout = .Extern,
- .status = .fully_resolved,
- .known_non_opv = true,
- .is_tuple = false,
- };
- var C_C_D = Type.Payload.Struct{ .data = &C_C_D_struct };
-
- try testing.expectEqual(
- [_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none },
- classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .ret),
- );
- try testing.expectEqual(
- [_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none },
- classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .arg),
- );
-}
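Under the new signature above, `classifySystemV` takes only the Module and recovers the bare `std.Target` internally via `mod.getTarget()`. A hedged sketch of a caller, assuming the imports this file already declares; the wrapper is illustrative only and not part of the commit:

```zig
/// Illustrative wrapper: with the new signature a caller only threads the
/// Module through and inspects the first classification slot.
fn returnsInRegisters(ty: Type, mod: *const Module) bool {
    const classes = classifySystemV(ty, mod, .ret);
    return classes[0] != .memory;
}
```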
src/arch/x86_64/CodeGen.zig
@@ -605,14 +605,14 @@ const FrameAlloc = struct {
.ref_count = 0,
};
}
- fn initType(ty: Type, target: Target) FrameAlloc {
- return init(.{ .size = ty.abiSize(target), .alignment = ty.abiAlignment(target) });
+ fn initType(ty: Type, mod: *const Module) FrameAlloc {
+ return init(.{ .size = ty.abiSize(mod), .alignment = ty.abiAlignment(mod) });
}
};
const StackAllocation = struct {
inst: ?Air.Inst.Index,
- /// TODO do we need size? should be determined by inst.ty.abiSize(self.target.*)
+ /// TODO do we need size? should be determined by inst.ty.abiSize(mod)
size: u32,
};
@@ -714,12 +714,12 @@ pub fn generate(
function.args = call_info.args;
function.ret_mcv = call_info.return_value;
function.frame_allocs.set(@enumToInt(FrameIndex.ret_addr), FrameAlloc.init(.{
- .size = Type.usize.abiSize(function.target.*),
- .alignment = @min(Type.usize.abiAlignment(function.target.*), call_info.stack_align),
+ .size = Type.usize.abiSize(mod),
+ .alignment = @min(Type.usize.abiAlignment(mod), call_info.stack_align),
}));
function.frame_allocs.set(@enumToInt(FrameIndex.base_ptr), FrameAlloc.init(.{
- .size = Type.usize.abiSize(function.target.*),
- .alignment = @min(Type.usize.abiAlignment(function.target.*) * 2, call_info.stack_align),
+ .size = Type.usize.abiSize(mod),
+ .alignment = @min(Type.usize.abiAlignment(mod) * 2, call_info.stack_align),
}));
function.frame_allocs.set(
@enumToInt(FrameIndex.args_frame),
@@ -1565,6 +1565,7 @@ fn asmMemoryRegisterImmediate(
}
fn gen(self: *Self) InnerError!void {
+ const mod = self.bin_file.options.module.?;
const cc = self.fn_type.fnCallingConvention();
if (cc != .Naked) {
try self.asmRegister(.{ ._, .push }, .rbp);
@@ -1582,7 +1583,7 @@ fn gen(self: *Self) InnerError!void {
// register which the callee is free to clobber. Therefore, we purposely
// spill it to stack immediately.
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initType(Type.usize, self.target.*));
+ try self.allocFrameIndex(FrameAlloc.initType(Type.usize, mod));
try self.genSetMem(
.{ .frame = frame_index },
0,
@@ -1999,7 +2000,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
}
fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
- switch (lazy_sym.ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (lazy_sym.ty.zigTypeTag(mod)) {
.Enum => {
const enum_ty = lazy_sym.ty;
wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(self.bin_file.options.module.?)});
@@ -2127,8 +2129,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
tomb_bits >>= 1;
if (!dies) continue;
const op_int = @enumToInt(op);
- if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
- const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+ if (op_int < Air.ref_start_index) continue;
+ const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
self.processDeath(op_index);
}
self.finishAirResult(inst, result);
@@ -2252,14 +2254,14 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex {
+ const mod = self.bin_file.options.module.?;
const ptr_ty = self.air.typeOfIndex(inst);
const val_ty = ptr_ty.childType();
return self.allocFrameIndex(FrameAlloc.init(.{
- .size = math.cast(u32, val_ty.abiSize(self.target.*)) orelse {
- const mod = self.bin_file.options.module.?;
+ .size = math.cast(u32, val_ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(mod)});
},
- .alignment = @max(ptr_ty.ptrAlignment(self.target.*), 1),
+ .alignment = @max(ptr_ty.ptrAlignment(mod), 1),
}));
}
@@ -2272,19 +2274,19 @@ fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue {
}
fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue {
- const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse {
- const mod = self.bin_file.options.module.?;
+ const mod = self.bin_file.options.module.?;
+ const abi_size = math.cast(u32, ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)});
};
if (reg_ok) need_mem: {
- if (abi_size <= @as(u32, switch (ty.zigTypeTag()) {
+ if (abi_size <= @as(u32, switch (ty.zigTypeTag(mod)) {
.Float => switch (ty.floatBits(self.target.*)) {
16, 32, 64, 128 => 16,
80 => break :need_mem,
else => unreachable,
},
- .Vector => switch (ty.childType().zigTypeTag()) {
+ .Vector => switch (ty.childType().zigTypeTag(mod)) {
.Float => switch (ty.childType().floatBits(self.target.*)) {
16, 32, 64, 128 => if (self.hasFeature(.avx)) 32 else 16,
80 => break :need_mem,
@@ -2294,18 +2296,18 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b
},
else => 8,
})) {
- if (self.register_manager.tryAllocReg(inst, regClassForType(ty))) |reg| {
+ if (self.register_manager.tryAllocReg(inst, regClassForType(ty, mod))) |reg| {
return MCValue{ .register = registerAlias(reg, abi_size) };
}
}
}
- const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ty, self.target.*));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ty, mod));
return .{ .load_frame = .{ .index = frame_index } };
}
-fn regClassForType(ty: Type) RegisterManager.RegisterBitSet {
- return switch (ty.zigTypeTag()) {
+fn regClassForType(ty: Type, mod: *const Module) RegisterManager.RegisterBitSet {
+ return switch (ty.zigTypeTag(mod)) {
.Float, .Vector => sse,
else => gp,
};
@@ -2449,7 +2451,8 @@ pub fn spillRegisters(self: *Self, registers: []const Register) !void {
/// allocated. A second call to `copyToTmpRegister` may return the same register.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
- const reg = try self.register_manager.allocReg(null, regClassForType(ty));
+ const mod = self.bin_file.options.module.?;
+ const reg = try self.register_manager.allocReg(null, regClassForType(ty, mod));
try self.genSetReg(reg, ty, mcv);
return reg;
}
@@ -2464,7 +2467,8 @@ fn copyToRegisterWithInstTracking(
ty: Type,
mcv: MCValue,
) !MCValue {
- const reg: Register = try self.register_manager.allocReg(reg_owner, regClassForType(ty));
+ const mod = self.bin_file.options.module.?;
+ const reg: Register = try self.register_manager.allocReg(reg_owner, regClassForType(ty, mod));
try self.genSetReg(reg, ty, mcv);
return MCValue{ .register = reg };
}
@@ -2618,14 +2622,15 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = result: {
const src_ty = self.air.typeOf(ty_op.operand);
- const src_int_info = src_ty.intInfo(self.target.*);
+ const src_int_info = src_ty.intInfo(mod);
const dst_ty = self.air.typeOfIndex(inst);
- const dst_int_info = dst_ty.intInfo(self.target.*);
- const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+ const dst_int_info = dst_ty.intInfo(mod);
+ const abi_size = @intCast(u32, dst_ty.abiSize(mod));
const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty;
const extend = switch (src_int_info.signedness) {
@@ -2670,14 +2675,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
const high_bits = src_int_info.bits % 64;
if (high_bits > 0) {
- var high_pl = Type.Payload.Bits{
- .base = .{ .tag = switch (extend) {
- .signed => .int_signed,
- .unsigned => .int_unsigned,
- } },
- .data = high_bits,
- };
- const high_ty = Type.initPayload(&high_pl.base);
+ const high_ty = try mod.intType(extend, high_bits);
try self.truncateRegister(high_ty, high_reg);
try self.genCopy(Type.usize, high_mcv, .{ .register = high_reg });
}
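Wherever the old code built a temporary integer type by stack-allocating a Type.Payload.Bits and calling Type.initPayload, the new code asks the module to intern the type instead, as in the hunk above. A minimal before/after sketch, where `signedness` and `bits` stand in for whatever the call site computes:

// Before: the payload lives on the caller's stack and the tag encodes signedness.
var pl = Type.Payload.Bits{
    .base = .{ .tag = switch (signedness) {
        .signed => .int_signed,
        .unsigned => .int_unsigned,
    } },
    .data = bits,
};
const int_ty_old = Type.initPayload(&pl.base);

// After: the module interns the type, so no local payload is needed.
const int_ty_new = try mod.intType(signedness, bits);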
@@ -2706,12 +2704,13 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
}
fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const dst_ty = self.air.typeOfIndex(inst);
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+ const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
const src_ty = self.air.typeOf(ty_op.operand);
- const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+ const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
const result = result: {
const src_mcv = try self.resolveInst(ty_op.operand);
@@ -2724,10 +2723,10 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
else
try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
- if (dst_ty.zigTypeTag() == .Vector) {
- assert(src_ty.zigTypeTag() == .Vector and dst_ty.vectorLen() == src_ty.vectorLen());
- const dst_info = dst_ty.childType().intInfo(self.target.*);
- const src_info = src_ty.childType().intInfo(self.target.*);
+ if (dst_ty.zigTypeTag(mod) == .Vector) {
+ assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen() == src_ty.vectorLen());
+ const dst_info = dst_ty.childType().intInfo(mod);
+ const src_info = src_ty.childType().intInfo(mod);
const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_info.bits) {
8 => switch (src_info.bits) {
16 => switch (dst_ty.vectorLen()) {
@@ -2775,7 +2774,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
},
};
const full_ty = Type.initPayload(&full_pl.base);
- const full_abi_size = @intCast(u32, full_ty.abiSize(self.target.*));
+ const full_abi_size = @intCast(u32, full_ty.abiSize(mod));
const splat_mcv = try self.genTypedValue(.{ .ty = full_ty, .val = splat_val });
const splat_addr_mcv: MCValue = switch (splat_mcv) {
@@ -2831,6 +2830,7 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
}
fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -2840,11 +2840,11 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
const len = try self.resolveInst(bin_op.rhs);
const len_ty = self.air.typeOf(bin_op.rhs);
- const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, self.target.*));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod));
try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, ptr_ty.abiSize(self.target.*)),
+ @intCast(i32, ptr_ty.abiSize(mod)),
len_ty,
len,
);
@@ -2873,23 +2873,24 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
}
fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
+ const mod = self.bin_file.options.module.?;
const air_tag = self.air.instructions.items(.tag);
const air_data = self.air.instructions.items(.data);
const dst_ty = self.air.typeOf(dst_air);
- const dst_info = dst_ty.intInfo(self.target.*);
+ const dst_info = dst_ty.intInfo(mod);
if (Air.refToIndex(dst_air)) |inst| {
switch (air_tag[inst]) {
.constant => {
const src_val = self.air.values[air_data[inst].ty_pl.payload];
var space: Value.BigIntSpace = undefined;
- const src_int = src_val.toBigInt(&space, self.target.*);
+ const src_int = src_val.toBigInt(&space, mod);
return @intCast(u16, src_int.bitCountTwosComp()) +
@boolToInt(src_int.positive and dst_info.signedness == .signed);
},
.intcast => {
const src_ty = self.air.typeOf(air_data[inst].ty_op.operand);
- const src_info = src_ty.intInfo(self.target.*);
+ const src_info = src_ty.intInfo(mod);
return @min(switch (src_info.signedness) {
.signed => switch (dst_info.signedness) {
.signed => src_info.bits,
@@ -2908,20 +2909,18 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
}
fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result = result: {
const tag = self.air.instructions.items(.tag)[inst];
const dst_ty = self.air.typeOfIndex(inst);
- switch (dst_ty.zigTypeTag()) {
+ switch (dst_ty.zigTypeTag(mod)) {
.Float, .Vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs),
else => {},
}
- const dst_info = dst_ty.intInfo(self.target.*);
- var src_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dst_info.signedness) {
- .signed => .int_signed,
- .unsigned => .int_unsigned,
- } }, .data = switch (tag) {
+ const dst_info = dst_ty.intInfo(mod);
+ const src_ty = try mod.intType(dst_info.signedness, switch (tag) {
else => unreachable,
.mul, .mulwrap => math.max3(
self.activeIntBits(bin_op.lhs),
@@ -2929,8 +2928,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
dst_info.bits / 2,
),
.div_trunc, .div_floor, .div_exact, .rem, .mod => dst_info.bits,
- } };
- const src_ty = Type.initPayload(&src_pl.base);
+ });
try self.spillEflagsIfOccupied();
try self.spillRegisters(&.{ .rax, .rdx });
@@ -2942,6 +2940,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ty = self.air.typeOf(bin_op.lhs);
@@ -2968,7 +2967,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
const reg_bits = self.regBitSize(ty);
const reg_extra_bits = self.regExtraBits(ty);
- const cc: Condition = if (ty.isSignedInt()) cc: {
+ const cc: Condition = if (ty.isSignedInt(mod)) cc: {
if (reg_extra_bits > 0) {
try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits });
}
@@ -2994,7 +2993,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
break :cc .o;
} else cc: {
try self.genSetReg(limit_reg, ty, .{
- .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(self.target.*)),
+ .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(mod)),
});
try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv);
@@ -3005,14 +3004,14 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
break :cc .c;
};
- const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
+ const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2);
try self.asmCmovccRegisterRegister(
registerAlias(dst_reg, cmov_abi_size),
registerAlias(limit_reg, cmov_abi_size),
cc,
);
- if (reg_extra_bits > 0 and ty.isSignedInt()) {
+ if (reg_extra_bits > 0 and ty.isSignedInt(mod)) {
try self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits });
}
@@ -3020,6 +3019,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
}
fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ty = self.air.typeOf(bin_op.lhs);
@@ -3046,7 +3046,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
const reg_bits = self.regBitSize(ty);
const reg_extra_bits = self.regExtraBits(ty);
- const cc: Condition = if (ty.isSignedInt()) cc: {
+ const cc: Condition = if (ty.isSignedInt(mod)) cc: {
if (reg_extra_bits > 0) {
try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits });
}
@@ -3076,14 +3076,14 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
break :cc .c;
};
- const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
+ const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2);
try self.asmCmovccRegisterRegister(
registerAlias(dst_reg, cmov_abi_size),
registerAlias(limit_reg, cmov_abi_size),
cc,
);
- if (reg_extra_bits > 0 and ty.isSignedInt()) {
+ if (reg_extra_bits > 0 and ty.isSignedInt(mod)) {
try self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits });
}
@@ -3091,6 +3091,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
}
fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ty = self.air.typeOf(bin_op.lhs);
@@ -3118,7 +3119,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
defer self.register_manager.unlockReg(limit_lock);
const reg_bits = self.regBitSize(ty);
- const cc: Condition = if (ty.isSignedInt()) cc: {
+ const cc: Condition = if (ty.isSignedInt(mod)) cc: {
try self.genSetReg(limit_reg, ty, lhs_mcv);
try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv);
try self.genShiftBinOpMir(.{ ._, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
@@ -3134,7 +3135,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
};
const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv);
- const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
+ const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2);
try self.asmCmovccRegisterRegister(
registerAlias(dst_mcv.register, cmov_abi_size),
registerAlias(limit_reg, cmov_abi_size),
@@ -3145,12 +3146,13 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = result: {
const tag = self.air.instructions.items(.tag)[inst];
const ty = self.air.typeOf(bin_op.lhs);
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement add/sub with overflow for Vector type", .{}),
.Int => {
try self.spillEflagsIfOccupied();
@@ -3160,7 +3162,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.sub_with_overflow => .sub,
else => unreachable,
}, bin_op.lhs, bin_op.rhs);
- const int_info = ty.intInfo(self.target.*);
+ const int_info = ty.intInfo(mod);
const cc: Condition = switch (int_info.signedness) {
.unsigned => .c,
.signed => .o,
@@ -3177,16 +3179,16 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*));
+ try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod));
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)),
+ @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
Type.u1,
.{ .eflags = cc },
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)),
+ @intCast(i32, tuple_ty.structFieldOffset(0, mod)),
ty,
partial_mcv,
);
@@ -3194,7 +3196,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*));
+ try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod));
try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
break :result .{ .load_frame = .{ .index = frame_index } };
},
@@ -3205,12 +3207,13 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = result: {
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement shl with overflow for Vector type", .{}),
.Int => {
try self.spillEflagsIfOccupied();
@@ -3219,7 +3222,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
const partial_mcv = try self.genShiftBinOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty);
const partial_lock = switch (partial_mcv) {
@@ -3249,16 +3252,16 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*));
+ try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod));
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)),
+ @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
tuple_ty.structFieldType(1),
.{ .eflags = cc },
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)),
+ @intCast(i32, tuple_ty.structFieldOffset(0, mod)),
tuple_ty.structFieldType(0),
partial_mcv,
);
@@ -3266,7 +3269,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
}
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*));
+ try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod));
try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
break :result .{ .load_frame = .{ .index = frame_index } };
},
@@ -3283,6 +3286,7 @@ fn genSetFrameTruncatedOverflowCompare(
src_mcv: MCValue,
overflow_cc: ?Condition,
) !void {
+ const mod = self.bin_file.options.module.?;
const src_lock = switch (src_mcv) {
.register => |reg| self.register_manager.lockReg(reg),
else => null,
@@ -3290,22 +3294,12 @@ fn genSetFrameTruncatedOverflowCompare(
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
const ty = tuple_ty.structFieldType(0);
- const int_info = ty.intInfo(self.target.*);
+ const int_info = ty.intInfo(mod);
- var hi_limb_pl = Type.Payload.Bits{
- .base = .{ .tag = switch (int_info.signedness) {
- .signed => .int_signed,
- .unsigned => .int_unsigned,
- } },
- .data = (int_info.bits - 1) % 64 + 1,
- };
- const hi_limb_ty = Type.initPayload(&hi_limb_pl.base);
+ const hi_limb_bits = (int_info.bits - 1) % 64 + 1;
+ const hi_limb_ty = try mod.intType(int_info.signedness, hi_limb_bits);
- var rest_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = int_info.bits - hi_limb_pl.data,
- };
- const rest_ty = Type.initPayload(&rest_pl.base);
+ const rest_ty = try mod.intType(.unsigned, int_info.bits - hi_limb_bits);
const temp_regs = try self.register_manager.allocRegs(3, .{ null, null, null }, gp);
const temp_locks = self.register_manager.lockRegsAssumeUnused(3, temp_regs);
@@ -3335,7 +3329,7 @@ fn genSetFrameTruncatedOverflowCompare(
);
}
- const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*));
+ const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, mod));
if (hi_limb_off > 0) try self.genSetMem(.{ .frame = frame_index }, payload_off, rest_ty, src_mcv);
try self.genSetMem(
.{ .frame = frame_index },
@@ -3345,23 +3339,24 @@ fn genSetFrameTruncatedOverflowCompare(
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)),
+ @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
tuple_ty.structFieldType(1),
if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne },
);
}
fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const dst_ty = self.air.typeOf(bin_op.lhs);
- const result: MCValue = switch (dst_ty.zigTypeTag()) {
+ const result: MCValue = switch (dst_ty.zigTypeTag(mod)) {
.Vector => return self.fail("TODO implement mul_with_overflow for Vector type", .{}),
.Int => result: {
try self.spillEflagsIfOccupied();
try self.spillRegisters(&.{ .rax, .rdx });
- const dst_info = dst_ty.intInfo(self.target.*);
+ const dst_info = dst_ty.intInfo(mod);
const cc: Condition = switch (dst_info.signedness) {
.unsigned => .c,
.signed => .o,
@@ -3369,11 +3364,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const lhs_active_bits = self.activeIntBits(bin_op.lhs);
const rhs_active_bits = self.activeIntBits(bin_op.rhs);
- var src_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dst_info.signedness) {
- .signed => .int_signed,
- .unsigned => .int_unsigned,
- } }, .data = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2) };
- const src_ty = Type.initPayload(&src_pl.base);
+ const src_bits = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2);
+ const src_ty = try mod.intType(dst_info.signedness, src_bits);
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -3391,26 +3383,26 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
break :result .{ .register_overflow = .{ .reg = reg, .eflags = cc } };
} else {
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*));
+ try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod));
try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
break :result .{ .load_frame = .{ .index = frame_index } };
},
else => {
// For now, this is the only supported multiply that doesn't fit in a register.
- assert(dst_info.bits <= 128 and src_pl.data == 64);
+ assert(dst_info.bits <= 128 and src_bits == 64);
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*));
+ try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod));
if (dst_info.bits >= lhs_active_bits + rhs_active_bits) {
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)),
+ @intCast(i32, tuple_ty.structFieldOffset(0, mod)),
tuple_ty.structFieldType(0),
partial_mcv,
);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)),
+ @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
tuple_ty.structFieldType(1),
.{ .immediate = 0 }, // cc being set is impossible
);
@@ -3433,7 +3425,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
/// Clobbers .rax and .rdx registers.
/// Quotient is saved in .rax and remainder in .rdx.
fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void {
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ const mod = self.bin_file.options.module.?;
+ const abi_size = @intCast(u32, ty.abiSize(mod));
if (abi_size > 8) {
return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{});
}
@@ -3472,8 +3465,9 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue
/// Always returns a register.
/// Clobbers .rax and .rdx registers.
fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue {
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
- const int_info = ty.intInfo(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const abi_size = @intCast(u32, ty.abiSize(mod));
+ const int_info = ty.intInfo(mod);
const dividend: Register = switch (lhs) {
.register => |reg| reg,
else => try self.copyToTmpRegister(ty, lhs),
@@ -3585,6 +3579,7 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result = result: {
const dst_ty = self.air.typeOfIndex(inst);
@@ -3592,7 +3587,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const opt_ty = src_ty.childType();
const src_mcv = try self.resolveInst(ty_op.operand);
- if (opt_ty.optionalReprIsPayload()) {
+ if (opt_ty.optionalReprIsPayload(mod)) {
break :result if (self.liveness.isUnused(inst))
.unreach
else if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
@@ -3610,7 +3605,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
const pl_ty = dst_ty.childType();
- const pl_abi_size = @intCast(i32, pl_ty.abiSize(self.target.*));
+ const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod));
try self.genSetMem(.{ .reg = dst_mcv.getReg().? }, pl_abi_size, Type.bool, .{ .immediate = 1 });
break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv;
};
@@ -3618,6 +3613,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
}
fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const err_union_ty = self.air.typeOf(ty_op.operand);
const err_ty = err_union_ty.errorUnionSet();
@@ -3629,11 +3625,11 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue{ .immediate = 0 };
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :result operand;
}
- const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+ const err_off = errUnionErrorOffset(payload_ty, mod);
switch (operand) {
.register => |reg| {
// TODO reuse operand
@@ -3678,12 +3674,13 @@ fn genUnwrapErrorUnionPayloadMir(
err_union_ty: Type,
err_union: MCValue,
) !MCValue {
+ const mod = self.bin_file.options.module.?;
const payload_ty = err_union_ty.errorUnionPayload();
const result: MCValue = result: {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result .none;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
- const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
+ const payload_off = errUnionPayloadOffset(payload_ty, mod);
switch (err_union) {
.load_frame => |frame_addr| break :result .{ .load_frame = .{
.index = frame_addr.index,
@@ -3720,6 +3717,7 @@ fn genUnwrapErrorUnionPayloadMir(
// *(E!T) -> E
fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const src_ty = self.air.typeOf(ty_op.operand);
@@ -3739,8 +3737,8 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
const eu_ty = src_ty.childType();
const pl_ty = eu_ty.errorUnionPayload();
const err_ty = eu_ty.errorUnionSet();
- const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
- const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*));
+ const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
+ const err_abi_size = @intCast(u32, err_ty.abiSize(mod));
try self.asmRegisterMemory(
.{ ._, .mov },
registerAlias(dst_reg, err_abi_size),
@@ -3755,6 +3753,7 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
// *(E!T) -> *T
fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const src_ty = self.air.typeOf(ty_op.operand);
@@ -3777,8 +3776,8 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
const eu_ty = src_ty.childType();
const pl_ty = eu_ty.errorUnionPayload();
- const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+ const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
+ const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
try self.asmRegisterMemory(
.{ ._, .lea },
registerAlias(dst_reg, dst_abi_size),
@@ -3789,6 +3788,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = result: {
const src_ty = self.air.typeOf(ty_op.operand);
@@ -3803,8 +3803,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const eu_ty = src_ty.childType();
const pl_ty = eu_ty.errorUnionPayload();
const err_ty = eu_ty.errorUnionSet();
- const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
- const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*));
+ const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
+ const err_abi_size = @intCast(u32, err_ty.abiSize(mod));
try self.asmMemoryImmediate(
.{ ._, .mov },
Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{
@@ -3824,8 +3824,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+ const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
+ const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
try self.asmRegisterMemory(
.{ ._, .lea },
registerAlias(dst_reg, dst_abi_size),
@@ -3853,14 +3853,15 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
}
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = result: {
const pl_ty = self.air.typeOf(ty_op.operand);
- if (!pl_ty.hasRuntimeBits()) break :result .{ .immediate = 1 };
+ if (!pl_ty.hasRuntimeBits(mod)) break :result .{ .immediate = 1 };
const opt_ty = self.air.typeOfIndex(inst);
const pl_mcv = try self.resolveInst(ty_op.operand);
- const same_repr = opt_ty.optionalReprIsPayload();
+ const same_repr = opt_ty.optionalReprIsPayload(mod);
if (same_repr and self.reuseOperand(inst, ty_op.operand, 0, pl_mcv)) break :result pl_mcv;
const pl_lock: ?RegisterLock = switch (pl_mcv) {
@@ -3873,7 +3874,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
try self.genCopy(pl_ty, opt_mcv, pl_mcv);
if (!same_repr) {
- const pl_abi_size = @intCast(i32, pl_ty.abiSize(self.target.*));
+ const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod));
switch (opt_mcv) {
else => unreachable,
@@ -3900,6 +3901,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
/// T to E!T
fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const eu_ty = self.air.getRefType(ty_op.ty);
@@ -3908,11 +3910,11 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
const operand = try self.resolveInst(ty_op.operand);
const result: MCValue = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) break :result .{ .immediate = 0 };
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .{ .immediate = 0 };
- const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, self.target.*));
- const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
- const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod));
+ const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
+ const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand);
try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 });
break :result .{ .load_frame = .{ .index = frame_index } };
@@ -3922,6 +3924,7 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
/// E to E!T
fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const eu_ty = self.air.getRefType(ty_op.ty);
@@ -3929,11 +3932,11 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const err_ty = eu_ty.errorUnionSet();
const result: MCValue = result: {
- if (!pl_ty.hasRuntimeBitsIgnoreComptime()) break :result try self.resolveInst(ty_op.operand);
+ if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand);
- const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, self.target.*));
- const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
- const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod));
+ const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
+ const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef);
const operand = try self.resolveInst(ty_op.operand);
try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand);
@@ -3974,6 +3977,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
}
fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const src_ty = self.air.typeOf(ty_op.operand);
@@ -3994,7 +3998,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+ const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
try self.asmRegisterMemory(
.{ ._, .lea },
registerAlias(dst_reg, dst_abi_size),
@@ -4041,6 +4045,7 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Regi
}
fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
+ const mod = self.bin_file.options.module.?;
const slice_ty = self.air.typeOf(lhs);
const slice_mcv = try self.resolveInst(lhs);
const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) {
@@ -4050,7 +4055,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock);
const elem_ty = slice_ty.childType();
- const elem_size = elem_ty.abiSize(self.target.*);
+ const elem_size = elem_ty.abiSize(mod);
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
@@ -4097,6 +4102,7 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const array_ty = self.air.typeOf(bin_op.lhs);
@@ -4108,7 +4114,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
defer if (array_lock) |lock| self.register_manager.unlockReg(lock);
const elem_ty = array_ty.childType();
- const elem_abi_size = elem_ty.abiSize(self.target.*);
+ const elem_abi_size = elem_ty.abiSize(mod);
const index_ty = self.air.typeOf(bin_op.rhs);
const index = try self.resolveInst(bin_op.rhs);
@@ -4125,7 +4131,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
const addr_reg = try self.register_manager.allocReg(null, gp);
switch (array) {
.register => {
- const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, self.target.*));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, mod));
try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array);
try self.asmRegisterMemory(
.{ ._, .lea },
@@ -4162,14 +4168,15 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
}
fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.air.typeOf(bin_op.lhs);
// this is identical to the `airPtrElemPtr` codegen except here an
// additional `mov` is needed at the end to get the actual value
- const elem_ty = ptr_ty.elemType2();
- const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+ const elem_ty = ptr_ty.elemType2(mod);
+ const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod));
const index_ty = self.air.typeOf(bin_op.rhs);
const index_mcv = try self.resolveInst(bin_op.rhs);
const index_lock = switch (index_mcv) {
@@ -4207,6 +4214,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
}
fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -4218,8 +4226,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
};
defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
- const elem_ty = ptr_ty.elemType2();
- const elem_abi_size = elem_ty.abiSize(self.target.*);
+ const elem_ty = ptr_ty.elemType2(mod);
+ const elem_abi_size = elem_ty.abiSize(mod);
const index_ty = self.air.typeOf(extra.rhs);
const index = try self.resolveInst(extra.rhs);
const index_lock: ?RegisterLock = switch (index) {
@@ -4239,11 +4247,12 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_union_ty = self.air.typeOf(bin_op.lhs);
const union_ty = ptr_union_ty.childType();
const tag_ty = self.air.typeOf(bin_op.rhs);
- const layout = union_ty.unionGetLayout(self.target.*);
+ const layout = union_ty.unionGetLayout(mod);
if (layout.tag_size == 0) {
return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -4284,11 +4293,12 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
}
fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const tag_ty = self.air.typeOfIndex(inst);
const union_ty = self.air.typeOf(ty_op.operand);
- const layout = union_ty.unionGetLayout(self.target.*);
+ const layout = union_ty.unionGetLayout(mod);
if (layout.tag_size == 0) {
return self.finishAir(inst, .none, .{ ty_op.operand, .none, .none });
@@ -4302,7 +4312,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
};
defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
- const tag_abi_size = tag_ty.abiSize(self.target.*);
+ const tag_abi_size = tag_ty.abiSize(mod);
const dst_mcv: MCValue = blk: {
switch (operand) {
.load_frame => |frame_addr| {
@@ -4337,6 +4347,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
}
fn airClz(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result = result: {
const dst_ty = self.air.typeOfIndex(inst);
@@ -4358,7 +4369,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
defer self.register_manager.unlockReg(dst_lock);
- const src_bits = src_ty.bitSize(self.target.*);
+ const src_bits = src_ty.bitSize(mod);
if (self.hasFeature(.lzcnt)) {
if (src_bits <= 8) {
const wide_reg = try self.copyToTmpRegister(src_ty, mat_src_mcv);
@@ -4405,7 +4416,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
}
if (src_bits > 64)
- return self.fail("TODO airClz of {}", .{src_ty.fmt(self.bin_file.options.module.?)});
+ return self.fail("TODO airClz of {}", .{src_ty.fmt(mod)});
if (math.isPowerOfTwo(src_bits)) {
const imm_reg = try self.copyToTmpRegister(dst_ty, .{
.immediate = src_bits ^ (src_bits - 1),
@@ -4422,7 +4433,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
try self.genBinOpMir(.{ ._, .bsr }, Type.u16, dst_mcv, .{ .register = wide_reg });
} else try self.genBinOpMir(.{ ._, .bsr }, src_ty, dst_mcv, mat_src_mcv);
- const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2);
+ const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2);
try self.asmCmovccRegisterRegister(
registerAlias(dst_reg, cmov_abi_size),
registerAlias(imm_reg, cmov_abi_size),
@@ -4449,7 +4460,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
.{ .register = wide_reg },
);
- const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2);
+ const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2);
try self.asmCmovccRegisterRegister(
registerAlias(imm_reg, cmov_abi_size),
registerAlias(dst_reg, cmov_abi_size),
@@ -4465,11 +4476,12 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
}
fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result = result: {
const dst_ty = self.air.typeOfIndex(inst);
const src_ty = self.air.typeOf(ty_op.operand);
- const src_bits = src_ty.bitSize(self.target.*);
+ const src_bits = src_ty.bitSize(mod);
const src_mcv = try self.resolveInst(ty_op.operand);
const mat_src_mcv = switch (src_mcv) {
@@ -4548,7 +4560,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
try self.genBinOpMir(.{ ._, .bsf }, Type.u16, dst_mcv, .{ .register = wide_reg });
} else try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv);
- const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2);
+ const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2);
try self.asmCmovccRegisterRegister(
registerAlias(dst_reg, cmov_abi_size),
registerAlias(width_reg, cmov_abi_size),
@@ -4560,10 +4572,11 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
}
fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = result: {
const src_ty = self.air.typeOf(ty_op.operand);
- const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+ const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
const src_mcv = try self.resolveInst(ty_op.operand);
if (self.hasFeature(.popcnt)) {
@@ -4729,6 +4742,7 @@ fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, m
}
fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const src_ty = self.air.typeOf(ty_op.operand);
@@ -4738,7 +4752,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
switch (self.regExtraBits(src_ty)) {
0 => {},
else => |extra| try self.genBinOpMir(
- if (src_ty.isSignedInt()) .{ ._r, .sa } else .{ ._r, .sh },
+ if (src_ty.isSignedInt(mod)) .{ ._r, .sa } else .{ ._r, .sh },
src_ty,
dst_mcv,
.{ .immediate = extra },
@@ -4749,10 +4763,11 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
}
fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const src_ty = self.air.typeOf(ty_op.operand);
- const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+ const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
const src_mcv = try self.resolveInst(ty_op.operand);
const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false);
@@ -4847,7 +4862,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
switch (self.regExtraBits(src_ty)) {
0 => {},
else => |extra| try self.genBinOpMir(
- if (src_ty.isSignedInt()) .{ ._r, .sa } else .{ ._r, .sh },
+ if (src_ty.isSignedInt(mod)) .{ ._r, .sa } else .{ ._r, .sh },
src_ty,
dst_mcv,
.{ .immediate = extra },
@@ -4858,17 +4873,18 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
}
fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const tag = self.air.instructions.items(.tag)[inst];
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ty = self.air.typeOf(un_op);
- const abi_size: u32 = switch (ty.abiSize(self.target.*)) {
+ const abi_size: u32 = switch (ty.abiSize(mod)) {
1...16 => 16,
17...32 => 32,
else => return self.fail("TODO implement airFloatSign for {}", .{
ty.fmt(self.bin_file.options.module.?),
}),
};
- const scalar_bits = ty.scalarType().floatBits(self.target.*);
+ const scalar_bits = ty.scalarType(mod).floatBits(self.target.*);
const src_mcv = try self.resolveInst(un_op);
const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
@@ -4905,21 +4921,17 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
- var int_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_signed },
- .data = scalar_bits,
- };
var vec_pl = Type.Payload.Array{
.base = .{ .tag = .vector },
.data = .{
.len = @divExact(abi_size * 8, scalar_bits),
- .elem_type = Type.initPayload(&int_pl.base),
+ .elem_type = try mod.intType(.signed, scalar_bits),
},
};
const vec_ty = Type.initPayload(&vec_pl.base);
const sign_val = switch (tag) {
- .neg => try vec_ty.minInt(stack.get(), self.target.*),
- .fabs => try vec_ty.maxInt(stack.get(), self.target.*),
+ .neg => try vec_ty.minInt(stack.get(), mod),
+ .fabs => try vec_ty.maxInt(stack.get(), mod),
else => unreachable,
};
@@ -5008,17 +5020,18 @@ fn airRound(self: *Self, inst: Air.Inst.Index, mode: u4) !void {
}
fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4) !void {
+ const mod = self.bin_file.options.module.?;
if (!self.hasFeature(.sse4_1))
return self.fail("TODO implement genRound without sse4_1 feature", .{});
- const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag()) {
+ const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) {
.Float => switch (ty.floatBits(self.target.*)) {
32 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
64 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType().zigTypeTag()) {
+ .Vector => switch (ty.childType().zigTypeTag(mod)) {
.Float => switch (ty.childType().floatBits(self.target.*)) {
32 => switch (ty.vectorLen()) {
1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
@@ -5041,7 +5054,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4
})) |tag| tag else return self.fail("TODO implement genRound for {}", .{
ty.fmt(self.bin_file.options.module.?),
});
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ const abi_size = @intCast(u32, ty.abiSize(mod));
const dst_alias = registerAlias(dst_reg, abi_size);
switch (mir_tag[0]) {
.v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate(
@@ -5078,9 +5091,10 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4
}
fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ty = self.air.typeOf(un_op);
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ const abi_size = @intCast(u32, ty.abiSize(mod));
const src_mcv = try self.resolveInst(un_op);
const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv))
@@ -5092,7 +5106,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
const result: MCValue = result: {
- const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag()) {
+ const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) {
.Float => switch (ty.floatBits(self.target.*)) {
16 => if (self.hasFeature(.f16c)) {
const mat_src_reg = if (src_mcv.isRegister())
@@ -5114,7 +5128,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType().zigTypeTag()) {
+ .Vector => switch (ty.childType().zigTypeTag(mod)) {
.Float => switch (ty.childType().floatBits(self.target.*)) {
16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen()) {
1 => {
@@ -5186,7 +5200,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
},
else => unreachable,
})) |tag| tag else return self.fail("TODO implement airSqrt for {}", .{
- ty.fmt(self.bin_file.options.module.?),
+ ty.fmt(mod),
});
switch (mir_tag[0]) {
.v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory(
@@ -5274,10 +5288,11 @@ fn reuseOperandAdvanced(
}
fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
+ const mod = self.bin_file.options.module.?;
const ptr_info = ptr_ty.ptrInfo().data;
const val_ty = ptr_info.pointee_type;
- const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*));
+ const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
const limb_abi_size: u32 = @min(val_abi_size, 8);
const limb_abi_bits = limb_abi_size * 8;
const val_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size);
@@ -5382,20 +5397,21 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerErro
}
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
const result: MCValue = result: {
- if (!elem_ty.hasRuntimeBitsIgnoreComptime()) break :result .none;
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx });
defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
const ptr_ty = self.air.typeOf(ty_op.operand);
- const elem_size = elem_ty.abiSize(self.target.*);
+ const elem_size = elem_ty.abiSize(mod);
- const elem_rc = regClassForType(elem_ty);
- const ptr_rc = regClassForType(ptr_ty);
+ const elem_rc = regClassForType(elem_ty, mod);
+ const ptr_rc = regClassForType(ptr_ty, mod);
const ptr_mcv = try self.resolveInst(ty_op.operand);
const dst_mcv = if (elem_size <= 8 and elem_rc.supersetOf(ptr_rc) and
@@ -5416,13 +5432,14 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
}
fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void {
+ const mod = self.bin_file.options.module.?;
const ptr_info = ptr_ty.ptrInfo().data;
const src_ty = ptr_ty.childType();
const limb_abi_size: u16 = @min(ptr_info.host_size, 8);
const limb_abi_bits = limb_abi_size * 8;
- const src_bit_size = src_ty.bitSize(self.target.*);
+ const src_bit_size = src_ty.bitSize(mod);
const src_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size);
const src_bit_off = ptr_info.bit_offset % limb_abi_bits;
@@ -5555,14 +5572,15 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
}
fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
+ const mod = self.bin_file.options.module.?;
const ptr_field_ty = self.air.typeOfIndex(inst);
const ptr_container_ty = self.air.typeOf(operand);
const container_ty = ptr_container_ty.childType();
const field_offset = @intCast(i32, switch (container_ty.containerLayout()) {
- .Auto, .Extern => container_ty.structFieldOffset(index, self.target.*),
- .Packed => if (container_ty.zigTypeTag() == .Struct and
+ .Auto, .Extern => container_ty.structFieldOffset(index, mod),
+ .Packed => if (container_ty.zigTypeTag(mod) == .Struct and
ptr_field_ty.ptrInfo().data.host_size == 0)
- container_ty.packedStructFieldByteOffset(index, self.target.*)
+ container_ty.packedStructFieldByteOffset(index, mod)
else
0,
});
@@ -5577,6 +5595,7 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
}
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
const result: MCValue = result: {
@@ -5584,17 +5603,17 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const index = extra.field_index;
const container_ty = self.air.typeOf(operand);
- const container_rc = regClassForType(container_ty);
+ const container_rc = regClassForType(container_ty, mod);
const field_ty = container_ty.structFieldType(index);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) break :result .none;
- const field_rc = regClassForType(field_ty);
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
+ const field_rc = regClassForType(field_ty, mod);
const field_is_gp = field_rc.supersetOf(gp);
const src_mcv = try self.resolveInst(operand);
const field_off = switch (container_ty.containerLayout()) {
- .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, self.target.*) * 8),
+ .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, mod) * 8),
.Packed => if (container_ty.castTag(.@"struct")) |struct_obj|
- struct_obj.data.packedFieldBitOffset(self.target.*, index)
+ struct_obj.data.packedFieldBitOffset(mod, index)
else
0,
};
@@ -5611,7 +5630,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
break :result dst_mcv;
}
- const field_abi_size = @intCast(u32, field_ty.abiSize(self.target.*));
+ const field_abi_size = @intCast(u32, field_ty.abiSize(mod));
const limb_abi_size: u32 = @min(field_abi_size, 8);
const limb_abi_bits = limb_abi_size * 8;
const field_byte_off = @intCast(i32, field_off / limb_abi_bits * limb_abi_size);
@@ -5733,12 +5752,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
}
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const inst_ty = self.air.typeOfIndex(inst);
const parent_ty = inst_ty.childType();
- const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, self.target.*));
+ const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod));
const src_mcv = try self.resolveInst(extra.field_ptr);
const dst_mcv = if (src_mcv.isRegisterOffset() and
@@ -5751,9 +5771,10 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
}
fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue {
+ const mod = self.bin_file.options.module.?;
const src_ty = self.air.typeOf(src_air);
const src_mcv = try self.resolveInst(src_air);
- if (src_ty.zigTypeTag() == .Vector) {
+ if (src_ty.zigTypeTag(mod) == .Vector) {
return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)});
}
@@ -5786,28 +5807,22 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
switch (tag) {
.not => {
- const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(self.target.*), 8));
+ const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(mod), 8));
const int_info = if (src_ty.tag() == .bool)
std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 }
else
- src_ty.intInfo(self.target.*);
+ src_ty.intInfo(mod);
var byte_off: i32 = 0;
while (byte_off * 8 < int_info.bits) : (byte_off += limb_abi_size) {
- var limb_pl = Type.Payload.Bits{
- .base = .{ .tag = switch (int_info.signedness) {
- .signed => .int_signed,
- .unsigned => .int_unsigned,
- } },
- .data = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8)),
- };
- const limb_ty = Type.initPayload(&limb_pl.base);
+ const limb_bits = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8));
+ const limb_ty = try mod.intType(int_info.signedness, limb_bits);
const limb_mcv = switch (byte_off) {
0 => dst_mcv,
else => dst_mcv.address().offset(byte_off).deref(),
};
- if (limb_pl.base.tag == .int_unsigned and self.regExtraBits(limb_ty) > 0) {
- const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_pl.data);
+ if (int_info.signedness == .unsigned and self.regExtraBits(limb_ty) > 0) {
+ const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_bits);
try self.genBinOpMir(.{ ._, .xor }, limb_ty, limb_mcv, .{ .immediate = mask });
} else try self.genUnOpMir(.{ ._, .not }, limb_ty, limb_mcv);
}
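One detail worth separating from the parameter swap above: the rewrite also drops the stack-allocated Type.Payload.Bits idiom in favor of mod.intType, which interns the integer type in the Module (and can therefore fail). A hedged sketch of just that idiom; the helper name limbIntType is made up:

fn limbIntType(mod: *Module, limb_bits: u16) !Type {
    // Old spelling, visible in the removed lines above: a Type backed by a stack-local
    // payload, only valid while `pl` stays alive in the caller's frame:
    //   var pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = limb_bits };
    //   const ty = Type.initPayload(&pl.base);
    // New spelling: the Module owns the interned type, so it can be returned freely.
    return mod.intType(.unsigned, limb_bits);
}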
@@ -5819,7 +5834,8 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
}
fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void {
- const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+ const mod = self.bin_file.options.module.?;
+ const abi_size = @intCast(u32, dst_ty.abiSize(mod));
if (abi_size > 8) return self.fail("TODO implement {} for {}", .{
mir_tag,
dst_ty.fmt(self.bin_file.options.module.?),
@@ -5866,6 +5882,7 @@ fn genShiftBinOpMir(
lhs_mcv: MCValue,
shift_mcv: MCValue,
) !void {
+ const mod = self.bin_file.options.module.?;
const rhs_mcv: MCValue = rhs: {
switch (shift_mcv) {
.immediate => |imm| switch (imm) {
@@ -5880,7 +5897,7 @@ fn genShiftBinOpMir(
break :rhs .{ .register = .rcx };
};
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ const abi_size = @intCast(u32, ty.abiSize(mod));
if (abi_size <= 8) {
switch (lhs_mcv) {
.register => |lhs_reg| switch (rhs_mcv) {
@@ -6099,13 +6116,14 @@ fn genShiftBinOp(
lhs_ty: Type,
rhs_ty: Type,
) !MCValue {
- if (lhs_ty.zigTypeTag() == .Vector) {
+ const mod = self.bin_file.options.module.?;
+ if (lhs_ty.zigTypeTag(mod) == .Vector) {
return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()});
}
- assert(rhs_ty.abiSize(self.target.*) == 1);
+ assert(rhs_ty.abiSize(mod) == 1);
- const lhs_abi_size = lhs_ty.abiSize(self.target.*);
+ const lhs_abi_size = lhs_ty.abiSize(mod);
if (lhs_abi_size > 16) {
return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()});
}
@@ -6136,7 +6154,7 @@ fn genShiftBinOp(
break :dst dst_mcv;
};
- const signedness = lhs_ty.intInfo(self.target.*).signedness;
+ const signedness = lhs_ty.intInfo(mod).signedness;
try self.genShiftBinOpMir(switch (air_tag) {
.shl, .shl_exact => switch (signedness) {
.signed => .{ ._l, .sa },
@@ -6163,11 +6181,12 @@ fn genMulDivBinOp(
lhs: MCValue,
rhs: MCValue,
) !MCValue {
- if (dst_ty.zigTypeTag() == .Vector or dst_ty.zigTypeTag() == .Float) {
+ const mod = self.bin_file.options.module.?;
+ if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) {
return self.fail("TODO implement genMulDivBinOp for {}", .{dst_ty.fmtDebug()});
}
- const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
- const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+ const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
+ const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
if (switch (tag) {
else => unreachable,
.mul, .mulwrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2,
@@ -6184,7 +6203,7 @@ fn genMulDivBinOp(
const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx });
defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);
- const signedness = ty.intInfo(self.target.*).signedness;
+ const signedness = ty.intInfo(mod).signedness;
switch (tag) {
.mul,
.mulwrap,
@@ -6338,13 +6357,14 @@ fn genBinOp(
lhs_air: Air.Inst.Ref,
rhs_air: Air.Inst.Ref,
) !MCValue {
+ const mod = self.bin_file.options.module.?;
const lhs_ty = self.air.typeOf(lhs_air);
const rhs_ty = self.air.typeOf(rhs_air);
- const abi_size = @intCast(u32, lhs_ty.abiSize(self.target.*));
+ const abi_size = @intCast(u32, lhs_ty.abiSize(mod));
const maybe_mask_reg = switch (air_tag) {
else => null,
- .max, .min => if (lhs_ty.scalarType().isRuntimeFloat()) registerAlias(
+ .max, .min => if (lhs_ty.scalarType(mod).isRuntimeFloat()) registerAlias(
if (!self.hasFeature(.avx) and self.hasFeature(.sse4_1)) mask: {
try self.register_manager.getReg(.xmm0, null);
break :mask .xmm0;
@@ -6384,7 +6404,7 @@ fn genBinOp(
else => false,
};
- const vec_op = switch (lhs_ty.zigTypeTag()) {
+ const vec_op = switch (lhs_ty.zigTypeTag(mod)) {
else => false,
.Float, .Vector => true,
};
@@ -6456,7 +6476,7 @@ fn genBinOp(
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);
- const elem_size = lhs_ty.elemType2().abiSize(self.target.*);
+ const elem_size = lhs_ty.elemType2(mod).abiSize(mod);
try self.genIntMulComplexOpMir(rhs_ty, tmp_mcv, .{ .immediate = elem_size });
try self.genBinOpMir(
switch (air_tag) {
@@ -6506,7 +6526,7 @@ fn genBinOp(
try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, dst_mcv, mat_src_mcv);
- const int_info = lhs_ty.intInfo(self.target.*);
+ const int_info = lhs_ty.intInfo(mod);
const cc: Condition = switch (int_info.signedness) {
.unsigned => switch (air_tag) {
.min => .a,
@@ -6520,7 +6540,7 @@ fn genBinOp(
},
};
- const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(self.target.*)), 2);
+ const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(mod)), 2);
const tmp_reg = switch (dst_mcv) {
.register => |reg| reg,
else => try self.copyToTmpRegister(lhs_ty, dst_mcv),
@@ -6581,7 +6601,7 @@ fn genBinOp(
}
const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size);
- const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+ const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
else => unreachable,
.Float => switch (lhs_ty.floatBits(self.target.*)) {
16 => if (self.hasFeature(.f16c)) {
@@ -6657,9 +6677,9 @@ fn genBinOp(
80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag()) {
+ .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
else => null,
- .Int => switch (lhs_ty.childType().intInfo(self.target.*).bits) {
+ .Int => switch (lhs_ty.childType().intInfo(mod).bits) {
8 => switch (lhs_ty.vectorLen()) {
1...16 => switch (air_tag) {
.add,
@@ -6671,7 +6691,7 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
- .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+ .min => switch (lhs_ty.childType().intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_b, .mins }
else if (self.hasFeature(.sse4_1))
@@ -6685,7 +6705,7 @@ fn genBinOp(
else
null,
},
- .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+ .max => switch (lhs_ty.childType().intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_b, .maxs }
else if (self.hasFeature(.sse4_1))
@@ -6711,11 +6731,11 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
.xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
- .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+ .min => switch (lhs_ty.childType().intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_b, .mins } else null,
.unsigned => if (self.hasFeature(.avx)) .{ .vp_b, .minu } else null,
},
- .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+ .max => switch (lhs_ty.childType().intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null,
},
@@ -6737,7 +6757,7 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
- .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+ .min => switch (lhs_ty.childType().intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_w, .mins }
else
@@ -6747,7 +6767,7 @@ fn genBinOp(
else
.{ .p_w, .minu },
},
- .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+ .max => switch (lhs_ty.childType().intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_w, .maxs }
else
@@ -6772,11 +6792,11 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
.xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
- .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+ .min => switch (lhs_ty.childType().intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_w, .mins } else null,
.unsigned => if (self.hasFeature(.avx)) .{ .vp_w, .minu } else null,
},
- .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+ .max => switch (lhs_ty.childType().intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null,
},
@@ -6803,7 +6823,7 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
- .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+ .min => switch (lhs_ty.childType().intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_d, .mins }
else if (self.hasFeature(.sse4_1))
@@ -6817,7 +6837,7 @@ fn genBinOp(
else
null,
},
- .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+ .max => switch (lhs_ty.childType().intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx))
.{ .vp_d, .maxs }
else if (self.hasFeature(.sse4_1))
@@ -6846,11 +6866,11 @@ fn genBinOp(
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
.xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
- .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+ .min => switch (lhs_ty.childType().intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_d, .mins } else null,
.unsigned => if (self.hasFeature(.avx)) .{ .vp_d, .minu } else null,
},
- .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+ .max => switch (lhs_ty.childType().intInfo(mod).signedness) {
.signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null,
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null,
},
@@ -7206,14 +7226,14 @@ fn genBinOp(
const rhs_copy_reg = registerAlias(src_mcv.getReg().?, abi_size);
try self.asmRegisterRegisterRegisterImmediate(
- if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+ if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
.Float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ .v_ss, .cmp },
64 => .{ .v_sd, .cmp },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag()) {
+ .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
.Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen()) {
1 => .{ .v_ss, .cmp },
@@ -7240,14 +7260,14 @@ fn genBinOp(
Immediate.u(3), // unord
);
try self.asmRegisterRegisterRegisterRegister(
- if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+ if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
.Float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ .v_ps, .blendv },
64 => .{ .v_pd, .blendv },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag()) {
+ .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
.Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen()) {
1...8 => .{ .v_ps, .blendv },
@@ -7274,14 +7294,14 @@ fn genBinOp(
} else {
const has_blend = self.hasFeature(.sse4_1);
try self.asmRegisterRegisterImmediate(
- if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+ if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
.Float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ ._ss, .cmp },
64 => .{ ._sd, .cmp },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag()) {
+ .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
.Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen()) {
1 => .{ ._ss, .cmp },
@@ -7307,14 +7327,14 @@ fn genBinOp(
Immediate.u(if (has_blend) 3 else 7), // unord, ord
);
if (has_blend) try self.asmRegisterRegisterRegister(
- if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+ if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
.Float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ ._ps, .blendv },
64 => .{ ._pd, .blendv },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag()) {
+ .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
.Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen()) {
1...4 => .{ ._ps, .blendv },
@@ -7338,14 +7358,14 @@ fn genBinOp(
mask_reg,
) else {
try self.asmRegisterRegister(
- if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+ if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
.Float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ ._ps, .@"and" },
64 => .{ ._pd, .@"and" },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag()) {
+ .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
.Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen()) {
1...4 => .{ ._ps, .@"and" },
@@ -7368,14 +7388,14 @@ fn genBinOp(
mask_reg,
);
try self.asmRegisterRegister(
- if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+ if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
.Float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ ._ps, .andn },
64 => .{ ._pd, .andn },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag()) {
+ .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
.Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen()) {
1...4 => .{ ._ps, .andn },
@@ -7398,14 +7418,14 @@ fn genBinOp(
lhs_copy_reg.?,
);
try self.asmRegisterRegister(
- if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+ if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
.Float => switch (lhs_ty.floatBits(self.target.*)) {
32 => .{ ._ps, .@"or" },
64 => .{ ._pd, .@"or" },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (lhs_ty.childType().zigTypeTag()) {
+ .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
.Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
32 => switch (lhs_ty.vectorLen()) {
1...4 => .{ ._ps, .@"or" },
@@ -7442,7 +7462,8 @@ fn genBinOpMir(
dst_mcv: MCValue,
src_mcv: MCValue,
) !void {
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ const mod = self.bin_file.options.module.?;
+ const abi_size = @intCast(u32, ty.abiSize(mod));
switch (dst_mcv) {
.none,
.unreach,
@@ -7640,7 +7661,7 @@ fn genBinOpMir(
defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock);
const ty_signedness =
- if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned;
+ if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned;
const limb_ty = if (abi_size <= 8) ty else switch (ty_signedness) {
.signed => Type.usize,
.unsigned => Type.isize,
@@ -7796,7 +7817,8 @@ fn genBinOpMir(
/// Performs multi-operand integer multiplication between dst_mcv and src_mcv, storing the result in dst_mcv.
/// Does not support byte-size operands.
fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void {
- const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+ const mod = self.bin_file.options.module.?;
+ const abi_size = @intCast(u32, dst_ty.abiSize(mod));
switch (dst_mcv) {
.none,
.unreach,
@@ -8022,6 +8044,7 @@ fn airFence(self: *Self, inst: Air.Inst.Index) !void {
}
fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void {
+ const mod = self.bin_file.options.module.?;
if (modifier == .always_tail) return self.fail("TODO implement tail calls for x86_64", .{});
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const callee = pl_op.operand;
@@ -8029,7 +8052,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const ty = self.air.typeOf(callee);
- const fn_ty = switch (ty.zigTypeTag()) {
+ const fn_ty = switch (ty.zigTypeTag(mod)) {
.Fn => ty,
.Pointer => ty.childType(),
else => unreachable,
@@ -8077,7 +8100,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.none, .unreach => null,
.indirect => |reg_off| lock: {
const ret_ty = fn_ty.fnReturnType();
- const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, self.target.*));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, mod));
try self.genSetReg(reg_off.reg, Type.usize, .{
.lea_frame = .{ .index = frame_index, .off = -reg_off.off },
});
@@ -8100,8 +8123,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
- const mod = self.bin_file.options.module.?;
- if (self.air.value(callee)) |func_value| {
+ if (self.air.value(callee, mod)) |func_value| {
if (if (func_value.castTag(.function)) |func_payload|
func_payload.data.owner_decl
else if (func_value.castTag(.decl_ref)) |decl_ref_payload|
@@ -8178,7 +8200,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else {
- assert(ty.zigTypeTag() == .Pointer);
+ assert(ty.zigTypeTag(mod) == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(.rax, Type.usize, mcv);
try self.asmRegister(.{ ._, .call }, .rax);
@@ -8234,6 +8256,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
}
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
+ const mod = self.bin_file.options.module.?;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ty = self.air.typeOf(bin_op.lhs);
@@ -8255,9 +8278,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
const result = MCValue{
- .eflags = switch (ty.zigTypeTag()) {
+ .eflags = switch (ty.zigTypeTag(mod)) {
else => result: {
- const abi_size = @intCast(u16, ty.abiSize(self.target.*));
+ const abi_size = @intCast(u16, ty.abiSize(mod));
const may_flip: enum {
may_flip,
must_flip,
@@ -8290,7 +8313,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
break :result Condition.fromCompareOperator(
- if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned,
+ if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned,
result_op: {
const flipped_op = if (flipped) op.reverse() else op;
if (abi_size > 8) switch (flipped_op) {
@@ -8404,7 +8427,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp2_reg, tmp1_reg);
try self.genBinOpMir(.{ ._ss, .ucomi }, ty, tmp1_mcv, tmp2_mcv);
} else return self.fail("TODO implement airCmp for {}", .{
- ty.fmt(self.bin_file.options.module.?),
+ ty.fmt(mod),
}),
32 => try self.genBinOpMir(
.{ ._ss, .ucomi },
@@ -8419,7 +8442,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
src_mcv,
),
else => return self.fail("TODO implement airCmp for {}", .{
- ty.fmt(self.bin_file.options.module.?),
+ ty.fmt(mod),
}),
}
@@ -8454,7 +8477,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
self.eflags_inst = inst;
const op_ty = self.air.typeOf(un_op);
- const op_abi_size = @intCast(u32, op_ty.abiSize(self.target.*));
+ const op_abi_size = @intCast(u32, op_ty.abiSize(mod));
const op_mcv = try self.resolveInst(un_op);
const dst_reg = switch (op_mcv) {
.register => |reg| reg,
@@ -8573,7 +8596,8 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
}
fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 {
- const abi_size = ty.abiSize(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ const abi_size = ty.abiSize(mod);
switch (mcv) {
.eflags => |cc| {
// Here we map the opposites since the jump is to the false branch.
@@ -8646,6 +8670,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
}
fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue {
+ const mod = self.bin_file.options.module.?;
switch (opt_mcv) {
.register_overflow => |ro| return .{ .eflags = ro.eflags.negate() },
else => {},
@@ -8658,10 +8683,10 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
const pl_ty = opt_ty.optionalChild(&pl_buf);
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload())
+ const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
.{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty }
else
- .{ .off = @intCast(i32, pl_ty.abiSize(self.target.*)), .ty = Type.bool };
+ .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool };
switch (opt_mcv) {
.none,
@@ -8681,14 +8706,14 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
.register => |opt_reg| {
if (some_info.off == 0) {
- const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*));
+ const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
const alias_reg = registerAlias(opt_reg, some_abi_size);
assert(some_abi_size * 8 == alias_reg.bitSize());
try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg);
return .{ .eflags = .z };
}
assert(some_info.ty.tag() == .bool);
- const opt_abi_size = @intCast(u32, opt_ty.abiSize(self.target.*));
+ const opt_abi_size = @intCast(u32, opt_ty.abiSize(mod));
try self.asmRegisterImmediate(
.{ ._, .bt },
registerAlias(opt_reg, opt_abi_size),
@@ -8707,7 +8732,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
defer self.register_manager.unlockReg(addr_reg_lock);
try self.genSetReg(addr_reg, Type.usize, opt_mcv.address());
- const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*));
+ const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
try self.asmMemoryImmediate(
.{ ._, .cmp },
Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{
@@ -8720,7 +8745,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
},
.indirect, .load_frame => {
- const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*));
+ const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
try self.asmMemoryImmediate(
.{ ._, .cmp },
Memory.sib(Memory.PtrSize.fromSize(some_abi_size), switch (opt_mcv) {
@@ -8742,6 +8767,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
}
fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue {
+ const mod = self.bin_file.options.module.?;
try self.spillEflagsIfOccupied();
self.eflags_inst = inst;
@@ -8750,10 +8776,10 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
const pl_ty = opt_ty.optionalChild(&pl_buf);
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload())
+ const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
.{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty }
else
- .{ .off = @intCast(i32, pl_ty.abiSize(self.target.*)), .ty = Type.bool };
+ .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool };
const ptr_reg = switch (ptr_mcv) {
.register => |reg| reg,
@@ -8762,7 +8788,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
const ptr_lock = self.register_manager.lockReg(ptr_reg);
defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
- const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*));
+ const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
try self.asmMemoryImmediate(
.{ ._, .cmp },
Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{
@@ -8775,6 +8801,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
}
fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !MCValue {
+ const mod = self.bin_file.options.module.?;
const err_type = ty.errorUnionSet();
if (err_type.errorSetIsEmpty()) {
@@ -8786,7 +8813,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !
self.eflags_inst = inst;
}
- const err_off = errUnionErrorOffset(ty.errorUnionPayload(), self.target.*);
+ const err_off = errUnionErrorOffset(ty.errorUnionPayload(), mod);
switch (operand) {
.register => |reg| {
const eu_lock = self.register_manager.lockReg(reg);
@@ -9088,12 +9115,13 @@ fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void {
}
fn airBr(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const br = self.air.instructions.items(.data)[inst].br;
const src_mcv = try self.resolveInst(br.operand);
const block_ty = self.air.typeOfIndex(br.block_inst);
const block_unused =
- !block_ty.hasRuntimeBitsIgnoreComptime() or self.liveness.isUnused(br.block_inst);
+ !block_ty.hasRuntimeBitsIgnoreComptime(mod) or self.liveness.isUnused(br.block_inst);
const block_tracking = self.inst_tracking.getPtr(br.block_inst).?;
const block_data = self.blocks.getPtr(br.block_inst).?;
const first_br = block_data.relocs.items.len == 0;
@@ -9402,7 +9430,8 @@ const MoveStrategy = union(enum) {
};
};
fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
- switch (ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ switch (ty.zigTypeTag(mod)) {
else => return .{ .move = .{ ._, .mov } },
.Float => switch (ty.floatBits(self.target.*)) {
16 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{
@@ -9419,8 +9448,8 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
else => {},
},
- .Vector => switch (ty.childType().zigTypeTag()) {
- .Int => switch (ty.childType().intInfo(self.target.*).bits) {
+ .Vector => switch (ty.childType().zigTypeTag(mod)) {
+ .Int => switch (ty.childType().intInfo(mod).bits) {
8 => switch (ty.vectorLen()) {
1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{
.insert = .{ .vp_b, .insr },
@@ -9647,7 +9676,8 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError
}
fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerError!void {
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ const mod = self.bin_file.options.module.?;
+ const abi_size = @intCast(u32, ty.abiSize(mod));
if (abi_size * 8 > dst_reg.bitSize())
return self.fail("genSetReg called with a value larger than dst_reg", .{});
switch (src_mcv) {
@@ -9730,7 +9760,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
.{ .register = try self.copyToTmpRegister(ty, src_mcv) },
),
.sse => try self.asmRegisterRegister(
- if (@as(?Mir.Inst.FixedTag, switch (ty.scalarType().zigTypeTag()) {
+ if (@as(?Mir.Inst.FixedTag, switch (ty.scalarType(mod).zigTypeTag(mod)) {
else => switch (abi_size) {
1...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov },
5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov },
@@ -9738,7 +9768,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
17...32 => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else null,
else => null,
},
- .Float => switch (ty.scalarType().floatBits(self.target.*)) {
+ .Float => switch (ty.scalarType(mod).floatBits(self.target.*)) {
16, 128 => switch (abi_size) {
2...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov },
5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov },
@@ -9789,7 +9819,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
.indirect => try self.moveStrategy(ty, false),
.load_frame => |frame_addr| try self.moveStrategy(
ty,
- self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(self.target.*),
+ self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(mod),
),
.lea_frame => .{ .move = .{ ._, .lea } },
else => unreachable,
@@ -9821,7 +9851,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
switch (try self.moveStrategy(ty, mem.isAlignedGeneric(
u32,
@bitCast(u32, small_addr),
- ty.abiAlignment(self.target.*),
+ ty.abiAlignment(mod),
))) {
.move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem),
.insert_extract => |ie| try self.asmRegisterMemoryImmediate(
@@ -9839,7 +9869,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
),
}
},
- .load_direct => |sym_index| switch (ty.zigTypeTag()) {
+ .load_direct => |sym_index| switch (ty.zigTypeTag(mod)) {
else => {
const atom_index = try self.owner.getSymbolIndex(self);
_ = try self.addInst(.{
@@ -9933,7 +9963,8 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
}
fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCValue) InnerError!void {
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ const mod = self.bin_file.options.module.?;
+ const abi_size = @intCast(u32, ty.abiSize(mod));
const dst_ptr_mcv: MCValue = switch (base) {
.none => .{ .immediate = @bitCast(u64, @as(i64, disp)) },
.reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } },
@@ -9945,7 +9976,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
try self.genInlineMemset(dst_ptr_mcv, .{ .immediate = 0xaa }, .{ .immediate = abi_size }),
.immediate => |imm| switch (abi_size) {
1, 2, 4 => {
- const immediate = if (ty.isSignedInt())
+ const immediate = if (ty.isSignedInt(mod))
Immediate.s(@truncate(i32, @bitCast(i64, imm)))
else
Immediate.u(@intCast(u32, imm));
@@ -9967,7 +9998,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
while (offset < abi_size) : (offset += 4) try self.asmMemoryImmediate(
.{ ._, .mov },
Memory.sib(.dword, .{ .base = base, .disp = disp + offset }),
- if (ty.isSignedInt())
+ if (ty.isSignedInt(mod))
Immediate.s(@truncate(
i32,
@bitCast(i64, imm) >> (math.cast(u6, offset * 8) orelse 63),
@@ -9991,19 +10022,19 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
.none => mem.isAlignedGeneric(
u32,
@bitCast(u32, disp),
- ty.abiAlignment(self.target.*),
+ ty.abiAlignment(mod),
),
.reg => |reg| switch (reg) {
.es, .cs, .ss, .ds => mem.isAlignedGeneric(
u32,
@bitCast(u32, disp),
- ty.abiAlignment(self.target.*),
+ ty.abiAlignment(mod),
),
else => false,
},
.frame => |frame_index| self.getFrameAddrAlignment(
.{ .index = frame_index, .off = disp },
- ) >= ty.abiAlignment(self.target.*),
+ ) >= ty.abiAlignment(mod),
})) {
.move => |tag| try self.asmMemoryRegister(tag, dst_mem, src_alias),
.insert_extract, .vex_insert_extract => |ie| try self.asmMemoryRegisterImmediate(
@@ -10017,13 +10048,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
.register_overflow => |ro| {
try self.genSetMem(
base,
- disp + @intCast(i32, ty.structFieldOffset(0, self.target.*)),
+ disp + @intCast(i32, ty.structFieldOffset(0, mod)),
ty.structFieldType(0),
.{ .register = ro.reg },
);
try self.genSetMem(
base,
- disp + @intCast(i32, ty.structFieldOffset(1, self.target.*)),
+ disp + @intCast(i32, ty.structFieldOffset(1, mod)),
ty.structFieldType(1),
.{ .eflags = ro.eflags },
);
@@ -10146,13 +10177,14 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void {
}
fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const dst_ty = self.air.typeOfIndex(inst);
const src_ty = self.air.typeOf(ty_op.operand);
const result = result: {
- const dst_rc = regClassForType(dst_ty);
- const src_rc = regClassForType(src_ty);
+ const dst_rc = regClassForType(dst_ty, mod);
+ const src_rc = regClassForType(src_ty, mod);
const src_mcv = try self.resolveInst(ty_op.operand);
const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
@@ -10172,13 +10204,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
};
const dst_signedness =
- if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned;
+ if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned;
const src_signedness =
- if (src_ty.isAbiInt()) src_ty.intInfo(self.target.*).signedness else .unsigned;
+ if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
if (dst_signedness == src_signedness) break :result dst_mcv;
- const abi_size = @intCast(u16, dst_ty.abiSize(self.target.*));
- const bit_size = @intCast(u16, dst_ty.bitSize(self.target.*));
+ const abi_size = @intCast(u16, dst_ty.abiSize(mod));
+ const bit_size = @intCast(u16, dst_ty.bitSize(mod));
if (abi_size * 8 <= bit_size) break :result dst_mcv;
const dst_limbs_len = math.divCeil(i32, bit_size, 64) catch unreachable;
@@ -10192,14 +10224,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
const high_lock = self.register_manager.lockReg(high_reg);
defer if (high_lock) |lock| self.register_manager.unlockReg(lock);
- var high_pl = Type.Payload.Bits{
- .base = .{ .tag = switch (dst_signedness) {
- .signed => .int_signed,
- .unsigned => .int_unsigned,
- } },
- .data = bit_size % 64,
- };
- const high_ty = Type.initPayload(&high_pl.base);
+ const high_ty = try mod.intType(dst_signedness, bit_size % 64);
try self.truncateRegister(high_ty, high_reg);
if (!dst_mcv.isRegister()) try self.genCopy(
@@ -10213,6 +10238,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
}
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const slice_ty = self.air.typeOfIndex(inst);
@@ -10221,11 +10247,11 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const array_ty = ptr_ty.childType();
const array_len = array_ty.arrayLen();
- const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, self.target.*));
+ const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod));
try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr);
try self.genSetMem(
.{ .frame = frame_index },
- @intCast(i32, ptr_ty.abiSize(self.target.*)),
+ @intCast(i32, ptr_ty.abiSize(mod)),
Type.usize,
.{ .immediate = array_len },
);
@@ -10235,12 +10261,13 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
}
fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const src_ty = self.air.typeOf(ty_op.operand);
- const src_bits = @intCast(u32, src_ty.bitSize(self.target.*));
+ const src_bits = @intCast(u32, src_ty.bitSize(mod));
const src_signedness =
- if (src_ty.isAbiInt()) src_ty.intInfo(self.target.*).signedness else .unsigned;
+ if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
const dst_ty = self.air.typeOfIndex(inst);
const src_size = math.divCeil(u32, @max(switch (src_signedness) {
@@ -10248,7 +10275,7 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void {
.unsigned => src_bits + 1,
}, 32), 8) catch unreachable;
if (src_size > 8) return self.fail("TODO implement airIntToFloat from {} to {}", .{
- src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?),
+ src_ty.fmt(mod), dst_ty.fmt(mod),
});
const src_mcv = try self.resolveInst(ty_op.operand);
@@ -10261,12 +10288,12 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void {
if (src_bits < src_size * 8) try self.truncateRegister(src_ty, src_reg);
- const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty));
+ const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod));
const dst_mcv = MCValue{ .register = dst_reg };
const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
defer self.register_manager.unlockReg(dst_lock);
- const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag()) {
+ const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(mod)) {
.Float => switch (dst_ty.floatBits(self.target.*)) {
32 => if (self.hasFeature(.avx)) .{ .v_ss, .cvtsi2 } else .{ ._ss, .cvtsi2 },
64 => if (self.hasFeature(.avx)) .{ .v_sd, .cvtsi2 } else .{ ._sd, .cvtsi2 },
@@ -10275,7 +10302,7 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void {
},
else => null,
})) |tag| tag else return self.fail("TODO implement airIntToFloat from {} to {}", .{
- src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?),
+ src_ty.fmt(mod), dst_ty.fmt(mod),
});
const dst_alias = dst_reg.to128();
const src_alias = registerAlias(src_reg, src_size);
@@ -10288,13 +10315,14 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void {
}
fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const src_ty = self.air.typeOf(ty_op.operand);
const dst_ty = self.air.typeOfIndex(inst);
- const dst_bits = @intCast(u32, dst_ty.bitSize(self.target.*));
+ const dst_bits = @intCast(u32, dst_ty.bitSize(mod));
const dst_signedness =
- if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned;
+ if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned;
const dst_size = math.divCeil(u32, @max(switch (dst_signedness) {
.signed => dst_bits,
@@ -10312,13 +10340,13 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
defer self.register_manager.unlockReg(src_lock);
- const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty));
+ const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod));
const dst_mcv = MCValue{ .register = dst_reg };
const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
defer self.register_manager.unlockReg(dst_lock);
try self.asmRegisterRegister(
- if (@as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag()) {
+ if (@as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag(mod)) {
.Float => switch (src_ty.floatBits(self.target.*)) {
32 => if (self.hasFeature(.avx)) .{ .v_, .cvttss2si } else .{ ._, .cvttss2si },
64 => if (self.hasFeature(.avx)) .{ .v_, .cvttsd2si } else .{ ._, .cvttsd2si },
@@ -10339,12 +10367,13 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
}
fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
const ptr_ty = self.air.typeOf(extra.ptr);
const val_ty = self.air.typeOf(extra.expected_value);
- const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*));
+ const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx });
const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx });
@@ -10433,6 +10462,7 @@ fn atomicOp(
rmw_op: ?std.builtin.AtomicRmwOp,
order: std.builtin.AtomicOrder,
) InnerError!MCValue {
+ const mod = self.bin_file.options.module.?;
const ptr_lock = switch (ptr_mcv) {
.register => |reg| self.register_manager.lockReg(reg),
else => null,
@@ -10445,7 +10475,7 @@ fn atomicOp(
};
defer if (val_lock) |lock| self.register_manager.unlockReg(lock);
- const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*));
+ const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
const ptr_size = Memory.PtrSize.fromSize(val_abi_size);
const ptr_mem = switch (ptr_mcv) {
.immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size),
@@ -10539,8 +10569,8 @@ fn atomicOp(
.Or => try self.genBinOpMir(.{ ._, .@"or" }, val_ty, tmp_mcv, val_mcv),
.Xor => try self.genBinOpMir(.{ ._, .xor }, val_ty, tmp_mcv, val_mcv),
.Min, .Max => {
- const cc: Condition = switch (if (val_ty.isAbiInt())
- val_ty.intInfo(self.target.*).signedness
+ const cc: Condition = switch (if (val_ty.isAbiInt(mod))
+ val_ty.intInfo(mod).signedness
else
.unsigned) {
.unsigned => switch (op) {
@@ -10728,6 +10758,7 @@ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOr
}
fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
+ const mod = self.bin_file.options.module.?;
if (safety) {
// TODO if the value is undef, write 0xaa bytes to dest
} else {
@@ -10752,7 +10783,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
};
defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock);
- const elem_abi_size = @intCast(u31, elem_ty.abiSize(self.target.*));
+ const elem_abi_size = @intCast(u31, elem_ty.abiSize(mod));
if (elem_abi_size == 1) {
const ptr: MCValue = switch (dst_ptr_ty.ptrSize()) {
@@ -10897,8 +10928,8 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
// We need a properly aligned and sized call frame to be able to call this function.
{
const needed_call_frame = FrameAlloc.init(.{
- .size = inst_ty.abiSize(self.target.*),
- .alignment = inst_ty.abiAlignment(self.target.*),
+ .size = inst_ty.abiSize(mod),
+ .alignment = inst_ty.abiAlignment(mod),
});
const frame_allocs_slice = self.frame_allocs.slice();
const stack_frame_size =
@@ -11013,14 +11044,15 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
}
fn airSplat(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const vector_ty = self.air.typeOfIndex(inst);
- const dst_rc = regClassForType(vector_ty);
- const scalar_ty = vector_ty.scalarType();
+ const dst_rc = regClassForType(vector_ty, mod);
+ const scalar_ty = vector_ty.scalarType(mod);
const src_mcv = try self.resolveInst(ty_op.operand);
const result: MCValue = result: {
- switch (scalar_ty.zigTypeTag()) {
+ switch (scalar_ty.zigTypeTag(mod)) {
else => {},
.Float => switch (scalar_ty.floatBits(self.target.*)) {
32 => switch (vector_ty.vectorLen()) {
@@ -11233,36 +11265,37 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const result_ty = self.air.typeOfIndex(inst);
const len = @intCast(usize, result_ty.arrayLen());
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const result: MCValue = result: {
- switch (result_ty.zigTypeTag()) {
+ switch (result_ty.zigTypeTag(mod)) {
.Struct => {
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initType(result_ty, self.target.*));
+ try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod));
if (result_ty.containerLayout() == .Packed) {
const struct_obj = result_ty.castTag(.@"struct").?.data;
try self.genInlineMemset(
.{ .lea_frame = .{ .index = frame_index } },
.{ .immediate = 0 },
- .{ .immediate = result_ty.abiSize(self.target.*) },
+ .{ .immediate = result_ty.abiSize(mod) },
);
for (elements, 0..) |elem, elem_i| {
- if (result_ty.structFieldValueComptime(elem_i) != null) continue;
+ if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue;
const elem_ty = result_ty.structFieldType(elem_i);
- const elem_bit_size = @intCast(u32, elem_ty.bitSize(self.target.*));
+ const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod));
if (elem_bit_size > 64) {
return self.fail(
"TODO airAggregateInit implement packed structs with large fields",
.{},
);
}
- const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+ const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod));
const elem_abi_bits = elem_abi_size * 8;
- const elem_off = struct_obj.packedFieldBitOffset(self.target.*, elem_i);
+ const elem_off = struct_obj.packedFieldBitOffset(mod, elem_i);
const elem_byte_off = @intCast(i32, elem_off / elem_abi_bits * elem_abi_size);
const elem_bit_off = elem_off % elem_abi_bits;
const elem_mcv = try self.resolveInst(elem);
@@ -11322,10 +11355,10 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
}
}
} else for (elements, 0..) |elem, elem_i| {
- if (result_ty.structFieldValueComptime(elem_i) != null) continue;
+ if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue;
const elem_ty = result_ty.structFieldType(elem_i);
- const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, self.target.*));
+ const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod));
const elem_mcv = try self.resolveInst(elem);
const mat_elem_mcv = switch (elem_mcv) {
.load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index },
@@ -11337,9 +11370,9 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
},
.Array => {
const frame_index =
- try self.allocFrameIndex(FrameAlloc.initType(result_ty, self.target.*));
+ try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod));
const elem_ty = result_ty.childType();
- const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+ const elem_size = @intCast(u32, elem_ty.abiSize(mod));
for (elements, 0..) |elem, elem_i| {
const elem_mcv = try self.resolveInst(elem);
@@ -11374,11 +11407,12 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
}
fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
const result: MCValue = result: {
const union_ty = self.air.typeOfIndex(inst);
- const layout = union_ty.unionGetLayout(self.target.*);
+ const layout = union_ty.unionGetLayout(mod);
const src_ty = self.air.typeOf(extra.init);
const src_mcv = try self.resolveInst(extra.init);
@@ -11400,7 +11434,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const tag_val = Value.initPayload(&tag_pl.base);
var tag_int_pl: Value.Payload.U64 = undefined;
const tag_int_val = tag_val.enumToInt(tag_ty, &tag_int_pl);
- const tag_int = tag_int_val.toUnsignedInt(self.target.*);
+ const tag_int = tag_int_val.toUnsignedInt(mod);
const tag_off = if (layout.tag_align < layout.payload_align)
@intCast(i32, layout.payload_size)
else
@@ -11424,6 +11458,7 @@ fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void {
}
fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const ty = self.air.typeOfIndex(inst);
@@ -11466,14 +11501,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
const mir_tag = if (@as(
?Mir.Inst.FixedTag,
if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or mem.eql(u2, &order, &.{ 3, 1, 2 }))
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Float => switch (ty.floatBits(self.target.*)) {
32 => .{ .v_ss, .fmadd132 },
64 => .{ .v_sd, .fmadd132 },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType().zigTypeTag()) {
+ .Vector => switch (ty.childType().zigTypeTag(mod)) {
.Float => switch (ty.childType().floatBits(self.target.*)) {
32 => switch (ty.vectorLen()) {
1 => .{ .v_ss, .fmadd132 },
@@ -11493,14 +11528,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
else => unreachable,
}
else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 }))
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Float => switch (ty.floatBits(self.target.*)) {
32 => .{ .v_ss, .fmadd213 },
64 => .{ .v_sd, .fmadd213 },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType().zigTypeTag()) {
+ .Vector => switch (ty.childType().zigTypeTag(mod)) {
.Float => switch (ty.childType().floatBits(self.target.*)) {
32 => switch (ty.vectorLen()) {
1 => .{ .v_ss, .fmadd213 },
@@ -11520,14 +11555,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
else => unreachable,
}
else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 }))
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Float => switch (ty.floatBits(self.target.*)) {
32 => .{ .v_ss, .fmadd231 },
64 => .{ .v_sd, .fmadd231 },
16, 80, 128 => null,
else => unreachable,
},
- .Vector => switch (ty.childType().zigTypeTag()) {
+ .Vector => switch (ty.childType().zigTypeTag(mod)) {
.Float => switch (ty.childType().floatBits(self.target.*)) {
32 => switch (ty.vectorLen()) {
1 => .{ .v_ss, .fmadd231 },
@@ -11555,7 +11590,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
var mops: [3]MCValue = undefined;
for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv;
- const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+ const abi_size = @intCast(u32, ty.abiSize(mod));
const mop1_reg = registerAlias(mops[0].getReg().?, abi_size);
const mop2_reg = registerAlias(mops[1].getReg().?, abi_size);
if (mops[2].isRegister()) try self.asmRegisterRegisterRegister(
@@ -11573,10 +11608,11 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
}
fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
+ const mod = self.bin_file.options.module.?;
const ty = self.air.typeOf(ref);
// If the type has no codegen bits, no need to store it.
- if (!ty.hasRuntimeBitsIgnoreComptime()) return .none;
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
if (Air.refToIndex(ref)) |inst| {
const mcv = switch (self.air.instructions.items(.tag)[inst]) {
@@ -11584,7 +11620,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
const gop = try self.const_tracking.getOrPut(self.gpa, inst);
if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(try self.genTypedValue(.{
.ty = ty,
- .val = self.air.value(ref).?,
+ .val = self.air.value(ref, mod).?,
}));
break :tracking gop.value_ptr;
},
@@ -11597,7 +11633,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
}
}
- return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref).? });
+ return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref, mod).? });
}
fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking {
@@ -11670,6 +11706,7 @@ fn resolveCallingConventionValues(
var_args: []const Air.Inst.Ref,
stack_frame_base: FrameIndex,
) !CallMCValues {
+ const mod = self.bin_file.options.module.?;
const cc = fn_ty.fnCallingConvention();
const param_len = fn_ty.fnParamLen();
const param_types = try self.gpa.alloc(Type, param_len + var_args.len);
@@ -11702,21 +11739,21 @@ fn resolveCallingConventionValues(
switch (self.target.os.tag) {
.windows => {
// Align the stack to 16 bytes before allocating shadow stack space (if any).
- result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(self.target.*));
+ result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(mod));
},
else => {},
}
// Return values
- if (ret_ty.zigTypeTag() == .NoReturn) {
+ if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = InstTracking.init(.unreach);
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// TODO: is this even possible for C calling convention?
result.return_value = InstTracking.init(.none);
} else {
const classes = switch (self.target.os.tag) {
- .windows => &[1]abi.Class{abi.classifyWindows(ret_ty, self.target.*)},
- else => mem.sliceTo(&abi.classifySystemV(ret_ty, self.target.*, .ret), .none),
+ .windows => &[1]abi.Class{abi.classifyWindows(ret_ty, mod)},
+ else => mem.sliceTo(&abi.classifySystemV(ret_ty, mod, .ret), .none),
};
if (classes.len > 1) {
return self.fail("TODO handle multiple classes per type", .{});
@@ -11725,7 +11762,7 @@ fn resolveCallingConventionValues(
result.return_value = switch (classes[0]) {
.integer => InstTracking.init(.{ .register = registerAlias(
ret_reg,
- @intCast(u32, ret_ty.abiSize(self.target.*)),
+ @intCast(u32, ret_ty.abiSize(mod)),
) }),
.float, .sse => InstTracking.init(.{ .register = .xmm0 }),
.memory => ret: {
@@ -11744,11 +11781,11 @@ fn resolveCallingConventionValues(
// Input params
for (param_types, result.args) |ty, *arg| {
- assert(ty.hasRuntimeBitsIgnoreComptime());
+ assert(ty.hasRuntimeBitsIgnoreComptime(mod));
const classes = switch (self.target.os.tag) {
- .windows => &[1]abi.Class{abi.classifyWindows(ty, self.target.*)},
- else => mem.sliceTo(&abi.classifySystemV(ty, self.target.*, .arg), .none),
+ .windows => &[1]abi.Class{abi.classifyWindows(ty, mod)},
+ else => mem.sliceTo(&abi.classifySystemV(ty, mod, .arg), .none),
};
if (classes.len > 1) {
return self.fail("TODO handle multiple classes per type", .{});
@@ -11783,8 +11820,8 @@ fn resolveCallingConventionValues(
}),
}
- const param_size = @intCast(u31, ty.abiSize(self.target.*));
- const param_align = @intCast(u31, ty.abiAlignment(self.target.*));
+ const param_size = @intCast(u31, ty.abiSize(mod));
+ const param_align = @intCast(u31, ty.abiAlignment(mod));
result.stack_byte_count =
mem.alignForwardGeneric(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{
@@ -11798,13 +11835,13 @@ fn resolveCallingConventionValues(
result.stack_align = 16;
// Return values
- if (ret_ty.zigTypeTag() == .NoReturn) {
+ if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = InstTracking.init(.unreach);
- } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
+ } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
result.return_value = InstTracking.init(.none);
} else {
const ret_reg = abi.getCAbiIntReturnRegs(self.target.*)[0];
- const ret_ty_size = @intCast(u31, ret_ty.abiSize(self.target.*));
+ const ret_ty_size = @intCast(u31, ret_ty.abiSize(mod));
if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) {
const aliased_reg = registerAlias(ret_reg, ret_ty_size);
result.return_value = .{ .short = .{ .register = aliased_reg }, .long = .none };
@@ -11819,12 +11856,12 @@ fn resolveCallingConventionValues(
// Input params
for (param_types, result.args) |ty, *arg| {
- if (!ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
arg.* = .none;
continue;
}
- const param_size = @intCast(u31, ty.abiSize(self.target.*));
- const param_align = @intCast(u31, ty.abiAlignment(self.target.*));
+ const param_size = @intCast(u31, ty.abiSize(mod));
+ const param_align = @intCast(u31, ty.abiAlignment(mod));
result.stack_byte_count =
mem.alignForwardGeneric(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{
@@ -11908,9 +11945,10 @@ fn registerAlias(reg: Register, size_bytes: u32) Register {
/// Truncates the value in the register in place.
/// Clobbers any remaining bits.
fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
- const int_info = if (ty.isAbiInt()) ty.intInfo(self.target.*) else std.builtin.Type.Int{
+ const mod = self.bin_file.options.module.?;
+ const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{
.signedness = .unsigned,
- .bits = @intCast(u16, ty.bitSize(self.target.*)),
+ .bits = @intCast(u16, ty.bitSize(mod)),
};
const max_reg_bit_width = Register.rax.bitSize();
switch (int_info.signedness) {
@@ -11953,8 +11991,9 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
}
fn regBitSize(self: *Self, ty: Type) u64 {
- const abi_size = ty.abiSize(self.target.*);
- return switch (ty.zigTypeTag()) {
+ const mod = self.bin_file.options.module.?;
+ const abi_size = ty.abiSize(mod);
+ return switch (ty.zigTypeTag(mod)) {
else => switch (abi_size) {
1 => 8,
2 => 16,
@@ -11971,7 +12010,8 @@ fn regBitSize(self: *Self, ty: Type) u64 {
}
fn regExtraBits(self: *Self, ty: Type) u64 {
- return self.regBitSize(ty) - ty.bitSize(self.target.*);
+ const mod = self.bin_file.options.module.?;
+ return self.regBitSize(ty) - ty.bitSize(mod);
}
fn hasFeature(self: *Self, feature: Target.x86.Feature) bool {
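The CodeGen hunks above are one instance of the commit-wide pattern: layout and classification helpers stop taking a bare std.Target and take the Module instead, deriving the target via mod.getTarget() only where a target-specific query remains (CPU features, OS tag). A minimal standalone sketch of that shape, with stand-in types rather than the compiler's real ones (Module, Ty, and regExtraBits here are illustrative, not the actual API):

const std = @import("std");

// Stand-in for the compiler's Module: it owns the wider compilation state
// and can still hand out the std.Target on demand.
const Module = struct {
    target: std.Target,
    pub fn getTarget(mod: *const Module) std.Target {
        return mod.target;
    }
};

// Stand-in type whose layout query takes the Module, mirroring
// ty.bitSize(mod) in the hunks above.
const Ty = struct {
    bits: u16,
    pub fn bitSize(ty: Ty, mod: *const Module) u64 {
        _ = mod; // layout may consult module-owned data, not just the target
        return ty.bits;
    }
};

// Old shape: fn regExtraBits(ty: Ty, reg_bits: u64, target: std.Target) u64
// New shape: the Module is threaded through instead.
fn regExtraBits(ty: Ty, reg_bits: u64, mod: *const Module) u64 {
    return reg_bits - ty.bitSize(mod);
}

test "module-threaded layout query" {
    const mod = Module{ .target = @import("builtin").target };
    try std.testing.expectEqual(@as(u64, 24), regExtraBits(.{ .bits = 40 }, 64, &mod));
}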
src/codegen/c/type.zig
@@ -292,19 +292,19 @@ pub const CType = extern union {
.abi = std.math.log2_int(u32, abi_alignment),
};
}
- pub fn abiAlign(ty: Type, target: Target) AlignAs {
- const abi_align = ty.abiAlignment(target);
+ pub fn abiAlign(ty: Type, mod: *const Module) AlignAs {
+ const abi_align = ty.abiAlignment(mod);
return init(abi_align, abi_align);
}
- pub fn fieldAlign(struct_ty: Type, field_i: usize, target: Target) AlignAs {
+ pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *const Module) AlignAs {
return init(
- struct_ty.structFieldAlign(field_i, target),
- struct_ty.structFieldType(field_i).abiAlignment(target),
+ struct_ty.structFieldAlign(field_i, mod),
+ struct_ty.structFieldType(field_i).abiAlignment(mod),
);
}
- pub fn unionPayloadAlign(union_ty: Type, target: Target) AlignAs {
+ pub fn unionPayloadAlign(union_ty: Type, mod: *const Module) AlignAs {
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
- const union_payload_align = union_obj.abiAlignment(target, false);
+ const union_payload_align = union_obj.abiAlignment(mod, false);
return init(union_payload_align, union_payload_align);
}
@@ -344,8 +344,8 @@ pub const CType = extern union {
return self.map.entries.items(.hash)[index - Tag.no_payload_count];
}
- pub fn typeToIndex(self: Set, ty: Type, target: Target, kind: Kind) ?Index {
- const lookup = Convert.Lookup{ .imm = .{ .set = &self, .target = target } };
+ pub fn typeToIndex(self: Set, ty: Type, mod: *Module, kind: Kind) ?Index {
+ const lookup = Convert.Lookup{ .imm = .{ .set = &self, .mod = mod } };
var convert: Convert = undefined;
convert.initType(ty, kind, lookup) catch unreachable;
@@ -405,7 +405,7 @@ pub const CType = extern union {
);
if (!gop.found_existing) {
errdefer _ = self.set.map.pop();
- gop.key_ptr.* = try createFromConvert(self, ty, lookup.getTarget(), kind, convert);
+ gop.key_ptr.* = try createFromConvert(self, ty, lookup.getModule(), kind, convert);
}
if (std.debug.runtime_safety) {
const adapter = TypeAdapter64{
@@ -1236,10 +1236,10 @@ pub const CType = extern union {
}
pub const Lookup = union(enum) {
- fail: Target,
+ fail: *Module,
imm: struct {
set: *const Store.Set,
- target: Target,
+ mod: *Module,
},
mut: struct {
promoted: *Store.Promoted,
@@ -1254,10 +1254,14 @@ pub const CType = extern union {
}
pub fn getTarget(self: @This()) Target {
+ return self.getModule().getTarget();
+ }
+
+ pub fn getModule(self: @This()) *Module {
return switch (self) {
- .fail => |target| target,
- .imm => |imm| imm.target,
- .mut => |mut| mut.mod.getTarget(),
+ .fail => |mod| mod,
+ .imm => |imm| imm.mod,
+ .mut => |mut| mut.mod,
};
}
@@ -1272,7 +1276,7 @@ pub const CType = extern union {
pub fn typeToIndex(self: @This(), ty: Type, kind: Kind) !?Index {
return switch (self) {
.fail => null,
- .imm => |imm| imm.set.typeToIndex(ty, imm.target, kind),
+ .imm => |imm| imm.set.typeToIndex(ty, imm.mod, kind),
.mut => |mut| try mut.promoted.typeToIndex(ty, mut.mod, kind),
};
}
@@ -1284,7 +1288,7 @@ pub const CType = extern union {
pub fn freeze(self: @This()) @This() {
return switch (self) {
.fail, .imm => self,
- .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .target = self.getTarget() } },
+ .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .mod = mut.mod } },
};
}
};
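With getModule added, every Lookup variant can now produce the Module, and getTarget becomes a derivation from it rather than per-variant stored state (the .fail variant switches from carrying a Target to carrying *Module, and freeze forwards mut.mod directly). A small standalone sketch of this union-with-shared-accessor shape, using invented stand-in types:

const std = @import("std");

// Stand-in for the module/target context every variant must expose.
const Ctx = struct { name: []const u8 };

const Lookup = union(enum) {
    fail: *const Ctx,
    imm: struct { ctx: *const Ctx },

    // Mirrors getModule() above: one accessor, all variants covered.
    fn getCtx(self: Lookup) *const Ctx {
        return switch (self) {
            .fail => |ctx| ctx,
            .imm => |imm| imm.ctx,
        };
    }
};

test "all variants expose the shared context" {
    const ctx = Ctx{ .name = "x86_64" };
    const a = Lookup{ .fail = &ctx };
    const b = Lookup{ .imm = .{ .ctx = &ctx } };
    try std.testing.expectEqual(a.getCtx(), b.getCtx());
}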
@@ -1338,7 +1342,7 @@ pub const CType = extern union {
self.storage.anon.fields[0] = .{
.name = "array",
.type = array_idx,
- .alignas = AlignAs.abiAlign(ty, lookup.getTarget()),
+ .alignas = AlignAs.abiAlign(ty, lookup.getModule()),
};
self.initAnon(kind, fwd_idx, 1);
} else self.init(switch (kind) {
@@ -1350,12 +1354,12 @@ pub const CType = extern union {
}
pub fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void {
- const target = lookup.getTarget();
+ const mod = lookup.getModule();
self.* = undefined;
- if (!ty.isFnOrHasRuntimeBitsIgnoreComptime())
+ if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(mod))
self.init(.void)
- else if (ty.isAbiInt()) switch (ty.tag()) {
+ else if (ty.isAbiInt(mod)) switch (ty.tag()) {
.usize => self.init(.uintptr_t),
.isize => self.init(.intptr_t),
.c_char => self.init(.char),
@@ -1367,13 +1371,13 @@ pub const CType = extern union {
.c_ulong => self.init(.@"unsigned long"),
.c_longlong => self.init(.@"long long"),
.c_ulonglong => self.init(.@"unsigned long long"),
- else => switch (tagFromIntInfo(ty.intInfo(target))) {
+ else => switch (tagFromIntInfo(ty.intInfo(mod))) {
.void => unreachable,
else => |t| self.init(t),
.array => switch (kind) {
.forward, .complete, .global => {
- const abi_size = ty.abiSize(target);
- const abi_align = ty.abiAlignment(target);
+ const abi_size = ty.abiSize(mod);
+ const abi_align = ty.abiAlignment(mod);
self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{
.len = @divExact(abi_size, abi_align),
.elem_type = tagFromIntInfo(.{
@@ -1389,7 +1393,7 @@ pub const CType = extern union {
.payload => unreachable,
},
},
- } else switch (ty.zigTypeTag()) {
+ } else switch (ty.zigTypeTag(mod)) {
.Frame => unreachable,
.AnyFrame => unreachable,
@@ -1434,12 +1438,12 @@ pub const CType = extern union {
self.storage.anon.fields[0] = .{
.name = "ptr",
.type = ptr_idx,
- .alignas = AlignAs.abiAlign(ptr_ty, target),
+ .alignas = AlignAs.abiAlign(ptr_ty, mod),
};
self.storage.anon.fields[1] = .{
.name = "len",
.type = Tag.uintptr_t.toIndex(),
- .alignas = AlignAs.abiAlign(Type.usize, target),
+ .alignas = AlignAs.abiAlign(Type.usize, mod),
};
self.initAnon(kind, fwd_idx, 2);
} else self.init(switch (kind) {
@@ -1462,12 +1466,8 @@ pub const CType = extern union {
},
};
- var host_int_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = info.host_size * 8,
- };
const pointee_ty = if (info.host_size > 0 and info.vector_index == .none)
- Type.initPayload(&host_int_pl.base)
+ try mod.intType(.unsigned, info.host_size * 8)
else
info.pointee_type;
@@ -1490,11 +1490,9 @@ pub const CType = extern union {
if (ty.castTag(.@"struct")) |struct_obj| {
try self.initType(struct_obj.data.backing_int_ty, kind, lookup);
} else {
- var buf: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, ty.bitSize(target)),
- };
- try self.initType(Type.initPayload(&buf.base), kind, lookup);
+ const bits = @intCast(u16, ty.bitSize(mod));
+ const int_ty = try mod.intType(.unsigned, bits);
+ try self.initType(int_ty, kind, lookup);
}
} else if (ty.isTupleOrAnonStruct()) {
if (lookup.isMutable()) {
@@ -1505,7 +1503,7 @@ pub const CType = extern union {
}) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
- !field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
_ = try lookup.typeToIndex(field_ty, switch (kind) {
.forward, .forward_parameter => .forward,
.complete, .parameter => .complete,
@@ -1555,7 +1553,7 @@ pub const CType = extern union {
self.storage.anon.fields[field_count] = .{
.name = "payload",
.type = payload_idx.?,
- .alignas = AlignAs.unionPayloadAlign(ty, target),
+ .alignas = AlignAs.unionPayloadAlign(ty, mod),
};
field_count += 1;
}
@@ -1563,7 +1561,7 @@ pub const CType = extern union {
self.storage.anon.fields[field_count] = .{
.name = "tag",
.type = tag_idx.?,
- .alignas = AlignAs.abiAlign(tag_ty.?, target),
+ .alignas = AlignAs.abiAlign(tag_ty.?, mod),
};
field_count += 1;
}
@@ -1576,7 +1574,7 @@ pub const CType = extern union {
} };
self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) };
} else self.init(.@"struct");
- } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes()) {
+ } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes(mod)) {
self.init(.void);
} else {
var is_packed = false;
@@ -1586,9 +1584,9 @@ pub const CType = extern union {
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- const field_align = AlignAs.fieldAlign(ty, field_i, target);
+ const field_align = AlignAs.fieldAlign(ty, field_i, mod);
if (field_align.@"align" < field_align.abi) {
is_packed = true;
if (!lookup.isMutable()) break;
@@ -1643,8 +1641,8 @@ pub const CType = extern union {
.Optional => {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&buf);
- if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
- if (ty.optionalReprIsPayload()) {
+ if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (ty.optionalReprIsPayload(mod)) {
try self.initType(payload_ty, kind, lookup);
} else if (switch (kind) {
.forward, .forward_parameter => @as(Index, undefined),
@@ -1661,12 +1659,12 @@ pub const CType = extern union {
self.storage.anon.fields[0] = .{
.name = "payload",
.type = payload_idx,
- .alignas = AlignAs.abiAlign(payload_ty, target),
+ .alignas = AlignAs.abiAlign(payload_ty, mod),
};
self.storage.anon.fields[1] = .{
.name = "is_null",
.type = Tag.bool.toIndex(),
- .alignas = AlignAs.abiAlign(Type.bool, target),
+ .alignas = AlignAs.abiAlign(Type.bool, mod),
};
self.initAnon(kind, fwd_idx, 2);
} else self.init(switch (kind) {
@@ -1699,12 +1697,12 @@ pub const CType = extern union {
self.storage.anon.fields[0] = .{
.name = "payload",
.type = payload_idx,
- .alignas = AlignAs.abiAlign(payload_ty, target),
+ .alignas = AlignAs.abiAlign(payload_ty, mod),
};
self.storage.anon.fields[1] = .{
.name = "error",
.type = error_idx,
- .alignas = AlignAs.abiAlign(error_ty, target),
+ .alignas = AlignAs.abiAlign(error_ty, mod),
};
self.initAnon(kind, fwd_idx, 2);
} else self.init(switch (kind) {
@@ -1733,7 +1731,7 @@ pub const CType = extern union {
};
_ = try lookup.typeToIndex(info.return_type, param_kind);
for (info.param_types) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
_ = try lookup.typeToIndex(param_type, param_kind);
}
}
@@ -1900,16 +1898,16 @@ pub const CType = extern union {
}
}
- fn createFromType(store: *Store.Promoted, ty: Type, target: Target, kind: Kind) !CType {
+ fn createFromType(store: *Store.Promoted, ty: Type, mod: *const Module, kind: Kind) !CType {
var convert: Convert = undefined;
- try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .target = target } });
- return createFromConvert(store, ty, target, kind, &convert);
+ try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .mod = mod } });
+ return createFromConvert(store, ty, mod, kind, &convert);
}
fn createFromConvert(
store: *Store.Promoted,
ty: Type,
- target: Target,
+ mod: *Module,
kind: Kind,
convert: Convert,
) !CType {
@@ -1930,7 +1928,7 @@ pub const CType = extern union {
.packed_struct,
.packed_union,
=> {
- const zig_ty_tag = ty.zigTypeTag();
+ const zig_ty_tag = ty.zigTypeTag(mod);
const fields_len = switch (zig_ty_tag) {
.Struct => ty.structFieldCount(),
.Union => ty.unionFields().count(),
@@ -1941,7 +1939,7 @@ pub const CType = extern union {
for (0..fields_len) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
- !field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
c_fields_len += 1;
}
@@ -1950,7 +1948,7 @@ pub const CType = extern union {
for (0..fields_len) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
- !field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
defer c_field_i += 1;
fields_pl[c_field_i] = .{
@@ -1962,12 +1960,12 @@ pub const CType = extern union {
.Union => ty.unionFields().keys()[field_i],
else => unreachable,
}),
- .type = store.set.typeToIndex(field_ty, target, switch (kind) {
+ .type = store.set.typeToIndex(field_ty, mod, switch (kind) {
.forward, .forward_parameter => .forward,
.complete, .parameter, .payload => .complete,
.global => .global,
}).?,
- .alignas = AlignAs.fieldAlign(ty, field_i, target),
+ .alignas = AlignAs.fieldAlign(ty, field_i, mod),
};
}
@@ -2004,7 +2002,7 @@ pub const CType = extern union {
const struct_pl = try arena.create(Payload.Aggregate);
struct_pl.* = .{ .base = .{ .tag = t }, .data = .{
.fields = fields_pl,
- .fwd_decl = store.set.typeToIndex(ty, target, .forward).?,
+ .fwd_decl = store.set.typeToIndex(ty, mod, .forward).?,
} };
return initPayload(struct_pl);
},
@@ -2026,21 +2024,21 @@ pub const CType = extern union {
var c_params_len: usize = 0;
for (info.param_types) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
c_params_len += 1;
}
const params_pl = try arena.alloc(Index, c_params_len);
var c_param_i: usize = 0;
for (info.param_types) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
- params_pl[c_param_i] = store.set.typeToIndex(param_type, target, param_kind).?;
+ if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ params_pl[c_param_i] = store.set.typeToIndex(param_type, mod, param_kind).?;
c_param_i += 1;
}
const fn_pl = try arena.create(Payload.Function);
fn_pl.* = .{ .base = .{ .tag = t }, .data = .{
- .return_type = store.set.typeToIndex(info.return_type, target, param_kind).?,
+ .return_type = store.set.typeToIndex(info.return_type, mod, param_kind).?,
.param_types = params_pl,
} };
return initPayload(fn_pl);
@@ -2067,12 +2065,12 @@ pub const CType = extern union {
}
pub fn eql(self: @This(), ty: Type, cty: CType) bool {
+ const mod = self.lookup.getModule();
switch (self.convert.value) {
.cty => |c| return c.eql(cty),
.tag => |t| {
if (t != cty.tag()) return false;
- const target = self.lookup.getTarget();
switch (t) {
.fwd_anon_struct,
.fwd_anon_union,
@@ -2084,7 +2082,7 @@ pub const CType = extern union {
]u8 = undefined;
const c_fields = cty.cast(Payload.Fields).?.data;
- const zig_ty_tag = ty.zigTypeTag();
+ const zig_ty_tag = ty.zigTypeTag(mod);
var c_field_i: usize = 0;
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(),
@@ -2093,7 +2091,7 @@ pub const CType = extern union {
}) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
- !field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
defer c_field_i += 1;
const c_field = &c_fields[c_field_i];
@@ -2113,7 +2111,7 @@ pub const CType = extern union {
else => unreachable,
},
mem.span(c_field.name),
- ) or AlignAs.fieldAlign(ty, field_i, target).@"align" !=
+ ) or AlignAs.fieldAlign(ty, field_i, mod).@"align" !=
c_field.alignas.@"align") return false;
}
return true;
@@ -2146,7 +2144,7 @@ pub const CType = extern union {
.function,
.varargs_function,
=> {
- if (ty.zigTypeTag() != .Fn) return false;
+ if (ty.zigTypeTag(mod) != .Fn) return false;
const info = ty.fnInfo();
assert(!info.is_generic);
@@ -2162,7 +2160,7 @@ pub const CType = extern union {
var c_param_i: usize = 0;
for (info.param_types) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (c_param_i >= data.param_types.len) return false;
const param_cty = data.param_types[c_param_i];
@@ -2202,7 +2200,7 @@ pub const CType = extern union {
.tag => |t| {
autoHash(hasher, t);
- const target = self.lookup.getTarget();
+ const mod = self.lookup.getModule();
switch (t) {
.fwd_anon_struct,
.fwd_anon_union,
@@ -2211,15 +2209,15 @@ pub const CType = extern union {
std.fmt.count("f{}", .{std.math.maxInt(usize)})
]u8 = undefined;
- const zig_ty_tag = ty.zigTypeTag();
- for (0..switch (ty.zigTypeTag()) {
+ const zig_ty_tag = ty.zigTypeTag(mod);
+ for (0..switch (ty.zigTypeTag(mod)) {
.Struct => ty.structFieldCount(),
.Union => ty.unionFields().count(),
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
- !field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
self.updateHasherRecurse(hasher, field_ty, switch (self.kind) {
.forward, .forward_parameter => .forward,
@@ -2234,7 +2232,7 @@ pub const CType = extern union {
.Union => ty.unionFields().keys()[field_i],
else => unreachable,
});
- autoHash(hasher, AlignAs.fieldAlign(ty, field_i, target).@"align");
+ autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align");
}
},
@@ -2271,7 +2269,7 @@ pub const CType = extern union {
self.updateHasherRecurse(hasher, info.return_type, param_kind);
for (info.param_types) |param_type| {
- if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
self.updateHasherRecurse(hasher, param_type, param_kind);
}
},
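A second recurring change in this file: ad-hoc Type.Payload.Bits temporaries on the stack are replaced by integer types obtained from the module via mod.intType(.unsigned, bits). A rough standalone sketch of why interning lets callers drop those payload buffers; this toy pool keys on bit width only and the real call also takes a signedness, so treat it purely as an illustration:

const std = @import("std");

// Toy interner: the same request always yields the same stable handle, so
// the caller never has to own backing storage for the type it asked for.
const FakeModule = struct {
    ints: std.AutoHashMap(u16, u32),
    next: u32 = 0,

    fn intType(mod: *FakeModule, bits: u16) !u32 {
        const gop = try mod.ints.getOrPut(bits);
        if (!gop.found_existing) {
            gop.value_ptr.* = mod.next;
            mod.next += 1;
        }
        return gop.value_ptr.*; // deduplicated: uN is created at most once
    }
};

test "interned int types are deduplicated" {
    var mod = FakeModule{ .ints = std.AutoHashMap(u16, u32).init(std.testing.allocator) };
    defer mod.ints.deinit();
    try std.testing.expectEqual(try mod.intType(16), try mod.intType(16));
}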
src/codegen/c.zig
@@ -16,6 +16,7 @@ const trace = @import("../tracy.zig").trace;
const LazySrcLoc = Module.LazySrcLoc;
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
+const InternPool = @import("../InternPool.zig");
const BigIntLimb = std.math.big.Limb;
const BigInt = std.math.big.int;
@@ -285,10 +286,11 @@ pub const Function = struct {
const gop = try f.value_map.getOrPut(inst);
if (gop.found_existing) return gop.value_ptr.*;
- const val = f.air.value(ref).?;
+ const mod = f.object.dg.module;
+ const val = f.air.value(ref, mod).?;
const ty = f.air.typeOf(ref);
- const result: CValue = if (lowersToArray(ty, f.object.dg.module.getTarget())) result: {
+ const result: CValue = if (lowersToArray(ty, mod)) result: {
const writer = f.object.code_header.writer();
const alignment = 0;
const decl_c_value = try f.allocLocalValue(ty, alignment);
@@ -318,11 +320,11 @@ pub const Function = struct {
/// those which go into `allocs`. This function does not add the resulting local into `allocs`;
/// that responsibility lies with the caller.
fn allocLocalValue(f: *Function, ty: Type, alignment: u32) !CValue {
+ const mod = f.object.dg.module;
const gpa = f.object.dg.gpa;
- const target = f.object.dg.module.getTarget();
try f.locals.append(gpa, .{
.cty_idx = try f.typeToIndex(ty, .complete),
- .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)),
+ .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)),
});
return .{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) };
}
@@ -336,10 +338,10 @@ pub const Function = struct {
/// Only allocates the local; does not print anything. Will attempt to re-use locals, so should
/// not be used for persistent locals (i.e. those in `allocs`).
fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: u32) !CValue {
- const target = f.object.dg.module.getTarget();
+ const mod = f.object.dg.module;
if (f.free_locals_map.getPtr(.{
.cty_idx = try f.typeToIndex(ty, .complete),
- .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)),
+ .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)),
})) |locals_list| {
if (locals_list.popOrNull()) |local_entry| {
return .{ .new_local = local_entry.key };
@@ -352,8 +354,9 @@ pub const Function = struct {
fn writeCValue(f: *Function, w: anytype, c_value: CValue, location: ValueRenderLocation) !void {
switch (c_value) {
.constant => |inst| {
+ const mod = f.object.dg.module;
const ty = f.air.typeOf(inst);
- const val = f.air.value(inst).?;
+ const val = f.air.value(inst, mod).?;
return f.object.dg.renderValue(w, ty, val, location);
},
.undef => |ty| return f.object.dg.renderValue(w, ty, Value.undef, location),
@@ -364,8 +367,9 @@ pub const Function = struct {
fn writeCValueDeref(f: *Function, w: anytype, c_value: CValue) !void {
switch (c_value) {
.constant => |inst| {
+ const mod = f.object.dg.module;
const ty = f.air.typeOf(inst);
- const val = f.air.value(inst).?;
+ const val = f.air.value(inst, mod).?;
try w.writeAll("(*");
try f.object.dg.renderValue(w, ty, val, .Other);
return w.writeByte(')');
@@ -377,8 +381,9 @@ pub const Function = struct {
fn writeCValueMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void {
switch (c_value) {
.constant => |inst| {
+ const mod = f.object.dg.module;
const ty = f.air.typeOf(inst);
- const val = f.air.value(inst).?;
+ const val = f.air.value(inst, mod).?;
try f.object.dg.renderValue(w, ty, val, .Other);
try w.writeByte('.');
return f.writeCValue(w, member, .Other);
@@ -390,8 +395,9 @@ pub const Function = struct {
fn writeCValueDerefMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void {
switch (c_value) {
.constant => |inst| {
+ const mod = f.object.dg.module;
const ty = f.air.typeOf(inst);
- const val = f.air.value(inst).?;
+ const val = f.air.value(inst, mod).?;
try w.writeByte('(');
try f.object.dg.renderValue(w, ty, val, .Other);
try w.writeAll(")->");
@@ -522,11 +528,12 @@ pub const DeclGen = struct {
decl_index: Decl.Index,
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
- const decl = dg.module.declPtr(decl_index);
+ const mod = dg.module;
+ const decl = mod.declPtr(decl_index);
assert(decl.has_tv);
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
- if (ty.isPtrAtRuntime() and !decl.ty.isFnOrHasRuntimeBits()) {
+ if (ty.isPtrAtRuntime(mod) and !decl.ty.isFnOrHasRuntimeBits(mod)) {
return dg.writeCValue(writer, .{ .undef = ty });
}
@@ -553,7 +560,7 @@ pub const DeclGen = struct {
var len_pl: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
- .data = val.sliceLen(dg.module),
+ .data = val.sliceLen(mod),
};
const len_val = Value.initPayload(&len_pl.base);
@@ -568,7 +575,7 @@ pub const DeclGen = struct {
// them). The analysis until now should ensure that the C function
// pointers are compatible. If they are not, then there is a bug
// somewhere and we should let the C compiler tell us about it.
- const need_typecast = if (ty.castPtrToFn()) |_| false else !ty.eql(decl.ty, dg.module);
+ const need_typecast = if (ty.castPtrToFn(mod)) |_| false else !ty.eql(decl.ty, mod);
if (need_typecast) {
try writer.writeAll("((");
try dg.renderType(writer, ty);
@@ -584,6 +591,8 @@ pub const DeclGen = struct {
//
// Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr
fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type, location: ValueRenderLocation) error{ OutOfMemory, AnalysisFail }!void {
+ const mod = dg.module;
+
if (!ptr_ty.isSlice()) {
try writer.writeByte('(');
try dg.renderType(writer, ptr_ty);
@@ -601,7 +610,6 @@ pub const DeclGen = struct {
try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index, location);
},
.field_ptr => {
- const target = dg.module.getTarget();
const field_ptr = ptr_val.castTag(.field_ptr).?.data;
// Ensure complete type definition is visible before accessing fields.
@@ -615,7 +623,7 @@ pub const DeclGen = struct {
field_ptr.container_ty,
ptr_ty,
@intCast(u32, field_ptr.field_index),
- target,
+ mod,
)) {
.begin => try dg.renderParentPtr(
writer,
@@ -714,19 +722,20 @@ pub const DeclGen = struct {
if (val.castTag(.runtime_value)) |rt| {
val = rt.data;
}
- const target = dg.module.getTarget();
+ const mod = dg.module;
+ const target = mod.getTarget();
const initializer_type: ValueRenderLocation = switch (location) {
.StaticInitializer => .StaticInitializer,
else => .Initializer,
};
- const safety_on = switch (dg.module.optimizeMode()) {
+ const safety_on = switch (mod.optimizeMode()) {
.Debug, .ReleaseSafe => true,
.ReleaseFast, .ReleaseSmall => false,
};
if (val.isUndefDeep()) {
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Bool => {
if (safety_on) {
return writer.writeAll("0xaa");
@@ -737,8 +746,8 @@ pub const DeclGen = struct {
.Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val, location)}),
.Float => {
const bits = ty.floatBits(target);
- var repr_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = bits };
- const repr_ty = Type.initPayload(&repr_pl.base);
+ // All unsigned ints matching float types are pre-allocated.
+ const repr_ty = mod.intType(.unsigned, bits) catch unreachable;
try writer.writeAll("zig_cast_");
try dg.renderTypeForBuiltinFnName(writer, ty);
@@ -778,11 +787,11 @@ pub const DeclGen = struct {
var opt_buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&opt_buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return dg.renderValue(writer, Type.bool, val, location);
}
- if (ty.optionalReprIsPayload()) {
+ if (ty.optionalReprIsPayload(mod)) {
return dg.renderValue(writer, payload_ty, val, location);
}
@@ -811,7 +820,7 @@ pub const DeclGen = struct {
for (0..ty.structFieldCount()) |field_i| {
if (ty.structFieldIsComptime(field_i)) continue;
const field_ty = ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBits()) continue;
+ if (!field_ty.hasRuntimeBits(mod)) continue;
if (!empty) try writer.writeByte(',');
try dg.renderValue(writer, field_ty, val, initializer_type);
@@ -832,17 +841,17 @@ pub const DeclGen = struct {
try writer.writeByte('{');
if (ty.unionTagTypeSafety()) |tag_ty| {
- const layout = ty.unionGetLayout(target);
+ const layout = ty.unionGetLayout(mod);
if (layout.tag_size != 0) {
try writer.writeAll(" .tag = ");
try dg.renderValue(writer, tag_ty, val, initializer_type);
}
- if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}');
+ if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}');
if (layout.tag_size != 0) try writer.writeByte(',');
try writer.writeAll(" .payload = {");
}
for (ty.unionFields().values()) |field| {
- if (!field.ty.hasRuntimeBits()) continue;
+ if (!field.ty.hasRuntimeBits(mod)) continue;
try dg.renderValue(writer, field.ty, val, initializer_type);
break;
}
@@ -853,7 +862,7 @@ pub const DeclGen = struct {
const payload_ty = ty.errorUnionPayload();
const error_ty = ty.errorUnionSet();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return dg.renderValue(writer, error_ty, val, location);
}
@@ -916,7 +925,7 @@ pub const DeclGen = struct {
}
unreachable;
}
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Int => switch (val.tag()) {
.field_ptr,
.elem_ptr,
@@ -931,8 +940,8 @@ pub const DeclGen = struct {
const bits = ty.floatBits(target);
const f128_val = val.toFloat(f128);
- var repr_ty_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = bits };
- const repr_ty = Type.initPayload(&repr_ty_pl.base);
+ // All unsigned ints matching float types are pre-allocated.
+ const repr_ty = mod.intType(.unsigned, bits) catch unreachable;
assert(bits <= 128);
var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined;
@@ -1109,7 +1118,7 @@ pub const DeclGen = struct {
},
else => unreachable,
};
- const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(target)) else null;
+ const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null;
try writer.print("{s}", .{
fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen())], sentinel),
});
@@ -1131,11 +1140,11 @@ pub const DeclGen = struct {
var index: usize = 0;
while (index < ai.len) : (index += 1) {
const elem_val = try val.elemValue(dg.module, arena_allocator, index);
- const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target));
+ const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
try literal.writeChar(elem_val_u8);
}
if (ai.sentinel) |s| {
- const s_u8 = @intCast(u8, s.toUnsignedInt(target));
+ const s_u8 = @intCast(u8, s.toUnsignedInt(mod));
if (s_u8 != 0) try literal.writeChar(s_u8);
}
try literal.end();
@@ -1145,7 +1154,7 @@ pub const DeclGen = struct {
while (index < ai.len) : (index += 1) {
if (index != 0) try writer.writeByte(',');
const elem_val = try val.elemValue(dg.module, arena_allocator, index);
- const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target));
+ const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
try writer.print("'\\x{x}'", .{elem_val_u8});
}
if (ai.sentinel) |s| {
@@ -1183,10 +1192,10 @@ pub const DeclGen = struct {
const payload_ty = ty.optionalChild(&opt_buf);
const is_null_val = Value.makeBool(val.tag() == .null_value);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime())
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
return dg.renderValue(writer, Type.bool, is_null_val, location);
- if (ty.optionalReprIsPayload()) {
+ if (ty.optionalReprIsPayload(mod)) {
const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else val;
return dg.renderValue(writer, payload_ty, payload_val, location);
}
@@ -1218,7 +1227,7 @@ pub const DeclGen = struct {
const error_ty = ty.errorUnionSet();
const error_val = if (val.errorUnionIsPayload()) Value.zero else val;
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return dg.renderValue(writer, error_ty, error_val, location);
}
@@ -1263,8 +1272,7 @@ pub const DeclGen = struct {
}
},
else => {
- var int_tag_ty_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = ty.intTagType(&int_tag_ty_buffer);
+ const int_tag_ty = ty.intTagType();
return dg.renderValue(writer, int_tag_ty, val, location);
},
}
@@ -1295,7 +1303,7 @@ pub const DeclGen = struct {
for (field_vals, 0..) |field_val, field_i| {
if (ty.structFieldIsComptime(field_i)) continue;
const field_ty = ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeByte(',');
try dg.renderValue(writer, field_ty, field_val, initializer_type);
@@ -1306,13 +1314,10 @@ pub const DeclGen = struct {
},
.Packed => {
const field_vals = val.castTag(.aggregate).?.data;
- const int_info = ty.intInfo(target);
+ const int_info = ty.intInfo(mod);
- var bit_offset_ty_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = Type.smallestUnsignedBits(int_info.bits - 1),
- };
- const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base);
+ const bits = Type.smallestUnsignedBits(int_info.bits - 1);
+ const bit_offset_ty = try mod.intType(.unsigned, bits);
var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 };
const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
@@ -1321,7 +1326,7 @@ pub const DeclGen = struct {
for (0..field_vals.len) |field_i| {
if (ty.structFieldIsComptime(field_i)) continue;
const field_ty = ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
eff_num_fields += 1;
}
@@ -1330,7 +1335,7 @@ pub const DeclGen = struct {
try writer.writeByte('(');
try dg.renderValue(writer, ty, Value.undef, initializer_type);
try writer.writeByte(')');
- } else if (ty.bitSize(target) > 64) {
+ } else if (ty.bitSize(mod) > 64) {
// zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
var num_or = eff_num_fields - 1;
while (num_or > 0) : (num_or -= 1) {
@@ -1344,7 +1349,7 @@ pub const DeclGen = struct {
for (field_vals, 0..) |field_val, field_i| {
if (ty.structFieldIsComptime(field_i)) continue;
const field_ty = ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const cast_context = IntCastContext{ .value = .{ .value = field_val } };
if (bit_offset_val_pl.data != 0) {
@@ -1362,7 +1367,7 @@ pub const DeclGen = struct {
if (needs_closing_paren) try writer.writeByte(')');
if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
- bit_offset_val_pl.data += field_ty.bitSize(target);
+ bit_offset_val_pl.data += field_ty.bitSize(mod);
needs_closing_paren = true;
eff_index += 1;
}
@@ -1373,7 +1378,7 @@ pub const DeclGen = struct {
for (field_vals, 0..) |field_val, field_i| {
if (ty.structFieldIsComptime(field_i)) continue;
const field_ty = ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeAll(" | ");
try writer.writeByte('(');
@@ -1388,7 +1393,7 @@ pub const DeclGen = struct {
try dg.renderValue(writer, field_ty, field_val, .Other);
}
- bit_offset_val_pl.data += field_ty.bitSize(target);
+ bit_offset_val_pl.data += field_ty.bitSize(mod);
empty = false;
}
try writer.writeByte(')');
@@ -1408,12 +1413,12 @@ pub const DeclGen = struct {
const field_ty = ty.unionFields().values()[field_i].ty;
const field_name = ty.unionFields().keys()[field_i];
if (ty.containerLayout() == .Packed) {
- if (field_ty.hasRuntimeBits()) {
- if (field_ty.isPtrAtRuntime()) {
+ if (field_ty.hasRuntimeBits(mod)) {
+ if (field_ty.isPtrAtRuntime(mod)) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
- } else if (field_ty.zigTypeTag() == .Float) {
+ } else if (field_ty.zigTypeTag(mod) == .Float) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
@@ -1427,21 +1432,21 @@ pub const DeclGen = struct {
try writer.writeByte('{');
if (ty.unionTagTypeSafety()) |tag_ty| {
- const layout = ty.unionGetLayout(target);
+ const layout = ty.unionGetLayout(mod);
if (layout.tag_size != 0) {
try writer.writeAll(" .tag = ");
try dg.renderValue(writer, tag_ty, union_obj.tag, initializer_type);
}
- if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}');
+ if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}');
if (layout.tag_size != 0) try writer.writeByte(',');
try writer.writeAll(" .payload = {");
}
- if (field_ty.hasRuntimeBits()) {
+ if (field_ty.hasRuntimeBits(mod)) {
try writer.print(" .{ } = ", .{fmtIdent(field_name)});
try dg.renderValue(writer, field_ty, union_obj.val, initializer_type);
try writer.writeByte(' ');
} else for (ty.unionFields().values()) |field| {
- if (!field.ty.hasRuntimeBits()) continue;
+ if (!field.ty.hasRuntimeBits(mod)) continue;
try dg.renderValue(writer, field.ty, Value.undef, initializer_type);
break;
}
@@ -1478,9 +1483,9 @@ pub const DeclGen = struct {
},
) !void {
const store = &dg.ctypes.set;
- const module = dg.module;
+ const mod = dg.module;
- const fn_decl = module.declPtr(fn_decl_index);
+ const fn_decl = mod.declPtr(fn_decl_index);
const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind);
const fn_info = fn_decl.ty.fnInfo();
@@ -1498,7 +1503,7 @@ pub const DeclGen = struct {
const trailing = try renderTypePrefix(
dg.decl_index,
store.*,
- module,
+ mod,
w,
fn_cty_idx,
.suffix,
@@ -1525,7 +1530,7 @@ pub const DeclGen = struct {
try renderTypeSuffix(
dg.decl_index,
store.*,
- module,
+ mod,
w,
fn_cty_idx,
.suffix,
@@ -1577,9 +1582,9 @@ pub const DeclGen = struct {
fn renderCType(dg: *DeclGen, w: anytype, idx: CType.Index) error{ OutOfMemory, AnalysisFail }!void {
const store = &dg.ctypes.set;
- const module = dg.module;
- _ = try renderTypePrefix(dg.decl_index, store.*, module, w, idx, .suffix, .{});
- try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix, .{});
+ const mod = dg.module;
+ _ = try renderTypePrefix(dg.decl_index, store.*, mod, w, idx, .suffix, .{});
+ try renderTypeSuffix(dg.decl_index, store.*, mod, w, idx, .suffix, .{});
}
const IntCastContext = union(enum) {
@@ -1619,18 +1624,18 @@ pub const DeclGen = struct {
/// | > 64 bit integer | < 64 bit integer | zig_make_<dest_ty>(0, src)
/// | > 64 bit integer | > 64 bit integer | zig_make_<dest_ty>(zig_hi_<src_ty>(src), zig_lo_<src_ty>(src))
fn renderIntCast(dg: *DeclGen, w: anytype, dest_ty: Type, context: IntCastContext, src_ty: Type, location: ValueRenderLocation) !void {
- const target = dg.module.getTarget();
- const dest_bits = dest_ty.bitSize(target);
- const dest_int_info = dest_ty.intInfo(target);
+ const mod = dg.module;
+ const dest_bits = dest_ty.bitSize(mod);
+ const dest_int_info = dest_ty.intInfo(mod);
- const src_is_ptr = src_ty.isPtrAtRuntime();
+ const src_is_ptr = src_ty.isPtrAtRuntime(mod);
const src_eff_ty: Type = if (src_is_ptr) switch (dest_int_info.signedness) {
.unsigned => Type.usize,
.signed => Type.isize,
} else src_ty;
- const src_bits = src_eff_ty.bitSize(target);
- const src_int_info = if (src_eff_ty.isAbiInt()) src_eff_ty.intInfo(target) else null;
+ const src_bits = src_eff_ty.bitSize(mod);
+ const src_int_info = if (src_eff_ty.isAbiInt(mod)) src_eff_ty.intInfo(mod) else null;
if (dest_bits <= 64 and src_bits <= 64) {
const needs_cast = src_int_info == null or
(toCIntBits(dest_int_info.bits) != toCIntBits(src_int_info.?.bits) or
@@ -1703,8 +1708,8 @@ pub const DeclGen = struct {
alignment: u32,
kind: CType.Kind,
) error{ OutOfMemory, AnalysisFail }!void {
- const target = dg.module.getTarget();
- const alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target));
+ const mod = dg.module;
+ const alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod));
try dg.renderCTypeAndName(w, try dg.typeToIndex(ty, kind), name, qualifiers, alignas);
}
@@ -1717,7 +1722,7 @@ pub const DeclGen = struct {
alignas: CType.AlignAs,
) error{ OutOfMemory, AnalysisFail }!void {
const store = &dg.ctypes.set;
- const module = dg.module;
+ const mod = dg.module;
switch (std.math.order(alignas.@"align", alignas.abi)) {
.lt => try w.print("zig_under_align({}) ", .{alignas.getAlign()}),
@@ -1726,22 +1731,23 @@ pub const DeclGen = struct {
}
const trailing =
- try renderTypePrefix(dg.decl_index, store.*, module, w, cty_idx, .suffix, qualifiers);
+ try renderTypePrefix(dg.decl_index, store.*, mod, w, cty_idx, .suffix, qualifiers);
try w.print("{}", .{trailing});
try dg.writeCValue(w, name);
- try renderTypeSuffix(dg.decl_index, store.*, module, w, cty_idx, .suffix, .{});
+ try renderTypeSuffix(dg.decl_index, store.*, mod, w, cty_idx, .suffix, .{});
}
fn declIsGlobal(dg: *DeclGen, tv: TypedValue) bool {
+ const mod = dg.module;
switch (tv.val.tag()) {
.extern_fn => return true,
.function => {
const func = tv.val.castTag(.function).?.data;
- return dg.module.decl_exports.contains(func.owner_decl);
+ return mod.decl_exports.contains(func.owner_decl);
},
.variable => {
const variable = tv.val.castTag(.variable).?.data;
- return dg.module.decl_exports.contains(variable.owner_decl);
+ return mod.decl_exports.contains(variable.owner_decl);
},
else => unreachable,
}
@@ -1838,10 +1844,11 @@ pub const DeclGen = struct {
}
fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: Decl.Index, export_index: u32) !void {
- const decl = dg.module.declPtr(decl_index);
- dg.module.markDeclAlive(decl);
+ const mod = dg.module;
+ const decl = mod.declPtr(decl_index);
+ mod.markDeclAlive(decl);
- if (dg.module.decl_exports.get(decl_index)) |exports| {
+ if (mod.decl_exports.get(decl_index)) |exports| {
try writer.writeAll(exports.items[export_index].options.name);
} else if (decl.isExtern()) {
try writer.writeAll(mem.span(decl.name));
@@ -1850,7 +1857,7 @@ pub const DeclGen = struct {
// expand to 3x the length of its input, but let's cut it off at a much shorter limit.
var name: [100]u8 = undefined;
var name_stream = std.io.fixedBufferStream(&name);
- decl.renderFullyQualifiedName(dg.module, name_stream.writer()) catch |err| switch (err) {
+ decl.renderFullyQualifiedName(mod, name_stream.writer()) catch |err| switch (err) {
error.NoSpaceLeft => {},
};
try writer.print("{}__{d}", .{
@@ -1894,10 +1901,10 @@ pub const DeclGen = struct {
.bits => {},
}
- const target = dg.module.getTarget();
- const int_info = if (ty.isAbiInt()) ty.intInfo(target) else std.builtin.Type.Int{
+ const mod = dg.module;
+ const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{
.signedness = .unsigned,
- .bits = @intCast(u16, ty.bitSize(target)),
+ .bits = @intCast(u16, ty.bitSize(mod)),
};
if (is_big) try writer.print(", {}", .{int_info.signedness == .signed});
@@ -1916,6 +1923,7 @@ pub const DeclGen = struct {
val: Value,
loc: ValueRenderLocation,
) !std.fmt.Formatter(formatIntLiteral) {
+ const mod = dg.module;
const kind: CType.Kind = switch (loc) {
.FunctionArgument => .parameter,
.Initializer, .Other => .complete,
@@ -1923,7 +1931,7 @@ pub const DeclGen = struct {
};
return std.fmt.Formatter(formatIntLiteral){ .data = .{
.dg = dg,
- .int_info = ty.intInfo(dg.module.getTarget()),
+ .int_info = ty.intInfo(mod),
.kind = kind,
.cty = try dg.typeToCType(ty, kind),
.val = val,
@@ -2646,11 +2654,12 @@ pub fn genDecl(o: *Object) !void {
const tracy = trace(@src());
defer tracy.end();
+ const mod = o.dg.module;
const decl = o.dg.decl.?;
const decl_c_value = .{ .decl = o.dg.decl_index.unwrap().? };
const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val };
- if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime()) return;
+ if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return;
if (tv.val.tag() == .extern_fn) {
const fwd_decl_writer = o.dg.fwd_decl.writer();
try fwd_decl_writer.writeAll("zig_extern ");
@@ -2704,8 +2713,9 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void {
.val = dg.decl.?.val,
};
const writer = dg.fwd_decl.writer();
+ const mod = dg.module;
- switch (tv.ty.zigTypeTag()) {
+ switch (tv.ty.zigTypeTag(mod)) {
.Fn => {
const is_global = dg.declIsGlobal(tv);
if (is_global) {
@@ -2791,6 +2801,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con
}
fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void {
+ const mod = f.object.dg.module;
const air_tags = f.air.instructions.items(.tag);
for (body) |inst| {
@@ -2826,10 +2837,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none),
.rem => blk: {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType();
+ const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType(mod);
// For binary operations @TypeOf(lhs)==@TypeOf(rhs),
// so we only check one.
- break :blk if (lhs_scalar_ty.isInt())
+ break :blk if (lhs_scalar_ty.isInt(mod))
try airBinOp(f, inst, "%", "rem", .none)
else
try airBinFloatOp(f, inst, "fmod");
@@ -3095,9 +3106,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [
}
fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const inst_ty = f.air.typeOfIndex(inst);
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- if (!inst_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3120,13 +3132,14 @@ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
const inst_ty = f.air.typeOfIndex(inst);
const ptr_ty = f.air.typeOf(bin_op.lhs);
const elem_ty = ptr_ty.childType();
- const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime();
+ const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod);
const ptr = try f.resolveInst(bin_op.lhs);
const index = try f.resolveInst(bin_op.rhs);
@@ -3155,9 +3168,10 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const inst_ty = f.air.typeOfIndex(inst);
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- if (!inst_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3180,13 +3194,14 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
const inst_ty = f.air.typeOfIndex(inst);
const slice_ty = f.air.typeOf(bin_op.lhs);
- const elem_ty = slice_ty.elemType2();
- const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime();
+ const elem_ty = slice_ty.elemType2(mod);
+ const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod);
const slice = try f.resolveInst(bin_op.lhs);
const index = try f.resolveInst(bin_op.rhs);
@@ -3209,9 +3224,10 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const inst_ty = f.air.typeOfIndex(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -3234,14 +3250,14 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const inst_ty = f.air.typeOfIndex(inst);
const elem_type = inst_ty.elemType();
- if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime()) return .{ .undef = inst_ty };
+ if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty };
- const target = f.object.dg.module.getTarget();
const local = try f.allocLocalValue(
elem_type,
- inst_ty.ptrAlignment(target),
+ inst_ty.ptrAlignment(mod),
);
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
const gpa = f.object.dg.module.gpa;
@@ -3250,14 +3266,14 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const inst_ty = f.air.typeOfIndex(inst);
const elem_ty = inst_ty.elemType();
- if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return .{ .undef = inst_ty };
+ if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty };
- const target = f.object.dg.module.getTarget();
const local = try f.allocLocalValue(
elem_ty,
- inst_ty.ptrAlignment(target),
+ inst_ty.ptrAlignment(mod),
);
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
const gpa = f.object.dg.module.gpa;
@@ -3290,14 +3306,15 @@ fn airArg(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const ptr_ty = f.air.typeOf(ty_op.operand);
- const ptr_scalar_ty = ptr_ty.scalarType();
+ const ptr_scalar_ty = ptr_ty.scalarType(mod);
const ptr_info = ptr_scalar_ty.ptrInfo().data;
const src_ty = ptr_info.pointee_type;
- if (!src_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try reap(f, inst, &.{ty_op.operand});
return .none;
}
@@ -3306,9 +3323,8 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ty_op.operand});
- const target = f.object.dg.module.getTarget();
- const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(target);
- const is_array = lowersToArray(src_ty, target);
+ const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(mod);
+ const is_array = lowersToArray(src_ty, mod);
const need_memcpy = !is_aligned or is_array;
const writer = f.object.writer();
@@ -3327,17 +3343,10 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try f.renderType(writer, src_ty);
try writer.writeAll("))");
} else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) {
- var host_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = ptr_info.host_size * 8,
- };
- const host_ty = Type.initPayload(&host_pl.base);
+ const host_bits: u16 = ptr_info.host_size * 8;
+ const host_ty = try mod.intType(.unsigned, host_bits);
- var bit_offset_ty_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = Type.smallestUnsignedBits(host_pl.data - 1),
- };
- const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base);
+ const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
var bit_offset_val_pl: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
@@ -3345,11 +3354,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
};
const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
- var field_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, src_ty.bitSize(target)),
- };
- const field_ty = Type.initPayload(&field_pl.base);
+ const field_ty = try mod.intType(.unsigned, @intCast(u16, src_ty.bitSize(mod)));
try f.writeCValue(writer, local, .Other);
try v.elem(f, writer);
@@ -3360,9 +3365,9 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("((");
try f.renderType(writer, field_ty);
try writer.writeByte(')');
- const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64;
+ const cant_cast = host_ty.isInt(mod) and host_ty.bitSize(mod) > 64;
if (cant_cast) {
- if (field_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ if (field_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
@@ -3390,23 +3395,23 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
+ const mod = f.object.dg.module;
const un_op = f.air.instructions.items(.data)[inst].un_op;
const writer = f.object.writer();
- const target = f.object.dg.module.getTarget();
const op_inst = Air.refToIndex(un_op);
const op_ty = f.air.typeOf(un_op);
const ret_ty = if (is_ptr) op_ty.childType() else op_ty;
var lowered_ret_buf: LowerFnRetTyBuffer = undefined;
- const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target);
+ const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod);
if (op_inst != null and f.air.instructions.items(.tag)[op_inst.?] == .call_always_tail) {
try reap(f, inst, &.{un_op});
_ = try airCall(f, op_inst.?, .always_tail);
- } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) {
+ } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
var deref = is_ptr;
- const is_array = lowersToArray(ret_ty, target);
+ const is_array = lowersToArray(ret_ty, mod);
const ret_val = if (is_array) ret_val: {
const array_local = try f.allocLocal(inst, lowered_ret_ty);
try writer.writeAll("memcpy(");
@@ -3442,15 +3447,16 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
}
fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
+ const inst_scalar_ty = inst_ty.scalarType(mod);
const operand_ty = f.air.typeOf(ty_op.operand);
- const scalar_ty = operand_ty.scalarType();
+ const scalar_ty = operand_ty.scalarType(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -3467,20 +3473,20 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
- const target = f.object.dg.module.getTarget();
- const dest_int_info = inst_scalar_ty.intInfo(target);
+ const inst_scalar_ty = inst_ty.scalarType(mod);
+ const dest_int_info = inst_scalar_ty.intInfo(mod);
const dest_bits = dest_int_info.bits;
const dest_c_bits = toCIntBits(dest_int_info.bits) orelse
return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
const operand_ty = f.air.typeOf(ty_op.operand);
- const scalar_ty = operand_ty.scalarType();
- const scalar_int_info = scalar_ty.intInfo(target);
+ const scalar_ty = operand_ty.scalarType(mod);
+ const scalar_int_info = scalar_ty.intInfo(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -3515,7 +3521,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
var stack align(@alignOf(ExpectedContents)) =
std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
- const mask_val = try inst_scalar_ty.maxInt(stack.get(), target);
+ const mask_val = try inst_scalar_ty.maxInt(stack.get(), mod);
try writer.writeAll("zig_and_");
try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
try writer.writeByte('(');
@@ -3577,17 +3583,18 @@ fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
+ const mod = f.object.dg.module;
// *a = b;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = f.air.typeOf(bin_op.lhs);
- const ptr_scalar_ty = ptr_ty.scalarType();
+ const ptr_scalar_ty = ptr_ty.scalarType(mod);
const ptr_info = ptr_scalar_ty.ptrInfo().data;
const ptr_val = try f.resolveInst(bin_op.lhs);
const src_ty = f.air.typeOf(bin_op.rhs);
- const val_is_undef = if (f.air.value(bin_op.rhs)) |v| v.isUndefDeep() else false;
+ const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false;
if (val_is_undef) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
@@ -3602,10 +3609,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
return .none;
}
- const target = f.object.dg.module.getTarget();
const is_aligned = ptr_info.@"align" == 0 or
- ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(target);
- const is_array = lowersToArray(ptr_info.pointee_type, target);
+ ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(mod);
+ const is_array = lowersToArray(ptr_info.pointee_type, mod);
const need_memcpy = !is_aligned or is_array;
const src_val = try f.resolveInst(bin_op.rhs);
@@ -3647,14 +3653,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
} else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) {
const host_bits = ptr_info.host_size * 8;
- var host_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = host_bits };
- const host_ty = Type.initPayload(&host_pl.base);
+ const host_ty = try mod.intType(.unsigned, host_bits);
- var bit_offset_ty_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = Type.smallestUnsignedBits(host_bits - 1),
- };
- const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base);
+ const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
var bit_offset_val_pl: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
@@ -3662,7 +3663,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
};
const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
- const src_bits = src_ty.bitSize(target);
+ const src_bits = src_ty.bitSize(mod);
const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb;
var stack align(@alignOf(ExpectedContents)) =
@@ -3693,9 +3694,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(host_ty, mask_val)});
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeByte('(');
- const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64;
+ const cant_cast = host_ty.isInt(mod) and host_ty.bitSize(mod) > 64;
if (cant_cast) {
- if (src_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ if (src_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_make_");
try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
try writer.writeAll("(0, ");
@@ -3705,7 +3706,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.writeByte(')');
}
- if (src_ty.isPtrAtRuntime()) {
+ if (src_ty.isPtrAtRuntime(mod)) {
try writer.writeByte('(');
try f.renderType(writer, Type.usize);
try writer.writeByte(')');
@@ -3728,6 +3729,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: BuiltinInfo) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -3737,7 +3739,7 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
const inst_ty = f.air.typeOfIndex(inst);
const operand_ty = f.air.typeOf(bin_op.lhs);
- const scalar_ty = operand_ty.scalarType();
+ const scalar_ty = operand_ty.scalarType(mod);
const w = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -3765,9 +3767,10 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
}
fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand_ty = f.air.typeOf(ty_op.operand);
- const scalar_ty = operand_ty.scalarType();
+ const scalar_ty = operand_ty.scalarType(mod);
if (scalar_ty.tag() != .bool) return try airUnBuiltinCall(f, inst, "not", .bits);
const op = try f.resolveInst(ty_op.operand);
@@ -3797,11 +3800,11 @@ fn airBinOp(
operation: []const u8,
info: BuiltinInfo,
) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const operand_ty = f.air.typeOf(bin_op.lhs);
- const scalar_ty = operand_ty.scalarType();
- const target = f.object.dg.module.getTarget();
- if ((scalar_ty.isInt() and scalar_ty.bitSize(target) > 64) or scalar_ty.isRuntimeFloat())
+ const scalar_ty = operand_ty.scalarType(mod);
+ if ((scalar_ty.isInt(mod) and scalar_ty.bitSize(mod) > 64) or scalar_ty.isRuntimeFloat())
return try airBinBuiltinCall(f, inst, operation, info);
const lhs = try f.resolveInst(bin_op.lhs);
@@ -3835,12 +3838,12 @@ fn airCmpOp(
data: anytype,
operator: std.math.CompareOperator,
) !CValue {
+ const mod = f.object.dg.module;
const lhs_ty = f.air.typeOf(data.lhs);
- const scalar_ty = lhs_ty.scalarType();
+ const scalar_ty = lhs_ty.scalarType(mod);
- const target = f.object.dg.module.getTarget();
- const scalar_bits = scalar_ty.bitSize(target);
- if (scalar_ty.isInt() and scalar_bits > 64)
+ const scalar_bits = scalar_ty.bitSize(mod);
+ if (scalar_ty.isInt(mod) and scalar_bits > 64)
return airCmpBuiltinCall(
f,
inst,
@@ -3885,12 +3888,12 @@ fn airEquality(
inst: Air.Inst.Index,
operator: std.math.CompareOperator,
) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const operand_ty = f.air.typeOf(bin_op.lhs);
- const target = f.object.dg.module.getTarget();
- const operand_bits = operand_ty.bitSize(target);
- if (operand_ty.isInt() and operand_bits > 64)
+ const operand_bits = operand_ty.bitSize(mod);
+ if (operand_ty.isInt(mod) and operand_bits > 64)
return airCmpBuiltinCall(
f,
inst,
@@ -3912,7 +3915,7 @@ fn airEquality(
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
- if (operand_ty.zigTypeTag() == .Optional and !operand_ty.optionalReprIsPayload()) {
+ if (operand_ty.zigTypeTag(mod) == .Optional and !operand_ty.optionalReprIsPayload(mod)) {
// (A && B) || (C && (A == B))
// A = lhs.is_null ; B = rhs.is_null ; C = rhs.payload == lhs.payload
@@ -3965,6 +3968,7 @@ fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -3973,8 +3977,8 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
- const elem_ty = inst_scalar_ty.elemType2();
+ const inst_scalar_ty = inst_ty.scalarType(mod);
+ const elem_ty = inst_scalar_ty.elemType2(mod);
const local = try f.allocLocal(inst, inst_ty);
const writer = f.object.writer();
@@ -3983,7 +3987,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
try v.elem(f, writer);
try writer.writeAll(" = ");
- if (elem_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (elem_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// We must convert to and from integer types to prevent UB if the operation
// results in a NULL pointer, or if LHS is NULL. The operation is only UB
// if the result is NULL and then dereferenced.
@@ -4012,13 +4016,13 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
}
fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []const u8) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
+ const inst_scalar_ty = inst_ty.scalarType(mod);
- const target = f.object.dg.module.getTarget();
- if (inst_scalar_ty.isInt() and inst_scalar_ty.bitSize(target) > 64)
+ if (inst_scalar_ty.isInt(mod) and inst_scalar_ty.bitSize(mod) > 64)
return try airBinBuiltinCall(f, inst, operation[1..], .none);
if (inst_scalar_ty.isRuntimeFloat())
return try airBinFloatOp(f, inst, operation);
@@ -4092,12 +4096,11 @@ fn airCall(
inst: Air.Inst.Index,
modifier: std.builtin.CallModifier,
) !CValue {
+ const mod = f.object.dg.module;
// Not even allowed to call panic in a naked function.
if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none;
const gpa = f.object.dg.gpa;
- const module = f.object.dg.module;
- const target = module.getTarget();
const writer = f.object.writer();
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
@@ -4116,7 +4119,7 @@ fn airCall(
resolved_arg.* = try f.resolveInst(arg);
if (arg_cty != try f.typeToIndex(arg_ty, .complete)) {
var lowered_arg_buf: LowerFnRetTyBuffer = undefined;
- const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, target);
+ const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, mod);
const array_local = try f.allocLocal(inst, lowered_arg_ty);
try writer.writeAll("memcpy(");
@@ -4139,7 +4142,7 @@ fn airCall(
}
const callee_ty = f.air.typeOf(pl_op.operand);
- const fn_ty = switch (callee_ty.zigTypeTag()) {
+ const fn_ty = switch (callee_ty.zigTypeTag(mod)) {
.Fn => callee_ty,
.Pointer => callee_ty.childType(),
else => unreachable,
@@ -4147,13 +4150,13 @@ fn airCall(
const ret_ty = fn_ty.fnReturnType();
var lowered_ret_buf: LowerFnRetTyBuffer = undefined;
- const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target);
+ const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod);
const result_local = result: {
if (modifier == .always_tail) {
try writer.writeAll("zig_always_tail return ");
break :result .none;
- } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) {
+ } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
break :result .none;
} else if (f.liveness.isUnused(inst)) {
try writer.writeByte('(');
@@ -4171,7 +4174,7 @@ fn airCall(
callee: {
known: {
const fn_decl = fn_decl: {
- const callee_val = f.air.value(pl_op.operand) orelse break :known;
+ const callee_val = f.air.value(pl_op.operand, mod) orelse break :known;
break :fn_decl switch (callee_val.tag()) {
.extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl,
.function => callee_val.castTag(.function).?.data.owner_decl,
@@ -4181,9 +4184,9 @@ fn airCall(
};
switch (modifier) {
.auto, .always_tail => try f.object.dg.renderDeclName(writer, fn_decl, 0),
- inline .never_tail, .never_inline => |mod| try writer.writeAll(try f.getLazyFnName(
- @unionInit(LazyFnKey, @tagName(mod), fn_decl),
- @unionInit(LazyFnValue.Data, @tagName(mod), {}),
+ inline .never_tail, .never_inline => |m| try writer.writeAll(try f.getLazyFnName(
+ @unionInit(LazyFnKey, @tagName(m), fn_decl),
+ @unionInit(LazyFnValue.Data, @tagName(m), {}),
)),
else => unreachable,
}
@@ -4211,7 +4214,7 @@ fn airCall(
try writer.writeAll(");\n");
const result = result: {
- if (result_local == .none or !lowersToArray(ret_ty, target))
+ if (result_local == .none or !lowersToArray(ret_ty, mod))
break :result result_local;
const array_local = try f.allocLocal(inst, ret_ty);
@@ -4254,9 +4257,10 @@ fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const name = f.air.nullTerminatedString(pl_op.payload);
- const operand_is_undef = if (f.air.value(pl_op.operand)) |v| v.isUndefDeep() else false;
+ const operand_is_undef = if (f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep() else false;
if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{pl_op.operand});
@@ -4330,12 +4334,13 @@ fn lowerTry(
err_union_ty: Type,
is_ptr: bool,
) !CValue {
+ const mod = f.object.dg.module;
const err_union = try f.resolveInst(operand);
const inst_ty = f.air.typeOfIndex(inst);
const liveness_condbr = f.liveness.getCondBr(inst);
const writer = f.object.writer();
const payload_ty = err_union_ty.errorUnionPayload();
- const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime();
+ const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod);
if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
try writer.writeAll("if (");
@@ -4431,6 +4436,8 @@ const LocalResult = struct {
need_free: bool,
fn move(lr: LocalResult, f: *Function, inst: Air.Inst.Index, dest_ty: Type) !CValue {
+ const mod = f.object.dg.module;
+
if (lr.need_free) {
// Move the freshly allocated local to be owned by this instruction,
// by returning it here instead of freeing it.
@@ -4441,7 +4448,7 @@ const LocalResult = struct {
try lr.free(f);
const writer = f.object.writer();
try f.writeCValue(writer, local, .Other);
- if (dest_ty.isAbiInt()) {
+ if (dest_ty.isAbiInt(mod)) {
try writer.writeAll(" = ");
} else {
try writer.writeAll(" = (");
@@ -4461,12 +4468,13 @@ const LocalResult = struct {
};
fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !LocalResult {
- const target = f.object.dg.module.getTarget();
+ const mod = f.object.dg.module;
+ const target = mod.getTarget();
const writer = f.object.writer();
- if (operand_ty.isAbiInt() and dest_ty.isAbiInt()) {
- const src_info = dest_ty.intInfo(target);
- const dest_info = operand_ty.intInfo(target);
+ if (operand_ty.isAbiInt(mod) and dest_ty.isAbiInt(mod)) {
+ const src_info = dest_ty.intInfo(mod);
+ const dest_info = operand_ty.intInfo(mod);
if (src_info.signedness == dest_info.signedness and
src_info.bits == dest_info.bits)
{
@@ -4477,7 +4485,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
}
}
- if (dest_ty.isPtrAtRuntime() and operand_ty.isPtrAtRuntime()) {
+ if (dest_ty.isPtrAtRuntime(mod) and operand_ty.isPtrAtRuntime(mod)) {
const local = try f.allocLocal(0, dest_ty);
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = (");
@@ -4494,7 +4502,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
const operand_lval = if (operand == .constant) blk: {
const operand_local = try f.allocLocal(0, operand_ty);
try f.writeCValue(writer, operand_local, .Other);
- if (operand_ty.isAbiInt()) {
+ if (operand_ty.isAbiInt(mod)) {
try writer.writeAll(" = ");
} else {
try writer.writeAll(" = (");
@@ -4516,13 +4524,10 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
try writer.writeAll("));\n");
// Ensure padding bits have the expected value.
- if (dest_ty.isAbiInt()) {
+ if (dest_ty.isAbiInt(mod)) {
const dest_cty = try f.typeToCType(dest_ty, .complete);
- const dest_info = dest_ty.intInfo(target);
- var info_ty_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dest_info.signedness) {
- .unsigned => .int_unsigned,
- .signed => .int_signed,
- } }, .data = dest_info.bits };
+ const dest_info = dest_ty.intInfo(mod);
+ var bits: u16 = dest_info.bits;
var wrap_cty: ?CType = null;
var need_bitcasts = false;
@@ -4535,9 +4540,9 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
const elem_cty = f.indexToCType(pl.data.elem_type);
wrap_cty = elem_cty.toSignedness(dest_info.signedness);
need_bitcasts = wrap_cty.?.tag() == .zig_i128;
- info_ty_pl.data -= 1;
- info_ty_pl.data %= @intCast(u16, f.byteSize(elem_cty) * 8);
- info_ty_pl.data += 1;
+ bits -= 1;
+ bits %= @intCast(u16, f.byteSize(elem_cty) * 8);
+ bits += 1;
}
try writer.writeAll(" = ");
if (need_bitcasts) {
@@ -4546,7 +4551,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
try writer.writeByte('(');
}
try writer.writeAll("zig_wrap_");
- const info_ty = Type.initPayload(&info_ty_pl.base);
+ const info_ty = try mod.intType(dest_info.signedness, bits);
if (wrap_cty) |cty|
try f.object.dg.renderCTypeForBuiltinFnName(writer, cty)
else
@@ -4675,6 +4680,7 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const condition = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{pl_op.operand});
@@ -4683,11 +4689,11 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
try writer.writeAll("switch (");
- if (condition_ty.zigTypeTag() == .Bool) {
+ if (condition_ty.zigTypeTag(mod) == .Bool) {
try writer.writeByte('(');
try f.renderType(writer, Type.u1);
try writer.writeByte(')');
- } else if (condition_ty.isPtrAtRuntime()) {
+ } else if (condition_ty.isPtrAtRuntime(mod)) {
try writer.writeByte('(');
try f.renderType(writer, Type.usize);
try writer.writeByte(')');
@@ -4714,12 +4720,12 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
for (items) |item| {
try f.object.indent_writer.insertNewline();
try writer.writeAll("case ");
- if (condition_ty.isPtrAtRuntime()) {
+ if (condition_ty.isPtrAtRuntime(mod)) {
try writer.writeByte('(');
try f.renderType(writer, Type.usize);
try writer.writeByte(')');
}
- try f.object.dg.renderValue(writer, condition_ty, f.air.value(item).?, .Other);
+ try f.object.dg.renderValue(writer, condition_ty, f.air.value(item, mod).?, .Other);
try writer.writeByte(':');
}
try writer.writeByte(' ');
@@ -4764,6 +4770,7 @@ fn asmInputNeedsLocal(constraint: []const u8, value: CValue) bool {
}
fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
@@ -4778,7 +4785,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const result = result: {
const writer = f.object.writer();
const inst_ty = f.air.typeOfIndex(inst);
- const local = if (inst_ty.hasRuntimeBitsIgnoreComptime()) local: {
+ const local = if (inst_ty.hasRuntimeBitsIgnoreComptime(mod)) local: {
const local = try f.allocLocal(inst, inst_ty);
if (f.wantSafety()) {
try f.writeCValue(writer, local, .Other);
@@ -5025,6 +5032,7 @@ fn airIsNull(
operator: []const u8,
is_ptr: bool,
) !CValue {
+ const mod = f.object.dg.module;
const un_op = f.air.instructions.items(.data)[inst].un_op;
const writer = f.object.writer();
@@ -5046,14 +5054,14 @@ fn airIsNull(
const payload_ty = optional_ty.optionalChild(&payload_buf);
var slice_ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime())
+ const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
TypedValue{ .ty = Type.bool, .val = Value.true }
- else if (optional_ty.isPtrLikeOptional())
+ else if (optional_ty.isPtrLikeOptional(mod))
// operand is a regular pointer, test `operand !=/== NULL`
TypedValue{ .ty = optional_ty, .val = Value.null }
- else if (payload_ty.zigTypeTag() == .ErrorSet)
+ else if (payload_ty.zigTypeTag(mod) == .ErrorSet)
TypedValue{ .ty = payload_ty, .val = Value.zero }
- else if (payload_ty.isSlice() and optional_ty.optionalReprIsPayload()) rhs: {
+ else if (payload_ty.isSlice() and optional_ty.optionalReprIsPayload(mod)) rhs: {
try writer.writeAll(".ptr");
const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf);
break :rhs TypedValue{ .ty = slice_ptr_ty, .val = Value.null };
@@ -5070,6 +5078,7 @@ fn airIsNull(
}
fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
@@ -5079,7 +5088,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return .none;
}
@@ -5087,7 +5096,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- if (opt_ty.optionalReprIsPayload()) {
+ if (opt_ty.optionalReprIsPayload(mod)) {
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
try f.writeCValue(writer, operand, .Other);
@@ -5104,6 +5113,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const writer = f.object.writer();
@@ -5113,14 +5123,14 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const opt_ty = ptr_ty.childType();
const inst_ty = f.air.typeOfIndex(inst);
- if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime()) {
+ if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime(mod)) {
return .{ .undef = inst_ty };
}
const local = try f.allocLocal(inst, inst_ty);
try f.writeCValue(writer, local, .Other);
- if (opt_ty.optionalReprIsPayload()) {
+ if (opt_ty.optionalReprIsPayload(mod)) {
// the operand is just a regular pointer, no need to do anything special.
// *?*T -> **T and ?*T -> *T are **T -> **T and *T -> *T in C
try writer.writeAll(" = ");
@@ -5134,6 +5144,7 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const writer = f.object.writer();
const operand = try f.resolveInst(ty_op.operand);
@@ -5144,7 +5155,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.air.typeOfIndex(inst);
- if (opt_ty.optionalReprIsPayload()) {
+ if (opt_ty.optionalReprIsPayload(mod)) {
if (f.liveness.isUnused(inst)) {
return .none;
}
@@ -5179,36 +5190,36 @@ fn fieldLocation(
container_ty: Type,
field_ptr_ty: Type,
field_index: u32,
- target: std.Target,
+ mod: *const Module,
) union(enum) {
begin: void,
field: CValue,
byte_offset: u32,
end: void,
} {
- return switch (container_ty.zigTypeTag()) {
+ return switch (container_ty.zigTypeTag(mod)) {
.Struct => switch (container_ty.containerLayout()) {
.Auto, .Extern => for (field_index..container_ty.structFieldCount()) |next_field_index| {
if (container_ty.structFieldIsComptime(next_field_index)) continue;
const field_ty = container_ty.structFieldType(next_field_index);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
break .{ .field = if (container_ty.isSimpleTuple())
.{ .field = next_field_index }
else
.{ .identifier = container_ty.structFieldName(next_field_index) } };
- } else if (container_ty.hasRuntimeBitsIgnoreComptime()) .end else .begin,
+ } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin,
.Packed => if (field_ptr_ty.ptrInfo().data.host_size == 0)
- .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, target) }
+ .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) }
else
.begin,
},
.Union => switch (container_ty.containerLayout()) {
.Auto, .Extern => {
const field_ty = container_ty.structFieldType(field_index);
- if (!field_ty.hasRuntimeBitsIgnoreComptime())
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod))
return if (container_ty.unionTagTypeSafety() != null and
- !container_ty.unionHasAllZeroBitFieldTypes())
+ !container_ty.unionHasAllZeroBitFieldTypes(mod))
.{ .field = .{ .identifier = "payload" } }
else
.begin;
@@ -5252,10 +5263,10 @@ fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue
}
fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
- const target = f.object.dg.module.getTarget();
const container_ptr_ty = f.air.typeOfIndex(inst);
const container_ty = container_ptr_ty.childType();
@@ -5270,7 +5281,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.renderType(writer, container_ptr_ty);
try writer.writeByte(')');
- switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, target)) {
+ switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, mod)) {
.begin => try f.writeCValue(writer, field_ptr_val, .Initializer),
.field => |field| {
var u8_ptr_pl = field_ptr_ty.ptrInfo();
@@ -5321,7 +5332,7 @@ fn fieldPtr(
container_ptr_val: CValue,
field_index: u32,
) !CValue {
- const target = f.object.dg.module.getTarget();
+ const mod = f.object.dg.module;
const container_ty = container_ptr_ty.elemType();
const field_ptr_ty = f.air.typeOfIndex(inst);
@@ -5335,7 +5346,7 @@ fn fieldPtr(
try f.renderType(writer, field_ptr_ty);
try writer.writeByte(')');
- switch (fieldLocation(container_ty, field_ptr_ty, field_index, target)) {
+ switch (fieldLocation(container_ty, field_ptr_ty, field_index, mod)) {
.begin => try f.writeCValue(writer, container_ptr_val, .Initializer),
.field => |field| {
try writer.writeByte('&');
@@ -5370,16 +5381,16 @@ fn fieldPtr(
}
fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.StructField, ty_pl.payload).data;
const inst_ty = f.air.typeOfIndex(inst);
- if (!inst_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try reap(f, inst, &.{extra.struct_operand});
return .none;
}
- const target = f.object.dg.module.getTarget();
const struct_byval = try f.resolveInst(extra.struct_operand);
try reap(f, inst, &.{extra.struct_operand});
const struct_ty = f.air.typeOf(extra.struct_operand);
@@ -5396,32 +5407,21 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
.{ .identifier = struct_ty.structFieldName(extra.field_index) },
.Packed => {
const struct_obj = struct_ty.castTag(.@"struct").?.data;
- const int_info = struct_ty.intInfo(target);
+ const int_info = struct_ty.intInfo(mod);
- var bit_offset_ty_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = Type.smallestUnsignedBits(int_info.bits - 1),
- };
- const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base);
+ const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
var bit_offset_val_pl: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
- .data = struct_obj.packedFieldBitOffset(target, extra.field_index),
+ .data = struct_obj.packedFieldBitOffset(mod, extra.field_index),
};
const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
- const field_int_signedness = if (inst_ty.isAbiInt())
- inst_ty.intInfo(target).signedness
+ const field_int_signedness = if (inst_ty.isAbiInt(mod))
+ inst_ty.intInfo(mod).signedness
else
.unsigned;
- var field_int_pl = Type.Payload.Bits{
- .base = .{ .tag = switch (field_int_signedness) {
- .unsigned => .int_unsigned,
- .signed => .int_signed,
- } },
- .data = @intCast(u16, inst_ty.bitSize(target)),
- };
- const field_int_ty = Type.initPayload(&field_int_pl.base);
+ const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod)));
const temp_local = try f.allocLocal(inst, field_int_ty);
try f.writeCValue(writer, temp_local, .Other);
@@ -5432,7 +5432,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(')');
const cant_cast = int_info.bits > 64;
if (cant_cast) {
- if (field_int_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+ if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
try writer.writeAll("zig_lo_");
try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
try writer.writeByte('(');
@@ -5511,6 +5511,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
/// *(E!T) -> E
/// Note that the result is never a pointer.
fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const inst_ty = f.air.typeOfIndex(inst);
@@ -5518,13 +5519,13 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_ty = f.air.typeOf(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
- const operand_is_ptr = operand_ty.zigTypeTag() == .Pointer;
+ const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer;
const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
const error_ty = error_union_ty.errorUnionSet();
const payload_ty = error_union_ty.errorUnionPayload();
const local = try f.allocLocal(inst, inst_ty);
- if (!payload_ty.hasRuntimeBits() and operand == .local and operand.local == local.new_local) {
+ if (!payload_ty.hasRuntimeBits(mod) and operand == .local and operand.local == local.new_local) {
// The store will be 'x = x'; elide it.
return local;
}
@@ -5533,7 +5534,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
- if (!payload_ty.hasRuntimeBits()) {
+ if (!payload_ty.hasRuntimeBits(mod)) {
try f.writeCValue(writer, operand, .Other);
} else {
if (!error_ty.errorSetIsEmpty())
@@ -5549,6 +5550,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const inst_ty = f.air.typeOfIndex(inst);
@@ -5558,7 +5560,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
const error_union_ty = if (is_ptr) operand_ty.childType() else operand_ty;
const writer = f.object.writer();
- if (!error_union_ty.errorUnionPayload().hasRuntimeBits()) {
+ if (!error_union_ty.errorUnionPayload().hasRuntimeBits(mod)) {
if (!is_ptr) return .none;
const local = try f.allocLocal(inst, inst_ty);
@@ -5584,10 +5586,11 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
}
fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const inst_ty = f.air.typeOfIndex(inst);
- const repr_is_payload = inst_ty.optionalReprIsPayload();
+ const repr_is_payload = inst_ty.optionalReprIsPayload(mod);
const payload_ty = f.air.typeOf(ty_op.operand);
const payload = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -5615,11 +5618,12 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const inst_ty = f.air.typeOfIndex(inst);
const payload_ty = inst_ty.errorUnionPayload();
- const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime();
+ const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod);
const err_ty = inst_ty.errorUnionSet();
const err = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
@@ -5653,6 +5657,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const writer = f.object.writer();
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
@@ -5662,7 +5667,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
const payload_ty = error_union_ty.errorUnionPayload();
// First, set the non-error value.
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try f.writeCValueDeref(writer, operand);
try writer.writeAll(" = ");
try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other);
@@ -5703,12 +5708,13 @@ fn airSaveErrReturnTraceIndex(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const inst_ty = f.air.typeOfIndex(inst);
const payload_ty = inst_ty.errorUnionPayload();
const payload = try f.resolveInst(ty_op.operand);
- const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime();
+ const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod);
const err_ty = inst_ty.errorUnionSet();
try reap(f, inst, &.{ty_op.operand});
@@ -5735,6 +5741,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const u8) !CValue {
+ const mod = f.object.dg.module;
const un_op = f.air.instructions.items(.data)[inst].un_op;
const writer = f.object.writer();
@@ -5750,7 +5757,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
try writer.writeAll(" = ");
if (!error_ty.errorSetIsEmpty())
- if (payload_ty.hasRuntimeBits())
+ if (payload_ty.hasRuntimeBits(mod))
if (is_ptr)
try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
else
@@ -5768,6 +5775,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
}
fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
@@ -5784,7 +5792,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
if (operand == .undef) {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(&buf) }, .Initializer);
- } else if (array_ty.hasRuntimeBitsIgnoreComptime()) {
+ } else if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try writer.writeAll("&(");
try f.writeCValueDeref(writer, operand);
try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, Value.zero)});
@@ -5801,6 +5809,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const inst_ty = f.air.typeOfIndex(inst);
@@ -5810,10 +5819,10 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
const target = f.object.dg.module.getTarget();
const operation = if (inst_ty.isRuntimeFloat() and operand_ty.isRuntimeFloat())
if (inst_ty.floatBits(target) < operand_ty.floatBits(target)) "trunc" else "extend"
- else if (inst_ty.isInt() and operand_ty.isRuntimeFloat())
- if (inst_ty.isSignedInt()) "fix" else "fixuns"
- else if (inst_ty.isRuntimeFloat() and operand_ty.isInt())
- if (operand_ty.isSignedInt()) "float" else "floatun"
+ else if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat())
+ if (inst_ty.isSignedInt(mod)) "fix" else "fixuns"
+ else if (inst_ty.isRuntimeFloat() and operand_ty.isInt(mod))
+ if (operand_ty.isSignedInt(mod)) "float" else "floatun"
else
unreachable;
@@ -5822,19 +5831,19 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
- if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) {
+ if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat()) {
try writer.writeAll("zig_wrap_");
try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
try writer.writeByte('(');
}
try writer.writeAll("zig_");
try writer.writeAll(operation);
- try writer.writeAll(compilerRtAbbrev(operand_ty, target));
- try writer.writeAll(compilerRtAbbrev(inst_ty, target));
+ try writer.writeAll(compilerRtAbbrev(operand_ty, mod));
+ try writer.writeAll(compilerRtAbbrev(inst_ty, mod));
try writer.writeByte('(');
try f.writeCValue(writer, operand, .FunctionArgument);
try writer.writeByte(')');
- if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) {
+ if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat()) {
try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
try writer.writeByte(')');
}
@@ -5871,14 +5880,15 @@ fn airUnBuiltinCall(
operation: []const u8,
info: BuiltinInfo,
) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
+ const inst_scalar_ty = inst_ty.scalarType(mod);
const operand_ty = f.air.typeOf(ty_op.operand);
- const scalar_ty = operand_ty.scalarType();
+ const scalar_ty = operand_ty.scalarType(mod);
const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
const ref_ret = inst_scalar_cty.tag() == .array;
@@ -5914,6 +5924,7 @@ fn airBinBuiltinCall(
operation: []const u8,
info: BuiltinInfo,
) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const operand_ty = f.air.typeOf(bin_op.lhs);
@@ -5925,8 +5936,8 @@ fn airBinBuiltinCall(
if (!is_big) try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
- const scalar_ty = operand_ty.scalarType();
+ const inst_scalar_ty = inst_ty.scalarType(mod);
+ const scalar_ty = operand_ty.scalarType(mod);
const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
const ref_ret = inst_scalar_cty.tag() == .array;
@@ -5968,14 +5979,15 @@ fn airCmpBuiltinCall(
operation: enum { cmp, operator },
info: BuiltinInfo,
) !CValue {
+ const mod = f.object.dg.module;
const lhs = try f.resolveInst(data.lhs);
const rhs = try f.resolveInst(data.rhs);
try reap(f, inst, &.{ data.lhs, data.rhs });
const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
+ const inst_scalar_ty = inst_ty.scalarType(mod);
const operand_ty = f.air.typeOf(data.lhs);
- const scalar_ty = operand_ty.scalarType();
+ const scalar_ty = operand_ty.scalarType(mod);
const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
const ref_ret = inst_scalar_cty.tag() == .array;
@@ -6017,6 +6029,7 @@ fn airCmpBuiltinCall(
}
fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
const inst_ty = f.air.typeOfIndex(inst);
@@ -6030,15 +6043,13 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
const new_value_mat = try Materialize.start(f, inst, writer, ty, new_value);
try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value });
- const target = f.object.dg.module.getTarget();
- var repr_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, ty.abiSize(target) * 8),
- };
- const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty;
+ const repr_ty = if (ty.isRuntimeFloat())
+ mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+ else
+ ty;
const local = try f.allocLocal(inst, inst_ty);
- if (inst_ty.isPtrLikeOptional()) {
+ if (inst_ty.isPtrLikeOptional(mod)) {
{
const a = try Assignment.start(f, writer, ty);
try f.writeCValue(writer, local, .Other);
@@ -6123,6 +6134,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
}
fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data;
const inst_ty = f.air.typeOfIndex(inst);
@@ -6135,14 +6147,10 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_mat = try Materialize.start(f, inst, writer, ty, operand);
try reap(f, inst, &.{ pl_op.operand, extra.operand });
- const target = f.object.dg.module.getTarget();
- var repr_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, ty.abiSize(target) * 8),
- };
+ const repr_bits = @intCast(u16, ty.abiSize(mod) * 8);
const is_float = ty.isRuntimeFloat();
- const is_128 = repr_pl.data == 128;
- const repr_ty = if (is_float) Type.initPayload(&repr_pl.base) else ty;
+ const is_128 = repr_bits == 128;
+ const repr_ty = if (is_float) mod.intType(.unsigned, repr_bits) catch unreachable else ty;
const local = try f.allocLocal(inst, inst_ty);
try writer.print("zig_atomicrmw_{s}", .{toAtomicRmwSuffix(extra.op())});
@@ -6181,18 +6189,17 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const atomic_load = f.air.instructions.items(.data)[inst].atomic_load;
const ptr = try f.resolveInst(atomic_load.ptr);
try reap(f, inst, &.{atomic_load.ptr});
const ptr_ty = f.air.typeOf(atomic_load.ptr);
const ty = ptr_ty.childType();
- const target = f.object.dg.module.getTarget();
- var repr_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, ty.abiSize(target) * 8),
- };
- const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty;
+ const repr_ty = if (ty.isRuntimeFloat())
+ mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+ else
+ ty;
const inst_ty = f.air.typeOfIndex(inst);
const writer = f.object.writer();
@@ -6218,6 +6225,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = f.air.typeOf(bin_op.lhs);
const ty = ptr_ty.childType();
@@ -6228,12 +6236,10 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
const element_mat = try Materialize.start(f, inst, writer, ty, element);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- const target = f.object.dg.module.getTarget();
- var repr_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, ty.abiSize(target) * 8),
- };
- const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty;
+ const repr_ty = if (ty.isRuntimeFloat())
+ mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+ else
+ ty;
try writer.writeAll("zig_atomic_store((zig_atomic(");
try f.renderType(writer, ty);
@@ -6262,14 +6268,14 @@ fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !vo
}
fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const dest_ty = f.air.typeOf(bin_op.lhs);
const dest_slice = try f.resolveInst(bin_op.lhs);
const value = try f.resolveInst(bin_op.rhs);
const elem_ty = f.air.typeOf(bin_op.rhs);
- const target = f.object.dg.module.getTarget();
- const elem_abi_size = elem_ty.abiSize(target);
- const val_is_undef = if (f.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
+ const elem_abi_size = elem_ty.abiSize(mod);
+ const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false;
const writer = f.object.writer();
if (val_is_undef) {
@@ -6383,12 +6389,12 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const dest_ptr = try f.resolveInst(bin_op.lhs);
const src_ptr = try f.resolveInst(bin_op.rhs);
const dest_ty = f.air.typeOf(bin_op.lhs);
const src_ty = f.air.typeOf(bin_op.rhs);
- const target = f.object.dg.module.getTarget();
const writer = f.object.writer();
try writer.writeAll("memcpy(");
@@ -6399,7 +6405,7 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
switch (dest_ty.ptrSize()) {
.Slice => {
const elem_ty = dest_ty.childType();
- const elem_abi_size = elem_ty.abiSize(target);
+ const elem_abi_size = elem_ty.abiSize(mod);
try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" });
if (elem_abi_size > 1) {
try writer.print(" * {d});\n", .{elem_abi_size});
@@ -6410,7 +6416,7 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
.One => {
const array_ty = dest_ty.childType();
const elem_ty = array_ty.childType();
- const elem_abi_size = elem_ty.abiSize(target);
+ const elem_abi_size = elem_ty.abiSize(mod);
const len = array_ty.arrayLen() * elem_abi_size;
try writer.print("{d});\n", .{len});
},
@@ -6422,14 +6428,14 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const union_ptr = try f.resolveInst(bin_op.lhs);
const new_tag = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
- const target = f.object.dg.module.getTarget();
const union_ty = f.air.typeOf(bin_op.lhs).childType();
- const layout = union_ty.unionGetLayout(target);
+ const layout = union_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return .none;
const tag_ty = union_ty.unionTagTypeSafety().?;
@@ -6443,14 +6449,14 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const union_ty = f.air.typeOf(ty_op.operand);
- const target = f.object.dg.module.getTarget();
- const layout = union_ty.unionGetLayout(target);
+ const layout = union_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return .none;
const inst_ty = f.air.typeOfIndex(inst);
@@ -6501,13 +6507,14 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
try reap(f, inst, &.{ty_op.operand});
const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
+ const inst_scalar_ty = inst_ty.scalarType(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -6555,6 +6562,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
@@ -6562,8 +6570,6 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
const lhs = try f.resolveInst(extra.a);
const rhs = try f.resolveInst(extra.b);
- const module = f.object.dg.module;
- const target = module.getTarget();
const inst_ty = f.air.typeOfIndex(inst);
const writer = f.object.writer();
@@ -6581,7 +6587,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll("] = ");
var buf: Value.ElemValueBuffer = undefined;
- const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(target);
+ const mask_elem = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod);
var src_pl = Value.Payload.U64{
.base = .{ .tag = .int_u64 },
.data = @intCast(u64, mask_elem ^ mask_elem >> 63),
@@ -6597,16 +6603,17 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const reduce = f.air.instructions.items(.data)[inst].reduce;
- const target = f.object.dg.module.getTarget();
+ const target = mod.getTarget();
const scalar_ty = f.air.typeOfIndex(inst);
const operand = try f.resolveInst(reduce.operand);
try reap(f, inst, &.{reduce.operand});
const operand_ty = f.air.typeOf(reduce.operand);
const writer = f.object.writer();
- const use_operator = scalar_ty.bitSize(target) <= 64;
+ const use_operator = scalar_ty.bitSize(mod) <= 64;
const op: union(enum) {
const Func = struct { operation: []const u8, info: BuiltinInfo = .none };
float_op: Func,
@@ -6617,28 +6624,28 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
.And => if (use_operator) .{ .infix = " &= " } else .{ .builtin = .{ .operation = "and" } },
.Or => if (use_operator) .{ .infix = " |= " } else .{ .builtin = .{ .operation = "or" } },
.Xor => if (use_operator) .{ .infix = " ^= " } else .{ .builtin = .{ .operation = "xor" } },
- .Min => switch (scalar_ty.zigTypeTag()) {
+ .Min => switch (scalar_ty.zigTypeTag(mod)) {
.Int => if (use_operator) .{ .ternary = " < " } else .{
.builtin = .{ .operation = "min" },
},
.Float => .{ .float_op = .{ .operation = "fmin" } },
else => unreachable,
},
- .Max => switch (scalar_ty.zigTypeTag()) {
+ .Max => switch (scalar_ty.zigTypeTag(mod)) {
.Int => if (use_operator) .{ .ternary = " > " } else .{
.builtin = .{ .operation = "max" },
},
.Float => .{ .float_op = .{ .operation = "fmax" } },
else => unreachable,
},
- .Add => switch (scalar_ty.zigTypeTag()) {
+ .Add => switch (scalar_ty.zigTypeTag(mod)) {
.Int => if (use_operator) .{ .infix = " += " } else .{
.builtin = .{ .operation = "addw", .info = .bits },
},
.Float => .{ .builtin = .{ .operation = "add" } },
else => unreachable,
},
- .Mul => switch (scalar_ty.zigTypeTag()) {
+ .Mul => switch (scalar_ty.zigTypeTag(mod)) {
.Int => if (use_operator) .{ .infix = " *= " } else .{
.builtin = .{ .operation = "mulw", .info = .bits },
},
@@ -6680,22 +6687,22 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) {
.Or, .Xor, .Add => Value.zero,
- .And => switch (scalar_ty.zigTypeTag()) {
+ .And => switch (scalar_ty.zigTypeTag(mod)) {
.Bool => Value.one,
- else => switch (scalar_ty.intInfo(target).signedness) {
- .unsigned => try scalar_ty.maxInt(stack.get(), target),
+ else => switch (scalar_ty.intInfo(mod).signedness) {
+ .unsigned => try scalar_ty.maxInt(stack.get(), mod),
.signed => Value.negative_one,
},
},
- .Min => switch (scalar_ty.zigTypeTag()) {
+ .Min => switch (scalar_ty.zigTypeTag(mod)) {
.Bool => Value.one,
- .Int => try scalar_ty.maxInt(stack.get(), target),
+ .Int => try scalar_ty.maxInt(stack.get(), mod),
.Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target),
else => unreachable,
},
- .Max => switch (scalar_ty.zigTypeTag()) {
+ .Max => switch (scalar_ty.zigTypeTag(mod)) {
.Bool => Value.zero,
- .Int => try scalar_ty.minInt(stack.get(), target),
+ .Int => try scalar_ty.minInt(stack.get(), mod),
.Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target),
else => unreachable,
},
@@ -6753,6 +6760,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const inst_ty = f.air.typeOfIndex(inst);
const len = @intCast(usize, inst_ty.arrayLen());
@@ -6770,11 +6778,9 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
}
}
- const target = f.object.dg.module.getTarget();
-
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
- switch (inst_ty.zigTypeTag()) {
+ switch (inst_ty.zigTypeTag(mod)) {
.Array, .Vector => {
const elem_ty = inst_ty.childType();
const a = try Assignment.init(f, elem_ty);
@@ -6799,7 +6805,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
.Auto, .Extern => for (resolved_elements, 0..) |element, field_i| {
if (inst_ty.structFieldIsComptime(field_i)) continue;
const field_ty = inst_ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const a = try Assignment.start(f, writer, field_ty);
try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple())
@@ -6813,13 +6819,9 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
.Packed => {
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
- const int_info = inst_ty.intInfo(target);
+ const int_info = inst_ty.intInfo(mod);
- var bit_offset_ty_pl = Type.Payload.Bits{
- .base = .{ .tag = .int_unsigned },
- .data = Type.smallestUnsignedBits(int_info.bits - 1),
- };
- const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base);
+ const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 };
const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
@@ -6828,7 +6830,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
for (0..elements.len) |field_i| {
if (inst_ty.structFieldIsComptime(field_i)) continue;
const field_ty = inst_ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) {
try writer.writeAll("zig_or_");
@@ -6841,7 +6843,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
for (resolved_elements, 0..) |element, field_i| {
if (inst_ty.structFieldIsComptime(field_i)) continue;
const field_ty = inst_ty.structFieldType(field_i);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeAll(", ");
// TODO: Skip this entire shift if val is 0?
@@ -6849,13 +6851,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
try writer.writeByte('(');
- if (inst_ty.isAbiInt() and (field_ty.isAbiInt() or field_ty.isPtrAtRuntime())) {
+ if (inst_ty.isAbiInt(mod) and (field_ty.isAbiInt(mod) or field_ty.isPtrAtRuntime(mod))) {
try f.renderIntCast(writer, inst_ty, element, .{}, field_ty, .FunctionArgument);
} else {
try writer.writeByte('(');
try f.renderType(writer, inst_ty);
try writer.writeByte(')');
- if (field_ty.isPtrAtRuntime()) {
+ if (field_ty.isPtrAtRuntime(mod)) {
try writer.writeByte('(');
try f.renderType(writer, switch (int_info.signedness) {
.unsigned => Type.usize,
@@ -6872,7 +6874,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(')');
if (!empty) try writer.writeByte(')');
- bit_offset_val_pl.data += field_ty.bitSize(target);
+ bit_offset_val_pl.data += field_ty.bitSize(mod);
empty = false;
}
@@ -6886,11 +6888,11 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data;
const union_ty = f.air.typeOfIndex(inst);
- const target = f.object.dg.module.getTarget();
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
const field_name = union_obj.fields.keys()[extra.field_index];
const payload_ty = f.air.typeOf(extra.init);
@@ -6908,7 +6910,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
}
const field: CValue = if (union_ty.unionTagTypeSafety()) |tag_ty| field: {
- const layout = union_ty.unionGetLayout(target);
+ const layout = union_ty.unionGetLayout(mod);
if (layout.tag_size != 0) {
const field_index = tag_ty.enumFieldIndex(field_name).?;
@@ -6991,13 +6993,14 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const un_op = f.air.instructions.items(.data)[inst].un_op;
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
const operand_ty = f.air.typeOf(un_op);
- const scalar_ty = operand_ty.scalarType();
+ const scalar_ty = operand_ty.scalarType(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, operand_ty);
@@ -7016,13 +7019,14 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue {
+ const mod = f.object.dg.module;
const un_op = f.air.instructions.items(.data)[inst].un_op;
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
+ const inst_scalar_ty = inst_ty.scalarType(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -7043,6 +7047,7 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal
}
fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue {
+ const mod = f.object.dg.module;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const lhs = try f.resolveInst(bin_op.lhs);
@@ -7050,7 +7055,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
+ const inst_scalar_ty = inst_ty.scalarType(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -7074,6 +7079,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa
}
fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
+ const mod = f.object.dg.module;
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const bin_op = f.air.extraData(Air.Bin, pl_op.payload).data;
@@ -7083,7 +7089,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
const inst_ty = f.air.typeOfIndex(inst);
- const inst_scalar_ty = inst_ty.scalarType();
+ const inst_scalar_ty = inst_ty.scalarType(mod);
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
@@ -7279,8 +7285,9 @@ fn signAbbrev(signedness: std.builtin.Signedness) u8 {
};
}
-fn compilerRtAbbrev(ty: Type, target: std.Target) []const u8 {
- return if (ty.isInt()) switch (ty.intInfo(target).bits) {
+fn compilerRtAbbrev(ty: Type, mod: *Module) []const u8 {
+ const target = mod.getTarget();
+ return if (ty.isInt(mod)) switch (ty.intInfo(mod).bits) {
1...32 => "si",
33...64 => "di",
65...128 => "ti",
@@ -7407,7 +7414,7 @@ fn undefPattern(comptime IntType: type) IntType {
const FormatIntLiteralContext = struct {
dg: *DeclGen,
- int_info: std.builtin.Type.Int,
+ int_info: InternPool.Key.IntType,
kind: CType.Kind,
cty: CType,
val: Value,
@@ -7418,7 +7425,8 @@ fn formatIntLiteral(
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
- const target = data.dg.module.getTarget();
+ const mod = data.dg.module;
+ const target = mod.getTarget();
const ExpectedContents = struct {
const base = 10;
@@ -7449,7 +7457,7 @@ fn formatIntLiteral(
};
undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits);
break :blk undef_int.toConst();
- } else data.val.toBigInt(&int_buf, target);
+ } else data.val.toBigInt(&int_buf, mod);
assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
const c_bits = @intCast(usize, data.cty.byteSize(data.dg.ctypes.set, target) * 8);
@@ -7684,7 +7692,8 @@ const Vectorize = struct {
index: CValue = .none,
pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize {
- return if (ty.zigTypeTag() == .Vector) index: {
+ const mod = f.object.dg.module;
+ return if (ty.zigTypeTag(mod) == .Vector) index: {
var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen() };
const local = try f.allocLocal(inst, Type.usize);
@@ -7727,10 +7736,10 @@ const LowerFnRetTyBuffer = struct {
values: [1]Value,
payload: Type.Payload.AnonStruct,
};
-fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, target: std.Target) Type {
- if (ret_ty.zigTypeTag() == .NoReturn) return Type.initTag(.noreturn);
+fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *const Module) Type {
+ if (ret_ty.zigTypeTag(mod) == .NoReturn) return Type.initTag(.noreturn);
- if (lowersToArray(ret_ty, target)) {
+ if (lowersToArray(ret_ty, mod)) {
buffer.names = [1][]const u8{"array"};
buffer.types = [1]Type{ret_ty};
buffer.values = [1]Value{Value.initTag(.unreachable_value)};
@@ -7742,13 +7751,13 @@ fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, target: std.Target) T
return Type.initPayload(&buffer.payload.base);
}
- return if (ret_ty.hasRuntimeBitsIgnoreComptime()) ret_ty else Type.void;
+ return if (ret_ty.hasRuntimeBitsIgnoreComptime(mod)) ret_ty else Type.void;
}
-fn lowersToArray(ty: Type, target: std.Target) bool {
- return switch (ty.zigTypeTag()) {
+fn lowersToArray(ty: Type, mod: *const Module) bool {
+ return switch (ty.zigTypeTag(mod)) {
.Array, .Vector => return true,
- else => return ty.isAbiInt() and toCIntBits(@intCast(u32, ty.bitSize(target))) == null,
+ else => return ty.isAbiInt(mod) and toCIntBits(@intCast(u32, ty.bitSize(mod))) == null,
};
}
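
The hunks above largely follow one mechanical pattern: helpers that previously received `target: std.Target` now receive the owning `*Module` and call `mod.getTarget()` only where raw target data is still required, so queries such as `intInfo`, `bitSize`, and `abiAlignment` go through the Module. A minimal sketch of that pattern, assuming simplified import paths and a hypothetical helper name (`intAbbrev`); this is illustrative only, not code from this commit:

const std = @import("std");
const Module = @import("Module.zig"); // path simplified for the sketch
const Type = @import("type.zig").Type; // path simplified for the sketch

fn intAbbrev(ty: Type, mod: *Module) []const u8 {
    // Target data stays reachable through the Module when it is still needed.
    const target = mod.getTarget();
    std.debug.assert(target.ptrBitWidth() >= 16);
    // Type queries now take the Module instead of the Target.
    return switch (ty.intInfo(mod).bits) {
        1...32 => "si",
        33...64 => "di",
        65...128 => "ti",
        else => unreachable,
    };
}

The same signature migration repeats throughout src/codegen/llvm.zig below, which is why most hunks there only swap `target` for `mod` at call sites.
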
src/codegen/llvm.zig
@@ -598,7 +598,7 @@ pub const Object = struct {
};
const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False);
const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
- const slice_alignment = slice_ty.abiAlignment(target);
+ const slice_alignment = slice_ty.abiAlignment(mod);
const error_name_list = mod.error_name_list.items;
const llvm_errors = try mod.gpa.alloc(*llvm.Value, error_name_list.len);
@@ -880,28 +880,28 @@ pub const Object = struct {
pub fn updateFunc(
o: *Object,
- module: *Module,
+ mod: *Module,
func: *Module.Fn,
air: Air,
liveness: Liveness,
) !void {
const decl_index = func.owner_decl;
- const decl = module.declPtr(decl_index);
- const target = module.getTarget();
+ const decl = mod.declPtr(decl_index);
+ const target = mod.getTarget();
var dg: DeclGen = .{
.context = o.context,
.object = o,
- .module = module,
+ .module = mod,
.decl_index = decl_index,
.decl = decl,
.err_msg = null,
- .gpa = module.gpa,
+ .gpa = mod.gpa,
};
const llvm_func = try dg.resolveLlvmFunction(decl_index);
- if (module.align_stack_fns.get(func)) |align_info| {
+ if (mod.align_stack_fns.get(func)) |align_info| {
dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment);
dg.addFnAttr(llvm_func, "noinline");
} else {
@@ -922,7 +922,7 @@ pub const Object = struct {
}
// TODO: disable this if safety is off for the function scope
- const ssp_buf_size = module.comp.bin_file.options.stack_protector;
+ const ssp_buf_size = mod.comp.bin_file.options.stack_protector;
if (ssp_buf_size != 0) {
var buf: [12]u8 = undefined;
const arg = std.fmt.bufPrintZ(&buf, "{d}", .{ssp_buf_size}) catch unreachable;
@@ -931,7 +931,7 @@ pub const Object = struct {
}
// TODO: disable this if safety is off for the function scope
- if (module.comp.bin_file.options.stack_check) {
+ if (mod.comp.bin_file.options.stack_check) {
dg.addFnAttrString(llvm_func, "probe-stack", "__zig_probe_stack");
} else if (target.os.tag == .uefi) {
dg.addFnAttrString(llvm_func, "no-stack-arg-probe", "");
@@ -954,17 +954,17 @@ pub const Object = struct {
// This gets the LLVM values from the function and stores them in `dg.args`.
const fn_info = decl.ty.fnInfo();
- const sret = firstParamSRet(fn_info, target);
+ const sret = firstParamSRet(fn_info, mod);
const ret_ptr = if (sret) llvm_func.getParam(0) else null;
const gpa = dg.gpa;
- if (ccAbiPromoteInt(fn_info.cc, target, fn_info.return_type)) |s| switch (s) {
+ if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type)) |s| switch (s) {
.signed => dg.addAttr(llvm_func, 0, "signext"),
.unsigned => dg.addAttr(llvm_func, 0, "zeroext"),
};
- const err_return_tracing = fn_info.return_type.isError() and
- module.comp.bin_file.options.error_return_tracing;
+ const err_return_tracing = fn_info.return_type.isError(mod) and
+ mod.comp.bin_file.options.error_return_tracing;
const err_ret_trace = if (err_return_tracing)
llvm_func.getParam(@boolToInt(ret_ptr != null))
@@ -989,8 +989,8 @@ pub const Object = struct {
const param = llvm_func.getParam(llvm_arg_i);
try args.ensureUnusedCapacity(1);
- if (isByRef(param_ty)) {
- const alignment = param_ty.abiAlignment(target);
+ if (isByRef(param_ty, mod)) {
+ const alignment = param_ty.abiAlignment(mod);
const param_llvm_ty = param.typeOf();
const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
const store_inst = builder.buildStore(param, arg_ptr);
@@ -1007,14 +1007,14 @@ pub const Object = struct {
const param_ty = fn_info.param_types[it.zig_index - 1];
const param_llvm_ty = try dg.lowerType(param_ty);
const param = llvm_func.getParam(llvm_arg_i);
- const alignment = param_ty.abiAlignment(target);
+ const alignment = param_ty.abiAlignment(mod);
dg.addByRefParamAttrs(llvm_func, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty);
llvm_arg_i += 1;
try args.ensureUnusedCapacity(1);
- if (isByRef(param_ty)) {
+ if (isByRef(param_ty, mod)) {
args.appendAssumeCapacity(param);
} else {
const load_inst = builder.buildLoad(param_llvm_ty, param, "");
@@ -1026,14 +1026,14 @@ pub const Object = struct {
const param_ty = fn_info.param_types[it.zig_index - 1];
const param_llvm_ty = try dg.lowerType(param_ty);
const param = llvm_func.getParam(llvm_arg_i);
- const alignment = param_ty.abiAlignment(target);
+ const alignment = param_ty.abiAlignment(mod);
dg.addArgAttr(llvm_func, llvm_arg_i, "noundef");
llvm_arg_i += 1;
try args.ensureUnusedCapacity(1);
- if (isByRef(param_ty)) {
+ if (isByRef(param_ty, mod)) {
args.appendAssumeCapacity(param);
} else {
const load_inst = builder.buildLoad(param_llvm_ty, param, "");
@@ -1048,10 +1048,10 @@ pub const Object = struct {
llvm_arg_i += 1;
const param_llvm_ty = try dg.lowerType(param_ty);
- const abi_size = @intCast(c_uint, param_ty.abiSize(target));
+ const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
const int_llvm_ty = dg.context.intType(abi_size * 8);
const alignment = @max(
- param_ty.abiAlignment(target),
+ param_ty.abiAlignment(mod),
dg.object.target_data.abiAlignmentOfType(int_llvm_ty),
);
const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
@@ -1060,7 +1060,7 @@ pub const Object = struct {
try args.ensureUnusedCapacity(1);
- if (isByRef(param_ty)) {
+ if (isByRef(param_ty, mod)) {
args.appendAssumeCapacity(arg_ptr);
} else {
const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
@@ -1078,7 +1078,7 @@ pub const Object = struct {
dg.addArgAttr(llvm_func, llvm_arg_i, "noalias");
}
}
- if (param_ty.zigTypeTag() != .Optional) {
+ if (param_ty.zigTypeTag(mod) != .Optional) {
dg.addArgAttr(llvm_func, llvm_arg_i, "nonnull");
}
if (!ptr_info.mutable) {
@@ -1087,7 +1087,7 @@ pub const Object = struct {
if (ptr_info.@"align" != 0) {
dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", ptr_info.@"align");
} else {
- const elem_align = @max(ptr_info.pointee_type.abiAlignment(target), 1);
+ const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", elem_align);
}
const ptr_param = llvm_func.getParam(llvm_arg_i);
@@ -1105,7 +1105,7 @@ pub const Object = struct {
const field_types = it.llvm_types_buffer[0..it.llvm_types_len];
const param_ty = fn_info.param_types[it.zig_index - 1];
const param_llvm_ty = try dg.lowerType(param_ty);
- const param_alignment = param_ty.abiAlignment(target);
+ const param_alignment = param_ty.abiAlignment(mod);
const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);
const llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
for (field_types, 0..) |_, field_i_usize| {
@@ -1117,7 +1117,7 @@ pub const Object = struct {
store_inst.setAlignment(target.ptrBitWidth() / 8);
}
- const is_by_ref = isByRef(param_ty);
+ const is_by_ref = isByRef(param_ty, mod);
const loaded = if (is_by_ref) arg_ptr else l: {
const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
load_inst.setAlignment(param_alignment);
@@ -1139,11 +1139,11 @@ pub const Object = struct {
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
- const alignment = param_ty.abiAlignment(target);
+ const alignment = param_ty.abiAlignment(mod);
const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
_ = builder.buildStore(param, arg_ptr);
- if (isByRef(param_ty)) {
+ if (isByRef(param_ty, mod)) {
try args.append(arg_ptr);
} else {
const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
@@ -1157,11 +1157,11 @@ pub const Object = struct {
const param = llvm_func.getParam(llvm_arg_i);
llvm_arg_i += 1;
- const alignment = param_ty.abiAlignment(target);
+ const alignment = param_ty.abiAlignment(mod);
const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
_ = builder.buildStore(param, arg_ptr);
- if (isByRef(param_ty)) {
+ if (isByRef(param_ty, mod)) {
try args.append(arg_ptr);
} else {
const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
@@ -1180,7 +1180,7 @@ pub const Object = struct {
const line_number = decl.src_line + 1;
const is_internal_linkage = decl.val.tag() != .extern_fn and
- !module.decl_exports.contains(decl_index);
+ !mod.decl_exports.contains(decl_index);
const noret_bit: c_uint = if (fn_info.return_type.isNoReturn())
llvm.DIFlags.NoReturn
else
@@ -1196,7 +1196,7 @@ pub const Object = struct {
true, // is definition
line_number + func.lbrace_line, // scope line
llvm.DIFlags.StaticMember | noret_bit,
- module.comp.bin_file.options.optimize_mode != .Debug,
+ mod.comp.bin_file.options.optimize_mode != .Debug,
null, // decl_subprogram
);
try dg.object.di_map.put(gpa, decl, subprogram.toNode());
@@ -1219,7 +1219,7 @@ pub const Object = struct {
.func_inst_table = .{},
.llvm_func = llvm_func,
.blocks = .{},
- .single_threaded = module.comp.bin_file.options.single_threaded,
+ .single_threaded = mod.comp.bin_file.options.single_threaded,
.di_scope = di_scope,
.di_file = di_file,
.base_line = dg.decl.src_line,
@@ -1232,14 +1232,14 @@ pub const Object = struct {
fg.genBody(air.getMainBody()) catch |err| switch (err) {
error.CodegenFail => {
decl.analysis = .codegen_failure;
- try module.failed_decls.put(module.gpa, decl_index, dg.err_msg.?);
+ try mod.failed_decls.put(mod.gpa, decl_index, dg.err_msg.?);
dg.err_msg = null;
return;
},
else => |e| return e,
};
- try o.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
+ try o.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index));
}
pub fn updateDecl(self: *Object, module: *Module, decl_index: Module.Decl.Index) !void {
@@ -1275,37 +1275,40 @@ pub const Object = struct {
pub fn updateDeclExports(
self: *Object,
- module: *Module,
+ mod: *Module,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
+ const gpa = mod.gpa;
// If the module does not already have the function, we ignore this function call
// because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`.
const llvm_global = self.decl_map.get(decl_index) orelse return;
- const decl = module.declPtr(decl_index);
+ const decl = mod.declPtr(decl_index);
if (decl.isExtern()) {
- const is_wasm_fn = module.getTarget().isWasm() and try decl.isFunction();
+ const is_wasm_fn = mod.getTarget().isWasm() and try decl.isFunction(mod);
const mangle_name = is_wasm_fn and
decl.getExternFn().?.lib_name != null and
!std.mem.eql(u8, std.mem.sliceTo(decl.getExternFn().?.lib_name.?, 0), "c");
const decl_name = if (mangle_name) name: {
- const tmp = try std.fmt.allocPrintZ(module.gpa, "{s}|{s}", .{ decl.name, decl.getExternFn().?.lib_name.? });
+ const tmp = try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{
+ decl.name, decl.getExternFn().?.lib_name.?,
+ });
break :name tmp.ptr;
} else decl.name;
- defer if (mangle_name) module.gpa.free(std.mem.sliceTo(decl_name, 0));
+ defer if (mangle_name) gpa.free(std.mem.sliceTo(decl_name, 0));
llvm_global.setValueName(decl_name);
if (self.getLlvmGlobal(decl_name)) |other_global| {
if (other_global != llvm_global) {
log.debug("updateDeclExports isExtern()=true setValueName({s}) conflict", .{decl.name});
- try self.extern_collisions.put(module.gpa, decl_index, {});
+ try self.extern_collisions.put(gpa, decl_index, {});
}
}
llvm_global.setUnnamedAddr(.False);
llvm_global.setLinkage(.External);
- if (module.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
+ if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
if (self.di_map.get(decl)) |di_node| {
- if (try decl.isFunction()) {
+ if (try decl.isFunction(mod)) {
const di_func = @ptrCast(*llvm.DISubprogram, di_node);
const linkage_name = llvm.MDString.get(self.context, decl.name, std.mem.len(decl.name));
di_func.replaceLinkageName(linkage_name);
@@ -1329,9 +1332,9 @@ pub const Object = struct {
const exp_name = exports[0].options.name;
llvm_global.setValueName2(exp_name.ptr, exp_name.len);
llvm_global.setUnnamedAddr(.False);
- if (module.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport);
+ if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport);
if (self.di_map.get(decl)) |di_node| {
- if (try decl.isFunction()) {
+ if (try decl.isFunction(mod)) {
const di_func = @ptrCast(*llvm.DISubprogram, di_node);
const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len);
di_func.replaceLinkageName(linkage_name);
@@ -1353,8 +1356,8 @@ pub const Object = struct {
.protected => llvm_global.setVisibility(.Protected),
}
if (exports[0].options.section) |section| {
- const section_z = try module.gpa.dupeZ(u8, section);
- defer module.gpa.free(section_z);
+ const section_z = try gpa.dupeZ(u8, section);
+ defer gpa.free(section_z);
llvm_global.setSection(section_z);
}
if (decl.val.castTag(.variable)) |variable| {
@@ -1370,8 +1373,8 @@ pub const Object = struct {
// Until then we iterate over existing aliases and make them point
// to the correct decl, or otherwise add a new alias. Old aliases are leaked.
for (exports[1..]) |exp| {
- const exp_name_z = try module.gpa.dupeZ(u8, exp.options.name);
- defer module.gpa.free(exp_name_z);
+ const exp_name_z = try gpa.dupeZ(u8, exp.options.name);
+ defer gpa.free(exp_name_z);
if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| {
alias.setAliasee(llvm_global);
@@ -1385,14 +1388,14 @@ pub const Object = struct {
}
}
} else {
- const fqn = try decl.getFullyQualifiedName(module);
- defer module.gpa.free(fqn);
+ const fqn = try decl.getFullyQualifiedName(mod);
+ defer gpa.free(fqn);
llvm_global.setValueName2(fqn.ptr, fqn.len);
llvm_global.setLinkage(.Internal);
- if (module.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
+ if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
llvm_global.setUnnamedAddr(.True);
if (decl.val.castTag(.variable)) |variable| {
- const single_threaded = module.comp.bin_file.options.single_threaded;
+ const single_threaded = mod.comp.bin_file.options.single_threaded;
if (variable.data.is_threadlocal and !single_threaded) {
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
} else {
@@ -1479,14 +1482,15 @@ pub const Object = struct {
const gpa = o.gpa;
const target = o.target;
const dib = o.di_builder.?;
- switch (ty.zigTypeTag()) {
+ const mod = o.module;
+ switch (ty.zigTypeTag(mod)) {
.Void, .NoReturn => {
const di_type = dib.createBasicType("void", 0, DW.ATE.signed);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
return di_type;
},
.Int => {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
assert(info.bits != 0);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
@@ -1494,7 +1498,7 @@ pub const Object = struct {
.signed => DW.ATE.signed,
.unsigned => DW.ATE.unsigned,
};
- const di_bits = ty.abiSize(target) * 8; // lldb cannot handle non-byte sized types
+ const di_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types
const di_type = dib.createBasicType(name, di_bits, dwarf_encoding);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
return di_type;
@@ -1503,7 +1507,7 @@ pub const Object = struct {
const owner_decl_index = ty.getOwnerDecl();
const owner_decl = o.module.declPtr(owner_decl_index);
- if (!ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
@@ -1522,9 +1526,8 @@ pub const Object = struct {
};
const field_index_val = Value.initPayload(&buf_field_index.base);
- var buffer: Type.Payload.Bits = undefined;
- const int_ty = ty.intTagType(&buffer);
- const int_info = ty.intInfo(target);
+ const int_ty = ty.intTagType();
+ const int_info = ty.intInfo(mod);
assert(int_info.bits != 0);
for (field_names, 0..) |field_name, i| {
@@ -1536,7 +1539,7 @@ pub const Object = struct {
const field_int_val = field_index_val.enumToInt(ty, &buf_u64);
var bigint_space: Value.BigIntSpace = undefined;
- const bigint = field_int_val.toBigInt(&bigint_space, target);
+ const bigint = field_int_val.toBigInt(&bigint_space, mod);
if (bigint.limbs.len == 1) {
enumerators[i] = dib.createEnumerator(field_name_z, bigint.limbs[0], int_info.signedness == .unsigned);
@@ -1566,8 +1569,8 @@ pub const Object = struct {
name,
di_file,
owner_decl.src_node + 1,
- ty.abiSize(target) * 8,
- ty.abiAlignment(target) * 8,
+ ty.abiSize(mod) * 8,
+ ty.abiAlignment(mod) * 8,
enumerators.ptr,
@intCast(c_int, enumerators.len),
try o.lowerDebugType(int_ty, .full),
@@ -1604,7 +1607,7 @@ pub const Object = struct {
!ptr_info.mutable or
ptr_info.@"volatile" or
ptr_info.size == .Many or ptr_info.size == .C or
- !ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime())
+ !ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime(mod))
{
var payload: Type.Payload.Pointer = .{
.data = .{
@@ -1623,7 +1626,7 @@ pub const Object = struct {
},
},
};
- if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) {
+ if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) {
payload.data.pointee_type = Type.anyopaque;
}
const bland_ptr_ty = Type.initPayload(&payload.base);
@@ -1657,10 +1660,10 @@ pub const Object = struct {
break :blk fwd_decl;
};
- const ptr_size = ptr_ty.abiSize(target);
- const ptr_align = ptr_ty.abiAlignment(target);
- const len_size = len_ty.abiSize(target);
- const len_align = len_ty.abiAlignment(target);
+ const ptr_size = ptr_ty.abiSize(mod);
+ const ptr_align = ptr_ty.abiAlignment(mod);
+ const len_size = len_ty.abiSize(mod);
+ const len_align = len_ty.abiAlignment(mod);
var offset: u64 = 0;
offset += ptr_size;
@@ -1697,8 +1700,8 @@ pub const Object = struct {
name.ptr,
di_file,
line,
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
null, // derived from
&fields,
@@ -1719,7 +1722,7 @@ pub const Object = struct {
const ptr_di_ty = dib.createPointerType(
elem_di_ty,
target.ptrBitWidth(),
- ty.ptrAlignment(target) * 8,
+ ty.ptrAlignment(mod) * 8,
name,
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
@@ -1750,8 +1753,8 @@ pub const Object = struct {
},
.Array => {
const array_di_ty = dib.createArrayType(
- ty.abiSize(target) * 8,
- ty.abiAlignment(target) * 8,
+ ty.abiSize(mod) * 8,
+ ty.abiAlignment(mod) * 8,
try o.lowerDebugType(ty.childType(), .full),
@intCast(c_int, ty.arrayLen()),
);
@@ -1760,14 +1763,14 @@ pub const Object = struct {
return array_di_ty;
},
.Vector => {
- const elem_ty = ty.elemType2();
+ const elem_ty = ty.elemType2(mod);
// Vector elements cannot be padded since that would make
// @bitSizeOf(elem) * len > @bitSizeOf(vec).
// Neither gdb nor lldb seem to be able to display non-byte sized
// vectors properly.
- const elem_di_type = switch (elem_ty.zigTypeTag()) {
+ const elem_di_type = switch (elem_ty.zigTypeTag(mod)) {
.Int => blk: {
- const info = elem_ty.intInfo(target);
+ const info = elem_ty.intInfo(mod);
assert(info.bits != 0);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
@@ -1782,8 +1785,8 @@ pub const Object = struct {
};
const vector_di_ty = dib.createVectorType(
- ty.abiSize(target) * 8,
- ty.abiAlignment(target) * 8,
+ ty.abiSize(mod) * 8,
+ ty.abiAlignment(mod) * 8,
elem_di_type,
ty.vectorLen(),
);
@@ -1796,13 +1799,13 @@ pub const Object = struct {
defer gpa.free(name);
var buf: Type.Payload.ElemType = undefined;
const child_ty = ty.optionalChild(&buf);
- if (!child_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const di_bits = 8; // lldb cannot handle non-byte sized types
const di_ty = dib.createBasicType(name, di_bits, DW.ATE.boolean);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
return di_ty;
}
- if (ty.optionalReprIsPayload()) {
+ if (ty.optionalReprIsPayload(mod)) {
const ptr_di_ty = try o.lowerDebugType(child_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
@@ -1826,10 +1829,10 @@ pub const Object = struct {
};
const non_null_ty = Type.u8;
- const payload_size = child_ty.abiSize(target);
- const payload_align = child_ty.abiAlignment(target);
- const non_null_size = non_null_ty.abiSize(target);
- const non_null_align = non_null_ty.abiAlignment(target);
+ const payload_size = child_ty.abiSize(mod);
+ const payload_align = child_ty.abiAlignment(mod);
+ const non_null_size = non_null_ty.abiSize(mod);
+ const non_null_align = non_null_ty.abiAlignment(mod);
var offset: u64 = 0;
offset += payload_size;
@@ -1866,8 +1869,8 @@ pub const Object = struct {
name.ptr,
di_file,
line,
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
null, // derived from
&fields,
@@ -1883,7 +1886,7 @@ pub const Object = struct {
},
.ErrorUnion => {
const payload_ty = ty.errorUnionPayload();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const err_set_di_ty = try o.lowerDebugType(Type.anyerror, .full);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .mod = o.module });
@@ -1907,10 +1910,10 @@ pub const Object = struct {
break :blk fwd_decl;
};
- const error_size = Type.anyerror.abiSize(target);
- const error_align = Type.anyerror.abiAlignment(target);
- const payload_size = payload_ty.abiSize(target);
- const payload_align = payload_ty.abiAlignment(target);
+ const error_size = Type.anyerror.abiSize(mod);
+ const error_align = Type.anyerror.abiAlignment(mod);
+ const payload_size = payload_ty.abiSize(mod);
+ const payload_align = payload_ty.abiAlignment(mod);
var error_index: u32 = undefined;
var payload_index: u32 = undefined;
@@ -1957,8 +1960,8 @@ pub const Object = struct {
name.ptr,
di_file,
line,
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
null, // derived from
&fields,
@@ -1988,12 +1991,12 @@ pub const Object = struct {
const struct_obj = payload.data;
if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
assert(struct_obj.haveLayout());
- const info = struct_obj.backing_int_ty.intInfo(target);
+ const info = struct_obj.backing_int_ty.intInfo(mod);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
.unsigned => DW.ATE.unsigned,
};
- const di_bits = ty.abiSize(target) * 8; // lldb cannot handle non-byte sized types
+ const di_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types
const di_ty = dib.createBasicType(name, di_bits, dwarf_encoding);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
return di_ty;
@@ -2026,10 +2029,10 @@ pub const Object = struct {
for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
- if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
+ if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue;
- const field_size = field_ty.abiSize(target);
- const field_align = field_ty.abiAlignment(target);
+ const field_size = field_ty.abiSize(mod);
+ const field_align = field_ty.abiAlignment(mod);
const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align);
offset = field_offset + field_size;
@@ -2057,8 +2060,8 @@ pub const Object = struct {
name.ptr,
null, // file
0, // line
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
null, // derived from
di_fields.items.ptr,
@@ -2093,7 +2096,7 @@ pub const Object = struct {
}
}
- if (!ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
const owner_decl_index = ty.getOwnerDecl();
const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
dib.replaceTemporary(fwd_decl, struct_di_ty);
@@ -2114,11 +2117,11 @@ pub const Object = struct {
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
- var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator();
+ var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod);
while (it.next()) |field_and_index| {
const field = field_and_index.field;
- const field_size = field.ty.abiSize(target);
- const field_align = field.alignment(target, layout);
+ const field_size = field.ty.abiSize(mod);
+ const field_align = field.alignment(mod, layout);
const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align);
offset = field_offset + field_size;
@@ -2143,8 +2146,8 @@ pub const Object = struct {
name.ptr,
null, // file
0, // line
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
null, // derived from
di_fields.items.ptr,
@@ -2179,7 +2182,7 @@ pub const Object = struct {
};
const union_obj = ty.cast(Type.Payload.Union).?.data;
- if (!union_obj.haveFieldTypes() or !ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!union_obj.haveFieldTypes() or !ty.hasRuntimeBitsIgnoreComptime(mod)) {
const union_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
dib.replaceTemporary(fwd_decl, union_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
@@ -2188,7 +2191,7 @@ pub const Object = struct {
return union_di_ty;
}
- const layout = ty.unionGetLayout(target);
+ const layout = ty.unionGetLayout(mod);
if (layout.payload_size == 0) {
const tag_di_ty = try o.lowerDebugType(union_obj.tag_ty, .full);
@@ -2198,8 +2201,8 @@ pub const Object = struct {
name.ptr,
null, // file
0, // line
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
null, // derived from
&di_fields,
@@ -2225,10 +2228,10 @@ pub const Object = struct {
const field_name = kv.key_ptr.*;
const field = kv.value_ptr.*;
- if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- const field_size = field.ty.abiSize(target);
- const field_align = field.normalAlignment(target);
+ const field_size = field.ty.abiSize(mod);
+ const field_align = field.normalAlignment(mod);
const field_name_copy = try gpa.dupeZ(u8, field_name);
defer gpa.free(field_name_copy);
@@ -2258,8 +2261,8 @@ pub const Object = struct {
union_name.ptr,
null, // file
0, // line
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
di_fields.items.ptr,
@intCast(c_int, di_fields.items.len),
@@ -2319,8 +2322,8 @@ pub const Object = struct {
name.ptr,
null, // file
0, // line
- ty.abiSize(target) * 8, // size in bits
- ty.abiAlignment(target) * 8, // align in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
null, // derived from
&full_di_fields,
@@ -2341,8 +2344,8 @@ pub const Object = struct {
defer param_di_types.deinit();
// Return type goes first.
- if (fn_info.return_type.hasRuntimeBitsIgnoreComptime()) {
- const sret = firstParamSRet(fn_info, target);
+ if (fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+ const sret = firstParamSRet(fn_info, mod);
const di_ret_ty = if (sret) Type.void else fn_info.return_type;
try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full));
@@ -2358,7 +2361,7 @@ pub const Object = struct {
try param_di_types.append(try o.lowerDebugType(Type.void, .full));
}
- if (fn_info.return_type.isError() and
+ if (fn_info.return_type.isError(mod) and
o.module.comp.bin_file.options.error_return_tracing)
{
var ptr_ty_payload: Type.Payload.ElemType = .{
@@ -2370,9 +2373,9 @@ pub const Object = struct {
}
for (fn_info.param_types) |param_ty| {
- if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- if (isByRef(param_ty)) {
+ if (isByRef(param_ty, mod)) {
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
.data = param_ty,
@@ -2450,7 +2453,7 @@ pub const Object = struct {
const stack_trace_str: []const u8 = "StackTrace";
// buffer is only used for int_type, `builtin` is a struct.
- const builtin_ty = mod.declPtr(builtin_decl).val.toType(undefined);
+ const builtin_ty = mod.declPtr(builtin_decl).val.toType();
const builtin_namespace = builtin_ty.getNamespace().?;
const stack_trace_decl_index = builtin_namespace.decls
.getKeyAdapted(stack_trace_str, Module.DeclAdapter{ .mod = mod }).?;
@@ -2458,7 +2461,7 @@ pub const Object = struct {
// Sema should have ensured that StackTrace was analyzed.
assert(stack_trace_decl.has_tv);
- return stack_trace_decl.val.toType(undefined);
+ return stack_trace_decl.val.toType();
}
};
@@ -2495,9 +2498,10 @@ pub const DeclGen = struct {
if (decl.val.castTag(.extern_fn)) |extern_fn| {
_ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl);
} else {
- const target = dg.module.getTarget();
+ const mod = dg.module;
+ const target = mod.getTarget();
var global = try dg.resolveGlobalDecl(decl_index);
- global.setAlignment(decl.getAlignment(target));
+ global.setAlignment(decl.getAlignment(mod));
if (decl.@"linksection") |section| global.setSection(section);
assert(decl.has_tv);
const init_val = if (decl.val.castTag(.variable)) |payload| init_val: {
@@ -2569,19 +2573,20 @@ pub const DeclGen = struct {
/// Note that this can be called before the function's semantic analysis has
/// completed, so if any attributes rely on that, they must be done in updateFunc, not here.
fn resolveLlvmFunction(dg: *DeclGen, decl_index: Module.Decl.Index) !*llvm.Value {
- const decl = dg.module.declPtr(decl_index);
+ const mod = dg.module;
+ const decl = mod.declPtr(decl_index);
const zig_fn_type = decl.ty;
const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl_index);
if (gop.found_existing) return gop.value_ptr.*;
assert(decl.has_tv);
const fn_info = zig_fn_type.fnInfo();
- const target = dg.module.getTarget();
- const sret = firstParamSRet(fn_info, target);
+ const target = mod.getTarget();
+ const sret = firstParamSRet(fn_info, mod);
const fn_type = try dg.lowerType(zig_fn_type);
- const fqn = try decl.getFullyQualifiedName(dg.module);
+ const fqn = try decl.getFullyQualifiedName(mod);
defer dg.gpa.free(fqn);
const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
@@ -2593,7 +2598,7 @@ pub const DeclGen = struct {
llvm_fn.setLinkage(.Internal);
llvm_fn.setUnnamedAddr(.True);
} else {
- if (dg.module.getTarget().isWasm()) {
+ if (target.isWasm()) {
dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0));
if (decl.getExternFn().?.lib_name) |lib_name| {
const module_name = std.mem.sliceTo(lib_name, 0);
@@ -2612,8 +2617,8 @@ pub const DeclGen = struct {
llvm_fn.addSretAttr(raw_llvm_ret_ty);
}
- const err_return_tracing = fn_info.return_type.isError() and
- dg.module.comp.bin_file.options.error_return_tracing;
+ const err_return_tracing = fn_info.return_type.isError(mod) and
+ mod.comp.bin_file.options.error_return_tracing;
if (err_return_tracing) {
dg.addArgAttr(llvm_fn, @boolToInt(sret), "nonnull");
@@ -2656,14 +2661,14 @@ pub const DeclGen = struct {
.byval => {
const param_index = it.zig_index - 1;
const param_ty = fn_info.param_types[param_index];
- if (!isByRef(param_ty)) {
+ if (!isByRef(param_ty, mod)) {
dg.addByValParamAttrs(llvm_fn, param_ty, param_index, fn_info, it.llvm_index - 1);
}
},
.byref => {
const param_ty = fn_info.param_types[it.zig_index - 1];
const param_llvm_ty = try dg.lowerType(param_ty);
- const alignment = param_ty.abiAlignment(target);
+ const alignment = param_ty.abiAlignment(mod);
dg.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
},
.byref_mut => {
@@ -2784,12 +2789,13 @@ pub const DeclGen = struct {
fn lowerType(dg: *DeclGen, t: Type) Allocator.Error!*llvm.Type {
const llvm_ty = try lowerTypeInner(dg, t);
+ const mod = dg.module;
if (std.debug.runtime_safety and false) check: {
- if (t.zigTypeTag() == .Opaque) break :check;
- if (!t.hasRuntimeBits()) break :check;
+ if (t.zigTypeTag(mod) == .Opaque) break :check;
+ if (!t.hasRuntimeBits(mod)) break :check;
if (!llvm_ty.isSized().toBool()) break :check;
- const zig_size = t.abiSize(dg.module.getTarget());
+ const zig_size = t.abiSize(mod);
const llvm_size = dg.object.target_data.abiSizeOfType(llvm_ty);
if (llvm_size != zig_size) {
log.err("when lowering {}, Zig ABI size = {d} but LLVM ABI size = {d}", .{
@@ -2802,18 +2808,18 @@ pub const DeclGen = struct {
fn lowerTypeInner(dg: *DeclGen, t: Type) Allocator.Error!*llvm.Type {
const gpa = dg.gpa;
- const target = dg.module.getTarget();
- switch (t.zigTypeTag()) {
+ const mod = dg.module;
+ const target = mod.getTarget();
+ switch (t.zigTypeTag(mod)) {
.Void, .NoReturn => return dg.context.voidType(),
.Int => {
- const info = t.intInfo(target);
+ const info = t.intInfo(mod);
assert(info.bits != 0);
return dg.context.intType(info.bits);
},
.Enum => {
- var buffer: Type.Payload.Bits = undefined;
- const int_ty = t.intTagType(&buffer);
- const bit_count = int_ty.intInfo(target).bits;
+ const int_ty = t.intTagType();
+ const bit_count = int_ty.intInfo(mod).bits;
assert(bit_count != 0);
return dg.context.intType(bit_count);
},
@@ -2863,7 +2869,7 @@ pub const DeclGen = struct {
},
.Array => {
const elem_ty = t.childType();
- assert(elem_ty.onePossibleValue() == null);
+ assert(elem_ty.onePossibleValue(mod) == null);
const elem_llvm_ty = try dg.lowerType(elem_ty);
const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null);
return elem_llvm_ty.arrayType(@intCast(c_uint, total_len));
@@ -2875,11 +2881,11 @@ pub const DeclGen = struct {
.Optional => {
var buf: Type.Payload.ElemType = undefined;
const child_ty = t.optionalChild(&buf);
- if (!child_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return dg.context.intType(8);
}
const payload_llvm_ty = try dg.lowerType(child_ty);
- if (t.optionalReprIsPayload()) {
+ if (t.optionalReprIsPayload(mod)) {
return payload_llvm_ty;
}
@@ -2887,8 +2893,8 @@ pub const DeclGen = struct {
var fields_buf: [3]*llvm.Type = .{
payload_llvm_ty, dg.context.intType(8), undefined,
};
- const offset = child_ty.abiSize(target) + 1;
- const abi_size = t.abiSize(target);
+ const offset = child_ty.abiSize(mod) + 1;
+ const abi_size = t.abiSize(mod);
const padding = @intCast(c_uint, abi_size - offset);
if (padding == 0) {
return dg.context.structType(&fields_buf, 2, .False);
@@ -2898,17 +2904,17 @@ pub const DeclGen = struct {
},
.ErrorUnion => {
const payload_ty = t.errorUnionPayload();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return try dg.lowerType(Type.anyerror);
}
const llvm_error_type = try dg.lowerType(Type.anyerror);
const llvm_payload_type = try dg.lowerType(payload_ty);
- const payload_align = payload_ty.abiAlignment(target);
- const error_align = Type.anyerror.abiAlignment(target);
+ const payload_align = payload_ty.abiAlignment(mod);
+ const error_align = Type.anyerror.abiAlignment(mod);
- const payload_size = payload_ty.abiSize(target);
- const error_size = Type.anyerror.abiSize(target);
+ const payload_size = payload_ty.abiSize(mod);
+ const error_size = Type.anyerror.abiSize(mod);
var fields_buf: [3]*llvm.Type = undefined;
if (error_align > payload_align) {
@@ -2964,9 +2970,9 @@ pub const DeclGen = struct {
for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
- if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
+ if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue;
- const field_align = field_ty.abiAlignment(target);
+ const field_align = field_ty.abiAlignment(mod);
big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
@@ -2979,7 +2985,7 @@ pub const DeclGen = struct {
const field_llvm_ty = try dg.lowerType(field_ty);
try llvm_field_types.append(gpa, field_llvm_ty);
- offset += field_ty.abiSize(target);
+ offset += field_ty.abiSize(mod);
}
{
const prev_offset = offset;
@@ -3027,11 +3033,11 @@ pub const DeclGen = struct {
var big_align: u32 = 1;
var any_underaligned_fields = false;
- var it = struct_obj.runtimeFieldIterator();
+ var it = struct_obj.runtimeFieldIterator(mod);
while (it.next()) |field_and_index| {
const field = field_and_index.field;
- const field_align = field.alignment(target, struct_obj.layout);
- const field_ty_align = field.ty.abiAlignment(target);
+ const field_align = field.alignment(mod, struct_obj.layout);
+ const field_ty_align = field.ty.abiAlignment(mod);
any_underaligned_fields = any_underaligned_fields or
field_align < field_ty_align;
big_align = @max(big_align, field_align);
@@ -3046,7 +3052,7 @@ pub const DeclGen = struct {
const field_llvm_ty = try dg.lowerType(field.ty);
try llvm_field_types.append(gpa, field_llvm_ty);
- offset += field.ty.abiSize(target);
+ offset += field.ty.abiSize(mod);
}
{
const prev_offset = offset;
@@ -3074,11 +3080,11 @@ pub const DeclGen = struct {
// reference, we need to copy it here.
gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());
- const layout = t.unionGetLayout(target);
+ const layout = t.unionGetLayout(mod);
const union_obj = t.cast(Type.Payload.Union).?.data;
if (union_obj.layout == .Packed) {
- const bitsize = @intCast(c_uint, t.bitSize(target));
+ const bitsize = @intCast(c_uint, t.bitSize(mod));
const int_llvm_ty = dg.context.intType(bitsize);
gop.value_ptr.* = int_llvm_ty;
return int_llvm_ty;
@@ -3155,19 +3161,19 @@ pub const DeclGen = struct {
}
fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*llvm.Type {
- const target = dg.module.getTarget();
+ const mod = dg.module;
const fn_info = fn_ty.fnInfo();
const llvm_ret_ty = try lowerFnRetTy(dg, fn_info);
var llvm_params = std.ArrayList(*llvm.Type).init(dg.gpa);
defer llvm_params.deinit();
- if (firstParamSRet(fn_info, target)) {
+ if (firstParamSRet(fn_info, mod)) {
try llvm_params.append(dg.context.pointerType(0));
}
- if (fn_info.return_type.isError() and
- dg.module.comp.bin_file.options.error_return_tracing)
+ if (fn_info.return_type.isError(mod) and
+ mod.comp.bin_file.options.error_return_tracing)
{
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
@@ -3189,14 +3195,14 @@ pub const DeclGen = struct {
},
.abi_sized_int => {
const param_ty = fn_info.param_types[it.zig_index - 1];
- const abi_size = @intCast(c_uint, param_ty.abiSize(target));
+ const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
try llvm_params.append(dg.context.intType(abi_size * 8));
},
.slice => {
const param_ty = fn_info.param_types[it.zig_index - 1];
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
var opt_buf: Type.Payload.ElemType = undefined;
- const ptr_ty = if (param_ty.zigTypeTag() == .Optional)
+ const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional)
param_ty.optionalChild(&opt_buf).slicePtrFieldType(&buf)
else
param_ty.slicePtrFieldType(&buf);
@@ -3215,7 +3221,7 @@ pub const DeclGen = struct {
},
.float_array => |count| {
const param_ty = fn_info.param_types[it.zig_index - 1];
- const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty).?);
+ const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?);
const field_count = @intCast(c_uint, count);
const arr_ty = float_ty.arrayType(field_count);
try llvm_params.append(arr_ty);
@@ -3239,11 +3245,12 @@ pub const DeclGen = struct {
/// being a zero bit type, but it should still be lowered as an i8 in such a case.
/// There are other similar cases handled here as well.
fn lowerPtrElemTy(dg: *DeclGen, elem_ty: Type) Allocator.Error!*llvm.Type {
- const lower_elem_ty = switch (elem_ty.zigTypeTag()) {
+ const mod = dg.module;
+ const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) {
.Opaque => true,
.Fn => !elem_ty.fnInfo().is_generic,
- .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(),
- else => elem_ty.hasRuntimeBitsIgnoreComptime(),
+ .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(mod),
+ else => elem_ty.hasRuntimeBitsIgnoreComptime(mod),
};
const llvm_elem_ty = if (lower_elem_ty)
try dg.lowerType(elem_ty)
@@ -3262,9 +3269,9 @@ pub const DeclGen = struct {
const llvm_type = try dg.lowerType(tv.ty);
return llvm_type.getUndef();
}
- const target = dg.module.getTarget();
-
- switch (tv.ty.zigTypeTag()) {
+ const mod = dg.module;
+ const target = mod.getTarget();
+ switch (tv.ty.zigTypeTag(mod)) {
.Bool => {
const llvm_type = try dg.lowerType(tv.ty);
return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull();
@@ -3276,8 +3283,8 @@ pub const DeclGen = struct {
.decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data),
else => {
var bigint_space: Value.BigIntSpace = undefined;
- const bigint = tv.val.toBigInt(&bigint_space, target);
- const int_info = tv.ty.intInfo(target);
+ const bigint = tv.val.toBigInt(&bigint_space, mod);
+ const int_info = tv.ty.intInfo(mod);
assert(int_info.bits != 0);
const llvm_type = dg.context.intType(int_info.bits);
@@ -3304,9 +3311,9 @@ pub const DeclGen = struct {
const int_val = tv.enumToInt(&int_buffer);
var bigint_space: Value.BigIntSpace = undefined;
- const bigint = int_val.toBigInt(&bigint_space, target);
+ const bigint = int_val.toBigInt(&bigint_space, mod);
- const int_info = tv.ty.intInfo(target);
+ const int_info = tv.ty.intInfo(mod);
const llvm_type = dg.context.intType(int_info.bits);
const unsigned_val = v: {
@@ -3408,7 +3415,7 @@ pub const DeclGen = struct {
},
.int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => {
const llvm_usize = try dg.lowerType(Type.usize);
- const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(target), .False);
+ const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(mod), .False);
return llvm_int.constIntToPtr(try dg.lowerType(tv.ty));
},
.field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => {
@@ -3439,7 +3446,7 @@ pub const DeclGen = struct {
const str_lit = tv.val.castTag(.str_lit).?.data;
const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
if (tv.ty.sentinel()) |sent_val| {
- const byte = @intCast(u8, sent_val.toUnsignedInt(target));
+ const byte = @intCast(u8, sent_val.toUnsignedInt(mod));
if (byte == 0 and bytes.len > 0) {
return dg.context.constString(
bytes.ptr,
@@ -3549,13 +3556,13 @@ pub const DeclGen = struct {
const payload_ty = tv.ty.optionalChild(&buf);
const llvm_i8 = dg.context.intType(8);
- const is_pl = !tv.val.isNull();
+ const is_pl = !tv.val.isNull(mod);
const non_null_bit = if (is_pl) llvm_i8.constInt(1, .False) else llvm_i8.constNull();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return non_null_bit;
}
const llvm_ty = try dg.lowerType(tv.ty);
- if (tv.ty.optionalReprIsPayload()) {
+ if (tv.ty.optionalReprIsPayload(mod)) {
if (tv.val.castTag(.opt_payload)) |payload| {
return dg.lowerValue(.{ .ty = payload_ty, .val = payload.data });
} else if (is_pl) {
@@ -3564,7 +3571,7 @@ pub const DeclGen = struct {
return llvm_ty.constNull();
}
}
- assert(payload_ty.zigTypeTag() != .Fn);
+ assert(payload_ty.zigTypeTag(mod) != .Fn);
const llvm_field_count = llvm_ty.countStructElementTypes();
var fields_buf: [3]*llvm.Value = undefined;
@@ -3607,14 +3614,14 @@ pub const DeclGen = struct {
const payload_type = tv.ty.errorUnionPayload();
const is_pl = tv.val.errorUnionIsPayload();
- if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
// We use the error type directly as the type.
const err_val = if (!is_pl) tv.val else Value.initTag(.zero);
return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val });
}
- const payload_align = payload_type.abiAlignment(target);
- const error_align = Type.anyerror.abiAlignment(target);
+ const payload_align = payload_type.abiAlignment(mod);
+ const error_align = Type.anyerror.abiAlignment(mod);
const llvm_error_value = try dg.lowerValue(.{
.ty = Type.anyerror,
.val = if (is_pl) Value.initTag(.zero) else tv.val,
@@ -3661,9 +3668,9 @@ pub const DeclGen = struct {
for (tuple.types, 0..) |field_ty, i| {
if (tuple.values[i].tag() != .unreachable_value) continue;
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- const field_align = field_ty.abiAlignment(target);
+ const field_align = field_ty.abiAlignment(mod);
big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
@@ -3685,7 +3692,7 @@ pub const DeclGen = struct {
llvm_fields.appendAssumeCapacity(field_llvm_val);
- offset += field_ty.abiSize(target);
+ offset += field_ty.abiSize(mod);
}
{
const prev_offset = offset;
@@ -3715,7 +3722,7 @@ pub const DeclGen = struct {
if (struct_obj.layout == .Packed) {
assert(struct_obj.haveLayout());
- const big_bits = struct_obj.backing_int_ty.bitSize(target);
+ const big_bits = struct_obj.backing_int_ty.bitSize(mod);
const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
@@ -3723,15 +3730,15 @@ pub const DeclGen = struct {
var running_bits: u16 = 0;
for (field_vals, 0..) |field_val, i| {
const field = fields[i];
- if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const non_int_val = try dg.lowerValue(.{
.ty = field.ty,
.val = field_val,
});
- const ty_bit_size = @intCast(u16, field.ty.bitSize(target));
+ const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
const small_int_ty = dg.context.intType(ty_bit_size);
- const small_int_val = if (field.ty.isPtrAtRuntime())
+ const small_int_val = if (field.ty.isPtrAtRuntime(mod))
non_int_val.constPtrToInt(small_int_ty)
else
non_int_val.constBitCast(small_int_ty);
@@ -3756,10 +3763,10 @@ pub const DeclGen = struct {
var big_align: u32 = 0;
var need_unnamed = false;
- var it = struct_obj.runtimeFieldIterator();
+ var it = struct_obj.runtimeFieldIterator(mod);
while (it.next()) |field_and_index| {
const field = field_and_index.field;
- const field_align = field.alignment(target, struct_obj.layout);
+ const field_align = field.alignment(mod, struct_obj.layout);
big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
@@ -3781,7 +3788,7 @@ pub const DeclGen = struct {
llvm_fields.appendAssumeCapacity(field_llvm_val);
- offset += field.ty.abiSize(target);
+ offset += field.ty.abiSize(mod);
}
{
const prev_offset = offset;
@@ -3810,7 +3817,7 @@ pub const DeclGen = struct {
const llvm_union_ty = try dg.lowerType(tv.ty);
const tag_and_val = tv.val.castTag(.@"union").?.data;
- const layout = tv.ty.unionGetLayout(target);
+ const layout = tv.ty.unionGetLayout(mod);
if (layout.payload_size == 0) {
return lowerValue(dg, .{
@@ -3824,12 +3831,12 @@ pub const DeclGen = struct {
const field_ty = union_obj.fields.values()[field_index].ty;
if (union_obj.layout == .Packed) {
- if (!field_ty.hasRuntimeBits())
+ if (!field_ty.hasRuntimeBits(mod))
return llvm_union_ty.constNull();
const non_int_val = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val });
- const ty_bit_size = @intCast(u16, field_ty.bitSize(target));
+ const ty_bit_size = @intCast(u16, field_ty.bitSize(mod));
const small_int_ty = dg.context.intType(ty_bit_size);
- const small_int_val = if (field_ty.isPtrAtRuntime())
+ const small_int_val = if (field_ty.isPtrAtRuntime(mod))
non_int_val.constPtrToInt(small_int_ty)
else
non_int_val.constBitCast(small_int_ty);
@@ -3842,13 +3849,13 @@ pub const DeclGen = struct {
// must pointer cast to the expected type before accessing the union.
var need_unnamed: bool = layout.most_aligned_field != field_index;
const payload = p: {
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const padding_len = @intCast(c_uint, layout.payload_size);
break :p dg.context.intType(8).arrayType(padding_len).getUndef();
}
const field = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val });
need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field);
- const field_size = field_ty.abiSize(target);
+ const field_size = field_ty.abiSize(mod);
if (field_size == layout.payload_size) {
break :p field;
}
@@ -4012,7 +4019,8 @@ pub const DeclGen = struct {
}
fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value {
- const target = dg.module.getTarget();
+ const mod = dg.module;
+ const target = mod.getTarget();
switch (ptr_val.tag()) {
.decl_ref_mut => {
const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
@@ -4045,13 +4053,13 @@ pub const DeclGen = struct {
const field_index = @intCast(u32, field_ptr.field_index);
const llvm_u32 = dg.context.intType(32);
- switch (parent_ty.zigTypeTag()) {
+ switch (parent_ty.zigTypeTag(mod)) {
.Union => {
if (parent_ty.containerLayout() == .Packed) {
return parent_llvm_ptr;
}
- const layout = parent_ty.unionGetLayout(target);
+ const layout = parent_ty.unionGetLayout(mod);
if (layout.payload_size == 0) {
// In this case a pointer to the union and a pointer to any
// (void) payload is the same.
@@ -4077,8 +4085,8 @@ pub const DeclGen = struct {
const prev_bits = b: {
var b: usize = 0;
for (parent_ty.structFields().values()[0..field_index]) |field| {
- if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue;
- b += @intCast(usize, field.ty.bitSize(target));
+ if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+ b += @intCast(usize, field.ty.bitSize(mod));
}
break :b b;
};
@@ -4091,14 +4099,14 @@ pub const DeclGen = struct {
var ty_buf: Type.Payload.Pointer = undefined;
const parent_llvm_ty = try dg.lowerType(parent_ty);
- if (llvmFieldIndex(parent_ty, field_index, target, &ty_buf)) |llvm_field_index| {
+ if (llvmFieldIndex(parent_ty, field_index, mod, &ty_buf)) |llvm_field_index| {
const indices: [2]*llvm.Value = .{
llvm_u32.constInt(0, .False),
llvm_u32.constInt(llvm_field_index, .False),
};
return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
} else {
- const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime()), .False);
+ const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False);
const indices: [1]*llvm.Value = .{llvm_index};
return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
}
@@ -4132,8 +4140,8 @@ pub const DeclGen = struct {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime() or
- payload_ty.optionalReprIsPayload())
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
+ payload_ty.optionalReprIsPayload(mod))
{
// In this case, we represent pointer to optional the same as pointer
// to the payload.
@@ -4153,13 +4161,13 @@ pub const DeclGen = struct {
const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, true);
const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// In this case, we represent pointer to error union the same as pointer
// to the payload.
return parent_llvm_ptr;
}
- const payload_offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1;
+ const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1;
const llvm_u32 = dg.context.intType(32);
const indices: [2]*llvm.Value = .{
llvm_u32.constInt(0, .False),
@@ -4177,12 +4185,13 @@ pub const DeclGen = struct {
tv: TypedValue,
decl_index: Module.Decl.Index,
) Error!*llvm.Value {
+ const mod = self.module;
if (tv.ty.isSlice()) {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = tv.ty.slicePtrFieldType(&buf);
var slice_len: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
- .data = tv.val.sliceLen(self.module),
+ .data = tv.val.sliceLen(mod),
};
const fields: [2]*llvm.Value = .{
try self.lowerValue(.{
@@ -4202,7 +4211,7 @@ pub const DeclGen = struct {
// const bar = foo;
// ... &bar;
// `bar` is just an alias and we actually want to lower a reference to `foo`.
- const decl = self.module.declPtr(decl_index);
+ const decl = mod.declPtr(decl_index);
if (decl.val.castTag(.function)) |func| {
if (func.data.owner_decl != decl_index) {
return self.lowerDeclRefValue(tv, func.data.owner_decl);
@@ -4213,21 +4222,21 @@ pub const DeclGen = struct {
}
}
- const is_fn_body = decl.ty.zigTypeTag() == .Fn;
- if ((!is_fn_body and !decl.ty.hasRuntimeBits()) or
+ const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn;
+ if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or
(is_fn_body and decl.ty.fnInfo().is_generic))
{
return self.lowerPtrToVoid(tv.ty);
}
- self.module.markDeclAlive(decl);
+ mod.markDeclAlive(decl);
const llvm_decl_val = if (is_fn_body)
try self.resolveLlvmFunction(decl_index)
else
try self.resolveGlobalDecl(decl_index);
- const target = self.module.getTarget();
+ const target = mod.getTarget();
const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
const llvm_val = if (llvm_wanted_addrspace != llvm_actual_addrspace) blk: {
@@ -4236,7 +4245,7 @@ pub const DeclGen = struct {
} else llvm_decl_val;
const llvm_type = try self.lowerType(tv.ty);
- if (tv.ty.zigTypeTag() == .Int) {
+ if (tv.ty.zigTypeTag(mod) == .Int) {
return llvm_val.constPtrToInt(llvm_type);
} else {
return llvm_val.constBitCast(llvm_type);
@@ -4338,21 +4347,20 @@ pub const DeclGen = struct {
/// RMW exchange of floating-point values is bitcasted to same-sized integer
/// types to work around an LLVM deficiency when targeting ARM/AArch64.
fn getAtomicAbiType(dg: *DeclGen, ty: Type, is_rmw_xchg: bool) ?*llvm.Type {
- const target = dg.module.getTarget();
- var buffer: Type.Payload.Bits = undefined;
- const int_ty = switch (ty.zigTypeTag()) {
+ const mod = dg.module;
+ const int_ty = switch (ty.zigTypeTag(mod)) {
.Int => ty,
- .Enum => ty.intTagType(&buffer),
+ .Enum => ty.intTagType(),
.Float => {
if (!is_rmw_xchg) return null;
- return dg.context.intType(@intCast(c_uint, ty.abiSize(target) * 8));
+ return dg.context.intType(@intCast(c_uint, ty.abiSize(mod) * 8));
},
.Bool => return dg.context.intType(8),
else => return null,
};
- const bit_count = int_ty.intInfo(target).bits;
+ const bit_count = int_ty.intInfo(mod).bits;
if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) {
- return dg.context.intType(@intCast(c_uint, int_ty.abiSize(target) * 8));
+ return dg.context.intType(@intCast(c_uint, int_ty.abiSize(mod) * 8));
} else {
return null;
}
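
As a rough illustration of the case that doc comment describes (a sketch only, using the pre-0.11 builtin spellings seen elsewhere in this diff): an f32 exchange is legal at the language level, and the code above routes it through a same-sized integer type on the affected targets.

const std = @import("std");

test "atomic exchange of a float" {
    // .Xchg on floats is allowed; getAtomicAbiType above picks an i32-sized
    // integer so the exchange avoids the LLVM float-RMW issue.
    var shared: f32 = 1.0;
    const prev = @atomicRmw(f32, &shared, .Xchg, 2.5, .SeqCst);
    try std.testing.expect(prev == 1.0);
    try std.testing.expect(shared == 2.5);
}
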
@@ -4366,15 +4374,15 @@ pub const DeclGen = struct {
fn_info: Type.Payload.Function.Data,
llvm_arg_i: u32,
) void {
- const target = dg.module.getTarget();
- if (param_ty.isPtrAtRuntime()) {
+ const mod = dg.module;
+ if (param_ty.isPtrAtRuntime(mod)) {
const ptr_info = param_ty.ptrInfo().data;
if (math.cast(u5, param_index)) |i| {
if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
dg.addArgAttr(llvm_fn, llvm_arg_i, "noalias");
}
}
- if (!param_ty.isPtrLikeOptional() and !ptr_info.@"allowzero") {
+ if (!param_ty.isPtrLikeOptional(mod) and !ptr_info.@"allowzero") {
dg.addArgAttr(llvm_fn, llvm_arg_i, "nonnull");
}
if (!ptr_info.mutable) {
@@ -4383,13 +4391,10 @@ pub const DeclGen = struct {
if (ptr_info.@"align" != 0) {
dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", ptr_info.@"align");
} else {
- const elem_align = @max(
- ptr_info.pointee_type.abiAlignment(target),
- 1,
- );
+ const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", elem_align);
}
- } else if (ccAbiPromoteInt(fn_info.cc, target, param_ty)) |s| switch (s) {
+ } else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) {
.signed => dg.addArgAttr(llvm_fn, llvm_arg_i, "signext"),
.unsigned => dg.addArgAttr(llvm_fn, llvm_arg_i, "zeroext"),
};
@@ -4490,9 +4495,10 @@ pub const FuncGen = struct {
const gop = try self.func_inst_table.getOrPut(self.dg.gpa, inst);
if (gop.found_existing) return gop.value_ptr.*;
+ const mod = self.dg.module;
const llvm_val = try self.resolveValue(.{
.ty = self.air.typeOf(inst),
- .val = self.air.value(inst).?,
+ .val = self.air.value(inst, mod).?,
});
gop.value_ptr.* = llvm_val;
return llvm_val;
@@ -4500,11 +4506,12 @@ pub const FuncGen = struct {
fn resolveValue(self: *FuncGen, tv: TypedValue) !*llvm.Value {
const llvm_val = try self.dg.lowerValue(tv);
- if (!isByRef(tv.ty)) return llvm_val;
+ const mod = self.dg.module;
+ if (!isByRef(tv.ty, mod)) return llvm_val;
// We have an LLVM value but we need to create a global constant and
// set the value as its initializer, and then return a pointer to the global.
- const target = self.dg.module.getTarget();
+ const target = mod.getTarget();
const llvm_wanted_addrspace = toLlvmAddressSpace(.generic, target);
const llvm_actual_addrspace = toLlvmGlobalAddressSpace(.generic, target);
const global = self.dg.object.llvm_module.addGlobalInAddressSpace(llvm_val.typeOf(), "", llvm_actual_addrspace);
@@ -4512,7 +4519,7 @@ pub const FuncGen = struct {
global.setLinkage(.Private);
global.setGlobalConstant(.True);
global.setUnnamedAddr(.True);
- global.setAlignment(tv.ty.abiAlignment(target));
+ global.setAlignment(tv.ty.abiAlignment(mod));
const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace)
global.constAddrSpaceCast(self.context.pointerType(llvm_wanted_addrspace))
else
@@ -4775,7 +4782,8 @@ pub const FuncGen = struct {
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const callee_ty = self.air.typeOf(pl_op.operand);
- const zig_fn_ty = switch (callee_ty.zigTypeTag()) {
+ const mod = self.dg.module;
+ const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
.Fn => callee_ty,
.Pointer => callee_ty.childType(),
else => unreachable,
@@ -4783,20 +4791,20 @@ pub const FuncGen = struct {
const fn_info = zig_fn_ty.fnInfo();
const return_type = fn_info.return_type;
const llvm_fn = try self.resolveInst(pl_op.operand);
- const target = self.dg.module.getTarget();
- const sret = firstParamSRet(fn_info, target);
+ const target = mod.getTarget();
+ const sret = firstParamSRet(fn_info, mod);
var llvm_args = std.ArrayList(*llvm.Value).init(self.gpa);
defer llvm_args.deinit();
const ret_ptr = if (!sret) null else blk: {
const llvm_ret_ty = try self.dg.lowerType(return_type);
- const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(target));
+ const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(mod));
try llvm_args.append(ret_ptr);
break :blk ret_ptr;
};
- const err_return_tracing = fn_info.return_type.isError() and
+ const err_return_tracing = fn_info.return_type.isError(mod) and
self.dg.module.comp.bin_file.options.error_return_tracing;
if (err_return_tracing) {
try llvm_args.append(self.err_ret_trace.?);
@@ -4810,8 +4818,8 @@ pub const FuncGen = struct {
const param_ty = self.air.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
const llvm_param_ty = try self.dg.lowerType(param_ty);
- if (isByRef(param_ty)) {
- const alignment = param_ty.abiAlignment(target);
+ if (isByRef(param_ty, mod)) {
+ const alignment = param_ty.abiAlignment(mod);
const load_inst = self.builder.buildLoad(llvm_param_ty, llvm_arg, "");
load_inst.setAlignment(alignment);
try llvm_args.append(load_inst);
@@ -4823,10 +4831,10 @@ pub const FuncGen = struct {
const arg = args[it.zig_index - 1];
const param_ty = self.air.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
- if (isByRef(param_ty)) {
+ if (isByRef(param_ty, mod)) {
try llvm_args.append(llvm_arg);
} else {
- const alignment = param_ty.abiAlignment(target);
+ const alignment = param_ty.abiAlignment(mod);
const param_llvm_ty = llvm_arg.typeOf();
const arg_ptr = self.buildAlloca(param_llvm_ty, alignment);
const store_inst = self.builder.buildStore(llvm_arg, arg_ptr);
@@ -4839,10 +4847,10 @@ pub const FuncGen = struct {
const param_ty = self.air.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
- const alignment = param_ty.abiAlignment(target);
+ const alignment = param_ty.abiAlignment(mod);
const param_llvm_ty = try self.dg.lowerType(param_ty);
const arg_ptr = self.buildAlloca(param_llvm_ty, alignment);
- if (isByRef(param_ty)) {
+ if (isByRef(param_ty, mod)) {
const load_inst = self.builder.buildLoad(param_llvm_ty, llvm_arg, "");
load_inst.setAlignment(alignment);
@@ -4859,11 +4867,11 @@ pub const FuncGen = struct {
const arg = args[it.zig_index - 1];
const param_ty = self.air.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
- const abi_size = @intCast(c_uint, param_ty.abiSize(target));
+ const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
const int_llvm_ty = self.context.intType(abi_size * 8);
- if (isByRef(param_ty)) {
- const alignment = param_ty.abiAlignment(target);
+ if (isByRef(param_ty, mod)) {
+ const alignment = param_ty.abiAlignment(mod);
const load_inst = self.builder.buildLoad(int_llvm_ty, llvm_arg, "");
load_inst.setAlignment(alignment);
try llvm_args.append(load_inst);
@@ -4871,7 +4879,7 @@ pub const FuncGen = struct {
// LLVM does not allow bitcasting structs so we must allocate
// a local, store as one type, and then load as another type.
const alignment = @max(
- param_ty.abiAlignment(target),
+ param_ty.abiAlignment(mod),
self.dg.object.target_data.abiAlignmentOfType(int_llvm_ty),
);
const int_ptr = self.buildAlloca(int_llvm_ty, alignment);
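
A loose Zig-level analogue of that store-as-one-type, load-as-another strategy (purely illustrative; the struct and names below are not from the compiler):

const std = @import("std");

test "round-trip a small struct through a same-sized integer" {
    const Pair = extern struct { a: u16, b: u16 };
    const p: Pair = .{ .a = 1, .b = 2 };
    // Rather than bitcasting the aggregate directly, copy its bytes into
    // suitably aligned storage and reinterpret them as an integer of the
    // same ABI size, then back again.
    var storage: [@sizeOf(u32)]u8 align(@alignOf(u32)) = undefined;
    std.mem.copy(u8, &storage, std.mem.asBytes(&p));
    const as_int = std.mem.bytesToValue(u32, &storage);
    const back = std.mem.bytesToValue(Pair, std.mem.asBytes(&as_int));
    try std.testing.expect(back.a == 1 and back.b == 2);
}
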
@@ -4896,11 +4904,11 @@ pub const FuncGen = struct {
const param_ty = self.air.typeOf(arg);
const llvm_types = it.llvm_types_buffer[0..it.llvm_types_len];
const llvm_arg = try self.resolveInst(arg);
- const is_by_ref = isByRef(param_ty);
+ const is_by_ref = isByRef(param_ty, mod);
const arg_ptr = if (is_by_ref) llvm_arg else p: {
const p = self.buildAlloca(llvm_arg.typeOf(), null);
const store_inst = self.builder.buildStore(llvm_arg, p);
- store_inst.setAlignment(param_ty.abiAlignment(target));
+ store_inst.setAlignment(param_ty.abiAlignment(mod));
break :p p;
};
@@ -4924,17 +4932,17 @@ pub const FuncGen = struct {
const arg = args[it.zig_index - 1];
const arg_ty = self.air.typeOf(arg);
var llvm_arg = try self.resolveInst(arg);
- if (!isByRef(arg_ty)) {
+ if (!isByRef(arg_ty, mod)) {
const p = self.buildAlloca(llvm_arg.typeOf(), null);
const store_inst = self.builder.buildStore(llvm_arg, p);
- store_inst.setAlignment(arg_ty.abiAlignment(target));
+ store_inst.setAlignment(arg_ty.abiAlignment(mod));
llvm_arg = store_inst;
}
- const float_ty = try self.dg.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty).?);
+ const float_ty = try self.dg.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?);
const array_llvm_ty = float_ty.arrayType(count);
- const alignment = arg_ty.abiAlignment(target);
+ const alignment = arg_ty.abiAlignment(mod);
const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, "");
load_inst.setAlignment(alignment);
try llvm_args.append(load_inst);
@@ -4944,15 +4952,15 @@ pub const FuncGen = struct {
const arg = args[it.zig_index - 1];
const arg_ty = self.air.typeOf(arg);
var llvm_arg = try self.resolveInst(arg);
- if (!isByRef(arg_ty)) {
+ if (!isByRef(arg_ty, mod)) {
const p = self.buildAlloca(llvm_arg.typeOf(), null);
const store_inst = self.builder.buildStore(llvm_arg, p);
- store_inst.setAlignment(arg_ty.abiAlignment(target));
+ store_inst.setAlignment(arg_ty.abiAlignment(mod));
llvm_arg = store_inst;
}
const array_llvm_ty = self.context.intType(elem_size).arrayType(arr_len);
- const alignment = arg_ty.abiAlignment(target);
+ const alignment = arg_ty.abiAlignment(mod);
const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, "");
load_inst.setAlignment(alignment);
try llvm_args.append(load_inst);
@@ -4969,7 +4977,7 @@ pub const FuncGen = struct {
"",
);
- if (callee_ty.zigTypeTag() == .Pointer) {
+ if (callee_ty.zigTypeTag(mod) == .Pointer) {
// Add argument attributes for function pointer calls.
it = iterateParamTypes(self.dg, fn_info);
it.llvm_index += @boolToInt(sret);
@@ -4978,7 +4986,7 @@ pub const FuncGen = struct {
.byval => {
const param_index = it.zig_index - 1;
const param_ty = fn_info.param_types[param_index];
- if (!isByRef(param_ty)) {
+ if (!isByRef(param_ty, mod)) {
self.dg.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1);
}
},
@@ -4986,7 +4994,7 @@ pub const FuncGen = struct {
const param_index = it.zig_index - 1;
const param_ty = fn_info.param_types[param_index];
const param_llvm_ty = try self.dg.lowerType(param_ty);
- const alignment = param_ty.abiAlignment(target);
+ const alignment = param_ty.abiAlignment(mod);
self.dg.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
},
.byref_mut => {
@@ -5013,7 +5021,7 @@ pub const FuncGen = struct {
self.dg.addArgAttr(call, llvm_arg_i, "noalias");
}
}
- if (param_ty.zigTypeTag() != .Optional) {
+ if (param_ty.zigTypeTag(mod) != .Optional) {
self.dg.addArgAttr(call, llvm_arg_i, "nonnull");
}
if (!ptr_info.mutable) {
@@ -5022,7 +5030,7 @@ pub const FuncGen = struct {
if (ptr_info.@"align" != 0) {
self.dg.addArgAttrInt(call, llvm_arg_i, "align", ptr_info.@"align");
} else {
- const elem_align = @max(ptr_info.pointee_type.abiAlignment(target), 1);
+ const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
self.dg.addArgAttrInt(call, llvm_arg_i, "align", elem_align);
}
},
@@ -5033,7 +5041,7 @@ pub const FuncGen = struct {
return null;
}
- if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime()) {
+ if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) {
return null;
}
@@ -5041,12 +5049,12 @@ pub const FuncGen = struct {
if (ret_ptr) |rp| {
call.setCallSret(llvm_ret_ty);
- if (isByRef(return_type)) {
+ if (isByRef(return_type, mod)) {
return rp;
} else {
// our by-ref status disagrees with sret so we must load.
const loaded = self.builder.buildLoad(llvm_ret_ty, rp, "");
- loaded.setAlignment(return_type.abiAlignment(target));
+ loaded.setAlignment(return_type.abiAlignment(mod));
return loaded;
}
}
@@ -5061,7 +5069,7 @@ pub const FuncGen = struct {
const rp = self.buildAlloca(llvm_ret_ty, alignment);
const store_inst = self.builder.buildStore(call, rp);
store_inst.setAlignment(alignment);
- if (isByRef(return_type)) {
+ if (isByRef(return_type, mod)) {
return rp;
} else {
const load_inst = self.builder.buildLoad(llvm_ret_ty, rp, "");
@@ -5070,10 +5078,10 @@ pub const FuncGen = struct {
}
}
- if (isByRef(return_type)) {
+ if (isByRef(return_type, mod)) {
// our by-ref status disagrees with sret so we must allocate, store,
// and return the allocation pointer.
- const alignment = return_type.abiAlignment(target);
+ const alignment = return_type.abiAlignment(mod);
const rp = self.buildAlloca(llvm_ret_ty, alignment);
const store_inst = self.builder.buildStore(call, rp);
store_inst.setAlignment(alignment);
@@ -5084,6 +5092,7 @@ pub const FuncGen = struct {
}
fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ret_ty = self.air.typeOf(un_op);
if (self.ret_ptr) |ret_ptr| {
@@ -5098,8 +5107,8 @@ pub const FuncGen = struct {
return null;
}
const fn_info = self.dg.decl.ty.fnInfo();
- if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
- if (fn_info.return_type.isError()) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (fn_info.return_type.isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
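
A minimal sketch of why that matters at the language level (the function name below is made up for illustration): a function whose error set is empty can still be called through a pointer that promises anyerror, so the backend keeps the error-code slot and always writes zero into it.

const std = @import("std");

fn neverFails() error{}!u32 {
    return 42;
}

test "empty-error-set function used through an anyerror pointer" {
    // The coercion is ABI-safe precisely because the generated function still
    // returns an error code (always zero) alongside its payload.
    const ptr: *const fn () anyerror!u32 = &neverFails;
    try std.testing.expect((try ptr()) == 42);
}
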
@@ -5113,10 +5122,9 @@ pub const FuncGen = struct {
const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info);
const operand = try self.resolveInst(un_op);
- const target = self.dg.module.getTarget();
- const alignment = ret_ty.abiAlignment(target);
+ const alignment = ret_ty.abiAlignment(mod);
- if (isByRef(ret_ty)) {
+ if (isByRef(ret_ty, mod)) {
// The operand is a pointer, but self.ret_ptr is null, so we
// need to return a value.
const load_inst = self.builder.buildLoad(abi_ret_ty, operand, "");
@@ -5145,8 +5153,9 @@ pub const FuncGen = struct {
const ptr_ty = self.air.typeOf(un_op);
const ret_ty = ptr_ty.childType();
const fn_info = self.dg.decl.ty.fnInfo();
- if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
- if (fn_info.return_type.isError()) {
+ const mod = self.dg.module;
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ if (fn_info.return_type.isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
@@ -5162,10 +5171,9 @@ pub const FuncGen = struct {
return null;
}
const ptr = try self.resolveInst(un_op);
- const target = self.dg.module.getTarget();
const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info);
const loaded = self.builder.buildLoad(abi_ret_ty, ptr, "");
- loaded.setAlignment(ret_ty.abiAlignment(target));
+ loaded.setAlignment(ret_ty.abiAlignment(mod));
_ = self.builder.buildRet(loaded);
return null;
}
@@ -5184,9 +5192,9 @@ pub const FuncGen = struct {
const src_list = try self.resolveInst(ty_op.operand);
const va_list_ty = self.air.getRefType(ty_op.ty);
const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);
+ const mod = self.dg.module;
- const target = self.dg.module.getTarget();
- const result_alignment = va_list_ty.abiAlignment(target);
+ const result_alignment = va_list_ty.abiAlignment(mod);
const dest_list = self.buildAlloca(llvm_va_list_ty, result_alignment);
const llvm_fn_name = "llvm.va_copy";
@@ -5202,7 +5210,7 @@ pub const FuncGen = struct {
const args: [2]*llvm.Value = .{ dest_list, src_list };
_ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
- if (isByRef(va_list_ty)) {
+ if (isByRef(va_list_ty, mod)) {
return dest_list;
} else {
const loaded = self.builder.buildLoad(llvm_va_list_ty, dest_list, "");
@@ -5227,11 +5235,11 @@ pub const FuncGen = struct {
}
fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const va_list_ty = self.air.typeOfIndex(inst);
const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);
- const target = self.dg.module.getTarget();
- const result_alignment = va_list_ty.abiAlignment(target);
+ const result_alignment = va_list_ty.abiAlignment(mod);
const list = self.buildAlloca(llvm_va_list_ty, result_alignment);
const llvm_fn_name = "llvm.va_start";
@@ -5243,7 +5251,7 @@ pub const FuncGen = struct {
const args: [1]*llvm.Value = .{list};
_ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
- if (isByRef(va_list_ty)) {
+ if (isByRef(va_list_ty, mod)) {
return list;
} else {
const loaded = self.builder.buildLoad(llvm_va_list_ty, list, "");
@@ -5292,23 +5300,23 @@ pub const FuncGen = struct {
operand_ty: Type,
op: math.CompareOperator,
) Allocator.Error!*llvm.Value {
- var int_buffer: Type.Payload.Bits = undefined;
var opt_buffer: Type.Payload.ElemType = undefined;
- const scalar_ty = operand_ty.scalarType();
- const int_ty = switch (scalar_ty.zigTypeTag()) {
- .Enum => scalar_ty.intTagType(&int_buffer),
+ const mod = self.dg.module;
+ const scalar_ty = operand_ty.scalarType(mod);
+ const int_ty = switch (scalar_ty.zigTypeTag(mod)) {
+ .Enum => scalar_ty.intTagType(),
.Int, .Bool, .Pointer, .ErrorSet => scalar_ty,
.Optional => blk: {
const payload_ty = operand_ty.optionalChild(&opt_buffer);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime() or
- operand_ty.optionalReprIsPayload())
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
+ operand_ty.optionalReprIsPayload(mod))
{
break :blk operand_ty;
}
// We need to emit instructions to check for equality/inequality
// of optionals that are not pointers.
- const is_by_ref = isByRef(scalar_ty);
+ const is_by_ref = isByRef(scalar_ty, mod);
const opt_llvm_ty = try self.dg.lowerType(scalar_ty);
const lhs_non_null = self.optIsNonNull(opt_llvm_ty, lhs, is_by_ref);
const rhs_non_null = self.optIsNonNull(opt_llvm_ty, rhs, is_by_ref);
@@ -5375,7 +5383,7 @@ pub const FuncGen = struct {
.Float => return self.buildFloatCmp(op, operand_ty, .{ lhs, rhs }),
else => unreachable,
};
- const is_signed = int_ty.isSignedInt();
+ const is_signed = int_ty.isSignedInt(mod);
const operation: llvm.IntPredicate = switch (op) {
.eq => .EQ,
.neq => .NE,
@@ -5393,6 +5401,7 @@ pub const FuncGen = struct {
const body = self.air.extra[extra.end..][0..extra.data.body_len];
const inst_ty = self.air.typeOfIndex(inst);
const parent_bb = self.context.createBasicBlock("Block");
+ const mod = self.dg.module;
if (inst_ty.isNoReturn()) {
try self.genBody(body);
@@ -5414,8 +5423,8 @@ pub const FuncGen = struct {
self.builder.positionBuilderAtEnd(parent_bb);
// Create a phi node only if the block returns a value.
- const is_body = inst_ty.zigTypeTag() == .Fn;
- if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime()) return null;
+ const is_body = inst_ty.zigTypeTag(mod) == .Fn;
+ if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
const raw_llvm_ty = try self.dg.lowerType(inst_ty);
@@ -5424,7 +5433,7 @@ pub const FuncGen = struct {
// a pointer to it. LLVM IR allows the call instruction to use function bodies instead
// of function pointers, however the phi makes it a runtime value and therefore
// the LLVM type has to be wrapped in a pointer.
- if (is_body or isByRef(inst_ty)) {
+ if (is_body or isByRef(inst_ty, mod)) {
break :ty self.context.pointerType(0);
}
break :ty raw_llvm_ty;
@@ -5445,7 +5454,8 @@ pub const FuncGen = struct {
// Add the values to the lists only if the break provides a value.
const operand_ty = self.air.typeOf(branch.operand);
- if (operand_ty.hasRuntimeBitsIgnoreComptime() or operand_ty.zigTypeTag() == .Fn) {
+ const mod = self.dg.module;
+ if (operand_ty.hasRuntimeBitsIgnoreComptime(mod) or operand_ty.zigTypeTag(mod) == .Fn) {
const val = try self.resolveInst(branch.operand);
// For the phi node, we need the basic blocks and the values of the
@@ -5481,6 +5491,7 @@ pub const FuncGen = struct {
}
fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const inst = body_tail[0];
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const err_union = try self.resolveInst(pl_op.operand);
@@ -5488,7 +5499,7 @@ pub const FuncGen = struct {
const body = self.air.extra[extra.end..][0..extra.data.body_len];
const err_union_ty = self.air.typeOf(pl_op.operand);
const payload_ty = self.air.typeOfIndex(inst);
- const can_elide_load = if (isByRef(payload_ty)) self.canElideLoad(body_tail) else false;
+ const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false;
const is_unused = self.liveness.isUnused(inst);
return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused);
}
@@ -5512,9 +5523,9 @@ pub const FuncGen = struct {
can_elide_load: bool,
is_unused: bool,
) !?*llvm.Value {
+ const mod = fg.dg.module;
const payload_ty = err_union_ty.errorUnionPayload();
- const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime();
- const target = fg.dg.module.getTarget();
+ const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod);
const err_union_llvm_ty = try fg.dg.lowerType(err_union_ty);
if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
@@ -5529,8 +5540,8 @@ pub const FuncGen = struct {
err_union;
break :err fg.builder.buildICmp(.NE, loaded, zero, "");
}
- const err_field_index = errUnionErrorOffset(payload_ty, target);
- if (operand_is_ptr or isByRef(err_union_ty)) {
+ const err_field_index = errUnionErrorOffset(payload_ty, mod);
+ if (operand_is_ptr or isByRef(err_union_ty, mod)) {
const err_field_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, err_field_index, "");
// TODO add alignment to this load
const loaded = fg.builder.buildLoad(err_set_ty, err_field_ptr, "");
@@ -5555,30 +5566,31 @@ pub const FuncGen = struct {
if (!payload_has_bits) {
return if (operand_is_ptr) err_union else null;
}
- const offset = errUnionPayloadOffset(payload_ty, target);
+ const offset = errUnionPayloadOffset(payload_ty, mod);
if (operand_is_ptr) {
return fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, "");
- } else if (isByRef(err_union_ty)) {
+ } else if (isByRef(err_union_ty, mod)) {
const payload_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, "");
- if (isByRef(payload_ty)) {
+ if (isByRef(payload_ty, mod)) {
if (can_elide_load)
return payload_ptr;
- return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(target), false);
+ return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false);
}
const load_inst = fg.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, "");
- load_inst.setAlignment(payload_ty.abiAlignment(target));
+ load_inst.setAlignment(payload_ty.abiAlignment(mod));
return load_inst;
}
return fg.builder.buildExtractValue(err_union, offset, "");
}
fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const cond = try self.resolveInst(pl_op.operand);
const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
- const target = self.dg.module.getTarget();
+ const target = mod.getTarget();
const llvm_usize = self.context.intType(target.ptrBitWidth());
const cond_int = if (cond.typeOf().getTypeKind() == .Pointer)
self.builder.buildPtrToInt(cond, llvm_usize, "")
@@ -5645,6 +5657,7 @@ pub const FuncGen = struct {
}
fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const array_ty = operand_ty.childType();
@@ -5652,7 +5665,7 @@ pub const FuncGen = struct {
const len = llvm_usize.constInt(array_ty.arrayLen(), .False);
const slice_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
const operand = try self.resolveInst(ty_op.operand);
- if (!array_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, "");
return self.builder.buildInsertValue(partial, len, 1, "");
}
@@ -5666,30 +5679,31 @@ pub const FuncGen = struct {
}
fn airIntToFloat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
- const operand_scalar_ty = operand_ty.scalarType();
+ const operand_scalar_ty = operand_ty.scalarType(mod);
const dest_ty = self.air.typeOfIndex(inst);
- const dest_scalar_ty = dest_ty.scalarType();
+ const dest_scalar_ty = dest_ty.scalarType(mod);
const dest_llvm_ty = try self.dg.lowerType(dest_ty);
- const target = self.dg.module.getTarget();
+ const target = mod.getTarget();
if (intrinsicsAllowed(dest_scalar_ty, target)) {
- if (operand_scalar_ty.isSignedInt()) {
+ if (operand_scalar_ty.isSignedInt(mod)) {
return self.builder.buildSIToFP(operand, dest_llvm_ty, "");
} else {
return self.builder.buildUIToFP(operand, dest_llvm_ty, "");
}
}
- const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(target));
+ const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(mod));
const rt_int_bits = compilerRtIntBits(operand_bits);
const rt_int_ty = self.context.intType(rt_int_bits);
var extended = e: {
- if (operand_scalar_ty.isSignedInt()) {
+ if (operand_scalar_ty.isSignedInt(mod)) {
break :e self.builder.buildSExtOrBitCast(operand, rt_int_ty, "");
} else {
break :e self.builder.buildZExtOrBitCast(operand, rt_int_ty, "");
@@ -5698,7 +5712,7 @@ pub const FuncGen = struct {
const dest_bits = dest_scalar_ty.floatBits(target);
const compiler_rt_operand_abbrev = compilerRtIntAbbrev(rt_int_bits);
const compiler_rt_dest_abbrev = compilerRtFloatAbbrev(dest_bits);
- const sign_prefix = if (operand_scalar_ty.isSignedInt()) "" else "un";
+ const sign_prefix = if (operand_scalar_ty.isSignedInt(mod)) "" else "un";
var fn_name_buf: [64]u8 = undefined;
const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__float{s}{s}i{s}f", .{
sign_prefix,
@@ -5724,27 +5738,28 @@ pub const FuncGen = struct {
fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
- const target = self.dg.module.getTarget();
+ const mod = self.dg.module;
+ const target = mod.getTarget();
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
- const operand_scalar_ty = operand_ty.scalarType();
+ const operand_scalar_ty = operand_ty.scalarType(mod);
const dest_ty = self.air.typeOfIndex(inst);
- const dest_scalar_ty = dest_ty.scalarType();
+ const dest_scalar_ty = dest_ty.scalarType(mod);
const dest_llvm_ty = try self.dg.lowerType(dest_ty);
if (intrinsicsAllowed(operand_scalar_ty, target)) {
// TODO set fast math flag
- if (dest_scalar_ty.isSignedInt()) {
+ if (dest_scalar_ty.isSignedInt(mod)) {
return self.builder.buildFPToSI(operand, dest_llvm_ty, "");
} else {
return self.builder.buildFPToUI(operand, dest_llvm_ty, "");
}
}
- const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(target)));
+ const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(mod)));
const ret_ty = self.context.intType(rt_int_bits);
const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: {
// On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard
@@ -5756,7 +5771,7 @@ pub const FuncGen = struct {
const compiler_rt_operand_abbrev = compilerRtFloatAbbrev(operand_bits);
const compiler_rt_dest_abbrev = compilerRtIntAbbrev(rt_int_bits);
- const sign_prefix = if (dest_scalar_ty.isSignedInt()) "" else "uns";
+ const sign_prefix = if (dest_scalar_ty.isSignedInt(mod)) "" else "uns";
var fn_name_buf: [64]u8 = undefined;
const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__fix{s}{s}f{s}i", .{
@@ -5786,13 +5801,14 @@ pub const FuncGen = struct {
}
fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value {
- const target = fg.dg.module.getTarget();
+ const mod = fg.dg.module;
+ const target = mod.getTarget();
const llvm_usize_ty = fg.context.intType(target.ptrBitWidth());
switch (ty.ptrSize()) {
.Slice => {
const len = fg.builder.buildExtractValue(ptr, 1, "");
const elem_ty = ty.childType();
- const abi_size = elem_ty.abiSize(target);
+ const abi_size = elem_ty.abiSize(mod);
if (abi_size == 1) return len;
const abi_size_llvm_val = llvm_usize_ty.constInt(abi_size, .False);
return fg.builder.buildMul(len, abi_size_llvm_val, "");
@@ -5800,7 +5816,7 @@ pub const FuncGen = struct {
.One => {
const array_ty = ty.childType();
const elem_ty = array_ty.childType();
- const abi_size = elem_ty.abiSize(target);
+ const abi_size = elem_ty.abiSize(mod);
return llvm_usize_ty.constInt(array_ty.arrayLen() * abi_size, .False);
},
.Many, .C => unreachable,
@@ -5823,6 +5839,7 @@ pub const FuncGen = struct {
}
fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const slice_ty = self.air.typeOf(bin_op.lhs);
@@ -5833,12 +5850,11 @@ pub const FuncGen = struct {
const base_ptr = self.builder.buildExtractValue(slice, 0, "");
const indices: [1]*llvm.Value = .{index};
const ptr = self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
- if (isByRef(elem_ty)) {
+ if (isByRef(elem_ty, mod)) {
if (self.canElideLoad(body_tail))
return ptr;
- const target = self.dg.module.getTarget();
- return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(target), false);
+ return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false);
}
return self.load(ptr, slice_ty);
@@ -5858,6 +5874,7 @@ pub const FuncGen = struct {
}
fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -5866,15 +5883,14 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const array_llvm_ty = try self.dg.lowerType(array_ty);
const elem_ty = array_ty.childType();
- if (isByRef(array_ty)) {
+ if (isByRef(array_ty, mod)) {
const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs };
- if (isByRef(elem_ty)) {
+ if (isByRef(elem_ty, mod)) {
const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, array_llvm_val, &indices, indices.len, "");
if (canElideLoad(self, body_tail))
return elem_ptr;
- const target = self.dg.module.getTarget();
- return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(target), false);
+ return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(mod), false);
} else {
const lhs_index = Air.refToIndex(bin_op.lhs).?;
const elem_llvm_ty = try self.dg.lowerType(elem_ty);
@@ -5901,6 +5917,7 @@ pub const FuncGen = struct {
}
fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.air.typeOf(bin_op.lhs);
@@ -5917,23 +5934,23 @@ pub const FuncGen = struct {
const indices: [1]*llvm.Value = .{rhs};
break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
};
- if (isByRef(elem_ty)) {
+ if (isByRef(elem_ty, mod)) {
if (self.canElideLoad(body_tail))
return ptr;
- const target = self.dg.module.getTarget();
- return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(target), false);
+ return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false);
}
return self.load(ptr, ptr_ty);
}
fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = self.air.typeOf(bin_op.lhs);
const elem_ty = ptr_ty.childType();
- if (!elem_ty.hasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -5972,6 +5989,7 @@ pub const FuncGen = struct {
}
fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const inst = body_tail[0];
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -5979,29 +5997,28 @@ pub const FuncGen = struct {
const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return null;
}
- const target = self.dg.module.getTarget();
- if (!isByRef(struct_ty)) {
- assert(!isByRef(field_ty));
- switch (struct_ty.zigTypeTag()) {
+ if (!isByRef(struct_ty, mod)) {
+ assert(!isByRef(field_ty, mod));
+ switch (struct_ty.zigTypeTag(mod)) {
.Struct => switch (struct_ty.containerLayout()) {
.Packed => {
const struct_obj = struct_ty.castTag(.@"struct").?.data;
- const bit_offset = struct_obj.packedFieldBitOffset(target, field_index);
+ const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index);
const containing_int = struct_llvm_val;
const shift_amt = containing_int.typeOf().constInt(bit_offset, .False);
const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
const elem_llvm_ty = try self.dg.lowerType(field_ty);
- if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) {
- const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+ if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
+ const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
- } else if (field_ty.isPtrAtRuntime()) {
- const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+ } else if (field_ty.isPtrAtRuntime(mod)) {
+ const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
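
For intuition, a small sketch of the shift-and-truncate scheme used above for packed fields (illustrative only, written with the pre-0.11 builtin syntax this diff uses):

const std = @import("std");

test "packed field extraction is shift then truncate" {
    const P = packed struct { a: u3, b: u5 };
    const p: P = .{ .a = 1, .b = 17 };
    // The containing integer is shifted right by the field's bit offset and
    // then truncated to the field's width, mirroring buildLShr/buildTrunc.
    const backing = @bitCast(u8, p);
    const b = @truncate(u5, backing >> @bitOffsetOf(P, "b"));
    try std.testing.expect(b == 17);
}
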
@@ -6010,7 +6027,7 @@ pub const FuncGen = struct {
},
else => {
var ptr_ty_buf: Type.Payload.Pointer = undefined;
- const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
+ const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, "");
},
},
@@ -6018,13 +6035,13 @@ pub const FuncGen = struct {
assert(struct_ty.containerLayout() == .Packed);
const containing_int = struct_llvm_val;
const elem_llvm_ty = try self.dg.lowerType(field_ty);
- if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) {
- const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+ if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
+ const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
- } else if (field_ty.isPtrAtRuntime()) {
- const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+ } else if (field_ty.isPtrAtRuntime(mod)) {
+ const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -6035,30 +6052,30 @@ pub const FuncGen = struct {
}
}
- switch (struct_ty.zigTypeTag()) {
+ switch (struct_ty.zigTypeTag(mod)) {
.Struct => {
assert(struct_ty.containerLayout() != .Packed);
var ptr_ty_buf: Type.Payload.Pointer = undefined;
- const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
+ const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
const struct_llvm_ty = try self.dg.lowerType(struct_ty);
const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field_index, "");
const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
- if (isByRef(field_ty)) {
+ if (isByRef(field_ty, mod)) {
if (canElideLoad(self, body_tail))
return field_ptr;
- return self.loadByRef(field_ptr, field_ty, ptr_ty_buf.data.alignment(target), false);
+ return self.loadByRef(field_ptr, field_ty, ptr_ty_buf.data.alignment(mod), false);
} else {
return self.load(field_ptr, field_ptr_ty);
}
},
.Union => {
const union_llvm_ty = try self.dg.lowerType(struct_ty);
- const layout = struct_ty.unionGetLayout(target);
+ const layout = struct_ty.unionGetLayout(mod);
const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
const field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_llvm_val, payload_index, "");
const llvm_field_ty = try self.dg.lowerType(field_ty);
- if (isByRef(field_ty)) {
+ if (isByRef(field_ty, mod)) {
if (canElideLoad(self, body_tail))
return field_ptr;
@@ -6072,6 +6089,7 @@ pub const FuncGen = struct {
}
fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
@@ -6079,7 +6097,7 @@ pub const FuncGen = struct {
const target = self.dg.module.getTarget();
const parent_ty = self.air.getRefType(ty_pl.ty).childType();
- const field_offset = parent_ty.structFieldOffset(extra.field_index, target);
+ const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty));
if (field_offset == 0) {
@@ -6119,12 +6137,13 @@ pub const FuncGen = struct {
}
fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const dib = self.dg.object.di_builder orelse return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const func = self.air.values[ty_pl.payload].castTag(.function).?.data;
const decl_index = func.owner_decl;
- const decl = self.dg.module.declPtr(decl_index);
+ const decl = mod.declPtr(decl_index);
const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope);
self.di_file = di_file;
const line_number = decl.src_line + 1;
@@ -6136,22 +6155,41 @@ pub const FuncGen = struct {
.base_line = self.base_line,
});
- const fqn = try decl.getFullyQualifiedName(self.dg.module);
+ const fqn = try decl.getFullyQualifiedName(mod);
defer self.gpa.free(fqn);
- const is_internal_linkage = !self.dg.module.decl_exports.contains(decl_index);
+ const is_internal_linkage = !mod.decl_exports.contains(decl_index);
+ var fn_ty_pl: Type.Payload.Function = .{
+ .base = .{ .tag = .function },
+ .data = .{
+ .param_types = &.{},
+ .comptime_params = undefined,
+ .return_type = Type.void,
+ .alignment = 0,
+ .noalias_bits = 0,
+ .cc = .Unspecified,
+ .is_var_args = false,
+ .is_generic = false,
+ .is_noinline = false,
+ .align_is_generic = false,
+ .cc_is_generic = false,
+ .section_is_generic = false,
+ .addrspace_is_generic = false,
+ },
+ };
+ const fn_ty = Type.initPayload(&fn_ty_pl.base);
const subprogram = dib.createFunction(
di_file.toScope(),
decl.name,
fqn,
di_file,
line_number,
- try self.dg.object.lowerDebugType(Type.initTag(.fn_void_no_args), .full),
+ try self.dg.object.lowerDebugType(fn_ty, .full),
is_internal_linkage,
true, // is definition
line_number + func.lbrace_line, // scope line
llvm.DIFlags.StaticMember,
- self.dg.module.comp.bin_file.options.optimize_mode != .Debug,
+ mod.comp.bin_file.options.optimize_mode != .Debug,
null, // decl_subprogram
);
@@ -6243,10 +6281,11 @@ pub const FuncGen = struct {
null;
const debug_loc = llvm.getDebugLoc(self.prev_dbg_line, self.prev_dbg_column, self.di_scope.?, inlined_at);
const insert_block = self.builder.getInsertBlock();
- if (isByRef(operand_ty)) {
+ const mod = self.dg.module;
+ if (isByRef(operand_ty, mod)) {
_ = dib.insertDeclareAtEnd(operand, di_local_var, debug_loc, insert_block);
} else if (self.dg.module.comp.bin_file.options.optimize_mode == .Debug) {
- const alignment = operand_ty.abiAlignment(self.dg.module.getTarget());
+ const alignment = operand_ty.abiAlignment(mod);
const alloca = self.buildAlloca(operand.typeOf(), alignment);
const store_inst = self.builder.buildStore(operand, alloca);
store_inst.setAlignment(alignment);
@@ -6294,7 +6333,8 @@ pub const FuncGen = struct {
// This stores whether we need to add an elementtype attribute and
// if so, the element type itself.
const llvm_param_attrs = try arena.alloc(?*llvm.Type, max_param_count);
- const target = self.dg.module.getTarget();
+ const mod = self.dg.module;
+ const target = mod.getTarget();
var llvm_ret_i: usize = 0;
var llvm_param_i: usize = 0;
@@ -6322,7 +6362,7 @@ pub const FuncGen = struct {
if (output != .none) {
const output_inst = try self.resolveInst(output);
const output_ty = self.air.typeOf(output);
- assert(output_ty.zigTypeTag() == .Pointer);
+ assert(output_ty.zigTypeTag(mod) == .Pointer);
const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType());
if (llvm_ret_indirect[i]) {
@@ -6376,13 +6416,13 @@ pub const FuncGen = struct {
const arg_llvm_value = try self.resolveInst(input);
const arg_ty = self.air.typeOf(input);
var llvm_elem_ty: ?*llvm.Type = null;
- if (isByRef(arg_ty)) {
+ if (isByRef(arg_ty, mod)) {
llvm_elem_ty = try self.dg.lowerPtrElemTy(arg_ty);
if (constraintAllowsMemory(constraint)) {
llvm_param_values[llvm_param_i] = arg_llvm_value;
llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf();
} else {
- const alignment = arg_ty.abiAlignment(target);
+ const alignment = arg_ty.abiAlignment(mod);
const arg_llvm_ty = try self.dg.lowerType(arg_ty);
const load_inst = self.builder.buildLoad(arg_llvm_ty, arg_llvm_value, "");
load_inst.setAlignment(alignment);
@@ -6394,7 +6434,7 @@ pub const FuncGen = struct {
llvm_param_values[llvm_param_i] = arg_llvm_value;
llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf();
} else {
- const alignment = arg_ty.abiAlignment(target);
+ const alignment = arg_ty.abiAlignment(mod);
const arg_ptr = self.buildAlloca(arg_llvm_value.typeOf(), alignment);
const store_inst = self.builder.buildStore(arg_llvm_value, arg_ptr);
store_inst.setAlignment(alignment);
@@ -6599,7 +6639,7 @@ pub const FuncGen = struct {
const output_ptr_ty = self.air.typeOf(output);
const store_inst = self.builder.buildStore(output_value, output_ptr);
- store_inst.setAlignment(output_ptr_ty.ptrAlignment(target));
+ store_inst.setAlignment(output_ptr_ty.ptrAlignment(mod));
} else {
ret_val = output_value;
}
@@ -6622,7 +6662,8 @@ pub const FuncGen = struct {
const optional_llvm_ty = try self.dg.lowerType(optional_ty);
var buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&buf);
- if (optional_ty.optionalReprIsPayload()) {
+ const mod = self.dg.module;
+ if (optional_ty.optionalReprIsPayload(mod)) {
const loaded = if (operand_is_ptr)
self.builder.buildLoad(optional_llvm_ty, operand, "")
else
@@ -6638,7 +6679,7 @@ pub const FuncGen = struct {
comptime assert(optional_layout_version == 3);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const loaded = if (operand_is_ptr)
self.builder.buildLoad(optional_llvm_ty, operand, "")
else
@@ -6647,7 +6688,7 @@ pub const FuncGen = struct {
return self.builder.buildICmp(pred, loaded, llvm_i8.constNull(), "");
}
- const is_by_ref = operand_is_ptr or isByRef(optional_ty);
+ const is_by_ref = operand_is_ptr or isByRef(optional_ty, mod);
const non_null_bit = self.optIsNonNull(optional_llvm_ty, operand, is_by_ref);
if (pred == .EQ) {
return self.builder.buildNot(non_null_bit, "");
@@ -6662,6 +6703,7 @@ pub const FuncGen = struct {
op: llvm.IntPredicate,
operand_is_ptr: bool,
) !?*llvm.Value {
+ const mod = self.dg.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const operand_ty = self.air.typeOf(un_op);
@@ -6679,7 +6721,7 @@ pub const FuncGen = struct {
}
}
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const loaded = if (operand_is_ptr)
self.builder.buildLoad(try self.dg.lowerType(err_union_ty), operand, "")
else
@@ -6687,10 +6729,9 @@ pub const FuncGen = struct {
return self.builder.buildICmp(op, loaded, zero, "");
}
- const target = self.dg.module.getTarget();
- const err_field_index = errUnionErrorOffset(payload_ty, target);
+ const err_field_index = errUnionErrorOffset(payload_ty, mod);
- if (operand_is_ptr or isByRef(err_union_ty)) {
+ if (operand_is_ptr or isByRef(err_union_ty, mod)) {
const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, err_field_index, "");
const loaded = self.builder.buildLoad(err_set_ty, err_field_ptr, "");
@@ -6702,17 +6743,18 @@ pub const FuncGen = struct {
}
fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOf(ty_op.operand).childType();
var buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// We have a pointer to a zero-bit value and we need to return
// a pointer to a zero-bit value.
return operand;
}
- if (optional_ty.optionalReprIsPayload()) {
+ if (optional_ty.optionalReprIsPayload(mod)) {
// The payload and the optional are the same value.
return operand;
}
@@ -6723,18 +6765,19 @@ pub const FuncGen = struct {
fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
comptime assert(optional_layout_version == 3);
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOf(ty_op.operand).childType();
var buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&buf);
const non_null_bit = self.context.intType(8).constInt(1, .False);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// We have a pointer to an i8. We need to set it to 1 and then return the same pointer.
_ = self.builder.buildStore(non_null_bit, operand);
return operand;
}
- if (optional_ty.optionalReprIsPayload()) {
+ if (optional_ty.optionalReprIsPayload(mod)) {
// The payload and the optional are the same value.
// Setting to non-null will be done when the payload is set.
return operand;
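
A tiny sketch of the representation the zero-bit branch above assumes (not from the commit): when the payload has no runtime bits, the whole optional is just the one-byte non-null flag.

const std = @import("std");

test "optional of a zero-bit payload is only the non-null flag" {
    try std.testing.expect(@sizeOf(?void) == 1);
    var x: ?void = null;
    x = {};
    try std.testing.expect(x != null);
}
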
@@ -6754,20 +6797,21 @@ pub const FuncGen = struct {
}
fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOf(ty_op.operand);
const payload_ty = self.air.typeOfIndex(inst);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return null;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
- if (optional_ty.optionalReprIsPayload()) {
+ if (optional_ty.optionalReprIsPayload(mod)) {
// Payload value is the same as the optional value.
return operand;
}
const opt_llvm_ty = try self.dg.lowerType(optional_ty);
- const can_elide_load = if (isByRef(payload_ty)) self.canElideLoad(body_tail) else false;
+ const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false;
return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load);
}
@@ -6776,6 +6820,7 @@ pub const FuncGen = struct {
body_tail: []const Air.Inst.Index,
operand_is_ptr: bool,
) !?*llvm.Value {
+ const mod = self.dg.module;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
@@ -6783,25 +6828,24 @@ pub const FuncGen = struct {
const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
const result_ty = self.air.typeOfIndex(inst);
const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty;
- const target = self.dg.module.getTarget();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return if (operand_is_ptr) operand else null;
}
- const offset = errUnionPayloadOffset(payload_ty, target);
+ const offset = errUnionPayloadOffset(payload_ty, mod);
const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
if (operand_is_ptr) {
return self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
- } else if (isByRef(err_union_ty)) {
+ } else if (isByRef(err_union_ty, mod)) {
const payload_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
- if (isByRef(payload_ty)) {
+ if (isByRef(payload_ty, mod)) {
if (self.canElideLoad(body_tail))
return payload_ptr;
- return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(target), false);
+ return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false);
}
const load_inst = self.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, "");
- load_inst.setAlignment(payload_ty.abiAlignment(target));
+ load_inst.setAlignment(payload_ty.abiAlignment(mod));
return load_inst;
}
return self.builder.buildExtractValue(operand, offset, "");
@@ -6812,6 +6856,7 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
operand_is_ptr: bool,
) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
@@ -6828,15 +6873,14 @@ pub const FuncGen = struct {
const err_set_llvm_ty = try self.dg.lowerType(Type.anyerror);
const payload_ty = err_union_ty.errorUnionPayload();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (!operand_is_ptr) return operand;
return self.builder.buildLoad(err_set_llvm_ty, operand, "");
}
- const target = self.dg.module.getTarget();
- const offset = errUnionErrorOffset(payload_ty, target);
+ const offset = errUnionErrorOffset(payload_ty, mod);
- if (operand_is_ptr or isByRef(err_union_ty)) {
+ if (operand_is_ptr or isByRef(err_union_ty, mod)) {
const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
return self.builder.buildLoad(err_set_llvm_ty, err_field_ptr, "");
@@ -6846,30 +6890,30 @@ pub const FuncGen = struct {
}
fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const err_union_ty = self.air.typeOf(ty_op.operand).childType();
const payload_ty = err_union_ty.errorUnionPayload();
const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero });
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
_ = self.builder.buildStore(non_error_val, operand);
return operand;
}
- const target = self.dg.module.getTarget();
const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
{
- const error_offset = errUnionErrorOffset(payload_ty, target);
+ const error_offset = errUnionErrorOffset(payload_ty, mod);
// First set the non-error value.
const non_null_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, error_offset, "");
const store_inst = self.builder.buildStore(non_error_val, non_null_ptr);
- store_inst.setAlignment(Type.anyerror.abiAlignment(target));
+ store_inst.setAlignment(Type.anyerror.abiAlignment(mod));
}
// Then return the payload pointer (only if it is used).
if (self.liveness.isUnused(inst))
return null;
- const payload_offset = errUnionPayloadOffset(payload_ty, target);
+ const payload_offset = errUnionPayloadOffset(payload_ty, mod);
return self.builder.buildStructGEP(err_union_llvm_ty, operand, payload_offset, "");
}
@@ -6885,15 +6929,14 @@ pub const FuncGen = struct {
}
fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- const target = self.dg.module.getTarget();
-
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
//const struct_ty = try self.resolveInst(ty_pl.ty);
const struct_ty = self.air.getRefType(ty_pl.ty);
const field_index = ty_pl.payload;
var ptr_ty_buf: Type.Payload.Pointer = undefined;
- const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
+ const mod = self.dg.module;
+ const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
const struct_llvm_ty = try self.dg.lowerType(struct_ty);
const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field_index, "");
const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
@@ -6901,20 +6944,20 @@ pub const FuncGen = struct {
}
fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const payload_ty = self.air.typeOf(ty_op.operand);
const non_null_bit = self.context.intType(8).constInt(1, .False);
comptime assert(optional_layout_version == 3);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return non_null_bit;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOfIndex(inst);
- if (optional_ty.optionalReprIsPayload()) {
+ if (optional_ty.optionalReprIsPayload(mod)) {
return operand;
}
const llvm_optional_ty = try self.dg.lowerType(optional_ty);
- if (isByRef(optional_ty)) {
- const target = self.dg.module.getTarget();
- const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(target));
+ if (isByRef(optional_ty, mod)) {
+ const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod));
const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, "");
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
@@ -6931,24 +6974,24 @@ pub const FuncGen = struct {
}
fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const err_un_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
const payload_ty = self.air.typeOf(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return operand;
}
const ok_err_code = (try self.dg.lowerType(Type.anyerror)).constNull();
const err_un_llvm_ty = try self.dg.lowerType(err_un_ty);
- const target = self.dg.module.getTarget();
- const payload_offset = errUnionPayloadOffset(payload_ty, target);
- const error_offset = errUnionErrorOffset(payload_ty, target);
- if (isByRef(err_un_ty)) {
- const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(target));
+ const payload_offset = errUnionPayloadOffset(payload_ty, mod);
+ const error_offset = errUnionErrorOffset(payload_ty, mod);
+ if (isByRef(err_un_ty, mod)) {
+ const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod));
const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, "");
const store_inst = self.builder.buildStore(ok_err_code, err_ptr);
- store_inst.setAlignment(Type.anyerror.abiAlignment(target));
+ store_inst.setAlignment(Type.anyerror.abiAlignment(mod));
const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, "");
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
@@ -6964,23 +7007,23 @@ pub const FuncGen = struct {
}
fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const err_un_ty = self.air.typeOfIndex(inst);
const payload_ty = err_un_ty.errorUnionPayload();
const operand = try self.resolveInst(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return operand;
}
const err_un_llvm_ty = try self.dg.lowerType(err_un_ty);
- const target = self.dg.module.getTarget();
- const payload_offset = errUnionPayloadOffset(payload_ty, target);
- const error_offset = errUnionErrorOffset(payload_ty, target);
- if (isByRef(err_un_ty)) {
- const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(target));
+ const payload_offset = errUnionPayloadOffset(payload_ty, mod);
+ const error_offset = errUnionErrorOffset(payload_ty, mod);
+ if (isByRef(err_un_ty, mod)) {
+ const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod));
const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, "");
const store_inst = self.builder.buildStore(operand, err_ptr);
- store_inst.setAlignment(Type.anyerror.abiAlignment(target));
+ store_inst.setAlignment(Type.anyerror.abiAlignment(mod));
const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, "");
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
@@ -7021,6 +7064,7 @@ pub const FuncGen = struct {
}
fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const data = self.air.instructions.items(.data)[inst].vector_store_elem;
const extra = self.air.extraData(Air.Bin, data.payload).data;
@@ -7032,8 +7076,7 @@ pub const FuncGen = struct {
const loaded_vector = blk: {
const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType());
const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, "");
- const target = self.dg.module.getTarget();
- load_inst.setAlignment(vector_ptr_ty.ptrAlignment(target));
+ load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod));
load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr()));
break :blk load_inst;
};
@@ -7043,24 +7086,26 @@ pub const FuncGen = struct {
}
fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const scalar_ty = self.air.typeOfIndex(inst).scalarType();
+ const scalar_ty = self.air.typeOfIndex(inst).scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, scalar_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt()) return self.builder.buildSMin(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMin(lhs, rhs, "");
return self.builder.buildUMin(lhs, rhs, "");
}
fn airMax(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- const scalar_ty = self.air.typeOfIndex(inst).scalarType();
+ const scalar_ty = self.air.typeOfIndex(inst).scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, scalar_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt()) return self.builder.buildSMax(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMax(lhs, rhs, "");
return self.builder.buildUMax(lhs, rhs, "");
}
@@ -7081,14 +7126,15 @@ pub const FuncGen = struct {
fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.add, inst_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt()) return self.builder.buildNSWAdd(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWAdd(lhs, rhs, "");
return self.builder.buildNUWAdd(lhs, rhs, "");
}
@@ -7103,14 +7149,15 @@ pub const FuncGen = struct {
}
fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{});
- if (scalar_ty.isSignedInt()) return self.builder.buildSAddSat(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildSAddSat(lhs, rhs, "");
return self.builder.buildUAddSat(lhs, rhs, "");
}
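The saturating-add builders above implement Zig's saturating addition (+|), which clamps at the bounds of the operand type instead of overflowing. A user-level sketch of the semantics being lowered (illustrative only):

const std = @import("std");

test "saturating add clamps at the type's bounds" {
    try std.testing.expectEqual(@as(u8, 255), @as(u8, 250) +| @as(u8, 10));
    try std.testing.expectEqual(@as(i8, -128), @as(i8, -120) +| @as(i8, -20));
}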
@@ -7118,14 +7165,15 @@ pub const FuncGen = struct {
fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, inst_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt()) return self.builder.buildNSWSub(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWSub(lhs, rhs, "");
return self.builder.buildNUWSub(lhs, rhs, "");
}
@@ -7140,28 +7188,30 @@ pub const FuncGen = struct {
}
fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{});
- if (scalar_ty.isSignedInt()) return self.builder.buildSSubSat(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildSSubSat(lhs, rhs, "");
return self.builder.buildUSubSat(lhs, rhs, "");
}
fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, inst_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt()) return self.builder.buildNSWMul(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWMul(lhs, rhs, "");
return self.builder.buildNUWMul(lhs, rhs, "");
}
@@ -7176,14 +7226,15 @@ pub const FuncGen = struct {
}
fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{});
- if (scalar_ty.isSignedInt()) return self.builder.buildSMulFixSat(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMulFixSat(lhs, rhs, "");
return self.builder.buildUMulFixSat(lhs, rhs, "");
}
@@ -7201,38 +7252,39 @@ pub const FuncGen = struct {
fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isRuntimeFloat()) {
const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
return self.buildFloatOp(.trunc, inst_ty, 1, .{result});
}
- if (scalar_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildSDiv(lhs, rhs, "");
return self.builder.buildUDiv(lhs, rhs, "");
}
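buildSDiv/buildUDiv above produce truncating division; the floor-division lowering that follows needs a sign-based correction because the two roundings differ for negative operands. A user-level sketch of the difference (illustrative, not part of this file):

const std = @import("std");

test "trunc vs floor division" {
    // @divTrunc rounds toward zero, @divFloor toward negative infinity.
    try std.testing.expectEqual(@as(i32, -3), @divTrunc(@as(i32, -7), 2));
    try std.testing.expectEqual(@as(i32, -4), @divFloor(@as(i32, -7), 2));
}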
fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isRuntimeFloat()) {
const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
return self.buildFloatOp(.floor, inst_ty, 1, .{result});
}
- if (scalar_ty.isSignedInt()) {
- const target = self.dg.module.getTarget();
+ if (scalar_ty.isSignedInt(mod)) {
const inst_llvm_ty = try self.dg.lowerType(inst_ty);
- const scalar_bit_size_minus_one = scalar_ty.bitSize(target) - 1;
- const bit_size_minus_one = if (inst_ty.zigTypeTag() == .Vector) const_vector: {
+ const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1;
+ const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: {
const vec_len = inst_ty.vectorLen();
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
@@ -7258,40 +7310,43 @@ pub const FuncGen = struct {
fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt()) return self.builder.buildExactSDiv(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildExactSDiv(lhs, rhs, "");
return self.builder.buildExactUDiv(lhs, rhs, "");
}
fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- const scalar_ty = inst_ty.scalarType();
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs });
- if (scalar_ty.isSignedInt()) return self.builder.buildSRem(lhs, rhs, "");
+ if (scalar_ty.isSignedInt(mod)) return self.builder.buildSRem(lhs, rhs, "");
return self.builder.buildURem(lhs, rhs, "");
}
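buildSRem/buildURem above match Zig's @rem, which takes the sign of the dividend; airMod below has to adjust that result because @mod follows the sign of the divisor. A short user-level illustration of the difference:

const std = @import("std");

test "rem vs mod for negative operands" {
    try std.testing.expectEqual(@as(i32, -1), @rem(@as(i32, -7), 3));
    try std.testing.expectEqual(@as(i32, 2), @mod(@as(i32, -7), 3));
}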
fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
const inst_llvm_ty = try self.dg.lowerType(inst_ty);
- const scalar_ty = inst_ty.scalarType();
+ const scalar_ty = inst_ty.scalarType(mod);
if (scalar_ty.isRuntimeFloat()) {
const a = try self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs });
@@ -7301,10 +7356,9 @@ pub const FuncGen = struct {
const ltz = try self.buildFloatCmp(.lt, inst_ty, .{ lhs, zero });
return self.builder.buildSelect(ltz, c, a, "");
}
- if (scalar_ty.isSignedInt()) {
- const target = self.dg.module.getTarget();
- const scalar_bit_size_minus_one = scalar_ty.bitSize(target) - 1;
- const bit_size_minus_one = if (inst_ty.zigTypeTag() == .Vector) const_vector: {
+ if (scalar_ty.isSignedInt(mod)) {
+ const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1;
+ const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: {
const vec_len = inst_ty.vectorLen();
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
@@ -7386,6 +7440,7 @@ pub const FuncGen = struct {
signed_intrinsic: []const u8,
unsigned_intrinsic: []const u8,
) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -7393,16 +7448,14 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(extra.rhs);
const lhs_ty = self.air.typeOf(extra.lhs);
- const scalar_ty = lhs_ty.scalarType();
+ const scalar_ty = lhs_ty.scalarType(mod);
const dest_ty = self.air.typeOfIndex(inst);
- const intrinsic_name = if (scalar_ty.isSignedInt()) signed_intrinsic else unsigned_intrinsic;
+ const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic;
const llvm_lhs_ty = try self.dg.lowerType(lhs_ty);
const llvm_dest_ty = try self.dg.lowerType(dest_ty);
- const tg = self.dg.module.getTarget();
-
const llvm_fn = self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty});
const result_struct = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &[_]*llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, "");
@@ -7410,12 +7463,11 @@ pub const FuncGen = struct {
const overflow_bit = self.builder.buildExtractValue(result_struct, 1, "");
var ty_buf: Type.Payload.Pointer = undefined;
- const result_index = llvmFieldIndex(dest_ty, 0, tg, &ty_buf).?;
- const overflow_index = llvmFieldIndex(dest_ty, 1, tg, &ty_buf).?;
+ const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?;
+ const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?;
- if (isByRef(dest_ty)) {
- const target = self.dg.module.getTarget();
- const result_alignment = dest_ty.abiAlignment(target);
+ if (isByRef(dest_ty, mod)) {
+ const result_alignment = dest_ty.abiAlignment(mod);
const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment);
{
const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, "");
@@ -7486,8 +7538,9 @@ pub const FuncGen = struct {
ty: Type,
params: [2]*llvm.Value,
) !*llvm.Value {
+ const mod = self.dg.module;
const target = self.dg.module.getTarget();
- const scalar_ty = ty.scalarType();
+ const scalar_ty = ty.scalarType(mod);
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
if (intrinsicsAllowed(scalar_ty, target)) {
@@ -7531,7 +7584,7 @@ pub const FuncGen = struct {
.gte => .SGE,
};
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const vec_len = ty.vectorLen();
const vector_result_ty = llvm_i32.vectorType(vec_len);
@@ -7587,8 +7640,9 @@ pub const FuncGen = struct {
comptime params_len: usize,
params: [params_len]*llvm.Value,
) !*llvm.Value {
- const target = self.dg.module.getTarget();
- const scalar_ty = ty.scalarType();
+ const mod = self.dg.module;
+ const target = mod.getTarget();
+ const scalar_ty = ty.scalarType(mod);
const llvm_ty = try self.dg.lowerType(ty);
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
@@ -7615,7 +7669,7 @@ pub const FuncGen = struct {
const one = int_llvm_ty.constInt(1, .False);
const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False);
const sign_mask = one.constShl(shift_amt);
- const result = if (ty.zigTypeTag() == .Vector) blk: {
+ const result = if (ty.zigTypeTag(mod) == .Vector) blk: {
const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(), sign_mask, "");
const cast_ty = int_llvm_ty.vectorType(ty.vectorLen());
const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty, "");
@@ -7662,7 +7716,7 @@ pub const FuncGen = struct {
.libc => |fn_name| b: {
const param_types = [3]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty, scalar_llvm_ty };
const libc_fn = self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty);
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result = llvm_ty.getUndef();
return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen());
}
@@ -7686,6 +7740,7 @@ pub const FuncGen = struct {
}
fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -7694,21 +7749,19 @@ pub const FuncGen = struct {
const lhs_ty = self.air.typeOf(extra.lhs);
const rhs_ty = self.air.typeOf(extra.rhs);
- const lhs_scalar_ty = lhs_ty.scalarType();
- const rhs_scalar_ty = rhs_ty.scalarType();
+ const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const rhs_scalar_ty = rhs_ty.scalarType(mod);
const dest_ty = self.air.typeOfIndex(inst);
const llvm_dest_ty = try self.dg.lowerType(dest_ty);
- const tg = self.dg.module.getTarget();
-
- const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+ const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
else
rhs;
const result = self.builder.buildShl(lhs, casted_rhs, "");
- const reconstructed = if (lhs_scalar_ty.isSignedInt())
+ const reconstructed = if (lhs_scalar_ty.isSignedInt(mod))
self.builder.buildAShr(result, casted_rhs, "")
else
self.builder.buildLShr(result, casted_rhs, "");
@@ -7716,12 +7769,11 @@ pub const FuncGen = struct {
const overflow_bit = self.builder.buildICmp(.NE, lhs, reconstructed, "");
var ty_buf: Type.Payload.Pointer = undefined;
- const result_index = llvmFieldIndex(dest_ty, 0, tg, &ty_buf).?;
- const overflow_index = llvmFieldIndex(dest_ty, 1, tg, &ty_buf).?;
+ const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?;
+ const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?;
- if (isByRef(dest_ty)) {
- const target = self.dg.module.getTarget();
- const result_alignment = dest_ty.abiAlignment(target);
+ if (isByRef(dest_ty, mod)) {
+ const result_alignment = dest_ty.abiAlignment(mod);
const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment);
{
const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, "");
@@ -7763,6 +7815,7 @@ pub const FuncGen = struct {
}
fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7770,20 +7823,19 @@ pub const FuncGen = struct {
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
- const lhs_scalar_ty = lhs_ty.scalarType();
- const rhs_scalar_ty = rhs_ty.scalarType();
+ const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const rhs_scalar_ty = rhs_ty.scalarType(mod);
- const tg = self.dg.module.getTarget();
-
- const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+ const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
else
rhs;
- if (lhs_scalar_ty.isSignedInt()) return self.builder.buildNSWShl(lhs, casted_rhs, "");
+ if (lhs_scalar_ty.isSignedInt(mod)) return self.builder.buildNSWShl(lhs, casted_rhs, "");
return self.builder.buildNUWShl(lhs, casted_rhs, "");
}
fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7791,12 +7843,10 @@ pub const FuncGen = struct {
const lhs_type = self.air.typeOf(bin_op.lhs);
const rhs_type = self.air.typeOf(bin_op.rhs);
- const lhs_scalar_ty = lhs_type.scalarType();
- const rhs_scalar_ty = rhs_type.scalarType();
-
- const tg = self.dg.module.getTarget();
+ const lhs_scalar_ty = lhs_type.scalarType(mod);
+ const rhs_scalar_ty = rhs_type.scalarType(mod);
- const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+ const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_type), "")
else
rhs;
@@ -7804,6 +7854,7 @@ pub const FuncGen = struct {
}
fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7811,17 +7862,16 @@ pub const FuncGen = struct {
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
- const lhs_scalar_ty = lhs_ty.scalarType();
- const rhs_scalar_ty = rhs_ty.scalarType();
- const tg = self.dg.module.getTarget();
- const lhs_bits = lhs_scalar_ty.bitSize(tg);
+ const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const rhs_scalar_ty = rhs_ty.scalarType(mod);
+ const lhs_bits = lhs_scalar_ty.bitSize(mod);
- const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_bits)
+ const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_bits)
self.builder.buildZExt(rhs, lhs.typeOf(), "")
else
rhs;
- const result = if (lhs_scalar_ty.isSignedInt())
+ const result = if (lhs_scalar_ty.isSignedInt(mod))
self.builder.buildSShlSat(lhs, casted_rhs, "")
else
self.builder.buildUShlSat(lhs, casted_rhs, "");
@@ -7834,7 +7884,7 @@ pub const FuncGen = struct {
const lhs_scalar_llvm_ty = try self.dg.lowerType(lhs_scalar_ty);
const bits = lhs_scalar_llvm_ty.constInt(lhs_bits, .False);
const lhs_max = lhs_scalar_llvm_ty.constAllOnes();
- if (rhs_ty.zigTypeTag() == .Vector) {
+ if (rhs_ty.zigTypeTag(mod) == .Vector) {
const vec_len = rhs_ty.vectorLen();
const bits_vec = self.builder.buildVectorSplat(vec_len, bits, "");
const lhs_max_vec = self.builder.buildVectorSplat(vec_len, lhs_max, "");
@@ -7847,6 +7897,7 @@ pub const FuncGen = struct {
}
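The saturating-shift builders above lower Zig's <<| operator; a user-level sketch of the semantics (illustrative only):

const std = @import("std");

test "saturating shift left" {
    try std.testing.expectEqual(@as(u8, 12), @as(u8, 3) <<| 2); // no bits lost
    try std.testing.expectEqual(@as(u8, 255), @as(u8, 3) <<| 7); // bits would be lost, clamps to the maximum
}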
fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
@@ -7854,16 +7905,14 @@ pub const FuncGen = struct {
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
- const lhs_scalar_ty = lhs_ty.scalarType();
- const rhs_scalar_ty = rhs_ty.scalarType();
-
- const tg = self.dg.module.getTarget();
+ const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const rhs_scalar_ty = rhs_ty.scalarType(mod);
- const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+ const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
else
rhs;
- const is_signed_int = lhs_scalar_ty.isSignedInt();
+ const is_signed_int = lhs_scalar_ty.isSignedInt(mod);
if (is_exact) {
if (is_signed_int) {
@@ -7881,14 +7930,14 @@ pub const FuncGen = struct {
}
fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
- const target = self.dg.module.getTarget();
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const dest_ty = self.air.typeOfIndex(inst);
- const dest_info = dest_ty.intInfo(target);
+ const dest_info = dest_ty.intInfo(mod);
const dest_llvm_ty = try self.dg.lowerType(dest_ty);
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
- const operand_info = operand_ty.intInfo(target);
+ const operand_info = operand_ty.intInfo(mod);
if (operand_info.bits < dest_info.bits) {
switch (operand_info.signedness) {
@@ -7910,11 +7959,12 @@ pub const FuncGen = struct {
}
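The widening branch above chooses sign- or zero-extension from the operand's signedness, mirroring the language rule that widening preserves the value rather than the bit pattern. A tiny user-level sketch:

const std = @import("std");

test "widening preserves the value" {
    try std.testing.expectEqual(@as(i32, -1), @as(i32, @as(i8, -1))); // sign-extended
    try std.testing.expectEqual(@as(u32, 255), @as(u32, @as(u8, 0xFF))); // zero-extended
}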
fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
const dest_ty = self.air.typeOfIndex(inst);
- const target = self.dg.module.getTarget();
+ const target = mod.getTarget();
const dest_bits = dest_ty.floatBits(target);
const src_bits = operand_ty.floatBits(target);
@@ -7939,11 +7989,12 @@ pub const FuncGen = struct {
}
fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.air.typeOf(ty_op.operand);
const dest_ty = self.air.typeOfIndex(inst);
- const target = self.dg.module.getTarget();
+ const target = mod.getTarget();
const dest_bits = dest_ty.floatBits(target);
const src_bits = operand_ty.floatBits(target);
@@ -7985,10 +8036,10 @@ pub const FuncGen = struct {
}
fn bitCast(self: *FuncGen, operand: *llvm.Value, operand_ty: Type, inst_ty: Type) !*llvm.Value {
- const operand_is_ref = isByRef(operand_ty);
- const result_is_ref = isByRef(inst_ty);
+ const mod = self.dg.module;
+ const operand_is_ref = isByRef(operand_ty, mod);
+ const result_is_ref = isByRef(inst_ty, mod);
const llvm_dest_ty = try self.dg.lowerType(inst_ty);
- const target = self.dg.module.getTarget();
if (operand_is_ref and result_is_ref) {
// They are both pointers, so just return the same opaque pointer :)
@@ -8001,20 +8052,20 @@ pub const FuncGen = struct {
return self.builder.buildZExtOrBitCast(operand, llvm_dest_ty, "");
}
- if (operand_ty.zigTypeTag() == .Int and inst_ty.isPtrAtRuntime()) {
+ if (operand_ty.zigTypeTag(mod) == .Int and inst_ty.isPtrAtRuntime(mod)) {
return self.builder.buildIntToPtr(operand, llvm_dest_ty, "");
}
- if (operand_ty.zigTypeTag() == .Vector and inst_ty.zigTypeTag() == .Array) {
+ if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) {
const elem_ty = operand_ty.childType();
if (!result_is_ref) {
return self.dg.todo("implement bitcast vector to non-ref array", .{});
}
const array_ptr = self.buildAlloca(llvm_dest_ty, null);
- const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
+ const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
if (bitcast_ok) {
const llvm_store = self.builder.buildStore(operand, array_ptr);
- llvm_store.setAlignment(inst_ty.abiAlignment(target));
+ llvm_store.setAlignment(inst_ty.abiAlignment(mod));
} else {
// If the element type's ABI size in bits does not match its bit size,
// a simple bitcast will not work, and we fall back to extractelement.
@@ -8033,19 +8084,19 @@ pub const FuncGen = struct {
}
}
return array_ptr;
- } else if (operand_ty.zigTypeTag() == .Array and inst_ty.zigTypeTag() == .Vector) {
+ } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) {
const elem_ty = operand_ty.childType();
const llvm_vector_ty = try self.dg.lowerType(inst_ty);
if (!operand_is_ref) {
return self.dg.todo("implement bitcast non-ref array to vector", .{});
}
- const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
+ const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
if (bitcast_ok) {
const vector = self.builder.buildLoad(llvm_vector_ty, operand, "");
// The array is aligned to the element's alignment, while the vector might have a completely
// different alignment. This means we need to enforce the alignment of this load.
- vector.setAlignment(elem_ty.abiAlignment(target));
+ vector.setAlignment(elem_ty.abiAlignment(mod));
return vector;
} else {
// If the element type's ABI size in bits does not match its bit size,
@@ -8073,12 +8124,12 @@ pub const FuncGen = struct {
if (operand_is_ref) {
const load_inst = self.builder.buildLoad(llvm_dest_ty, operand, "");
- load_inst.setAlignment(operand_ty.abiAlignment(target));
+ load_inst.setAlignment(operand_ty.abiAlignment(mod));
return load_inst;
}
if (result_is_ref) {
- const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
+ const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod));
const result_ptr = self.buildAlloca(llvm_dest_ty, alignment);
const store_inst = self.builder.buildStore(operand, result_ptr);
store_inst.setAlignment(alignment);
@@ -8089,7 +8140,7 @@ pub const FuncGen = struct {
// Both our operand and our result are values, not pointers,
// but LLVM won't let us bitcast struct values.
// Therefore, we store operand to alloca, then load for result.
- const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
+ const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod));
const result_ptr = self.buildAlloca(llvm_dest_ty, alignment);
const store_inst = self.builder.buildStore(operand, result_ptr);
store_inst.setAlignment(alignment);
@@ -8118,12 +8169,13 @@ pub const FuncGen = struct {
}
const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
+ const mod = self.dg.module;
const func = self.dg.decl.getFunction().?;
- const lbrace_line = self.dg.module.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
+ const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
const lbrace_col = func.lbrace_column + 1;
const di_local_var = dib.createParameterVariable(
self.di_scope.?,
- func.getParamName(self.dg.module, src_index).ptr, // TODO test 0 bit args
+ func.getParamName(mod, src_index).ptr, // TODO test 0 bit args
self.di_file.?,
lbrace_line,
try self.dg.object.lowerDebugType(inst_ty, .full),
@@ -8134,10 +8186,10 @@ pub const FuncGen = struct {
const debug_loc = llvm.getDebugLoc(lbrace_line, lbrace_col, self.di_scope.?, null);
const insert_block = self.builder.getInsertBlock();
- if (isByRef(inst_ty)) {
+ if (isByRef(inst_ty, mod)) {
_ = dib.insertDeclareAtEnd(arg_val, di_local_var, debug_loc, insert_block);
} else if (self.dg.module.comp.bin_file.options.optimize_mode == .Debug) {
- const alignment = inst_ty.abiAlignment(self.dg.module.getTarget());
+ const alignment = inst_ty.abiAlignment(mod);
const alloca = self.buildAlloca(arg_val.typeOf(), alignment);
const store_inst = self.builder.buildStore(arg_val, alloca);
store_inst.setAlignment(alignment);
@@ -8153,22 +8205,22 @@ pub const FuncGen = struct {
fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const ptr_ty = self.air.typeOfIndex(inst);
const pointee_type = ptr_ty.childType();
- if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
+ const mod = self.dg.module;
+ if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
const pointee_llvm_ty = try self.dg.lowerType(pointee_type);
- const target = self.dg.module.getTarget();
- const alignment = ptr_ty.ptrAlignment(target);
+ const alignment = ptr_ty.ptrAlignment(mod);
return self.buildAlloca(pointee_llvm_ty, alignment);
}
fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const ptr_ty = self.air.typeOfIndex(inst);
const ret_ty = ptr_ty.childType();
- if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
+ const mod = self.dg.module;
+ if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
if (self.ret_ptr) |ret_ptr| return ret_ptr;
const ret_llvm_ty = try self.dg.lowerType(ret_ty);
- const target = self.dg.module.getTarget();
- return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(target));
+ return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(mod));
}
/// Use this instead of builder.buildAlloca, because this function makes sure to
@@ -8182,8 +8234,9 @@ pub const FuncGen = struct {
const dest_ptr = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.air.typeOf(bin_op.lhs);
const operand_ty = ptr_ty.childType();
+ const mod = self.dg.module;
- const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
+ const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false;
if (val_is_undef) {
// Even if safety is disabled, we still emit a memset to undefined since it conveys
// extra information to LLVM. However, safety makes the difference between using
@@ -8193,13 +8246,12 @@ pub const FuncGen = struct {
u8_llvm_ty.constInt(0xaa, .False)
else
u8_llvm_ty.getUndef();
- const target = self.dg.module.getTarget();
- const operand_size = operand_ty.abiSize(target);
+ const operand_size = operand_ty.abiSize(mod);
const usize_llvm_ty = try self.dg.lowerType(Type.usize);
const len = usize_llvm_ty.constInt(operand_size, .False);
- const dest_ptr_align = ptr_ty.ptrAlignment(target);
+ const dest_ptr_align = ptr_ty.ptrAlignment(mod);
_ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr());
- if (safety and self.dg.module.comp.bin_file.options.valgrind) {
+ if (safety and mod.comp.bin_file.options.valgrind) {
self.valgrindMarkUndef(dest_ptr, len);
}
return null;
@@ -8230,6 +8282,7 @@ pub const FuncGen = struct {
}
fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+ const mod = fg.dg.module;
const inst = body_tail[0];
const ty_op = fg.air.instructions.items(.data)[inst].ty_op;
const ptr_ty = fg.air.typeOf(ty_op.operand);
@@ -8237,7 +8290,7 @@ pub const FuncGen = struct {
const ptr = try fg.resolveInst(ty_op.operand);
elide: {
- if (!isByRef(ptr_info.pointee_type)) break :elide;
+ if (!isByRef(ptr_info.pointee_type, mod)) break :elide;
if (!canElideLoad(fg, body_tail)) break :elide;
return ptr;
}
@@ -8261,8 +8314,9 @@ pub const FuncGen = struct {
fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
_ = inst;
+ const mod = self.dg.module;
const llvm_usize = try self.dg.lowerType(Type.usize);
- const target = self.dg.module.getTarget();
+ const target = mod.getTarget();
if (!target_util.supportsReturnAddress(target)) {
// https://github.com/ziglang/zig/issues/11946
return llvm_usize.constNull();
@@ -8301,6 +8355,7 @@ pub const FuncGen = struct {
}
fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
const ptr = try self.resolveInst(extra.ptr);
@@ -8310,7 +8365,7 @@ pub const FuncGen = struct {
const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
if (opt_abi_ty) |abi_ty| {
// operand needs widening and truncating
- if (operand_ty.isSignedInt()) {
+ if (operand_ty.isSignedInt(mod)) {
expected_value = self.builder.buildSExt(expected_value, abi_ty, "");
new_value = self.builder.buildSExt(new_value, abi_ty, "");
} else {
@@ -8336,7 +8391,7 @@ pub const FuncGen = struct {
}
const success_bit = self.builder.buildExtractValue(result, 1, "");
- if (optional_ty.optionalReprIsPayload()) {
+ if (optional_ty.optionalReprIsPayload(mod)) {
return self.builder.buildSelect(success_bit, payload.typeOf().constNull(), payload, "");
}
@@ -8347,13 +8402,14 @@ pub const FuncGen = struct {
}
fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
const ptr = try self.resolveInst(pl_op.operand);
const ptr_ty = self.air.typeOf(pl_op.operand);
const operand_ty = ptr_ty.elemType();
const operand = try self.resolveInst(extra.operand);
- const is_signed_int = operand_ty.isSignedInt();
+ const is_signed_int = operand_ty.isSignedInt(mod);
const is_float = operand_ty.isRuntimeFloat();
const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float);
const ordering = toLlvmAtomicOrdering(extra.ordering());
@@ -8402,17 +8458,17 @@ pub const FuncGen = struct {
}
fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
const ptr = try self.resolveInst(atomic_load.ptr);
const ptr_ty = self.air.typeOf(atomic_load.ptr);
const ptr_info = ptr_ty.ptrInfo().data;
const elem_ty = ptr_info.pointee_type;
- if (!elem_ty.hasRuntimeBitsIgnoreComptime())
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod))
return null;
const ordering = toLlvmAtomicOrdering(atomic_load.order);
const opt_abi_llvm_ty = self.dg.getAtomicAbiType(elem_ty, false);
- const target = self.dg.module.getTarget();
- const ptr_alignment = ptr_info.alignment(target);
+ const ptr_alignment = ptr_info.alignment(mod);
const ptr_volatile = llvm.Bool.fromBool(ptr_info.@"volatile");
const elem_llvm_ty = try self.dg.lowerType(elem_ty);
@@ -8436,17 +8492,18 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
ordering: llvm.AtomicOrdering,
) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.air.typeOf(bin_op.lhs);
const operand_ty = ptr_ty.childType();
- if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return null;
+ if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null;
const ptr = try self.resolveInst(bin_op.lhs);
var element = try self.resolveInst(bin_op.rhs);
const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
if (opt_abi_ty) |abi_ty| {
// operand needs widening
- if (operand_ty.isSignedInt()) {
+ if (operand_ty.isSignedInt(mod)) {
element = self.builder.buildSExt(element, abi_ty, "");
} else {
element = self.builder.buildZExt(element, abi_ty, "");
@@ -8457,18 +8514,19 @@ pub const FuncGen = struct {
}
fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const dest_slice = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.air.typeOf(bin_op.lhs);
const elem_ty = self.air.typeOf(bin_op.rhs);
const module = self.dg.module;
const target = module.getTarget();
- const dest_ptr_align = ptr_ty.ptrAlignment(target);
+ const dest_ptr_align = ptr_ty.ptrAlignment(mod);
const u8_llvm_ty = self.context.intType(8);
const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty);
const is_volatile = ptr_ty.isVolatilePtr();
- if (self.air.value(bin_op.rhs)) |elem_val| {
+ if (self.air.value(bin_op.rhs, mod)) |elem_val| {
if (elem_val.isUndefDeep()) {
// Even if safety is disabled, we still emit a memset to undefined since it conveys
// extra information to LLVM. However, safety makes the difference between using
@@ -8503,7 +8561,7 @@ pub const FuncGen = struct {
}
const value = try self.resolveInst(bin_op.rhs);
- const elem_abi_size = elem_ty.abiSize(target);
+ const elem_abi_size = elem_ty.abiSize(mod);
if (elem_abi_size == 1) {
// In this case we can take advantage of LLVM's intrinsic.
@@ -8551,9 +8609,9 @@ pub const FuncGen = struct {
_ = self.builder.buildCondBr(end, body_block, end_block);
self.builder.positionBuilderAtEnd(body_block);
- const elem_abi_alignment = elem_ty.abiAlignment(target);
+ const elem_abi_alignment = elem_ty.abiAlignment(mod);
const it_ptr_alignment = @min(elem_abi_alignment, dest_ptr_align);
- if (isByRef(elem_ty)) {
+ if (isByRef(elem_ty, mod)) {
_ = self.builder.buildMemCpy(
it_ptr,
it_ptr_alignment,
@@ -8589,13 +8647,13 @@ pub const FuncGen = struct {
const src_ptr = self.sliceOrArrayPtr(src_slice, src_ptr_ty);
const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty);
const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty);
+ const mod = self.dg.module;
const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr();
- const target = self.dg.module.getTarget();
_ = self.builder.buildMemCpy(
dest_ptr,
- dest_ptr_ty.ptrAlignment(target),
+ dest_ptr_ty.ptrAlignment(mod),
src_ptr,
- src_ptr_ty.ptrAlignment(target),
+ src_ptr_ty.ptrAlignment(mod),
len,
is_volatile,
);
@@ -8603,10 +8661,10 @@ pub const FuncGen = struct {
}
fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const un_ty = self.air.typeOf(bin_op.lhs).childType();
- const target = self.dg.module.getTarget();
- const layout = un_ty.unionGetLayout(target);
+ const layout = un_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return null;
const union_ptr = try self.resolveInst(bin_op.lhs);
const new_tag = try self.resolveInst(bin_op.rhs);
@@ -8624,13 +8682,13 @@ pub const FuncGen = struct {
}
fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const un_ty = self.air.typeOf(ty_op.operand);
- const target = self.dg.module.getTarget();
- const layout = un_ty.unionGetLayout(target);
+ const layout = un_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return null;
const union_handle = try self.resolveInst(ty_op.operand);
- if (isByRef(un_ty)) {
+ if (isByRef(un_ty, mod)) {
const llvm_un_ty = try self.dg.lowerType(un_ty);
if (layout.payload_size == 0) {
return self.builder.buildLoad(llvm_un_ty, union_handle, "");
@@ -8666,6 +8724,7 @@ pub const FuncGen = struct {
}
fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
@@ -8679,9 +8738,8 @@ pub const FuncGen = struct {
const result_ty = self.air.typeOfIndex(inst);
const result_llvm_ty = try self.dg.lowerType(result_ty);
- const target = self.dg.module.getTarget();
- const bits = operand_ty.intInfo(target).bits;
- const result_bits = result_ty.intInfo(target).bits;
+ const bits = operand_ty.intInfo(mod).bits;
+ const result_bits = result_ty.intInfo(mod).bits;
if (bits > result_bits) {
return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
} else if (bits < result_bits) {
@@ -8692,6 +8750,7 @@ pub const FuncGen = struct {
}
fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
@@ -8704,9 +8763,8 @@ pub const FuncGen = struct {
const result_ty = self.air.typeOfIndex(inst);
const result_llvm_ty = try self.dg.lowerType(result_ty);
- const target = self.dg.module.getTarget();
- const bits = operand_ty.intInfo(target).bits;
- const result_bits = result_ty.intInfo(target).bits;
+ const bits = operand_ty.intInfo(mod).bits;
+ const result_bits = result_ty.intInfo(mod).bits;
if (bits > result_bits) {
return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
} else if (bits < result_bits) {
@@ -8717,10 +8775,10 @@ pub const FuncGen = struct {
}
fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
- const target = self.dg.module.getTarget();
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
- var bits = operand_ty.intInfo(target).bits;
+ var bits = operand_ty.intInfo(mod).bits;
assert(bits % 8 == 0);
var operand = try self.resolveInst(ty_op.operand);
@@ -8730,7 +8788,7 @@ pub const FuncGen = struct {
// If not an even byte-multiple, we need zero-extend + shift-left 1 byte
// The truncated result at the end will be the correct bswap
const scalar_llvm_ty = self.context.intType(bits + 8);
- if (operand_ty.zigTypeTag() == .Vector) {
+ if (operand_ty.zigTypeTag(mod) == .Vector) {
const vec_len = operand_ty.vectorLen();
operand_llvm_ty = scalar_llvm_ty.vectorType(vec_len);
@@ -8759,7 +8817,7 @@ pub const FuncGen = struct {
const result_ty = self.air.typeOfIndex(inst);
const result_llvm_ty = try self.dg.lowerType(result_ty);
- const result_bits = result_ty.intInfo(target).bits;
+ const result_bits = result_ty.intInfo(mod).bits;
if (bits > result_bits) {
return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
} else if (bits < result_bits) {
@@ -8770,6 +8828,7 @@ pub const FuncGen = struct {
}
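LLVM's bswap intrinsic is only defined for an even number of bytes, so the path above widens odd-byte-count integers by one byte, shifts left by 8, swaps, and truncates. A worked user-level example of the value this lowering must reproduce (assuming current single-argument @byteSwap syntax):

const std = @import("std");

test "byteSwap of a 3-byte integer" {
    // zext(0xAABBCC) == 0x00AABBCC, << 8 == 0xAABBCC00,
    // bswap as u32 == 0x00CCBBAA, trunc back to u24 == 0xCCBBAA.
    try std.testing.expectEqual(@as(u24, 0xCCBBAA), @byteSwap(@as(u24, 0xAABBCC)));
}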
fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const error_set_ty = self.air.getRefType(ty_op.ty);
@@ -8781,7 +8840,7 @@ pub const FuncGen = struct {
const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len));
for (names) |name| {
- const err_int = self.dg.module.global_error_set.get(name).?;
+ const err_int = mod.global_error_set.get(name).?;
const this_tag_int_value = int: {
var tag_val_payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
@@ -8841,8 +8900,7 @@ pub const FuncGen = struct {
defer self.gpa.free(fqn);
const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn});
- var int_tag_type_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
+ const int_tag_ty = enum_ty.intTagType();
const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};
const llvm_ret_ty = try self.dg.lowerType(Type.bool);
@@ -8923,11 +8981,9 @@ pub const FuncGen = struct {
const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
const llvm_ret_ty = try self.dg.lowerType(slice_ty);
const usize_llvm_ty = try self.dg.lowerType(Type.usize);
- const target = self.dg.module.getTarget();
- const slice_alignment = slice_ty.abiAlignment(target);
+ const slice_alignment = slice_ty.abiAlignment(mod);
- var int_tag_type_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
+ const int_tag_ty = enum_ty.intTagType();
const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};
const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
@@ -9057,6 +9113,7 @@ pub const FuncGen = struct {
}
fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const a = try self.resolveInst(extra.a);
@@ -9077,11 +9134,11 @@ pub const FuncGen = struct {
for (values, 0..) |*val, i| {
var buf: Value.ElemValueBuffer = undefined;
- const elem = mask.elemValueBuffer(self.dg.module, i, &buf);
+ const elem = mask.elemValueBuffer(mod, i, &buf);
if (elem.isUndef()) {
val.* = llvm_i32.getUndef();
} else {
- const int = elem.toSignedInt(self.dg.module.getTarget());
+ const int = elem.toSignedInt(mod);
const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len);
val.* = llvm_i32.constInt(unsigned, .False);
}
@@ -9157,7 +9214,8 @@ pub const FuncGen = struct {
fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
self.builder.setFastMath(want_fast_math);
- const target = self.dg.module.getTarget();
+ const mod = self.dg.module;
+ const target = mod.getTarget();
const reduce = self.air.instructions.items(.data)[inst].reduce;
const operand = try self.resolveInst(reduce.operand);
@@ -9168,21 +9226,21 @@ pub const FuncGen = struct {
.And => return self.builder.buildAndReduce(operand),
.Or => return self.builder.buildOrReduce(operand),
.Xor => return self.builder.buildXorReduce(operand),
- .Min => switch (scalar_ty.zigTypeTag()) {
- .Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt()),
+ .Min => switch (scalar_ty.zigTypeTag(mod)) {
+ .Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt(mod)),
.Float => if (intrinsicsAllowed(scalar_ty, target)) {
return self.builder.buildFPMinReduce(operand);
},
else => unreachable,
},
- .Max => switch (scalar_ty.zigTypeTag()) {
- .Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt()),
+ .Max => switch (scalar_ty.zigTypeTag(mod)) {
+ .Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt(mod)),
.Float => if (intrinsicsAllowed(scalar_ty, target)) {
return self.builder.buildFPMaxReduce(operand);
},
else => unreachable,
},
- .Add => switch (scalar_ty.zigTypeTag()) {
+ .Add => switch (scalar_ty.zigTypeTag(mod)) {
.Int => return self.builder.buildAddReduce(operand),
.Float => if (intrinsicsAllowed(scalar_ty, target)) {
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
@@ -9191,7 +9249,7 @@ pub const FuncGen = struct {
},
else => unreachable,
},
- .Mul => switch (scalar_ty.zigTypeTag()) {
+ .Mul => switch (scalar_ty.zigTypeTag(mod)) {
.Int => return self.builder.buildMulReduce(operand),
.Float => if (intrinsicsAllowed(scalar_ty, target)) {
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
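The switch above maps @reduce onto LLVM's vector-reduction builders, with the integer min/max cases passing the element signedness. A user-level sketch of the semantics (illustrative only):

const std = @import("std");

test "reduce over a vector" {
    const v: @Vector(4, i32) = .{ 1, -2, 3, 4 };
    try std.testing.expectEqual(@as(i32, 6), @reduce(.Add, v));
    try std.testing.expectEqual(@as(i32, -2), @reduce(.Min, v)); // signed compare matters here
}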
@@ -9247,9 +9305,9 @@ pub const FuncGen = struct {
const len = @intCast(usize, result_ty.arrayLen());
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const llvm_result_ty = try self.dg.lowerType(result_ty);
- const target = self.dg.module.getTarget();
+ const mod = self.dg.module;
- switch (result_ty.zigTypeTag()) {
+ switch (result_ty.zigTypeTag(mod)) {
.Vector => {
const llvm_u32 = self.context.intType(32);
@@ -9265,7 +9323,7 @@ pub const FuncGen = struct {
if (result_ty.containerLayout() == .Packed) {
const struct_obj = result_ty.castTag(.@"struct").?.data;
assert(struct_obj.haveLayout());
- const big_bits = struct_obj.backing_int_ty.bitSize(target);
+ const big_bits = struct_obj.backing_int_ty.bitSize(mod);
const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
@@ -9273,12 +9331,12 @@ pub const FuncGen = struct {
var running_bits: u16 = 0;
for (elements, 0..) |elem, i| {
const field = fields[i];
- if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const non_int_val = try self.resolveInst(elem);
- const ty_bit_size = @intCast(u16, field.ty.bitSize(target));
+ const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
const small_int_ty = self.context.intType(ty_bit_size);
- const small_int_val = if (field.ty.isPtrAtRuntime())
+ const small_int_val = if (field.ty.isPtrAtRuntime(mod))
self.builder.buildPtrToInt(non_int_val, small_int_ty, "")
else
self.builder.buildBitCast(non_int_val, small_int_ty, "");
@@ -9296,24 +9354,24 @@ pub const FuncGen = struct {
var ptr_ty_buf: Type.Payload.Pointer = undefined;
- if (isByRef(result_ty)) {
+ if (isByRef(result_ty, mod)) {
const llvm_u32 = self.context.intType(32);
// TODO in debug builds init to undef so that the padding will be 0xaa
// even if we fully populate the fields.
- const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target));
+ const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod));
var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined };
for (elements, 0..) |elem, i| {
- if (result_ty.structFieldValueComptime(i) != null) continue;
+ if (result_ty.structFieldValueComptime(mod, i) != null) continue;
const llvm_elem = try self.resolveInst(elem);
- const llvm_i = llvmFieldIndex(result_ty, i, target, &ptr_ty_buf).?;
+ const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?;
indices[1] = llvm_u32.constInt(llvm_i, .False);
const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
var field_ptr_payload: Type.Payload.Pointer = .{
.data = .{
.pointee_type = self.air.typeOf(elem),
- .@"align" = result_ty.structFieldAlign(i, target),
+ .@"align" = result_ty.structFieldAlign(i, mod),
.@"addrspace" = .generic,
},
};
@@ -9325,20 +9383,20 @@ pub const FuncGen = struct {
} else {
var result = llvm_result_ty.getUndef();
for (elements, 0..) |elem, i| {
- if (result_ty.structFieldValueComptime(i) != null) continue;
+ if (result_ty.structFieldValueComptime(mod, i) != null) continue;
const llvm_elem = try self.resolveInst(elem);
- const llvm_i = llvmFieldIndex(result_ty, i, target, &ptr_ty_buf).?;
+ const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?;
result = self.builder.buildInsertValue(result, llvm_elem, llvm_i, "");
}
return result;
}
},
.Array => {
- assert(isByRef(result_ty));
+ assert(isByRef(result_ty, mod));
const llvm_usize = try self.dg.lowerType(Type.usize);
- const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target));
+ const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod));
const array_info = result_ty.arrayInfo();
var elem_ptr_payload: Type.Payload.Pointer = .{
@@ -9379,22 +9437,22 @@ pub const FuncGen = struct {
}
fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ const mod = self.dg.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
const union_ty = self.air.typeOfIndex(inst);
const union_llvm_ty = try self.dg.lowerType(union_ty);
- const target = self.dg.module.getTarget();
- const layout = union_ty.unionGetLayout(target);
+ const layout = union_ty.unionGetLayout(mod);
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
if (union_obj.layout == .Packed) {
- const big_bits = union_ty.bitSize(target);
+ const big_bits = union_ty.bitSize(mod);
const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits));
const field = union_obj.fields.values()[extra.field_index];
const non_int_val = try self.resolveInst(extra.init);
- const ty_bit_size = @intCast(u16, field.ty.bitSize(target));
+ const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
const small_int_ty = self.context.intType(ty_bit_size);
- const small_int_val = if (field.ty.isPtrAtRuntime())
+ const small_int_val = if (field.ty.isPtrAtRuntime(mod))
self.builder.buildPtrToInt(non_int_val, small_int_ty, "")
else
self.builder.buildBitCast(non_int_val, small_int_ty, "");
@@ -9412,16 +9470,16 @@ pub const FuncGen = struct {
const tag_val = Value.initPayload(&tag_val_payload.base);
var int_payload: Value.Payload.U64 = undefined;
const tag_int_val = tag_val.enumToInt(tag_ty, &int_payload);
- break :blk tag_int_val.toUnsignedInt(target);
+ break :blk tag_int_val.toUnsignedInt(mod);
};
if (layout.payload_size == 0) {
if (layout.tag_size == 0) {
return null;
}
- assert(!isByRef(union_ty));
+ assert(!isByRef(union_ty, mod));
return union_llvm_ty.constInt(tag_int, .False);
}
- assert(isByRef(union_ty));
+ assert(isByRef(union_ty, mod));
// The llvm type of the alloca will be the named LLVM union type, and will not
// necessarily match the format that we need, depending on which tag is active.
// We must construct the correct unnamed struct type here, in order to then set
@@ -9431,12 +9489,12 @@ pub const FuncGen = struct {
assert(union_obj.haveFieldTypes());
const field = union_obj.fields.values()[extra.field_index];
const field_llvm_ty = try self.dg.lowerType(field.ty);
- const field_size = field.ty.abiSize(target);
- const field_align = field.normalAlignment(target);
+ const field_size = field.ty.abiSize(mod);
+ const field_align = field.normalAlignment(mod);
const llvm_union_ty = t: {
const payload = p: {
- if (!field.ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) {
const padding_len = @intCast(c_uint, layout.payload_size);
break :p self.context.intType(8).arrayType(padding_len);
}
@@ -9511,7 +9569,7 @@ pub const FuncGen = struct {
const tag_llvm_ty = try self.dg.lowerType(union_obj.tag_ty);
const llvm_tag = tag_llvm_ty.constInt(tag_int, .False);
const store_inst = self.builder.buildStore(llvm_tag, field_ptr);
- store_inst.setAlignment(union_obj.tag_ty.abiAlignment(target));
+ store_inst.setAlignment(union_obj.tag_ty.abiAlignment(mod));
}
return result_ptr;
@@ -9535,7 +9593,8 @@ pub const FuncGen = struct {
// by the target.
// To work around this, don't emit llvm.prefetch in this case.
// See https://bugs.llvm.org/show_bug.cgi?id=21037
- const target = self.dg.module.getTarget();
+ const mod = self.dg.module;
+ const target = mod.getTarget();
switch (prefetch.cache) {
.instruction => switch (target.cpu.arch) {
.x86_64,
@@ -9658,8 +9717,9 @@ pub const FuncGen = struct {
return table;
}
+ const mod = self.dg.module;
const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
- const slice_alignment = slice_ty.abiAlignment(self.dg.module.getTarget());
+ const slice_alignment = slice_ty.abiAlignment(mod);
const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space
const error_name_table_global = self.dg.object.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table");
@@ -9703,14 +9763,14 @@ pub const FuncGen = struct {
) !*llvm.Value {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_ty.optionalChild(&buf);
+ const mod = fg.dg.module;
- if (isByRef(opt_ty)) {
+ if (isByRef(opt_ty, mod)) {
// We have a pointer and we need to return a pointer to the first field.
const payload_ptr = fg.builder.buildStructGEP(opt_llvm_ty, opt_handle, 0, "");
- const target = fg.dg.module.getTarget();
- const payload_alignment = payload_ty.abiAlignment(target);
- if (isByRef(payload_ty)) {
+ const payload_alignment = payload_ty.abiAlignment(mod);
+ if (isByRef(payload_ty, mod)) {
if (can_elide_load)
return payload_ptr;
@@ -9722,7 +9782,7 @@ pub const FuncGen = struct {
return load_inst;
}
- assert(!isByRef(payload_ty));
+ assert(!isByRef(payload_ty, mod));
return fg.builder.buildExtractValue(opt_handle, 0, "");
}
@@ -9734,10 +9794,10 @@ pub const FuncGen = struct {
) !?*llvm.Value {
const optional_llvm_ty = try self.dg.lowerType(optional_ty);
const non_null_field = self.builder.buildZExt(non_null_bit, self.context.intType(8), "");
+ const mod = self.dg.module;
- if (isByRef(optional_ty)) {
- const target = self.dg.module.getTarget();
- const payload_alignment = optional_ty.abiAlignment(target);
+ if (isByRef(optional_ty, mod)) {
+ const payload_alignment = optional_ty.abiAlignment(mod);
const alloca_inst = self.buildAlloca(optional_llvm_ty, payload_alignment);
{
@@ -9765,9 +9825,9 @@ pub const FuncGen = struct {
struct_ptr_ty: Type,
field_index: u32,
) !?*llvm.Value {
- const target = self.dg.object.target;
const struct_ty = struct_ptr_ty.childType();
- switch (struct_ty.zigTypeTag()) {
+ const mod = self.dg.module;
+ switch (struct_ty.zigTypeTag(mod)) {
.Struct => switch (struct_ty.containerLayout()) {
.Packed => {
const result_ty = self.air.typeOfIndex(inst);
@@ -9783,7 +9843,7 @@ pub const FuncGen = struct {
// We have a pointer to a packed struct field that happens to be byte-aligned.
// Offset our operand pointer by the correct number of bytes.
- const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, target);
+ const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, mod);
if (byte_offset == 0) return struct_ptr;
const byte_llvm_ty = self.context.intType(8);
const llvm_usize = try self.dg.lowerType(Type.usize);
@@ -9795,7 +9855,7 @@ pub const FuncGen = struct {
const struct_llvm_ty = try self.dg.lowerPtrElemTy(struct_ty);
var ty_buf: Type.Payload.Pointer = undefined;
- if (llvmFieldIndex(struct_ty, field_index, target, &ty_buf)) |llvm_field_index| {
+ if (llvmFieldIndex(struct_ty, field_index, mod, &ty_buf)) |llvm_field_index| {
return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field_index, "");
} else {
// If we found no index then this means this is a zero sized field at the
@@ -9803,14 +9863,14 @@ pub const FuncGen = struct {
// the index to the element at index `1` to get a pointer to the end of
// the struct.
const llvm_u32 = self.context.intType(32);
- const llvm_index = llvm_u32.constInt(@boolToInt(struct_ty.hasRuntimeBitsIgnoreComptime()), .False);
+ const llvm_index = llvm_u32.constInt(@boolToInt(struct_ty.hasRuntimeBitsIgnoreComptime(mod)), .False);
const indices: [1]*llvm.Value = .{llvm_index};
return self.builder.buildInBoundsGEP(struct_llvm_ty, struct_ptr, &indices, indices.len, "");
}
},
},
.Union => {
- const layout = struct_ty.unionGetLayout(target);
+ const layout = struct_ty.unionGetLayout(mod);
if (layout.payload_size == 0 or struct_ty.containerLayout() == .Packed) return struct_ptr;
const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
const union_llvm_ty = try self.dg.lowerType(struct_ty);
@@ -9835,12 +9895,12 @@ pub const FuncGen = struct {
ptr_alignment: u32,
is_volatile: bool,
) !*llvm.Value {
+ const mod = fg.dg.module;
const pointee_llvm_ty = try fg.dg.lowerType(pointee_type);
- const target = fg.dg.module.getTarget();
- const result_align = @max(ptr_alignment, pointee_type.abiAlignment(target));
+ const result_align = @max(ptr_alignment, pointee_type.abiAlignment(mod));
const result_ptr = fg.buildAlloca(pointee_llvm_ty, result_align);
- const llvm_usize = fg.context.intType(Type.usize.intInfo(target).bits);
- const size_bytes = pointee_type.abiSize(target);
+ const llvm_usize = fg.context.intType(Type.usize.intInfo(mod).bits);
+ const size_bytes = pointee_type.abiSize(mod);
_ = fg.builder.buildMemCpy(
result_ptr,
result_align,
@@ -9856,11 +9916,11 @@ pub const FuncGen = struct {
/// alloca and copies the value into it, then returns the alloca instruction.
/// For isByRef=false types, it creates a load instruction and returns it.
fn load(self: *FuncGen, ptr: *llvm.Value, ptr_ty: Type) !?*llvm.Value {
+ const mod = self.dg.module;
const info = ptr_ty.ptrInfo().data;
- if (!info.pointee_type.hasRuntimeBitsIgnoreComptime()) return null;
+ if (!info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) return null;
- const target = self.dg.module.getTarget();
- const ptr_alignment = info.alignment(target);
+ const ptr_alignment = info.alignment(mod);
const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr());
assert(info.vector_index != .runtime);
@@ -9877,7 +9937,7 @@ pub const FuncGen = struct {
}
if (info.host_size == 0) {
- if (isByRef(info.pointee_type)) {
+ if (isByRef(info.pointee_type, mod)) {
return self.loadByRef(ptr, info.pointee_type, ptr_alignment, info.@"volatile");
}
const elem_llvm_ty = try self.dg.lowerType(info.pointee_type);
@@ -9892,13 +9952,13 @@ pub const FuncGen = struct {
containing_int.setAlignment(ptr_alignment);
containing_int.setVolatile(ptr_volatile);
- const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target));
+ const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod));
const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False);
const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
const elem_llvm_ty = try self.dg.lowerType(info.pointee_type);
- if (isByRef(info.pointee_type)) {
- const result_align = info.pointee_type.abiAlignment(target);
+ if (isByRef(info.pointee_type, mod)) {
+ const result_align = info.pointee_type.abiAlignment(mod);
const result_ptr = self.buildAlloca(elem_llvm_ty, result_align);
const same_size_int = self.context.intType(elem_bits);
@@ -9908,13 +9968,13 @@ pub const FuncGen = struct {
return result_ptr;
}
- if (info.pointee_type.zigTypeTag() == .Float or info.pointee_type.zigTypeTag() == .Vector) {
+ if (info.pointee_type.zigTypeTag(mod) == .Float or info.pointee_type.zigTypeTag(mod) == .Vector) {
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
}
- if (info.pointee_type.isPtrAtRuntime()) {
+ if (info.pointee_type.isPtrAtRuntime(mod)) {
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -9932,11 +9992,11 @@ pub const FuncGen = struct {
) !void {
const info = ptr_ty.ptrInfo().data;
const elem_ty = info.pointee_type;
- if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) {
+ const mod = self.dg.module;
+ if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
return;
}
- const target = self.dg.module.getTarget();
- const ptr_alignment = ptr_ty.ptrAlignment(target);
+ const ptr_alignment = ptr_ty.ptrAlignment(mod);
const ptr_volatile = llvm.Bool.fromBool(info.@"volatile");
assert(info.vector_index != .runtime);
@@ -9964,13 +10024,13 @@ pub const FuncGen = struct {
assert(ordering == .NotAtomic);
containing_int.setAlignment(ptr_alignment);
containing_int.setVolatile(ptr_volatile);
- const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target));
+ const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod));
const containing_int_ty = containing_int.typeOf();
const shift_amt = containing_int_ty.constInt(info.bit_offset, .False);
// Convert to equally-sized integer type in order to perform the bit
// operations on the value to store
const value_bits_type = self.context.intType(elem_bits);
- const value_bits = if (elem_ty.isPtrAtRuntime())
+ const value_bits = if (elem_ty.isPtrAtRuntime(mod))
self.builder.buildPtrToInt(elem, value_bits_type, "")
else
self.builder.buildBitCast(elem, value_bits_type, "");
@@ -9991,7 +10051,7 @@ pub const FuncGen = struct {
store_inst.setVolatile(ptr_volatile);
return;
}
- if (!isByRef(elem_ty)) {
+ if (!isByRef(elem_ty, mod)) {
const store_inst = self.builder.buildStore(elem, ptr);
store_inst.setOrdering(ordering);
store_inst.setAlignment(ptr_alignment);
@@ -9999,13 +10059,13 @@ pub const FuncGen = struct {
return;
}
assert(ordering == .NotAtomic);
- const size_bytes = elem_ty.abiSize(target);
+ const size_bytes = elem_ty.abiSize(mod);
_ = self.builder.buildMemCpy(
ptr,
ptr_alignment,
elem,
- elem_ty.abiAlignment(target),
- self.context.intType(Type.usize.intInfo(target).bits).constInt(size_bytes, .False),
+ elem_ty.abiAlignment(mod),
+ self.context.intType(Type.usize.intInfo(mod).bits).constInt(size_bytes, .False),
info.@"volatile",
);
}
@@ -10030,11 +10090,12 @@ pub const FuncGen = struct {
a4: *llvm.Value,
a5: *llvm.Value,
) *llvm.Value {
- const target = fg.dg.module.getTarget();
+ const mod = fg.dg.module;
+ const target = mod.getTarget();
if (!target_util.hasValgrindSupport(target)) return default_value;
const usize_llvm_ty = fg.context.intType(target.ptrBitWidth());
- const usize_alignment = @intCast(c_uint, Type.usize.abiSize(target));
+ const usize_alignment = @intCast(c_uint, Type.usize.abiSize(mod));
const array_llvm_ty = usize_llvm_ty.arrayType(6);
const array_ptr = fg.valgrind_client_request_array orelse a: {
@@ -10451,7 +10512,7 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ
fn llvmFieldIndex(
ty: Type,
field_index: usize,
- target: std.Target,
+ mod: *const Module,
ptr_pl_buf: *Type.Payload.Pointer,
) ?c_uint {
// Detects where we inserted extra padding fields so that we can skip
@@ -10464,9 +10525,9 @@ fn llvmFieldIndex(
const tuple = ty.tupleFields();
var llvm_field_index: c_uint = 0;
for (tuple.types, 0..) |field_ty, i| {
- if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
+ if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue;
- const field_align = field_ty.abiAlignment(target);
+ const field_align = field_ty.abiAlignment(mod);
big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
@@ -10488,7 +10549,7 @@ fn llvmFieldIndex(
}
llvm_field_index += 1;
- offset += field_ty.abiSize(target);
+ offset += field_ty.abiSize(mod);
}
return null;
}
@@ -10496,10 +10557,10 @@ fn llvmFieldIndex(
assert(layout != .Packed);
var llvm_field_index: c_uint = 0;
- var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator();
+ var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod);
while (it.next()) |field_and_index| {
const field = field_and_index.field;
- const field_align = field.alignment(target, layout);
+ const field_align = field.alignment(mod, layout);
big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
@@ -10521,43 +10582,44 @@ fn llvmFieldIndex(
}
llvm_field_index += 1;
- offset += field.ty.abiSize(target);
+ offset += field.ty.abiSize(mod);
} else {
// We did not find an llvm field that corresponds to this zig field.
return null;
}
}
-fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool {
- if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) return false;
+fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *const Module) bool {
+ if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) return false;
+ const target = mod.getTarget();
switch (fn_info.cc) {
- .Unspecified, .Inline => return isByRef(fn_info.return_type),
+ .Unspecified, .Inline => return isByRef(fn_info.return_type, mod),
.C => switch (target.cpu.arch) {
.mips, .mipsel => return false,
.x86_64 => switch (target.os.tag) {
- .windows => return x86_64_abi.classifyWindows(fn_info.return_type, target) == .memory,
- else => return firstParamSRetSystemV(fn_info.return_type, target),
+ .windows => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory,
+ else => return firstParamSRetSystemV(fn_info.return_type, mod),
},
- .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, target)[0] == .indirect,
- .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, target) == .memory,
- .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, target, .ret)) {
+ .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, mod)[0] == .indirect,
+ .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, mod) == .memory,
+ .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) {
.memory, .i64_array => return true,
.i32_array => |size| return size != 1,
.byval => return false,
},
- .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, target) == .memory,
+ .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, mod) == .memory,
else => return false, // TODO investigate C ABI for other architectures
},
- .SysV => return firstParamSRetSystemV(fn_info.return_type, target),
- .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, target) == .memory,
- .Stdcall => return !isScalar(fn_info.return_type),
+ .SysV => return firstParamSRetSystemV(fn_info.return_type, mod),
+ .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory,
+ .Stdcall => return !isScalar(mod, fn_info.return_type),
else => return false,
}
}
-fn firstParamSRetSystemV(ty: Type, target: std.Target) bool {
- const class = x86_64_abi.classifySystemV(ty, target, .ret);
+fn firstParamSRetSystemV(ty: Type, mod: *const Module) bool {
+ const class = x86_64_abi.classifySystemV(ty, mod, .ret);
if (class[0] == .memory) return true;
if (class[0] == .x87 and class[2] != .none) return true;
return false;
@@ -10567,20 +10629,21 @@ fn firstParamSRetSystemV(ty: Type, target: std.Target) bool {
/// completely differently in the function prototype to honor the C ABI, and then
/// be effectively bitcasted to the actual return type.
fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
- if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) {
+ const mod = dg.module;
+ if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) {
// If the return type is an error set or an error union, then we make this
// anyerror return type instead, so that it can be coerced into a function
// pointer type which has anyerror as the return type.
- if (fn_info.return_type.isError()) {
+ if (fn_info.return_type.isError(mod)) {
return dg.lowerType(Type.anyerror);
} else {
return dg.context.voidType();
}
}
- const target = dg.module.getTarget();
+ const target = mod.getTarget();
switch (fn_info.cc) {
.Unspecified, .Inline => {
- if (isByRef(fn_info.return_type)) {
+ if (isByRef(fn_info.return_type, mod)) {
return dg.context.voidType();
} else {
return dg.lowerType(fn_info.return_type);
@@ -10594,33 +10657,33 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
else => return lowerSystemVFnRetTy(dg, fn_info),
},
.wasm32 => {
- if (isScalar(fn_info.return_type)) {
+ if (isScalar(mod, fn_info.return_type)) {
return dg.lowerType(fn_info.return_type);
}
- const classes = wasm_c_abi.classifyType(fn_info.return_type, target);
+ const classes = wasm_c_abi.classifyType(fn_info.return_type, mod);
if (classes[0] == .indirect or classes[0] == .none) {
return dg.context.voidType();
}
assert(classes[0] == .direct and classes[1] == .none);
- const scalar_type = wasm_c_abi.scalarType(fn_info.return_type, target);
- const abi_size = scalar_type.abiSize(target);
+ const scalar_type = wasm_c_abi.scalarType(fn_info.return_type, mod);
+ const abi_size = scalar_type.abiSize(mod);
return dg.context.intType(@intCast(c_uint, abi_size * 8));
},
.aarch64, .aarch64_be => {
- switch (aarch64_c_abi.classifyType(fn_info.return_type, target)) {
+ switch (aarch64_c_abi.classifyType(fn_info.return_type, mod)) {
.memory => return dg.context.voidType(),
.float_array => return dg.lowerType(fn_info.return_type),
.byval => return dg.lowerType(fn_info.return_type),
.integer => {
- const bit_size = fn_info.return_type.bitSize(target);
+ const bit_size = fn_info.return_type.bitSize(mod);
return dg.context.intType(@intCast(c_uint, bit_size));
},
.double_integer => return dg.context.intType(64).arrayType(2),
}
},
.arm, .armeb => {
- switch (arm_c_abi.classifyType(fn_info.return_type, target, .ret)) {
+ switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) {
.memory, .i64_array => return dg.context.voidType(),
.i32_array => |len| if (len == 1) {
return dg.context.intType(32);
@@ -10631,10 +10694,10 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
}
},
.riscv32, .riscv64 => {
- switch (riscv_c_abi.classifyType(fn_info.return_type, target)) {
+ switch (riscv_c_abi.classifyType(fn_info.return_type, mod)) {
.memory => return dg.context.voidType(),
.integer => {
- const bit_size = fn_info.return_type.bitSize(target);
+ const bit_size = fn_info.return_type.bitSize(mod);
return dg.context.intType(@intCast(c_uint, bit_size));
},
.double_integer => {
@@ -10654,7 +10717,7 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
.Win64 => return lowerWin64FnRetTy(dg, fn_info),
.SysV => return lowerSystemVFnRetTy(dg, fn_info),
.Stdcall => {
- if (isScalar(fn_info.return_type)) {
+ if (isScalar(mod, fn_info.return_type)) {
return dg.lowerType(fn_info.return_type);
} else {
return dg.context.voidType();
@@ -10665,13 +10728,13 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
}
fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
- const target = dg.module.getTarget();
- switch (x86_64_abi.classifyWindows(fn_info.return_type, target)) {
+ const mod = dg.module;
+ switch (x86_64_abi.classifyWindows(fn_info.return_type, mod)) {
.integer => {
- if (isScalar(fn_info.return_type)) {
+ if (isScalar(mod, fn_info.return_type)) {
return dg.lowerType(fn_info.return_type);
} else {
- const abi_size = fn_info.return_type.abiSize(target);
+ const abi_size = fn_info.return_type.abiSize(mod);
return dg.context.intType(@intCast(c_uint, abi_size * 8));
}
},
@@ -10683,11 +10746,11 @@ fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.T
}
fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
- if (isScalar(fn_info.return_type)) {
+ const mod = dg.module;
+ if (isScalar(mod, fn_info.return_type)) {
return dg.lowerType(fn_info.return_type);
}
- const target = dg.module.getTarget();
- const classes = x86_64_abi.classifySystemV(fn_info.return_type, target, .ret);
+ const classes = x86_64_abi.classifySystemV(fn_info.return_type, mod, .ret);
if (classes[0] == .memory) {
return dg.context.voidType();
}
@@ -10728,7 +10791,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm
}
}
if (classes[0] == .integer and classes[1] == .none) {
- const abi_size = fn_info.return_type.abiSize(target);
+ const abi_size = fn_info.return_type.abiSize(mod);
return dg.context.intType(@intCast(c_uint, abi_size * 8));
}
return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False);
@@ -10739,7 +10802,6 @@ const ParamTypeIterator = struct {
fn_info: Type.Payload.Function.Data,
zig_index: u32,
llvm_index: u32,
- target: std.Target,
llvm_types_len: u32,
llvm_types_buffer: [8]*llvm.Type,
byval_attr: bool,
@@ -10779,7 +10841,10 @@ const ParamTypeIterator = struct {
}
fn nextInner(it: *ParamTypeIterator, ty: Type) ?Lowering {
- if (!ty.hasRuntimeBitsIgnoreComptime()) {
+ const mod = it.dg.module;
+ const target = mod.getTarget();
+
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
it.zig_index += 1;
return .no_bits;
}
@@ -10788,10 +10853,10 @@ const ParamTypeIterator = struct {
it.zig_index += 1;
it.llvm_index += 1;
var buf: Type.Payload.ElemType = undefined;
- if (ty.isSlice() or (ty.zigTypeTag() == .Optional and ty.optionalChild(&buf).isSlice())) {
+ if (ty.isSlice() or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(&buf).isSlice())) {
it.llvm_index += 1;
return .slice;
- } else if (isByRef(ty)) {
+ } else if (isByRef(ty, mod)) {
return .byref;
} else {
return .byval;
@@ -10801,23 +10866,23 @@ const ParamTypeIterator = struct {
@panic("TODO implement async function lowering in the LLVM backend");
},
.C => {
- switch (it.target.cpu.arch) {
+ switch (target.cpu.arch) {
.mips, .mipsel => {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
},
- .x86_64 => switch (it.target.os.tag) {
+ .x86_64 => switch (target.os.tag) {
.windows => return it.nextWin64(ty),
else => return it.nextSystemV(ty),
},
.wasm32 => {
it.zig_index += 1;
it.llvm_index += 1;
- if (isScalar(ty)) {
+ if (isScalar(mod, ty)) {
return .byval;
}
- const classes = wasm_c_abi.classifyType(ty, it.target);
+ const classes = wasm_c_abi.classifyType(ty, mod);
if (classes[0] == .indirect) {
return .byref;
}
@@ -10826,7 +10891,7 @@ const ParamTypeIterator = struct {
.aarch64, .aarch64_be => {
it.zig_index += 1;
it.llvm_index += 1;
- switch (aarch64_c_abi.classifyType(ty, it.target)) {
+ switch (aarch64_c_abi.classifyType(ty, mod)) {
.memory => return .byref_mut,
.float_array => |len| return Lowering{ .float_array = len },
.byval => return .byval,
@@ -10841,7 +10906,7 @@ const ParamTypeIterator = struct {
.arm, .armeb => {
it.zig_index += 1;
it.llvm_index += 1;
- switch (arm_c_abi.classifyType(ty, it.target, .arg)) {
+ switch (arm_c_abi.classifyType(ty, mod, .arg)) {
.memory => {
it.byval_attr = true;
return .byref;
@@ -10857,7 +10922,7 @@ const ParamTypeIterator = struct {
if (ty.tag() == .f16) {
return .as_u16;
}
- switch (riscv_c_abi.classifyType(ty, it.target)) {
+ switch (riscv_c_abi.classifyType(ty, mod)) {
.memory => return .byref_mut,
.byval => return .byval,
.integer => return .abi_sized_int,
@@ -10878,7 +10943,7 @@ const ParamTypeIterator = struct {
it.zig_index += 1;
it.llvm_index += 1;
- if (isScalar(ty)) {
+ if (isScalar(mod, ty)) {
return .byval;
} else {
it.byval_attr = true;
@@ -10894,9 +10959,10 @@ const ParamTypeIterator = struct {
}
fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering {
- switch (x86_64_abi.classifyWindows(ty, it.target)) {
+ const mod = it.dg.module;
+ switch (x86_64_abi.classifyWindows(ty, mod)) {
.integer => {
- if (isScalar(ty)) {
+ if (isScalar(mod, ty)) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
@@ -10926,14 +10992,15 @@ const ParamTypeIterator = struct {
}
fn nextSystemV(it: *ParamTypeIterator, ty: Type) ?Lowering {
- const classes = x86_64_abi.classifySystemV(ty, it.target, .arg);
+ const mod = it.dg.module;
+ const classes = x86_64_abi.classifySystemV(ty, mod, .arg);
if (classes[0] == .memory) {
it.zig_index += 1;
it.llvm_index += 1;
it.byval_attr = true;
return .byref;
}
- if (isScalar(ty)) {
+ if (isScalar(mod, ty)) {
it.zig_index += 1;
it.llvm_index += 1;
return .byval;
@@ -10992,7 +11059,6 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp
.fn_info = fn_info,
.zig_index = 0,
.llvm_index = 0,
- .target = dg.module.getTarget(),
.llvm_types_buffer = undefined,
.llvm_types_len = 0,
.byval_attr = false,
@@ -11001,16 +11067,17 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp
fn ccAbiPromoteInt(
cc: std.builtin.CallingConvention,
- target: std.Target,
+ mod: *const Module,
ty: Type,
) ?std.builtin.Signedness {
+ const target = mod.getTarget();
switch (cc) {
.Unspecified, .Inline, .Async => return null,
else => {},
}
- const int_info = switch (ty.zigTypeTag()) {
- .Bool => Type.u1.intInfo(target),
- .Int, .Enum, .ErrorSet => ty.intInfo(target),
+ const int_info = switch (ty.zigTypeTag(mod)) {
+ .Bool => Type.u1.intInfo(mod),
+ .Int, .Enum, .ErrorSet => ty.intInfo(mod),
else => return null,
};
if (int_info.bits <= 16) return int_info.signedness;
@@ -11039,12 +11106,12 @@ fn ccAbiPromoteInt(
/// This is the one source of truth for whether a type is passed around as an LLVM pointer,
/// or as an LLVM value.
-fn isByRef(ty: Type) bool {
+fn isByRef(ty: Type, mod: *const Module) bool {
// For tuples and structs, if there are more than this many non-void
// fields, then we make it byref, otherwise byval.
const max_fields_byval = 0;
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Type,
.ComptimeInt,
.ComptimeFloat,
@@ -11067,7 +11134,7 @@ fn isByRef(ty: Type) bool {
.AnyFrame,
=> return false,
- .Array, .Frame => return ty.hasRuntimeBits(),
+ .Array, .Frame => return ty.hasRuntimeBits(mod),
.Struct => {
// Packed structs are represented to LLVM as integers.
if (ty.containerLayout() == .Packed) return false;
@@ -11075,32 +11142,32 @@ fn isByRef(ty: Type) bool {
const tuple = ty.tupleFields();
var count: usize = 0;
for (tuple.values, 0..) |field_val, i| {
- if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits()) continue;
+ if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits(mod)) continue;
count += 1;
if (count > max_fields_byval) return true;
- if (isByRef(tuple.types[i])) return true;
+ if (isByRef(tuple.types[i], mod)) return true;
}
return false;
}
var count: usize = 0;
const fields = ty.structFields();
for (fields.values()) |field| {
- if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
+ if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
count += 1;
if (count > max_fields_byval) return true;
- if (isByRef(field.ty)) return true;
+ if (isByRef(field.ty, mod)) return true;
}
return false;
},
.Union => switch (ty.containerLayout()) {
.Packed => return false,
- else => return ty.hasRuntimeBits(),
+ else => return ty.hasRuntimeBits(mod),
},
.ErrorUnion => {
const payload_ty = ty.errorUnionPayload();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return false;
}
return true;
@@ -11108,10 +11175,10 @@ fn isByRef(ty: Type) bool {
.Optional => {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return false;
}
- if (ty.optionalReprIsPayload()) {
+ if (ty.optionalReprIsPayload(mod)) {
return false;
}
return true;
@@ -11119,8 +11186,8 @@ fn isByRef(ty: Type) bool {
}
}
-fn isScalar(ty: Type) bool {
- return switch (ty.zigTypeTag()) {
+fn isScalar(mod: *const Module, ty: Type) bool {
+ return switch (ty.zigTypeTag(mod)) {
.Void,
.Bool,
.NoReturn,
@@ -11304,12 +11371,12 @@ fn buildAllocaInner(
return alloca;
}
-fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u1 {
- return @boolToInt(Type.anyerror.abiAlignment(target) > payload_ty.abiAlignment(target));
+fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u1 {
+ return @boolToInt(Type.anyerror.abiAlignment(mod) > payload_ty.abiAlignment(mod));
}
-fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u1 {
- return @boolToInt(Type.anyerror.abiAlignment(target) <= payload_ty.abiAlignment(target));
+fn errUnionErrorOffset(payload_ty: Type, mod: *const Module) u1 {
+ return @boolToInt(Type.anyerror.abiAlignment(mod) <= payload_ty.abiAlignment(mod));
}
/// Returns true for asm constraint (e.g. "=*m", "=r") if it accepts a memory location
src/codegen/spirv.zig
@@ -231,9 +231,10 @@ pub const DeclGen = struct {
/// Fetch the result-id for a previously generated instruction or constant.
fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef {
- if (self.air.value(inst)) |val| {
+ const mod = self.module;
+ if (self.air.value(inst, mod)) |val| {
const ty = self.air.typeOf(inst);
- if (ty.zigTypeTag() == .Fn) {
+ if (ty.zigTypeTag(mod) == .Fn) {
const fn_decl_index = switch (val.tag()) {
.extern_fn => val.castTag(.extern_fn).?.data.owner_decl,
.function => val.castTag(.function).?.data.owner_decl,
@@ -340,8 +341,9 @@ pub const DeclGen = struct {
}
fn arithmeticTypeInfo(self: *DeclGen, ty: Type) !ArithmeticTypeInfo {
+ const mod = self.module;
const target = self.getTarget();
- return switch (ty.zigTypeTag()) {
+ return switch (ty.zigTypeTag(mod)) {
.Bool => ArithmeticTypeInfo{
.bits = 1, // Doesn't matter for this class.
.is_vector = false,
@@ -355,7 +357,7 @@ pub const DeclGen = struct {
.class = .float,
},
.Int => blk: {
- const int_info = ty.intInfo(target);
+ const int_info = ty.intInfo(mod);
// TODO: Maybe it's useful to also return this value.
const maybe_backing_bits = self.backingIntBits(int_info.bits);
break :blk ArithmeticTypeInfo{
@@ -533,21 +535,22 @@ pub const DeclGen = struct {
}
fn addInt(self: *@This(), ty: Type, val: Value) !void {
- const target = self.dg.getTarget();
- const int_info = ty.intInfo(target);
+ const mod = self.dg.module;
+ const int_info = ty.intInfo(mod);
const int_bits = switch (int_info.signedness) {
- .signed => @bitCast(u64, val.toSignedInt(target)),
- .unsigned => val.toUnsignedInt(target),
+ .signed => @bitCast(u64, val.toSignedInt(mod)),
+ .unsigned => val.toUnsignedInt(mod),
};
// TODO: Swap endianness if the compiler is big endian.
- const len = ty.abiSize(target);
+ const len = ty.abiSize(mod);
try self.addBytes(std.mem.asBytes(&int_bits)[0..@intCast(usize, len)]);
}
fn addFloat(self: *@This(), ty: Type, val: Value) !void {
+ const mod = self.dg.module;
const target = self.dg.getTarget();
- const len = ty.abiSize(target);
+ const len = ty.abiSize(mod);
// TODO: Swap endianness if the compiler is big endian.
switch (ty.floatBits(target)) {
@@ -607,15 +610,15 @@ pub const DeclGen = struct {
}
fn lower(self: *@This(), ty: Type, val: Value) !void {
- const target = self.dg.getTarget();
const dg = self.dg;
+ const mod = dg.module;
if (val.isUndef()) {
- const size = ty.abiSize(target);
+ const size = ty.abiSize(mod);
return try self.addUndef(size);
}
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Int => try self.addInt(ty, val),
.Float => try self.addFloat(ty, val),
.Bool => try self.addConstBool(val.toBool()),
@@ -644,7 +647,7 @@ pub const DeclGen = struct {
const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
try self.addBytes(bytes);
if (ty.sentinel()) |sentinel| {
- try self.addByte(@intCast(u8, sentinel.toUnsignedInt(target)));
+ try self.addByte(@intCast(u8, sentinel.toUnsignedInt(mod)));
}
},
.bytes => {
@@ -690,13 +693,13 @@ pub const DeclGen = struct {
const struct_begin = self.size;
const field_vals = val.castTag(.aggregate).?.data;
for (struct_ty.fields.values(), 0..) |field, i| {
- if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
+ if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
try self.lower(field.ty, field_vals[i]);
// Add padding if required.
// TODO: Add to type generation as well?
const unpadded_field_end = self.size - struct_begin;
- const padded_field_end = ty.structFieldOffset(i + 1, target);
+ const padded_field_end = ty.structFieldOffset(i + 1, mod);
const padding = padded_field_end - unpadded_field_end;
try self.addUndef(padding);
}
@@ -705,13 +708,13 @@ pub const DeclGen = struct {
.Optional => {
var opt_buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&opt_buf);
- const has_payload = !val.isNull();
- const abi_size = ty.abiSize(target);
+ const has_payload = !val.isNull(mod);
+ const abi_size = ty.abiSize(mod);
- if (!payload_ty.hasRuntimeBits()) {
+ if (!payload_ty.hasRuntimeBits(mod)) {
try self.addConstBool(has_payload);
return;
- } else if (ty.optionalReprIsPayload()) {
+ } else if (ty.optionalReprIsPayload(mod)) {
// Optional representation is a nullable pointer or slice.
if (val.castTag(.opt_payload)) |payload| {
try self.lower(payload_ty, payload.data);
@@ -729,7 +732,7 @@ pub const DeclGen = struct {
// Subtract 1 for @sizeOf(bool).
// TODO: Make this not hardcoded.
- const payload_size = payload_ty.abiSize(target);
+ const payload_size = payload_ty.abiSize(mod);
const padding = abi_size - payload_size - 1;
if (val.castTag(.opt_payload)) |payload| {
@@ -744,14 +747,13 @@ pub const DeclGen = struct {
var int_val_buffer: Value.Payload.U64 = undefined;
const int_val = val.enumToInt(ty, &int_val_buffer);
- var int_ty_buffer: Type.Payload.Bits = undefined;
- const int_ty = ty.intTagType(&int_ty_buffer);
+ const int_ty = ty.intTagType();
try self.lower(int_ty, int_val);
},
.Union => {
const tag_and_val = val.castTag(.@"union").?.data;
- const layout = ty.unionGetLayout(target);
+ const layout = ty.unionGetLayout(mod);
if (layout.payload_size == 0) {
return try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag);
@@ -772,9 +774,9 @@ pub const DeclGen = struct {
try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag);
}
- const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime()) blk: {
+ const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: {
try self.lower(active_field_ty, tag_and_val.val);
- break :blk active_field_ty.abiSize(target);
+ break :blk active_field_ty.abiSize(mod);
} else 0;
const payload_padding_len = layout.payload_size - active_field_size;
@@ -808,9 +810,9 @@ pub const DeclGen = struct {
return try self.lower(Type.anyerror, error_val);
}
- const payload_size = payload_ty.abiSize(target);
- const error_size = Type.anyerror.abiAlignment(target);
- const ty_size = ty.abiSize(target);
+ const payload_size = payload_ty.abiSize(mod);
+ const error_size = Type.anyerror.abiAlignment(mod);
+ const ty_size = ty.abiSize(mod);
const padding = ty_size - payload_size - error_size;
const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef);
@@ -886,7 +888,7 @@ pub const DeclGen = struct {
// .id_result = result_id,
// .storage_class = storage_class,
// });
- // } else if (ty.abiSize(target) == 0) {
+ // } else if (ty.abiSize(mod) == 0) {
// // Special case: if the type has no size, then return an undefined pointer.
// return try section.emit(self.spv.gpa, .OpUndef, .{
// .id_result_type = self.typeId(ptr_ty_ref),
@@ -968,6 +970,7 @@ pub const DeclGen = struct {
/// is then loaded using OpLoad. Such values are loaded into the UniformConstant storage class by default.
/// This function should only be called during function code generation.
fn constant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef {
+ const mod = self.module;
const target = self.getTarget();
const result_ty_ref = try self.resolveType(ty, repr);
@@ -977,12 +980,12 @@ pub const DeclGen = struct {
return self.spv.constUndef(result_ty_ref);
}
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Int => {
- if (ty.isSignedInt()) {
- return try self.spv.constInt(result_ty_ref, val.toSignedInt(target));
+ if (ty.isSignedInt(mod)) {
+ return try self.spv.constInt(result_ty_ref, val.toSignedInt(mod));
} else {
- return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(target));
+ return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(mod));
}
},
.Bool => switch (repr) {
@@ -1037,7 +1040,7 @@ pub const DeclGen = struct {
// The value cannot be generated directly, so generate it as an indirect constant,
// and then perform an OpLoad.
const result_id = self.spv.allocId();
- const alignment = ty.abiAlignment(target);
+ const alignment = ty.abiAlignment(mod);
const spv_decl_index = try self.spv.allocDecl(.global);
try self.lowerIndirectConstant(
@@ -1114,8 +1117,8 @@ pub const DeclGen = struct {
/// NOTE: When the active field is set to something other than the most aligned field, the
/// resulting struct will be *underaligned*.
fn resolveUnionType(self: *DeclGen, ty: Type, maybe_active_field: ?usize) !CacheRef {
- const target = self.getTarget();
- const layout = ty.unionGetLayout(target);
+ const mod = self.module;
+ const layout = ty.unionGetLayout(mod);
const union_ty = ty.cast(Type.Payload.Union).?.data;
if (union_ty.layout == .Packed) {
@@ -1143,11 +1146,11 @@ pub const DeclGen = struct {
const active_field = maybe_active_field orelse layout.most_aligned_field;
const active_field_ty = union_ty.fields.values()[active_field].ty;
- const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime()) blk: {
+ const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: {
const active_payload_ty_ref = try self.resolveType(active_field_ty, .indirect);
member_types.appendAssumeCapacity(active_payload_ty_ref);
member_names.appendAssumeCapacity(try self.spv.resolveString("payload"));
- break :blk active_field_ty.abiSize(target);
+ break :blk active_field_ty.abiSize(mod);
} else 0;
const payload_padding_len = layout.payload_size - active_field_size;
@@ -1177,21 +1180,21 @@ pub const DeclGen = struct {
/// Turn a Zig type into a SPIR-V Type, and return a reference to it.
fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!CacheRef {
+ const mod = self.module;
log.debug("resolveType: ty = {}", .{ty.fmt(self.module)});
const target = self.getTarget();
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Void, .NoReturn => return try self.spv.resolve(.void_type),
.Bool => switch (repr) {
.direct => return try self.spv.resolve(.bool_type),
.indirect => return try self.intType(.unsigned, 1),
},
.Int => {
- const int_info = ty.intInfo(target);
+ const int_info = ty.intInfo(mod);
return try self.intType(int_info.signedness, int_info.bits);
},
.Enum => {
- var buffer: Type.Payload.Bits = undefined;
- const tag_ty = ty.intTagType(&buffer);
+ const tag_ty = ty.intTagType();
return self.resolveType(tag_ty, repr);
},
.Float => {
@@ -1290,7 +1293,7 @@ pub const DeclGen = struct {
var member_index: usize = 0;
for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
- if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
+ if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue;
member_types[member_index] = try self.resolveType(field_ty, .indirect);
member_index += 1;
@@ -1315,7 +1318,7 @@ pub const DeclGen = struct {
var member_index: usize = 0;
for (struct_ty.fields.values(), 0..) |field, i| {
- if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
+ if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
member_types[member_index] = try self.resolveType(field.ty, .indirect);
member_names[member_index] = try self.spv.resolveString(struct_ty.fields.keys()[i]);
@@ -1334,7 +1337,7 @@ pub const DeclGen = struct {
.Optional => {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// Just use a bool.
// Note: Always generate the bool with indirect format, to save on some sanity checks.
// Perform the conversion to a direct bool when the field is extracted.
@@ -1342,7 +1345,7 @@ pub const DeclGen = struct {
}
const payload_ty_ref = try self.resolveType(payload_ty, .indirect);
- if (ty.optionalReprIsPayload()) {
+ if (ty.optionalReprIsPayload(mod)) {
// Optional is actually a pointer or a slice.
return payload_ty_ref;
}
@@ -1445,14 +1448,14 @@ pub const DeclGen = struct {
};
fn errorUnionLayout(self: *DeclGen, payload_ty: Type) ErrorUnionLayout {
- const target = self.getTarget();
+ const mod = self.module;
- const error_align = Type.anyerror.abiAlignment(target);
- const payload_align = payload_ty.abiAlignment(target);
+ const error_align = Type.anyerror.abiAlignment(mod);
+ const payload_align = payload_ty.abiAlignment(mod);
const error_first = error_align > payload_align;
return .{
- .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(),
+ .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod),
.error_first = error_first,
};
}
@@ -1529,14 +1532,15 @@ pub const DeclGen = struct {
}
fn genDecl(self: *DeclGen) !void {
- const decl = self.module.declPtr(self.decl_index);
+ const mod = self.module;
+ const decl = mod.declPtr(self.decl_index);
const spv_decl_index = try self.resolveDecl(self.decl_index);
const decl_id = self.spv.declPtr(spv_decl_index).result_id;
log.debug("genDecl: id = {}, index = {}, name = {s}", .{ decl_id.id, @enumToInt(spv_decl_index), decl.name });
if (decl.val.castTag(.function)) |_| {
- assert(decl.ty.zigTypeTag() == .Fn);
+ assert(decl.ty.zigTypeTag(mod) == .Fn);
const prototype_id = try self.resolveTypeId(decl.ty);
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
.id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()),
@@ -1634,7 +1638,8 @@ pub const DeclGen = struct {
/// Convert representation from indirect (in memory) to direct (in 'register')
/// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct).
fn convertToDirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
- return switch (ty.zigTypeTag()) {
+ const mod = self.module;
+ return switch (ty.zigTypeTag(mod)) {
.Bool => blk: {
const direct_bool_ty_ref = try self.resolveType(ty, .direct);
const indirect_bool_ty_ref = try self.resolveType(ty, .indirect);
@@ -1655,7 +1660,8 @@ pub const DeclGen = struct {
/// Convert representation from direct (in 'register') to indirect (in memory)
/// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect).
fn convertToIndirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
- return switch (ty.zigTypeTag()) {
+ const mod = self.module;
+ return switch (ty.zigTypeTag(mod)) {
.Bool => blk: {
const indirect_bool_ty_ref = try self.resolveType(ty, .indirect);
break :blk self.boolToInt(indirect_bool_ty_ref, operand_id);
@@ -2056,6 +2062,7 @@ pub const DeclGen = struct {
}
fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+ const mod = self.module;
if (self.liveness.isUnused(inst)) return null;
const ty = self.air.typeOfIndex(inst);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -2083,7 +2090,7 @@ pub const DeclGen = struct {
if (elem.isUndef()) {
self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF);
} else {
- const int = elem.toSignedInt(self.getTarget());
+ const int = elem.toSignedInt(mod);
const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len);
self.func.body.writeOperand(spec.LiteralInteger, unsigned);
}
@@ -2189,13 +2196,13 @@ pub const DeclGen = struct {
lhs_id: IdRef,
rhs_id: IdRef,
) !IdRef {
+ const mod = self.module;
var cmp_lhs_id = lhs_id;
var cmp_rhs_id = rhs_id;
const opcode: Opcode = opcode: {
- var int_buffer: Type.Payload.Bits = undefined;
- const op_ty = switch (ty.zigTypeTag()) {
+ const op_ty = switch (ty.zigTypeTag(mod)) {
.Int, .Bool, .Float => ty,
- .Enum => ty.intTagType(&int_buffer),
+ .Enum => ty.intTagType(),
.ErrorSet => Type.u16,
.Pointer => blk: {
// Note that while SPIR-V offers OpPtrEqual and OpPtrNotEqual, they are
@@ -2303,13 +2310,14 @@ pub const DeclGen = struct {
src_ty: Type,
src_id: IdRef,
) !IdRef {
+ const mod = self.module;
const dst_ty_ref = try self.resolveType(dst_ty, .direct);
const result_id = self.spv.allocId();
// TODO: Some more cases are missing here
// See fn bitCast in llvm.zig
- if (src_ty.zigTypeTag() == .Int and dst_ty.isPtrAtRuntime()) {
+ if (src_ty.zigTypeTag(mod) == .Int and dst_ty.isPtrAtRuntime(mod)) {
try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
.id_result_type = self.typeId(dst_ty_ref),
.id_result = result_id,
@@ -2342,8 +2350,8 @@ pub const DeclGen = struct {
const dest_ty = self.air.typeOfIndex(inst);
const dest_ty_id = try self.resolveTypeId(dest_ty);
- const target = self.getTarget();
- const dest_info = dest_ty.intInfo(target);
+ const mod = self.module;
+ const dest_info = dest_ty.intInfo(mod);
// TODO: Masking?
@@ -2485,8 +2493,9 @@ pub const DeclGen = struct {
}
fn ptrElemPtr(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef {
+ const mod = self.module;
// Construct new pointer type for the resulting pointer
- const elem_ty = ptr_ty.elemType2(); // use elemType() so that we get T for *[N]T.
+ const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T.
const elem_ty_ref = try self.resolveType(elem_ty, .direct);
const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace()));
if (ptr_ty.isSinglePointer()) {
@@ -2502,12 +2511,13 @@ pub const DeclGen = struct {
fn airPtrElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
+ const mod = self.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = self.air.typeOf(bin_op.lhs);
const elem_ty = ptr_ty.childType();
// TODO: Make this return a null ptr or something
- if (!elem_ty.hasRuntimeBitsIgnoreComptime()) return null;
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
const ptr_id = try self.resolve(bin_op.lhs);
const index_id = try self.resolve(bin_op.rhs);
@@ -2536,8 +2546,8 @@ pub const DeclGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const un_ty = self.air.typeOf(ty_op.operand);
- const target = self.module.getTarget();
- const layout = un_ty.unionGetLayout(target);
+ const mod = self.module;
+ const layout = un_ty.unionGetLayout(mod);
if (layout.tag_size == 0) return null;
const union_handle = try self.resolve(ty_op.operand);
@@ -2551,6 +2561,7 @@ pub const DeclGen = struct {
fn airStructFieldVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
+ const mod = self.module;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -2559,9 +2570,9 @@ pub const DeclGen = struct {
const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index);
- if (!field_ty.hasRuntimeBitsIgnoreComptime()) return null;
+ if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
- assert(struct_ty.zigTypeTag() == .Struct); // Cannot do unions yet.
+ assert(struct_ty.zigTypeTag(mod) == .Struct); // Cannot do unions yet.
return try self.extractField(field_ty, object_id, field_index);
}
@@ -2573,8 +2584,9 @@ pub const DeclGen = struct {
object_ptr: IdRef,
field_index: u32,
) !?IdRef {
+ const mod = self.module;
const object_ty = object_ptr_ty.childType();
- switch (object_ty.zigTypeTag()) {
+ switch (object_ty.zigTypeTag(mod)) {
.Struct => switch (object_ty.containerLayout()) {
.Packed => unreachable, // TODO
else => {
@@ -2667,6 +2679,7 @@ pub const DeclGen = struct {
// the current block by first generating the code of the block, then a label, and then generate the rest of the current
// ir.Block in a different SPIR-V block.
+ const mod = self.module;
const label_id = self.spv.allocId();
// 4 chosen as arbitrary initial capacity.
@@ -2690,7 +2703,7 @@ pub const DeclGen = struct {
try self.beginSpvBlock(label_id);
// If this block didn't produce a value, simply return here.
- if (!ty.hasRuntimeBitsIgnoreComptime())
+ if (!ty.hasRuntimeBitsIgnoreComptime(mod))
return null;
// Combine the result from the blocks using the Phi instruction.
@@ -2716,7 +2729,8 @@ pub const DeclGen = struct {
const block = self.blocks.get(br.block_inst).?;
const operand_ty = self.air.typeOf(br.operand);
- if (operand_ty.hasRuntimeBits()) {
+ const mod = self.module;
+ if (operand_ty.hasRuntimeBits(mod)) {
const operand_id = try self.resolve(br.operand);
// current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body.
try block.incoming_blocks.append(self.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
@@ -2771,13 +2785,14 @@ pub const DeclGen = struct {
}
fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const mod = self.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.air.typeOf(bin_op.lhs);
const ptr = try self.resolve(bin_op.lhs);
const value = try self.resolve(bin_op.rhs);
const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
- const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
+ const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false;
if (val_is_undef) {
const undef = try self.spv.constUndef(ptr_ty_ref);
try self.store(ptr_ty, ptr, undef);
@@ -2805,7 +2820,8 @@ pub const DeclGen = struct {
fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
const operand = self.air.instructions.items(.data)[inst].un_op;
const operand_ty = self.air.typeOf(operand);
- if (operand_ty.hasRuntimeBits()) {
+ const mod = self.module;
+ if (operand_ty.hasRuntimeBits(mod)) {
const operand_id = try self.resolve(operand);
try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id });
} else {
@@ -2814,11 +2830,12 @@ pub const DeclGen = struct {
}
fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const mod = self.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ptr_ty = self.air.typeOf(un_op);
const ret_ty = ptr_ty.childType();
- if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try self.func.body.emit(self.spv.gpa, .OpReturn, {});
return;
}
@@ -2946,6 +2963,7 @@ pub const DeclGen = struct {
fn airIsNull(self: *DeclGen, inst: Air.Inst.Index, pred: enum { is_null, is_non_null }) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
+ const mod = self.module;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand_id = try self.resolve(un_op);
const optional_ty = self.air.typeOf(un_op);
@@ -2955,7 +2973,7 @@ pub const DeclGen = struct {
const bool_ty_ref = try self.resolveType(Type.bool, .direct);
- if (optional_ty.optionalReprIsPayload()) {
+ if (optional_ty.optionalReprIsPayload(mod)) {
// Pointer payload represents nullability: pointer or slice.
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
@@ -2985,7 +3003,7 @@ pub const DeclGen = struct {
return result_id;
}
- const is_non_null_id = if (optional_ty.hasRuntimeBitsIgnoreComptime())
+ const is_non_null_id = if (optional_ty.hasRuntimeBitsIgnoreComptime(mod))
try self.extractField(Type.bool, operand_id, 1)
else
// Optional representation is bool indicating whether the optional is set
@@ -3009,14 +3027,15 @@ pub const DeclGen = struct {
fn airUnwrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
+ const mod = self.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const optional_ty = self.air.typeOf(ty_op.operand);
const payload_ty = self.air.typeOfIndex(inst);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return null;
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
- if (optional_ty.optionalReprIsPayload()) {
+ if (optional_ty.optionalReprIsPayload(mod)) {
return operand_id;
}
@@ -3026,16 +3045,17 @@ pub const DeclGen = struct {
fn airWrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
+ const mod = self.module;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const payload_ty = self.air.typeOf(ty_op.operand);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return try self.constBool(true, .direct);
}
const operand_id = try self.resolve(ty_op.operand);
const optional_ty = self.air.typeOfIndex(inst);
- if (optional_ty.optionalReprIsPayload()) {
+ if (optional_ty.optionalReprIsPayload(mod)) {
return operand_id;
}
@@ -3045,30 +3065,29 @@ pub const DeclGen = struct {
}
fn airSwitchBr(self: *DeclGen, inst: Air.Inst.Index) !void {
- const target = self.getTarget();
+ const mod = self.module;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const cond = try self.resolve(pl_op.operand);
const cond_ty = self.air.typeOf(pl_op.operand);
const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
- const cond_words: u32 = switch (cond_ty.zigTypeTag()) {
+ const cond_words: u32 = switch (cond_ty.zigTypeTag(mod)) {
.Int => blk: {
- const bits = cond_ty.intInfo(target).bits;
+ const bits = cond_ty.intInfo(mod).bits;
const backing_bits = self.backingIntBits(bits) orelse {
return self.todo("implement composite int switch", .{});
};
break :blk if (backing_bits <= 32) @as(u32, 1) else 2;
},
.Enum => blk: {
- var buffer: Type.Payload.Bits = undefined;
- const int_ty = cond_ty.intTagType(&buffer);
- const int_info = int_ty.intInfo(target);
+ const int_ty = cond_ty.intTagType();
+ const int_info = int_ty.intInfo(mod);
const backing_bits = self.backingIntBits(int_info.bits) orelse {
return self.todo("implement composite int switch", .{});
};
break :blk if (backing_bits <= 32) @as(u32, 1) else 2;
},
- else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag())}), // TODO: Figure out which types apply here, and work around them as we can only do integers.
+ else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(mod))}), // TODO: Figure out which types apply here, and work around them as we can only do integers.
};
const num_cases = switch_br.data.cases_len;
@@ -3112,15 +3131,15 @@ pub const DeclGen = struct {
const label = IdRef{ .id = first_case_label.id + case_i };
for (items) |item| {
- const value = self.air.value(item) orelse {
+ const value = self.air.value(item, mod) orelse {
return self.todo("switch on runtime value???", .{});
};
- const int_val = switch (cond_ty.zigTypeTag()) {
- .Int => if (cond_ty.isSignedInt()) @bitCast(u64, value.toSignedInt(target)) else value.toUnsignedInt(target),
+ const int_val = switch (cond_ty.zigTypeTag(mod)) {
+ .Int => if (cond_ty.isSignedInt(mod)) @bitCast(u64, value.toSignedInt(mod)) else value.toUnsignedInt(mod),
.Enum => blk: {
var int_buffer: Value.Payload.U64 = undefined;
// TODO: figure out if cond_ty is correct (something with enum literals)
- break :blk value.enumToInt(cond_ty, &int_buffer).toUnsignedInt(target); // TODO: composite integer constants
+ break :blk value.enumToInt(cond_ty, &int_buffer).toUnsignedInt(mod); // TODO: composite integer constants
},
else => unreachable,
};
@@ -3294,11 +3313,12 @@ pub const DeclGen = struct {
fn airCall(self: *DeclGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !?IdRef {
_ = modifier;
+ const mod = self.module;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
const callee_ty = self.air.typeOf(pl_op.operand);
- const zig_fn_ty = switch (callee_ty.zigTypeTag()) {
+ const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
.Fn => callee_ty,
.Pointer => return self.fail("cannot call function pointers", .{}),
else => unreachable,
@@ -3320,7 +3340,7 @@ pub const DeclGen = struct {
// temporary params buffer.
const arg_id = try self.resolve(arg);
const arg_ty = self.air.typeOf(arg);
- if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
params[n_params] = arg_id;
n_params += 1;
@@ -3337,7 +3357,7 @@ pub const DeclGen = struct {
try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
}
- if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime()) {
+ if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) {
return null;
}
src/link/Coff.zig
@@ -1123,7 +1123,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
},
};
- const required_alignment = tv.ty.abiAlignment(self.base.options.target);
+ const required_alignment = tv.ty.abiAlignment(mod);
const atom = self.getAtomPtr(atom_index);
atom.size = @intCast(u32, code.len);
atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, required_alignment);
@@ -1299,7 +1299,8 @@ pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: Module.Decl.Index) !Atom.
fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 {
const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
- const zig_ty = ty.zigTypeTag();
+ const mod = self.base.options.module.?;
+ const zig_ty = ty.zigTypeTag(mod);
const val = decl.val;
const index: u16 = blk: {
if (val.isUndefDeep()) {
@@ -1330,7 +1331,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple
defer gpa.free(decl_name);
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
- const required_alignment = decl.getAlignment(self.base.options.target);
+ const required_alignment = decl.getAlignment(mod);
const decl_metadata = self.decls.get(decl_index).?;
const atom_index = decl_metadata.atom;
src/link/Dwarf.zig
@@ -169,16 +169,16 @@ pub const DeclState = struct {
fn addDbgInfoType(
self: *DeclState,
- module: *Module,
+ mod: *Module,
atom_index: Atom.Index,
ty: Type,
) error{OutOfMemory}!void {
const arena = self.abbrev_type_arena.allocator();
const dbg_info_buffer = &self.dbg_info;
- const target = module.getTarget();
+ const target = mod.getTarget();
const target_endian = target.cpu.arch.endian();
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.NoReturn => unreachable,
.Void => {
try dbg_info_buffer.append(@enumToInt(AbbrevKind.pad1));
@@ -189,12 +189,12 @@ pub const DeclState = struct {
// DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(DW.ATE.boolean);
// DW.AT.byte_size, DW.FORM.udata
- try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
+ try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
// DW.AT.name, DW.FORM.string
- try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
+ try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
},
.Int => {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
try dbg_info_buffer.ensureUnusedCapacity(12);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.base_type));
// DW.AT.encoding, DW.FORM.data1
@@ -203,20 +203,20 @@ pub const DeclState = struct {
.unsigned => DW.ATE.unsigned,
});
// DW.AT.byte_size, DW.FORM.udata
- try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
+ try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
// DW.AT.name, DW.FORM.string
- try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
+ try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
},
.Optional => {
- if (ty.isPtrLikeOptional()) {
+ if (ty.isPtrLikeOptional(mod)) {
try dbg_info_buffer.ensureUnusedCapacity(12);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.base_type));
// DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(DW.ATE.address);
// DW.AT.byte_size, DW.FORM.udata
- try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
+ try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
// DW.AT.name, DW.FORM.string
- try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
+ try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
} else {
// Non-pointer optionals are structs: struct { .maybe = *, .val = * }
var buf = try arena.create(Type.Payload.ElemType);
@@ -224,10 +224,10 @@ pub const DeclState = struct {
// DW.AT.structure_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type));
// DW.AT.byte_size, DW.FORM.udata
- const abi_size = ty.abiSize(target);
+ const abi_size = ty.abiSize(mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
- try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
+ try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(7);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member));
@@ -251,7 +251,7 @@ pub const DeclState = struct {
try dbg_info_buffer.resize(index + 4);
try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.udata
- const offset = abi_size - payload_ty.abiSize(target);
+ const offset = abi_size - payload_ty.abiSize(mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), offset);
// DW.AT.structure_type delimit children
try dbg_info_buffer.append(0);
@@ -266,9 +266,9 @@ pub const DeclState = struct {
try dbg_info_buffer.ensureUnusedCapacity(2);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_type));
// DW.AT.byte_size, DW.FORM.udata
- try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
+ try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
// DW.AT.name, DW.FORM.string
- try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
+ try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(5);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member));
@@ -311,7 +311,7 @@ pub const DeclState = struct {
// DW.AT.array_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_type));
// DW.AT.name, DW.FORM.string
- try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
+ try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
@@ -332,12 +332,12 @@ pub const DeclState = struct {
// DW.AT.structure_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type));
// DW.AT.byte_size, DW.FORM.udata
- try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
+ try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
switch (ty.tag()) {
.tuple, .anon_struct => {
// DW.AT.name, DW.FORM.string
- try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
+ try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
const fields = ty.tupleFields();
for (fields.types, 0..) |field, field_index| {
@@ -350,13 +350,13 @@ pub const DeclState = struct {
try dbg_info_buffer.resize(index + 4);
try self.addTypeRelocGlobal(atom_index, field, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.udata
- const field_off = ty.structFieldOffset(field_index, target);
+ const field_off = ty.structFieldOffset(field_index, mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
},
else => {
// DW.AT.name, DW.FORM.string
- const struct_name = try ty.nameAllocArena(arena, module);
+ const struct_name = try ty.nameAllocArena(arena, mod);
try dbg_info_buffer.ensureUnusedCapacity(struct_name.len + 1);
dbg_info_buffer.appendSliceAssumeCapacity(struct_name);
dbg_info_buffer.appendAssumeCapacity(0);
@@ -370,7 +370,7 @@ pub const DeclState = struct {
const fields = ty.structFields();
for (fields.keys(), 0..) |field_name, field_index| {
const field = fields.get(field_name).?;
- if (!field.ty.hasRuntimeBits()) continue;
+ if (!field.ty.hasRuntimeBits(mod)) continue;
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member));
@@ -382,7 +382,7 @@ pub const DeclState = struct {
try dbg_info_buffer.resize(index + 4);
try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.udata
- const field_off = ty.structFieldOffset(field_index, target);
+ const field_off = ty.structFieldOffset(field_index, mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
},
@@ -395,9 +395,9 @@ pub const DeclState = struct {
// DW.AT.enumeration_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.enum_type));
// DW.AT.byte_size, DW.FORM.udata
- try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
+ try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
// DW.AT.name, DW.FORM.string
- const enum_name = try ty.nameAllocArena(arena, module);
+ const enum_name = try ty.nameAllocArena(arena, mod);
try dbg_info_buffer.ensureUnusedCapacity(enum_name.len + 1);
dbg_info_buffer.appendSliceAssumeCapacity(enum_name);
dbg_info_buffer.appendAssumeCapacity(0);
@@ -424,7 +424,7 @@ pub const DeclState = struct {
// See https://github.com/ziglang/zig/issues/645
var int_buffer: Value.Payload.U64 = undefined;
const field_int_val = value.enumToInt(ty, &int_buffer);
- break :value @bitCast(u64, field_int_val.toSignedInt(target));
+ break :value @bitCast(u64, field_int_val.toSignedInt(mod));
} else @intCast(u64, field_i);
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian);
}
@@ -433,12 +433,12 @@ pub const DeclState = struct {
try dbg_info_buffer.append(0);
},
.Union => {
- const layout = ty.unionGetLayout(target);
+ const layout = ty.unionGetLayout(mod);
const union_obj = ty.cast(Type.Payload.Union).?.data;
const payload_offset = if (layout.tag_align >= layout.payload_align) layout.tag_size else 0;
const tag_offset = if (layout.tag_align >= layout.payload_align) 0 else layout.payload_size;
const is_tagged = layout.tag_size > 0;
- const union_name = try ty.nameAllocArena(arena, module);
+ const union_name = try ty.nameAllocArena(arena, mod);
// TODO this is temporary to match current state of unions in Zig - we don't yet have
// safety checks implemented meaning the implicit tag is not yet stored and generated
@@ -481,7 +481,7 @@ pub const DeclState = struct {
const fields = ty.unionFields();
for (fields.keys()) |field_name| {
const field = fields.get(field_name).?;
- if (!field.ty.hasRuntimeBits()) continue;
+ if (!field.ty.hasRuntimeBits(mod)) continue;
// DW.AT.member
try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member));
// DW.AT.name, DW.FORM.string
@@ -517,7 +517,7 @@ pub const DeclState = struct {
.ErrorSet => {
try addDbgInfoErrorSet(
self.abbrev_type_arena.allocator(),
- module,
+ mod,
ty,
target,
&self.dbg_info,
@@ -526,18 +526,18 @@ pub const DeclState = struct {
.ErrorUnion => {
const error_ty = ty.errorUnionSet();
const payload_ty = ty.errorUnionPayload();
- const payload_align = if (payload_ty.isNoReturn()) 0 else payload_ty.abiAlignment(target);
- const error_align = Type.anyerror.abiAlignment(target);
- const abi_size = ty.abiSize(target);
- const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(target) else 0;
- const error_off = if (error_align >= payload_align) 0 else payload_ty.abiSize(target);
+ const payload_align = if (payload_ty.isNoReturn()) 0 else payload_ty.abiAlignment(mod);
+ const error_align = Type.anyerror.abiAlignment(mod);
+ const abi_size = ty.abiSize(mod);
+ const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(mod) else 0;
+ const error_off = if (error_align >= payload_align) 0 else payload_ty.abiSize(mod);
// DW.AT.structure_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type));
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
- const name = try ty.nameAllocArena(arena, module);
+ const name = try ty.nameAllocArena(arena, mod);
try dbg_info_buffer.writer().print("{s}\x00", .{name});
if (!payload_ty.isNoReturn()) {
@@ -685,7 +685,8 @@ pub const DeclState = struct {
const atom_index = self.di_atom_decls.get(owner_decl).?;
const name_with_null = name.ptr[0 .. name.len + 1];
try dbg_info.append(@enumToInt(AbbrevKind.variable));
- const target = self.mod.getTarget();
+ const mod = self.mod;
+ const target = mod.getTarget();
const endian = target.cpu.arch.endian();
const child_ty = if (is_ptr) ty.childType() else ty;
@@ -790,9 +791,9 @@ pub const DeclState = struct {
const fixup = dbg_info.items.len;
dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
1,
- if (child_ty.isSignedInt()) DW.OP.consts else DW.OP.constu,
+ if (child_ty.isSignedInt(mod)) DW.OP.consts else DW.OP.constu,
});
- if (child_ty.isSignedInt()) {
+ if (child_ty.isSignedInt(mod)) {
try leb128.writeILEB128(dbg_info.writer(), @bitCast(i64, x));
} else {
try leb128.writeULEB128(dbg_info.writer(), x);
@@ -805,7 +806,7 @@ pub const DeclState = struct {
// DW.AT.location, DW.FORM.exprloc
// uleb128(exprloc_len)
// DW.OP.implicit_value uleb128(len_of_bytes) bytes
- const abi_size = @intCast(u32, child_ty.abiSize(target));
+ const abi_size = @intCast(u32, child_ty.abiSize(mod));
var implicit_value_len = std.ArrayList(u8).init(self.gpa);
defer implicit_value_len.deinit();
try leb128.writeULEB128(implicit_value_len.writer(), abi_size);
@@ -979,7 +980,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
assert(decl.has_tv);
- switch (decl.ty.zigTypeTag()) {
+ switch (decl.ty.zigTypeTag(mod)) {
.Fn => {
_ = try self.getOrCreateAtomForDecl(.src_fn, decl_index);
@@ -1027,7 +1028,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len);
const fn_ret_type = decl.ty.fnReturnType();
- const fn_ret_has_bits = fn_ret_type.hasRuntimeBits();
+ const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(mod);
if (fn_ret_has_bits) {
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.subprogram));
} else {
@@ -1059,7 +1060,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
pub fn commitDeclState(
self: *Dwarf,
- module: *Module,
+ mod: *Module,
decl_index: Module.Decl.Index,
sym_addr: u64,
sym_size: u64,
@@ -1071,12 +1072,12 @@ pub fn commitDeclState(
const gpa = self.allocator;
var dbg_line_buffer = &decl_state.dbg_line;
var dbg_info_buffer = &decl_state.dbg_info;
- const decl = module.declPtr(decl_index);
+ const decl = mod.declPtr(decl_index);
const target_endian = self.target.cpu.arch.endian();
assert(decl.has_tv);
- switch (decl.ty.zigTypeTag()) {
+ switch (decl.ty.zigTypeTag(mod)) {
.Fn => {
// Since the Decl is a function, we need to update the .debug_line program.
// Perform the relocations based on vaddr.
@@ -1283,7 +1284,7 @@ pub fn commitDeclState(
if (deferred) continue;
symbol.offset = @intCast(u32, dbg_info_buffer.items.len);
- try decl_state.addDbgInfoType(module, di_atom_index, ty);
+ try decl_state.addDbgInfoType(mod, di_atom_index, ty);
}
}
@@ -1319,7 +1320,7 @@ pub fn commitDeclState(
reloc.offset,
value,
target,
- ty.fmt(module),
+ ty.fmt(mod),
});
mem.writeInt(
u32,
@@ -2663,7 +2664,7 @@ fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct {
fn addDbgInfoErrorSet(
arena: Allocator,
- module: *Module,
+ mod: *Module,
ty: Type,
target: std.Target,
dbg_info_buffer: *std.ArrayList(u8),
@@ -2673,10 +2674,10 @@ fn addDbgInfoErrorSet(
// DW.AT.enumeration_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.enum_type));
// DW.AT.byte_size, DW.FORM.udata
- const abi_size = Type.anyerror.abiSize(target);
+ const abi_size = Type.anyerror.abiSize(mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
- const name = try ty.nameAllocArena(arena, module);
+ const name = try ty.nameAllocArena(arena, mod);
try dbg_info_buffer.writer().print("{s}\x00", .{name});
// DW.AT.enumerator
@@ -2691,7 +2692,7 @@ fn addDbgInfoErrorSet(
const error_names = ty.errorSetNames();
for (error_names) |error_name| {
- const kv = module.getErrorValue(error_name) catch unreachable;
+ const kv = mod.getErrorValue(error_name) catch unreachable;
// DW.AT.enumerator
try dbg_info_buffer.ensureUnusedCapacity(error_name.len + 2 + @sizeOf(u64));
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.enum_variant));
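Aside: most of the `.debug_info` entries emitted above follow the same shape, a ULEB128-encoded byte size followed by a NUL-terminated name. A small self-contained sketch of that encoding (the function name is illustrative, not from this commit):

```zig
const std = @import("std");

// Emit the DW.AT.byte_size (ULEB128) / DW.AT.name (NUL-terminated string)
// pair used by the base-type entries above.
fn emitTypeHeader(writer: anytype, abi_size: u64, name: []const u8) !void {
    try std.leb.writeULEB128(writer, abi_size);
    try writer.print("{s}\x00", .{name});
}

test "emit type header" {
    var buf = std.ArrayList(u8).init(std.testing.allocator);
    defer buf.deinit();
    try emitTypeHeader(buf.writer(), 8, "u64");
    try std.testing.expectEqualSlices(u8, "\x08u64\x00", buf.items);
}
```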
src/link/Elf.zig
@@ -2449,9 +2449,10 @@ pub fn getOrCreateAtomForDecl(self: *Elf, decl_index: Module.Decl.Index) !Atom.I
}
fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 {
- const decl = self.base.options.module.?.declPtr(decl_index);
+ const mod = self.base.options.module.?;
+ const decl = mod.declPtr(decl_index);
const ty = decl.ty;
- const zig_ty = ty.zigTypeTag();
+ const zig_ty = ty.zigTypeTag(mod);
const val = decl.val;
const shdr_index: u16 = blk: {
if (val.isUndefDeep()) {
@@ -2482,7 +2483,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
defer self.base.allocator.free(decl_name);
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
- const required_alignment = decl.getAlignment(self.base.options.target);
+ const required_alignment = decl.getAlignment(mod);
const decl_metadata = self.decls.get(decl_index).?;
const atom_index = decl_metadata.atom;
@@ -2826,7 +2827,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
},
};
- const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
+ const required_alignment = typed_value.ty.abiAlignment(mod);
const shdr_index = self.rodata_section_index.?;
const phdr_index = self.sections.items(.phdr_index)[shdr_index];
const local_sym = self.getAtom(atom_index).getSymbolPtr(self);
src/link/MachO.zig
@@ -1948,7 +1948,8 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
},
};
- const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
+ const mod = self.base.options.module.?;
+ const required_alignment = typed_value.ty.abiAlignment(mod);
const atom = self.getAtomPtr(atom_index);
atom.size = code.len;
// TODO: work out logic for disambiguating functions from function pointers
@@ -2152,6 +2153,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol) !Atom.In
}
fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void {
+ const mod = self.base.options.module.?;
// Lowering a TLV on macOS involves two stages:
// 1. first we lower the initializer into the appropriate section (__thread_data or __thread_bss)
// 2. next, we create a corresponding threadlocal variable descriptor in __thread_vars
@@ -2202,7 +2204,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D
},
};
- const required_alignment = decl.getAlignment(self.base.options.target);
+ const required_alignment = decl.getAlignment(mod);
const decl_name = try decl.getFullyQualifiedName(module);
defer gpa.free(decl_name);
@@ -2262,7 +2264,8 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 {
const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const val = decl.val;
- const zig_ty = ty.zigTypeTag();
+ const mod = self.base.options.module.?;
+ const zig_ty = ty.zigTypeTag(mod);
const mode = self.base.options.optimize_mode;
const single_threaded = self.base.options.single_threaded;
const sect_id: u8 = blk: {
@@ -2301,7 +2304,7 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
- const required_alignment = decl.getAlignment(self.base.options.target);
+ const required_alignment = decl.getAlignment(mod);
const decl_name = try decl.getFullyQualifiedName(mod);
defer gpa.free(decl_name);
src/link/Plan9.zig
@@ -432,8 +432,9 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
}
/// called at the end of update{Decl,Func}
fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void {
- const decl = self.base.options.module.?.declPtr(decl_index);
- const is_fn = (decl.ty.zigTypeTag() == .Fn);
+ const mod = self.base.options.module.?;
+ const decl = mod.declPtr(decl_index);
+ const is_fn = (decl.ty.zigTypeTag(mod) == .Fn);
log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name });
const sym_t: aout.Sym.Type = if (is_fn) .t else .d;
@@ -704,7 +705,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
log.debug("relocating the address of '{s}' + {d} into '{s}' + {d}", .{ target_decl.name, addend, source_decl.name, offset });
const code = blk: {
- const is_fn = source_decl.ty.zigTypeTag() == .Fn;
+ const is_fn = source_decl.ty.zigTypeTag(mod) == .Fn;
if (is_fn) {
const table = self.fn_decl_table.get(source_decl.getFileScope()).?.functions;
const output = table.get(source_decl_index).?;
@@ -1031,7 +1032,7 @@ pub fn getDeclVAddr(
) !u64 {
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
- if (decl.ty.zigTypeTag() == .Fn) {
+ if (decl.ty.zigTypeTag(mod) == .Fn) {
var start = self.bases.text;
var it_file = self.fn_decl_table.iterator();
while (it_file.next()) |fentry| {
src/link/Wasm.zig
@@ -1473,7 +1473,7 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8
atom.size = @intCast(u32, code.len);
if (code.len == 0) return;
- atom.alignment = decl.ty.abiAlignment(wasm.base.options.target);
+ atom.alignment = decl.ty.abiAlignment(mod);
}
/// From a given symbol location, returns its `wasm.GlobalType`.
@@ -1523,9 +1523,8 @@ fn getFunctionSignature(wasm: *const Wasm, loc: SymbolLoc) std.wasm.Type {
/// Returns the symbol index of the local
/// The given `decl` is the parent decl whom owns the constant.
pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.Index) !u32 {
- assert(tv.ty.zigTypeTag() != .Fn); // cannot create local symbols for functions
-
const mod = wasm.base.options.module.?;
+ assert(tv.ty.zigTypeTag(mod) != .Fn); // cannot create local symbols for functions
const decl = mod.declPtr(decl_index);
// Create and initialize a new local symbol and atom
@@ -1543,7 +1542,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In
const code = code: {
const atom = wasm.getAtomPtr(atom_index);
- atom.alignment = tv.ty.abiAlignment(wasm.base.options.target);
+ atom.alignment = tv.ty.abiAlignment(mod);
wasm.symbols.items[atom.sym_index] = .{
.name = try wasm.string_table.put(wasm.base.allocator, name),
.flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
@@ -1632,7 +1631,7 @@ pub fn getDeclVAddr(
const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?;
const atom = wasm.getAtomPtr(atom_index);
const is_wasm32 = wasm.base.options.target.cpu.arch == .wasm32;
- if (decl.ty.zigTypeTag() == .Fn) {
+ if (decl.ty.zigTypeTag(mod) == .Fn) {
assert(reloc_info.addend == 0); // addend not allowed for function relocations
// We found a function pointer, so add it to our table,
// as function pointers are not allowed to be stored inside the data section.
@@ -2933,7 +2932,8 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
const atom_index = try wasm.createAtom();
const atom = wasm.getAtomPtr(atom_index);
const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
- atom.alignment = slice_ty.abiAlignment(wasm.base.options.target);
+ const mod = wasm.base.options.module.?;
+ atom.alignment = slice_ty.abiAlignment(mod);
const sym_index = atom.sym_index;
const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_name_table");
@@ -3000,7 +3000,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
.offset = offset,
.addend = @intCast(i32, addend),
});
- atom.size += @intCast(u32, slice_ty.abiSize(wasm.base.options.target));
+ atom.size += @intCast(u32, slice_ty.abiSize(mod));
addend += len;
// as we updated the error name table, we now store the actual name within the names atom
@@ -3369,7 +3369,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
if (decl.isExtern()) continue;
const atom_index = entry.value_ptr.*;
const atom = wasm.getAtomPtr(atom_index);
- if (decl.ty.zigTypeTag() == .Fn) {
+ if (decl.ty.zigTypeTag(mod) == .Fn) {
try wasm.parseAtom(atom_index, .function);
} else if (decl.getVariable()) |variable| {
if (!variable.is_mutable) {
src/Air.zig
@@ -5,10 +5,12 @@
const std = @import("std");
const builtin = @import("builtin");
-const Value = @import("value.zig").Value;
-const Type = @import("type.zig").Type;
const assert = std.debug.assert;
+
const Air = @This();
+const Value = @import("value.zig").Value;
+const Type = @import("type.zig").Type;
+const InternPool = @import("InternPool.zig");
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
@@ -837,7 +839,88 @@ pub const Inst = struct {
/// The position of an AIR instruction within the `Air` instructions array.
pub const Index = u32;
- pub const Ref = @import("Zir.zig").Inst.Ref;
+ pub const Ref = enum(u32) {
+ u1_type = @enumToInt(InternPool.Index.u1_type),
+ u8_type = @enumToInt(InternPool.Index.u8_type),
+ i8_type = @enumToInt(InternPool.Index.i8_type),
+ u16_type = @enumToInt(InternPool.Index.u16_type),
+ i16_type = @enumToInt(InternPool.Index.i16_type),
+ u29_type = @enumToInt(InternPool.Index.u29_type),
+ u32_type = @enumToInt(InternPool.Index.u32_type),
+ i32_type = @enumToInt(InternPool.Index.i32_type),
+ u64_type = @enumToInt(InternPool.Index.u64_type),
+ i64_type = @enumToInt(InternPool.Index.i64_type),
+ u80_type = @enumToInt(InternPool.Index.u80_type),
+ u128_type = @enumToInt(InternPool.Index.u128_type),
+ i128_type = @enumToInt(InternPool.Index.i128_type),
+ usize_type = @enumToInt(InternPool.Index.usize_type),
+ isize_type = @enumToInt(InternPool.Index.isize_type),
+ c_char_type = @enumToInt(InternPool.Index.c_char_type),
+ c_short_type = @enumToInt(InternPool.Index.c_short_type),
+ c_ushort_type = @enumToInt(InternPool.Index.c_ushort_type),
+ c_int_type = @enumToInt(InternPool.Index.c_int_type),
+ c_uint_type = @enumToInt(InternPool.Index.c_uint_type),
+ c_long_type = @enumToInt(InternPool.Index.c_long_type),
+ c_ulong_type = @enumToInt(InternPool.Index.c_ulong_type),
+ c_longlong_type = @enumToInt(InternPool.Index.c_longlong_type),
+ c_ulonglong_type = @enumToInt(InternPool.Index.c_ulonglong_type),
+ c_longdouble_type = @enumToInt(InternPool.Index.c_longdouble_type),
+ f16_type = @enumToInt(InternPool.Index.f16_type),
+ f32_type = @enumToInt(InternPool.Index.f32_type),
+ f64_type = @enumToInt(InternPool.Index.f64_type),
+ f80_type = @enumToInt(InternPool.Index.f80_type),
+ f128_type = @enumToInt(InternPool.Index.f128_type),
+ anyopaque_type = @enumToInt(InternPool.Index.anyopaque_type),
+ bool_type = @enumToInt(InternPool.Index.bool_type),
+ void_type = @enumToInt(InternPool.Index.void_type),
+ type_type = @enumToInt(InternPool.Index.type_type),
+ anyerror_type = @enumToInt(InternPool.Index.anyerror_type),
+ comptime_int_type = @enumToInt(InternPool.Index.comptime_int_type),
+ comptime_float_type = @enumToInt(InternPool.Index.comptime_float_type),
+ noreturn_type = @enumToInt(InternPool.Index.noreturn_type),
+ anyframe_type = @enumToInt(InternPool.Index.anyframe_type),
+ null_type = @enumToInt(InternPool.Index.null_type),
+ undefined_type = @enumToInt(InternPool.Index.undefined_type),
+ enum_literal_type = @enumToInt(InternPool.Index.enum_literal_type),
+ atomic_order_type = @enumToInt(InternPool.Index.atomic_order_type),
+ atomic_rmw_op_type = @enumToInt(InternPool.Index.atomic_rmw_op_type),
+ calling_convention_type = @enumToInt(InternPool.Index.calling_convention_type),
+ address_space_type = @enumToInt(InternPool.Index.address_space_type),
+ float_mode_type = @enumToInt(InternPool.Index.float_mode_type),
+ reduce_op_type = @enumToInt(InternPool.Index.reduce_op_type),
+ call_modifier_type = @enumToInt(InternPool.Index.call_modifier_type),
+ prefetch_options_type = @enumToInt(InternPool.Index.prefetch_options_type),
+ export_options_type = @enumToInt(InternPool.Index.export_options_type),
+ extern_options_type = @enumToInt(InternPool.Index.extern_options_type),
+ type_info_type = @enumToInt(InternPool.Index.type_info_type),
+ manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type),
+ manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type),
+ single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type),
+ const_slice_u8_type = @enumToInt(InternPool.Index.const_slice_u8_type),
+ anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type),
+ generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type),
+ var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type),
+ empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type),
+ undef = @enumToInt(InternPool.Index.undef),
+ zero = @enumToInt(InternPool.Index.zero),
+ zero_usize = @enumToInt(InternPool.Index.zero_usize),
+ one = @enumToInt(InternPool.Index.one),
+ one_usize = @enumToInt(InternPool.Index.one_usize),
+ calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c),
+ calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline),
+ void_value = @enumToInt(InternPool.Index.void_value),
+ unreachable_value = @enumToInt(InternPool.Index.unreachable_value),
+ null_value = @enumToInt(InternPool.Index.null_value),
+ bool_true = @enumToInt(InternPool.Index.bool_true),
+ bool_false = @enumToInt(InternPool.Index.bool_false),
+ empty_struct = @enumToInt(InternPool.Index.empty_struct),
+ generic_poison = @enumToInt(InternPool.Index.generic_poison),
+
+ /// This Ref does not correspond to any AIR instruction or constant
+ /// value and may instead be used as a sentinel to indicate null.
+ none = std.math.maxInt(u32),
+ _,
+ };
/// All instructions have an 8-byte payload, which is contained within
/// this union. `Tag` determines which union field is active, as well as
@@ -1066,10 +1149,13 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index {
pub fn typeOf(air: Air, inst: Air.Inst.Ref) Type {
const ref_int = @enumToInt(inst);
- if (ref_int < Air.Inst.Ref.typed_value_map.len) {
- return Air.Inst.Ref.typed_value_map[ref_int].ty;
+ if (ref_int < InternPool.static_keys.len) {
+ return .{
+ .ip_index = InternPool.static_keys[ref_int].typeOf(),
+ .legacy = undefined,
+ };
}
- return air.typeOfIndex(@intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len));
+ return air.typeOfIndex(ref_int - ref_start_index);
}
pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
@@ -1286,11 +1372,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
const callee_ty = air.typeOf(datas[inst].pl_op.operand);
- switch (callee_ty.zigTypeTag()) {
- .Fn => return callee_ty.fnReturnType(),
- .Pointer => return callee_ty.childType().fnReturnType(),
- else => unreachable,
- }
+ return callee_ty.fnReturnType();
},
.slice_elem_val, .ptr_elem_val, .array_elem_val => {
@@ -1328,11 +1410,11 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type {
const ref_int = @enumToInt(ref);
- if (ref_int < Air.Inst.Ref.typed_value_map.len) {
- var buffer: Value.ToTypeBuffer = undefined;
- return Air.Inst.Ref.typed_value_map[ref_int].val.toType(&buffer);
+ if (ref_int < ref_start_index) {
+ const ip_index = @intToEnum(InternPool.Index, ref_int);
+ return ip_index.toType();
}
- const inst_index = ref_int - Air.Inst.Ref.typed_value_map.len;
+ const inst_index = ref_int - ref_start_index;
const air_tags = air.instructions.items(.tag);
const air_datas = air.instructions.items(.data);
assert(air_tags[inst_index] == .const_ty);
@@ -1367,7 +1449,7 @@ pub fn deinit(air: *Air, gpa: std.mem.Allocator) void {
air.* = undefined;
}
-const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len;
+pub const ref_start_index: u32 = InternPool.static_len;
pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref {
return @intToEnum(Air.Inst.Ref, ref_start_index + inst);
@@ -1383,17 +1465,18 @@ pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index {
}
/// Returns `null` if runtime-known.
-pub fn value(air: Air, inst: Air.Inst.Ref) ?Value {
+pub fn value(air: Air, inst: Air.Inst.Ref, mod: *const @import("Module.zig")) ?Value {
const ref_int = @enumToInt(inst);
- if (ref_int < Air.Inst.Ref.typed_value_map.len) {
- return Air.Inst.Ref.typed_value_map[ref_int].val;
+ if (ref_int < ref_start_index) {
+ const ip_index = @intToEnum(InternPool.Index, ref_int);
+ return ip_index.toValue();
}
- const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
+ const inst_index = @intCast(Air.Inst.Index, ref_int - ref_start_index);
const air_datas = air.instructions.items(.data);
switch (air.instructions.items(.tag)[inst_index]) {
.constant => return air.values[air_datas[inst_index].ty_pl.payload],
.const_ty => unreachable,
- else => return air.typeOfIndex(inst_index).onePossibleValue(),
+ else => return air.typeOfIndex(inst_index).onePossibleValue(mod),
}
}
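Aside: the reworked `Ref` above reserves the low values for `InternPool` constants and maps everything from `ref_start_index` upward to an AIR instruction index. A toy sketch of that split (constants and names here are placeholders, not the compiler's real values):

```zig
const std = @import("std");

// Stand-in for InternPool.static_len: refs below it name interned
// constants, refs at or above it encode `ref_start_index + inst_index`.
const ref_start_index: u32 = 4;

fn indexToRef(inst: u32) u32 {
    return ref_start_index + inst;
}

fn refToIndex(ref: u32) ?u32 {
    return if (ref >= ref_start_index) ref - ref_start_index else null;
}

test "ref encoding round-trips" {
    try std.testing.expectEqual(@as(?u32, 7), refToIndex(indexToRef(7)));
    try std.testing.expectEqual(@as(?u32, null), refToIndex(2)); // an interned constant
}
```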
src/AstGen.zig
@@ -8530,7 +8530,7 @@ fn builtinCall(
return rvalue(gz, ri, result, node);
},
.call => {
- const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .modifier_type } }, params[0]);
+ const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .call_modifier_type } }, params[0]);
const callee = try expr(gz, scope, .{ .rl = .none }, params[1]);
const args = try expr(gz, scope, .{ .rl = .none }, params[2]);
const result = try gz.addPlNode(.builtin_call, node, Zir.Inst.BuiltinCall{
@@ -10298,10 +10298,6 @@ fn rvalue(
as_ty | @enumToInt(Zir.Inst.Ref.noreturn_type),
as_ty | @enumToInt(Zir.Inst.Ref.null_type),
as_ty | @enumToInt(Zir.Inst.Ref.undefined_type),
- as_ty | @enumToInt(Zir.Inst.Ref.fn_noreturn_no_args_type),
- as_ty | @enumToInt(Zir.Inst.Ref.fn_void_no_args_type),
- as_ty | @enumToInt(Zir.Inst.Ref.fn_naked_noreturn_no_args_type),
- as_ty | @enumToInt(Zir.Inst.Ref.fn_ccc_void_no_args_type),
as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type),
as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_type),
as_ty | @enumToInt(Zir.Inst.Ref.enum_literal_type),
src/Autodoc.zig
@@ -95,8 +95,6 @@ pub fn generateZirData(self: *Autodoc) !void {
}
}
- log.debug("Ref map size: {}", .{Ref.typed_value_map.len});
-
const root_src_dir = self.comp_module.main_pkg.root_src_directory;
const root_src_path = self.comp_module.main_pkg.root_src_path;
const joined_src_path = try root_src_dir.join(self.arena, &.{root_src_path});
src/codegen.zig
@@ -154,7 +154,7 @@ pub fn generateLazySymbol(
}
mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian);
return Result.ok;
- } else if (lazy_sym.ty.zigTypeTag() == .Enum) {
+ } else if (lazy_sym.ty.zigTypeTag(mod) == .Enum) {
alignment.* = 1;
for (lazy_sym.ty.enumFields().keys()) |tag_name| {
try code.ensureUnusedCapacity(tag_name.len + 1);
@@ -186,22 +186,22 @@ pub fn generateSymbol(
typed_value.val = rt.data;
}
- const target = bin_file.options.target;
+ const mod = bin_file.options.module.?;
+ const target = mod.getTarget();
const endian = target.cpu.arch.endian();
- const mod = bin_file.options.module.?;
log.debug("generateSymbol: ty = {}, val = {}", .{
typed_value.ty.fmt(mod),
typed_value.val.fmtValue(typed_value.ty, mod),
});
if (typed_value.val.isUndefDeep()) {
- const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow;
+ const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
try code.appendNTimes(0xaa, abi_size);
return Result.ok;
}
- switch (typed_value.ty.zigTypeTag()) {
+ switch (typed_value.ty.zigTypeTag(mod)) {
.Fn => {
return Result{
.fail = try ErrorMsg.create(
@@ -219,7 +219,7 @@ pub fn generateSymbol(
64 => writeFloat(f64, typed_value.val.toFloat(f64), target, endian, try code.addManyAsArray(8)),
80 => {
writeFloat(f80, typed_value.val.toFloat(f80), target, endian, try code.addManyAsArray(10));
- const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow;
+ const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
try code.appendNTimes(0, abi_size - 10);
},
128 => writeFloat(f128, typed_value.val.toFloat(f128), target, endian, try code.addManyAsArray(16)),
@@ -242,7 +242,7 @@ pub fn generateSymbol(
try code.ensureUnusedCapacity(bytes.len + 1);
code.appendSliceAssumeCapacity(bytes);
if (typed_value.ty.sentinel()) |sent_val| {
- const byte = @intCast(u8, sent_val.toUnsignedInt(target));
+ const byte = @intCast(u8, sent_val.toUnsignedInt(mod));
code.appendAssumeCapacity(byte);
}
return Result.ok;
@@ -330,11 +330,11 @@ pub fn generateSymbol(
.zero, .one, .int_u64, .int_big_positive => {
switch (target.ptrBitWidth()) {
32 => {
- const x = typed_value.val.toUnsignedInt(target);
+ const x = typed_value.val.toUnsignedInt(mod);
mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian);
},
64 => {
- const x = typed_value.val.toUnsignedInt(target);
+ const x = typed_value.val.toUnsignedInt(mod);
mem.writeInt(u64, try code.addManyAsArray(8), x, endian);
},
else => unreachable,
@@ -399,19 +399,19 @@ pub fn generateSymbol(
},
},
.Int => {
- const info = typed_value.ty.intInfo(target);
+ const info = typed_value.ty.intInfo(mod);
if (info.bits <= 8) {
const x: u8 = switch (info.signedness) {
- .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(target)),
- .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(target))),
+ .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(mod)),
+ .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(mod))),
};
try code.append(x);
return Result.ok;
}
if (info.bits > 64) {
var bigint_buffer: Value.BigIntSpace = undefined;
- const bigint = typed_value.val.toBigInt(&bigint_buffer, target);
- const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow;
+ const bigint = typed_value.val.toBigInt(&bigint_buffer, mod);
+ const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
const start = code.items.len;
try code.resize(start + abi_size);
bigint.writeTwosComplement(code.items[start..][0..abi_size], endian);
@@ -420,25 +420,25 @@ pub fn generateSymbol(
switch (info.signedness) {
.unsigned => {
if (info.bits <= 16) {
- const x = @intCast(u16, typed_value.val.toUnsignedInt(target));
+ const x = @intCast(u16, typed_value.val.toUnsignedInt(mod));
mem.writeInt(u16, try code.addManyAsArray(2), x, endian);
} else if (info.bits <= 32) {
- const x = @intCast(u32, typed_value.val.toUnsignedInt(target));
+ const x = @intCast(u32, typed_value.val.toUnsignedInt(mod));
mem.writeInt(u32, try code.addManyAsArray(4), x, endian);
} else {
- const x = typed_value.val.toUnsignedInt(target);
+ const x = typed_value.val.toUnsignedInt(mod);
mem.writeInt(u64, try code.addManyAsArray(8), x, endian);
}
},
.signed => {
if (info.bits <= 16) {
- const x = @intCast(i16, typed_value.val.toSignedInt(target));
+ const x = @intCast(i16, typed_value.val.toSignedInt(mod));
mem.writeInt(i16, try code.addManyAsArray(2), x, endian);
} else if (info.bits <= 32) {
- const x = @intCast(i32, typed_value.val.toSignedInt(target));
+ const x = @intCast(i32, typed_value.val.toSignedInt(mod));
mem.writeInt(i32, try code.addManyAsArray(4), x, endian);
} else {
- const x = typed_value.val.toSignedInt(target);
+ const x = typed_value.val.toSignedInt(mod);
mem.writeInt(i64, try code.addManyAsArray(8), x, endian);
}
},
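Aside: the branches above choose the smallest of u16/u32/u64 that covers the integer type's bit count and store the value in the target's byte order (the <= 8-bit case is handled separately earlier). A standalone sketch of the unsigned path, with an illustrative helper name:

```zig
const std = @import("std");

// Store `x` using the narrowest 2/4/8-byte width that fits `bits`.
fn writeUnsigned(code: *std.ArrayList(u8), bits: u16, x: u64, endian: std.builtin.Endian) !void {
    if (bits <= 16) {
        std.mem.writeInt(u16, try code.addManyAsArray(2), @intCast(u16, x), endian);
    } else if (bits <= 32) {
        std.mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian);
    } else {
        std.mem.writeInt(u64, try code.addManyAsArray(8), x, endian);
    }
}

test "write unsigned" {
    var code = std.ArrayList(u8).init(std.testing.allocator);
    defer code.deinit();
    try writeUnsigned(&code, 24, 0x112233, .Little);
    try std.testing.expectEqualSlices(u8, &.{ 0x33, 0x22, 0x11, 0x00 }, code.items);
}
```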
@@ -449,9 +449,9 @@ pub fn generateSymbol(
var int_buffer: Value.Payload.U64 = undefined;
const int_val = typed_value.enumToInt(&int_buffer);
- const info = typed_value.ty.intInfo(target);
+ const info = typed_value.ty.intInfo(mod);
if (info.bits <= 8) {
- const x = @intCast(u8, int_val.toUnsignedInt(target));
+ const x = @intCast(u8, int_val.toUnsignedInt(mod));
try code.append(x);
return Result.ok;
}
@@ -468,25 +468,25 @@ pub fn generateSymbol(
switch (info.signedness) {
.unsigned => {
if (info.bits <= 16) {
- const x = @intCast(u16, int_val.toUnsignedInt(target));
+ const x = @intCast(u16, int_val.toUnsignedInt(mod));
mem.writeInt(u16, try code.addManyAsArray(2), x, endian);
} else if (info.bits <= 32) {
- const x = @intCast(u32, int_val.toUnsignedInt(target));
+ const x = @intCast(u32, int_val.toUnsignedInt(mod));
mem.writeInt(u32, try code.addManyAsArray(4), x, endian);
} else {
- const x = int_val.toUnsignedInt(target);
+ const x = int_val.toUnsignedInt(mod);
mem.writeInt(u64, try code.addManyAsArray(8), x, endian);
}
},
.signed => {
if (info.bits <= 16) {
- const x = @intCast(i16, int_val.toSignedInt(target));
+ const x = @intCast(i16, int_val.toSignedInt(mod));
mem.writeInt(i16, try code.addManyAsArray(2), x, endian);
} else if (info.bits <= 32) {
- const x = @intCast(i32, int_val.toSignedInt(target));
+ const x = @intCast(i32, int_val.toSignedInt(mod));
mem.writeInt(i32, try code.addManyAsArray(4), x, endian);
} else {
- const x = int_val.toSignedInt(target);
+ const x = int_val.toSignedInt(mod);
mem.writeInt(i64, try code.addManyAsArray(8), x, endian);
}
},
@@ -503,7 +503,7 @@ pub fn generateSymbol(
const struct_obj = typed_value.ty.castTag(.@"struct").?.data;
const fields = struct_obj.fields.values();
const field_vals = typed_value.val.castTag(.aggregate).?.data;
- const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow;
+ const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
const current_pos = code.items.len;
try code.resize(current_pos + abi_size);
var bits: u16 = 0;
@@ -512,8 +512,8 @@ pub fn generateSymbol(
const field_ty = fields[index].ty;
// pointer may point to a decl which must be marked used
// but can also result in a relocation. Therefore we handle those separately.
- if (field_ty.zigTypeTag() == .Pointer) {
- const field_size = math.cast(usize, field_ty.abiSize(target)) orelse return error.Overflow;
+ if (field_ty.zigTypeTag(mod) == .Pointer) {
+ const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse return error.Overflow;
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
defer tmp_list.deinit();
switch (try generateSymbol(bin_file, src_loc, .{
@@ -526,7 +526,7 @@ pub fn generateSymbol(
} else {
field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable;
}
- bits += @intCast(u16, field_ty.bitSize(target));
+ bits += @intCast(u16, field_ty.bitSize(mod));
}
return Result.ok;
@@ -536,7 +536,7 @@ pub fn generateSymbol(
const field_vals = typed_value.val.castTag(.aggregate).?.data;
for (field_vals, 0..) |field_val, index| {
const field_ty = typed_value.ty.structFieldType(index);
- if (!field_ty.hasRuntimeBits()) continue;
+ if (!field_ty.hasRuntimeBits(mod)) continue;
switch (try generateSymbol(bin_file, src_loc, .{
.ty = field_ty,
@@ -548,7 +548,7 @@ pub fn generateSymbol(
const unpadded_field_end = code.items.len - struct_begin;
// Pad struct members if required
- const padded_field_end = typed_value.ty.structFieldOffset(index + 1, target);
+ const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod);
const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow;
if (padding > 0) {
@@ -560,7 +560,7 @@ pub fn generateSymbol(
},
.Union => {
const union_obj = typed_value.val.castTag(.@"union").?.data;
- const layout = typed_value.ty.unionGetLayout(target);
+ const layout = typed_value.ty.unionGetLayout(mod);
if (layout.payload_size == 0) {
return generateSymbol(bin_file, src_loc, .{
@@ -584,7 +584,7 @@ pub fn generateSymbol(
const field_index = typed_value.ty.unionTagFieldIndex(union_obj.tag, mod).?;
assert(union_ty.haveFieldTypes());
const field_ty = union_ty.fields.values()[field_index].ty;
- if (!field_ty.hasRuntimeBits()) {
+ if (!field_ty.hasRuntimeBits(mod)) {
try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
} else {
switch (try generateSymbol(bin_file, src_loc, .{
@@ -595,7 +595,7 @@ pub fn generateSymbol(
.fail => |em| return Result{ .fail = em },
}
- const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(target)) orelse return error.Overflow;
+ const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow;
if (padding > 0) {
try code.writer().writeByteNTimes(0, padding);
}
@@ -620,15 +620,15 @@ pub fn generateSymbol(
.Optional => {
var opt_buf: Type.Payload.ElemType = undefined;
const payload_type = typed_value.ty.optionalChild(&opt_buf);
- const is_pl = !typed_value.val.isNull();
- const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow;
+ const is_pl = !typed_value.val.isNull(mod);
+ const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
- if (!payload_type.hasRuntimeBits()) {
+ if (!payload_type.hasRuntimeBits(mod)) {
try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size);
return Result.ok;
}
- if (typed_value.ty.optionalReprIsPayload()) {
+ if (typed_value.ty.optionalReprIsPayload(mod)) {
if (typed_value.val.castTag(.opt_payload)) |payload| {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = payload_type,
@@ -637,7 +637,7 @@ pub fn generateSymbol(
.ok => {},
.fail => |em| return Result{ .fail = em },
}
- } else if (!typed_value.val.isNull()) {
+ } else if (!typed_value.val.isNull(mod)) {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = payload_type,
.val = typed_value.val,
@@ -652,7 +652,7 @@ pub fn generateSymbol(
return Result.ok;
}
- const padding = abi_size - (math.cast(usize, payload_type.abiSize(target)) orelse return error.Overflow) - 1;
+ const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1;
const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.initTag(.undef);
switch (try generateSymbol(bin_file, src_loc, .{
.ty = payload_type,
@@ -671,7 +671,7 @@ pub fn generateSymbol(
const payload_ty = typed_value.ty.errorUnionPayload();
const is_payload = typed_value.val.errorUnionIsPayload();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const err_val = if (is_payload) Value.initTag(.zero) else typed_value.val;
return generateSymbol(bin_file, src_loc, .{
.ty = error_ty,
@@ -679,9 +679,9 @@ pub fn generateSymbol(
}, code, debug_output, reloc_info);
}
- const payload_align = payload_ty.abiAlignment(target);
- const error_align = Type.anyerror.abiAlignment(target);
- const abi_align = typed_value.ty.abiAlignment(target);
+ const payload_align = payload_ty.abiAlignment(mod);
+ const error_align = Type.anyerror.abiAlignment(mod);
+ const abi_align = typed_value.ty.abiAlignment(mod);
// error value first when its type is larger than the error union's payload
if (error_align > payload_align) {
@@ -743,7 +743,7 @@ pub fn generateSymbol(
try code.writer().writeInt(u32, kv.value, endian);
},
else => {
- try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(target)));
+ try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(mod)));
},
}
return Result.ok;
@@ -752,7 +752,7 @@ pub fn generateSymbol(
.bytes => {
const bytes = typed_value.val.castTag(.bytes).?.data;
const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow;
- const padding = math.cast(usize, typed_value.ty.abiSize(target) - len) orelse
+ const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse
return error.Overflow;
try code.ensureUnusedCapacity(len + padding);
code.appendSliceAssumeCapacity(bytes[0..len]);
@@ -763,8 +763,8 @@ pub fn generateSymbol(
const elem_vals = typed_value.val.castTag(.aggregate).?.data;
const elem_ty = typed_value.ty.elemType();
const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow;
- const padding = math.cast(usize, typed_value.ty.abiSize(target) -
- (math.divCeil(u64, elem_ty.bitSize(target) * len, 8) catch |err| switch (err) {
+ const padding = math.cast(usize, typed_value.ty.abiSize(mod) -
+ (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) {
error.DivisionByZero => unreachable,
else => |e| return e,
})) orelse return error.Overflow;
@@ -784,8 +784,8 @@ pub fn generateSymbol(
const array = typed_value.val.castTag(.repeated).?.data;
const elem_ty = typed_value.ty.childType();
const len = typed_value.ty.arrayLen();
- const padding = math.cast(usize, typed_value.ty.abiSize(target) -
- (math.divCeil(u64, elem_ty.bitSize(target) * len, 8) catch |err| switch (err) {
+ const padding = math.cast(usize, typed_value.ty.abiSize(mod) -
+ (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) {
error.DivisionByZero => unreachable,
else => |e| return e,
})) orelse return error.Overflow;
@@ -805,7 +805,7 @@ pub fn generateSymbol(
.str_lit => {
const str_lit = typed_value.val.castTag(.str_lit).?.data;
const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
- const padding = math.cast(usize, typed_value.ty.abiSize(target) - str_lit.len) orelse
+ const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse
return error.Overflow;
try code.ensureUnusedCapacity(str_lit.len + padding);
code.appendSliceAssumeCapacity(bytes);
@@ -832,7 +832,7 @@ fn lowerParentPtr(
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
) CodeGenError!Result {
- const target = bin_file.options.target;
+ const mod = bin_file.options.module.?;
switch (parent_ptr.tag()) {
.field_ptr => {
const field_ptr = parent_ptr.castTag(.field_ptr).?.data;
@@ -843,19 +843,19 @@ fn lowerParentPtr(
field_ptr.container_ptr,
code,
debug_output,
- reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag()) {
+ reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag(mod)) {
.Pointer => offset: {
assert(field_ptr.container_ty.isSlice());
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
break :offset switch (field_ptr.field_index) {
0 => 0,
- 1 => field_ptr.container_ty.slicePtrFieldType(&buf).abiSize(target),
+ 1 => field_ptr.container_ty.slicePtrFieldType(&buf).abiSize(mod),
else => unreachable,
};
},
.Struct, .Union => field_ptr.container_ty.structFieldOffset(
field_ptr.field_index,
- target,
+ mod,
),
else => return Result{ .fail = try ErrorMsg.create(
bin_file.allocator,
@@ -875,7 +875,7 @@ fn lowerParentPtr(
elem_ptr.array_ptr,
code,
debug_output,
- reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(target))),
+ reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(mod))),
);
},
.opt_payload_ptr => {
@@ -900,7 +900,7 @@ fn lowerParentPtr(
eu_payload_ptr.container_ptr,
code,
debug_output,
- reloc_info.offset(@intCast(u32, errUnionPayloadOffset(pl_ty, target))),
+ reloc_info.offset(@intCast(u32, errUnionPayloadOffset(pl_ty, mod))),
);
},
.variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef(
@@ -945,7 +945,7 @@ fn lowerDeclRef(
reloc_info: RelocInfo,
) CodeGenError!Result {
const target = bin_file.options.target;
- const module = bin_file.options.module.?;
+ const mod = bin_file.options.module.?;
if (typed_value.ty.isSlice()) {
// generate ptr
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
@@ -961,7 +961,7 @@ fn lowerDeclRef(
// generate length
var slice_len: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
- .data = typed_value.val.sliceLen(module),
+ .data = typed_value.val.sliceLen(mod),
};
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.usize,
@@ -975,14 +975,14 @@ fn lowerDeclRef(
}
const ptr_width = target.ptrBitWidth();
- const decl = module.declPtr(decl_index);
- const is_fn_body = decl.ty.zigTypeTag() == .Fn;
- if (!is_fn_body and !decl.ty.hasRuntimeBits()) {
+ const decl = mod.declPtr(decl_index);
+ const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn;
+ if (!is_fn_body and !decl.ty.hasRuntimeBits(mod)) {
try code.writer().writeByteNTimes(0xaa, @divExact(ptr_width, 8));
return Result.ok;
}
- module.markDeclAlive(decl);
+ mod.markDeclAlive(decl);
const vaddr = try bin_file.getDeclVAddr(decl_index, .{
.parent_atom_index = reloc_info.parent_atom_index,
@@ -1059,16 +1059,16 @@ fn genDeclRef(
tv: TypedValue,
decl_index: Module.Decl.Index,
) CodeGenError!GenResult {
- const module = bin_file.options.module.?;
- log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(module), tv.val.fmtValue(tv.ty, module) });
+ const mod = bin_file.options.module.?;
+ log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(mod), tv.val.fmtValue(tv.ty, mod) });
const target = bin_file.options.target;
const ptr_bits = target.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
- const decl = module.declPtr(decl_index);
+ const decl = mod.declPtr(decl_index);
- if (!decl.ty.isFnOrHasRuntimeBitsIgnoreComptime()) {
+ if (!decl.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
const imm: u64 = switch (ptr_bytes) {
1 => 0xaa,
2 => 0xaaaa,
@@ -1080,20 +1080,20 @@ fn genDeclRef(
}
// TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
- if (tv.ty.castPtrToFn()) |fn_ty| {
+ if (tv.ty.castPtrToFn(mod)) |fn_ty| {
if (fn_ty.fnInfo().is_generic) {
- return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(target) });
+ return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod) });
}
- } else if (tv.ty.zigTypeTag() == .Pointer) {
- const elem_ty = tv.ty.elemType2();
- if (!elem_ty.hasRuntimeBits()) {
- return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(target) });
+ } else if (tv.ty.zigTypeTag(mod) == .Pointer) {
+ const elem_ty = tv.ty.elemType2(mod);
+ if (!elem_ty.hasRuntimeBits(mod)) {
+ return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(mod) });
}
}
- module.markDeclAlive(decl);
+ mod.markDeclAlive(decl);
- const is_threadlocal = tv.val.isPtrToThreadLocal(module) and !bin_file.options.single_threaded;
+ const is_threadlocal = tv.val.isPtrToThreadLocal(mod) and !bin_file.options.single_threaded;
if (bin_file.cast(link.File.Elf)) |elf_file| {
const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
@@ -1186,7 +1186,7 @@ pub fn genTypedValue(
}
}
- switch (typed_value.ty.zigTypeTag()) {
+ switch (typed_value.ty.zigTypeTag(mod)) {
.Void => return GenResult.mcv(.none),
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {},
@@ -1196,18 +1196,18 @@ pub fn genTypedValue(
return GenResult.mcv(.{ .immediate = 0 });
},
.int_u64 => {
- return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(target) });
+ return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(mod) });
},
else => {},
}
},
},
.Int => {
- const info = typed_value.ty.intInfo(target);
+ const info = typed_value.ty.intInfo(mod);
if (info.bits <= ptr_bits) {
const unsigned = switch (info.signedness) {
- .signed => @bitCast(u64, typed_value.val.toSignedInt(target)),
- .unsigned => typed_value.val.toUnsignedInt(target),
+ .signed => @bitCast(u64, typed_value.val.toSignedInt(mod)),
+ .unsigned => typed_value.val.toUnsignedInt(mod),
};
return GenResult.mcv(.{ .immediate = unsigned });
}
@@ -1216,7 +1216,7 @@ pub fn genTypedValue(
return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool()) });
},
.Optional => {
- if (typed_value.ty.isPtrLikeOptional()) {
+ if (typed_value.ty.isPtrLikeOptional(mod)) {
if (typed_value.val.tag() == .null_value) return GenResult.mcv(.{ .immediate = 0 });
var buf: Type.Payload.ElemType = undefined;
@@ -1224,8 +1224,8 @@ pub fn genTypedValue(
.ty = typed_value.ty.optionalChild(&buf),
.val = if (typed_value.val.castTag(.opt_payload)) |pl| pl.data else typed_value.val,
}, owner_decl_index);
- } else if (typed_value.ty.abiSize(target) == 1) {
- return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull()) });
+ } else if (typed_value.ty.abiSize(mod) == 1) {
+ return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull(mod)) });
}
},
.Enum => {
@@ -1241,9 +1241,8 @@ pub fn genTypedValue(
typed_value.ty.cast(Type.Payload.EnumFull).?.data.values;
if (enum_values.count() != 0) {
const tag_val = enum_values.keys()[field_index.data];
- var buf: Type.Payload.Bits = undefined;
return genTypedValue(bin_file, src_loc, .{
- .ty = typed_value.ty.intTagType(&buf),
+ .ty = typed_value.ty.intTagType(),
.val = tag_val,
}, owner_decl_index);
} else {
@@ -1253,8 +1252,7 @@ pub fn genTypedValue(
else => unreachable,
}
} else {
- var int_tag_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
+ const int_tag_ty = typed_value.ty.intTagType();
return genTypedValue(bin_file, src_loc, .{
.ty = int_tag_ty,
.val = typed_value.val,
@@ -1281,7 +1279,7 @@ pub fn genTypedValue(
const payload_type = typed_value.ty.errorUnionPayload();
const is_pl = typed_value.val.errorUnionIsPayload();
- if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
+ if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
// We use the error type directly as the type.
const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
return genTypedValue(bin_file, src_loc, .{
@@ -1306,23 +1304,23 @@ pub fn genTypedValue(
return genUnnamedConst(bin_file, src_loc, typed_value, owner_decl_index);
}
-pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return 0;
- const payload_align = payload_ty.abiAlignment(target);
- const error_align = Type.anyerror.abiAlignment(target);
- if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime()) {
+pub fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u64 {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
+ const payload_align = payload_ty.abiAlignment(mod);
+ const error_align = Type.anyerror.abiAlignment(mod);
+ if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return 0;
} else {
- return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(target), payload_align);
+ return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(mod), payload_align);
}
}
-pub fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u64 {
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return 0;
- const payload_align = payload_ty.abiAlignment(target);
- const error_align = Type.anyerror.abiAlignment(target);
- if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime()) {
- return mem.alignForwardGeneric(u64, payload_ty.abiSize(target), error_align);
+pub fn errUnionErrorOffset(payload_ty: Type, mod: *const Module) u64 {
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
+ const payload_align = payload_ty.abiAlignment(mod);
+ const error_align = Type.anyerror.abiAlignment(mod);
+ if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+ return mem.alignForwardGeneric(u64, payload_ty.abiSize(mod), error_align);
} else {
return 0;
}
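
The two offset functions above encode a single layout rule: whichever of the payload and the error value has the larger alignment is placed first, and the other follows at the next suitably aligned offset; a zero-bit payload collapses both offsets to 0. A standalone sketch of that rule with made-up sizes and alignments (not taken from any real target, and omitting the zero-bit-payload early return handled above):

const std = @import("std");

fn payloadOffset(payload_align: u64, error_size: u64, error_align: u64) u64 {
    if (payload_align >= error_align) return 0;
    return std.mem.alignForwardGeneric(u64, error_size, payload_align);
}

fn errorOffset(payload_size: u64, payload_align: u64, error_align: u64) u64 {
    if (payload_align < error_align) return 0;
    return std.mem.alignForwardGeneric(u64, payload_size, error_align);
}

test "error union field ordering" {
    // u64 payload (size 8, align 8) vs anyerror (size 2, align 2): payload first.
    try std.testing.expectEqual(@as(u64, 0), payloadOffset(8, 2, 2));
    try std.testing.expectEqual(@as(u64, 8), errorOffset(8, 8, 2));
    // u8 payload (size 1, align 1) vs anyerror (size 2, align 2): error value first.
    try std.testing.expectEqual(@as(u64, 2), payloadOffset(1, 2, 2));
    try std.testing.expectEqual(@as(u64, 0), errorOffset(1, 1, 2));
}
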
src/Compilation.zig
@@ -1317,7 +1317,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.emit_h = emit_h,
.error_name_list = .{},
};
- try module.error_name_list.append(gpa, "(no error)");
+ try module.init();
break :blk module;
} else blk: {
@@ -2064,6 +2064,7 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void
if (!build_options.only_c and !build_options.only_core_functionality) {
if (comp.emit_docs) |doc_location| {
if (comp.bin_file.options.module) |module| {
+ if (true) @panic("TODO: get autodoc working again in this branch");
var autodoc = Autodoc.init(module, doc_location);
defer autodoc.deinit();
try autodoc.generateZirData();
src/InternPool.zig
@@ -1,11 +1,16 @@
+//! All interned objects have both a value and a type.
+
map: std.AutoArrayHashMapUnmanaged(void, void) = .{},
items: std.MultiArrayList(Item) = .{},
extra: std.ArrayListUnmanaged(u32) = .{},
-const InternPool = @This();
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
+const BigIntConst = std.math.big.int.Const;
+
+const InternPool = @This();
+const DeclIndex = enum(u32) { _ };
const KeyAdapter = struct {
intern_pool: *const InternPool,
@@ -17,24 +22,21 @@ const KeyAdapter = struct {
pub fn hash(ctx: @This(), a: Key) u32 {
_ = ctx;
- return a.hash();
+ return a.hash32();
}
};
pub const Key = union(enum) {
- int_type: struct {
- signedness: std.builtin.Signedness,
- bits: u16,
- },
+ int_type: IntType,
ptr_type: struct {
elem_type: Index,
- sentinel: Index,
- alignment: u16,
+ sentinel: Index = .none,
+ alignment: u16 = 0,
size: std.builtin.Type.Pointer.Size,
- is_const: bool,
- is_volatile: bool,
- is_allowzero: bool,
- address_space: std.builtin.AddressSpace,
+ is_const: bool = false,
+ is_volatile: bool = false,
+ is_allowzero: bool = false,
+ address_space: std.builtin.AddressSpace = .generic,
},
array_type: struct {
len: u64,
@@ -52,20 +54,52 @@ pub const Key = union(enum) {
error_set_type: Index,
payload_type: Index,
},
- simple: Simple,
+ simple_type: SimpleType,
+ simple_value: SimpleValue,
+ extern_func: struct {
+ ty: Index,
+ /// The Decl that corresponds to the function itself.
+ owner_decl: DeclIndex,
+ /// Library name if specified.
+ /// For example `extern "c" fn write(...) usize` would have 'c' as library name.
+ /// Index into the string table bytes.
+ lib_name: u32,
+ },
+ int: struct {
+ ty: Index,
+ big_int: BigIntConst,
+ },
+ enum_tag: struct {
+ ty: Index,
+ tag: BigIntConst,
+ },
+ struct_type: struct {
+ fields_len: u32,
+ // TODO move Module.Struct data to here
+ },
+
+ pub const IntType = std.builtin.Type.Int;
- pub fn hash(key: Key) u32 {
+ pub fn hash32(key: Key) u32 {
+ return @truncate(u32, key.hash64());
+ }
+
+ pub fn hash64(key: Key) u64 {
var hasher = std.hash.Wyhash.init(0);
+ key.hashWithHasher(&hasher);
+ return hasher.final();
+ }
+
+ pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash) void {
switch (key) {
.int_type => |int_type| {
- std.hash.autoHash(&hasher, int_type);
+ std.hash.autoHash(hasher, int_type);
},
.array_type => |array_type| {
- std.hash.autoHash(&hasher, array_type);
+ std.hash.autoHash(hasher, array_type);
},
else => @panic("TODO"),
}
- return @truncate(u32, hasher.final());
}
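
hashWithHasher lets composite keys feed one Wyhash stream; hash64 finalizes that stream and hash32 truncates the result for the u32-based AutoArrayHashMap adapter. A minimal standalone sketch of the layering, hashing a plain std.builtin.Type.Int value rather than a real Key:

const std = @import("std");

fn hash64(key: anytype) u64 {
    var hasher = std.hash.Wyhash.init(0);
    std.hash.autoHash(&hasher, key);
    return hasher.final();
}

fn hash32(key: anytype) u32 {
    return @truncate(u32, hash64(key));
}

test "equal keys hash equally" {
    const a: std.builtin.Type.Int = .{ .signedness = .unsigned, .bits = 8 };
    const b: std.builtin.Type.Int = .{ .signedness = .unsigned, .bits = 8 };
    try std.testing.expectEqual(hash32(a), hash32(b));
}
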
pub fn eql(a: Key, b: Key) bool {
@@ -85,6 +119,34 @@ pub const Key = union(enum) {
else => @panic("TODO"),
}
}
+
+ pub fn typeOf(key: Key) Index {
+ switch (key) {
+ .int_type,
+ .ptr_type,
+ .array_type,
+ .vector_type,
+ .optional_type,
+ .error_union_type,
+ .simple_type,
+ .struct_type,
+ => return .type_type,
+
+ .int => |x| return x.ty,
+ .extern_func => |x| return x.ty,
+ .enum_tag => |x| return x.ty,
+
+ .simple_value => |s| switch (s) {
+ .undefined => return .undefined_type,
+ .void => return .void_type,
+ .null => return .null_type,
+ .false, .true => return .bool_type,
+ .empty_struct => return .empty_struct_type,
+ .@"unreachable" => return .noreturn_type,
+ .generic_poison => unreachable,
+ },
+ }
+ }
};
pub const Item = struct {
@@ -98,11 +160,330 @@ pub const Item = struct {
/// Two values which have the same type can be equality compared simply
/// by checking if their indexes are equal, provided they are both in
/// the same `InternPool`.
+/// When adding a tag to this enum, consider adding a corresponding entry to
+/// `primitives` in AstGen.zig.
pub const Index = enum(u32) {
+ u1_type,
+ u8_type,
+ i8_type,
+ u16_type,
+ i16_type,
+ u29_type,
+ u32_type,
+ i32_type,
+ u64_type,
+ i64_type,
+ u80_type,
+ u128_type,
+ i128_type,
+ usize_type,
+ isize_type,
+ c_char_type,
+ c_short_type,
+ c_ushort_type,
+ c_int_type,
+ c_uint_type,
+ c_long_type,
+ c_ulong_type,
+ c_longlong_type,
+ c_ulonglong_type,
+ c_longdouble_type,
+ f16_type,
+ f32_type,
+ f64_type,
+ f80_type,
+ f128_type,
+ anyopaque_type,
+ bool_type,
+ void_type,
+ type_type,
+ anyerror_type,
+ comptime_int_type,
+ comptime_float_type,
+ noreturn_type,
+ anyframe_type,
+ null_type,
+ undefined_type,
+ enum_literal_type,
+ atomic_order_type,
+ atomic_rmw_op_type,
+ calling_convention_type,
+ address_space_type,
+ float_mode_type,
+ reduce_op_type,
+ call_modifier_type,
+ prefetch_options_type,
+ export_options_type,
+ extern_options_type,
+ type_info_type,
+ manyptr_u8_type,
+ manyptr_const_u8_type,
+ single_const_pointer_to_comptime_int_type,
+ const_slice_u8_type,
+ anyerror_void_error_union_type,
+ generic_poison_type,
+ var_args_param_type,
+ empty_struct_type,
+
+ /// `undefined` (untyped)
+ undef,
+ /// `0` (comptime_int)
+ zero,
+ /// `0` (usize)
+ zero_usize,
+ /// `1` (comptime_int)
+ one,
+ /// `1` (usize)
+ one_usize,
+ /// `std.builtin.CallingConvention.C`
+ calling_convention_c,
+ /// `std.builtin.CallingConvention.Inline`
+ calling_convention_inline,
+ /// `{}`
+ void_value,
+ /// `unreachable` (noreturn type)
+ unreachable_value,
+ /// `null` (untyped)
+ null_value,
+ /// `true`
+ bool_true,
+ /// `false`
+ bool_false,
+ /// `.{}` (untyped)
+ empty_struct,
+ /// Used for generic parameters where the type and value
+ /// is not known until generic function instantiation.
+ generic_poison,
+
none = std.math.maxInt(u32),
+
_,
+
+ pub fn toType(i: Index) @import("type.zig").Type {
+ assert(i != .none);
+ return .{
+ .ip_index = i,
+ .legacy = undefined,
+ };
+ }
+
+ pub fn toValue(i: Index) @import("value.zig").Value {
+ assert(i != .none);
+ return .{
+ .ip_index = i,
+ .legacy = undefined,
+ };
+ }
+};
+
+pub const static_keys = [_]Key{
+ .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = 1,
+ } },
+
+ .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = 8,
+ } },
+
+ .{ .int_type = .{
+ .signedness = .signed,
+ .bits = 8,
+ } },
+
+ .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = 16,
+ } },
+
+ .{ .int_type = .{
+ .signedness = .signed,
+ .bits = 16,
+ } },
+
+ .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = 29,
+ } },
+
+ .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = 32,
+ } },
+
+ .{ .int_type = .{
+ .signedness = .signed,
+ .bits = 32,
+ } },
+
+ .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = 64,
+ } },
+
+ .{ .int_type = .{
+ .signedness = .signed,
+ .bits = 64,
+ } },
+
+ .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = 80,
+ } },
+
+ .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = 128,
+ } },
+
+ .{ .int_type = .{
+ .signedness = .signed,
+ .bits = 128,
+ } },
+
+ .{ .simple_type = .usize },
+ .{ .simple_type = .isize },
+ .{ .simple_type = .c_char },
+ .{ .simple_type = .c_short },
+ .{ .simple_type = .c_ushort },
+ .{ .simple_type = .c_int },
+ .{ .simple_type = .c_uint },
+ .{ .simple_type = .c_long },
+ .{ .simple_type = .c_ulong },
+ .{ .simple_type = .c_longlong },
+ .{ .simple_type = .c_ulonglong },
+ .{ .simple_type = .c_longdouble },
+ .{ .simple_type = .f16 },
+ .{ .simple_type = .f32 },
+ .{ .simple_type = .f64 },
+ .{ .simple_type = .f80 },
+ .{ .simple_type = .f128 },
+ .{ .simple_type = .anyopaque },
+ .{ .simple_type = .bool },
+ .{ .simple_type = .void },
+ .{ .simple_type = .type },
+ .{ .simple_type = .anyerror },
+ .{ .simple_type = .comptime_int },
+ .{ .simple_type = .comptime_float },
+ .{ .simple_type = .noreturn },
+ .{ .simple_type = .@"anyframe" },
+ .{ .simple_type = .null },
+ .{ .simple_type = .undefined },
+ .{ .simple_type = .enum_literal },
+ .{ .simple_type = .atomic_order },
+ .{ .simple_type = .atomic_rmw_op },
+ .{ .simple_type = .calling_convention },
+ .{ .simple_type = .address_space },
+ .{ .simple_type = .float_mode },
+ .{ .simple_type = .reduce_op },
+ .{ .simple_type = .call_modifier },
+ .{ .simple_type = .prefetch_options },
+ .{ .simple_type = .export_options },
+ .{ .simple_type = .extern_options },
+ .{ .simple_type = .type_info },
+
+ .{ .ptr_type = .{
+ .elem_type = .u8_type,
+ .size = .Many,
+ } },
+
+ .{ .ptr_type = .{
+ .elem_type = .u8_type,
+ .size = .Many,
+ .is_const = true,
+ } },
+
+ .{ .ptr_type = .{
+ .elem_type = .comptime_int_type,
+ .size = .One,
+ .is_const = true,
+ } },
+
+ .{ .ptr_type = .{
+ .elem_type = .u8_type,
+ .size = .Slice,
+ .is_const = true,
+ } },
+
+ .{ .error_union_type = .{
+ .error_set_type = .anyerror_type,
+ .payload_type = .void_type,
+ } },
+
+ // generic_poison_type
+ .{ .simple_type = .generic_poison },
+
+ // var_args_param_type
+ .{ .simple_type = .var_args_param },
+
+ // empty_struct_type
+ .{ .struct_type = .{
+ .fields_len = 0,
+ } },
+
+ .{ .simple_value = .undefined },
+
+ .{ .int = .{
+ .ty = .comptime_int_type,
+ .big_int = .{
+ .limbs = &.{0},
+ .positive = true,
+ },
+ } },
+
+ .{ .int = .{
+ .ty = .usize_type,
+ .big_int = .{
+ .limbs = &.{0},
+ .positive = true,
+ },
+ } },
+
+ .{ .int = .{
+ .ty = .comptime_int_type,
+ .big_int = .{
+ .limbs = &.{1},
+ .positive = true,
+ },
+ } },
+
+ .{ .int = .{
+ .ty = .usize_type,
+ .big_int = .{
+ .limbs = &.{1},
+ .positive = true,
+ },
+ } },
+
+ .{ .enum_tag = .{
+ .ty = .calling_convention_type,
+ .tag = .{
+ .limbs = &.{@enumToInt(std.builtin.CallingConvention.C)},
+ .positive = true,
+ },
+ } },
+
+ .{ .enum_tag = .{
+ .ty = .calling_convention_type,
+ .tag = .{
+ .limbs = &.{@enumToInt(std.builtin.CallingConvention.Inline)},
+ .positive = true,
+ },
+ } },
+
+ .{ .simple_value = .void },
+ .{ .simple_value = .@"unreachable" },
+ .{ .simple_value = .null },
+ .{ .simple_value = .true },
+ .{ .simple_value = .false },
+ .{ .simple_value = .empty_struct },
+ .{ .simple_value = .generic_poison },
};
+/// How many items in the InternPool are statically known.
+pub const static_len: u32 = static_keys.len;
+
pub const Tag = enum(u8) {
/// An integer type.
/// data is number of bits
@@ -113,9 +494,12 @@ pub const Tag = enum(u8) {
/// An array type.
/// data is payload to Array.
type_array,
- /// A type or value that can be represented with only an enum tag.
- /// data is Simple enum value
- simple,
+ /// A type that can be represented with only an enum tag.
+ /// data is SimpleType enum value.
+ simple_type,
+ /// A value that can be represented with only an enum tag.
+ /// data is SimpleValue enum value.
+ simple_value,
/// An unsigned integer value that can be represented by u32.
/// data is integer value
int_u32,
@@ -137,9 +521,20 @@ pub const Tag = enum(u8) {
/// A float value that can be represented by f128.
/// data is payload index to Float128.
float_f128,
+ /// An extern function.
+ extern_func,
+ /// A regular function.
+ func,
+ /// Represents the data that an enum declaration provides, when the fields
+ /// are auto-numbered, and there are no declarations.
+ /// data is payload index to `EnumSimple`.
+ enum_simple,
};
-pub const Simple = enum(u32) {
+/// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to
+/// implement logic that only wants to deal with types because the logic can
+/// ignore all simple values. Note that technically, types are values.
+pub const SimpleType = enum(u32) {
f16,
f32,
f64,
@@ -147,6 +542,7 @@ pub const Simple = enum(u32) {
f128,
usize,
isize,
+ c_char,
c_short,
c_ushort,
c_int,
@@ -165,14 +561,36 @@ pub const Simple = enum(u32) {
comptime_float,
noreturn,
@"anyframe",
- null_type,
- undefined_type,
- enum_literal_type,
+ null,
undefined,
- void_value,
+ enum_literal,
+
+ atomic_order,
+ atomic_rmw_op,
+ calling_convention,
+ address_space,
+ float_mode,
+ reduce_op,
+ call_modifier,
+ prefetch_options,
+ export_options,
+ extern_options,
+ type_info,
+
+ generic_poison,
+ var_args_param,
+};
+
+pub const SimpleValue = enum(u32) {
+ undefined,
+ void,
null,
- bool_true,
- bool_false,
+ empty_struct,
+ true,
+ false,
+ @"unreachable",
+
+ generic_poison,
};
pub const Array = struct {
@@ -180,10 +598,44 @@ pub const Array = struct {
child: Index,
};
+/// Trailing:
+/// 0. field name: null-terminated string index for each fields_len; declaration order
+pub const EnumSimple = struct {
+ /// The Decl that corresponds to the enum itself.
+ owner_decl: DeclIndex,
+ /// An integer type which is used for the numerical value of the enum. This
+ /// is inferred by Zig to be the smallest power of two unsigned int that
+ /// fits the number of fields. It is stored here to avoid unnecessary
+ /// calculations and possibly allocation failure when querying the tag type
+ /// of enums.
+ int_tag_ty: Index,
+ fields_len: u32,
+};
+
+pub fn init(ip: *InternPool, gpa: Allocator) !void {
+ assert(ip.items.len == 0);
+
+ // So that we can use `catch unreachable` below.
+ try ip.items.ensureUnusedCapacity(gpa, static_keys.len);
+ try ip.map.ensureUnusedCapacity(gpa, static_keys.len);
+ try ip.extra.ensureUnusedCapacity(gpa, static_keys.len);
+
+ // This inserts all the statically-known values into the intern pool in the
+ // order expected.
+ for (static_keys) |key| _ = ip.get(gpa, key) catch unreachable;
+
+ // Sanity check.
+ assert(ip.indexToKey(.bool_true).simple_value == .true);
+ assert(ip.indexToKey(.bool_false).simple_value == .false);
+
+ assert(ip.items.len == static_keys.len);
+}
+
pub fn deinit(ip: *InternPool, gpa: Allocator) void {
ip.map.deinit(gpa);
ip.items.deinit(gpa);
ip.extra.deinit(gpa);
+ ip.* = undefined;
}
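
With init() pre-populating the pool from static_keys, interning becomes the canonical way to construct types: get() returns the same Index for equal keys, so type equality reduces to index equality, and the statically known entries can be named directly. A hypothetical test sketch, assuming it lives next to InternPool.zig in src/:

const std = @import("std");
const InternPool = @import("InternPool.zig");

test "interning is deduplicated" {
    const gpa = std.testing.allocator;
    var ip: InternPool = .{};
    defer ip.deinit(gpa);
    try ip.init(gpa);

    const a = try ip.get(gpa, .{ .int_type = .{ .signedness = .unsigned, .bits = 8 } });
    const b = try ip.get(gpa, .{ .int_type = .{ .signedness = .unsigned, .bits = 8 } });
    try std.testing.expect(a == b);
    // u8 is one of the statically interned types, so no new item is added.
    try std.testing.expect(a == .u8_type);
}
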
pub fn indexToKey(ip: InternPool, index: Index) Key {
@@ -210,7 +662,8 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
.sentinel = .none,
} };
},
- .simple => .{ .simple = @intToEnum(Simple, data) },
+ .simple_type => .{ .simple_type = @intToEnum(SimpleType, data) },
+ .simple_value => .{ .simple_value = @intToEnum(SimpleValue, data) },
else => @panic("TODO"),
};
@@ -224,12 +677,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}
switch (key) {
.int_type => |int_type| {
- const tag: Tag = switch (int_type.signedness) {
+ const t: Tag = switch (int_type.signedness) {
.signed => .type_int_signed,
.unsigned => .type_int_unsigned,
};
try ip.items.append(gpa, .{
- .tag = tag,
+ .tag = t,
.data = int_type.bits,
});
},
@@ -249,6 +702,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
return @intToEnum(Index, ip.items.len - 1);
}
+pub fn tag(ip: InternPool, index: Index) Tag {
+ const tags = ip.items.items(.tag);
+ return tags[@enumToInt(index)];
+}
+
fn addExtra(ip: *InternPool, gpa: Allocator, extra: anytype) Allocator.Error!u32 {
const fields = std.meta.fields(@TypeOf(extra));
try ip.extra.ensureUnusedCapacity(gpa, fields.len);
src/Liveness.zig
@@ -5,15 +5,17 @@
//! Some instructions are special, such as:
//! * Conditional Branches
//! * Switch Branches
-const Liveness = @This();
const std = @import("std");
-const trace = @import("tracy.zig").trace;
const log = std.log.scoped(.liveness);
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
-const Air = @import("Air.zig");
const Log2Int = std.math.Log2Int;
+const Liveness = @This();
+const trace = @import("tracy.zig").trace;
+const Air = @import("Air.zig");
+const InternPool = @import("InternPool.zig");
+
pub const Verify = @import("Liveness/Verify.zig");
/// This array is split into sets of 4 bits per AIR instruction.
src/Module.zig
@@ -32,6 +32,19 @@ const build_options = @import("build_options");
const Liveness = @import("Liveness.zig");
const isUpDir = @import("introspect.zig").isUpDir;
const clang = @import("clang.zig");
+const InternPool = @import("InternPool.zig");
+
+comptime {
+ @setEvalBranchQuota(4000);
+ for (
+ @typeInfo(Zir.Inst.Ref).Enum.fields,
+ @typeInfo(Air.Inst.Ref).Enum.fields,
+ @typeInfo(InternPool.Index).Enum.fields,
+ ) |zir_field, air_field, ip_field| {
+ assert(mem.eql(u8, zir_field.name, ip_field.name));
+ assert(mem.eql(u8, air_field.name, ip_field.name));
+ }
+}
/// General-purpose allocator. Used for both temporary and long-term storage.
gpa: Allocator,
@@ -83,6 +96,9 @@ embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{},
string_literal_table: std.HashMapUnmanaged(StringLiteralContext.Key, Decl.OptionalIndex, StringLiteralContext, std.hash_map.default_max_load_percentage) = .{},
string_literal_bytes: ArrayListUnmanaged(u8) = .{},
+/// Stores all Type and Value objects; periodically garbage collected.
+intern_pool: InternPool = .{},
+
/// The set of all the generic function instantiations. This is used so that when a generic
/// function is called twice with the same comptime parameter arguments, both calls dispatch
/// to the same function.
@@ -807,9 +823,9 @@ pub const Decl = struct {
return (try decl.typedValue()).val;
}
- pub fn isFunction(decl: Decl) !bool {
+ pub fn isFunction(decl: Decl, mod: *const Module) !bool {
const tv = try decl.typedValue();
- return tv.ty.zigTypeTag() == .Fn;
+ return tv.ty.zigTypeTag(mod) == .Fn;
}
/// If the Decl has a value and it is a struct, return it,
@@ -921,14 +937,14 @@ pub const Decl = struct {
};
}
- pub fn getAlignment(decl: Decl, target: Target) u32 {
+ pub fn getAlignment(decl: Decl, mod: *const Module) u32 {
assert(decl.has_tv);
if (decl.@"align" != 0) {
// Explicit alignment.
return decl.@"align";
} else {
// Natural alignment.
- return decl.ty.abiAlignment(target);
+ return decl.ty.abiAlignment(mod);
}
}
};
@@ -1030,7 +1046,7 @@ pub const Struct = struct {
/// Returns the field alignment. If the struct is packed, returns 0.
pub fn alignment(
field: Field,
- target: Target,
+ mod: *const Module,
layout: std.builtin.Type.ContainerLayout,
) u32 {
if (field.abi_align != 0) {
@@ -1038,24 +1054,26 @@ pub const Struct = struct {
return field.abi_align;
}
+ const target = mod.getTarget();
+
switch (layout) {
.Packed => return 0,
.Auto => {
if (target.ofmt == .c) {
- return alignmentExtern(field, target);
+ return alignmentExtern(field, mod);
} else {
- return field.ty.abiAlignment(target);
+ return field.ty.abiAlignment(mod);
}
},
- .Extern => return alignmentExtern(field, target),
+ .Extern => return alignmentExtern(field, mod),
}
}
- pub fn alignmentExtern(field: Field, target: Target) u32 {
+ pub fn alignmentExtern(field: Field, mod: *const Module) u32 {
// This logic is duplicated in Type.abiAlignmentAdvanced.
- const ty_abi_align = field.ty.abiAlignment(target);
+ const ty_abi_align = field.ty.abiAlignment(mod);
- if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) {
+ if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
// The C ABI requires 128 bit integer fields of structs
// to be 16-bytes aligned.
return @max(ty_abi_align, 16);
@@ -1132,7 +1150,7 @@ pub const Struct = struct {
};
}
- pub fn packedFieldBitOffset(s: Struct, target: Target, index: usize) u16 {
+ pub fn packedFieldBitOffset(s: Struct, mod: *const Module, index: usize) u16 {
assert(s.layout == .Packed);
assert(s.haveLayout());
var bit_sum: u64 = 0;
@@ -1140,12 +1158,13 @@ pub const Struct = struct {
if (i == index) {
return @intCast(u16, bit_sum);
}
- bit_sum += field.ty.bitSize(target);
+ bit_sum += field.ty.bitSize(mod);
}
unreachable; // index out of bounds
}
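
packedFieldBitOffset expresses the packed-struct rule that a field's bit offset is simply the sum of the bit sizes of the fields declared before it. A standalone sketch with assumed field widths:

const std = @import("std");

fn packedBitOffset(field_bit_sizes: []const u16, index: usize) u16 {
    var bit_sum: u16 = 0;
    for (field_bit_sizes[0..index]) |size| bit_sum += size;
    return bit_sum;
}

test "packed struct { a: u3, b: u5, c: u9 }" {
    try std.testing.expectEqual(@as(u16, 0), packedBitOffset(&.{ 3, 5, 9 }, 0));
    try std.testing.expectEqual(@as(u16, 3), packedBitOffset(&.{ 3, 5, 9 }, 1));
    try std.testing.expectEqual(@as(u16, 8), packedBitOffset(&.{ 3, 5, 9 }, 2));
}
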
pub const RuntimeFieldIterator = struct {
+ module: *const Module,
struct_obj: *const Struct,
index: u32 = 0,
@@ -1155,6 +1174,7 @@ pub const Struct = struct {
};
pub fn next(it: *RuntimeFieldIterator) ?FieldAndIndex {
+ const mod = it.module;
while (true) {
var i = it.index;
it.index += 1;
@@ -1167,15 +1187,18 @@ pub const Struct = struct {
}
const field = it.struct_obj.fields.values()[i];
- if (!field.is_comptime and field.ty.hasRuntimeBits()) {
+ if (!field.is_comptime and field.ty.hasRuntimeBits(mod)) {
return FieldAndIndex{ .index = i, .field = field };
}
}
}
};
- pub fn runtimeFieldIterator(s: *const Struct) RuntimeFieldIterator {
- return .{ .struct_obj = s };
+ pub fn runtimeFieldIterator(s: *const Struct, module: *const Module) RuntimeFieldIterator {
+ return .{
+ .struct_obj = s,
+ .module = module,
+ };
}
};
@@ -1323,9 +1346,9 @@ pub const Union = struct {
/// Returns the field alignment, assuming the union is not packed.
/// Keep implementation in sync with `Sema.unionFieldAlignment`.
/// Prefer to call that function instead of this one during Sema.
- pub fn normalAlignment(field: Field, target: Target) u32 {
+ pub fn normalAlignment(field: Field, mod: *const Module) u32 {
if (field.abi_align == 0) {
- return field.ty.abiAlignment(target);
+ return field.ty.abiAlignment(mod);
} else {
return field.abi_align;
}
@@ -1383,22 +1406,22 @@ pub const Union = struct {
};
}
- pub fn hasAllZeroBitFieldTypes(u: Union) bool {
+ pub fn hasAllZeroBitFieldTypes(u: Union, mod: *const Module) bool {
assert(u.haveFieldTypes());
for (u.fields.values()) |field| {
- if (field.ty.hasRuntimeBits()) return false;
+ if (field.ty.hasRuntimeBits(mod)) return false;
}
return true;
}
- pub fn mostAlignedField(u: Union, target: Target) u32 {
+ pub fn mostAlignedField(u: Union, mod: *const Module) u32 {
assert(u.haveFieldTypes());
var most_alignment: u32 = 0;
var most_index: usize = undefined;
for (u.fields.values(), 0..) |field, i| {
- if (!field.ty.hasRuntimeBits()) continue;
+ if (!field.ty.hasRuntimeBits(mod)) continue;
- const field_align = field.normalAlignment(target);
+ const field_align = field.normalAlignment(mod);
if (field_align > most_alignment) {
most_alignment = field_align;
most_index = i;
@@ -1408,20 +1431,20 @@ pub const Union = struct {
}
/// Returns 0 if the union is represented with 0 bits at runtime.
- pub fn abiAlignment(u: Union, target: Target, have_tag: bool) u32 {
+ pub fn abiAlignment(u: Union, mod: *const Module, have_tag: bool) u32 {
var max_align: u32 = 0;
- if (have_tag) max_align = u.tag_ty.abiAlignment(target);
+ if (have_tag) max_align = u.tag_ty.abiAlignment(mod);
for (u.fields.values()) |field| {
- if (!field.ty.hasRuntimeBits()) continue;
+ if (!field.ty.hasRuntimeBits(mod)) continue;
- const field_align = field.normalAlignment(target);
+ const field_align = field.normalAlignment(mod);
max_align = @max(max_align, field_align);
}
return max_align;
}
- pub fn abiSize(u: Union, target: Target, have_tag: bool) u64 {
- return u.getLayout(target, have_tag).abi_size;
+ pub fn abiSize(u: Union, mod: *const Module, have_tag: bool) u64 {
+ return u.getLayout(mod, have_tag).abi_size;
}
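
abiAlignment and getLayout share one rule: a union's ABI alignment is the maximum alignment over its runtime fields (plus the tag when one is stored), and its payload size is the largest field size rounded up to that alignment. A simplified standalone sketch with assumed sizes and alignments, ignoring where the tag itself is placed:

const std = @import("std");

fn unionLayout(field_sizes: []const u64, field_aligns: []const u32, tag_align: u32) struct { abi_size: u64, abi_align: u32 } {
    var max_align: u32 = @max(tag_align, 1);
    var max_size: u64 = 0;
    for (field_sizes, field_aligns) |size, alignment| {
        max_align = @max(max_align, alignment);
        max_size = @max(max_size, size);
    }
    return .{
        .abi_size = std.mem.alignForwardGeneric(u64, max_size, max_align),
        .abi_align = max_align,
    };
}

test {
    // union { a: u8, b: u64 } with no tag: size 8, align 8.
    const layout = unionLayout(&.{ 1, 8 }, &.{ 1, 8 }, 0);
    try std.testing.expectEqual(@as(u64, 8), layout.abi_size);
    try std.testing.expectEqual(@as(u32, 8), layout.abi_align);
}
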
pub const Layout = struct {
@@ -1451,7 +1474,7 @@ pub const Union = struct {
};
}
- pub fn getLayout(u: Union, target: Target, have_tag: bool) Layout {
+ pub fn getLayout(u: Union, mod: *const Module, have_tag: bool) Layout {
assert(u.haveLayout());
var most_aligned_field: u32 = undefined;
var most_aligned_field_size: u64 = undefined;
@@ -1460,16 +1483,16 @@ pub const Union = struct {
var payload_align: u32 = 0;
const fields = u.fields.values();
for (fields, 0..) |field, i| {
- if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+ if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_align = a: {
if (field.abi_align == 0) {
- break :a field.ty.abiAlignment(target);
+ break :a field.ty.abiAlignment(mod);
} else {
break :a field.abi_align;
}
};
- const field_size = field.ty.abiSize(target);
+ const field_size = field.ty.abiSize(mod);
if (field_size > payload_size) {
payload_size = field_size;
biggest_field = @intCast(u32, i);
@@ -1481,7 +1504,7 @@ pub const Union = struct {
}
}
payload_align = @max(payload_align, 1);
- if (!have_tag or !u.tag_ty.hasRuntimeBits()) {
+ if (!have_tag or !u.tag_ty.hasRuntimeBits(mod)) {
return .{
.abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align),
.abi_align = payload_align,
@@ -1497,8 +1520,8 @@ pub const Union = struct {
}
// Put the tag before or after the payload depending on which one's
// alignment is greater.
- const tag_size = u.tag_ty.abiSize(target);
- const tag_align = @max(1, u.tag_ty.abiAlignment(target));
+ const tag_size = u.tag_ty.abiSize(mod);
+ const tag_align = @max(1, u.tag_ty.abiAlignment(mod));
var size: u64 = 0;
var padding: u32 = undefined;
if (tag_align >= payload_align) {
@@ -2281,7 +2304,7 @@ pub const ErrorMsg = struct {
) !*ErrorMsg {
const err_msg = try gpa.create(ErrorMsg);
errdefer gpa.destroy(err_msg);
- err_msg.* = try init(gpa, src_loc, format, args);
+ err_msg.* = try ErrorMsg.init(gpa, src_loc, format, args);
return err_msg;
}
@@ -3391,6 +3414,12 @@ pub const CompileError = error{
ComptimeBreak,
};
+pub fn init(mod: *Module) !void {
+ const gpa = mod.gpa;
+ try mod.error_name_list.append(gpa, "(no error)");
+ try mod.intern_pool.init(gpa);
+}
+
pub fn deinit(mod: *Module) void {
const gpa = mod.gpa;
@@ -3518,6 +3547,8 @@ pub fn deinit(mod: *Module) void {
mod.string_literal_table.deinit(gpa);
mod.string_literal_bytes.deinit(gpa);
+
+ mod.intern_pool.deinit(gpa);
}
pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
@@ -4277,7 +4308,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
// Update all dependents which have at least this level of dependency.
// If our type remained the same and we're a function, only update
// decls which depend on our body; otherwise, update all dependents.
- const update_level: Decl.DepType = if (!type_changed and decl.ty.zigTypeTag() == .Fn) .function_body else .normal;
+ const update_level: Decl.DepType = if (!type_changed and decl.ty.zigTypeTag(mod) == .Fn) .function_body else .normal;
for (decl.dependants.keys(), decl.dependants.values()) |dep_index, dep_type| {
if (@enumToInt(dep_type) < @enumToInt(update_level)) continue;
@@ -4748,8 +4779,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
decl_tv.ty.fmt(mod),
});
}
- var buffer: Value.ToTypeBuffer = undefined;
- const ty = try decl_tv.val.toType(&buffer).copy(decl_arena_allocator);
+ const ty = try decl_tv.val.toType().copy(decl_arena_allocator);
if (ty.getNamespace() == null) {
return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)});
}
@@ -4775,7 +4805,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
var type_changed = true;
if (decl.has_tv) {
- prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits();
+ prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod);
type_changed = !decl.ty.eql(decl_tv.ty, mod);
if (decl.getFunction()) |prev_func| {
prev_is_inline = prev_func.state == .inline_only;
@@ -5510,7 +5540,7 @@ pub fn clearDecl(
try mod.deleteDeclExports(decl_index);
if (decl.has_tv) {
- if (decl.ty.isFnOrHasRuntimeBits()) {
+ if (decl.ty.isFnOrHasRuntimeBits(mod)) {
mod.comp.bin_file.freeDecl(decl_index);
}
if (decl.getInnerNamespace()) |namespace| {
@@ -5699,7 +5729,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
const arg_val = if (arg_tv.val.tag() != .generic_poison)
arg_tv.val
- else if (arg_tv.ty.onePossibleValue()) |opv|
+ else if (arg_tv.ty.onePossibleValue(mod)) |opv|
opv
else
break :t arg_tv.ty;
@@ -5773,7 +5803,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
// If we don't get an error return trace from a caller, create our own.
if (func.calls_or_awaits_errorable_fn and
mod.comp.bin_file.options.error_return_tracing and
- !sema.fn_ret_ty.isError())
+ !sema.fn_ret_ty.isError(mod))
{
sema.setupErrorReturnTrace(&inner_block, last_arg_index) catch |err| switch (err) {
// TODO make these unreachable instead of @panic
@@ -5995,25 +6025,11 @@ pub fn initNewAnonDecl(
// if the Decl is referenced by an instruction or another constant. Otherwise,
// the Decl will be garbage collected by the `codegen_decl` task instead of sent
// to the linker.
- if (typed_value.ty.isFnOrHasRuntimeBits()) {
+ if (typed_value.ty.isFnOrHasRuntimeBits(mod)) {
try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index });
}
}
-pub fn makeIntType(arena: Allocator, signedness: std.builtin.Signedness, bits: u16) !Type {
- const int_payload = try arena.create(Type.Payload.Bits);
- int_payload.* = .{
- .base = .{
- .tag = switch (signedness) {
- .signed => .int_signed,
- .unsigned => .int_unsigned,
- },
- },
- .data = bits,
- };
- return Type.initPayload(&int_payload.base);
-}
-
pub fn errNoteNonLazy(
mod: *Module,
src_loc: SrcLoc,
@@ -6779,3 +6795,204 @@ pub fn backendSupportsFeature(mod: Module, feature: Feature) bool {
.field_reordering => mod.comp.bin_file.options.use_llvm,
};
}
+
+/// Shortcut for calling `intern_pool.get`.
+pub fn intern(mod: *Module, key: InternPool.Key) Allocator.Error!InternPool.Index {
+ return mod.intern_pool.get(mod.gpa, key);
+}
+
+pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type {
+ const i = try intern(mod, .{ .int_type = .{
+ .signedness = signedness,
+ .bits = bits,
+ } });
+ return i.toType();
+}
+
+pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type {
+ return intType(mod, .unsigned, Type.smallestUnsignedBits(max));
+}
+
+/// Returns the smallest possible integer type containing both `min` and
+/// `max`. Asserts that neither value is undef.
+/// TODO: if #3806 is implemented, this becomes trivial
+pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type {
+ assert(!min.isUndef());
+ assert(!max.isUndef());
+
+ if (std.debug.runtime_safety) {
+ assert(Value.order(min, max, mod).compare(.lte));
+ }
+
+ const sign = min.orderAgainstZero(mod) == .lt;
+
+ const min_val_bits = intBitsForValue(mod, min, sign);
+ const max_val_bits = intBitsForValue(mod, max, sign);
+
+ return mod.intType(
+ if (sign) .signed else .unsigned,
+ @max(min_val_bits, max_val_bits),
+ );
+}
+
+/// Given a value representing an integer, returns the number of bits necessary to represent
+/// this value in an integer. If `sign` is true, returns the number of bits necessary in a
+/// twos-complement integer; otherwise in an unsigned integer.
+/// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true.
+pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
+ assert(!val.isUndef());
+ switch (val.tag()) {
+ .int_big_positive => {
+ const limbs = val.castTag(.int_big_positive).?.data;
+ const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = true };
+ return @intCast(u16, big.bitCountAbs() + @boolToInt(sign));
+ },
+ .int_big_negative => {
+ const limbs = val.castTag(.int_big_negative).?.data;
+ // Zero is still a possibility, in which case unsigned is fine
+ for (limbs) |limb| {
+ if (limb != 0) break;
+ } else return 0; // val == 0
+ assert(sign);
+ const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = false };
+ return @intCast(u16, big.bitCountTwosComp());
+ },
+ .int_i64 => {
+ const x = val.castTag(.int_i64).?.data;
+ if (x >= 0) return Type.smallestUnsignedBits(@intCast(u64, x));
+ assert(sign);
+ return Type.smallestUnsignedBits(@intCast(u64, -x - 1)) + 1;
+ },
+ else => {
+ const x = val.toUnsignedInt(mod);
+ return Type.smallestUnsignedBits(x) + @boolToInt(sign);
+ },
+ }
+}
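+
+Worked example for intFittingRange and intBitsForValue: each bound contributes a bit count (two's-complement when either bound is negative), and the wider of the two wins. For the range [-3, 7], -3 needs 3 bits and 7 needs 4 once a sign bit is added, so the result is i4. A standalone sketch of the negative-value arithmetic used by the .int_i64 branch above (helper names are made up):
+
+const std = @import("std");
+
+fn smallestUnsignedBits(max: u64) u16 {
+    if (max == 0) return 0;
+    return @intCast(u16, std.math.log2_int(u64, max) + 1);
+}
+
+/// Bits needed to hold a negative value in a two's-complement integer.
+fn bitsForNegative(x: i64) u16 {
+    return smallestUnsignedBits(@intCast(u64, -x - 1)) + 1;
+}
+
+test "two's-complement bit counts" {
+    try std.testing.expectEqual(@as(u16, 1), bitsForNegative(-1)); // i1 holds -1
+    try std.testing.expectEqual(@as(u16, 8), bitsForNegative(-128)); // i8 holds -128
+    try std.testing.expectEqual(@as(u16, 9), bitsForNegative(-129)); // needs i9
+    try std.testing.expectEqual(@as(u16, 4), smallestUnsignedBits(7) + 1); // 7 as signed needs i4
+}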
+
+pub const AtomicPtrAlignmentError = error{
+ FloatTooBig,
+ IntTooBig,
+ BadType,
+};
+
+pub const AtomicPtrAlignmentDiagnostics = struct {
+ bits: u16 = undefined,
+ max_bits: u16 = undefined,
+};
+
+/// If ABI alignment of `ty` is OK for atomic operations, returns 0.
+/// Otherwise returns the alignment required on a pointer for the target
+/// to perform atomic operations.
+// TODO this function does not take into account CPU features, which can affect
+// this value. Audit this!
+pub fn atomicPtrAlignment(
+ mod: *const Module,
+ ty: Type,
+ diags: *AtomicPtrAlignmentDiagnostics,
+) AtomicPtrAlignmentError!u32 {
+ const target = mod.getTarget();
+ const max_atomic_bits: u16 = switch (target.cpu.arch) {
+ .avr,
+ .msp430,
+ .spu_2,
+ => 16,
+
+ .arc,
+ .arm,
+ .armeb,
+ .hexagon,
+ .m68k,
+ .le32,
+ .mips,
+ .mipsel,
+ .nvptx,
+ .powerpc,
+ .powerpcle,
+ .r600,
+ .riscv32,
+ .sparc,
+ .sparcel,
+ .tce,
+ .tcele,
+ .thumb,
+ .thumbeb,
+ .x86,
+ .xcore,
+ .amdil,
+ .hsail,
+ .spir,
+ .kalimba,
+ .lanai,
+ .shave,
+ .wasm32,
+ .renderscript32,
+ .csky,
+ .spirv32,
+ .dxil,
+ .loongarch32,
+ .xtensa,
+ => 32,
+
+ .amdgcn,
+ .bpfel,
+ .bpfeb,
+ .le64,
+ .mips64,
+ .mips64el,
+ .nvptx64,
+ .powerpc64,
+ .powerpc64le,
+ .riscv64,
+ .sparc64,
+ .s390x,
+ .amdil64,
+ .hsail64,
+ .spir64,
+ .wasm64,
+ .renderscript64,
+ .ve,
+ .spirv64,
+ .loongarch64,
+ => 64,
+
+ .aarch64,
+ .aarch64_be,
+ .aarch64_32,
+ => 128,
+
+ .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64,
+ };
+
+ const int_ty = switch (ty.zigTypeTag(mod)) {
+ .Int => ty,
+ .Enum => ty.intTagType(),
+ .Float => {
+ const bit_count = ty.floatBits(target);
+ if (bit_count > max_atomic_bits) {
+ diags.* = .{
+ .bits = bit_count,
+ .max_bits = max_atomic_bits,
+ };
+ return error.FloatTooBig;
+ }
+ return 0;
+ },
+ .Bool => return 0,
+ else => {
+ if (ty.isPtrAtRuntime(mod)) return 0;
+ return error.BadType;
+ },
+ };
+
+ const bit_count = int_ty.intInfo(mod).bits;
+ if (bit_count > max_atomic_bits) {
+ diags.* = .{
+ .bits = bit_count,
+ .max_bits = max_atomic_bits,
+ };
+ return error.IntTooBig;
+ }
+
+ return 0;
+}
src/print_air.zig
@@ -7,6 +7,7 @@ const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const Air = @import("Air.zig");
const Liveness = @import("Liveness.zig");
+const InternPool = @import("InternPool.zig");
pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) void {
const instruction_bytes = air.instructions.len *
@@ -965,14 +966,13 @@ const Writer = struct {
operand: Air.Inst.Ref,
dies: bool,
) @TypeOf(s).Error!void {
- var i: usize = @enumToInt(operand);
+ const i = @enumToInt(operand);
- if (i < Air.Inst.Ref.typed_value_map.len) {
+ if (i < InternPool.static_len) {
return s.print("@{}", .{operand});
}
- i -= Air.Inst.Ref.typed_value_map.len;
- return w.writeInstIndex(s, @intCast(Air.Inst.Index, i), dies);
+ return w.writeInstIndex(s, i - InternPool.static_len, dies);
}
fn writeInstIndex(
src/print_zir.zig
@@ -3,6 +3,7 @@ const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const Ast = std.zig.Ast;
+const InternPool = @import("InternPool.zig");
const Zir = @import("Zir.zig");
const Module = @import("Module.zig");
@@ -2468,14 +2469,9 @@ const Writer = struct {
}
fn writeInstRef(self: *Writer, stream: anytype, ref: Zir.Inst.Ref) !void {
- var i: usize = @enumToInt(ref);
-
- if (i < Zir.Inst.Ref.typed_value_map.len) {
- return stream.print("@{}", .{ref});
- }
- i -= Zir.Inst.Ref.typed_value_map.len;
-
- return self.writeInstIndex(stream, @intCast(Zir.Inst.Index, i));
+ const i = @enumToInt(ref);
+ if (i < InternPool.static_len) return stream.print("@{}", .{@intToEnum(InternPool.Index, i)});
+ return self.writeInstIndex(stream, i - InternPool.static_len);
}
fn writeInstIndex(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
src/RangeSet.zig
@@ -60,13 +60,14 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
if (self.ranges.items.len == 0)
return false;
+ const mod = self.module;
std.mem.sort(Range, self.ranges.items, LessThanContext{
.ty = ty,
- .module = self.module,
+ .module = mod,
}, lessThan);
- if (!self.ranges.items[0].first.eql(first, ty, self.module) or
- !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, self.module))
+ if (!self.ranges.items[0].first.eql(first, ty, mod) or
+ !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, mod))
{
return false;
}
@@ -76,18 +77,16 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
var counter = try std.math.big.int.Managed.init(self.ranges.allocator);
defer counter.deinit();
- const target = self.module.getTarget();
-
// look for gaps
for (self.ranges.items[1..], 0..) |cur, i| {
// i starts counting from the second item.
const prev = self.ranges.items[i];
// prev.last + 1 == cur.first
- try counter.copy(prev.last.toBigInt(&space, target));
+ try counter.copy(prev.last.toBigInt(&space, mod));
try counter.addScalar(&counter, 1);
- const cur_start_int = cur.first.toBigInt(&space, target);
+ const cur_start_int = cur.first.toBigInt(&space, mod);
if (!cur_start_int.eq(counter.toConst())) {
return false;
}
src/Sema.zig
@@ -114,6 +114,7 @@ const Package = @import("Package.zig");
const crash_report = @import("crash_report.zig");
const build_options = @import("build_options");
const Compilation = @import("Compilation.zig");
+const InternPool = @import("InternPool.zig");
pub const default_branch_quota = 1000;
pub const default_reference_trace_len = 2;
@@ -1614,6 +1615,7 @@ fn analyzeBodyInner(
},
.@"try" => blk: {
if (!block.is_comptime) break :blk try sema.zirTry(block, inst);
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
@@ -1621,18 +1623,18 @@ fn analyzeBodyInner(
const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len];
const err_union = try sema.resolveInst(extra.data.operand);
const err_union_ty = sema.typeOf(err_union);
- if (err_union_ty.zigTypeTag() != .ErrorUnion) {
+ if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, operand_src, "expected error union type, found '{}'", .{
err_union_ty.fmt(sema.mod),
});
}
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
assert(is_non_err != .none);
- const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| {
+ const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| {
if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
return err;
};
- if (is_non_err_tv.val.toBool()) {
+ if (is_non_err_val.toBool()) {
break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false);
}
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
@@ -1654,11 +1656,11 @@ fn analyzeBodyInner(
const err_union = try sema.analyzeLoad(block, src, operand, operand_src);
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
assert(is_non_err != .none);
- const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| {
+ const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| {
if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err);
return err;
};
- if (is_non_err_tv.val.toBool()) {
+ if (is_non_err_val.toBool()) {
break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false);
}
const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse
@@ -1721,17 +1723,12 @@ fn analyzeBodyInner(
}
pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref {
- var i: usize = @enumToInt(zir_ref);
-
+ const i = @enumToInt(zir_ref);
// First section of indexes correspond to a set number of constant values.
- if (i < Zir.Inst.Ref.typed_value_map.len) {
- // We intentionally map the same indexes to the same values between ZIR and AIR.
- return zir_ref;
- }
- i -= Zir.Inst.Ref.typed_value_map.len;
-
- // Finally, the last section of indexes refers to the map of ZIR=>AIR.
- const inst = sema.inst_map.get(@intCast(u32, i)).?;
+ // We intentionally map the same indexes to the same values between ZIR and AIR.
+ if (i < InternPool.static_len) return @intToEnum(Air.Inst.Ref, i);
+ // The last section of indexes refers to the map of ZIR => AIR.
+ const inst = sema.inst_map.get(i - InternPool.static_len).?;
const ty = sema.typeOf(inst);
if (ty.tag() == .generic_poison) return error.GenericPoison;
return inst;
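
resolveInst, print_zir.writeInstRef, and print_air.writeOperand now rely on the same split, guaranteed by the comptime block added to Module.zig: ref values below InternPool.static_len name pre-interned constants shared by ZIR, AIR, and the InternPool, and everything above is an instruction index offset by that amount. A standalone sketch of the arithmetic (the constant below is a placeholder; the real value is InternPool.static_keys.len):

const std = @import("std");

const static_len: u32 = 100; // placeholder, not the real count

const Resolved = union(enum) {
    interned_constant: u32, // index of a statically known InternPool item
    instruction: u32, // index into the function's instruction list
};

fn resolveRef(ref: u32) Resolved {
    if (ref < static_len) return .{ .interned_constant = ref };
    return .{ .instruction = ref - static_len };
}

test {
    const a = resolveRef(5);
    try std.testing.expect(a == .interned_constant and a.interned_constant == 5);
    const b = resolveRef(static_len + 3);
    try std.testing.expect(b == .instruction and b.instruction == 3);
}
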
@@ -1766,9 +1763,8 @@ pub fn resolveConstString(
}
pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type {
- assert(zir_ref != .var_args_param);
const air_inst = try sema.resolveInst(zir_ref);
- assert(air_inst != .var_args_param);
+ assert(air_inst != .var_args_param_type);
const ty = try sema.analyzeAsType(block, src, air_inst);
if (ty.tag() == .generic_poison) return error.GenericPoison;
return ty;
@@ -1783,8 +1779,7 @@ fn analyzeAsType(
const wanted_type = Type.initTag(.type);
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstValue(block, src, coerced_inst, "types must be comptime-known");
- var buffer: Value.ToTypeBuffer = undefined;
- const ty = val.toType(&buffer);
+ const ty = val.toType();
return ty.copy(sema.arena);
}
@@ -1950,12 +1945,12 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime(
make_runtime: *bool,
) CompileError!?Value {
// First section of indexes correspond to a set number of constant values.
- var i: usize = @enumToInt(inst);
- if (i < Air.Inst.Ref.typed_value_map.len) {
- return Air.Inst.Ref.typed_value_map[i].val;
+ const int = @enumToInt(inst);
+ if (int < InternPool.static_len) {
+ return @intToEnum(InternPool.Index, int).toValue();
}
- i -= Air.Inst.Ref.typed_value_map.len;
+ const i = int - InternPool.static_len;
const air_tags = sema.air_instructions.items(.tag);
if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| {
if (air_tags[i] == .constant) {
@@ -2010,13 +2005,14 @@ fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, opt
}
fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError {
+ const mod = sema.mod;
const msg = msg: {
const msg = try sema.errMsg(block, src, "type '{}' does not support array initialization syntax", .{
- ty.fmt(sema.mod),
+ ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
if (ty.isSlice()) {
- try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2().fmt(sema.mod)});
+ try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(mod).fmt(mod)});
}
break :msg msg;
};
@@ -2042,7 +2038,8 @@ fn failWithErrorSetCodeMissing(
}
fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: Type, val: Value, vector_index: usize) CompileError {
- if (int_ty.zigTypeTag() == .Vector) {
+ const mod = sema.mod;
+ if (int_ty.zigTypeTag(mod) == .Vector) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "overflow of vector type '{}' with value '{}'", .{
int_ty.fmt(sema.mod), val.fmtValue(int_ty, sema.mod),
@@ -2084,12 +2081,13 @@ fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError
}
fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, object_ty: Type, field_name: []const u8) CompileError {
+ const mod = sema.mod;
const inner_ty = if (object_ty.isSinglePointer()) object_ty.childType() else object_ty;
- if (inner_ty.zigTypeTag() == .Optional) opt: {
+ if (inner_ty.zigTypeTag(mod) == .Optional) opt: {
var buf: Type.Payload.ElemType = undefined;
const child_ty = inner_ty.optionalChild(&buf);
- if (!typeSupportsFieldAccess(child_ty, field_name)) break :opt;
+ if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt;
const msg = msg: {
const msg = try sema.errMsg(block, src, "optional type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
@@ -2097,9 +2095,9 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
- } else if (inner_ty.zigTypeTag() == .ErrorUnion) err: {
+ } else if (inner_ty.zigTypeTag(mod) == .ErrorUnion) err: {
const child_ty = inner_ty.errorUnionPayload();
- if (!typeSupportsFieldAccess(child_ty, field_name)) break :err;
+ if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :err;
const msg = msg: {
const msg = try sema.errMsg(block, src, "error union type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
@@ -2111,14 +2109,14 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec
return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(sema.mod)});
}
-fn typeSupportsFieldAccess(ty: Type, field_name: []const u8) bool {
- switch (ty.zigTypeTag()) {
+fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: []const u8) bool {
+ switch (ty.zigTypeTag(mod)) {
.Array => return mem.eql(u8, field_name, "len"),
.Pointer => {
const ptr_info = ty.ptrInfo().data;
if (ptr_info.size == .Slice) {
return mem.eql(u8, field_name, "ptr") or mem.eql(u8, field_name, "len");
- } else if (ptr_info.pointee_type.zigTypeTag() == .Array) {
+ } else if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) {
return mem.eql(u8, field_name, "len");
} else return false;
},
@@ -2352,10 +2350,10 @@ fn analyzeAsInt(
dest_ty: Type,
reason: []const u8,
) !u64 {
+ const mod = sema.mod;
const coerced = try sema.coerce(block, dest_ty, air_ref, src);
const val = try sema.resolveConstValue(block, src, coerced, reason);
- const target = sema.mod.getTarget();
- return (try val.getUnsignedIntAdvanced(target, sema)).?;
+ return (try val.getUnsignedIntAdvanced(mod, sema)).?;
}
// Returns a compile error if the value has tag `variable`. See `resolveInstValue` for
@@ -2926,23 +2924,23 @@ fn zirEnumDecl(
if (tag_type_ref != .none) {
const ty = try sema.resolveType(block, tag_ty_src, tag_type_ref);
- if (ty.zigTypeTag() != .Int and ty.zigTypeTag() != .ComptimeInt) {
+ if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) {
return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)});
}
enum_obj.tag_ty = try ty.copy(decl_arena_allocator);
enum_obj.tag_ty_inferred = false;
} else if (fields_len == 0) {
- enum_obj.tag_ty = try Type.Tag.int_unsigned.create(decl_arena_allocator, 0);
+ enum_obj.tag_ty = try mod.intType(.unsigned, 0);
enum_obj.tag_ty_inferred = true;
} else {
const bits = std.math.log2_int_ceil(usize, fields_len);
- enum_obj.tag_ty = try Type.Tag.int_unsigned.create(decl_arena_allocator, bits);
+ enum_obj.tag_ty = try mod.intType(.unsigned, bits);
enum_obj.tag_ty_inferred = true;
}
}
- if (small.nonexhaustive and enum_obj.tag_ty.zigTypeTag() != .ComptimeInt) {
- if (fields_len > 1 and std.math.log2_int(u64, fields_len) == enum_obj.tag_ty.bitSize(sema.mod.getTarget())) {
+ if (small.nonexhaustive and enum_obj.tag_ty.zigTypeTag(mod) != .ComptimeInt) {
+ if (fields_len > 1 and std.math.log2_int(u64, fields_len) == enum_obj.tag_ty.bitSize(mod)) {
return sema.fail(block, src, "non-exhaustive enum specifies every value", .{});
}
}
@@ -3319,7 +3317,8 @@ fn ensureResultUsed(
ty: Type,
src: LazySrcLoc,
) CompileError!void {
- switch (ty.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (ty.zigTypeTag(mod)) {
.Void, .NoReturn => return,
.ErrorSet, .ErrorUnion => {
const msg = msg: {
@@ -3347,11 +3346,12 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand = try sema.resolveInst(inst_data.operand);
const src = inst_data.src();
const operand_ty = sema.typeOf(operand);
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.ErrorSet, .ErrorUnion => {
const msg = msg: {
const msg = try sema.errMsg(block, src, "error is discarded", .{});
@@ -3369,16 +3369,17 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
- const err_union_ty = if (operand_ty.zigTypeTag() == .Pointer)
+ const err_union_ty = if (operand_ty.zigTypeTag(mod) == .Pointer)
operand_ty.childType()
else
operand_ty;
- if (err_union_ty.zigTypeTag() != .ErrorUnion) return;
- const payload_ty = err_union_ty.errorUnionPayload().zigTypeTag();
+ if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return;
+ const payload_ty = err_union_ty.errorUnionPayload().zigTypeTag(mod);
if (payload_ty != .Void and payload_ty != .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "error union payload is ignored", .{});
@@ -3920,19 +3921,20 @@ fn zirArrayBasePtr(
block: *Block,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const start_ptr = try sema.resolveInst(inst_data.operand);
var base_ptr = start_ptr;
- while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag()) {
+ while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) {
.ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true),
.Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true),
else => break,
};
const elem_ty = sema.typeOf(base_ptr).childType();
- switch (elem_ty.zigTypeTag()) {
+ switch (elem_ty.zigTypeTag(mod)) {
.Array, .Vector => return base_ptr,
.Struct => if (elem_ty.isTuple()) {
// TODO validate element count
@@ -3948,19 +3950,20 @@ fn zirFieldBasePtr(
block: *Block,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const start_ptr = try sema.resolveInst(inst_data.operand);
var base_ptr = start_ptr;
- while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag()) {
+ while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) {
.ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true),
.Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true),
else => break,
};
const elem_ty = sema.typeOf(base_ptr).childType();
- switch (elem_ty.zigTypeTag()) {
+ switch (elem_ty.zigTypeTag(mod)) {
.Struct, .Union => return base_ptr,
else => {},
}
@@ -3968,6 +3971,7 @@ fn zirFieldBasePtr(
}
fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
@@ -3991,7 +3995,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const object_ty = sema.typeOf(object);
// Each arg could be an indexable, or a range, in which case the length
// is passed directly as an integer.
- const is_int = switch (object_ty.zigTypeTag()) {
+ const is_int = switch (object_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt => true,
else => false,
};
@@ -4000,7 +4004,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.input_index = i,
} };
const arg_len_uncoerced = if (is_int) object else l: {
- if (!object_ty.isIndexable()) {
+ if (!object_ty.isIndexable(mod)) {
// Instead of using checkIndexable we customize this error.
const msg = msg: {
const msg = try sema.errMsg(block, arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(sema.mod)});
@@ -4010,7 +4014,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
};
return sema.failWithOwnedErrorMsg(msg);
}
- if (!object_ty.indexableHasLen()) continue;
+ if (!object_ty.indexableHasLen(mod)) continue;
break :l try sema.fieldVal(block, arg_src, object, "len", arg_src);
};
@@ -4061,7 +4065,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const object_ty = sema.typeOf(object);
// Each arg could be an indexable, or a range, in which case the length
// is passed directly as an integer.
- switch (object_ty.zigTypeTag()) {
+ switch (object_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt => continue,
else => {},
}
@@ -4096,13 +4100,14 @@ fn validateArrayInitTy(
block: *Block,
inst: Zir.Inst.Index,
) CompileError!void {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const ty_src: LazySrcLoc = .{ .node_offset_init_ty = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.ArrayInit, inst_data.payload_index).data;
const ty = try sema.resolveType(block, ty_src, extra.ty);
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Array => {
const array_len = ty.arrayLen();
if (extra.init_count != array_len) {
@@ -4141,11 +4146,12 @@ fn validateStructInitTy(
block: *Block,
inst: Zir.Inst.Index,
) CompileError!void {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const ty = try sema.resolveType(block, src, inst_data.operand);
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Struct, .Union => return,
else => {},
}
@@ -4160,6 +4166,7 @@ fn zirValidateStructInit(
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const validate_inst = sema.code.instructions.items(.data)[inst].pl_node;
const init_src = validate_inst.src();
const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
@@ -4168,7 +4175,7 @@ fn zirValidateStructInit(
const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
const object_ptr = try sema.resolveInst(field_ptr_extra.lhs);
const agg_ty = sema.typeOf(object_ptr).childType();
- switch (agg_ty.zigTypeTag()) {
+ switch (agg_ty.zigTypeTag(mod)) {
.Struct => return sema.validateStructInit(
block,
agg_ty,
@@ -4589,6 +4596,7 @@ fn zirValidateArrayInit(
block: *Block,
inst: Zir.Inst.Index,
) CompileError!void {
+ const mod = sema.mod;
const validate_inst = sema.code.instructions.items(.data)[inst].pl_node;
const init_src = validate_inst.src();
const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index);
@@ -4599,7 +4607,7 @@ fn zirValidateArrayInit(
const array_ty = sema.typeOf(array_ptr).childType();
const array_len = array_ty.arrayLen();
- if (instrs.len != array_len) switch (array_ty.zigTypeTag()) {
+ if (instrs.len != array_len) switch (array_ty.zigTypeTag(mod)) {
.Struct => {
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
@@ -4667,7 +4675,7 @@ fn zirValidateArrayInit(
// Determine whether the value stored to this pointer is comptime-known.
if (array_ty.isTuple()) {
- if (array_ty.structFieldValueComptime(i)) |opv| {
+ if (array_ty.structFieldValueComptime(mod, i)) |opv| {
element_vals[i] = opv;
continue;
}
@@ -4770,12 +4778,13 @@ fn zirValidateArrayInit(
}
fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
- if (operand_ty.zigTypeTag() != .Pointer) {
+ if (operand_ty.zigTypeTag(mod) != .Pointer) {
return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(sema.mod)});
} else switch (operand_ty.ptrSize()) {
.One, .C => {},
@@ -4788,7 +4797,7 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
return;
}
- const elem_ty = operand_ty.elemType2();
+ const elem_ty = operand_ty.elemType2(mod);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef()) {
return sema.fail(block, src, "cannot dereference undefined value", .{});
@@ -4818,7 +4827,8 @@ fn failWithBadMemberAccess(
field_src: LazySrcLoc,
field_name: []const u8,
) CompileError {
- const kw_name = switch (agg_ty.zigTypeTag()) {
+ const mod = sema.mod;
+ const kw_name = switch (agg_ty.zigTypeTag(mod)) {
.Union => "union",
.Struct => "struct",
.Opaque => "opaque",
@@ -4894,8 +4904,9 @@ fn failWithBadUnionFieldAccess(
}
fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void {
- const src_loc = decl_ty.declSrcLocOrNull(sema.mod) orelse return;
- const category = switch (decl_ty.zigTypeTag()) {
+ const mod = sema.mod;
+ const src_loc = decl_ty.declSrcLocOrNull(mod) orelse return;
+ const category = switch (decl_ty.zigTypeTag(mod)) {
.Union => "union",
.Struct => "struct",
.Enum => "enum",
@@ -4903,7 +4914,7 @@ fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !vo
.ErrorSet => "error set",
else => unreachable,
};
- try sema.mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category});
+ try mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category});
}
fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
@@ -5028,6 +5039,7 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const zir_tags = sema.code.instructions.items(.tag);
const zir_datas = sema.code.instructions.items(.data);
const inst_data = zir_datas[inst].pl_node;
@@ -5046,9 +5058,9 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v
// %b = store(%a, %c)
// Where %c is an error union or error set. In such case we need to add
// to the current function's inferred error set, if any.
- if (is_ret and (sema.typeOf(operand).zigTypeTag() == .ErrorUnion or
- sema.typeOf(operand).zigTypeTag() == .ErrorSet) and
- sema.fn_ret_ty.zigTypeTag() == .ErrorUnion)
+ if (is_ret and (sema.typeOf(operand).zigTypeTag(mod) == .ErrorUnion or
+ sema.typeOf(operand).zigTypeTag(mod) == .ErrorSet) and
+ sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion)
{
try sema.addToInferredErrorSet(operand);
}
@@ -6270,6 +6282,7 @@ fn zirCall(
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const callee_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node };
const call_src = inst_data.src();
@@ -6342,7 +6355,7 @@ fn zirCall(
sema.inst_map.putAssumeCapacity(inst, inst: {
if (arg_index >= fn_params_len)
- break :inst Air.Inst.Ref.var_args_param;
+ break :inst Air.Inst.Ref.var_args_param_type;
if (func_ty_info.param_types[arg_index].tag() == .generic_poison)
break :inst Air.Inst.Ref.generic_poison_type;
@@ -6352,10 +6365,10 @@ fn zirCall(
const resolved = try sema.resolveBody(block, args_body[arg_start..arg_end], inst);
const resolved_ty = sema.typeOf(resolved);
- if (resolved_ty.zigTypeTag() == .NoReturn) {
+ if (resolved_ty.zigTypeTag(mod) == .NoReturn) {
return resolved;
}
- if (resolved_ty.isError()) {
+ if (resolved_ty.isError(mod)) {
input_is_error = true;
}
resolved_args[arg_index] = resolved;
@@ -6380,7 +6393,7 @@ fn zirCall(
// If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only
// need to clean-up our own trace if we were passed to a non-error-handling expression.
- if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError())) {
+ if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError(mod))) {
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
const field_index = try sema.structFieldIndex(block, stack_trace_ty, "index", call_src);
@@ -6417,20 +6430,21 @@ fn checkCallArgumentCount(
total_args: usize,
member_fn: bool,
) !Type {
+ const mod = sema.mod;
const func_ty = func_ty: {
- switch (callee_ty.zigTypeTag()) {
+ switch (callee_ty.zigTypeTag(mod)) {
.Fn => break :func_ty callee_ty,
.Pointer => {
const ptr_info = callee_ty.ptrInfo().data;
- if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) {
+ if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) {
break :func_ty ptr_info.pointee_type;
}
},
.Optional => {
var buf: Type.Payload.ElemType = undefined;
const opt_child = callee_ty.optionalChild(&buf);
- if (opt_child.zigTypeTag() == .Fn or (opt_child.isSinglePointer() and
- opt_child.childType().zigTypeTag() == .Fn))
+ if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer() and
+ opt_child.childType().zigTypeTag(mod) == .Fn))
{
const msg = msg: {
const msg = try sema.errMsg(block, func_src, "cannot call optional type '{}'", .{
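The optional branch above fires when user code calls an optional function, or an optional function pointer, without unwrapping it first. A hypothetical user-level snippet (names are illustrative, not from this commit) showing the case it diagnoses:

fn noop() void {}

test "optional function pointers must be unwrapped before calling" {
    const maybe_f: ?*const fn () void = &noop;
    // Calling `maybe_f()` directly would be rejected with a
    // "cannot call optional type ..." error produced by the branch above.
    if (maybe_f) |f| f();
}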
@@ -6488,13 +6502,14 @@ fn callBuiltin(
modifier: std.builtin.CallModifier,
args: []const Air.Inst.Ref,
) !void {
+ const mod = sema.mod;
const callee_ty = sema.typeOf(builtin_fn);
const func_ty = func_ty: {
- switch (callee_ty.zigTypeTag()) {
+ switch (callee_ty.zigTypeTag(mod)) {
.Fn => break :func_ty callee_ty,
.Pointer => {
const ptr_info = callee_ty.ptrInfo().data;
- if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) {
+ if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) {
break :func_ty ptr_info.pointee_type;
}
},
@@ -6715,7 +6730,7 @@ fn analyzeCall(
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
}),
else => {
- assert(callee_ty.isPtrAtRuntime());
+ assert(callee_ty.isPtrAtRuntime(mod));
return sema.fail(block, call_src, "{s} call of function pointer", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
});
@@ -6978,7 +6993,7 @@ fn analyzeCall(
break :result try sema.analyzeBlockBody(block, call_src, &child_block, merges);
};
- if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag() != .NoReturn) {
+ if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag(mod) != .NoReturn) {
try sema.emitDbgInline(
block,
module_fn,
@@ -7068,7 +7083,7 @@ fn analyzeCall(
if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
try sema.queueFullTypeResolution(func_ty_info.return_type);
- if (sema.owner_func != null and func_ty_info.return_type.isError()) {
+ if (sema.owner_func != null and func_ty_info.return_type.isError(mod)) {
sema.owner_func.?.calls_or_awaits_errorable_fn = true;
}
@@ -7301,8 +7316,9 @@ fn analyzeGenericCallArg(
new_fn_info: Type.Payload.Function.Data,
runtime_i: *u32,
) !void {
+ const mod = sema.mod;
const is_runtime = comptime_arg.val.tag() == .generic_poison and
- comptime_arg.ty.hasRuntimeBits() and
+ comptime_arg.ty.hasRuntimeBits(mod) and
!(try sema.typeRequiresComptime(comptime_arg.ty));
if (is_runtime) {
const param_ty = new_fn_info.param_types[runtime_i.*];
@@ -7591,7 +7607,7 @@ fn instantiateGenericCall(
if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
- if (sema.owner_func != null and new_fn_info.return_type.isError()) {
+ if (sema.owner_func != null and new_fn_info.return_type.isError(mod)) {
sema.owner_func.?.calls_or_awaits_errorable_fn = true;
}
@@ -7872,8 +7888,9 @@ fn zirIntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const int_type = sema.code.instructions.items(.data)[inst].int_type;
- const ty = try Module.makeIntType(sema.arena, int_type.signedness, int_type.bit_count);
+ const ty = try mod.intType(int_type.signedness, int_type.bit_count);
return sema.addType(ty);
}
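The two-line change above is representative of the mechanical edit applied throughout this file: type queries that previously took a `std.Target` (or built types in the Sema arena) now go through the Module, which also creates integer types via `mod.intType`. A minimal sketch of the before/after inside a Sema function, with `ty` standing in for some integer `Type` (hypothetical local names):

const mod = sema.mod;
// Before (old signatures, as seen in the removed lines elsewhere in this diff):
//     const target = sema.mod.getTarget();
//     const bits = ty.intInfo(target).bits;
//     const wide_ty = try Type.Tag.int_unsigned.create(sema.arena, bits * 2);
// After: the Module answers the query and hands back the new integer type.
const bits = ty.intInfo(mod).bits;
const wide_ty = try mod.intType(.unsigned, bits * 2);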
@@ -7882,12 +7899,13 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };
const child_type = try sema.resolveType(block, operand_src, inst_data.operand);
- if (child_type.zigTypeTag() == .Opaque) {
+ if (child_type.zigTypeTag(mod) == .Opaque) {
return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(sema.mod)});
- } else if (child_type.zigTypeTag() == .Null) {
+ } else if (child_type.zigTypeTag(mod) == .Null) {
return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(sema.mod)});
}
const opt_type = try Type.optional(sema.arena, child_type);
@@ -7896,14 +7914,15 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
}
fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const bin = sema.code.instructions.items(.data)[inst].bin;
const indexable_ty = try sema.resolveType(block, .unneeded, bin.lhs);
- assert(indexable_ty.isIndexable()); // validated by a previous instruction
- if (indexable_ty.zigTypeTag() == .Struct) {
+ assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction
+ if (indexable_ty.zigTypeTag(mod) == .Struct) {
const elem_type = indexable_ty.structFieldType(@enumToInt(bin.rhs));
return sema.addType(elem_type);
} else {
- const elem_type = indexable_ty.elemType2();
+ const elem_type = indexable_ty.elemType2(mod);
return sema.addType(elem_type);
}
}
@@ -7960,9 +7979,10 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
}
fn validateArrayElemType(sema: *Sema, block: *Block, elem_type: Type, elem_src: LazySrcLoc) !void {
- if (elem_type.zigTypeTag() == .Opaque) {
+ const mod = sema.mod;
+ if (elem_type.zigTypeTag(mod) == .Opaque) {
return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(sema.mod)});
- } else if (elem_type.zigTypeTag() == .NoReturn) {
+ } else if (elem_type.zigTypeTag(mod) == .NoReturn) {
return sema.fail(block, elem_src, "array of 'noreturn' not allowed", .{});
}
}
@@ -7986,6 +8006,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
@@ -7993,7 +8014,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
const error_set = try sema.resolveType(block, lhs_src, extra.lhs);
const payload = try sema.resolveType(block, rhs_src, extra.rhs);
- if (error_set.zigTypeTag() != .ErrorSet) {
+ if (error_set.zigTypeTag(mod) != .ErrorSet) {
return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{
error_set.fmt(sema.mod),
});
@@ -8004,11 +8025,12 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
}
fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, payload_src: LazySrcLoc) !void {
- if (payload_ty.zigTypeTag() == .Opaque) {
+ const mod = sema.mod;
+ if (payload_ty.zigTypeTag(mod) == .Opaque) {
return sema.fail(block, payload_src, "error union with payload of opaque type '{}' not allowed", .{
payload_ty.fmt(sema.mod),
});
- } else if (payload_ty.zigTypeTag() == .ErrorSet) {
+ } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) {
return sema.fail(block, payload_src, "error union with payload of error set type '{}' not allowed", .{
payload_ty.fmt(sema.mod),
});
@@ -8089,10 +8111,10 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const uncasted_operand = try sema.resolveInst(extra.operand);
const operand = try sema.coerce(block, Type.err_int, uncasted_operand, operand_src);
- const target = sema.mod.getTarget();
+ const mod = sema.mod;
if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| {
- const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(target));
+ const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(mod));
if (int > sema.mod.global_error_set.count() or int == 0)
return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int});
const payload = try sema.arena.create(Value.Payload.Error);
@@ -8123,6 +8145,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
@@ -8130,7 +8153,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
const lhs = try sema.resolveInst(extra.lhs);
const rhs = try sema.resolveInst(extra.rhs);
- if (sema.typeOf(lhs).zigTypeTag() == .Bool and sema.typeOf(rhs).zigTypeTag() == .Bool) {
+ if (sema.typeOf(lhs).zigTypeTag(mod) == .Bool and sema.typeOf(rhs).zigTypeTag(mod) == .Bool) {
const msg = msg: {
const msg = try sema.errMsg(block, lhs_src, "expected error set type, found 'bool'", .{});
errdefer msg.destroy(sema.gpa);
@@ -8141,9 +8164,9 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
}
const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs);
const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs);
- if (lhs_ty.zigTypeTag() != .ErrorSet)
+ if (lhs_ty.zigTypeTag(mod) != .ErrorSet)
return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(sema.mod)});
- if (rhs_ty.zigTypeTag() != .ErrorSet)
+ if (rhs_ty.zigTypeTag(mod) != .ErrorSet)
return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(sema.mod)});
// Anything merged with anyerror is anyerror.
@@ -8184,6 +8207,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const arena = sema.arena;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
@@ -8191,7 +8215,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
- const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag()) {
+ const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) {
.Enum => operand,
.Union => blk: {
const union_ty = try sema.resolveTypeFields(operand_ty);
@@ -8213,8 +8237,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
};
const enum_tag_ty = sema.typeOf(enum_tag);
- var int_tag_type_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = try enum_tag_ty.intTagType(&int_tag_type_buffer).copy(arena);
+ const int_tag_ty = try enum_tag_ty.intTagType().copy(arena);
if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| {
return sema.addConstant(int_tag_ty, opv);
@@ -8231,6 +8254,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
}
fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src = inst_data.src();
@@ -8239,15 +8263,14 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
- if (dest_ty.zigTypeTag() != .Enum) {
+ if (dest_ty.zigTypeTag(mod) != .Enum) {
return sema.fail(block, dest_ty_src, "expected enum, found '{}'", .{dest_ty.fmt(sema.mod)});
}
_ = try sema.checkIntType(block, operand_src, sema.typeOf(operand));
if (try sema.resolveMaybeUndefVal(operand)) |int_val| {
if (dest_ty.isNonexhaustiveEnum()) {
- var buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = dest_ty.intTagType(&buffer);
+ const int_tag_ty = dest_ty.intTagType();
if (try sema.intFitsInType(int_val, int_tag_ty, null)) {
return sema.addConstant(dest_ty, int_val);
}
@@ -8329,11 +8352,12 @@ fn analyzeOptionalPayloadPtr(
safety_check: bool,
initializing: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const optional_ptr_ty = sema.typeOf(optional_ptr);
- assert(optional_ptr_ty.zigTypeTag() == .Pointer);
+ assert(optional_ptr_ty.zigTypeTag(mod) == .Pointer);
const opt_type = optional_ptr_ty.elemType();
- if (opt_type.zigTypeTag() != .Optional) {
+ if (opt_type.zigTypeTag(mod) != .Optional) {
return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(sema.mod)});
}
@@ -8361,7 +8385,7 @@ fn analyzeOptionalPayloadPtr(
);
}
if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| {
- if (val.isNull()) {
+ if (val.isNull(mod)) {
return sema.fail(block, src, "unable to unwrap null", .{});
}
// The same Value represents the pointer to the optional and the payload.
@@ -8397,11 +8421,12 @@ fn zirOptionalPayload(
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
- const result_ty = switch (operand_ty.zigTypeTag()) {
+ const result_ty = switch (operand_ty.zigTypeTag(mod)) {
.Optional => try operand_ty.optionalChildAlloc(sema.arena),
.Pointer => t: {
if (operand_ty.ptrSize() != .C) {
@@ -8424,7 +8449,7 @@ fn zirOptionalPayload(
};
if (try sema.resolveDefinedValue(block, src, operand)) |val| {
- if (val.isNull()) {
+ if (val.isNull(mod)) {
return sema.fail(block, src, "unable to unwrap null", .{});
}
if (val.castTag(.opt_payload)) |payload| {
@@ -8450,12 +8475,13 @@ fn zirErrUnionPayload(
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
const operand_src = src;
const err_union_ty = sema.typeOf(operand);
- if (err_union_ty.zigTypeTag() != .ErrorUnion) {
+ if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, operand_src, "expected error union type, found '{}'", .{
err_union_ty.fmt(sema.mod),
});
@@ -8468,7 +8494,7 @@ fn analyzeErrUnionPayload(
block: *Block,
src: LazySrcLoc,
err_union_ty: Type,
- operand: Zir.Inst.Ref,
+ operand: Air.Inst.Ref,
operand_src: LazySrcLoc,
safety_check: bool,
) CompileError!Air.Inst.Ref {
@@ -8517,10 +8543,11 @@ fn analyzeErrUnionPayloadPtr(
safety_check: bool,
initializing: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
- assert(operand_ty.zigTypeTag() == .Pointer);
+ assert(operand_ty.zigTypeTag(mod) == .Pointer);
- if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) {
+ if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, src, "expected error union type, found '{}'", .{
operand_ty.elemType().fmt(sema.mod),
});
@@ -8594,8 +8621,9 @@ fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
}
fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
- if (operand_ty.zigTypeTag() != .ErrorUnion) {
+ if (operand_ty.zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, src, "expected error union type, found '{}'", .{
operand_ty.fmt(sema.mod),
});
@@ -8617,13 +8645,14 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
- assert(operand_ty.zigTypeTag() == .Pointer);
+ assert(operand_ty.zigTypeTag(mod) == .Pointer);
- if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) {
+ if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(block, src, "expected error union type, found '{}'", .{
operand_ty.elemType().fmt(sema.mod),
});
@@ -8677,8 +8706,7 @@ fn zirFunc(
extra_index += ret_ty_body.len;
const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type, "return type must be comptime-known");
- var buffer: Value.ToTypeBuffer = undefined;
- break :blk try ret_ty_val.toType(&buffer).copy(sema.arena);
+ break :blk try ret_ty_val.toType().copy(sema.arena);
},
};
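The `zirEnumToInt` and `zirFunc` hunks above share a second, smaller cleanup: `intTagType` and `Value.toType` no longer take a caller-provided payload buffer (`Type.Payload.Bits` / `Value.ToTypeBuffer`), consistent with types now being produced without caller-owned scratch storage. A sketch of the call-site change (variable names are illustrative):

// Before: the caller supplied scratch storage backing the result.
//     var bits_buf: Type.Payload.Bits = undefined;
//     const tag_ty = try enum_ty.intTagType(&bits_buf).copy(sema.arena);
//     var ty_buf: Value.ToTypeBuffer = undefined;
//     const ret_ty = try ty_val.toType(&ty_buf).copy(sema.arena);
// After: the buffers are gone; the copy into the Sema arena stays.
const tag_ty = try enum_ty.intTagType().copy(sema.arena);
const ret_ty = try ty_val.toType().copy(sema.arena);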
@@ -8849,6 +8877,7 @@ fn funcCommon(
noalias_bits: u32,
is_noinline: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset };
const func_src = LazySrcLoc.nodeOffset(src_node_offset);
@@ -8890,31 +8919,6 @@ fn funcCommon(
const target = sema.mod.getTarget();
const fn_ty: Type = fn_ty: {
- // Hot path for some common function types.
- // TODO can we eliminate some of these Type tag values? seems unnecessarily complicated.
- if (!is_generic and block.params.items.len == 0 and !var_args and !inferred_error_set and
- alignment.? == 0 and
- address_space.? == target_util.defaultAddressSpace(target, .function) and
- section == .default and
- !is_noinline)
- {
- if (bare_return_type.zigTypeTag() == .NoReturn and cc.? == .Unspecified) {
- break :fn_ty Type.initTag(.fn_noreturn_no_args);
- }
-
- if (bare_return_type.zigTypeTag() == .Void and cc.? == .Unspecified) {
- break :fn_ty Type.initTag(.fn_void_no_args);
- }
-
- if (bare_return_type.zigTypeTag() == .NoReturn and cc.? == .Naked) {
- break :fn_ty Type.initTag(.fn_naked_noreturn_no_args);
- }
-
- if (bare_return_type.zigTypeTag() == .Void and cc.? == .C) {
- break :fn_ty Type.initTag(.fn_ccc_void_no_args);
- }
- }
-
// In the case of generic calling convention, or generic alignment, we use
// default values which are only meaningful for the generic function, *not*
// the instantiation, which can depend on comptime parameters.
@@ -8985,8 +8989,8 @@ fn funcCommon(
});
};
- if (!return_type.isValidReturnType()) {
- const opaque_str = if (return_type.zigTypeTag() == .Opaque) "opaque " else "";
+ if (!return_type.isValidReturnType(mod)) {
+ const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else "";
const msg = msg: {
const msg = try sema.errMsg(block, ret_ty_src, "{s}return type '{}' not allowed", .{
opaque_str, return_type.fmt(sema.mod),
@@ -9201,22 +9205,23 @@ fn analyzeParameter(
has_body: bool,
is_noalias: bool,
) !void {
+ const mod = sema.mod;
const requires_comptime = try sema.typeRequiresComptime(param.ty);
comptime_params[i] = param.is_comptime or requires_comptime;
const this_generic = param.ty.tag() == .generic_poison;
is_generic.* = is_generic.* or this_generic;
- const target = sema.mod.getTarget();
+ const target = mod.getTarget();
if (param.is_comptime and !Type.fnCallingConventionAllowsZigTypes(target, cc)) {
return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
}
if (this_generic and !sema.no_partial_func_ty and !Type.fnCallingConventionAllowsZigTypes(target, cc)) {
return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
}
- if (!param.ty.isValidParamType()) {
- const opaque_str = if (param.ty.zigTypeTag() == .Opaque) "opaque " else "";
+ if (!param.ty.isValidParamType(mod)) {
+ const opaque_str = if (param.ty.zigTypeTag(mod) == .Opaque) "opaque " else "";
const msg = msg: {
const msg = try sema.errMsg(block, param_src, "parameter of {s}type '{}' not allowed", .{
- opaque_str, param.ty.fmt(sema.mod),
+ opaque_str, param.ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
@@ -9228,11 +9233,11 @@ fn analyzeParameter(
if (!this_generic and !Type.fnCallingConventionAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) {
const msg = msg: {
const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{
- param.ty.fmt(sema.mod), @tagName(cc),
+ param.ty.fmt(mod), @tagName(cc),
});
errdefer msg.destroy(sema.gpa);
- const src_decl = sema.mod.declPtr(block.src_decl);
+ const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl), param.ty, .param_ty);
try sema.addDeclaredHereNote(msg, param.ty);
@@ -9243,11 +9248,11 @@ fn analyzeParameter(
if (!sema.is_generic_instantiation and requires_comptime and !param.is_comptime and has_body) {
const msg = msg: {
const msg = try sema.errMsg(block, param_src, "parameter of type '{}' must be declared comptime", .{
- param.ty.fmt(sema.mod),
+ param.ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
- const src_decl = sema.mod.declPtr(block.src_decl);
+ const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl), param.ty);
try sema.addDeclaredHereNote(msg, param.ty);
@@ -9256,7 +9261,7 @@ fn analyzeParameter(
return sema.failWithOwnedErrorMsg(msg);
}
if (!sema.is_generic_instantiation and !this_generic and is_noalias and
- !(param.ty.zigTypeTag() == .Pointer or param.ty.isPtrLikeOptional()))
+ !(param.ty.zigTypeTag(mod) == .Pointer or param.ty.isPtrLikeOptional(mod)))
{
return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
}
@@ -9472,13 +9477,14 @@ fn analyzeAs(
zir_operand: Zir.Inst.Ref,
no_cast_to_comptime_int: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const operand = try sema.resolveInst(zir_operand);
- if (zir_dest_type == .var_args_param) return operand;
+ if (zir_dest_type == .var_args_param_type) return operand;
const dest_ty = sema.resolveType(block, src, zir_dest_type) catch |err| switch (err) {
error.GenericPoison => return operand,
else => |e| return e,
};
- if (dest_ty.zigTypeTag() == .NoReturn) {
+ if (dest_ty.zigTypeTag(mod) == .NoReturn) {
return sema.fail(block, src, "cannot cast to noreturn", .{});
}
const is_ret = if (Zir.refToIndex(zir_dest_type)) |ptr_index|
@@ -9495,11 +9501,12 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ptr = try sema.resolveInst(inst_data.operand);
const ptr_ty = sema.typeOf(ptr);
- if (!ptr_ty.isPtrAtRuntime()) {
+ if (!ptr_ty.isPtrAtRuntime(mod)) {
return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)});
}
if (try sema.resolveMaybeUndefValIntable(ptr)) |ptr_val| {
@@ -9586,25 +9593,25 @@ fn intCast(
operand_src: LazySrcLoc,
runtime_safety: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, dest_ty_src);
const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
if (try sema.isComptimeKnown(operand)) {
return sema.coerce(block, dest_ty, operand, operand_src);
- } else if (dest_scalar_ty.zigTypeTag() == .ComptimeInt) {
+ } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_int'", .{});
}
try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, dest_ty_src, operand_src);
- const is_vector = dest_ty.zigTypeTag() == .Vector;
+ const is_vector = dest_ty.zigTypeTag(mod) == .Vector;
if ((try sema.typeHasOnePossibleValue(dest_ty))) |opv| {
// requirement: intCast(u0, input) iff input == 0
if (runtime_safety and block.wantSafety()) {
try sema.requireRuntimeBlock(block, src, operand_src);
- const target = sema.mod.getTarget();
- const wanted_info = dest_scalar_ty.intInfo(target);
+ const wanted_info = dest_scalar_ty.intInfo(mod);
const wanted_bits = wanted_info.bits;
if (wanted_bits == 0) {
@@ -9631,9 +9638,8 @@ fn intCast(
try sema.requireRuntimeBlock(block, src, operand_src);
if (runtime_safety and block.wantSafety()) {
- const target = sema.mod.getTarget();
- const actual_info = operand_scalar_ty.intInfo(target);
- const wanted_info = dest_scalar_ty.intInfo(target);
+ const actual_info = operand_scalar_ty.intInfo(mod);
+ const wanted_info = dest_scalar_ty.intInfo(mod);
const actual_bits = actual_info.bits;
const wanted_bits = wanted_info.bits;
const actual_value_bits = actual_bits - @boolToInt(actual_info.signedness == .signed);
@@ -9642,7 +9648,7 @@ fn intCast(
// range shrinkage
// requirement: int value fits into target type
if (wanted_value_bits < actual_value_bits) {
- const dest_max_val_scalar = try dest_scalar_ty.maxInt(sema.arena, target);
+ const dest_max_val_scalar = try dest_scalar_ty.maxInt(sema.arena, mod);
const dest_max_val = if (is_vector)
try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar)
else
@@ -9653,7 +9659,7 @@ fn intCast(
if (actual_info.signedness == .signed) {
// Reinterpret the sign-bit as part of the value. This will make
// negative differences (`operand` > `dest_max`) appear too big.
- const unsigned_operand_ty = try Type.Tag.int_unsigned.create(sema.arena, actual_bits);
+ const unsigned_operand_ty = try mod.intType(.unsigned, actual_bits);
const diff_unsigned = try block.addBitCast(unsigned_operand_ty, diff);
// If the destination type is signed, then we need to double its
@@ -9727,6 +9733,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
@@ -9735,7 +9742,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);
- switch (dest_ty.zigTypeTag()) {
+ switch (dest_ty.zigTypeTag(mod)) {
.AnyFrame,
.ComptimeFloat,
.ComptimeInt,
@@ -9757,7 +9764,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const msg = msg: {
const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToEnum to cast from '{}'", .{operand_ty.fmt(sema.mod)}),
else => {},
}
@@ -9771,7 +9778,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const msg = msg: {
const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToPtr to cast from '{}'", .{operand_ty.fmt(sema.mod)}),
.Pointer => try sema.errNote(block, dest_ty_src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(sema.mod)}),
else => {},
@@ -9782,7 +9789,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
return sema.failWithOwnedErrorMsg(msg);
},
.Struct, .Union => if (dest_ty.containerLayout() == .Auto) {
- const container = switch (dest_ty.zigTypeTag()) {
+ const container = switch (dest_ty.zigTypeTag(mod)) {
.Struct => "struct",
.Union => "union",
else => unreachable,
@@ -9799,7 +9806,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
.Vector,
=> {},
}
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.AnyFrame,
.ComptimeFloat,
.ComptimeInt,
@@ -9821,7 +9828,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const msg = msg: {
const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
- switch (dest_ty.zigTypeTag()) {
+ switch (dest_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @enumToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}),
else => {},
}
@@ -9834,7 +9841,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const msg = msg: {
const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
- switch (dest_ty.zigTypeTag()) {
+ switch (dest_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @ptrToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}),
.Pointer => try sema.errNote(block, operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(sema.mod)}),
else => {},
@@ -9845,7 +9852,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
return sema.failWithOwnedErrorMsg(msg);
},
.Struct, .Union => if (operand_ty.containerLayout() == .Auto) {
- const container = switch (operand_ty.zigTypeTag()) {
+ const container = switch (operand_ty.zigTypeTag(mod)) {
.Struct => "struct",
.Union => "union",
else => unreachable,
@@ -9869,6 +9876,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
@@ -9878,7 +9886,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const operand = try sema.resolveInst(extra.rhs);
const target = sema.mod.getTarget();
- const dest_is_comptime_float = switch (dest_ty.zigTypeTag()) {
+ const dest_is_comptime_float = switch (dest_ty.zigTypeTag(mod)) {
.ComptimeFloat => true,
.Float => false,
else => return sema.fail(
@@ -9890,7 +9898,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
};
const operand_ty = sema.typeOf(operand);
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.ComptimeFloat, .Float, .ComptimeInt => {},
else => return sema.fail(
block,
@@ -9944,20 +9952,21 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array_ptr = try sema.resolveInst(extra.lhs);
const elem_index = try sema.resolveInst(extra.rhs);
const indexable_ty = sema.typeOf(array_ptr);
- if (indexable_ty.zigTypeTag() != .Pointer) {
+ if (indexable_ty.zigTypeTag(mod) != .Pointer) {
const capture_src: LazySrcLoc = .{ .for_capture_from_input = inst_data.src_node };
const msg = msg: {
const msg = try sema.errMsg(block, capture_src, "pointer capture of non pointer type '{}'", .{
indexable_ty.fmt(sema.mod),
});
errdefer msg.destroy(sema.gpa);
- if (indexable_ty.zigTypeTag() == .Array) {
+ if (indexable_ty.zigTypeTag(mod) == .Array) {
try sema.errNote(block, src, msg, "consider using '&' here", .{});
}
break :msg msg;
@@ -10076,6 +10085,7 @@ fn zirSwitchCapture(
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const zir_datas = sema.code.instructions.items(.data);
const capture_info = zir_datas[inst].switch_capture;
const switch_info = zir_datas[capture_info.switch_inst].pl_node;
@@ -10091,7 +10101,7 @@ fn zirSwitchCapture(
if (block.inline_case_capture != .none) {
const item_val = sema.resolveConstValue(block, .unneeded, block.inline_case_capture, undefined) catch unreachable;
- if (operand_ty.zigTypeTag() == .Union) {
+ if (operand_ty.zigTypeTag(mod) == .Union) {
const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(item_val, sema.mod).?);
const union_obj = operand_ty.cast(Type.Payload.Union).?.data;
const field_ty = union_obj.fields.values()[field_index].ty;
@@ -10144,7 +10154,7 @@ fn zirSwitchCapture(
return operand_ptr;
}
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.ErrorSet => if (block.switch_else_err_ty) |some| {
return sema.bitCast(block, some, operand, operand_src, null);
} else {
@@ -10162,7 +10172,7 @@ fn zirSwitchCapture(
switch_extra.data.getScalarProng(sema.code, switch_extra.end, capture_info.prong_index).item,
};
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.Union => {
const union_obj = operand_ty.cast(Type.Payload.Union).?.data;
const first_item = try sema.resolveInst(items[0]);
@@ -10269,6 +10279,7 @@ fn zirSwitchCapture(
}
fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const zir_datas = sema.code.instructions.items(.data);
const inst_data = zir_datas[inst].un_tok;
const src = inst_data.src();
@@ -10280,7 +10291,7 @@ fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile
const operand_ptr_ty = sema.typeOf(operand_ptr);
const operand_ty = if (is_ref) operand_ptr_ty.childType() else operand_ptr_ty;
- if (operand_ty.zigTypeTag() != .Union) {
+ if (operand_ty.zigTypeTag(mod) != .Union) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "cannot capture tag of non-union type '{}'", .{
operand_ty.fmt(sema.mod),
@@ -10301,6 +10312,7 @@ fn zirSwitchCond(
inst: Zir.Inst.Index,
is_ref: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node };
@@ -10311,7 +10323,7 @@ fn zirSwitchCond(
operand_ptr;
const operand_ty = sema.typeOf(operand);
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.Type,
.Void,
.Bool,
@@ -10371,6 +10383,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
@@ -10415,7 +10428,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const target_ty = sema.typeOf(raw_operand);
break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.elemType() else target_ty;
};
- const union_originally = maybe_union_ty.zigTypeTag() == .Union;
+ const union_originally = maybe_union_ty.zigTypeTag(mod) == .Union;
// Duplicate checking variables later also used for `inline else`.
var seen_enum_fields: []?Module.SwitchProngSrc = &.{};
@@ -10433,7 +10446,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
var empty_enum = false;
const operand_ty = sema.typeOf(operand);
- const err_set = operand_ty.zigTypeTag() == .ErrorSet;
+ const err_set = operand_ty.zigTypeTag(mod) == .ErrorSet;
var else_error_ty: ?Type = null;
@@ -10459,10 +10472,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
return sema.failWithOwnedErrorMsg(msg);
}
- const target = sema.mod.getTarget();
-
// Validate for duplicate items, missing else prong, and invalid range.
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.Union => unreachable, // handled in zirSwitchCond
.Enum => {
seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount());
@@ -10774,12 +10785,12 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
}
check_range: {
- if (operand_ty.zigTypeTag() == .Int) {
+ if (operand_ty.zigTypeTag(mod) == .Int) {
var arena = std.heap.ArenaAllocator.init(gpa);
defer arena.deinit();
- const min_int = try operand_ty.minInt(arena.allocator(), target);
- const max_int = try operand_ty.maxInt(arena.allocator(), target);
+ const min_int = try operand_ty.minInt(arena.allocator(), mod);
+ const max_int = try operand_ty.maxInt(arena.allocator(), mod);
if (try range_set.spans(min_int, max_int, operand_ty)) {
if (special_prong == .@"else") {
return sema.fail(
@@ -11080,7 +11091,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand)) {
return Air.Inst.Ref.unreachable_value;
}
- if (sema.mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag() == .Enum and
+ if (sema.mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and
(!operand_ty.isNonexhaustiveEnum() or union_originally))
{
try sema.zirDbgStmt(block, cond_dbg_node_index);
@@ -11135,7 +11146,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const analyze_body = if (union_originally) blk: {
const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable;
const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
- break :blk field_ty.zigTypeTag() != .NoReturn;
+ break :blk field_ty.zigTypeTag(mod) != .NoReturn;
} else true;
if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand)) {
@@ -11242,7 +11253,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const analyze_body = if (union_originally) blk: {
const item_val = sema.resolveConstValue(block, .unneeded, item, undefined) catch unreachable;
const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
- break :blk field_ty.zigTypeTag() != .NoReturn;
+ break :blk field_ty.zigTypeTag(mod) != .NoReturn;
} else true;
if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
@@ -11286,7 +11297,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const item = try sema.resolveInst(item_ref);
const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable;
const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
- if (field_ty.zigTypeTag() != .NoReturn) break true;
+ if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
} else false
else
true;
@@ -11409,7 +11420,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
var final_else_body: []const Air.Inst.Index = &.{};
if (special.body.len != 0 or !is_first or case_block.wantSafety()) {
var emit_bb = false;
- if (special.is_inline) switch (operand_ty.zigTypeTag()) {
+ if (special.is_inline) switch (operand_ty.zigTypeTag(mod)) {
.Enum => {
if (operand_ty.isNonexhaustiveEnum() and !union_originally) {
return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
@@ -11429,7 +11440,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const analyze_body = if (union_originally) blk: {
const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
- break :blk field_ty.zigTypeTag() != .NoReturn;
+ break :blk field_ty.zigTypeTag(mod) != .NoReturn;
} else true;
if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
@@ -11551,7 +11562,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
case_block.inline_case_capture = .none;
if (sema.mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and
- operand_ty.zigTypeTag() == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally))
+ operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally))
{
try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
const ok = try case_block.addUnOp(.is_named_enum_value, operand);
@@ -11563,7 +11574,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
if (seen_field != null) continue;
const union_obj = maybe_union_ty.cast(Type.Payload.Union).?.data;
const field_ty = union_obj.fields.values()[index].ty;
- if (field_ty.zigTypeTag() != .NoReturn) break true;
+ if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
} else false
else
true;
@@ -11629,9 +11640,9 @@ const RangeSetUnhandledIterator = struct {
first: bool = true,
fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator {
- const target = sema.mod.getTarget();
- const min = try ty.minInt(sema.arena, target);
- const max = try ty.maxInt(sema.arena, target);
+ const mod = sema.mod;
+ const min = try ty.minInt(sema.arena, mod);
+ const max = try ty.maxInt(sema.arena, mod);
return RangeSetUnhandledIterator{
.sema = sema,
@@ -11931,18 +11942,19 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op
}
fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, cond: Zir.Inst.Ref, cond_src: LazySrcLoc) !void {
+ const mod = sema.mod;
const index = Zir.refToIndex(cond) orelse return;
if (sema.code.instructions.items(.tag)[index] != .is_non_err) return;
const err_inst_data = sema.code.instructions.items(.data)[index].un_node;
const err_operand = try sema.resolveInst(err_inst_data.operand);
const operand_ty = sema.typeOf(err_operand);
- if (operand_ty.zigTypeTag() == .ErrorSet) {
+ if (operand_ty.zigTypeTag(mod) == .ErrorSet) {
try sema.maybeErrorUnwrapComptime(block, body, err_operand);
return;
}
if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| {
- if (!operand_ty.isError()) return;
+ if (!operand_ty.isError(mod)) return;
if (val.getError() == null) return;
try sema.maybeErrorUnwrapComptime(block, body, err_operand);
}
@@ -11972,6 +11984,7 @@ fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.I
}
fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -11995,7 +12008,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false;
break :hf field_index < ty.structFieldCount();
}
- break :hf switch (ty.zigTypeTag()) {
+ break :hf switch (ty.zigTypeTag(mod)) {
.Struct => ty.structFields().contains(field_name),
.Union => ty.unionFields().contains(field_name),
.Enum => ty.enumFields().contains(field_name),
@@ -12126,6 +12139,7 @@ fn zirShl(
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
sema.src = src;
@@ -12136,11 +12150,10 @@ fn zirShl(
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
- const target = sema.mod.getTarget();
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
- const scalar_ty = lhs_ty.scalarType();
- const scalar_rhs_ty = rhs_ty.scalarType();
+ const scalar_ty = lhs_ty.scalarType(mod);
+ const scalar_rhs_ty = rhs_ty.scalarType(mod);
// TODO coerce rhs if air_tag is not shl_sat
const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty);
@@ -12156,18 +12169,18 @@ fn zirShl(
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
return lhs;
}
- if (scalar_ty.zigTypeTag() != .ComptimeInt and air_tag != .shl_sat) {
+ if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) {
var bits_payload = Value.Payload.U64{
.base = .{ .tag = .int_u64 },
- .data = scalar_ty.intInfo(target).bits,
+ .data = scalar_ty.intInfo(mod).bits,
};
const bit_value = Value.initPayload(&bits_payload.base);
- if (rhs_ty.zigTypeTag() == .Vector) {
+ if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen()) : (i += 1) {
var elem_value_buf: Value.ElemValueBuffer = undefined;
const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
- if (rhs_elem.compareHetero(.gte, bit_value, target)) {
+ if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
@@ -12175,26 +12188,26 @@ fn zirShl(
});
}
}
- } else if (rhs_val.compareHetero(.gte, bit_value, target)) {
+ } else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
rhs_val.fmtValue(scalar_ty, sema.mod),
scalar_ty.fmt(sema.mod),
});
}
}
- if (rhs_ty.zigTypeTag() == .Vector) {
+ if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen()) : (i += 1) {
var elem_value_buf: Value.ElemValueBuffer = undefined;
const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
- if (rhs_elem.compareHetero(.lt, Value.zero, target)) {
+ if (rhs_elem.compareHetero(.lt, Value.zero, mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
});
}
}
- } else if (rhs_val.compareHetero(.lt, Value.zero, target)) {
+ } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
rhs_val.fmtValue(scalar_ty, sema.mod),
});
@@ -12204,7 +12217,7 @@ fn zirShl(
const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty);
const rhs_val = maybe_rhs_val orelse {
- if (scalar_ty.zigTypeTag() == .ComptimeInt) {
+ if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
}
break :rs rhs_src;
@@ -12213,7 +12226,7 @@ fn zirShl(
const val = switch (air_tag) {
.shl_exact => val: {
const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, sema.mod);
- if (scalar_ty.zigTypeTag() == .ComptimeInt) {
+ if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
break :val shifted.wrapped_result;
}
if (shifted.overflow_bit.compareAllWithZero(.eq, sema.mod)) {
@@ -12222,12 +12235,12 @@ fn zirShl(
return sema.fail(block, src, "operation caused overflow", .{});
},
- .shl_sat => if (scalar_ty.zigTypeTag() == .ComptimeInt)
+ .shl_sat => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt)
try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod)
else
try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, sema.mod),
- .shl => if (scalar_ty.zigTypeTag() == .ComptimeInt)
+ .shl => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt)
try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod)
else
try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, sema.mod),
@@ -12241,11 +12254,11 @@ fn zirShl(
const new_rhs = if (air_tag == .shl_sat) rhs: {
// Limit the RHS type for saturating shl to be an integer as small as the LHS.
if (rhs_is_comptime_int or
- scalar_rhs_ty.intInfo(target).bits > scalar_ty.intInfo(target).bits)
+ scalar_rhs_ty.intInfo(mod).bits > scalar_ty.intInfo(mod).bits)
{
const max_int = try sema.addConstant(
lhs_ty,
- try lhs_ty.maxInt(sema.arena, target),
+ try lhs_ty.maxInt(sema.arena, mod),
);
const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src });
break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false);
@@ -12256,11 +12269,11 @@ fn zirShl(
try sema.requireRuntimeBlock(block, src, runtime_src);
if (block.wantSafety()) {
- const bit_count = scalar_ty.intInfo(target).bits;
+ const bit_count = scalar_ty.intInfo(mod).bits;
if (!std.math.isPowerOfTwo(bit_count)) {
const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count);
- const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
+ const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
break :ok try block.addInst(.{
@@ -12290,7 +12303,7 @@ fn zirShl(
} },
});
const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
- const any_ov_bit = if (lhs_ty.zigTypeTag() == .Vector)
+ const any_ov_bit = if (lhs_ty.zigTypeTag(mod) == .Vector)
try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -12319,6 +12332,7 @@ fn zirShr(
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
sema.src = src;
@@ -12330,8 +12344,7 @@ fn zirShr(
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
- const target = sema.mod.getTarget();
- const scalar_ty = lhs_ty.scalarType();
+ const scalar_ty = lhs_ty.scalarType(mod);
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);
@@ -12344,18 +12357,18 @@ fn zirShr(
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
return lhs;
}
- if (scalar_ty.zigTypeTag() != .ComptimeInt) {
+ if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
var bits_payload = Value.Payload.U64{
.base = .{ .tag = .int_u64 },
- .data = scalar_ty.intInfo(target).bits,
+ .data = scalar_ty.intInfo(mod).bits,
};
const bit_value = Value.initPayload(&bits_payload.base);
- if (rhs_ty.zigTypeTag() == .Vector) {
+ if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen()) : (i += 1) {
var elem_value_buf: Value.ElemValueBuffer = undefined;
const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
- if (rhs_elem.compareHetero(.gte, bit_value, target)) {
+ if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
@@ -12363,26 +12376,26 @@ fn zirShr(
});
}
}
- } else if (rhs_val.compareHetero(.gte, bit_value, target)) {
+ } else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
rhs_val.fmtValue(scalar_ty, sema.mod),
scalar_ty.fmt(sema.mod),
});
}
}
- if (rhs_ty.zigTypeTag() == .Vector) {
+ if (rhs_ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < rhs_ty.vectorLen()) : (i += 1) {
var elem_value_buf: Value.ElemValueBuffer = undefined;
const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
- if (rhs_elem.compareHetero(.lt, Value.zero, target)) {
+ if (rhs_elem.compareHetero(.lt, Value.zero, mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
rhs_elem.fmtValue(scalar_ty, sema.mod),
i,
});
}
}
- } else if (rhs_val.compareHetero(.lt, Value.zero, target)) {
+ } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) {
return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
rhs_val.fmtValue(scalar_ty, sema.mod),
});
@@ -12405,18 +12418,18 @@ fn zirShr(
}
} else rhs_src;
- if (maybe_rhs_val == null and scalar_ty.zigTypeTag() == .ComptimeInt) {
+ if (maybe_rhs_val == null and scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
}
try sema.requireRuntimeBlock(block, src, runtime_src);
const result = try block.addBinOp(air_tag, lhs, rhs);
if (block.wantSafety()) {
- const bit_count = scalar_ty.intInfo(target).bits;
+ const bit_count = scalar_ty.intInfo(mod).bits;
if (!std.math.isPowerOfTwo(bit_count)) {
const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count);
- const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
+ const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
break :ok try block.addInst(.{
@@ -12436,7 +12449,7 @@ fn zirShr(
if (air_tag == .shr_exact) {
const back = try block.addBinOp(.shl, result, rhs);
- const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
+ const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
const eql = try block.addCmpVector(lhs, back, .eq);
break :ok try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
@@ -12461,6 +12474,7 @@ fn zirBitwise(
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
@@ -12475,8 +12489,8 @@ fn zirBitwise(
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } });
- const scalar_type = resolved_type.scalarType();
- const scalar_tag = scalar_type.zigTypeTag();
+ const scalar_type = resolved_type.scalarType(mod);
+ const scalar_tag = scalar_type.zigTypeTag(mod);
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -12484,7 +12498,7 @@ fn zirBitwise(
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
if (!is_int) {
- return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) });
+ return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag(mod)), @tagName(rhs_ty.zigTypeTag(mod)) });
}
const runtime_src = runtime: {
@@ -12515,15 +12529,16 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_type = sema.typeOf(operand);
- const scalar_type = operand_type.scalarType();
+ const scalar_type = operand_type.scalarType(mod);
- if (scalar_type.zigTypeTag() != .Int) {
+ if (scalar_type.zigTypeTag(mod) != .Int) {
return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{
operand_type.fmt(sema.mod),
});
@@ -12532,7 +12547,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef()) {
return sema.addConstUndef(operand_type);
- } else if (operand_type.zigTypeTag() == .Vector) {
+ } else if (operand_type.zigTypeTag(mod) == .Vector) {
const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen());
var elem_val_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
@@ -12728,18 +12743,19 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, sema.mod);
+ const mod = sema.mod;
const ptr_addrspace = p: {
- if (lhs_ty.zigTypeTag() == .Pointer) break :p lhs_ty.ptrAddressSpace();
- if (rhs_ty.zigTypeTag() == .Pointer) break :p rhs_ty.ptrAddressSpace();
+ if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace();
+ if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace();
break :p null;
};
- const runtime_src = if (switch (lhs_ty.zigTypeTag()) {
+ const runtime_src = if (switch (lhs_ty.zigTypeTag(mod)) {
.Array, .Struct => try sema.resolveMaybeUndefVal(lhs),
.Pointer => try sema.resolveDefinedValue(block, lhs_src, lhs),
else => unreachable,
}) |lhs_val| rs: {
- if (switch (rhs_ty.zigTypeTag()) {
+ if (switch (rhs_ty.zigTypeTag(mod)) {
.Array, .Struct => try sema.resolveMaybeUndefVal(rhs),
.Pointer => try sema.resolveDefinedValue(block, rhs_src, rhs),
else => unreachable,
@@ -12841,8 +12857,9 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, peer_ty: Type) !?Type.ArrayInfo {
+ const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.Array => return operand_ty.arrayInfo(),
.Pointer => {
const ptr_info = operand_ty.ptrInfo().data;
@@ -12859,7 +12876,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
};
},
.One => {
- if (ptr_info.pointee_type.zigTypeTag() == .Array) {
+ if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) {
return ptr_info.pointee_type.arrayInfo();
}
},
@@ -12867,10 +12884,10 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
}
},
.Struct => {
- if (operand_ty.isTuple() and peer_ty.isIndexable()) {
+ if (operand_ty.isTuple() and peer_ty.isIndexable(mod)) {
assert(!peer_ty.isTuple());
return .{
- .elem_type = peer_ty.elemType2(),
+ .elem_type = peer_ty.elemType2(mod),
.sentinel = null,
.len = operand_ty.arrayLen(),
};
@@ -12970,11 +12987,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
// Analyze the lhs first, to catch the case that someone tried to do exponentiation
+ const mod = sema.mod;
const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse {
const msg = msg: {
const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
- switch (lhs_ty.zigTypeTag()) {
+ switch (lhs_ty.zigTypeTag(mod)) {
.Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => {
try sema.errNote(block, operator_src, msg, "this operator multiplies arrays; use std.math.pow for exponentiation", .{});
},
@@ -12994,7 +13012,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, sema.mod);
- const ptr_addrspace = if (lhs_ty.zigTypeTag() == .Pointer) lhs_ty.ptrAddressSpace() else null;
+ const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace() else null;
const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);
if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
@@ -13082,6 +13100,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const lhs_src = src;
@@ -13089,9 +13108,9 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const rhs = try sema.resolveInst(inst_data.operand);
const rhs_ty = sema.typeOf(rhs);
- const rhs_scalar_ty = rhs_ty.scalarType();
+ const rhs_scalar_ty = rhs_ty.scalarType(mod);
- if (rhs_scalar_ty.isUnsignedInt() or switch (rhs_scalar_ty.zigTypeTag()) {
+ if (rhs_scalar_ty.isUnsignedInt(mod) or switch (rhs_scalar_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt, .Float, .ComptimeFloat => false,
else => true,
}) {
@@ -13108,7 +13127,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs);
}
- const lhs = if (rhs_ty.zigTypeTag() == .Vector)
+ const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector)
try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero))
else
try sema.resolveInst(.zero);
@@ -13117,6 +13136,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
}
fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const lhs_src = src;
@@ -13124,14 +13144,14 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const rhs = try sema.resolveInst(inst_data.operand);
const rhs_ty = sema.typeOf(rhs);
- const rhs_scalar_ty = rhs_ty.scalarType();
+ const rhs_scalar_ty = rhs_ty.scalarType(mod);
- switch (rhs_scalar_ty.zigTypeTag()) {
+ switch (rhs_scalar_ty.zigTypeTag(mod)) {
.Int, .ComptimeInt, .Float, .ComptimeFloat => {},
else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)}),
}
- const lhs = if (rhs_ty.zigTypeTag() == .Vector)
+ const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector)
try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero))
else
try sema.resolveInst(.zero);
@@ -13161,6 +13181,7 @@ fn zirArithmetic(
}
fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
@@ -13171,8 +13192,8 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
- const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
- const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+ const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+ const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -13181,25 +13202,24 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
- const is_vector = resolved_type.zigTypeTag() == .Vector;
+ const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
- const lhs_scalar_ty = lhs_ty.scalarType();
- const rhs_scalar_ty = rhs_ty.scalarType();
- const scalar_tag = resolved_type.scalarType().zigTypeTag();
+ const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const rhs_scalar_ty = rhs_ty.scalarType(mod);
+ const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div);
- const mod = sema.mod;
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
- if ((lhs_ty.zigTypeTag() == .ComptimeFloat and rhs_ty.zigTypeTag() == .ComptimeInt) or
- (lhs_ty.zigTypeTag() == .ComptimeInt and rhs_ty.zigTypeTag() == .ComptimeFloat))
+ if ((lhs_ty.zigTypeTag(mod) == .ComptimeFloat and rhs_ty.zigTypeTag(mod) == .ComptimeInt) or
+ (lhs_ty.zigTypeTag(mod) == .ComptimeInt and rhs_ty.zigTypeTag(mod) == .ComptimeFloat))
{
// If it makes a difference whether we coerce to ints or floats before doing the division, error.
// If lhs % rhs is 0, it doesn't matter.
@@ -13268,7 +13288,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const runtime_src = rs: {
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
- if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
+ if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
if (maybe_rhs_val) |rhs_val| {
if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) {
return sema.addConstUndef(resolved_type);
@@ -13309,7 +13329,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
const air_tag = if (is_int) blk: {
- if (lhs_ty.isSignedInt() or rhs_ty.isSignedInt()) {
+ if (lhs_ty.isSignedInt(mod) or rhs_ty.isSignedInt(mod)) {
return sema.fail(block, src, "division with '{s}' and '{s}': signed integers must use @divTrunc, @divFloor, or @divExact", .{ @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()) });
}
break :blk Air.Inst.Tag.div_trunc;
@@ -13321,6 +13341,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
@@ -13331,8 +13352,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
- const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
- const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+ const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+ const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -13341,19 +13362,18 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
- const is_vector = resolved_type.zigTypeTag() == .Vector;
+ const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
- const lhs_scalar_ty = lhs_ty.scalarType();
- const scalar_tag = resolved_type.scalarType().zigTypeTag();
+ const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_exact);
- const mod = sema.mod;
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13437,7 +13457,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const ok = if (!is_int) ok: {
const floored = try block.addUnOp(.floor, result);
- if (resolved_type.zigTypeTag() == .Vector) {
+ if (resolved_type.zigTypeTag(mod) == .Vector) {
const eql = try block.addCmpVector(result, floored, .eq);
break :ok try block.addInst(.{
.tag = switch (block.float_mode) {
@@ -13459,7 +13479,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} else ok: {
const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs);
- if (resolved_type.zigTypeTag() == .Vector) {
+ if (resolved_type.zigTypeTag(mod) == .Vector) {
const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
const zero = try sema.addConstant(resolved_type, zero_val);
const eql = try block.addCmpVector(remainder, zero, .eq);
@@ -13484,6 +13504,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
@@ -13494,8 +13515,8 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
- const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
- const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+ const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+ const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -13504,20 +13525,19 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
- const is_vector = resolved_type.zigTypeTag() == .Vector;
+ const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
- const lhs_scalar_ty = lhs_ty.scalarType();
- const rhs_scalar_ty = rhs_ty.scalarType();
- const scalar_tag = resolved_type.scalarType().zigTypeTag();
+ const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const rhs_scalar_ty = rhs_ty.scalarType(mod);
+ const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_floor);
- const mod = sema.mod;
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13562,7 +13582,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
- if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
+ if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
if (maybe_rhs_val) |rhs_val| {
if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) {
return sema.addConstUndef(resolved_type);
@@ -13600,6 +13620,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
@@ -13610,8 +13631,8 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
- const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
- const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+ const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+ const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -13620,20 +13641,19 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
- const is_vector = resolved_type.zigTypeTag() == .Vector;
+ const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
- const lhs_scalar_ty = lhs_ty.scalarType();
- const rhs_scalar_ty = rhs_ty.scalarType();
- const scalar_tag = resolved_type.scalarType().zigTypeTag();
+ const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const rhs_scalar_ty = rhs_ty.scalarType(mod);
+ const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_trunc);
- const mod = sema.mod;
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13677,7 +13697,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
- if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
+ if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
if (maybe_rhs_val) |rhs_val| {
if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) {
return sema.addConstUndef(resolved_type);
@@ -13727,22 +13747,20 @@ fn addDivIntOverflowSafety(
casted_rhs: Air.Inst.Ref,
is_int: bool,
) CompileError!void {
+ const mod = sema.mod;
if (!is_int) return;
// If the LHS is unsigned, it cannot cause overflow.
- if (!lhs_scalar_ty.isSignedInt()) return;
-
- const mod = sema.mod;
- const target = mod.getTarget();
+ if (!lhs_scalar_ty.isSignedInt(mod)) return;
// If the LHS is widened to a larger integer type, no overflow is possible.
- if (lhs_scalar_ty.intInfo(target).bits < resolved_type.intInfo(target).bits) {
+ if (lhs_scalar_ty.intInfo(mod).bits < resolved_type.intInfo(mod).bits) {
return;
}
- const min_int = try resolved_type.minInt(sema.arena, target);
+ const min_int = try resolved_type.minInt(sema.arena, mod);
const neg_one_scalar = try Value.Tag.int_i64.create(sema.arena, -1);
- const neg_one = if (resolved_type.zigTypeTag() == .Vector)
+ const neg_one = if (resolved_type.zigTypeTag(mod) == .Vector)
try Value.Tag.repeated.create(sema.arena, neg_one_scalar)
else
neg_one_scalar;
@@ -13759,7 +13777,7 @@ fn addDivIntOverflowSafety(
}
var ok: Air.Inst.Ref = .none;
- if (resolved_type.zigTypeTag() == .Vector) {
+ if (resolved_type.zigTypeTag(mod) == .Vector) {
if (maybe_lhs_val == null) {
const min_int_ref = try sema.addConstant(resolved_type, min_int);
ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq);
@@ -13815,7 +13833,8 @@ fn addDivByZeroSafety(
// emitted above.
if (maybe_rhs_val != null) return;
- const ok = if (resolved_type.zigTypeTag() == .Vector) ok: {
+ const mod = sema.mod;
+ const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: {
const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
const zero = try sema.addConstant(resolved_type, zero_val);
const ok = try block.addCmpVector(casted_rhs, zero, .neq);
@@ -13842,6 +13861,7 @@ fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst
}
fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
@@ -13852,8 +13872,8 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
- const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
- const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+ const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+ const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -13862,20 +13882,19 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
- const is_vector = resolved_type.zigTypeTag() == .Vector;
+ const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
- const lhs_scalar_ty = lhs_ty.scalarType();
- const rhs_scalar_ty = rhs_ty.scalarType();
- const scalar_tag = resolved_type.scalarType().zigTypeTag();
+ const lhs_scalar_ty = lhs_ty.scalarType(mod);
+ const rhs_scalar_ty = rhs_ty.scalarType(mod);
+ const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod_rem);
- const mod = sema.mod;
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -13904,7 +13923,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
} else Value.zero;
return sema.addConstant(resolved_type, zero_val);
}
- } else if (lhs_scalar_ty.isSignedInt()) {
+ } else if (lhs_scalar_ty.isSignedInt(mod)) {
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
}
if (maybe_rhs_val) |rhs_val| {
@@ -13929,7 +13948,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return sema.addConstant(resolved_type, rem_result);
}
break :rs lhs_src;
- } else if (rhs_scalar_ty.isSignedInt()) {
+ } else if (rhs_scalar_ty.isSignedInt(mod)) {
return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
} else {
break :rs rhs_src;
@@ -13978,7 +13997,8 @@ fn intRem(
lhs: Value,
rhs: Value,
) CompileError!Value {
- if (ty.zigTypeTag() == .Vector) {
+ const mod = sema.mod;
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
@@ -13997,13 +14017,13 @@ fn intRemScalar(
lhs: Value,
rhs: Value,
) CompileError!Value {
- const target = sema.mod.getTarget();
+ const mod = sema.mod;
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema);
- const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema);
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
const limbs_q = try sema.arena.alloc(
math.big.Limb,
lhs_bigint.limbs.len,
@@ -14025,6 +14045,7 @@ fn intRemScalar(
}
fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
@@ -14035,8 +14056,8 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
- const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
- const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+ const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+ const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -14048,13 +14069,12 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
- const scalar_tag = resolved_type.scalarType().zigTypeTag();
+ const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod);
- const mod = sema.mod;
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -14127,6 +14147,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
sema.src = src;
@@ -14137,8 +14158,8 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const rhs = try sema.resolveInst(extra.rhs);
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
- const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
- const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+ const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+ const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -14150,13 +14171,12 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
- const scalar_tag = resolved_type.scalarType().zigTypeTag();
+ const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .rem);
- const mod = sema.mod;
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
@@ -14268,7 +14288,7 @@ fn zirOverflowArithmetic(
const lhs = try sema.coerce(block, dest_ty, uncasted_lhs, lhs_src);
const rhs = try sema.coerce(block, rhs_dest_ty, uncasted_rhs, rhs_src);
- if (dest_ty.scalarType().zigTypeTag() != .Int) {
+ if (dest_ty.scalarType(mod).zigTypeTag(mod) != .Int) {
return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(mod)});
}
@@ -14434,12 +14454,14 @@ fn zirOverflowArithmetic(
}
fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value {
- if (ty.zigTypeTag() != .Vector) return val;
+ const mod = sema.mod;
+ if (ty.zigTypeTag(mod) != .Vector) return val;
return Value.Tag.repeated.create(sema.arena, val);
}
fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
- const ov_ty = if (ty.zigTypeTag() == .Vector) try Type.vector(sema.arena, ty.vectorLen(), Type.u1) else Type.u1;
+ const mod = sema.mod;
+ const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try Type.vector(sema.arena, ty.vectorLen(), Type.u1) else Type.u1;
const types = try sema.arena.alloc(Type, 2);
const values = try sema.arena.alloc(Value, 2);
@@ -14468,10 +14490,11 @@ fn analyzeArithmetic(
rhs_src: LazySrcLoc,
want_safety: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
- const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
- const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+ const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+ const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) {
@@ -14491,18 +14514,17 @@ fn analyzeArithmetic(
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
- const is_vector = resolved_type.zigTypeTag() == .Vector;
+ const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
- const scalar_tag = resolved_type.scalarType().zigTypeTag();
+ const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, zir_tag);
- const mod = sema.mod;
const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: {
@@ -14910,7 +14932,7 @@ fn analyzeArithmetic(
} },
});
const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
- const any_ov_bit = if (resolved_type.zigTypeTag() == .Vector)
+ const any_ov_bit = if (resolved_type.zigTypeTag(mod) == .Vector)
try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
@@ -14944,12 +14966,12 @@ fn analyzePtrArithmetic(
// TODO if the operand is comptime-known to be negative, or is a negative int,
// coerce to isize instead of usize.
const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src);
- const target = sema.mod.getTarget();
+ const mod = sema.mod;
const opt_ptr_val = try sema.resolveMaybeUndefVal(ptr);
const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset);
const ptr_ty = sema.typeOf(ptr);
const ptr_info = ptr_ty.ptrInfo().data;
- const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Array)
+ const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Array)
ptr_info.pointee_type.childType()
else
ptr_info.pointee_type;
@@ -14963,9 +14985,9 @@ fn analyzePtrArithmetic(
}
// If the addend is not a comptime-known value we can still count on
// it being a multiple of the type size.
- const elem_size = elem_ty.abiSize(target);
+ const elem_size = elem_ty.abiSize(mod);
const addend = if (opt_off_val) |off_val| a: {
- const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(target));
+ const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(mod));
break :a elem_size * off_int;
} else elem_size;
@@ -14991,10 +15013,10 @@ fn analyzePtrArithmetic(
if (opt_off_val) |offset_val| {
if (ptr_val.isUndef()) return sema.addConstUndef(new_ptr_ty);
- const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(target));
+ const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(mod));
if (offset_int == 0) return ptr;
- if (try ptr_val.getUnsignedIntAdvanced(target, sema)) |addr| {
- const elem_size = elem_ty.abiSize(target);
+ if (try ptr_val.getUnsignedIntAdvanced(mod, sema)) |addr| {
+ const elem_size = elem_ty.abiSize(mod);
const new_addr = switch (air_tag) {
.ptr_add => addr + elem_size * offset_int,
.ptr_sub => addr - elem_size * offset_int,
@@ -15116,6 +15138,7 @@ fn zirAsm(
const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len);
const inputs = try sema.arena.alloc(ConstraintName, inputs_len);
+ const mod = sema.mod;
for (args, 0..) |*arg, arg_i| {
const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i);
@@ -15123,7 +15146,7 @@ fn zirAsm(
const uncasted_arg = try sema.resolveInst(input.data.operand);
const uncasted_arg_ty = sema.typeOf(uncasted_arg);
- switch (uncasted_arg_ty.zigTypeTag()) {
+ switch (uncasted_arg_ty.zigTypeTag(mod)) {
.ComptimeInt => arg.* = try sema.coerce(block, Type.initTag(.usize), uncasted_arg, src),
.ComptimeFloat => arg.* = try sema.coerce(block, Type.initTag(.f64), uncasted_arg, src),
else => {
@@ -15205,6 +15228,7 @@ fn zirCmpEq(
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src: LazySrcLoc = inst_data.src();
@@ -15215,8 +15239,8 @@ fn zirCmpEq(
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
- const lhs_ty_tag = lhs_ty.zigTypeTag();
- const rhs_ty_tag = rhs_ty.zigTypeTag();
+ const lhs_ty_tag = lhs_ty.zigTypeTag(mod);
+ const rhs_ty_tag = rhs_ty.zigTypeTag(mod);
if (lhs_ty_tag == .Null and rhs_ty_tag == .Null) {
// null == null, null != null
if (op == .eq) {
@@ -15295,6 +15319,7 @@ fn analyzeCmpUnionTag(
tag_src: LazySrcLoc,
op: std.math.CompareOperator,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const union_ty = try sema.resolveTypeFields(sema.typeOf(un));
const union_tag_ty = union_ty.unionTagType() orelse {
const msg = msg: {
@@ -15313,7 +15338,7 @@ fn analyzeCmpUnionTag(
if (try sema.resolveMaybeUndefVal(coerced_tag)) |enum_val| {
if (enum_val.isUndef()) return sema.addConstUndef(Type.bool);
const field_ty = union_ty.unionFieldType(enum_val, sema.mod);
- if (field_ty.zigTypeTag() == .NoReturn) {
+ if (field_ty.zigTypeTag(mod) == .NoReturn) {
return Air.Inst.Ref.bool_false;
}
}
@@ -15352,32 +15377,33 @@ fn analyzeCmp(
rhs_src: LazySrcLoc,
is_equality_cmp: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
- if (lhs_ty.zigTypeTag() != .Optional and rhs_ty.zigTypeTag() != .Optional) {
+ if (lhs_ty.zigTypeTag(mod) != .Optional and rhs_ty.zigTypeTag(mod) != .Optional) {
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
}
- if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) {
+ if (lhs_ty.zigTypeTag(mod) == .Vector and rhs_ty.zigTypeTag(mod) == .Vector) {
return sema.cmpVector(block, src, lhs, rhs, op, lhs_src, rhs_src);
}
- if (lhs_ty.isNumeric() and rhs_ty.isNumeric()) {
+ if (lhs_ty.isNumeric(mod) and rhs_ty.isNumeric(mod)) {
// This operation allows any combination of integer and float types, regardless of the
// signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for
// numeric types.
return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src);
}
- if (is_equality_cmp and lhs_ty.zigTypeTag() == .ErrorUnion and rhs_ty.zigTypeTag() == .ErrorSet) {
+ if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorUnion and rhs_ty.zigTypeTag(mod) == .ErrorSet) {
const casted_lhs = try sema.analyzeErrUnionCode(block, lhs_src, lhs);
return sema.cmpSelf(block, src, casted_lhs, rhs, op, lhs_src, rhs_src);
}
- if (is_equality_cmp and lhs_ty.zigTypeTag() == .ErrorSet and rhs_ty.zigTypeTag() == .ErrorUnion) {
+ if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorSet and rhs_ty.zigTypeTag(mod) == .ErrorUnion) {
const casted_rhs = try sema.analyzeErrUnionCode(block, rhs_src, rhs);
return sema.cmpSelf(block, src, lhs, casted_rhs, op, lhs_src, rhs_src);
}
const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } });
- if (!resolved_type.isSelfComparable(is_equality_cmp)) {
+ if (!resolved_type.isSelfComparable(mod, is_equality_cmp)) {
return sema.fail(block, src, "operator {s} not allowed for type '{}'", .{
compareOperatorName(op), resolved_type.fmt(sema.mod),
});
@@ -15408,6 +15434,7 @@ fn cmpSelf(
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const resolved_type = sema.typeOf(casted_lhs);
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| {
@@ -15415,7 +15442,7 @@ fn cmpSelf(
if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool);
- if (resolved_type.zigTypeTag() == .Vector) {
+ if (resolved_type.zigTypeTag(mod) == .Vector) {
const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.bool);
const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type);
return sema.addConstant(result_ty, cmp_val);
@@ -15427,7 +15454,7 @@ fn cmpSelf(
return Air.Inst.Ref.bool_false;
}
} else {
- if (resolved_type.zigTypeTag() == .Bool) {
+ if (resolved_type.zigTypeTag(mod) == .Bool) {
// We can lower bool eq/neq more efficiently.
return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(), rhs_src);
}
@@ -15436,7 +15463,7 @@ fn cmpSelf(
} else {
// For bools, we still check the other operand, because we can lower
// bool eq/neq more efficiently.
- if (resolved_type.zigTypeTag() == .Bool) {
+ if (resolved_type.zigTypeTag(mod) == .Bool) {
if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool);
return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src);
@@ -15446,7 +15473,7 @@ fn cmpSelf(
}
};
try sema.requireRuntimeBlock(block, src, runtime_src);
- if (resolved_type.zigTypeTag() == .Vector) {
+ if (resolved_type.zigTypeTag(mod) == .Vector) {
return block.addCmpVector(casted_lhs, casted_rhs, op);
}
const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized);
@@ -15475,10 +15502,11 @@ fn runtimeBoolCmp(
}
fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ty = try sema.resolveType(block, operand_src, inst_data.operand);
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Fn,
.NoReturn,
.Undefined,
@@ -15509,8 +15537,7 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.AnyFrame,
=> {},
}
- const target = sema.mod.getTarget();
- const val = try ty.lazyAbiSize(target, sema.arena);
+ const val = try ty.lazyAbiSize(mod, sema.arena);
if (val.tag() == .lazy_size) {
try sema.queueFullTypeResolution(ty);
}
@@ -15518,10 +15545,11 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
}
fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.Fn,
.NoReturn,
.Undefined,
@@ -15552,8 +15580,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
.AnyFrame,
=> {},
}
- const target = sema.mod.getTarget();
- const bit_size = try operand_ty.bitSizeAdvanced(target, sema);
+ const bit_size = try operand_ty.bitSizeAdvanced(mod, sema);
return sema.addIntUnsigned(Type.comptime_int, bit_size);
}
@@ -15765,13 +15792,13 @@ fn zirBuiltinSrc(
}
fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const ty = try sema.resolveType(block, src, inst_data.operand);
const type_info_ty = try sema.getBuiltinType("Type");
- const target = sema.mod.getTarget();
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Type => return sema.addConstant(
type_info_ty,
try Value.Tag.@"union".create(sema.arena, .{
@@ -15881,8 +15908,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index);
try sema.ensureDeclAnalyzed(fn_info_decl_index);
const fn_info_decl = sema.mod.declPtr(fn_info_decl_index);
- var fn_ty_buffer: Value.ToTypeBuffer = undefined;
- const fn_ty = fn_info_decl.val.toType(&fn_ty_buffer);
+ const fn_ty = fn_info_decl.val.toType();
const param_info_decl_index = (try sema.namespaceLookup(
block,
src,
@@ -15892,8 +15918,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index);
try sema.ensureDeclAnalyzed(param_info_decl_index);
const param_info_decl = sema.mod.declPtr(param_info_decl_index);
- var param_buffer: Value.ToTypeBuffer = undefined;
- const param_ty = param_info_decl.val.toType(&param_buffer);
+ const param_ty = param_info_decl.val.toType();
const new_decl = try params_anon_decl.finish(
try Type.Tag.array.create(params_anon_decl.arena(), .{
.len = param_vals.len,
@@ -15924,7 +15949,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// calling_convention: CallingConvention,
try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc)),
// alignment: comptime_int,
- try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(target)),
+ try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(mod)),
// is_generic: bool,
Value.makeBool(info.is_generic),
// is_var_args: bool,
@@ -15944,7 +15969,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
);
},
.Int => {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
const field_values = try sema.arena.alloc(Value, 2);
// signedness: Signedness,
field_values[0] = try Value.Tag.enum_field_index.create(
@@ -15965,7 +15990,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.Float => {
const field_values = try sema.arena.alloc(Value, 1);
// bits: comptime_int,
- field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(target));
+ field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(mod));
return sema.addConstant(
type_info_ty,
@@ -15980,7 +16005,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const alignment = if (info.@"align" != 0)
try Value.Tag.int_u64.create(sema.arena, info.@"align")
else
- try info.pointee_type.lazyAbiAlignment(target, sema.arena);
+ try info.pointee_type.lazyAbiAlignment(mod, sema.arena);
const field_values = try sema.arena.create([8]Value);
field_values.* = .{
@@ -16072,8 +16097,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index);
try sema.ensureDeclAnalyzed(set_field_ty_decl_index);
const set_field_ty_decl = sema.mod.declPtr(set_field_ty_decl_index);
- var buffer: Value.ToTypeBuffer = undefined;
- break :t try set_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena());
+ break :t try set_field_ty_decl.val.toType().copy(fields_anon_decl.arena());
};
try sema.queueFullTypeResolution(try error_field_ty.copy(sema.arena));
@@ -16164,8 +16188,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
},
.Enum => {
// TODO: look into memoizing this result.
- var int_tag_type_buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = try ty.intTagType(&int_tag_type_buffer).copy(sema.arena);
+ const int_tag_ty = try ty.intTagType().copy(sema.arena);
const is_exhaustive = Value.makeBool(!ty.isNonexhaustiveEnum());
@@ -16182,8 +16205,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index);
try sema.ensureDeclAnalyzed(enum_field_ty_decl_index);
const enum_field_ty_decl = sema.mod.declPtr(enum_field_ty_decl_index);
- var buffer: Value.ToTypeBuffer = undefined;
- break :t try enum_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena());
+ break :t try enum_field_ty_decl.val.toType().copy(fields_anon_decl.arena());
};
const enum_fields = ty.enumFields();
@@ -16275,8 +16297,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index);
try sema.ensureDeclAnalyzed(union_field_ty_decl_index);
const union_field_ty_decl = sema.mod.declPtr(union_field_ty_decl_index);
- var buffer: Value.ToTypeBuffer = undefined;
- break :t try union_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena());
+ break :t try union_field_ty_decl.val.toType().copy(fields_anon_decl.arena());
};
const union_ty = try sema.resolveTypeFields(ty);
@@ -16383,8 +16404,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index);
try sema.ensureDeclAnalyzed(struct_field_ty_decl_index);
const struct_field_ty_decl = sema.mod.declPtr(struct_field_ty_decl_index);
- var buffer: Value.ToTypeBuffer = undefined;
- break :t try struct_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena());
+ break :t try struct_field_ty_decl.val.toType().copy(fields_anon_decl.arena());
};
const struct_ty = try sema.resolveTypeFields(ty);
try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
@@ -16430,7 +16450,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_comptime: bool,
Value.makeBool(is_comptime),
// alignment: comptime_int,
- try field_ty.lazyAbiAlignment(target, fields_anon_decl.arena()),
+ try field_ty.lazyAbiAlignment(mod, fields_anon_decl.arena()),
};
struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields);
}
@@ -16463,7 +16483,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
else
field.default_val;
const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val);
- const alignment = field.alignment(target, layout);
+ const alignment = field.alignment(mod, layout);
struct_field_fields.* = .{
// name: []const u8,
@@ -16506,7 +16526,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (layout == .Packed) {
const struct_obj = struct_ty.castTag(.@"struct").?.data;
assert(struct_obj.haveLayout());
- assert(struct_obj.backing_int_ty.isInt());
+ assert(struct_obj.backing_int_ty.isInt(mod));
const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty);
break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val);
} else {
@@ -16584,8 +16604,7 @@ fn typeInfoDecls(
try sema.mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index);
try sema.ensureDeclAnalyzed(declaration_ty_decl_index);
const declaration_ty_decl = sema.mod.declPtr(declaration_ty_decl_index);
- var buffer: Value.ToTypeBuffer = undefined;
- break :t try declaration_ty_decl.val.toType(&buffer).copy(decls_anon_decl.arena());
+ break :t try declaration_ty_decl.val.toType().copy(decls_anon_decl.arena());
};
try sema.queueFullTypeResolution(try declaration_ty.copy(sema.arena));
@@ -16632,8 +16651,7 @@ fn typeInfoNamespaceDecls(
if (decl.kind == .@"usingnamespace") {
if (decl.analysis == .in_progress) continue;
try sema.mod.ensureDeclAnalyzed(decl_index);
- var buf: Value.ToTypeBuffer = undefined;
- const new_ns = decl.val.toType(&buf).getNamespace().?;
+ const new_ns = decl.val.toType().getNamespace().?;
try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces);
continue;
}
@@ -16709,10 +16727,11 @@ fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
}
fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Type {
- switch (operand.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (operand.zigTypeTag(mod)) {
.ComptimeInt => return Type.comptime_int,
.Int => {
- const bits = operand.bitSize(sema.mod.getTarget());
+ const bits = operand.bitSize(mod);
const count = if (bits == 0)
0
else blk: {
@@ -16723,10 +16742,10 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi
}
break :blk count;
};
- return Module.makeIntType(sema.arena, .unsigned, count);
+ return mod.intType(.unsigned, count);
},
.Vector => {
- const elem_ty = operand.elemType2();
+ const elem_ty = operand.elemType2(mod);
const log2_elem_ty = try sema.log2IntType(block, elem_ty, src);
return Type.Tag.vector.create(sema.arena, .{
.len = operand.vectorLen(),
@@ -16920,9 +16939,10 @@ fn finishCondBr(
}
fn checkNullableType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
- switch (ty.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (ty.zigTypeTag(mod)) {
.Optional, .Null, .Undefined => return,
- .Pointer => if (ty.isPtrLikeOptional()) return,
+ .Pointer => if (ty.isPtrLikeOptional(mod)) return,
else => {},
}
return sema.failWithExpectedOptionalType(block, src, ty);
@@ -16951,10 +16971,11 @@ fn zirIsNonNullPtr(
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const ptr = try sema.resolveInst(inst_data.operand);
- try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2());
+ try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2(mod));
if ((try sema.resolveMaybeUndefVal(ptr)) == null) {
return block.addUnOp(.is_non_null_ptr, ptr);
}
@@ -16963,7 +16984,8 @@ fn zirIsNonNullPtr(
}
fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
- switch (ty.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (ty.zigTypeTag(mod)) {
.ErrorSet, .ErrorUnion, .Undefined => return,
else => return sema.fail(block, src, "expected error union type, found '{}'", .{
ty.fmt(sema.mod),
@@ -16986,10 +17008,11 @@ fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const ptr = try sema.resolveInst(inst_data.operand);
- try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2());
+ try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2(mod));
const loaded = try sema.analyzeLoad(block, src, ptr, src);
return sema.analyzeIsNonErr(block, src, loaded);
}
@@ -17012,6 +17035,7 @@ fn zirCondbr(
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index);
@@ -17052,7 +17076,7 @@ fn zirCondbr(
const err_inst_data = sema.code.instructions.items(.data)[index].un_node;
const err_operand = try sema.resolveInst(err_inst_data.operand);
const operand_ty = sema.typeOf(err_operand);
- assert(operand_ty.zigTypeTag() == .ErrorUnion);
+ assert(operand_ty.zigTypeTag(mod) == .ErrorUnion);
const result_ty = operand_ty.errorUnionSet();
break :blk try sub_block.addTyOp(.unwrap_errunion_err, result_ty, err_operand);
};
@@ -17079,7 +17103,7 @@ fn zirCondbr(
return always_noreturn;
}
-fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Ref {
+fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
@@ -17087,7 +17111,8 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!
const body = sema.code.extra[extra.end..][0..extra.data.body_len];
const err_union = try sema.resolveInst(extra.data.operand);
const err_union_ty = sema.typeOf(err_union);
- if (err_union_ty.zigTypeTag() != .ErrorUnion) {
+ const mod = sema.mod;
+ if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{
err_union_ty.fmt(sema.mod),
});
@@ -17124,7 +17149,7 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!
return try_inst;
}
-fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Ref {
+fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
@@ -17133,7 +17158,8 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
const operand = try sema.resolveInst(extra.data.operand);
const err_union = try sema.analyzeLoad(parent_block, src, operand, operand_src);
const err_union_ty = sema.typeOf(err_union);
- if (err_union_ty.zigTypeTag() != .ErrorUnion) {
+ const mod = sema.mod;
+ if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) {
return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{
err_union_ty.fmt(sema.mod),
});
@@ -17275,16 +17301,17 @@ fn zirRetImplicit(
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_tok;
const operand = try sema.resolveInst(inst_data.operand);
const r_brace_src = inst_data.src();
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
- const base_tag = sema.fn_ret_ty.baseZigTypeTag();
+ const base_tag = sema.fn_ret_ty.baseZigTypeTag(mod);
if (base_tag == .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(block, ret_ty_src, "function declared '{}' implicitly returns", .{
- sema.fn_ret_ty.fmt(sema.mod),
+ sema.fn_ret_ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{});
@@ -17294,7 +17321,7 @@ fn zirRetImplicit(
} else if (base_tag != .Void) {
const msg = msg: {
const msg = try sema.errMsg(block, ret_ty_src, "function with non-void return type '{}' implicitly returns", .{
- sema.fn_ret_ty.fmt(sema.mod),
+ sema.fn_ret_ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{});
@@ -17397,17 +17424,19 @@ fn retWithErrTracing(
}
fn wantErrorReturnTracing(sema: *Sema, fn_ret_ty: Type) bool {
- if (!sema.mod.backendSupportsFeature(.error_return_trace)) return false;
+ const mod = sema.mod;
+ if (!mod.backendSupportsFeature(.error_return_trace)) return false;
- return fn_ret_ty.isError() and
- sema.mod.comp.bin_file.options.error_return_tracing;
+ return fn_ret_ty.isError(mod) and
+ mod.comp.bin_file.options.error_return_tracing;
}
fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].save_err_ret_index;
- if (!sema.mod.backendSupportsFeature(.error_return_trace)) return;
- if (!sema.mod.comp.bin_file.options.error_return_tracing) return;
+ if (!mod.backendSupportsFeature(.error_return_trace)) return;
+ if (!mod.comp.bin_file.options.error_return_tracing) return;
// This is only relevant at runtime.
if (block.is_comptime or block.is_typeof) return;
@@ -17415,7 +17444,7 @@ fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const save_index = inst_data.operand == .none or b: {
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
- break :b operand_ty.isError();
+ break :b operand_ty.isError(mod);
};
if (save_index)
@@ -17467,11 +17496,12 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index)
}
fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
- assert(sema.fn_ret_ty.zigTypeTag() == .ErrorUnion);
+ const mod = sema.mod;
+ assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion);
if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| {
const op_ty = sema.typeOf(uncasted_operand);
- switch (op_ty.zigTypeTag()) {
+ switch (op_ty.zigTypeTag(mod)) {
.ErrorSet => {
try payload.data.addErrorSet(sema.gpa, op_ty);
},
@@ -17492,7 +17522,8 @@ fn analyzeRet(
// Special case for returning an error to an inferred error set; we need to
// add the error tag to the inferred error set of the in-scope function, so
// that the coercion below works correctly.
- if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) {
+ const mod = sema.mod;
+ if (sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) {
try sema.addToInferredErrorSet(uncasted_operand);
}
const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, src, .{ .is_ret = true }) catch |err| switch (err) {
@@ -17540,6 +17571,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].ptr_type;
const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index);
const elem_ty_src: LazySrcLoc = .{ .node_offset_ptr_elem = extra.data.src_node };
@@ -17582,7 +17614,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
break :blk 0;
}
}
- const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(target, sema)).?);
+ const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?);
try sema.validateAlign(block, align_src, abi_align);
break :blk abi_align;
} else 0;
@@ -17591,7 +17623,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
extra_i += 1;
break :blk try sema.analyzeAddressSpace(block, addrspace_src, ref, .pointer);
- } else if (elem_ty.zigTypeTag() == .Fn and target.cpu.arch == .avr) .flash else .generic;
+ } else if (elem_ty.zigTypeTag(mod) == .Fn and target.cpu.arch == .avr) .flash else .generic;
const bit_offset = if (inst_data.flags.has_bit_range) blk: {
const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
@@ -17611,9 +17643,9 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
return sema.fail(block, bitoffset_src, "bit offset starts after end of host integer", .{});
}
- if (elem_ty.zigTypeTag() == .NoReturn) {
+ if (elem_ty.zigTypeTag(mod) == .NoReturn) {
return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{});
- } else if (elem_ty.zigTypeTag() == .Fn) {
+ } else if (elem_ty.zigTypeTag(mod) == .Fn) {
if (inst_data.size != .One) {
return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{});
}
@@ -17623,7 +17655,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
{
return sema.fail(block, align_src, "function pointer alignment disagrees with function alignment", .{});
}
- } else if (inst_data.size == .Many and elem_ty.zigTypeTag() == .Opaque) {
+ } else if (inst_data.size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) {
return sema.fail(block, elem_ty_src, "unknown-length pointer to opaque not allowed", .{});
} else if (inst_data.size == .C) {
if (!try sema.validateExternType(elem_ty, .other)) {
@@ -17639,7 +17671,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
};
return sema.failWithOwnedErrorMsg(msg);
}
- if (elem_ty.zigTypeTag() == .Opaque) {
+ if (elem_ty.zigTypeTag(mod) == .Opaque) {
return sema.fail(block, elem_ty_src, "C pointers cannot point to opaque types", .{});
}
}
@@ -17666,8 +17698,9 @@ fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const obj_ty = try sema.resolveType(block, src, inst_data.operand);
+ const mod = sema.mod;
- switch (obj_ty.zigTypeTag()) {
+ switch (obj_ty.zigTypeTag(mod)) {
.Struct => return sema.structInitEmpty(block, obj_ty, src, src),
.Array, .Vector => return sema.arrayInitEmpty(block, src, obj_ty),
.Void => return sema.addConstant(obj_ty, Value.void),
@@ -17696,9 +17729,10 @@ fn structInitEmpty(
}
fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const arr_len = obj_ty.arrayLen();
if (arr_len != 0) {
- if (obj_ty.zigTypeTag() == .Array) {
+ if (obj_ty.zigTypeTag(mod) == .Array) {
return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len});
} else {
return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len});
@@ -17766,13 +17800,14 @@ fn zirStructInit(
const extra = sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index);
const src = inst_data.src();
+ const mod = sema.mod;
const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data;
const first_field_type_data = zir_datas[first_item.field_type].pl_node;
const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data;
const resolved_ty = try sema.resolveType(block, src, first_field_type_extra.container_type);
try sema.resolveTypeLayout(resolved_ty);
- if (resolved_ty.zigTypeTag() == .Struct) {
+ if (resolved_ty.zigTypeTag(mod) == .Struct) {
// This logic must be synchronized with that in `zirStructInitEmpty`.
// Maps field index to field_type index of where it was already initialized.
@@ -17815,7 +17850,7 @@ fn zirStructInit(
}
found_fields[field_index] = item.data.field_type;
field_inits[field_index] = try sema.resolveInst(item.data.init);
- if (!is_packed) if (resolved_ty.structFieldValueComptime(field_index)) |default_value| {
+ if (!is_packed) if (resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| {
const init_val = (try sema.resolveMaybeUndefVal(field_inits[field_index])) orelse {
return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known");
};
@@ -17827,7 +17862,7 @@ fn zirStructInit(
}
return sema.finishStructInit(block, src, src, field_inits, resolved_ty, is_ref);
- } else if (resolved_ty.zigTypeTag() == .Union) {
+ } else if (resolved_ty.zigTypeTag(mod) == .Union) {
if (extra.data.fields_len != 1) {
return sema.fail(block, src, "union initialization expects exactly one field", .{});
}
@@ -18014,6 +18049,7 @@ fn zirStructInitAnon(
inst: Zir.Inst.Index,
is_ref: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index);
@@ -18050,7 +18086,7 @@ fn zirStructInitAnon(
const init = try sema.resolveInst(item.data.init);
field_ty.* = sema.typeOf(init);
- if (types[i].zigTypeTag() == .Opaque) {
+ if (types[i].zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const decl = sema.mod.declPtr(block.src_decl);
const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i);
@@ -18148,15 +18184,16 @@ fn zirArrayInit(
const array_ty = try sema.resolveType(block, src, args[0]);
const sentinel_val = array_ty.sentinel();
+ const mod = sema.mod;
const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @boolToInt(sentinel_val != null));
defer gpa.free(resolved_args);
for (args[1..], 0..) |arg, i| {
const resolved_arg = try sema.resolveInst(arg);
- const elem_ty = if (array_ty.zigTypeTag() == .Struct)
+ const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct)
array_ty.structFieldType(i)
else
- array_ty.elemType2();
+ array_ty.elemType2(mod);
resolved_args[i] = sema.coerce(block, elem_ty, resolved_arg, .unneeded) catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = sema.mod.declPtr(block.src_decl);
@@ -18169,7 +18206,7 @@ fn zirArrayInit(
}
if (sentinel_val) |some| {
- resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(), some);
+ resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(mod), some);
}
const opt_runtime_index: ?u32 = for (resolved_args, 0..) |arg, i| {
@@ -18227,7 +18264,7 @@ fn zirArrayInit(
const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.mutable = true,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
- .pointee_type = array_ty.elemType2(),
+ .pointee_type = array_ty.elemType2(mod),
});
const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty);
@@ -18252,6 +18289,7 @@ fn zirArrayInitAnon(
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
const operands = sema.code.refSlice(extra.end, extra.data.operands_len);
+ const mod = sema.mod;
const types = try sema.arena.alloc(Type, operands.len);
const values = try sema.arena.alloc(Value, operands.len);
@@ -18262,7 +18300,7 @@ fn zirArrayInitAnon(
const operand_src = src; // TODO better source location
const elem = try sema.resolveInst(operand);
types[i] = sema.typeOf(elem);
- if (types[i].zigTypeTag() == .Opaque) {
+ if (types[i].zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(sema.gpa);
@@ -18379,11 +18417,12 @@ fn fieldType(
field_src: LazySrcLoc,
ty_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
var cur_ty = aggregate_ty;
while (true) {
const resolved_ty = try sema.resolveTypeFields(cur_ty);
cur_ty = resolved_ty;
- switch (cur_ty.zigTypeTag()) {
+ switch (cur_ty.zigTypeTag(mod)) {
.Struct => {
if (cur_ty.isAnonStruct()) {
const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src);
@@ -18449,14 +18488,14 @@ fn zirFrame(
}
fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ty = try sema.resolveType(block, operand_src, inst_data.operand);
if (ty.isNoReturn()) {
return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)});
}
- const target = sema.mod.getTarget();
- const val = try ty.lazyAbiAlignment(target, sema.arena);
+ const val = try ty.lazyAbiAlignment(mod, sema.arena);
if (val.tag() == .lazy_align) {
try sema.queueFullTypeResolution(ty);
}
@@ -18499,16 +18538,17 @@ fn zirUnaryMath(
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand = try sema.resolveInst(inst_data.operand);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_ty = sema.typeOf(operand);
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.ComptimeFloat, .Float => {},
.Vector => {
- const scalar_ty = operand_ty.scalarType();
- switch (scalar_ty.zigTypeTag()) {
+ const scalar_ty = operand_ty.scalarType(mod);
+ switch (scalar_ty.zigTypeTag(mod)) {
.ComptimeFloat, .Float => {},
else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{scalar_ty.fmt(sema.mod)}),
}
@@ -18516,9 +18556,9 @@ fn zirUnaryMath(
else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{operand_ty.fmt(sema.mod)}),
}
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.Vector => {
- const scalar_ty = operand_ty.scalarType();
+ const scalar_ty = operand_ty.scalarType(mod);
const vec_len = operand_ty.vectorLen();
const result_ty = try Type.vector(sema.arena, vec_len, scalar_ty);
if (try sema.resolveMaybeUndefVal(operand)) |val| {
@@ -18564,7 +18604,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const mod = sema.mod;
try sema.resolveTypeLayout(operand_ty);
- const enum_ty = switch (operand_ty.zigTypeTag()) {
+ const enum_ty = switch (operand_ty.zigTypeTag(mod)) {
.EnumLiteral => {
const val = try sema.resolveConstValue(block, .unneeded, operand, "");
const bytes = val.castTag(.enum_literal).?.data;
@@ -18654,11 +18694,8 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
const bits_val = struct_val[1];
const signedness = signedness_val.toEnum(std.builtin.Signedness);
- const bits = @intCast(u16, bits_val.toUnsignedInt(target));
- const ty = switch (signedness) {
- .signed => try Type.Tag.int_signed.create(sema.arena, bits),
- .unsigned => try Type.Tag.int_unsigned.create(sema.arena, bits),
- };
+ const bits = @intCast(u16, bits_val.toUnsignedInt(mod));
+ const ty = try mod.intType(signedness, bits);
return sema.addType(ty);
},
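In the hunk above, the ad-hoc `Type.Tag.int_signed`/`int_unsigned` arena payloads are replaced by `mod.intType(signedness, bits)`, moving integer-type construction behind the Module. The real implementation is not part of this diff; below is only a toy sketch of what a module-owned, deduplicating constructor of that shape could look like (all names are hypothetical stand-ins):

```zig
const std = @import("std");

const Signedness = std.builtin.Signedness;

// Hypothetical stand-in; the compiler's actual Module.intType is not shown
// in this diff.
const IntType = struct {
    signedness: Signedness,
    bits: u16,
};

const Module = struct {
    gpa: std.mem.Allocator,
    int_types: std.AutoHashMapUnmanaged(IntType, *IntType) = .{},

    // Returns a canonical instance for a given (signedness, bits) pair, so
    // repeated requests for e.g. u16 yield the same type object.
    fn intType(mod: *Module, signedness: Signedness, bits: u16) !*IntType {
        const key = IntType{ .signedness = signedness, .bits = bits };
        const gop = try mod.int_types.getOrPut(mod.gpa, key);
        if (!gop.found_existing) {
            const ty = try mod.gpa.create(IntType);
            ty.* = key;
            gop.value_ptr.* = ty;
        }
        return gop.value_ptr.*;
    }

    fn deinit(mod: *Module) void {
        var it = mod.int_types.valueIterator();
        while (it.next()) |ty| mod.gpa.destroy(ty.*);
        mod.int_types.deinit(mod.gpa);
    }
};

test "intType deduplicates by shape" {
    var mod = Module{ .gpa = std.testing.allocator };
    defer mod.deinit();
    const a = try mod.intType(.unsigned, 16);
    const b = try mod.intType(.unsigned, 16);
    try std.testing.expect(a == b);
    try std.testing.expect(a.bits == 16);
}
```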
.Vector => {
@@ -18667,9 +18704,8 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
const len_val = struct_val[0];
const child_val = struct_val[1];
- const len = len_val.toUnsignedInt(target);
- var buffer: Value.ToTypeBuffer = undefined;
- const child_ty = child_val.toType(&buffer);
+ const len = len_val.toUnsignedInt(mod);
+ const child_ty = child_val.toType();
try sema.checkVectorElemType(block, src, child_ty);
@@ -18682,7 +18718,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
// bits: comptime_int,
const bits_val = struct_val[0];
- const bits = @intCast(u16, bits_val.toUnsignedInt(target));
+ const bits = @intCast(u16, bits_val.toUnsignedInt(mod));
const ty = switch (bits) {
16 => Type.f16,
32 => Type.f32,
@@ -18708,10 +18744,9 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
- const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?);
+ const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?);
- var buffer: Value.ToTypeBuffer = undefined;
- const unresolved_elem_ty = child_val.toType(&buffer);
+ const unresolved_elem_ty = child_val.toType();
const elem_ty = if (abi_align == 0)
unresolved_elem_ty
else t: {
@@ -18723,7 +18758,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
const ptr_size = size_val.toEnum(std.builtin.Type.Pointer.Size);
var actual_sentinel: ?Value = null;
- if (!sentinel_val.isNull()) {
+ if (!sentinel_val.isNull(mod)) {
if (ptr_size == .One or ptr_size == .C) {
return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{});
}
@@ -18735,9 +18770,9 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
actual_sentinel = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?;
}
- if (elem_ty.zigTypeTag() == .NoReturn) {
+ if (elem_ty.zigTypeTag(mod) == .NoReturn) {
return sema.fail(block, src, "pointer to noreturn not allowed", .{});
- } else if (elem_ty.zigTypeTag() == .Fn) {
+ } else if (elem_ty.zigTypeTag(mod) == .Fn) {
if (ptr_size != .One) {
return sema.fail(block, src, "function pointers must be single pointers", .{});
}
@@ -18747,7 +18782,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
{
return sema.fail(block, src, "function pointer alignment disagrees with function alignment", .{});
}
- } else if (ptr_size == .Many and elem_ty.zigTypeTag() == .Opaque) {
+ } else if (ptr_size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) {
return sema.fail(block, src, "unknown-length pointer to opaque not allowed", .{});
} else if (ptr_size == .C) {
if (!try sema.validateExternType(elem_ty, .other)) {
@@ -18763,7 +18798,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
};
return sema.failWithOwnedErrorMsg(msg);
}
- if (elem_ty.zigTypeTag() == .Opaque) {
+ if (elem_ty.zigTypeTag(mod) == .Opaque) {
return sema.fail(block, src, "C pointers cannot point to opaque types", .{});
}
}
@@ -18790,9 +18825,8 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
// sentinel: ?*const anyopaque,
const sentinel_val = struct_val[2];
- const len = len_val.toUnsignedInt(target);
- var buffer: Value.ToTypeBuffer = undefined;
- const child_ty = try child_val.toType(&buffer).copy(sema.arena);
+ const len = len_val.toUnsignedInt(mod);
+ const child_ty = try child_val.toType().copy(sema.arena);
const sentinel = if (sentinel_val.castTag(.opt_payload)) |p| blk: {
const ptr_ty = try Type.ptr(sema.arena, mod, .{
.@"addrspace" = .generic,
@@ -18810,8 +18844,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
// child: type,
const child_val = struct_val[0];
- var buffer: Value.ToTypeBuffer = undefined;
- const child_ty = try child_val.toType(&buffer).copy(sema.arena);
+ const child_ty = try child_val.toType().copy(sema.arena);
const ty = try Type.optional(sema.arena, child_ty);
return sema.addType(ty);
@@ -18824,11 +18857,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
// payload: type,
const payload_val = struct_val[1];
- var buffer: Value.ToTypeBuffer = undefined;
- const error_set_ty = try error_set_val.toType(&buffer).copy(sema.arena);
- const payload_ty = try payload_val.toType(&buffer).copy(sema.arena);
+ const error_set_ty = try error_set_val.toType().copy(sema.arena);
+ const payload_ty = try payload_val.toType().copy(sema.arena);
- if (error_set_ty.zigTypeTag() != .ErrorSet) {
+ if (error_set_ty.zigTypeTag(mod) != .ErrorSet) {
return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{});
}
@@ -18839,11 +18871,11 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
return sema.addType(ty);
},
.ErrorSet => {
- const payload_val = union_val.val.optionalValue() orelse
+ const payload_val = union_val.val.optionalValue(mod) orelse
return sema.addType(Type.initTag(.anyerror));
const slice_val = payload_val.castTag(.slice).?.data;
- const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod.getTarget()));
+ const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod));
var names: Module.ErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, len);
var i: usize = 0;
@@ -18890,7 +18922,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
return sema.fail(block, src, "reified structs must have no decls", .{});
}
- if (layout != .Packed and !backing_int_val.isNull()) {
+ if (layout != .Packed and !backing_int_val.isNull(mod)) {
return sema.fail(block, src, "non-packed struct does not support backing integer type", .{});
}
@@ -18954,10 +18986,9 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
};
// Enum tag type
- var buffer: Value.ToTypeBuffer = undefined;
- const int_tag_ty = try tag_type_val.toType(&buffer).copy(new_decl_arena_allocator);
+ const int_tag_ty = try tag_type_val.toType().copy(new_decl_arena_allocator);
- if (int_tag_ty.zigTypeTag() != .Int) {
+ if (int_tag_ty.zigTypeTag(mod) != .Int) {
return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
}
enum_obj.tag_ty = int_tag_ty;
@@ -19090,7 +19121,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
const new_decl_arena_allocator = new_decl_arena.allocator();
const union_obj = try new_decl_arena_allocator.create(Module.Union);
- const type_tag = if (!tag_type_val.isNull())
+ const type_tag = if (!tag_type_val.isNull(mod))
Type.Tag.union_tagged
else if (layout != .Auto)
Type.Tag.@"union"
@@ -19130,11 +19161,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
var tag_ty_field_names: ?Module.EnumFull.NameMap = null;
var enum_field_names: ?*Module.EnumNumbered.NameMap = null;
const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
- if (tag_type_val.optionalValue()) |payload_val| {
- var buffer: Value.ToTypeBuffer = undefined;
- union_obj.tag_ty = try payload_val.toType(&buffer).copy(new_decl_arena_allocator);
+ if (tag_type_val.optionalValue(mod)) |payload_val| {
+ union_obj.tag_ty = try payload_val.toType().copy(new_decl_arena_allocator);
- if (union_obj.tag_ty.zigTypeTag() != .Enum) {
+ if (union_obj.tag_ty.zigTypeTag(mod) != .Enum) {
return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{});
}
tag_ty_field_names = try union_obj.tag_ty.enumFields().clone(sema.arena);
@@ -19187,14 +19217,13 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
return sema.fail(block, src, "duplicate union field {s}", .{field_name});
}
- var buffer: Value.ToTypeBuffer = undefined;
- const field_ty = try type_val.toType(&buffer).copy(new_decl_arena_allocator);
+ const field_ty = try type_val.toType().copy(new_decl_arena_allocator);
gop.value_ptr.* = .{
.ty = field_ty,
- .abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?),
+ .abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?),
};
- if (field_ty.zigTypeTag() == .Opaque) {
+ if (field_ty.zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
errdefer msg.destroy(sema.gpa);
@@ -19216,7 +19245,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
- } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty))) {
+ } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
@@ -19280,20 +19309,18 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
- const alignment = @intCast(u29, alignment_val.toUnsignedInt(target));
+ const alignment = @intCast(u29, alignment_val.toUnsignedInt(mod));
if (alignment == target_util.defaultFunctionAlignment(target)) {
break :alignment 0;
} else {
break :alignment alignment;
}
};
- const return_type = return_type_val.optionalValue() orelse
+ const return_type = return_type_val.optionalValue(mod) orelse
return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{});
- var buf: Value.ToTypeBuffer = undefined;
-
const args_slice_val = args_val.castTag(.slice).?.data;
- const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod.getTarget()));
+ const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod));
const param_types = try sema.arena.alloc(Type, args_len);
const comptime_params = try sema.arena.alloc(bool, args_len);
@@ -19316,12 +19343,12 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
return sema.fail(block, src, "Type.Fn.Param.is_generic must be false for @Type", .{});
}
- const param_type_val = param_type_opt_val.optionalValue() orelse
+ const param_type_val = param_type_opt_val.optionalValue(mod) orelse
return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{});
- const param_type = try param_type_val.toType(&buf).copy(sema.arena);
+ const param_type = try param_type_val.toType().copy(sema.arena);
if (arg_is_noalias) {
- if (!param_type.isPtrAtRuntime()) {
+ if (!param_type.isPtrAtRuntime(mod)) {
return sema.fail(block, src, "non-pointer parameter declared noalias", .{});
}
noalias_bits |= @as(u32, 1) << (std.math.cast(u5, i) orelse
@@ -19336,7 +19363,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
.param_types = param_types,
.comptime_params = comptime_params.ptr,
.noalias_bits = noalias_bits,
- .return_type = try return_type.toType(&buf).copy(sema.arena),
+ .return_type = try return_type.toType().copy(sema.arena),
.alignment = alignment,
.cc = cc,
.is_var_args = is_var_args,
@@ -19396,8 +19423,6 @@ fn reifyStruct(
},
};
- const target = mod.getTarget();
-
// Fields
const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
try struct_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
@@ -19420,7 +19445,7 @@ fn reifyStruct(
if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
- const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?);
+ const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?);
if (layout == .Packed) {
if (abi_align != 0) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{});
@@ -19461,7 +19486,7 @@ fn reifyStruct(
return sema.fail(block, src, "duplicate struct field {s}", .{field_name});
}
- const default_val = if (default_value_val.optionalValue()) |opt_val| blk: {
+ const default_val = if (default_value_val.optionalValue(mod)) |opt_val| blk: {
const payload_val = if (opt_val.pointerDecl()) |opt_decl|
mod.declPtr(opt_decl).val
else
@@ -19472,8 +19497,7 @@ fn reifyStruct(
return sema.fail(block, src, "comptime field without default initialization value", .{});
}
- var buffer: Value.ToTypeBuffer = undefined;
- const field_ty = try type_val.toType(&buffer).copy(new_decl_arena_allocator);
+ const field_ty = try type_val.toType().copy(new_decl_arena_allocator);
gop.value_ptr.* = .{
.ty = field_ty,
.abi_align = abi_align,
@@ -19482,7 +19506,7 @@ fn reifyStruct(
.offset = undefined,
};
- if (field_ty.zigTypeTag() == .Opaque) {
+ if (field_ty.zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(sema.gpa);
@@ -19492,7 +19516,7 @@ fn reifyStruct(
};
return sema.failWithOwnedErrorMsg(msg);
}
- if (field_ty.zigTypeTag() == .NoReturn) {
+ if (field_ty.zigTypeTag(mod) == .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "struct fields cannot be 'noreturn'", .{});
errdefer msg.destroy(sema.gpa);
@@ -19514,7 +19538,7 @@ fn reifyStruct(
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
- } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty))) {
+ } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
@@ -19545,20 +19569,15 @@ fn reifyStruct(
var fields_bit_sum: u64 = 0;
for (struct_obj.fields.values()) |field| {
- fields_bit_sum += field.ty.bitSize(target);
+ fields_bit_sum += field.ty.bitSize(mod);
}
- if (backing_int_val.optionalValue()) |payload| {
- var buf: Value.ToTypeBuffer = undefined;
- const backing_int_ty = payload.toType(&buf);
+ if (backing_int_val.optionalValue(mod)) |payload| {
+ const backing_int_ty = payload.toType();
try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
struct_obj.backing_int_ty = try backing_int_ty.copy(new_decl_arena_allocator);
} else {
- var buf: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, fields_bit_sum),
- };
- struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(new_decl_arena_allocator);
+ struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum));
}
struct_obj.status = .have_layout;
@@ -19569,6 +19588,7 @@ fn reifyStruct(
}
fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const addrspace_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -19594,7 +19614,7 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
ptr_info.@"addrspace" = dest_addrspace;
const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
- const dest_ty = if (ptr_ty.zigTypeTag() == .Optional)
+ const dest_ty = if (ptr_ty.zigTypeTag(mod) == .Optional)
try Type.optional(sema.arena, dest_ptr_ty)
else
dest_ptr_ty;
@@ -19716,6 +19736,7 @@ fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
}
fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -19730,12 +19751,12 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
if (try sema.resolveMaybeUndefVal(operand)) |val| {
const result_val = try sema.floatToInt(block, operand_src, val, operand_ty, dest_ty);
return sema.addConstant(dest_ty, result_val);
- } else if (dest_ty.zigTypeTag() == .ComptimeInt) {
+ } else if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_int' must be comptime-known");
}
try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);
- if (dest_ty.intInfo(sema.mod.getTarget()).bits == 0) {
+ if (dest_ty.intInfo(mod).bits == 0) {
if (block.wantSafety()) {
const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, Value.zero));
try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds);
@@ -19755,6 +19776,7 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -19769,7 +19791,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
if (try sema.resolveMaybeUndefVal(operand)) |val| {
const result_val = try val.intToFloatAdvanced(sema.arena, operand_ty, dest_ty, sema.mod, sema);
return sema.addConstant(dest_ty, result_val);
- } else if (dest_ty.zigTypeTag() == .ComptimeFloat) {
+ } else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_float' must be comptime-known");
}
@@ -19778,6 +19800,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
@@ -19790,9 +19813,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ptr_ty = try sema.resolveType(block, src, extra.lhs);
try sema.checkPtrType(block, type_src, ptr_ty);
- const elem_ty = ptr_ty.elemType2();
- const target = sema.mod.getTarget();
- const ptr_align = try ptr_ty.ptrAlignmentAdvanced(target, sema);
+ const elem_ty = ptr_ty.elemType2(mod);
+ const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema);
if (ptr_ty.isSlice()) {
const msg = msg: {
@@ -19805,8 +19827,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| {
- const addr = val.toUnsignedInt(target);
- if (!ptr_ty.isAllowzeroPtr() and addr == 0)
+ const addr = val.toUnsignedInt(mod);
+ if (!ptr_ty.isAllowzeroPtr(mod) and addr == 0)
return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(sema.mod)});
if (addr != 0 and ptr_align != 0 and addr % ptr_align != 0)
return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)});
@@ -19820,8 +19842,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
try sema.requireRuntimeBlock(block, src, operand_src);
- if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag() == .Fn)) {
- if (!ptr_ty.isAllowzeroPtr()) {
+ if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) {
+ if (!ptr_ty.isAllowzeroPtr(mod)) {
const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize);
try sema.addSafetyCheck(block, is_non_zero, .cast_to_null);
}
@@ -19926,6 +19948,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
}
fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -19934,7 +19957,6 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);
- const target = sema.mod.getTarget();
try sema.checkPtrType(block, dest_ty_src, dest_ty);
try sema.checkPtrOperand(block, operand_src, operand_ty);
@@ -19982,18 +20004,18 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
else
operand;
- const dest_elem_ty = dest_ty.elemType2();
+ const dest_elem_ty = dest_ty.elemType2(mod);
try sema.resolveTypeLayout(dest_elem_ty);
- const dest_align = dest_ty.ptrAlignment(target);
+ const dest_align = dest_ty.ptrAlignment(mod);
- const operand_elem_ty = operand_ty.elemType2();
+ const operand_elem_ty = operand_ty.elemType2(mod);
try sema.resolveTypeLayout(operand_elem_ty);
- const operand_align = operand_ty.ptrAlignment(target);
+ const operand_align = operand_ty.ptrAlignment(mod);
// If the destination is less aligned than the source, preserve the source alignment
const aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: {
// Unwrap the pointer (or pointer-like optional) type, set alignment, and re-wrap into result
- if (dest_ty.zigTypeTag() == .Optional) {
+ if (dest_ty.zigTypeTag(mod) == .Optional) {
var buf: Type.Payload.ElemType = undefined;
var dest_ptr_info = dest_ty.optionalChild(&buf).ptrInfo().data;
dest_ptr_info.@"align" = operand_align;
@@ -20006,8 +20028,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
};
if (dest_is_slice) {
- const operand_elem_size = operand_elem_ty.abiSize(target);
- const dest_elem_size = dest_elem_ty.abiSize(target);
+ const operand_elem_size = operand_elem_ty.abiSize(mod);
+ const dest_elem_size = dest_elem_ty.abiSize(mod);
if (operand_elem_size != dest_elem_size) {
return sema.fail(block, dest_ty_src, "TODO: implement @ptrCast between slices changing the length", .{});
}
@@ -20032,21 +20054,21 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
}
if (try sema.resolveMaybeUndefVal(ptr)) |operand_val| {
- if (!dest_ty.ptrAllowsZero() and operand_val.isUndef()) {
+ if (!dest_ty.ptrAllowsZero(mod) and operand_val.isUndef()) {
return sema.failWithUseOfUndef(block, operand_src);
}
- if (!dest_ty.ptrAllowsZero() and operand_val.isNull()) {
+ if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) {
return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)});
}
- if (dest_ty.zigTypeTag() == .Optional and sema.typeOf(ptr).zigTypeTag() != .Optional) {
+ if (dest_ty.zigTypeTag(mod) == .Optional and sema.typeOf(ptr).zigTypeTag(mod) != .Optional) {
return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, operand_val));
}
return sema.addConstant(aligned_dest_ty, operand_val);
}
try sema.requireRuntimeBlock(block, src, null);
- if (block.wantSafety() and operand_ty.ptrAllowsZero() and !dest_ty.ptrAllowsZero() and
- (try sema.typeHasRuntimeBits(dest_ty.elemType2()) or dest_ty.elemType2().zigTypeTag() == .Fn))
+ if (block.wantSafety() and operand_ty.ptrAllowsZero(mod) and !dest_ty.ptrAllowsZero(mod) and
+ (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn))
{
const ptr_int = try block.addUnOp(.ptrtoint, ptr);
const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
@@ -20102,6 +20124,7 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
}
fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -20112,7 +20135,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_scalar_ty);
const operand_ty = sema.typeOf(operand);
const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
- const is_vector = operand_ty.zigTypeTag() == .Vector;
+ const is_vector = operand_ty.zigTypeTag(mod) == .Vector;
const dest_ty = if (is_vector)
try Type.vector(sema.arena, operand_ty.vectorLen(), dest_scalar_ty)
else
@@ -20122,15 +20145,14 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.coerce(block, dest_ty, operand, operand_src);
}
- const target = sema.mod.getTarget();
- const dest_info = dest_scalar_ty.intInfo(target);
+ const dest_info = dest_scalar_ty.intInfo(mod);
if (try sema.typeHasOnePossibleValue(dest_ty)) |val| {
return sema.addConstant(dest_ty, val);
}
- if (operand_scalar_ty.zigTypeTag() != .ComptimeInt) {
- const operand_info = operand_ty.intInfo(target);
+ if (operand_scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
+ const operand_info = operand_ty.intInfo(mod);
if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
return sema.addConstant(operand_ty, val);
}
@@ -20186,6 +20208,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const align_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -20199,12 +20222,12 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
var ptr_info = ptr_ty.ptrInfo().data;
ptr_info.@"align" = dest_align;
var dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
- if (ptr_ty.zigTypeTag() == .Optional) {
+ if (ptr_ty.zigTypeTag(mod) == .Optional) {
dest_ty = try Type.Tag.optional.create(sema.arena, dest_ty);
}
if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |val| {
- if (try val.getUnsignedIntAdvanced(sema.mod.getTarget(), null)) |addr| {
+ if (try val.getUnsignedIntAdvanced(mod, null)) |addr| {
if (addr % dest_align != 0) {
return sema.fail(block, ptr_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align });
}
@@ -20247,23 +20270,23 @@ fn zirBitCount(
block: *Block,
inst: Zir.Inst.Index,
air_tag: Air.Inst.Tag,
- comptime comptimeOp: fn (val: Value, ty: Type, target: std.Target) u64,
+ comptime comptimeOp: fn (val: Value, ty: Type, mod: *const Module) u64,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
_ = try sema.checkIntOrVector(block, operand, operand_src);
- const target = sema.mod.getTarget();
- const bits = operand_ty.intInfo(target).bits;
+ const bits = operand_ty.intInfo(mod).bits;
if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
return sema.addConstant(operand_ty, val);
}
- const result_scalar_ty = try Type.smallestUnsignedInt(sema.arena, bits);
- switch (operand_ty.zigTypeTag()) {
+ const result_scalar_ty = try mod.smallestUnsignedInt(bits);
+ switch (operand_ty.zigTypeTag(mod)) {
.Vector => {
const vec_len = operand_ty.vectorLen();
const result_ty = try Type.vector(sema.arena, vec_len, result_scalar_ty);
@@ -20272,10 +20295,10 @@ fn zirBitCount(
var elem_buf: Value.ElemValueBuffer = undefined;
const elems = try sema.arena.alloc(Value, vec_len);
- const scalar_ty = operand_ty.scalarType();
+ const scalar_ty = operand_ty.scalarType(mod);
for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
- const count = comptimeOp(elem_val, scalar_ty, target);
+ const count = comptimeOp(elem_val, scalar_ty, mod);
elem.* = try Value.Tag.int_u64.create(sema.arena, count);
}
return sema.addConstant(
@@ -20291,7 +20314,7 @@ fn zirBitCount(
if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef()) return sema.addConstUndef(result_scalar_ty);
try sema.resolveLazyValue(val);
- return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, target));
+ return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, mod));
} else {
try sema.requireRuntimeBlock(block, src, operand_src);
return block.addTyOp(air_tag, result_scalar_ty, operand);
@@ -20302,14 +20325,14 @@ fn zirBitCount(
}
fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand = try sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src);
- const target = sema.mod.getTarget();
- const bits = scalar_ty.intInfo(target).bits;
+ const bits = scalar_ty.intInfo(mod).bits;
if (bits % 8 != 0) {
return sema.fail(
block,
@@ -20323,11 +20346,11 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
return sema.addConstant(operand_ty, val);
}
- switch (operand_ty.zigTypeTag()) {
+ switch (operand_ty.zigTypeTag(mod)) {
.Int => {
const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef()) return sema.addConstUndef(operand_ty);
- const result_val = try val.byteSwap(operand_ty, target, sema.arena);
+ const result_val = try val.byteSwap(operand_ty, mod, sema.arena);
return sema.addConstant(operand_ty, result_val);
} else operand_src;
@@ -20344,7 +20367,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const elems = try sema.arena.alloc(Value, vec_len);
for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
- elem.* = try elem_val.byteSwap(operand_ty, target, sema.arena);
+ elem.* = try elem_val.byteSwap(operand_ty, mod, sema.arena);
}
return sema.addConstant(
operand_ty,
@@ -20371,12 +20394,12 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
return sema.addConstant(operand_ty, val);
}
- const target = sema.mod.getTarget();
- switch (operand_ty.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (operand_ty.zigTypeTag(mod)) {
.Int => {
const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
if (val.isUndef()) return sema.addConstUndef(operand_ty);
- const result_val = try val.bitReverse(operand_ty, target, sema.arena);
+ const result_val = try val.bitReverse(operand_ty, mod, sema.arena);
return sema.addConstant(operand_ty, result_val);
} else operand_src;
@@ -20393,7 +20416,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const elems = try sema.arena.alloc(Value, vec_len);
for (elems, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
- elem.* = try elem_val.bitReverse(scalar_ty, target, sema.arena);
+ elem.* = try elem_val.bitReverse(scalar_ty, mod, sema.arena);
}
return sema.addConstant(
operand_ty,
@@ -20429,10 +20452,10 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
const ty = try sema.resolveType(block, lhs_src, extra.lhs);
const field_name = try sema.resolveConstString(block, rhs_src, extra.rhs, "name of field must be comptime-known");
- const target = sema.mod.getTarget();
+ const mod = sema.mod;
try sema.resolveTypeLayout(ty);
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Struct => {},
else => {
const msg = msg: {
@@ -20464,15 +20487,16 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
if (i == field_index) {
return bit_sum;
}
- bit_sum += field.ty.bitSize(target);
+ bit_sum += field.ty.bitSize(mod);
} else unreachable;
},
- else => return ty.structFieldOffset(field_index, target) * 8,
+ else => return ty.structFieldOffset(field_index, mod) * 8,
}
}
fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
- switch (ty.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (ty.zigTypeTag(mod)) {
.Struct, .Enum, .Union, .Opaque => return,
else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(sema.mod)}),
}
@@ -20480,7 +20504,8 @@ fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Com
/// Returns `true` if the type was a comptime_int.
fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
- switch (try ty.zigTypeTagOrPoison()) {
+ const mod = sema.mod;
+ switch (try ty.zigTypeTagOrPoison(mod)) {
.ComptimeInt => return true,
.Int => return false,
else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(sema.mod)}),
@@ -20493,7 +20518,8 @@ fn checkInvalidPtrArithmetic(
src: LazySrcLoc,
ty: Type,
) CompileError!void {
- switch (try ty.zigTypeTagOrPoison()) {
+ const mod = sema.mod;
+ switch (try ty.zigTypeTagOrPoison(mod)) {
.Pointer => switch (ty.ptrSize()) {
.One, .Slice => return,
.Many, .C => return sema.fail(
@@ -20532,7 +20558,8 @@ fn checkPtrOperand(
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
- switch (ty.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (ty.zigTypeTag(mod)) {
.Pointer => return,
.Fn => {
const msg = msg: {
@@ -20550,7 +20577,7 @@ fn checkPtrOperand(
};
return sema.failWithOwnedErrorMsg(msg);
},
- .Optional => if (ty.isPtrLikeOptional()) return,
+ .Optional => if (ty.isPtrLikeOptional(mod)) return,
else => {},
}
return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)});
@@ -20562,7 +20589,8 @@ fn checkPtrType(
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
- switch (ty.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (ty.zigTypeTag(mod)) {
.Pointer => return,
.Fn => {
const msg = msg: {
@@ -20580,7 +20608,7 @@ fn checkPtrType(
};
return sema.failWithOwnedErrorMsg(msg);
},
- .Optional => if (ty.isPtrLikeOptional()) return,
+ .Optional => if (ty.isPtrLikeOptional(mod)) return,
else => {},
}
return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)});
@@ -20592,9 +20620,10 @@ fn checkVectorElemType(
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
- switch (ty.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (ty.zigTypeTag(mod)) {
.Int, .Float, .Bool => return,
- else => if (ty.isPtrAtRuntime()) return,
+ else => if (ty.isPtrAtRuntime(mod)) return,
}
return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(sema.mod)});
}
@@ -20605,7 +20634,8 @@ fn checkFloatType(
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
- switch (ty.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (ty.zigTypeTag(mod)) {
.ComptimeInt, .ComptimeFloat, .Float => {},
else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(sema.mod)}),
}
@@ -20617,9 +20647,10 @@ fn checkNumericType(
ty_src: LazySrcLoc,
ty: Type,
) CompileError!void {
- switch (ty.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (ty.zigTypeTag(mod)) {
.ComptimeFloat, .Float, .ComptimeInt, .Int => {},
- .Vector => switch (ty.childType().zigTypeTag()) {
+ .Vector => switch (ty.childType().zigTypeTag(mod)) {
.ComptimeFloat, .Float, .ComptimeInt, .Int => {},
else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
},
@@ -20637,9 +20668,9 @@ fn checkAtomicPtrOperand(
ptr_src: LazySrcLoc,
ptr_const: bool,
) CompileError!Air.Inst.Ref {
- const target = sema.mod.getTarget();
- var diag: target_util.AtomicPtrAlignmentDiagnostics = .{};
- const alignment = target_util.atomicPtrAlignment(target, elem_ty, &diag) catch |err| switch (err) {
+ const mod = sema.mod;
+ var diag: Module.AtomicPtrAlignmentDiagnostics = .{};
+ const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) {
error.FloatTooBig => return sema.fail(
block,
elem_ty_src,
@@ -20668,7 +20699,7 @@ fn checkAtomicPtrOperand(
};
const ptr_ty = sema.typeOf(ptr);
- const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison()) {
+ const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) {
.Pointer => ptr_ty.ptrInfo().data,
else => {
const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data);
@@ -20735,12 +20766,13 @@ fn checkIntOrVector(
operand: Air.Inst.Ref,
operand_src: LazySrcLoc,
) CompileError!Type {
+ const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
- switch (try operand_ty.zigTypeTagOrPoison()) {
+ switch (try operand_ty.zigTypeTagOrPoison(mod)) {
.Int => return operand_ty,
.Vector => {
const elem_ty = operand_ty.childType();
- switch (try elem_ty.zigTypeTagOrPoison()) {
+ switch (try elem_ty.zigTypeTagOrPoison(mod)) {
.Int => return elem_ty,
else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
elem_ty.fmt(sema.mod),
@@ -20759,11 +20791,12 @@ fn checkIntOrVectorAllowComptime(
operand_ty: Type,
operand_src: LazySrcLoc,
) CompileError!Type {
- switch (try operand_ty.zigTypeTagOrPoison()) {
+ const mod = sema.mod;
+ switch (try operand_ty.zigTypeTagOrPoison(mod)) {
.Int, .ComptimeInt => return operand_ty,
.Vector => {
const elem_ty = operand_ty.childType();
- switch (try elem_ty.zigTypeTagOrPoison()) {
+ switch (try elem_ty.zigTypeTagOrPoison(mod)) {
.Int, .ComptimeInt => return elem_ty,
else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
elem_ty.fmt(sema.mod),
@@ -20777,7 +20810,8 @@ fn checkIntOrVectorAllowComptime(
}
fn checkErrorSetType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
- switch (ty.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (ty.zigTypeTag(mod)) {
.ErrorSet => return,
else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(sema.mod)}),
}
@@ -20805,11 +20839,12 @@ fn checkSimdBinOp(
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!SimdBinOp {
+ const mod = sema.mod;
const lhs_ty = sema.typeOf(uncasted_lhs);
const rhs_ty = sema.typeOf(uncasted_rhs);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
- var vec_len: ?usize = if (lhs_ty.zigTypeTag() == .Vector) lhs_ty.vectorLen() else null;
+ var vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen() else null;
const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
});
@@ -20823,7 +20858,7 @@ fn checkSimdBinOp(
.lhs_val = try sema.resolveMaybeUndefVal(lhs),
.rhs_val = try sema.resolveMaybeUndefVal(rhs),
.result_ty = result_ty,
- .scalar_ty = result_ty.scalarType(),
+ .scalar_ty = result_ty.scalarType(mod),
};
}
@@ -20836,8 +20871,9 @@ fn checkVectorizableBinaryOperands(
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!void {
- const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
- const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+ const mod = sema.mod;
+ const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+ const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
if (lhs_zig_ty_tag != .Vector and rhs_zig_ty_tag != .Vector) return;
const lhs_is_vector = switch (lhs_zig_ty_tag) {
@@ -20892,6 +20928,7 @@ fn resolveExportOptions(
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
) CompileError!std.builtin.ExportOptions {
+ const mod = sema.mod;
const export_options_ty = try sema.getBuiltinType("ExportOptions");
const air_ref = try sema.resolveInst(zir_ref);
const options = try sema.coerce(block, export_options_ty, air_ref, src);
@@ -20904,7 +20941,7 @@ fn resolveExportOptions(
const name_operand = try sema.fieldVal(block, src, options, "name", name_src);
const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known");
const name_ty = Type.initTag(.const_slice_u8);
- const name = try name_val.toAllocatedBytes(name_ty, sema.arena, sema.mod);
+ const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod);
const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src);
const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_operand, "linkage of exported value must be comptime-known");
@@ -20913,8 +20950,8 @@ fn resolveExportOptions(
const section_operand = try sema.fieldVal(block, src, options, "section", section_src);
const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known");
const section_ty = Type.initTag(.const_slice_u8);
- const section = if (section_opt_val.optionalValue()) |section_val|
- try section_val.toAllocatedBytes(section_ty, sema.arena, sema.mod)
+ const section = if (section_opt_val.optionalValue(mod)) |section_val|
+ try section_val.toAllocatedBytes(section_ty, sema.arena, mod)
else
null;
@@ -20979,6 +21016,7 @@ fn zirCmpxchg(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.Cmpxchg, extended.operand).data;
const air_tag: Air.Inst.Tag = switch (extended.small) {
0 => .cmpxchg_weak,
@@ -20996,7 +21034,7 @@ fn zirCmpxchg(
// zig fmt: on
const expected_value = try sema.resolveInst(extra.expected_value);
const elem_ty = sema.typeOf(expected_value);
- if (elem_ty.zigTypeTag() == .Float) {
+ if (elem_ty.zigTypeTag(mod) == .Float) {
return sema.fail(
block,
elem_ty_src,
@@ -21102,26 +21140,26 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const operation = try sema.resolveBuiltinEnum(block, op_src, extra.lhs, "ReduceOp", "@reduce operation must be comptime-known");
const operand = try sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);
- const target = sema.mod.getTarget();
+ const mod = sema.mod;
- if (operand_ty.zigTypeTag() != .Vector) {
- return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(sema.mod)});
+ if (operand_ty.zigTypeTag(mod) != .Vector) {
+ return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(mod)});
}
const scalar_ty = operand_ty.childType();
// Type-check depending on operation.
switch (operation) {
- .And, .Or, .Xor => switch (scalar_ty.zigTypeTag()) {
+ .And, .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) {
.Int, .Bool => {},
else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or boolean operand; found '{}'", .{
- @tagName(operation), operand_ty.fmt(sema.mod),
+ @tagName(operation), operand_ty.fmt(mod),
}),
},
- .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag()) {
+ .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) {
.Int, .Float => {},
else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or float operand; found '{}'", .{
- @tagName(operation), operand_ty.fmt(sema.mod),
+ @tagName(operation), operand_ty.fmt(mod),
}),
},
}
@@ -21136,19 +21174,19 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
if (operand_val.isUndef()) return sema.addConstUndef(scalar_ty);
- var accum: Value = try operand_val.elemValue(sema.mod, sema.arena, 0);
+ var accum: Value = try operand_val.elemValue(mod, sema.arena, 0);
var elem_buf: Value.ElemValueBuffer = undefined;
var i: u32 = 1;
while (i < vec_len) : (i += 1) {
- const elem_val = operand_val.elemValueBuffer(sema.mod, i, &elem_buf);
+ const elem_val = operand_val.elemValueBuffer(mod, i, &elem_buf);
switch (operation) {
- .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, sema.mod),
- .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, sema.mod),
- .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, sema.mod),
- .Min => accum = accum.numberMin(elem_val, target),
- .Max => accum = accum.numberMax(elem_val, target),
+ .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, mod),
+ .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, mod),
+ .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, mod),
+ .Min => accum = accum.numberMin(elem_val, mod),
+ .Max => accum = accum.numberMax(elem_val, mod),
.Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty),
- .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, sema.mod),
+ .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, mod),
}
}
return sema.addConstant(scalar_ty, accum);
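The comptime loop above mirrors the runtime semantics of `@reduce`; a small usage sketch (illustrative, not part of this change):

const std = @import("std");

test "reduce over a vector" {
    const v: @Vector(4, u32) = .{ 1, 2, 3, 4 };
    // .Add sums the elements; .Max picks the largest one.
    try std.testing.expectEqual(@as(u32, 10), @reduce(.Add, v));
    try std.testing.expectEqual(@as(u32, 4), @reduce(.Max, v));
}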
@@ -21165,6 +21203,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
}
fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Shuffle, inst_data.payload_index).data;
const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -21177,7 +21216,7 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
var mask = try sema.resolveInst(extra.mask);
var mask_ty = sema.typeOf(mask);
- const mask_len = switch (sema.typeOf(mask).zigTypeTag()) {
+ const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) {
.Array, .Vector => sema.typeOf(mask).arrayLen(),
else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}),
};
@@ -21200,6 +21239,7 @@ fn analyzeShuffle(
mask: Value,
mask_len: u32,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const a_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = src_node };
const b_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = src_node };
const mask_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = src_node };
@@ -21211,7 +21251,7 @@ fn analyzeShuffle(
.elem_type = elem_ty,
});
- var maybe_a_len = switch (sema.typeOf(a).zigTypeTag()) {
+ var maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) {
.Array, .Vector => sema.typeOf(a).arrayLen(),
.Undefined => null,
else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{
@@ -21219,7 +21259,7 @@ fn analyzeShuffle(
sema.typeOf(a).fmt(sema.mod),
}),
};
- var maybe_b_len = switch (sema.typeOf(b).zigTypeTag()) {
+ var maybe_b_len = switch (sema.typeOf(b).zigTypeTag(mod)) {
.Array, .Vector => sema.typeOf(b).arrayLen(),
.Undefined => null,
else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{
@@ -21255,7 +21295,7 @@ fn analyzeShuffle(
var buf: Value.ElemValueBuffer = undefined;
const elem = mask.elemValueBuffer(sema.mod, i, &buf);
if (elem.isUndef()) continue;
- const int = elem.toSignedInt(sema.mod.getTarget());
+ const int = elem.toSignedInt(mod);
var unsigned: u32 = undefined;
var chosen: u32 = undefined;
if (int >= 0) {
@@ -21297,7 +21337,7 @@ fn analyzeShuffle(
values[i] = Value.undef;
continue;
}
- const int = mask_elem_val.toSignedInt(sema.mod.getTarget());
+ const int = mask_elem_val.toSignedInt(mod);
const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int);
if (int >= 0) {
values[i] = try a_val.elemValue(sema.mod, sema.arena, unsigned);
@@ -21356,6 +21396,7 @@ fn analyzeShuffle(
}
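The `~int` handling above implements the mask convention of `@shuffle`: non-negative mask elements index the first operand, while `-1 - i` (bitwise `~i`) selects element `i` of the second operand. A hedged sketch:

const std = @import("std");

test "shuffle mask selects from both operands" {
    const a: @Vector(4, u8) = .{ 1, 2, 3, 4 };
    const b: @Vector(4, u8) = .{ 5, 6, 7, 8 };
    // 0 and 2 pick a[0] and a[2]; -1 and -3 pick b[0] and b[2].
    const mask: @Vector(4, i32) = .{ 0, -1, 2, -3 };
    const r = @shuffle(u8, a, b, mask);
    try std.testing.expect(@reduce(.And, r == @Vector(4, u8){ 1, 5, 3, 7 }));
}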
fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.Select, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
@@ -21369,7 +21410,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
const pred_uncoerced = try sema.resolveInst(extra.pred);
const pred_ty = sema.typeOf(pred_uncoerced);
- const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison()) {
+ const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) {
.Vector, .Array => pred_ty.arrayLen(),
else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(sema.mod)}),
};
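`zirSelect` expects the predicate to be a vector (or array) of `bool` matching the operand length; a minimal usage sketch (illustrative):

const std = @import("std");

test "select picks elementwise" {
    const pred: @Vector(4, bool) = .{ true, false, true, false };
    const a: @Vector(4, u8) = .{ 1, 2, 3, 4 };
    const b: @Vector(4, u8) = .{ 5, 6, 7, 8 };
    const r = @select(u8, pred, a, b);
    try std.testing.expect(@reduce(.And, r == @Vector(4, u8){ 1, 6, 3, 8 }));
}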
@@ -21489,6 +21530,7 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data;
const src = inst_data.src();
@@ -21505,7 +21547,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation);
- switch (elem_ty.zigTypeTag()) {
+ switch (elem_ty.zigTypeTag(mod)) {
.Enum => if (op != .Xchg) {
return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{});
},
@@ -21536,7 +21578,6 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
break :rs operand_src;
};
if (ptr_val.isComptimeMutablePtr()) {
- const target = sema.mod.getTarget();
const ptr_ty = sema.typeOf(ptr);
const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
const new_val = switch (op) {
@@ -21544,12 +21585,12 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
.Xchg => operand_val,
.Add => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty),
.Sub => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty),
- .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, sema.mod),
- .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, sema.mod),
- .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, sema.mod),
- .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, sema.mod),
- .Max => stored_val.numberMax (operand_val, target),
- .Min => stored_val.numberMin (operand_val, target),
+ .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, mod),
+ .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, mod),
+ .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, mod),
+ .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, mod),
+ .Max => stored_val.numberMax (operand_val, mod),
+ .Min => stored_val.numberMin (operand_val, mod),
// zig fmt: on
};
try sema.storePtrVal(block, src, ptr_val, new_val, elem_ty);
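The branch above folds `@atomicRmw` at comptime when the pointer is comptime-mutable; at runtime the builtin behaves as sketched below (ordering spelling assumed for this compiler version):

const std = @import("std");

test "atomicRmw returns the previous value" {
    var x: u32 = 5;
    const prev = @atomicRmw(u32, &x, .Add, 3, .SeqCst);
    try std.testing.expectEqual(@as(u32, 5), prev);
    try std.testing.expectEqual(@as(u32, 8), x);
}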
@@ -21623,8 +21664,9 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const maybe_mulend1 = try sema.resolveMaybeUndefVal(mulend1);
const maybe_mulend2 = try sema.resolveMaybeUndefVal(mulend2);
const maybe_addend = try sema.resolveMaybeUndefVal(addend);
+ const mod = sema.mod;
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.ComptimeFloat, .Float, .Vector => {},
else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(sema.mod)}),
}
@@ -21743,7 +21785,6 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const callee_ty = sema.typeOf(func);
const func_ty = try sema.checkCallArgumentCount(block, func, func_src, callee_ty, resolved_args.len, false);
-
const ensure_result_used = extra.flags.ensure_result_used;
return sema.analyzeCall(block, func, func_ty, func_src, call_src, modifier, ensure_result_used, resolved_args, null, null);
}
@@ -21760,13 +21801,14 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
const field_name = try sema.resolveConstString(block, name_src, extra.field_name, "field name must be comptime-known");
const field_ptr = try sema.resolveInst(extra.field_ptr);
const field_ptr_ty = sema.typeOf(field_ptr);
+ const mod = sema.mod;
- if (parent_ty.zigTypeTag() != .Struct and parent_ty.zigTypeTag() != .Union) {
+ if (parent_ty.zigTypeTag(mod) != .Struct and parent_ty.zigTypeTag(mod) != .Union) {
return sema.fail(block, ty_src, "expected struct or union type, found '{}'", .{parent_ty.fmt(sema.mod)});
}
try sema.resolveTypeLayout(parent_ty);
- const field_index = switch (parent_ty.zigTypeTag()) {
+ const field_index = switch (parent_ty.zigTypeTag(mod)) {
.Struct => blk: {
if (parent_ty.isTuple()) {
if (mem.eql(u8, field_name, "len")) {
@@ -21781,7 +21823,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
else => unreachable,
};
- if (parent_ty.zigTypeTag() == .Struct and parent_ty.structFieldIsComptime(field_index)) {
+ if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index)) {
return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{});
}
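For context, the field lookup above serves `@fieldParentPtr`, which recovers a pointer to the containing struct from a pointer to one of its fields; a sketch assuming the three-argument form used by this compiler version:

const std = @import("std");

const Node = struct {
    prev: u32,
    data: u64,
};

test "fieldParentPtr recovers the containing struct" {
    var n: Node = .{ .prev = 1, .data = 2 };
    const parent = @fieldParentPtr(Node, "data", &n.data);
    try std.testing.expectEqual(&n, parent);
}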
@@ -21913,15 +21955,14 @@ fn analyzeMinMax(
) CompileError!Air.Inst.Ref {
assert(operands.len == operand_srcs.len);
assert(operands.len > 0);
+ const mod = sema.mod;
if (operands.len == 1) return operands[0];
- const mod = sema.mod;
- const target = mod.getTarget();
const opFunc = switch (air_tag) {
.min => Value.numberMin,
.max => Value.numberMax,
else => unreachable,
};
// First, find all comptime-known arguments, and get their min/max
@@ -21949,7 +21990,7 @@ fn analyzeMinMax(
try sema.resolveLazyValue(operand_val);
const vec_len = simd_op.len orelse {
- const result_val = opFunc(cur_val, operand_val, target);
+ const result_val = opFunc(cur_val, operand_val, mod);
cur_minmax = try sema.addConstant(simd_op.result_ty, result_val);
continue;
};
@@ -21959,7 +22000,7 @@ fn analyzeMinMax(
for (elems, 0..) |*elem, i| {
const lhs_elem_val = cur_val.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem_val = operand_val.elemValueBuffer(mod, i, &rhs_buf);
- elem.* = opFunc(lhs_elem_val, rhs_elem_val, target);
+ elem.* = opFunc(lhs_elem_val, rhs_elem_val, mod);
}
cur_minmax = try sema.addConstant(
simd_op.result_ty,
@@ -21984,7 +22025,7 @@ fn analyzeMinMax(
break :refined orig_ty;
}
- const refined_ty = if (orig_ty.zigTypeTag() == .Vector) blk: {
+ const refined_ty = if (orig_ty.zigTypeTag(mod) == .Vector) blk: {
const elem_ty = orig_ty.childType();
const len = orig_ty.vectorLen();
@@ -21996,16 +22037,16 @@ fn analyzeMinMax(
for (1..len) |idx| {
const elem_val = try val.elemValue(mod, sema.arena, idx);
if (elem_val.isUndef()) break :blk orig_ty; // can't refine undef
- if (Value.order(elem_val, cur_min, target).compare(.lt)) cur_min = elem_val;
- if (Value.order(elem_val, cur_max, target).compare(.gt)) cur_max = elem_val;
+ if (Value.order(elem_val, cur_min, mod).compare(.lt)) cur_min = elem_val;
+ if (Value.order(elem_val, cur_max, mod).compare(.gt)) cur_max = elem_val;
}
- const refined_elem_ty = try Type.intFittingRange(target, sema.arena, cur_min, cur_max);
+ const refined_elem_ty = try mod.intFittingRange(cur_min, cur_max);
break :blk try Type.vector(sema.arena, len, refined_elem_ty);
} else blk: {
if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
if (val.isUndef()) break :blk orig_ty; // can't refine undef
- break :blk try Type.intFittingRange(target, sema.arena, val, val);
+ break :blk try mod.intFittingRange(val, val);
};
// Apply the refined type to the current value - this isn't strictly necessary in the
@@ -22061,7 +22102,7 @@ fn analyzeMinMax(
// Finally, refine the type based on the comptime-known bound.
if (known_undef) break :refine; // can't refine undef
const unrefined_ty = sema.typeOf(cur_minmax.?);
- const is_vector = unrefined_ty.zigTypeTag() == .Vector;
+ const is_vector = unrefined_ty.zigTypeTag(mod) == .Vector;
const comptime_elem_ty = if (is_vector) comptime_ty.childType() else comptime_ty;
const unrefined_elem_ty = if (is_vector) unrefined_ty.childType() else unrefined_ty;
@@ -22069,18 +22110,18 @@ fn analyzeMinMax(
// Compute the final bounds based on the runtime type and the comptime-known bound type
const min_val = switch (air_tag) {
- .min => try unrefined_elem_ty.minInt(sema.arena, target),
- .max => try comptime_elem_ty.minInt(sema.arena, target), // @max(ct, rt) >= ct
+ .min => try unrefined_elem_ty.minInt(sema.arena, mod),
+ .max => try comptime_elem_ty.minInt(sema.arena, mod), // @max(ct, rt) >= ct
else => unreachable,
};
const max_val = switch (air_tag) {
- .min => try comptime_elem_ty.maxInt(sema.arena, target), // @min(ct, rt) <= ct
- .max => try unrefined_elem_ty.maxInt(sema.arena, target),
+ .min => try comptime_elem_ty.maxInt(sema.arena, mod), // @min(ct, rt) <= ct
+ .max => try unrefined_elem_ty.maxInt(sema.arena, mod),
else => unreachable,
};
// Find the smallest type which can contain these bounds
- const final_elem_ty = try Type.intFittingRange(target, sema.arena, min_val, max_val);
+ const final_elem_ty = try mod.intFittingRange(min_val, max_val);
const final_ty = if (is_vector)
try Type.vector(sema.arena, unrefined_ty.vectorLen(), final_elem_ty)
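The refinement above narrows the result type of `@min`/`@max` when one operand is comptime-known; a hedged sketch of the observable effect (the exact refined type is an assumption about this branch of the compiler, not a stable guarantee):

const std = @import("std");

test "min with a comptime bound" {
    var x: u32 = 200;
    // The result can never exceed 100, so Sema may give it a type narrower than u32.
    const r = @min(x, 100);
    try std.testing.expect(r == 100);
}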
@@ -22132,6 +22173,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const dest_len = try indexablePtrLenOrNone(sema, block, dest_src, dest_ptr);
const src_len = try indexablePtrLenOrNone(sema, block, src_src, src_ptr);
const target = sema.mod.getTarget();
+ const mod = sema.mod;
if (dest_ty.isConstPtr()) {
return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{});
@@ -22196,7 +22238,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: {
if (!dest_ptr_val.isComptimeMutablePtr()) break :rs dest_src;
if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| {
- const len_u64 = (try len_val.?.getUnsignedIntAdvanced(target, sema)).?;
+ const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?;
const len = try sema.usizeCast(block, dest_src, len_u64);
for (0..len) |i| {
const elem_index = try sema.addIntUnsigned(Type.usize, i);
@@ -22239,12 +22281,12 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
// lowering. The AIR instruction requires pointers with element types of
// equal ABI size.
- if (dest_ty.zigTypeTag() != .Pointer or src_ty.zigTypeTag() != .Pointer) {
+ if (dest_ty.zigTypeTag(mod) != .Pointer or src_ty.zigTypeTag(mod) != .Pointer) {
return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the source or destination iterable is a tuple", .{});
}
- const dest_elem_ty = dest_ty.elemType2();
- const src_elem_ty = src_ty.elemType2();
+ const dest_elem_ty = dest_ty.elemType2(mod);
+ const src_elem_ty = src_ty.elemType2(mod);
if (.ok != try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, true, target, dest_src, src_src)) {
return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the element types have different ABI sizes", .{});
}
@@ -22255,7 +22297,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
var new_dest_ptr = dest_ptr;
var new_src_ptr = src_ptr;
if (len_val) |val| {
- const len = val.toUnsignedInt(target);
+ const len = val.toUnsignedInt(mod);
if (len == 0) {
// This AIR instruction guarantees length > 0 if it is comptime-known.
return;
@@ -22320,6 +22362,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
}
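`zirMemcpy` lowers the two-argument `@memcpy(dest, source)` form, whose operand lengths must match; a minimal usage sketch:

const std = @import("std");

test "memcpy between equal-length arrays" {
    var dest: [4]u8 = undefined;
    const src = [_]u8{ 1, 2, 3, 4 };
    @memcpy(&dest, &src);
    try std.testing.expectEqualSlices(u8, &src, &dest);
}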
fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src = inst_data.src();
@@ -22334,14 +22377,13 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
return sema.fail(block, dest_src, "cannot memset constant pointer", .{});
}
- const dest_elem_ty = dest_ptr_ty.elemType2();
- const target = sema.mod.getTarget();
+ const dest_elem_ty = dest_ptr_ty.elemType2(mod);
const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |ptr_val| rs: {
const len_air_ref = try sema.fieldVal(block, src, dest_ptr, "len", dest_src);
const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse
break :rs dest_src;
- const len_u64 = (try len_val.getUnsignedIntAdvanced(target, sema)).?;
+ const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?;
const len = try sema.usizeCast(block, dest_src, len_u64);
if (len == 0) {
// This AIR instruction guarantees length > 0 if it is comptime-known.
@@ -22499,9 +22541,10 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const tracy = trace(@src());
defer tracy.end();
+ const mod = sema.mod;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
- const target = sema.mod.getTarget();
+ const target = mod.getTarget();
const align_src: LazySrcLoc = .{ .node_offset_fn_type_align = inst_data.src_node };
const addrspace_src: LazySrcLoc = .{ .node_offset_fn_type_addrspace = inst_data.src_node };
@@ -22535,7 +22578,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
if (val.tag() == .generic_poison) {
break :blk null;
}
- const alignment = @intCast(u32, val.toUnsignedInt(target));
+ const alignment = @intCast(u32, val.toUnsignedInt(mod));
try sema.validateAlign(block, align_src, alignment);
if (alignment == target_util.defaultFunctionAlignment(target)) {
break :blk 0;
@@ -22551,7 +22594,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
},
else => |e| return e,
};
- const alignment = @intCast(u32, align_tv.val.toUnsignedInt(target));
+ const alignment = @intCast(u32, align_tv.val.toUnsignedInt(mod));
try sema.validateAlign(block, align_src, alignment);
if (alignment == target_util.defaultFunctionAlignment(target)) {
break :blk 0;
@@ -22642,8 +22685,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
extra_index += body.len;
const val = try sema.resolveGenericBody(block, ret_src, body, inst, Type.type, "return type must be comptime-known");
- var buffer: Value.ToTypeBuffer = undefined;
- const ty = try val.toType(&buffer).copy(sema.arena);
+ const ty = try val.toType().copy(sema.arena);
break :blk ty;
} else if (extra.data.bits.has_ret_ty_ref) blk: {
const ret_ty_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
@@ -22654,8 +22696,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
},
else => |e| return e,
};
- var buffer: Value.ToTypeBuffer = undefined;
- const ty = try ret_ty_tv.val.toType(&buffer).copy(sema.arena);
+ const ty = try ret_ty_tv.val.toType().copy(sema.arena);
break :blk ty;
} else Type.void;
@@ -22727,13 +22768,14 @@ fn zirCDefine(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const val_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
const name = try sema.resolveConstString(block, name_src, extra.lhs, "name of macro being defined must be comptime-known");
const rhs = try sema.resolveInst(extra.rhs);
- if (sema.typeOf(rhs).zigTypeTag() != .Void) {
+ if (sema.typeOf(rhs).zigTypeTag(mod) != .Void) {
const value = try sema.resolveConstString(block, val_src, extra.rhs, "value of macro being defined must be comptime-known");
try block.c_import_buf.?.writer().print("#define {s} {s}\n", .{ name, value });
} else {
@@ -22799,9 +22841,9 @@ fn resolvePrefetchOptions(
src: LazySrcLoc,
zir_ref: Zir.Inst.Ref,
) CompileError!std.builtin.PrefetchOptions {
+ const mod = sema.mod;
const options_ty = try sema.getBuiltinType("PrefetchOptions");
const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src);
- const target = sema.mod.getTarget();
const rw_src = sema.maybeOptionsSrc(block, src, "rw");
const locality_src = sema.maybeOptionsSrc(block, src, "locality");
@@ -22818,7 +22860,7 @@ fn resolvePrefetchOptions(
return std.builtin.PrefetchOptions{
.rw = rw_val.toEnum(std.builtin.PrefetchOptions.Rw),
- .locality = @intCast(u2, locality_val.toUnsignedInt(target)),
+ .locality = @intCast(u2, locality_val.toUnsignedInt(mod)),
.cache = cache_val.toEnum(std.builtin.PrefetchOptions.Cache),
};
}
@@ -22887,7 +22929,7 @@ fn resolveExternOptions(
const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src);
const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "threadlocality of the extern symbol must be comptime-known");
- const library_name = if (!library_name_val.isNull()) blk: {
+ const library_name = if (!library_name_val.isNull(mod)) blk: {
const payload = library_name_val.castTag(.opt_payload).?.data;
const library_name = try payload.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod);
if (library_name.len == 0) {
@@ -22917,17 +22959,17 @@ fn zirBuiltinExtern(
block: *Block,
extended: Zir.Inst.Extended.InstData,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
var ty = try sema.resolveType(block, ty_src, extra.lhs);
- if (!ty.isPtrAtRuntime()) {
+ if (!ty.isPtrAtRuntime(mod)) {
return sema.fail(block, ty_src, "expected (optional) pointer", .{});
}
if (!try sema.validateExternType(ty.childType(), .other)) {
const msg = msg: {
- const mod = sema.mod;
const msg = try sema.errMsg(block, ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
const src_decl = sema.mod.declPtr(block.src_decl);
@@ -22945,7 +22987,7 @@ fn zirBuiltinExtern(
else => |e| return e,
};
- if (options.linkage == .Weak and !ty.ptrAllowsZero()) {
+ if (options.linkage == .Weak and !ty.ptrAllowsZero(mod)) {
ty = try Type.optional(sema.arena, ty);
}
@@ -23087,7 +23129,7 @@ fn validateVarType(
const src_decl = mod.declPtr(block.src_decl);
try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl), var_ty);
- if (var_ty.zigTypeTag() == .ComptimeInt or var_ty.zigTypeTag() == .ComptimeFloat) {
+ if (var_ty.zigTypeTag(mod) == .ComptimeInt or var_ty.zigTypeTag(mod) == .ComptimeFloat) {
try sema.errNote(block, src, msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{});
}
@@ -23101,8 +23143,9 @@ fn validateRunTimeType(
var_ty: Type,
is_extern: bool,
) CompileError!bool {
+ const mod = sema.mod;
var ty = var_ty;
- while (true) switch (ty.zigTypeTag()) {
+ while (true) switch (ty.zigTypeTag(mod)) {
.Bool,
.Int,
.Float,
@@ -23126,9 +23169,9 @@ fn validateRunTimeType(
.Pointer => {
const elem_ty = ty.childType();
- switch (elem_ty.zigTypeTag()) {
+ switch (elem_ty.zigTypeTag(mod)) {
.Opaque => return true,
- .Fn => return elem_ty.isFnOrHasRuntimeBits(),
+ .Fn => return elem_ty.isFnOrHasRuntimeBits(mod),
else => ty = elem_ty,
}
},
@@ -23174,7 +23217,7 @@ fn explainWhyTypeIsComptimeInner(
type_set: *TypeSet,
) CompileError!void {
const mod = sema.mod;
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Bool,
.Int,
.Float,
@@ -23211,8 +23254,8 @@ fn explainWhyTypeIsComptimeInner(
try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set);
},
.Pointer => {
- const elem_ty = ty.elemType2();
- if (elem_ty.zigTypeTag() == .Fn) {
+ const elem_ty = ty.elemType2(mod);
+ if (elem_ty.zigTypeTag(mod) == .Fn) {
const fn_info = elem_ty.fnInfo();
if (fn_info.is_generic) {
try mod.errNoteNonLazy(src_loc, msg, "function is generic", .{});
@@ -23221,7 +23264,7 @@ fn explainWhyTypeIsComptimeInner(
.Inline => try mod.errNoteNonLazy(src_loc, msg, "function has inline calling convention", .{}),
else => {},
}
- if (fn_info.return_type.comptimeOnly()) {
+ if (fn_info.return_type.comptimeOnly(mod)) {
try mod.errNoteNonLazy(src_loc, msg, "function has a comptime-only return type", .{});
}
return;
@@ -23295,7 +23338,8 @@ fn validateExternType(
ty: Type,
position: ExternPosition,
) !bool {
- switch (ty.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (ty.zigTypeTag(mod)) {
.Type,
.ComptimeFloat,
.ComptimeInt,
@@ -23314,7 +23358,7 @@ fn validateExternType(
.AnyFrame,
=> return true,
.Pointer => return !(ty.isSlice() or try sema.typeRequiresComptime(ty)),
- .Int => switch (ty.intInfo(sema.mod.getTarget()).bits) {
+ .Int => switch (ty.intInfo(mod).bits) {
8, 16, 32, 64, 128 => return true,
else => return false,
},
@@ -23329,14 +23373,12 @@ fn validateExternType(
return !Type.fnCallingConventionAllowsZigTypes(target, ty.fnCallingConvention());
},
.Enum => {
- var buf: Type.Payload.Bits = undefined;
- return sema.validateExternType(ty.intTagType(&buf), position);
+ return sema.validateExternType(ty.intTagType(), position);
},
.Struct, .Union => switch (ty.containerLayout()) {
.Extern => return true,
.Packed => {
- const target = sema.mod.getTarget();
- const bit_size = try ty.bitSizeAdvanced(target, sema);
+ const bit_size = try ty.bitSizeAdvanced(mod, sema);
switch (bit_size) {
8, 16, 32, 64, 128 => return true,
else => return false,
@@ -23346,10 +23388,10 @@ fn validateExternType(
},
.Array => {
if (position == .ret_ty or position == .param_ty) return false;
- return sema.validateExternType(ty.elemType2(), .element);
+ return sema.validateExternType(ty.elemType2(mod), .element);
},
- .Vector => return sema.validateExternType(ty.elemType2(), .element),
- .Optional => return ty.isPtrLikeOptional(),
+ .Vector => return sema.validateExternType(ty.elemType2(mod), .element),
+ .Optional => return ty.isPtrLikeOptional(mod),
}
}
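As an illustration of what the checks above accept (not exhaustive, and not taken from this diff): fixed-width integers, floats, and non-slice pointers are extern-compatible, while comptime-only types and slices are rejected.

const std = @import("std");

const CFriendly = extern struct {
    count: u32, // 8/16/32/64/128-bit integers pass the .Int check above
    ratio: f64,
    name: [*:0]const u8, // non-slice pointers are allowed
    // A field of type `[]u8` or `comptime_int` would be rejected here.
};

test "extern struct compiles" {
    try std.testing.expect(@sizeOf(CFriendly) > 0);
}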
@@ -23361,7 +23403,7 @@ fn explainWhyTypeIsNotExtern(
position: ExternPosition,
) CompileError!void {
const mod = sema.mod;
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Opaque,
.Bool,
.Float,
@@ -23390,7 +23432,7 @@ fn explainWhyTypeIsNotExtern(
},
.Void => try mod.errNoteNonLazy(src_loc, msg, "'void' is a zero bit type; for C 'void' use 'anyopaque'", .{}),
.NoReturn => try mod.errNoteNonLazy(src_loc, msg, "'noreturn' is only allowed as a return type", .{}),
- .Int => if (!std.math.isPowerOfTwo(ty.intInfo(sema.mod.getTarget()).bits)) {
+ .Int => if (!std.math.isPowerOfTwo(ty.intInfo(mod).bits)) {
try mod.errNoteNonLazy(src_loc, msg, "only integers with power of two bits are extern compatible", .{});
} else {
try mod.errNoteNonLazy(src_loc, msg, "only integers with 8, 16, 32, 64 and 128 bits are extern compatible", .{});
@@ -23409,8 +23451,7 @@ fn explainWhyTypeIsNotExtern(
}
},
.Enum => {
- var buf: Type.Payload.Bits = undefined;
- const tag_ty = ty.intTagType(&buf);
+ const tag_ty = ty.intTagType();
try mod.errNoteNonLazy(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(sema.mod)});
try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position);
},
@@ -23422,17 +23463,17 @@ fn explainWhyTypeIsNotExtern(
} else if (position == .param_ty) {
return mod.errNoteNonLazy(src_loc, msg, "arrays are not allowed as a parameter type", .{});
}
- try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(), .element);
+ try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element);
},
- .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(), .element),
+ .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element),
.Optional => try mod.errNoteNonLazy(src_loc, msg, "only pointer like optionals are extern compatible", .{}),
}
}
/// Returns true if `ty` is allowed in packed types.
/// Does *NOT* require `ty` to be resolved in any way.
-fn validatePackedType(ty: Type) bool {
- switch (ty.zigTypeTag()) {
+fn validatePackedType(ty: Type, mod: *const Module) bool {
+ switch (ty.zigTypeTag(mod)) {
.Type,
.ComptimeFloat,
.ComptimeInt,
@@ -23448,7 +23489,7 @@ fn validatePackedType(ty: Type) bool {
.Fn,
.Array,
=> return false,
- .Optional => return ty.isPtrLikeOptional(),
+ .Optional => return ty.isPtrLikeOptional(mod),
.Void,
.Bool,
.Float,
@@ -23468,7 +23509,7 @@ fn explainWhyTypeIsNotPacked(
ty: Type,
) CompileError!void {
const mod = sema.mod;
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Void,
.Bool,
.Float,
@@ -23731,6 +23772,7 @@ fn panicSentinelMismatch(
sentinel_index: Air.Inst.Ref,
) !void {
assert(!parent_block.is_comptime);
+ const mod = sema.mod;
const expected_sentinel_val = maybe_sentinel orelse return;
const expected_sentinel = try sema.addConstant(sentinel_ty, expected_sentinel_val);
@@ -23743,7 +23785,7 @@ fn panicSentinelMismatch(
break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr);
};
- const ok = if (sentinel_ty.zigTypeTag() == .Vector) ok: {
+ const ok = if (sentinel_ty.zigTypeTag(mod) == .Vector) ok: {
const eql =
try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq);
break :ok try parent_block.addInst(.{
@@ -23753,7 +23795,7 @@ fn panicSentinelMismatch(
.operation = .And,
} },
});
- } else if (sentinel_ty.isSelfComparable(true))
+ } else if (sentinel_ty.isSelfComparable(mod, true))
try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel)
else {
const panic_fn = try sema.getBuiltin("checkNonScalarSentinel");
@@ -23848,6 +23890,7 @@ fn fieldVal(
// When editing this function, note that there is corresponding logic to be edited
// in `fieldPtr`. This function takes a value and returns a value.
+ const mod = sema.mod;
const arena = sema.arena;
const object_src = src; // TODO better source location
const object_ty = sema.typeOf(object);
@@ -23862,7 +23905,7 @@ fn fieldVal(
else
object_ty;
- switch (inner_ty.zigTypeTag()) {
+ switch (inner_ty.zigTypeTag(mod)) {
.Array => {
if (mem.eql(u8, field_name, "len")) {
return sema.addConstant(
@@ -23926,10 +23969,9 @@ fn fieldVal(
object;
const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?;
- var to_type_buffer: Value.ToTypeBuffer = undefined;
- const child_type = val.toType(&to_type_buffer);
+ const child_type = val.toType();
- switch (try child_type.zigTypeTagOrPoison()) {
+ switch (try child_type.zigTypeTagOrPoison(mod)) {
.ErrorSet => {
const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: {
if (payload.data.names.getEntry(field_name)) |entry| {
@@ -23997,7 +24039,7 @@ fn fieldVal(
const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
if (child_type.isSlice()) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{});
- if (child_type.zigTypeTag() == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{});
+ if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
@@ -24035,9 +24077,10 @@ fn fieldPtr(
// When editing this function, note that there is corresponding logic to be edited
// in `fieldVal`. This function takes a pointer and returns a pointer.
+ const mod = sema.mod;
const object_ptr_src = src; // TODO better source location
const object_ptr_ty = sema.typeOf(object_ptr);
- const object_ty = switch (object_ptr_ty.zigTypeTag()) {
+ const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) {
.Pointer => object_ptr_ty.elemType(),
else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(sema.mod)}),
};
@@ -24052,7 +24095,7 @@ fn fieldPtr(
else
object_ty;
- switch (inner_ty.zigTypeTag()) {
+ switch (inner_ty.zigTypeTag(mod)) {
.Array => {
if (mem.eql(u8, field_name, "len")) {
var anon_decl = try block.startAnonDecl();
@@ -24142,10 +24185,9 @@ fn fieldPtr(
result;
const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?;
- var to_type_buffer: Value.ToTypeBuffer = undefined;
- const child_type = val.toType(&to_type_buffer);
+ const child_type = val.toType();
- switch (child_type.zigTypeTag()) {
+ switch (child_type.zigTypeTag(mod)) {
.ErrorSet => {
// TODO resolve inferred error sets
const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: {
@@ -24258,15 +24300,16 @@ fn fieldCallBind(
// When editing this function, note that there is corresponding logic to be edited
// in `fieldVal`. This function takes a pointer and returns a pointer.
+ const mod = sema.mod;
const raw_ptr_src = src; // TODO better source location
const raw_ptr_ty = sema.typeOf(raw_ptr);
- const inner_ty = if (raw_ptr_ty.zigTypeTag() == .Pointer and (raw_ptr_ty.ptrSize() == .One or raw_ptr_ty.ptrSize() == .C))
+ const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize() == .One or raw_ptr_ty.ptrSize() == .C))
raw_ptr_ty.childType()
else
return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(sema.mod)});
// Optionally dereference a second pointer to get the concrete type.
- const is_double_ptr = inner_ty.zigTypeTag() == .Pointer and inner_ty.ptrSize() == .One;
+ const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize() == .One;
const concrete_ty = if (is_double_ptr) inner_ty.childType() else inner_ty;
const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty;
const object_ptr = if (is_double_ptr)
@@ -24275,7 +24318,7 @@ fn fieldCallBind(
raw_ptr;
find_field: {
- switch (concrete_ty.zigTypeTag()) {
+ switch (concrete_ty.zigTypeTag(mod)) {
.Struct => {
const struct_ty = try sema.resolveTypeFields(concrete_ty);
if (struct_ty.castTag(.@"struct")) |struct_obj| {
@@ -24321,21 +24364,21 @@ fn fieldCallBind(
}
// If we get here, we need to look for a decl in the struct type instead.
- const found_decl = switch (concrete_ty.zigTypeTag()) {
+ const found_decl = switch (concrete_ty.zigTypeTag(mod)) {
.Struct, .Opaque, .Union, .Enum => found_decl: {
if (concrete_ty.getNamespace()) |namespace| {
if (try sema.namespaceLookup(block, src, namespace, field_name)) |decl_idx| {
try sema.addReferencedBy(block, src, decl_idx);
const decl_val = try sema.analyzeDeclVal(block, src, decl_idx);
const decl_type = sema.typeOf(decl_val);
- if (decl_type.zigTypeTag() == .Fn and
+ if (decl_type.zigTypeTag(mod) == .Fn and
decl_type.fnParamLen() >= 1)
{
const first_param_type = decl_type.fnParamType(0);
const first_param_tag = first_param_type.tag();
// zig fmt: off
if (first_param_tag == .generic_poison or (
- first_param_type.zigTypeTag() == .Pointer and
+ first_param_type.zigTypeTag(mod) == .Pointer and
(first_param_type.ptrSize() == .One or
first_param_type.ptrSize() == .C) and
first_param_type.childType().eql(concrete_ty, sema.mod)))
@@ -24356,7 +24399,7 @@ fn fieldCallBind(
.func_inst = decl_val,
.arg0_inst = deref,
} };
- } else if (first_param_type.zigTypeTag() == .Optional) {
+ } else if (first_param_type.zigTypeTag(mod) == .Optional) {
var opt_buf: Type.Payload.ElemType = undefined;
const child = first_param_type.optionalChild(&opt_buf);
if (child.eql(concrete_ty, sema.mod)) {
@@ -24365,7 +24408,7 @@ fn fieldCallBind(
.func_inst = decl_val,
.arg0_inst = deref,
} };
- } else if (child.zigTypeTag() == .Pointer and
+ } else if (child.zigTypeTag(mod) == .Pointer and
child.ptrSize() == .One and
child.childType().eql(concrete_ty, sema.mod))
{
@@ -24374,7 +24417,7 @@ fn fieldCallBind(
.arg0_inst = object_ptr,
} };
}
- } else if (first_param_type.zigTypeTag() == .ErrorUnion and
+ } else if (first_param_type.zigTypeTag(mod) == .ErrorUnion and
first_param_type.errorUnionPayload().eql(concrete_ty, sema.mod))
{
const deref = try sema.analyzeLoad(block, src, object_ptr, src);
@@ -24421,9 +24464,10 @@ fn finishFieldCallBind(
.@"addrspace" = ptr_ty.ptrAddressSpace(),
});
+ const mod = sema.mod;
const container_ty = ptr_ty.childType();
- if (container_ty.zigTypeTag() == .Struct) {
- if (container_ty.structFieldValueComptime(field_index)) |default_val| {
+ if (container_ty.zigTypeTag(mod) == .Struct) {
+ if (container_ty.structFieldValueComptime(mod, field_index)) |default_val| {
return .{ .direct = try sema.addConstant(field_ty, default_val) };
}
}
@@ -24504,7 +24548,8 @@ fn structFieldPtr(
unresolved_struct_ty: Type,
initializing: bool,
) CompileError!Air.Inst.Ref {
- assert(unresolved_struct_ty.zigTypeTag() == .Struct);
+ const mod = sema.mod;
+ assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct);
const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty);
try sema.resolveStructLayout(struct_ty);
@@ -24544,6 +24589,7 @@ fn structFieldPtrByIndex(
return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing);
}
+ const mod = sema.mod;
const struct_obj = struct_ty.castTag(.@"struct").?.data;
const field = struct_obj.fields.values()[field_index];
const struct_ptr_ty = sema.typeOf(struct_ptr);
@@ -24568,7 +24614,7 @@ fn structFieldPtrByIndex(
if (i == field_index) {
ptr_ty_data.bit_offset = running_bits;
}
- running_bits += @intCast(u16, f.ty.bitSize(target));
+ running_bits += @intCast(u16, f.ty.bitSize(mod));
}
ptr_ty_data.host_size = (running_bits + 7) / 8;
@@ -24582,7 +24628,7 @@ fn structFieldPtrByIndex(
const parent_align = if (struct_ptr_ty_info.@"align" != 0)
struct_ptr_ty_info.@"align"
else
- struct_ptr_ty_info.pointee_type.abiAlignment(target);
+ struct_ptr_ty_info.pointee_type.abiAlignment(mod);
ptr_ty_data.@"align" = parent_align;
// If the field happens to be byte-aligned, simplify the pointer type.
@@ -24596,8 +24642,8 @@ fn structFieldPtrByIndex(
if (parent_align != 0 and ptr_ty_data.bit_offset % 8 == 0 and
target.cpu.arch.endian() == .Little)
{
- const elem_size_bytes = ptr_ty_data.pointee_type.abiSize(target);
- const elem_size_bits = ptr_ty_data.pointee_type.bitSize(target);
+ const elem_size_bytes = ptr_ty_data.pointee_type.abiSize(mod);
+ const elem_size_bits = ptr_ty_data.pointee_type.bitSize(mod);
if (elem_size_bytes * 8 == elem_size_bits) {
const byte_offset = ptr_ty_data.bit_offset / 8;
const new_align = @as(u32, 1) << @intCast(u5, @ctz(byte_offset | parent_align));
@@ -24644,7 +24690,8 @@ fn structFieldVal(
field_name_src: LazySrcLoc,
unresolved_struct_ty: Type,
) CompileError!Air.Inst.Ref {
- assert(unresolved_struct_ty.zigTypeTag() == .Struct);
+ const mod = sema.mod;
+ assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct);
const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty);
switch (struct_ty.tag()) {
@@ -24728,9 +24775,10 @@ fn tupleFieldValByIndex(
field_index: u32,
tuple_ty: Type,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const field_ty = tuple_ty.structFieldType(field_index);
- if (tuple_ty.structFieldValueComptime(field_index)) |default_value| {
+ if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
return sema.addConstant(field_ty, default_value);
}
@@ -24743,7 +24791,7 @@ fn tupleFieldValByIndex(
return sema.addConstant(field_ty, field_values[field_index]);
}
- if (tuple_ty.structFieldValueComptime(field_index)) |default_val| {
+ if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
return sema.addConstant(field_ty, default_val);
}
@@ -24762,7 +24810,9 @@ fn unionFieldPtr(
initializing: bool,
) CompileError!Air.Inst.Ref {
const arena = sema.arena;
- assert(unresolved_union_ty.zigTypeTag() == .Union);
+ const mod = sema.mod;
+
+ assert(unresolved_union_ty.zigTypeTag(mod) == .Union);
const union_ptr_ty = sema.typeOf(union_ptr);
const union_ty = try sema.resolveTypeFields(unresolved_union_ty);
@@ -24777,7 +24827,7 @@ fn unionFieldPtr(
});
const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?);
- if (initializing and field.ty.zigTypeTag() == .NoReturn) {
+ if (initializing and field.ty.zigTypeTag(mod) == .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "cannot initialize 'noreturn' field of union", .{});
errdefer msg.destroy(sema.gpa);
@@ -24839,7 +24889,7 @@ fn unionFieldPtr(
const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_val);
try sema.panicInactiveUnionField(block, active_tag, wanted_tag);
}
- if (field.ty.zigTypeTag() == .NoReturn) {
+ if (field.ty.zigTypeTag(mod) == .NoReturn) {
_ = try block.addNoOp(.unreach);
return Air.Inst.Ref.unreachable_value;
}
@@ -24855,7 +24905,8 @@ fn unionFieldVal(
field_name_src: LazySrcLoc,
unresolved_union_ty: Type,
) CompileError!Air.Inst.Ref {
- assert(unresolved_union_ty.zigTypeTag() == .Union);
+ const mod = sema.mod;
+ assert(unresolved_union_ty.zigTypeTag(mod) == .Union);
const union_ty = try sema.resolveTypeFields(unresolved_union_ty);
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
@@ -24911,7 +24962,7 @@ fn unionFieldVal(
const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_byval);
try sema.panicInactiveUnionField(block, active_tag, wanted_tag);
}
- if (field.ty.zigTypeTag() == .NoReturn) {
+ if (field.ty.zigTypeTag(mod) == .NoReturn) {
_ = try block.addNoOp(.unreach);
return Air.Inst.Ref.unreachable_value;
}
@@ -24928,22 +24979,22 @@ fn elemPtr(
init: bool,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const indexable_ptr_src = src; // TODO better source location
const indexable_ptr_ty = sema.typeOf(indexable_ptr);
- const target = sema.mod.getTarget();
- const indexable_ty = switch (indexable_ptr_ty.zigTypeTag()) {
+ const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) {
.Pointer => indexable_ptr_ty.elemType(),
else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}),
};
try checkIndexable(sema, block, src, indexable_ty);
- switch (indexable_ty.zigTypeTag()) {
+ switch (indexable_ty.zigTypeTag(mod)) {
.Array, .Vector => return sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety),
.Struct => {
// Tuple field access.
const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
- const index = @intCast(u32, index_val.toUnsignedInt(target));
+ const index = @intCast(u32, index_val.toUnsignedInt(mod));
return sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init);
},
else => {
@@ -24966,7 +25017,7 @@ fn elemPtrOneLayerOnly(
) CompileError!Air.Inst.Ref {
const indexable_src = src; // TODO better source location
const indexable_ty = sema.typeOf(indexable);
- const target = sema.mod.getTarget();
+ const mod = sema.mod;
try checkIndexable(sema, block, src, indexable_ty);
@@ -24978,7 +25029,7 @@ fn elemPtrOneLayerOnly(
const runtime_src = rs: {
const ptr_val = maybe_ptr_val orelse break :rs indexable_src;
const index_val = maybe_index_val orelse break :rs elem_index_src;
- const index = @intCast(usize, index_val.toUnsignedInt(target));
+ const index = @intCast(usize, index_val.toUnsignedInt(mod));
const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod);
const result_ty = try sema.elemPtrType(indexable_ty, index);
return sema.addConstant(result_ty, elem_ptr);
@@ -24989,7 +25040,7 @@ fn elemPtrOneLayerOnly(
return block.addPtrElemPtr(indexable, elem_index, result_ty);
},
.One => {
- assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by checkIndexable
+ assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety);
},
}
@@ -25006,7 +25057,7 @@ fn elemVal(
) CompileError!Air.Inst.Ref {
const indexable_src = src; // TODO better source location
const indexable_ty = sema.typeOf(indexable);
- const target = sema.mod.getTarget();
+ const mod = sema.mod;
try checkIndexable(sema, block, src, indexable_ty);
@@ -25014,7 +25065,7 @@ fn elemVal(
// index is a scalar or vector instead of unconditionally casting to usize.
const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src);
- switch (indexable_ty.zigTypeTag()) {
+ switch (indexable_ty.zigTypeTag(mod)) {
.Pointer => switch (indexable_ty.ptrSize()) {
.Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
.Many, .C => {
@@ -25024,10 +25075,10 @@ fn elemVal(
const runtime_src = rs: {
const indexable_val = maybe_indexable_val orelse break :rs indexable_src;
const index_val = maybe_index_val orelse break :rs elem_index_src;
- const index = @intCast(usize, index_val.toUnsignedInt(target));
+ const index = @intCast(usize, index_val.toUnsignedInt(mod));
const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, sema.mod);
if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| {
- return sema.addConstant(indexable_ty.elemType2(), elem_val);
+ return sema.addConstant(indexable_ty.elemType2(mod), elem_val);
}
break :rs indexable_src;
};
@@ -25036,7 +25087,7 @@ fn elemVal(
return block.addBinOp(.ptr_elem_val, indexable, elem_index);
},
.One => {
- assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by checkIndexable
+ assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety);
return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src);
},
@@ -25049,7 +25100,7 @@ fn elemVal(
.Struct => {
// Tuple field access.
const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known");
- const index = @intCast(u32, index_val.toUnsignedInt(target));
+ const index = @intCast(u32, index_val.toUnsignedInt(mod));
return sema.tupleField(block, indexable_src, indexable, elem_index_src, index);
},
else => unreachable,
@@ -25093,6 +25144,7 @@ fn tupleFieldPtr(
field_index: u32,
init: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const tuple_ptr_ty = sema.typeOf(tuple_ptr);
const tuple_ty = tuple_ptr_ty.childType();
_ = try sema.resolveTypeFields(tuple_ty);
@@ -25116,7 +25168,7 @@ fn tupleFieldPtr(
.@"addrspace" = tuple_ptr_ty.ptrAddressSpace(),
});
- if (tuple_ty.structFieldValueComptime(field_index)) |default_val| {
+ if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{
.field_ty = field_ty,
.field_val = default_val,
@@ -25151,6 +25203,7 @@ fn tupleField(
field_index_src: LazySrcLoc,
field_index: u32,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const tuple_ty = try sema.resolveTypeFields(sema.typeOf(tuple));
const field_count = tuple_ty.structFieldCount();
@@ -25166,13 +25219,13 @@ fn tupleField(
const field_ty = tuple_ty.structFieldType(field_index);
- if (tuple_ty.structFieldValueComptime(field_index)) |default_value| {
+ if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| {
return sema.addConstant(field_ty, default_value); // comptime field
}
if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| {
if (tuple_val.isUndef()) return sema.addConstUndef(field_ty);
- return sema.addConstant(field_ty, tuple_val.fieldValue(tuple_ty, field_index));
+ return sema.addConstant(field_ty, tuple_val.fieldValue(tuple_ty, mod, field_index));
}
try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src);
@@ -25191,6 +25244,7 @@ fn elemValArray(
elem_index: Air.Inst.Ref,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const array_ty = sema.typeOf(array);
const array_sent = array_ty.sentinel();
const array_len = array_ty.arrayLen();
@@ -25204,10 +25258,9 @@ fn elemValArray(
const maybe_undef_array_val = try sema.resolveMaybeUndefVal(array);
// index must be defined since it can index out of bounds
const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
- const target = sema.mod.getTarget();
if (maybe_index_val) |index_val| {
- const index = @intCast(usize, index_val.toUnsignedInt(target));
+ const index = @intCast(usize, index_val.toUnsignedInt(mod));
if (array_sent) |s| {
if (index == array_len) {
return sema.addConstant(elem_ty, s);
@@ -25223,7 +25276,7 @@ fn elemValArray(
return sema.addConstUndef(elem_ty);
}
if (maybe_index_val) |index_val| {
- const index = @intCast(usize, index_val.toUnsignedInt(target));
+ const index = @intCast(usize, index_val.toUnsignedInt(mod));
const elem_val = try array_val.elemValue(sema.mod, sema.arena, index);
return sema.addConstant(elem_ty, elem_val);
}
@@ -25255,7 +25308,7 @@ fn elemPtrArray(
init: bool,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
- const target = sema.mod.getTarget();
+ const mod = sema.mod;
const array_ptr_ty = sema.typeOf(array_ptr);
const array_ty = array_ptr_ty.childType();
const array_sent = array_ty.sentinel() != null;
@@ -25269,7 +25322,7 @@ fn elemPtrArray(
const maybe_undef_array_ptr_val = try sema.resolveMaybeUndefVal(array_ptr);
// The index must not be undefined since it can be out of bounds.
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
- const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(target));
+ const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod));
if (index >= array_len_s) {
const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else "";
return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label });
@@ -25290,7 +25343,7 @@ fn elemPtrArray(
}
if (!init) {
- try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(), array_ty, array_ptr_src);
+ try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(mod), array_ty, array_ptr_src);
}
const runtime_src = if (maybe_undef_array_ptr_val != null) elem_index_src else array_ptr_src;
@@ -25316,16 +25369,16 @@ fn elemValSlice(
elem_index: Air.Inst.Ref,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const slice_ty = sema.typeOf(slice);
const slice_sent = slice_ty.sentinel() != null;
- const elem_ty = slice_ty.elemType2();
+ const elem_ty = slice_ty.elemType2(mod);
var runtime_src = slice_src;
// slice must be defined since it can be dereferenced as null
const maybe_slice_val = try sema.resolveDefinedValue(block, slice_src, slice);
// index must be defined since it can index out of bounds
const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
- const target = sema.mod.getTarget();
if (maybe_slice_val) |slice_val| {
runtime_src = elem_index_src;
@@ -25335,7 +25388,7 @@ fn elemValSlice(
return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
}
if (maybe_index_val) |index_val| {
- const index = @intCast(usize, index_val.toUnsignedInt(target));
+ const index = @intCast(usize, index_val.toUnsignedInt(mod));
if (index >= slice_len_s) {
const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
@@ -25373,14 +25426,14 @@ fn elemPtrSlice(
elem_index: Air.Inst.Ref,
oob_safety: bool,
) CompileError!Air.Inst.Ref {
- const target = sema.mod.getTarget();
+ const mod = sema.mod;
const slice_ty = sema.typeOf(slice);
const slice_sent = slice_ty.sentinel() != null;
const maybe_undef_slice_val = try sema.resolveMaybeUndefVal(slice);
// The index must not be undefined since it can be out of bounds.
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
- const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(target));
+ const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod));
break :o index;
} else null;
@@ -25484,6 +25537,7 @@ fn coerceExtra(
const dest_ty_src = inst_src; // TODO better source location
const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved);
const inst_ty = try sema.resolveTypeFields(sema.typeOf(inst));
+ const mod = sema.mod;
const target = sema.mod.getTarget();
// If the types are the same, we can return the operand.
if (dest_ty.eql(inst_ty, sema.mod))
@@ -25502,9 +25556,9 @@ fn coerceExtra(
return block.addBitCast(dest_ty, inst);
}
- const is_undef = inst_ty.zigTypeTag() == .Undefined;
+ const is_undef = inst_ty.zigTypeTag(mod) == .Undefined;
- switch (dest_ty.zigTypeTag()) {
+ switch (dest_ty.zigTypeTag(mod)) {
.Optional => optional: {
// undefined sets the optional bit also to undefined.
if (is_undef) {
@@ -25512,18 +25566,18 @@ fn coerceExtra(
}
// null to ?T
- if (inst_ty.zigTypeTag() == .Null) {
+ if (inst_ty.zigTypeTag(mod) == .Null) {
return sema.addConstant(dest_ty, Value.null);
}
// cast from ?*T and ?[*]T to ?*anyopaque
// but don't do it if the source type is a double pointer
- if (dest_ty.isPtrLikeOptional() and dest_ty.elemType2().tag() == .anyopaque and
- inst_ty.isPtrAtRuntime())
+ if (dest_ty.isPtrLikeOptional(mod) and dest_ty.elemType2(mod).tag() == .anyopaque and
+ inst_ty.isPtrAtRuntime(mod))
anyopaque_check: {
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional;
- const elem_ty = inst_ty.elemType2();
- if (elem_ty.zigTypeTag() == .Pointer or elem_ty.isPtrLikeOptional()) {
+ const elem_ty = inst_ty.elemType2(mod);
+ if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) {
in_memory_result = .{ .double_ptr_to_anyopaque = .{
.actual = inst_ty,
.wanted = dest_ty,
@@ -25532,7 +25586,7 @@ fn coerceExtra(
}
// Let the logic below handle wrapping the optional now that
// it has been checked to correctly coerce.
- if (!inst_ty.isPtrLikeOptional()) break :anyopaque_check;
+ if (!inst_ty.isPtrLikeOptional(mod)) break :anyopaque_check;
return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
}
@@ -25554,7 +25608,7 @@ fn coerceExtra(
const dest_info = dest_ty.ptrInfo().data;
// Function body to function pointer.
- if (inst_ty.zigTypeTag() == .Fn) {
+ if (inst_ty.zigTypeTag(mod) == .Fn) {
const fn_val = try sema.resolveConstValue(block, .unneeded, inst, "");
const fn_decl = fn_val.pointerDecl().?;
const inst_as_ptr = try sema.analyzeDeclRef(fn_decl);
@@ -25568,7 +25622,7 @@ fn coerceExtra(
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
const ptr_elem_ty = inst_ty.childType();
const array_ty = dest_info.pointee_type;
- if (array_ty.zigTypeTag() != .Array) break :single_item;
+ if (array_ty.zigTypeTag(mod) != .Array) break :single_item;
const array_elem_ty = array_ty.childType();
if (array_ty.arrayLen() != 1) break :single_item;
const dest_is_mut = dest_info.mutable;
@@ -25584,7 +25638,7 @@ fn coerceExtra(
if (!inst_ty.isSinglePointer()) break :src_array_ptr;
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
const array_ty = inst_ty.childType();
- if (array_ty.zigTypeTag() != .Array) break :src_array_ptr;
+ if (array_ty.zigTypeTag(mod) != .Array) break :src_array_ptr;
const array_elem_type = array_ty.childType();
const dest_is_mut = dest_info.mutable;
@@ -25656,10 +25710,10 @@ fn coerceExtra(
// cast from *T and [*]T to *anyopaque
// but don't do it if the source type is a double pointer
- if (dest_info.pointee_type.tag() == .anyopaque and inst_ty.zigTypeTag() == .Pointer) to_anyopaque: {
+ if (dest_info.pointee_type.tag() == .anyopaque and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: {
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
- const elem_ty = inst_ty.elemType2();
- if (elem_ty.zigTypeTag() == .Pointer or elem_ty.isPtrLikeOptional()) {
+ const elem_ty = inst_ty.elemType2(mod);
+ if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) {
in_memory_result = .{ .double_ptr_to_anyopaque = .{
.actual = inst_ty,
.wanted = dest_ty,
@@ -25679,7 +25733,7 @@ fn coerceExtra(
switch (dest_info.size) {
// coercion to C pointer
- .C => switch (inst_ty.zigTypeTag()) {
+ .C => switch (inst_ty.zigTypeTag(mod)) {
.Null => {
return sema.addConstant(dest_ty, Value.null);
},
@@ -25691,7 +25745,7 @@ fn coerceExtra(
return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
},
.Int => {
- const ptr_size_ty = switch (inst_ty.intInfo(target).signedness) {
+ const ptr_size_ty = switch (inst_ty.intInfo(mod).signedness) {
.signed => Type.isize,
.unsigned => Type.usize,
};
@@ -25733,7 +25787,7 @@ fn coerceExtra(
},
else => {},
},
- .One => switch (dest_info.pointee_type.zigTypeTag()) {
+ .One => switch (dest_info.pointee_type.zigTypeTag(mod)) {
.Union => {
// pointer to anonymous struct to pointer to union
if (inst_ty.isSinglePointer() and
@@ -25767,7 +25821,7 @@ fn coerceExtra(
else => {},
},
.Slice => to_slice: {
- if (inst_ty.zigTypeTag() == .Array) {
+ if (inst_ty.zigTypeTag(mod) == .Array) {
return sema.fail(
block,
inst_src,
@@ -25789,7 +25843,7 @@ fn coerceExtra(
.ptr = if (dest_info.@"align" != 0)
try Value.Tag.int_u64.create(sema.arena, dest_info.@"align")
else
- try dest_info.pointee_type.lazyAbiAlignment(target, sema.arena),
+ try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena),
.len = Value.zero,
});
return sema.addConstant(dest_ty, slice_val);
@@ -25834,13 +25888,13 @@ fn coerceExtra(
},
}
},
- .Int, .ComptimeInt => switch (inst_ty.zigTypeTag()) {
+ .Int, .ComptimeInt => switch (inst_ty.zigTypeTag(mod)) {
.Float, .ComptimeFloat => float: {
if (is_undef) {
return sema.addConstUndef(dest_ty);
}
const val = (try sema.resolveMaybeUndefVal(inst)) orelse {
- if (dest_ty.zigTypeTag() == .ComptimeInt) {
+ if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known");
}
@@ -25870,15 +25924,15 @@ fn coerceExtra(
}
return try sema.addConstant(dest_ty, val);
}
- if (dest_ty.zigTypeTag() == .ComptimeInt) {
+ if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
if (!opts.report_err) return error.NotCoercible;
if (opts.no_cast_to_comptime_int) return inst;
return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known");
}
// integer widening
- const dst_info = dest_ty.intInfo(target);
- const src_info = inst_ty.intInfo(target);
+ const dst_info = dest_ty.intInfo(mod);
+ const src_info = inst_ty.intInfo(mod);
if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or
// small enough unsigned ints can get casted to large enough signed ints
(dst_info.signedness == .signed and dst_info.bits > src_info.bits))
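
Both arms of the condition above can be exercised from ordinary code; a small test of the two accepted widenings, using only the standard library (this illustrates the rule, not the compiler internals):

    const std = @import("std");

    test "integer widenings accepted by coerceExtra" {
        var a: u8 = 200;
        const b: u16 = a; // same signedness, destination has at least as many bits
        const c: i16 = a; // unsigned source into a strictly wider signed destination
        try std.testing.expectEqual(@as(u16, 200), b);
        try std.testing.expectEqual(@as(i16, 200), c);
    }
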
@@ -25892,7 +25946,7 @@ fn coerceExtra(
},
else => {},
},
- .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag()) {
+ .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(mod)) {
.ComptimeFloat => {
const val = try sema.resolveConstValue(block, .unneeded, inst, "");
const result_val = try val.floatCast(sema.arena, dest_ty, target);
@@ -25913,7 +25967,7 @@ fn coerceExtra(
);
}
return try sema.addConstant(dest_ty, result_val);
- } else if (dest_ty.zigTypeTag() == .ComptimeFloat) {
+ } else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known");
}
@@ -25931,7 +25985,7 @@ fn coerceExtra(
return sema.addConstUndef(dest_ty);
}
const val = (try sema.resolveMaybeUndefVal(inst)) orelse {
- if (dest_ty.zigTypeTag() == .ComptimeFloat) {
+ if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known");
}
@@ -25955,7 +26009,7 @@ fn coerceExtra(
},
else => {},
},
- .Enum => switch (inst_ty.zigTypeTag()) {
+ .Enum => switch (inst_ty.zigTypeTag(mod)) {
.EnumLiteral => {
// enum literal to enum
const val = try sema.resolveConstValue(block, .unneeded, inst, "");
@@ -25991,7 +26045,7 @@ fn coerceExtra(
},
else => {},
},
- .ErrorUnion => switch (inst_ty.zigTypeTag()) {
+ .ErrorUnion => switch (inst_ty.zigTypeTag(mod)) {
.ErrorUnion => eu: {
if (maybe_inst_val) |inst_val| {
switch (inst_val.tag()) {
@@ -26031,7 +26085,7 @@ fn coerceExtra(
};
},
},
- .Union => switch (inst_ty.zigTypeTag()) {
+ .Union => switch (inst_ty.zigTypeTag(mod)) {
.Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src),
.Struct => {
if (inst_ty.isAnonStruct()) {
@@ -26043,7 +26097,7 @@ fn coerceExtra(
},
else => {},
},
- .Array => switch (inst_ty.zigTypeTag()) {
+ .Array => switch (inst_ty.zigTypeTag(mod)) {
.Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
.Struct => {
if (inst == .empty_struct) {
@@ -26058,7 +26112,7 @@ fn coerceExtra(
},
else => {},
},
- .Vector => switch (inst_ty.zigTypeTag()) {
+ .Vector => switch (inst_ty.zigTypeTag(mod)) {
.Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
.Struct => {
if (inst_ty.isTuple()) {
@@ -26093,7 +26147,7 @@ fn coerceExtra(
if (!opts.report_err) return error.NotCoercible;
- if (opts.is_ret and dest_ty.zigTypeTag() == .NoReturn) {
+ if (opts.is_ret and dest_ty.zigTypeTag(mod) == .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "function declared 'noreturn' returns", .{});
errdefer msg.destroy(sema.gpa);
@@ -26111,7 +26165,7 @@ fn coerceExtra(
errdefer msg.destroy(sema.gpa);
// E!T to T
- if (inst_ty.zigTypeTag() == .ErrorUnion and
+ if (inst_ty.zigTypeTag(mod) == .ErrorUnion and
(try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
{
try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{});
@@ -26120,7 +26174,7 @@ fn coerceExtra(
// ?T to T
var buf: Type.Payload.ElemType = undefined;
- if (inst_ty.zigTypeTag() == .Optional and
+ if (inst_ty.zigTypeTag(mod) == .Optional and
(try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(&buf), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
{
try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{});
@@ -26133,7 +26187,7 @@ fn coerceExtra(
if (opts.is_ret and sema.mod.test_functions.get(sema.func.?.owner_decl) == null) {
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
const src_decl = sema.mod.declPtr(sema.func.?.owner_decl);
- if (inst_ty.isError() and !dest_ty.isError()) {
+ if (inst_ty.isError(mod) and !dest_ty.isError(mod)) {
try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function cannot return an error", .{});
} else {
try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function return type declared here", .{});
@@ -26264,6 +26318,7 @@ const InMemoryCoercionResult = union(enum) {
}
fn report(res: *const InMemoryCoercionResult, sema: *Sema, block: *Block, src: LazySrcLoc, msg: *Module.ErrorMsg) !void {
+ const mod = sema.mod;
var cur = res;
while (true) switch (cur.*) {
.ok => unreachable,
@@ -26445,8 +26500,8 @@ const InMemoryCoercionResult = union(enum) {
break;
},
.ptr_allowzero => |pair| {
- const wanted_allow_zero = pair.wanted.ptrAllowsZero();
- const actual_allow_zero = pair.actual.ptrAllowsZero();
+ const wanted_allow_zero = pair.wanted.ptrAllowsZero(mod);
+ const actual_allow_zero = pair.actual.ptrAllowsZero(mod);
if (actual_allow_zero and !wanted_allow_zero) {
try sema.errNote(block, src, msg, "'{}' could have null values which are illegal in type '{}'", .{
pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
@@ -26522,13 +26577,15 @@ fn coerceInMemoryAllowed(
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) CompileError!InMemoryCoercionResult {
- if (dest_ty.eql(src_ty, sema.mod))
+ const mod = sema.mod;
+
+ if (dest_ty.eql(src_ty, mod))
return .ok;
// Differently-named integers with the same number of bits.
- if (dest_ty.zigTypeTag() == .Int and src_ty.zigTypeTag() == .Int) {
- const dest_info = dest_ty.intInfo(target);
- const src_info = src_ty.intInfo(target);
+ if (dest_ty.zigTypeTag(mod) == .Int and src_ty.zigTypeTag(mod) == .Int) {
+ const dest_info = dest_ty.intInfo(mod);
+ const src_info = src_ty.intInfo(mod);
if (dest_info.signedness == src_info.signedness and
dest_info.bits == src_info.bits)
@@ -26551,7 +26608,7 @@ fn coerceInMemoryAllowed(
}
// Differently-named floats with the same number of bits.
- if (dest_ty.zigTypeTag() == .Float and src_ty.zigTypeTag() == .Float) {
+ if (dest_ty.zigTypeTag(mod) == .Float and src_ty.zigTypeTag(mod) == .Float) {
const dest_bits = dest_ty.floatBits(target);
const src_bits = src_ty.floatBits(target);
if (dest_bits == src_bits) {
@@ -26575,8 +26632,8 @@ fn coerceInMemoryAllowed(
return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src);
}
- const dest_tag = dest_ty.zigTypeTag();
- const src_tag = src_ty.zigTypeTag();
+ const dest_tag = dest_ty.zigTypeTag(mod);
+ const src_tag = src_ty.zigTypeTag(mod);
// Functions
if (dest_tag == .Fn and src_tag == .Fn) {
@@ -26624,7 +26681,7 @@ fn coerceInMemoryAllowed(
}
const ok_sent = dest_info.sentinel == null or
(src_info.sentinel != null and
- dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, sema.mod));
+ dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, mod));
if (!ok_sent) {
return InMemoryCoercionResult{ .array_sentinel = .{
.actual = src_info.sentinel orelse Value.initTag(.unreachable_value),
@@ -26646,8 +26703,8 @@ fn coerceInMemoryAllowed(
} };
}
- const dest_elem_ty = dest_ty.scalarType();
- const src_elem_ty = src_ty.scalarType();
+ const dest_elem_ty = dest_ty.scalarType(mod);
+ const src_elem_ty = src_ty.scalarType(mod);
const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src);
if (child != .ok) {
return InMemoryCoercionResult{ .vector_elem = .{
@@ -26923,6 +26980,7 @@ fn coerceInMemoryAllowedPtrs(
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) !InMemoryCoercionResult {
+ const mod = sema.mod;
const dest_info = dest_ptr_ty.ptrInfo().data;
const src_info = src_ptr_ty.ptrInfo().data;
@@ -26964,8 +27022,8 @@ fn coerceInMemoryAllowedPtrs(
} };
}
- const dest_allow_zero = dest_ty.ptrAllowsZero();
- const src_allow_zero = src_ty.ptrAllowsZero();
+ const dest_allow_zero = dest_ty.ptrAllowsZero(mod);
+ const src_allow_zero = src_ty.ptrAllowsZero(mod);
const ok_allows_zero = (dest_allow_zero and
(src_allow_zero or !dest_is_mut)) or
@@ -27013,12 +27071,12 @@ fn coerceInMemoryAllowedPtrs(
const src_align = if (src_info.@"align" != 0)
src_info.@"align"
else
- src_info.pointee_type.abiAlignment(target);
+ src_info.pointee_type.abiAlignment(mod);
const dest_align = if (dest_info.@"align" != 0)
dest_info.@"align"
else
- dest_info.pointee_type.abiAlignment(target);
+ dest_info.pointee_type.abiAlignment(mod);
if (dest_align > src_align) {
return InMemoryCoercionResult{ .ptr_alignment = .{
@@ -27041,8 +27099,9 @@ fn coerceVarArgParam(
) !Air.Inst.Ref {
if (block.is_typeof) return inst;
+ const mod = sema.mod;
const uncasted_ty = sema.typeOf(inst);
- const coerced = switch (uncasted_ty.zigTypeTag()) {
+ const coerced = switch (uncasted_ty.zigTypeTag(mod)) {
// TODO consider casting to c_int/f64 if they fit
.ComptimeInt, .ComptimeFloat => return sema.fail(
block,
@@ -27124,7 +27183,8 @@ fn storePtr2(
// this code does not handle tuple-to-struct coercion which requires dealing with missing
// fields.
const operand_ty = sema.typeOf(uncasted_operand);
- if (operand_ty.isTuple() and elem_ty.zigTypeTag() == .Array) {
+ const mod = sema.mod;
+ if (operand_ty.isTuple() and elem_ty.zigTypeTag(mod) == .Array) {
const field_count = operand_ty.structFieldCount();
var i: u32 = 0;
while (i < field_count) : (i += 1) {
@@ -27225,7 +27285,8 @@ fn storePtr2(
/// lengths match.
fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
const array_ty = sema.typeOf(ptr).childType();
- if (array_ty.zigTypeTag() != .Array) return null;
+ const mod = sema.mod;
+ if (array_ty.zigTypeTag(mod) != .Array) return null;
var ptr_inst = Air.refToIndex(ptr) orelse return null;
const air_datas = sema.air_instructions.items(.data);
const air_tags = sema.air_instructions.items(.tag);
@@ -27237,7 +27298,7 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
.pointer => prev_ptr_ty.castTag(.pointer).?.data.pointee_type,
else => return null,
};
- if (prev_ptr_child_ty.zigTypeTag() == .Vector) break prev_ptr;
+ if (prev_ptr_child_ty.zigTypeTag(mod) == .Vector) break prev_ptr;
ptr_inst = Air.refToIndex(prev_ptr) orelse return null;
} else return null;
@@ -27263,6 +27324,7 @@ fn storePtrVal(
operand_val: Value,
operand_ty: Type,
) !void {
+ const mod = sema.mod;
var mut_kit = try sema.beginComptimePtrMutation(block, src, ptr_val, operand_ty);
try sema.checkComptimeVarStore(block, src, mut_kit.decl_ref_mut);
@@ -27281,8 +27343,7 @@ fn storePtrVal(
val_ptr.* = try operand_val.copy(arena);
},
.reinterpret => |reinterpret| {
- const target = sema.mod.getTarget();
- const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(target));
+ const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(mod));
const buffer = try sema.gpa.alloc(u8, abi_size);
defer sema.gpa.free(buffer);
reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) {
@@ -27354,7 +27415,7 @@ fn beginComptimePtrMutation(
ptr_val: Value,
ptr_elem_ty: Type,
) CompileError!ComptimePtrMutationKit {
- const target = sema.mod.getTarget();
+ const mod = sema.mod;
switch (ptr_val.tag()) {
.decl_ref_mut => {
const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data;
@@ -27375,7 +27436,7 @@ fn beginComptimePtrMutation(
var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty);
switch (parent.pointee) {
- .direct => |val_ptr| switch (parent.ty.zigTypeTag()) {
+ .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) {
.Array, .Vector => {
const check_len = parent.ty.arrayLenIncludingSentinel();
if (elem_ptr.index >= check_len) {
@@ -27570,7 +27631,7 @@ fn beginComptimePtrMutation(
},
},
.reinterpret => |reinterpret| {
- if (!elem_ptr.elem_ty.hasWellDefinedLayout()) {
+ if (!elem_ptr.elem_ty.hasWellDefinedLayout(mod)) {
// Even though the parent value type has well-defined memory layout, our
// pointer type does not.
return ComptimePtrMutationKit{
@@ -27608,7 +27669,7 @@ fn beginComptimePtrMutation(
const arena = parent.beginArena(sema.mod);
defer parent.finishArena(sema.mod);
- switch (parent.ty.zigTypeTag()) {
+ switch (parent.ty.zigTypeTag(mod)) {
.Struct => {
const fields = try arena.alloc(Value, parent.ty.structFieldCount());
@memset(fields, Value.undef);
@@ -27746,7 +27807,7 @@ fn beginComptimePtrMutation(
else => unreachable,
},
.reinterpret => |reinterpret| {
- const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, target);
+ const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, mod);
const field_offset = try sema.usizeCast(block, src, field_offset_u64);
return ComptimePtrMutationKit{
.decl_ref_mut = parent.decl_ref_mut,
@@ -27872,7 +27933,8 @@ fn beginComptimePtrMutationInner(
ptr_elem_ty: Type,
decl_ref_mut: Value.Payload.DeclRefMut.Data,
) CompileError!ComptimePtrMutationKit {
- const target = sema.mod.getTarget();
+ const mod = sema.mod;
+ const target = mod.getTarget();
const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok;
if (coerce_ok) {
return ComptimePtrMutationKit{
@@ -27883,7 +27945,7 @@ fn beginComptimePtrMutationInner(
}
// Handle the case that the decl is an array and we're actually trying to point to an element.
- if (decl_ty.isArrayOrVector()) {
+ if (decl_ty.isArrayOrVector(mod)) {
const decl_elem_ty = decl_ty.childType();
if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) {
return ComptimePtrMutationKit{
@@ -27894,14 +27956,14 @@ fn beginComptimePtrMutationInner(
}
}
- if (!decl_ty.hasWellDefinedLayout()) {
+ if (!decl_ty.hasWellDefinedLayout(mod)) {
return ComptimePtrMutationKit{
.decl_ref_mut = decl_ref_mut,
.pointee = .{ .bad_decl_ty = {} },
.ty = decl_ty,
};
}
- if (!ptr_elem_ty.hasWellDefinedLayout()) {
+ if (!ptr_elem_ty.hasWellDefinedLayout(mod)) {
return ComptimePtrMutationKit{
.decl_ref_mut = decl_ref_mut,
.pointee = .{ .bad_ptr_ty = {} },
@@ -27951,6 +28013,7 @@ fn beginComptimePtrLoad(
ptr_val: Value,
maybe_array_ty: ?Type,
) ComptimePtrLoadError!ComptimePtrLoadKit {
+ const mod = sema.mod;
const target = sema.mod.getTarget();
var deref: ComptimePtrLoadKit = switch (ptr_val.tag()) {
.decl_ref,
@@ -27966,7 +28029,7 @@ fn beginComptimePtrLoad(
const decl_tv = try decl.typedValue();
if (decl_tv.val.tag() == .variable) return error.RuntimeLoad;
- const layout_defined = decl.ty.hasWellDefinedLayout();
+ const layout_defined = decl.ty.hasWellDefinedLayout(mod);
break :blk ComptimePtrLoadKit{
.parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null,
.pointee = decl_tv,
@@ -27988,7 +28051,7 @@ fn beginComptimePtrLoad(
}
if (elem_ptr.index != 0) {
- if (elem_ty.hasWellDefinedLayout()) {
+ if (elem_ty.hasWellDefinedLayout(mod)) {
if (deref.parent) |*parent| {
// Update the byte offset (in-place)
const elem_size = try sema.typeAbiSize(elem_ty);
@@ -28003,7 +28066,7 @@ fn beginComptimePtrLoad(
// If we're loading an elem_ptr that was derived from a different type
// than the true type of the underlying decl, we cannot deref directly
- const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector()) x: {
+ const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: {
const deref_elem_ty = deref.pointee.?.ty.childType();
break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or
(try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok;
@@ -28018,7 +28081,7 @@ fn beginComptimePtrLoad(
if (maybe_array_ty) |load_ty| {
// It's possible that we're loading a [N]T, in which case we'd like to slice
// the pointee array directly from our parent array.
- if (load_ty.isArrayOrVector() and load_ty.childType().eql(elem_ty, sema.mod)) {
+ if (load_ty.isArrayOrVector(mod) and load_ty.childType().eql(elem_ty, sema.mod)) {
const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel());
deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{
.ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod),
@@ -28058,7 +28121,7 @@ fn beginComptimePtrLoad(
const field_index = @intCast(u32, field_ptr.field_index);
var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty);
- if (field_ptr.container_ty.hasWellDefinedLayout()) {
+ if (field_ptr.container_ty.hasWellDefinedLayout(mod)) {
const struct_ty = field_ptr.container_ty.castTag(.@"struct");
if (struct_ty != null and struct_ty.?.data.layout == .Packed) {
// packed structs are not byte addressable
@@ -28066,7 +28129,7 @@ fn beginComptimePtrLoad(
} else if (deref.parent) |*parent| {
// Update the byte offset (in-place)
try sema.resolveTypeLayout(field_ptr.container_ty);
- const field_offset = field_ptr.container_ty.structFieldOffset(field_index, target);
+ const field_offset = field_ptr.container_ty.structFieldOffset(field_index, mod);
parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset);
}
} else {
@@ -28103,7 +28166,7 @@ fn beginComptimePtrLoad(
const field_ty = field_ptr.container_ty.structFieldType(field_index);
deref.pointee = TypedValue{
.ty = field_ty,
- .val = tv.val.fieldValue(tv.ty, field_index),
+ .val = tv.val.fieldValue(tv.ty, mod, field_index),
};
}
break :blk deref;
@@ -28146,7 +28209,7 @@ fn beginComptimePtrLoad(
return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name});
},
.opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: {
- if (tv.val.isNull()) return sema.fail(block, src, "attempt to use null value", .{});
+ if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{});
break :opt tv.val;
},
else => unreachable,
@@ -28181,7 +28244,7 @@ fn beginComptimePtrLoad(
};
if (deref.pointee) |tv| {
- if (deref.parent == null and tv.ty.hasWellDefinedLayout()) {
+ if (deref.parent == null and tv.ty.hasWellDefinedLayout(mod)) {
deref.parent = .{ .tv = tv, .byte_offset = 0 };
}
}
@@ -28196,15 +28259,15 @@ fn bitCast(
inst_src: LazySrcLoc,
operand_src: ?LazySrcLoc,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved);
try sema.resolveTypeLayout(dest_ty);
const old_ty = try sema.resolveTypeFields(sema.typeOf(inst));
try sema.resolveTypeLayout(old_ty);
- const target = sema.mod.getTarget();
- const dest_bits = dest_ty.bitSize(target);
- const old_bits = old_ty.bitSize(target);
+ const dest_bits = dest_ty.bitSize(mod);
+ const old_bits = old_ty.bitSize(mod);
if (old_bits != dest_bits) {
return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{
@@ -28233,20 +28296,20 @@ fn bitCastVal(
new_ty: Type,
buffer_offset: usize,
) !?Value {
- const target = sema.mod.getTarget();
- if (old_ty.eql(new_ty, sema.mod)) return val;
+ const mod = sema.mod;
+ if (old_ty.eql(new_ty, mod)) return val;
// For types with well-defined memory layouts, we serialize them a byte buffer,
// then deserialize to the new type.
- const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target));
+ const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(mod));
const buffer = try sema.gpa.alloc(u8, abi_size);
defer sema.gpa.free(buffer);
- val.writeToMemory(old_ty, sema.mod, buffer) catch |err| switch (err) {
+ val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) {
error.ReinterpretDeclRef => return null,
error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
- error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(sema.mod)}),
+ error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(mod)}),
};
- return try Value.readFromMemory(new_ty, sema.mod, buffer[buffer_offset..], sema.arena);
+ return try Value.readFromMemory(new_ty, mod, buffer[buffer_offset..], sema.arena);
}
fn coerceArrayPtrToSlice(
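
bitCastVal's strategy above (serialize the old value to a byte buffer, then read the buffer back as the new type) has a direct user-level analogue; a small sketch using only std.mem:

    const std = @import("std");

    test "bitcast through a byte buffer" {
        const x: f32 = 1.0;
        var buf: [4]u8 = undefined;
        // Write the old representation out, then reinterpret the bytes as the new type.
        std.mem.writeIntLittle(u32, &buf, @bitCast(u32, x));
        const y = std.mem.readIntLittle(u32, &buf);
        try std.testing.expectEqual(@as(u32, 0x3f800000), y);
    }
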
@@ -28272,7 +28335,8 @@ fn coerceArrayPtrToSlice(
fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool {
const dest_info = dest_ty.ptrInfo().data;
const inst_info = inst_ty.ptrInfo().data;
- const len0 = (inst_info.pointee_type.zigTypeTag() == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel() == 0 or
+ const mod = sema.mod;
+ const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel() == 0 or
(inst_info.pointee_type.arrayLen() == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or
(inst_info.pointee_type.isTuple() and inst_info.pointee_type.structFieldCount() == 0);
@@ -28298,17 +28362,16 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul
}
if (inst_info.@"align" == 0 and dest_info.@"align" == 0) return true;
if (len0) return true;
- const target = sema.mod.getTarget();
const inst_align = if (inst_info.@"align" != 0)
inst_info.@"align"
else
- inst_info.pointee_type.abiAlignment(target);
+ inst_info.pointee_type.abiAlignment(mod);
const dest_align = if (dest_info.@"align" != 0)
dest_info.@"align"
else
- dest_info.pointee_type.abiAlignment(target);
+ dest_info.pointee_type.abiAlignment(mod);
if (dest_align > inst_align) {
in_memory_result.* = .{ .ptr_alignment = .{
@@ -28327,18 +28390,19 @@ fn coerceCompatiblePtrs(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
+ const mod = sema.mod;
const inst_ty = sema.typeOf(inst);
if (try sema.resolveMaybeUndefVal(inst)) |val| {
- if (!val.isUndef() and val.isNull() and !dest_ty.isAllowzeroPtr()) {
+ if (!val.isUndef() and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) {
return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)});
}
// The comptime Value representation is compatible with both types.
return sema.addConstant(dest_ty, val);
}
try sema.requireRuntimeBlock(block, inst_src, null);
- const inst_allows_zero = inst_ty.zigTypeTag() != .Pointer or inst_ty.ptrAllowsZero();
- if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero() and
- (try sema.typeHasRuntimeBits(dest_ty.elemType2()) or dest_ty.elemType2().zigTypeTag() == .Fn))
+ const inst_allows_zero = inst_ty.zigTypeTag(mod) != .Pointer or inst_ty.ptrAllowsZero(mod);
+ if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(mod) and
+ (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn))
{
const actual_ptr = if (inst_ty.isSlice())
try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty)
@@ -28364,6 +28428,7 @@ fn coerceEnumToUnion(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
+ const mod = sema.mod;
const inst_ty = sema.typeOf(inst);
const tag_ty = union_ty.unionTagType() orelse {
@@ -28396,7 +28461,7 @@ fn coerceEnumToUnion(
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
const field = union_obj.fields.values()[field_index];
const field_ty = try sema.resolveTypeFields(field.ty);
- if (field_ty.zigTypeTag() == .NoReturn) {
+ if (field_ty.zigTypeTag(mod) == .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "cannot initialize 'noreturn' field of union", .{});
errdefer msg.destroy(sema.gpa);
@@ -28449,7 +28514,7 @@ fn coerceEnumToUnion(
errdefer if (msg) |some| some.destroy(sema.gpa);
for (union_obj.fields.values(), 0..) |field, i| {
- if (field.ty.zigTypeTag() == .NoReturn) {
+ if (field.ty.zigTypeTag(mod) == .NoReturn) {
const err_msg = msg orelse try sema.errMsg(
block,
inst_src,
@@ -28469,7 +28534,7 @@ fn coerceEnumToUnion(
}
// If the union has all fields 0 bits, the union value is just the enum value.
- if (union_ty.unionHasAllZeroBitFieldTypes()) {
+ if (union_ty.unionHasAllZeroBitFieldTypes(mod)) {
return block.addBitCast(union_ty, enum_tag);
}
@@ -28487,7 +28552,7 @@ fn coerceEnumToUnion(
while (it.next()) |field| : (field_index += 1) {
const field_name = field.key_ptr.*;
const field_ty = field.value_ptr.ty;
- if (!field_ty.hasRuntimeBits()) continue;
+ if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' has type '{}'", .{ field_name, field_ty.fmt(sema.mod) });
}
try sema.addDeclaredHereNote(msg, union_ty);
@@ -29066,12 +29131,13 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo
}
fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void {
- const decl = sema.mod.declPtr(decl_index);
+ const mod = sema.mod;
+ const decl = mod.declPtr(decl_index);
const tv = try decl.typedValue();
- if (tv.ty.zigTypeTag() != .Fn) return;
+ if (tv.ty.zigTypeTag(mod) != .Fn) return;
if (!try sema.fnHasRuntimeBits(tv.ty)) return;
const func = tv.val.castTag(.function) orelse return; // undef or extern_fn
- try sema.mod.ensureFuncBodyAnalysisQueued(func.data);
+ try mod.ensureFuncBodyAnalysisQueued(func.data);
}
fn analyzeRef(
@@ -29124,8 +29190,9 @@ fn analyzeLoad(
ptr: Air.Inst.Ref,
ptr_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const ptr_ty = sema.typeOf(ptr);
- const elem_ty = switch (ptr_ty.zigTypeTag()) {
+ const elem_ty = switch (ptr_ty.zigTypeTag(mod)) {
.Pointer => ptr_ty.childType(),
else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}),
};
@@ -29196,12 +29263,13 @@ fn analyzeIsNull(
operand: Air.Inst.Ref,
invert_logic: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const result_ty = Type.bool;
if (try sema.resolveMaybeUndefVal(operand)) |opt_val| {
if (opt_val.isUndef()) {
return sema.addConstUndef(result_ty);
}
- const is_null = opt_val.isNull();
+ const is_null = opt_val.isNull(mod);
const bool_value = if (invert_logic) !is_null else is_null;
if (bool_value) {
return Air.Inst.Ref.bool_true;
@@ -29213,10 +29281,10 @@ fn analyzeIsNull(
const inverted_non_null_res = if (invert_logic) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
const operand_ty = sema.typeOf(operand);
var buf: Type.Payload.ElemType = undefined;
- if (operand_ty.zigTypeTag() == .Optional and operand_ty.optionalChild(&buf).zigTypeTag() == .NoReturn) {
+ if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(&buf).zigTypeTag(mod) == .NoReturn) {
return inverted_non_null_res;
}
- if (operand_ty.zigTypeTag() != .Optional and !operand_ty.isPtrLikeOptional()) {
+ if (operand_ty.zigTypeTag(mod) != .Optional and !operand_ty.isPtrLikeOptional(mod)) {
return inverted_non_null_res;
}
try sema.requireRuntimeBlock(block, src, null);
@@ -29230,11 +29298,12 @@ fn analyzePtrIsNonErrComptimeOnly(
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const ptr_ty = sema.typeOf(operand);
- assert(ptr_ty.zigTypeTag() == .Pointer);
+ assert(ptr_ty.zigTypeTag(mod) == .Pointer);
const child_ty = ptr_ty.childType();
- const child_tag = child_ty.zigTypeTag();
+ const child_tag = child_ty.zigTypeTag(mod);
if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return Air.Inst.Ref.bool_true;
if (child_tag == .ErrorSet) return Air.Inst.Ref.bool_false;
assert(child_tag == .ErrorUnion);
@@ -29251,14 +29320,15 @@ fn analyzeIsNonErrComptimeOnly(
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
- const ot = operand_ty.zigTypeTag();
+ const ot = operand_ty.zigTypeTag(mod);
if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true;
if (ot == .ErrorSet) return Air.Inst.Ref.bool_false;
assert(ot == .ErrorUnion);
const payload_ty = operand_ty.errorUnionPayload();
- if (payload_ty.zigTypeTag() == .NoReturn) {
+ if (payload_ty.zigTypeTag(mod) == .NoReturn) {
return Air.Inst.Ref.bool_false;
}
@@ -29375,22 +29445,21 @@ fn analyzeSlice(
end_src: LazySrcLoc,
by_length: bool,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
// Slice expressions can operate on a variable whose type is an array. This requires
// the slice operand to be a pointer. In the case of a non-array, it will be a double pointer.
const ptr_ptr_ty = sema.typeOf(ptr_ptr);
- const target = sema.mod.getTarget();
- const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag()) {
+ const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) {
.Pointer => ptr_ptr_ty.elemType(),
else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(sema.mod)}),
};
- const mod = sema.mod;
var array_ty = ptr_ptr_child_ty;
var slice_ty = ptr_ptr_ty;
var ptr_or_slice = ptr_ptr;
var elem_ty: Type = undefined;
var ptr_sentinel: ?Value = null;
- switch (ptr_ptr_child_ty.zigTypeTag()) {
+ switch (ptr_ptr_child_ty.zigTypeTag(mod)) {
.Array => {
ptr_sentinel = ptr_ptr_child_ty.sentinel();
elem_ty = ptr_ptr_child_ty.childType();
@@ -29398,7 +29467,7 @@ fn analyzeSlice(
.Pointer => switch (ptr_ptr_child_ty.ptrSize()) {
.One => {
const double_child_ty = ptr_ptr_child_ty.childType();
- if (double_child_ty.zigTypeTag() == .Array) {
+ if (double_child_ty.zigTypeTag(mod) == .Array) {
ptr_sentinel = double_child_ty.sentinel();
ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
slice_ty = ptr_ptr_child_ty;
@@ -29417,7 +29486,7 @@ fn analyzeSlice(
if (ptr_ptr_child_ty.ptrSize() == .C) {
if (try sema.resolveDefinedValue(block, ptr_src, ptr_or_slice)) |ptr_val| {
- if (ptr_val.isNull()) {
+ if (ptr_val.isNull(mod)) {
return sema.fail(block, src, "slice of null pointer", .{});
}
}
@@ -29448,7 +29517,7 @@ fn analyzeSlice(
// we might learn of the length because it is a comptime-known slice value.
var end_is_len = uncasted_end_opt == .none;
const end = e: {
- if (array_ty.zigTypeTag() == .Array) {
+ if (array_ty.zigTypeTag(mod) == .Array) {
const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen());
if (!end_is_len) {
@@ -29587,8 +29656,8 @@ fn analyzeSlice(
}
if (try sema.resolveMaybeUndefVal(new_ptr)) |ptr_val| sentinel_check: {
const expected_sentinel = sentinel orelse break :sentinel_check;
- const start_int = start_val.getUnsignedInt(sema.mod.getTarget()).?;
- const end_int = end_val.getUnsignedInt(sema.mod.getTarget()).?;
+ const start_int = start_val.getUnsignedInt(mod).?;
+ const end_int = end_val.getUnsignedInt(mod).?;
const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int);
const elem_ptr = try ptr_val.elemPtr(sema.typeOf(new_ptr), sema.arena, sentinel_index, sema.mod);
@@ -29641,7 +29710,7 @@ fn analyzeSlice(
const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize() != .C;
if (opt_new_len_val) |new_len_val| {
- const new_len_int = new_len_val.toUnsignedInt(target);
+ const new_len_int = new_len_val.toUnsignedInt(mod);
const return_ty = try Type.ptr(sema.arena, mod, .{
.pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty, mod),
@@ -29724,7 +29793,7 @@ fn analyzeSlice(
}
// requirement: end <= len
- const opt_len_inst = if (array_ty.zigTypeTag() == .Array)
+ const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array)
try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel())
else if (slice_ty.isSlice()) blk: {
if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| {
@@ -29778,14 +29847,15 @@ fn cmpNumeric(
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const lhs_ty = sema.typeOf(uncasted_lhs);
const rhs_ty = sema.typeOf(uncasted_rhs);
- assert(lhs_ty.isNumeric());
- assert(rhs_ty.isNumeric());
+ assert(lhs_ty.isNumeric(mod));
+ assert(rhs_ty.isNumeric(mod));
- const lhs_ty_tag = lhs_ty.zigTypeTag();
- const rhs_ty_tag = rhs_ty.zigTypeTag();
+ const lhs_ty_tag = lhs_ty.zigTypeTag(mod);
+ const rhs_ty_tag = rhs_ty.zigTypeTag(mod);
const target = sema.mod.getTarget();
// One exception to heterogeneous comparison: comptime_float needs to
@@ -29805,14 +29875,14 @@ fn cmpNumeric(
if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
// Compare ints: const vs. undefined (or vice versa)
- if (!lhs_val.isUndef() and (lhs_ty.isInt() or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt() and rhs_val.isUndef()) {
+ if (!lhs_val.isUndef() and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef()) {
try sema.resolveLazyValue(lhs_val);
- if (sema.compareIntsOnlyPossibleResult(target, lhs_val, op, rhs_ty)) |res| {
+ if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| {
return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
}
- } else if (!rhs_val.isUndef() and (rhs_ty.isInt() or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt() and lhs_val.isUndef()) {
+ } else if (!rhs_val.isUndef() and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef()) {
try sema.resolveLazyValue(rhs_val);
- if (sema.compareIntsOnlyPossibleResult(target, rhs_val, op.reverse(), lhs_ty)) |res| {
+ if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| {
return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
}
}
@@ -29827,16 +29897,16 @@ fn cmpNumeric(
return Air.Inst.Ref.bool_false;
}
}
- if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, target, sema)) {
+ if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, sema)) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
}
} else {
- if (!lhs_val.isUndef() and (lhs_ty.isInt() or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt()) {
+ if (!lhs_val.isUndef() and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) {
// Compare ints: const vs. var
try sema.resolveLazyValue(lhs_val);
- if (sema.compareIntsOnlyPossibleResult(target, lhs_val, op, rhs_ty)) |res| {
+ if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| {
return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
}
}
@@ -29844,10 +29914,10 @@ fn cmpNumeric(
}
} else {
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
- if (!rhs_val.isUndef() and (rhs_ty.isInt() or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt()) {
+ if (!rhs_val.isUndef() and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) {
// Compare ints: var vs. const
try sema.resolveLazyValue(rhs_val);
- if (sema.compareIntsOnlyPossibleResult(target, rhs_val, op.reverse(), lhs_ty)) |res| {
+ if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| {
return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false;
}
}
@@ -29901,11 +29971,11 @@ fn cmpNumeric(
const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val|
!(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))
else
- (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt());
+ (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod));
const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val|
!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))
else
- (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt());
+ (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod));
const dest_int_is_signed = lhs_is_signed or rhs_is_signed;
var dest_float_type: ?Type = null;
@@ -29926,7 +29996,7 @@ fn cmpNumeric(
.lt, .lte => return if (lhs_val.isNegativeInf()) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false,
};
if (!rhs_is_signed) {
- switch (lhs_val.orderAgainstZero()) {
+ switch (lhs_val.orderAgainstZero(mod)) {
.gt => {},
.eq => switch (op) { // LHS = 0, RHS is unsigned
.lte => return Air.Inst.Ref.bool_true,
@@ -29959,13 +30029,13 @@ fn cmpNumeric(
}
lhs_bits = bigint.toConst().bitCountTwosComp();
} else {
- lhs_bits = lhs_val.intBitCountTwosComp(target);
+ lhs_bits = lhs_val.intBitCountTwosComp(mod);
}
lhs_bits += @boolToInt(!lhs_is_signed and dest_int_is_signed);
} else if (lhs_is_float) {
dest_float_type = lhs_ty;
} else {
- const int_info = lhs_ty.intInfo(target);
+ const int_info = lhs_ty.intInfo(mod);
lhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed);
}
@@ -29985,7 +30055,7 @@ fn cmpNumeric(
.lt, .lte => return if (rhs_val.isNegativeInf()) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true,
};
if (!lhs_is_signed) {
- switch (rhs_val.orderAgainstZero()) {
+ switch (rhs_val.orderAgainstZero(mod)) {
.gt => {},
.eq => switch (op) { // RHS = 0, LHS is unsigned
.gte => return Air.Inst.Ref.bool_true,
@@ -30018,13 +30088,13 @@ fn cmpNumeric(
}
rhs_bits = bigint.toConst().bitCountTwosComp();
} else {
- rhs_bits = rhs_val.intBitCountTwosComp(target);
+ rhs_bits = rhs_val.intBitCountTwosComp(mod);
}
rhs_bits += @boolToInt(!rhs_is_signed and dest_int_is_signed);
} else if (rhs_is_float) {
dest_float_type = rhs_ty;
} else {
- const int_info = rhs_ty.intInfo(target);
+ const int_info = rhs_ty.intInfo(mod);
rhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed);
}
@@ -30032,7 +30102,7 @@ fn cmpNumeric(
const max_bits = std.math.max(lhs_bits, rhs_bits);
const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits});
const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned;
- break :blk try Module.makeIntType(sema.arena, signedness, casted_bits);
+ break :blk try mod.intType(signedness, casted_bits);
};
const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);
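
Module.makeIntType(sema.arena, ...) gives way to mod.intType(signedness, bits), so the arbitrary-width integer type picked for the comparison is obtained from the module instead of being built as an arena-backed payload. The userland counterpart for constructing such a type is std.meta.Int; a short check:

    const std = @import("std");

    test "picking an integer type from signedness and bit count" {
        // e.g. the u9 a 300-vs-u8 comparison would settle on above
        const T = std.meta.Int(.unsigned, 9);
        try std.testing.expect(@typeInfo(T).Int.bits == 9);
        try std.testing.expect(@typeInfo(T).Int.signedness == .unsigned);
    }
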
@@ -30040,13 +30110,20 @@ fn cmpNumeric(
return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs);
}
-/// Asserts that LHS value is an int or comptime int and not undefined, and that RHS type is an int.
-/// Given a const LHS and an unknown RHS, attempt to determine whether `op` has a guaranteed result.
+/// Asserts that LHS value is an int or comptime int and not undefined, and
+/// that RHS type is an int. Given a const LHS and an unknown RHS, attempt to
+/// determine whether `op` has a guaranteed result.
/// If it cannot be determined, returns null.
/// Otherwise returns a bool for the guaranteed comparison operation.
-fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value, op: std.math.CompareOperator, rhs_ty: Type) ?bool {
- const rhs_info = rhs_ty.intInfo(target);
- const vs_zero = lhs_val.orderAgainstZeroAdvanced(sema) catch unreachable;
+fn compareIntsOnlyPossibleResult(
+ sema: *Sema,
+ lhs_val: Value,
+ op: std.math.CompareOperator,
+ rhs_ty: Type,
+) Allocator.Error!?bool {
+ const mod = sema.mod;
+ const rhs_info = rhs_ty.intInfo(mod);
+ const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, sema) catch unreachable;
const is_zero = vs_zero == .eq;
const is_negative = vs_zero == .lt;
const is_positive = vs_zero == .gt;
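
The guarantee computed here can be seen from ordinary code: when the constant operand cannot fit in the other operand's integer type, the comparison has only one possible outcome regardless of the runtime value. A tiny illustration (the compiler folds this at compile time; the test merely demonstrates the property):

    const std = @import("std");

    test "comparison decided by the operand types alone" {
        var x: u8 = 7; // stands in for an arbitrary runtime u8
        // 300 needs more bits than any u8 holds, so 300 > x is true for every
        // possible x; this is the guaranteed result the function detects.
        try std.testing.expect(300 > x);
    }
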
@@ -30078,7 +30155,7 @@ fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value
};
const sign_adj = @boolToInt(!is_negative and rhs_info.signedness == .signed);
- const req_bits = lhs_val.intBitCountTwosComp(target) + sign_adj;
+ const req_bits = lhs_val.intBitCountTwosComp(mod) + sign_adj;
// No sized type can have more than 65535 bits.
// The RHS type operand is either a runtime value or sized (but undefined) constant.
@@ -30111,12 +30188,11 @@ fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value
.max = false,
};
- var ty_buffer: Type.Payload.Bits = .{
- .base = .{ .tag = if (is_negative) .int_signed else .int_unsigned },
- .data = @intCast(u16, req_bits),
- };
- const ty = Type.initPayload(&ty_buffer.base);
- const pop_count = lhs_val.popCount(ty, target);
+ const ty = try mod.intType(
+ if (is_negative) .signed else .unsigned,
+ @intCast(u16, req_bits),
+ );
+ const pop_count = lhs_val.popCount(ty, mod);
if (is_negative) {
break :edge .{
@@ -30152,10 +30228,11 @@ fn cmpVector(
lhs_src: LazySrcLoc,
rhs_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
+ const mod = sema.mod;
const lhs_ty = sema.typeOf(lhs);
const rhs_ty = sema.typeOf(rhs);
- assert(lhs_ty.zigTypeTag() == .Vector);
- assert(rhs_ty.zigTypeTag() == .Vector);
+ assert(lhs_ty.zigTypeTag(mod) == .Vector);
+ assert(rhs_ty.zigTypeTag(mod) == .Vector);
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
const resolved_ty = try sema.resolvePeerTypes(block, src, &.{ lhs, rhs }, .{ .override = &.{ lhs_src, rhs_src } });
@@ -30296,16 +30373,17 @@ fn resolvePeerTypes(
instructions: []const Air.Inst.Ref,
candidate_srcs: Module.PeerTypeCandidateSrc,
) !Type {
+ const mod = sema.mod;
switch (instructions.len) {
0 => return Type.initTag(.noreturn),
1 => return sema.typeOf(instructions[0]),
else => {},
}
- const target = sema.mod.getTarget();
+ const target = mod.getTarget();
var chosen = instructions[0];
- // If this is non-null then it does the following thing, depending on the chosen zigTypeTag().
+ // If this is non-null then it does the following thing, depending on the chosen zigTypeTag(mod).
// * ErrorSet: this is an override
// * ErrorUnion: this is an override of the error set only
// * other: at the end we make an ErrorUnion with the other thing and this
@@ -30318,8 +30396,8 @@ fn resolvePeerTypes(
const candidate_ty = sema.typeOf(candidate);
const chosen_ty = sema.typeOf(chosen);
- const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison();
- const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison();
+ const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison(mod);
+ const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison(mod);
// If the candidate can coerce into our chosen type, we're done.
// If the chosen type can coerce into the candidate, use that.
@@ -30347,8 +30425,8 @@ fn resolvePeerTypes(
continue;
},
.Int => {
- const chosen_info = chosen_ty.intInfo(target);
- const candidate_info = candidate_ty.intInfo(target);
+ const chosen_info = chosen_ty.intInfo(mod);
+ const candidate_info = candidate_ty.intInfo(mod);
if (chosen_info.bits < candidate_info.bits) {
chosen = candidate;
@@ -30537,7 +30615,7 @@ fn resolvePeerTypes(
// *[N]T to []T
if ((cand_info.size == .Many or cand_info.size == .Slice) and
chosen_info.size == .One and
- chosen_info.pointee_type.zigTypeTag() == .Array)
+ chosen_info.pointee_type.zigTypeTag(mod) == .Array)
{
// In case we see i.e.: `*[1]T`, `*[2]T`, `[*]T`
convert_to_slice = false;
@@ -30546,7 +30624,7 @@ fn resolvePeerTypes(
continue;
}
if (cand_info.size == .One and
- cand_info.pointee_type.zigTypeTag() == .Array and
+ cand_info.pointee_type.zigTypeTag(mod) == .Array and
(chosen_info.size == .Many or chosen_info.size == .Slice))
{
// In case we see i.e.: `*[1]T`, `*[2]T`, `[*]T`
@@ -30559,8 +30637,8 @@ fn resolvePeerTypes(
// Keep the one whose element type can be coerced into.
if (chosen_info.size == .One and
cand_info.size == .One and
- chosen_info.pointee_type.zigTypeTag() == .Array and
- cand_info.pointee_type.zigTypeTag() == .Array)
+ chosen_info.pointee_type.zigTypeTag(mod) == .Array and
+ cand_info.pointee_type.zigTypeTag(mod) == .Array)
{
const chosen_elem_ty = chosen_info.pointee_type.childType();
const cand_elem_ty = cand_info.pointee_type.childType();
@@ -30631,7 +30709,7 @@ fn resolvePeerTypes(
.Optional => {
var opt_child_buf: Type.Payload.ElemType = undefined;
const chosen_ptr_ty = chosen_ty.optionalChild(&opt_child_buf);
- if (chosen_ptr_ty.zigTypeTag() == .Pointer) {
+ if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) {
const chosen_info = chosen_ptr_ty.ptrInfo().data;
seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable;
@@ -30639,7 +30717,7 @@ fn resolvePeerTypes(
// *[N]T to ?![*]T
// *[N]T to ?![]T
if (cand_info.size == .One and
- cand_info.pointee_type.zigTypeTag() == .Array and
+ cand_info.pointee_type.zigTypeTag(mod) == .Array and
(chosen_info.size == .Many or chosen_info.size == .Slice))
{
continue;
@@ -30648,7 +30726,7 @@ fn resolvePeerTypes(
},
.ErrorUnion => {
const chosen_ptr_ty = chosen_ty.errorUnionPayload();
- if (chosen_ptr_ty.zigTypeTag() == .Pointer) {
+ if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) {
const chosen_info = chosen_ptr_ty.ptrInfo().data;
seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable;
@@ -30656,7 +30734,7 @@ fn resolvePeerTypes(
// *[N]T to E![*]T
// *[N]T to E![]T
if (cand_info.size == .One and
- cand_info.pointee_type.zigTypeTag() == .Array and
+ cand_info.pointee_type.zigTypeTag(mod) == .Array and
(chosen_info.size == .Many or chosen_info.size == .Slice))
{
continue;
@@ -30664,7 +30742,7 @@ fn resolvePeerTypes(
}
},
.Fn => {
- if (!cand_info.mutable and cand_info.pointee_type.zigTypeTag() == .Fn and .ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty, cand_info.pointee_type, target, src, src)) {
+ if (!cand_info.mutable and cand_info.pointee_type.zigTypeTag(mod) == .Fn and .ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty, cand_info.pointee_type, target, src, src)) {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
@@ -30697,16 +30775,16 @@ fn resolvePeerTypes(
const chosen_child_ty = chosen_ty.childType();
const candidate_child_ty = candidate_ty.childType();
- if (chosen_child_ty.zigTypeTag() == .Int and candidate_child_ty.zigTypeTag() == .Int) {
- const chosen_info = chosen_child_ty.intInfo(target);
- const candidate_info = candidate_child_ty.intInfo(target);
+ if (chosen_child_ty.zigTypeTag(mod) == .Int and candidate_child_ty.zigTypeTag(mod) == .Int) {
+ const chosen_info = chosen_child_ty.intInfo(mod);
+ const candidate_info = candidate_child_ty.intInfo(mod);
if (chosen_info.bits < candidate_info.bits) {
chosen = candidate;
chosen_i = candidate_i + 1;
}
continue;
}
- if (chosen_child_ty.zigTypeTag() == .Float and candidate_child_ty.zigTypeTag() == .Float) {
+ if (chosen_child_ty.zigTypeTag(mod) == .Float and candidate_child_ty.zigTypeTag(mod) == .Float) {
if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) {
chosen = candidate;
chosen_i = candidate_i + 1;
@@ -30725,7 +30803,7 @@ fn resolvePeerTypes(
.Vector => continue,
else => {},
},
- .Fn => if (chosen_ty.isSinglePointer() and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag() == .Fn) {
+ .Fn => if (chosen_ty.isSinglePointer() and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag(mod) == .Fn) {
if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(), candidate_ty, target, src, src)) {
continue;
}
@@ -30790,27 +30868,27 @@ fn resolvePeerTypes(
// the source locations.
const chosen_src = candidate_srcs.resolve(
sema.gpa,
- sema.mod.declPtr(block.src_decl),
+ mod.declPtr(block.src_decl),
chosen_i,
);
const candidate_src = candidate_srcs.resolve(
sema.gpa,
- sema.mod.declPtr(block.src_decl),
+ mod.declPtr(block.src_decl),
candidate_i + 1,
);
const msg = msg: {
const msg = try sema.errMsg(block, src, "incompatible types: '{}' and '{}'", .{
- chosen_ty.fmt(sema.mod),
- candidate_ty.fmt(sema.mod),
+ chosen_ty.fmt(mod),
+ candidate_ty.fmt(mod),
});
errdefer msg.destroy(sema.gpa);
if (chosen_src) |src_loc|
- try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(sema.mod)});
+ try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(mod)});
if (candidate_src) |src_loc|
- try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(sema.mod)});
+ try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(mod)});
break :msg msg;
};
@@ -30826,72 +30904,73 @@ fn resolvePeerTypes(
info.data.sentinel = chosen_child_ty.sentinel();
info.data.size = .Slice;
info.data.mutable = !(seen_const or chosen_child_ty.isConstPtr());
- info.data.pointee_type = chosen_child_ty.elemType2();
+ info.data.pointee_type = chosen_child_ty.elemType2(mod);
- const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data);
+ const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data);
const opt_ptr_ty = if (any_are_null)
try Type.optional(sema.arena, new_ptr_ty)
else
new_ptr_ty;
const set_ty = err_set_ty orelse return opt_ptr_ty;
- return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod);
+ return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod);
}
if (seen_const) {
// turn []T => []const T
- switch (chosen_ty.zigTypeTag()) {
+ switch (chosen_ty.zigTypeTag(mod)) {
.ErrorUnion => {
const ptr_ty = chosen_ty.errorUnionPayload();
var info = ptr_ty.ptrInfo();
info.data.mutable = false;
- const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data);
+ const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data);
const opt_ptr_ty = if (any_are_null)
try Type.optional(sema.arena, new_ptr_ty)
else
new_ptr_ty;
const set_ty = err_set_ty orelse chosen_ty.errorUnionSet();
- return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod);
+ return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod);
},
.Pointer => {
var info = chosen_ty.ptrInfo();
info.data.mutable = false;
- const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data);
+ const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data);
const opt_ptr_ty = if (any_are_null)
try Type.optional(sema.arena, new_ptr_ty)
else
new_ptr_ty;
const set_ty = err_set_ty orelse return opt_ptr_ty;
- return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod);
+ return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod);
},
else => return chosen_ty,
}
}
if (any_are_null) {
- const opt_ty = switch (chosen_ty.zigTypeTag()) {
+ const opt_ty = switch (chosen_ty.zigTypeTag(mod)) {
.Null, .Optional => chosen_ty,
else => try Type.optional(sema.arena, chosen_ty),
};
const set_ty = err_set_ty orelse return opt_ty;
- return try Type.errorUnion(sema.arena, set_ty, opt_ty, sema.mod);
+ return try Type.errorUnion(sema.arena, set_ty, opt_ty, mod);
}
- if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag()) {
+ if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag(mod)) {
.ErrorSet => return ty,
.ErrorUnion => {
const payload_ty = chosen_ty.errorUnionPayload();
- return try Type.errorUnion(sema.arena, ty, payload_ty, sema.mod);
+ return try Type.errorUnion(sema.arena, ty, payload_ty, mod);
},
- else => return try Type.errorUnion(sema.arena, ty, chosen_ty, sema.mod),
+ else => return try Type.errorUnion(sema.arena, ty, chosen_ty, mod),
};
return chosen_ty;
}
pub fn resolveFnTypes(sema: *Sema, fn_info: Type.Payload.Function.Data) CompileError!void {
+ const mod = sema.mod;
try sema.resolveTypeFully(fn_info.return_type);
- if (sema.mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.isError()) {
+ if (mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.isError(mod)) {
// Ensure the type exists so that backends can assume that.
_ = try sema.getBuiltinType("StackTrace");
}
@@ -30943,7 +31022,8 @@ fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void {
}
pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
- switch (ty.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (ty.zigTypeTag(mod)) {
.Struct => return sema.resolveStructLayout(ty),
.Union => return sema.resolveUnionLayout(ty),
.Array => {
@@ -31021,7 +31101,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
struct_obj.status = .have_layout;
_ = try sema.resolveTypeRequiresComptime(resolved_ty);
- if (struct_obj.assumed_runtime_bits and !resolved_ty.hasRuntimeBits()) {
+ if (struct_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(resolved_ty))) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
struct_obj.srcLoc(sema.mod),
@@ -31043,7 +31123,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
};
for (struct_obj.fields.values(), 0..) |field, i| {
- optimized_order[i] = if (field.ty.hasRuntimeBits())
+ optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty))
@intCast(u32, i)
else
Module.Struct.omitted_field;
@@ -31054,11 +31134,11 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
sema: *Sema,
fn lessThan(ctx: @This(), a: u32, b: u32) bool {
+ const m = ctx.sema.mod;
if (a == Module.Struct.omitted_field) return false;
if (b == Module.Struct.omitted_field) return true;
- const target = ctx.sema.mod.getTarget();
- return ctx.struct_obj.fields.values()[a].ty.abiAlignment(target) >
- ctx.struct_obj.fields.values()[b].ty.abiAlignment(target);
+ return ctx.struct_obj.fields.values()[a].ty.abiAlignment(m) >
+ ctx.struct_obj.fields.values()[b].ty.abiAlignment(m);
}
};
mem.sort(u32, optimized_order, AlignSortContext{
@@ -31073,11 +31153,10 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
const gpa = mod.gpa;
- const target = mod.getTarget();
var fields_bit_sum: u64 = 0;
for (struct_obj.fields.values()) |field| {
- fields_bit_sum += field.ty.bitSize(target);
+ fields_bit_sum += field.ty.bitSize(mod);
}
const decl_index = struct_obj.owner_decl;
@@ -31178,32 +31257,29 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
};
return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum});
}
- var buf: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, fields_bit_sum),
- };
- struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(decl_arena_allocator);
+ struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum));
}
}
fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void {
- const target = sema.mod.getTarget();
+ const mod = sema.mod;
- if (!backing_int_ty.isInt()) {
+ if (!backing_int_ty.isInt(mod)) {
return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(sema.mod)});
}
- if (backing_int_ty.bitSize(target) != fields_bit_sum) {
+ if (backing_int_ty.bitSize(mod) != fields_bit_sum) {
return sema.fail(
block,
src,
"backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}",
- .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(target), fields_bit_sum },
+ .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(mod), fields_bit_sum },
);
}
}
fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
- if (!ty.isIndexable()) {
+ const mod = sema.mod;
+ if (!ty.isIndexable(mod)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "type '{}' does not support indexing", .{ty.fmt(sema.mod)});
errdefer msg.destroy(sema.gpa);
@@ -31215,12 +31291,13 @@ fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
}
fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
- if (ty.zigTypeTag() == .Pointer) {
+ const mod = sema.mod;
+ if (ty.zigTypeTag(mod) == .Pointer) {
switch (ty.ptrSize()) {
.Slice, .Many, .C => return,
.One => {
const elem_ty = ty.childType();
- if (elem_ty.zigTypeTag() == .Array) return;
+ if (elem_ty.zigTypeTag(mod) == .Array) return;
// TODO https://github.com/ziglang/zig/issues/15479
// if (elem_ty.isTuple()) return;
},
@@ -31270,7 +31347,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
union_obj.status = .have_layout;
_ = try sema.resolveTypeRequiresComptime(resolved_ty);
- if (union_obj.assumed_runtime_bits and !resolved_ty.hasRuntimeBits()) {
+ if (union_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(resolved_ty))) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
union_obj.srcLoc(sema.mod),
@@ -31285,6 +31362,23 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
// for hasRuntimeBits() of each field, so we need "requires comptime"
// to be known already before this function returns.
pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
+ const mod = sema.mod;
+
+ if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => return false,
+ .ptr_type => @panic("TODO"),
+ .array_type => @panic("TODO"),
+ .vector_type => @panic("TODO"),
+ .optional_type => @panic("TODO"),
+ .error_union_type => @panic("TODO"),
+ .simple_type => @panic("TODO"),
+ .struct_type => @panic("TODO"),
+ .simple_value => unreachable,
+ .extern_func => unreachable,
+ .int => unreachable,
+ .enum_tag => unreachable, // it's a value, not a type
+ };
+
return switch (ty.tag()) {
.u1,
.u8,
@@ -31349,8 +31443,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.generic_poison,
.array_u8,
.array_u8_sentinel_0,
- .int_signed,
- .int_unsigned,
.enum_simple,
=> false,
@@ -31360,11 +31452,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.comptime_float,
.enum_literal,
.type_info,
- // These are function bodies, not function pointers.
- .fn_noreturn_no_args,
- .fn_void_no_args,
- .fn_naked_noreturn_no_args,
- .fn_ccc_void_no_args,
.function,
=> true,
@@ -31387,7 +31474,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.mut_slice,
=> {
const child_ty = ty.childType();
- if (child_ty.zigTypeTag() == .Fn) {
+ if (child_ty.zigTypeTag(mod) == .Fn) {
return child_ty.fnInfo().is_generic;
} else {
return sema.resolveTypeRequiresComptime(child_ty);
@@ -31474,7 +31561,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
/// Returns `error.AnalysisFail` if any of the types (recursively) failed to
/// be resolved.
pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
- switch (ty.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (ty.zigTypeTag(mod)) {
.Pointer => {
const child_ty = try sema.resolveTypeFields(ty.childType());
return sema.resolveTypeFully(child_ty);
@@ -31840,7 +31928,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
type_body_len: u32 = 0,
align_body_len: u32 = 0,
init_body_len: u32 = 0,
- type_ref: Air.Inst.Ref = .none,
+ type_ref: Zir.Inst.Ref = .none,
};
const fields = try sema.arena.alloc(Field, fields_len);
var any_inits = false;
@@ -31967,7 +32055,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
const field = &struct_obj.fields.values()[field_i];
field.ty = try field_ty.copy(decl_arena_allocator);
- if (field_ty.zigTypeTag() == .Opaque) {
+ if (field_ty.zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{
.index = field_i,
@@ -31981,7 +32069,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
};
return sema.failWithOwnedErrorMsg(msg);
}
- if (field_ty.zigTypeTag() == .NoReturn) {
+ if (field_ty.zigTypeTag(mod) == .NoReturn) {
const msg = msg: {
const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{
.index = field_i,
@@ -32010,7 +32098,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
- } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty))) {
+ } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty, mod))) {
const msg = msg: {
const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{
.index = field_i,
@@ -32191,7 +32279,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
if (small.auto_enum_tag) {
// The provided type is an integer type and we must construct the enum tag type here.
int_tag_ty = provided_ty;
- if (int_tag_ty.zigTypeTag() != .Int and int_tag_ty.zigTypeTag() != .ComptimeInt) {
+ if (int_tag_ty.zigTypeTag(mod) != .Int and int_tag_ty.zigTypeTag(mod) != .ComptimeInt) {
return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(sema.mod)});
}
@@ -32220,7 +32308,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
} else {
// The provided type is the enum tag type.
union_obj.tag_ty = try provided_ty.copy(decl_arena_allocator);
- if (union_obj.tag_ty.zigTypeTag() != .Enum) {
+ if (union_obj.tag_ty.zigTypeTag(mod) != .Enum) {
return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(sema.mod)});
}
// The fields of the union must match the enum exactly.
@@ -32281,7 +32369,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
break :blk align_ref;
} else .none;
- const tag_ref: Zir.Inst.Ref = if (has_tag) blk: {
+ const tag_ref: Air.Inst.Ref = if (has_tag) blk: {
const tag_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
extra_index += 1;
break :blk try sema.resolveInst(tag_ref);
@@ -32391,7 +32479,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
}
}
- if (field_ty.zigTypeTag() == .Opaque) {
+ if (field_ty.zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const ty_src = union_obj.fieldSrcLoc(sema.mod, .{
.index = field_i,
@@ -32420,7 +32508,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
- } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty))) {
+ } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
const msg = msg: {
const ty_src = union_obj.fieldSrcLoc(sema.mod, .{
.index = field_i,
@@ -32673,6 +32761,29 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type {
/// that the types are already resolved.
/// TODO assert the return value matches `ty.onePossibleValue`
pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
+ const mod = sema.mod;
+
+ if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => |int_type| {
+ if (int_type.bits == 0) {
+ return Value.zero;
+ } else {
+ return null;
+ }
+ },
+ .ptr_type => @panic("TODO"),
+ .array_type => @panic("TODO"),
+ .vector_type => @panic("TODO"),
+ .optional_type => @panic("TODO"),
+ .error_union_type => @panic("TODO"),
+ .simple_type => @panic("TODO"),
+ .struct_type => @panic("TODO"),
+ .simple_value => unreachable,
+ .extern_func => unreachable,
+ .int => unreachable,
+ .enum_tag => unreachable, // it's a value, not a type
+ };
+
switch (ty.tag()) {
.f16,
.f32,
@@ -32712,10 +32823,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.error_set,
.error_set_merged,
.error_union,
- .fn_noreturn_no_args,
- .fn_void_no_args,
- .fn_naked_noreturn_no_args,
- .fn_ccc_void_no_args,
.function,
.single_const_pointer_to_comptime_int,
.array_sentinel,
@@ -32803,7 +32910,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
const resolved_ty = try sema.resolveTypeFields(ty);
const enum_obj = resolved_ty.castTag(.enum_numbered).?.data;
// An explicit tag type is always provided for enum_numbered.
- if (enum_obj.tag_ty.hasRuntimeBits()) {
+ if (try sema.typeHasRuntimeBits(enum_obj.tag_ty)) {
return null;
}
if (enum_obj.fields.count() == 1) {
@@ -32819,7 +32926,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.enum_full => {
const resolved_ty = try sema.resolveTypeFields(ty);
const enum_obj = resolved_ty.castTag(.enum_full).?.data;
- if (enum_obj.tag_ty.hasRuntimeBits()) {
+ if (try sema.typeHasRuntimeBits(enum_obj.tag_ty)) {
return null;
}
switch (enum_obj.fields.count()) {
@@ -32843,7 +32950,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
},
.enum_nonexhaustive => {
const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty;
- if (tag_ty.zigTypeTag() != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) {
+ if (tag_ty.zigTypeTag(mod) != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) {
return Value.zero;
} else {
return null;
@@ -32883,13 +32990,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.null => return Value.null,
.undefined => return Value.initTag(.undef),
- .int_unsigned, .int_signed => {
- if (ty.cast(Type.Payload.Bits).?.data == 0) {
- return Value.zero;
- } else {
- return null;
- }
- },
.vector, .array, .array_u8 => {
if (ty.arrayLen() == 0)
return Value.initTag(.empty_array);
@@ -32919,6 +33019,89 @@ pub fn getTmpAir(sema: Sema) Air {
}
pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
+ switch (ty.ip_index) {
+ .u1_type => return .u1_type,
+ .u8_type => return .u8_type,
+ .i8_type => return .i8_type,
+ .u16_type => return .u16_type,
+ .i16_type => return .i16_type,
+ .u29_type => return .u29_type,
+ .u32_type => return .u32_type,
+ .i32_type => return .i32_type,
+ .u64_type => return .u64_type,
+ .i64_type => return .i64_type,
+ .u80_type => return .u80_type,
+ .u128_type => return .u128_type,
+ .i128_type => return .i128_type,
+ .usize_type => return .usize_type,
+ .isize_type => return .isize_type,
+ .c_char_type => return .c_char_type,
+ .c_short_type => return .c_short_type,
+ .c_ushort_type => return .c_ushort_type,
+ .c_int_type => return .c_int_type,
+ .c_uint_type => return .c_uint_type,
+ .c_long_type => return .c_long_type,
+ .c_ulong_type => return .c_ulong_type,
+ .c_longlong_type => return .c_longlong_type,
+ .c_ulonglong_type => return .c_ulonglong_type,
+ .c_longdouble_type => return .c_longdouble_type,
+ .f16_type => return .f16_type,
+ .f32_type => return .f32_type,
+ .f64_type => return .f64_type,
+ .f80_type => return .f80_type,
+ .f128_type => return .f128_type,
+ .anyopaque_type => return .anyopaque_type,
+ .bool_type => return .bool_type,
+ .void_type => return .void_type,
+ .type_type => return .type_type,
+ .anyerror_type => return .anyerror_type,
+ .comptime_int_type => return .comptime_int_type,
+ .comptime_float_type => return .comptime_float_type,
+ .noreturn_type => return .noreturn_type,
+ .anyframe_type => return .anyframe_type,
+ .null_type => return .null_type,
+ .undefined_type => return .undefined_type,
+ .enum_literal_type => return .enum_literal_type,
+ .atomic_order_type => return .atomic_order_type,
+ .atomic_rmw_op_type => return .atomic_rmw_op_type,
+ .calling_convention_type => return .calling_convention_type,
+ .address_space_type => return .address_space_type,
+ .float_mode_type => return .float_mode_type,
+ .reduce_op_type => return .reduce_op_type,
+ .call_modifier_type => return .call_modifier_type,
+ .prefetch_options_type => return .prefetch_options_type,
+ .export_options_type => return .export_options_type,
+ .extern_options_type => return .extern_options_type,
+ .type_info_type => return .type_info_type,
+ .manyptr_u8_type => return .manyptr_u8_type,
+ .manyptr_const_u8_type => return .manyptr_const_u8_type,
+ .single_const_pointer_to_comptime_int_type => return .single_const_pointer_to_comptime_int_type,
+ .const_slice_u8_type => return .const_slice_u8_type,
+ .anyerror_void_error_union_type => return .anyerror_void_error_union_type,
+ .generic_poison_type => return .generic_poison_type,
+ .var_args_param_type => return .var_args_param_type,
+ .empty_struct_type => return .empty_struct_type,
+
+ // values
+ .undef => unreachable,
+ .zero => unreachable,
+ .zero_usize => unreachable,
+ .one => unreachable,
+ .one_usize => unreachable,
+ .calling_convention_c => unreachable,
+ .calling_convention_inline => unreachable,
+ .void_value => unreachable,
+ .unreachable_value => unreachable,
+ .null_value => unreachable,
+ .bool_true => unreachable,
+ .bool_false => unreachable,
+ .empty_struct => unreachable,
+ .generic_poison => unreachable,
+
+ _ => {},
+
+ .none => {},
+ }
switch (ty.tag()) {
.u1 => return .u1_type,
.u8 => return .u8_type,
@@ -32934,6 +33117,7 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
.i128 => return .i128_type,
.usize => return .usize_type,
.isize => return .isize_type,
+ .c_char => return .c_char_type,
.c_short => return .c_short_type,
.c_ushort => return .c_ushort_type,
.c_int => return .c_int_type,
@@ -32966,17 +33150,13 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
.address_space => return .address_space_type,
.float_mode => return .float_mode_type,
.reduce_op => return .reduce_op_type,
- .modifier => return .modifier_type,
+ .modifier => return .call_modifier_type,
.prefetch_options => return .prefetch_options_type,
.export_options => return .export_options_type,
.extern_options => return .extern_options_type,
.type_info => return .type_info_type,
.manyptr_u8 => return .manyptr_u8_type,
.manyptr_const_u8 => return .manyptr_const_u8_type,
- .fn_noreturn_no_args => return .fn_noreturn_no_args_type,
- .fn_void_no_args => return .fn_void_no_args_type,
- .fn_naked_noreturn_no_args => return .fn_naked_noreturn_no_args_type,
- .fn_ccc_void_no_args => return .fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type,
.const_slice_u8 => return .const_slice_u8_type,
.anyerror_void_error_union => return .anyerror_void_error_union_type,
@@ -33186,7 +33366,8 @@ const DerefResult = union(enum) {
};
fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type, want_mutable: bool) CompileError!DerefResult {
- const target = sema.mod.getTarget();
+ const mod = sema.mod;
+ const target = mod.getTarget();
const deref = sema.beginComptimePtrLoad(block, src, ptr_val, load_ty) catch |err| switch (err) {
error.RuntimeLoad => return DerefResult{ .runtime_load = {} },
else => |e| return e,
@@ -33211,7 +33392,7 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value
// The type is not in-memory coercible or the direct dereference failed, so it must
// be bitcast according to the pointer type we are performing the load through.
- if (!load_ty.hasWellDefinedLayout()) {
+ if (!load_ty.hasWellDefinedLayout(mod)) {
return DerefResult{ .needed_well_defined = load_ty };
}
@@ -33253,6 +33434,7 @@ fn typePtrOrOptionalPtrTy(
ty: Type,
buf: *Type.Payload.ElemType,
) !?Type {
+ const mod = sema.mod;
switch (ty.tag()) {
.optional_single_const_pointer,
.optional_single_mut_pointer,
@@ -33281,7 +33463,7 @@ fn typePtrOrOptionalPtrTy(
.optional => {
const child_type = ty.optionalChild(buf);
- if (child_type.zigTypeTag() != .Pointer) return null;
+ if (child_type.zigTypeTag(mod) != .Pointer) return null;
const info = child_type.ptrInfo().data;
switch (info.size) {
@@ -33310,6 +33492,23 @@ fn typePtrOrOptionalPtrTy(
/// TODO merge these implementations together with the "advanced"/opt_sema pattern seen
/// elsewhere in value.zig
pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
+ const mod = sema.mod;
+ if (ty.ip_index != .none) {
+ switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => return false,
+ .ptr_type => @panic("TODO"),
+ .array_type => @panic("TODO"),
+ .vector_type => @panic("TODO"),
+ .optional_type => @panic("TODO"),
+ .error_union_type => @panic("TODO"),
+ .simple_type => @panic("TODO"),
+ .struct_type => @panic("TODO"),
+ .simple_value => unreachable,
+ .extern_func => unreachable,
+ .int => unreachable,
+ .enum_tag => unreachable, // it's a value, not a type
+ }
+ }
return switch (ty.tag()) {
.u1,
.u8,
@@ -33374,8 +33573,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.generic_poison,
.array_u8,
.array_u8_sentinel_0,
- .int_signed,
- .int_unsigned,
.enum_simple,
=> false,
@@ -33385,11 +33582,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.comptime_float,
.enum_literal,
.type_info,
- // These are function bodies, not function pointers.
- .fn_noreturn_no_args,
- .fn_void_no_args,
- .fn_naked_noreturn_no_args,
- .fn_ccc_void_no_args,
.function,
=> true,
@@ -33412,7 +33604,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.mut_slice,
=> {
const child_ty = ty.childType();
- if (child_ty.zigTypeTag() == .Fn) {
+ if (child_ty.zigTypeTag(mod) == .Fn) {
return child_ty.fnInfo().is_generic;
} else {
return sema.typeRequiresComptime(child_ty);
@@ -33504,7 +33696,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
}
pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
- return ty.hasRuntimeBitsAdvanced(false, .{ .sema = sema }) catch |err| switch (err) {
+ const mod = sema.mod;
+ return ty.hasRuntimeBitsAdvanced(mod, false, .{ .sema = sema }) catch |err| switch (err) {
error.NeedLazy => unreachable,
else => |e| return e,
};
@@ -33512,19 +33705,18 @@ pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
fn typeAbiSize(sema: *Sema, ty: Type) !u64 {
try sema.resolveTypeLayout(ty);
- const target = sema.mod.getTarget();
- return ty.abiSize(target);
+ return ty.abiSize(sema.mod);
}
fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!u32 {
- const target = sema.mod.getTarget();
- return (try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar;
+ return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar;
}
/// Not valid to call for packed unions.
/// Keep implementation in sync with `Module.Union.Field.normalAlignment`.
fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 {
- if (field.ty.zigTypeTag() == .NoReturn) {
+ const mod = sema.mod;
+ if (field.ty.zigTypeTag(mod) == .NoReturn) {
return @as(u32, 0);
} else if (field.abi_align == 0) {
return sema.typeAbiAlignment(field.ty);
@@ -33605,13 +33797,14 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void {
}
fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {
- if (ty.zigTypeTag() == .Vector) {
+ const mod = sema.mod;
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
- const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
- const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+ const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
+ const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem);
}
return Value.Tag.aggregate.create(sema.arena, result_data);
@@ -33620,13 +33813,13 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {
}
fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value) !Value {
+ const mod = sema.mod;
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const target = sema.mod.getTarget();
- const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema);
- const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema);
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
@@ -33645,7 +33838,8 @@ fn numberAddWrapScalar(
) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
- if (ty.zigTypeTag() == .ComptimeInt) {
+ const mod = sema.mod;
+ if (ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.intAdd(lhs, rhs, ty);
}
@@ -33663,7 +33857,8 @@ fn intSub(
rhs: Value,
ty: Type,
) !Value {
- if (ty.zigTypeTag() == .Vector) {
+ const mod = sema.mod;
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
@@ -33678,13 +33873,13 @@ fn intSub(
}
fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value) !Value {
+ const mod = sema.mod;
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const target = sema.mod.getTarget();
- const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema);
- const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema);
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
@@ -33703,7 +33898,8 @@ fn numberSubWrapScalar(
) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
- if (ty.zigTypeTag() == .ComptimeInt) {
+ const mod = sema.mod;
+ if (ty.zigTypeTag(mod) == .ComptimeInt) {
return sema.intSub(lhs, rhs, ty);
}
@@ -33721,14 +33917,15 @@ fn floatAdd(
rhs: Value,
float_type: Type,
) !Value {
- if (float_type.zigTypeTag() == .Vector) {
+ const mod = sema.mod;
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
- scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType());
+ scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType(mod));
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -33778,14 +33975,15 @@ fn floatSub(
rhs: Value,
float_type: Type,
) !Value {
- if (float_type.zigTypeTag() == .Vector) {
+ const mod = sema.mod;
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
- scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType());
+ scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType(mod));
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -33835,7 +34033,8 @@ fn intSubWithOverflow(
rhs: Value,
ty: Type,
) !Value.OverflowArithmeticResult {
- if (ty.zigTypeTag() == .Vector) {
+ const mod = sema.mod;
+ if (ty.zigTypeTag(mod) == .Vector) {
const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
@@ -33843,7 +34042,7 @@ fn intSubWithOverflow(
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
- const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType());
+ const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod));
overflowed_data[i] = of_math_result.overflow_bit;
scalar.* = of_math_result.wrapped_result;
}
@@ -33861,13 +34060,13 @@ fn intSubWithOverflowScalar(
rhs: Value,
ty: Type,
) !Value.OverflowArithmeticResult {
- const target = sema.mod.getTarget();
- const info = ty.intInfo(target);
+ const mod = sema.mod;
+ const info = ty.intInfo(mod);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema);
- const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema);
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -33889,13 +34088,14 @@ fn floatToInt(
float_ty: Type,
int_ty: Type,
) CompileError!Value {
- if (float_ty.zigTypeTag() == .Vector) {
+ const mod = sema.mod;
+ if (float_ty.zigTypeTag(mod) == .Vector) {
const elem_ty = float_ty.childType();
const result_data = try sema.arena.alloc(Value, float_ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(sema.mod, i, &buf);
- scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType());
+ scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod));
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
@@ -33976,7 +34176,8 @@ fn intFitsInType(
ty: Type,
vector_index: ?*usize,
) CompileError!bool {
- const target = sema.mod.getTarget();
+ const mod = sema.mod;
+ const target = mod.getTarget();
switch (val.tag()) {
.zero,
.undef,
@@ -33985,9 +34186,9 @@ fn intFitsInType(
.one,
.bool_true,
- => switch (ty.zigTypeTag()) {
+ => switch (ty.zigTypeTag(mod)) {
.Int => {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
return switch (info.signedness) {
.signed => info.bits >= 2,
.unsigned => info.bits >= 1,
@@ -33997,9 +34198,9 @@ fn intFitsInType(
else => unreachable,
},
- .lazy_align => switch (ty.zigTypeTag()) {
+ .lazy_align => switch (ty.zigTypeTag(mod)) {
.Int => {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed);
// If it is u16 or bigger we know the alignment fits without resolving it.
if (info.bits >= max_needed_bits) return true;
@@ -34011,9 +34212,9 @@ fn intFitsInType(
.ComptimeInt => return true,
else => unreachable,
},
- .lazy_size => switch (ty.zigTypeTag()) {
+ .lazy_size => switch (ty.zigTypeTag(mod)) {
.Int => {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed);
// If it is u64 or bigger we know the size fits without resolving it.
if (info.bits >= max_needed_bits) return true;
@@ -34026,41 +34227,41 @@ fn intFitsInType(
else => unreachable,
},
- .int_u64 => switch (ty.zigTypeTag()) {
+ .int_u64 => switch (ty.zigTypeTag(mod)) {
.Int => {
const x = val.castTag(.int_u64).?.data;
if (x == 0) return true;
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
return info.bits >= needed_bits;
},
.ComptimeInt => return true,
else => unreachable,
},
- .int_i64 => switch (ty.zigTypeTag()) {
+ .int_i64 => switch (ty.zigTypeTag(mod)) {
.Int => {
const x = val.castTag(.int_i64).?.data;
if (x == 0) return true;
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
if (info.signedness == .unsigned and x < 0)
return false;
var buffer: Value.BigIntSpace = undefined;
- return (try val.toBigIntAdvanced(&buffer, target, sema)).fitsInTwosComp(info.signedness, info.bits);
+ return (try val.toBigIntAdvanced(&buffer, mod, sema)).fitsInTwosComp(info.signedness, info.bits);
},
.ComptimeInt => return true,
else => unreachable,
},
- .int_big_positive => switch (ty.zigTypeTag()) {
+ .int_big_positive => switch (ty.zigTypeTag(mod)) {
.Int => {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
return val.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
},
.ComptimeInt => return true,
else => unreachable,
},
- .int_big_negative => switch (ty.zigTypeTag()) {
+ .int_big_negative => switch (ty.zigTypeTag(mod)) {
.Int => {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
return val.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
},
.ComptimeInt => return true,
@@ -34068,7 +34269,7 @@ fn intFitsInType(
},
.the_only_possible_value => {
- assert(ty.intInfo(target).bits == 0);
+ assert(ty.intInfo(mod).bits == 0);
return true;
},
@@ -34077,9 +34278,9 @@ fn intFitsInType(
.decl_ref,
.function,
.variable,
- => switch (ty.zigTypeTag()) {
+ => switch (ty.zigTypeTag(mod)) {
.Int => {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
const ptr_bits = target.ptrBitWidth();
return switch (info.signedness) {
.signed => info.bits > ptr_bits,
@@ -34091,9 +34292,9 @@ fn intFitsInType(
},
.aggregate => {
- assert(ty.zigTypeTag() == .Vector);
+ assert(ty.zigTypeTag(mod) == .Vector);
for (val.castTag(.aggregate).?.data, 0..) |elem, i| {
- if (!(try sema.intFitsInType(elem, ty.scalarType(), null))) {
+ if (!(try sema.intFitsInType(elem, ty.scalarType(mod), null))) {
if (vector_index) |some| some.* = i;
return false;
}
@@ -34122,11 +34323,8 @@ fn intInRange(
}
/// Asserts the type is an enum.
-fn enumHasInt(
- sema: *Sema,
- ty: Type,
- int: Value,
-) CompileError!bool {
+fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
+ const mod = sema.mod;
switch (ty.tag()) {
.enum_nonexhaustive => unreachable,
.enum_full => {
@@ -34157,11 +34355,7 @@ fn enumHasInt(
const enum_simple = ty.castTag(.enum_simple).?.data;
const fields_len = enum_simple.fields.count();
const bits = std.math.log2_int_ceil(usize, fields_len);
- var buffer: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = bits,
- };
- const tag_ty = Type.initPayload(&buffer.base);
+ const tag_ty = try mod.intType(.unsigned, bits);
return sema.intInRange(tag_ty, int, fields_len);
},
.atomic_order,
@@ -34186,7 +34380,8 @@ fn intAddWithOverflow(
rhs: Value,
ty: Type,
) !Value.OverflowArithmeticResult {
- if (ty.zigTypeTag() == .Vector) {
+ const mod = sema.mod;
+ if (ty.zigTypeTag(mod) == .Vector) {
const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
@@ -34194,7 +34389,7 @@ fn intAddWithOverflow(
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
- const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType());
+ const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod));
overflowed_data[i] = of_math_result.overflow_bit;
scalar.* = of_math_result.wrapped_result;
}
@@ -34212,13 +34407,13 @@ fn intAddWithOverflowScalar(
rhs: Value,
ty: Type,
) !Value.OverflowArithmeticResult {
- const target = sema.mod.getTarget();
- const info = ty.intInfo(target);
+ const mod = sema.mod;
+ const info = ty.intInfo(mod);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema);
- const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema);
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -34243,14 +34438,15 @@ fn compareAll(
rhs: Value,
ty: Type,
) CompileError!bool {
- if (ty.zigTypeTag() == .Vector) {
+ const mod = sema.mod;
+ if (ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < ty.vectorLen()) : (i += 1) {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
- if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType()))) {
+ if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)))) {
return false;
}
}
@@ -34270,7 +34466,7 @@ fn compareScalar(
switch (op) {
.eq => return sema.valuesEqual(lhs, rhs, ty),
.neq => return !(try sema.valuesEqual(lhs, rhs, ty)),
- else => return Value.compareHeteroAdvanced(lhs, op, rhs, sema.mod.getTarget(), sema),
+ else => return Value.compareHeteroAdvanced(lhs, op, rhs, sema.mod, sema),
}
}
@@ -34291,14 +34487,15 @@ fn compareVector(
rhs: Value,
ty: Type,
) !Value {
- assert(ty.zigTypeTag() == .Vector);
+ const mod = sema.mod;
+ assert(ty.zigTypeTag(mod) == .Vector);
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
- const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType());
+ const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod));
scalar.* = Value.makeBool(res_bool);
}
return Value.Tag.aggregate.create(sema.arena, result_data);
@@ -34312,10 +34509,10 @@ fn compareVector(
/// Handles const-ness and address spaces in particular.
/// This code is duplicated in `analyzePtrArithmetic`.
fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
+ const mod = sema.mod;
const ptr_info = ptr_ty.ptrInfo().data;
- const elem_ty = ptr_ty.elemType2();
+ const elem_ty = ptr_ty.elemType2(mod);
const allow_zero = ptr_info.@"allowzero" and (offset orelse 0) == 0;
- const target = sema.mod.getTarget();
const parent_ty = ptr_ty.childType();
const VI = Type.Payload.Pointer.Data.VectorIndex;
@@ -34325,14 +34522,14 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
alignment: u32 = 0,
vector_index: VI = .none,
} = if (parent_ty.tag() == .vector and ptr_info.size == .One) blk: {
- const elem_bits = elem_ty.bitSize(target);
+ const elem_bits = elem_ty.bitSize(mod);
if (elem_bits == 0) break :blk .{};
const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
if (!is_packed) break :blk .{};
break :blk .{
.host_size = @intCast(u16, parent_ty.arrayLen()),
- .alignment = @intCast(u16, parent_ty.abiAlignment(target)),
+ .alignment = @intCast(u16, parent_ty.abiAlignment(mod)),
.vector_index = if (offset) |some| @intToEnum(VI, some) else .runtime,
};
} else .{};
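The change running through these Sema.zig hunks is mechanical: call sites that previously fetched `std.Target` via `sema.mod.getTarget()` and passed it into Type queries (`bitSize`, `abiAlignment`, `intInfo`, `scalarType`, `zigTypeTag`, ...) now pass the `*Module` itself, so the queries can consult both the target and the new intern pool. A minimal standalone sketch of that signature migration (toy types, hypothetical names, not the compiler's real API):

    const std = @import("std");

    // Toy stand-in for the Module/Type relationship; only the signature
    // shape matters: pass the module, not a bare target.
    const Module = struct {
        target_ptr_bits: u16,

        fn getTarget(mod: *const Module) u16 {
            // The real compiler returns a full std.Target here; a pointer
            // bit width is enough to show the shape of the change.
            return mod.target_ptr_bits;
        }
    };

    const Type = union(enum) {
        int: u16,
        pointer,

        // Before: fn bitSize(ty: Type, target: std.Target) u64
        // After:  fn bitSize(ty: Type, mod: *const Module) u64
        fn bitSize(ty: Type, mod: *const Module) u64 {
            return switch (ty) {
                .int => |bits| bits,
                .pointer => mod.getTarget(),
            };
        }
    };

    pub fn main() void {
        const mod = Module{ .target_ptr_bits = 64 };
        const int_ty: Type = .{ .int = 37 };
        const ptr_ty: Type = .{ .pointer = {} };
        std.debug.print("{d} {d}\n", .{ int_ty.bitSize(&mod), ptr_ty.bitSize(&mod) });
    }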
src/target.zig
@@ -512,134 +512,6 @@ pub fn needUnwindTables(target: std.Target) bool {
return target.os.tag == .windows;
}
-pub const AtomicPtrAlignmentError = error{
- FloatTooBig,
- IntTooBig,
- BadType,
-};
-
-pub const AtomicPtrAlignmentDiagnostics = struct {
- bits: u16 = undefined,
- max_bits: u16 = undefined,
-};
-
-/// If ABI alignment of `ty` is OK for atomic operations, returns 0.
-/// Otherwise returns the alignment required on a pointer for the target
-/// to perform atomic operations.
-// TODO this function does not take into account CPU features, which can affect
-// this value. Audit this!
-pub fn atomicPtrAlignment(
- target: std.Target,
- ty: Type,
- diags: *AtomicPtrAlignmentDiagnostics,
-) AtomicPtrAlignmentError!u32 {
- const max_atomic_bits: u16 = switch (target.cpu.arch) {
- .avr,
- .msp430,
- .spu_2,
- => 16,
-
- .arc,
- .arm,
- .armeb,
- .hexagon,
- .m68k,
- .le32,
- .mips,
- .mipsel,
- .nvptx,
- .powerpc,
- .powerpcle,
- .r600,
- .riscv32,
- .sparc,
- .sparcel,
- .tce,
- .tcele,
- .thumb,
- .thumbeb,
- .x86,
- .xcore,
- .amdil,
- .hsail,
- .spir,
- .kalimba,
- .lanai,
- .shave,
- .wasm32,
- .renderscript32,
- .csky,
- .spirv32,
- .dxil,
- .loongarch32,
- .xtensa,
- => 32,
-
- .amdgcn,
- .bpfel,
- .bpfeb,
- .le64,
- .mips64,
- .mips64el,
- .nvptx64,
- .powerpc64,
- .powerpc64le,
- .riscv64,
- .sparc64,
- .s390x,
- .amdil64,
- .hsail64,
- .spir64,
- .wasm64,
- .renderscript64,
- .ve,
- .spirv64,
- .loongarch64,
- => 64,
-
- .aarch64,
- .aarch64_be,
- .aarch64_32,
- => 128,
-
- .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64,
- };
-
- var buffer: Type.Payload.Bits = undefined;
-
- const int_ty = switch (ty.zigTypeTag()) {
- .Int => ty,
- .Enum => ty.intTagType(&buffer),
- .Float => {
- const bit_count = ty.floatBits(target);
- if (bit_count > max_atomic_bits) {
- diags.* = .{
- .bits = bit_count,
- .max_bits = max_atomic_bits,
- };
- return error.FloatTooBig;
- }
- return 0;
- },
- .Bool => return 0,
- else => {
- if (ty.isPtrAtRuntime()) return 0;
- return error.BadType;
- },
- };
-
- const bit_count = int_ty.intInfo(target).bits;
- if (bit_count > max_atomic_bits) {
- diags.* = .{
- .bits = bit_count,
- .max_bits = max_atomic_bits,
- };
- return error.IntTooBig;
- }
-
- return 0;
-}
-
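The deleted `atomicPtrAlignment` helper encoded, per CPU architecture, the widest operand that native atomic operations support: 16 bits on AVR/MSP430, 32 on most 32-bit targets, 64 on most 64-bit targets, 128 on aarch64, and 128 on x86_64 only when the `cx16` feature is present. A reduced standalone sketch of that table (hypothetical function name, only a handful of architectures; where the commit moves this logic is not shown in this excerpt):

    const std = @import("std");

    // Reduced sketch of the per-architecture table removed above. The
    // `else` arm is a simplification; the deleted code enumerates every
    // architecture explicitly.
    fn maxAtomicBits(arch: std.Target.Cpu.Arch, cpu_features: std.Target.Cpu.Feature.Set) u16 {
        return switch (arch) {
            .avr, .msp430 => 16,
            .x86, .arm, .riscv32, .wasm32 => 32,
            .riscv64, .wasm64, .mips64 => 64,
            .aarch64, .aarch64_be => 128,
            .x86_64 => if (std.Target.x86.featureSetHas(cpu_features, .cx16)) 128 else 64,
            else => 64,
        };
    }

    test "x86_64 without cx16 is limited to 64-bit atomics" {
        const no_features = std.Target.Cpu.Feature.Set.empty;
        try std.testing.expectEqual(@as(u16, 64), maxAtomicBits(.x86_64, no_features));
    }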
pub fn defaultAddressSpace(
target: std.Target,
context: enum {
src/type.zig
@@ -9,27 +9,102 @@ const log = std.log.scoped(.Type);
const target_util = @import("target.zig");
const TypedValue = @import("TypedValue.zig");
const Sema = @import("Sema.zig");
+const InternPool = @import("InternPool.zig");
const file_struct = @This();
-/// This is the raw data, with no bookkeeping, no memory awareness, no de-duplication.
-/// It's important for this type to be small.
-/// Types are not de-duplicated, which helps with multi-threading since it obviates the requirement
-/// of obtaining a lock on a global type table, as well as making the
-/// garbage collection bookkeeping simpler.
-/// This union takes advantage of the fact that the first page of memory
-/// is unmapped, giving us 4096 possible enum tags that have no payload.
-pub const Type = extern union {
- /// If the tag value is less than Tag.no_payload_count, then no pointer
- /// dereference is needed.
- tag_if_small_enough: Tag,
- ptr_otherwise: *Payload,
-
- pub fn zigTypeTag(ty: Type) std.builtin.TypeId {
- return ty.zigTypeTagOrPoison() catch unreachable;
- }
- pub fn zigTypeTagOrPoison(ty: Type) error{GenericPoison}!std.builtin.TypeId {
+pub const Type = struct {
+ /// We are migrating towards using this for every Type object. However, many
+ /// types are still represented the legacy way. This is indicated by using
+ /// InternPool.Index.none.
+ ip_index: InternPool.Index,
+
+ /// This is the raw data, with no bookkeeping, no memory awareness, no de-duplication.
+ /// This union takes advantage of the fact that the first page of memory
+ /// is unmapped, giving us 4096 possible enum tags that have no payload.
+ legacy: extern union {
+ /// If the tag value is less than Tag.no_payload_count, then no pointer
+ /// dereference is needed.
+ tag_if_small_enough: Tag,
+ ptr_otherwise: *Payload,
+ },
+
+ pub fn zigTypeTag(ty: Type, mod: *const Module) std.builtin.TypeId {
+ return ty.zigTypeTagOrPoison(mod) catch unreachable;
+ }
+
+ pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId {
+ if (ty.ip_index != .none) {
+ switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => return .Int,
+ .ptr_type => return .Pointer,
+ .array_type => return .Array,
+ .vector_type => return .Vector,
+ .optional_type => return .Optional,
+ .error_union_type => return .ErrorUnion,
+ .struct_type => return .Struct,
+ .simple_type => |s| switch (s) {
+ .f16,
+ .f32,
+ .f64,
+ .f80,
+ .f128,
+ .c_longdouble,
+ => return .Float,
+
+ .usize,
+ .isize,
+ .c_char,
+ .c_short,
+ .c_ushort,
+ .c_int,
+ .c_uint,
+ .c_long,
+ .c_ulong,
+ .c_longlong,
+ .c_ulonglong,
+ => return .Int,
+
+ .anyopaque => return .Opaque,
+ .bool => return .Bool,
+ .void => return .Void,
+ .type => return .Type,
+ .anyerror => return .ErrorSet,
+ .comptime_int => return .ComptimeInt,
+ .comptime_float => return .ComptimeFloat,
+ .noreturn => return .NoReturn,
+ .@"anyframe" => return .AnyFrame,
+ .null => return .Null,
+ .undefined => return .Undefined,
+ .enum_literal => return .EnumLiteral,
+
+ .atomic_order,
+ .atomic_rmw_op,
+ .calling_convention,
+ .address_space,
+ .float_mode,
+ .reduce_op,
+ .call_modifier,
+ => return .Enum,
+
+ .prefetch_options,
+ .export_options,
+ .extern_options,
+ => return .Struct,
+
+ .type_info => return .Union,
+
+ .generic_poison => unreachable,
+ .var_args_param => unreachable,
+ },
+ .extern_func,
+ .int,
+ .enum_tag,
+ .simple_value,
+ => unreachable, // it's a value, not a type
+ }
+ }
switch (ty.tag()) {
.generic_poison => return error.GenericPoison,
@@ -56,8 +131,6 @@ pub const Type = extern union {
.c_ulong,
.c_longlong,
.c_ulonglong,
- .int_signed,
- .int_unsigned,
=> return .Int,
.f16,
@@ -85,10 +158,6 @@ pub const Type = extern union {
.null => return .Null,
.undefined => return .Undefined,
- .fn_noreturn_no_args => return .Fn,
- .fn_void_no_args => return .Fn,
- .fn_naked_noreturn_no_args => return .Fn,
- .fn_ccc_void_no_args => return .Fn,
.function => return .Fn,
.array,
@@ -159,26 +228,26 @@ pub const Type = extern union {
}
}
- pub fn baseZigTypeTag(self: Type) std.builtin.TypeId {
- return switch (self.zigTypeTag()) {
- .ErrorUnion => self.errorUnionPayload().baseZigTypeTag(),
+ pub fn baseZigTypeTag(self: Type, mod: *const Module) std.builtin.TypeId {
+ return switch (self.zigTypeTag(mod)) {
+ .ErrorUnion => self.errorUnionPayload().baseZigTypeTag(mod),
.Optional => {
var buf: Payload.ElemType = undefined;
- return self.optionalChild(&buf).baseZigTypeTag();
+ return self.optionalChild(&buf).baseZigTypeTag(mod);
},
else => |t| t,
};
}
- pub fn isSelfComparable(ty: Type, is_equality_cmp: bool) bool {
- return switch (ty.zigTypeTag()) {
+ pub fn isSelfComparable(ty: Type, mod: *const Module, is_equality_cmp: bool) bool {
+ return switch (ty.zigTypeTag(mod)) {
.Int,
.Float,
.ComptimeFloat,
.ComptimeInt,
=> true,
- .Vector => ty.elemType2().isSelfComparable(is_equality_cmp),
+ .Vector => ty.elemType2(mod).isSelfComparable(mod, is_equality_cmp),
.Bool,
.Type,
@@ -205,44 +274,54 @@ pub const Type = extern union {
.Optional => {
if (!is_equality_cmp) return false;
var buf: Payload.ElemType = undefined;
- return ty.optionalChild(&buf).isSelfComparable(is_equality_cmp);
+ return ty.optionalChild(&buf).isSelfComparable(mod, is_equality_cmp);
},
};
}
pub fn initTag(comptime small_tag: Tag) Type {
comptime assert(@enumToInt(small_tag) < Tag.no_payload_count);
- return .{ .tag_if_small_enough = small_tag };
+ return Type{
+ .ip_index = .none,
+ .legacy = .{ .tag_if_small_enough = small_tag },
+ };
}
pub fn initPayload(payload: *Payload) Type {
assert(@enumToInt(payload.tag) >= Tag.no_payload_count);
- return .{ .ptr_otherwise = payload };
+ return Type{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = payload },
+ };
}
- pub fn tag(self: Type) Tag {
- if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
- return self.tag_if_small_enough;
+ pub fn tag(ty: Type) Tag {
+ assert(ty.ip_index == .none);
+ if (@enumToInt(ty.legacy.tag_if_small_enough) < Tag.no_payload_count) {
+ return ty.legacy.tag_if_small_enough;
} else {
- return self.ptr_otherwise.tag;
+ return ty.legacy.ptr_otherwise.tag;
}
}
/// Prefer `castTag` to this.
pub fn cast(self: Type, comptime T: type) ?*T {
+ if (self.ip_index != .none) {
+ return null;
+ }
if (@hasField(T, "base_tag")) {
return self.castTag(T.base_tag);
}
- if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
+ if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) {
return null;
}
inline for (@typeInfo(Tag).Enum.fields) |field| {
if (field.value < Tag.no_payload_count)
continue;
const t = @intToEnum(Tag, field.value);
- if (self.ptr_otherwise.tag == t) {
+ if (self.legacy.ptr_otherwise.tag == t) {
if (T == t.Type()) {
- return @fieldParentPtr(T, "base", self.ptr_otherwise);
+ return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise);
}
return null;
}
@@ -251,11 +330,14 @@ pub const Type = extern union {
}
pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() {
- if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count)
+ if (self.ip_index != .none) {
+ return null;
+ }
+ if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count)
return null;
- if (self.ptr_otherwise.tag == t)
- return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise);
+ if (self.legacy.ptr_otherwise.tag == t)
+ return @fieldParentPtr(t.Type(), "base", self.legacy.ptr_otherwise);
return null;
}
@@ -285,10 +367,10 @@ pub const Type = extern union {
}
/// If it is a function pointer, returns the function type. Otherwise returns null.
- pub fn castPtrToFn(ty: Type) ?Type {
- if (ty.zigTypeTag() != .Pointer) return null;
+ pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type {
+ if (ty.zigTypeTag(mod) != .Pointer) return null;
const elem_ty = ty.childType();
- if (elem_ty.zigTypeTag() != .Fn) return null;
+ if (elem_ty.zigTypeTag(mod) != .Fn) return null;
return elem_ty;
}
@@ -536,7 +618,10 @@ pub const Type = extern union {
pub fn eql(a: Type, b: Type, mod: *Module) bool {
// As a shortcut, if the small tags / addresses match, we're done.
- if (a.tag_if_small_enough == b.tag_if_small_enough) return true;
+ if (a.ip_index != .none or b.ip_index != .none) {
+ return a.ip_index == b.ip_index;
+ }
+ if (a.legacy.tag_if_small_enough == b.legacy.tag_if_small_enough) return true;
switch (a.tag()) {
.generic_poison => unreachable,
@@ -589,16 +674,11 @@ pub const Type = extern union {
.i64,
.u128,
.i128,
- .int_signed,
- .int_unsigned,
=> {
- if (b.zigTypeTag() != .Int) return false;
+ if (b.zigTypeTag(mod) != .Int) return false;
if (b.isNamedInt()) return false;
-
- // Arbitrary sized integers. The target will not be branched upon,
- // because we handled target-dependent cases above.
- const info_a = a.intInfo(@as(Target, undefined));
- const info_b = b.intInfo(@as(Target, undefined));
+ const info_a = a.intInfo(mod);
+ const info_b = b.intInfo(mod);
return info_a.signedness == info_b.signedness and info_a.bits == info_b.bits;
},
@@ -641,13 +721,8 @@ pub const Type = extern union {
return opaque_obj_a == opaque_obj_b;
},
- .fn_noreturn_no_args,
- .fn_void_no_args,
- .fn_naked_noreturn_no_args,
- .fn_ccc_void_no_args,
- .function,
- => {
- if (b.zigTypeTag() != .Fn) return false;
+ .function => {
+ if (b.zigTypeTag(mod) != .Fn) return false;
const a_info = a.fnInfo();
const b_info = b.fnInfo();
@@ -699,7 +774,7 @@ pub const Type = extern union {
.array_sentinel,
.vector,
=> {
- if (a.zigTypeTag() != b.zigTypeTag()) return false;
+ if (a.zigTypeTag(mod) != b.zigTypeTag(mod)) return false;
if (a.arrayLen() != b.arrayLen())
return false;
@@ -737,7 +812,7 @@ pub const Type = extern union {
.manyptr_const_u8,
.manyptr_const_u8_sentinel_0,
=> {
- if (b.zigTypeTag() != .Pointer) return false;
+ if (b.zigTypeTag(mod) != .Pointer) return false;
const info_a = a.ptrInfo().data;
const info_b = b.ptrInfo().data;
@@ -783,7 +858,7 @@ pub const Type = extern union {
.optional_single_const_pointer,
.optional_single_mut_pointer,
=> {
- if (b.zigTypeTag() != .Optional) return false;
+ if (b.zigTypeTag(mod) != .Optional) return false;
var buf_a: Payload.ElemType = undefined;
var buf_b: Payload.ElemType = undefined;
@@ -791,7 +866,7 @@ pub const Type = extern union {
},
.anyerror_void_error_union, .error_union => {
- if (b.zigTypeTag() != .ErrorUnion) return false;
+ if (b.zigTypeTag(mod) != .ErrorUnion) return false;
const a_set = a.errorUnionSet();
const b_set = b.errorUnionSet();
@@ -805,8 +880,8 @@ pub const Type = extern union {
},
.anyframe_T => {
- if (b.zigTypeTag() != .AnyFrame) return false;
- return a.elemType2().eql(b.elemType2(), mod);
+ if (b.zigTypeTag(mod) != .AnyFrame) return false;
+ return a.elemType2(mod).eql(b.elemType2(mod), mod);
},
.empty_struct => {
@@ -941,6 +1016,9 @@ pub const Type = extern union {
}
pub fn hashWithHasher(ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
+ if (ty.ip_index != .none) {
+ return mod.intern_pool.indexToKey(ty.ip_index).hashWithHasher(hasher);
+ }
switch (ty.tag()) {
.generic_poison => unreachable,
@@ -1007,13 +1085,10 @@ pub const Type = extern union {
.i64,
.u128,
.i128,
- .int_signed,
- .int_unsigned,
=> {
- // Arbitrary sized integers. The target will not be branched upon,
- // because we handled target-dependent cases above.
+ // Arbitrary sized integers.
std.hash.autoHash(hasher, std.builtin.TypeId.Int);
- const info = ty.intInfo(@as(Target, undefined));
+ const info = ty.intInfo(mod);
std.hash.autoHash(hasher, info.signedness);
std.hash.autoHash(hasher, info.bits);
},
@@ -1052,12 +1127,7 @@ pub const Type = extern union {
std.hash.autoHash(hasher, opaque_obj);
},
- .fn_noreturn_no_args,
- .fn_void_no_args,
- .fn_naked_noreturn_no_args,
- .fn_ccc_void_no_args,
- .function,
- => {
+ .function => {
std.hash.autoHash(hasher, std.builtin.TypeId.Fn);
const fn_info = ty.fnInfo();
@@ -1275,9 +1345,15 @@ pub const Type = extern union {
};
pub fn copy(self: Type, allocator: Allocator) error{OutOfMemory}!Type {
- if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
- return Type{ .tag_if_small_enough = self.tag_if_small_enough };
- } else switch (self.ptr_otherwise.tag) {
+ if (self.ip_index != .none) {
+ return Type{ .ip_index = self.ip_index, .legacy = undefined };
+ }
+ if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) {
+ return Type{
+ .ip_index = .none,
+ .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough },
+ };
+ } else switch (self.legacy.ptr_otherwise.tag) {
.u1,
.u8,
.i8,
@@ -1317,10 +1393,6 @@ pub const Type = extern union {
.noreturn,
.null,
.undefined,
- .fn_noreturn_no_args,
- .fn_void_no_args,
- .fn_naked_noreturn_no_args,
- .fn_ccc_void_no_args,
.single_const_pointer_to_comptime_int,
.const_slice_u8,
.const_slice_u8_sentinel_0,
@@ -1370,13 +1442,12 @@ pub const Type = extern union {
.base = .{ .tag = payload.base.tag },
.data = try payload.data.copy(allocator),
};
- return Type{ .ptr_otherwise = &new_payload.base };
+ return Type{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
},
- .int_signed,
- .int_unsigned,
- => return self.copyPayloadShallow(allocator, Payload.Bits),
-
.vector => {
const payload = self.castTag(.vector).?.data;
return Tag.vector.create(allocator, .{
@@ -1511,7 +1582,10 @@ pub const Type = extern union {
const payload = self.cast(T).?;
const new_payload = try allocator.create(T);
new_payload.* = payload.*;
- return Type{ .ptr_otherwise = &new_payload.base };
+ return Type{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
}
pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
@@ -1550,7 +1624,7 @@ pub const Type = extern union {
}
/// This is a debug function. In order to print types in a meaningful way
- /// we also need access to the target.
+ /// we also need access to the module.
pub fn dump(
start_type: Type,
comptime unused_format_string: []const u8,
@@ -1559,10 +1633,13 @@ pub const Type = extern union {
) @TypeOf(writer).Error!void {
_ = options;
comptime assert(unused_format_string.len == 0);
+ if (start_type.ip_index != .none) {
+ return writer.print("(intern index: {d})", .{@enumToInt(start_type.ip_index)});
+ }
if (true) {
- // This is disabled to work around a bug where this function
- // recursively causes more generic function instantiations
- // resulting in an infinite loop in the compiler.
+ // This is disabled to work around a stage2 bug where this function recursively
+ // causes more generic function instantiations resulting in an infinite loop
+ // in the compiler.
try writer.writeAll("[TODO fix internal compiler bug regarding dump]");
return;
}
@@ -1656,10 +1733,6 @@ pub const Type = extern union {
.anyerror_void_error_union => return writer.writeAll("anyerror!void"),
.const_slice_u8 => return writer.writeAll("[]const u8"),
.const_slice_u8_sentinel_0 => return writer.writeAll("[:0]const u8"),
- .fn_noreturn_no_args => return writer.writeAll("fn() noreturn"),
- .fn_void_no_args => return writer.writeAll("fn() void"),
- .fn_naked_noreturn_no_args => return writer.writeAll("fn() callconv(.Naked) noreturn"),
- .fn_ccc_void_no_args => return writer.writeAll("fn() callconv(.C) void"),
.single_const_pointer_to_comptime_int => return writer.writeAll("*const comptime_int"),
.manyptr_u8 => return writer.writeAll("[*]u8"),
.manyptr_const_u8 => return writer.writeAll("[*]const u8"),
@@ -1820,14 +1893,6 @@ pub const Type = extern union {
ty = pointee_type;
continue;
},
- .int_signed => {
- const bits = ty.castTag(.int_signed).?.data;
- return writer.print("i{d}", .{bits});
- },
- .int_unsigned => {
- const bits = ty.castTag(.int_unsigned).?.data;
- return writer.print("u{d}", .{bits});
- },
.optional => {
const child_type = ty.castTag(.optional).?.data;
try writer.writeByte('?');
@@ -1938,6 +2003,26 @@ pub const Type = extern union {
/// Prints a name suitable for `@typeName`.
pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void {
+ if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => |int_type| {
+ const sign_char: u8 = switch (int_type.signedness) {
+ .signed => 'i',
+ .unsigned => 'u',
+ };
+ return writer.print("{c}{d}", .{ sign_char, int_type.bits });
+ },
+ .ptr_type => @panic("TODO"),
+ .array_type => @panic("TODO"),
+ .vector_type => @panic("TODO"),
+ .optional_type => @panic("TODO"),
+ .error_union_type => @panic("TODO"),
+ .simple_type => |s| return writer.writeAll(@tagName(s)),
+ .struct_type => @panic("TODO"),
+ .simple_value => unreachable,
+ .extern_func => unreachable,
+ .int => unreachable,
+ .enum_tag => unreachable,
+ };
const t = ty.tag();
switch (t) {
.inferred_alloc_const => unreachable,
@@ -2041,10 +2126,6 @@ pub const Type = extern union {
.anyerror_void_error_union => try writer.writeAll("anyerror!void"),
.const_slice_u8 => try writer.writeAll("[]const u8"),
.const_slice_u8_sentinel_0 => try writer.writeAll("[:0]const u8"),
- .fn_noreturn_no_args => try writer.writeAll("fn() noreturn"),
- .fn_void_no_args => try writer.writeAll("fn() void"),
- .fn_naked_noreturn_no_args => try writer.writeAll("fn() callconv(.Naked) noreturn"),
- .fn_ccc_void_no_args => try writer.writeAll("fn() callconv(.C) void"),
.single_const_pointer_to_comptime_int => try writer.writeAll("*const comptime_int"),
.manyptr_u8 => try writer.writeAll("[*]u8"),
.manyptr_const_u8 => try writer.writeAll("[*]const u8"),
@@ -2200,7 +2281,7 @@ pub const Type = extern union {
if (info.@"align" != 0) {
try writer.print("align({d}", .{info.@"align"});
} else {
- const alignment = info.pointee_type.abiAlignment(mod.getTarget());
+ const alignment = info.pointee_type.abiAlignment(mod);
try writer.print("align({d}", .{alignment});
}
@@ -2224,14 +2305,6 @@ pub const Type = extern union {
try print(info.pointee_type, writer, mod);
},
- .int_signed => {
- const bits = ty.castTag(.int_signed).?.data;
- return writer.print("i{d}", .{bits});
- },
- .int_unsigned => {
- const bits = ty.castTag(.int_unsigned).?.data;
- return writer.print("u{d}", .{bits});
- },
.optional => {
const child_type = ty.castTag(.optional).?.data;
try writer.writeByte('?');
@@ -2317,10 +2390,6 @@ pub const Type = extern union {
.noreturn => return Value.initTag(.noreturn_type),
.null => return Value.initTag(.null_type),
.undefined => return Value.initTag(.undefined_type),
- .fn_noreturn_no_args => return Value.initTag(.fn_noreturn_no_args_type),
- .fn_void_no_args => return Value.initTag(.fn_void_no_args_type),
- .fn_naked_noreturn_no_args => return Value.initTag(.fn_naked_noreturn_no_args_type),
- .fn_ccc_void_no_args => return Value.initTag(.fn_ccc_void_no_args_type),
.single_const_pointer_to_comptime_int => return Value.initTag(.single_const_pointer_to_comptime_int_type),
.const_slice_u8 => return Value.initTag(.const_slice_u8_type),
.const_slice_u8_sentinel_0 => return Value.initTag(.const_slice_u8_sentinel_0_type),
@@ -2360,9 +2429,24 @@ pub const Type = extern union {
/// may return false positives.
pub fn hasRuntimeBitsAdvanced(
ty: Type,
+ mod: *const Module,
ignore_comptime_only: bool,
strat: AbiAlignmentAdvancedStrat,
) RuntimeBitsError!bool {
+ if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => |int_type| return int_type.bits != 0,
+ .ptr_type => @panic("TODO"),
+ .array_type => @panic("TODO"),
+ .vector_type => @panic("TODO"),
+ .optional_type => @panic("TODO"),
+ .error_union_type => @panic("TODO"),
+ .simple_type => @panic("TODO"),
+ .struct_type => @panic("TODO"),
+ .simple_value => unreachable,
+ .extern_func => unreachable,
+ .int => unreachable,
+ .enum_tag => unreachable, // it's a value, not a type
+ };
switch (ty.tag()) {
.u1,
.u8,
@@ -2440,12 +2524,12 @@ pub const Type = extern union {
=> {
if (ignore_comptime_only) {
return true;
- } else if (ty.childType().zigTypeTag() == .Fn) {
+ } else if (ty.childType().zigTypeTag(mod) == .Fn) {
return !ty.childType().fnInfo().is_generic;
} else if (strat == .sema) {
return !(try strat.sema.typeRequiresComptime(ty));
} else {
- return !comptimeOnly(ty);
+ return !comptimeOnly(ty, mod);
}
},
@@ -2465,10 +2549,6 @@ pub const Type = extern union {
// Special exceptions have to be made when emitting functions due to
// this returning false.
.function,
- .fn_noreturn_no_args,
- .fn_void_no_args,
- .fn_naked_noreturn_no_args,
- .fn_ccc_void_no_args,
=> return false,
.optional => {
@@ -2483,7 +2563,7 @@ pub const Type = extern union {
} else if (strat == .sema) {
return !(try strat.sema.typeRequiresComptime(child_ty));
} else {
- return !comptimeOnly(child_ty);
+ return !comptimeOnly(child_ty, mod);
}
},
@@ -2502,7 +2582,7 @@ pub const Type = extern union {
}
for (struct_obj.fields.values()) |field| {
if (field.is_comptime) continue;
- if (try field.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat))
+ if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
return true;
} else {
return false;
@@ -2511,16 +2591,15 @@ pub const Type = extern union {
.enum_full => {
const enum_full = ty.castTag(.enum_full).?.data;
- return enum_full.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat);
+ return enum_full.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
},
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
return enum_simple.fields.count() >= 2;
},
.enum_numbered, .enum_nonexhaustive => {
- var buffer: Payload.Bits = undefined;
- const int_tag_ty = ty.intTagType(&buffer);
- return int_tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat);
+ const int_tag_ty = ty.intTagType();
+ return int_tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
},
.@"union" => {
@@ -2537,7 +2616,7 @@ pub const Type = extern union {
.lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy,
}
for (union_obj.fields.values()) |value| {
- if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat))
+ if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
return true;
} else {
return false;
@@ -2545,7 +2624,7 @@ pub const Type = extern union {
},
.union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
- if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) {
+ if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) {
return true;
}
@@ -2555,7 +2634,7 @@ pub const Type = extern union {
.lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy,
}
for (union_obj.fields.values()) |value| {
- if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat))
+ if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
return true;
} else {
return false;
@@ -2563,18 +2642,16 @@ pub const Type = extern union {
},
.array, .vector => return ty.arrayLen() != 0 and
- try ty.elemType().hasRuntimeBitsAdvanced(ignore_comptime_only, strat),
+ try ty.elemType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
.array_u8 => return ty.arrayLen() != 0,
- .array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(ignore_comptime_only, strat),
-
- .int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data != 0,
+ .array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
for (tuple.types, 0..) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
- if (try field_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) return true;
+ if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true;
}
return false;
},
@@ -2588,7 +2665,21 @@ pub const Type = extern union {
/// true if and only if the type has a well-defined memory layout
/// readFrom/writeToMemory are supported only for types with a well-
/// defined memory layout
- pub fn hasWellDefinedLayout(ty: Type) bool {
+ pub fn hasWellDefinedLayout(ty: Type, mod: *const Module) bool {
+ if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => return true,
+ .ptr_type => @panic("TODO"),
+ .array_type => @panic("TODO"),
+ .vector_type => @panic("TODO"),
+ .optional_type => @panic("TODO"),
+ .error_union_type => @panic("TODO"),
+ .simple_type => @panic("TODO"),
+ .struct_type => @panic("TODO"),
+ .simple_value => unreachable,
+ .extern_func => unreachable,
+ .int => unreachable,
+ .enum_tag => unreachable, // it's a value, not a type
+ };
return switch (ty.tag()) {
.u1,
.u8,
@@ -2626,8 +2717,6 @@ pub const Type = extern union {
.manyptr_const_u8_sentinel_0,
.array_u8,
.array_u8_sentinel_0,
- .int_signed,
- .int_unsigned,
.pointer,
.single_const_pointer,
.single_mut_pointer,
@@ -2670,10 +2759,6 @@ pub const Type = extern union {
.enum_literal,
.type_info,
// These are function bodies, not function pointers.
- .fn_noreturn_no_args,
- .fn_void_no_args,
- .fn_naked_noreturn_no_args,
- .fn_ccc_void_no_args,
.function,
.const_slice_u8,
.const_slice_u8_sentinel_0,
@@ -2698,25 +2783,25 @@ pub const Type = extern union {
.array,
.array_sentinel,
- => ty.childType().hasWellDefinedLayout(),
+ => ty.childType().hasWellDefinedLayout(mod),
- .optional => ty.isPtrLikeOptional(),
+ .optional => ty.isPtrLikeOptional(mod),
.@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto,
.@"union", .union_safety_tagged => ty.cast(Payload.Union).?.data.layout != .Auto,
.union_tagged => false,
};
}
- pub fn hasRuntimeBits(ty: Type) bool {
- return hasRuntimeBitsAdvanced(ty, false, .eager) catch unreachable;
+ pub fn hasRuntimeBits(ty: Type, mod: *const Module) bool {
+ return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable;
}
- pub fn hasRuntimeBitsIgnoreComptime(ty: Type) bool {
- return hasRuntimeBitsAdvanced(ty, true, .eager) catch unreachable;
+ pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *const Module) bool {
+ return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable;
}
- pub fn isFnOrHasRuntimeBits(ty: Type) bool {
- switch (ty.zigTypeTag()) {
+ pub fn isFnOrHasRuntimeBits(ty: Type, mod: *const Module) bool {
+ switch (ty.zigTypeTag(mod)) {
.Fn => {
const fn_info = ty.fnInfo();
if (fn_info.is_generic) return false;
@@ -2727,18 +2812,18 @@ pub const Type = extern union {
.Inline => return false,
else => {},
}
- if (fn_info.return_type.comptimeOnly()) return false;
+ if (fn_info.return_type.comptimeOnly(mod)) return false;
return true;
},
- else => return ty.hasRuntimeBits(),
+ else => return ty.hasRuntimeBits(mod),
}
}
/// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive.
- pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type) bool {
- return switch (ty.zigTypeTag()) {
+ pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *const Module) bool {
+ return switch (ty.zigTypeTag(mod)) {
.Fn => true,
- else => return ty.hasRuntimeBitsIgnoreComptime(),
+ else => return ty.hasRuntimeBitsIgnoreComptime(mod),
};
}
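
Since `hasRuntimeBits`, `hasRuntimeBitsIgnoreComptime`, and the `isFnOrHasRuntimeBits` pair now take `*const Module`, every caller has to thread the module through instead of calling them argument-free. A small call-site sketch, not part of the diff; the loop and the `fields`/`mod` names are illustrative:

for (fields.values()) |field| {
    // before this commit: if (!field.ty.hasRuntimeBits()) continue;
    if (!field.ty.hasRuntimeBits(mod)) continue;
    // ... lower the runtime field ...
}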
@@ -2761,11 +2846,11 @@ pub const Type = extern union {
}
/// Returns 0 if the pointer is naturally aligned and the element type is 0-bit.
- pub fn ptrAlignment(ty: Type, target: Target) u32 {
- return ptrAlignmentAdvanced(ty, target, null) catch unreachable;
+ pub fn ptrAlignment(ty: Type, mod: *const Module) u32 {
+ return ptrAlignmentAdvanced(ty, mod, null) catch unreachable;
}
- pub fn ptrAlignmentAdvanced(ty: Type, target: Target, opt_sema: ?*Sema) !u32 {
+ pub fn ptrAlignmentAdvanced(ty: Type, mod: *const Module, opt_sema: ?*Sema) !u32 {
switch (ty.tag()) {
.single_const_pointer,
.single_mut_pointer,
@@ -2780,10 +2865,10 @@ pub const Type = extern union {
=> {
const child_type = ty.cast(Payload.ElemType).?.data;
if (opt_sema) |sema| {
- const res = try child_type.abiAlignmentAdvanced(target, .{ .sema = sema });
+ const res = try child_type.abiAlignmentAdvanced(mod, .{ .sema = sema });
return res.scalar;
}
- return (child_type.abiAlignmentAdvanced(target, .eager) catch unreachable).scalar;
+ return (child_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
},
.manyptr_u8,
@@ -2798,13 +2883,13 @@ pub const Type = extern union {
if (ptr_info.@"align" != 0) {
return ptr_info.@"align";
} else if (opt_sema) |sema| {
- const res = try ptr_info.pointee_type.abiAlignmentAdvanced(target, .{ .sema = sema });
+ const res = try ptr_info.pointee_type.abiAlignmentAdvanced(mod, .{ .sema = sema });
return res.scalar;
} else {
- return (ptr_info.pointee_type.abiAlignmentAdvanced(target, .eager) catch unreachable).scalar;
+ return (ptr_info.pointee_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
}
},
- .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(target, opt_sema),
+ .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(mod, opt_sema),
else => unreachable,
}
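
`ptrAlignment` and `ptrAlignmentAdvanced` keep their split between eager and Sema-driven resolution; only the `Target` parameter becomes `*const Module`, with the target fetched via `mod.getTarget()` where still needed. A sketch of both call shapes, not part of the diff; `ptr_ty`, `mod`, and `sema` are illustrative names:

// Eager: asserts the pointee's alignment is already resolved.
const eager_align: u32 = ptr_ty.ptrAlignment(mod);

// Sema-driven: may resolve the pointee type on demand and can fail with a CompileError.
const resolved_align: u32 = try ptr_ty.ptrAlignmentAdvanced(mod, sema);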
@@ -2843,13 +2928,13 @@ pub const Type = extern union {
}
/// Returns 0 for 0-bit types.
- pub fn abiAlignment(ty: Type, target: Target) u32 {
- return (ty.abiAlignmentAdvanced(target, .eager) catch unreachable).scalar;
+ pub fn abiAlignment(ty: Type, mod: *const Module) u32 {
+ return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
}
/// May capture a reference to `ty`.
- pub fn lazyAbiAlignment(ty: Type, target: Target, arena: Allocator) !Value {
- switch (try ty.abiAlignmentAdvanced(target, .{ .lazy = arena })) {
+ pub fn lazyAbiAlignment(ty: Type, mod: *const Module, arena: Allocator) !Value {
+ switch (try ty.abiAlignmentAdvanced(mod, .{ .lazy = arena })) {
.val => |val| return val,
.scalar => |x| return Value.Tag.int_u64.create(arena, x),
}
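
`abiAlignment` and `lazyAbiAlignment` migrate the same way, and `abiSize`/`lazyAbiSize` further down mirror them. The lazy variant still returns a `Value` that is either a resolved integer or a `lazy_align` value capturing `ty` for later resolution. A sketch under the new signatures, not part of the diff; `arena` is an illustrative allocator:

// Eager scalar query; asserts no Sema work is required.
const abi_align: u32 = ty.abiAlignment(mod);

// Lazy query; may hand back Value.Tag.lazy_align referencing `ty`.
const lazy_align_val: Value = try ty.lazyAbiAlignment(mod, arena);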
@@ -2874,9 +2959,29 @@ pub const Type = extern union {
/// necessary, possibly returning a CompileError.
pub fn abiAlignmentAdvanced(
ty: Type,
- target: Target,
+ mod: *const Module,
strat: AbiAlignmentAdvancedStrat,
) Module.CompileError!AbiAlignmentAdvanced {
+ const target = mod.getTarget();
+
+ if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => |int_type| {
+ if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 };
+ return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) };
+ },
+ .ptr_type => @panic("TODO"),
+ .array_type => @panic("TODO"),
+ .vector_type => @panic("TODO"),
+ .optional_type => @panic("TODO"),
+ .error_union_type => @panic("TODO"),
+ .simple_type => @panic("TODO"),
+ .struct_type => @panic("TODO"),
+ .simple_value => unreachable,
+ .extern_func => unreachable,
+ .int => unreachable,
+ .enum_tag => unreachable, // it's a value, not a type
+ };
+
const opt_sema = switch (strat) {
.sema => |sema| sema,
else => null,
@@ -2902,12 +3007,6 @@ pub const Type = extern union {
.anyopaque,
=> return AbiAlignmentAdvanced{ .scalar = 1 },
- .fn_noreturn_no_args, // represents machine code; not a pointer
- .fn_void_no_args, // represents machine code; not a pointer
- .fn_naked_noreturn_no_args, // represents machine code; not a pointer
- .fn_ccc_void_no_args, // represents machine code; not a pointer
- => return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) },
-
// represents machine code; not a pointer
.function => {
const alignment = ty.castTag(.function).?.data.alignment;
@@ -2958,12 +3057,11 @@ pub const Type = extern union {
.f80 => switch (target.c_type_bit_size(.longdouble)) {
80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
else => {
- var payload: Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = 80,
+ const u80_ty: Type = .{
+ .ip_index = .u80_type,
+ .legacy = undefined,
};
- const u80_ty = initPayload(&payload.base);
- return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, target) };
+ return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) };
},
},
.f128 => switch (target.c_type_bit_size(.longdouble)) {
@@ -2980,11 +3078,11 @@ pub const Type = extern union {
.error_set_merged,
=> return AbiAlignmentAdvanced{ .scalar = 2 },
- .array, .array_sentinel => return ty.elemType().abiAlignmentAdvanced(target, strat),
+ .array, .array_sentinel => return ty.elemType().abiAlignmentAdvanced(mod, strat),
.vector => {
const len = ty.arrayLen();
- const bits = try bitSizeAdvanced(ty.elemType(), target, opt_sema);
+ const bits = try bitSizeAdvanced(ty.elemType(), mod, opt_sema);
const bytes = ((bits * len) + 7) / 8;
const alignment = std.math.ceilPowerOfTwoAssert(u64, bytes);
return AbiAlignmentAdvanced{ .scalar = @intCast(u32, alignment) };
@@ -2996,34 +3094,28 @@ pub const Type = extern union {
.i64, .u64 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(64, target) },
.u128, .i128 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(128, target) },
- .int_signed, .int_unsigned => {
- const bits: u16 = ty.cast(Payload.Bits).?.data;
- if (bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 };
- return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(bits, target) };
- },
-
.optional => {
var buf: Payload.ElemType = undefined;
const child_type = ty.optionalChild(&buf);
- switch (child_type.zigTypeTag()) {
+ switch (child_type.zigTypeTag(mod)) {
.Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
- .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, target, strat),
+ .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat),
.NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 },
else => {},
}
switch (strat) {
.eager, .sema => {
- if (!(child_type.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
else => |e| return e,
})) {
return AbiAlignmentAdvanced{ .scalar = 1 };
}
- return child_type.abiAlignmentAdvanced(target, strat);
+ return child_type.abiAlignmentAdvanced(mod, strat);
},
- .lazy => |arena| switch (try child_type.abiAlignmentAdvanced(target, strat)) {
+ .lazy => |arena| switch (try child_type.abiAlignmentAdvanced(mod, strat)) {
.scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) },
.val => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
},
@@ -3034,10 +3126,10 @@ pub const Type = extern union {
// This code needs to be kept in sync with the equivalent switch prong
// in abiSizeAdvanced.
const data = ty.castTag(.error_union).?.data;
- const code_align = abiAlignment(Type.anyerror, target);
+ const code_align = abiAlignment(Type.anyerror, mod);
switch (strat) {
.eager, .sema => {
- if (!(data.payload.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
else => |e| return e,
})) {
@@ -3045,11 +3137,11 @@ pub const Type = extern union {
}
return AbiAlignmentAdvanced{ .scalar = @max(
code_align,
- (try data.payload.abiAlignmentAdvanced(target, strat)).scalar,
+ (try data.payload.abiAlignmentAdvanced(mod, strat)).scalar,
) };
},
.lazy => |arena| {
- switch (try data.payload.abiAlignmentAdvanced(target, strat)) {
+ switch (try data.payload.abiAlignmentAdvanced(mod, strat)) {
.scalar => |payload_align| {
return AbiAlignmentAdvanced{
.scalar = @max(code_align, payload_align),
@@ -3089,20 +3181,20 @@ pub const Type = extern union {
.eager => {},
}
assert(struct_obj.haveLayout());
- return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(target) };
+ return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) };
}
const fields = ty.structFields();
var big_align: u32 = 0;
for (fields.values()) |field| {
- if (!(field.ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
else => |e| return e,
})) continue;
const field_align = if (field.abi_align != 0)
field.abi_align
- else switch (try field.ty.abiAlignmentAdvanced(target, strat)) {
+ else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) {
.scalar => |a| a,
.val => switch (strat) {
.eager => unreachable, // struct layout not resolved
@@ -3114,7 +3206,7 @@ pub const Type = extern union {
// This logic is duplicated in Module.Struct.Field.alignment.
if (struct_obj.layout == .Extern or target.ofmt == .c) {
- if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) {
+ if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
// The C ABI requires 128 bit integer fields of structs
// to be 16-bytes aligned.
big_align = @max(big_align, 16);
@@ -3130,9 +3222,9 @@ pub const Type = extern union {
for (tuple.types, 0..) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
- if (!(field_ty.hasRuntimeBits())) continue;
+ if (!(field_ty.hasRuntimeBits(mod))) continue;
- switch (try field_ty.abiAlignmentAdvanced(target, strat)) {
+ switch (try field_ty.abiAlignmentAdvanced(mod, strat)) {
.scalar => |field_align| big_align = @max(big_align, field_align),
.val => switch (strat) {
.eager => unreachable, // field type alignment not resolved
@@ -3145,17 +3237,16 @@ pub const Type = extern union {
},
.enum_full, .enum_nonexhaustive, .enum_simple, .enum_numbered => {
- var buffer: Payload.Bits = undefined;
- const int_tag_ty = ty.intTagType(&buffer);
- return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(target) };
+ const int_tag_ty = ty.intTagType();
+ return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(mod) };
},
.@"union" => {
const union_obj = ty.castTag(.@"union").?.data;
- return abiAlignmentAdvancedUnion(ty, target, strat, union_obj, false);
+ return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, false);
},
.union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
- return abiAlignmentAdvancedUnion(ty, target, strat, union_obj, true);
+ return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, true);
},
.empty_struct,
@@ -3181,7 +3272,7 @@ pub const Type = extern union {
pub fn abiAlignmentAdvancedUnion(
ty: Type,
- target: Target,
+ mod: *const Module,
strat: AbiAlignmentAdvancedStrat,
union_obj: *Module.Union,
have_tag: bool,
@@ -3195,6 +3286,7 @@ pub const Type = extern union {
// We'll guess "pointer-aligned", if the union has an
// underaligned pointer field then some allocations
// might require explicit alignment.
+ const target = mod.getTarget();
return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
}
_ = try sema.resolveTypeFields(ty);
@@ -3206,23 +3298,23 @@ pub const Type = extern union {
};
if (union_obj.fields.count() == 0) {
if (have_tag) {
- return abiAlignmentAdvanced(union_obj.tag_ty, target, strat);
+ return abiAlignmentAdvanced(union_obj.tag_ty, mod, strat);
} else {
return AbiAlignmentAdvanced{ .scalar = @boolToInt(union_obj.layout == .Extern) };
}
}
var max_align: u32 = 0;
- if (have_tag) max_align = union_obj.tag_ty.abiAlignment(target);
+ if (have_tag) max_align = union_obj.tag_ty.abiAlignment(mod);
for (union_obj.fields.values()) |field| {
- if (!(field.ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
else => |e| return e,
})) continue;
const field_align = if (field.abi_align != 0)
field.abi_align
- else switch (try field.ty.abiAlignmentAdvanced(target, strat)) {
+ else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) {
.scalar => |a| a,
.val => switch (strat) {
.eager => unreachable, // struct layout not resolved
@@ -3236,8 +3328,8 @@ pub const Type = extern union {
}
/// May capture a reference to `ty`.
- pub fn lazyAbiSize(ty: Type, target: Target, arena: Allocator) !Value {
- switch (try ty.abiSizeAdvanced(target, .{ .lazy = arena })) {
+ pub fn lazyAbiSize(ty: Type, mod: *const Module, arena: Allocator) !Value {
+ switch (try ty.abiSizeAdvanced(mod, .{ .lazy = arena })) {
.val => |val| return val,
.scalar => |x| return Value.Tag.int_u64.create(arena, x),
}
@@ -3245,8 +3337,8 @@ pub const Type = extern union {
/// Asserts the type has the ABI size already resolved.
/// Types that return false for hasRuntimeBits() return 0.
- pub fn abiSize(ty: Type, target: Target) u64 {
- return (abiSizeAdvanced(ty, target, .eager) catch unreachable).scalar;
+ pub fn abiSize(ty: Type, mod: *const Module) u64 {
+ return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar;
}
const AbiSizeAdvanced = union(enum) {
@@ -3262,14 +3354,30 @@ pub const Type = extern union {
/// necessary, possibly returning a CompileError.
pub fn abiSizeAdvanced(
ty: Type,
- target: Target,
+ mod: *const Module,
strat: AbiAlignmentAdvancedStrat,
) Module.CompileError!AbiSizeAdvanced {
+ const target = mod.getTarget();
+
+ if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => |int_type| {
+ if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 };
+ return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target) };
+ },
+ .ptr_type => @panic("TODO"),
+ .array_type => @panic("TODO"),
+ .vector_type => @panic("TODO"),
+ .optional_type => @panic("TODO"),
+ .error_union_type => @panic("TODO"),
+ .simple_type => @panic("TODO"),
+ .struct_type => @panic("TODO"),
+ .simple_value => unreachable,
+ .extern_func => unreachable,
+ .int => unreachable,
+ .enum_tag => unreachable, // it's a value, not a type
+ };
+
switch (ty.tag()) {
- .fn_noreturn_no_args => unreachable, // represents machine code; not a pointer
- .fn_void_no_args => unreachable, // represents machine code; not a pointer
- .fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer
- .fn_ccc_void_no_args => unreachable, // represents machine code; not a pointer
.function => unreachable, // represents machine code; not a pointer
.@"opaque" => unreachable, // no size available
.noreturn => unreachable,
@@ -3308,7 +3416,7 @@ pub const Type = extern union {
.eager => {},
}
assert(struct_obj.haveLayout());
- return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(target) };
+ return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) };
},
else => {
switch (strat) {
@@ -3327,22 +3435,21 @@ pub const Type = extern union {
if (field_count == 0) {
return AbiSizeAdvanced{ .scalar = 0 };
}
- return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, target) };
+ return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
},
},
.enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
- var buffer: Payload.Bits = undefined;
- const int_tag_ty = ty.intTagType(&buffer);
- return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(target) };
+ const int_tag_ty = ty.intTagType();
+ return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(mod) };
},
.@"union" => {
const union_obj = ty.castTag(.@"union").?.data;
- return abiSizeAdvancedUnion(ty, target, strat, union_obj, false);
+ return abiSizeAdvancedUnion(ty, mod, strat, union_obj, false);
},
.union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
- return abiSizeAdvancedUnion(ty, target, strat, union_obj, true);
+ return abiSizeAdvancedUnion(ty, mod, strat, union_obj, true);
},
.u1,
@@ -3361,7 +3468,7 @@ pub const Type = extern union {
.array_u8_sentinel_0 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8_sentinel_0).?.data + 1 },
.array => {
const payload = ty.castTag(.array).?.data;
- switch (try payload.elem_type.abiSizeAdvanced(target, strat)) {
+ switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) {
.scalar => |elem_size| return AbiSizeAdvanced{ .scalar = payload.len * elem_size },
.val => switch (strat) {
.sema => unreachable,
@@ -3372,7 +3479,7 @@ pub const Type = extern union {
},
.array_sentinel => {
const payload = ty.castTag(.array_sentinel).?.data;
- switch (try payload.elem_type.abiSizeAdvanced(target, strat)) {
+ switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) {
.scalar => |elem_size| return AbiSizeAdvanced{ .scalar = (payload.len + 1) * elem_size },
.val => switch (strat) {
.sema => unreachable,
@@ -3391,10 +3498,10 @@ pub const Type = extern union {
.val = try Value.Tag.lazy_size.create(arena, ty),
},
};
- const elem_bits = try payload.elem_type.bitSizeAdvanced(target, opt_sema);
+ const elem_bits = try payload.elem_type.bitSizeAdvanced(mod, opt_sema);
const total_bits = elem_bits * payload.len;
const total_bytes = (total_bits + 7) / 8;
- const alignment = switch (try ty.abiAlignmentAdvanced(target, strat)) {
+ const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) {
.scalar => |x| x,
.val => return AbiSizeAdvanced{
.val = try Value.Tag.lazy_size.create(strat.lazy, ty),
@@ -3450,12 +3557,11 @@ pub const Type = extern union {
.f80 => switch (target.c_type_bit_size(.longdouble)) {
80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) },
else => {
- var payload: Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = 80,
+ const u80_ty: Type = .{
+ .ip_index = .u80_type,
+ .legacy = undefined,
};
- const u80_ty = initPayload(&payload.base);
- return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, target) };
+ return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) };
},
},
@@ -3473,11 +3579,6 @@ pub const Type = extern union {
.i32, .u32 => return AbiSizeAdvanced{ .scalar = intAbiSize(32, target) },
.i64, .u64 => return AbiSizeAdvanced{ .scalar = intAbiSize(64, target) },
.u128, .i128 => return AbiSizeAdvanced{ .scalar = intAbiSize(128, target) },
- .int_signed, .int_unsigned => {
- const bits: u16 = ty.cast(Payload.Bits).?.data;
- if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 };
- return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target) };
- },
.optional => {
var buf: Payload.ElemType = undefined;
@@ -3487,16 +3588,16 @@ pub const Type = extern union {
return AbiSizeAdvanced{ .scalar = 0 };
}
- if (!(child_type.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) },
else => |e| return e,
})) return AbiSizeAdvanced{ .scalar = 1 };
- if (ty.optionalReprIsPayload()) {
- return abiSizeAdvanced(child_type, target, strat);
+ if (ty.optionalReprIsPayload(mod)) {
+ return abiSizeAdvanced(child_type, mod, strat);
}
- const payload_size = switch (try child_type.abiSizeAdvanced(target, strat)) {
+ const payload_size = switch (try child_type.abiSizeAdvanced(mod, strat)) {
.scalar => |elem_size| elem_size,
.val => switch (strat) {
.sema => unreachable,
@@ -3510,7 +3611,7 @@ pub const Type = extern union {
// guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
// to the child type's ABI alignment.
return AbiSizeAdvanced{
- .scalar = child_type.abiAlignment(target) + payload_size,
+ .scalar = child_type.abiAlignment(mod) + payload_size,
};
},
@@ -3518,17 +3619,17 @@ pub const Type = extern union {
// This code needs to be kept in sync with the equivalent switch prong
// in abiAlignmentAdvanced.
const data = ty.castTag(.error_union).?.data;
- const code_size = abiSize(Type.anyerror, target);
- if (!(data.payload.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ const code_size = abiSize(Type.anyerror, mod);
+ if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) },
else => |e| return e,
})) {
// Same as anyerror.
return AbiSizeAdvanced{ .scalar = code_size };
}
- const code_align = abiAlignment(Type.anyerror, target);
- const payload_align = abiAlignment(data.payload, target);
- const payload_size = switch (try data.payload.abiSizeAdvanced(target, strat)) {
+ const code_align = abiAlignment(Type.anyerror, mod);
+ const payload_align = abiAlignment(data.payload, mod);
+ const payload_size = switch (try data.payload.abiSizeAdvanced(mod, strat)) {
.scalar => |elem_size| elem_size,
.val => switch (strat) {
.sema => unreachable,
@@ -3556,7 +3657,7 @@ pub const Type = extern union {
pub fn abiSizeAdvancedUnion(
ty: Type,
- target: Target,
+ mod: *const Module,
strat: AbiAlignmentAdvancedStrat,
union_obj: *Module.Union,
have_tag: bool,
@@ -3570,7 +3671,7 @@ pub const Type = extern union {
},
.eager => {},
}
- return AbiSizeAdvanced{ .scalar = union_obj.abiSize(target, have_tag) };
+ return AbiSizeAdvanced{ .scalar = union_obj.abiSize(mod, have_tag) };
}
fn intAbiSize(bits: u16, target: Target) u64 {
@@ -3585,8 +3686,8 @@ pub const Type = extern union {
);
}
- pub fn bitSize(ty: Type, target: Target) u64 {
- return bitSizeAdvanced(ty, target, null) catch unreachable;
+ pub fn bitSize(ty: Type, mod: *const Module) u64 {
+ return bitSizeAdvanced(ty, mod, null) catch unreachable;
}
/// If you pass `opt_sema`, any recursive type resolutions will happen if
@@ -3594,15 +3695,29 @@ pub const Type = extern union {
/// the type is fully resolved, and there will be no error, guaranteed.
pub fn bitSizeAdvanced(
ty: Type,
- target: Target,
+ mod: *const Module,
opt_sema: ?*Sema,
) Module.CompileError!u64 {
+ const target = mod.getTarget();
+
+ if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => |int_type| return int_type.bits,
+ .ptr_type => @panic("TODO"),
+ .array_type => @panic("TODO"),
+ .vector_type => @panic("TODO"),
+ .optional_type => @panic("TODO"),
+ .error_union_type => @panic("TODO"),
+ .simple_type => @panic("TODO"),
+ .struct_type => @panic("TODO"),
+ .simple_value => unreachable,
+ .extern_func => unreachable,
+ .int => unreachable,
+ .enum_tag => unreachable, // it's a value, not a type
+ };
+
const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
+
switch (ty.tag()) {
- .fn_noreturn_no_args => unreachable, // represents machine code; not a pointer
- .fn_void_no_args => unreachable, // represents machine code; not a pointer
- .fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer
- .fn_ccc_void_no_args => unreachable, // represents machine code; not a pointer
.function => unreachable, // represents machine code; not a pointer
.anyopaque => unreachable,
.type => unreachable,
@@ -3633,68 +3748,67 @@ pub const Type = extern union {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
if (struct_obj.layout != .Packed) {
- return (try ty.abiSizeAdvanced(target, strat)).scalar * 8;
+ return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
}
if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty);
assert(struct_obj.haveLayout());
- return try struct_obj.backing_int_ty.bitSizeAdvanced(target, opt_sema);
+ return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema);
},
.tuple, .anon_struct => {
if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty);
if (ty.containerLayout() != .Packed) {
- return (try ty.abiSizeAdvanced(target, strat)).scalar * 8;
+ return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
}
var total: u64 = 0;
for (ty.tupleFields().types) |field_ty| {
- total += try bitSizeAdvanced(field_ty, target, opt_sema);
+ total += try bitSizeAdvanced(field_ty, mod, opt_sema);
}
return total;
},
.enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
- var buffer: Payload.Bits = undefined;
- const int_tag_ty = ty.intTagType(&buffer);
- return try bitSizeAdvanced(int_tag_ty, target, opt_sema);
+ const int_tag_ty = ty.intTagType();
+ return try bitSizeAdvanced(int_tag_ty, mod, opt_sema);
},
.@"union", .union_safety_tagged, .union_tagged => {
if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty);
if (ty.containerLayout() != .Packed) {
- return (try ty.abiSizeAdvanced(target, strat)).scalar * 8;
+ return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
}
const union_obj = ty.cast(Payload.Union).?.data;
assert(union_obj.haveFieldTypes());
var size: u64 = 0;
for (union_obj.fields.values()) |field| {
- size = @max(size, try bitSizeAdvanced(field.ty, target, opt_sema));
+ size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema));
}
return size;
},
.vector => {
const payload = ty.castTag(.vector).?.data;
- const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, opt_sema);
+ const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema);
return elem_bit_size * payload.len;
},
.array_u8 => return 8 * ty.castTag(.array_u8).?.data,
.array_u8_sentinel_0 => return 8 * (ty.castTag(.array_u8_sentinel_0).?.data + 1),
.array => {
const payload = ty.castTag(.array).?.data;
- const elem_size = std.math.max(payload.elem_type.abiAlignment(target), payload.elem_type.abiSize(target));
+ const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod));
if (elem_size == 0 or payload.len == 0)
return @as(u64, 0);
- const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, opt_sema);
+ const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema);
return (payload.len - 1) * 8 * elem_size + elem_bit_size;
},
.array_sentinel => {
const payload = ty.castTag(.array_sentinel).?.data;
const elem_size = std.math.max(
- payload.elem_type.abiAlignment(target),
- payload.elem_type.abiSize(target),
+ payload.elem_type.abiAlignment(mod),
+ payload.elem_type.abiSize(mod),
);
- const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, opt_sema);
+ const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema);
return payload.len * 8 * elem_size + elem_bit_size;
},
@@ -3757,12 +3871,10 @@ pub const Type = extern union {
.error_set_merged,
=> return 16, // TODO revisit this when we have the concept of the error tag type
- .int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data,
-
.optional, .error_union => {
// Optionals and error unions are not packed so their bitsize
// includes padding bits.
- return (try abiSizeAdvanced(ty, target, strat)).scalar * 8;
+ return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8;
},
.atomic_order,
@@ -3782,8 +3894,8 @@ pub const Type = extern union {
/// Returns true if the type's layout is already resolved and it is safe
/// to use `abiSize`, `abiAlignment` and `bitSize` on it.
- pub fn layoutIsResolved(ty: Type) bool {
- switch (ty.zigTypeTag()) {
+ pub fn layoutIsResolved(ty: Type, mod: *const Module) bool {
+ switch (ty.zigTypeTag(mod)) {
.Struct => {
if (ty.castTag(.@"struct")) |struct_ty| {
return struct_ty.data.haveLayout();
@@ -3798,16 +3910,16 @@ pub const Type = extern union {
},
.Array => {
if (ty.arrayLenIncludingSentinel() == 0) return true;
- return ty.childType().layoutIsResolved();
+ return ty.childType().layoutIsResolved(mod);
},
.Optional => {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&buf);
- return payload_ty.layoutIsResolved();
+ return payload_ty.layoutIsResolved(mod);
},
.ErrorUnion => {
const payload_ty = ty.errorUnionPayload();
- return payload_ty.layoutIsResolved();
+ return payload_ty.layoutIsResolved(mod);
},
else => return true,
}
@@ -3994,13 +4106,13 @@ pub const Type = extern union {
};
}
- pub fn isAllowzeroPtr(self: Type) bool {
+ pub fn isAllowzeroPtr(self: Type, mod: *const Module) bool {
return switch (self.tag()) {
.pointer => {
const payload = self.castTag(.pointer).?.data;
return payload.@"allowzero";
},
- else => return self.zigTypeTag() == .Optional,
+ else => return self.zigTypeTag(mod) == .Optional,
};
}
@@ -4016,7 +4128,7 @@ pub const Type = extern union {
};
}
- pub fn isPtrAtRuntime(self: Type) bool {
+ pub fn isPtrAtRuntime(self: Type, mod: *const Module) bool {
switch (self.tag()) {
.c_const_pointer,
.c_mut_pointer,
@@ -4040,7 +4152,7 @@ pub const Type = extern union {
.optional => {
var buf: Payload.ElemType = undefined;
const child_type = self.optionalChild(&buf);
- if (child_type.zigTypeTag() != .Pointer) return false;
+ if (child_type.zigTypeTag(mod) != .Pointer) return false;
const info = child_type.ptrInfo().data;
switch (info.size) {
.Slice, .C => return false,
@@ -4054,15 +4166,15 @@ pub const Type = extern union {
/// For pointer-like optionals, returns true, otherwise returns the allowzero property
/// of pointers.
- pub fn ptrAllowsZero(ty: Type) bool {
- if (ty.isPtrLikeOptional()) {
+ pub fn ptrAllowsZero(ty: Type, mod: *const Module) bool {
+ if (ty.isPtrLikeOptional(mod)) {
return true;
}
return ty.ptrInfo().data.@"allowzero";
}
/// See also `isPtrLikeOptional`.
- pub fn optionalReprIsPayload(ty: Type) bool {
+ pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool {
switch (ty.tag()) {
.optional_single_const_pointer,
.optional_single_mut_pointer,
@@ -4072,7 +4184,7 @@ pub const Type = extern union {
.optional => {
const child_ty = ty.castTag(.optional).?.data;
- switch (child_ty.zigTypeTag()) {
+ switch (child_ty.zigTypeTag(mod)) {
.Pointer => {
const info = child_ty.ptrInfo().data;
switch (info.size) {
@@ -4093,7 +4205,7 @@ pub const Type = extern union {
/// Returns true if the type is optional and would be lowered to a single pointer
/// address value, using 0 for null. Note that this returns true for C pointers.
- pub fn isPtrLikeOptional(self: Type) bool {
+ pub fn isPtrLikeOptional(self: Type, mod: *const Module) bool {
switch (self.tag()) {
.optional_single_const_pointer,
.optional_single_mut_pointer,
@@ -4103,7 +4215,7 @@ pub const Type = extern union {
.optional => {
const child_ty = self.castTag(.optional).?.data;
- if (child_ty.zigTypeTag() != .Pointer) return false;
+ if (child_ty.zigTypeTag(mod) != .Pointer) return false;
const info = child_ty.ptrInfo().data;
switch (info.size) {
.Slice, .C => return false,
@@ -4166,7 +4278,7 @@ pub const Type = extern union {
/// For [N]T, returns T.
/// For []T, returns T.
/// For anyframe->T, returns T.
- pub fn elemType2(ty: Type) Type {
+ pub fn elemType2(ty: Type, mod: *const Module) Type {
return switch (ty.tag()) {
.vector => ty.castTag(.vector).?.data.elem_type,
.array => ty.castTag(.array).?.data.elem_type,
@@ -4181,7 +4293,7 @@ pub const Type = extern union {
.single_const_pointer,
.single_mut_pointer,
- => ty.castPointer().?.data.shallowElemType(),
+ => ty.castPointer().?.data.shallowElemType(mod),
.array_u8,
.array_u8_sentinel_0,
@@ -4197,7 +4309,7 @@ pub const Type = extern union {
const info = ty.castTag(.pointer).?.data;
const child_ty = info.pointee_type;
if (info.size == .One) {
- return child_ty.shallowElemType();
+ return child_ty.shallowElemType(mod);
} else {
return child_ty;
}
@@ -4213,16 +4325,16 @@ pub const Type = extern union {
};
}
- fn shallowElemType(child_ty: Type) Type {
- return switch (child_ty.zigTypeTag()) {
+ fn shallowElemType(child_ty: Type, mod: *const Module) Type {
+ return switch (child_ty.zigTypeTag(mod)) {
.Array, .Vector => child_ty.childType(),
else => child_ty,
};
}
/// For vectors, returns the element type. Otherwise returns self.
- pub fn scalarType(ty: Type) Type {
- return switch (ty.zigTypeTag()) {
+ pub fn scalarType(ty: Type, mod: *const Module) Type {
+ return switch (ty.zigTypeTag(mod)) {
.Vector => ty.childType(),
else => ty,
};
@@ -4360,19 +4472,19 @@ pub const Type = extern union {
return union_obj.fields.getIndex(name);
}
- pub fn unionHasAllZeroBitFieldTypes(ty: Type) bool {
- return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes();
+ pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *const Module) bool {
+ return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes(mod);
}
- pub fn unionGetLayout(ty: Type, target: Target) Module.Union.Layout {
+ pub fn unionGetLayout(ty: Type, mod: *const Module) Module.Union.Layout {
switch (ty.tag()) {
.@"union" => {
const union_obj = ty.castTag(.@"union").?.data;
- return union_obj.getLayout(target, false);
+ return union_obj.getLayout(mod, false);
},
.union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
- return union_obj.getLayout(target, true);
+ return union_obj.getLayout(mod, true);
},
else => unreachable,
}
@@ -4441,8 +4553,8 @@ pub const Type = extern union {
};
}
- pub fn isError(ty: Type) bool {
- return switch (ty.zigTypeTag()) {
+ pub fn isError(ty: Type, mod: *const Module) bool {
+ return switch (ty.zigTypeTag(mod)) {
.ErrorUnion, .ErrorSet => true,
else => false,
};
@@ -4543,14 +4655,21 @@ pub const Type = extern union {
}
/// Returns true if and only if the type is a fixed-width integer.
- pub fn isInt(self: Type) bool {
- return self.isSignedInt() or self.isUnsignedInt();
+ pub fn isInt(self: Type, mod: *const Module) bool {
+ return self.isSignedInt(mod) or self.isUnsignedInt(mod);
}
/// Returns true if and only if the type is a fixed-width, signed integer.
- pub fn isSignedInt(self: Type) bool {
- return switch (self.tag()) {
- .int_signed,
+ pub fn isSignedInt(ty: Type, mod: *const Module) bool {
+ if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => |int_type| return int_type.signedness == .signed,
+ .simple_type => |s| return switch (s) {
+ .c_char, .isize, .c_short, .c_int, .c_long, .c_longlong => true,
+ else => false,
+ },
+ else => return false,
+ };
+ return switch (ty.tag()) {
.i8,
.isize,
.c_char,
@@ -4569,9 +4688,16 @@ pub const Type = extern union {
}
/// Returns true if and only if the type is a fixed-width, unsigned integer.
- pub fn isUnsignedInt(self: Type) bool {
- return switch (self.tag()) {
- .int_unsigned,
+ pub fn isUnsignedInt(ty: Type, mod: *const Module) bool {
+ if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => |int_type| return int_type.signedness == .unsigned,
+ .simple_type => |s| return switch (s) {
+ .usize, .c_ushort, .c_uint, .c_ulong, .c_ulonglong => true,
+ else => false,
+ },
+ else => return false,
+ };
+ return switch (ty.tag()) {
.usize,
.c_ushort,
.c_uint,
@@ -4592,8 +4718,8 @@ pub const Type = extern union {
/// Returns true for integers, enums, error sets, and packed structs.
/// If this function returns true, then intInfo() can be called on the type.
- pub fn isAbiInt(ty: Type) bool {
- return switch (ty.zigTypeTag()) {
+ pub fn isAbiInt(ty: Type, mod: *const Module) bool {
+ return switch (ty.zigTypeTag(mod)) {
.Int, .Enum, .ErrorSet => true,
.Struct => ty.containerLayout() == .Packed,
else => false,
@@ -4601,17 +4727,26 @@ pub const Type = extern union {
}
/// Asserts the type is an integer, enum, error set, or vector of one of them.
- pub fn intInfo(self: Type, target: Target) std.builtin.Type.Int {
- var ty = self;
+ pub fn intInfo(starting_ty: Type, mod: *const Module) InternPool.Key.IntType {
+ const target = mod.getTarget();
+ var ty = starting_ty;
+
+ if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => |int_type| return int_type,
+ .ptr_type => @panic("TODO"),
+ .array_type => @panic("TODO"),
+ .vector_type => @panic("TODO"),
+ .optional_type => @panic("TODO"),
+ .error_union_type => @panic("TODO"),
+ .simple_type => @panic("TODO"),
+ .struct_type => unreachable,
+ .simple_value => unreachable,
+ .extern_func => unreachable,
+ .int => unreachable,
+ .enum_tag => unreachable, // it's a value, not a type
+ };
+
while (true) switch (ty.tag()) {
- .int_unsigned => return .{
- .signedness = .unsigned,
- .bits = ty.castTag(.int_unsigned).?.data,
- },
- .int_signed => return .{
- .signedness = .signed,
- .bits = ty.castTag(.int_signed).?.data,
- },
.u1 => return .{ .signedness = .unsigned, .bits = 1 },
.u8 => return .{ .signedness = .unsigned, .bits = 8 },
.i8 => return .{ .signedness = .signed, .bits = 8 },
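
Note the signature change in this hunk: `intInfo` now takes the module instead of a `Target` and returns `InternPool.Key.IntType` rather than `std.builtin.Type.Int`; for interned types the key is returned directly, and the legacy tags are translated into the same `.signedness`/`.bits` shape. A call-site sketch, not part of the diff; the names are illustrative:

const info = ty.intInfo(mod); // InternPool.Key.IntType
const is_signed = info.signedness == .signed;
const byte_estimate = (info.bits + 7) / 8; // illustrative arithmetic, not the ABI size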
@@ -4729,32 +4864,14 @@ pub const Type = extern union {
/// Asserts the type is a function.
pub fn fnParamLen(self: Type) usize {
- return switch (self.tag()) {
- .fn_noreturn_no_args => 0,
- .fn_void_no_args => 0,
- .fn_naked_noreturn_no_args => 0,
- .fn_ccc_void_no_args => 0,
- .function => self.castTag(.function).?.data.param_types.len,
-
- else => unreachable,
- };
+ return self.castTag(.function).?.data.param_types.len;
}
/// Asserts the type is a function. The length of the slice must be at least the length
/// given by `fnParamLen`.
pub fn fnParamTypes(self: Type, types: []Type) void {
- switch (self.tag()) {
- .fn_noreturn_no_args => return,
- .fn_void_no_args => return,
- .fn_naked_noreturn_no_args => return,
- .fn_ccc_void_no_args => return,
- .function => {
- const payload = self.castTag(.function).?.data;
- @memcpy(types[0..payload.param_types.len], payload.param_types);
- },
-
- else => unreachable,
- }
+ const payload = self.castTag(.function).?.data;
+ @memcpy(types[0..payload.param_types.len], payload.param_types);
}
/// Asserts the type is a function.
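
With the four `fn_*_no_args` tags gone, the function accessors in this region no longer special-case them: `fnParamLen`, `fnParamTypes`, `fnCallingConvention`, and `fnInfo` all assert a `.function` payload and read `Payload.Function.Data` directly, and `fnReturnType` (next hunk) additionally accepts a single-item pointer to a function. A sketch of the simplified accessor usage, not part of the diff; `fn_ty` is assumed to be a `.function`-tagged Type:

const info = fn_ty.fnInfo(); // Payload.Function.Data
const param_count = fn_ty.fnParamLen(); // == info.param_types.len
const ret_ty = fn_ty.fnReturnType(); // info.return_type; also works through a fn pointer
const cc = fn_ty.fnCallingConvention(); // info.cc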
@@ -4769,33 +4886,15 @@ pub const Type = extern union {
}
}
- /// Asserts the type is a function.
- pub fn fnReturnType(self: Type) Type {
- return switch (self.tag()) {
- .fn_noreturn_no_args => Type.initTag(.noreturn),
- .fn_naked_noreturn_no_args => Type.initTag(.noreturn),
-
- .fn_void_no_args,
- .fn_ccc_void_no_args,
- => Type.initTag(.void),
-
- .function => self.castTag(.function).?.data.return_type,
-
- else => unreachable,
- };
+ /// Asserts the type is a function or a function pointer.
+ pub fn fnReturnType(ty: Type) Type {
+ const fn_ty = if (ty.castPointer()) |p| p.data else ty;
+ return fn_ty.castTag(.function).?.data.return_type;
}
/// Asserts the type is a function.
pub fn fnCallingConvention(self: Type) std.builtin.CallingConvention {
- return switch (self.tag()) {
- .fn_noreturn_no_args => .Unspecified,
- .fn_void_no_args => .Unspecified,
- .fn_naked_noreturn_no_args => .Naked,
- .fn_ccc_void_no_args => .C,
- .function => self.castTag(.function).?.data.cc,
-
- else => unreachable,
- };
+ return self.castTag(.function).?.data.cc;
}
/// Asserts the type is a function.
@@ -4809,15 +4908,15 @@ pub const Type = extern union {
};
}
- pub fn isValidParamType(self: Type) bool {
- return switch (self.zigTypeTagOrPoison() catch return true) {
+ pub fn isValidParamType(self: Type, mod: *const Module) bool {
+ return switch (self.zigTypeTagOrPoison(mod) catch return true) {
.Undefined, .Null, .Opaque, .NoReturn => false,
else => true,
};
}
- pub fn isValidReturnType(self: Type) bool {
- return switch (self.zigTypeTagOrPoison() catch return true) {
+ pub fn isValidReturnType(self: Type, mod: *const Module) bool {
+ return switch (self.zigTypeTagOrPoison(mod) catch return true) {
.Undefined, .Null, .Opaque => false,
else => true,
};
@@ -4825,87 +4924,43 @@ pub const Type = extern union {
/// Asserts the type is a function.
pub fn fnIsVarArgs(self: Type) bool {
- return switch (self.tag()) {
- .fn_noreturn_no_args => false,
- .fn_void_no_args => false,
- .fn_naked_noreturn_no_args => false,
- .fn_ccc_void_no_args => false,
- .function => self.castTag(.function).?.data.is_var_args,
-
- else => unreachable,
- };
+ return self.castTag(.function).?.data.is_var_args;
}
pub fn fnInfo(ty: Type) Payload.Function.Data {
- return switch (ty.tag()) {
- .fn_noreturn_no_args => .{
- .param_types = &.{},
- .comptime_params = undefined,
- .return_type = initTag(.noreturn),
- .cc = .Unspecified,
- .alignment = 0,
- .is_var_args = false,
- .is_generic = false,
- .is_noinline = false,
- .align_is_generic = false,
- .cc_is_generic = false,
- .section_is_generic = false,
- .addrspace_is_generic = false,
- .noalias_bits = 0,
- },
- .fn_void_no_args => .{
- .param_types = &.{},
- .comptime_params = undefined,
- .return_type = initTag(.void),
- .cc = .Unspecified,
- .alignment = 0,
- .is_var_args = false,
- .is_generic = false,
- .is_noinline = false,
- .align_is_generic = false,
- .cc_is_generic = false,
- .section_is_generic = false,
- .addrspace_is_generic = false,
- .noalias_bits = 0,
- },
- .fn_naked_noreturn_no_args => .{
- .param_types = &.{},
- .comptime_params = undefined,
- .return_type = initTag(.noreturn),
- .cc = .Naked,
- .alignment = 0,
- .is_var_args = false,
- .is_generic = false,
- .is_noinline = false,
- .align_is_generic = false,
- .cc_is_generic = false,
- .section_is_generic = false,
- .addrspace_is_generic = false,
- .noalias_bits = 0,
- },
- .fn_ccc_void_no_args => .{
- .param_types = &.{},
- .comptime_params = undefined,
- .return_type = initTag(.void),
- .cc = .C,
- .alignment = 0,
- .is_var_args = false,
- .is_generic = false,
- .is_noinline = false,
- .align_is_generic = false,
- .cc_is_generic = false,
- .section_is_generic = false,
- .addrspace_is_generic = false,
- .noalias_bits = 0,
- },
- .function => ty.castTag(.function).?.data,
-
- else => unreachable,
- };
+ return ty.castTag(.function).?.data;
}
- pub fn isNumeric(self: Type) bool {
- return switch (self.tag()) {
+ pub fn isNumeric(ty: Type, mod: *const Module) bool {
+ if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => true,
+ .simple_type => |s| return switch (s) {
+ .f16,
+ .f32,
+ .f64,
+ .f80,
+ .f128,
+ .c_longdouble,
+ .comptime_int,
+ .comptime_float,
+ .usize,
+ .isize,
+ .c_char,
+ .c_short,
+ .c_ushort,
+ .c_int,
+ .c_uint,
+ .c_long,
+ .c_ulong,
+ .c_longlong,
+ .c_ulonglong,
+ => true,
+
+ else => false,
+ },
+ else => false,
+ };
+ return switch (ty.tag()) {
.f16,
.f32,
.f64,
@@ -4937,8 +4992,6 @@ pub const Type = extern union {
.c_ulong,
.c_longlong,
.c_ulonglong,
- .int_unsigned,
- .int_signed,
=> true,
else => false,
@@ -4947,8 +5000,30 @@ pub const Type = extern union {
/// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which
/// resolves field types rather than asserting they are already resolved.
- pub fn onePossibleValue(starting_type: Type) ?Value {
+ pub fn onePossibleValue(starting_type: Type, mod: *const Module) ?Value {
var ty = starting_type;
+
+ if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => |int_type| {
+ if (int_type.bits == 0) {
+ return Value.zero;
+ } else {
+ return null;
+ }
+ },
+ .ptr_type => @panic("TODO"),
+ .array_type => @panic("TODO"),
+ .vector_type => @panic("TODO"),
+ .optional_type => @panic("TODO"),
+ .error_union_type => @panic("TODO"),
+ .simple_type => @panic("TODO"),
+ .struct_type => @panic("TODO"),
+ .simple_value => unreachable,
+ .extern_func => unreachable,
+ .int => unreachable,
+ .enum_tag => unreachable, // it's a value, not a type
+ };
+
while (true) switch (ty.tag()) {
.f16,
.f32,
@@ -4988,10 +5063,6 @@ pub const Type = extern union {
.error_set_single,
.error_set,
.error_set_merged,
- .fn_noreturn_no_args,
- .fn_void_no_args,
- .fn_naked_noreturn_no_args,
- .fn_ccc_void_no_args,
.function,
.single_const_pointer_to_comptime_int,
.array_sentinel,
@@ -5047,7 +5118,7 @@ pub const Type = extern union {
assert(s.haveFieldTypes());
for (s.fields.values()) |field| {
if (field.is_comptime) continue;
- if (field.ty.onePossibleValue() != null) continue;
+ if (field.ty.onePossibleValue(mod) != null) continue;
return null;
}
return Value.initTag(.empty_struct_value);
@@ -5058,7 +5129,7 @@ pub const Type = extern union {
for (tuple.values, 0..) |val, i| {
const is_comptime = val.tag() != .unreachable_value;
if (is_comptime) continue;
- if (tuple.types[i].onePossibleValue() != null) continue;
+ if (tuple.types[i].onePossibleValue(mod) != null) continue;
return null;
}
return Value.initTag(.empty_struct_value);
@@ -5067,7 +5138,7 @@ pub const Type = extern union {
.enum_numbered => {
const enum_numbered = ty.castTag(.enum_numbered).?.data;
// An explicit tag type is always provided for enum_numbered.
- if (enum_numbered.tag_ty.hasRuntimeBits()) {
+ if (enum_numbered.tag_ty.hasRuntimeBits(mod)) {
return null;
}
assert(enum_numbered.fields.count() == 1);
@@ -5075,7 +5146,7 @@ pub const Type = extern union {
},
.enum_full => {
const enum_full = ty.castTag(.enum_full).?.data;
- if (enum_full.tag_ty.hasRuntimeBits()) {
+ if (enum_full.tag_ty.hasRuntimeBits(mod)) {
return null;
}
switch (enum_full.fields.count()) {
@@ -5098,7 +5169,7 @@ pub const Type = extern union {
},
.enum_nonexhaustive => {
const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty;
- if (!tag_ty.hasRuntimeBits()) {
+ if (!tag_ty.hasRuntimeBits(mod)) {
return Value.zero;
} else {
return null;
@@ -5106,10 +5177,10 @@ pub const Type = extern union {
},
.@"union", .union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
- const tag_val = union_obj.tag_ty.onePossibleValue() orelse return null;
+ const tag_val = union_obj.tag_ty.onePossibleValue(mod) orelse return null;
if (union_obj.fields.count() == 0) return Value.initTag(.unreachable_value);
const only_field = union_obj.fields.values()[0];
- const val_val = only_field.ty.onePossibleValue() orelse return null;
+ const val_val = only_field.ty.onePossibleValue(mod) orelse return null;
_ = tag_val;
_ = val_val;
return Value.initTag(.empty_struct_value);
@@ -5121,17 +5192,10 @@ pub const Type = extern union {
.null => return Value.initTag(.null_value),
.undefined => return Value.initTag(.undef),
- .int_unsigned, .int_signed => {
- if (ty.cast(Payload.Bits).?.data == 0) {
- return Value.zero;
- } else {
- return null;
- }
- },
.vector, .array, .array_u8 => {
if (ty.arrayLen() == 0)
return Value.initTag(.empty_array);
- if (ty.elemType().onePossibleValue() != null)
+ if (ty.elemType().onePossibleValue(mod) != null)
return Value.initTag(.the_only_possible_value);
return null;
},
@@ -5146,7 +5210,22 @@ pub const Type = extern union {
/// resolves field types rather than asserting they are already resolved.
/// TODO merge these implementations together with the "advanced" pattern seen
/// elsewhere in this file.
- pub fn comptimeOnly(ty: Type) bool {
+ pub fn comptimeOnly(ty: Type, mod: *const Module) bool {
+ if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .int_type => return false,
+ .ptr_type => @panic("TODO"),
+ .array_type => @panic("TODO"),
+ .vector_type => @panic("TODO"),
+ .optional_type => @panic("TODO"),
+ .error_union_type => @panic("TODO"),
+ .simple_type => @panic("TODO"),
+ .struct_type => @panic("TODO"),
+ .simple_value => unreachable,
+ .extern_func => unreachable,
+ .int => unreachable,
+ .enum_tag => unreachable, // it's a value, not a type
+ };
+
return switch (ty.tag()) {
.u1,
.u8,
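Note on the block added above: `comptimeOnly` now dispatches on `ip_index` first, consulting the intern pool for interned types and only falling through to the legacy `tag()` switch otherwise. A minimal sketch of that two-tier pattern, with the arm bodies reduced to the one case this commit actually fills in (the rest are still `@panic("TODO")` here):

    // Sketch only; mirrors the dispatch shape introduced in this hunk.
    fn isComptimeOnlySketch(ty: Type, mod: *const Module) bool {
        // Interned types: ask the InternPool for the type's key.
        if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
            .int_type => return false, // integer types are never comptime-only
            else => @panic("TODO"),    // remaining arms unimplemented in this commit
        };
        // Legacy types: fall back to the old tagged-union representation.
        return switch (ty.tag()) {
            else => false, // placeholder; the real switch follows in the diff
        };
    }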
@@ -5211,8 +5290,6 @@ pub const Type = extern union {
.generic_poison,
.array_u8,
.array_u8_sentinel_0,
- .int_signed,
- .int_unsigned,
.enum_simple,
=> false,
@@ -5223,10 +5300,6 @@ pub const Type = extern union {
.enum_literal,
.type_info,
// These are function bodies, not function pointers.
- .fn_noreturn_no_args,
- .fn_void_no_args,
- .fn_naked_noreturn_no_args,
- .fn_ccc_void_no_args,
.function,
=> true,
@@ -5236,7 +5309,7 @@ pub const Type = extern union {
.array,
.array_sentinel,
.vector,
- => return ty.childType().comptimeOnly(),
+ => return ty.childType().comptimeOnly(mod),
.pointer,
.single_const_pointer,
@@ -5249,10 +5322,10 @@ pub const Type = extern union {
.mut_slice,
=> {
const child_ty = ty.childType();
- if (child_ty.zigTypeTag() == .Fn) {
+ if (child_ty.zigTypeTag(mod) == .Fn) {
return false;
} else {
- return child_ty.comptimeOnly();
+ return child_ty.comptimeOnly(mod);
}
},
@@ -5261,14 +5334,14 @@ pub const Type = extern union {
.optional_single_const_pointer,
=> {
var buf: Type.Payload.ElemType = undefined;
- return ty.optionalChild(&buf).comptimeOnly();
+ return ty.optionalChild(&buf).comptimeOnly(mod);
},
.tuple, .anon_struct => {
const tuple = ty.tupleFields();
for (tuple.types, 0..) |field_ty, i| {
const have_comptime_val = tuple.values[i].tag() != .unreachable_value;
- if (!have_comptime_val and field_ty.comptimeOnly()) return true;
+ if (!have_comptime_val and field_ty.comptimeOnly(mod)) return true;
}
return false;
},
@@ -5301,48 +5374,48 @@ pub const Type = extern union {
}
},
- .error_union => return ty.errorUnionPayload().comptimeOnly(),
+ .error_union => return ty.errorUnionPayload().comptimeOnly(mod),
.anyframe_T => {
const child_ty = ty.castTag(.anyframe_T).?.data;
- return child_ty.comptimeOnly();
+ return child_ty.comptimeOnly(mod);
},
.enum_numbered => {
const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty;
- return tag_ty.comptimeOnly();
+ return tag_ty.comptimeOnly(mod);
},
.enum_full, .enum_nonexhaustive => {
const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty;
- return tag_ty.comptimeOnly();
+ return tag_ty.comptimeOnly(mod);
},
};
}
- pub fn isArrayOrVector(ty: Type) bool {
- return switch (ty.zigTypeTag()) {
+ pub fn isArrayOrVector(ty: Type, mod: *const Module) bool {
+ return switch (ty.zigTypeTag(mod)) {
.Array, .Vector => true,
else => false,
};
}
- pub fn isIndexable(ty: Type) bool {
- return switch (ty.zigTypeTag()) {
+ pub fn isIndexable(ty: Type, mod: *const Module) bool {
+ return switch (ty.zigTypeTag(mod)) {
.Array, .Vector => true,
.Pointer => switch (ty.ptrSize()) {
.Slice, .Many, .C => true,
- .One => ty.elemType().zigTypeTag() == .Array,
+ .One => ty.elemType().zigTypeTag(mod) == .Array,
},
.Struct => ty.isTuple(),
else => false,
};
}
- pub fn indexableHasLen(ty: Type) bool {
- return switch (ty.zigTypeTag()) {
+ pub fn indexableHasLen(ty: Type, mod: *const Module) bool {
+ return switch (ty.zigTypeTag(mod)) {
.Array, .Vector => true,
.Pointer => switch (ty.ptrSize()) {
.Many, .C => false,
.Slice => true,
- .One => ty.elemType().zigTypeTag() == .Array,
+ .One => ty.elemType().zigTypeTag(mod) == .Array,
},
.Struct => ty.isTuple(),
else => false,
@@ -5366,19 +5439,19 @@ pub const Type = extern union {
}
// Works for vectors and vectors of integers.
- pub fn minInt(ty: Type, arena: Allocator, target: Target) !Value {
- const scalar = try minIntScalar(ty.scalarType(), arena, target);
- if (ty.zigTypeTag() == .Vector and scalar.tag() != .the_only_possible_value) {
+ pub fn minInt(ty: Type, arena: Allocator, mod: *const Module) !Value {
+ const scalar = try minIntScalar(ty.scalarType(mod), arena, mod);
+ if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) {
return Value.Tag.repeated.create(arena, scalar);
} else {
return scalar;
}
}
- /// Asserts that self.zigTypeTag() == .Int.
- pub fn minIntScalar(ty: Type, arena: Allocator, target: Target) !Value {
- assert(ty.zigTypeTag() == .Int);
- const info = ty.intInfo(target);
+ /// Asserts that self.zigTypeTag(mod) == .Int.
+ pub fn minIntScalar(ty: Type, arena: Allocator, mod: *const Module) !Value {
+ assert(ty.zigTypeTag(mod) == .Int);
+ const info = ty.intInfo(mod);
if (info.bits == 0) {
return Value.initTag(.the_only_possible_value);
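The `minInt`/`minIntScalar` changes above follow the signature migration that runs through the whole commit: layout and integer queries take `mod: *const Module` instead of `std.Target`, and fetch the target themselves via `mod.getTarget()` when they still need it. A hedged before/after sketch from a hypothetical caller's point of view:

    // Before this commit (callers threaded a Target through):
    //   const info = ty.intInfo(target);
    //   const min = try ty.minInt(arena, target);
    // After (the Module carries the target and, increasingly, the intern pool):
    const info = ty.intInfo(mod);
    const min = try ty.minInt(arena, mod);
    const target = mod.getTarget(); // still available when raw target data is needed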
@@ -5405,9 +5478,9 @@ pub const Type = extern union {
}
// Works for vectors and vectors of integers.
- pub fn maxInt(ty: Type, arena: Allocator, target: Target) !Value {
- const scalar = try maxIntScalar(ty.scalarType(), arena, target);
- if (ty.zigTypeTag() == .Vector and scalar.tag() != .the_only_possible_value) {
+ pub fn maxInt(ty: Type, arena: Allocator, mod: *const Module) !Value {
+ const scalar = try maxIntScalar(ty.scalarType(mod), arena, mod);
+ if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) {
return Value.Tag.repeated.create(arena, scalar);
} else {
return scalar;
@@ -5415,9 +5488,9 @@ pub const Type = extern union {
}
/// Asserts that self.zigTypeTag() == .Int.
- pub fn maxIntScalar(self: Type, arena: Allocator, target: Target) !Value {
- assert(self.zigTypeTag() == .Int);
- const info = self.intInfo(target);
+ pub fn maxIntScalar(self: Type, arena: Allocator, mod: *const Module) !Value {
+ assert(self.zigTypeTag(mod) == .Int);
+ const info = self.intInfo(mod);
if (info.bits == 0) {
return Value.initTag(.the_only_possible_value);
@@ -5452,21 +5525,25 @@ pub const Type = extern union {
}
/// Asserts the type is an enum or a union.
- pub fn intTagType(ty: Type, buffer: *Payload.Bits) Type {
+ pub fn intTagType(ty: Type) Type {
switch (ty.tag()) {
.enum_full, .enum_nonexhaustive => return ty.cast(Payload.EnumFull).?.data.tag_ty,
.enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty,
.enum_simple => {
- const enum_simple = ty.castTag(.enum_simple).?.data;
- const field_count = enum_simple.fields.count();
- const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count);
- buffer.* = .{
- .base = .{ .tag = .int_unsigned },
- .data = bits,
- };
- return Type.initPayload(&buffer.base);
+ @panic("TODO move enum_simple to use the intern pool");
+ //const enum_simple = ty.castTag(.enum_simple).?.data;
+ //const field_count = enum_simple.fields.count();
+ //const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count);
+ //buffer.* = .{
+ // .base = .{ .tag = .int_unsigned },
+ // .data = bits,
+ //};
+ //return Type.initPayload(&buffer.base);
+ },
+ .union_tagged => {
+ @panic("TODO move union_tagged to use the intern pool");
+ //return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(buffer),
},
- .union_tagged => return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(buffer),
else => unreachable,
}
}
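With small integer types no longer built in stack buffers, `intTagType` drops its `*Payload.Bits` out-parameter, and the `enum_simple`/`union_tagged` arms panic until they move onto the intern pool. The corresponding caller change (the same edit appears in value.zig later in this diff):

    // Before:
    //   var buffer: Type.Payload.Bits = undefined;
    //   const int_tag_ty = ty.intTagType(&buffer);
    // After:
    const int_tag_ty = ty.intTagType();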
@@ -5566,7 +5643,7 @@ pub const Type = extern union {
};
const end_val = Value.initPayload(&end_payload.base);
if (int_val.compareAll(.gte, end_val, int_ty, m)) return null;
- return @intCast(usize, int_val.toUnsignedInt(m.getTarget()));
+ return @intCast(usize, int_val.toUnsignedInt(m));
}
};
switch (ty.tag()) {
@@ -5598,11 +5675,7 @@ pub const Type = extern union {
const enum_simple = ty.castTag(.enum_simple).?.data;
const fields_len = enum_simple.fields.count();
const bits = std.math.log2_int_ceil(usize, fields_len);
- var buffer: Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = bits,
- };
- const tag_ty = Type.initPayload(&buffer.base);
+ const tag_ty = mod.intType(.unsigned, bits) catch @panic("TODO: handle OOM here");
return S.fieldWithRange(tag_ty, enum_tag, fields_len, mod);
},
.atomic_order,
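The `enum_simple` arm above shows the replacement for on-stack `int_unsigned` payloads: integer types are now requested from the Module. A minimal sketch, assuming `Module.intType` interns the type and can therefore fail with `error.OutOfMemory` (which is why the diff uses `catch @panic(...)` in this non-erroring context):

    // In a function that can itself return errors, the same call is simply:
    const tag_ty = try mod.intType(.unsigned, bits);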
@@ -5675,19 +5748,19 @@ pub const Type = extern union {
}
}
- pub fn structFieldAlign(ty: Type, index: usize, target: Target) u32 {
+ pub fn structFieldAlign(ty: Type, index: usize, mod: *const Module) u32 {
switch (ty.tag()) {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
assert(struct_obj.layout != .Packed);
- return struct_obj.fields.values()[index].alignment(target, struct_obj.layout);
+ return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout);
},
.@"union", .union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
- return union_obj.fields.values()[index].normalAlignment(target);
+ return union_obj.fields.values()[index].normalAlignment(mod);
},
- .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(target),
- .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(target),
+ .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(mod),
+ .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(mod),
else => unreachable,
}
}
@@ -5710,7 +5783,7 @@ pub const Type = extern union {
}
}
- pub fn structFieldValueComptime(ty: Type, index: usize) ?Value {
+ pub fn structFieldValueComptime(ty: Type, mod: *const Module, index: usize) ?Value {
switch (ty.tag()) {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
@@ -5718,14 +5791,14 @@ pub const Type = extern union {
if (field.is_comptime) {
return field.default_val;
} else {
- return field.ty.onePossibleValue();
+ return field.ty.onePossibleValue(mod);
}
},
.tuple => {
const tuple = ty.castTag(.tuple).?.data;
const val = tuple.values[index];
if (val.tag() == .unreachable_value) {
- return tuple.types[index].onePossibleValue();
+ return tuple.types[index].onePossibleValue(mod);
} else {
return val;
}
@@ -5734,7 +5807,7 @@ pub const Type = extern union {
const anon_struct = ty.castTag(.anon_struct).?.data;
const val = anon_struct.values[index];
if (val.tag() == .unreachable_value) {
- return anon_struct.types[index].onePossibleValue();
+ return anon_struct.types[index].onePossibleValue(mod);
} else {
return val;
}
@@ -5765,7 +5838,7 @@ pub const Type = extern union {
}
}
- pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, target: Target) u32 {
+ pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *const Module) u32 {
const struct_obj = ty.castTag(.@"struct").?.data;
assert(struct_obj.layout == .Packed);
comptime assert(Type.packed_struct_layout_version == 2);
@@ -5774,9 +5847,9 @@ pub const Type = extern union {
var elem_size_bits: u16 = undefined;
var running_bits: u16 = 0;
for (struct_obj.fields.values(), 0..) |f, i| {
- if (!f.ty.hasRuntimeBits()) continue;
+ if (!f.ty.hasRuntimeBits(mod)) continue;
- const field_bits = @intCast(u16, f.ty.bitSize(target));
+ const field_bits = @intCast(u16, f.ty.bitSize(mod));
if (i == field_index) {
bit_offset = running_bits;
elem_size_bits = field_bits;
@@ -5797,9 +5870,10 @@ pub const Type = extern union {
offset: u64 = 0,
big_align: u32 = 0,
struct_obj: *Module.Struct,
- target: Target,
+ module: *const Module,
pub fn next(it: *StructOffsetIterator) ?FieldOffset {
+ const mod = it.module;
var i = it.field;
if (it.struct_obj.fields.count() <= i)
return null;
@@ -5811,35 +5885,35 @@ pub const Type = extern union {
const field = it.struct_obj.fields.values()[i];
it.field += 1;
- if (field.is_comptime or !field.ty.hasRuntimeBits()) {
+ if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) {
return FieldOffset{ .field = i, .offset = it.offset };
}
- const field_align = field.alignment(it.target, it.struct_obj.layout);
+ const field_align = field.alignment(mod, it.struct_obj.layout);
it.big_align = @max(it.big_align, field_align);
const field_offset = std.mem.alignForwardGeneric(u64, it.offset, field_align);
- it.offset = field_offset + field.ty.abiSize(it.target);
+ it.offset = field_offset + field.ty.abiSize(mod);
return FieldOffset{ .field = i, .offset = field_offset };
}
};
/// Get an iterator that iterates over all the struct field, returning the field and
/// offset of that field. Asserts that the type is a non-packed struct.
- pub fn iterateStructOffsets(ty: Type, target: Target) StructOffsetIterator {
+ pub fn iterateStructOffsets(ty: Type, mod: *const Module) StructOffsetIterator {
const struct_obj = ty.castTag(.@"struct").?.data;
assert(struct_obj.haveLayout());
assert(struct_obj.layout != .Packed);
- return .{ .struct_obj = struct_obj, .target = target };
+ return .{ .struct_obj = struct_obj, .module = mod };
}
/// Supports structs and unions.
- pub fn structFieldOffset(ty: Type, index: usize, target: Target) u64 {
+ pub fn structFieldOffset(ty: Type, index: usize, mod: *const Module) u64 {
switch (ty.tag()) {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
assert(struct_obj.haveLayout());
assert(struct_obj.layout != .Packed);
- var it = ty.iterateStructOffsets(target);
+ var it = ty.iterateStructOffsets(mod);
while (it.next()) |field_offset| {
if (index == field_offset.field)
return field_offset.offset;
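`structFieldOffset` above doubles as a usage example for the reworked iterator: construct it from the type plus the Module, then walk the fields in declaration order. A minimal sketch of a hypothetical caller:

    var it = ty.iterateStructOffsets(mod);
    while (it.next()) |field_offset| {
        // field_offset.field is the field index, field_offset.offset its byte offset.
        std.debug.print("field {d} at offset {d}\n", .{ field_offset.field, field_offset.offset });
    }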
@@ -5856,17 +5930,17 @@ pub const Type = extern union {
for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
- if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) {
+ if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) {
// comptime field
if (i == index) return offset;
continue;
}
- const field_align = field_ty.abiAlignment(target);
+ const field_align = field_ty.abiAlignment(mod);
big_align = @max(big_align, field_align);
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
if (i == index) return offset;
- offset += field_ty.abiSize(target);
+ offset += field_ty.abiSize(mod);
}
offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1));
return offset;
@@ -5875,7 +5949,7 @@ pub const Type = extern union {
.@"union" => return 0,
.union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
- const layout = union_obj.getLayout(target, true);
+ const layout = union_obj.getLayout(mod, true);
if (layout.tag_align >= layout.payload_align) {
// {Tag, Payload}
return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align);
@@ -6050,10 +6124,6 @@ pub const Type = extern union {
manyptr_u8,
manyptr_const_u8,
manyptr_const_u8_sentinel_0,
- fn_noreturn_no_args,
- fn_void_no_args,
- fn_naked_noreturn_no_args,
- fn_ccc_void_no_args,
single_const_pointer_to_comptime_int,
const_slice_u8,
const_slice_u8_sentinel_0,
@@ -6087,8 +6157,6 @@ pub const Type = extern union {
c_mut_pointer,
const_slice,
mut_slice,
- int_signed,
- int_unsigned,
function,
optional,
optional_single_mut_pointer,
@@ -6157,10 +6225,6 @@ pub const Type = extern union {
.enum_literal,
.null,
.undefined,
- .fn_noreturn_no_args,
- .fn_void_no_args,
- .fn_naked_noreturn_no_args,
- .fn_ccc_void_no_args,
.single_const_pointer_to_comptime_int,
.anyerror_void_error_union,
.const_slice_u8,
@@ -6204,10 +6268,6 @@ pub const Type = extern union {
.anyframe_T,
=> Payload.ElemType,
- .int_signed,
- .int_unsigned,
- => Payload.Bits,
-
.error_set => Payload.ErrorSet,
.error_set_inferred => Payload.ErrorSetInferred,
.error_set_merged => Payload.ErrorSetMerged,
@@ -6232,7 +6292,10 @@ pub const Type = extern union {
pub fn init(comptime t: Tag) file_struct.Type {
comptime std.debug.assert(@enumToInt(t) < Tag.no_payload_count);
- return .{ .tag_if_small_enough = t };
+ return file_struct.Type{
+ .ip_index = .none,
+ .legacy = .{ .tag_if_small_enough = t },
+ };
}
pub fn create(comptime t: Tag, ally: Allocator, data: Data(t)) error{OutOfMemory}!file_struct.Type {
@@ -6241,7 +6304,10 @@ pub const Type = extern union {
.base = .{ .tag = t },
.data = data,
};
- return file_struct.Type{ .ptr_otherwise = &p.base };
+ return file_struct.Type{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &p.base },
+ };
}
pub fn Data(comptime t: Tag) type {
@@ -6422,10 +6488,9 @@ pub const Type = extern union {
runtime = std.math.maxInt(u32) - 1,
_,
};
-
- pub fn alignment(data: Data, target: Target) u32 {
+ pub fn alignment(data: Data, mod: *const Module) u32 {
if (data.@"align" != 0) return data.@"align";
- return abiAlignment(data.pointee_type, target);
+ return abiAlignment(data.pointee_type, mod);
}
};
};
@@ -6537,12 +6602,11 @@ pub const Type = extern union {
pub const @"anyerror" = initTag(.anyerror);
pub const @"anyopaque" = initTag(.anyopaque);
pub const @"null" = initTag(.null);
+ pub const @"noreturn" = initTag(.noreturn);
pub const err_int = Type.u16;
pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type {
- const target = mod.getTarget();
-
var d = data;
if (d.size == .C) {
@@ -6554,8 +6618,8 @@ pub const Type = extern union {
// pointee type needs to be resolved more, that needs to be done before calling
// this ptr() function.
if (d.@"align" != 0) canonicalize: {
- if (!d.pointee_type.layoutIsResolved()) break :canonicalize;
- if (d.@"align" == d.pointee_type.abiAlignment(target)) {
+ if (!d.pointee_type.layoutIsResolved(mod)) break :canonicalize;
+ if (d.@"align" == d.pointee_type.abiAlignment(mod)) {
d.@"align" = 0;
}
}
@@ -6565,7 +6629,7 @@ pub const Type = extern union {
// needs to be resolved before calling this ptr() function.
if (d.host_size != 0) {
assert(d.bit_offset < d.host_size * 8);
- if (d.host_size * 8 == d.pointee_type.bitSize(target)) {
+ if (d.host_size * 8 == d.pointee_type.bitSize(mod)) {
assert(d.bit_offset == 0);
d.host_size = 0;
}
@@ -6676,7 +6740,7 @@ pub const Type = extern union {
payload: Type,
mod: *Module,
) Allocator.Error!Type {
- assert(error_set.zigTypeTag() == .ErrorSet);
+ assert(error_set.zigTypeTag(mod) == .ErrorSet);
if (error_set.eql(Type.anyerror, mod) and
payload.eql(Type.void, mod))
{
@@ -6696,83 +6760,6 @@ pub const Type = extern union {
return @intCast(u16, base + @boolToInt(upper < max));
}
- pub fn smallestUnsignedInt(arena: Allocator, max: u64) !Type {
- const bits = smallestUnsignedBits(max);
- return intWithBits(arena, false, bits);
- }
-
- pub fn intWithBits(arena: Allocator, sign: bool, bits: u16) !Type {
- return if (sign) switch (bits) {
- 8 => initTag(.i8),
- 16 => initTag(.i16),
- 32 => initTag(.i32),
- 64 => initTag(.i64),
- else => return Tag.int_signed.create(arena, bits),
- } else switch (bits) {
- 1 => initTag(.u1),
- 8 => initTag(.u8),
- 16 => initTag(.u16),
- 32 => initTag(.u32),
- 64 => initTag(.u64),
- else => return Tag.int_unsigned.create(arena, bits),
- };
- }
-
- /// Given a value representing an integer, returns the number of bits necessary to represent
- /// this value in an integer. If `sign` is true, returns the number of bits necessary in a
- /// twos-complement integer; otherwise in an unsigned integer.
- /// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true.
- pub fn intBitsForValue(target: Target, val: Value, sign: bool) u16 {
- assert(!val.isUndef());
- switch (val.tag()) {
- .int_big_positive => {
- const limbs = val.castTag(.int_big_positive).?.data;
- const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = true };
- return @intCast(u16, big.bitCountAbs() + @boolToInt(sign));
- },
- .int_big_negative => {
- const limbs = val.castTag(.int_big_negative).?.data;
- // Zero is still a possibility, in which case unsigned is fine
- for (limbs) |limb| {
- if (limb != 0) break;
- } else return 0; // val == 0
- assert(sign);
- const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = false };
- return @intCast(u16, big.bitCountTwosComp());
- },
- .int_i64 => {
- const x = val.castTag(.int_i64).?.data;
- if (x >= 0) return smallestUnsignedBits(@intCast(u64, x));
- assert(sign);
- return smallestUnsignedBits(@intCast(u64, -x - 1)) + 1;
- },
- else => {
- const x = val.toUnsignedInt(target);
- return smallestUnsignedBits(x) + @boolToInt(sign);
- },
- }
- }
-
- /// Returns the smallest possible integer type containing both `min` and `max`. Asserts that neither
- /// value is undef.
- /// TODO: if #3806 is implemented, this becomes trivial
- pub fn intFittingRange(target: Target, arena: Allocator, min: Value, max: Value) !Type {
- assert(!min.isUndef());
- assert(!max.isUndef());
-
- if (std.debug.runtime_safety) {
- assert(Value.order(min, max, target).compare(.lte));
- }
-
- const sign = min.orderAgainstZero() == .lt;
-
- const min_val_bits = intBitsForValue(target, min, sign);
- const max_val_bits = intBitsForValue(target, max, sign);
- const bits = @max(min_val_bits, max_val_bits);
-
- return intWithBits(arena, sign, bits);
- }
-
/// This is only used for comptime asserts. Bump this number when you make a change
/// to packed struct layout to find out all the places in the codebase you need to edit!
pub const packed_struct_layout_version = 2;
src/TypedValue.zig
@@ -71,7 +71,6 @@ pub fn print(
level: u8,
mod: *Module,
) @TypeOf(writer).Error!void {
- const target = mod.getTarget();
var val = tv.val;
var ty = tv.ty;
if (val.isVariable(mod))
@@ -117,10 +116,6 @@ pub fn print(
.noreturn_type => return writer.writeAll("noreturn"),
.null_type => return writer.writeAll("@Type(.Null)"),
.undefined_type => return writer.writeAll("@Type(.Undefined)"),
- .fn_noreturn_no_args_type => return writer.writeAll("fn() noreturn"),
- .fn_void_no_args_type => return writer.writeAll("fn() void"),
- .fn_naked_noreturn_no_args_type => return writer.writeAll("fn() callconv(.Naked) noreturn"),
- .fn_ccc_void_no_args_type => return writer.writeAll("fn() callconv(.C) void"),
.single_const_pointer_to_comptime_int_type => return writer.writeAll("*const comptime_int"),
.anyframe_type => return writer.writeAll("anyframe"),
.const_slice_u8_type => return writer.writeAll("[]const u8"),
@@ -147,7 +142,7 @@ pub fn print(
if (level == 0) {
return writer.writeAll(".{ ... }");
}
- if (ty.zigTypeTag() == .Struct) {
+ if (ty.zigTypeTag(mod) == .Struct) {
try writer.writeAll(".{");
const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items);
@@ -160,7 +155,7 @@ pub fn print(
}
try print(.{
.ty = ty.structFieldType(i),
- .val = val.fieldValue(ty, i),
+ .val = val.fieldValue(ty, mod, i),
}, writer, level - 1, mod);
}
if (ty.structFieldCount() > max_aggregate_items) {
@@ -168,7 +163,7 @@ pub fn print(
}
return writer.writeAll("}");
} else {
- const elem_ty = ty.elemType2();
+ const elem_ty = ty.elemType2(mod);
const len = ty.arrayLen();
if (elem_ty.eql(Type.u8, mod)) str: {
@@ -177,9 +172,9 @@ pub fn print(
var i: u32 = 0;
while (i < max_len) : (i += 1) {
- const elem = val.fieldValue(ty, i);
+ const elem = val.fieldValue(ty, mod, i);
if (elem.isUndef()) break :str;
- buf[i] = std.math.cast(u8, elem.toUnsignedInt(target)) orelse break :str;
+ buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str;
}
const truncated = if (len > max_string_len) " (truncated)" else "";
@@ -194,7 +189,7 @@ pub fn print(
if (i != 0) try writer.writeAll(", ");
try print(.{
.ty = elem_ty,
- .val = val.fieldValue(ty, i),
+ .val = val.fieldValue(ty, mod, i),
}, writer, level - 1, mod);
}
if (len > max_aggregate_items) {
@@ -232,25 +227,18 @@ pub fn print(
.bool_true => return writer.writeAll("true"),
.bool_false => return writer.writeAll("false"),
.ty => return val.castTag(.ty).?.data.print(writer, mod),
- .int_type => {
- const int_type = val.castTag(.int_type).?.data;
- return writer.print("{s}{d}", .{
- if (int_type.signed) "s" else "u",
- int_type.bits,
- });
- },
.int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer),
.int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer),
.int_big_positive => return writer.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}),
.int_big_negative => return writer.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}),
.lazy_align => {
const sub_ty = val.castTag(.lazy_align).?.data;
- const x = sub_ty.abiAlignment(target);
+ const x = sub_ty.abiAlignment(mod);
return writer.print("{d}", .{x});
},
.lazy_size => {
const sub_ty = val.castTag(.lazy_size).?.data;
- const x = sub_ty.abiSize(target);
+ const x = sub_ty.abiSize(mod);
return writer.print("{d}", .{x});
},
.function => return writer.print("(function '{s}')", .{
@@ -315,7 +303,7 @@ pub fn print(
}, writer, level - 1, mod);
}
- if (field_ptr.container_ty.zigTypeTag() == .Struct) {
+ if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) {
switch (field_ptr.container_ty.tag()) {
.tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}),
else => {
@@ -323,7 +311,7 @@ pub fn print(
return writer.print(".{s}", .{field_name});
},
}
- } else if (field_ptr.container_ty.zigTypeTag() == .Union) {
+ } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) {
const field_name = field_ptr.container_ty.unionFields().keys()[field_ptr.field_index];
return writer.print(".{s}", .{field_name});
} else if (field_ptr.container_ty.isSlice()) {
@@ -352,7 +340,7 @@ pub fn print(
var i: u32 = 0;
try writer.writeAll(".{ ");
const elem_tv = TypedValue{
- .ty = ty.elemType2(),
+ .ty = ty.elemType2(mod),
.val = val.castTag(.repeated).?.data,
};
const len = ty.arrayLen();
@@ -372,7 +360,7 @@ pub fn print(
}
try writer.writeAll(".{ ");
try print(.{
- .ty = ty.elemType2(),
+ .ty = ty.elemType2(mod),
.val = ty.sentinel().?,
}, writer, level - 1, mod);
return writer.writeAll(" }");
@@ -382,8 +370,8 @@ pub fn print(
return writer.writeAll(".{ ... }");
}
const payload = val.castTag(.slice).?.data;
- const elem_ty = ty.elemType2();
- const len = payload.len.toUnsignedInt(target);
+ const elem_ty = ty.elemType2(mod);
+ const len = payload.len.toUnsignedInt(mod);
if (elem_ty.eql(Type.u8, mod)) str: {
const max_len = @intCast(usize, std.math.min(len, max_string_len));
@@ -394,7 +382,7 @@ pub fn print(
var elem_buf: Value.ElemValueBuffer = undefined;
const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf);
if (elem_val.isUndef()) break :str;
- buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(target)) orelse break :str;
+ buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str;
}
// TODO would be nice if this had a bit of unicode awareness.
src/value.zig
@@ -11,17 +11,24 @@ const Module = @import("Module.zig");
const Air = @import("Air.zig");
const TypedValue = @import("TypedValue.zig");
const Sema = @import("Sema.zig");
-
-/// This is the raw data, with no bookkeeping, no memory awareness,
-/// no de-duplication, and no type system awareness.
-/// It's important for this type to be small.
-/// This union takes advantage of the fact that the first page of memory
-/// is unmapped, giving us 4096 possible enum tags that have no payload.
-pub const Value = extern union {
- /// If the tag value is less than Tag.no_payload_count, then no pointer
- /// dereference is needed.
- tag_if_small_enough: Tag,
- ptr_otherwise: *Payload,
+const InternPool = @import("InternPool.zig");
+
+pub const Value = struct {
+ /// We are migrating towards using this for every Value object. However, many
+ /// values are still represented the legacy way. This is indicated by using
+ /// InternPool.Index.none.
+ ip_index: InternPool.Index,
+
+ /// This is the raw data, with no bookkeeping, no memory awareness,
+ /// no de-duplication, and no type system awareness.
+ /// This union takes advantage of the fact that the first page of memory
+ /// is unmapped, giving us 4096 possible enum tags that have no payload.
+ legacy: extern union {
+ /// If the tag value is less than Tag.no_payload_count, then no pointer
+ /// dereference is needed.
+ tag_if_small_enough: Tag,
+ ptr_otherwise: *Payload,
+ },
// Keep in sync with tools/stage2_pretty_printers_common.py
pub const Tag = enum(usize) {
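Much of value.zig below is mechanical fallout from this struct change: every construction site gains an explicit `ip_index`. The two shapes, as they appear throughout the diff (`some_index` is a placeholder for a real InternPool.Index):

    // Legacy representation: the data lives in the tag/payload union.
    const legacy_val = Value{
        .ip_index = .none,
        .legacy = .{ .tag_if_small_enough = .bool_true },
    };
    // Interned representation: the index alone identifies the value.
    const interned_val = Value{
        .ip_index = some_index,
        .legacy = undefined,
    };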
@@ -81,10 +88,6 @@ pub const Value = extern union {
manyptr_u8_type,
manyptr_const_u8_type,
manyptr_const_u8_sentinel_0_type,
- fn_noreturn_no_args_type,
- fn_void_no_args_type,
- fn_naked_noreturn_no_args_type,
- fn_ccc_void_no_args_type,
single_const_pointer_to_comptime_int_type,
const_slice_u8_type,
const_slice_u8_sentinel_0_type,
@@ -108,7 +111,6 @@ pub const Value = extern union {
// After this, the tag requires a payload.
ty,
- int_type,
int_u64,
int_i64,
int_big_positive,
@@ -232,10 +234,6 @@ pub const Value = extern union {
.noreturn_type,
.null_type,
.undefined_type,
- .fn_noreturn_no_args_type,
- .fn_void_no_args_type,
- .fn_naked_noreturn_no_args_type,
- .fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
.anyframe_type,
.const_slice_u8_type,
@@ -304,7 +302,6 @@ pub const Value = extern union {
.lazy_size,
=> Payload.Ty,
- .int_type => Payload.IntType,
.int_u64 => Payload.U64,
.int_i64 => Payload.I64,
.function => Payload.Function,
@@ -332,7 +329,10 @@ pub const Value = extern union {
.base = .{ .tag = t },
.data = data,
};
- return Value{ .ptr_otherwise = &ptr.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &ptr.base },
+ };
}
pub fn Data(comptime t: Tag) type {
@@ -342,37 +342,47 @@ pub const Value = extern union {
pub fn initTag(small_tag: Tag) Value {
assert(@enumToInt(small_tag) < Tag.no_payload_count);
- return .{ .tag_if_small_enough = small_tag };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .tag_if_small_enough = small_tag },
+ };
}
pub fn initPayload(payload: *Payload) Value {
assert(@enumToInt(payload.tag) >= Tag.no_payload_count);
- return .{ .ptr_otherwise = payload };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = payload },
+ };
}
pub fn tag(self: Value) Tag {
- if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
- return self.tag_if_small_enough;
+ assert(self.ip_index == .none);
+ if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) {
+ return self.legacy.tag_if_small_enough;
} else {
- return self.ptr_otherwise.tag;
+ return self.legacy.ptr_otherwise.tag;
}
}
/// Prefer `castTag` to this.
pub fn cast(self: Value, comptime T: type) ?*T {
+ if (self.ip_index != .none) {
+ return null;
+ }
if (@hasField(T, "base_tag")) {
return self.castTag(T.base_tag);
}
- if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
+ if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) {
return null;
}
inline for (@typeInfo(Tag).Enum.fields) |field| {
if (field.value < Tag.no_payload_count)
continue;
const t = @intToEnum(Tag, field.value);
- if (self.ptr_otherwise.tag == t) {
+ if (self.legacy.ptr_otherwise.tag == t) {
if (T == t.Type()) {
- return @fieldParentPtr(T, "base", self.ptr_otherwise);
+ return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise);
}
return null;
}
@@ -381,11 +391,15 @@ pub const Value = extern union {
}
pub fn castTag(self: Value, comptime t: Tag) ?*t.Type() {
- if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count)
+ if (self.ip_index != .none) {
+ return null;
+ }
+
+ if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count)
return null;
- if (self.ptr_otherwise.tag == t)
- return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise);
+ if (self.legacy.ptr_otherwise.tag == t)
+ return @fieldParentPtr(t.Type(), "base", self.legacy.ptr_otherwise);
return null;
}
@@ -393,9 +407,15 @@ pub const Value = extern union {
/// It's intentional that this function is not passed a corresponding Type, so that
/// a Value can be copied from a Sema to a Decl prior to resolving struct/union field types.
pub fn copy(self: Value, arena: Allocator) error{OutOfMemory}!Value {
- if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
- return Value{ .tag_if_small_enough = self.tag_if_small_enough };
- } else switch (self.ptr_otherwise.tag) {
+ if (self.ip_index != .none) {
+ return Value{ .ip_index = self.ip_index, .legacy = undefined };
+ }
+ if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) {
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough },
+ };
+ } else switch (self.legacy.ptr_otherwise.tag) {
.u1_type,
.u8_type,
.i8_type,
@@ -435,10 +455,6 @@ pub const Value = extern union {
.noreturn_type,
.null_type,
.undefined_type,
- .fn_noreturn_no_args_type,
- .fn_void_no_args_type,
- .fn_naked_noreturn_no_args_type,
- .fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
.anyframe_type,
.const_slice_u8_type,
@@ -481,19 +497,24 @@ pub const Value = extern union {
.base = payload.base,
.data = try payload.data.copy(arena),
};
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
},
- .int_type => return self.copyPayloadShallow(arena, Payload.IntType),
.int_u64 => return self.copyPayloadShallow(arena, Payload.U64),
.int_i64 => return self.copyPayloadShallow(arena, Payload.I64),
.int_big_positive, .int_big_negative => {
const old_payload = self.cast(Payload.BigInt).?;
const new_payload = try arena.create(Payload.BigInt);
new_payload.* = .{
- .base = .{ .tag = self.ptr_otherwise.tag },
+ .base = .{ .tag = self.legacy.ptr_otherwise.tag },
.data = try arena.dupe(std.math.big.Limb, old_payload.data),
};
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
},
.function => return self.copyPayloadShallow(arena, Payload.Function),
.extern_fn => return self.copyPayloadShallow(arena, Payload.ExternFn),
@@ -512,7 +533,10 @@ pub const Value = extern union {
.container_ty = try payload.data.container_ty.copy(arena),
},
};
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
},
.comptime_field_ptr => {
const payload = self.cast(Payload.ComptimeFieldPtr).?;
@@ -524,7 +548,10 @@ pub const Value = extern union {
.field_ty = try payload.data.field_ty.copy(arena),
},
};
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
},
.elem_ptr => {
const payload = self.castTag(.elem_ptr).?;
@@ -537,7 +564,10 @@ pub const Value = extern union {
.index = payload.data.index,
},
};
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
},
.field_ptr => {
const payload = self.castTag(.field_ptr).?;
@@ -550,7 +580,10 @@ pub const Value = extern union {
.field_index = payload.data.field_index,
},
};
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
},
.bytes => {
const bytes = self.castTag(.bytes).?.data;
@@ -559,7 +592,10 @@ pub const Value = extern union {
.base = .{ .tag = .bytes },
.data = try arena.dupe(u8, bytes),
};
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
},
.str_lit => return self.copyPayloadShallow(arena, Payload.StrLit),
.repeated,
@@ -574,7 +610,10 @@ pub const Value = extern union {
.base = payload.base,
.data = try payload.data.copy(arena),
};
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
},
.slice => {
const payload = self.castTag(.slice).?;
@@ -586,7 +625,10 @@ pub const Value = extern union {
.len = try payload.data.len.copy(arena),
},
};
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
},
.float_16 => return self.copyPayloadShallow(arena, Payload.Float_16),
.float_32 => return self.copyPayloadShallow(arena, Payload.Float_32),
@@ -600,7 +642,10 @@ pub const Value = extern union {
.base = payload.base,
.data = try arena.dupe(u8, payload.data),
};
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
},
.enum_field_index => return self.copyPayloadShallow(arena, Payload.U32),
.@"error" => return self.copyPayloadShallow(arena, Payload.Error),
@@ -615,7 +660,10 @@ pub const Value = extern union {
for (new_payload.data, 0..) |*elem, i| {
elem.* = try payload.data[i].copy(arena);
}
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
},
.@"union" => {
@@ -628,7 +676,10 @@ pub const Value = extern union {
.val = try tag_and_val.val.copy(arena),
},
};
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
},
.inferred_alloc => unreachable,
@@ -640,7 +691,10 @@ pub const Value = extern union {
const payload = self.cast(T).?;
const new_payload = try arena.create(T);
new_payload.* = payload.*;
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
}
pub fn format(val: Value, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
@@ -660,6 +714,10 @@ pub const Value = extern union {
out_stream: anytype,
) !void {
comptime assert(fmt.len == 0);
+ if (start_val.ip_index != .none) {
+ try out_stream.print("(interned {d})", .{@enumToInt(start_val.ip_index)});
+ return;
+ }
var val = start_val;
while (true) switch (val.tag()) {
.u1_type => return out_stream.writeAll("u1"),
@@ -701,10 +759,6 @@ pub const Value = extern union {
.noreturn_type => return out_stream.writeAll("noreturn"),
.null_type => return out_stream.writeAll("@Type(.Null)"),
.undefined_type => return out_stream.writeAll("@Type(.Undefined)"),
- .fn_noreturn_no_args_type => return out_stream.writeAll("fn() noreturn"),
- .fn_void_no_args_type => return out_stream.writeAll("fn() void"),
- .fn_naked_noreturn_no_args_type => return out_stream.writeAll("fn() callconv(.Naked) noreturn"),
- .fn_ccc_void_no_args_type => return out_stream.writeAll("fn() callconv(.C) void"),
.single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"),
.anyframe_type => return out_stream.writeAll("anyframe"),
.const_slice_u8_type => return out_stream.writeAll("[]const u8"),
@@ -755,13 +809,6 @@ pub const Value = extern union {
try val.castTag(.lazy_size).?.data.dump("", options, out_stream);
return try out_stream.writeAll(")");
},
- .int_type => {
- const int_type = val.castTag(.int_type).?.data;
- return out_stream.print("{s}{d}", .{
- if (int_type.signed) "s" else "u",
- int_type.bits,
- });
- },
.int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", options, out_stream),
.int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream),
.int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}),
@@ -848,7 +895,6 @@ pub const Value = extern union {
/// Asserts that the value is representable as an array of bytes.
/// Copies the value into a freshly allocated slice of memory, which is owned by the caller.
pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 {
- const target = mod.getTarget();
switch (val.tag()) {
.bytes => {
const bytes = val.castTag(.bytes).?.data;
@@ -863,7 +909,7 @@ pub const Value = extern union {
},
.enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data),
.repeated => {
- const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(target));
+ const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod));
const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen()));
@memset(result, byte);
return result;
@@ -877,7 +923,7 @@ pub const Value = extern union {
.the_only_possible_value => return &[_]u8{},
.slice => {
const slice = val.castTag(.slice).?.data;
- return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(target), allocator, mod);
+ return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod);
},
else => return arrayToAllocatedBytes(val, ty.arrayLen(), allocator, mod),
}
@@ -888,15 +934,19 @@ pub const Value = extern union {
var elem_value_buf: ElemValueBuffer = undefined;
for (result, 0..) |*elem, i| {
const elem_val = val.elemValueBuffer(mod, i, &elem_value_buf);
- elem.* = @intCast(u8, elem_val.toUnsignedInt(mod.getTarget()));
+ elem.* = @intCast(u8, elem_val.toUnsignedInt(mod));
}
return result;
}
- pub const ToTypeBuffer = Type.Payload.Bits;
-
/// Asserts that the value is representable as a type.
- pub fn toType(self: Value, buffer: *ToTypeBuffer) Type {
+ pub fn toType(self: Value) Type {
+ if (self.ip_index != .none) {
+ return .{
+ .ip_index = self.ip_index,
+ .legacy = undefined,
+ };
+ }
return switch (self.tag()) {
.ty => self.castTag(.ty).?.data,
.u1_type => Type.initTag(.u1),
@@ -938,10 +988,6 @@ pub const Value = extern union {
.noreturn_type => Type.initTag(.noreturn),
.null_type => Type.initTag(.null),
.undefined_type => Type.initTag(.undefined),
- .fn_noreturn_no_args_type => Type.initTag(.fn_noreturn_no_args),
- .fn_void_no_args_type => Type.initTag(.fn_void_no_args),
- .fn_naked_noreturn_no_args_type => Type.initTag(.fn_naked_noreturn_no_args),
- .fn_ccc_void_no_args_type => Type.initTag(.fn_ccc_void_no_args),
.single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int),
.anyframe_type => Type.initTag(.@"anyframe"),
.const_slice_u8_type => Type.initTag(.const_slice_u8),
@@ -964,17 +1010,6 @@ pub const Value = extern union {
.extern_options_type => Type.initTag(.extern_options),
.type_info_type => Type.initTag(.type_info),
- .int_type => {
- const payload = self.castTag(.int_type).?.data;
- buffer.* = .{
- .base = .{
- .tag = if (payload.signed) .int_signed else .int_unsigned,
- },
- .data = payload.bits,
- };
- return Type.initPayload(&buffer.base);
- },
-
else => unreachable,
};
}
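With the `int_type` value tag gone, `toType` no longer needs the old `ToTypeBuffer`: an interned value becomes a type by carrying the same `ip_index` across, while legacy values still go through the tag switch above. Caller-side, the change is just:

    // Before:
    //   var buffer: Value.ToTypeBuffer = undefined;
    //   const ty = val.toType(&buffer);
    // After:
    const ty = val.toType();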
@@ -1050,7 +1085,7 @@ pub const Value = extern union {
}
pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 {
- if (ty.zigTypeTag() == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(), mod);
+ if (ty.zigTypeTag(mod) == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(), mod);
const field_index = switch (val.tag()) {
.enum_field_index => val.castTag(.enum_field_index).?.data,
@@ -1068,10 +1103,9 @@ pub const Value = extern union {
};
if (values.entries.len == 0) {
// auto-numbered enum
- break :field_index @intCast(u32, val.toUnsignedInt(mod.getTarget()));
+ break :field_index @intCast(u32, val.toUnsignedInt(mod));
}
- var buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = ty.intTagType(&buffer);
+ const int_tag_ty = ty.intTagType();
break :field_index @intCast(u32, values.getIndexContext(val, .{ .ty = int_tag_ty, .mod = mod }).?);
},
};
@@ -1086,15 +1120,15 @@ pub const Value = extern union {
}
/// Asserts the value is an integer.
- pub fn toBigInt(val: Value, space: *BigIntSpace, target: Target) BigIntConst {
- return val.toBigIntAdvanced(space, target, null) catch unreachable;
+ pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *const Module) BigIntConst {
+ return val.toBigIntAdvanced(space, mod, null) catch unreachable;
}
/// Asserts the value is an integer.
pub fn toBigIntAdvanced(
val: Value,
space: *BigIntSpace,
- target: Target,
+ mod: *const Module,
opt_sema: ?*Sema,
) Module.CompileError!BigIntConst {
switch (val.tag()) {
@@ -1114,7 +1148,7 @@ pub const Value = extern union {
},
.runtime_value => {
const sub_val = val.castTag(.runtime_value).?.data;
- return sub_val.toBigIntAdvanced(space, target, opt_sema);
+ return sub_val.toBigIntAdvanced(space, mod, opt_sema);
},
.int_u64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_u64).?.data).toConst(),
.int_i64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_i64).?.data).toConst(),
@@ -1128,7 +1162,7 @@ pub const Value = extern union {
if (opt_sema) |sema| {
try sema.resolveTypeLayout(ty);
}
- const x = ty.abiAlignment(target);
+ const x = ty.abiAlignment(mod);
return BigIntMutable.init(&space.limbs, x).toConst();
},
.lazy_size => {
@@ -1136,14 +1170,14 @@ pub const Value = extern union {
if (opt_sema) |sema| {
try sema.resolveTypeLayout(ty);
}
- const x = ty.abiSize(target);
+ const x = ty.abiSize(mod);
return BigIntMutable.init(&space.limbs, x).toConst();
},
.elem_ptr => {
const elem_ptr = val.castTag(.elem_ptr).?.data;
- const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(target, opt_sema)).?;
- const elem_size = elem_ptr.elem_ty.abiSize(target);
+ const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(mod, opt_sema)).?;
+ const elem_size = elem_ptr.elem_ty.abiSize(mod);
const new_addr = array_addr + elem_size * elem_ptr.index;
return BigIntMutable.init(&space.limbs, new_addr).toConst();
},
@@ -1154,13 +1188,13 @@ pub const Value = extern union {
/// If the value fits in a u64, return it, otherwise null.
/// Asserts not undefined.
- pub fn getUnsignedInt(val: Value, target: Target) ?u64 {
- return getUnsignedIntAdvanced(val, target, null) catch unreachable;
+ pub fn getUnsignedInt(val: Value, mod: *const Module) ?u64 {
+ return getUnsignedIntAdvanced(val, mod, null) catch unreachable;
}
/// If the value fits in a u64, return it, otherwise null.
/// Asserts not undefined.
- pub fn getUnsignedIntAdvanced(val: Value, target: Target, opt_sema: ?*Sema) !?u64 {
+ pub fn getUnsignedIntAdvanced(val: Value, mod: *const Module, opt_sema: ?*Sema) !?u64 {
switch (val.tag()) {
.zero,
.bool_false,
@@ -1181,17 +1215,17 @@ pub const Value = extern union {
.lazy_align => {
const ty = val.castTag(.lazy_align).?.data;
if (opt_sema) |sema| {
- return (try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar;
+ return (try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar;
} else {
- return ty.abiAlignment(target);
+ return ty.abiAlignment(mod);
}
},
.lazy_size => {
const ty = val.castTag(.lazy_size).?.data;
if (opt_sema) |sema| {
- return (try ty.abiSizeAdvanced(target, .{ .sema = sema })).scalar;
+ return (try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar;
} else {
- return ty.abiSize(target);
+ return ty.abiSize(mod);
}
},
@@ -1200,12 +1234,12 @@ pub const Value = extern union {
}
/// Asserts the value is an integer and it fits in a u64
- pub fn toUnsignedInt(val: Value, target: Target) u64 {
- return getUnsignedInt(val, target).?;
+ pub fn toUnsignedInt(val: Value, mod: *const Module) u64 {
+ return getUnsignedInt(val, mod).?;
}
/// Asserts the value is an integer and it fits in a i64
- pub fn toSignedInt(val: Value, target: Target) i64 {
+ pub fn toSignedInt(val: Value, mod: *const Module) i64 {
switch (val.tag()) {
.zero,
.bool_false,
@@ -1223,11 +1257,11 @@ pub const Value = extern union {
.lazy_align => {
const ty = val.castTag(.lazy_align).?.data;
- return @intCast(i64, ty.abiAlignment(target));
+ return @intCast(i64, ty.abiAlignment(mod));
},
.lazy_size => {
const ty = val.castTag(.lazy_size).?.data;
- return @intCast(i64, ty.abiSize(target));
+ return @intCast(i64, ty.abiSize(mod));
},
.undef => unreachable,
@@ -1276,17 +1310,17 @@ pub const Value = extern union {
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
if (val.isUndef()) {
- const size = @intCast(usize, ty.abiSize(target));
+ const size = @intCast(usize, ty.abiSize(mod));
@memset(buffer[0..size], 0xaa);
return;
}
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Void => {},
.Bool => {
buffer[0] = @boolToInt(val.toBool());
},
.Int, .Enum => {
- const int_info = ty.intInfo(target);
+ const int_info = ty.intInfo(mod);
const bits = int_info.bits;
const byte_count = (bits + 7) / 8;
@@ -1307,7 +1341,7 @@ pub const Value = extern union {
};
} else {
var bigint_buffer: BigIntSpace = undefined;
- const bigint = int_val.toBigInt(&bigint_buffer, target);
+ const bigint = int_val.toBigInt(&bigint_buffer, mod);
bigint.writeTwosComplement(buffer[0..byte_count], endian);
}
},
@@ -1322,7 +1356,7 @@ pub const Value = extern union {
.Array => {
const len = ty.arrayLen();
const elem_ty = ty.childType();
- const elem_size = @intCast(usize, elem_ty.abiSize(target));
+ const elem_size = @intCast(usize, elem_ty.abiSize(mod));
var elem_i: usize = 0;
var elem_value_buf: ElemValueBuffer = undefined;
var buf_off: usize = 0;
@@ -1335,7 +1369,7 @@ pub const Value = extern union {
.Vector => {
// We use byte_count instead of abi_size here, so that any padding bytes
// follow the data bytes, on both big- and little-endian systems.
- const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
.Struct => switch (ty.containerLayout()) {
@@ -1344,12 +1378,12 @@ pub const Value = extern union {
const fields = ty.structFields().values();
const field_vals = val.castTag(.aggregate).?.data;
for (fields, 0..) |field, i| {
- const off = @intCast(usize, ty.structFieldOffset(i, target));
+ const off = @intCast(usize, ty.structFieldOffset(i, mod));
try writeToMemory(field_vals[i], field.ty, mod, buffer[off..]);
}
},
.Packed => {
- const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
},
@@ -1363,7 +1397,7 @@ pub const Value = extern union {
.Auto => return error.IllDefinedMemoryLayout,
.Extern => return error.Unimplemented,
.Packed => {
- const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
},
@@ -1373,10 +1407,10 @@ pub const Value = extern union {
return val.writeToMemory(Type.usize, mod, buffer);
},
.Optional => {
- if (!ty.isPtrLikeOptional()) return error.IllDefinedMemoryLayout;
+ if (!ty.isPtrLikeOptional(mod)) return error.IllDefinedMemoryLayout;
var buf: Type.Payload.ElemType = undefined;
const child = ty.optionalChild(&buf);
- const opt_val = val.optionalValue();
+ const opt_val = val.optionalValue(mod);
if (opt_val) |some| {
return some.writeToMemory(child, mod, buffer);
} else {
@@ -1395,11 +1429,11 @@ pub const Value = extern union {
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
if (val.isUndef()) {
- const bit_size = @intCast(usize, ty.bitSize(target));
+ const bit_size = @intCast(usize, ty.bitSize(mod));
std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian);
return;
}
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Void => {},
.Bool => {
const byte_index = switch (endian) {
@@ -1413,8 +1447,8 @@ pub const Value = extern union {
}
},
.Int, .Enum => {
- const bits = ty.intInfo(target).bits;
- const abi_size = @intCast(usize, ty.abiSize(target));
+ const bits = ty.intInfo(mod).bits;
+ const abi_size = @intCast(usize, ty.abiSize(mod));
var enum_buffer: Payload.U64 = undefined;
const int_val = val.enumToInt(ty, &enum_buffer);
@@ -1431,7 +1465,7 @@ pub const Value = extern union {
std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian);
} else {
var bigint_buffer: BigIntSpace = undefined;
- const bigint = int_val.toBigInt(&bigint_buffer, target);
+ const bigint = int_val.toBigInt(&bigint_buffer, mod);
bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian);
}
},
@@ -1445,7 +1479,7 @@ pub const Value = extern union {
},
.Vector => {
const elem_ty = ty.childType();
- const elem_bit_size = @intCast(u16, elem_ty.bitSize(target));
+ const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod));
const len = @intCast(usize, ty.arrayLen());
var bits: u16 = 0;
@@ -1467,7 +1501,7 @@ pub const Value = extern union {
const fields = ty.structFields().values();
const field_vals = val.castTag(.aggregate).?.data;
for (fields, 0..) |field, i| {
- const field_bits = @intCast(u16, field.ty.bitSize(target));
+ const field_bits = @intCast(u16, field.ty.bitSize(mod));
try field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
bits += field_bits;
}
@@ -1479,7 +1513,7 @@ pub const Value = extern union {
.Packed => {
const field_index = ty.unionTagFieldIndex(val.unionTag(), mod);
const field_type = ty.unionFields().values()[field_index.?].ty;
- const field_val = val.fieldValue(field_type, field_index.?);
+ const field_val = val.fieldValue(field_type, mod, field_index.?);
return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
},
@@ -1490,10 +1524,10 @@ pub const Value = extern union {
return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset);
},
.Optional => {
- assert(ty.isPtrLikeOptional());
+ assert(ty.isPtrLikeOptional(mod));
var buf: Type.Payload.ElemType = undefined;
const child = ty.optionalChild(&buf);
- const opt_val = val.optionalValue();
+ const opt_val = val.optionalValue(mod);
if (opt_val) |some| {
return some.writeToPackedMemory(child, mod, buffer, bit_offset);
} else {
@@ -1516,7 +1550,7 @@ pub const Value = extern union {
) Allocator.Error!Value {
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Void => return Value.void,
.Bool => {
if (buffer[0] == 0) {
@@ -1526,7 +1560,7 @@ pub const Value = extern union {
}
},
.Int, .Enum => {
- const int_info = ty.intInfo(target);
+ const int_info = ty.intInfo(mod);
const bits = int_info.bits;
const byte_count = (bits + 7) / 8;
if (bits == 0 or buffer.len == 0) return Value.zero;
@@ -1560,7 +1594,7 @@ pub const Value = extern union {
},
.Array => {
const elem_ty = ty.childType();
- const elem_size = elem_ty.abiSize(target);
+ const elem_size = elem_ty.abiSize(mod);
const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen()));
var offset: usize = 0;
for (elems) |*elem| {
@@ -1572,7 +1606,7 @@ pub const Value = extern union {
.Vector => {
// We use byte_count instead of abi_size here, so that any padding bytes
// follow the data bytes, on both big- and little-endian systems.
- const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
},
.Struct => switch (ty.containerLayout()) {
@@ -1581,14 +1615,14 @@ pub const Value = extern union {
const fields = ty.structFields().values();
const field_vals = try arena.alloc(Value, fields.len);
for (fields, 0..) |field, i| {
- const off = @intCast(usize, ty.structFieldOffset(i, target));
- const sz = @intCast(usize, ty.structFieldType(i).abiSize(target));
+ const off = @intCast(usize, ty.structFieldOffset(i, mod));
+ const sz = @intCast(usize, ty.structFieldType(i).abiSize(mod));
field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena);
}
return Tag.aggregate.create(arena, field_vals);
},
.Packed => {
- const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
},
},
@@ -1609,7 +1643,7 @@ pub const Value = extern union {
return readFromMemory(Type.usize, mod, buffer, arena);
},
.Optional => {
- assert(ty.isPtrLikeOptional());
+ assert(ty.isPtrLikeOptional(mod));
var buf: Type.Payload.ElemType = undefined;
const child = ty.optionalChild(&buf);
return readFromMemory(child, mod, buffer, arena);
@@ -1631,7 +1665,7 @@ pub const Value = extern union {
) Allocator.Error!Value {
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Void => return Value.void,
.Bool => {
const byte = switch (endian) {
@@ -1646,8 +1680,8 @@ pub const Value = extern union {
},
.Int, .Enum => {
if (buffer.len == 0) return Value.zero;
- const int_info = ty.intInfo(target);
- const abi_size = @intCast(usize, ty.abiSize(target));
+ const int_info = ty.intInfo(mod);
+ const abi_size = @intCast(usize, ty.abiSize(mod));
const bits = int_info.bits;
if (bits == 0) return Value.zero;
@@ -1677,7 +1711,7 @@ pub const Value = extern union {
const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen()));
var bits: u16 = 0;
- const elem_bit_size = @intCast(u16, elem_ty.bitSize(target));
+ const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod));
for (elems, 0..) |_, i| {
// On big-endian systems, LLVM reverses the element order of vectors by default
const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i;
@@ -1694,7 +1728,7 @@ pub const Value = extern union {
const fields = ty.structFields().values();
const field_vals = try arena.alloc(Value, fields.len);
for (fields, 0..) |field, i| {
- const field_bits = @intCast(u16, field.ty.bitSize(target));
+ const field_bits = @intCast(u16, field.ty.bitSize(mod));
field_vals[i] = try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena);
bits += field_bits;
}
@@ -1706,7 +1740,7 @@ pub const Value = extern union {
return readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena);
},
.Optional => {
- assert(ty.isPtrLikeOptional());
+ assert(ty.isPtrLikeOptional(mod));
var buf: Type.Payload.ElemType = undefined;
const child = ty.optionalChild(&buf);
return readFromPackedMemory(child, mod, buffer, bit_offset, arena);
@@ -1764,8 +1798,8 @@ pub const Value = extern union {
}
}
- pub fn clz(val: Value, ty: Type, target: Target) u64 {
- const ty_bits = ty.intInfo(target).bits;
+ pub fn clz(val: Value, ty: Type, mod: *const Module) u64 {
+ const ty_bits = ty.intInfo(mod).bits;
switch (val.tag()) {
.zero, .bool_false => return ty_bits,
.one, .bool_true => return ty_bits - 1,
@@ -1792,7 +1826,7 @@ pub const Value = extern union {
.lazy_align, .lazy_size => {
var bigint_buf: BigIntSpace = undefined;
- const bigint = val.toBigIntAdvanced(&bigint_buf, target, null) catch unreachable;
+ const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable;
return bigint.clz(ty_bits);
},
@@ -1800,8 +1834,8 @@ pub const Value = extern union {
}
}
- pub fn ctz(val: Value, ty: Type, target: Target) u64 {
- const ty_bits = ty.intInfo(target).bits;
+ pub fn ctz(val: Value, ty: Type, mod: *const Module) u64 {
+ const ty_bits = ty.intInfo(mod).bits;
switch (val.tag()) {
.zero, .bool_false => return ty_bits,
.one, .bool_true => return 0,
@@ -1828,7 +1862,7 @@ pub const Value = extern union {
.lazy_align, .lazy_size => {
var bigint_buf: BigIntSpace = undefined;
- const bigint = val.toBigIntAdvanced(&bigint_buf, target, null) catch unreachable;
+ const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable;
return bigint.ctz();
},
@@ -1836,7 +1870,7 @@ pub const Value = extern union {
}
}
- pub fn popCount(val: Value, ty: Type, target: Target) u64 {
+ pub fn popCount(val: Value, ty: Type, mod: *const Module) u64 {
assert(!val.isUndef());
switch (val.tag()) {
.zero, .bool_false => return 0,
@@ -1845,22 +1879,22 @@ pub const Value = extern union {
.int_u64 => return @popCount(val.castTag(.int_u64).?.data),
else => {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
var buffer: Value.BigIntSpace = undefined;
- const int = val.toBigInt(&buffer, target);
+ const int = val.toBigInt(&buffer, mod);
return @intCast(u64, int.popCount(info.bits));
},
}
}
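The bit-counting helpers (clz, ctz, popCount) migrate the same way: they need the Module so the lazy_align/lazy_size branches above can go through toBigIntAdvanced. A hedged sketch of an updated caller, not taken from this commit (operand_ty is a placeholder):

// Old callers passed mod.getTarget(); new callers pass mod directly.
const leading = val.clz(operand_ty, mod);
const trailing = val.ctz(operand_ty, mod);
const ones = val.popCount(operand_ty, mod);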
- pub fn bitReverse(val: Value, ty: Type, target: Target, arena: Allocator) !Value {
+ pub fn bitReverse(val: Value, ty: Type, mod: *const Module, arena: Allocator) !Value {
assert(!val.isUndef());
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
var buffer: Value.BigIntSpace = undefined;
- const operand_bigint = val.toBigInt(&buffer, target);
+ const operand_bigint = val.toBigInt(&buffer, mod);
const limbs = try arena.alloc(
std.math.big.Limb,
@@ -1872,16 +1906,16 @@ pub const Value = extern union {
return fromBigInt(arena, result_bigint.toConst());
}
- pub fn byteSwap(val: Value, ty: Type, target: Target, arena: Allocator) !Value {
+ pub fn byteSwap(val: Value, ty: Type, mod: *const Module, arena: Allocator) !Value {
assert(!val.isUndef());
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
// Bit count must be evenly divisible by 8
assert(info.bits % 8 == 0);
var buffer: Value.BigIntSpace = undefined;
- const operand_bigint = val.toBigInt(&buffer, target);
+ const operand_bigint = val.toBigInt(&buffer, mod);
const limbs = try arena.alloc(
std.math.big.Limb,
@@ -1895,7 +1929,8 @@ pub const Value = extern union {
/// Asserts the value is an integer and not undefined.
/// Returns the number of bits required to represent the value in two's complement form.
/// Returns the number of bits required to represent the value in two's complement form.
- pub fn intBitCountTwosComp(self: Value, target: Target) usize {
+ pub fn intBitCountTwosComp(self: Value, mod: *const Module) usize {
+ const target = mod.getTarget();
switch (self.tag()) {
.zero,
.bool_false,
@@ -1926,7 +1961,7 @@ pub const Value = extern union {
else => {
var buffer: BigIntSpace = undefined;
- return self.toBigInt(&buffer, target).bitCountTwosComp();
+ return self.toBigInt(&buffer, mod).bitCountTwosComp();
},
}
}
@@ -1962,12 +1997,13 @@ pub const Value = extern union {
};
}
- pub fn orderAgainstZero(lhs: Value) std.math.Order {
- return orderAgainstZeroAdvanced(lhs, null) catch unreachable;
+ pub fn orderAgainstZero(lhs: Value, mod: *const Module) std.math.Order {
+ return orderAgainstZeroAdvanced(lhs, mod, null) catch unreachable;
}
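orderAgainstZero now requires the Module because the lazy_align/lazy_size branches of the advanced variant below must ask the type whether it has runtime bits. A hedged sketch of the two entry points (illustrative; val, mod, and sema are assumed to be in scope):

// Eager form: asserts everything is already resolved.
const ord = val.orderAgainstZero(mod);
// Advanced form: can drive resolution through Sema, so it may fail.
const ord2 = try val.orderAgainstZeroAdvanced(mod, sema);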
pub fn orderAgainstZeroAdvanced(
lhs: Value,
+ mod: *const Module,
opt_sema: ?*Sema,
) Module.CompileError!std.math.Order {
return switch (lhs.tag()) {
@@ -1991,7 +2027,7 @@ pub const Value = extern union {
// This is needed to correctly handle hashing the value.
// Checks in Sema should prevent direct comparisons from reaching here.
const val = lhs.castTag(.runtime_value).?.data;
- return val.orderAgainstZeroAdvanced(opt_sema);
+ return val.orderAgainstZeroAdvanced(mod, opt_sema);
},
.int_u64 => std.math.order(lhs.castTag(.int_u64).?.data, 0),
.int_i64 => std.math.order(lhs.castTag(.int_i64).?.data, 0),
@@ -2001,7 +2037,7 @@ pub const Value = extern union {
.lazy_align => {
const ty = lhs.castTag(.lazy_align).?.data;
const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
- if (ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => unreachable,
else => |e| return e,
}) {
@@ -2013,7 +2049,7 @@ pub const Value = extern union {
.lazy_size => {
const ty = lhs.castTag(.lazy_size).?.data;
const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
- if (ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => unreachable,
else => |e| return e,
}) {
@@ -2031,7 +2067,7 @@ pub const Value = extern union {
.elem_ptr => {
const elem_ptr = lhs.castTag(.elem_ptr).?.data;
- switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(opt_sema)) {
+ switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(mod, opt_sema)) {
.lt => unreachable,
.gt => return .gt,
.eq => {
@@ -2049,17 +2085,17 @@ pub const Value = extern union {
}
/// Asserts the value is comparable.
- pub fn order(lhs: Value, rhs: Value, target: Target) std.math.Order {
- return orderAdvanced(lhs, rhs, target, null) catch unreachable;
+ pub fn order(lhs: Value, rhs: Value, mod: *const Module) std.math.Order {
+ return orderAdvanced(lhs, rhs, mod, null) catch unreachable;
}
/// Asserts the value is comparable.
/// If `opt_sema` is null, this function asserts that everything is already resolved and therefore cannot fail.
- pub fn orderAdvanced(lhs: Value, rhs: Value, target: Target, opt_sema: ?*Sema) !std.math.Order {
+ pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *const Module, opt_sema: ?*Sema) !std.math.Order {
const lhs_tag = lhs.tag();
const rhs_tag = rhs.tag();
- const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(opt_sema);
- const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(opt_sema);
+ const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema);
+ const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema);
switch (lhs_against_zero) {
.lt => if (rhs_against_zero != .lt) return .lt,
.eq => return rhs_against_zero.invert(),
@@ -2093,22 +2129,22 @@ pub const Value = extern union {
var lhs_bigint_space: BigIntSpace = undefined;
var rhs_bigint_space: BigIntSpace = undefined;
- const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, target, opt_sema);
- const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, target, opt_sema);
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, opt_sema);
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, opt_sema);
return lhs_bigint.order(rhs_bigint);
}
/// Asserts the value is comparable. Does not take a type parameter because it supports
/// comparisons between heterogeneous types.
- pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, target: Target) bool {
- return compareHeteroAdvanced(lhs, op, rhs, target, null) catch unreachable;
+ pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *const Module) bool {
+ return compareHeteroAdvanced(lhs, op, rhs, mod, null) catch unreachable;
}
pub fn compareHeteroAdvanced(
lhs: Value,
op: std.math.CompareOperator,
rhs: Value,
- target: Target,
+ mod: *const Module,
opt_sema: ?*Sema,
) !bool {
if (lhs.pointerDecl()) |lhs_decl| {
@@ -2132,20 +2168,20 @@ pub const Value = extern union {
else => {},
}
}
- return (try orderAdvanced(lhs, rhs, target, opt_sema)).compare(op);
+ return (try orderAdvanced(lhs, rhs, mod, opt_sema)).compare(op);
}
/// Asserts the values are comparable. Both operands have type `ty`.
/// For vectors, returns true if comparison is true for ALL elements.
pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) bool {
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
var i: usize = 0;
while (i < ty.vectorLen()) : (i += 1) {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- if (!compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(), mod)) {
+ if (!compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod), mod)) {
return false;
}
}
@@ -2165,7 +2201,7 @@ pub const Value = extern union {
return switch (op) {
.eq => lhs.eql(rhs, ty, mod),
.neq => !lhs.eql(rhs, ty, mod),
- else => compareHetero(lhs, op, rhs, mod.getTarget()),
+ else => compareHetero(lhs, op, rhs, mod),
};
}
@@ -2231,7 +2267,7 @@ pub const Value = extern union {
.float_128 => if (std.math.isNan(lhs.castTag(.float_128).?.data)) return op == .neq,
else => {},
}
- return (try orderAgainstZeroAdvanced(lhs, opt_sema)).compare(op);
+ return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op);
}
pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool {
@@ -2346,7 +2382,7 @@ pub const Value = extern union {
return true;
}
- if (ty.zigTypeTag() == .Struct) {
+ if (ty.zigTypeTag(mod) == .Struct) {
const fields = ty.structFields().values();
assert(fields.len == a_field_vals.len);
for (fields, 0..) |field, i| {
@@ -2406,12 +2442,10 @@ pub const Value = extern union {
return false;
}
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Type => {
- var buf_a: ToTypeBuffer = undefined;
- var buf_b: ToTypeBuffer = undefined;
- const a_type = a.toType(&buf_a);
- const b_type = b.toType(&buf_b);
+ const a_type = a.toType();
+ const b_type = b.toType();
return a_type.eql(b_type, mod);
},
.Enum => {
@@ -2419,8 +2453,7 @@ pub const Value = extern union {
var buf_b: Payload.U64 = undefined;
const a_val = a.enumToInt(ty, &buf_a);
const b_val = b.enumToInt(ty, &buf_b);
- var buf_ty: Type.Payload.Bits = undefined;
- const int_ty = ty.intTagType(&buf_ty);
+ const int_ty = ty.intTagType();
return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema);
},
.Array, .Vector => {
@@ -2466,11 +2499,11 @@ pub const Value = extern union {
// .the_one_possible_value,
// .aggregate,
// Note that we already checked above for matching tags, e.g. both .aggregate.
- return ty.onePossibleValue() != null;
+ return ty.onePossibleValue(mod) != null;
},
.Union => {
// Here we have to check for value equality, as-if `a` has been coerced to `ty`.
- if (ty.onePossibleValue() != null) {
+ if (ty.onePossibleValue(mod) != null) {
return true;
}
if (a_ty.castTag(.anon_struct)) |payload| {
@@ -2533,13 +2566,13 @@ pub const Value = extern union {
else => {},
}
if (a_tag == .null_value or a_tag == .@"error") return false;
- return (try orderAdvanced(a, b, target, opt_sema)).compare(.eq);
+ return (try orderAdvanced(a, b, mod, opt_sema)).compare(.eq);
}
/// This function is used by hash maps and so treats floating-point NaNs as equal
/// to each other, and not equal to other floating-point values.
pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
- const zig_ty_tag = ty.zigTypeTag();
+ const zig_ty_tag = ty.zigTypeTag(mod);
std.hash.autoHash(hasher, zig_ty_tag);
if (val.isUndef()) return;
// The value is runtime-known and shouldn't affect the hash.
@@ -2555,8 +2588,7 @@ pub const Value = extern union {
=> {},
.Type => {
- var buf: ToTypeBuffer = undefined;
- return val.toType(&buf).hashWithHasher(hasher, mod);
+ return val.toType().hashWithHasher(hasher, mod);
},
.Float => {
// For hash/eql purposes, we treat floats as their IEEE integer representation.
@@ -2588,7 +2620,7 @@ pub const Value = extern union {
hash(slice.len, Type.usize, hasher, mod);
},
- else => return hashPtr(val, hasher, mod.getTarget()),
+ else => return hashPtr(val, hasher, mod),
},
.Array, .Vector => {
const len = ty.arrayLen();
@@ -2648,7 +2680,7 @@ pub const Value = extern union {
.Enum => {
var enum_space: Payload.U64 = undefined;
const int_val = val.enumToInt(ty, &enum_space);
- hashInt(int_val, hasher, mod.getTarget());
+ hashInt(int_val, hasher, mod);
},
.Union => {
const union_obj = val.cast(Payload.Union).?.data;
@@ -2691,7 +2723,7 @@ pub const Value = extern union {
// The value is runtime-known and shouldn't affect the hash.
if (val.tag() == .runtime_value) return;
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
.Opaque => unreachable, // Cannot hash opaque types
.Void,
.NoReturn,
@@ -2700,8 +2732,7 @@ pub const Value = extern union {
.Struct, // It sure would be nice to do something clever with structs.
=> |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag),
.Type => {
- var buf: ToTypeBuffer = undefined;
- val.toType(&buf).hashWithHasher(hasher, mod);
+ val.toType().hashWithHasher(hasher, mod);
},
.Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128))),
.Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) {
@@ -2711,7 +2742,7 @@ pub const Value = extern union {
const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
slice.ptr.hashUncoerced(ptr_ty, hasher, mod);
},
- else => val.hashPtr(hasher, mod.getTarget()),
+ else => val.hashPtr(hasher, mod),
},
.Array, .Vector => {
const len = ty.arrayLen();
@@ -2821,16 +2852,16 @@ pub const Value = extern union {
};
}
- fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, target: Target) void {
+ fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, mod: *const Module) void {
var buffer: BigIntSpace = undefined;
- const big = int_val.toBigInt(&buffer, target);
+ const big = int_val.toBigInt(&buffer, mod);
std.hash.autoHash(hasher, big.positive);
for (big.limbs) |limb| {
std.hash.autoHash(hasher, limb);
}
}
- fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, target: Target) void {
+ fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, mod: *const Module) void {
switch (ptr_val.tag()) {
.decl_ref,
.decl_ref_mut,
@@ -2847,25 +2878,25 @@ pub const Value = extern union {
.elem_ptr => {
const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
- hashPtr(elem_ptr.array_ptr, hasher, target);
+ hashPtr(elem_ptr.array_ptr, hasher, mod);
std.hash.autoHash(hasher, Value.Tag.elem_ptr);
std.hash.autoHash(hasher, elem_ptr.index);
},
.field_ptr => {
const field_ptr = ptr_val.castTag(.field_ptr).?.data;
std.hash.autoHash(hasher, Value.Tag.field_ptr);
- hashPtr(field_ptr.container_ptr, hasher, target);
+ hashPtr(field_ptr.container_ptr, hasher, mod);
std.hash.autoHash(hasher, field_ptr.field_index);
},
.eu_payload_ptr => {
const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
std.hash.autoHash(hasher, Value.Tag.eu_payload_ptr);
- hashPtr(err_union_ptr.container_ptr, hasher, target);
+ hashPtr(err_union_ptr.container_ptr, hasher, mod);
},
.opt_payload_ptr => {
const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
std.hash.autoHash(hasher, Value.Tag.opt_payload_ptr);
- hashPtr(opt_ptr.container_ptr, hasher, target);
+ hashPtr(opt_ptr.container_ptr, hasher, mod);
},
.zero,
@@ -2880,7 +2911,7 @@ pub const Value = extern union {
.the_only_possible_value,
.lazy_align,
.lazy_size,
- => return hashInt(ptr_val, hasher, target),
+ => return hashInt(ptr_val, hasher, mod),
else => unreachable,
}
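hashInt and hashPtr take the Module for the same reason: the case list above routes lazy_align/lazy_size values into hashInt, whose toBigInt call needs the Module to evaluate them. The public hash entry point already took the Module; a brief usage sketch (assumed, not from the diff):

var hasher = std.hash.Wyhash.init(0);
val.hash(ty, &hasher, mod); // dispatches to hashInt/hashPtr internally for ints and pointers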
@@ -2897,11 +2928,11 @@ pub const Value = extern union {
pub fn sliceLen(val: Value, mod: *Module) u64 {
return switch (val.tag()) {
- .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod.getTarget()),
+ .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod),
.decl_ref => {
const decl_index = val.castTag(.decl_ref).?.data;
const decl = mod.declPtr(decl_index);
- if (decl.ty.zigTypeTag() == .Array) {
+ if (decl.ty.zigTypeTag(mod) == .Array) {
return decl.ty.arrayLen();
} else {
return 1;
@@ -2910,7 +2941,7 @@ pub const Value = extern union {
.decl_ref_mut => {
const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index;
const decl = mod.declPtr(decl_index);
- if (decl.ty.zigTypeTag() == .Array) {
+ if (decl.ty.zigTypeTag(mod) == .Array) {
return decl.ty.arrayLen();
} else {
return 1;
@@ -2918,7 +2949,7 @@ pub const Value = extern union {
},
.comptime_field_ptr => {
const payload = val.castTag(.comptime_field_ptr).?.data;
- if (payload.field_ty.zigTypeTag() == .Array) {
+ if (payload.field_ty.zigTypeTag(mod) == .Array) {
return payload.field_ty.arrayLen();
} else {
return 1;
@@ -3003,7 +3034,7 @@ pub const Value = extern union {
if (data.container_ptr.pointerDecl()) |decl_index| {
const container_decl = mod.declPtr(decl_index);
const field_type = data.container_ty.structFieldType(data.field_index);
- const field_val = container_decl.val.fieldValue(field_type, data.field_index);
+ const field_val = container_decl.val.fieldValue(field_type, mod, data.field_index);
return field_val.elemValueAdvanced(mod, index, arena, buffer);
} else unreachable;
},
@@ -3032,10 +3063,7 @@ pub const Value = extern union {
}
/// Returns true if a Value is backed by a variable
- pub fn isVariable(
- val: Value,
- mod: *Module,
- ) bool {
+ pub fn isVariable(val: Value, mod: *Module) bool {
return switch (val.tag()) {
.slice => val.castTag(.slice).?.data.ptr.isVariable(mod),
.comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isVariable(mod),
@@ -3119,7 +3147,7 @@ pub const Value = extern union {
};
}
- pub fn fieldValue(val: Value, ty: Type, index: usize) Value {
+ pub fn fieldValue(val: Value, ty: Type, mod: *const Module, index: usize) Value {
switch (val.tag()) {
.aggregate => {
const field_values = val.castTag(.aggregate).?.data;
@@ -3131,14 +3159,14 @@ pub const Value = extern union {
return payload.val;
},
- .the_only_possible_value => return ty.onePossibleValue().?,
+ .the_only_possible_value => return ty.onePossibleValue(mod).?,
.empty_struct_value => {
if (ty.isSimpleTupleOrAnonStruct()) {
const tuple = ty.tupleFields();
return tuple.values[index];
}
- if (ty.structFieldValueComptime(index)) |some| {
+ if (ty.structFieldValueComptime(mod, index)) |some| {
return some;
}
unreachable;
@@ -3165,7 +3193,7 @@ pub const Value = extern union {
index: usize,
mod: *Module,
) Allocator.Error!Value {
- const elem_ty = ty.elemType2();
+ const elem_ty = ty.elemType2(mod);
const ptr_val = switch (val.tag()) {
.slice => val.castTag(.slice).?.data.ptr,
else => val,
@@ -3207,7 +3235,7 @@ pub const Value = extern union {
switch (self.tag()) {
.slice => {
const payload = self.castTag(.slice).?;
- const len = payload.data.len.toUnsignedInt(mod.getTarget());
+ const len = payload.data.len.toUnsignedInt(mod);
var elem_value_buf: ElemValueBuffer = undefined;
var i: usize = 0;
@@ -3233,7 +3261,7 @@ pub const Value = extern union {
/// Asserts the value is not undefined and not unreachable.
/// Integer value 0 is considered null because of C pointers.
- pub fn isNull(self: Value) bool {
+ pub fn isNull(self: Value, mod: *const Module) bool {
return switch (self.tag()) {
.null_value => true,
.opt_payload => false,
@@ -3254,7 +3282,7 @@ pub const Value = extern union {
.int_i64,
.int_big_positive,
.int_big_negative,
- => self.orderAgainstZero().compare(.eq),
+ => self.orderAgainstZero(mod).compare(.eq),
.undef => unreachable,
.unreachable_value => unreachable,
@@ -3300,8 +3328,8 @@ pub const Value = extern union {
}
/// Value of the optional, null if optional has no payload.
- pub fn optionalValue(val: Value) ?Value {
- if (val.isNull()) return null;
+ pub fn optionalValue(val: Value, mod: *const Module) ?Value {
+ if (val.isNull(mod)) return null;
// Valid for optional representation to be the direct value
// and not use opt_payload.
@@ -3333,20 +3361,20 @@ pub const Value = extern union {
}
pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value {
- const target = mod.getTarget();
- if (int_ty.zigTypeTag() == .Vector) {
+ if (int_ty.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, int_ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(), target, opt_sema);
+ scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(mod), mod, opt_sema);
}
return Value.Tag.aggregate.create(arena, result_data);
}
- return intToFloatScalar(val, arena, float_ty, target, opt_sema);
+ return intToFloatScalar(val, arena, float_ty, mod, opt_sema);
}
- pub fn intToFloatScalar(val: Value, arena: Allocator, float_ty: Type, target: Target, opt_sema: ?*Sema) !Value {
+ pub fn intToFloatScalar(val: Value, arena: Allocator, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value {
+ const target = mod.getTarget();
switch (val.tag()) {
.undef, .zero, .one => return val,
.the_only_possible_value => return Value.initTag(.zero), // for i0, u0
@@ -3369,17 +3397,17 @@ pub const Value = extern union {
.lazy_align => {
const ty = val.castTag(.lazy_align).?.data;
if (opt_sema) |sema| {
- return intToFloatInner((try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar, arena, float_ty, target);
+ return intToFloatInner((try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target);
} else {
- return intToFloatInner(ty.abiAlignment(target), arena, float_ty, target);
+ return intToFloatInner(ty.abiAlignment(mod), arena, float_ty, target);
}
},
.lazy_size => {
const ty = val.castTag(.lazy_size).?.data;
if (opt_sema) |sema| {
- return intToFloatInner((try ty.abiSizeAdvanced(target, .{ .sema = sema })).scalar, arena, float_ty, target);
+ return intToFloatInner((try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target);
} else {
- return intToFloatInner(ty.abiSize(target), arena, float_ty, target);
+ return intToFloatInner(ty.abiSize(mod), arena, float_ty, target);
}
},
else => unreachable,
@@ -3446,19 +3474,18 @@ pub const Value = extern union {
arena: Allocator,
mod: *Module,
) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
+ scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
}
return Value.Tag.aggregate.create(arena, result_data);
}
- return intAddSatScalar(lhs, rhs, ty, arena, target);
+ return intAddSatScalar(lhs, rhs, ty, arena, mod);
}
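intAddSat keeps the vector-vs-scalar split; only the parameter changes, since the scalar helper below needs the Module for intInfo and toBigInt. A hedged example of calling the scalar helper directly (lhs_val, rhs_val, and elem_ty are placeholder names):

// Saturating add of two comptime-known integers at element type elem_ty.
const sum = try Value.intAddSatScalar(lhs_val, rhs_val, elem_ty, arena, mod);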
/// Supports integers only; asserts neither operand is undefined.
@@ -3467,17 +3494,17 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
assert(!lhs.isUndef());
assert(!rhs.isUndef());
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -3495,19 +3522,18 @@ pub const Value = extern union {
arena: Allocator,
mod: *Module,
) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
+ scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
}
return Value.Tag.aggregate.create(arena, result_data);
}
- return intSubSatScalar(lhs, rhs, ty, arena, target);
+ return intSubSatScalar(lhs, rhs, ty, arena, mod);
}
/// Supports integers only; asserts neither operand is undefined.
@@ -3516,17 +3542,17 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
assert(!lhs.isUndef());
assert(!rhs.isUndef());
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -3543,8 +3569,7 @@ pub const Value = extern union {
arena: Allocator,
mod: *Module,
) !OverflowArithmeticResult {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const overflowed_data = try arena.alloc(Value, ty.vectorLen());
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
@@ -3552,7 +3577,7 @@ pub const Value = extern union {
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
+ const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
overflowed_data[i] = of_math_result.overflow_bit;
scalar.* = of_math_result.wrapped_result;
}
@@ -3561,7 +3586,7 @@ pub const Value = extern union {
.wrapped_result = try Value.Tag.aggregate.create(arena, result_data),
};
}
- return intMulWithOverflowScalar(lhs, rhs, ty, arena, target);
+ return intMulWithOverflowScalar(lhs, rhs, ty, arena, mod);
}
pub fn intMulWithOverflowScalar(
@@ -3569,14 +3594,14 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !OverflowArithmeticResult {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const limbs = try arena.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + rhs_bigint.limbs.len,
@@ -3607,14 +3632,14 @@ pub const Value = extern union {
arena: Allocator,
mod: *Module,
) !Value {
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod);
+ scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3631,7 +3656,7 @@ pub const Value = extern union {
) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
- if (ty.zigTypeTag() == .ComptimeInt) {
+ if (ty.zigTypeTag(mod) == .ComptimeInt) {
return intMul(lhs, rhs, ty, arena, mod);
}
@@ -3651,19 +3676,18 @@ pub const Value = extern union {
arena: Allocator,
mod: *Module,
) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
+ scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
}
return Value.Tag.aggregate.create(arena, result_data);
}
- return intMulSatScalar(lhs, rhs, ty, arena, target);
+ return intMulSatScalar(lhs, rhs, ty, arena, mod);
}
/// Supports (vectors of) integers only; asserts neither operand is undefined.
@@ -3672,17 +3696,17 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
assert(!lhs.isUndef());
assert(!rhs.isUndef());
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.max(
@@ -3702,24 +3726,24 @@ pub const Value = extern union {
}
/// Supports both floats and ints; handles undefined.
- pub fn numberMax(lhs: Value, rhs: Value, target: Target) Value {
+ pub fn numberMax(lhs: Value, rhs: Value, mod: *Module) Value {
if (lhs.isUndef() or rhs.isUndef()) return undef;
if (lhs.isNan()) return rhs;
if (rhs.isNan()) return lhs;
- return switch (order(lhs, rhs, target)) {
+ return switch (order(lhs, rhs, mod)) {
.lt => rhs,
.gt, .eq => lhs,
};
}
/// Supports both floats and ints; handles undefined.
- pub fn numberMin(lhs: Value, rhs: Value, target: Target) Value {
+ pub fn numberMin(lhs: Value, rhs: Value, mod: *Module) Value {
if (lhs.isUndef() or rhs.isUndef()) return undef;
if (lhs.isNan()) return rhs;
if (rhs.isNan()) return lhs;
- return switch (order(lhs, rhs, target)) {
+ return switch (order(lhs, rhs, mod)) {
.lt => lhs,
.gt, .eq => rhs,
};
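numberMax and numberMin route through order, which now takes the Module; a hedged one-line call sketch (names assumed, not part of the diff):

const widest = Value.numberMax(lhs_val, rhs_val, mod);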
@@ -3727,24 +3751,23 @@ pub const Value = extern union {
/// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(), arena, target);
+ scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(mod), arena, mod);
}
return Value.Tag.aggregate.create(arena, result_data);
}
- return bitwiseNotScalar(val, ty, arena, target);
+ return bitwiseNotScalar(val, ty, arena, mod);
}
/// operands must be integers; handles undefined.
- pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, target: Target) !Value {
+ pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
if (val.isUndef()) return Value.initTag(.undef);
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
if (info.bits == 0) {
return val;
@@ -3753,7 +3776,7 @@ pub const Value = extern union {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var val_space: Value.BigIntSpace = undefined;
- const val_bigint = val.toBigInt(&val_space, target);
+ const val_bigint = val.toBigInt(&val_space, mod);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -3766,31 +3789,30 @@ pub const Value = extern union {
/// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, allocator, target);
+ scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, allocator, mod);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
- return bitwiseAndScalar(lhs, rhs, allocator, target);
+ return bitwiseAndScalar(lhs, rhs, allocator, mod);
}
/// operands must be integers; handles undefined.
- pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value {
+ pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const limbs = try arena.alloc(
std.math.big.Limb,
// + 1 for negatives
@@ -3803,14 +3825,14 @@ pub const Value = extern union {
/// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod);
+ scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -3823,41 +3845,40 @@ pub const Value = extern union {
const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod);
- const all_ones = if (ty.isSignedInt())
+ const all_ones = if (ty.isSignedInt(mod))
try Value.Tag.int_i64.create(arena, -1)
else
- try ty.maxInt(arena, mod.getTarget());
+ try ty.maxInt(arena, mod);
return bitwiseXor(anded, all_ones, ty, arena, mod);
}
/// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, allocator, target);
+ scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, allocator, mod);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
- return bitwiseOrScalar(lhs, rhs, allocator, target);
+ return bitwiseOrScalar(lhs, rhs, allocator, mod);
}
/// operands must be integers; handles undefined.
- pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value {
+ pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
@@ -3869,31 +3890,30 @@ pub const Value = extern union {
/// operands must be (vectors of) integers; handles undefined scalars.
pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, allocator, target);
+ scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, allocator, mod);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
- return bitwiseXorScalar(lhs, rhs, allocator, target);
+ return bitwiseXorScalar(lhs, rhs, allocator, mod);
}
/// operands must be integers; handles undefined.
- pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value {
+ pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const limbs = try arena.alloc(
std.math.big.Limb,
// + 1 for negatives
@@ -3905,28 +3925,27 @@ pub const Value = extern union {
}
pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try intDivScalar(lhs_elem, rhs_elem, allocator, target);
+ scalar.* = try intDivScalar(lhs_elem, rhs_elem, allocator, mod);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
- return intDivScalar(lhs, rhs, allocator, target);
+ return intDivScalar(lhs, rhs, allocator, mod);
}
- pub fn intDivScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
+ pub fn intDivScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len,
@@ -3946,28 +3965,27 @@ pub const Value = extern union {
}
pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, allocator, target);
+ scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, allocator, mod);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
- return intDivFloorScalar(lhs, rhs, allocator, target);
+ return intDivFloorScalar(lhs, rhs, allocator, mod);
}
- pub fn intDivFloorScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
+ pub fn intDivFloorScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len,
@@ -3987,28 +4005,27 @@ pub const Value = extern union {
}
pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try intModScalar(lhs_elem, rhs_elem, allocator, target);
+ scalar.* = try intModScalar(lhs_elem, rhs_elem, allocator, mod);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
- return intModScalar(lhs, rhs, allocator, target);
+ return intModScalar(lhs, rhs, allocator, mod);
}
- pub fn intModScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
+ pub fn intModScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len,
@@ -4064,14 +4081,14 @@ pub const Value = extern union {
pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
+ scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4111,14 +4128,14 @@ pub const Value = extern union {
pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
+ scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4157,28 +4174,27 @@ pub const Value = extern union {
}
pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try intMulScalar(lhs_elem, rhs_elem, allocator, target);
+ scalar.* = try intMulScalar(lhs_elem, rhs_elem, allocator, mod);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
- return intMulScalar(lhs, rhs, allocator, target);
+ return intMulScalar(lhs, rhs, allocator, mod);
}
- pub fn intMulScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
+ pub fn intMulScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+ const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+ const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
const limbs = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + rhs_bigint.limbs.len,
@@ -4194,17 +4210,16 @@ pub const Value = extern union {
}
pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, target);
+ scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, mod);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
- return intTruncScalar(val, allocator, signedness, bits, target);
+ return intTruncScalar(val, allocator, signedness, bits, mod);
}
/// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`.
@@ -4216,26 +4231,25 @@ pub const Value = extern union {
bits: Value,
mod: *Module,
) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
var bits_buf: Value.ElemValueBuffer = undefined;
const bits_elem = bits.elemValueBuffer(mod, i, &bits_buf);
- scalar.* = try intTruncScalar(elem_val, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(target)), target);
+ scalar.* = try intTruncScalar(elem_val, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
- return intTruncScalar(val, allocator, signedness, @intCast(u16, bits.toUnsignedInt(target)), target);
+ return intTruncScalar(val, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod);
}
- pub fn intTruncScalar(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, target: Target) !Value {
+ pub fn intTruncScalar(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value {
if (bits == 0) return Value.zero;
var val_space: Value.BigIntSpace = undefined;
- const val_bigint = val.toBigInt(&val_space, target);
+ const val_bigint = val.toBigInt(&val_space, mod);
const limbs = try allocator.alloc(
std.math.big.Limb,
@@ -4248,27 +4262,26 @@ pub const Value = extern union {
}
pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try shlScalar(lhs_elem, rhs_elem, allocator, target);
+ scalar.* = try shlScalar(lhs_elem, rhs_elem, allocator, mod);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
- return shlScalar(lhs, rhs, allocator, target);
+ return shlScalar(lhs, rhs, allocator, mod);
}
- pub fn shlScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
+ pub fn shlScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const shift = @intCast(usize, rhs.toUnsignedInt(target));
+ const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+ const shift = @intCast(usize, rhs.toUnsignedInt(mod));
const limbs = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
@@ -4289,8 +4302,7 @@ pub const Value = extern union {
allocator: Allocator,
mod: *Module,
) !OverflowArithmeticResult {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const overflowed_data = try allocator.alloc(Value, ty.vectorLen());
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
@@ -4298,7 +4310,7 @@ pub const Value = extern union {
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), allocator, target);
+ const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod);
overflowed_data[i] = of_math_result.overflow_bit;
scalar.* = of_math_result.wrapped_result;
}
@@ -4307,7 +4319,7 @@ pub const Value = extern union {
.wrapped_result = try Value.Tag.aggregate.create(allocator, result_data),
};
}
- return shlWithOverflowScalar(lhs, rhs, ty, allocator, target);
+ return shlWithOverflowScalar(lhs, rhs, ty, allocator, mod);
}
pub fn shlWithOverflowScalar(
@@ -4315,12 +4327,12 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
allocator: Allocator,
- target: Target,
+ mod: *Module,
) !OverflowArithmeticResult {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
var lhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const shift = @intCast(usize, rhs.toUnsignedInt(target));
+ const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+ const shift = @intCast(usize, rhs.toUnsignedInt(mod));
const limbs = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
@@ -4348,19 +4360,18 @@ pub const Value = extern union {
arena: Allocator,
mod: *Module,
) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
+ scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
}
return Value.Tag.aggregate.create(arena, result_data);
}
- return shlSatScalar(lhs, rhs, ty, arena, target);
+ return shlSatScalar(lhs, rhs, ty, arena, mod);
}
pub fn shlSatScalar(
@@ -4368,15 +4379,15 @@ pub const Value = extern union {
rhs: Value,
ty: Type,
arena: Allocator,
- target: Target,
+ mod: *Module,
) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
var lhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const shift = @intCast(usize, rhs.toUnsignedInt(target));
+ const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+ const shift = @intCast(usize, rhs.toUnsignedInt(mod));
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits) + 1,
@@ -4397,14 +4408,14 @@ pub const Value = extern union {
arena: Allocator,
mod: *Module,
) !Value {
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod);
+ scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4419,33 +4430,32 @@ pub const Value = extern union {
mod: *Module,
) !Value {
const shifted = try lhs.shl(rhs, ty, arena, mod);
- const int_info = ty.intInfo(mod.getTarget());
+ const int_info = ty.intInfo(mod);
const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, mod);
return truncated;
}
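The shift helpers take the Module as well; the shift amount is now read with rhs.toUnsignedInt(mod). A hedged call sketch (lhs_val and shift_val are placeholder names):

const shifted_val = try Value.shlScalar(lhs_val, shift_val, arena, mod);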
pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
- const target = mod.getTarget();
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try shrScalar(lhs_elem, rhs_elem, allocator, target);
+ scalar.* = try shrScalar(lhs_elem, rhs_elem, allocator, mod);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
- return shrScalar(lhs, rhs, allocator, target);
+ return shrScalar(lhs, rhs, allocator, mod);
}
- pub fn shrScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
+ pub fn shrScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space, target);
- const shift = @intCast(usize, rhs.toUnsignedInt(target));
+ const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+ const shift = @intCast(usize, rhs.toUnsignedInt(mod));
const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8));
if (result_limbs == 0) {
@@ -4478,12 +4488,12 @@ pub const Value = extern union {
mod: *Module,
) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try floatNegScalar(elem_val, float_type.scalarType(), arena, target);
+ scalar.* = try floatNegScalar(elem_val, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4514,14 +4524,14 @@ pub const Value = extern union {
mod: *Module,
) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
+ scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4573,14 +4583,14 @@ pub const Value = extern union {
mod: *Module,
) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
+ scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4632,14 +4642,14 @@ pub const Value = extern union {
mod: *Module,
) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
+ scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4691,14 +4701,14 @@ pub const Value = extern union {
mod: *Module,
) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var lhs_buf: Value.ElemValueBuffer = undefined;
var rhs_buf: Value.ElemValueBuffer = undefined;
const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
+ scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4744,12 +4754,12 @@ pub const Value = extern union {
pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try sqrtScalar(elem_val, float_type.scalarType(), arena, target);
+ scalar.* = try sqrtScalar(elem_val, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4784,12 +4794,12 @@ pub const Value = extern union {
pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try sinScalar(elem_val, float_type.scalarType(), arena, target);
+ scalar.* = try sinScalar(elem_val, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4824,12 +4834,12 @@ pub const Value = extern union {
pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try cosScalar(elem_val, float_type.scalarType(), arena, target);
+ scalar.* = try cosScalar(elem_val, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4864,12 +4874,12 @@ pub const Value = extern union {
pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try tanScalar(elem_val, float_type.scalarType(), arena, target);
+ scalar.* = try tanScalar(elem_val, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4904,12 +4914,12 @@ pub const Value = extern union {
pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try expScalar(elem_val, float_type.scalarType(), arena, target);
+ scalar.* = try expScalar(elem_val, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4944,12 +4954,12 @@ pub const Value = extern union {
pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try exp2Scalar(elem_val, float_type.scalarType(), arena, target);
+ scalar.* = try exp2Scalar(elem_val, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -4984,12 +4994,12 @@ pub const Value = extern union {
pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try logScalar(elem_val, float_type.scalarType(), arena, target);
+ scalar.* = try logScalar(elem_val, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -5024,12 +5034,12 @@ pub const Value = extern union {
pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try log2Scalar(elem_val, float_type.scalarType(), arena, target);
+ scalar.* = try log2Scalar(elem_val, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -5064,12 +5074,12 @@ pub const Value = extern union {
pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try log10Scalar(elem_val, float_type.scalarType(), arena, target);
+ scalar.* = try log10Scalar(elem_val, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -5104,12 +5114,12 @@ pub const Value = extern union {
pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try fabsScalar(elem_val, float_type.scalarType(), arena, target);
+ scalar.* = try fabsScalar(elem_val, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -5144,12 +5154,12 @@ pub const Value = extern union {
pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try floorScalar(elem_val, float_type.scalarType(), arena, target);
+ scalar.* = try floorScalar(elem_val, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -5184,12 +5194,12 @@ pub const Value = extern union {
pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try ceilScalar(elem_val, float_type.scalarType(), arena, target);
+ scalar.* = try ceilScalar(elem_val, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -5224,12 +5234,12 @@ pub const Value = extern union {
pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try roundScalar(elem_val, float_type.scalarType(), arena, target);
+ scalar.* = try roundScalar(elem_val, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -5264,12 +5274,12 @@ pub const Value = extern union {
pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf);
- scalar.* = try truncScalar(elem_val, float_type.scalarType(), arena, target);
+ scalar.* = try truncScalar(elem_val, float_type.scalarType(mod), arena, target);
}
return Value.Tag.aggregate.create(arena, result_data);
}
@@ -5311,7 +5321,7 @@ pub const Value = extern union {
mod: *Module,
) !Value {
const target = mod.getTarget();
- if (float_type.zigTypeTag() == .Vector) {
+ if (float_type.zigTypeTag(mod) == .Vector) {
const result_data = try arena.alloc(Value, float_type.vectorLen());
for (result_data, 0..) |*scalar, i| {
var mulend1_buf: Value.ElemValueBuffer = undefined;
@@ -5321,7 +5331,7 @@ pub const Value = extern union {
var addend_buf: Value.ElemValueBuffer = undefined;
const addend_elem = addend.elemValueBuffer(mod, i, &addend_buf);
scalar.* = try mulAddScalar(
- float_type.scalarType(),
+ float_type.scalarType(mod),
mulend1_elem,
mulend2_elem,
addend_elem,
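Each lane of the vector case above is reduced with mulAddScalar; per element this is a fused multiply-add for the given float width. A standalone illustration of that operation using Zig's @mulAdd builtin (not the compiler's own scalar helper):

const std = @import("std");

test "fused multiply-add per element" {
    const mulend1: f64 = 2.0;
    const mulend2: f64 = 3.0;
    const addend: f64 = 0.5;
    // @mulAdd computes (mulend1 * mulend2) + addend with a single rounding step.
    try std.testing.expectEqual(@as(f64, 6.5), @mulAdd(f64, mulend1, mulend2, addend));
}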
@@ -5380,8 +5390,7 @@ pub const Value = extern union {
/// If the value is represented in-memory as a series of bytes that all
/// have the same value, return that byte value, otherwise null.
pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module, value_buffer: *Payload.U64) !?Value {
- const target = mod.getTarget();
- const abi_size = std.math.cast(usize, ty.abiSize(target)) orelse return null;
+ const abi_size = std.math.cast(usize, ty.abiSize(mod)) orelse return null;
assert(abi_size >= 1);
const byte_buffer = try mod.gpa.alloc(u8, abi_size);
defer mod.gpa.free(byte_buffer);
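ty.abiSize(mod) reports a size that may not fit in usize on 32-bit hosts; std.math.cast returns null instead of truncating or panicking, so hasRepeatedByteRepr simply gives up on such types. A quick standalone illustration of that checked cast:

const std = @import("std");

test "checked cast returns null on overflow" {
    try std.testing.expectEqual(@as(?u8, 200), std.math.cast(u8, @as(u64, 200)));
    try std.testing.expectEqual(@as(?u8, null), std.math.cast(u8, @as(u64, 300)));
}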
@@ -5549,16 +5558,6 @@ pub const Value = extern union {
data: Type,
};
- pub const IntType = struct {
- pub const base_tag = Tag.int_type;
-
- base: Payload = Payload{ .tag = base_tag },
- data: struct {
- bits: u16,
- signed: bool,
- },
- };
-
pub const Float_16 = struct {
pub const base_tag = Tag.float_16;
@@ -5659,7 +5658,10 @@ pub const Value = extern union {
pub const zero = initTag(.zero);
pub const one = initTag(.one);
- pub const negative_one: Value = .{ .ptr_otherwise = &negative_one_payload.base };
+ pub const negative_one: Value = .{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &negative_one_payload.base },
+ };
pub const undef = initTag(.undef);
pub const @"void" = initTag(.void_value);
pub const @"null" = initTag(.null_value);
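The new negative_one initializer hints at the wider change this commit makes to Value: each value now carries an InternPool index alongside the legacy tagged-payload pointer, with .none marking values that still live in the legacy representation. A rough sketch of the layout that initializer implies; the field names come from the hunk above, everything else is an assumption (the real definition lives elsewhere in the commit):

// Sketch only: the shape suggested by the negative_one initializer above.
pub const ValueSketch = struct {
    // .none means the value is still represented by the legacy payload below.
    ip_index: InternPool.Index,
    legacy: extern union {
        ptr_otherwise: *Payload,
    },
};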
src/Zir.zig
@@ -19,6 +19,7 @@ const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Ast = std.zig.Ast;
+const InternPool = @import("InternPool.zig");
const Zir = @This();
const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
@@ -2041,448 +2042,95 @@ pub const Inst = struct {
/// The position of a ZIR instruction within the `Zir` instructions array.
pub const Index = u32;
- /// A reference to a TypedValue or ZIR instruction.
+ /// A reference to a ZIR instruction, or to an InternPool index, or neither.
///
- /// If the Ref has a tag in this enum, it refers to a TypedValue.
- ///
- /// If the value of a Ref does not have a tag, it refers to a ZIR instruction.
- ///
- /// The first values after the the last tag refer to ZIR instructions which may
- /// be derived by subtracting `typed_value_map.len`.
- ///
- /// When adding a tag to this enum, consider adding a corresponding entry to
- /// `primitives` in astgen.
+ /// If the integer tag value is < InternPool.static_len, then it
+ /// corresponds to an InternPool index. Otherwise, this refers to a ZIR
+ /// instruction.
///
/// The tag type is specified so that it is safe to bitcast between `[]u32`
/// and `[]Ref`.
pub const Ref = enum(u32) {
+ u1_type = @enumToInt(InternPool.Index.u1_type),
+ u8_type = @enumToInt(InternPool.Index.u8_type),
+ i8_type = @enumToInt(InternPool.Index.i8_type),
+ u16_type = @enumToInt(InternPool.Index.u16_type),
+ i16_type = @enumToInt(InternPool.Index.i16_type),
+ u29_type = @enumToInt(InternPool.Index.u29_type),
+ u32_type = @enumToInt(InternPool.Index.u32_type),
+ i32_type = @enumToInt(InternPool.Index.i32_type),
+ u64_type = @enumToInt(InternPool.Index.u64_type),
+ i64_type = @enumToInt(InternPool.Index.i64_type),
+ u80_type = @enumToInt(InternPool.Index.u80_type),
+ u128_type = @enumToInt(InternPool.Index.u128_type),
+ i128_type = @enumToInt(InternPool.Index.i128_type),
+ usize_type = @enumToInt(InternPool.Index.usize_type),
+ isize_type = @enumToInt(InternPool.Index.isize_type),
+ c_char_type = @enumToInt(InternPool.Index.c_char_type),
+ c_short_type = @enumToInt(InternPool.Index.c_short_type),
+ c_ushort_type = @enumToInt(InternPool.Index.c_ushort_type),
+ c_int_type = @enumToInt(InternPool.Index.c_int_type),
+ c_uint_type = @enumToInt(InternPool.Index.c_uint_type),
+ c_long_type = @enumToInt(InternPool.Index.c_long_type),
+ c_ulong_type = @enumToInt(InternPool.Index.c_ulong_type),
+ c_longlong_type = @enumToInt(InternPool.Index.c_longlong_type),
+ c_ulonglong_type = @enumToInt(InternPool.Index.c_ulonglong_type),
+ c_longdouble_type = @enumToInt(InternPool.Index.c_longdouble_type),
+ f16_type = @enumToInt(InternPool.Index.f16_type),
+ f32_type = @enumToInt(InternPool.Index.f32_type),
+ f64_type = @enumToInt(InternPool.Index.f64_type),
+ f80_type = @enumToInt(InternPool.Index.f80_type),
+ f128_type = @enumToInt(InternPool.Index.f128_type),
+ anyopaque_type = @enumToInt(InternPool.Index.anyopaque_type),
+ bool_type = @enumToInt(InternPool.Index.bool_type),
+ void_type = @enumToInt(InternPool.Index.void_type),
+ type_type = @enumToInt(InternPool.Index.type_type),
+ anyerror_type = @enumToInt(InternPool.Index.anyerror_type),
+ comptime_int_type = @enumToInt(InternPool.Index.comptime_int_type),
+ comptime_float_type = @enumToInt(InternPool.Index.comptime_float_type),
+ noreturn_type = @enumToInt(InternPool.Index.noreturn_type),
+ anyframe_type = @enumToInt(InternPool.Index.anyframe_type),
+ null_type = @enumToInt(InternPool.Index.null_type),
+ undefined_type = @enumToInt(InternPool.Index.undefined_type),
+ enum_literal_type = @enumToInt(InternPool.Index.enum_literal_type),
+ atomic_order_type = @enumToInt(InternPool.Index.atomic_order_type),
+ atomic_rmw_op_type = @enumToInt(InternPool.Index.atomic_rmw_op_type),
+ calling_convention_type = @enumToInt(InternPool.Index.calling_convention_type),
+ address_space_type = @enumToInt(InternPool.Index.address_space_type),
+ float_mode_type = @enumToInt(InternPool.Index.float_mode_type),
+ reduce_op_type = @enumToInt(InternPool.Index.reduce_op_type),
+ call_modifier_type = @enumToInt(InternPool.Index.call_modifier_type),
+ prefetch_options_type = @enumToInt(InternPool.Index.prefetch_options_type),
+ export_options_type = @enumToInt(InternPool.Index.export_options_type),
+ extern_options_type = @enumToInt(InternPool.Index.extern_options_type),
+ type_info_type = @enumToInt(InternPool.Index.type_info_type),
+ manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type),
+ manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type),
+ single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type),
+ const_slice_u8_type = @enumToInt(InternPool.Index.const_slice_u8_type),
+ anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type),
+ generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type),
+ var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type),
+ empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type),
+ undef = @enumToInt(InternPool.Index.undef),
+ zero = @enumToInt(InternPool.Index.zero),
+ zero_usize = @enumToInt(InternPool.Index.zero_usize),
+ one = @enumToInt(InternPool.Index.one),
+ one_usize = @enumToInt(InternPool.Index.one_usize),
+ calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c),
+ calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline),
+ void_value = @enumToInt(InternPool.Index.void_value),
+ unreachable_value = @enumToInt(InternPool.Index.unreachable_value),
+ null_value = @enumToInt(InternPool.Index.null_value),
+ bool_true = @enumToInt(InternPool.Index.bool_true),
+ bool_false = @enumToInt(InternPool.Index.bool_false),
+ empty_struct = @enumToInt(InternPool.Index.empty_struct),
+ generic_poison = @enumToInt(InternPool.Index.generic_poison),
+
/// This Ref does not correspond to any ZIR instruction or constant
/// value and may instead be used as a sentinel to indicate null.
- none,
-
- u1_type,
- u8_type,
- i8_type,
- u16_type,
- i16_type,
- u29_type,
- u32_type,
- i32_type,
- u64_type,
- i64_type,
- u128_type,
- i128_type,
- usize_type,
- isize_type,
- c_char_type,
- c_short_type,
- c_ushort_type,
- c_int_type,
- c_uint_type,
- c_long_type,
- c_ulong_type,
- c_longlong_type,
- c_ulonglong_type,
- c_longdouble_type,
- f16_type,
- f32_type,
- f64_type,
- f80_type,
- f128_type,
- anyopaque_type,
- bool_type,
- void_type,
- type_type,
- anyerror_type,
- comptime_int_type,
- comptime_float_type,
- noreturn_type,
- anyframe_type,
- null_type,
- undefined_type,
- enum_literal_type,
- atomic_order_type,
- atomic_rmw_op_type,
- calling_convention_type,
- address_space_type,
- float_mode_type,
- reduce_op_type,
- modifier_type,
- prefetch_options_type,
- export_options_type,
- extern_options_type,
- type_info_type,
- manyptr_u8_type,
- manyptr_const_u8_type,
- fn_noreturn_no_args_type,
- fn_void_no_args_type,
- fn_naked_noreturn_no_args_type,
- fn_ccc_void_no_args_type,
- single_const_pointer_to_comptime_int_type,
- const_slice_u8_type,
- anyerror_void_error_union_type,
- generic_poison_type,
-
- /// `undefined` (untyped)
- undef,
- /// `0` (comptime_int)
- zero,
- /// `1` (comptime_int)
- one,
- /// `{}`
- void_value,
- /// `unreachable` (noreturn type)
- unreachable_value,
- /// `null` (untyped)
- null_value,
- /// `true`
- bool_true,
- /// `false`
- bool_false,
- /// `.{}` (untyped)
- empty_struct,
- /// `0` (usize)
- zero_usize,
- /// `1` (usize)
- one_usize,
- /// `std.builtin.CallingConvention.C`
- calling_convention_c,
- /// `std.builtin.CallingConvention.Inline`
- calling_convention_inline,
- /// Used for generic parameters where the type and value
- /// is not known until generic function instantiation.
- generic_poison,
- /// This is a special type for variadic parameters of a function call.
- /// Casts to it will validate that the type can be passed to a c
- /// calling convention function.
- var_args_param,
-
+ none = std.math.maxInt(u32),
_,
-
- pub const typed_value_map = std.enums.directEnumArray(Ref, TypedValue, 0, .{
- .none = undefined,
-
- .u1_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.u1_type),
- },
- .u8_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.u8_type),
- },
- .i8_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.i8_type),
- },
- .u16_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.u16_type),
- },
- .i16_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.i16_type),
- },
- .u29_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.u29_type),
- },
- .u32_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.u32_type),
- },
- .i32_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.i32_type),
- },
- .u64_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.u64_type),
- },
- .i64_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.i64_type),
- },
- .u128_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.u128_type),
- },
- .i128_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.i128_type),
- },
- .usize_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.usize_type),
- },
- .isize_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.isize_type),
- },
- .c_char_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_char_type),
- },
- .c_short_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_short_type),
- },
- .c_ushort_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_ushort_type),
- },
- .c_int_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_int_type),
- },
- .c_uint_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_uint_type),
- },
- .c_long_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_long_type),
- },
- .c_ulong_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_ulong_type),
- },
- .c_longlong_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_longlong_type),
- },
- .c_ulonglong_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_ulonglong_type),
- },
- .c_longdouble_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.c_longdouble_type),
- },
- .f16_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.f16_type),
- },
- .f32_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.f32_type),
- },
- .f64_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.f64_type),
- },
- .f80_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.f80_type),
- },
- .f128_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.f128_type),
- },
- .anyopaque_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.anyopaque_type),
- },
- .bool_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.bool_type),
- },
- .void_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.void_type),
- },
- .type_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.type_type),
- },
- .anyerror_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.anyerror_type),
- },
- .comptime_int_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.comptime_int_type),
- },
- .comptime_float_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.comptime_float_type),
- },
- .noreturn_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.noreturn_type),
- },
- .anyframe_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.anyframe_type),
- },
- .null_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.null_type),
- },
- .undefined_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.undefined_type),
- },
- .fn_noreturn_no_args_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.fn_noreturn_no_args_type),
- },
- .fn_void_no_args_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.fn_void_no_args_type),
- },
- .fn_naked_noreturn_no_args_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.fn_naked_noreturn_no_args_type),
- },
- .fn_ccc_void_no_args_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.fn_ccc_void_no_args_type),
- },
- .single_const_pointer_to_comptime_int_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.single_const_pointer_to_comptime_int_type),
- },
- .const_slice_u8_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.const_slice_u8_type),
- },
- .anyerror_void_error_union_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.anyerror_void_error_union_type),
- },
- .generic_poison_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.generic_poison_type),
- },
- .enum_literal_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.enum_literal_type),
- },
- .manyptr_u8_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.manyptr_u8_type),
- },
- .manyptr_const_u8_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.manyptr_const_u8_type),
- },
- .atomic_order_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.atomic_order_type),
- },
- .atomic_rmw_op_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.atomic_rmw_op_type),
- },
- .calling_convention_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.calling_convention_type),
- },
- .address_space_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.address_space_type),
- },
- .float_mode_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.float_mode_type),
- },
- .reduce_op_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.reduce_op_type),
- },
- .modifier_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.modifier_type),
- },
- .prefetch_options_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.prefetch_options_type),
- },
- .export_options_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.export_options_type),
- },
- .extern_options_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.extern_options_type),
- },
- .type_info_type = .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.type_info_type),
- },
-
- .undef = .{
- .ty = Type.initTag(.undefined),
- .val = Value.initTag(.undef),
- },
- .zero = .{
- .ty = Type.initTag(.comptime_int),
- .val = Value.initTag(.zero),
- },
- .zero_usize = .{
- .ty = Type.initTag(.usize),
- .val = Value.initTag(.zero),
- },
- .one = .{
- .ty = Type.initTag(.comptime_int),
- .val = Value.initTag(.one),
- },
- .one_usize = .{
- .ty = Type.initTag(.usize),
- .val = Value.initTag(.one),
- },
- .void_value = .{
- .ty = Type.initTag(.void),
- .val = Value.initTag(.void_value),
- },
- .unreachable_value = .{
- .ty = Type.initTag(.noreturn),
- .val = Value.initTag(.unreachable_value),
- },
- .null_value = .{
- .ty = Type.initTag(.null),
- .val = Value.initTag(.null_value),
- },
- .bool_true = .{
- .ty = Type.initTag(.bool),
- .val = Value.initTag(.bool_true),
- },
- .bool_false = .{
- .ty = Type.initTag(.bool),
- .val = Value.initTag(.bool_false),
- },
- .empty_struct = .{
- .ty = Type.initTag(.empty_struct_literal),
- .val = Value.initTag(.empty_struct_value),
- },
- .calling_convention_c = .{
- .ty = Type.initTag(.calling_convention),
- .val = .{ .ptr_otherwise = &calling_convention_c_payload.base },
- },
- .calling_convention_inline = .{
- .ty = Type.initTag(.calling_convention),
- .val = .{ .ptr_otherwise = &calling_convention_inline_payload.base },
- },
- .generic_poison = .{
- .ty = Type.initTag(.generic_poison),
- .val = Value.initTag(.generic_poison),
- },
- .var_args_param = undefined,
- });
- };
-
- /// We would like this to be const but `Value` wants a mutable pointer for
- /// its payload field. Nothing should mutate this though.
- var calling_convention_c_payload: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @enumToInt(std.builtin.CallingConvention.C),
- };
-
- /// We would like this to be const but `Value` wants a mutable pointer for
- /// its payload field. Nothing should mutate this though.
- var calling_convention_inline_payload: Value.Payload.U32 = .{
- .base = .{ .tag = .enum_field_index },
- .data = @enumToInt(std.builtin.CallingConvention.Inline),
};
/// All instructions have an 8-byte payload, which is contained within
@@ -4163,7 +3811,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
};
}
-const ref_start_index: u32 = Inst.Ref.typed_value_map.len;
+const ref_start_index: u32 = InternPool.static_len;
pub fn indexToRef(inst: Inst.Index) Inst.Ref {
return @intToEnum(Inst.Ref, ref_start_index + inst);
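indexToRef above biases instruction indices past the reserved InternPool range; going the other way is the inverse subtraction plus a check that the tag is not one of the reserved values. A hedged sketch of what that inverse looks like under the new scheme (the real refToIndex in this file may differ in details):

// Sketch of the inverse mapping implied by the new layout: tags below
// InternPool.static_len name interned constants, .none is the maxInt(u32)
// sentinel, and everything in between encodes a ZIR instruction index.
pub fn refToIndexSketch(inst: Inst.Ref) ?Inst.Index {
    if (inst == .none) return null;
    const ref_int = @enumToInt(inst);
    if (ref_int >= ref_start_index) {
        return ref_int - ref_start_index;
    }
    return null; // an InternPool constant, not an instruction
}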