Commit 18608223ef
Changed files (32)
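This commit is a mechanical API migration across the files below: every call site that converted an `InternPool.Index` to a `Type` or `Value` through the index methods `.toType()` / `.toValue()` now goes through the static constructors `Type.fromInterned(...)` / `Value.fromInterned(...)`, spelling the conversion direction at the call site. A minimal sketch of the shape of that API, using simplified stand-in types (the real `InternPool.Index`, `Type`, and `Value` carry far more machinery):

```zig
const InternPool = struct {
    pub const Index = enum(u32) { _ };
};

const Type = struct {
    ip_index: InternPool.Index,

    // The constructor this commit migrates call sites onto.
    pub fn fromInterned(i: InternPool.Index) Type {
        return .{ .ip_index = i };
    }
};

const Value = struct {
    ip_index: InternPool.Index,

    pub fn fromInterned(i: InternPool.Index) Value {
        return .{ .ip_index = i };
    }
};

test "before/after shape of the migration" {
    const field_ty: InternPool.Index = @enumFromInt(42);
    // Before: `field_ty.toType()` (a method on InternPool.Index).
    // After:  the wrapper type names the conversion explicitly.
    const ty = Type.fromInterned(field_ty);
    _ = ty;
}
```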
src/arch/aarch64/abi.zig
@@ -83,7 +83,7 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 {
const union_obj = mod.typeToUnion(ty).?;
var max_count: u8 = 0;
for (union_obj.field_types.get(ip)) |field_ty| {
- const field_count = countFloats(field_ty.toType(), mod, maybe_float_bits);
+ const field_count = countFloats(Type.fromInterned(field_ty), mod, maybe_float_bits);
if (field_count == invalid) return invalid;
if (field_count > max_count) max_count = field_count;
if (max_count > sret_float_count) return invalid;
@@ -122,7 +122,7 @@ pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type {
.Union => {
const union_obj = mod.typeToUnion(ty).?;
for (union_obj.field_types.get(ip)) |field_ty| {
- if (getFloatArrayType(field_ty.toType(), mod)) |some| return some;
+ if (getFloatArrayType(Type.fromInterned(field_ty), mod)) |some| return some;
}
return null;
},
src/arch/aarch64/CodeGen.zig
@@ -6245,7 +6245,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
- const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
+ const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(mod)));
if (param_size == 0) {
result_arg.* = .{ .none = {} };
continue;
@@ -6253,14 +6253,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
// We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
// values to spread across odd-numbered registers.
- if (ty.toType().abiAlignment(mod) == .@"16" and !self.target.isDarwin()) {
+ if (Type.fromInterned(ty).abiAlignment(mod) == .@"16" and !self.target.isDarwin()) {
// Round up NCRN to the next even number
ncrn += ncrn % 2;
}
if (std.math.divCeil(u32, param_size, 8) catch unreachable <= 8 - ncrn) {
if (param_size <= 8) {
- result_arg.* = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty.toType()) };
+ result_arg.* = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], Type.fromInterned(ty)) };
ncrn += 1;
} else {
return self.fail("TODO MCValues with multiple registers", .{});
@@ -6271,7 +6271,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
ncrn = 8;
// TODO Apple allows the arguments on the stack to be non-8-byte aligned provided
// that the entire stack space consumed by the arguments is 8-byte aligned.
- if (ty.toType().abiAlignment(mod) == .@"8") {
+ if (Type.fromInterned(ty).abiAlignment(mod) == .@"8") {
if (nsaa % 8 != 0) {
nsaa += 8 - (nsaa % 8);
}
@@ -6310,9 +6310,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
- if (ty.toType().abiSize(mod) > 0) {
- const param_size: u32 = @intCast(ty.toType().abiSize(mod));
- const param_alignment = ty.toType().abiAlignment(mod);
+ if (Type.fromInterned(ty).abiSize(mod) > 0) {
+ const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(mod));
+ const param_alignment = Type.fromInterned(ty).abiAlignment(mod);
stack_offset = @intCast(param_alignment.forward(stack_offset));
result_arg.* = .{ .stack_argument_offset = stack_offset };
src/arch/arm/abi.zig
@@ -65,7 +65,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
if (float_count <= byval_float_count) return .byval;
for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
- if (field_ty.toType().bitSize(mod) > 32 or
+ if (Type.fromInterned(field_ty).bitSize(mod) > 32 or
mod.unionFieldNormalAlignment(union_obj, @intCast(field_index)).compare(.gt, .@"32"))
{
return Class.arrSize(bit_size, 64);
@@ -129,7 +129,7 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 {
const union_obj = mod.typeToUnion(ty).?;
var max_count: u32 = 0;
for (union_obj.field_types.get(ip)) |field_ty| {
- const field_count = countFloats(field_ty.toType(), mod, maybe_float_bits);
+ const field_count = countFloats(Type.fromInterned(field_ty), mod, maybe_float_bits);
if (field_count == invalid) return invalid;
if (field_count > max_count) max_count = field_count;
if (max_count > byval_float_count) return invalid;
src/arch/arm/CodeGen.zig
@@ -6209,10 +6209,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
- if (ty.toType().abiAlignment(mod) == .@"8")
+ if (Type.fromInterned(ty).abiAlignment(mod) == .@"8")
ncrn = std.mem.alignForward(usize, ncrn, 2);
- const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
+ const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(mod)));
if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) {
if (param_size <= 4) {
result_arg.* = .{ .register = c_abi_int_param_regs[ncrn] };
@@ -6224,7 +6224,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
return self.fail("TODO MCValues split between registers and stack", .{});
} else {
ncrn = 4;
- if (ty.toType().abiAlignment(mod) == .@"8")
+ if (Type.fromInterned(ty).abiAlignment(mod) == .@"8")
nsaa = std.mem.alignForward(u32, nsaa, 8);
result_arg.* = .{ .stack_argument_offset = nsaa };
@@ -6259,9 +6259,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var stack_offset: u32 = 0;
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
- if (ty.toType().abiSize(mod) > 0) {
- const param_size: u32 = @intCast(ty.toType().abiSize(mod));
- const param_alignment = ty.toType().abiAlignment(mod);
+ if (Type.fromInterned(ty).abiSize(mod) > 0) {
+ const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(mod));
+ const param_alignment = Type.fromInterned(ty).abiAlignment(mod);
stack_offset = @intCast(param_alignment.forward(stack_offset));
result_arg.* = .{ .stack_argument_offset = stack_offset };
src/arch/riscv64/CodeGen.zig
@@ -2656,7 +2656,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 };
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
- const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
+ const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(mod)));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
result_arg.* = .{ .register = argument_registers[next_register] };
src/arch/sparc64/CodeGen.zig
@@ -4493,7 +4493,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
};
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
- const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
+ const param_size = @as(u32, @intCast(Type.fromInterned(ty).abiSize(mod)));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
result_arg.* = .{ .register = argument_registers[next_register] };
src/arch/wasm/abi.zig
@@ -37,7 +37,7 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
// The struct type is non-scalar.
return memory;
}
- const field_ty = struct_type.field_types.get(ip)[0].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[0]);
const explicit_align = struct_type.fieldAlign(ip, 0);
if (explicit_align != .none) {
if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(mod)))
@@ -76,7 +76,7 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
const layout = ty.unionGetLayout(mod);
assert(layout.tag_size == 0);
if (union_obj.field_names.len > 1) return memory;
- const first_field_ty = union_obj.field_types.get(ip)[0].toType();
+ const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
return classifyType(first_field_ty, mod);
},
.ErrorUnion,
@@ -104,7 +104,7 @@ pub fn scalarType(ty: Type, mod: *Module) Type {
switch (ty.zigTypeTag(mod)) {
.Struct => {
if (mod.typeToPackedStruct(ty)) |packed_struct| {
- return scalarType(packed_struct.backingIntType(ip).toType(), mod);
+ return scalarType(Type.fromInterned(packed_struct.backingIntType(ip).*), mod);
} else {
assert(ty.structFieldCount(mod) == 1);
return scalarType(ty.structFieldType(0, mod), mod);
@@ -119,7 +119,7 @@ pub fn scalarType(ty: Type, mod: *Module) Type {
}
assert(union_obj.field_types.len == 1);
}
- const first_field_ty = union_obj.field_types.get(ip)[0].toType();
+ const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
return scalarType(first_field_ty, mod);
},
else => return ty,
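One detail in the wasm hunks above that goes beyond the plain rename: `backingIntType` evidently returns a pointer to the interned index rather than the index by value, so where the old method call relied on Zig's automatic pointer dereference, the migrated sites add an explicit `.*` before `Type.fromInterned`. A minimal illustration under that assumption (stub types, not the real declarations):

```zig
const InternPool = struct {
    pub const Index = enum(u32) { _ };
};

const Type = struct {
    ip_index: InternPool.Index,
    pub fn fromInterned(i: InternPool.Index) Type {
        return .{ .ip_index = i };
    }
};

// Stub for the packed-struct metadata; the only assumption is that
// backingIntType hands back a pointer into interned storage.
const PackedStructStub = struct {
    backing_int_ty: InternPool.Index,
    pub fn backingIntType(s: *const PackedStructStub) *const InternPool.Index {
        return &s.backing_int_ty;
    }
};

test "explicit deref before fromInterned" {
    const ps = PackedStructStub{ .backing_int_ty = @enumFromInt(7) };
    // Old: `ps.backingIntType().toType()` auto-dereferenced the pointer.
    // New: fromInterned takes the index by value, hence the `.*`.
    const backing_ty = Type.fromInterned(ps.backingIntType().*);
    _ = backing_ty;
}
```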
src/arch/wasm/CodeGen.zig
@@ -1009,7 +1009,7 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
},
.Struct => {
if (mod.typeToPackedStruct(ty)) |packed_struct| {
- return typeToValtype(packed_struct.backingIntType(ip).toType(), mod);
+ return typeToValtype(Type.fromInterned(packed_struct.backingIntType(ip).*), mod);
} else {
return wasm.Valtype.i32;
}
@@ -1171,7 +1171,7 @@ fn genFunctype(
// param types
for (params) |param_type_ip| {
- const param_type = param_type_ip.toType();
+ const param_type = Type.fromInterned(param_type_ip);
if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue;
switch (cc) {
@@ -1240,7 +1240,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
const mod = func.bin_file.base.options.module.?;
const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(func.decl.ty).?;
- var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), fn_info.return_type.toType(), mod);
+ var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), mod);
defer func_type.deinit(func.gpa);
_ = try func.bin_file.storeDeclType(func.decl_index, func_type);
@@ -1363,7 +1363,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
// Check if we store the result as a pointer to the stack rather than
// by value
- if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) {
+ if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), mod)) {
// the sret arg will be passed as first argument, therefore we
// set the `return_value` before allocating locals for regular args.
result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } };
@@ -1373,7 +1373,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
switch (cc) {
.Unspecified => {
for (fn_info.param_types.get(ip)) |ty| {
- if (!ty.toType().hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!Type.fromInterned(ty).hasRuntimeBitsIgnoreComptime(mod)) {
continue;
}
@@ -1383,7 +1383,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
},
.C => {
for (fn_info.param_types.get(ip)) |ty| {
- const ty_classes = abi.classifyType(ty.toType(), mod);
+ const ty_classes = abi.classifyType(Type.fromInterned(ty), mod);
for (ty_classes) |class| {
if (class == .none) continue;
try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } });
@@ -1748,7 +1748,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
},
.Struct => {
if (mod.typeToPackedStruct(ty)) |packed_struct| {
- return isByRef(packed_struct.backingIntType(ip).toType(), mod);
+ return isByRef(Type.fromInterned(packed_struct.backingIntType(ip).*), mod);
}
return ty.hasRuntimeBitsIgnoreComptime(mod);
},
@@ -2097,7 +2097,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const un_op = func.air.instructions.items(.data)[inst].un_op;
const operand = try func.resolveInst(un_op);
const fn_info = mod.typeToFunc(func.decl.ty).?;
- const ret_ty = fn_info.return_type.toType();
+ const ret_ty = Type.fromInterned(fn_info.return_type);
// result must be stored in the stack and we return a pointer
// to the stack instead
@@ -2145,7 +2145,7 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
const fn_info = mod.typeToFunc(func.decl.ty).?;
- if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) {
+ if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), mod)) {
break :result func.return_value;
}
@@ -2166,7 +2166,7 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (ret_ty.isError(mod)) {
try func.addImm32(0);
}
- } else if (!firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) {
+ } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), mod)) {
// leave on the stack
_ = try func.load(operand, ret_ty, 0);
}
@@ -2192,7 +2192,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
};
const ret_ty = fn_ty.fnReturnType(mod);
const fn_info = mod.typeToFunc(fn_ty).?;
- const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod);
+ const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), mod);
const callee: ?Decl.Index = blk: {
const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null;
@@ -2203,7 +2203,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
} else if (func_val.getExternFunc(mod)) |extern_func| {
const ext_decl = mod.declPtr(extern_func.decl);
const ext_info = mod.typeToFunc(ext_decl.ty).?;
- var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types.get(ip), ext_info.return_type.toType(), mod);
+ var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types.get(ip), Type.fromInterned(ext_info.return_type), mod);
defer func_type.deinit(func.gpa);
const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_func.decl);
const atom = func.bin_file.getAtomPtr(atom_index);
@@ -2253,7 +2253,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const operand = try func.resolveInst(pl_op.operand);
try func.emitWValue(operand);
- var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), fn_info.return_type.toType(), mod);
+ var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), mod);
defer fn_type.deinit(func.gpa);
const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type);
@@ -3081,24 +3081,24 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
return func.lowerParentPtrDecl(ptr_val, decl_index, offset);
},
.eu_payload => |tag| return func.fail("TODO: Implement lowerParentPtr for {}", .{tag}),
- .int => |base| return func.lowerConstant(base.toValue(), Type.usize),
- .opt_payload => |base_ptr| return func.lowerParentPtr(base_ptr.toValue(), offset),
+ .int => |base| return func.lowerConstant(Value.fromInterned(base), Type.usize),
+ .opt_payload => |base_ptr| return func.lowerParentPtr(Value.fromInterned(base_ptr), offset),
.comptime_field => unreachable,
.elem => |elem| {
const index = elem.index;
- const elem_type = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod);
+ const elem_type = Type.fromInterned(mod.intern_pool.typeOf(elem.base)).elemType2(mod);
const elem_offset = index * elem_type.abiSize(mod);
- return func.lowerParentPtr(elem.base.toValue(), @as(u32, @intCast(elem_offset + offset)));
+ return func.lowerParentPtr(Value.fromInterned(elem.base), @as(u32, @intCast(elem_offset + offset)));
},
.field => |field| {
- const parent_ptr_ty = mod.intern_pool.typeOf(field.base).toType();
+ const parent_ptr_ty = Type.fromInterned(mod.intern_pool.typeOf(field.base));
const parent_ty = parent_ptr_ty.childType(mod);
const field_index: u32 = @intCast(field.index);
const field_offset = switch (parent_ty.zigTypeTag(mod)) {
.Struct => blk: {
if (mod.typeToPackedStruct(parent_ty)) |struct_type| {
- if (ptr.ty.toType().ptrInfo(mod).packed_offset.host_size == 0)
+ if (Type.fromInterned(ptr.ty).ptrInfo(mod).packed_offset.host_size == 0)
break :blk @divExact(mod.structPackedFieldBitOffset(struct_type, field_index) + parent_ptr_ty.ptrInfo(mod).packed_offset.bit_offset, 8)
else
break :blk 0;
@@ -3126,7 +3126,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
},
else => unreachable,
};
- return func.lowerParentPtr(field.base.toValue(), @as(u32, @intCast(offset + field_offset)));
+ return func.lowerParentPtr(Value.fromInterned(field.base), @as(u32, @intCast(offset + field_offset)));
},
}
}
@@ -3146,7 +3146,7 @@ fn lowerAnonDeclRef(
) InnerError!WValue {
const mod = func.bin_file.base.options.module.?;
const decl_val = anon_decl.val;
- const ty = mod.intern_pool.typeOf(decl_val).toType();
+ const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
const is_fn_body = ty.zigTypeTag(mod) == .Fn;
if (!is_fn_body and !ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -3301,10 +3301,10 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
const err_tv: TypedValue = switch (error_union.val) {
.err_name => |err_name| .{
.ty = ty.errorUnionSet(mod),
- .val = (try mod.intern(.{ .err = .{
+ .val = Value.fromInterned((try mod.intern(.{ .err = .{
.ty = ty.errorUnionSet(mod).toIntern(),
.name = err_name,
- } })).toValue(),
+ } }))),
},
.payload => .{
.ty = err_int_ty,
@@ -3321,7 +3321,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
},
.enum_tag => |enum_tag| {
const int_tag_ty = ip.typeOf(enum_tag.int);
- return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType());
+ return func.lowerConstant(Value.fromInterned(enum_tag.int), Type.fromInterned(int_tag_ty));
},
.float => |float| switch (float.storage) {
.f16 => |f16_val| return WValue{ .imm32 = @as(u16, @bitCast(f16_val)) },
@@ -3332,7 +3332,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl, 0),
.mut_decl => |mut_decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, mut_decl.decl, 0),
- .int => |int| return func.lowerConstant(int.toValue(), ip.typeOf(int).toType()),
+ .int => |int| return func.lowerConstant(Value.fromInterned(int), Type.fromInterned(ip.typeOf(int))),
.opt_payload, .elem, .field => return func.lowerParentPtr(val, 0),
.anon_decl => |ad| return func.lowerAnonDeclRef(ad, 0),
else => return func.fail("Wasm TODO: lowerConstant for other const addr tag {}", .{ptr.addr}),
@@ -3361,7 +3361,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
assert(struct_type.layout == .Packed);
var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
val.writeToPackedMemory(ty, mod, &buf, 0) catch unreachable;
- const backing_int_ty = struct_type.backingIntType(ip).toType();
+ const backing_int_ty = Type.fromInterned(struct_type.backingIntType(ip).*);
const int_val = try mod.intValue(
backing_int_ty,
mem.readInt(u64, &buf, .little),
@@ -3376,10 +3376,10 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
try ty.unionBackingType(mod)
else field_ty: {
const union_obj = mod.typeToUnion(ty).?;
- const field_index = mod.unionTagFieldIndex(union_obj, un.tag.toValue()).?;
- break :field_ty union_obj.field_types.get(ip)[field_index].toType();
+ const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
+ break :field_ty Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
};
- return func.lowerConstant(un.val.toValue(), constant_ty);
+ return func.lowerConstant(Value.fromInterned(un.val), constant_ty);
},
.memoized_call => unreachable,
}
@@ -3426,7 +3426,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
},
.Struct => {
const packed_struct = mod.typeToPackedStruct(ty).?;
- return func.emitUndefined(packed_struct.backingIntType(ip).toType());
+ return func.emitUndefined(Type.fromInterned(packed_struct.backingIntType(ip).*));
},
else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}),
}
@@ -3466,8 +3466,8 @@ fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 {
.i64 => |x| @as(i32, @intCast(x)),
.u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))),
.big_int => unreachable,
- .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(ty.toType().abiAlignment(mod).toByteUnits(0))))),
- .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(ty.toType().abiSize(mod))))),
+ .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0))))),
+ .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiSize(mod))))),
};
}
@@ -3930,7 +3930,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.Struct => result: {
const packed_struct = mod.typeToPackedStruct(struct_ty).?;
const offset = mod.structPackedFieldBitOffset(packed_struct, field_index);
- const backing_ty = packed_struct.backingIntType(ip).toType();
+ const backing_ty = Type.fromInterned(packed_struct.backingIntType(ip).*);
const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
};
@@ -5208,7 +5208,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const a = try func.resolveInst(extra.a);
const b = try func.resolveInst(extra.b);
- const mask = extra.mask.toValue();
+ const mask = Value.fromInterned(extra.mask);
const mask_len = extra.mask_len;
const child_ty = inst_ty.childType(mod);
@@ -5325,7 +5325,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
const packed_struct = mod.typeToPackedStruct(result_ty).?;
const field_types = packed_struct.field_types;
- const backing_type = packed_struct.backingIntType(ip).toType();
+ const backing_type = Type.fromInterned(packed_struct.backingIntType(ip).*);
// ensure the result is zero'd
const result = try func.allocLocal(backing_type);
@@ -5337,7 +5337,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var current_bit: u16 = 0;
for (elements, 0..) |elem, elem_index| {
- const field_ty = field_types.get(ip)[elem_index].toType();
+ const field_ty = Type.fromInterned(field_types.get(ip)[elem_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const shift_val = if (backing_type.bitSize(mod) <= 32)
@@ -5409,7 +5409,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const union_ty = func.typeOfIndex(inst);
const layout = union_ty.unionGetLayout(mod);
const union_obj = mod.typeToUnion(union_ty).?;
- const field_ty = union_obj.field_types.get(ip)[extra.field_index].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
const field_name = union_obj.field_names.get(ip)[extra.field_index];
const tag_int = blk: {
@@ -5438,7 +5438,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
if (layout.tag_size > 0) {
- try func.store(result_ptr, tag_int, union_obj.enum_tag_ty.toType(), 0);
+ try func.store(result_ptr, tag_int, Type.fromInterned(union_obj.enum_tag_ty), 0);
}
} else {
try func.store(result_ptr, payload, field_ty, 0);
@@ -5446,7 +5446,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.store(
result_ptr,
tag_int,
- union_obj.enum_tag_ty.toType(),
+ Type.fromInterned(union_obj.enum_tag_ty),
@intCast(layout.payload_size),
);
}
@@ -7173,8 +7173,8 @@ fn callIntrinsic(
// Lower all arguments to the stack before we call our function
for (args, 0..) |arg, arg_i| {
assert(!(want_sret_param and arg == .stack));
- assert(param_types[arg_i].toType().hasRuntimeBitsIgnoreComptime(mod));
- try func.lowerArg(.C, param_types[arg_i].toType(), arg);
+ assert(Type.fromInterned(param_types[arg_i]).hasRuntimeBitsIgnoreComptime(mod));
+ try func.lowerArg(.C, Type.fromInterned(param_types[arg_i]), arg);
}
// Actually call our intrinsic
@@ -7260,7 +7260,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
.storage = .{ .bytes = tag_name },
} });
const tag_sym_index = try func.bin_file.lowerUnnamedConst(
- .{ .ty = name_ty, .val = name_val.toValue() },
+ .{ .ty = name_ty, .val = Value.fromInterned(name_val) },
enum_decl_index,
);
src/arch/x86_64/abi.zig
@@ -224,7 +224,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
var result_i: usize = 0; // out of 8
var byte_i: usize = 0; // out of 8
for (struct_type.field_types.get(ip), 0..) |field_ty_ip, i| {
- const field_ty = field_ty_ip.toType();
+ const field_ty = Type.fromInterned(field_ty_ip);
const field_align = struct_type.fieldAlign(ip, i);
if (field_align != .none and field_align.compare(.lt, field_ty.abiAlignment(mod)))
return memory_class;
@@ -342,12 +342,12 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
const field_align = union_obj.fieldAlign(ip, @intCast(field_index));
if (field_align != .none and
- field_align.compare(.lt, field_ty.toType().abiAlignment(mod)))
+ field_align.compare(.lt, Type.fromInterned(field_ty).abiAlignment(mod)))
{
return memory_class;
}
// Combine this field with the previous one.
- const field_class = classifySystemV(field_ty.toType(), mod, .field);
+ const field_class = classifySystemV(Type.fromInterned(field_ty), mod, .field);
for (&result, 0..) |*result_item, i| {
const field_item = field_class[i];
// "If both classes are equal, this is the resulting class."
src/arch/x86_64/CodeGen.zig
@@ -3044,7 +3044,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
.storage = .{ .repeated_elem = mask_val.ip_index },
} });
- const splat_mcv = try self.genTypedValue(.{ .ty = splat_ty, .val = splat_val.toValue() });
+ const splat_mcv = try self.genTypedValue(.{ .ty = splat_ty, .val = Value.fromInterned(splat_val) });
const splat_addr_mcv: MCValue = switch (splat_mcv) {
.memory, .indirect, .load_frame => splat_mcv.address(),
else => .{ .register = try self.copyToTmpRegister(Type.usize, splat_mcv.address()) },
@@ -3196,7 +3196,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
}
} else if (Air.refToInterned(dst_air)) |ip_index| {
var space: Value.BigIntSpace = undefined;
- const src_int = ip_index.toValue().toBigInt(&space, mod);
+ const src_int = Value.fromInterned(ip_index).toBigInt(&space, mod);
return @as(u16, @intCast(src_int.bitCountTwosComp())) +
@intFromBool(src_int.positive and dst_info.signedness == .signed);
}
@@ -4429,7 +4429,7 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
self.register_manager.unlockReg(lock);
const shift_imm =
- Immediate.u(@intCast(rhs_elem.toValue().toUnsignedInt(mod)));
+ Immediate.u(@intCast(Value.fromInterned(rhs_elem).toUnsignedInt(mod)));
if (self.hasFeature(.avx)) try self.asmRegisterRegisterImmediate(
mir_tag,
registerAlias(dst_reg, abi_size),
@@ -4477,14 +4477,14 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
const mask_ty = try mod.vectorType(.{ .len = 16, .child = .u8_type });
const mask_mcv = try self.genTypedValue(.{
.ty = mask_ty,
- .val = (try mod.intern(.{ .aggregate = .{
+ .val = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = mask_ty.toIntern(),
.storage = .{ .elems = &([1]InternPool.Index{
(try rhs_ty.childType(mod).maxIntScalar(mod, Type.u8)).toIntern(),
} ++ [1]InternPool.Index{
(try mod.intValue(Type.u8, 0)).toIntern(),
} ** 15) },
- } })).toValue(),
+ } }))),
});
const mask_addr_reg =
try self.copyToTmpRegister(Type.usize, mask_mcv.address());
@@ -6714,7 +6714,7 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn
const mod = self.bin_file.options.module.?;
const ptr_info = ptr_ty.ptrInfo(mod);
- const val_ty = ptr_info.child.toType();
+ const val_ty = Type.fromInterned(ptr_info.child);
if (!val_ty.hasRuntimeBitsIgnoreComptime(mod)) return;
const val_abi_size: u32 = @intCast(val_ty.abiSize(mod));
@@ -10759,7 +10759,7 @@ fn genCall(self: *Self, info: union(enum) {
switch (call_info.return_value.long) {
.none, .unreach => {},
.indirect => |reg_off| {
- const ret_ty = fn_info.return_type.toType();
+ const ret_ty = Type.fromInterned(fn_info.return_type);
const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ret_ty, mod));
try self.genSetReg(reg_off.reg, Type.usize, .{
.lea_frame = .{ .index = frame_index, .off = -reg_off.off },
@@ -15343,7 +15343,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const union_obj = mod.typeToUnion(union_ty).?;
const field_name = union_obj.field_names.get(ip)[extra.field_index];
- const tag_ty = union_obj.enum_tag_ty.toType();
+ const tag_ty = Type.fromInterned(union_obj.enum_tag_ty);
const field_index = tag_ty.enumFieldIndex(field_name, mod).?;
const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
const tag_int_val = try tag_val.intFromEnum(tag_ty, mod);
@@ -15812,7 +15812,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
const ip_index = Air.refToInterned(ref).?;
const gop = try self.const_tracking.getOrPut(self.gpa, ip_index);
if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(init: {
- const const_mcv = try self.genTypedValue(.{ .ty = ty, .val = ip_index.toValue() });
+ const const_mcv = try self.genTypedValue(.{ .ty = ty, .val = Value.fromInterned(ip_index) });
switch (const_mcv) {
.lea_tlv => |tlv_sym| if (self.bin_file.cast(link.File.Elf)) |_| {
if (self.bin_file.options.pic) {
@@ -15921,7 +15921,7 @@ fn resolveCallingConventionValues(
defer self.gpa.free(param_types);
for (param_types[0..fn_info.param_types.len], fn_info.param_types.get(ip)) |*dest, src| {
- dest.* = src.toType();
+ dest.* = Type.fromInterned(src);
}
for (param_types[fn_info.param_types.len..], var_args) |*param_ty, arg_ty|
param_ty.* = self.promoteVarArg(arg_ty);
@@ -15937,7 +15937,7 @@ fn resolveCallingConventionValues(
};
errdefer self.gpa.free(result.args);
- const ret_ty = fn_info.return_type.toType();
+ const ret_ty = Type.fromInterned(fn_info.return_type);
const resolved_cc = abi.resolveCallingConvention(cc, self.target.*);
switch (cc) {
src/codegen/c/type.zig
@@ -1481,7 +1481,7 @@ pub const CType = extern union {
info.flags.vector_index == .none)
try mod.intType(.unsigned, info.packed_offset.host_size * 8)
else
- info.child.toType();
+ Type.fromInterned(info.child);
if (try lookup.typeToIndex(pointee_ty, .forward)) |child_idx| {
self.storage = .{ .child = .{
@@ -1496,7 +1496,7 @@ pub const CType = extern union {
.Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .Packed) {
if (mod.typeToPackedStruct(ty)) |packed_struct| {
- try self.initType(packed_struct.backingIntType(ip).toType(), kind, lookup);
+ try self.initType(Type.fromInterned(packed_struct.backingIntType(ip).*), kind, lookup);
} else {
const bits: u16 = @intCast(ty.bitSize(mod));
const int_ty = try mod.intType(.unsigned, bits);
@@ -1736,10 +1736,10 @@ pub const CType = extern union {
.complete, .parameter, .global => .parameter,
.payload => unreachable,
};
- _ = try lookup.typeToIndex(info.return_type.toType(), param_kind);
+ _ = try lookup.typeToIndex(Type.fromInterned(info.return_type), param_kind);
for (info.param_types.get(ip)) |param_type| {
- if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
- _ = try lookup.typeToIndex(param_type.toType(), param_kind);
+ if (!Type.fromInterned(param_type).hasRuntimeBitsIgnoreComptime(mod)) continue;
+ _ = try lookup.typeToIndex(Type.fromInterned(param_type), param_kind);
}
}
self.init(if (info.is_var_args) .varargs_function else .function);
@@ -2033,21 +2033,21 @@ pub const CType = extern union {
var c_params_len: usize = 0;
for (info.param_types.get(ip)) |param_type| {
- if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!Type.fromInterned(param_type).hasRuntimeBitsIgnoreComptime(mod)) continue;
c_params_len += 1;
}
const params_pl = try arena.alloc(Index, c_params_len);
var c_param_i: usize = 0;
for (info.param_types.get(ip)) |param_type| {
- if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
- params_pl[c_param_i] = store.set.typeToIndex(param_type.toType(), mod, param_kind).?;
+ if (!Type.fromInterned(param_type).hasRuntimeBitsIgnoreComptime(mod)) continue;
+ params_pl[c_param_i] = store.set.typeToIndex(Type.fromInterned(param_type), mod, param_kind).?;
c_param_i += 1;
}
const fn_pl = try arena.create(Payload.Function);
fn_pl.* = .{ .base = .{ .tag = t }, .data = .{
- .return_type = store.set.typeToIndex(info.return_type.toType(), mod, param_kind).?,
+ .return_type = store.set.typeToIndex(Type.fromInterned(info.return_type), mod, param_kind).?,
.param_types = params_pl,
} };
return initPayload(fn_pl);
@@ -2167,18 +2167,18 @@ pub const CType = extern union {
.payload => unreachable,
};
- if (!self.eqlRecurse(info.return_type.toType(), data.return_type, param_kind))
+ if (!self.eqlRecurse(Type.fromInterned(info.return_type), data.return_type, param_kind))
return false;
var c_param_i: usize = 0;
for (info.param_types.get(ip)) |param_type| {
- if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!Type.fromInterned(param_type).hasRuntimeBitsIgnoreComptime(mod)) continue;
if (c_param_i >= data.param_types.len) return false;
const param_cty = data.param_types[c_param_i];
c_param_i += 1;
- if (!self.eqlRecurse(param_type.toType(), param_cty, param_kind))
+ if (!self.eqlRecurse(Type.fromInterned(param_type), param_cty, param_kind))
return false;
}
return c_param_i == data.param_types.len;
@@ -2282,10 +2282,10 @@ pub const CType = extern union {
.payload => unreachable,
};
- self.updateHasherRecurse(hasher, info.return_type.toType(), param_kind);
+ self.updateHasherRecurse(hasher, Type.fromInterned(info.return_type), param_kind);
for (info.param_types.get(ip)) |param_type| {
- if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
- self.updateHasherRecurse(hasher, param_type.toType(), param_kind);
+ if (!Type.fromInterned(param_type).hasRuntimeBitsIgnoreComptime(mod)) continue;
+ self.updateHasherRecurse(hasher, Type.fromInterned(param_type), param_kind);
}
},
src/codegen/c.zig
@@ -355,8 +355,8 @@ pub const Function = struct {
switch (c_value) {
.constant => |val| try f.object.dg.renderValue(
w,
- f.object.dg.module.intern_pool.typeOf(val).toType(),
- val.toValue(),
+ Type.fromInterned(f.object.dg.module.intern_pool.typeOf(val)),
+ Value.fromInterned(val),
location,
),
.undef => |ty| try f.object.dg.renderValue(w, ty, Value.undef, location),
@@ -370,8 +370,8 @@ pub const Function = struct {
try w.writeAll("(*");
try f.object.dg.renderValue(
w,
- f.object.dg.module.intern_pool.typeOf(val).toType(),
- val.toValue(),
+ Type.fromInterned(f.object.dg.module.intern_pool.typeOf(val)),
+ Value.fromInterned(val),
.Other,
);
try w.writeByte(')');
@@ -385,8 +385,8 @@ pub const Function = struct {
.constant => |val| {
try f.object.dg.renderValue(
w,
- f.object.dg.module.intern_pool.typeOf(val).toType(),
- val.toValue(),
+ Type.fromInterned(f.object.dg.module.intern_pool.typeOf(val)),
+ Value.fromInterned(val),
.Other,
);
try w.writeByte('.');
@@ -402,8 +402,8 @@ pub const Function = struct {
try w.writeByte('(');
try f.object.dg.renderValue(
w,
- f.object.dg.module.intern_pool.typeOf(val).toType(),
- val.toValue(),
+ Type.fromInterned(f.object.dg.module.intern_pool.typeOf(val)),
+ Value.fromInterned(val),
.Other,
);
try w.writeAll(")->");
@@ -561,7 +561,7 @@ pub const DeclGen = struct {
const mod = dg.module;
const ip = &mod.intern_pool;
const decl_val = anon_decl.val;
- const decl_ty = ip.typeOf(decl_val).toType();
+ const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
if (ty.isPtrAtRuntime(mod) and !decl_ty.isFnOrHasRuntimeBits(mod)) {
@@ -569,20 +569,20 @@ pub const DeclGen = struct {
}
// Chase function values in order to be able to reference the original function.
- if (decl_val.toValue().getFunction(mod)) |func| {
+ if (Value.fromInterned(decl_val).getFunction(mod)) |func| {
_ = func;
_ = ptr_val;
_ = location;
@panic("TODO");
}
- if (decl_val.toValue().getExternFunc(mod)) |extern_func| {
+ if (Value.fromInterned(decl_val).getExternFunc(mod)) |extern_func| {
_ = extern_func;
_ = ptr_val;
_ = location;
@panic("TODO");
}
- assert(decl_val.toValue().getVariable(mod) == null);
+ assert(Value.fromInterned(decl_val).getVariable(mod) == null);
// We shouldn't cast C function pointers as this is UB (when you call
// them). The analysis until now should ensure that the C function
@@ -608,7 +608,7 @@ pub const DeclGen = struct {
// alignment. If there is already an entry, keep the greater alignment.
const explicit_alignment = ptr_type.flags.alignment;
if (explicit_alignment != .none) {
- const abi_alignment = ptr_type.child.toType().abiAlignment(mod);
+ const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(mod);
if (explicit_alignment.compareStrict(.gt, abi_alignment)) {
const aligned_gop = try dg.aligned_anon_decls.getOrPut(dg.gpa, decl_val);
aligned_gop.value_ptr.* = if (aligned_gop.found_existing)
@@ -668,20 +668,20 @@ pub const DeclGen = struct {
location: ValueRenderLocation,
) error{ OutOfMemory, AnalysisFail }!void {
const mod = dg.module;
- const ptr_ty = mod.intern_pool.typeOf(ptr_val).toType();
+ const ptr_ty = Type.fromInterned(mod.intern_pool.typeOf(ptr_val));
const ptr_cty = try dg.typeToIndex(ptr_ty, .complete);
const ptr = mod.intern_pool.indexToKey(ptr_val).ptr;
switch (ptr.addr) {
- .decl => |d| try dg.renderDeclValue(writer, ptr_ty, ptr_val.toValue(), d, location),
- .mut_decl => |md| try dg.renderDeclValue(writer, ptr_ty, ptr_val.toValue(), md.decl, location),
- .anon_decl => |anon_decl| try dg.renderAnonDeclValue(writer, ptr_ty, ptr_val.toValue(), anon_decl, location),
+ .decl => |d| try dg.renderDeclValue(writer, ptr_ty, Value.fromInterned(ptr_val), d, location),
+ .mut_decl => |md| try dg.renderDeclValue(writer, ptr_ty, Value.fromInterned(ptr_val), md.decl, location),
+ .anon_decl => |anon_decl| try dg.renderAnonDeclValue(writer, ptr_ty, Value.fromInterned(ptr_val), anon_decl, location),
.int => |int| {
try writer.writeByte('(');
try dg.renderCType(writer, ptr_cty);
- try writer.print("){x}", .{try dg.fmtIntLiteral(Type.usize, int.toValue(), .Other)});
+ try writer.print("){x}", .{try dg.fmtIntLiteral(Type.usize, Value.fromInterned(int), .Other)});
},
.eu_payload, .opt_payload => |base| {
- const ptr_base_ty = mod.intern_pool.typeOf(base).toType();
+ const ptr_base_ty = Type.fromInterned(mod.intern_pool.typeOf(base));
const base_ty = ptr_base_ty.childType(mod);
// Ensure complete type definition is visible before accessing fields.
_ = try dg.typeToIndex(base_ty, .complete);
@@ -702,7 +702,7 @@ pub const DeclGen = struct {
try writer.writeAll(")->payload");
},
.elem => |elem| {
- const ptr_base_ty = mod.intern_pool.typeOf(elem.base).toType();
+ const ptr_base_ty = Type.fromInterned(mod.intern_pool.typeOf(elem.base));
const elem_ty = ptr_base_ty.elemType2(mod);
const ptr_elem_ty = try mod.adjustPtrTypeChild(ptr_base_ty, elem_ty);
const ptr_elem_cty = try dg.typeToIndex(ptr_elem_ty, .complete);
@@ -718,7 +718,7 @@ pub const DeclGen = struct {
try writer.print(")[{d}]", .{elem.index});
},
.field => |field| {
- const ptr_base_ty = mod.intern_pool.typeOf(field.base).toType();
+ const ptr_base_ty = Type.fromInterned(mod.intern_pool.typeOf(field.base));
const base_ty = ptr_base_ty.childType(mod);
// Ensure complete type definition is visible before accessing fields.
_ = try dg.typeToIndex(base_ty, .complete);
@@ -909,8 +909,8 @@ pub const DeclGen = struct {
}
const union_obj = mod.typeToUnion(ty).?;
for (union_obj.field_types.get(ip)) |field_ty| {
- if (!field_ty.toType().hasRuntimeBits(mod)) continue;
- try dg.renderValue(writer, field_ty.toType(), val, initializer_type);
+ if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
+ try dg.renderValue(writer, Type.fromInterned(field_ty), val, initializer_type);
break;
}
if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}');
@@ -1045,10 +1045,10 @@ pub const DeclGen = struct {
.err_name => |err_name| return dg.renderValue(
writer,
error_ty,
- (try mod.intern(.{ .err = .{
+ Value.fromInterned((try mod.intern(.{ .err = .{
.ty = error_ty.toIntern(),
.name = err_name,
- } })).toValue(),
+ } }))),
location,
),
.payload => return dg.renderValue(
@@ -1070,10 +1070,10 @@ pub const DeclGen = struct {
try dg.renderValue(
writer,
payload_ty,
- switch (error_union.val) {
+ Value.fromInterned(switch (error_union.val) {
.err_name => try mod.intern(.{ .undef = payload_ty.ip_index }),
.payload => |payload| payload,
- }.toValue(),
+ }),
initializer_type,
);
try writer.writeAll(", .error = ");
@@ -1081,10 +1081,10 @@ pub const DeclGen = struct {
.err_name => |err_name| try dg.renderValue(
writer,
error_ty,
- (try mod.intern(.{ .err = .{
+ Value.fromInterned((try mod.intern(.{ .err = .{
.ty = error_ty.toIntern(),
.name = err_name,
- } })).toValue(),
+ } }))),
location,
),
.payload => try dg.renderValue(
@@ -1099,7 +1099,7 @@ pub const DeclGen = struct {
.enum_tag => {
const enum_tag = ip.indexToKey(val.ip_index).enum_tag;
const int_tag_ty = ip.typeOf(enum_tag.int);
- try dg.renderValue(writer, int_tag_ty.toType(), enum_tag.int.toValue(), location);
+ try dg.renderValue(writer, Type.fromInterned(int_tag_ty), Value.fromInterned(enum_tag.int), location);
},
.float => {
const bits = ty.floatBits(target);
@@ -1218,7 +1218,7 @@ pub const DeclGen = struct {
try writer.writeAll("((");
try dg.renderType(writer, ptr_ty);
try writer.print("){x})", .{
- try dg.fmtIntLiteral(Type.usize, int.toValue(), ptr_location),
+ try dg.fmtIntLiteral(Type.usize, Value.fromInterned(int), ptr_location),
});
},
.eu_payload,
@@ -1230,7 +1230,7 @@ pub const DeclGen = struct {
}
if (ptr.len != .none) {
try writer.writeAll(", ");
- try dg.renderValue(writer, Type.usize, ptr.len.toValue(), initializer_type);
+ try dg.renderValue(writer, Type.usize, Value.fromInterned(ptr.len), initializer_type);
try writer.writeByte('}');
}
},
@@ -1250,7 +1250,7 @@ pub const DeclGen = struct {
.Pointer => try mod.getCoerced(val, payload_ty),
else => unreachable,
},
- else => |payload| payload.toValue(),
+ else => |payload| Value.fromInterned(payload),
},
location,
);
@@ -1262,10 +1262,10 @@ pub const DeclGen = struct {
}
try writer.writeAll("{ .payload = ");
- try dg.renderValue(writer, payload_ty, switch (opt.val) {
+ try dg.renderValue(writer, payload_ty, Value.fromInterned(switch (opt.val) {
.none => try mod.intern(.{ .undef = payload_ty.ip_index }),
else => |payload| payload,
- }.toValue(), initializer_type);
+ }), initializer_type);
try writer.writeAll(", .is_null = ");
try dg.renderValue(writer, Type.bool, is_null_val, initializer_type);
try writer.writeAll(" }");
@@ -1349,7 +1349,7 @@ pub const DeclGen = struct {
0..,
) |field_ty, comptime_ty, field_i| {
if (comptime_ty != .none) continue;
- if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeByte(',');
@@ -1361,7 +1361,7 @@ pub const DeclGen = struct {
.elems => |elems| elems[field_i],
.repeated_elem => |elem| elem,
};
- try dg.renderValue(writer, field_ty.toType(), field_val.toValue(), initializer_type);
+ try dg.renderValue(writer, Type.fromInterned(field_ty), Value.fromInterned(field_val), initializer_type);
empty = false;
}
@@ -1378,7 +1378,7 @@ pub const DeclGen = struct {
try writer.writeByte('{');
var empty = true;
for (0..struct_type.field_types.len) |field_i| {
- const field_ty = struct_type.field_types.get(ip)[field_i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_i]);
if (struct_type.fieldIsComptime(ip, field_i)) continue;
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
@@ -1391,7 +1391,7 @@ pub const DeclGen = struct {
.elems => |elems| elems[field_i],
.repeated_elem => |elem| elem,
};
- try dg.renderValue(writer, field_ty, field_val.toValue(), initializer_type);
+ try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), initializer_type);
empty = false;
}
@@ -1408,7 +1408,7 @@ pub const DeclGen = struct {
var eff_num_fields: usize = 0;
for (field_types) |field_ty| {
- if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
eff_num_fields += 1;
}
@@ -1428,7 +1428,7 @@ pub const DeclGen = struct {
var eff_index: usize = 0;
var needs_closing_paren = false;
for (field_types, 0..) |field_ty, field_i| {
- if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
@@ -1438,24 +1438,24 @@ pub const DeclGen = struct {
.elems => |elems| elems[field_i],
.repeated_elem => |elem| elem,
};
- const cast_context = IntCastContext{ .value = .{ .value = field_val.toValue() } };
+ const cast_context = IntCastContext{ .value = .{ .value = Value.fromInterned(field_val) } };
if (bit_offset != 0) {
try writer.writeAll("zig_shl_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
- try dg.renderIntCast(writer, ty, cast_context, field_ty.toType(), .FunctionArgument);
+ try dg.renderIntCast(writer, ty, cast_context, Type.fromInterned(field_ty), .FunctionArgument);
try writer.writeAll(", ");
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
} else {
- try dg.renderIntCast(writer, ty, cast_context, field_ty.toType(), .FunctionArgument);
+ try dg.renderIntCast(writer, ty, cast_context, Type.fromInterned(field_ty), .FunctionArgument);
}
if (needs_closing_paren) try writer.writeByte(')');
if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
- bit_offset += field_ty.toType().bitSize(mod);
+ bit_offset += Type.fromInterned(field_ty).bitSize(mod);
needs_closing_paren = true;
eff_index += 1;
}
@@ -1464,7 +1464,7 @@ pub const DeclGen = struct {
// a << a_off | b << b_off | c << c_off
var empty = true;
for (field_types, 0..) |field_ty, field_i| {
- if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeAll(" | ");
try writer.writeByte('(');
@@ -1481,15 +1481,15 @@ pub const DeclGen = struct {
};
if (bit_offset != 0) {
- try dg.renderValue(writer, field_ty.toType(), field_val.toValue(), .Other);
+ try dg.renderValue(writer, Type.fromInterned(field_ty), Value.fromInterned(field_val), .Other);
try writer.writeAll(" << ");
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
} else {
- try dg.renderValue(writer, field_ty.toType(), field_val.toValue(), .Other);
+ try dg.renderValue(writer, Type.fromInterned(field_ty), Value.fromInterned(field_val), .Other);
}
- bit_offset += field_ty.toType().bitSize(mod);
+ bit_offset += Type.fromInterned(field_ty).bitSize(mod);
empty = false;
}
try writer.writeByte(')');
@@ -1509,7 +1509,7 @@ pub const DeclGen = struct {
try dg.renderType(writer, backing_ty);
try writer.writeByte(')');
}
- try dg.renderValue(writer, backing_ty, un.val.toValue(), initializer_type);
+ try dg.renderValue(writer, backing_ty, Value.fromInterned(un.val), initializer_type);
},
.Extern => {
if (location == .StaticInitializer) {
@@ -1522,7 +1522,7 @@ pub const DeclGen = struct {
try writer.writeAll(")(");
try dg.renderType(writer, backing_ty);
try writer.writeAll("){");
- try dg.renderValue(writer, backing_ty, un.val.toValue(), initializer_type);
+ try dg.renderValue(writer, backing_ty, Value.fromInterned(un.val), initializer_type);
try writer.writeAll("})");
},
else => unreachable,
@@ -1534,8 +1534,8 @@ pub const DeclGen = struct {
try writer.writeByte(')');
}
- const field_i = mod.unionTagFieldIndex(union_obj, un.tag.toValue()).?;
- const field_ty = union_obj.field_types.get(ip)[field_i].toType();
+ const field_i = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_i]);
const field_name = union_obj.field_names.get(ip)[field_i];
if (union_obj.getLayout(ip) == .Packed) {
if (field_ty.hasRuntimeBits(mod)) {
@@ -1548,7 +1548,7 @@ pub const DeclGen = struct {
try dg.renderType(writer, ty);
try writer.writeByte(')');
}
- try dg.renderValue(writer, field_ty, un.val.toValue(), initializer_type);
+ try dg.renderValue(writer, field_ty, Value.fromInterned(un.val), initializer_type);
} else {
try writer.writeAll("0");
}
@@ -1560,7 +1560,7 @@ pub const DeclGen = struct {
const layout = mod.getUnionLayout(union_obj);
if (layout.tag_size != 0) {
try writer.writeAll(" .tag = ");
- try dg.renderValue(writer, tag_ty, un.tag.toValue(), initializer_type);
+ try dg.renderValue(writer, tag_ty, Value.fromInterned(un.tag), initializer_type);
}
if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}');
if (layout.tag_size != 0) try writer.writeByte(',');
@@ -1568,11 +1568,11 @@ pub const DeclGen = struct {
}
if (field_ty.hasRuntimeBits(mod)) {
try writer.print(" .{ } = ", .{fmtIdent(ip.stringToSlice(field_name))});
- try dg.renderValue(writer, field_ty, un.val.toValue(), initializer_type);
+ try dg.renderValue(writer, field_ty, Value.fromInterned(un.val), initializer_type);
try writer.writeByte(' ');
} else for (union_obj.field_types.get(ip)) |this_field_ty| {
- if (!this_field_ty.toType().hasRuntimeBits(mod)) continue;
- try dg.renderValue(writer, this_field_ty.toType(), Value.undef, initializer_type);
+ if (!Type.fromInterned(this_field_ty).hasRuntimeBits(mod)) continue;
+ try dg.renderValue(writer, Type.fromInterned(this_field_ty), Value.undef, initializer_type);
break;
}
if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}');
@@ -2527,7 +2527,7 @@ pub fn genErrDecls(o: *Object) !void {
.ty = .anyerror_type,
.name = name_nts,
} });
- try o.dg.renderValue(writer, Type.anyerror, err_val.toValue(), .Other);
+ try o.dg.renderValue(writer, Type.anyerror, Value.fromInterned(err_val), .Other);
try writer.print(" = {d}u,\n", .{value});
}
o.indent_writer.popIndent();
@@ -2557,7 +2557,7 @@ pub fn genErrDecls(o: *Object) !void {
try writer.writeAll("static ");
try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, Const, .none, .complete);
try writer.writeAll(" = ");
- try o.dg.renderValue(writer, name_ty, name_val.toValue(), .StaticInitializer);
+ try o.dg.renderValue(writer, name_ty, Value.fromInterned(name_val), .StaticInitializer);
try writer.writeAll(";\n");
}
@@ -2590,7 +2590,7 @@ fn genExports(o: *Object) !void {
const ip = &mod.intern_pool;
const decl_index = o.dg.pass.decl;
const decl = mod.declPtr(decl_index);
- const tv: TypedValue = .{ .ty = decl.ty, .val = (try decl.internValue(mod)).toValue() };
+ const tv: TypedValue = .{ .ty = decl.ty, .val = Value.fromInterned((try decl.internValue(mod))) };
const fwd = o.dg.fwd_decl.writer();
const exports = mod.decl_exports.get(decl_index) orelse return;
@@ -2677,7 +2677,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
});
try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, .none, .complete);
try w.writeAll(" = ");
- try o.dg.renderValue(w, name_ty, name_val.toValue(), .Initializer);
+ try o.dg.renderValue(w, name_ty, Value.fromInterned(name_val), .Initializer);
try w.writeAll(";\n return (");
try o.dg.renderType(w, name_slice_ty);
try w.print("){{{}, {}}};\n", .{
@@ -2817,7 +2817,7 @@ pub fn genDecl(o: *Object) !void {
const mod = o.dg.module;
const decl_index = o.dg.pass.decl;
const decl = mod.declPtr(decl_index);
- const tv: TypedValue = .{ .ty = decl.ty, .val = (try decl.internValue(mod)).toValue() };
+ const tv: TypedValue = .{ .ty = decl.ty, .val = Value.fromInterned((try decl.internValue(mod))) };
if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return;
if (tv.val.getExternFunc(mod)) |_| {
@@ -2843,7 +2843,7 @@ pub fn genDecl(o: *Object) !void {
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.alignment, .complete);
if (decl.@"linksection" != .none) try w.writeAll(", read, write)");
try w.writeAll(" = ");
- try o.dg.renderValue(w, tv.ty, variable.init.toValue(), .StaticInitializer);
+ try o.dg.renderValue(w, tv.ty, Value.fromInterned(variable.init), .StaticInitializer);
try w.writeByte(';');
try o.indent_writer.insertNewline();
} else {
@@ -3491,7 +3491,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const ptr_ty = f.typeOf(ty_op.operand);
const ptr_scalar_ty = ptr_ty.scalarType(mod);
const ptr_info = ptr_scalar_ty.ptrInfo(mod);
- const src_ty = ptr_info.child.toType();
+ const src_ty = Type.fromInterned(ptr_info.child);
if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try reap(f, inst, &.{ty_op.operand});
@@ -3767,7 +3767,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.writeAll("memset(");
try f.writeCValue(writer, ptr_val, .FunctionArgument);
try writer.writeAll(", 0xaa, sizeof(");
- try f.renderType(writer, ptr_info.child.toType());
+ try f.renderType(writer, Type.fromInterned(ptr_info.child));
try writer.writeAll("));\n");
}
return .none;
@@ -3777,7 +3777,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
ptr_info.flags.alignment.compare(.gte, src_ty.abiAlignment(mod))
else
true;
- const is_array = lowersToArray(ptr_info.child.toType(), mod);
+ const is_array = lowersToArray(Type.fromInterned(ptr_info.child), mod);
const need_memcpy = !is_aligned or is_array;
const src_val = try f.resolveInst(bin_op.rhs);
@@ -3789,7 +3789,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
if (need_memcpy) {
// For this memcpy to safely work we need the rhs to have the same
// underlying type as the lhs (i.e. they must both be arrays of the same underlying type).
- assert(src_ty.eql(ptr_info.child.toType(), f.object.dg.module));
+ assert(src_ty.eql(Type.fromInterned(ptr_info.child), f.object.dg.module));
// If the source is a constant, writeCValue will emit a brace initialization
// so work around this by initializing into new local.
@@ -5404,7 +5404,7 @@ fn fieldLocation(
const union_obj = mod.typeToUnion(container_ty).?;
return switch (union_obj.getLayout(ip)) {
.Auto, .Extern => {
- const field_ty = union_obj.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod))
return if (container_ty.unionTagTypeSafety(mod) != null and
!container_ty.unionHasAllZeroBitFieldTypes(mod))
@@ -6754,7 +6754,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
- const mask = extra.mask.toValue();
+ const mask = Value.fromInterned(extra.mask);
const lhs = try f.resolveInst(extra.a);
const rhs = try f.resolveInst(extra.b);
@@ -7947,7 +7947,7 @@ fn lowerFnRetTy(ret_ty: Type, mod: *Module) !Type {
.types = &types,
.values = &values,
});
- return interned.toType();
+ return Type.fromInterned(interned);
}
return if (ret_ty.hasRuntimeBitsIgnoreComptime(mod)) ret_ty else Type.void;
src/codegen/llvm.zig
@@ -1399,12 +1399,12 @@ pub const Object = struct {
break :param param;
} else .none;
- if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type.toType())) |s| switch (s) {
+ if (ccAbiPromoteInt(fn_info.cc, mod, Type.fromInterned(fn_info.return_type))) |s| switch (s) {
.signed => try attributes.addRetAttr(.signext, &o.builder),
.unsigned => try attributes.addRetAttr(.zeroext, &o.builder),
};
- const err_return_tracing = fn_info.return_type.toType().isError(mod) and
+ const err_return_tracing = Type.fromInterned(fn_info.return_type).isError(mod) and
mod.comp.bin_file.options.error_return_tracing;
const err_ret_trace: Builder.Value = if (err_return_tracing) param: {
@@ -1430,7 +1430,7 @@ pub const Object = struct {
.byval => {
assert(!it.byval_attr);
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types.get(ip)[param_index].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
const param = wip.arg(llvm_arg_i);
if (isByRef(param_ty, mod)) {
@@ -1447,7 +1447,7 @@ pub const Object = struct {
llvm_arg_i += 1;
},
.byref => {
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const param_llvm_ty = try o.lowerType(param_ty);
const param = wip.arg(llvm_arg_i);
const alignment = param_ty.abiAlignment(mod).toLlvm();
@@ -1462,7 +1462,7 @@ pub const Object = struct {
}
},
.byref_mut => {
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const param_llvm_ty = try o.lowerType(param_ty);
const param = wip.arg(llvm_arg_i);
const alignment = param_ty.abiAlignment(mod).toLlvm();
@@ -1478,7 +1478,7 @@ pub const Object = struct {
},
.abi_sized_int => {
assert(!it.byval_attr);
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
@@ -1494,7 +1494,7 @@ pub const Object = struct {
},
.slice => {
assert(!it.byval_attr);
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const ptr_info = param_ty.ptrInfo(mod);
if (math.cast(u5, it.zig_index - 1)) |i| {
@@ -1511,7 +1511,7 @@ pub const Object = struct {
const elem_align = (if (ptr_info.flags.alignment != .none)
@as(InternPool.Alignment, ptr_info.flags.alignment)
else
- ptr_info.child.toType().abiAlignment(mod).max(.@"1")).toLlvm();
+ Type.fromInterned(ptr_info.child).abiAlignment(mod).max(.@"1")).toLlvm();
try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder);
const ptr_param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
@@ -1526,7 +1526,7 @@ pub const Object = struct {
.multiple_llvm_types => {
assert(!it.byval_attr);
const field_types = it.types_buffer[0..it.types_len];
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const param_llvm_ty = try o.lowerType(param_ty);
const param_alignment = param_ty.abiAlignment(mod).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, param_alignment, target);
@@ -1553,7 +1553,7 @@ pub const Object = struct {
args.appendAssumeCapacity(try wip.cast(.bitcast, param, .half, ""));
},
.float_array => {
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const param_llvm_ty = try o.lowerType(param_ty);
const param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
@@ -1568,7 +1568,7 @@ pub const Object = struct {
try wip.load(.normal, param_llvm_ty, arg_ptr, alignment, ""));
},
.i32_array, .i64_array => {
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const param_llvm_ty = try o.lowerType(param_ty);
const param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
@@ -1819,7 +1819,7 @@ pub const Object = struct {
const llvm_addr_space = toLlvmAddressSpace(.generic, o.target);
const variable_index = try o.builder.addVariable(
main_exp_name,
- try o.lowerType(mod.intern_pool.typeOf(exported_value).toType()),
+ try o.lowerType(Type.fromInterned(mod.intern_pool.typeOf(exported_value))),
llvm_addr_space,
);
const global_index = variable_index.ptrConst(&o.builder).global;
@@ -1961,7 +1961,7 @@ pub const Object = struct {
resolve: DebugResolveStatus,
opt_fwd_decl: ?*llvm.DIType,
) Allocator.Error!*llvm.DIType {
- const ty = gop.key_ptr.toType();
+ const ty = Type.fromInterned(gop.key_ptr.*);
const gpa = o.gpa;
const target = o.target;
const dib = o.di_builder.?;
@@ -2004,7 +2004,7 @@ pub const Object = struct {
const enumerators = try gpa.alloc(*llvm.DIEnumerator, enum_type.names.len);
defer gpa.free(enumerators);
- const int_ty = enum_type.tag_ty.toType();
+ const int_ty = Type.fromInterned(enum_type.tag_ty);
const int_info = ty.intInfo(mod);
assert(int_info.bits != 0);
@@ -2013,7 +2013,7 @@ pub const Object = struct {
var bigint_space: Value.BigIntSpace = undefined;
const bigint = if (enum_type.values.len != 0)
- enum_type.values.get(ip)[i].toValue().toBigInt(&bigint_space, mod)
+ Value.fromInterned(enum_type.values.get(ip)[i]).toBigInt(&bigint_space, mod)
else
std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst();
@@ -2083,10 +2083,10 @@ pub const Object = struct {
ptr_info.flags.is_const or
ptr_info.flags.is_volatile or
ptr_info.flags.size == .Many or ptr_info.flags.size == .C or
- !ptr_info.child.toType().hasRuntimeBitsIgnoreComptime(mod))
+ !Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(mod))
{
const bland_ptr_ty = try mod.ptrType(.{
- .child = if (!ptr_info.child.toType().hasRuntimeBitsIgnoreComptime(mod))
+ .child = if (!Type.fromInterned(ptr_info.child).hasRuntimeBitsIgnoreComptime(mod))
.anyopaque_type
else
ptr_info.child,
@@ -2183,7 +2183,7 @@ pub const Object = struct {
return full_di_ty;
}
- const elem_di_ty = try o.lowerDebugType(ptr_info.child.toType(), .fwd);
+ const elem_di_ty = try o.lowerDebugType(Type.fromInterned(ptr_info.child), .fwd);
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const ptr_di_ty = dib.createPointerType(
@@ -2456,7 +2456,7 @@ pub const Object = struct {
if (mod.typeToPackedStruct(ty)) |struct_type| {
const backing_int_ty = struct_type.backingIntType(ip).*;
if (backing_int_ty != .none) {
- const info = backing_int_ty.toType().intInfo(mod);
+ const info = Type.fromInterned(backing_int_ty).intInfo(mod);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
.unsigned => DW.ATE.unsigned,
@@ -2492,10 +2492,10 @@ pub const Object = struct {
var offset: u64 = 0;
for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
- if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
+ if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
- const field_size = field_ty.toType().abiSize(mod);
- const field_align = field_ty.toType().abiAlignment(mod);
+ const field_size = Type.fromInterned(field_ty).abiSize(mod);
+ const field_align = Type.fromInterned(field_ty).abiAlignment(mod);
const field_offset = field_align.forward(offset);
offset = field_offset + field_size;
@@ -2514,7 +2514,7 @@ pub const Object = struct {
field_align.toByteUnits(0) * 8, // align in bits
field_offset * 8, // offset in bits
0, // flags
- try o.lowerDebugType(field_ty.toType(), .full),
+ try o.lowerDebugType(Type.fromInterned(field_ty), .full),
));
}
@@ -2579,7 +2579,7 @@ pub const Object = struct {
comptime assert(struct_layout_version == 2);
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
- const field_ty = struct_type.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_size = field_ty.abiSize(mod);
const field_align = mod.structFieldAlignment(
@@ -2661,7 +2661,7 @@ pub const Object = struct {
const layout = mod.getUnionLayout(union_obj);
if (layout.payload_size == 0) {
- const tag_di_ty = try o.lowerDebugType(union_obj.enum_tag_ty.toType(), .full);
+ const tag_di_ty = try o.lowerDebugType(Type.fromInterned(union_obj.enum_tag_ty), .full);
const di_fields = [_]*llvm.DIType{tag_di_ty};
const full_di_ty = dib.createStructType(
compile_unit_scope,
@@ -2692,12 +2692,12 @@ pub const Object = struct {
for (0..union_obj.field_names.len) |field_index| {
const field_ty = union_obj.field_types.get(ip)[field_index];
- if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
- const field_size = field_ty.toType().abiSize(mod);
+ const field_size = Type.fromInterned(field_ty).abiSize(mod);
const field_align = mod.unionFieldNormalAlignment(union_obj, @intCast(field_index));
- const field_di_ty = try o.lowerDebugType(field_ty.toType(), .full);
+ const field_di_ty = try o.lowerDebugType(Type.fromInterned(field_ty), .full);
const field_name = union_obj.field_names.get(ip)[field_index];
di_fields.appendAssumeCapacity(dib.createMemberType(
fwd_decl.toScope(),
@@ -2759,7 +2759,7 @@ pub const Object = struct {
layout.tag_align.toByteUnits(0) * 8,
tag_offset * 8, // offset in bits
0, // flags
- try o.lowerDebugType(union_obj.enum_tag_ty.toType(), .full),
+ try o.lowerDebugType(Type.fromInterned(union_obj.enum_tag_ty), .full),
);
const payload_di = dib.createMemberType(
@@ -2807,20 +2807,20 @@ pub const Object = struct {
defer param_di_types.deinit();
// Return type goes first.
- if (fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) {
+ if (Type.fromInterned(fn_info.return_type).hasRuntimeBitsIgnoreComptime(mod)) {
const sret = firstParamSRet(fn_info, mod);
- const di_ret_ty = if (sret) Type.void else fn_info.return_type.toType();
+ const di_ret_ty = if (sret) Type.void else Type.fromInterned(fn_info.return_type);
try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full));
if (sret) {
- const ptr_ty = try mod.singleMutPtrType(fn_info.return_type.toType());
+ const ptr_ty = try mod.singleMutPtrType(Type.fromInterned(fn_info.return_type));
try param_di_types.append(try o.lowerDebugType(ptr_ty, .full));
}
} else {
try param_di_types.append(try o.lowerDebugType(Type.void, .full));
}
- if (fn_info.return_type.toType().isError(mod) and
+ if (Type.fromInterned(fn_info.return_type).isError(mod) and
o.module.comp.bin_file.options.error_return_tracing)
{
const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType());
@@ -2828,7 +2828,7 @@ pub const Object = struct {
}
for (0..fn_info.param_types.len) |i| {
- const param_ty = fn_info.param_types.get(ip)[i].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[i]);
if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (isByRef(param_ty, mod)) {
@@ -2982,13 +2982,13 @@ pub const Object = struct {
try attributes.addParamAttr(llvm_arg_i, .nonnull, &o.builder);
try attributes.addParamAttr(llvm_arg_i, .@"noalias", &o.builder);
- const raw_llvm_ret_ty = try o.lowerType(fn_info.return_type.toType());
+ const raw_llvm_ret_ty = try o.lowerType(Type.fromInterned(fn_info.return_type));
try attributes.addParamAttr(llvm_arg_i, .{ .sret = raw_llvm_ret_ty }, &o.builder);
llvm_arg_i += 1;
}
- const err_return_tracing = fn_info.return_type.toType().isError(mod) and
+ const err_return_tracing = Type.fromInterned(fn_info.return_type).isError(mod) and
mod.comp.bin_file.options.error_return_tracing;
if (err_return_tracing) {
@@ -3022,15 +3022,15 @@ pub const Object = struct {
while (try it.next()) |lowering| switch (lowering) {
.byval => {
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types.get(ip)[param_index].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
if (!isByRef(param_ty, mod)) {
try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, it.llvm_index - 1);
}
},
.byref => {
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1];
- const param_llvm_ty = try o.lowerType(param_ty.toType());
- const alignment = param_ty.toType().abiAlignment(mod);
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
+ const param_llvm_ty = try o.lowerType(param_ty);
+ const alignment = param_ty.abiAlignment(mod);
try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment.toLlvm(), it.byval_attr, param_llvm_ty);
},
.byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder),
@@ -3138,7 +3138,7 @@ pub const Object = struct {
const variable_index = try o.builder.addVariable(
try o.builder.fmt("__anon_{d}", .{@intFromEnum(decl_val)}),
- try o.lowerType(decl_ty.toType()),
+ try o.lowerType(Type.fromInterned(decl_ty)),
llvm_addr_space,
);
gop.value_ptr.* = variable_index.ptrConst(&o.builder).global;
@@ -3328,23 +3328,23 @@ pub const Object = struct {
},
.array_type => |array_type| o.builder.arrayType(
array_type.len + @intFromBool(array_type.sentinel != .none),
- try o.lowerType(array_type.child.toType()),
+ try o.lowerType(Type.fromInterned(array_type.child)),
),
.vector_type => |vector_type| o.builder.vectorType(
.normal,
vector_type.len,
- try o.lowerType(vector_type.child.toType()),
+ try o.lowerType(Type.fromInterned(vector_type.child)),
),
.opt_type => |child_ty| {
- if (!child_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) return .i8;
+ if (!Type.fromInterned(child_ty).hasRuntimeBitsIgnoreComptime(mod)) return .i8;
- const payload_ty = try o.lowerType(child_ty.toType());
+ const payload_ty = try o.lowerType(Type.fromInterned(child_ty));
if (t.optionalReprIsPayload(mod)) return payload_ty;
comptime assert(optional_layout_version == 3);
var fields: [3]Builder.Type = .{ payload_ty, .i8, undefined };
var fields_len: usize = 2;
- const offset = child_ty.toType().abiSize(mod) + 1;
+ const offset = Type.fromInterned(child_ty).abiSize(mod) + 1;
const abi_size = t.abiSize(mod);
const padding_len = abi_size - offset;
if (padding_len > 0) {
@@ -3356,15 +3356,15 @@ pub const Object = struct {
.anyframe_type => @panic("TODO implement lowerType for AnyFrame types"),
.error_union_type => |error_union_type| {
const error_type = try o.errorIntType();
- if (!error_union_type.payload_type.toType().hasRuntimeBitsIgnoreComptime(mod))
+ if (!Type.fromInterned(error_union_type.payload_type).hasRuntimeBitsIgnoreComptime(mod))
return error_type;
- const payload_type = try o.lowerType(error_union_type.payload_type.toType());
+ const payload_type = try o.lowerType(Type.fromInterned(error_union_type.payload_type));
const err_int_ty = try mod.errorIntType();
- const payload_align = error_union_type.payload_type.toType().abiAlignment(mod);
+ const payload_align = Type.fromInterned(error_union_type.payload_type).abiAlignment(mod);
const error_align = err_int_ty.abiAlignment(mod);
- const payload_size = error_union_type.payload_type.toType().abiSize(mod);
+ const payload_size = Type.fromInterned(error_union_type.payload_type).abiSize(mod);
const error_size = err_int_ty.abiSize(mod);
var fields: [3]Builder.Type = undefined;
@@ -3398,7 +3398,7 @@ pub const Object = struct {
if (gop.found_existing) return gop.value_ptr.*;
if (struct_type.layout == .Packed) {
- const int_ty = try o.lowerType(struct_type.backingIntType(ip).toType());
+ const int_ty = try o.lowerType(Type.fromInterned(struct_type.backingIntType(ip).*));
gop.value_ptr.* = int_ty;
return int_ty;
}
@@ -3423,7 +3423,7 @@ pub const Object = struct {
// When we encounter a zero-bit field, we place it here so we know to map it to the next non-zero-bit field (if any).
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
- const field_ty = struct_type.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
const field_align = mod.structFieldAlignment(
struct_type.fieldAlign(ip, field_index),
field_ty,
@@ -3499,7 +3499,7 @@ pub const Object = struct {
) |field_ty, field_val, field_index| {
if (field_val != .none) continue;
- const field_align = field_ty.toType().abiAlignment(mod);
+ const field_align = Type.fromInterned(field_ty).abiAlignment(mod);
big_align = big_align.max(field_align);
const prev_offset = offset;
offset = field_align.forward(offset);
@@ -3509,7 +3509,7 @@ pub const Object = struct {
o.gpa,
try o.builder.arrayType(padding_len, .i8),
);
- if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) {
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) {
// This is a zero-bit field. If there are runtime bits after this field,
// map to the next LLVM field (which we know exists): otherwise, don't
// map the field, indicating it's at the end of the struct.
@@ -3525,9 +3525,9 @@ pub const Object = struct {
.struct_ty = t.toIntern(),
.field_index = @intCast(field_index),
}, @intCast(llvm_field_types.items.len));
- try llvm_field_types.append(o.gpa, try o.lowerType(field_ty.toType()));
+ try llvm_field_types.append(o.gpa, try o.lowerType(Type.fromInterned(field_ty)));
- offset += field_ty.toType().abiSize(mod);
+ offset += Type.fromInterned(field_ty).abiSize(mod);
}
{
const prev_offset = offset;
@@ -3554,7 +3554,7 @@ pub const Object = struct {
}
if (layout.payload_size == 0) {
- const enum_tag_ty = try o.lowerType(union_obj.enum_tag_ty.toType());
+ const enum_tag_ty = try o.lowerType(Type.fromInterned(union_obj.enum_tag_ty));
gop.value_ptr.* = enum_tag_ty;
return enum_tag_ty;
}
@@ -3565,7 +3565,7 @@ pub const Object = struct {
const ty = try o.builder.opaqueType(name);
gop.value_ptr.* = ty; // must be done before any recursive calls
- const aligned_field_ty = union_obj.field_types.get(ip)[layout.most_aligned_field].toType();
+ const aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[layout.most_aligned_field]);
const aligned_field_llvm_ty = try o.lowerType(aligned_field_ty);
const payload_ty = ty: {
@@ -3589,7 +3589,7 @@ pub const Object = struct {
);
return ty;
}
- const enum_tag_ty = try o.lowerType(union_obj.enum_tag_ty.toType());
+ const enum_tag_ty = try o.lowerType(Type.fromInterned(union_obj.enum_tag_ty));
// Put the tag before or after the payload depending on which one's
// alignment is greater.
@@ -3624,7 +3624,7 @@ pub const Object = struct {
}
return gop.value_ptr.*;
},
- .enum_type => |enum_type| try o.lowerType(enum_type.tag_ty.toType()),
+ .enum_type => |enum_type| try o.lowerType(Type.fromInterned(enum_type.tag_ty)),
.func_type => |func_type| try o.lowerTypeFn(func_type),
.error_set_type, .inferred_error_set_type => try o.errorIntType(),
// values, not types
@@ -3678,7 +3678,7 @@ pub const Object = struct {
try llvm_params.append(o.gpa, .ptr);
}
- if (fn_info.return_type.toType().isError(mod) and
+ if (Type.fromInterned(fn_info.return_type).isError(mod) and
mod.comp.bin_file.options.error_return_tracing)
{
const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType());
@@ -3689,20 +3689,20 @@ pub const Object = struct {
while (try it.next()) |lowering| switch (lowering) {
.no_bits => continue,
.byval => {
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
try llvm_params.append(o.gpa, try o.lowerType(param_ty));
},
.byref, .byref_mut => {
try llvm_params.append(o.gpa, .ptr);
},
.abi_sized_int => {
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
try llvm_params.append(o.gpa, try o.builder.intType(
@intCast(param_ty.abiSize(mod) * 8),
));
},
.slice => {
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
try llvm_params.appendSlice(o.gpa, &.{
try o.builder.ptrType(toLlvmAddressSpace(param_ty.ptrAddressSpace(mod), target)),
try o.lowerType(Type.usize),
@@ -3715,7 +3715,7 @@ pub const Object = struct {
try llvm_params.append(o.gpa, .i16);
},
.float_array => |count| {
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?);
try llvm_params.append(o.gpa, try o.builder.arrayType(count, float_ty));
},
@@ -3740,14 +3740,14 @@ pub const Object = struct {
const ip = &mod.intern_pool;
const target = mod.getTarget();
- const val = arg_val.toValue();
+ const val = Value.fromInterned(arg_val);
const val_key = ip.indexToKey(val.toIntern());
if (val.isUndefDeep(mod)) {
- return o.builder.undefConst(try o.lowerType(val_key.typeOf().toType()));
+ return o.builder.undefConst(try o.lowerType(Type.fromInterned(val_key.typeOf())));
}
- const ty = val_key.typeOf().toType();
+ const ty = Type.fromInterned(val_key.typeOf());
return switch (val_key) {
.int_type,
.ptr_type,
@@ -4064,9 +4064,9 @@ pub const Object = struct {
0..,
) |field_ty, field_val, field_index| {
if (field_val != .none) continue;
- if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
- const field_align = field_ty.toType().abiAlignment(mod);
+ const field_align = Type.fromInterned(field_ty).abiAlignment(mod);
big_align = big_align.max(field_align);
const prev_offset = offset;
offset = field_align.forward(offset);
@@ -4088,7 +4088,7 @@ pub const Object = struct {
need_unnamed = true;
llvm_index += 1;
- offset += field_ty.toType().abiSize(mod);
+ offset += Type.fromInterned(field_ty).abiSize(mod);
}
{
const prev_offset = offset;
@@ -4116,14 +4116,14 @@ pub const Object = struct {
var running_int = try o.builder.intConst(struct_ty, 0);
var running_bits: u16 = 0;
for (struct_type.field_types.get(ip), 0..) |field_ty, field_index| {
- if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
const non_int_val =
try o.lowerValue((try val.fieldValue(mod, field_index)).toIntern());
- const ty_bit_size: u16 = @intCast(field_ty.toType().bitSize(mod));
+ const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(mod));
const small_int_ty = try o.builder.intType(ty_bit_size);
const small_int_val = try o.builder.castConst(
- if (field_ty.toType().isPtrAtRuntime(mod)) .ptrtoint else .bitcast,
+ if (Type.fromInterned(field_ty).isPtrAtRuntime(mod)) .ptrtoint else .bitcast,
non_int_val,
small_int_ty,
);
@@ -4159,7 +4159,7 @@ pub const Object = struct {
var need_unnamed = false;
var field_it = struct_type.iterateRuntimeOrder(ip);
while (field_it.next()) |field_index| {
- const field_ty = struct_type.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
const field_align = mod.structFieldAlignment(
struct_type.fieldAlign(ip, field_index),
field_ty,
@@ -4225,8 +4225,8 @@ pub const Object = struct {
var need_unnamed = false;
const payload = if (un.tag != .none) p: {
- const field_index = mod.unionTagFieldIndex(union_obj, un.tag.toValue()).?;
- const field_ty = union_obj.field_types.get(ip)[field_index].toType();
+ const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (container_layout == .Packed) {
if (!field_ty.hasRuntimeBits(mod)) return o.builder.intConst(union_ty, 0);
const small_int_val = try o.builder.castConst(
@@ -4313,7 +4313,7 @@ pub const Object = struct {
.undef => return o.builder.undefConst(.ptr),
.int => {
var bigint_space: Value.BigIntSpace = undefined;
- const bigint = val.toValue().toBigInt(&bigint_space, mod);
+ const bigint = Value.fromInterned(val).toBigInt(&bigint_space, mod);
const llvm_int = try lowerBigInt(o, Type.usize, bigint);
return o.builder.castConst(.inttoptr, llvm_int, .ptr);
},
@@ -4345,12 +4345,12 @@ pub const Object = struct {
return switch (ptr.addr) {
.decl => |decl| try o.lowerParentPtrDecl(decl),
.mut_decl => |mut_decl| try o.lowerParentPtrDecl(mut_decl.decl),
- .anon_decl => |ad| try o.lowerAnonDeclRef(ad.orig_ty.toType(), ad),
+ .anon_decl => |ad| try o.lowerAnonDeclRef(Type.fromInterned(ad.orig_ty), ad),
.int => |int| try o.lowerIntAsPtr(int),
.eu_payload => |eu_ptr| {
- const parent_ptr = try o.lowerParentPtr(eu_ptr.toValue());
+ const parent_ptr = try o.lowerParentPtr(Value.fromInterned(eu_ptr));
- const eu_ty = ip.typeOf(eu_ptr).toType().childType(mod);
+ const eu_ty = Type.fromInterned(ip.typeOf(eu_ptr)).childType(mod);
const payload_ty = eu_ty.errorUnionPayload(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// In this case, we represent pointer to error union the same as pointer
@@ -4367,9 +4367,9 @@ pub const Object = struct {
});
},
.opt_payload => |opt_ptr| {
- const parent_ptr = try o.lowerParentPtr(opt_ptr.toValue());
+ const parent_ptr = try o.lowerParentPtr(Value.fromInterned(opt_ptr));
- const opt_ty = ip.typeOf(opt_ptr).toType().childType(mod);
+ const opt_ty = Type.fromInterned(ip.typeOf(opt_ptr)).childType(mod);
const payload_ty = opt_ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
payload_ty.optionalReprIsPayload(mod))
@@ -4385,16 +4385,16 @@ pub const Object = struct {
},
.comptime_field => unreachable,
.elem => |elem_ptr| {
- const parent_ptr = try o.lowerParentPtr(elem_ptr.base.toValue());
- const elem_ty = ip.typeOf(elem_ptr.base).toType().elemType2(mod);
+ const parent_ptr = try o.lowerParentPtr(Value.fromInterned(elem_ptr.base));
+ const elem_ty = Type.fromInterned(ip.typeOf(elem_ptr.base)).elemType2(mod);
return o.builder.gepConst(.inbounds, try o.lowerType(elem_ty), parent_ptr, null, &.{
try o.builder.intConst(try o.lowerType(Type.usize), elem_ptr.index),
});
},
.field => |field_ptr| {
- const parent_ptr = try o.lowerParentPtr(field_ptr.base.toValue());
- const parent_ptr_ty = ip.typeOf(field_ptr.base).toType();
+ const parent_ptr = try o.lowerParentPtr(Value.fromInterned(field_ptr.base));
+ const parent_ptr_ty = Type.fromInterned(ip.typeOf(field_ptr.base));
const parent_ty = parent_ptr_ty.childType(mod);
const field_index: u32 = @intCast(field_ptr.index);
switch (parent_ty.zigTypeTag(mod)) {
@@ -4420,7 +4420,7 @@ pub const Object = struct {
},
.Struct => {
if (mod.typeToPackedStruct(parent_ty)) |struct_type| {
- const ptr_info = ptr.ty.toType().ptrInfo(mod);
+ const ptr_info = Type.fromInterned(ptr.ty).ptrInfo(mod);
if (ptr_info.packed_offset.host_size != 0) return parent_ptr;
const parent_ptr_info = parent_ptr_ty.ptrInfo(mod);
@@ -4470,13 +4470,13 @@ pub const Object = struct {
const mod = o.module;
const ip = &mod.intern_pool;
const decl_val = anon_decl.val;
- const decl_ty = ip.typeOf(decl_val).toType();
+ const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
const target = mod.getTarget();
- if (decl_val.toValue().getFunction(mod)) |func| {
+ if (Value.fromInterned(decl_val).getFunction(mod)) |func| {
_ = func;
@panic("TODO");
- } else if (decl_val.toValue().getExternFunc(mod)) |func| {
+ } else if (Value.fromInterned(decl_val).getExternFunc(mod)) |func| {
_ = func;
@panic("TODO");
}
@@ -4488,7 +4488,7 @@ pub const Object = struct {
if (is_fn_body)
@panic("TODO");
- const orig_ty = anon_decl.orig_ty.toType();
+ const orig_ty = Type.fromInterned(anon_decl.orig_ty);
const llvm_addr_space = toLlvmAddressSpace(orig_ty.ptrAddressSpace(mod), target);
const alignment = orig_ty.ptrAlignment(mod);
const llvm_global = (try o.resolveGlobalAnonDecl(decl_val, llvm_addr_space, alignment)).ptrConst(&o.builder).global;
@@ -4620,7 +4620,7 @@ pub const Object = struct {
const elem_align = if (ptr_info.flags.alignment != .none)
ptr_info.flags.alignment
else
- ptr_info.child.toType().abiAlignment(mod).max(.@"1");
+ Type.fromInterned(ptr_info.child).abiAlignment(mod).max(.@"1");
try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align.toLlvm() }, &o.builder);
} else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) {
.signed => try attributes.addParamAttr(llvm_arg_i, .signext, &o.builder),
@@ -4835,8 +4835,8 @@ pub const FuncGen = struct {
if (o.null_opt_usize == .no_init) {
const ty = try mod.intern(.{ .opt_type = .usize_type });
o.null_opt_usize = try self.resolveValue(.{
- .ty = ty.toType(),
- .val = (try mod.intern(.{ .opt = .{ .ty = ty, .val = .none } })).toValue(),
+ .ty = Type.fromInterned(ty),
+ .val = Value.fromInterned((try mod.intern(.{ .opt = .{ .ty = ty, .val = .none } }))),
});
}
return o.null_opt_usize;
@@ -5116,7 +5116,7 @@ pub const FuncGen = struct {
else => unreachable,
};
const fn_info = mod.typeToFunc(zig_fn_ty).?;
- const return_type = fn_info.return_type.toType();
+ const return_type = Type.fromInterned(fn_info.return_type);
const llvm_fn = try self.resolveInst(pl_op.operand);
const target = mod.getTarget();
const sret = firstParamSRet(fn_info, mod);
@@ -5296,14 +5296,14 @@ pub const FuncGen = struct {
while (try it.next()) |lowering| switch (lowering) {
.byval => {
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types.get(ip)[param_index].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
if (!isByRef(param_ty, mod)) {
try o.addByValParamAttrs(&attributes, param_ty, param_index, fn_info, it.llvm_index - 1);
}
},
.byref => {
const param_index = it.zig_index - 1;
- const param_ty = fn_info.param_types.get(ip)[param_index].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[param_index]);
const param_llvm_ty = try o.lowerType(param_ty);
const alignment = param_ty.abiAlignment(mod).toLlvm();
try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
@@ -5321,7 +5321,7 @@ pub const FuncGen = struct {
.slice => {
assert(!it.byval_attr);
- const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
+ const param_ty = Type.fromInterned(fn_info.param_types.get(ip)[it.zig_index - 1]);
const ptr_info = param_ty.ptrInfo(mod);
const llvm_arg_i = it.llvm_index - 2;
@@ -5339,7 +5339,7 @@ pub const FuncGen = struct {
const elem_align = (if (ptr_info.flags.alignment != .none)
@as(InternPool.Alignment, ptr_info.flags.alignment)
else
- ptr_info.child.toType().abiAlignment(mod).max(.@"1")).toLlvm();
+ Type.fromInterned(ptr_info.child).abiAlignment(mod).max(.@"1")).toLlvm();
try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder);
},
};
@@ -5471,7 +5471,7 @@ pub const FuncGen = struct {
}
const fn_info = mod.typeToFunc(self.dg.decl.ty).?;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- if (fn_info.return_type.toType().isError(mod)) {
+ if (Type.fromInterned(fn_info.return_type).isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
@@ -5513,7 +5513,7 @@ pub const FuncGen = struct {
const ret_ty = ptr_ty.childType(mod);
const fn_info = mod.typeToFunc(self.dg.decl.ty).?;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- if (fn_info.return_type.toType().isError(mod)) {
+ if (Type.fromInterned(fn_info.return_type).isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
@@ -8866,7 +8866,7 @@ pub const FuncGen = struct {
const ptr = try fg.resolveInst(ty_op.operand);
elide: {
- if (!isByRef(ptr_info.child.toType(), mod)) break :elide;
+ if (!isByRef(Type.fromInterned(ptr_info.child), mod)) break :elide;
if (!canElideLoad(fg, body_tail)) break :elide;
return ptr;
}
@@ -9040,14 +9040,14 @@ pub const FuncGen = struct {
const ptr = try self.resolveInst(atomic_load.ptr);
const ptr_ty = self.typeOf(atomic_load.ptr);
const info = ptr_ty.ptrInfo(mod);
- const elem_ty = info.child.toType();
+ const elem_ty = Type.fromInterned(info.child);
if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
const ordering = toLlvmAtomicOrdering(atomic_load.order);
const llvm_abi_ty = try o.getAtomicAbiType(elem_ty, false);
const ptr_alignment = (if (info.flags.alignment != .none)
@as(InternPool.Alignment, info.flags.alignment)
else
- info.child.toType().abiAlignment(mod)).toLlvm();
+ Type.fromInterned(info.child).abiAlignment(mod)).toLlvm();
const access_kind: Builder.MemoryAccessKind =
if (info.flags.is_volatile) .@"volatile" else .normal;
const elem_llvm_ty = try o.lowerType(elem_ty);
@@ -9500,7 +9500,7 @@ pub const FuncGen = struct {
const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod);
const function_index = try o.builder.addFunction(
- try o.builder.fnType(.i1, &.{try o.lowerType(enum_type.tag_ty.toType())}, .normal),
+ try o.builder.fnType(.i1, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal),
try o.builder.fmt("__zig_is_named_enum_value_{}", .{fqn.fmt(&mod.intern_pool)}),
toLlvmAddressSpace(.generic, mod.getTarget()),
);
@@ -9573,7 +9573,7 @@ pub const FuncGen = struct {
const ret_ty = try o.lowerType(Type.slice_const_u8_sentinel_0);
const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod);
const function_index = try o.builder.addFunction(
- try o.builder.fnType(ret_ty, &.{try o.lowerType(enum_type.tag_ty.toType())}, .normal),
+ try o.builder.fnType(ret_ty, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal),
try o.builder.fmt("__zig_tag_name_{}", .{fqn.fmt(ip)}),
toLlvmAddressSpace(.generic, mod.getTarget()),
);
@@ -9692,7 +9692,7 @@ pub const FuncGen = struct {
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const a = try self.resolveInst(extra.a);
const b = try self.resolveInst(extra.b);
- const mask = extra.mask.toValue();
+ const mask = Value.fromInterned(extra.mask);
const mask_len = extra.mask_len;
const a_len = self.typeOf(extra.a).vectorLen(mod);
@@ -9928,18 +9928,18 @@ pub const FuncGen = struct {
if (mod.typeToPackedStruct(result_ty)) |struct_type| {
const backing_int_ty = struct_type.backingIntType(ip).*;
assert(backing_int_ty != .none);
- const big_bits = backing_int_ty.toType().bitSize(mod);
+ const big_bits = Type.fromInterned(backing_int_ty).bitSize(mod);
const int_ty = try o.builder.intType(@intCast(big_bits));
comptime assert(Type.packed_struct_layout_version == 2);
var running_int = try o.builder.intValue(int_ty, 0);
var running_bits: u16 = 0;
for (elements, struct_type.field_types.get(ip)) |elem, field_ty| {
- if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
const non_int_val = try self.resolveInst(elem);
- const ty_bit_size: u16 = @intCast(field_ty.toType().bitSize(mod));
+ const ty_bit_size: u16 = @intCast(Type.fromInterned(field_ty).bitSize(mod));
const small_int_ty = try o.builder.intType(ty_bit_size);
- const small_int_val = if (field_ty.toType().isPtrAtRuntime(mod))
+ const small_int_val = if (Type.fromInterned(field_ty).isPtrAtRuntime(mod))
try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "")
else
try self.wip.cast(.bitcast, non_int_val, small_int_ty, "");
@@ -10041,7 +10041,7 @@ pub const FuncGen = struct {
if (union_obj.getLayout(ip) == .Packed) {
const big_bits = union_ty.bitSize(mod);
const int_llvm_ty = try o.builder.intType(@intCast(big_bits));
- const field_ty = union_obj.field_types.get(ip)[extra.field_index].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
const non_int_val = try self.resolveInst(extra.init);
const small_int_ty = try o.builder.intType(@intCast(field_ty.bitSize(mod)));
const small_int_val = if (field_ty.isPtrAtRuntime(mod))
@@ -10074,7 +10074,7 @@ pub const FuncGen = struct {
const alignment = layout.abi_align.toLlvm();
const result_ptr = try self.buildAllocaWorkaround(union_ty, alignment);
const llvm_payload = try self.resolveInst(extra.init);
- const field_ty = union_obj.field_types.get(ip)[extra.field_index].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
const field_llvm_ty = try o.lowerType(field_ty);
const field_size = field_ty.abiSize(mod);
const field_align = mod.unionFieldNormalAlignment(union_obj, extra.field_index);
@@ -10097,7 +10097,7 @@ pub const FuncGen = struct {
});
};
if (layout.tag_size == 0) break :t try o.builder.structType(.normal, &.{payload_ty});
- const tag_ty = try o.lowerType(union_obj.enum_tag_ty.toType());
+ const tag_ty = try o.lowerType(Type.fromInterned(union_obj.enum_tag_ty));
var fields: [3]Builder.Type = undefined;
var fields_len: usize = 2;
if (layout.tag_align.compare(.gte, layout.payload_align)) {
@@ -10140,9 +10140,9 @@ pub const FuncGen = struct {
const tag_index = @intFromBool(layout.tag_align.compare(.lt, layout.payload_align));
const indices: [2]Builder.Value = .{ usize_zero, try o.builder.intValue(.i32, tag_index) };
const field_ptr = try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, &indices, "");
- const tag_ty = try o.lowerType(union_obj.enum_tag_ty.toType());
+ const tag_ty = try o.lowerType(Type.fromInterned(union_obj.enum_tag_ty));
const llvm_tag = try o.builder.intValue(tag_ty, tag_int);
- const tag_alignment = union_obj.enum_tag_ty.toType().abiAlignment(mod).toLlvm();
+ const tag_alignment = Type.fromInterned(union_obj.enum_tag_ty).abiAlignment(mod).toLlvm();
_ = try self.wip.store(.normal, llvm_tag, field_ptr, tag_alignment);
}
@@ -10514,7 +10514,7 @@ pub const FuncGen = struct {
const o = self.dg.object;
const mod = o.module;
const info = ptr_ty.ptrInfo(mod);
- const elem_ty = info.child.toType();
+ const elem_ty = Type.fromInterned(info.child);
if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
const ptr_alignment = (if (info.flags.alignment != .none)
@@ -10586,7 +10586,7 @@ pub const FuncGen = struct {
const o = self.dg.object;
const mod = o.module;
const info = ptr_ty.ptrInfo(mod);
- const elem_ty = info.child.toType();
+ const elem_ty = Type.fromInterned(info.child);
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
return;
}
@@ -10926,7 +10926,7 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ
}
fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool {
- const return_type = fn_info.return_type.toType();
+ const return_type = Type.fromInterned(fn_info.return_type);
if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) return false;
const target = mod.getTarget();
@@ -10967,7 +10967,7 @@ fn firstParamSRetSystemV(ty: Type, mod: *Module) bool {
/// be effectively bitcasted to the actual return type.
fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
const mod = o.module;
- const return_type = fn_info.return_type.toType();
+ const return_type = Type.fromInterned(fn_info.return_type);
if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
// If the return type is an error set or an error union, then we make this
// anyerror return type instead, so that it can be coerced into a function
@@ -11051,7 +11051,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
const mod = o.module;
- const return_type = fn_info.return_type.toType();
+ const return_type = Type.fromInterned(fn_info.return_type);
switch (x86_64_abi.classifyWindows(return_type, mod)) {
.integer => {
if (isScalar(mod, return_type)) {
@@ -11070,7 +11070,7 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Err
fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
const mod = o.module;
const ip = &mod.intern_pool;
- const return_type = fn_info.return_type.toType();
+ const return_type = Type.fromInterned(fn_info.return_type);
if (isScalar(mod, return_type)) {
return o.lowerType(return_type);
}
@@ -11158,7 +11158,7 @@ const ParamTypeIterator = struct {
const ip = &mod.intern_pool;
const ty = it.fn_info.param_types.get(ip)[it.zig_index];
it.byval_attr = false;
- return nextInner(it, ty.toType());
+ return nextInner(it, Type.fromInterned(ty));
}
/// `airCall` uses this instead of `next` so that it can take into account variadic functions.
@@ -11172,7 +11172,7 @@ const ParamTypeIterator = struct {
return nextInner(it, fg.typeOf(args[it.zig_index]));
}
} else {
- return nextInner(it, it.fn_info.param_types.get(ip)[it.zig_index].toType());
+ return nextInner(it, Type.fromInterned(it.fn_info.param_types.get(ip)[it.zig_index]));
}
}
@@ -11503,11 +11503,11 @@ fn isByRef(ty: Type, mod: *Module) bool {
.anon_struct_type => |tuple| {
var count: usize = 0;
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| {
- if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
+ if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
count += 1;
if (count > max_fields_byval) return true;
- if (isByRef(field_ty.toType(), mod)) return true;
+ if (isByRef(Type.fromInterned(field_ty), mod)) return true;
}
return false;
},
@@ -11524,7 +11524,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
while (it.next()) |field_index| {
count += 1;
if (count > max_fields_byval) return true;
- const field_ty = field_types[field_index].toType();
+ const field_ty = Type.fromInterned(field_types[field_index]);
if (isByRef(field_ty, mod)) return true;
}
return false;
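Most hunks above are one-for-one swaps, but the .byref case in the attribute-lowering hunk (around line 3022 of llvm.zig) also hoists the conversion: the old code called param_ty.toType() twice on the same index, while the new code binds the converted Type once and reuses it. Where a hunk still converts the same index repeatedly, the same hoisting would apply; a sketch, not from this diff:

// Two conversions of the same interned index, as several hunks still do:
const field_size = Type.fromInterned(field_ty).abiSize(mod);
const field_align = Type.fromInterned(field_ty).abiAlignment(mod);

// Equivalent form that converts once and reuses the Type:
const ty = Type.fromInterned(field_ty);
const size = ty.abiSize(mod);
const alignment = ty.abiAlignment(mod);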
src/codegen/spirv.zig
@@ -465,7 +465,7 @@ const DeclGen = struct {
};
const mod = self.module;
- const ty = mod.intern_pool.typeOf(val).toType();
+ const ty = Type.fromInterned(mod.intern_pool.typeOf(val));
const ptr_ty_ref = try self.ptrType(ty, storage_class);
const var_id = self.spv.declPtr(spv_decl_index).result_id;
@@ -515,7 +515,7 @@ const DeclGen = struct {
});
self.current_block_label = root_block_id;
- const val_id = try self.constant(ty, val.toValue(), .indirect);
+ const val_id = try self.constant(ty, Value.fromInterned(val), .indirect);
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = var_id,
.object = val_id,
@@ -822,10 +822,10 @@ const DeclGen = struct {
.payload => err_int_ty,
};
const err_val = switch (error_union.val) {
- .err_name => |err_name| (try mod.intern(.{ .err = .{
+ .err_name => |err_name| Value.fromInterned((try mod.intern(.{ .err = .{
.ty = ty.errorUnionSet(mod).toIntern(),
.name = err_name,
- } })).toValue(),
+ } }))),
.payload => try mod.intValue(err_int_ty, 0),
};
const payload_ty = ty.errorUnionPayload(mod);
@@ -835,10 +835,10 @@ const DeclGen = struct {
return try self.constant(err_ty, err_val, .indirect);
}
- const payload_val = switch (error_union.val) {
+ const payload_val = Value.fromInterned(switch (error_union.val) {
.err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }),
.payload => |payload| payload,
- }.toValue();
+ });
var constituents: [2]IdRef = undefined;
var types: [2]Type = undefined;
@@ -869,7 +869,7 @@ const DeclGen = struct {
return ptr_id;
}
- const len_id = try self.constant(Type.usize, ptr.len.toValue(), .indirect);
+ const len_id = try self.constant(Type.usize, Value.fromInterned(ptr.len), .indirect);
return try self.constructStruct(
ty,
&.{ ptr_ty, Type.usize },
@@ -909,7 +909,7 @@ const DeclGen = struct {
},
.aggregate => |aggregate| switch (ip.indexToKey(ty.ip_index)) {
inline .array_type, .vector_type => |array_type, tag| {
- const elem_ty = array_type.child.toType();
+ const elem_ty = Type.fromInterned(array_type.child);
const elem_ty_ref = try self.resolveType(elem_ty, .indirect);
const constituents = try self.gpa.alloc(IdRef, @as(u32, @intCast(ty.arrayLenIncludingSentinel(mod))));
@@ -925,11 +925,11 @@ const DeclGen = struct {
},
.elems => |elems| {
for (0..@as(usize, @intCast(array_type.len))) |i| {
- constituents[i] = try self.constant(elem_ty, elems[i].toValue(), .indirect);
+ constituents[i] = try self.constant(elem_ty, Value.fromInterned(elems[i]), .indirect);
}
},
.repeated_elem => |elem| {
- const val_id = try self.constant(elem_ty, elem.toValue(), .indirect);
+ const val_id = try self.constant(elem_ty, Value.fromInterned(elem), .indirect);
for (0..@as(usize, @intCast(array_type.len))) |i| {
constituents[i] = val_id;
}
@@ -938,7 +938,7 @@ const DeclGen = struct {
switch (tag) {
inline .array_type => if (array_type.sentinel != .none) {
- constituents[constituents.len - 1] = try self.constant(elem_ty, array_type.sentinel.toValue(), .indirect);
+ constituents[constituents.len - 1] = try self.constant(elem_ty, Value.fromInterned(array_type.sentinel), .indirect);
},
else => {},
}
@@ -959,7 +959,7 @@ const DeclGen = struct {
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
- const field_ty = struct_type.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// This is a zero-bit field - we only needed it for the alignment.
continue;
@@ -979,11 +979,11 @@ const DeclGen = struct {
else => unreachable,
},
.un => |un| {
- const active_field = ty.unionTagFieldIndex(un.tag.toValue(), mod).?;
+ const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?;
const union_obj = mod.typeToUnion(ty).?;
- const field_ty = union_obj.field_types.get(ip)[active_field].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[active_field]);
const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(mod))
- try self.constant(field_ty, un.val.toValue(), .direct)
+ try self.constant(field_ty, Value.fromInterned(un.val), .direct)
else
null;
return try self.unionInit(ty, active_field, payload);
@@ -1007,7 +1007,7 @@ const DeclGen = struct {
try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
.id_result_type = self.typeId(result_ty_ref),
.id_result = ptr_id,
- .integer_value = try self.constant(Type.usize, int.toValue(), .direct),
+ .integer_value = try self.constant(Type.usize, Value.fromInterned(int), .direct),
});
return ptr_id;
},
@@ -1015,8 +1015,8 @@ const DeclGen = struct {
.opt_payload => unreachable, // TODO
.comptime_field => unreachable,
.elem => |elem_ptr| {
- const parent_ptr_ty = mod.intern_pool.typeOf(elem_ptr.base).toType();
- const parent_ptr_id = try self.constantPtr(parent_ptr_ty, elem_ptr.base.toValue());
+ const parent_ptr_ty = Type.fromInterned(mod.intern_pool.typeOf(elem_ptr.base));
+ const parent_ptr_id = try self.constantPtr(parent_ptr_ty, Value.fromInterned(elem_ptr.base));
const size_ty_ref = try self.sizeType();
const index_id = try self.constInt(size_ty_ref, elem_ptr.index);
@@ -1040,8 +1040,8 @@ const DeclGen = struct {
return result_id;
},
.field => |field| {
- const base_ptr_ty = mod.intern_pool.typeOf(field.base).toType();
- const base_ptr = try self.constantPtr(base_ptr_ty, field.base.toValue());
+ const base_ptr_ty = Type.fromInterned(mod.intern_pool.typeOf(field.base));
+ const base_ptr = try self.constantPtr(base_ptr_ty, Value.fromInterned(field.base));
const field_index: u32 = @intCast(field.index);
return try self.structFieldPtr(ptr_ty, base_ptr_ty, base_ptr, field_index);
},
@@ -1059,12 +1059,12 @@ const DeclGen = struct {
const ip = &mod.intern_pool;
const ty_ref = try self.resolveType(ty, .direct);
const decl_val = anon_decl.val;
- const decl_ty = ip.typeOf(decl_val).toType();
+ const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
- if (decl_val.toValue().getFunction(mod)) |func| {
+ if (Value.fromInterned(decl_val).getFunction(mod)) |func| {
_ = func;
unreachable; // TODO
- } else if (decl_val.toValue().getExternFunc(mod)) |func| {
+ } else if (Value.fromInterned(decl_val).getExternFunc(mod)) |func| {
_ = func;
unreachable;
}
@@ -1267,7 +1267,7 @@ const DeclGen = struct {
const layout = self.unionLayout(ty);
if (!layout.has_payload) {
// No payload, so represent this as just the tag type.
- return try self.resolveType(union_obj.enum_tag_ty.toType(), .indirect);
+ return try self.resolveType(Type.fromInterned(union_obj.enum_tag_ty), .indirect);
}
if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
@@ -1278,7 +1278,7 @@ const DeclGen = struct {
const u8_ty_ref = try self.intType(.unsigned, 8); // TODO: What if Int8Type is not enabled?
if (layout.tag_size != 0) {
- const tag_ty_ref = try self.resolveType(union_obj.enum_tag_ty.toType(), .indirect);
+ const tag_ty_ref = try self.resolveType(Type.fromInterned(union_obj.enum_tag_ty), .indirect);
member_types[layout.tag_index] = tag_ty_ref;
member_names[layout.tag_index] = try self.spv.resolveString("(tag)");
}
@@ -1430,13 +1430,13 @@ const DeclGen = struct {
defer self.gpa.free(param_ty_refs);
var param_index: usize = 0;
for (fn_info.param_types.get(ip)) |param_ty_index| {
- const param_ty = param_ty_index.toType();
+ const param_ty = Type.fromInterned(param_ty_index);
if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
param_ty_refs[param_index] = try self.resolveType(param_ty, .direct);
param_index += 1;
}
- const return_ty_ref = try self.resolveFnReturnType(fn_info.return_type.toType());
+ const return_ty_ref = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
const ty_ref = try self.spv.resolve(.{ .function_type = .{
.return_type = return_ty_ref,
@@ -1459,7 +1459,7 @@ const DeclGen = struct {
// in ptrType()!
const storage_class = spvStorageClass(ptr_info.flags.address_space);
- const ptr_ty_ref = try self.ptrType(ptr_info.child.toType(), storage_class);
+ const ptr_ty_ref = try self.ptrType(Type.fromInterned(ptr_info.child), storage_class);
if (ptr_info.flags.size != .Slice) {
return ptr_ty_ref;
@@ -1494,9 +1494,9 @@ const DeclGen = struct {
var member_index: usize = 0;
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| {
- if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
+ if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
- member_types[member_index] = try self.resolveType(field_ty.toType(), .indirect);
+ member_types[member_index] = try self.resolveType(Type.fromInterned(field_ty), .indirect);
member_index += 1;
}
@@ -1513,7 +1513,7 @@ const DeclGen = struct {
};
if (struct_type.layout == .Packed) {
- return try self.resolveType(struct_type.backingIntType(ip).toType(), .direct);
+ return try self.resolveType(Type.fromInterned(struct_type.backingIntType(ip).*), .direct);
}
var member_types = std.ArrayList(CacheRef).init(self.gpa);
@@ -1524,7 +1524,7 @@ const DeclGen = struct {
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
- const field_ty = struct_type.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// This is a zero-bit field - we only needed it for the alignment.
continue;
@@ -1728,7 +1728,7 @@ const DeclGen = struct {
if (union_layout.has_payload) {
const most_aligned_field = layout.most_aligned_field;
- const most_aligned_field_ty = union_obj.field_types.get(ip)[most_aligned_field].toType();
+ const most_aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[most_aligned_field]);
union_layout.payload_ty = most_aligned_field_ty;
union_layout.payload_size = @intCast(most_aligned_field_ty.abiSize(mod));
} else {
@@ -1855,7 +1855,7 @@ const DeclGen = struct {
if (decl.val.getFunction(mod)) |_| {
assert(decl.ty.zigTypeTag(mod) == .Fn);
const fn_info = mod.typeToFunc(decl.ty).?;
- const return_ty_ref = try self.resolveFnReturnType(fn_info.return_type.toType());
+ const return_ty_ref = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
const prototype_id = try self.resolveTypeId(decl.ty);
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
@@ -1867,7 +1867,7 @@ const DeclGen = struct {
try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len);
for (fn_info.param_types.get(ip)) |param_ty_index| {
- const param_ty = param_ty_index.toType();
+ const param_ty = Type.fromInterned(param_ty_index);
if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const param_type_id = try self.resolveTypeId(param_ty);
@@ -1914,7 +1914,7 @@ const DeclGen = struct {
}
} else {
const init_val = if (decl.val.getVariable(mod)) |payload|
- payload.init.toValue()
+ Value.fromInterned(payload.init)
else
decl.val;
@@ -2605,7 +2605,7 @@ const DeclGen = struct {
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
const a = try self.resolve(extra.a);
const b = try self.resolve(extra.b);
- const mask = extra.mask.toValue();
+ const mask = Value.fromInterned(extra.mask);
const mask_len = extra.mask_len;
const a_len = self.typeOf(extra.a).vectorLen(mod);
@@ -3239,11 +3239,11 @@ const DeclGen = struct {
.anon_struct_type => |tuple| {
for (tuple.types.get(ip), elements, 0..) |field_ty, element, i| {
if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
- assert(field_ty.toType().hasRuntimeBits(mod));
+ assert(Type.fromInterned(field_ty).hasRuntimeBits(mod));
const id = try self.resolve(element);
- types[index] = field_ty.toType();
- constituents[index] = try self.convertToIndirect(field_ty.toType(), id);
+ types[index] = Type.fromInterned(field_ty);
+ constituents[index] = try self.convertToIndirect(Type.fromInterned(field_ty), id);
index += 1;
}
},
@@ -3252,7 +3252,7 @@ const DeclGen = struct {
for (elements, 0..) |element, i| {
const field_index = it.next().?;
if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
- const field_ty = struct_type.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
assert(field_ty.hasRuntimeBitsIgnoreComptime(mod));
const id = try self.resolve(element);
@@ -3527,7 +3527,7 @@ const DeclGen = struct {
try self.store(maybe_tag_ty.?, ptr_id, tag_id, .{});
}
- const payload_ty = union_ty.field_types.get(ip)[active_field].toType();
+ const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]);
if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const pl_ptr_ty_ref = try self.ptrType(layout.payload_ty, .Function);
const pl_ptr_id = try self.accessChain(pl_ptr_ty_ref, tmp_id, &.{layout.payload_index});
@@ -3560,7 +3560,7 @@ const DeclGen = struct {
const ty = self.typeOfIndex(inst);
const union_obj = mod.typeToUnion(ty).?;
- const field_ty = union_obj.field_types.get(ip)[extra.field_index].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(mod))
try self.resolve(extra.init)
else
@@ -4223,7 +4223,7 @@ const DeclGen = struct {
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const decl = mod.declPtr(self.decl_index);
const fn_info = mod.typeToFunc(decl.ty).?;
- if (fn_info.return_type.toType().isError(mod)) {
+ if (Type.fromInterned(fn_info.return_type).isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
@@ -4248,7 +4248,7 @@ const DeclGen = struct {
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const decl = mod.declPtr(self.decl_index);
const fn_info = mod.typeToFunc(decl.ty).?;
- if (fn_info.return_type.toType().isError(mod)) {
+ if (Type.fromInterned(fn_info.return_type).isError(mod)) {
// Functions with an empty error set are emitted with an error code
// return type and return zero so they can be function pointers coerced
// to functions that return anyerror.
@@ -4913,7 +4913,7 @@ const DeclGen = struct {
const fn_info = mod.typeToFunc(zig_fn_ty).?;
const return_type = fn_info.return_type;
- const result_type_ref = try self.resolveFnReturnType(return_type.toType());
+ const result_type_ref = try self.resolveFnReturnType(Type.fromInterned(return_type));
const result_id = self.spv.allocId();
const callee_id = try self.resolve(pl_op.operand);
@@ -4944,7 +4944,7 @@ const DeclGen = struct {
try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
}
- if (self.liveness.isUnused(inst) or !return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) {
+ if (self.liveness.isUnused(inst) or !Type.fromInterned(return_type).hasRuntimeBitsIgnoreComptime(mod)) {
return null;
}
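Where the old code chained .toValue() onto a freshly interned index, the rewrite wraps the entire intern call, which is why a few hunks above (in llvm.zig's FuncGen and spirv.zig's DeclGen) read Value.fromInterned((try mod.intern(...))). The doubled parentheses are an artifact of the mechanical rewrite and carry no meaning in Zig; an equivalent, slightly cleaner spelling of the llvm.zig case would be:

// Intern the value, then wrap the resulting index as a Value in one step.
const null_usize = Value.fromInterned(try mod.intern(.{ .opt = .{
    .ty = ty,
    .val = .none,
} }));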
src/link/Elf/ZigObject.zig
@@ -660,7 +660,7 @@ pub fn lowerAnonDecl(
) !codegen.Result {
const gpa = elf_file.base.allocator;
const mod = elf_file.base.options.module.?;
- const ty = mod.intern_pool.typeOf(decl_val).toType();
+ const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
const decl_alignment = switch (explicit_alignment) {
.none => ty.abiAlignment(mod),
else => explicit_alignment,
@@ -671,7 +671,7 @@ pub fn lowerAnonDecl(
return .ok;
}
- const val = decl_val.toValue();
+ const val = Value.fromInterned(decl_val);
const tv = TypedValue{ .ty = ty, .val = val };
var name_buf: [32]u8 = undefined;
const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{
@@ -835,7 +835,7 @@ fn getDeclShdrIndex(
});
}
if (variable.is_const) break :blk elf_file.zig_data_rel_ro_section_index.?;
- if (variable.init.toValue().isUndefDeep(mod)) {
+ if (Value.fromInterned(variable.init).isUndefDeep(mod)) {
const mode = elf_file.base.options.optimize_mode;
if (mode == .Debug or mode == .ReleaseSafe) break :blk elf_file.zig_data_section_index.?;
break :blk elf_file.zig_bss_section_index.?;
@@ -1114,7 +1114,7 @@ pub fn updateDecl(
defer if (decl_state) |*ds| ds.deinit();
// TODO implement .debug_info for global variables
- const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
+ const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
const res = if (decl_state) |*ds|
try codegen.generateSymbol(&elf_file.base, decl.srcLoc(mod), .{
.ty = decl.ty,
@@ -1613,5 +1613,7 @@ const Module = @import("../../Module.zig");
const Object = @import("Object.zig");
const Symbol = @import("Symbol.zig");
const StringTable = @import("../StringTable.zig");
+const Type = @import("../../type.zig").Type;
+const Value = @import("../../value.zig").Value;
const TypedValue = @import("../../TypedValue.zig");
const ZigObject = @This();
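Since fromInterned lives on Type and Value rather than on InternPool.Index, files that previously leaned on the index methods now need both types in scope; the two imports added above are exactly that, and C.zig and Coff.zig below gain the same Value import. The resulting call-site shape, sketched with a hypothetical helper (Module and InternPool imports assumed, name not from this commit):

// Hypothetical example: convert interned indexes at the boundary,
// then work in terms of Type and Value.
fn lowerAnonDeclSketch(mod: *Module, decl_val: InternPool.Index) void {
    const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
    const val = Value.fromInterned(decl_val);
    _ = ty;
    _ = val;
}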
src/link/C.zig
@@ -13,6 +13,7 @@ const codegen = @import("../codegen/c.zig");
const link = @import("../link.zig");
const trace = @import("../tracy.zig").trace;
const Type = @import("../type.zig").Type;
+const Value = @import("../value.zig").Value;
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
@@ -254,8 +255,8 @@ fn updateAnonDecl(self: *C, module: *Module, i: usize) !void {
}
const tv: @import("../TypedValue.zig") = .{
- .ty = module.intern_pool.typeOf(anon_decl).toType(),
- .val = anon_decl.toValue(),
+ .ty = Type.fromInterned(module.intern_pool.typeOf(anon_decl)),
+ .val = Value.fromInterned(anon_decl),
};
const c_value: codegen.CValue = .{ .constant = anon_decl };
const alignment: Alignment = self.aligned_anon_decls.get(anon_decl) orelse .none;
src/link/Coff.zig
@@ -1189,7 +1189,7 @@ pub fn updateDecl(
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
+ const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
.val = decl_val,
@@ -1794,7 +1794,7 @@ pub fn lowerAnonDecl(
) !codegen.Result {
const gpa = self.base.allocator;
const mod = self.base.options.module.?;
- const ty = mod.intern_pool.typeOf(decl_val).toType();
+ const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
const decl_alignment = switch (explicit_alignment) {
.none => ty.abiAlignment(mod),
else => explicit_alignment,
@@ -1805,7 +1805,7 @@ pub fn lowerAnonDecl(
return .ok;
}
- const val = decl_val.toValue();
+ const val = Value.fromInterned(decl_val);
const tv = TypedValue{ .ty = ty, .val = val };
var name_buf: [32]u8 = undefined;
const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{
@@ -2669,6 +2669,7 @@ const Relocation = @import("Coff/Relocation.zig");
const TableSection = @import("table_section.zig").TableSection;
const StringTable = @import("StringTable.zig");
const Type = @import("../type.zig").Type;
+const Value = @import("../value.zig").Value;
const TypedValue = @import("../TypedValue.zig");
pub const base_tag: link.File.Tag = .coff;
src/link/Dwarf.zig
@@ -305,7 +305,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(index));
+ try self.addTypeRelocGlobal(atom_index, Type.fromInterned(field_ty), @intCast(index));
// DW.AT.data_member_location, DW.FORM.udata
const field_off = ty.structFieldOffset(field_index, mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
@@ -323,7 +323,7 @@ pub const DeclState = struct {
if (struct_type.isTuple(ip)) {
for (struct_type.field_types.get(ip), struct_type.offsets.get(ip), 0..) |field_ty, field_off, field_index| {
- if (!field_ty.toType().hasRuntimeBits(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
// DW.AT.member
try dbg_info_buffer.append(@intFromEnum(AbbrevKind.struct_member));
// DW.AT.name, DW.FORM.string
@@ -331,7 +331,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(index));
+ try self.addTypeRelocGlobal(atom_index, Type.fromInterned(field_ty), @intCast(index));
// DW.AT.data_member_location, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
@@ -341,7 +341,7 @@ pub const DeclState = struct {
struct_type.field_types.get(ip),
struct_type.offsets.get(ip),
) |field_name_ip, field_ty, field_off| {
- if (!field_ty.toType().hasRuntimeBits(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
const field_name = ip.stringToSlice(field_name_ip);
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2);
@@ -352,7 +352,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(index));
+ try self.addTypeRelocGlobal(atom_index, Type.fromInterned(field_ty), @intCast(index));
// DW.AT.data_member_location, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
@@ -389,7 +389,7 @@ pub const DeclState = struct {
const value = enum_type.values.get(ip)[field_i];
// TODO do not assume a 64bit enum value - could be bigger.
// See https://github.com/ziglang/zig/issues/645
- const field_int_val = try value.toValue().intFromEnum(ty, mod);
+ const field_int_val = try Value.fromInterned(value).intFromEnum(ty, mod);
break :value @bitCast(field_int_val.toSignedInt(mod));
};
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian);
@@ -443,7 +443,7 @@ pub const DeclState = struct {
}
for (union_obj.field_types.get(ip), union_obj.field_names.get(ip)) |field_ty, field_name| {
- if (!field_ty.toType().hasRuntimeBits(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
// DW.AT.member
try dbg_info_buffer.append(@intFromEnum(AbbrevKind.struct_member));
// DW.AT.name, DW.FORM.string
@@ -452,7 +452,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(index));
+ try self.addTypeRelocGlobal(atom_index, Type.fromInterned(field_ty), @intCast(index));
// DW.AT.data_member_location, DW.FORM.udata
try dbg_info_buffer.append(0);
}
@@ -469,7 +469,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, union_obj.enum_tag_ty.toType(), @intCast(index));
+ try self.addTypeRelocGlobal(atom_index, Type.fromInterned(union_obj.enum_tag_ty), @intCast(index));
// DW.AT.data_member_location, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), tag_offset);
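
A recurring idiom in these Dwarf.zig hunks: each member's DW.AT.type attribute is encoded as DW.FORM.ref4, a 4-byte DIE reference, and the referenced type's DIE offset is generally unknown while the member is being written. The code therefore reserves the slot and records a relocation to patch it later. The same three calls as above, restated with explanatory comments:

    // Remember where the 4-byte DW.FORM.ref4 slot begins.
    const index = dbg_info_buffer.items.len;
    // Reserve the slot; it is left unwritten for now.
    try dbg_info_buffer.resize(index + 4);
    // Record a relocation so the slot is later patched with the type's DIE offset.
    try self.addTypeRelocGlobal(atom_index, Type.fromInterned(field_ty), @intCast(index));
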
src/link/MachO.zig
@@ -2400,7 +2400,7 @@ pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !vo
null;
defer if (decl_state) |*ds| ds.deinit();
- const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
+ const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
const res = if (decl_state) |*ds|
try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
@@ -2569,7 +2569,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D
const decl = module.declPtr(decl_index);
const decl_metadata = self.decls.get(decl_index).?;
- const decl_val = decl.val.getVariable(mod).?.init.toValue();
+ const decl_val = Value.fromInterned(decl.val.getVariable(mod).?.init);
const res = if (decl_state) |*ds|
try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
@@ -2995,7 +2995,7 @@ pub fn lowerAnonDecl(
) !codegen.Result {
const gpa = self.base.allocator;
const mod = self.base.options.module.?;
- const ty = mod.intern_pool.typeOf(decl_val).toType();
+ const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
const decl_alignment = switch (explicit_alignment) {
.none => ty.abiAlignment(mod),
else => explicit_alignment,
@@ -3006,7 +3006,7 @@ pub fn lowerAnonDecl(
return .ok;
}
- const val = decl_val.toValue();
+ const val = Value.fromInterned(decl_val);
const tv = TypedValue{ .ty = ty, .val = val };
var name_buf: [32]u8 = undefined;
const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{
src/link/Plan9.zig
@@ -13,6 +13,8 @@ const File = link.File;
const build_options = @import("build_options");
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
+const Type = @import("../type.zig").Type;
+const Value = @import("../value.zig").Value;
const TypedValue = @import("../TypedValue.zig");
const std = @import("std");
@@ -517,7 +519,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !vo
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
- const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
+ const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
// TODO we need the symbol index for symbol in the table of locals for the containing atom
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
@@ -1492,8 +1494,8 @@ pub fn lowerAnonDecl(self: *Plan9, decl_val: InternPool.Index, src_loc: Module.S
const gop = try self.anon_decls.getOrPut(gpa, decl_val);
const mod = self.base.options.module.?;
if (!gop.found_existing) {
- const ty = mod.intern_pool.typeOf(decl_val).toType();
- const val = decl_val.toValue();
+ const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
+ const val = Value.fromInterned(decl_val);
const tv = TypedValue{ .ty = ty, .val = val };
const name = try std.fmt.allocPrint(gpa, "__anon_{d}", .{@intFromEnum(decl_val)});
src/link/Wasm.zig
@@ -23,6 +23,7 @@ const build_options = @import("build_options");
const wasi_libc = @import("../wasi_libc.zig");
const Cache = std.Build.Cache;
const Type = @import("../type.zig").Type;
+const Value = @import("../value.zig").Value;
const TypedValue = @import("../TypedValue.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Air = @import("../Air.zig");
@@ -1452,7 +1453,7 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
const lib_name = mod.intern_pool.stringToSliceUnwrap(variable.lib_name);
return wasm.addOrUpdateImport(name, atom.sym_index, lib_name, null);
}
- const val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
+ const val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
var code_writer = std.ArrayList(u8).init(wasm.base.allocator);
defer code_writer.deinit();
@@ -1719,8 +1720,8 @@ pub fn lowerAnonDecl(
const gop = try wasm.anon_decls.getOrPut(wasm.base.allocator, decl_val);
if (!gop.found_existing) {
const mod = wasm.base.options.module.?;
- const ty = mod.intern_pool.typeOf(decl_val).toType();
- const tv: TypedValue = .{ .ty = ty, .val = decl_val.toValue() };
+ const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
+ const tv: TypedValue = .{ .ty = ty, .val = Value.fromInterned(decl_val) };
var name_buf: [32]u8 = undefined;
const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{
@intFromEnum(decl_val),
@@ -1751,7 +1752,7 @@ pub fn getAnonDeclVAddr(wasm: *Wasm, decl_val: InternPool.Index, reloc_info: lin
const parent_atom = wasm.getAtomPtr(parent_atom_index);
const is_wasm32 = wasm.base.options.target.cpu.arch == .wasm32;
const mod = wasm.base.options.module.?;
- const ty = mod.intern_pool.typeOf(decl_val).toType();
+ const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
if (ty.zigTypeTag(mod) == .Fn) {
assert(reloc_info.addend == 0); // addend not allowed for function relocations
// We found a function pointer, so add it to our table,
@@ -3533,7 +3534,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
} else if (decl.getOwnedVariable(mod)) |variable| {
if (variable.is_const) {
try wasm.parseAtom(atom_index, .{ .data = .read_only });
- } else if (variable.init.toValue().isUndefDeep(mod)) {
+ } else if (Value.fromInterned(variable.init).isUndefDeep(mod)) {
// for safe build modes, we store the atom in the data segment,
// whereas for unsafe build modes we store it in bss.
const is_initialized = wasm.base.options.optimize_mode == .Debug or
@@ -3558,7 +3559,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
}
// parse anonymous declarations
for (wasm.anon_decls.keys(), wasm.anon_decls.values()) |decl_val, atom_index| {
- const ty = mod.intern_pool.typeOf(decl_val).toType();
+ const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
if (ty.zigTypeTag(mod) == .Fn) {
try wasm.parseAtom(atom_index, .function);
} else {
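
Plan9, Wasm, Coff, MachO and the Elf ZigObject above all name anonymous declarations the same way: the InternPool index is unique within a compilation, so it doubles as the symbol-name discriminator. The shared idiom, extracted for illustration:

    // An anon decl's symbol name is derived from its interned index.
    var name_buf: [32]u8 = undefined;
    const name = try std.fmt.bufPrint(&name_buf, "__anon_{d}", .{@intFromEnum(decl_val)});
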
src/Air.zig
@@ -1221,7 +1221,7 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index {
pub fn typeOf(air: *const Air, inst: Air.Inst.Ref, ip: *const InternPool) Type {
if (refToInterned(inst)) |ip_index| {
- return ip.typeOf(ip_index).toType();
+ return Type.fromInterned(ip.typeOf(ip_index));
} else {
return air.typeOfIndex(refToIndex(inst).?, ip);
}
@@ -1438,7 +1438,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip);
- return ip.funcTypeReturnType(callee_ty.toIntern()).toType();
+ return Type.fromInterned(ip.funcTypeReturnType(callee_ty.toIntern()));
},
.slice_elem_val, .ptr_elem_val, .array_elem_val => {
@@ -1456,7 +1456,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.reduce, .reduce_optimized => {
const operand_ty = air.typeOf(datas[inst].reduce.operand, ip);
- return ip.indexToKey(operand_ty.ip_index).vector_type.child.toType();
+ return Type.fromInterned(ip.indexToKey(operand_ty.ip_index).vector_type.child);
},
.mul_add => return air.typeOf(datas[inst].pl_op.operand, ip),
@@ -1467,7 +1467,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.@"try" => {
const err_union_ty = air.typeOf(datas[inst].pl_op.operand, ip);
- return ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type.toType();
+ return Type.fromInterned(ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type);
},
.work_item_id,
@@ -1482,7 +1482,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type {
_ = air; // TODO: remove this parameter
- return refToInterned(ref).?.toType();
+ return Type.fromInterned(refToInterned(ref).?);
}
/// Returns the requested data, as well as the new index which is at the start of the
@@ -1561,7 +1561,7 @@ pub fn indexToRef(inst: Inst.Index) Inst.Ref {
/// Returns `null` if runtime-known.
pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value {
if (refToInterned(inst)) |ip_index| {
- return ip_index.toValue();
+ return Value.fromInterned(ip_index);
}
const index = refToIndex(inst).?;
return air.typeOfIndex(index, &mod.intern_pool).onePossibleValue(mod);
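
The Air.zig hunks all rest on one invariant: an Air.Inst.Ref is either an interned constant, resolvable through the InternPool without looking at any instruction, or a runtime instruction index. A usage sketch of the two-way split, using the refToInterned/refToIndex API shown above:

    // Constants resolve through the pool; runtime values fall back to the instruction.
    if (Air.refToInterned(ref)) |ip_index| {
        const known = Value.fromInterned(ip_index); // comptime-known constant
        _ = known;
    } else {
        const inst = Air.refToIndex(ref).?; // runtime-known; inspect the instruction
        _ = inst;
    }
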
src/Autodoc.zig
@@ -117,7 +117,7 @@ fn generateZirData(self: *Autodoc, output_dir: std.fs.Dir) !void {
// Not a real type, doesn't have a normal name
try tmpbuf.writer().writeAll("(generic poison)");
} else {
- try ip_index.toType().fmt(self.comp_module).format("", .{}, tmpbuf.writer());
+ try @import("type.zig").Type.fromInterned(ip_index).fmt(self.comp_module).format("", .{}, tmpbuf.writer());
}
try self.types.append(
self.arena,
src/codegen.zig
@@ -259,10 +259,10 @@ pub fn generateSymbol(
const begin = code.items.len;
switch (try generateSymbol(bin_file, src_loc, .{
.ty = payload_ty,
- .val = switch (error_union.val) {
+ .val = Value.fromInterned(switch (error_union.val) {
.err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }),
.payload => |payload| payload,
- }.toValue(),
+ }),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
@@ -293,7 +293,7 @@ pub fn generateSymbol(
const int_tag_ty = typed_value.ty.intTagType(mod);
switch (try generateSymbol(bin_file, src_loc, .{
.ty = int_tag_ty,
- .val = try mod.getCoerced(enum_tag.int.toValue(), int_tag_ty),
+ .val = try mod.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
@@ -323,7 +323,7 @@ pub fn generateSymbol(
// generate len
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.usize,
- .val = ptr.len.toValue(),
+ .val = Value.fromInterned(ptr.len),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
@@ -350,7 +350,7 @@ pub fn generateSymbol(
} else {
const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1;
if (payload_type.hasRuntimeBits(mod)) {
- const value = payload_val orelse (try mod.intern(.{ .undef = payload_type.toIntern() })).toValue();
+ const value = payload_val orelse Value.fromInterned((try mod.intern(.{ .undef = payload_type.toIntern() })));
switch (try generateSymbol(bin_file, src_loc, .{
.ty = payload_type,
.val = value,
@@ -372,15 +372,15 @@ pub fn generateSymbol(
array_type.len + @intFromBool(array_type.sentinel != .none);
while (index < len_including_sentinel) : (index += 1) {
switch (try generateSymbol(bin_file, src_loc, .{
- .ty = array_type.child.toType(),
- .val = switch (aggregate.storage) {
+ .ty = Type.fromInterned(array_type.child),
+ .val = Value.fromInterned(switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[@as(usize, @intCast(index))],
.repeated_elem => |elem| if (index < array_type.len)
elem
else
array_type.sentinel,
- }.toValue(),
+ }),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
@@ -395,12 +395,12 @@ pub fn generateSymbol(
var index: u64 = 0;
while (index < vector_type.len) : (index += 1) {
switch (try generateSymbol(bin_file, src_loc, .{
- .ty = vector_type.child.toType(),
- .val = switch (aggregate.storage) {
+ .ty = Type.fromInterned(vector_type.child),
+ .val = Value.fromInterned(switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[@as(usize, @intCast(index))],
.repeated_elem => |elem| elem,
- }.toValue(),
+ }),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
@@ -410,7 +410,7 @@ pub fn generateSymbol(
}
const padding = math.cast(usize, typed_value.ty.abiSize(mod) -
- (math.divCeil(u64, vector_type.child.toType().bitSize(mod) * vector_type.len, 8) catch |err| switch (err) {
+ (math.divCeil(u64, Type.fromInterned(vector_type.child).bitSize(mod) * vector_type.len, 8) catch |err| switch (err) {
error.DivisionByZero => unreachable,
else => |e| return e,
})) orelse return error.Overflow;
@@ -424,7 +424,7 @@ pub fn generateSymbol(
0..,
) |field_ty, comptime_val, index| {
if (comptime_val != .none) continue;
- if (!field_ty.toType().hasRuntimeBits(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
const field_val = switch (aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
@@ -436,8 +436,8 @@ pub fn generateSymbol(
};
switch (try generateSymbol(bin_file, src_loc, .{
- .ty = field_ty.toType(),
- .val = field_val.toValue(),
+ .ty = Type.fromInterned(field_ty),
+ .val = Value.fromInterned(field_val),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
@@ -474,22 +474,22 @@ pub fn generateSymbol(
// pointer may point to a decl which must be marked used
// but can also result in a relocation. Therefore we handle those separately.
- if (field_ty.toType().zigTypeTag(mod) == .Pointer) {
- const field_size = math.cast(usize, field_ty.toType().abiSize(mod)) orelse
+ if (Type.fromInterned(field_ty).zigTypeTag(mod) == .Pointer) {
+ const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(mod)) orelse
return error.Overflow;
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
defer tmp_list.deinit();
switch (try generateSymbol(bin_file, src_loc, .{
- .ty = field_ty.toType(),
- .val = field_val.toValue(),
+ .ty = Type.fromInterned(field_ty),
+ .val = Value.fromInterned(field_val),
}, &tmp_list, debug_output, reloc_info)) {
.ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
.fail => |em| return Result{ .fail = em },
}
} else {
- field_val.toValue().writeToPackedMemory(field_ty.toType(), mod, code.items[current_pos..], bits) catch unreachable;
+ Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), mod, code.items[current_pos..], bits) catch unreachable;
}
- bits += @as(u16, @intCast(field_ty.toType().bitSize(mod)));
+ bits += @as(u16, @intCast(Type.fromInterned(field_ty).bitSize(mod)));
}
},
.Auto, .Extern => {
@@ -500,7 +500,7 @@ pub fn generateSymbol(
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = field_types[field_index];
- if (!field_ty.toType().hasRuntimeBits(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
const field_val = switch (ip.indexToKey(typed_value.val.toIntern()).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
@@ -518,8 +518,8 @@ pub fn generateSymbol(
if (padding > 0) try code.appendNTimes(0, padding);
switch (try generateSymbol(bin_file, src_loc, .{
- .ty = field_ty.toType(),
- .val = field_val.toValue(),
+ .ty = Type.fromInterned(field_ty),
+ .val = Value.fromInterned(field_val),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
@@ -545,7 +545,7 @@ pub fn generateSymbol(
if (layout.payload_size == 0) {
return generateSymbol(bin_file, src_loc, .{
.ty = typed_value.ty.unionTagTypeSafety(mod).?,
- .val = un.tag.toValue(),
+ .val = Value.fromInterned(un.tag),
}, code, debug_output, reloc_info);
}
@@ -553,7 +553,7 @@ pub fn generateSymbol(
if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = typed_value.ty.unionTagTypeSafety(mod).?,
- .val = un.tag.toValue(),
+ .val = Value.fromInterned(un.tag),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
@@ -562,14 +562,14 @@ pub fn generateSymbol(
const union_obj = mod.typeToUnion(typed_value.ty).?;
if (un.tag != .none) {
- const field_index = typed_value.ty.unionTagFieldIndex(un.tag.toValue(), mod).?;
- const field_ty = union_obj.field_types.get(ip)[field_index].toType();
+ const field_index = typed_value.ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?;
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBits(mod)) {
try code.appendNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
} else {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = field_ty,
- .val = un.val.toValue(),
+ .val = Value.fromInterned(un.val),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
@@ -582,8 +582,8 @@ pub fn generateSymbol(
}
} else {
switch (try generateSymbol(bin_file, src_loc, .{
- .ty = ip.typeOf(un.val).toType(),
- .val = un.val.toValue(),
+ .ty = Type.fromInterned(ip.typeOf(un.val)),
+ .val = Value.fromInterned(un.val),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
@@ -592,8 +592,8 @@ pub fn generateSymbol(
if (layout.tag_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) {
switch (try generateSymbol(bin_file, src_loc, .{
- .ty = union_obj.enum_tag_ty.toType(),
- .val = un.tag.toValue(),
+ .ty = Type.fromInterned(union_obj.enum_tag_ty),
+ .val = Value.fromInterned(un.tag),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
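
The union branches above encode the emission order implied by the layout: when a runtime tag exists and its alignment is at least the payload's, the tag is written before the payload; otherwise the payload is written first and the tag, if any, follows. Schematically:

    // Byte order when lowering a union value (derived from the branches above).
    if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) {
        // tag, then payload, then padding
    } else {
        // payload first; a trailing tag is written when tag_align < payload_align
    }
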
@@ -626,7 +626,7 @@ fn lowerParentPtr(
.anon_decl => |ad| try lowerAnonDeclRef(bin_file, src_loc, ad, code, debug_output, reloc_info),
.int => |int| try generateSymbol(bin_file, src_loc, .{
.ty = Type.usize,
- .val = int.toValue(),
+ .val = Value.fromInterned(int),
}, code, debug_output, reloc_info),
.eu_payload => |eu_payload| try lowerParentPtr(
bin_file,
@@ -635,7 +635,7 @@ fn lowerParentPtr(
code,
debug_output,
reloc_info.offset(@as(u32, @intCast(errUnionPayloadOffset(
- mod.intern_pool.typeOf(eu_payload).toType(),
+ Type.fromInterned(mod.intern_pool.typeOf(eu_payload)),
mod,
)))),
),
@@ -654,7 +654,7 @@ fn lowerParentPtr(
code,
debug_output,
reloc_info.offset(@as(u32, @intCast(elem.index *
- mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod)))),
+ Type.fromInterned(mod.intern_pool.typeOf(elem.base)).elemType2(mod).abiSize(mod)))),
),
.field => |field| {
const base_type = mod.intern_pool.indexToKey(mod.intern_pool.typeOf(field.base)).ptr_type.child;
@@ -676,12 +676,12 @@ fn lowerParentPtr(
.struct_type,
.anon_struct_type,
.union_type,
- => switch (base_type.toType().containerLayout(mod)) {
- .Auto, .Extern => @intCast(base_type.toType().structFieldOffset(
+ => switch (Type.fromInterned(base_type).containerLayout(mod)) {
+ .Auto, .Extern => @intCast(Type.fromInterned(base_type).structFieldOffset(
@intCast(field.index),
mod,
)),
- .Packed => if (mod.typeToStruct(base_type.toType())) |struct_type|
+ .Packed => if (mod.typeToStruct(Type.fromInterned(base_type))) |struct_type|
math.divExact(u16, mod.structPackedFieldBitOffset(
struct_type,
@intCast(field.index),
@@ -723,7 +723,7 @@ fn lowerAnonDeclRef(
const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
const decl_val = anon_decl.val;
- const decl_ty = mod.intern_pool.typeOf(decl_val).toType();
+ const decl_ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
log.debug("lowerAnonDecl: ty = {}", .{decl_ty.fmt(mod)});
const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
if (!is_fn_body and !decl_ty.hasRuntimeBits(mod)) {
@@ -1056,8 +1056,8 @@ pub fn genTypedValue(
const enum_tag = mod.intern_pool.indexToKey(typed_value.val.toIntern()).enum_tag;
const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int);
return genTypedValue(bin_file, src_loc, .{
- .ty = int_tag_ty.toType(),
- .val = enum_tag.int.toValue(),
+ .ty = Type.fromInterned(int_tag_ty),
+ .val = Value.fromInterned(enum_tag.int),
}, owner_decl_index);
},
.ErrorSet => {
@@ -1074,10 +1074,10 @@ pub fn genTypedValue(
switch (mod.intern_pool.indexToKey(typed_value.val.toIntern()).error_union.val) {
.err_name => |err_name| return genTypedValue(bin_file, src_loc, .{
.ty = err_type,
- .val = (try mod.intern(.{ .err = .{
+ .val = Value.fromInterned((try mod.intern(.{ .err = .{
.ty = err_type.toIntern(),
.name = err_name,
- } })).toValue(),
+ } }))),
}, owner_decl_index),
.payload => return genTypedValue(bin_file, src_loc, .{
.ty = err_int_ty,
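
One mechanical consequence of dropping the Index methods, visible throughout codegen.zig: an expression that previously ended in a trailing method call, such as a switch producing an index, now has the constructor hoisted around the whole expression rather than a conversion applied per arm. The error-union case from above, isolated:

    // The conversion wraps the producing expression instead of trailing it.
    .val = Value.fromInterned(switch (error_union.val) {
        .err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }),
        .payload => |payload| payload,
    }),
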
src/InternPool.zig
@@ -2096,19 +2096,6 @@ pub const Index = enum(u32) {
}
};
- pub fn toType(i: Index) @import("type.zig").Type {
- assert(i != .none);
- return .{ .ip_index = i };
- }
-
- pub fn toValue(i: Index) @import("value.zig").Value {
- assert(i != .none);
- return .{
- .ip_index = i,
- .legacy = undefined,
- };
- }
-
/// Used for a map of `Index` values to the index within a list of `Index` values.
const Adapter = struct {
indexes: []const Index,
@@ -6145,8 +6132,8 @@ fn finishFuncInstance(
.src_line = fn_owner_decl.src_line,
.has_tv = true,
.owns_tv = true,
- .ty = func_ty.toType(),
- .val = func_index.toValue(),
+ .ty = @import("type.zig").Type.fromInterned(func_ty),
+ .val = @import("value.zig").Value.fromInterned(func_index),
.alignment = .none,
.@"linksection" = section,
.@"addrspace" = fn_owner_decl.@"addrspace",
src/Module.zig
@@ -659,7 +659,7 @@ pub const Decl = struct {
pub fn internValue(decl: *Decl, mod: *Module) Allocator.Error!InternPool.Index {
assert(decl.has_tv);
const ip_index = try decl.val.intern(decl.ty, mod);
- decl.val = ip_index.toValue();
+ decl.val = Value.fromInterned(ip_index);
return ip_index;
}
@@ -3556,8 +3556,8 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
_ = try decl.internValue(mod);
}
- new_namespace.ty = struct_ty.toType();
- new_decl.val = struct_ty.toValue();
+ new_namespace.ty = Type.fromInterned(struct_ty);
+ new_decl.val = Value.fromInterned(struct_ty);
new_decl.has_tv = true;
new_decl.owns_tv = true;
new_decl.analysis = .complete;
@@ -3711,7 +3711,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)});
}
- decl.ty = InternPool.Index.type_type.toType();
+ decl.ty = Type.fromInterned(InternPool.Index.type_type);
decl.val = ty.toValue();
decl.alignment = .none;
decl.@"linksection" = .none;
@@ -3741,7 +3741,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
}
decl.ty = decl_tv.ty;
- decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue();
+ decl.val = Value.fromInterned((try decl_tv.val.intern(decl_tv.ty, mod)));
// linksection, align, and addrspace were already set by Sema
decl.has_tv = true;
decl.owns_tv = owns_tv;
@@ -3794,7 +3794,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
}
decl.ty = decl_tv.ty;
- decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue();
+ decl.val = Value.fromInterned((try decl_tv.val.intern(decl_tv.ty, mod)));
decl.alignment = blk: {
const align_ref = decl.zirAlignRef(mod);
if (align_ref == .none) break :blk .none;
@@ -4509,7 +4509,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
.owner_decl_index = decl_index,
.func_index = func_index,
.func_is_naked = fn_ty_info.cc == .Naked,
- .fn_ret_ty = fn_ty_info.return_type.toType(),
+ .fn_ret_ty = Type.fromInterned(fn_ty_info.return_type),
.fn_ret_ty_ies = null,
.owner_func_index = func_index,
.branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota),
@@ -4580,7 +4580,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
const param_ty = fn_ty_info.param_types.get(ip)[runtime_param_index];
runtime_param_index += 1;
- const opt_opv = sema.typeHasOnePossibleValue(param_ty.toType()) catch |err| switch (err) {
+ const opt_opv = sema.typeHasOnePossibleValue(Type.fromInterned(param_ty)) catch |err| switch (err) {
error.NeededSourceLocation => unreachable,
error.GenericPoison => unreachable,
error.ComptimeReturn => unreachable,
@@ -4707,7 +4707,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
// Similarly, resolve any queued up types that were requested to be resolved for
// the backends.
for (sema.types_to_resolve.keys()) |ty| {
- sema.resolveTypeFully(ty.toType()) catch |err| switch (err) {
+ sema.resolveTypeFully(Type.fromInterned(ty)) catch |err| switch (err) {
error.NeededSourceLocation => unreachable,
error.GenericPoison => unreachable,
error.ComptimeReturn => unreachable,
@@ -5392,10 +5392,10 @@ pub fn populateTestFunctions(
});
const test_name_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, .none, .{
.ty = test_name_decl_ty,
- .val = (try mod.intern(.{ .aggregate = .{
+ .val = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = test_name_decl_ty.toIntern(),
.storage = .{ .bytes = test_decl_name },
- } })).toValue(),
+ } }))),
});
break :n test_name_decl_index;
};
@@ -5439,10 +5439,10 @@ pub fn populateTestFunctions(
});
const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, .none, .{
.ty = array_decl_ty,
- .val = (try mod.intern(.{ .aggregate = .{
+ .val = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = array_decl_ty.toIntern(),
.storage = .{ .elems = test_fn_vals },
- } })).toValue(),
+ } }))),
});
break :d array_decl_index;
@@ -5548,7 +5548,7 @@ pub fn markReferencedDeclsAlive(mod: *Module, val: Value) Allocator.Error!void {
.func => |func| try mod.markDeclIndexAlive(func.owner_decl),
.error_union => |error_union| switch (error_union.val) {
.err_name => {},
- .payload => |payload| try mod.markReferencedDeclsAlive(payload.toValue()),
+ .payload => |payload| try mod.markReferencedDeclsAlive(Value.fromInterned(payload)),
},
.ptr => |ptr| {
switch (ptr.addr) {
@@ -5556,17 +5556,17 @@ pub fn markReferencedDeclsAlive(mod: *Module, val: Value) Allocator.Error!void {
.anon_decl => {},
.mut_decl => |mut_decl| try mod.markDeclIndexAlive(mut_decl.decl),
.int, .comptime_field => {},
- .eu_payload, .opt_payload => |parent| try mod.markReferencedDeclsAlive(parent.toValue()),
- .elem, .field => |base_index| try mod.markReferencedDeclsAlive(base_index.base.toValue()),
+ .eu_payload, .opt_payload => |parent| try mod.markReferencedDeclsAlive(Value.fromInterned(parent)),
+ .elem, .field => |base_index| try mod.markReferencedDeclsAlive(Value.fromInterned(base_index.base)),
}
- if (ptr.len != .none) try mod.markReferencedDeclsAlive(ptr.len.toValue());
+ if (ptr.len != .none) try mod.markReferencedDeclsAlive(Value.fromInterned(ptr.len));
},
- .opt => |opt| if (opt.val != .none) try mod.markReferencedDeclsAlive(opt.val.toValue()),
+ .opt => |opt| if (opt.val != .none) try mod.markReferencedDeclsAlive(Value.fromInterned(opt.val)),
.aggregate => |aggregate| for (aggregate.storage.values()) |elem|
- try mod.markReferencedDeclsAlive(elem.toValue()),
+ try mod.markReferencedDeclsAlive(Value.fromInterned(elem)),
.un => |un| {
- if (un.tag != .none) try mod.markReferencedDeclsAlive(un.tag.toValue());
- try mod.markReferencedDeclsAlive(un.val.toValue());
+ if (un.tag != .none) try mod.markReferencedDeclsAlive(Value.fromInterned(un.tag));
+ try mod.markReferencedDeclsAlive(Value.fromInterned(un.val));
},
else => {},
}
@@ -5653,14 +5653,14 @@ pub fn intern(mod: *Module, key: InternPool.Key) Allocator.Error!InternPool.Inde
/// Shortcut for calling `intern_pool.getCoerced`.
pub fn getCoerced(mod: *Module, val: Value, new_ty: Type) Allocator.Error!Value {
- return (try mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), new_ty.toIntern())).toValue();
+ return Value.fromInterned((try mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), new_ty.toIntern())));
}
pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type {
- return (try intern(mod, .{ .int_type = .{
+ return Type.fromInterned((try intern(mod, .{ .int_type = .{
.signedness = signedness,
.bits = bits,
- } })).toType();
+ } })));
}
pub fn errorIntType(mod: *Module) std.mem.Allocator.Error!Type {
@@ -5669,17 +5669,17 @@ pub fn errorIntType(mod: *Module) std.mem.Allocator.Error!Type {
pub fn arrayType(mod: *Module, info: InternPool.Key.ArrayType) Allocator.Error!Type {
const i = try intern(mod, .{ .array_type = info });
- return i.toType();
+ return Type.fromInterned(i);
}
pub fn vectorType(mod: *Module, info: InternPool.Key.VectorType) Allocator.Error!Type {
const i = try intern(mod, .{ .vector_type = info });
- return i.toType();
+ return Type.fromInterned(i);
}
pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error!Type {
const i = try intern(mod, .{ .opt_type = child_type });
- return i.toType();
+ return Type.fromInterned(i);
}
pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type {
@@ -5692,7 +5692,7 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type
// pointee type needs to be resolved more, that needs to be done before calling
// this ptr() function.
if (info.flags.alignment != .none and
- info.flags.alignment == info.child.toType().abiAlignment(mod))
+ info.flags.alignment == Type.fromInterned(info.child).abiAlignment(mod))
{
canon_info.flags.alignment = .none;
}
@@ -5702,7 +5702,7 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type
// we change it to 0 here. If this causes an assertion trip, the pointee type
// needs to be resolved before calling this ptr() function.
.none => if (info.packed_offset.host_size != 0) {
- const elem_bit_size = info.child.toType().bitSize(mod);
+ const elem_bit_size = Type.fromInterned(info.child).bitSize(mod);
assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8);
if (info.packed_offset.host_size * 8 == elem_bit_size) {
canon_info.packed_offset.host_size = 0;
@@ -5712,7 +5712,7 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type
_ => assert(@intFromEnum(info.flags.vector_index) < info.packed_offset.host_size),
}
- return (try intern(mod, .{ .ptr_type = canon_info })).toType();
+ return Type.fromInterned((try intern(mod, .{ .ptr_type = canon_info })));
}
pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
@@ -5745,26 +5745,26 @@ pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator
}
pub fn funcType(mod: *Module, key: InternPool.GetFuncTypeKey) Allocator.Error!Type {
- return (try mod.intern_pool.getFuncType(mod.gpa, key)).toType();
+ return Type.fromInterned((try mod.intern_pool.getFuncType(mod.gpa, key)));
}
/// Use this for `anyframe->T` only.
/// For `anyframe`, use the `InternPool.Index.anyframe` tag directly.
pub fn anyframeType(mod: *Module, payload_ty: Type) Allocator.Error!Type {
- return (try intern(mod, .{ .anyframe_type = payload_ty.toIntern() })).toType();
+ return Type.fromInterned((try intern(mod, .{ .anyframe_type = payload_ty.toIntern() })));
}
pub fn errorUnionType(mod: *Module, error_set_ty: Type, payload_ty: Type) Allocator.Error!Type {
- return (try intern(mod, .{ .error_union_type = .{
+ return Type.fromInterned((try intern(mod, .{ .error_union_type = .{
.error_set_type = error_set_ty.toIntern(),
.payload_type = payload_ty.toIntern(),
- } })).toType();
+ } })));
}
pub fn singleErrorSetType(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type {
const names: *const [1]InternPool.NullTerminatedString = &name;
const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names);
- return new_ty.toType();
+ return Type.fromInterned(new_ty);
}
/// Sorts `names` in place.
@@ -5779,7 +5779,7 @@ pub fn errorSetFromUnsortedNames(
InternPool.NullTerminatedString.indexLessThan,
);
const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names);
- return new_ty.toType();
+ return Type.fromInterned(new_ty);
}
/// Supports only pointers, not pointer-like optionals.
@@ -5789,7 +5789,7 @@ pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
.ty = ty.toIntern(),
.addr = .{ .int = (try mod.intValue_u64(Type.usize, x)).toIntern() },
} });
- return i.toValue();
+ return Value.fromInterned(i);
}
/// Creates an enum tag value based on the integer tag value.
@@ -5802,7 +5802,7 @@ pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Er
.ty = ty.toIntern(),
.int = tag_int,
} });
- return i.toValue();
+ return Value.fromInterned(i);
}
/// Creates an enum tag value based on the field index according to source code
@@ -5814,23 +5814,23 @@ pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.E
if (enum_type.values.len == 0) {
// Auto-numbered fields.
- return (try ip.get(gpa, .{ .enum_tag = .{
+ return Value.fromInterned((try ip.get(gpa, .{ .enum_tag = .{
.ty = ty.toIntern(),
.int = try ip.get(gpa, .{ .int = .{
.ty = enum_type.tag_ty,
.storage = .{ .u64 = field_index },
} }),
- } })).toValue();
+ } })));
}
- return (try ip.get(gpa, .{ .enum_tag = .{
+ return Value.fromInterned((try ip.get(gpa, .{ .enum_tag = .{
.ty = ty.toIntern(),
.int = enum_type.values.get(ip)[field_index],
- } })).toValue();
+ } })));
}
pub fn undefValue(mod: *Module, ty: Type) Allocator.Error!Value {
- return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+ return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() })));
}
pub fn undefRef(mod: *Module, ty: Type) Allocator.Error!Air.Inst.Ref {
@@ -5854,7 +5854,7 @@ pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Valu
.ty = ty.toIntern(),
.storage = .{ .big_int = x },
} });
- return i.toValue();
+ return Value.fromInterned(i);
}
pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
@@ -5862,7 +5862,7 @@ pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
.ty = ty.toIntern(),
.storage = .{ .u64 = x },
} });
- return i.toValue();
+ return Value.fromInterned(i);
}
pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value {
@@ -5870,7 +5870,7 @@ pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value {
.ty = ty.toIntern(),
.storage = .{ .i64 = x },
} });
- return i.toValue();
+ return Value.fromInterned(i);
}
pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value {
@@ -5879,7 +5879,7 @@ pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocato
.tag = tag.toIntern(),
.val = val.toIntern(),
} });
- return i.toValue();
+ return Value.fromInterned(i);
}
/// This function casts the float representation down to the representation of the type, potentially
@@ -5897,7 +5897,7 @@ pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value {
.ty = ty.toIntern(),
.storage = storage,
} });
- return i.toValue();
+ return Value.fromInterned(i);
}
pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value {
@@ -5907,7 +5907,7 @@ pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value {
.ty = opt_ty.toIntern(),
.val = .none,
} });
- return result.toValue();
+ return Value.fromInterned(result);
}
pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type {
@@ -5964,10 +5964,10 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
return @as(u16, @intCast(big.bitCountTwosComp()));
},
.lazy_align => |lazy_ty| {
- return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod).toByteUnits(0)) + @intFromBool(sign);
+ return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(mod).toByteUnits(0)) + @intFromBool(sign);
},
.lazy_size => |lazy_ty| {
- return Type.smallestUnsignedBits(lazy_ty.toType().abiSize(mod)) + @intFromBool(sign);
+ return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiSize(mod)) + @intFromBool(sign);
},
}
}
@@ -6252,14 +6252,14 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
var payload_size: u64 = 0;
var payload_align: Alignment = .@"1";
for (u.field_types.get(ip), 0..) |field_ty, i| {
- if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
const explicit_align = u.fieldAlign(ip, @intCast(i));
const field_align = if (explicit_align != .none)
explicit_align
else
- field_ty.toType().abiAlignment(mod);
- const field_size = field_ty.toType().abiSize(mod);
+ Type.fromInterned(field_ty).abiAlignment(mod);
+ const field_size = Type.fromInterned(field_ty).abiSize(mod);
if (field_size > payload_size) {
payload_size = field_size;
biggest_field = @intCast(i);
@@ -6271,7 +6271,7 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
}
}
const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
- if (!have_tag or !u.enum_tag_ty.toType().hasRuntimeBits(mod)) {
+ if (!have_tag or !Type.fromInterned(u.enum_tag_ty).hasRuntimeBits(mod)) {
return .{
.abi_size = payload_align.forward(payload_size),
.abi_align = payload_align,
@@ -6286,8 +6286,8 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
};
}
- const tag_size = u.enum_tag_ty.toType().abiSize(mod);
- const tag_align = u.enum_tag_ty.toType().abiAlignment(mod).max(.@"1");
+ const tag_size = Type.fromInterned(u.enum_tag_ty).abiSize(mod);
+ const tag_align = Type.fromInterned(u.enum_tag_ty).abiAlignment(mod).max(.@"1");
return .{
.abi_size = u.size,
.abi_align = tag_align.max(payload_align),
@@ -6311,9 +6311,9 @@ pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) Alignment {
const ip = &mod.intern_pool;
const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
var max_align: Alignment = .none;
- if (have_tag) max_align = u.enum_tag_ty.toType().abiAlignment(mod);
+ if (have_tag) max_align = Type.fromInterned(u.enum_tag_ty).abiAlignment(mod);
for (u.field_types.get(ip), 0..) |field_ty, field_index| {
- if (!field_ty.toType().hasRuntimeBits(mod)) continue;
+ if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
const field_align = mod.unionFieldNormalAlignment(u, @intCast(field_index));
max_align = max_align.max(field_align);
@@ -6328,7 +6328,7 @@ pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.UnionType, field_in
const ip = &mod.intern_pool;
const field_align = u.fieldAlign(ip, field_index);
if (field_align != .none) return field_align;
- const field_ty = u.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(u.field_types.get(ip)[field_index]);
return field_ty.abiAlignment(mod);
}
@@ -6396,7 +6396,7 @@ pub fn structPackedFieldBitOffset(
if (i == field_index) {
return @intCast(bit_sum);
}
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
bit_sum += field_ty.bitSize(mod);
}
unreachable; // index out of bounds
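
All of the Module value and type constructors touched above reduce to one shape after the change: build an InternPool key, intern it, wrap the resulting index. intValue_u64 as it reads post-patch, representative of its siblings (the intern call line falls outside the hunk context and is reconstructed here):

    pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
        const i = try mod.intern(.{ .int = .{
            .ty = ty.toIntern(),
            .storage = .{ .u64 = x },
        } });
        return Value.fromInterned(i);
    }
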
src/print_air.zig
@@ -940,10 +940,10 @@ const Writer = struct {
return s.print("@{}", .{operand});
} else if (Air.refToInterned(operand)) |ip_index| {
const mod = w.module;
- const ty = mod.intern_pool.indexToKey(ip_index).typeOf().toType();
+ const ty = Type.fromInterned(mod.intern_pool.indexToKey(ip_index).typeOf());
try s.print("<{}, {}>", .{
ty.fmt(mod),
- ip_index.toValue().fmtValue(ty, mod),
+ Value.fromInterned(ip_index).fmtValue(ty, mod),
});
} else {
return w.writeInstIndex(s, Air.refToIndex(operand).?, dies);
src/RangeSet.zig
@@ -3,6 +3,8 @@ const assert = std.debug.assert;
const Order = std.math.Order;
const InternPool = @import("InternPool.zig");
+const Type = @import("type.zig").Type;
+const Value = @import("value.zig").Value;
const Module = @import("Module.zig");
const RangeSet = @This();
const SwitchProngSrc = @import("Module.zig").SwitchProngSrc;
@@ -43,8 +45,8 @@ pub fn add(
assert(ty == ip.typeOf(range.first));
assert(ty == ip.typeOf(range.last));
- if (last.toValue().compareScalar(.gte, range.first.toValue(), ty.toType(), mod) and
- first.toValue().compareScalar(.lte, range.last.toValue(), ty.toType(), mod))
+ if (Value.fromInterned(last).compareScalar(.gte, Value.fromInterned(range.first), Type.fromInterned(ty), mod) and
+ Value.fromInterned(first).compareScalar(.lte, Value.fromInterned(range.last), Type.fromInterned(ty), mod))
{
return range.src; // They overlap.
}
@@ -60,8 +62,8 @@ pub fn add(
/// Assumes a and b do not overlap
fn lessThan(mod: *Module, a: Range, b: Range) bool {
- const ty = mod.intern_pool.typeOf(a.first).toType();
- return a.first.toValue().compareScalar(.lt, b.first.toValue(), ty, mod);
+ const ty = Type.fromInterned(mod.intern_pool.typeOf(a.first));
+ return Value.fromInterned(a.first).compareScalar(.lt, Value.fromInterned(b.first), ty, mod);
}
pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !bool {
@@ -91,10 +93,10 @@ pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !
const prev = self.ranges.items[i];
// prev.last + 1 == cur.first
- try counter.copy(prev.last.toValue().toBigInt(&space, mod));
+ try counter.copy(Value.fromInterned(prev.last).toBigInt(&space, mod));
try counter.addScalar(&counter, 1);
- const cur_start_int = cur.first.toValue().toBigInt(&space, mod);
+ const cur_start_int = Value.fromInterned(cur.first).toBigInt(&space, mod);
if (!cur_start_int.eql(counter.toConst())) {
return false;
}
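
Two pieces of interval arithmetic underpin these RangeSet.zig hunks: add() rejects a new range that overlaps an existing one using the standard inclusive-interval test, and spans() walks the sorted, non-overlapping ranges verifying prev.last + 1 == cur.first with big-int arithmetic, so that the ranges tile the queried interval exactly. The overlap predicate, stated generically:

    // Inclusive intervals [a1, a2] and [b1, b2] overlap iff each one
    // starts no later than the other ends.
    fn overlaps(a1: i64, a2: i64, b1: i64, b2: i64) bool {
        return a2 >= b1 and a1 <= b2;
    }
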
src/Sema.zig
@@ -2153,13 +2153,13 @@ fn resolveValueAllowVariables(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Val
assert(inst != .none);
// First section of indexes correspond to a set number of constant values.
if (@intFromEnum(inst) < InternPool.static_len) {
- return @as(InternPool.Index, @enumFromInt(@intFromEnum(inst))).toValue();
+ return Value.fromInterned(@as(InternPool.Index, @enumFromInt(@intFromEnum(inst))));
}
const air_tags = sema.air_instructions.items(.tag);
if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| {
if (Air.refToInterned(inst)) |ip_index| {
- const val = ip_index.toValue();
+ const val = Value.fromInterned(ip_index);
if (val.getVariable(sema.mod) != null) return val;
}
return opv;
@@ -2171,7 +2171,7 @@ fn resolveValueAllowVariables(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Val
else => return null,
}
};
- const val = ip_index.toValue();
+ const val = Value.fromInterned(ip_index);
if (val.isPtrToThreadLocal(sema.mod)) return null;
return val;
}
@@ -2364,7 +2364,7 @@ fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: InternPool.
const ptr_info = ty.ptrInfo(mod);
if (ptr_info.flags.size == .Slice) {
return ip.stringEqlSlice(field_name, "ptr") or ip.stringEqlSlice(field_name, "len");
- } else if (ptr_info.child.toType().zigTypeTag(mod) == .Array) {
+ } else if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Array) {
return ip.stringEqlSlice(field_name, "len");
} else return false;
},
@@ -2752,8 +2752,8 @@ fn zirStructDecl(
//errdefer ip.remove(struct_ty);
new_decl.ty = Type.type;
- new_decl.val = struct_ty.toValue();
- new_namespace.ty = struct_ty.toType();
+ new_decl.val = Value.fromInterned(struct_ty);
+ new_namespace.ty = Type.fromInterned(struct_ty);
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
@@ -2959,8 +2959,8 @@ fn zirEnumDecl(
//errdefer if (!done) mod.intern_pool.remove(incomplete_enum.index);
new_decl.ty = Type.type;
- new_decl.val = incomplete_enum.index.toValue();
- new_namespace.ty = incomplete_enum.index.toType();
+ new_decl.val = Value.fromInterned(incomplete_enum.index);
+ new_namespace.ty = Type.fromInterned(incomplete_enum.index);
const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
@@ -3222,8 +3222,8 @@ fn zirUnionDecl(
//errdefer mod.intern_pool.remove(union_ty);
new_decl.ty = Type.type;
- new_decl.val = union_ty.toValue();
- new_namespace.ty = union_ty.toType();
+ new_decl.val = Value.fromInterned(union_ty);
+ new_namespace.ty = Type.fromInterned(union_ty);
_ = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl);
@@ -3285,8 +3285,8 @@ fn zirOpaqueDecl(
//errdefer mod.intern_pool.remove(opaque_ty);
new_decl.ty = Type.type;
- new_decl.val = opaque_ty.toValue();
- new_namespace.ty = opaque_ty.toType();
+ new_decl.val = Value.fromInterned(opaque_ty);
+ new_namespace.ty = Type.fromInterned(opaque_ty);
extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl);
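
zirStructDecl, zirEnumDecl, zirUnionDecl and zirOpaqueDecl above all give one interned index a double role: it is the decl's value (a value of type 'type') and simultaneously the namespace's type. With container_ty standing in for struct_ty, union_ty, and so on:

    // One interned index, two views (pattern shared by the four decl kinds above):
    new_decl.ty = Type.type;                             // the decl holds a type...
    new_decl.val = Value.fromInterned(container_ty);     // ...whose value is the container
    new_namespace.ty = Type.fromInterned(container_ty);  // the namespace is that container type
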
@@ -3600,7 +3600,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
const alloc = try sema.resolveInst(inst_data.operand);
const alloc_ty = sema.typeOf(alloc);
const ptr_info = alloc_ty.ptrInfo(mod);
- const elem_ty = ptr_info.child.toType();
+ const elem_ty = Type.fromInterned(ptr_info.child);
if (try sema.resolveComptimeKnownAllocValue(block, alloc, null)) |val| {
const new_mut_ptr = Air.internedToRef((try mod.intern(.{ .ptr = .{
@@ -3652,7 +3652,7 @@ fn resolveComptimeKnownAllocValue(sema: *Sema, block: *Block, alloc: Air.Inst.Re
const alloc_ty = resolved_alloc_ty orelse sema.typeOf(alloc);
const ptr_info = alloc_ty.ptrInfo(mod);
- const elem_ty = ptr_info.child.toType();
+ const elem_ty = Type.fromInterned(ptr_info.child);
const alloc_inst = Air.refToIndex(alloc) orelse return null;
const comptime_info = sema.maybe_comptime_allocs.fetchRemove(alloc_inst) orelse return null;
@@ -3803,7 +3803,7 @@ fn resolveComptimeKnownAllocValue(sema: *Sema, block: *Block, alloc: Air.Inst.Re
.index = field_idx,
} },
} }),
- .elem => |elem_idx| (try decl_parent_ptr.toValue().elemPtr(new_ptr_ty.toType(), @intCast(elem_idx), mod)).toIntern(),
+ .elem => |elem_idx| (try Value.fromInterned(decl_parent_ptr).elemPtr(Type.fromInterned(new_ptr_ty), @intCast(elem_idx), mod)).toIntern(),
};
try ptr_mapping.put(air_ptr, new_ptr);
}
@@ -3823,7 +3823,7 @@ fn resolveComptimeKnownAllocValue(sema: *Sema, block: *Block, alloc: Air.Inst.Re
if (try sema.typeHasOnePossibleValue(payload_ty)) |payload_val| {
const new_ptr = ptr_mapping.get(air_ptr_inst).?;
const store_val = try mod.unionValue(union_ty, tag_val, payload_val);
- try sema.storePtrVal(block, .unneeded, new_ptr.toValue(), store_val, union_ty);
+ try sema.storePtrVal(block, .unneeded, Value.fromInterned(new_ptr), store_val, union_ty);
}
},
.store, .store_safe => {
@@ -3831,14 +3831,14 @@ fn resolveComptimeKnownAllocValue(sema: *Sema, block: *Block, alloc: Air.Inst.Re
const air_ptr_inst = Air.refToIndex(bin_op.lhs).?;
const store_val = (try sema.resolveValue(bin_op.rhs)).?;
const new_ptr = ptr_mapping.get(air_ptr_inst).?;
- try sema.storePtrVal(block, .unneeded, new_ptr.toValue(), store_val, mod.intern_pool.typeOf(store_val.toIntern()).toType());
+ try sema.storePtrVal(block, .unneeded, Value.fromInterned(new_ptr), store_val, Type.fromInterned(mod.intern_pool.typeOf(store_val.toIntern())));
},
else => unreachable,
}
}
// The value is finalized - load it!
- const val = (try sema.pointerDeref(block, .unneeded, decl_ptr.toValue(), alloc_ty)).?.toIntern();
+ const val = (try sema.pointerDeref(block, .unneeded, Value.fromInterned(decl_ptr), alloc_ty)).?.toIntern();
return sema.finishResolveComptimeKnownAllocValue(val, alloc_inst, comptime_info.value);
}
@@ -6757,8 +6757,8 @@ fn checkCallArgumentCount(
.Fn => break :func_ty callee_ty,
.Pointer => {
const ptr_info = callee_ty.ptrInfo(mod);
- if (ptr_info.flags.size == .One and ptr_info.child.toType().zigTypeTag(mod) == .Fn) {
- break :func_ty ptr_info.child.toType();
+ if (ptr_info.flags.size == .One and Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Fn) {
+ break :func_ty Type.fromInterned(ptr_info.child);
}
},
.Optional => {
@@ -6831,8 +6831,8 @@ fn callBuiltin(
.Fn => break :func_ty callee_ty,
.Pointer => {
const ptr_info = callee_ty.ptrInfo(mod);
- if (ptr_info.flags.size == .One and ptr_info.child.toType().zigTypeTag(mod) == .Fn) {
- break :func_ty ptr_info.child.toType();
+ if (ptr_info.flags.size == .One and Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Fn) {
+ break :func_ty Type.fromInterned(ptr_info.child);
}
},
else => {},
@@ -7179,14 +7179,14 @@ fn analyzeCall(
var is_comptime_call = block.is_comptime or modifier == .compile_time;
var comptime_reason: ?*const Block.ComptimeReason = null;
if (!is_comptime_call) {
- if (sema.typeRequiresComptime(func_ty_info.return_type.toType())) |ct| {
+ if (sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) |ct| {
is_comptime_call = ct;
if (ct) {
comptime_reason = &.{ .comptime_ret_ty = .{
.block = block,
.func = func,
.func_src = func_src,
- .return_ty = func_ty_info.return_type.toType(),
+ .return_ty = Type.fromInterned(func_ty_info.return_type),
} };
}
} else |err| switch (err) {
@@ -7234,7 +7234,7 @@ fn analyzeCall(
.block = block,
.func = func,
.func_src = func_src,
- .return_ty = func_ty_info.return_type.toType(),
+ .return_ty = Type.fromInterned(func_ty_info.return_type),
} };
},
else => |e| return e,
@@ -7431,10 +7431,10 @@ fn analyzeCall(
const ies = try sema.arena.create(InferredErrorSet);
ies.* = .{ .func = .none };
sema.fn_ret_ty_ies = ies;
- sema.fn_ret_ty = (try ip.get(gpa, .{ .error_union_type = .{
+ sema.fn_ret_ty = Type.fromInterned((try ip.get(gpa, .{ .error_union_type = .{
.error_set_type = .adhoc_inferred_error_set_type,
.payload_type = sema.fn_ret_ty.toIntern(),
- } })).toType();
+ } })));
}
// This `res2` is here instead of directly breaking from `res` due to a stage1
@@ -7529,7 +7529,7 @@ fn analyzeCall(
if (new_ty != .none) {
// TODO: mutate in place the previous instruction if possible
// rather than adding a bitcast instruction.
- break :res2 try block.addBitCast(new_ty.toType(), result);
+ break :res2 try block.addBitCast(Type.fromInterned(new_ty), result);
}
break :res2 result;
@@ -7543,8 +7543,8 @@ fn analyzeCall(
for (args, 0..) |*arg_out, arg_idx| {
// Non-generic, so param types are already resolved
const param_ty = if (arg_idx < func_ty_info.param_types.len) ty: {
- break :ty func_ty_info.param_types.get(ip)[arg_idx].toType();
- } else InternPool.Index.var_args_param_type.toType();
+ break :ty Type.fromInterned(func_ty_info.param_types.get(ip)[arg_idx]);
+ } else Type.fromInterned(InternPool.Index.var_args_param_type);
assert(!param_ty.isGenericPoison());
arg_out.* = try args_info.analyzeArg(sema, block, arg_idx, param_ty, func_ty_info, func);
if (sema.typeOf(arg_out.*).zigTypeTag(mod) == .NoReturn) {
@@ -7554,8 +7554,8 @@ fn analyzeCall(
if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
- try sema.queueFullTypeResolution(func_ty_info.return_type.toType());
- if (sema.owner_func_index != .none and func_ty_info.return_type.toType().isError(mod)) {
+ try sema.queueFullTypeResolution(Type.fromInterned(func_ty_info.return_type));
+ if (sema.owner_func_index != .none and Type.fromInterned(func_ty_info.return_type).isError(mod)) {
ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
}
@@ -7670,12 +7670,12 @@ fn analyzeInlineCallArg(
break :param_ty param_ty.toIntern();
};
new_param_types[arg_i.*] = param_ty;
- const casted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, param_ty.toType(), func_ty_info, func_inst);
+ const casted_arg = try args_info.analyzeArg(ics.caller(), arg_block, arg_i.*, Type.fromInterned(param_ty), func_ty_info, func_inst);
if (ics.caller().typeOf(casted_arg).zigTypeTag(mod) == .NoReturn) {
return casted_arg;
}
const arg_src = args_info.argSrc(arg_block, arg_i.*);
- if (try ics.callee().typeRequiresComptime(param_ty.toType())) {
+ if (try ics.callee().typeRequiresComptime(Type.fromInterned(param_ty))) {
_ = try ics.caller().resolveConstValue(arg_block, arg_src, casted_arg, .{
.needed_comptime_reason = "argument to parameter with comptime-only type must be comptime-known",
.block_comptime_reason = param_block.comptime_reason,
@@ -7705,7 +7705,7 @@ fn analyzeInlineCallArg(
// when the hash function is called.
const resolved_arg_val = try ics.caller().resolveLazyValue(arg_val);
should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod);
- memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(param_ty.toType(), mod);
+ memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(Type.fromInterned(param_ty), mod);
} else {
ics.callee().inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
}
@@ -7790,7 +7790,7 @@ fn instantiateGenericCall(
else => unreachable,
};
const generic_owner_func = mod.intern_pool.indexToKey(generic_owner).func;
- const generic_owner_ty_info = mod.typeToFunc(generic_owner_func.ty.toType()).?;
+ const generic_owner_ty_info = mod.typeToFunc(Type.fromInterned(generic_owner_func.ty)).?;
// Even though there may already be a generic instantiation corresponding
// to this callsite, we must evaluate the expressions of the generic
@@ -7862,7 +7862,7 @@ fn instantiateGenericCall(
const param_tag = fn_zir.instructions.items(.tag)[@intFromEnum(param_inst)];
const param_ty = switch (generic_owner_ty_info.param_types.get(ip)[arg_index]) {
- else => |ty| ty.toType(), // parameter is not generic, so type is already resolved
+ else => |ty| Type.fromInterned(ty), // parameter is not generic, so type is already resolved
.generic_poison_type => param_ty: {
// We have every parameter before this one, so can resolve this parameter's type now.
// However, first check the param type, since it may be anytype.
@@ -7998,12 +7998,12 @@ fn instantiateGenericCall(
try sema.addReferencedBy(block, call_src, callee.owner_decl);
// Make a runtime call to the new function, making sure to omit the comptime args.
- const func_ty = callee.ty.toType();
+ const func_ty = Type.fromInterned(callee.ty);
const func_ty_info = mod.typeToFunc(func_ty).?;
// If the call evaluated to a return type that requires comptime, never mind
// our generic instantiation. Instead we need to perform a comptime call.
- if (try sema.typeRequiresComptime(func_ty_info.return_type.toType())) {
+ if (try sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) {
return error.ComptimeReturn;
}
// Similarly, if the call evaluated to a generic type we need to instead
@@ -8012,12 +8012,12 @@ fn instantiateGenericCall(
return error.GenericPoison;
}
- try sema.queueFullTypeResolution(func_ty_info.return_type.toType());
+ try sema.queueFullTypeResolution(Type.fromInterned(func_ty_info.return_type));
if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
if (sema.owner_func_index != .none and
- func_ty_info.return_type.toType().isError(mod))
+ Type.fromInterned(func_ty_info.return_type).isError(mod))
{
ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
}
@@ -8057,10 +8057,10 @@ fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type)
else => return,
};
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| {
- try sema.resolveTupleLazyValues(block, src, field_ty.toType());
+ try sema.resolveTupleLazyValues(block, src, Type.fromInterned(field_ty));
if (field_val == .none) continue;
// TODO: mutate in intern pool
- _ = try sema.resolveLazyValue(field_val.toValue());
+ _ = try sema.resolveLazyValue(Value.fromInterned(field_val));
}
}
@@ -9217,7 +9217,7 @@ fn funcCommon(
const cc_resolved = cc orelse .Unspecified;
var comptime_bits: u32 = 0;
for (block.params.items(.ty), block.params.items(.is_comptime), 0..) |param_ty_ip, param_is_comptime, i| {
- const param_ty = param_ty_ip.toType();
+ const param_ty = Type.fromInterned(param_ty_ip);
const is_noalias = blk: {
const index = std.math.cast(u5, i) orelse break :blk false;
break :blk @as(u1, @truncate(noalias_bits >> index)) != 0;
@@ -9529,7 +9529,7 @@ fn finishFunc(
const return_type: Type = if (opt_func_index == .none or ret_poison)
bare_return_type
else
- ip.funcTypeReturnType(ip.typeOf(opt_func_index)).toType();
+ Type.fromInterned(ip.funcTypeReturnType(ip.typeOf(opt_func_index)));
if (!return_type.isValidReturnType(mod)) {
const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else "";
@@ -10078,10 +10078,10 @@ fn intCast(
// range to account for negative values.
const dest_range_val = if (wanted_info.signedness == .signed) range_val: {
const one_scalar = try mod.intValue(unsigned_scalar_operand_ty, 1);
- const one = if (is_vector) (try mod.intern(.{ .aggregate = .{
+ const one = if (is_vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = unsigned_operand_ty.toIntern(),
.storage = .{ .repeated_elem = one_scalar.toIntern() },
- } })).toValue() else one_scalar;
+ } }))) else one_scalar;
const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, mod);
break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty, undefined);
} else try mod.getCoerced(dest_max_val, unsigned_operand_ty);
@@ -10734,7 +10734,7 @@ const SwitchProngAnalysis = struct {
if (operand_ty.zigTypeTag(mod) == .Union) {
const field_index: u32 = @intCast(operand_ty.unionTagFieldIndex(item_val, mod).?);
const union_obj = mod.typeToUnion(operand_ty).?;
- const field_ty = union_obj.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (capture_byref) {
const ptr_field_ty = try sema.ptrType(.{
.child = field_ty.toIntern(),
@@ -10790,7 +10790,7 @@ const SwitchProngAnalysis = struct {
const first_item_val = sema.resolveConstDefinedValue(block, .unneeded, case_vals[0], undefined) catch unreachable;
const first_field_index: u32 = mod.unionTagFieldIndex(union_obj, first_item_val).?;
- const first_field_ty = union_obj.field_types.get(ip)[first_field_index].toType();
+ const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[first_field_index]);
const field_indices = try sema.arena.alloc(u32, case_vals.len);
for (case_vals, field_indices) |item, *field_idx| {
@@ -10801,7 +10801,7 @@ const SwitchProngAnalysis = struct {
// Fast path: if all the operands are the same type already, we don't need to hit
// PTR! This will also allow us to emit simpler code.
const same_types = for (field_indices[1..]) |field_idx| {
- const field_ty = union_obj.field_types.get(ip)[field_idx].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (!field_ty.eql(first_field_ty, sema.mod)) break false;
} else true;
@@ -10809,7 +10809,7 @@ const SwitchProngAnalysis = struct {
// We need values to run PTR on, so make a bunch of undef constants.
const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
for (dummy_captures, field_indices) |*dummy, field_idx| {
- const field_ty = union_obj.field_types.get(ip)[field_idx].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
dummy.* = try mod.undefRef(field_ty);
}
@@ -10857,7 +10857,7 @@ const SwitchProngAnalysis = struct {
// pointer type is in-memory coercible to the capture pointer type.
if (!same_types) {
for (field_indices, 0..) |field_idx, i| {
- const field_ty = union_obj.field_types.get(ip)[field_idx].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
const field_ptr_ty = try sema.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{
@@ -10906,7 +10906,7 @@ const SwitchProngAnalysis = struct {
if (try sema.resolveDefinedValue(block, operand_src, spa.operand)) |operand_val| {
if (operand_val.isUndef(mod)) return mod.undefRef(capture_ty);
const union_val = ip.indexToKey(operand_val.toIntern()).un;
- if (union_val.tag.toValue().isUndef(mod)) return mod.undefRef(capture_ty);
+ if (Value.fromInterned(union_val.tag).isUndef(mod)) return mod.undefRef(capture_ty);
const uncoerced = Air.internedToRef(union_val.val);
return sema.coerce(block, capture_ty, uncoerced, operand_src);
}
@@ -10921,7 +10921,7 @@ const SwitchProngAnalysis = struct {
// If we can, try to avoid that using in-memory coercions.
const first_non_imc = in_mem: {
for (field_indices, 0..) |field_idx, i| {
- const field_ty = union_obj.field_types.get(ip)[field_idx].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, sema.mod.getTarget(), .unneeded, .unneeded)) {
break :in_mem i;
}
@@ -10944,7 +10944,7 @@ const SwitchProngAnalysis = struct {
{
const next = first_non_imc + 1;
for (field_indices[next..], next..) |field_idx, i| {
- const field_ty = union_obj.field_types.get(ip)[field_idx].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, sema.mod.getTarget(), .unneeded, .unneeded)) {
in_mem_coercible.unset(i);
}
@@ -10975,7 +10975,7 @@ const SwitchProngAnalysis = struct {
defer coerce_block.instructions.deinit(sema.gpa);
const field_idx = field_indices[idx];
- const field_ty = union_obj.field_types.get(ip)[field_idx].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
const uncoerced = try coerce_block.addStructFieldVal(spa.operand, field_idx, field_ty);
const coerced = sema.coerce(&coerce_block, capture_ty, uncoerced, .unneeded) catch |err| switch (err) {
error.NeededSourceLocation => {
@@ -11004,7 +11004,7 @@ const SwitchProngAnalysis = struct {
const first_imc_item_idx = in_mem_coercible.findFirstSet().?;
const first_imc_field_idx = field_indices[first_imc_item_idx];
- const first_imc_field_ty = union_obj.field_types.get(ip)[first_imc_field_idx].toType();
+ const first_imc_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[first_imc_field_idx]);
const uncoerced = try coerce_block.addStructFieldVal(spa.operand, first_imc_field_idx, first_imc_field_ty);
const coerced = try coerce_block.addBitCast(capture_ty, uncoerced);
_ = try coerce_block.addBr(capture_block_inst, coerced);
@@ -12510,7 +12510,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
for (seen_enum_fields, 0..) |seen_field, index| {
if (seen_field != null) continue;
const union_obj = mod.typeToUnion(maybe_union_ty).?;
- const field_ty = union_obj.field_types.get(ip)[index].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[index]);
if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
} else false
else
@@ -12609,7 +12609,7 @@ const RangeSetUnhandledIterator = struct {
inline .u64, .i64 => |val_int| {
const next_int = @addWithOverflow(val_int, 1);
if (next_int[1] == 0)
- return (try it.mod.intValue(int.ty.toType(), next_int[0])).toIntern();
+ return (try it.mod.intValue(Type.fromInterned(int.ty), next_int[0])).toIntern();
},
.big_int => {},
.lazy_align, .lazy_size => unreachable,
@@ -12625,7 +12625,7 @@ const RangeSetUnhandledIterator = struct {
);
result_bigint.addScalar(val_bigint, 1);
- return (try it.mod.intValue_big(int.ty.toType(), result_bigint.toConst())).toIntern();
+ return (try it.mod.intValue_big(Type.fromInterned(int.ty), result_bigint.toConst())).toIntern();
}
fn next(it: *RangeSetUnhandledIterator) !?InternPool.Index {
@@ -12704,7 +12704,7 @@ fn validateSwitchRange(
const mod = sema.mod;
const first = try sema.resolveSwitchItemVal(block, first_ref, operand_ty, src_node_offset, switch_prong_src, .first);
const last = try sema.resolveSwitchItemVal(block, last_ref, operand_ty, src_node_offset, switch_prong_src, .last);
- if (try first.val.toValue().compareAll(.gt, last.val.toValue(), operand_ty, mod)) {
+ if (try Value.fromInterned(first.val).compareAll(.gt, Value.fromInterned(last.val), operand_ty, mod)) {
const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), src_node_offset, .first);
return sema.fail(block, src, "range start value is greater than the end value", .{});
}
@@ -12815,7 +12815,7 @@ fn validateSwitchItemBool(
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const item = try sema.resolveSwitchItemVal(block, item_ref, Type.bool, src_node_offset, switch_prong_src, .none);
- if (item.val.toValue().toBool()) {
+ if (Value.fromInterned(item.val).toBool()) {
true_count.* += 1;
} else {
false_count.* += 1;
@@ -13645,7 +13645,7 @@ fn analyzeTupleCat(
try sema.tupleFieldValByIndex(block, operand_src, rhs, i, rhs_ty);
}
- return block.addAggregateInit(tuple_ty.toType(), element_refs);
+ return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs);
}
fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -13875,17 +13875,17 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
.needed_comptime_reason = "slice value being concatenated must be comptime-known",
});
return Type.ArrayInfo{
- .elem_type = ptr_info.child.toType(),
+ .elem_type = Type.fromInterned(ptr_info.child),
.sentinel = switch (ptr_info.sentinel) {
.none => null,
- else => ptr_info.sentinel.toValue(),
+ else => Value.fromInterned(ptr_info.sentinel),
},
.len = val.sliceLen(mod),
};
},
.One => {
- if (ptr_info.child.toType().zigTypeTag(mod) == .Array) {
- return ptr_info.child.toType().arrayInfo(mod);
+ if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Array) {
+ return Type.fromInterned(ptr_info.child).arrayInfo(mod);
}
},
.C => {},
@@ -13974,7 +13974,7 @@ fn analyzeTupleMul(
@memcpy(element_refs[tuple_len * i ..][0..tuple_len], element_refs[0..tuple_len]);
}
- return block.addAggregateInit(tuple_ty.toType(), element_refs);
+ return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs);
}
fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -14972,10 +14972,10 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
.ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
else => unreachable,
};
- const zero_val = if (is_vector) (try mod.intern(.{ .aggregate = .{
+ const zero_val = if (is_vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = resolved_type.toIntern(),
.storage = .{ .repeated_elem = scalar_zero.toIntern() },
- } })).toValue() else scalar_zero;
+ } }))) else scalar_zero;
return Air.internedToRef(zero_val.toIntern());
}
} else if (lhs_scalar_ty.isSignedInt(mod)) {
@@ -15058,10 +15058,10 @@ fn intRem(
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return sema.intRemScalar(lhs, rhs, ty);
}
@@ -15335,7 +15335,7 @@ fn zirOverflowArithmetic(
const maybe_rhs_val = try sema.resolveValue(rhs);
const tuple_ty = try sema.overflowArithmeticTupleType(dest_ty);
- const overflow_ty = ip.indexToKey(tuple_ty.toIntern()).anon_struct_type.types.get(ip)[1].toType();
+ const overflow_ty = Type.fromInterned(ip.indexToKey(tuple_ty.toIntern()).anon_struct_type.types.get(ip)[1]);
var result: struct {
inst: Air.Inst.Ref = .none,
@@ -15504,7 +15504,7 @@ fn splat(sema: *Sema, ty: Type, val: Value) !Value {
.ty = ty.toIntern(),
.storage = .{ .repeated_elem = val.toIntern() },
} });
- return repeated.toValue();
+ return Value.fromInterned(repeated);
}
fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
@@ -15522,7 +15522,7 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
.values = &values,
.names = &.{},
});
- return tuple_ty.toType();
+ return Type.fromInterned(tuple_ty);
}
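The second recurring shape in this commit wraps a freshly interned key: where the old code chained .toValue() onto the result of mod.intern, the new code passes that result straight to Value.fromInterned, which is why these hunks gain an extra layer of parentheses. A sketch under the same assumptions — `vec_ty` and `elem` stand in for locals from the surrounding hunks:
// Old: method call chained onto the interned index.
const repeated_old = (try mod.intern(.{ .aggregate = .{
    .ty = vec_ty.toIntern(),
    .storage = .{ .repeated_elem = elem.toIntern() },
} })).toValue();
// New: the same key, wrapped through the named constructor.
const repeated_new = Value.fromInterned(try mod.intern(.{ .aggregate = .{
    .ty = vec_ty.toIntern(),
    .storage = .{ .repeated_elem = elem.toIntern() },
} }));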
fn analyzeArithmetic(
@@ -16036,7 +16036,7 @@ fn analyzePtrArithmetic(
}
// If the addend is not a comptime-known value we can still count on
// it being a multiple of the type size.
- const elem_size = ptr_info.child.toType().abiSize(mod);
+ const elem_size = Type.fromInterned(ptr_info.child).abiSize(mod);
const addend = if (opt_off_val) |off_val| a: {
const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(mod));
break :a elem_size * off_int;
@@ -16073,7 +16073,7 @@ fn analyzePtrArithmetic(
const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(mod));
if (offset_int == 0) return ptr;
if (try ptr_val.getUnsignedIntAdvanced(mod, sema)) |addr| {
- const elem_size = ptr_info.child.toType().abiSize(mod);
+ const elem_size = Type.fromInterned(ptr_info.child).abiSize(mod);
const new_addr = switch (air_tag) {
.ptr_add => addr + elem_size * offset_int,
.ptr_sub => addr - elem_size * offset_int,
@@ -16748,7 +16748,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
assert(block.is_typeof);
// We need a dummy runtime instruction with the correct type.
- return block.addTy(.alloc, capture_ty.toType());
+ return block.addTy(.alloc, Type.fromInterned(capture_ty));
}
fn zirRetAddr(
@@ -17042,7 +17042,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const alignment = if (info.flags.alignment.toByteUnitsOptional()) |alignment|
try mod.intValue(Type.comptime_int, alignment)
else
- try info.child.toType().lazyAbiAlignment(mod);
+ try Type.fromInterned(info.child).lazyAbiAlignment(mod);
const addrspace_ty = try sema.getBuiltinType("AddressSpace");
const pointer_ty = t: {
@@ -17086,7 +17086,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// sentinel: ?*const anyopaque,
(try sema.optRefValue(switch (info.sentinel) {
.none => null,
- else => info.sentinel.toValue(),
+ else => Value.fromInterned(info.sentinel),
})).toIntern(),
};
return Air.internedToRef((try mod.intern(.{ .un = .{
@@ -17630,10 +17630,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} });
};
- try sema.resolveTypeLayout(field_ty.toType());
+ try sema.resolveTypeLayout(Type.fromInterned(field_ty));
const is_comptime = field_val != .none;
- const opt_default_val = if (is_comptime) field_val.toValue() else null;
+ const opt_default_val = if (is_comptime) Value.fromInterned(field_val) else null;
const default_val_ptr = try sema.optRefValue(opt_default_val);
const struct_field_fields = .{
// name: []const u8,
@@ -17645,7 +17645,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_comptime: bool,
Value.makeBool(is_comptime).toIntern(),
// alignment: comptime_int,
- (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod).toByteUnits(0))).toIntern(),
+ (try mod.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(mod).toByteUnits(0))).toIntern(),
};
struct_field_val.* = try mod.intern(.{ .aggregate = .{
.ty = struct_field_ty.toIntern(),
@@ -17667,7 +17667,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.arena.dupe(u8, ip.stringToSlice(name_nts))
else
try std.fmt.allocPrintZ(sema.arena, "{d}", .{i});
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
const field_init = struct_type.fieldInit(ip, i);
const field_is_comptime = struct_type.fieldIsComptime(ip, i);
const name_val = v: {
@@ -17689,7 +17689,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} });
};
- const opt_default_val = if (field_init == .none) null else field_init.toValue();
+ const opt_default_val = if (field_init == .none) null else Value.fromInterned(field_init);
const default_val_ptr = try sema.optRefValue(opt_default_val);
const alignment = switch (struct_type.layout) {
.Packed => .none,
@@ -17750,7 +17750,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const backing_integer_val = try mod.intern(.{ .opt = .{
.ty = (try mod.optionalType(.type_type)).toIntern(),
.val = if (mod.typeToPackedStruct(ty)) |packed_struct| val: {
- assert(packed_struct.backingIntType(ip).toType().isInt(mod));
+ assert(Type.fromInterned(packed_struct.backingIntType(ip).*).isInt(mod));
break :val packed_struct.backingIntType(ip).*;
} else .none,
} });
@@ -19145,7 +19145,7 @@ fn unionInit(
const mod = sema.mod;
const ip = &mod.intern_pool;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src);
- const field_ty = mod.typeToUnion(union_ty).?.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(mod.typeToUnion(union_ty).?.field_types.get(ip)[field_index]);
const init = try sema.coerce(block, field_ty, uncasted_init, init_src);
if (try sema.resolveValue(init)) |init_val| {
@@ -19256,7 +19256,7 @@ fn zirStructInit(
const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src);
const tag_ty = resolved_ty.unionTagTypeHypothetical(mod);
const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
- const field_ty = mod.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(mod.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index]);
if (field_ty.zigTypeTag(mod) == .NoReturn) {
return sema.failWithOwnedErrorMsg(block, msg: {
@@ -19275,11 +19275,11 @@ fn zirStructInit(
const init_inst = try sema.coerce(block, field_ty, uncoerced_init_inst, field_src);
if (try sema.resolveValue(init_inst)) |val| {
- const struct_val = (try mod.intern(.{ .un = .{
+ const struct_val = Value.fromInterned((try mod.intern(.{ .un = .{
.ty = resolved_ty.toIntern(),
.tag = try tag_val.intern(tag_ty, mod),
.val = try val.intern(field_ty, mod),
- } })).toValue();
+ } })));
const final_val_inst = try sema.coerce(block, result_ty, Air.internedToRef(struct_val.toIntern()), src);
const final_val = (try sema.resolveValue(final_val_inst)).?;
return sema.addConstantMaybeRef(final_val.toIntern(), is_ref);
@@ -19336,7 +19336,7 @@ fn finishStructInit(
for (0..anon_struct.types.len) |i| {
if (field_inits[i] != .none) {
// Coerce the init value to the field type.
- const field_ty = anon_struct.types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(anon_struct.types.get(ip)[i]);
field_inits[i] = sema.coerce(block, field_ty, field_inits[i], .unneeded) catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = mod.declPtr(block.src_decl);
@@ -19378,7 +19378,7 @@ fn finishStructInit(
for (0..struct_type.field_types.len) |i| {
if (field_inits[i] != .none) {
// Coerce the init value to the field type.
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
field_inits[i] = sema.coerce(block, field_ty, field_inits[i], init_src) catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = mod.declPtr(block.src_decl);
@@ -19548,20 +19548,20 @@ fn structInitAnon(
const init = try sema.resolveInst(item.data.init);
field_ty.* = sema.typeOf(init).toIntern();
- if (field_ty.toType().zigTypeTag(mod) == .Opaque) {
+ if (Type.fromInterned(field_ty.*).zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const decl = mod.declPtr(block.src_decl);
const field_src = mod.initSrc(src.node_offset.x, decl, @intCast(i_usize));
const msg = try sema.errMsg(block, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(sema.gpa);
- try sema.addDeclaredHereNote(msg, field_ty.toType());
+ try sema.addDeclaredHereNote(msg, Type.fromInterned(field_ty.*));
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
if (try sema.resolveValue(init)) |init_val| {
- field_val.* = try init_val.intern(field_ty.toType(), mod);
+ field_val.* = try init_val.intern(Type.fromInterned(field_ty.*), mod);
} else {
field_val.* = .none;
runtime_index = @intCast(i_usize);
@@ -19635,7 +19635,7 @@ fn structInitAnon(
element_refs[i] = try sema.resolveInst(item.data.init);
}
- return block.addAggregateInit(tuple_ty.toType(), element_refs);
+ return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs);
}
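One wrinkle shows up where the index comes from a by-pointer loop capture, as in the hunk above and again in the zirReify hunks below: the old method-call form auto-dereferenced the pointer, while fromInterned takes the index by value, so the new code spells out the load with `.*`. An illustrative loop, with `field_types` assumed as a slice of InternPool.Index:
for (field_types) |*field_ty| {
    // The old form relied on Zig auto-dereferencing pointers for method calls:
    //     const t = field_ty.toType();
    // The new form takes the index by value, so the pointer is deref'd explicitly:
    const t = Type.fromInterned(field_ty.*);
    _ = t;
}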
fn zirArrayInit(
@@ -19836,12 +19836,12 @@ fn arrayInitAnon(
const operand_src = src; // TODO better source location
const elem = try sema.resolveInst(operand);
types[i] = sema.typeOf(elem).toIntern();
- if (types[i].toType().zigTypeTag(mod) == .Opaque) {
+ if (Type.fromInterned(types[i]).zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(gpa);
- try sema.addDeclaredHereNote(msg, types[i].toType());
+ try sema.addDeclaredHereNote(msg, Type.fromInterned(types[i]));
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
@@ -19899,7 +19899,7 @@ fn arrayInitAnon(
element_refs[i] = try sema.resolveInst(operand);
}
- return block.addAggregateInit(tuple_ty.toType(), element_refs);
+ return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs);
}
fn addConstantMaybeRef(sema: *Sema, val: InternPool.Index, is_ref: bool) !Air.Inst.Ref {
@@ -19979,7 +19979,7 @@ fn fieldType(
.Optional => {
// Struct/array init through optional requires the child type to not be a pointer.
// If the child of .optional is a pointer it'll error on the next loop.
- cur_ty = ip.indexToKey(cur_ty.toIntern()).opt_type.toType();
+ cur_ty = Type.fromInterned(ip.indexToKey(cur_ty.toIntern()).opt_type);
continue;
},
.ErrorUnion => {
@@ -20294,8 +20294,8 @@ fn zirReify(
});
const union_val = ip.indexToKey(val.toIntern()).un;
const target = mod.getTarget();
- if (try union_val.val.toValue().anyUndef(mod)) return sema.failWithUseOfUndef(block, src);
- const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag.toValue(), mod).?;
+ if (try Value.fromInterned(union_val.val).anyUndef(mod)) return sema.failWithUseOfUndef(block, src);
+ const tag_index = type_info_ty.unionTagFieldIndex(Value.fromInterned(union_val.tag), mod).?;
switch (@as(std.builtin.TypeId, @enumFromInt(tag_index))) {
.Type => return .type_type,
.Void => return .void_type,
@@ -20309,11 +20309,11 @@ fn zirReify(
.EnumLiteral => return .enum_literal_type,
.Int => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
- const signedness_val = try union_val.val.toValue().fieldValue(
+ const signedness_val = try Value.fromInterned(union_val.val).fieldValue(
mod,
struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "signedness")).?,
);
- const bits_val = try union_val.val.toValue().fieldValue(
+ const bits_val = try Value.fromInterned(union_val.val).fieldValue(
mod,
struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "bits")).?,
);
@@ -20325,11 +20325,11 @@ fn zirReify(
},
.Vector => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
- const len_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const len_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "len"),
).?);
- const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "child"),
).?);
@@ -20347,7 +20347,7 @@ fn zirReify(
},
.Float => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
- const bits_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const bits_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "bits"),
).?);
@@ -20365,35 +20365,35 @@ fn zirReify(
},
.Pointer => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
- const size_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const size_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "size"),
).?);
- const is_const_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const is_const_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_const"),
).?);
- const is_volatile_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const is_volatile_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_volatile"),
).?);
- const alignment_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const alignment_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "alignment"),
).?);
- const address_space_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const address_space_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "address_space"),
).?);
- const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "child"),
).?);
- const is_allowzero_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const is_allowzero_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_allowzero"),
).?);
- const sentinel_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "sentinel"),
).?);
@@ -20477,15 +20477,15 @@ fn zirReify(
},
.Array => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
- const len_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const len_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "len"),
).?);
- const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "child"),
).?);
- const sentinel_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const sentinel_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "sentinel"),
).?);
@@ -20506,7 +20506,7 @@ fn zirReify(
},
.Optional => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
- const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "child"),
).?);
@@ -20518,11 +20518,11 @@ fn zirReify(
},
.ErrorUnion => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
- const error_set_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const error_set_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "error_set"),
).?);
- const payload_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const payload_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "payload"),
).?);
@@ -20538,7 +20538,7 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.ErrorSet => {
- const payload_val = union_val.val.toValue().optionalValue(mod) orelse
+ const payload_val = Value.fromInterned(union_val.val).optionalValue(mod) orelse
return Air.internedToRef(Type.anyerror.toIntern());
const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod));
@@ -20567,23 +20567,23 @@ fn zirReify(
},
.Struct => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
- const layout_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const layout_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "layout"),
).?);
- const backing_integer_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const backing_integer_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "backing_integer"),
).?);
- const fields_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const fields_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "fields"),
).?);
- const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "decls"),
).?);
- const is_tuple_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const is_tuple_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_tuple"),
).?);
@@ -20603,19 +20603,19 @@ fn zirReify(
},
.Enum => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
- const tag_type_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "tag_type"),
).?);
- const fields_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const fields_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "fields"),
).?);
- const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "decls"),
).?);
- const is_exhaustive_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const is_exhaustive_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_exhaustive"),
).?);
@@ -20662,7 +20662,7 @@ fn zirReify(
//errdefer ip.remove(incomplete_enum.index);
new_decl.ty = Type.type;
- new_decl.val = incomplete_enum.index.toValue();
+ new_decl.val = Value.fromInterned(incomplete_enum.index);
for (0..fields_len) |field_i| {
const elem_val = try fields_val.elemValue(mod, field_i);
@@ -20718,7 +20718,7 @@ fn zirReify(
},
.Opaque => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
- const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "decls"),
).?);
@@ -20759,8 +20759,8 @@ fn zirReify(
//errdefer ip.remove(opaque_ty);
new_decl.ty = Type.type;
- new_decl.val = opaque_ty.toValue();
- new_namespace.ty = opaque_ty.toType();
+ new_decl.val = Value.fromInterned(opaque_ty);
+ new_namespace.ty = Type.fromInterned(opaque_ty);
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
@@ -20768,19 +20768,19 @@ fn zirReify(
},
.Union => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
- const layout_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const layout_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "layout"),
).?);
- const tag_type_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "tag_type"),
).?);
- const fields_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const fields_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "fields"),
).?);
- const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "decls"),
).?);
@@ -20847,10 +20847,10 @@ fn zirReify(
const msg = msg: {
const msg = try sema.errMsg(block, src, "no field named '{}' in enum '{}'", .{
field_name.fmt(ip),
- enum_tag_ty.toType().fmt(mod),
+ Type.fromInterned(enum_tag_ty).fmt(mod),
});
errdefer msg.destroy(gpa);
- try sema.addDeclaredHereNote(msg, enum_tag_ty.toType());
+ try sema.addDeclaredHereNote(msg, Type.fromInterned(enum_tag_ty));
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
@@ -20929,11 +20929,11 @@ fn zirReify(
for (tag_info.names.get(ip), 0..) |field_name, field_index| {
if (explicit_tags_seen[field_index]) continue;
- try sema.addFieldErrNote(enum_tag_ty.toType(), field_index, msg, "field '{}' missing, declared here", .{
+ try sema.addFieldErrNote(Type.fromInterned(enum_tag_ty), field_index, msg, "field '{}' missing, declared here", .{
field_name.fmt(ip),
});
}
- try sema.addDeclaredHereNote(msg, enum_tag_ty.toType());
+ try sema.addDeclaredHereNote(msg, Type.fromInterned(enum_tag_ty));
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
@@ -20993,8 +20993,8 @@ fn zirReify(
});
new_decl.ty = Type.type;
- new_decl.val = union_ty.toValue();
- new_namespace.ty = union_ty.toType();
+ new_decl.val = Value.fromInterned(union_ty);
+ new_namespace.ty = Type.fromInterned(union_ty);
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
@@ -21002,27 +21002,27 @@ fn zirReify(
},
.Fn => {
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
- const calling_convention_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const calling_convention_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "calling_convention"),
).?);
- const alignment_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const alignment_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "alignment"),
).?);
- const is_generic_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const is_generic_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_generic"),
).?);
- const is_var_args_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const is_var_args_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_var_args"),
).?);
- const return_type_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const return_type_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "return_type"),
).?);
- const params_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
+ const params_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "params"),
).?);
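The zirReify hunks repeat one lookup shape once per @typeInfo field. Purely as a reading aid — and not something this commit introduces — the repeated expression could be factored into a hypothetical helper along these lines, with `payload` standing for `union_val.val` at each call site:
// Hypothetical helper (not in this commit): fetch a named field of the
// @typeInfo union payload, mirroring the expression the hunks above repeat.
fn reifyFieldValue(sema: *Sema, payload: InternPool.Index, name: []const u8) !Value {
    const mod = sema.mod;
    const ip = &mod.intern_pool;
    const struct_type = ip.indexToKey(ip.typeOf(payload)).struct_type;
    return Value.fromInterned(payload).fieldValue(
        mod,
        struct_type.nameIndex(ip, try ip.getOrPutString(sema.gpa, name)).?,
    );
}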
@@ -21075,7 +21075,7 @@ fn zirReify(
param_type.* = param_type_val.toIntern();
if (param_is_noalias_val.toBool()) {
- if (!param_type.toType().isPtrAtRuntime(mod)) {
+ if (!Type.fromInterned(param_type.*).isPtrAtRuntime(mod)) {
return sema.fail(block, src, "non-pointer parameter declared noalias", .{});
}
noalias_bits |= @as(u32, 1) << (std.math.cast(u5, i) orelse
@@ -21158,7 +21158,7 @@ fn reifyStruct(
const struct_type = ip.indexToKey(ty).struct_type;
new_decl.ty = Type.type;
- new_decl.val = ty.toValue();
+ new_decl.val = Value.fromInterned(ty);
// Fields
for (0..fields_len) |i| {
@@ -21290,11 +21290,11 @@ fn reifyStruct(
if (layout == .Packed) {
for (0..struct_type.field_types.len) |index| {
- const field_ty = struct_type.field_types.get(ip)[index].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
sema.resolveTypeLayout(field_ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
- try sema.addFieldErrNote(ty.toType(), index, msg, "while checking this field", .{});
+ try sema.addFieldErrNote(Type.fromInterned(ty), index, msg, "while checking this field", .{});
return err;
},
else => return err,
@@ -21303,7 +21303,7 @@ fn reifyStruct(
var fields_bit_sum: u64 = 0;
for (0..struct_type.field_types.len) |i| {
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
fields_bit_sum += field_ty.bitSize(mod);
}
@@ -21657,10 +21657,10 @@ fn ptrFromIntVal(
return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)});
return switch (ptr_ty.zigTypeTag(mod)) {
- .Optional => (try mod.intern(.{ .opt = .{
+ .Optional => Value.fromInterned((try mod.intern(.{ .opt = .{
.ty = ptr_ty.toIntern(),
.val = if (addr == 0) .none else (try mod.ptrIntValue(ptr_ty.childType(mod), addr)).toIntern(),
- } })).toValue(),
+ } }))),
.Pointer => try mod.ptrIntValue(ptr_ty, addr),
else => unreachable,
};
@@ -21849,14 +21849,14 @@ fn ptrCastFull(
const src_info = operand_ty.ptrInfo(mod);
const dest_info = dest_ty.ptrInfo(mod);
- try sema.resolveTypeLayout(src_info.child.toType());
- try sema.resolveTypeLayout(dest_info.child.toType());
+ try sema.resolveTypeLayout(Type.fromInterned(src_info.child));
+ try sema.resolveTypeLayout(Type.fromInterned(dest_info.child));
const src_slice_like = src_info.flags.size == .Slice or
- (src_info.flags.size == .One and src_info.child.toType().zigTypeTag(mod) == .Array);
+ (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(mod) == .Array);
const dest_slice_like = dest_info.flags.size == .Slice or
- (dest_info.flags.size == .One and dest_info.child.toType().zigTypeTag(mod) == .Array);
+ (dest_info.flags.size == .One and Type.fromInterned(dest_info.child).zigTypeTag(mod) == .Array);
if (dest_info.flags.size == .Slice and !src_slice_like) {
return sema.fail(block, src, "illegal pointer cast to slice", .{});
@@ -21864,12 +21864,12 @@ fn ptrCastFull(
if (dest_info.flags.size == .Slice) {
const src_elem_size = switch (src_info.flags.size) {
- .Slice => src_info.child.toType().abiSize(mod),
+ .Slice => Type.fromInterned(src_info.child).abiSize(mod),
// pointer to array
- .One => src_info.child.toType().childType(mod).abiSize(mod),
+ .One => Type.fromInterned(src_info.child).childType(mod).abiSize(mod),
else => unreachable,
};
- const dest_elem_size = dest_info.child.toType().abiSize(mod);
+ const dest_elem_size = Type.fromInterned(dest_info.child).abiSize(mod);
if (src_elem_size != dest_elem_size) {
return sema.fail(block, src, "TODO: implement @ptrCast between slices changing the length", .{});
}
@@ -21891,7 +21891,7 @@ fn ptrCastFull(
errdefer msg.destroy(sema.gpa);
if (dest_info.flags.size == .Many and
(src_info.flags.size == .Slice or
- (src_info.flags.size == .One and src_info.child.toType().zigTypeTag(mod) == .Array)))
+ (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(mod) == .Array)))
{
try sema.errNote(block, src, msg, "use 'ptr' field to convert slice to many pointer", .{});
} else {
@@ -21904,10 +21904,10 @@ fn ptrCastFull(
check_child: {
const src_child = if (dest_info.flags.size == .Slice and src_info.flags.size == .One) blk: {
// *[n]T -> []T
- break :blk src_info.child.toType().childType(mod);
- } else src_info.child.toType();
+ break :blk Type.fromInterned(src_info.child).childType(mod);
+ } else Type.fromInterned(src_info.child);
- const dest_child = dest_info.child.toType();
+ const dest_child = Type.fromInterned(dest_info.child);
const imc_res = try sema.coerceInMemoryAllowed(
block,
@@ -21940,7 +21940,7 @@ fn ptrCastFull(
}
if (src_slice_like and src_info.flags.size == .One and dest_info.flags.size == .Slice) {
// *[n]T -> []T
- const arr_ty = src_info.child.toType();
+ const arr_ty = Type.fromInterned(src_info.child);
if (arr_ty.sentinel(mod)) |src_sentinel| {
const coerced_sent = try mod.intern_pool.getCoerced(sema.gpa, src_sentinel.toIntern(), dest_info.child);
if (dest_info.sentinel == coerced_sent) break :check_sent;
@@ -21949,12 +21949,12 @@ fn ptrCastFull(
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = if (src_info.sentinel == .none) blk: {
break :blk try sema.errMsg(block, src, "destination pointer requires '{}' sentinel", .{
- dest_info.sentinel.toValue().fmtValue(dest_info.child.toType(), mod),
+ Value.fromInterned(dest_info.sentinel).fmtValue(Type.fromInterned(dest_info.child), mod),
});
} else blk: {
break :blk try sema.errMsg(block, src, "pointer sentinel '{}' cannot coerce into pointer sentinel '{}'", .{
- src_info.sentinel.toValue().fmtValue(src_info.child.toType(), mod),
- dest_info.sentinel.toValue().fmtValue(dest_info.child.toType(), mod),
+ Value.fromInterned(src_info.sentinel).fmtValue(Type.fromInterned(src_info.child), mod),
+ Value.fromInterned(dest_info.sentinel).fmtValue(Type.fromInterned(dest_info.child), mod),
});
};
errdefer msg.destroy(sema.gpa);
@@ -22010,12 +22010,12 @@ fn ptrCastFull(
const src_align = if (src_info.flags.alignment != .none)
src_info.flags.alignment
else
- src_info.child.toType().abiAlignment(mod);
+ Type.fromInterned(src_info.child).abiAlignment(mod);
const dest_align = if (dest_info.flags.alignment != .none)
dest_info.flags.alignment
else
- dest_info.child.toType().abiAlignment(mod);
+ Type.fromInterned(dest_info.child).abiAlignment(mod);
if (!flags.align_cast) {
if (dest_align.compare(.gt, src_align)) {
@@ -22123,7 +22123,7 @@ fn ptrCastFull(
}
if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) {
if (ptr_val.isUndef(mod)) return mod.undefRef(dest_ty);
- const arr_len = try mod.intValue(Type.usize, src_info.child.toType().arrayLen(mod));
+ const arr_len = try mod.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod));
return Air.internedToRef((try mod.intern(.{ .ptr = .{
.ty = dest_ty.toIntern(),
.addr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr,
@@ -22139,7 +22139,7 @@ fn ptrCastFull(
try sema.requireRuntimeBlock(block, src, null);
if (block.wantSafety() and operand_ty.ptrAllowsZero(mod) and !dest_ty.ptrAllowsZero(mod) and
- (try sema.typeHasRuntimeBits(dest_info.child.toType()) or dest_info.child.toType().zigTypeTag(mod) == .Fn))
+ (try sema.typeHasRuntimeBits(Type.fromInterned(dest_info.child)) or Type.fromInterned(dest_info.child).zigTypeTag(mod) == .Fn))
{
const ptr_int = try block.addUnOp(.int_from_ptr, ptr);
const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
@@ -22153,7 +22153,7 @@ fn ptrCastFull(
if (block.wantSafety() and
dest_align.compare(.gt, src_align) and
- try sema.typeHasRuntimeBits(dest_info.child.toType()))
+ try sema.typeHasRuntimeBits(Type.fromInterned(dest_info.child)))
{
const align_bytes_minus_1 = dest_align.toByteUnitsOptional().? - 1;
const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
@@ -22196,7 +22196,7 @@ fn ptrCastFull(
if (dest_info.flags.size == .Slice and src_info.flags.size != .Slice) {
// We have to construct a slice using the operand's child's array length
// Note that we know from the check at the start of the function that operand_ty is slice-like
- const arr_len = Air.internedToRef((try mod.intValue(Type.usize, src_info.child.toType().arrayLen(mod))).toIntern());
+ const arr_len = Air.internedToRef((try mod.intValue(Type.usize, Type.fromInterned(src_info.child).arrayLen(mod))).toIntern());
return block.addInst(.{
.tag = .slice,
.data = .{ .ty_pl = .{
@@ -22549,7 +22549,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
if (i == field_index) {
return bit_sum;
}
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
bit_sum += field_ty.bitSize(mod);
} else unreachable;
},
@@ -23447,10 +23447,10 @@ fn analyzeShuffle(
if (a_len < b_len) {
const undef = try mod.undefRef(a_ty);
- a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask.toValue(), @intCast(max_len));
+ a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, Value.fromInterned(expand_mask), @intCast(max_len));
} else {
const undef = try mod.undefRef(b_ty);
- b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask.toValue(), @intCast(max_len));
+ b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, Value.fromInterned(expand_mask), @intCast(max_len));
}
}
@@ -24546,10 +24546,10 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
const dest_elem_ty: Type = dest_elem_ty: {
const ptr_info = dest_ptr_ty.ptrInfo(mod);
switch (ptr_info.flags.size) {
- .Slice => break :dest_elem_ty ptr_info.child.toType(),
+ .Slice => break :dest_elem_ty Type.fromInterned(ptr_info.child),
.One => {
- if (ptr_info.child.toType().zigTypeTag(mod) == .Array) {
- break :dest_elem_ty ptr_info.child.toType().childType(mod);
+ if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Array) {
+ break :dest_elem_ty Type.fromInterned(ptr_info.child).childType(mod);
}
},
.Many, .C => {},
@@ -24583,10 +24583,10 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
.child = dest_elem_ty.toIntern(),
.len = len_u64,
});
- const array_val = (try mod.intern(.{ .aggregate = .{
+ const array_val = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = array_ty.toIntern(),
.storage = .{ .repeated_elem = elem_val.toIntern() },
- } })).toValue();
+ } })));
const array_ptr_ty = ty: {
var info = dest_ptr_ty.ptrInfo(mod);
info.flags.size = .One;
@@ -25227,7 +25227,7 @@ fn zirBuiltinExtern(
// We only access this decl through the decl_ref with the correct type created
// below, so this type doesn't matter
new_decl.ty = ty;
- new_decl.val = new_var.toValue();
+ new_decl.val = Value.fromInterned(new_var);
new_decl.alignment = .none;
new_decl.@"linksection" = .none;
new_decl.has_tv = true;
@@ -25237,14 +25237,14 @@ fn zirBuiltinExtern(
try sema.ensureDeclAnalyzed(new_decl_index);
- return Air.internedToRef((try mod.getCoerced((try mod.intern(.{ .ptr = .{
+ return Air.internedToRef((try mod.getCoerced(Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => ty.toIntern(),
.opt_type => |child_type| child_type,
else => unreachable,
},
.addr = .{ .decl = new_decl_index },
- } })).toValue(), ty)).toIntern());
+ } }))), ty)).toIntern());
}
fn zirWorkItem(
@@ -25430,7 +25430,7 @@ fn explainWhyTypeIsComptimeInner(
.Inline => try mod.errNoteNonLazy(src_loc, msg, "function has inline calling convention", .{}),
else => {},
}
- if (fn_info.return_type.toType().comptimeOnly(mod)) {
+ if (Type.fromInterned(fn_info.return_type).comptimeOnly(mod)) {
try mod.errNoteNonLazy(src_loc, msg, "function has a comptime-only return type", .{});
}
return;
@@ -25450,7 +25450,7 @@ fn explainWhyTypeIsComptimeInner(
if (mod.typeToStruct(ty)) |struct_type| {
for (0..struct_type.field_types.len) |i| {
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
const field_src_loc = mod.fieldSrcLoc(struct_type.decl.unwrap().?, .{
.index = i,
.range = .type,
@@ -25470,7 +25470,7 @@ fn explainWhyTypeIsComptimeInner(
if (mod.typeToUnion(ty)) |union_obj| {
for (0..union_obj.field_types.len) |i| {
- const field_ty = union_obj.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[i]);
const field_src_loc = mod.fieldSrcLoc(union_obj.decl, .{
.index = i,
.range = .type,
@@ -26096,7 +26096,7 @@ fn fieldVal(
} else if (ip.stringEqlSlice(field_name, "ptr") and is_pointer_to) {
const ptr_info = object_ty.ptrInfo(mod);
const result_ty = try sema.ptrType(.{
- .child = ptr_info.child.toType().childType(mod).toIntern(),
+ .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(),
.sentinel = if (inner_ty.sentinel(mod)) |s| s.toIntern() else .none,
.flags = .{
.size = .Many,
@@ -26515,7 +26515,7 @@ fn fieldCallBind(
if (mod.typeToStruct(concrete_ty)) |struct_type| {
const field_index = struct_type.nameIndex(ip, field_name) orelse
break :find_field;
- const field_ty = struct_type.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
return sema.finishFieldCallBind(block, src, ptr_ty, field_ty, field_index, object_ptr);
} else if (concrete_ty.isTuple(mod)) {
@@ -26540,7 +26540,7 @@ fn fieldCallBind(
try sema.resolveTypeFields(concrete_ty);
const union_obj = mod.typeToUnion(concrete_ty).?;
const field_index = union_obj.nameIndex(ip, field_name) orelse break :find_field;
- const field_ty = union_obj.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
return sema.finishFieldCallBind(block, src, ptr_ty, field_ty, field_index, object_ptr);
},
@@ -26563,7 +26563,7 @@ fn fieldCallBind(
if (mod.typeToFunc(decl_type)) |func_type| f: {
if (func_type.param_types.len == 0) break :f;
- const first_param_type = func_type.param_types.get(ip)[0].toType();
+ const first_param_type = Type.fromInterned(func_type.param_types.get(ip)[0]);
// zig fmt: off
if (first_param_type.isGenericPoison() or (
first_param_type.zigTypeTag(mod) == .Pointer and
@@ -26803,14 +26803,14 @@ fn structFieldPtrByIndex(
const parent_align = if (struct_ptr_ty_info.flags.alignment != .none)
struct_ptr_ty_info.flags.alignment
else
- try sema.typeAbiAlignment(struct_ptr_ty_info.child.toType());
+ try sema.typeAbiAlignment(Type.fromInterned(struct_ptr_ty_info.child));
if (struct_type.layout == .Packed) {
comptime assert(Type.packed_struct_layout_version == 2);
var running_bits: u16 = 0;
for (0..struct_type.field_types.len) |i| {
- const f_ty = struct_type.field_types.get(ip)[i].toType();
+ const f_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
if (!(try sema.typeHasRuntimeBits(f_ty))) continue;
if (i == field_index) {
@@ -26840,8 +26840,8 @@ fn structFieldPtrByIndex(
if (parent_align != .none and ptr_ty_data.packed_offset.bit_offset % 8 == 0 and
target.cpu.arch.endian() == .little)
{
- const elem_size_bytes = try sema.typeAbiSize(ptr_ty_data.child.toType());
- const elem_size_bits = ptr_ty_data.child.toType().bitSize(mod);
+ const elem_size_bytes = try sema.typeAbiSize(Type.fromInterned(ptr_ty_data.child));
+ const elem_size_bits = Type.fromInterned(ptr_ty_data.child).bitSize(mod);
if (elem_size_bytes * 8 == elem_size_bits) {
const byte_offset = ptr_ty_data.packed_offset.bit_offset / 8;
const new_align: Alignment = @enumFromInt(@ctz(byte_offset | parent_align.toByteUnitsOptional().?));
@@ -26863,7 +26863,7 @@ fn structFieldPtrByIndex(
// Our alignment is capped at the field alignment.
const field_align = try sema.structFieldAlignment(
struct_type.fieldAlign(ip, field_index),
- field_ty.toType(),
+ Type.fromInterned(field_ty),
struct_type.layout,
);
ptr_ty_data.flags.alignment = if (struct_ptr_ty_info.flags.alignment == .none)
@@ -26925,7 +26925,7 @@ fn structFieldVal(
return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]);
}
- const field_ty = struct_type.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (try sema.resolveValue(struct_byval)) |struct_val| {
if (struct_val.isUndef(mod)) return mod.undefRef(field_ty);
@@ -27013,8 +27013,8 @@ fn tupleFieldValByIndex(
.undef => mod.undefRef(field_ty),
.aggregate => |aggregate| Air.internedToRef(switch (aggregate.storage) {
.bytes => |bytes| try mod.intValue(Type.u8, bytes[0]),
- .elems => |elems| elems[field_index].toValue(),
- .repeated_elem => |elem| elem.toValue(),
+ .elems => |elems| Value.fromInterned(elems[field_index]),
+ .repeated_elem => |elem| Value.fromInterned(elem),
}.toIntern()),
else => unreachable,
};
@@ -27049,7 +27049,7 @@ fn unionFieldPtr(
try sema.resolveTypeFields(union_ty);
const union_obj = mod.typeToUnion(union_ty).?;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
- const field_ty = union_obj.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
const ptr_field_ty = try sema.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{
@@ -27067,7 +27067,7 @@ fn unionFieldPtr(
},
.packed_offset = union_ptr_info.packed_offset,
});
- const enum_field_index: u32 = @intCast(union_obj.enum_tag_ty.toType().enumFieldIndex(field_name, mod).?);
+ const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, mod).?);
if (initializing and field_ty.zigTypeTag(mod) == .NoReturn) {
const msg = msg: {
@@ -27092,12 +27092,12 @@ fn unionFieldPtr(
return sema.failWithUseOfUndef(block, src);
}
const un = ip.indexToKey(union_val.toIntern()).un;
- const field_tag = try mod.enumValueFieldIndex(union_obj.enum_tag_ty.toType(), enum_field_index);
+ const field_tag = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const tag_matches = un.tag == field_tag.toIntern();
if (!tag_matches) {
const msg = msg: {
- const active_index = union_obj.enum_tag_ty.toType().enumTagFieldIndex(un.tag.toValue(), mod).?;
- const active_field_name = union_obj.enum_tag_ty.toType().enumFieldName(active_index, mod);
+ const active_index = Type.fromInterned(union_obj.enum_tag_ty).enumTagFieldIndex(Value.fromInterned(un.tag), mod).?;
+ const active_field_name = Type.fromInterned(union_obj.enum_tag_ty).enumFieldName(active_index, mod);
const msg = try sema.errMsg(block, src, "access of union field '{}' while field '{}' is active", .{
field_name.fmt(ip),
active_field_name.fmt(ip),
@@ -27124,11 +27124,11 @@ fn unionFieldPtr(
if (!initializing and union_obj.getLayout(ip) == .Auto and block.wantSafety() and
union_ty.unionTagTypeSafety(mod) != null and union_obj.field_names.len > 1)
{
- const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.enum_tag_ty.toType(), enum_field_index);
+ const wanted_tag_val = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
// TODO would it be better if get_union_tag supported pointers to unions?
const union_val = try block.addTyOp(.load, union_ty, union_ptr);
- const active_tag = try block.addTyOp(.get_union_tag, union_obj.enum_tag_ty.toType(), union_val);
+ const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_val);
try sema.panicInactiveUnionField(block, src, active_tag, wanted_tag);
}
if (field_ty.zigTypeTag(mod) == .NoReturn) {
@@ -27154,14 +27154,14 @@ fn unionFieldVal(
try sema.resolveTypeFields(union_ty);
const union_obj = mod.typeToUnion(union_ty).?;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
- const field_ty = union_obj.field_types.get(ip)[field_index].toType();
- const enum_field_index: u32 = @intCast(union_obj.enum_tag_ty.toType().enumFieldIndex(field_name, mod).?);
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ const enum_field_index: u32 = @intCast(Type.fromInterned(union_obj.enum_tag_ty).enumFieldIndex(field_name, mod).?);
if (try sema.resolveValue(union_byval)) |union_val| {
if (union_val.isUndef(mod)) return mod.undefRef(field_ty);
const un = ip.indexToKey(union_val.toIntern()).un;
- const field_tag = try mod.enumValueFieldIndex(union_obj.enum_tag_ty.toType(), enum_field_index);
+ const field_tag = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const tag_matches = un.tag == field_tag.toIntern();
switch (union_obj.getLayout(ip)) {
.Auto => {
@@ -27169,8 +27169,8 @@ fn unionFieldVal(
return Air.internedToRef(un.val);
} else {
const msg = msg: {
- const active_index = union_obj.enum_tag_ty.toType().enumTagFieldIndex(un.tag.toValue(), mod).?;
- const active_field_name = union_obj.enum_tag_ty.toType().enumFieldName(active_index, mod);
+ const active_index = Type.fromInterned(union_obj.enum_tag_ty).enumTagFieldIndex(Value.fromInterned(un.tag), mod).?;
+ const active_field_name = Type.fromInterned(union_obj.enum_tag_ty).enumFieldName(active_index, mod);
const msg = try sema.errMsg(block, src, "access of union field '{}' while field '{}' is active", .{
field_name.fmt(ip), active_field_name.fmt(ip),
});
@@ -27186,11 +27186,11 @@ fn unionFieldVal(
return Air.internedToRef(un.val);
} else {
const old_ty = if (un.tag == .none)
- ip.typeOf(un.val).toType()
+ Type.fromInterned(ip.typeOf(un.val))
else
- union_ty.unionFieldType(un.tag.toValue(), mod).?;
+ union_ty.unionFieldType(Value.fromInterned(un.tag), mod).?;
- if (try sema.bitCastUnionFieldVal(block, src, un.val.toValue(), old_ty, field_ty, layout)) |new_val| {
+ if (try sema.bitCastUnionFieldVal(block, src, Value.fromInterned(un.val), old_ty, field_ty, layout)) |new_val| {
return Air.internedToRef(new_val.toIntern());
}
}
@@ -27202,9 +27202,9 @@ fn unionFieldVal(
if (union_obj.getLayout(ip) == .Auto and block.wantSafety() and
union_ty.unionTagTypeSafety(mod) != null and union_obj.field_names.len > 1)
{
- const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.enum_tag_ty.toType(), enum_field_index);
+ const wanted_tag_val = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
- const active_tag = try block.addTyOp(.get_union_tag, union_obj.enum_tag_ty.toType(), union_byval);
+ const active_tag = try block.addTyOp(.get_union_tag, Type.fromInterned(union_obj.enum_tag_ty), union_byval);
try sema.panicInactiveUnionField(block, src, active_tag, wanted_tag);
}
if (field_ty.zigTypeTag(mod) == .NoReturn) {
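
The wanted_tag/active_tag comparison emitted above is the runtime half of the inactive-field rule; roughly, the user-level behavior being enforced:

    const std = @import("std");

    const U = union(enum) { a: u32, b: f32 };

    pub fn main() void {
        var u: U = .{ .a = 1 };
        u = .{ .b = 2.5 };
        // Reading u.a here would trip the "access of union field while
        // field 'b' is active" check in safe builds.
        std.debug.print("{d}\n", .{u.b});
    }
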
@@ -27906,7 +27906,7 @@ fn coerceExtra(
if (!inst_ty.isSinglePointer(mod)) break :single_item;
if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
const ptr_elem_ty = inst_ty.childType(mod);
- const array_ty = dest_info.child.toType();
+ const array_ty = Type.fromInterned(dest_info.child);
if (array_ty.zigTypeTag(mod) != .Array) break :single_item;
const array_elem_ty = array_ty.childType(mod);
if (array_ty.arrayLen(mod) != 1) break :single_item;
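
The single_item branch above implements the documented coercion from *T to *[1]T (hence the arrayLen != 1 guard); a quick sketch:

    const std = @import("std");

    pub fn main() void {
        var x: u8 = 5;
        // A single-item pointer coerces to a pointer to a length-1 array.
        const p: *[1]u8 = &x;
        std.debug.print("{}\n", .{p[0]});
    }
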
@@ -27927,7 +27927,7 @@ fn coerceExtra(
const array_elem_type = array_ty.childType(mod);
const dest_is_mut = !dest_info.flags.is_const;
- const dst_elem_type = dest_info.child.toType();
+ const dst_elem_type = Type.fromInterned(dest_info.child);
const elem_res = try sema.coerceInMemoryAllowed(block, dst_elem_type, array_elem_type, dest_is_mut, target, dest_ty_src, inst_src);
switch (elem_res) {
.ok => {},
@@ -27948,7 +27948,7 @@ fn coerceExtra(
{
in_memory_result = .{ .ptr_sentinel = .{
.actual = inst_sent,
- .wanted = dest_info.sentinel.toValue(),
+ .wanted = Value.fromInterned(dest_info.sentinel),
.ty = dst_elem_type,
} };
break :src_array_ptr;
@@ -27956,7 +27956,7 @@ fn coerceExtra(
} else {
in_memory_result = .{ .ptr_sentinel = .{
.actual = Value.@"unreachable",
- .wanted = dest_info.sentinel.toValue(),
+ .wanted = Value.fromInterned(dest_info.sentinel),
.ty = dst_elem_type,
} };
break :src_array_ptr;
@@ -27988,7 +27988,7 @@ fn coerceExtra(
// could be null.
const src_elem_ty = inst_ty.childType(mod);
const dest_is_mut = !dest_info.flags.is_const;
- const dst_elem_type = dest_info.child.toType();
+ const dst_elem_type = Type.fromInterned(dest_info.child);
switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
.ok => {},
else => break :src_c_ptr,
@@ -28053,8 +28053,8 @@ fn coerceExtra(
const inst_info = inst_ty.ptrInfo(mod);
switch (try sema.coerceInMemoryAllowed(
block,
- dest_info.child.toType(),
- inst_info.child.toType(),
+ Type.fromInterned(dest_info.child),
+ Type.fromInterned(inst_info.child),
!dest_info.flags.is_const,
target,
dest_ty_src,
@@ -28066,7 +28066,7 @@ fn coerceExtra(
if (inst_info.flags.size == .Slice) {
assert(dest_info.sentinel == .none);
if (inst_info.sentinel == .none or
- inst_info.sentinel != (try mod.intValue(inst_info.child.toType(), 0)).toIntern())
+ inst_info.sentinel != (try mod.intValue(Type.fromInterned(inst_info.child), 0)).toIntern())
break :p;
const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
@@ -28076,7 +28076,7 @@ fn coerceExtra(
},
else => {},
},
- .One => switch (dest_info.child.toType().zigTypeTag(mod)) {
+ .One => switch (Type.fromInterned(dest_info.child).zigTypeTag(mod)) {
.Union => {
// pointer to anonymous struct to pointer to union
if (inst_ty.isSinglePointer(mod) and
@@ -28139,7 +28139,7 @@ fn coerceExtra(
try mod.intern_pool.getCoercedInts(
mod.gpa,
mod.intern_pool.indexToKey(
- (try dest_info.child.toType().lazyAbiAlignment(mod)).toIntern(),
+ (try Type.fromInterned(dest_info.child).lazyAbiAlignment(mod)).toIntern(),
).int,
.usize_type,
) },
@@ -28166,8 +28166,8 @@ fn coerceExtra(
switch (try sema.coerceInMemoryAllowed(
block,
- dest_info.child.toType(),
- inst_info.child.toType(),
+ Type.fromInterned(dest_info.child),
+ Type.fromInterned(inst_info.child),
!dest_info.flags.is_const,
target,
dest_ty_src,
@@ -28179,7 +28179,7 @@ fn coerceExtra(
if (dest_info.sentinel == .none or inst_info.sentinel == .none or
Air.internedToRef(dest_info.sentinel) !=
- try sema.coerceInMemory(inst_info.sentinel.toValue(), dest_info.child.toType()))
+ try sema.coerceInMemory(Value.fromInterned(inst_info.sentinel), Type.fromInterned(dest_info.child)))
break :p;
const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
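
The sentinel comparisons in these hunks encode the usual rule that coercion may drop a sentinel but never invent one:

    const std = @import("std");

    pub fn main() void {
        const s: [:0]const u8 = "hi";
        // Dropping the 0 sentinel is fine; the reverse direction is rejected.
        const t: []const u8 = s;
        std.debug.print("{s} {s}\n", .{ s, t });
    }
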
@@ -29220,8 +29220,8 @@ fn coerceInMemoryAllowedFns(
switch (src_info.return_type) {
.noreturn_type, .generic_poison_type => {},
else => {
- const dest_return_type = dest_info.return_type.toType();
- const src_return_type = src_info.return_type.toType();
+ const dest_return_type = Type.fromInterned(dest_info.return_type);
+ const src_return_type = Type.fromInterned(src_info.return_type);
const rt = try sema.coerceInMemoryAllowed(block, dest_return_type, src_return_type, false, target, dest_src, src_src);
if (rt != .ok) {
return InMemoryCoercionResult{ .fn_return_type = .{
@@ -29253,8 +29253,8 @@ fn coerceInMemoryAllowedFns(
};
for (0..params_len) |param_i| {
- const dest_param_ty = dest_info.param_types.get(ip)[param_i].toType();
- const src_param_ty = src_info.param_types.get(ip)[param_i].toType();
+ const dest_param_ty = Type.fromInterned(dest_info.param_types.get(ip)[param_i]);
+ const src_param_ty = Type.fromInterned(src_info.param_types.get(ip)[param_i]);
const param_i_small: u5 = @intCast(param_i);
if (dest_info.paramIsComptime(param_i_small) != src_info.paramIsComptime(param_i_small)) {
@@ -29329,12 +29329,12 @@ fn coerceInMemoryAllowedPtrs(
} };
}
- const child = try sema.coerceInMemoryAllowed(block, dest_info.child.toType(), src_info.child.toType(), !dest_info.flags.is_const, target, dest_src, src_src);
+ const child = try sema.coerceInMemoryAllowed(block, Type.fromInterned(dest_info.child), Type.fromInterned(src_info.child), !dest_info.flags.is_const, target, dest_src, src_src);
if (child != .ok) {
return InMemoryCoercionResult{ .ptr_child = .{
.child = try child.dupe(sema.arena),
- .actual = src_info.child.toType(),
- .wanted = dest_info.child.toType(),
+ .actual = Type.fromInterned(src_info.child),
+ .wanted = Type.fromInterned(dest_info.child),
} };
}
@@ -29369,13 +29369,13 @@ fn coerceInMemoryAllowedPtrs(
return InMemoryCoercionResult{ .ptr_sentinel = .{
.actual = switch (src_info.sentinel) {
.none => Value.@"unreachable",
- else => src_info.sentinel.toValue(),
+ else => Value.fromInterned(src_info.sentinel),
},
.wanted = switch (dest_info.sentinel) {
.none => Value.@"unreachable",
- else => dest_info.sentinel.toValue(),
+ else => Value.fromInterned(dest_info.sentinel),
},
- .ty = dest_info.child.toType(),
+ .ty = Type.fromInterned(dest_info.child),
} };
}
@@ -29389,12 +29389,12 @@ fn coerceInMemoryAllowedPtrs(
const src_align = if (src_info.flags.alignment != .none)
src_info.flags.alignment
else
- try sema.typeAbiAlignment(src_info.child.toType());
+ try sema.typeAbiAlignment(Type.fromInterned(src_info.child));
const dest_align = if (dest_info.flags.alignment != .none)
dest_info.flags.alignment
else
- try sema.typeAbiAlignment(dest_info.child.toType());
+ try sema.typeAbiAlignment(Type.fromInterned(dest_info.child));
if (dest_align.compare(.gt, src_align)) {
return InMemoryCoercionResult{ .ptr_alignment = .{
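
The alignment comparison above permits coercion only toward lesser (or equal) alignment; in user code:

    const std = @import("std");

    pub fn main() void {
        var x: u32 align(8) = 7;
        // An over-aligned pointer may coerce down, never up.
        const p: *align(4) u32 = &x;
        std.debug.print("{}\n", .{p.*});
    }
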
@@ -29719,14 +29719,14 @@ fn storePtrVal(
.opv => {},
.direct => |val_ptr| {
if (mut_kit.mut_decl.runtime_index == .comptime_field_ptr) {
- val_ptr.* = (try val_ptr.intern(operand_ty, mod)).toValue();
+ val_ptr.* = Value.fromInterned((try val_ptr.intern(operand_ty, mod)));
if (!operand_val.eql(val_ptr.*, operand_ty, mod)) {
// TODO use failWithInvalidComptimeFieldStore
return sema.fail(block, src, "value stored in comptime field does not match the default value of the field", .{});
}
return;
}
- val_ptr.* = (try operand_val.intern(operand_ty, mod)).toValue();
+ val_ptr.* = Value.fromInterned((try operand_val.intern(operand_ty, mod)));
},
.reinterpret => |reinterpret| {
const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(mod));
@@ -29756,7 +29756,7 @@ fn storePtrVal(
error.IllDefinedMemoryLayout => unreachable,
error.Unimplemented => return sema.fail(block, src, "TODO: implement readFromMemory for type '{}'", .{mut_kit.ty.fmt(mod)}),
};
- reinterpret.val_ptr.* = (try val.intern(mut_kit.ty, mod)).toValue();
+ reinterpret.val_ptr.* = Value.fromInterned((try val.intern(mut_kit.ty, mod)));
},
.bad_decl_ty, .bad_ptr_ty => {
// TODO show the decl declaration site in a note and explain whether the decl
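
For orientation, storePtrVal is the sink for stores through pointers during comptime execution, with each mutation re-interned as seen above; the user-level behavior it services:

    const std = @import("std");

    comptime {
        var x: u32 = 1;
        const p = &x;
        p.* = 2; // a comptime store through a pointer
        std.debug.assert(x == 2);
    }

    pub fn main() void {}
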
@@ -29817,15 +29817,15 @@ fn beginComptimePtrMutation(
},
.comptime_field => |comptime_field| {
const duped = try sema.arena.create(Value);
- duped.* = comptime_field.toValue();
- return sema.beginComptimePtrMutationInner(block, src, mod.intern_pool.typeOf(comptime_field).toType(), duped, ptr_elem_ty, .{
+ duped.* = Value.fromInterned(comptime_field);
+ return sema.beginComptimePtrMutationInner(block, src, Type.fromInterned(mod.intern_pool.typeOf(comptime_field)), duped, ptr_elem_ty, .{
.decl = undefined,
.runtime_index = .comptime_field_ptr,
});
},
.eu_payload => |eu_ptr| {
- const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod);
- var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.toValue(), eu_ty);
+ const eu_ty = Type.fromInterned(mod.intern_pool.typeOf(eu_ptr)).childType(mod);
+ var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(eu_ptr), eu_ty);
switch (parent.pointee) {
.opv => unreachable,
.direct => |val_ptr| {
@@ -29844,7 +29844,7 @@ fn beginComptimePtrMutation(
const payload = try sema.arena.create(Value.Payload.SubValue);
payload.* = .{
.base = .{ .tag = .eu_payload },
- .data = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(),
+ .data = Value.fromInterned((try mod.intern(.{ .undef = payload_ty.toIntern() }))),
};
val_ptr.* = Value.initPayload(&payload.base);
@@ -29867,8 +29867,8 @@ fn beginComptimePtrMutation(
}
},
.opt_payload => |opt_ptr| {
- const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod);
- var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.toValue(), opt_ty);
+ const opt_ty = Type.fromInterned(mod.intern_pool.typeOf(opt_ptr)).childType(mod);
+ var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(opt_ptr), opt_ty);
switch (parent.pointee) {
.opv => unreachable,
.direct => |val_ptr| {
@@ -29896,7 +29896,7 @@ fn beginComptimePtrMutation(
const payload = try sema.arena.create(Value.Payload.SubValue);
payload.* = .{
.base = .{ .tag = .opt_payload },
- .data = payload_val.toValue(),
+ .data = Value.fromInterned(payload_val),
};
val_ptr.* = Value.initPayload(&payload.base);
@@ -29920,8 +29920,8 @@ fn beginComptimePtrMutation(
}
},
.elem => |elem_ptr| {
- const base_elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod);
- var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.base.toValue(), base_elem_ty);
+ const base_elem_ty = Type.fromInterned(mod.intern_pool.typeOf(elem_ptr.base)).elemType2(mod);
+ var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(elem_ptr.base), base_elem_ty);
switch (parent.pointee) {
.opv => unreachable,
@@ -30017,7 +30017,7 @@ fn beginComptimePtrMutation(
const array_len_including_sentinel =
try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
const elems = try arena.alloc(Value, array_len_including_sentinel);
- @memset(elems, repeated_val.toValue());
+ @memset(elems, Value.fromInterned(repeated_val));
val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
@@ -30054,7 +30054,7 @@ fn beginComptimePtrMutation(
const array_len_including_sentinel =
try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
const elems = try arena.alloc(Value, array_len_including_sentinel);
- @memset(elems, (try mod.intern(.{ .undef = elem_ty.toIntern() })).toValue());
+ @memset(elems, Value.fromInterned((try mod.intern(.{ .undef = elem_ty.toIntern() }))));
val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
@@ -30117,10 +30117,10 @@ fn beginComptimePtrMutation(
}
},
.field => |field_ptr| {
- const base_child_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod);
+ const base_child_ty = Type.fromInterned(mod.intern_pool.typeOf(field_ptr.base)).childType(mod);
const field_index: u32 = @intCast(field_ptr.index);
- var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.base.toValue(), base_child_ty);
+ var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(field_ptr.base), base_child_ty);
switch (parent.pointee) {
.opv => unreachable,
.direct => |val_ptr| switch (val_ptr.ip_index) {
@@ -30238,9 +30238,9 @@ fn beginComptimePtrMutation(
switch (parent.ty.zigTypeTag(mod)) {
.Struct => {
const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod));
- for (fields, 0..) |*field, i| field.* = (try mod.intern(.{
+ for (fields, 0..) |*field, i| field.* = Value.fromInterned((try mod.intern(.{
.undef = parent.ty.structFieldType(i, mod).toIntern(),
- })).toValue();
+ })));
val_ptr.* = try Value.Tag.aggregate.create(arena, fields);
@@ -30260,7 +30260,7 @@ fn beginComptimePtrMutation(
const payload_ty = parent.ty.structFieldType(field_index, mod);
payload.* = .{ .data = .{
.tag = try mod.enumValueFieldIndex(tag_ty, field_index),
- .val = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(),
+ .val = Value.fromInterned((try mod.intern(.{ .undef = payload_ty.toIntern() }))),
} };
val_ptr.* = Value.initPayload(&payload.base);
@@ -30279,8 +30279,8 @@ fn beginComptimePtrMutation(
assert(parent.ty.isSlice(mod));
const ptr_ty = parent.ty.slicePtrFieldType(mod);
val_ptr.* = try Value.Tag.slice.create(arena, .{
- .ptr = (try mod.intern(.{ .undef = ptr_ty.toIntern() })).toValue(),
- .len = (try mod.intern(.{ .undef = .usize_type })).toValue(),
+ .ptr = Value.fromInterned((try mod.intern(.{ .undef = ptr_ty.toIntern() }))),
+ .len = Value.fromInterned((try mod.intern(.{ .undef = .usize_type }))),
});
switch (field_index) {
@@ -30449,9 +30449,9 @@ fn beginComptimePtrLoad(
},
.anon_decl => |anon_decl| blk: {
const decl_val = anon_decl.val;
- if (decl_val.toValue().getVariable(mod) != null) return error.RuntimeLoad;
- const decl_ty = ip.typeOf(decl_val).toType();
- const decl_tv: TypedValue = .{ .ty = decl_ty, .val = decl_val.toValue() };
+ if (Value.fromInterned(decl_val).getVariable(mod) != null) return error.RuntimeLoad;
+ const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
+ const decl_tv: TypedValue = .{ .ty = decl_ty, .val = Value.fromInterned(decl_val) };
const layout_defined = decl_ty.hasWellDefinedLayout(mod);
break :blk ComptimePtrLoadKit{
.parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null,
@@ -30462,13 +30462,13 @@ fn beginComptimePtrLoad(
},
.int => return error.RuntimeLoad,
.eu_payload, .opt_payload => |container_ptr| blk: {
- const container_ty = ip.typeOf(container_ptr).toType().childType(mod);
+ const container_ty = Type.fromInterned(ip.typeOf(container_ptr)).childType(mod);
const payload_ty = switch (ptr.addr) {
.eu_payload => container_ty.errorUnionPayload(mod),
.opt_payload => container_ty.optionalChild(mod),
else => unreachable,
};
- var deref = try sema.beginComptimePtrLoad(block, src, container_ptr.toValue(), container_ty);
+ var deref = try sema.beginComptimePtrLoad(block, src, Value.fromInterned(container_ptr), container_ty);
// eu_payload and opt_payload never have a well-defined layout
if (deref.parent != null) {
@@ -30484,7 +30484,7 @@ fn beginComptimePtrLoad(
const payload_val = switch (tv.val.ip_index) {
.none => tv.val.cast(Value.Payload.SubValue).?.data,
.null_value => return sema.fail(block, src, "attempt to use null value", .{}),
- else => switch (ip.indexToKey(tv.val.toIntern())) {
+ else => Value.fromInterned(switch (ip.indexToKey(tv.val.toIntern())) {
.error_union => |error_union| switch (error_union.val) {
.err_name => |err_name| return sema.fail(
block,
@@ -30499,7 +30499,7 @@ fn beginComptimePtrLoad(
else => |payload| payload,
},
else => unreachable,
- }.toValue(),
+ }),
};
tv.* = TypedValue{ .ty = payload_ty, .val = payload_val };
break :blk deref;
@@ -30509,24 +30509,24 @@ fn beginComptimePtrLoad(
break :blk deref;
},
.comptime_field => |comptime_field| blk: {
- const field_ty = ip.typeOf(comptime_field).toType();
+ const field_ty = Type.fromInterned(ip.typeOf(comptime_field));
break :blk ComptimePtrLoadKit{
.parent = null,
- .pointee = .{ .ty = field_ty, .val = comptime_field.toValue() },
+ .pointee = .{ .ty = field_ty, .val = Value.fromInterned(comptime_field) },
.is_mutable = false,
.ty_without_well_defined_layout = field_ty,
};
},
.elem => |elem_ptr| blk: {
- const elem_ty = ip.typeOf(elem_ptr.base).toType().elemType2(mod);
- var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null);
+ const elem_ty = Type.fromInterned(ip.typeOf(elem_ptr.base)).elemType2(mod);
+ var deref = try sema.beginComptimePtrLoad(block, src, Value.fromInterned(elem_ptr.base), null);
// This code assumes that elem_ptrs have been "flattened" in order for direct dereference
// to succeed, meaning that elem ptrs of the same elem_ty are coalesced. Here we check that
// our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened"
switch (ip.indexToKey(elem_ptr.base)) {
.ptr => |base_ptr| switch (base_ptr.addr) {
- .elem => |base_elem| assert(!ip.typeOf(base_elem.base).toType().elemType2(mod).eql(elem_ty, mod)),
+ .elem => |base_elem| assert(!Type.fromInterned(ip.typeOf(base_elem.base)).elemType2(mod).eql(elem_ty, mod)),
else => {},
},
else => {},
@@ -30598,8 +30598,8 @@ fn beginComptimePtrLoad(
},
.field => |field_ptr| blk: {
const field_index: u32 = @intCast(field_ptr.index);
- const container_ty = ip.typeOf(field_ptr.base).toType().childType(mod);
- var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty);
+ const container_ty = Type.fromInterned(ip.typeOf(field_ptr.base)).childType(mod);
+ var deref = try sema.beginComptimePtrLoad(block, src, Value.fromInterned(field_ptr.base), container_ty);
if (container_ty.hasWellDefinedLayout(mod)) {
const struct_obj = mod.typeToStruct(container_ty);
@@ -30637,7 +30637,7 @@ fn beginComptimePtrLoad(
},
Value.slice_len_index => TypedValue{
.ty = Type.usize,
- .val = ip.indexToKey(try tv.val.intern(tv.ty, mod)).ptr.len.toValue(),
+ .val = Value.fromInterned(ip.indexToKey(try tv.val.intern(tv.ty, mod)).ptr.len),
},
else => unreachable,
};
@@ -30653,7 +30653,7 @@ fn beginComptimePtrLoad(
},
.opt => |opt| switch (opt.val) {
.none => return sema.fail(block, src, "attempt to use null value", .{}),
- else => |payload| try sema.beginComptimePtrLoad(block, src, payload.toValue(), null),
+ else => |payload| try sema.beginComptimePtrLoad(block, src, Value.fromInterned(payload), null),
},
else => unreachable,
};
@@ -30822,9 +30822,9 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul
const mod = sema.mod;
const dest_info = dest_ty.ptrInfo(mod);
const inst_info = inst_ty.ptrInfo(mod);
- const len0 = (inst_info.child.toType().zigTypeTag(mod) == .Array and (inst_info.child.toType().arrayLenIncludingSentinel(mod) == 0 or
- (inst_info.child.toType().arrayLen(mod) == 0 and dest_info.sentinel == .none and dest_info.flags.size != .C and dest_info.flags.size != .Many))) or
- (inst_info.child.toType().isTuple(mod) and inst_info.child.toType().structFieldCount(mod) == 0);
+ const len0 = (Type.fromInterned(inst_info.child).zigTypeTag(mod) == .Array and (Type.fromInterned(inst_info.child).arrayLenIncludingSentinel(mod) == 0 or
+ (Type.fromInterned(inst_info.child).arrayLen(mod) == 0 and dest_info.sentinel == .none and dest_info.flags.size != .C and dest_info.flags.size != .Many))) or
+ (Type.fromInterned(inst_info.child).isTuple(mod) and Type.fromInterned(inst_info.child).structFieldCount(mod) == 0);
const ok_cv_qualifiers =
((!inst_info.flags.is_const or dest_info.flags.is_const) or len0) and
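
The len0 computation above forgives qualifier and sentinel mismatches when the pointee holds zero bytes; presumably the visible effect is coercions like this one:

    const std = @import("std");

    pub fn main() void {
        // A const empty array may feed a mutable slice: nothing can be mutated.
        const s: []u8 = &.{};
        std.debug.print("{}\n", .{s.len});
    }
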
@@ -30852,12 +30852,12 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul
const inst_align = if (inst_info.flags.alignment != .none)
inst_info.flags.alignment
else
- inst_info.child.toType().abiAlignment(mod);
+ Type.fromInterned(inst_info.child).abiAlignment(mod);
const dest_align = if (dest_info.flags.alignment != .none)
dest_info.flags.alignment
else
- dest_info.child.toType().abiAlignment(mod);
+ Type.fromInterned(dest_info.child).abiAlignment(mod);
if (dest_align.compare(.gt, inst_align)) {
in_memory_result.* = .{ .ptr_alignment = .{
@@ -30884,7 +30884,7 @@ fn coerceCompatiblePtrs(
}
// The comptime Value representation is compatible with both types.
return Air.internedToRef(
- (try mod.getCoerced((try val.intern(inst_ty, mod)).toValue(), dest_ty)).toIntern(),
+ (try mod.getCoerced(Value.fromInterned((try val.intern(inst_ty, mod))), dest_ty)).toIntern(),
);
}
try sema.requireRuntimeBlock(block, inst_src, null);
@@ -30950,7 +30950,7 @@ fn coerceEnumToUnion(
};
const union_obj = mod.typeToUnion(union_ty).?;
- const field_ty = union_obj.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
try sema.resolveTypeFields(field_ty);
if (field_ty.zigTypeTag(mod) == .NoReturn) {
const msg = msg: {
@@ -31007,7 +31007,7 @@ fn coerceEnumToUnion(
errdefer if (msg) |some| some.destroy(sema.gpa);
for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
- if (field_ty.toType().zigTypeTag(mod) == .NoReturn) {
+ if (Type.fromInterned(field_ty).zigTypeTag(mod) == .NoReturn) {
const err_msg = msg orelse try sema.errMsg(
block,
inst_src,
@@ -31042,7 +31042,7 @@ fn coerceEnumToUnion(
for (0..union_obj.field_names.len) |field_index| {
const field_name = union_obj.field_names.get(ip)[field_index];
- const field_ty = union_obj.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' has type '{}'", .{
field_name.fmt(ip),
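
coerceEnumToUnion can only succeed when the selected field carries no runtime payload; otherwise it emits the error built above, listing each payload-carrying field. A sketch of the accepted case:

    const std = @import("std");

    const E = enum { a, b };
    const U = union(E) { a: void, b: void };

    pub fn main() void {
        const e: E = .b;
        const u: U = e; // ok: field 'b' is zero-bit
        std.debug.print("{s}\n", .{@tagName(u)});
    }
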
@@ -31320,7 +31320,7 @@ fn coerceTupleToArrayPtrs(
const mod = sema.mod;
const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src);
const ptr_info = ptr_array_ty.ptrInfo(mod);
- const array_ty = ptr_info.child.toType();
+ const array_ty = Type.fromInterned(ptr_info.child);
const array_inst = try sema.coerceTupleToArray(block, array_ty, array_ty_src, tuple, tuple_src);
if (ptr_info.flags.alignment != .none) {
return sema.fail(block, array_ty_src, "TODO: override the alignment of the array decl we create here", .{});
@@ -31372,7 +31372,7 @@ fn coerceTupleToStruct(
else => unreachable,
};
const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src);
- const field_ty = struct_type.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
const coerced = try sema.coerce(block, field_ty, elem_ref, field_src);
field_refs[field_index] = coerced;
@@ -31383,7 +31383,7 @@ fn coerceTupleToStruct(
});
};
- const field_init = struct_type.field_inits.get(ip)[field_index].toValue();
+ const field_init = Value.fromInterned(struct_type.field_inits.get(ip)[field_index]);
if (!init_val.eql(field_init, field_ty, sema.mod)) {
return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
}
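
This path also serves positional tuple-to-struct coercion, with comptime fields checked against their required values as above; a sketch, assuming the positional form:

    const std = @import("std");

    const Point = struct { x: i32, y: i32 };

    pub fn main() void {
        const tup = .{ @as(i32, 1), @as(i32, 2) };
        // Each tuple element is coerced against the matching field type.
        const p: Point = tup;
        std.debug.print("{} {}\n", .{ p.x, p.y });
    }
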
@@ -31502,7 +31502,7 @@ fn coerceTupleToTuple(
const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src);
const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
- const coerced = try sema.coerce(block, field_ty.toType(), elem_ref, field_src);
+ const coerced = try sema.coerce(block, Type.fromInterned(field_ty), elem_ref, field_src);
field_refs[field_index] = coerced;
if (default_val != .none) {
const init_val = (try sema.resolveValue(coerced)) orelse {
@@ -31511,7 +31511,7 @@ fn coerceTupleToTuple(
});
};
- if (!init_val.eql(default_val.toValue(), field_ty.toType(), sema.mod)) {
+ if (!init_val.eql(Value.fromInterned(default_val), Type.fromInterned(field_ty), sema.mod)) {
return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
}
}
@@ -31658,13 +31658,13 @@ fn ensureFuncBodyAnalyzed(sema: *Sema, func: InternPool.Index) CompileError!void
fn optRefValue(sema: *Sema, opt_val: ?Value) !Value {
const mod = sema.mod;
const ptr_anyopaque_ty = try mod.singleConstPtrType(Type.anyopaque);
- return (try mod.intern(.{ .opt = .{
+ return Value.fromInterned((try mod.intern(.{ .opt = .{
.ty = (try mod.optionalType(ptr_anyopaque_ty.toIntern())).toIntern(),
.val = if (opt_val) |val| (try mod.getCoerced(
- (try sema.refValue(val.toIntern())).toValue(),
+ Value.fromInterned((try sema.refValue(val.toIntern()))),
ptr_anyopaque_ty,
)).toIntern() else .none,
- } })).toValue();
+ } })));
}
fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref {
@@ -32378,7 +32378,7 @@ fn analyzeSlice(
if (!new_ptr_val.isUndef(mod)) {
return Air.internedToRef((try mod.getCoerced(
- (try new_ptr_val.intern(new_ptr_ty, mod)).toValue(),
+ Value.fromInterned((try new_ptr_val.intern(new_ptr_ty, mod))),
return_ty,
)).toIntern());
}
@@ -33362,10 +33362,10 @@ fn resolvePeerTypesInner(
ty_ptr.* = ty.errorUnionPayload(mod);
if (val_ptr.*) |eu_val| switch (ip.indexToKey(eu_val.toIntern())) {
.error_union => |eu| switch (eu.val) {
- .payload => |payload_ip| val_ptr.* = payload_ip.toValue(),
+ .payload => |payload_ip| val_ptr.* = Value.fromInterned(payload_ip),
.err_name => val_ptr.* = null,
},
- .undef => val_ptr.* = (try sema.mod.intern(.{ .undef = ty_ptr.*.?.toIntern() })).toValue(),
+ .undef => val_ptr.* = Value.fromInterned((try sema.mod.intern(.{ .undef = ty_ptr.*.?.toIntern() }))),
else => unreachable,
};
break :blk set_ty;
@@ -33606,7 +33606,7 @@ fn resolvePeerTypesInner(
};
// Try peer -> cur, then cur -> peer
- ptr_info.child = ((try sema.resolvePairInMemoryCoercible(block, src, ptr_info.child.toType(), peer_info.child.toType())) orelse {
+ ptr_info.child = ((try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) orelse {
return .{ .conflict = .{
.peer_idx_a = first_idx,
.peer_idx_b = i,
@@ -33630,12 +33630,12 @@ fn resolvePeerTypesInner(
if (ptr_info.flags.alignment != .none)
ptr_info.flags.alignment
else
- ptr_info.child.toType().abiAlignment(mod),
+ Type.fromInterned(ptr_info.child).abiAlignment(mod),
if (peer_info.flags.alignment != .none)
peer_info.flags.alignment
else
- peer_info.child.toType().abiAlignment(mod),
+ Type.fromInterned(peer_info.child).abiAlignment(mod),
);
if (ptr_info.flags.address_space != peer_info.flags.address_space) {
return .{ .conflict = .{
@@ -33714,12 +33714,12 @@ fn resolvePeerTypesInner(
if (ptr_info.flags.alignment != .none)
ptr_info.flags.alignment
else
- try sema.typeAbiAlignment(ptr_info.child.toType()),
+ try sema.typeAbiAlignment(Type.fromInterned(ptr_info.child)),
if (peer_info.flags.alignment != .none)
peer_info.flags.alignment
else
- try sema.typeAbiAlignment(peer_info.child.toType()),
+ try sema.typeAbiAlignment(Type.fromInterned(peer_info.child)),
);
if (ptr_info.flags.address_space != peer_info.flags.address_space) {
@@ -33754,8 +33754,8 @@ fn resolvePeerTypesInner(
};
// We abstract array handling slightly so that tuple pointers can work like array pointers
- const peer_pointee_array = sema.typeIsArrayLike(peer_info.child.toType());
- const cur_pointee_array = sema.typeIsArrayLike(ptr_info.child.toType());
+ const peer_pointee_array = sema.typeIsArrayLike(Type.fromInterned(peer_info.child));
+ const cur_pointee_array = sema.typeIsArrayLike(Type.fromInterned(ptr_info.child));
// This switch is just responsible for deciding the size and pointee (not including
// single-pointer array sentinel).
@@ -33763,7 +33763,7 @@ fn resolvePeerTypesInner(
switch (peer_info.flags.size) {
.One => switch (ptr_info.flags.size) {
.One => {
- if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.child.toType(), peer_info.child.toType())) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
ptr_info.child = pointee.toIntern();
break :good;
}
@@ -33805,7 +33805,7 @@ fn resolvePeerTypesInner(
.Many => {
// Only works for *[n]T + [*]T -> [*]T
const arr = peer_pointee_array orelse return generic_err;
- if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.child.toType(), arr.elem_ty)) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), arr.elem_ty)) |pointee| {
ptr_info.child = pointee.toIntern();
break :good;
}
@@ -33818,7 +33818,7 @@ fn resolvePeerTypesInner(
.Slice => {
// Only works for *[n]T + []T -> []T
const arr = peer_pointee_array orelse return generic_err;
- if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.child.toType(), arr.elem_ty)) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), arr.elem_ty)) |pointee| {
ptr_info.child = pointee.toIntern();
break :good;
}
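
These size/pointee cases follow the commented rules; the *[n]T + []T -> []T case, for instance, is observable directly:

    const std = @import("std");

    pub fn main() void {
        var arr: [3]u8 = .{ 1, 2, 3 };
        const slice: []const u8 = &.{ 4, 5 };
        // Peer type resolution of *[3]u8 and []const u8 yields []const u8.
        const T = @TypeOf(&arr, slice);
        std.debug.print("{s}\n", .{@typeName(T)});
    }
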
@@ -33834,7 +33834,7 @@ fn resolvePeerTypesInner(
.One => {
// Only works for [*]T + *[n]T -> [*]T
const arr = cur_pointee_array orelse return generic_err;
- if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, peer_info.child.toType())) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, Type.fromInterned(peer_info.child))) |pointee| {
ptr_info.flags.size = .Many;
ptr_info.child = pointee.toIntern();
break :good;
@@ -33848,7 +33848,7 @@ fn resolvePeerTypesInner(
return generic_err;
},
.Many => {
- if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.child.toType(), peer_info.child.toType())) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
ptr_info.child = pointee.toIntern();
break :good;
}
@@ -33863,7 +33863,7 @@ fn resolvePeerTypesInner(
} };
}
// Okay, then works for [*]T + "[]T" -> [*]T
- if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.child.toType(), peer_info.child.toType())) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
ptr_info.flags.size = .Many;
ptr_info.child = pointee.toIntern();
break :good;
@@ -33876,7 +33876,7 @@ fn resolvePeerTypesInner(
.One => {
// Only works for []T + *[n]T -> []T
const arr = cur_pointee_array orelse return generic_err;
- if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, peer_info.child.toType())) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, arr.elem_ty, Type.fromInterned(peer_info.child))) |pointee| {
ptr_info.flags.size = .Slice;
ptr_info.child = pointee.toIntern();
break :good;
@@ -33894,7 +33894,7 @@ fn resolvePeerTypesInner(
return generic_err;
},
.Slice => {
- if (try sema.resolvePairInMemoryCoercible(block, src, ptr_info.child.toType(), peer_info.child.toType())) |pointee| {
+ if (try sema.resolvePairInMemoryCoercible(block, src, Type.fromInterned(ptr_info.child), Type.fromInterned(peer_info.child))) |pointee| {
ptr_info.child = pointee.toIntern();
break :good;
}
@@ -34331,7 +34331,7 @@ fn resolvePeerTypesInner(
break;
};
const uncoerced_field = Air.internedToRef(uncoerced_field_val.toIntern());
- const coerced_inst = sema.coerceExtra(block, field_ty.toType(), uncoerced_field, src, .{ .report_err = false }) catch |err| switch (err) {
+ const coerced_inst = sema.coerceExtra(block, Type.fromInterned(field_ty.*), uncoerced_field, src, .{ .report_err = false }) catch |err| switch (err) {
// It's possible for PTR to give false positives. Just give up on making this a comptime field, we'll get an error later anyway
error.NotCoercible => {
comptime_val = null;
@@ -34344,7 +34344,7 @@ fn resolvePeerTypesInner(
comptime_val = coerced_val;
continue;
};
- if (!coerced_val.eql(existing, field_ty.toType(), mod)) {
+ if (!coerced_val.eql(existing, Type.fromInterned(field_ty.*), mod)) {
comptime_val = null;
break;
}
@@ -34359,7 +34359,7 @@ fn resolvePeerTypesInner(
.values = field_vals,
});
- return .{ .success = final_ty.toType() };
+ return .{ .success = Type.fromInterned(final_ty) };
},
.exact => {
@@ -34460,15 +34460,15 @@ pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void {
const ip = &mod.intern_pool;
const fn_ty_info = mod.typeToFunc(fn_ty).?;
- try sema.resolveTypeFully(fn_ty_info.return_type.toType());
+ try sema.resolveTypeFully(Type.fromInterned(fn_ty_info.return_type));
- if (mod.comp.bin_file.options.error_return_tracing and fn_ty_info.return_type.toType().isError(mod)) {
+ if (mod.comp.bin_file.options.error_return_tracing and Type.fromInterned(fn_ty_info.return_type).isError(mod)) {
// Ensure the type exists so that backends can assume that.
_ = try sema.getBuiltinType("StackTrace");
}
for (0..fn_ty_info.param_types.len) |i| {
- try sema.resolveTypeFully(fn_ty_info.param_types.get(ip)[i].toType());
+ try sema.resolveTypeFully(Type.fromInterned(fn_ty_info.param_types.get(ip)[i]));
}
}
@@ -34480,20 +34480,20 @@ fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value {
.int => |int| switch (int.storage) {
.u64, .i64, .big_int => return val,
.lazy_align, .lazy_size => return mod.intValue(
- int.ty.toType(),
+ Type.fromInterned(int.ty),
(try val.getUnsignedIntAdvanced(mod, sema)).?,
),
},
.ptr => |ptr| {
const resolved_len = switch (ptr.len) {
.none => .none,
- else => (try sema.resolveLazyValue(ptr.len.toValue())).toIntern(),
+ else => (try sema.resolveLazyValue(Value.fromInterned(ptr.len))).toIntern(),
};
switch (ptr.addr) {
.decl, .mut_decl, .anon_decl => return if (resolved_len == ptr.len)
val
else
- (try mod.intern(.{ .ptr = .{
+ Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = ptr.ty,
.addr = switch (ptr.addr) {
.decl => |decl| .{ .decl = decl },
@@ -34502,36 +34502,36 @@ fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value {
else => unreachable,
},
.len = resolved_len,
- } })).toValue(),
+ } }))),
.comptime_field => |field_val| {
const resolved_field_val =
- (try sema.resolveLazyValue(field_val.toValue())).toIntern();
+ (try sema.resolveLazyValue(Value.fromInterned(field_val))).toIntern();
return if (resolved_field_val == field_val and resolved_len == ptr.len)
val
else
- (try mod.intern(.{ .ptr = .{
+ Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = ptr.ty,
.addr = .{ .comptime_field = resolved_field_val },
.len = resolved_len,
- } })).toValue();
+ } })));
},
.int => |int| {
- const resolved_int = (try sema.resolveLazyValue(int.toValue())).toIntern();
+ const resolved_int = (try sema.resolveLazyValue(Value.fromInterned(int))).toIntern();
return if (resolved_int == int and resolved_len == ptr.len)
val
else
- (try mod.intern(.{ .ptr = .{
+ Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = ptr.ty,
.addr = .{ .int = resolved_int },
.len = resolved_len,
- } })).toValue();
+ } })));
},
.eu_payload, .opt_payload => |base| {
- const resolved_base = (try sema.resolveLazyValue(base.toValue())).toIntern();
+ const resolved_base = (try sema.resolveLazyValue(Value.fromInterned(base))).toIntern();
return if (resolved_base == base and resolved_len == ptr.len)
val
else
- (try mod.intern(.{ .ptr = .{
+ Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = ptr.ty,
.addr = switch (ptr.addr) {
.eu_payload => .{ .eu_payload = resolved_base },
@@ -34539,14 +34539,14 @@ fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value {
else => unreachable,
},
.len = ptr.len,
- } })).toValue();
+ } })));
},
.elem, .field => |base_index| {
- const resolved_base = (try sema.resolveLazyValue(base_index.base.toValue())).toIntern();
+ const resolved_base = (try sema.resolveLazyValue(Value.fromInterned(base_index.base))).toIntern();
return if (resolved_base == base_index.base and resolved_len == ptr.len)
val
else
- (try mod.intern(.{ .ptr = .{
+ Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = ptr.ty,
.addr = switch (ptr.addr) {
.elem => .{ .elem = .{
@@ -34560,7 +34560,7 @@ fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value {
else => unreachable,
},
.len = ptr.len,
- } })).toValue();
+ } })));
},
}
},
@@ -34569,37 +34569,37 @@ fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value {
.elems => |elems| {
var resolved_elems: []InternPool.Index = &.{};
for (elems, 0..) |elem, i| {
- const resolved_elem = (try sema.resolveLazyValue(elem.toValue())).toIntern();
+ const resolved_elem = (try sema.resolveLazyValue(Value.fromInterned(elem))).toIntern();
if (resolved_elems.len == 0 and resolved_elem != elem) {
resolved_elems = try sema.arena.alloc(InternPool.Index, elems.len);
@memcpy(resolved_elems[0..i], elems[0..i]);
}
if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem;
}
- return if (resolved_elems.len == 0) val else (try mod.intern(.{ .aggregate = .{
+ return if (resolved_elems.len == 0) val else Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = aggregate.ty,
.storage = .{ .elems = resolved_elems },
- } })).toValue();
+ } })));
},
.repeated_elem => |elem| {
- const resolved_elem = (try sema.resolveLazyValue(elem.toValue())).toIntern();
- return if (resolved_elem == elem) val else (try mod.intern(.{ .aggregate = .{
+ const resolved_elem = (try sema.resolveLazyValue(Value.fromInterned(elem))).toIntern();
+ return if (resolved_elem == elem) val else Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = aggregate.ty,
.storage = .{ .repeated_elem = resolved_elem },
- } })).toValue();
+ } })));
},
},
.un => |un| {
- const resolved_tag = (try sema.resolveLazyValue(un.tag.toValue())).toIntern();
- const resolved_val = (try sema.resolveLazyValue(un.val.toValue())).toIntern();
+ const resolved_tag = (try sema.resolveLazyValue(Value.fromInterned(un.tag))).toIntern();
+ const resolved_val = (try sema.resolveLazyValue(Value.fromInterned(un.val))).toIntern();
return if (resolved_tag == un.tag and resolved_val == un.val)
val
else
- (try mod.intern(.{ .un = .{
+ Value.fromInterned((try mod.intern(.{ .un = .{
.ty = un.ty,
.tag = resolved_tag,
.val = resolved_val,
- } })).toValue();
+ } })));
},
else => return val,
}
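
For orientation: the lazy_align/lazy_size integers and the lazy pointer/aggregate storage resolved above presumably back ordinary builtins like these until the type's layout is known:

    const std = @import("std");

    const S = extern struct { a: u8, b: u64 };

    pub fn main() void {
        std.debug.print("{} {}\n", .{ @sizeOf(S), @alignOf(S) });
    }
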
@@ -34641,9 +34641,9 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
const ip = &mod.intern_pool;
for (0..info.param_types.len) |i| {
const param_ty = info.param_types.get(ip)[i];
- try sema.resolveTypeLayout(param_ty.toType());
+ try sema.resolveTypeLayout(Type.fromInterned(param_ty));
}
- try sema.resolveTypeLayout(info.return_type.toType());
+ try sema.resolveTypeLayout(Type.fromInterned(info.return_type));
},
else => {},
}
@@ -34689,7 +34689,7 @@ pub fn resolveStructAlignment(
var result: Alignment = .@"1";
for (0..struct_type.field_types.len) |i| {
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty))
continue;
const field_align = try sema.structFieldAlignment(
@@ -34736,7 +34736,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
var big_align: Alignment = .@"1";
for (aligns, sizes, 0..) |*field_align, *field_size, i| {
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) {
struct_type.offsets.get(ip)[i] = 0;
field_size.* = 0;
@@ -34786,7 +34786,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
const runtime_order = struct_type.runtime_order.get(ip);
for (runtime_order, 0..) |*ro, i| {
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) {
ro.* = .omitted;
} else {
@@ -34892,7 +34892,7 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) Comp
const fields_bit_sum = blk: {
var accumulator: u64 = 0;
for (0..struct_type.field_types.len) |i| {
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
accumulator += try field_ty.bitSizeAdvanced(mod, &sema);
}
break :blk accumulator;
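
fields_bit_sum is exactly the backing-integer width of the packed struct, which is observable from user code:

    const std = @import("std");

    const P = packed struct { a: u3, b: u5, c: u8 };

    pub fn main() void {
        // 3 + 5 + 8 = 16, so the backing integer is u16.
        std.debug.print("{} {s}\n", .{
            @bitSizeOf(P),
            @typeName(@typeInfo(P).Struct.backing_integer.?),
        });
    }
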
@@ -35018,7 +35018,7 @@ pub fn resolveUnionAlignment(
const union_obj = ip.loadUnionType(union_type);
var max_align: Alignment = .@"1";
for (0..union_obj.field_names.len) |field_index| {
- const field_ty = union_obj.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
const explicit_align = union_obj.fieldAlign(ip, @intCast(field_index));
@@ -35067,7 +35067,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
var max_size: u64 = 0;
var max_align: Alignment = .@"1";
for (0..union_obj.field_names.len) |field_index| {
- const field_ty = union_obj.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
max_size = @max(max_size, sema.typeAbiSize(field_ty) catch |err| switch (err) {
@@ -35089,9 +35089,9 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
}
const flags = union_obj.flagsPtr(ip);
- const has_runtime_tag = flags.runtime_tag.hasTag() and try sema.typeHasRuntimeBits(union_obj.enum_tag_ty.toType());
+ const has_runtime_tag = flags.runtime_tag.hasTag() and try sema.typeHasRuntimeBits(Type.fromInterned(union_obj.enum_tag_ty));
const size, const alignment, const padding = if (has_runtime_tag) layout: {
- const enum_tag_type = union_obj.enum_tag_ty.toType();
+ const enum_tag_type = Type.fromInterned(union_obj.enum_tag_ty);
const tag_align = try sema.typeAbiAlignment(enum_tag_type);
const tag_size = try sema.typeAbiSize(enum_tag_type);
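
The layout computed here (max payload size, then the tag, padded out to the union's alignment) shows up in the usual builtins:

    const std = @import("std");

    const U = union(enum) { a: u8, b: u64 };

    pub fn main() void {
        // Payload size 8 and alignment 8 from u64, plus space for the tag.
        std.debug.print("size={} align={}\n", .{ @sizeOf(U), @alignOf(U) });
    }
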
@@ -35161,7 +35161,7 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
.struct_type => return sema.resolveStructFully(ty),
.anon_struct_type => |tuple| {
for (tuple.types.get(ip)) |field_ty| {
- try sema.resolveTypeFully(field_ty.toType());
+ try sema.resolveTypeFully(Type.fromInterned(field_ty));
}
},
.simple_type => |simple_type| try sema.resolveSimpleType(simple_type),
@@ -35182,9 +35182,9 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
}
for (0..info.param_types.len) |i| {
const param_ty = info.param_types.get(ip)[i];
- try sema.resolveTypeFully(param_ty.toType());
+ try sema.resolveTypeFully(Type.fromInterned(param_ty));
}
- try sema.resolveTypeFully(info.return_type.toType());
+ try sema.resolveTypeFully(Type.fromInterned(info.return_type));
},
else => {},
}
@@ -35205,7 +35205,7 @@ fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void {
// See also similar code for unions.
for (0..struct_type.field_types.len) |i| {
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
try sema.resolveTypeFully(field_ty);
}
}
@@ -35230,7 +35230,7 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void {
union_obj.flagsPtr(ip).status = .fully_resolved_wip;
for (0..union_obj.field_types.len) |field_index| {
- const field_ty = union_obj.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
try sema.resolveTypeFully(field_ty);
}
union_obj.flagsPtr(ip).status = .fully_resolved;
@@ -35333,7 +35333,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void {
.type_struct_packed_inits,
=> try sema.resolveTypeFieldsStruct(ty_ip, ip.indexToKey(ty_ip).struct_type),
- .type_union => try sema.resolveTypeFieldsUnion(ty_ip.toType(), ip.indexToKey(ty_ip).union_type),
+ .type_union => try sema.resolveTypeFieldsUnion(Type.fromInterned(ty_ip), ip.indexToKey(ty_ip).union_type),
.simple_type => try sema.resolveSimpleType(ip.indexToKey(ty_ip).simple_type),
else => {},
},
@@ -35392,7 +35392,7 @@ pub fn resolveTypeFieldsStruct(
sema.gpa,
mod.declPtr(owner_decl).srcLoc(mod),
"struct '{}' depends on itself",
- .{ty.toType().fmt(mod)},
+ .{Type.fromInterned(ty).fmt(mod)},
);
return sema.failWithOwnedErrorMsg(null, msg);
}
@@ -36042,7 +36042,7 @@ fn semaStructFieldInits(
// In init bodies, the zir index of the struct itself is used
// to refer to the current field type.
- const field_ty = struct_type.field_types.get(ip)[field_i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_i]);
const type_ref = Air.internedToRef(field_ty.toIntern());
try sema.inst_map.ensureSpaceForInstructions(sema.gpa, &.{zir_index});
sema.inst_map.putAssumeCapacity(zir_index, type_ref);
@@ -36368,10 +36368,10 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un
.range = .type,
}).lazy;
const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{}' in enum '{}'", .{
- field_name.fmt(ip), union_type.tagTypePtr(ip).toType().fmt(mod),
+ field_name.fmt(ip), Type.fromInterned(union_type.tagTypePtr(ip).*).fmt(mod),
});
errdefer msg.destroy(sema.gpa);
- try sema.addDeclaredHereNote(msg, union_type.tagTypePtr(ip).toType());
+ try sema.addDeclaredHereNote(msg, Type.fromInterned(union_type.tagTypePtr(ip).*));
break :msg msg;
};
return sema.failWithOwnedErrorMsg(&block_scope, msg);
@@ -36483,11 +36483,11 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un
for (tag_info.names.get(ip), 0..) |field_name, field_index| {
if (explicit_tags_seen[field_index]) continue;
- try sema.addFieldErrNote(union_type.tagTypePtr(ip).toType(), field_index, msg, "field '{}' missing, declared here", .{
+ try sema.addFieldErrNote(Type.fromInterned(union_type.tagTypePtr(ip).*), field_index, msg, "field '{}' missing, declared here", .{
field_name.fmt(ip),
});
}
- try sema.addDeclaredHereNote(msg, union_type.tagTypePtr(ip).toType());
+ try sema.addDeclaredHereNote(msg, Type.fromInterned(union_type.tagTypePtr(ip).*));
break :msg msg;
};
return sema.failWithOwnedErrorMsg(&block_scope, msg);
@@ -36548,7 +36548,7 @@ fn generateUnionTagTypeNumbered(
});
new_decl.ty = Type.type;
- new_decl.val = enum_ty.toValue();
+ new_decl.val = Value.fromInterned(enum_ty);
try mod.finalizeAnonDecl(new_decl_index);
return enum_ty;
@@ -36600,7 +36600,7 @@ fn generateUnionTagTypeSimple(
const new_decl = mod.declPtr(new_decl_index);
new_decl.owns_tv = true;
new_decl.ty = Type.type;
- new_decl.val = enum_ty.toValue();
+ new_decl.val = Value.fromInterned(enum_ty);
try mod.finalizeAnonDecl(new_decl_index);
return enum_ty;
@@ -36869,16 +36869,16 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
=> switch (ip.indexToKey(ty.toIntern())) {
inline .array_type, .vector_type => |seq_type, seq_tag| {
const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none;
- if (seq_type.len + @intFromBool(has_sentinel) == 0) return (try mod.intern(.{ .aggregate = .{
+ if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = &.{} },
- } })).toValue();
+ } })));
- if (try sema.typeHasOnePossibleValue(seq_type.child.toType())) |opv| {
- return (try mod.intern(.{ .aggregate = .{
+ if (try sema.typeHasOnePossibleValue(Type.fromInterned(seq_type.child))) |opv| {
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .repeated_elem = opv.toIntern() },
- } })).toValue();
+ } })));
}
return null;
},
@@ -36889,10 +36889,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
if (struct_type.field_types.len == 0) {
// In this case the struct has no fields at all and
// therefore has one possible value.
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = &.{} },
- } })).toValue();
+ } })));
}
const field_vals = try sema.arena.alloc(
@@ -36905,7 +36905,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
field_val.* = struct_type.field_inits.get(ip)[i];
continue;
}
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
if (field_ty.eql(ty, mod)) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
@@ -36923,10 +36923,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
// In this case the struct has no runtime-known fields and
// therefore has one possible value.
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
- } })).toValue();
+ } })));
},
.anon_struct_type => |tuple| {
@@ -36936,22 +36936,22 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
// In this case the struct has all comptime-known fields and
// therefore has one possible value.
// TODO: write something like getCoercedInts to avoid needing to dupe
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = try sema.arena.dupe(InternPool.Index, tuple.values.get(ip)) },
- } })).toValue();
+ } })));
},
.union_type => |union_type| {
try sema.resolveTypeFields(ty);
const union_obj = ip.loadUnionType(union_type);
- const tag_val = (try sema.typeHasOnePossibleValue(union_obj.enum_tag_ty.toType())) orelse
+ const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.enum_tag_ty))) orelse
return null;
if (union_obj.field_types.len == 0) {
const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
- return only.toValue();
+ return Value.fromInterned(only);
}
- const only_field_ty = union_obj.field_types.get(ip)[0].toType();
+ const only_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
if (only_field_ty.eql(ty, mod)) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
@@ -36969,32 +36969,32 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.tag = tag_val.toIntern(),
.val = val_val.toIntern(),
} });
- return only.toValue();
+ return Value.fromInterned(only);
},
.enum_type => |enum_type| switch (enum_type.tag_mode) {
.nonexhaustive => {
if (enum_type.tag_ty == .comptime_int_type) return null;
- if (try sema.typeHasOnePossibleValue(enum_type.tag_ty.toType())) |int_opv| {
+ if (try sema.typeHasOnePossibleValue(Type.fromInterned(enum_type.tag_ty))) |int_opv| {
const only = try mod.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = int_opv.toIntern(),
} });
- return only.toValue();
+ return Value.fromInterned(only);
}
return null;
},
.auto, .explicit => {
- if (enum_type.tag_ty.toType().hasRuntimeBits(mod)) return null;
+ if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;
- return switch (enum_type.names.len) {
+ return Value.fromInterned(switch (enum_type.names.len) {
0 => try mod.intern(.{ .empty_enum_value = ty.toIntern() }),
1 => try mod.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = if (enum_type.values.len == 0)
- (try mod.intValue(enum_type.tag_ty.toType(), 0)).toIntern()
+ (try mod.intValue(Type.fromInterned(enum_type.tag_ty), 0)).toIntern()
else
try mod.intern_pool.getCoercedInts(
mod.gpa,
@@ -37003,7 +37003,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
),
} }),
else => return null,
- }.toValue();
+ });
},
},
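
typeHasOnePossibleValue is what makes such types zero-bit at runtime:

    const std = @import("std");

    pub fn main() void {
        const Empty = struct {};
        const One = enum { only };
        // Exactly one possible value each, so no runtime representation.
        std.debug.print("{} {}\n", .{ @sizeOf(Empty), @sizeOf(One) });
    }
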
@@ -37092,7 +37092,7 @@ fn analyzeComptimeAlloc(
// There will be stores before the first load, but they may be to sub-elements or
// sub-fields. So we need to initialize with undef to allow the mechanism to expand
// into fields/elements and have those overridden with stored values.
- (try mod.intern(.{ .undef = var_type.toIntern() })).toValue(),
+ Value.fromInterned((try mod.intern(.{ .undef = var_type.toIntern() }))),
alignment,
);
const decl = mod.declPtr(decl_index);
@@ -37226,7 +37226,7 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value
// Move mutable decl values to the InternPool and assert other decls are already in
// the InternPool.
const uncoerced_val = if (deref.is_mutable) try tv.val.intern(tv.ty, mod) else tv.val.toIntern();
- const coerced_val = try mod.getCoerced(uncoerced_val.toValue(), load_ty);
+ const coerced_val = try mod.getCoerced(Value.fromInterned(uncoerced_val), load_ty);
return .{ .val = coerced_val };
}
}
@@ -37284,7 +37284,7 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
if (ptr_type.flags.is_allowzero) return null;
// optionals of zero sized types behave like bools, not pointers
- const payload_ty = opt_child.toType();
+ const payload_ty = Type.fromInterned(opt_child);
if ((try sema.typeHasOnePossibleValue(payload_ty)) != null) {
return null;
}
@@ -37328,7 +37328,7 @@ fn unionFieldAlignment(sema: *Sema, u: InternPool.UnionType, field_index: u32) !
const ip = &mod.intern_pool;
const field_align = u.fieldAlign(ip, field_index);
if (field_align != .none) return field_align;
- const field_ty = u.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(u.field_types.get(ip)[field_index]);
if (field_ty.isNoReturn(sema.mod)) return .none;
return sema.typeAbiAlignment(field_ty);
}
@@ -37458,10 +37458,10 @@ fn intAddInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usi
};
scalar.* = try val.intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return sema.intAddScalar(lhs, rhs, ty);
}
@@ -37548,10 +37548,10 @@ fn intSubInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usi
};
scalar.* = try val.intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return sema.intSubScalar(lhs, rhs, ty);
}
@@ -37620,14 +37620,14 @@ fn intSubWithOverflow(
scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
}
return Value.OverflowArithmeticResult{
- .overflow_bit = (try mod.intern(.{ .aggregate = .{
+ .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
.storage = .{ .elems = overflowed_data },
- } })).toValue(),
- .wrapped_result = (try mod.intern(.{ .aggregate = .{
+ } }))),
+ .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue(),
+ } }))),
};
}
return sema.intSubWithOverflowScalar(lhs, rhs, ty);
@@ -37679,10 +37679,10 @@ fn intFromFloat(
const elem_val = try val.elemValue(sema.mod, i);
scalar.* = try (try sema.intFromFloatScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod), mode)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = int_ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return sema.intFromFloatScalar(block, src, val, float_ty, int_ty, mode);
}
@@ -37792,7 +37792,7 @@ fn intFitsInType(
const max_needed_bits = @as(u16, 16) + @intFromBool(info.signedness == .signed);
// If it is u16 or bigger we know the alignment fits without resolving it.
if (info.bits >= max_needed_bits) return true;
- const x = try sema.typeAbiAlignment(lazy_ty.toType());
+ const x = try sema.typeAbiAlignment(Type.fromInterned(lazy_ty));
if (x == .none) return true;
const actual_needed_bits = @as(usize, x.toLog2Units()) + 1 + @intFromBool(info.signedness == .signed);
return info.bits >= actual_needed_bits;
@@ -37801,7 +37801,7 @@ fn intFitsInType(
const max_needed_bits = @as(u16, 64) + @intFromBool(info.signedness == .signed);
// If it is u64 or bigger we know the size fits without resolving it.
if (info.bits >= max_needed_bits) return true;
- const x = try sema.typeAbiSize(lazy_ty.toType());
+ const x = try sema.typeAbiSize(Type.fromInterned(lazy_ty));
if (x == 0) return true;
const actual_needed_bits = std.math.log2(x) + 1 + @intFromBool(info.signedness == .signed);
return info.bits >= actual_needed_bits;
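Both lazy prongs apply the same shortcut: answer from the integer's bit width alone when it is provably wide enough, and only force layout resolution otherwise. A worked example for the lazy_align branch, assuming an unsigned int type and an alignment of 8 bytes:

    x.toLog2Units() = 3                  // alignment 8 == 1 << 3
    actual_needed_bits = 3 + 1 + 0 = 4   // the value 8 takes 4 bits unsigned
    // so any unsigned int type with info.bits >= 4 can hold this alignment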
@@ -37822,7 +37822,7 @@ fn intFitsInType(
.elems => |elems| elems,
.repeated_elem => |elem| @as(*const [1]InternPool.Index, &elem),
}, 0..) |elem, i| {
- if (try sema.intFitsInType(elem.toValue(), ty.scalarType(mod), null)) continue;
+ if (try sema.intFitsInType(Value.fromInterned(elem), ty.scalarType(mod), null)) continue;
if (vector_index) |vi| vi.* = i;
break false;
} else true,
@@ -37848,8 +37848,8 @@ fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
assert(enum_type.tag_mode != .nonexhaustive);
// The `tagValueIndex` function call below relies on the type being the integer tag type.
// `getCoerced` assumes the value will fit the new type.
- if (!(try sema.intFitsInType(int, enum_type.tag_ty.toType(), null))) return false;
- const int_coerced = try mod.getCoerced(int, enum_type.tag_ty.toType());
+ if (!(try sema.intFitsInType(int, Type.fromInterned(enum_type.tag_ty), null))) return false;
+ const int_coerced = try mod.getCoerced(int, Type.fromInterned(enum_type.tag_ty));
return enum_type.tagValueIndex(&mod.intern_pool, int_coerced.toIntern()) != null;
}
@@ -37874,14 +37874,14 @@ fn intAddWithOverflow(
scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
}
return Value.OverflowArithmeticResult{
- .overflow_bit = (try mod.intern(.{ .aggregate = .{
+ .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
.storage = .{ .elems = overflowed_data },
- } })).toValue(),
- .wrapped_result = (try mod.intern(.{ .aggregate = .{
+ } }))),
+ .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue(),
+ } }))),
};
}
return sema.intAddWithOverflowScalar(lhs, rhs, ty);
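For vector operands the result mirrors @addWithOverflow applied lane-wise: the wrapped sums land in one aggregate and the per-lane carries in a @Vector(len, u1). Illustrative values for two u8 lanes:

    lhs = .{ 255, 1 }, rhs = .{ 1, 1 }
    wrapped_result = .{ 0, 2 }
    overflow_bit   = .{ 1, 0 }   // @Vector(2, u1)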
@@ -37983,10 +37983,10 @@ fn compareVector(
const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod));
scalar.* = try Value.makeBool(res_bool).intern(Type.bool, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = (try mod.vectorType(.{ .len = ty.vectorLen(mod), .child = .bool_type })).toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
/// Returns the type of a pointer to an element.
@@ -38102,7 +38102,7 @@ fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool
fn ptrType(sema: *Sema, info: InternPool.Key.PtrType) CompileError!Type {
if (info.flags.alignment != .none) {
- _ = try sema.typeAbiAlignment(info.child.toType());
+ _ = try sema.typeAbiAlignment(Type.fromInterned(info.child));
}
return sema.mod.ptrType(info);
}
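Every Sema.zig hunk above is the same mechanical rewrite: a method call on an InternPool.Index becomes a call to the new constructor on Type or Value. A representative before/after pair, taken from the ptrType hunk:

    // before: extension method on the index
    _ = try sema.typeAbiAlignment(info.child.toType());
    // after: explicit constructor taking the index
    _ = try sema.typeAbiAlignment(Type.fromInterned(info.child));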
src/type.zig
@@ -185,8 +185,8 @@ pub const Type = struct {
if (info.sentinel != .none) switch (info.flags.size) {
.One, .C => unreachable,
- .Many => try writer.print("[*:{}]", .{info.sentinel.toValue().fmtValue(info.child.toType(), mod)}),
- .Slice => try writer.print("[:{}]", .{info.sentinel.toValue().fmtValue(info.child.toType(), mod)}),
+ .Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(Type.fromInterned(info.child), mod)}),
+ .Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(Type.fromInterned(info.child), mod)}),
} else switch (info.flags.size) {
.One => try writer.writeAll("*"),
.Many => try writer.writeAll("[*]"),
@@ -200,7 +200,7 @@ pub const Type = struct {
const alignment = if (info.flags.alignment != .none)
info.flags.alignment
else
- info.child.toType().abiAlignment(mod);
+ Type.fromInterned(info.child).abiAlignment(mod);
try writer.print("align({d}", .{alignment.toByteUnits(0)});
if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) {
@@ -222,36 +222,36 @@ pub const Type = struct {
if (info.flags.is_volatile) try writer.writeAll("volatile ");
if (info.flags.is_allowzero and info.flags.size != .C) try writer.writeAll("allowzero ");
- try print(info.child.toType(), writer, mod);
+ try print(Type.fromInterned(info.child), writer, mod);
return;
},
.array_type => |array_type| {
if (array_type.sentinel == .none) {
try writer.print("[{d}]", .{array_type.len});
- try print(array_type.child.toType(), writer, mod);
+ try print(Type.fromInterned(array_type.child), writer, mod);
} else {
try writer.print("[{d}:{}]", .{
array_type.len,
- array_type.sentinel.toValue().fmtValue(array_type.child.toType(), mod),
+ Value.fromInterned(array_type.sentinel).fmtValue(Type.fromInterned(array_type.child), mod),
});
- try print(array_type.child.toType(), writer, mod);
+ try print(Type.fromInterned(array_type.child), writer, mod);
}
return;
},
.vector_type => |vector_type| {
try writer.print("@Vector({d}, ", .{vector_type.len});
- try print(vector_type.child.toType(), writer, mod);
+ try print(Type.fromInterned(vector_type.child), writer, mod);
try writer.writeAll(")");
return;
},
.opt_type => |child| {
try writer.writeByte('?');
- return print(child.toType(), writer, mod);
+ return print(Type.fromInterned(child), writer, mod);
},
.error_union_type => |error_union_type| {
- try print(error_union_type.error_set_type.toType(), writer, mod);
+ try print(Type.fromInterned(error_union_type.error_set_type), writer, mod);
try writer.writeByte('!');
- try print(error_union_type.payload_type.toType(), writer, mod);
+ try print(Type.fromInterned(error_union_type.payload_type), writer, mod);
return;
},
.inferred_error_set_type => |func_index| {
@@ -342,10 +342,10 @@ pub const Type = struct {
try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&mod.intern_pool)});
}
- try print(field_ty.toType(), writer, mod);
+ try print(Type.fromInterned(field_ty), writer, mod);
if (val != .none) {
- try writer.print(" = {}", .{val.toValue().fmtValue(field_ty.toType(), mod)});
+ try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(Type.fromInterned(field_ty), mod)});
}
}
try writer.writeAll("}");
@@ -382,7 +382,7 @@ pub const Type = struct {
if (param_ty == .generic_poison_type) {
try writer.writeAll("anytype");
} else {
- try print(param_ty.toType(), writer, mod);
+ try print(Type.fromInterned(param_ty), writer, mod);
}
}
if (fn_info.is_var_args) {
@@ -403,13 +403,13 @@ pub const Type = struct {
if (fn_info.return_type == .generic_poison_type) {
try writer.writeAll("anytype");
} else {
- try print(fn_info.return_type.toType(), writer, mod);
+ try print(Type.fromInterned(fn_info.return_type), writer, mod);
}
},
.anyframe_type => |child| {
if (child == .none) return writer.writeAll("anyframe");
try writer.writeAll("anyframe->");
- return print(child.toType(), writer, mod);
+ return print(Type.fromInterned(child), writer, mod);
},
// values, not types
@@ -435,13 +435,18 @@ pub const Type = struct {
}
}
+ pub fn fromInterned(i: InternPool.Index) Type {
+ assert(i != .none);
+ return .{ .ip_index = i };
+ }
+
pub fn toIntern(ty: Type) InternPool.Index {
assert(ty.ip_index != .none);
return ty.ip_index;
}
pub fn toValue(self: Type) Value {
- return self.toIntern().toValue();
+ return Value.fromInterned(self.toIntern());
}
const RuntimeBitsError = Module.CompileError || error{NeedLazy};
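Type.fromInterned is now the canonical way to view an InternPool.Index as a Type; the assert forces callers to handle .none (an unresolved or absent type) before wrapping. A minimal usage sketch in the style of the hunks below, assuming the child index is known to be set:

    const array_type = mod.intern_pool.indexToKey(ty.toIntern()).array_type;
    const elem_ty = Type.fromInterned(array_type.child); // asserts child != .none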
@@ -482,18 +487,18 @@ pub const Type = struct {
.anyframe_type => true,
.array_type => |array_type| {
if (array_type.sentinel != .none) {
- return array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
+ return Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
} else {
return array_type.len > 0 and
- try array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
+ try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
}
},
.vector_type => |vector_type| {
return vector_type.len > 0 and
- try vector_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
+ try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
},
.opt_type => |child| {
- const child_ty = child.toType();
+ const child_ty = Type.fromInterned(child);
if (child_ty.isNoReturn(mod)) {
// Then the optional is comptime-known to be null.
return false;
@@ -577,7 +582,7 @@ pub const Type = struct {
}
for (0..struct_type.field_types.len) |i| {
if (struct_type.comptime_bits.getBit(ip, i)) continue;
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
return true;
} else {
@@ -587,7 +592,7 @@ pub const Type = struct {
.anon_struct_type => |tuple| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
if (val != .none) continue; // comptime field
- if (try field_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true;
+ if (try Type.fromInterned(field_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true;
}
return false;
},
@@ -607,7 +612,7 @@ pub const Type = struct {
// tag_ty will be `none` if this union's tag type is not resolved yet,
// in which case we want control flow to continue down below.
if (tag_ty != .none and
- try tag_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
+ try Type.fromInterned(tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
{
return true;
}
@@ -621,7 +626,7 @@ pub const Type = struct {
}
const union_obj = ip.loadUnionType(union_type);
for (0..union_obj.field_types.len) |field_index| {
- const field_ty = union_obj.field_types.get(ip)[field_index].toType();
+ const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
return true;
} else {
@@ -630,7 +635,7 @@ pub const Type = struct {
},
.opaque_type => true,
- .enum_type => |enum_type| enum_type.tag_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
+ .enum_type => |enum_type| Type.fromInterned(enum_type.tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
// values, not types
.undef,
@@ -676,7 +681,7 @@ pub const Type = struct {
.func_type,
=> false,
- .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod),
+ .array_type => |array_type| Type.fromInterned(array_type.child).hasWellDefinedLayout(mod),
.opt_type => ty.isPtrLikeOptional(mod),
.ptr_type => |ptr_type| ptr_type.flags.size != .Slice,
@@ -783,7 +788,7 @@ pub const Type = struct {
if (fn_info.is_generic) return false;
if (fn_info.is_var_args) return true;
if (fn_info.cc == .Inline) return false;
- return !try fn_info.return_type.toType().comptimeOnlyAdvanced(mod, opt_sema);
+ return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, opt_sema);
}
pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool {
@@ -817,13 +822,13 @@ pub const Type = struct {
return ptr_type.flags.alignment;
if (opt_sema) |sema| {
- const res = try ptr_type.child.toType().abiAlignmentAdvanced(mod, .{ .sema = sema });
+ const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .{ .sema = sema });
return res.scalar;
}
- return (ptr_type.child.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
+ return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
},
- .opt_type => |child| child.toType().ptrAlignmentAdvanced(mod, opt_sema),
+ .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, opt_sema),
else => unreachable,
};
}
@@ -891,10 +896,10 @@ pub const Type = struct {
return .{ .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)) };
},
.array_type => |array_type| {
- return array_type.child.toType().abiAlignmentAdvanced(mod, strat);
+ return Type.fromInterned(array_type.child).abiAlignmentAdvanced(mod, strat);
},
.vector_type => |vector_type| {
- const bits_u64 = try bitSizeAdvanced(vector_type.child.toType(), mod, opt_sema);
+ const bits_u64 = try bitSizeAdvanced(Type.fromInterned(vector_type.child), mod, opt_sema);
const bits: u32 = @intCast(bits_u64);
const bytes = ((bits * vector_type.len) + 7) / 8;
const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
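The vector prong derives alignment by rounding the packed byte size up to the next power of two. Worked example for @Vector(3, u8):

    bits_u64  = 8                        // bit size of u8
    bytes     = (8 * 3 + 7) / 8 = 3
    alignment = ceilPowerOfTwoAssert(3) = 4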
@@ -902,7 +907,7 @@ pub const Type = struct {
},
.opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat),
- .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, info.payload_type.toType()),
+ .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, Type.fromInterned(info.payload_type)),
.error_set_type, .inferred_error_set_type => {
const bits = mod.errorSetBits();
@@ -992,14 +997,14 @@ pub const Type = struct {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => if (struct_type.backingIntType(ip).* == .none) return .{
- .val = (try mod.intern(.{ .int = .{
+ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } })).toValue(),
+ } }))),
},
.eager => {},
}
- return .{ .scalar = struct_type.backingIntType(ip).toType().abiAlignment(mod) };
+ return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(mod) };
}
const flags = struct_type.flagsPtr(ip).*;
@@ -1010,25 +1015,25 @@ pub const Type = struct {
.sema => |sema| .{
.scalar = try sema.resolveStructAlignment(ty.toIntern(), struct_type),
},
- .lazy => .{ .val = (try mod.intern(.{ .int = .{
+ .lazy => .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } })).toValue() },
+ } }))) },
};
},
.anon_struct_type => |tuple| {
var big_align: Alignment = .@"1";
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
if (val != .none) continue; // comptime field
- switch (try field_ty.toType().abiAlignmentAdvanced(mod, strat)) {
+ switch (try Type.fromInterned(field_ty).abiAlignmentAdvanced(mod, strat)) {
.scalar => |field_align| big_align = big_align.max(field_align),
.val => switch (strat) {
.eager => unreachable, // field type alignment not resolved
.sema => unreachable, // passed to abiAlignmentAdvanced above
- .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } })).toValue() },
+ } }))) },
},
}
}
@@ -1041,17 +1046,17 @@ pub const Type = struct {
if (!union_type.haveLayout(ip)) switch (strat) {
.eager => unreachable, // union layout not resolved
.sema => |sema| return .{ .scalar = try sema.resolveUnionAlignment(ty, union_type) },
- .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } })).toValue() },
+ } }))) },
};
return .{ .scalar = union_type.flagsPtr(ip).alignment };
},
.opaque_type => return .{ .scalar = .@"1" },
.enum_type => |enum_type| return .{
- .scalar = enum_type.tag_ty.toType().abiAlignment(mod),
+ .scalar = Type.fromInterned(enum_type.tag_ty).abiAlignment(mod),
},
// values, not types
@@ -1090,10 +1095,10 @@ pub const Type = struct {
switch (strat) {
.eager, .sema => {
if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
+ error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } })).toValue() },
+ } }))) },
else => |e| return e,
})) {
return .{ .scalar = code_align };
@@ -1107,10 +1112,10 @@ pub const Type = struct {
.scalar => |payload_align| return .{ .scalar = code_align.max(payload_align) },
.val => {},
}
- return .{ .val = (try mod.intern(.{ .int = .{
+ return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } })).toValue() };
+ } }))) };
},
}
}
@@ -1135,10 +1140,10 @@ pub const Type = struct {
switch (strat) {
.eager, .sema => {
if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
+ error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } })).toValue() },
+ } }))) },
else => |e| return e,
})) {
return .{ .scalar = .@"1" };
@@ -1147,10 +1152,10 @@ pub const Type = struct {
},
.lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) {
.scalar => |x| return .{ .scalar = x.max(.@"1") },
- .val => return .{ .val = (try mod.intern(.{ .int = .{
+ .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
- } })).toValue() },
+ } }))) },
},
}
}
@@ -1205,14 +1210,14 @@ pub const Type = struct {
.array_type => |array_type| {
const len = array_type.len + @intFromBool(array_type.sentinel != .none);
if (len == 0) return .{ .scalar = 0 };
- switch (try array_type.child.toType().abiSizeAdvanced(mod, strat)) {
+ switch (try Type.fromInterned(array_type.child).abiSizeAdvanced(mod, strat)) {
.scalar => |elem_size| return .{ .scalar = len * elem_size },
.val => switch (strat) {
.sema, .eager => unreachable,
- .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } })).toValue() },
+ } }))) },
},
}
},
@@ -1220,20 +1225,20 @@ pub const Type = struct {
const opt_sema = switch (strat) {
.sema => |sema| sema,
.eager => null,
- .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } })).toValue() },
+ } }))) },
};
- const elem_bits = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema);
+ const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema);
const total_bits = elem_bits * vector_type.len;
const total_bytes = (total_bits + 7) / 8;
const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) {
.scalar => |x| x,
- .val => return .{ .val = (try mod.intern(.{ .int = .{
+ .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } })).toValue() },
+ } }))) },
};
return AbiSizeAdvanced{ .scalar = alignment.forward(total_bytes) };
},
@@ -1247,15 +1252,15 @@ pub const Type = struct {
},
.error_union_type => |error_union_type| {
- const payload_ty = error_union_type.payload_type.toType();
+ const payload_ty = Type.fromInterned(error_union_type.payload_type);
// This code needs to be kept in sync with the equivalent switch prong
// in abiAlignmentAdvanced.
const code_size = abiSize(Type.anyerror, mod);
if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
+ error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } })).toValue() },
+ } }))) },
else => |e| return e,
})) {
// Same as anyerror.
@@ -1268,10 +1273,10 @@ pub const Type = struct {
.val => switch (strat) {
.sema => unreachable,
.eager => unreachable,
- .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } })).toValue() },
+ } }))) },
},
};
@@ -1358,18 +1363,18 @@ pub const Type = struct {
.lazy => switch (struct_type.layout) {
.Packed => {
if (struct_type.backingIntType(ip).* == .none) return .{
- .val = (try mod.intern(.{ .int = .{
+ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } })).toValue(),
+ } }))),
};
},
.Auto, .Extern => {
if (!struct_type.haveLayout(ip)) return .{
- .val = (try mod.intern(.{ .int = .{
+ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } })).toValue(),
+ } }))),
};
},
},
@@ -1377,7 +1382,7 @@ pub const Type = struct {
}
return switch (struct_type.layout) {
.Packed => .{
- .scalar = struct_type.backingIntType(ip).toType().abiSize(mod),
+ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(mod),
},
.Auto, .Extern => .{ .scalar = struct_type.size(ip).* },
};
@@ -1398,10 +1403,10 @@ pub const Type = struct {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{
- .val = (try mod.intern(.{ .int = .{
+ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } })).toValue(),
+ } }))),
},
.eager => {},
}
@@ -1409,7 +1414,7 @@ pub const Type = struct {
return .{ .scalar = union_type.size(ip).* };
},
.opaque_type => unreachable, // no size available
- .enum_type => |enum_type| return AbiSizeAdvanced{ .scalar = enum_type.tag_ty.toType().abiSize(mod) },
+ .enum_type => |enum_type| return AbiSizeAdvanced{ .scalar = Type.fromInterned(enum_type.tag_ty).abiSize(mod) },
// values, not types
.undef,
@@ -1447,10 +1452,10 @@ pub const Type = struct {
}
if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
- error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
+ error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } })).toValue() },
+ } }))) },
else => |e| return e,
})) return AbiSizeAdvanced{ .scalar = 1 };
@@ -1463,10 +1468,10 @@ pub const Type = struct {
.val => switch (strat) {
.sema => unreachable,
.eager => unreachable,
- .lazy => return .{ .val = (try mod.intern(.{ .int = .{
+ .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
- } })).toValue() },
+ } }))) },
},
};
@@ -1518,7 +1523,7 @@ pub const Type = struct {
.array_type => |array_type| {
const len = array_type.len + @intFromBool(array_type.sentinel != .none);
if (len == 0) return 0;
- const elem_ty = array_type.child.toType();
+ const elem_ty = Type.fromInterned(array_type.child);
const elem_size = @max(
(try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits(0),
(try elem_ty.abiSizeAdvanced(mod, strat)).scalar,
@@ -1528,7 +1533,7 @@ pub const Type = struct {
return (len - 1) * 8 * elem_size + elem_bit_size;
},
.vector_type => |vector_type| {
- const child_ty = vector_type.child.toType();
+ const child_ty = Type.fromInterned(vector_type.child);
const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema);
return elem_bit_size * vector_type.len;
},
@@ -1600,7 +1605,7 @@ pub const Type = struct {
.struct_type => |struct_type| {
if (struct_type.layout == .Packed) {
if (opt_sema) |sema| try sema.resolveTypeLayout(ty);
- return try struct_type.backingIntType(ip).*.toType().bitSizeAdvanced(mod, opt_sema);
+ return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, opt_sema);
}
return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
},
@@ -1625,13 +1630,13 @@ pub const Type = struct {
var size: u64 = 0;
for (0..union_obj.field_types.len) |field_index| {
const field_ty = union_obj.field_types.get(ip)[field_index];
- size = @max(size, try bitSizeAdvanced(field_ty.toType(), mod, opt_sema));
+ size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, opt_sema));
}
return size;
},
.opaque_type => unreachable,
- .enum_type => |enum_type| return bitSizeAdvanced(enum_type.tag_ty.toType(), mod, opt_sema),
+ .enum_type => |enum_type| return bitSizeAdvanced(Type.fromInterned(enum_type.tag_ty), mod, opt_sema),
// values, not types
.undef,
@@ -1665,10 +1670,10 @@ pub const Type = struct {
.union_type => |union_type| union_type.haveLayout(ip),
.array_type => |array_type| {
if ((array_type.len + @intFromBool(array_type.sentinel != .none)) == 0) return true;
- return array_type.child.toType().layoutIsResolved(mod);
+ return Type.fromInterned(array_type.child).layoutIsResolved(mod);
},
- .opt_type => |child| child.toType().layoutIsResolved(mod),
- .error_union_type => |k| k.payload_type.toType().layoutIsResolved(mod),
+ .opt_type => |child| Type.fromInterned(child).layoutIsResolved(mod),
+ .error_union_type => |k| Type.fromInterned(k.payload_type).layoutIsResolved(mod),
else => true,
};
}
@@ -1701,7 +1706,7 @@ pub const Type = struct {
}
pub fn slicePtrFieldType(ty: Type, mod: *const Module) Type {
- return mod.intern_pool.slicePtrType(ty.toIntern()).toType();
+ return Type.fromInterned(mod.intern_pool.slicePtrType(ty.toIntern()));
}
pub fn isConstPtr(ty: Type, mod: *const Module) bool {
@@ -1801,7 +1806,7 @@ pub const Type = struct {
}
pub fn childTypeIp(ty: Type, ip: *const InternPool) Type {
- return ip.childType(ty.toIntern()).toType();
+ return Type.fromInterned(ip.childType(ty.toIntern()));
}
/// For *[N]T, returns T.
@@ -1816,16 +1821,16 @@ pub const Type = struct {
pub fn elemType2(ty: Type, mod: *const Module) Type {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
- .One => ptr_type.child.toType().shallowElemType(mod),
- .Many, .C, .Slice => ptr_type.child.toType(),
+ .One => Type.fromInterned(ptr_type.child).shallowElemType(mod),
+ .Many, .C, .Slice => Type.fromInterned(ptr_type.child),
},
.anyframe_type => |child| {
assert(child != .none);
- return child.toType();
+ return Type.fromInterned(child);
},
- .vector_type => |vector_type| vector_type.child.toType(),
- .array_type => |array_type| array_type.child.toType(),
- .opt_type => |child| mod.intern_pool.childType(child).toType(),
+ .vector_type => |vector_type| Type.fromInterned(vector_type.child),
+ .array_type => |array_type| Type.fromInterned(array_type.child),
+ .opt_type => |child| Type.fromInterned(mod.intern_pool.childType(child)),
else => unreachable,
};
}
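elemType2 peels exactly one level of indexability; the .One prong looks through the pointee, so a single pointer to an array behaves like the array itself. An illustrative sketch of the mapping:

    *[4]u8          -> u8   // .One: shallowElemType of the pointee
    []u8, [*]u8     -> u8
    [4]u8           -> u8
    @Vector(4, f32) -> f32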
@@ -1849,7 +1854,7 @@ pub const Type = struct {
/// Note that for C pointers this returns the type unmodified.
pub fn optionalChild(ty: Type, mod: *const Module) Type {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
- .opt_type => |child| child.toType(),
+ .opt_type => |child| Type.fromInterned(child),
.ptr_type => |ptr_type| b: {
assert(ptr_type.flags.size == .C);
break :b ty;
@@ -1866,7 +1871,7 @@ pub const Type = struct {
.union_type => |union_type| switch (union_type.flagsPtr(ip).runtime_tag) {
.tagged => {
assert(union_type.flagsPtr(ip).status.haveFieldTypes());
- return union_type.enum_tag_ty.toType();
+ return Type.fromInterned(union_type.enum_tag_ty);
},
else => null,
},
@@ -1882,7 +1887,7 @@ pub const Type = struct {
.union_type => |union_type| {
if (!union_type.hasTag(ip)) return null;
assert(union_type.haveFieldTypes(ip));
- return union_type.enum_tag_ty.toType();
+ return Type.fromInterned(union_type.enum_tag_ty);
},
else => null,
};
@@ -1892,7 +1897,7 @@ pub const Type = struct {
/// not be stored at runtime.
pub fn unionTagTypeHypothetical(ty: Type, mod: *Module) Type {
const union_obj = mod.typeToUnion(ty).?;
- return union_obj.enum_tag_ty.toType();
+ return Type.fromInterned(union_obj.enum_tag_ty);
}
pub fn unionFieldType(ty: Type, enum_tag: Value, mod: *Module) ?Type {
@@ -1900,7 +1905,7 @@ pub const Type = struct {
const union_obj = mod.typeToUnion(ty).?;
const union_fields = union_obj.field_types.get(ip);
const index = mod.unionTagFieldIndex(union_obj, enum_tag) orelse return null;
- return union_fields[index].toType();
+ return Type.fromInterned(union_fields[index]);
}
pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
@@ -1912,7 +1917,7 @@ pub const Type = struct {
const ip = &mod.intern_pool;
const union_obj = mod.typeToUnion(ty).?;
for (union_obj.field_types.get(ip)) |field_ty| {
- if (field_ty.toType().hasRuntimeBits(mod)) return false;
+ if (Type.fromInterned(field_ty).hasRuntimeBits(mod)) return false;
}
return true;
}
@@ -1946,12 +1951,12 @@ pub const Type = struct {
/// Asserts that the type is an error union.
pub fn errorUnionPayload(ty: Type, mod: *Module) Type {
- return mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.payload_type.toType();
+ return Type.fromInterned(mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.payload_type);
}
/// Asserts that the type is an error union.
pub fn errorUnionSet(ty: Type, mod: *Module) Type {
- return mod.intern_pool.errorUnionSet(ty.toIntern()).toType();
+ return Type.fromInterned(mod.intern_pool.errorUnionSet(ty.toIntern()));
}
/// Returns false for unresolved inferred error sets.
@@ -2077,8 +2082,8 @@ pub const Type = struct {
.anon_struct_type,
=> null,
- .array_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null,
- .ptr_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null,
+ .array_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null,
+ .ptr_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null,
else => unreachable,
};
@@ -2146,9 +2151,9 @@ pub const Type = struct {
.c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) },
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| return int_type,
- .struct_type => |t| ty = t.backingIntType(ip).*.toType(),
- .enum_type => |enum_type| ty = enum_type.tag_ty.toType(),
- .vector_type => |vector_type| ty = vector_type.child.toType(),
+ .struct_type => |t| ty = Type.fromInterned(t.backingIntType(ip).*),
+ .enum_type => |enum_type| ty = Type.fromInterned(enum_type.tag_ty),
+ .vector_type => |vector_type| ty = Type.fromInterned(vector_type.child),
.error_set_type, .inferred_error_set_type => {
return .{ .signedness = .unsigned, .bits = mod.errorSetBits() };
@@ -2259,7 +2264,7 @@ pub const Type = struct {
/// Asserts the type is a function or a function pointer.
pub fn fnReturnType(ty: Type, mod: *Module) Type {
- return mod.intern_pool.funcTypeReturnType(ty.toIntern()).toType();
+ return Type.fromInterned(mod.intern_pool.funcTypeReturnType(ty.toIntern()));
}
/// Asserts the type is a function.
@@ -2343,15 +2348,15 @@ pub const Type = struct {
inline .array_type, .vector_type => |seq_type, seq_tag| {
const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none;
- if (seq_type.len + @intFromBool(has_sentinel) == 0) return (try mod.intern(.{ .aggregate = .{
+ if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = &.{} },
- } })).toValue();
- if (try seq_type.child.toType().onePossibleValue(mod)) |opv| {
- return (try mod.intern(.{ .aggregate = .{
+ } })));
+ if (try Type.fromInterned(seq_type.child).onePossibleValue(mod)) |opv| {
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .repeated_elem = opv.toIntern() },
- } })).toValue();
+ } })));
}
return null;
},
@@ -2422,7 +2427,7 @@ pub const Type = struct {
field_val.* = struct_type.field_inits.get(ip)[i];
continue;
}
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
if (try field_ty.onePossibleValue(mod)) |field_opv| {
field_val.* = try field_opv.intern(field_ty, mod);
} else return null;
@@ -2430,10 +2435,10 @@ pub const Type = struct {
// In this case the struct has no runtime-known fields and
// therefore has one possible value.
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
- } })).toValue();
+ } })));
},
.anon_struct_type => |tuple| {
@@ -2445,52 +2450,52 @@ pub const Type = struct {
// TODO: write something like getCoercedInts to avoid needing to dupe
const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values.get(ip));
defer mod.gpa.free(duped_values);
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = duped_values },
- } })).toValue();
+ } })));
},
.union_type => |union_type| {
const union_obj = ip.loadUnionType(union_type);
- const tag_val = (try union_obj.enum_tag_ty.toType().onePossibleValue(mod)) orelse
+ const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(mod)) orelse
return null;
if (union_obj.field_names.len == 0) {
const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
- return only.toValue();
+ return Value.fromInterned(only);
}
const only_field_ty = union_obj.field_types.get(ip)[0];
- const val_val = (try only_field_ty.toType().onePossibleValue(mod)) orelse
+ const val_val = (try Type.fromInterned(only_field_ty).onePossibleValue(mod)) orelse
return null;
const only = try mod.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = tag_val.toIntern(),
.val = val_val.toIntern(),
} });
- return only.toValue();
+ return Value.fromInterned(only);
},
.opaque_type => return null,
.enum_type => |enum_type| switch (enum_type.tag_mode) {
.nonexhaustive => {
if (enum_type.tag_ty == .comptime_int_type) return null;
- if (try enum_type.tag_ty.toType().onePossibleValue(mod)) |int_opv| {
+ if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| {
const only = try mod.intern(.{ .enum_tag = .{
.ty = ty.toIntern(),
.int = int_opv.toIntern(),
} });
- return only.toValue();
+ return Value.fromInterned(only);
}
return null;
},
.auto, .explicit => {
- if (enum_type.tag_ty.toType().hasRuntimeBits(mod)) return null;
+ if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;
switch (enum_type.names.len) {
0 => {
const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
- return only.toValue();
+ return Value.fromInterned(only);
},
1 => {
if (enum_type.values.len == 0) {
@@ -2501,9 +2506,9 @@ pub const Type = struct {
.storage = .{ .u64 = 0 },
} }),
} });
- return only.toValue();
+ return Value.fromInterned(only);
} else {
- return enum_type.values.get(ip)[0].toValue();
+ return Value.fromInterned(enum_type.values.get(ip)[0]);
}
},
else => return null,
@@ -2552,7 +2557,7 @@ pub const Type = struct {
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => false,
.ptr_type => |ptr_type| {
- const child_ty = ptr_type.child.toType();
+ const child_ty = Type.fromInterned(ptr_type.child);
switch (child_ty.zigTypeTag(mod)) {
.Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, opt_sema),
.Opaque => return false,
@@ -2561,12 +2566,12 @@ pub const Type = struct {
},
.anyframe_type => |child| {
if (child == .none) return false;
- return child.toType().comptimeOnlyAdvanced(mod, opt_sema);
+ return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema);
},
- .array_type => |array_type| return array_type.child.toType().comptimeOnlyAdvanced(mod, opt_sema),
- .vector_type => |vector_type| return vector_type.child.toType().comptimeOnlyAdvanced(mod, opt_sema),
- .opt_type => |child| return child.toType().comptimeOnlyAdvanced(mod, opt_sema),
- .error_union_type => |error_union_type| return error_union_type.payload_type.toType().comptimeOnlyAdvanced(mod, opt_sema),
+ .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, opt_sema),
+ .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, opt_sema),
+ .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema),
+ .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, opt_sema),
.error_set_type,
.inferred_error_set_type,
@@ -2647,7 +2652,7 @@ pub const Type = struct {
const i: u32 = @intCast(i_usize);
if (struct_type.fieldIsComptime(ip, i)) continue;
const field_ty = struct_type.field_types.get(ip)[i];
- if (try field_ty.toType().comptimeOnlyAdvanced(mod, opt_sema)) {
+ if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) {
// Note that this does not cause the layout to
// be considered resolved. Comptime-only types
// still maintain a layout of their
@@ -2666,7 +2671,7 @@ pub const Type = struct {
.anon_struct_type => |tuple| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
const have_comptime_val = val != .none;
- if (!have_comptime_val and try field_ty.toType().comptimeOnlyAdvanced(mod, opt_sema)) return true;
+ if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) return true;
}
return false;
},
@@ -2689,7 +2694,7 @@ pub const Type = struct {
const union_obj = ip.loadUnionType(union_type);
for (0..union_obj.field_types.len) |field_idx| {
const field_ty = union_obj.field_types.get(ip)[field_idx];
- if (try field_ty.toType().comptimeOnlyAdvanced(mod, opt_sema)) {
+ if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) {
union_obj.flagsPtr(ip).requires_comptime = .yes;
return true;
}
@@ -2702,7 +2707,7 @@ pub const Type = struct {
.opaque_type => false,
- .enum_type => |enum_type| return enum_type.tag_ty.toType().comptimeOnlyAdvanced(mod, opt_sema),
+ .enum_type => |enum_type| return Type.fromInterned(enum_type.tag_ty).comptimeOnlyAdvanced(mod, opt_sema),
// values, not types
.undef,
@@ -2792,10 +2797,10 @@ pub const Type = struct {
// Works for integers and vectors of integers.
pub fn minInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
const scalar = try minIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
- return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{
+ return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .repeated_elem = scalar.toIntern() },
- } })).toValue() else scalar;
+ } }))) else scalar;
}
/// Asserts that the type is an integer.
@@ -2821,10 +2826,10 @@ pub const Type = struct {
/// The returned Value will have type dest_ty.
pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
- return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{
+ return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = dest_ty.toIntern(),
.storage = .{ .repeated_elem = scalar.toIntern() },
- } })).toValue() else scalar;
+ } }))) else scalar;
}
/// The returned Value will have type dest_ty.
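When ty is a vector, the scalar bound is splatted using repeated_elem storage instead of materializing one element per lane. Conceptually, for an unsigned byte vector:

    maxInt(@Vector(4, u8)) => aggregate{ .repeated_elem = 255 }  // reads back as .{ 255, 255, 255, 255 }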
@@ -2865,8 +2870,8 @@ pub const Type = struct {
/// Asserts the type is an enum or a union.
pub fn intTagType(ty: Type, mod: *Module) Type {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
- .union_type => |union_type| union_type.enum_tag_ty.toType().intTagType(mod),
- .enum_type => |enum_type| enum_type.tag_ty.toType(),
+ .union_type => |union_type| Type.fromInterned(union_type.enum_tag_ty).intTagType(mod),
+ .enum_type => |enum_type| Type.fromInterned(enum_type.tag_ty),
else => unreachable,
};
}
@@ -2965,12 +2970,12 @@ pub const Type = struct {
pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => |struct_type| struct_type.field_types.get(ip)[index].toType(),
+ .struct_type => |struct_type| Type.fromInterned(struct_type.field_types.get(ip)[index]),
.union_type => |union_type| {
const union_obj = ip.loadUnionType(union_type);
- return union_obj.field_types.get(ip)[index].toType();
+ return Type.fromInterned(union_obj.field_types.get(ip)[index]);
},
- .anon_struct_type => |anon_struct| anon_struct.types.get(ip)[index].toType(),
+ .anon_struct_type => |anon_struct| Type.fromInterned(anon_struct.types.get(ip)[index]),
else => unreachable,
};
}
@@ -2981,11 +2986,11 @@ pub const Type = struct {
.struct_type => |struct_type| {
assert(struct_type.layout != .Packed);
const explicit_align = struct_type.fieldAlign(ip, index);
- const field_ty = struct_type.field_types.get(ip)[index].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
return mod.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
},
.anon_struct_type => |anon_struct| {
- return anon_struct.types.get(ip)[index].toType().abiAlignment(mod);
+ return Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignment(mod);
},
.union_type => |union_type| {
const union_obj = ip.loadUnionType(union_type);
@@ -3002,13 +3007,13 @@ pub const Type = struct {
const val = struct_type.fieldInit(ip, index);
// TODO: avoid using `unreachable` to indicate this.
if (val == .none) return Value.@"unreachable";
- return val.toValue();
+ return Value.fromInterned(val);
},
.anon_struct_type => |anon_struct| {
const val = anon_struct.values.get(ip)[index];
// TODO: avoid using `unreachable` to indicate this.
if (val == .none) return Value.@"unreachable";
- return val.toValue();
+ return Value.fromInterned(val);
},
else => unreachable,
}
@@ -3020,17 +3025,17 @@ pub const Type = struct {
.struct_type => |struct_type| {
assert(struct_type.haveFieldInits(ip));
if (struct_type.fieldIsComptime(ip, index)) {
- return struct_type.field_inits.get(ip)[index].toValue();
+ return Value.fromInterned(struct_type.field_inits.get(ip)[index]);
} else {
- return struct_type.field_types.get(ip)[index].toType().onePossibleValue(mod);
+ return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(mod);
}
},
.anon_struct_type => |tuple| {
const val = tuple.values.get(ip)[index];
if (val == .none) {
- return tuple.types.get(ip)[index].toType().onePossibleValue(mod);
+ return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(mod);
} else {
- return val.toValue();
+ return Value.fromInterned(val);
}
},
else => unreachable,
@@ -3066,17 +3071,17 @@ pub const Type = struct {
var big_align: Alignment = .none;
for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
- if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) {
+ if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) {
// comptime field
if (i == index) return offset;
continue;
}
- const field_align = field_ty.toType().abiAlignment(mod);
+ const field_align = Type.fromInterned(field_ty).abiAlignment(mod);
big_align = big_align.max(field_align);
offset = field_align.forward(offset);
if (i == index) return offset;
- offset += field_ty.toType().abiSize(mod);
+ offset += Type.fromInterned(field_ty).abiSize(mod);
}
offset = big_align.max(.@"1").forward(offset);
return offset;
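The loop is the usual align-forward-then-advance layout walk; comptime and zero-bit fields occupy no space. Worked example for a tuple of { u8, u32, u8 }:

    field 0: u8,  align 1: offset = forward(0, 1) = 0; advance to 1
    field 1: u32, align 4: offset = forward(1, 4) = 4; advance to 8
    field 2: u8,  align 1: offset = forward(8, 1) = 8; advance to 9
    // the fall-through path then rounds up to the aggregate alignment: forward(9, 4) = 12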
src/TypedValue.zig
@@ -89,7 +89,7 @@ pub fn print(
if (payload.tag) |tag| {
try print(.{
- .ty = ip.indexToKey(ty.toIntern()).union_type.enum_tag_ty.toType(),
+ .ty = Type.fromInterned(ip.indexToKey(ty.toIntern()).union_type.enum_tag_ty),
.val = tag,
}, writer, level - 1, mod);
try writer.writeAll(" = ");
@@ -222,10 +222,10 @@ pub fn print(
.int => |int| switch (int.storage) {
inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}),
.lazy_align => |lazy_ty| return writer.print("{d}", .{
- lazy_ty.toType().abiAlignment(mod),
+ Type.fromInterned(lazy_ty).abiAlignment(mod),
}),
.lazy_size => |lazy_ty| return writer.print("{d}", .{
- lazy_ty.toType().abiSize(mod),
+ Type.fromInterned(lazy_ty).abiSize(mod),
}),
},
.err => |err| return writer.print("error.{}", .{
@@ -236,7 +236,7 @@ pub fn print(
err_name.fmt(ip),
}),
.payload => |payload| {
- val = payload.toValue();
+ val = Value.fromInterned(payload);
ty = ty.errorUnionPayload(mod);
},
},
@@ -254,8 +254,8 @@ pub fn print(
}
try writer.writeAll("@enumFromInt(");
try print(.{
- .ty = ip.typeOf(enum_tag.int).toType(),
- .val = enum_tag.int.toValue(),
+ .ty = Type.fromInterned(ip.typeOf(enum_tag.int)),
+ .val = Value.fromInterned(enum_tag.int),
}, writer, level - 1, mod);
try writer.writeAll(")");
return;
@@ -280,8 +280,8 @@ pub fn print(
if (level == 0) {
return writer.writeAll(".{ ... }");
}
- const elem_ty = ptr_ty.child.toType();
- const len = ptr.len.toValue().toUnsignedInt(mod);
+ const elem_ty = Type.fromInterned(ptr_ty.child);
+ const len = Value.fromInterned(ptr.len).toUnsignedInt(mod);
if (elem_ty.eql(Type.u8, mod)) str: {
const max_len = @min(len, max_string_len);
var buf: [max_string_len]u8 = undefined;
@@ -326,8 +326,8 @@ pub fn print(
@intFromEnum(decl_val),
});
return print(.{
- .ty = ip.typeOf(decl_val).toType(),
- .val = decl_val.toValue(),
+ .ty = Type.fromInterned(ip.typeOf(decl_val)),
+ .val = Value.fromInterned(decl_val),
}, writer, level - 1, mod);
},
.mut_decl => |mut_decl| {
@@ -340,38 +340,38 @@ pub fn print(
},
.comptime_field => |field_val_ip| {
return print(.{
- .ty = ip.typeOf(field_val_ip).toType(),
- .val = field_val_ip.toValue(),
+ .ty = Type.fromInterned(ip.typeOf(field_val_ip)),
+ .val = Value.fromInterned(field_val_ip),
}, writer, level - 1, mod);
},
.int => unreachable,
.eu_payload => |eu_ip| {
try writer.writeAll("(payload of ");
try print(.{
- .ty = ip.typeOf(eu_ip).toType(),
- .val = eu_ip.toValue(),
+ .ty = Type.fromInterned(ip.typeOf(eu_ip)),
+ .val = Value.fromInterned(eu_ip),
}, writer, level - 1, mod);
try writer.writeAll(")");
},
.opt_payload => |opt_ip| {
try print(.{
- .ty = ip.typeOf(opt_ip).toType(),
- .val = opt_ip.toValue(),
+ .ty = Type.fromInterned(ip.typeOf(opt_ip)),
+ .val = Value.fromInterned(opt_ip),
}, writer, level - 1, mod);
try writer.writeAll(".?");
},
.elem => |elem| {
try print(.{
- .ty = ip.typeOf(elem.base).toType(),
- .val = elem.base.toValue(),
+ .ty = Type.fromInterned(ip.typeOf(elem.base)),
+ .val = Value.fromInterned(elem.base),
}, writer, level - 1, mod);
try writer.print("[{}]", .{elem.index});
},
.field => |field| {
- const ptr_container_ty = ip.typeOf(field.base).toType();
+ const ptr_container_ty = Type.fromInterned(ip.typeOf(field.base));
try print(.{
.ty = ptr_container_ty,
- .val = field.base.toValue(),
+ .val = Value.fromInterned(field.base),
}, writer, level - 1, mod);
const container_ty = ptr_container_ty.childType(mod);
@@ -404,7 +404,7 @@ pub fn print(
.opt => |opt| switch (opt.val) {
.none => return writer.writeAll("null"),
else => |payload| {
- val = payload.toValue();
+ val = Value.fromInterned(payload);
ty = ty.optionalChild(mod);
},
},
@@ -426,20 +426,20 @@ pub fn print(
if (un.tag != .none) {
try print(.{
.ty = ty.unionTagTypeHypothetical(mod),
- .val = un.tag.toValue(),
+ .val = Value.fromInterned(un.tag),
}, writer, level - 1, mod);
try writer.writeAll(" = ");
- const field_ty = ty.unionFieldType(un.tag.toValue(), mod).?;
+ const field_ty = ty.unionFieldType(Value.fromInterned(un.tag), mod).?;
try print(.{
.ty = field_ty,
- .val = un.val.toValue(),
+ .val = Value.fromInterned(un.val),
}, writer, level - 1, mod);
} else {
try writer.writeAll("(unknown tag) = ");
const backing_ty = try ty.unionBackingType(mod);
try print(.{
.ty = backing_ty,
- .val = un.val.toValue(),
+ .val = Value.fromInterned(un.val),
}, writer, level - 1, mod);
}
} else try writer.writeAll("...");
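Throughout TypedValue.zig, every recursive print call passes level - 1, so rendering depth is bounded; once the budget reaches zero an aggregate is elided rather than recursed into:

    // level == 0 render of any aggregate or slice
    .{ ... }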
src/value.zig
@@ -196,13 +196,13 @@ pub const Value = struct {
.enum_literal => |enum_literal| enum_literal,
.ptr => |ptr| switch (ptr.len) {
.none => unreachable,
- else => try arrayToIpString(val, ptr.len.toValue().toUnsignedInt(mod), mod),
+ else => try arrayToIpString(val, Value.fromInterned(ptr.len).toUnsignedInt(mod), mod),
},
.aggregate => |aggregate| switch (aggregate.storage) {
.bytes => |bytes| try ip.getOrPutString(mod.gpa, bytes),
.elems => try arrayToIpString(val, ty.arrayLen(mod), mod),
.repeated_elem => |elem| {
- const byte = @as(u8, @intCast(elem.toValue().toUnsignedInt(mod)));
+ const byte = @as(u8, @intCast(Value.fromInterned(elem).toUnsignedInt(mod)));
const len = @as(usize, @intCast(ty.arrayLen(mod)));
try ip.string_bytes.appendNTimes(mod.gpa, byte, len);
return ip.getOrPutTrailingString(mod.gpa, len);
@@ -219,13 +219,13 @@ pub const Value = struct {
.enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)),
.ptr => |ptr| switch (ptr.len) {
.none => unreachable,
- else => try arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod),
+ else => try arrayToAllocatedBytes(val, Value.fromInterned(ptr.len).toUnsignedInt(mod), allocator, mod),
},
.aggregate => |aggregate| switch (aggregate.storage) {
.bytes => |bytes| try allocator.dupe(u8, bytes),
.elems => try arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod),
.repeated_elem => |elem| {
- const byte = @as(u8, @intCast(elem.toValue().toUnsignedInt(mod)));
+ const byte = @as(u8, @intCast(Value.fromInterned(elem).toUnsignedInt(mod)));
const result = try allocator.alloc(u8, @as(usize, @intCast(ty.arrayLen(mod))));
@memset(result, byte);
return result;
@@ -316,8 +316,8 @@ pub const Value = struct {
for (new_elems, old_elems, 0..) |*new_elem, old_elem, field_i|
new_elem.* = try old_elem.intern(switch (ty_key) {
.struct_type => ty.structFieldType(field_i, mod),
- .anon_struct_type => |info| info.types.get(ip)[field_i].toType(),
- inline .array_type, .vector_type => |info| info.child.toType(),
+ .anon_struct_type => |info| Type.fromInterned(info.types.get(ip)[field_i]),
+ inline .array_type, .vector_type => |info| Type.fromInterned(info.child),
else => unreachable,
}, mod);
return mod.intern(.{ .aggregate = .{
@@ -378,42 +378,50 @@ pub const Value = struct {
.error_union => |error_union| switch (error_union.val) {
.err_name => val,
- .payload => |payload| Tag.eu_payload.create(arena, payload.toValue()),
+ .payload => |payload| Tag.eu_payload.create(arena, Value.fromInterned(payload)),
},
.ptr => |ptr| switch (ptr.len) {
.none => val,
else => |len| Tag.slice.create(arena, .{
.ptr = val.slicePtr(mod),
- .len = len.toValue(),
+ .len = Value.fromInterned(len),
}),
},
.opt => |opt| switch (opt.val) {
.none => val,
- else => |payload| Tag.opt_payload.create(arena, payload.toValue()),
+ else => |payload| Tag.opt_payload.create(arena, Value.fromInterned(payload)),
},
.aggregate => |aggregate| switch (aggregate.storage) {
.bytes => |bytes| Tag.bytes.create(arena, try arena.dupe(u8, bytes)),
.elems => |old_elems| {
const new_elems = try arena.alloc(Value, old_elems.len);
- for (new_elems, old_elems) |*new_elem, old_elem| new_elem.* = old_elem.toValue();
+ for (new_elems, old_elems) |*new_elem, old_elem| new_elem.* = Value.fromInterned(old_elem);
return Tag.aggregate.create(arena, new_elems);
},
- .repeated_elem => |elem| Tag.repeated.create(arena, elem.toValue()),
+ .repeated_elem => |elem| Tag.repeated.create(arena, Value.fromInterned(elem)),
},
.un => |un| Tag.@"union".create(arena, .{
-            // toValue asserts that the value cannot be .none which is valid on unions.
+            // fromInterned asserts that the value cannot be .none, which is valid on unions.
- .tag = if (un.tag == .none) null else un.tag.toValue(),
- .val = un.val.toValue(),
+ .tag = if (un.tag == .none) null else Value.fromInterned(un.tag),
+ .val = Value.fromInterned(un.val),
}),
.memoized_call => unreachable,
};
}
+ pub fn fromInterned(i: InternPool.Index) Value {
+ assert(i != .none);
+ return .{
+ .ip_index = i,
+ .legacy = undefined,
+ };
+ }
+
pub fn toIntern(val: Value) InternPool.Index {
assert(val.ip_index != .none);
return val.ip_index;
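Value.fromInterned mirrors Type.fromInterned but must also stub out the legacy payload field with undefined. The assert makes the .none guard explicit at call sites; the .un prong above keeps its null check precisely because an untagged union's tag index is .none:

    .tag = if (un.tag == .none) null else Value.fromInterned(un.tag),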
@@ -421,7 +429,7 @@ pub const Value = struct {
/// Asserts that the value is representable as a type.
pub fn toType(self: Value) Type {
- return self.toIntern().toType();
+ return Type.fromInterned(self.toIntern());
}
pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
@@ -435,13 +443,13 @@ pub const Value = struct {
// Assume it is already an integer and return it directly.
.simple_type, .int_type => val,
.enum_type => |enum_type| if (enum_type.values.len != 0)
- enum_type.values.get(ip)[field_index].toValue()
+ Value.fromInterned(enum_type.values.get(ip)[field_index])
else // Field index and integer values are the same.
- mod.intValue(enum_type.tag_ty.toType(), field_index),
+ mod.intValue(Type.fromInterned(enum_type.tag_ty), field_index),
else => unreachable,
};
},
- .enum_type => |enum_type| try mod.getCoerced(val, enum_type.tag_ty.toType()),
+ .enum_type => |enum_type| try mod.getCoerced(val, Type.fromInterned(enum_type.tag_ty)),
else => unreachable,
};
}
@@ -466,16 +474,16 @@ pub const Value = struct {
.int => |int| switch (int.storage) {
.u64, .i64, .big_int => int.storage.toBigInt(space),
.lazy_align, .lazy_size => |ty| {
- if (opt_sema) |sema| try sema.resolveTypeLayout(ty.toType());
+ if (opt_sema) |sema| try sema.resolveTypeLayout(Type.fromInterned(ty));
const x = switch (int.storage) {
else => unreachable,
- .lazy_align => ty.toType().abiAlignment(mod).toByteUnits(0),
- .lazy_size => ty.toType().abiSize(mod),
+ .lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0),
+ .lazy_size => Type.fromInterned(ty).abiSize(mod),
};
return BigIntMutable.init(&space.limbs, x).toConst();
},
},
- .enum_tag => |enum_tag| enum_tag.int.toValue().toBigIntAdvanced(space, mod, opt_sema),
+ .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, mod, opt_sema),
.opt, .ptr => BigIntMutable.init(
&space.limbs,
(try val.getUnsignedIntAdvanced(mod, opt_sema)).?,
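
For lazy_align/lazy_size, the resolved scalar is wrapped into a big-int constant via BigIntMutable.init(&space.limbs, x).toConst(). A self-contained sketch of that last step using std.math.big directly; the 4096 is an arbitrary stand-in for a resolved ABI size:

const std = @import("std");
const big = std.math.big;

test "resolved lazy scalar becomes a big-int constant" {
    var limbs: [big.int.calcLimbLen(@as(u64, 4096))]big.Limb = undefined;
    const c = big.int.Mutable.init(&limbs, @as(u64, 4096)).toConst();
    try std.testing.expect(c.orderAgainstScalar(4096) == .eq);
}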
@@ -530,24 +538,24 @@ pub const Value = struct {
.u64 => |x| x,
.i64 => |x| std.math.cast(u64, x),
.lazy_align => |ty| if (opt_sema) |sema|
- (try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0)
+ (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0)
else
- ty.toType().abiAlignment(mod).toByteUnits(0),
+ Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0),
.lazy_size => |ty| if (opt_sema) |sema|
- (try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar
+ (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar
else
- ty.toType().abiSize(mod),
+ Type.fromInterned(ty).abiSize(mod),
},
.ptr => |ptr| switch (ptr.addr) {
- .int => |int| int.toValue().getUnsignedIntAdvanced(mod, opt_sema),
+ .int => |int| Value.fromInterned(int).getUnsignedIntAdvanced(mod, opt_sema),
.elem => |elem| {
- const base_addr = (try elem.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)) orelse return null;
- const elem_ty = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod);
+ const base_addr = (try Value.fromInterned(elem.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null;
+ const elem_ty = Type.fromInterned(mod.intern_pool.typeOf(elem.base)).elemType2(mod);
return base_addr + elem.index * elem_ty.abiSize(mod);
},
.field => |field| {
- const base_addr = (try field.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)) orelse return null;
- const struct_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod);
+ const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null;
+ const struct_ty = Type.fromInterned(mod.intern_pool.typeOf(field.base)).childType(mod);
if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty);
return base_addr + struct_ty.structFieldOffset(@as(usize, @intCast(field.index)), mod);
},
@@ -555,7 +563,7 @@ pub const Value = struct {
},
.opt => |opt| switch (opt.val) {
.none => 0,
- else => |payload| payload.toValue().getUnsignedIntAdvanced(mod, opt_sema),
+ else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(mod, opt_sema),
},
else => null,
},
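
The .elem and .field cases above reduce pointer values to plain address arithmetic: element i lives at base + i * abiSize(elem), and a field at base + structFieldOffset(i). As a freestanding sketch with arbitrary numbers:

const std = @import("std");

fn elemAddr(base: u64, index: u64, elem_abi_size: u64) u64 {
    return base + index * elem_abi_size;
}

test "element address arithmetic" {
    // element index 3 of a u64 array based at 0x1000
    try std.testing.expectEqual(@as(u64, 0x1018), elemAddr(0x1000, 3, 8));
}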
@@ -577,8 +585,8 @@ pub const Value = struct {
.big_int => |big_int| big_int.to(i64) catch unreachable,
.i64 => |x| x,
.u64 => |x| @intCast(x),
- .lazy_align => |ty| @intCast(ty.toType().abiAlignment(mod).toByteUnits(0)),
- .lazy_size => |ty| @intCast(ty.toType().abiSize(mod)),
+ .lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0)),
+ .lazy_size => |ty| @intCast(Type.fromInterned(ty).abiSize(mod)),
},
else => unreachable,
},
@@ -598,8 +606,8 @@ pub const Value = struct {
while (true) switch (mod.intern_pool.indexToKey(check.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.decl, .mut_decl, .comptime_field, .anon_decl => return true,
- .eu_payload, .opt_payload => |base| check = base.toValue(),
- .elem, .field => |base_index| check = base_index.base.toValue(),
+ .eu_payload, .opt_payload => |base| check = Value.fromInterned(base),
+ .elem, .field => |base_index| check = Value.fromInterned(base_index.base),
.int => return false,
},
else => return false,
@@ -680,16 +688,16 @@ pub const Value = struct {
.repeated => val.castTag(.repeated).?.data,
else => unreachable,
},
- else => switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
+ else => Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| {
buffer[off] = bytes[i];
continue;
},
.elems => |elems| elems[i],
.repeated_elem => |elem| elem,
- }.toValue(),
+ }),
};
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
try writeToMemory(field_val, field_ty, mod, buffer[off..]);
},
.Packed => {
@@ -720,7 +728,7 @@ pub const Value = struct {
if (val.unionTag(mod)) |union_tag| {
const union_obj = mod.typeToUnion(ty).?;
const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?;
- const field_type = union_obj.field_types.get(&mod.intern_pool)[field_index].toType();
+ const field_type = Type.fromInterned(union_obj.field_types.get(&mod.intern_pool)[field_index]);
const field_val = try val.fieldValue(mod, field_index);
const byte_count = @as(usize, @intCast(field_type.abiSize(mod)));
return writeToMemory(field_val, field_type, mod, buffer[0..byte_count]);
@@ -829,14 +837,14 @@ pub const Value = struct {
var bits: u16 = 0;
const storage = ip.indexToKey(val.toIntern()).aggregate.storage;
for (0..struct_type.field_types.len) |i| {
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
const field_bits: u16 = @intCast(field_ty.bitSize(mod));
const field_val = switch (storage) {
.bytes => unreachable,
.elems => |elems| elems[i],
.repeated_elem => |elem| elem,
};
- try field_val.toValue().writeToPackedMemory(field_ty, mod, buffer, bit_offset + bits);
+ try Value.fromInterned(field_val).writeToPackedMemory(field_ty, mod, buffer, bit_offset + bits);
bits += field_bits;
}
},
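
The packed-struct writer above advances a running bit offset by each field's bit size. The same layout falls out of std.mem.writePackedInt; the u4/u7/u5 field mix below is an arbitrary illustration:

const std = @import("std");

test "packed fields written at increasing bit offsets" {
    const endian = @import("builtin").cpu.arch.endian();
    var buf = [_]u8{0} ** 2;
    var bits: u16 = 0;
    inline for (.{ .{ u4, 0xA }, .{ u7, 0x5F }, .{ u5, 0x11 } }) |field| {
        std.mem.writePackedInt(field[0], &buf, bits, field[1], endian);
        bits += @bitSizeOf(field[0]);
    }
    // u4 + u7 + u5 fill the 16-bit buffer exactly; read one field back out.
    try std.testing.expectEqual(@as(u16, 16), bits);
    try std.testing.expectEqual(@as(u7, 0x5F), std.mem.readPackedInt(u7, &buf, 4, endian));
}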
@@ -847,7 +855,7 @@ pub const Value = struct {
.Packed => {
if (val.unionTag(mod)) |union_tag| {
const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?;
- const field_type = union_obj.field_types.get(ip)[field_index].toType();
+ const field_type = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
const field_val = try val.fieldValue(mod, field_index);
return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
} else {
@@ -934,7 +942,7 @@ pub const Value = struct {
return mod.getCoerced(try mod.intValue_big(int_ty, bigint.toConst()), ty);
}
},
- .Float => return (try mod.intern(.{ .float = .{
+ .Float => return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = ty.toIntern(),
.storage = switch (ty.floatBits(target)) {
16 => .{ .f16 = @as(f16, @bitCast(std.mem.readInt(u16, buffer[0..2], endian))) },
@@ -944,7 +952,7 @@ pub const Value = struct {
128 => .{ .f128 = @as(f128, @bitCast(std.mem.readInt(u128, buffer[0..16], endian))) },
else => unreachable,
},
- } })).toValue(),
+ } }))),
.Array => {
const elem_ty = ty.childType(mod);
const elem_size = elem_ty.abiSize(mod);
@@ -954,10 +962,10 @@ pub const Value = struct {
elem.* = try (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).intern(elem_ty, mod);
offset += @as(usize, @intCast(elem_size));
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
- } })).toValue();
+ } })));
},
.Vector => {
// We use byte_count instead of abi_size here, so that any padding bytes
@@ -973,15 +981,15 @@ pub const Value = struct {
const field_types = struct_type.field_types;
const field_vals = try arena.alloc(InternPool.Index, field_types.len);
for (field_vals, 0..) |*field_val, i| {
- const field_ty = field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(field_types.get(ip)[i]);
const off: usize = @intCast(ty.structFieldOffset(i, mod));
const sz: usize = @intCast(field_ty.abiSize(mod));
field_val.* = try (try readFromMemory(field_ty, mod, buffer[off..(off + sz)], arena)).intern(field_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
- } })).toValue();
+ } })));
},
.Packed => {
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
@@ -996,10 +1004,10 @@ pub const Value = struct {
const index = (int << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits));
const name = mod.global_error_set.keys()[@intCast(index)];
- return (try mod.intern(.{ .err = .{
+ return Value.fromInterned((try mod.intern(.{ .err = .{
.ty = ty.toIntern(),
.name = name,
- } })).toValue();
+ } })));
},
.Union => switch (ty.containerLayout(mod)) {
.Auto => return error.IllDefinedMemoryLayout,
@@ -1007,11 +1015,11 @@ pub const Value = struct {
const union_size = ty.abiSize(mod);
const array_ty = try mod.arrayType(.{ .len = union_size, .child = .u8_type });
const val = try (try readFromMemory(array_ty, mod, buffer, arena)).intern(array_ty, mod);
- return (try mod.intern(.{ .un = .{
+ return Value.fromInterned((try mod.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = .none,
.val = val,
- } })).toValue();
+ } })));
},
.Packed => {
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
@@ -1021,23 +1029,23 @@ pub const Value = struct {
.Pointer => {
assert(!ty.isSlice(mod)); // No well defined layout.
const int_val = try readFromMemory(Type.usize, mod, buffer, arena);
- return (try mod.intern(.{ .ptr = .{
+ return Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = ty.toIntern(),
.addr = .{ .int = int_val.toIntern() },
- } })).toValue();
+ } })));
},
.Optional => {
assert(ty.isPtrLikeOptional(mod));
const child_ty = ty.optionalChild(mod);
const child_val = try readFromMemory(child_ty, mod, buffer, arena);
- return (try mod.intern(.{ .opt = .{
+ return Value.fromInterned((try mod.intern(.{ .opt = .{
.ty = ty.toIntern(),
.val = switch (child_val.orderAgainstZero(mod)) {
.lt => unreachable,
.eq => .none,
.gt => child_val.toIntern(),
},
- } })).toValue();
+ } })));
},
else => return error.Unimplemented,
}
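
The .Float branch of readFromMemory is the readInt-then-@bitCast idiom; in isolation:

const std = @import("std");

test "float recovered from raw bytes" {
    const endian = @import("builtin").cpu.arch.endian();
    var buf: [4]u8 = undefined;
    std.mem.writeInt(u32, &buf, @bitCast(@as(f32, 1.5)), endian);
    const x: f32 = @bitCast(std.mem.readInt(u32, &buf, endian));
    try std.testing.expectEqual(@as(f32, 1.5), x);
}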
@@ -1108,7 +1116,7 @@ pub const Value = struct {
bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness);
return mod.intValue_big(ty, bigint.toConst());
},
- .Float => return (try mod.intern(.{ .float = .{
+ .Float => return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = ty.toIntern(),
.storage = switch (ty.floatBits(target)) {
16 => .{ .f16 = @as(f16, @bitCast(std.mem.readPackedInt(u16, buffer, bit_offset, endian))) },
@@ -1118,7 +1126,7 @@ pub const Value = struct {
128 => .{ .f128 = @as(f128, @bitCast(std.mem.readPackedInt(u128, buffer, bit_offset, endian))) },
else => unreachable,
},
- } })).toValue(),
+ } }))),
.Vector => {
const elem_ty = ty.childType(mod);
const elems = try arena.alloc(InternPool.Index, @as(usize, @intCast(ty.arrayLen(mod))));
@@ -1131,10 +1139,10 @@ pub const Value = struct {
elems[tgt_elem_i] = try (try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena)).intern(elem_ty, mod);
bits += elem_bit_size;
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = elems },
- } })).toValue();
+ } })));
},
.Struct => {
// Sema is supposed to have emitted a compile error already for Auto layout structs,
@@ -1143,26 +1151,26 @@ pub const Value = struct {
var bits: u16 = 0;
const field_vals = try arena.alloc(InternPool.Index, struct_type.field_types.len);
for (field_vals, 0..) |*field_val, i| {
- const field_ty = struct_type.field_types.get(ip)[i].toType();
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
const field_bits: u16 = @intCast(field_ty.bitSize(mod));
field_val.* = try (try readFromPackedMemory(field_ty, mod, buffer, bit_offset + bits, arena)).intern(field_ty, mod);
bits += field_bits;
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
- } })).toValue();
+ } })));
},
.Union => switch (ty.containerLayout(mod)) {
.Auto, .Extern => unreachable, // Handled by non-packed readFromMemory
.Packed => {
const backing_ty = try ty.unionBackingType(mod);
const val = (try readFromPackedMemory(backing_ty, mod, buffer, bit_offset, arena)).toIntern();
- return (try mod.intern(.{ .un = .{
+ return Value.fromInterned((try mod.intern(.{ .un = .{
.ty = ty.toIntern(),
.tag = .none,
.val = val,
- } })).toValue();
+ } })));
},
},
.Pointer => {
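
Packed unions round-trip through their backing integer with the tag left .none, as in the .Packed case above. The underlying layout can be seen with an ordinary packed union; the types below are illustrative:

const std = @import("std");

test "packed union reinterprets through its backing bits" {
    const U = packed union {
        x: u16,
        halves: packed struct { lo: u8, hi: u8 },
    };
    const u: U = .{ .x = 0x1234 };
    try std.testing.expectEqual(@as(u8, 0x34), u.halves.lo);
    try std.testing.expectEqual(@as(u8, 0x12), u.halves.hi);
}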
@@ -1189,8 +1197,8 @@ pub const Value = struct {
}
return @floatFromInt(x);
},
- .lazy_align => |ty| @floatFromInt(ty.toType().abiAlignment(mod).toByteUnits(0)),
- .lazy_size => |ty| @floatFromInt(ty.toType().abiSize(mod)),
+ .lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0)),
+ .lazy_size => |ty| @floatFromInt(Type.fromInterned(ty).abiSize(mod)),
},
.float => |float| switch (float.storage) {
inline else => |x| @floatCast(x),
@@ -1283,7 +1291,7 @@ pub const Value = struct {
/// Caller can find out by equality checking the result against the operand.
pub fn floatCast(self: Value, dest_ty: Type, mod: *Module) !Value {
const target = mod.getTarget();
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = dest_ty.toIntern(),
.storage = switch (dest_ty.floatBits(target)) {
16 => .{ .f16 = self.toFloat(f16, mod) },
@@ -1293,7 +1301,7 @@ pub const Value = struct {
128 => .{ .f128 = self.toFloat(f128, mod) },
else => unreachable,
},
- } })).toValue();
+ } })));
}
/// Asserts the value is a float
@@ -1321,8 +1329,8 @@ pub const Value = struct {
else => switch (mod.intern_pool.indexToKey(lhs.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.decl, .mut_decl, .comptime_field => .gt,
- .int => |int| int.toValue().orderAgainstZeroAdvanced(mod, opt_sema),
- .elem => |elem| switch (try elem.base.toValue().orderAgainstZeroAdvanced(mod, opt_sema)) {
+ .int => |int| Value.fromInterned(int).orderAgainstZeroAdvanced(mod, opt_sema),
+ .elem => |elem| switch (try Value.fromInterned(elem.base).orderAgainstZeroAdvanced(mod, opt_sema)) {
.lt => unreachable,
.gt => .gt,
.eq => if (elem.index == 0) .eq else .gt,
@@ -1333,7 +1341,7 @@ pub const Value = struct {
.big_int => |big_int| big_int.orderAgainstScalar(0),
inline .u64, .i64 => |x| std.math.order(x, 0),
.lazy_align => .gt, // alignment is never 0
- .lazy_size => |ty| return if (ty.toType().hasRuntimeBitsAdvanced(
+ .lazy_size => |ty| return if (Type.fromInterned(ty).hasRuntimeBitsAdvanced(
mod,
false,
if (opt_sema) |sema| .{ .sema = sema } else .eager,
@@ -1342,7 +1350,7 @@ pub const Value = struct {
else => |e| return e,
}) .gt else .eq,
},
- .enum_tag => |enum_tag| enum_tag.int.toValue().orderAgainstZeroAdvanced(mod, opt_sema),
+ .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(mod, opt_sema),
.float => |float| switch (float.storage) {
inline else => |x| std.math.order(x, 0),
},
@@ -1494,9 +1502,9 @@ pub const Value = struct {
if (!std.math.order(byte, 0).compare(op)) break false;
} else true,
.elems => |elems| for (elems) |elem| {
- if (!try elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false;
+ if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false;
} else true,
- .repeated_elem => |elem| elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema),
+ .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema),
},
else => {},
}
@@ -1513,8 +1521,8 @@ pub const Value = struct {
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.mut_decl, .comptime_field => true,
- .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isComptimeMutablePtr(mod),
- .elem, .field => |base_index| base_index.base.toValue().isComptimeMutablePtr(mod),
+ .eu_payload, .opt_payload => |base_ptr| Value.fromInterned(base_ptr).isComptimeMutablePtr(mod),
+ .elem, .field => |base_index| Value.fromInterned(base_index.base).isComptimeMutablePtr(mod),
else => false,
},
else => false,
@@ -1526,20 +1534,20 @@ pub const Value = struct {
else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
.error_union => |error_union| switch (error_union.val) {
.err_name => false,
- .payload => |payload| payload.toValue().canMutateComptimeVarState(mod),
+ .payload => |payload| Value.fromInterned(payload).canMutateComptimeVarState(mod),
},
.ptr => |ptr| switch (ptr.addr) {
- .eu_payload, .opt_payload => |base| base.toValue().canMutateComptimeVarState(mod),
+ .eu_payload, .opt_payload => |base| Value.fromInterned(base).canMutateComptimeVarState(mod),
else => false,
},
.opt => |opt| switch (opt.val) {
.none => false,
- else => |payload| payload.toValue().canMutateComptimeVarState(mod),
+ else => |payload| Value.fromInterned(payload).canMutateComptimeVarState(mod),
},
.aggregate => |aggregate| for (aggregate.storage.values()) |elem| {
- if (elem.toValue().canMutateComptimeVarState(mod)) break true;
+ if (Value.fromInterned(elem).canMutateComptimeVarState(mod)) break true;
} else false,
- .un => |un| un.val.toValue().canMutateComptimeVarState(mod),
+ .un => |un| Value.fromInterned(un.val).canMutateComptimeVarState(mod),
else => false,
},
};
@@ -1566,7 +1574,7 @@ pub const Value = struct {
pub const slice_len_index = 1;
pub fn slicePtr(val: Value, mod: *Module) Value {
- return mod.intern_pool.slicePtr(val.toIntern()).toValue();
+ return Value.fromInterned(mod.intern_pool.slicePtr(val.toIntern()));
}
pub fn sliceLen(val: Value, mod: *Module) u64 {
@@ -1583,7 +1591,7 @@ pub const Value = struct {
.array_type => |array_type| array_type.len,
else => 1,
},
- else => ptr.len.toValue().toUnsignedInt(mod),
+ else => Value.fromInterned(ptr.len).toUnsignedInt(mod),
};
}
@@ -1604,37 +1612,36 @@ pub const Value = struct {
else => null,
},
else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
- .undef => |ty| (try mod.intern(.{
- .undef = ty.toType().elemType2(mod).toIntern(),
- })).toValue(),
+ .undef => |ty| Value.fromInterned((try mod.intern(.{
+ .undef = Type.fromInterned(ty).elemType2(mod).toIntern(),
+ }))),
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| mod.declPtr(decl).val.maybeElemValue(mod, index),
- .anon_decl => |anon_decl| anon_decl.val.toValue().maybeElemValue(mod, index),
- .mut_decl => |mut_decl| (try mod.declPtr(mut_decl.decl).internValue(mod))
- .toValue().maybeElemValue(mod, index),
+ .anon_decl => |anon_decl| Value.fromInterned(anon_decl.val).maybeElemValue(mod, index),
+ .mut_decl => |mut_decl| Value.fromInterned((try mod.declPtr(mut_decl.decl).internValue(mod))).maybeElemValue(mod, index),
.int, .eu_payload => null,
- .opt_payload => |base| base.toValue().maybeElemValue(mod, index),
- .comptime_field => |field_val| field_val.toValue().maybeElemValue(mod, index),
- .elem => |elem| elem.base.toValue().maybeElemValue(mod, index + @as(usize, @intCast(elem.index))),
- .field => |field| if (field.base.toValue().pointerDecl(mod)) |decl_index| {
+ .opt_payload => |base| Value.fromInterned(base).maybeElemValue(mod, index),
+ .comptime_field => |field_val| Value.fromInterned(field_val).maybeElemValue(mod, index),
+ .elem => |elem| Value.fromInterned(elem.base).maybeElemValue(mod, index + @as(usize, @intCast(elem.index))),
+ .field => |field| if (Value.fromInterned(field.base).pointerDecl(mod)) |decl_index| {
const base_decl = mod.declPtr(decl_index);
const field_val = try base_decl.val.fieldValue(mod, @as(usize, @intCast(field.index)));
return field_val.maybeElemValue(mod, index);
} else null,
},
- .opt => |opt| opt.val.toValue().maybeElemValue(mod, index),
+ .opt => |opt| Value.fromInterned(opt.val).maybeElemValue(mod, index),
.aggregate => |aggregate| {
const len = mod.intern_pool.aggregateTypeLen(aggregate.ty);
- if (index < len) return switch (aggregate.storage) {
+ if (index < len) return Value.fromInterned(switch (aggregate.storage) {
.bytes => |bytes| try mod.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = bytes[index] },
} }),
.elems => |elems| elems[index],
.repeated_elem => |elem| elem,
- }.toValue();
+ });
assert(index == len);
- return mod.intern_pool.indexToKey(aggregate.ty).array_type.sentinel.toValue();
+ return Value.fromInterned(mod.intern_pool.indexToKey(aggregate.ty).array_type.sentinel);
},
else => null,
},
@@ -1681,15 +1688,15 @@ pub const Value = struct {
else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end),
- .mut_decl => |mut_decl| (try mod.declPtr(mut_decl.decl).internValue(mod)).toValue()
+ .mut_decl => |mut_decl| Value.fromInterned((try mod.declPtr(mut_decl.decl).internValue(mod)))
.sliceArray(mod, arena, start, end),
- .comptime_field => |comptime_field| comptime_field.toValue()
+ .comptime_field => |comptime_field| Value.fromInterned(comptime_field)
.sliceArray(mod, arena, start, end),
- .elem => |elem| elem.base.toValue()
+ .elem => |elem| Value.fromInterned(elem.base)
.sliceArray(mod, arena, start + @as(usize, @intCast(elem.index)), end + @as(usize, @intCast(elem.index))),
else => unreachable,
},
- .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{
+ .aggregate => |aggregate| Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) {
.array_type => |array_type| try mod.arrayType(.{
.len = @as(u32, @intCast(end - start)),
@@ -1707,7 +1714,7 @@ pub const Value = struct {
.elems => .{ .elems = try arena.dupe(InternPool.Index, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.elems[start..end]) },
.repeated_elem => |elem| .{ .repeated_elem = elem },
},
- } })).toValue(),
+ } }))),
else => unreachable,
},
};
@@ -1728,19 +1735,19 @@ pub const Value = struct {
else => unreachable,
},
else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
- .undef => |ty| (try mod.intern(.{
- .undef = ty.toType().structFieldType(index, mod).toIntern(),
- })).toValue(),
- .aggregate => |aggregate| switch (aggregate.storage) {
+ .undef => |ty| Value.fromInterned((try mod.intern(.{
+ .undef = Type.fromInterned(ty).structFieldType(index, mod).toIntern(),
+ }))),
+ .aggregate => |aggregate| Value.fromInterned(switch (aggregate.storage) {
.bytes => |bytes| try mod.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = bytes[index] },
} }),
.elems => |elems| elems[index],
.repeated_elem => |elem| elem,
- }.toValue(),
+ }),
// TODO assert the tag is correct
- .un => |un| un.val.toValue(),
+ .un => |un| Value.fromInterned(un.val),
else => unreachable,
},
};
@@ -1750,7 +1757,7 @@ pub const Value = struct {
if (val.ip_index == .none) return val.castTag(.@"union").?.data.tag;
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.undef, .enum_tag => val,
- .un => |un| if (un.tag != .none) un.tag.toValue() else return null,
+ .un => |un| if (un.tag != .none) Value.fromInterned(un.tag) else return null,
else => unreachable,
};
}
@@ -1758,7 +1765,7 @@ pub const Value = struct {
pub fn unionValue(val: Value, mod: *Module) Value {
if (val.ip_index == .none) return val.castTag(.@"union").?.data.val;
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
- .un => |un| un.val.toValue(),
+ .un => |un| Value.fromInterned(un.val),
else => unreachable,
};
}
@@ -1774,14 +1781,14 @@ pub const Value = struct {
const ptr_val = switch (mod.intern_pool.indexToKey(val.toIntern())) {
.ptr => |ptr| ptr: {
switch (ptr.addr) {
- .elem => |elem| if (mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).eql(elem_ty, mod))
- return (try mod.intern(.{ .ptr = .{
+ .elem => |elem| if (Type.fromInterned(mod.intern_pool.typeOf(elem.base)).elemType2(mod).eql(elem_ty, mod))
+ return Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = elem_ptr_ty.toIntern(),
.addr = .{ .elem = .{
.base = elem.base,
.index = elem.index + index,
} },
- } })).toValue(),
+ } }))),
else => {},
}
break :ptr switch (ptr.len) {
@@ -1794,13 +1801,13 @@ pub const Value = struct {
var ptr_ty_key = mod.intern_pool.indexToKey(elem_ptr_ty.toIntern()).ptr_type;
assert(ptr_ty_key.flags.size != .Slice);
ptr_ty_key.flags.size = .Many;
- return (try mod.intern(.{ .ptr = .{
+ return Value.fromInterned((try mod.intern(.{ .ptr = .{
.ty = elem_ptr_ty.toIntern(),
.addr = .{ .elem = .{
.base = (try mod.getCoerced(ptr_val, try mod.ptrType(ptr_ty_key))).toIntern(),
.index = index,
} },
- } })).toValue();
+ } })));
}
pub fn isUndef(val: Value, mod: *Module) bool {
@@ -1824,13 +1831,13 @@ pub const Value = struct {
.simple_value => |v| v == .undefined,
.ptr => |ptr| switch (ptr.len) {
.none => false,
- else => for (0..@as(usize, @intCast(ptr.len.toValue().toUnsignedInt(mod)))) |index| {
+ else => for (0..@as(usize, @intCast(Value.fromInterned(ptr.len).toUnsignedInt(mod)))) |index| {
if (try (try val.elemValue(mod, index)).anyUndef(mod)) break true;
} else false,
},
.aggregate => |aggregate| for (0..aggregate.storage.values().len) |i| {
const elem = mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.values()[i];
- if (try anyUndef(elem.toValue(), mod)) break true;
+ if (try anyUndef(Value.fromInterned(elem), mod)) break true;
} else false,
else => false,
},
@@ -1889,7 +1896,7 @@ pub const Value = struct {
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.opt => |opt| switch (opt.val) {
.none => null,
- else => |payload| payload.toValue(),
+ else => |payload| Value.fromInterned(payload),
},
.ptr => val,
else => unreachable,
@@ -1923,10 +1930,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try floatFromIntScalar(elem_val, scalar_ty, mod, opt_sema)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return floatFromIntScalar(val, float_ty, mod, opt_sema);
}
@@ -1941,14 +1948,14 @@ pub const Value = struct {
},
inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod),
.lazy_align => |ty| if (opt_sema) |sema| {
- return floatFromIntInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0), float_ty, mod);
+ return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0), float_ty, mod);
} else {
- return floatFromIntInner(ty.toType().abiAlignment(mod).toByteUnits(0), float_ty, mod);
+ return floatFromIntInner(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0), float_ty, mod);
},
.lazy_size => |ty| if (opt_sema) |sema| {
- return floatFromIntInner((try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
+ return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
} else {
- return floatFromIntInner(ty.toType().abiSize(mod), float_ty, mod);
+ return floatFromIntInner(Type.fromInterned(ty).abiSize(mod), float_ty, mod);
},
},
else => unreachable,
@@ -1965,10 +1972,10 @@ pub const Value = struct {
128 => .{ .f128 = @floatFromInt(x) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = dest_ty.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
fn calcLimbLenFloat(scalar: anytype) usize {
@@ -2001,10 +2008,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return intAddSatScalar(lhs, rhs, ty, arena, mod);
}
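
intAddSat shows the vectorization template repeated throughout this file: vectors allocate a result buffer, apply the *Scalar op per element, then re-intern the results as an aggregate. The same data flow, minus the intern pool — the +| operator matches the saturating semantics intAddSatScalar implements, for a fixed-width i8:

const std = @import("std");

fn addSat(a: i8, b: i8) i8 {
    return a +| b;
}

test "vector op maps the scalar op per element" {
    const lhs = [4]i8{ 100, -100, 1, 2 };
    const rhs = [4]i8{ 100, -28, 1, 2 };
    var result: [4]i8 = undefined;
    for (&result, lhs, rhs) |*r, a, b| r.* = addSat(a, b);
    try std.testing.expectEqual([4]i8{ 127, -128, 2, 4 }, result);
}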
@@ -2051,10 +2058,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return intSubSatScalar(lhs, rhs, ty, arena, mod);
}
@@ -2105,14 +2112,14 @@ pub const Value = struct {
scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
}
return OverflowArithmeticResult{
- .overflow_bit = (try mod.intern(.{ .aggregate = .{
+ .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
.storage = .{ .elems = overflowed_data },
- } })).toValue(),
- .wrapped_result = (try mod.intern(.{ .aggregate = .{
+ } }))),
+ .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue(),
+ } }))),
};
}
return intMulWithOverflowScalar(lhs, rhs, ty, arena, mod);
@@ -2169,10 +2176,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try numberMulWrapScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return numberMulWrapScalar(lhs, rhs, ty, arena, mod);
}
@@ -2215,10 +2222,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try intMulSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return intMulSatScalar(lhs, rhs, ty, arena, mod);
}
@@ -2291,17 +2298,17 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try bitwiseNotScalar(elem_val, scalar_ty, arena, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return bitwiseNotScalar(val, ty, arena, mod);
}
/// The operand must be an integer; handles undefined.
pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (val.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+ if (val.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() })));
if (ty.toIntern() == .bool_type) return makeBool(!val.toBool());
const info = ty.intInfo(mod);
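
bitwiseNotScalar flips exactly info.bits bits, which for a fixed-width integer is just ~ at that width:

const std = @import("std");

test "bitwise not at the type's bit width" {
    const x: u5 = 0b10110;
    try std.testing.expectEqual(@as(u5, 0b01001), ~x);
}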
@@ -2334,17 +2341,17 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try bitwiseAndScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return bitwiseAndScalar(lhs, rhs, ty, allocator, mod);
}
/// operands must be integers; handles undefined.
pub fn bitwiseAndScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() })));
if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() and rhs.toBool());
// TODO is this a performance issue? maybe we should try the operation without
@@ -2373,17 +2380,17 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try bitwiseNandScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return bitwiseNandScalar(lhs, rhs, ty, arena, mod);
}
/// operands must be integers; handles undefined.
pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() })));
if (ty.toIntern() == .bool_type) return makeBool(!(lhs.toBool() and rhs.toBool()));
const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod);
@@ -2401,17 +2408,17 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try bitwiseOrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return bitwiseOrScalar(lhs, rhs, ty, allocator, mod);
}
/// operands must be integers; handles undefined.
pub fn bitwiseOrScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() })));
if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() or rhs.toBool());
// TODO is this a performance issue? maybe we should try the operation without
@@ -2439,17 +2446,17 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return bitwiseXorScalar(lhs, rhs, ty, allocator, mod);
}
/// operands must be integers; handles undefined.
pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
- if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+ if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() })));
if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() != rhs.toBool());
// TODO is this a performance issue? maybe we should try the operation without
@@ -2505,10 +2512,10 @@ pub const Value = struct {
};
scalar.* = try val.intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return intDivScalar(lhs, rhs, ty, allocator, mod);
}
@@ -2553,10 +2560,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return intDivFloorScalar(lhs, rhs, ty, allocator, mod);
}
@@ -2595,10 +2602,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return intModScalar(lhs, rhs, ty, allocator, mod);
}
@@ -2669,10 +2676,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatRemScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return floatRemScalar(lhs, rhs, float_type, mod);
}
@@ -2687,10 +2694,10 @@ pub const Value = struct {
128 => .{ .f128 = @rem(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
@@ -2702,10 +2709,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatModScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return floatModScalar(lhs, rhs, float_type, mod);
}
@@ -2720,10 +2727,10 @@ pub const Value = struct {
128 => .{ .f128 = @mod(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
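
floatRemScalar uses @rem (result takes the dividend's sign) while floatModScalar uses @mod (divisor's sign); the difference only shows up with mixed signs:

const std = @import("std");

test "@rem vs @mod on negative dividends" {
    try std.testing.expectEqual(@as(f64, -1.0), @rem(@as(f64, -7.0), 3.0));
    try std.testing.expectEqual(@as(f64, 2.0), @mod(@as(f64, -7.0), 3.0));
}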
/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting
@@ -2763,10 +2770,10 @@ pub const Value = struct {
};
scalar.* = try val.intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return intMulScalar(lhs, rhs, ty, allocator, mod);
}
@@ -2805,10 +2812,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return intTruncScalar(val, ty, allocator, signedness, bits, mod);
}
@@ -2830,10 +2837,10 @@ pub const Value = struct {
const bits_elem = try bits.elemValue(mod, i);
scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @as(u16, @intCast(bits_elem.toUnsignedInt(mod))), mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return intTruncScalar(val, ty, allocator, signedness, @as(u16, @intCast(bits.toUnsignedInt(mod))), mod);
}
@@ -2870,10 +2877,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return shlScalar(lhs, rhs, ty, allocator, mod);
}
@@ -2922,14 +2929,14 @@ pub const Value = struct {
scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
}
return OverflowArithmeticResult{
- .overflow_bit = (try mod.intern(.{ .aggregate = .{
+ .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
.storage = .{ .elems = overflowed_data },
- } })).toValue(),
- .wrapped_result = (try mod.intern(.{ .aggregate = .{
+ } }))),
+ .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue(),
+ } }))),
};
}
return shlWithOverflowScalar(lhs, rhs, ty, allocator, mod);
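
OverflowArithmeticResult pairs a wrapped result with an overflow bit (vectorized above as a u1 vector); for a single scalar this is exactly what the @shlWithOverflow builtin returns:

const std = @import("std");

test "shift left with overflow bit" {
    const r = @shlWithOverflow(@as(u8, 0b1100_0000), 2);
    try std.testing.expectEqual(@as(u8, 0), r[0]); // wrapped result
    try std.testing.expectEqual(@as(u1, 1), r[1]); // nonzero bits were shifted out
}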
@@ -2981,10 +2988,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try shlSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return shlSatScalar(lhs, rhs, ty, arena, mod);
}
@@ -3031,10 +3038,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try shlTruncScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return shlTruncScalar(lhs, rhs, ty, arena, mod);
}
@@ -3061,10 +3068,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return shrScalar(lhs, rhs, ty, allocator, mod);
}
@@ -3113,10 +3120,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try floatNegScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return floatNegScalar(val, float_type, mod);
}
@@ -3135,10 +3142,10 @@ pub const Value = struct {
128 => .{ .f128 = -val.toFloat(f128, mod) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn floatAdd(
@@ -3156,10 +3163,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatAddScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return floatAddScalar(lhs, rhs, float_type, mod);
}
@@ -3179,10 +3186,10 @@ pub const Value = struct {
128 => .{ .f128 = lhs.toFloat(f128, mod) + rhs.toFloat(f128, mod) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn floatSub(
@@ -3200,10 +3207,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatSubScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return floatSubScalar(lhs, rhs, float_type, mod);
}
@@ -3223,10 +3230,10 @@ pub const Value = struct {
128 => .{ .f128 = lhs.toFloat(f128, mod) - rhs.toFloat(f128, mod) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn floatDiv(
@@ -3244,10 +3251,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatDivScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return floatDivScalar(lhs, rhs, float_type, mod);
}
@@ -3267,10 +3274,10 @@ pub const Value = struct {
128 => .{ .f128 = lhs.toFloat(f128, mod) / rhs.toFloat(f128, mod) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn floatDivFloor(
@@ -3288,10 +3295,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return floatDivFloorScalar(lhs, rhs, float_type, mod);
}
@@ -3311,10 +3318,10 @@ pub const Value = struct {
128 => .{ .f128 = @divFloor(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn floatDivTrunc(
@@ -3332,10 +3339,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatDivTruncScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return floatDivTruncScalar(lhs, rhs, float_type, mod);
}
@@ -3355,10 +3362,10 @@ pub const Value = struct {
128 => .{ .f128 = @divTrunc(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn floatMul(
@@ -3376,10 +3383,10 @@ pub const Value = struct {
const rhs_elem = try rhs.elemValue(mod, i);
scalar.* = try (try floatMulScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return floatMulScalar(lhs, rhs, float_type, mod);
}
@@ -3399,10 +3406,10 @@ pub const Value = struct {
128 => .{ .f128 = lhs.toFloat(f128, mod) * rhs.toFloat(f128, mod) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
@@ -3413,10 +3420,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try sqrtScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return sqrtScalar(val, float_type, mod);
}
@@ -3431,10 +3438,10 @@ pub const Value = struct {
128 => .{ .f128 = @sqrt(val.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
@@ -3445,10 +3452,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try sinScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return sinScalar(val, float_type, mod);
}
@@ -3463,10 +3470,10 @@ pub const Value = struct {
128 => .{ .f128 = @sin(val.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
@@ -3477,10 +3484,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try cosScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return cosScalar(val, float_type, mod);
}
@@ -3495,10 +3502,10 @@ pub const Value = struct {
128 => .{ .f128 = @cos(val.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
@@ -3509,10 +3516,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try tanScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return tanScalar(val, float_type, mod);
}
@@ -3527,10 +3534,10 @@ pub const Value = struct {
128 => .{ .f128 = @tan(val.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
@@ -3541,10 +3548,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try expScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return expScalar(val, float_type, mod);
}
@@ -3559,10 +3566,10 @@ pub const Value = struct {
128 => .{ .f128 = @exp(val.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
@@ -3573,10 +3580,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try exp2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return exp2Scalar(val, float_type, mod);
}
@@ -3591,10 +3598,10 @@ pub const Value = struct {
128 => .{ .f128 = @exp2(val.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
@@ -3605,10 +3612,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try logScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return logScalar(val, float_type, mod);
}
@@ -3623,10 +3630,10 @@ pub const Value = struct {
128 => .{ .f128 = @log(val.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
@@ -3637,10 +3644,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try log2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return log2Scalar(val, float_type, mod);
}
@@ -3655,10 +3662,10 @@ pub const Value = struct {
128 => .{ .f128 = @log2(val.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
@@ -3669,10 +3676,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try log10Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return log10Scalar(val, float_type, mod);
}
@@ -3687,10 +3694,10 @@ pub const Value = struct {
128 => .{ .f128 = @log10(val.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn abs(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
@@ -3701,10 +3708,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try absScalar(elem_val, scalar_ty, mod, arena)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return absScalar(val, ty, mod, arena);
}
@@ -3735,10 +3742,10 @@ pub const Value = struct {
128 => .{ .f128 = @abs(val.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = ty.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
},
else => unreachable,
}
@@ -3752,10 +3759,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try floorScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return floorScalar(val, float_type, mod);
}
@@ -3770,10 +3777,10 @@ pub const Value = struct {
128 => .{ .f128 = @floor(val.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
@@ -3784,10 +3791,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try ceilScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return ceilScalar(val, float_type, mod);
}
@@ -3802,10 +3809,10 @@ pub const Value = struct {
128 => .{ .f128 = @ceil(val.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
@@ -3816,10 +3823,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try roundScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return roundScalar(val, float_type, mod);
}
@@ -3834,10 +3841,10 @@ pub const Value = struct {
128 => .{ .f128 = @round(val.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
@@ -3848,10 +3855,10 @@ pub const Value = struct {
const elem_val = try val.elemValue(mod, i);
scalar.* = try (try truncScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return truncScalar(val, float_type, mod);
}
@@ -3866,10 +3873,10 @@ pub const Value = struct {
128 => .{ .f128 = @trunc(val.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
pub fn mulAdd(
@@ -3889,10 +3896,10 @@ pub const Value = struct {
const addend_elem = try addend.elemValue(mod, i);
scalar.* = try (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, mod)).intern(scalar_ty, mod);
}
- return (try mod.intern(.{ .aggregate = .{
+ return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = float_type.toIntern(),
.storage = .{ .elems = result_data },
- } })).toValue();
+ } })));
}
return mulAddScalar(float_type, mulend1, mulend2, addend, mod);
}
@@ -3913,10 +3920,10 @@ pub const Value = struct {
128 => .{ .f128 = @mulAdd(f128, mulend1.toFloat(f128, mod), mulend2.toFloat(f128, mod), addend.toFloat(f128, mod)) },
else => unreachable,
};
- return (try mod.intern(.{ .float = .{
+ return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = float_type.toIntern(),
.storage = storage,
- } })).toValue();
+ } })));
}
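
mulAddScalar lowers to the @mulAdd builtin per float width: one fused a * b + c with a single rounding step:

const std = @import("std");

test "fused multiply-add" {
    try std.testing.expectEqual(@as(f32, 7.0), @mulAdd(f32, 2.0, 3.0, 1.0));
}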
/// If the value is represented in-memory as a series of bytes that all
@@ -3958,8 +3965,8 @@ pub const Value = struct {
const ty = mod.intern_pool.typeOf(val.toIntern());
if (ty == .comptime_int_type) return null;
return .{
- try ty.toType().minInt(mod, ty.toType()),
- try ty.toType().maxInt(mod, ty.toType()),
+ try Type.fromInterned(ty).minInt(mod, Type.fromInterned(ty)),
+ try Type.fromInterned(ty).maxInt(mod, Type.fromInterned(ty)),
};
}
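
The pair returned above is the (minInt, maxInt) of the value's type, with null for comptime_int, which has no bounds. For a fixed-width integer those bounds are the familiar ones, here via the analogous std.math helpers:

const std = @import("std");

test "bounds of a fixed-width integer" {
    try std.testing.expectEqual(@as(i8, -128), std.math.minInt(i8));
    try std.testing.expectEqual(@as(i8, 127), std.math.maxInt(i8));
}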