Commit f2c716187c
Changed files (8)
src/codegen/llvm.zig
@@ -2329,7 +2329,7 @@ pub const Object = struct {
try param_di_types.append(try o.lowerDebugType(ptr_ty, .full));
}
- for (fn_info.param_types) |param_ty| {
+ for (mod.typeToFunc(ty).?.param_types) |param_ty| {
if (!param_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
if (isByRef(param_ty.toType(), mod)) {
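Note on the hunk above: the loop now re-fetches the function information via `mod.typeToFunc(ty).?` instead of reusing the `fn_info` captured earlier, presumably because the earlier result points into InternPool storage and may have been invalidated by interning done in between (a pattern repeated throughout this commit). A minimal self-contained analogy of that hazard, not compiler code:

    const std = @import("std");

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa.deinit();
        var list = std.ArrayList(u32).init(gpa.allocator());
        defer list.deinit();

        try list.appendSlice(&[_]u32{ 1, 2, 3 });
        const stale = list.items; // slice into the list's current buffer
        try list.appendNTimes(0, 1024); // may reallocate; `stale` is no longer safe to read
        _ = stale; // re-fetch `list.items` after mutation instead
        std.debug.print("{d}\n", .{list.items.len});
    }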
src/codegen.zig
@@ -185,7 +185,7 @@ pub fn generateSymbol(
const mod = bin_file.options.module.?;
var typed_value = arg_tv;
- switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) {
+ switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) {
.runtime_value => |rt| typed_value.val = rt.val.toValue(),
else => {},
}
@@ -204,7 +204,7 @@ pub fn generateSymbol(
return .ok;
}
- switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) {
+ switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) {
.int_type,
.ptr_type,
.array_type,
@@ -282,7 +282,7 @@ pub fn generateSymbol(
switch (try generateSymbol(bin_file, src_loc, .{
.ty = payload_ty,
.val = switch (error_union.val) {
- .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }),
+ .err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }),
.payload => |payload| payload,
}.toValue(),
}, code, debug_output, reloc_info)) {
@@ -315,7 +315,7 @@ pub fn generateSymbol(
const int_tag_ty = try typed_value.ty.intTagType(mod);
switch (try generateSymbol(bin_file, src_loc, .{
.ty = int_tag_ty,
- .val = (try mod.intern_pool.getCoerced(mod.gpa, enum_tag.int, int_tag_ty.ip_index)).toValue(),
+ .val = try mod.getCoerced(enum_tag.int.toValue(), int_tag_ty),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
@@ -337,7 +337,7 @@ pub fn generateSymbol(
switch (try lowerParentPtr(bin_file, src_loc, switch (ptr.len) {
.none => typed_value.val,
else => typed_value.val.slicePtr(mod),
- }.ip_index, code, debug_output, reloc_info)) {
+ }.toIntern(), code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return .{ .fail = em },
}
@@ -372,7 +372,7 @@ pub fn generateSymbol(
} else {
const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1;
if (payload_type.hasRuntimeBits(mod)) {
- const value = payload_val orelse (try mod.intern(.{ .undef = payload_type.ip_index })).toValue();
+ const value = payload_val orelse (try mod.intern(.{ .undef = payload_type.toIntern() })).toValue();
switch (try generateSymbol(bin_file, src_loc, .{
.ty = payload_type,
.val = value,
@@ -385,7 +385,7 @@ pub fn generateSymbol(
try code.writer().writeByteNTimes(0, padding);
}
},
- .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(typed_value.ty.ip_index)) {
+ .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(typed_value.ty.toIntern())) {
.array_type => |array_type| {
var index: u64 = 0;
while (index < array_type.len) : (index += 1) {
@@ -850,7 +850,7 @@ pub fn genTypedValue(
) CodeGenError!GenResult {
const mod = bin_file.options.module.?;
var typed_value = arg_tv;
- switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) {
+ switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) {
.runtime_value => |rt| typed_value.val = rt.val.toValue(),
else => {},
}
@@ -866,7 +866,7 @@ pub fn genTypedValue(
const target = bin_file.options.target;
const ptr_bits = target.ptrBitWidth();
- if (!typed_value.ty.isSlice(mod)) switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) {
+ if (!typed_value.ty.isSlice(mod)) switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| return genDeclRef(bin_file, src_loc, typed_value, decl),
.mut_decl => |mut_decl| return genDeclRef(bin_file, src_loc, typed_value, mut_decl.decl),
@@ -879,12 +879,12 @@ pub fn genTypedValue(
.Void => return GenResult.mcv(.none),
.Pointer => switch (typed_value.ty.ptrSize(mod)) {
.Slice => {},
- else => switch (typed_value.val.ip_index) {
+ else => switch (typed_value.val.toIntern()) {
.null_value => {
return GenResult.mcv(.{ .immediate = 0 });
},
.none => {},
- else => switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) {
+ else => switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) {
.int => {
return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(mod) });
},
@@ -916,7 +916,7 @@ pub fn genTypedValue(
}
},
.Enum => {
- const enum_tag = mod.intern_pool.indexToKey(typed_value.val.ip_index).enum_tag;
+ const enum_tag = mod.intern_pool.indexToKey(typed_value.val.toIntern()).enum_tag;
const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int);
return genTypedValue(bin_file, src_loc, .{
.ty = int_tag_ty.toType(),
@@ -924,7 +924,9 @@ pub fn genTypedValue(
}, owner_decl_index);
},
.ErrorSet => {
- const err_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(typed_value.val.ip_index).err.name);
+ const err_name = mod.intern_pool.stringToSlice(
+ mod.intern_pool.indexToKey(typed_value.val.toIntern()).err.name,
+ );
const global_error_set = mod.global_error_set;
const error_index = global_error_set.get(err_name).?;
return GenResult.mcv(.{ .immediate = error_index });
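Across this file the raw field read `val.ip_index` is replaced by `val.toIntern()`. The accessor is assumed to be a checked wrapper over the same field; a minimal sketch of that assumed shape (names mirror the diff, the implementation is a guess, not copied from the compiler):

    const std = @import("std");
    const assert = std.debug.assert;

    const Index = enum(u32) { none = std.math.maxInt(u32), _ };

    const Value = struct {
        ip_index: Index,

        fn toIntern(val: Value) Index {
            assert(val.ip_index != .none); // only fully interned values are allowed here
            return val.ip_index;
        }
    };

    pub fn main() void {
        const v = Value{ .ip_index = @intToEnum(Index, 42) };
        std.debug.print("{d}\n", .{@enumToInt(v.toIntern())});
    }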
src/InternPool.zig
@@ -650,8 +650,14 @@ pub const Key = union(enum) {
.enum_type => |enum_type| std.hash.autoHash(hasher, enum_type.decl),
.variable => |variable| std.hash.autoHash(hasher, variable.decl),
- .extern_func => |extern_func| std.hash.autoHash(hasher, extern_func.decl),
- .func => |func| std.hash.autoHash(hasher, func.index),
+ .extern_func => |extern_func| {
+ std.hash.autoHash(hasher, extern_func.ty);
+ std.hash.autoHash(hasher, extern_func.decl);
+ },
+ .func => |func| {
+ std.hash.autoHash(hasher, func.ty);
+ std.hash.autoHash(hasher, func.index);
+ },
.int => |int| {
// Canonicalize all integers by converting them to BigIntConst.
@@ -854,11 +860,11 @@ pub const Key = union(enum) {
},
.extern_func => |a_info| {
const b_info = b.extern_func;
- return a_info.decl == b_info.decl;
+ return a_info.ty == b_info.ty and a_info.decl == b_info.decl;
},
.func => |a_info| {
const b_info = b.func;
- return a_info.index == b_info.index;
+ return a_info.ty == b_info.ty and a_info.index == b_info.index;
},
.ptr => |a_info| {
@@ -1340,8 +1346,8 @@ pub const Index = enum(u32) {
float_c_longdouble_f128: struct { data: *Float128 },
float_comptime_float: struct { data: *Float128 },
variable: struct { data: *Variable },
- extern_func: struct { data: void },
- func: struct { data: void },
+ extern_func: struct { data: *Key.ExternFunc },
+ func: struct { data: *Key.Func },
only_possible_value: DataIsIndex,
union_value: struct { data: *Key.Union },
bytes: struct { data: *Bytes },
@@ -3216,6 +3222,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.opt => |opt| {
assert(ip.isOptionalType(opt.ty));
+ assert(opt.val == .none or ip.indexToKey(opt.ty).opt_type == ip.typeOf(opt.val));
ip.items.appendAssumeCapacity(if (opt.val == .none) .{
.tag = .opt_null,
.data = @enumToInt(opt.ty),
@@ -3226,23 +3233,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
},
.int => |int| b: {
- switch (int.ty) {
- .usize_type,
- .isize_type,
- .c_char_type,
- .c_short_type,
- .c_ushort_type,
- .c_int_type,
- .c_uint_type,
- .c_long_type,
- .c_ulong_type,
- .c_longlong_type,
- .c_ulonglong_type,
- .c_longdouble_type,
- .comptime_int_type,
- => {},
- else => assert(ip.indexToKey(int.ty) == .int_type),
- }
+ assert(ip.isIntegerType(int.ty));
switch (int.storage) {
.u64, .i64, .big_int => {},
.lazy_align, .lazy_size => |lazy_ty| {
@@ -3425,13 +3416,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}
},
- .err => |err| ip.items.appendAssumeCapacity(.{
- .tag = .error_set_error,
- .data = try ip.addExtra(gpa, err),
- }),
+ .err => |err| {
+ assert(ip.isErrorSetType(err.ty));
+ ip.items.appendAssumeCapacity(.{
+ .tag = .error_set_error,
+ .data = try ip.addExtra(gpa, err),
+ });
+ },
.error_union => |error_union| {
- assert(ip.indexToKey(error_union.ty) == .error_union_type);
+ assert(ip.isErrorUnionType(error_union.ty));
ip.items.appendAssumeCapacity(switch (error_union.val) {
.err_name => |err_name| .{
.tag = .error_union_error,
@@ -3456,9 +3450,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}),
.enum_tag => |enum_tag| {
- assert(enum_tag.ty != .none);
- assert(enum_tag.int != .none);
-
+ assert(ip.isEnumType(enum_tag.ty));
+ assert(ip.indexToKey(enum_tag.int) == .int);
ip.items.appendAssumeCapacity(.{
.tag = .enum_tag,
.data = try ip.addExtra(gpa, enum_tag),
@@ -4191,69 +4184,93 @@ pub fn sliceLen(ip: InternPool, i: Index) Index {
/// * identity coercion
/// * int <=> int
/// * int <=> enum
+/// * enum_literal => enum
/// * ptr <=> ptr
/// * null_value => opt
/// * payload => opt
/// * error set <=> error set
+/// * error union <=> error union
+/// * error set => error union
+/// * payload => error union
+/// * fn <=> fn
pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
const old_ty = ip.typeOf(val);
if (old_ty == new_ty) return val;
switch (ip.indexToKey(val)) {
- .int => |int| switch (ip.indexToKey(new_ty)) {
- .simple_type => |simple_type| switch (simple_type) {
- .usize,
- .isize,
- .c_char,
- .c_short,
- .c_ushort,
- .c_int,
- .c_uint,
- .c_long,
- .c_ulong,
- .c_longlong,
- .c_ulonglong,
- .comptime_int,
- => return getCoercedInts(ip, gpa, int, new_ty),
- else => {},
- },
- .int_type => return getCoercedInts(ip, gpa, int, new_ty),
- .enum_type => return ip.get(gpa, .{ .enum_tag = .{
+ .extern_func => |extern_func| if (ip.isFunctionType(new_ty))
+ return ip.get(gpa, .{ .extern_func = .{
+ .ty = new_ty,
+ .decl = extern_func.decl,
+ .lib_name = extern_func.lib_name,
+ } }),
+ .func => |func| if (ip.isFunctionType(new_ty))
+ return ip.get(gpa, .{ .func = .{
+ .ty = new_ty,
+ .index = func.index,
+ } }),
+ .int => |int| if (ip.isIntegerType(new_ty))
+ return getCoercedInts(ip, gpa, int, new_ty)
+ else if (ip.isEnumType(new_ty))
+ return ip.get(gpa, .{ .enum_tag = .{
.ty = new_ty,
.int = val,
} }),
+ .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty))
+ return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty),
+ .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) {
+ .enum_type => |enum_type| {
+ const index = enum_type.nameIndex(ip, enum_literal).?;
+ return ip.get(gpa, .{ .enum_tag = .{
+ .ty = new_ty,
+ .int = if (enum_type.values.len != 0)
+ enum_type.values[index]
+ else
+ try ip.get(gpa, .{ .int = .{
+ .ty = enum_type.tag_ty,
+ .storage = .{ .u64 = index },
+ } }),
+ } });
+ },
else => {},
},
- .enum_tag => |enum_tag| {
- // Assume new_ty is an integer type.
- return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty);
- },
- .ptr => |ptr| switch (ip.indexToKey(new_ty)) {
- .ptr_type => return ip.get(gpa, .{ .ptr = .{
+ .ptr => |ptr| if (ip.isPointerType(new_ty))
+ return ip.get(gpa, .{ .ptr = .{
.ty = new_ty,
.addr = ptr.addr,
+ .len = ptr.len,
} }),
- else => {},
- },
- .err => |err| switch (ip.indexToKey(new_ty)) {
- .error_set_type, .inferred_error_set_type => return ip.get(gpa, .{ .err = .{
+ .err => |err| if (ip.isErrorSetType(new_ty))
+ return ip.get(gpa, .{ .err = .{
.ty = new_ty,
.name = err.name,
+ } })
+ else if (ip.isErrorUnionType(new_ty))
+ return ip.get(gpa, .{ .error_union = .{
+ .ty = new_ty,
+ .val = .{ .err_name = err.name },
+ } }),
+ .error_union => |error_union| if (ip.isErrorUnionType(new_ty))
+ return ip.get(gpa, .{ .error_union = .{
+ .ty = new_ty,
+ .val = error_union.val,
} }),
- else => {},
- },
else => {},
}
switch (ip.indexToKey(new_ty)) {
- .opt_type => |child_ty| switch (val) {
+ .opt_type => |child_type| switch (val) {
.null_value => return ip.get(gpa, .{ .opt = .{
.ty = new_ty,
.val = .none,
} }),
else => return ip.get(gpa, .{ .opt = .{
.ty = new_ty,
- .val = try ip.getCoerced(gpa, val, child_ty),
+ .val = try ip.getCoerced(gpa, val, child_type),
} }),
},
+ .error_union_type => |error_union_type| return ip.get(gpa, .{ .error_union = .{
+ .ty = new_ty,
+ .val = .{ .payload = try ip.getCoerced(gpa, val, error_union_type.payload_type) },
+ } }),
else => {},
}
if (std.debug.runtime_safety) {
@@ -4271,33 +4288,24 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind
// big_int storage, the limbs would be invalidated before they are read.
// Here we pre-reserve the limbs to ensure that the logic in `addInt` will
// not use an invalidated limbs pointer.
- switch (int.storage) {
- .u64 => |x| return ip.get(gpa, .{ .int = .{
- .ty = new_ty,
- .storage = .{ .u64 = x },
- } }),
- .i64 => |x| return ip.get(gpa, .{ .int = .{
- .ty = new_ty,
- .storage = .{ .i64 = x },
- } }),
-
- .big_int => |big_int| {
+ const new_storage: Key.Int.Storage = switch (int.storage) {
+ .u64, .i64, .lazy_align, .lazy_size => int.storage,
+ .big_int => |big_int| storage: {
const positive = big_int.positive;
const limbs = ip.limbsSliceToIndex(big_int.limbs);
// This line invalidates the limbs slice, but the indexes computed in the
// previous line are still correct.
try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len);
- return ip.get(gpa, .{ .int = .{
- .ty = new_ty,
- .storage = .{ .big_int = .{
- .limbs = ip.limbsIndexToSlice(limbs),
- .positive = positive,
- } },
- } });
+ break :storage .{ .big_int = .{
+ .limbs = ip.limbsIndexToSlice(limbs),
+ .positive = positive,
+ } };
},
-
- .lazy_align, .lazy_size => unreachable,
- }
+ };
+ return ip.get(gpa, .{ .int = .{
+ .ty = new_ty,
+ .storage = new_storage,
+ } });
}
pub fn indexToStructType(ip: InternPool, val: Index) Module.Struct.OptionalIndex {
@@ -4345,25 +4353,68 @@ pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.Inferre
return @intToEnum(Module.Fn.InferredErrorSet.Index, datas[@enumToInt(val)]).toOptional();
}
-pub fn isPointerType(ip: InternPool, ty: Index) bool {
- const tags = ip.items.items(.tag);
- if (ty == .none) return false;
- return switch (tags[@enumToInt(ty)]) {
- .type_pointer, .type_slice => true,
- else => false,
+/// includes .comptime_int_type
+pub fn isIntegerType(ip: InternPool, ty: Index) bool {
+ return switch (ty) {
+ .usize_type,
+ .isize_type,
+ .c_char_type,
+ .c_short_type,
+ .c_ushort_type,
+ .c_int_type,
+ .c_uint_type,
+ .c_long_type,
+ .c_ulong_type,
+ .c_longlong_type,
+ .c_ulonglong_type,
+ .c_longdouble_type,
+ .comptime_int_type,
+ => true,
+ else => ip.indexToKey(ty) == .int_type,
+ };
+}
+
+/// does not include .enum_literal_type
+pub fn isEnumType(ip: InternPool, ty: Index) bool {
+ return switch (ty) {
+ .atomic_order_type,
+ .atomic_rmw_op_type,
+ .calling_convention_type,
+ .address_space_type,
+ .float_mode_type,
+ .reduce_op_type,
+ .call_modifier_type,
+ => true,
+ else => ip.indexToKey(ty) == .enum_type,
};
}
+pub fn isFunctionType(ip: InternPool, ty: Index) bool {
+ return ip.indexToKey(ty) == .func_type;
+}
+
+pub fn isPointerType(ip: InternPool, ty: Index) bool {
+ return ip.indexToKey(ty) == .ptr_type;
+}
+
pub fn isOptionalType(ip: InternPool, ty: Index) bool {
- const tags = ip.items.items(.tag);
- if (ty == .none) return false;
- return tags[@enumToInt(ty)] == .type_optional;
+ return ip.indexToKey(ty) == .opt_type;
+}
+
+/// includes .inferred_error_set_type
+pub fn isErrorSetType(ip: InternPool, ty: Index) bool {
+ return ty == .anyerror_type or switch (ip.indexToKey(ty)) {
+ .error_set_type, .inferred_error_set_type => true,
+ else => false,
+ };
}
pub fn isInferredErrorSetType(ip: InternPool, ty: Index) bool {
- const tags = ip.items.items(.tag);
- assert(ty != .none);
- return tags[@enumToInt(ty)] == .type_inferred_error_set;
+ return ip.indexToKey(ty) == .inferred_error_set_type;
+}
+
+pub fn isErrorUnionType(ip: InternPool, ty: Index) bool {
+ return ip.indexToKey(ty) == .error_union_type;
}
/// This is only legal because the initializer is not part of the hash.
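The doc comment on `getCoerced` above now enumerates the supported comptime coercions. At the language level, the newly added cases correspond to ordinary coercions such as these (illustrative user code, not from the commit):

    const std = @import("std");

    pub fn main() void {
        const E = enum(u8) { a, b };
        const e: E = .a; // enum_literal => enum
        const eu: anyerror!u32 = error.Oops; // error set => error union
        const eu2: anyerror!u32 = 5; // payload => error union
        std.debug.print("{any} {any} {any}\n", .{ e, eu, eu2 });
    }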
src/Module.zig
@@ -6699,6 +6699,11 @@ pub fn intern(mod: *Module, key: InternPool.Key) Allocator.Error!InternPool.Inde
return mod.intern_pool.get(mod.gpa, key);
}
+/// Shortcut for calling `intern_pool.getCoerced`.
+pub fn getCoerced(mod: *Module, val: Value, new_ty: Type) Allocator.Error!Value {
+ return (try mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), new_ty.toIntern())).toValue();
+}
+
pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type {
const i = try intern(mod, .{ .int_type = .{
.signedness = signedness,
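The new `Module.getCoerced` wrapper shortens many call sites; a typical before/after, taken from the `coerceExtra` hunk in src/Sema.zig below:

    // before
    const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.toIntern(), dest_ty.toIntern());
    return sema.addConstant(dest_ty, new_val.toValue());

    // after
    return sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty));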
src/Sema.zig
@@ -7821,7 +7821,6 @@ fn resolveGenericInstantiationType(
const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst);
const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable;
const new_func = new_func_val.getFunctionIndex(mod).unwrap().?;
- errdefer mod.destroyFunc(new_func);
assert(new_func == new_module_func);
arg_i = 0;
@@ -10793,7 +10792,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
check_range: {
if (operand_ty.zigTypeTag(mod) == .Int) {
const min_int = try operand_ty.minInt(mod);
- const max_int = try operand_ty.maxIntScalar(mod, Type.comptime_int);
+ const max_int = try operand_ty.maxInt(mod, operand_ty);
if (try range_set.spans(min_int, max_int, operand_ty)) {
if (special_prong == .@"else") {
return sema.fail(
@@ -11649,7 +11648,7 @@ const RangeSetUnhandledIterator = struct {
fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator {
const mod = sema.mod;
const min = try ty.minInt(mod);
- const max = try ty.maxIntScalar(mod, Type.comptime_int);
+ const max = try ty.maxInt(mod, ty);
return RangeSetUnhandledIterator{
.sema = sema,
@@ -15964,25 +15963,24 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
const args_val = v: {
- const args_slice_ty = try mod.ptrType(.{
- .elem_type = param_info_ty.toIntern(),
- .size = .Slice,
- .is_const = true,
+ const new_decl_ty = try mod.arrayType(.{
+ .len = param_vals.len,
+ .child = param_info_ty.toIntern(),
});
const new_decl = try params_anon_decl.finish(
- try mod.arrayType(.{
- .len = param_vals.len,
- .child = param_info_ty.toIntern(),
- .sentinel = .none,
- }),
+ new_decl_ty,
(try mod.intern(.{ .aggregate = .{
- .ty = args_slice_ty.toIntern(),
+ .ty = new_decl_ty.toIntern(),
.storage = .{ .elems = param_vals },
} })).toValue(),
0, // default alignment
);
break :v try mod.intern(.{ .ptr = .{
- .ty = args_slice_ty.toIntern(),
+ .ty = (try mod.ptrType(.{
+ .elem_type = param_info_ty.toIntern(),
+ .size = .Slice,
+ .is_const = true,
+ })).toIntern(),
.addr = .{ .decl = new_decl },
.len = (try mod.intValue(Type.usize, param_vals.len)).toIntern(),
} });
@@ -16214,7 +16212,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{
.ty = type_info_ty.toIntern(),
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Vector))).toIntern(),
+ .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Optional))).toIntern(),
.val = try mod.intern(.{ .aggregate = .{
.ty = optional_field_ty.toIntern(),
.storage = .{ .elems = &field_values },
@@ -16258,7 +16256,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const new_decl_ty = try mod.arrayType(.{
.len = name.len,
.child = .u8_type,
- .sentinel = .zero_u8,
});
const new_decl = try anon_decl.finish(
new_decl_ty,
@@ -16269,8 +16266,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
0, // default alignment
);
break :v try mod.intern(.{ .ptr = .{
- .ty = .slice_const_u8_sentinel_0_type,
+ .ty = .slice_const_u8_type,
.addr = .{ .decl = new_decl },
+ .len = (try mod.intValue(Type.usize, name.len)).toIntern(),
} });
};
@@ -16386,7 +16384,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const new_decl_ty = try mod.arrayType(.{
.len = name.len,
.child = .u8_type,
- .sentinel = .zero_u8,
});
const new_decl = try anon_decl.finish(
new_decl_ty,
@@ -16397,8 +16394,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
0, // default alignment
);
break :v try mod.intern(.{ .ptr = .{
- .ty = .slice_const_u8_sentinel_0_type,
+ .ty = .slice_const_u8_type,
.addr = .{ .decl = new_decl },
+ .len = (try mod.intValue(Type.usize, name.len)).toIntern(),
} });
};
@@ -16521,7 +16519,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const new_decl_ty = try mod.arrayType(.{
.len = name.len,
.child = .u8_type,
- .sentinel = .zero_u8,
});
const new_decl = try anon_decl.finish(
new_decl_ty,
@@ -16532,8 +16529,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
0, // default alignment
);
break :v try mod.intern(.{ .ptr = .{
- .ty = .slice_const_u8_sentinel_0_type,
+ .ty = .slice_const_u8_type,
.addr = .{ .decl = new_decl },
+ .len = (try mod.intValue(Type.usize, name.len)).toIntern(),
} });
};
@@ -16663,12 +16661,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) {
.anon_struct_type => |tuple| {
struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len);
- for (
- tuple.types,
- tuple.values,
- struct_field_vals,
- 0..,
- ) |field_ty, field_val, *struct_field_val, i| {
+ for (struct_field_vals, 0..) |*struct_field_val, i| {
+ const anon_struct_type = mod.intern_pool.indexToKey(struct_ty.toIntern()).anon_struct_type;
+ const field_ty = anon_struct_type.types[i];
+ const field_val = anon_struct_type.values[i];
const name_val = v: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
@@ -16735,7 +16731,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const new_decl_ty = try mod.arrayType(.{
.len = name.len,
.child = .u8_type,
- .sentinel = .zero_u8,
});
const new_decl = try anon_decl.finish(
new_decl_ty,
@@ -16746,7 +16741,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
0, // default alignment
);
break :v try mod.intern(.{ .ptr = .{
- .ty = .slice_const_u8_sentinel_0_type,
+ .ty = .slice_const_u8_type,
.addr = .{ .decl = new_decl },
.len = (try mod.intValue(Type.usize, name.len)).toIntern(),
} });
@@ -16975,7 +16970,6 @@ fn typeInfoNamespaceDecls(
const new_decl_ty = try mod.arrayType(.{
.len = name.len,
.child = .u8_type,
- .sentinel = .zero_u8,
});
const new_decl = try anon_decl.finish(
new_decl_ty,
@@ -16986,7 +16980,7 @@ fn typeInfoNamespaceDecls(
0, // default alignment
);
break :v try mod.intern(.{ .ptr = .{
- .ty = .slice_const_u8_sentinel_0_type,
+ .ty = .slice_const_u8_type,
.addr = .{ .decl = new_decl },
.len = (try mod.intValue(Type.usize, name.len)).toIntern(),
} });
@@ -20404,7 +20398,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
.val = operand_val.toIntern(),
} })).toValue());
}
- return sema.addConstant(aligned_dest_ty, operand_val);
+ return sema.addConstant(aligned_dest_ty, try mod.getCoerced(operand_val, aligned_dest_ty));
}
try sema.requireRuntimeBlock(block, src, null);
@@ -22401,7 +22395,7 @@ fn analyzeMinMax(
if (std.debug.runtime_safety) {
assert(try sema.intFitsInType(val, refined_ty, null));
}
- cur_minmax = try sema.addConstant(refined_ty, val);
+ cur_minmax = try sema.addConstant(refined_ty, try mod.getCoerced(val, refined_ty));
}
break :refined refined_ty;
@@ -22459,8 +22453,8 @@ fn analyzeMinMax(
else => unreachable,
};
const max_val = switch (air_tag) {
- .min => try comptime_elem_ty.maxInt(mod, Type.comptime_int), // @min(ct, rt) <= ct
- .max => try unrefined_elem_ty.maxInt(mod, Type.comptime_int),
+ .min => try comptime_elem_ty.maxInt(mod, comptime_elem_ty), // @min(ct, rt) <= ct
+ .max => try unrefined_elem_ty.maxInt(mod, unrefined_elem_ty),
else => unreachable,
};
@@ -23356,11 +23350,14 @@ fn zirBuiltinExtern(
try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index);
try sema.ensureDeclAnalyzed(new_decl_index);
- const ref = try mod.intern(.{ .ptr = .{
- .ty = (try mod.singleConstPtrType(ty)).toIntern(),
+ return sema.addConstant(ty, try mod.getCoerced((try mod.intern(.{ .ptr = .{
+ .ty = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+ .ptr_type => ty.toIntern(),
+ .opt_type => |child_type| child_type,
+ else => unreachable,
+ },
.addr = .{ .decl = new_decl_index },
- } });
- return sema.addConstant(ty, ref.toValue());
+ } })).toValue(), ty));
}
fn zirWorkItem(
@@ -25887,13 +25884,7 @@ fn coerceExtra(
var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src);
if (in_memory_result == .ok) {
if (maybe_inst_val) |val| {
- if (val.ip_index == .none) {
- // Keep the comptime Value representation; take the new type.
- return sema.addConstant(dest_ty, val);
- } else {
- const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.toIntern(), dest_ty.toIntern());
- return sema.addConstant(dest_ty, new_val.toValue());
- }
+ return sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty));
}
try sema.requireRuntimeBlock(block, inst_src, null);
return block.addBitCast(dest_ty, inst);
@@ -26269,8 +26260,7 @@ fn coerceExtra(
if (!opts.report_err) return error.NotCoercible;
return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) });
}
- const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.toIntern(), dest_ty.toIntern());
- return try sema.addConstant(dest_ty, new_val.toValue());
+ return try sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty));
}
if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
if (!opts.report_err) return error.NotCoercible;
@@ -27222,68 +27212,84 @@ fn coerceInMemoryAllowedFns(
src_src: LazySrcLoc,
) !InMemoryCoercionResult {
const mod = sema.mod;
- const dest_info = mod.typeToFunc(dest_ty).?;
- const src_info = mod.typeToFunc(src_ty).?;
- if (dest_info.is_var_args != src_info.is_var_args) {
- return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args };
- }
+ {
+ const dest_info = mod.typeToFunc(dest_ty).?;
+ const src_info = mod.typeToFunc(src_ty).?;
- if (dest_info.is_generic != src_info.is_generic) {
- return InMemoryCoercionResult{ .fn_generic = dest_info.is_generic };
- }
+ if (dest_info.is_var_args != src_info.is_var_args) {
+ return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args };
+ }
- if (dest_info.cc != src_info.cc) {
- return InMemoryCoercionResult{ .fn_cc = .{
- .actual = src_info.cc,
- .wanted = dest_info.cc,
- } };
- }
+ if (dest_info.is_generic != src_info.is_generic) {
+ return InMemoryCoercionResult{ .fn_generic = dest_info.is_generic };
+ }
- if (src_info.return_type != .noreturn_type) {
- const rt = try sema.coerceInMemoryAllowed(block, dest_info.return_type.toType(), src_info.return_type.toType(), false, target, dest_src, src_src);
- if (rt != .ok) {
- return InMemoryCoercionResult{ .fn_return_type = .{
- .child = try rt.dupe(sema.arena),
- .actual = src_info.return_type.toType(),
- .wanted = dest_info.return_type.toType(),
+ if (dest_info.cc != src_info.cc) {
+ return InMemoryCoercionResult{ .fn_cc = .{
+ .actual = src_info.cc,
+ .wanted = dest_info.cc,
} };
}
- }
- if (dest_info.param_types.len != src_info.param_types.len) {
- return InMemoryCoercionResult{ .fn_param_count = .{
- .actual = src_info.param_types.len,
- .wanted = dest_info.param_types.len,
- } };
+ if (src_info.return_type != .noreturn_type) {
+ const dest_return_type = dest_info.return_type.toType();
+ const src_return_type = src_info.return_type.toType();
+ const rt = try sema.coerceInMemoryAllowed(block, dest_return_type, src_return_type, false, target, dest_src, src_src);
+ if (rt != .ok) {
+ return InMemoryCoercionResult{ .fn_return_type = .{
+ .child = try rt.dupe(sema.arena),
+                    .actual = src_return_type,
+                    .wanted = dest_return_type,
+ } };
+ }
+ }
}
- if (dest_info.noalias_bits != src_info.noalias_bits) {
- return InMemoryCoercionResult{ .fn_param_noalias = .{
- .actual = src_info.noalias_bits,
- .wanted = dest_info.noalias_bits,
- } };
- }
+ const params_len = params_len: {
+ const dest_info = mod.typeToFunc(dest_ty).?;
+ const src_info = mod.typeToFunc(src_ty).?;
+
+ if (dest_info.param_types.len != src_info.param_types.len) {
+ return InMemoryCoercionResult{ .fn_param_count = .{
+ .actual = src_info.param_types.len,
+ .wanted = dest_info.param_types.len,
+ } };
+ }
- for (dest_info.param_types, 0..) |dest_param_ty, i| {
- const src_param_ty = src_info.param_types[i].toType();
+ if (dest_info.noalias_bits != src_info.noalias_bits) {
+ return InMemoryCoercionResult{ .fn_param_noalias = .{
+ .actual = src_info.noalias_bits,
+ .wanted = dest_info.noalias_bits,
+ } };
+ }
+
+ break :params_len dest_info.param_types.len;
+ };
+
+ for (0..params_len) |param_i| {
+ const dest_info = mod.typeToFunc(dest_ty).?;
+ const src_info = mod.typeToFunc(src_ty).?;
- const i_small = @intCast(u5, i);
- if (dest_info.paramIsComptime(i_small) != src_info.paramIsComptime(i_small)) {
+ const dest_param_ty = dest_info.param_types[param_i].toType();
+ const src_param_ty = src_info.param_types[param_i].toType();
+
+ const param_i_small = @intCast(u5, param_i);
+ if (dest_info.paramIsComptime(param_i_small) != src_info.paramIsComptime(param_i_small)) {
return InMemoryCoercionResult{ .fn_param_comptime = .{
- .index = i,
- .wanted = dest_info.paramIsComptime(i_small),
+ .index = param_i,
+ .wanted = dest_info.paramIsComptime(param_i_small),
} };
}
// Note: Cast direction is reversed here.
- const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty.toType(), false, target, dest_src, src_src);
+ const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src);
if (param != .ok) {
return InMemoryCoercionResult{ .fn_param = .{
.child = try param.dupe(sema.arena),
.actual = src_param_ty,
- .wanted = dest_param_ty.toType(),
- .index = i,
+ .wanted = dest_param_ty,
+ .index = param_i,
} };
}
}
@@ -28385,7 +28391,7 @@ fn beginComptimePtrLoad(
};
},
.elem => |elem_ptr| blk: {
- const elem_ty = ptr.ty.toType().childType(mod);
+ const elem_ty = ptr.ty.toType().elemType2(mod);
var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null);
// This code assumes that elem_ptrs have been "flattened" in order for direct dereference
@@ -28678,11 +28684,10 @@ fn coerceCompatiblePtrs(
return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)});
}
// The comptime Value representation is compatible with both types.
- return sema.addConstant(dest_ty, (try mod.intern_pool.getCoerced(
- sema.gpa,
- try val.intern(inst_ty, mod),
- dest_ty.toIntern(),
- )).toValue());
+ return sema.addConstant(
+ dest_ty,
+ try mod.getCoerced((try val.intern(inst_ty, mod)).toValue(), dest_ty),
+ );
}
try sema.requireRuntimeBlock(block, inst_src, null);
const inst_allows_zero = inst_ty.zigTypeTag(mod) != .Pointer or inst_ty.ptrAllowsZero(mod);
@@ -29390,9 +29395,13 @@ fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value {
fn optRefValue(sema: *Sema, block: *Block, ty: Type, opt_val: ?Value) !Value {
const mod = sema.mod;
+ const ptr_anyopaque_ty = try mod.singleConstPtrType(Type.anyopaque);
return (try mod.intern(.{ .opt = .{
- .ty = (try mod.optionalType((try mod.singleConstPtrType(Type.anyopaque)).toIntern())).toIntern(),
- .val = if (opt_val) |val| (try sema.refValue(block, ty, val)).toIntern() else .none,
+ .ty = (try mod.optionalType(ptr_anyopaque_ty.toIntern())).toIntern(),
+ .val = if (opt_val) |val| (try mod.getCoerced(
+ try sema.refValue(block, ty, val),
+ ptr_anyopaque_ty,
+ )).toIntern() else .none,
} })).toValue();
}
@@ -30051,11 +30060,10 @@ fn analyzeSlice(
};
if (!new_ptr_val.isUndef(mod)) {
- return sema.addConstant(return_ty, (try mod.intern_pool.getCoerced(
- sema.gpa,
- try new_ptr_val.intern(new_ptr_ty, mod),
- return_ty.toIntern(),
- )).toValue());
+ return sema.addConstant(return_ty, try mod.getCoerced(
+ (try new_ptr_val.intern(new_ptr_ty, mod)).toValue(),
+ return_ty,
+ ));
}
// Special case: @as([]i32, undefined)[x..x]
@@ -34237,9 +34245,9 @@ fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
// The `tagValueIndex` function call below relies on the type being the integer tag type.
// `getCoerced` assumes the value will fit the new type.
if (!(try sema.intFitsInType(int, enum_type.tag_ty.toType(), null))) return false;
- const int_coerced = try mod.intern_pool.getCoerced(sema.gpa, int.toIntern(), enum_type.tag_ty);
+ const int_coerced = try mod.getCoerced(int, enum_type.tag_ty.toType());
- return enum_type.tagValueIndex(&mod.intern_pool, int_coerced) != null;
+ return enum_type.tagValueIndex(&mod.intern_pool, int_coerced.toIntern()) != null;
}
fn intAddWithOverflow(
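In the zirTypeInfo hunks above, the generated name strings switch from sentinel-terminated buffers (`slice_const_u8_sentinel_0_type`, array types with a `.zero_u8` sentinel) to plain slices with an explicit `len`. This is the data that `@typeInfo` ultimately exposes; a small user-level illustration (not compiler code):

    const std = @import("std");

    pub fn main() void {
        const S = struct { x: u32, y: bool };
        inline for (@typeInfo(S).Struct.fields) |f| {
            std.debug.print("{s}: {s}\n", .{ f.name, @typeName(f.type) });
        }
    }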
src/type.zig
@@ -23,7 +23,7 @@ pub const Type = struct {
}
pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => .Int,
.ptr_type => .Pointer,
.array_type => .Array,
@@ -170,7 +170,7 @@ pub const Type = struct {
/// Asserts the type is a pointer.
pub fn ptrIsMutable(ty: Type, mod: *const Module) bool {
- return !mod.intern_pool.indexToKey(ty.ip_index).ptr_type.is_const;
+ return !mod.intern_pool.indexToKey(ty.toIntern()).ptr_type.is_const;
}
pub const ArrayInfo = struct {
@@ -199,26 +199,23 @@ pub const Type = struct {
}
pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data {
- return Payload.Pointer.Data.fromKey(ptrInfoIp(mod.intern_pool, ty.ip_index));
+ return Payload.Pointer.Data.fromKey(ptrInfoIp(mod.intern_pool, ty.toIntern()));
}
pub fn eql(a: Type, b: Type, mod: *const Module) bool {
_ = mod; // TODO: remove this parameter
- assert(a.ip_index != .none);
- assert(b.ip_index != .none);
// The InternPool data structure hashes based on Key to make interned objects
// unique. An Index can be treated simply as u32 value for the
// purpose of Type/Value hashing and equality.
- return a.ip_index == b.ip_index;
+ return a.toIntern() == b.toIntern();
}
pub fn hash(ty: Type, mod: *const Module) u32 {
_ = mod; // TODO: remove this parameter
- assert(ty.ip_index != .none);
// The InternPool data structure hashes based on Key to make interned objects
// unique. An Index can be treated simply as u32 value for the
// purpose of Type/Value hashing and equality.
- return std.hash.uint32(@enumToInt(ty.ip_index));
+ return std.hash.uint32(@enumToInt(ty.toIntern()));
}
pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
@@ -280,7 +277,7 @@ pub const Type = struct {
/// Prints a name suitable for `@typeName`.
pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void {
- switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
const sign_char: u8 = switch (int_type.signedness) {
.signed => 'i',
@@ -520,10 +517,10 @@ pub const Type = struct {
ignore_comptime_only: bool,
strat: AbiAlignmentAdvancedStrat,
) RuntimeBitsError!bool {
- return switch (ty.ip_index) {
+ return switch (ty.toIntern()) {
// False because it is a comptime-only type.
.empty_struct_type => false,
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => |int_type| int_type.bits != 0,
.ptr_type => |ptr_type| {
// Pointers to zero-bit types still have a runtime address; however, pointers
@@ -710,7 +707,7 @@ pub const Type = struct {
/// readFrom/writeToMemory are supported only for types with a well-
/// defined memory layout
pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type,
.ptr_type,
.vector_type,
@@ -847,7 +844,7 @@ pub const Type = struct {
}
pub fn isNoReturn(ty: Type, mod: *Module) bool {
- return if (ty.ip_index != .none) mod.intern_pool.isNoReturn(ty.ip_index) else false;
+ return mod.intern_pool.isNoReturn(ty.toIntern());
}
/// Returns 0 if the pointer is naturally aligned and the element type is 0-bit.
@@ -856,7 +853,7 @@ pub const Type = struct {
}
pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !u32 {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| {
if (ptr_type.alignment.toByteUnitsOptional()) |a| {
return @intCast(u32, a);
@@ -873,7 +870,7 @@ pub const Type = struct {
}
pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.address_space,
.opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.address_space,
else => unreachable,
@@ -923,9 +920,9 @@ pub const Type = struct {
else => null,
};
- switch (ty.ip_index) {
+ switch (ty.toIntern()) {
.empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 },
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 };
return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) };
@@ -1040,7 +1037,7 @@ pub const Type = struct {
.sema => unreachable, // handled above
.lazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_align = ty.ip_index },
+ .storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
};
if (struct_obj.layout == .Packed) {
@@ -1048,7 +1045,7 @@ pub const Type = struct {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_align = ty.ip_index },
+ .storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
.eager => {},
}
@@ -1062,7 +1059,7 @@ pub const Type = struct {
if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_align = ty.ip_index },
+ .storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
else => |e| return e,
})) continue;
@@ -1076,7 +1073,7 @@ pub const Type = struct {
.sema => unreachable, // handled above
.lazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_align = ty.ip_index },
+ .storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
},
};
@@ -1106,7 +1103,7 @@ pub const Type = struct {
.sema => unreachable, // passed to abiAlignmentAdvanced above
.lazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_align = ty.ip_index },
+ .storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
},
}
@@ -1157,7 +1154,7 @@ pub const Type = struct {
if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_align = ty.ip_index },
+ .storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
else => |e| return e,
})) {
@@ -1179,7 +1176,7 @@ pub const Type = struct {
}
return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_align = ty.ip_index },
+ .storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() };
},
}
@@ -1205,7 +1202,7 @@ pub const Type = struct {
if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_align = ty.ip_index },
+ .storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
else => |e| return e,
})) {
@@ -1217,7 +1214,7 @@ pub const Type = struct {
.scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) },
.val => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_align = ty.ip_index },
+ .storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
},
}
@@ -1249,7 +1246,7 @@ pub const Type = struct {
.sema => unreachable, // handled above
.lazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_align = ty.ip_index },
+ .storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
};
if (union_obj.fields.count() == 0) {
@@ -1266,7 +1263,7 @@ pub const Type = struct {
if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_align = ty.ip_index },
+ .storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
else => |e| return e,
})) continue;
@@ -1280,7 +1277,7 @@ pub const Type = struct {
.sema => unreachable, // handled above
.lazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_align = ty.ip_index },
+ .storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
},
};
@@ -1321,10 +1318,10 @@ pub const Type = struct {
) Module.CompileError!AbiSizeAdvanced {
const target = mod.getTarget();
- switch (ty.ip_index) {
+ switch (ty.toIntern()) {
.empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 },
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 };
return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target) };
@@ -1343,7 +1340,7 @@ pub const Type = struct {
.sema, .eager => unreachable,
.lazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_size = ty.ip_index },
+ .storage = .{ .lazy_size = ty.toIntern() },
} })).toValue() },
},
}
@@ -1354,7 +1351,7 @@ pub const Type = struct {
.eager => null,
.lazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_size = ty.ip_index },
+ .storage = .{ .lazy_size = ty.toIntern() },
} })).toValue() },
};
const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema);
@@ -1365,7 +1362,7 @@ pub const Type = struct {
.scalar => |x| x,
.val => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_size = ty.ip_index },
+ .storage = .{ .lazy_size = ty.toIntern() },
} })).toValue() },
};
const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment);
@@ -1385,7 +1382,7 @@ pub const Type = struct {
if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_size = ty.ip_index },
+ .storage = .{ .lazy_size = ty.toIntern() },
} })).toValue() },
else => |e| return e,
})) {
@@ -1401,7 +1398,7 @@ pub const Type = struct {
.eager => unreachable,
.lazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_size = ty.ip_index },
+ .storage = .{ .lazy_size = ty.toIntern() },
} })).toValue() },
},
};
@@ -1489,7 +1486,7 @@ pub const Type = struct {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_size = ty.ip_index },
+ .storage = .{ .lazy_size = ty.toIntern() },
} })).toValue() },
.eager => {},
}
@@ -1504,7 +1501,7 @@ pub const Type = struct {
return AbiSizeAdvanced{ .scalar = 0 };
if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_size = ty.ip_index },
+ .storage = .{ .lazy_size = ty.toIntern() },
} })).toValue() };
},
.eager => {},
@@ -1568,7 +1565,7 @@ pub const Type = struct {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => if (!union_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_size = ty.ip_index },
+ .storage = .{ .lazy_size = ty.toIntern() },
} })).toValue() },
.eager => {},
}
@@ -1589,7 +1586,7 @@ pub const Type = struct {
if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_size = ty.ip_index },
+ .storage = .{ .lazy_size = ty.toIntern() },
} })).toValue() },
else => |e| return e,
})) return AbiSizeAdvanced{ .scalar = 1 };
@@ -1605,7 +1602,7 @@ pub const Type = struct {
.eager => unreachable,
.lazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
- .storage = .{ .lazy_size = ty.ip_index },
+ .storage = .{ .lazy_size = ty.toIntern() },
} })).toValue() },
},
};
@@ -1647,7 +1644,7 @@ pub const Type = struct {
const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
- switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => |int_type| return int_type.bits,
.ptr_type => |ptr_type| switch (ptr_type.size) {
.Slice => return target.ptrBitWidth() * 2,
@@ -1820,7 +1817,7 @@ pub const Type = struct {
}
pub fn isSinglePointer(ty: Type, mod: *const Module) bool {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_info| ptr_info.size == .One,
else => false,
};
@@ -1833,33 +1830,27 @@ pub const Type = struct {
/// Returns `null` if `ty` is not a pointer.
pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_info| ptr_info.size,
else => null,
};
}
pub fn isSlice(ty: Type, mod: *const Module) bool {
- return switch (ty.ip_index) {
- .none => false,
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .ptr_type => |ptr_type| ptr_type.size == .Slice,
- else => false,
- },
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+ .ptr_type => |ptr_type| ptr_type.size == .Slice,
+ else => false,
};
}
pub fn slicePtrFieldType(ty: Type, mod: *const Module) Type {
- return mod.intern_pool.slicePtrType(ty.ip_index).toType();
+ return mod.intern_pool.slicePtrType(ty.toIntern()).toType();
}
pub fn isConstPtr(ty: Type, mod: *const Module) bool {
- return switch (ty.ip_index) {
- .none => false,
- else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .ptr_type => |ptr_type| ptr_type.is_const,
- else => false,
- },
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+ .ptr_type => |ptr_type| ptr_type.is_const,
+ else => false,
};
}
@@ -1868,53 +1859,41 @@ pub const Type = struct {
}
pub fn isVolatilePtrIp(ty: Type, ip: InternPool) bool {
- return switch (ty.ip_index) {
- .none => false,
- else => switch (ip.indexToKey(ty.ip_index)) {
- .ptr_type => |ptr_type| ptr_type.is_volatile,
- else => false,
- },
+ return switch (ip.indexToKey(ty.toIntern())) {
+ .ptr_type => |ptr_type| ptr_type.is_volatile,
+ else => false,
};
}
pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool {
- return switch (ty.ip_index) {
- .none => false,
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .ptr_type => |ptr_type| ptr_type.is_allowzero,
- .opt_type => true,
- else => false,
- },
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+ .ptr_type => |ptr_type| ptr_type.is_allowzero,
+ .opt_type => true,
+ else => false,
};
}
pub fn isCPtr(ty: Type, mod: *const Module) bool {
- return switch (ty.ip_index) {
- .none => false,
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .ptr_type => |ptr_type| ptr_type.size == .C,
- else => false,
- },
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+ .ptr_type => |ptr_type| ptr_type.size == .C,
+ else => false,
};
}
pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool {
- return switch (ty.ip_index) {
- .none => false,
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .ptr_type => |ptr_type| switch (ptr_type.size) {
- .Slice => false,
- .One, .Many, .C => true,
- },
- .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
- .ptr_type => |p| switch (p.size) {
- .Slice, .C => false,
- .Many, .One => !p.is_allowzero,
- },
- else => false,
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+ .ptr_type => |ptr_type| switch (ptr_type.size) {
+ .Slice => false,
+ .One, .Many, .C => true,
+ },
+ .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
+ .ptr_type => |p| switch (p.size) {
+ .Slice, .C => false,
+ .Many, .One => !p.is_allowzero,
},
else => false,
},
+ else => false,
};
}
@@ -1929,22 +1908,19 @@ pub const Type = struct {
/// See also `isPtrLikeOptional`.
pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool {
- return switch (ty.ip_index) {
- .none => false,
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .opt_type => |child| switch (child.toType().zigTypeTag(mod)) {
- .Pointer => {
- const info = child.toType().ptrInfo(mod);
- return switch (info.size) {
- .C => false,
- else => !info.@"allowzero",
- };
- },
- .ErrorSet => true,
- else => false,
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+ .opt_type => |child| switch (child.toType().zigTypeTag(mod)) {
+ .Pointer => {
+ const info = child.toType().ptrInfo(mod);
+ return switch (info.size) {
+ .C => false,
+ else => !info.@"allowzero",
+ };
},
+ .ErrorSet => true,
else => false,
},
+ else => false,
};
}
@@ -1952,19 +1928,16 @@ pub const Type = struct {
/// address value, using 0 for null. Note that this returns true for C pointers.
/// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`.
pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool {
- return switch (ty.ip_index) {
- .none => false,
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .ptr_type => |ptr_type| ptr_type.size == .C,
- .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
- .ptr_type => |ptr_type| switch (ptr_type.size) {
- .Slice, .C => false,
- .Many, .One => !ptr_type.is_allowzero,
- },
- else => false,
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+ .ptr_type => |ptr_type| ptr_type.size == .C,
+ .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
+ .ptr_type => |ptr_type| switch (ptr_type.size) {
+ .Slice, .C => false,
+ .Many, .One => !ptr_type.is_allowzero,
},
else => false,
},
+ else => false,
};
}
@@ -1976,7 +1949,7 @@ pub const Type = struct {
}
pub fn childTypeIp(ty: Type, ip: InternPool) Type {
- return ip.childType(ty.ip_index).toType();
+ return ip.childType(ty.toIntern()).toType();
}
/// For *[N]T, returns T.
@@ -1989,7 +1962,7 @@ pub const Type = struct {
/// For []T, returns T.
/// For anyframe->T, returns T.
pub fn elemType2(ty: Type, mod: *const Module) Type {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| switch (ptr_type.size) {
.One => ptr_type.elem_type.toType().shallowElemType(mod),
.Many, .C, .Slice => ptr_type.elem_type.toType(),
@@ -2023,7 +1996,7 @@ pub const Type = struct {
/// Asserts that the type is an optional.
/// Note that for C pointers this returns the type unmodified.
pub fn optionalChild(ty: Type, mod: *const Module) Type {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.opt_type => |child| child.toType(),
.ptr_type => |ptr_type| b: {
assert(ptr_type.size == .C);
@@ -2036,7 +2009,7 @@ pub const Type = struct {
/// Returns the tag type of a union, if the type is a union and it has a tag type.
/// Otherwise, returns `null`.
pub fn unionTagType(ty: Type, mod: *Module) ?Type {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.union_type => |union_type| switch (union_type.runtime_tag) {
.tagged => {
const union_obj = mod.unionPtr(union_type.index);
@@ -2052,7 +2025,7 @@ pub const Type = struct {
/// Same as `unionTagType` but includes safety tag.
/// Codegen should use this version.
pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.union_type => |union_type| {
if (!union_type.hasTag()) return null;
const union_obj = mod.unionPtr(union_type.index);
@@ -2097,13 +2070,13 @@ pub const Type = struct {
}
pub fn unionGetLayout(ty: Type, mod: *Module) Module.Union.Layout {
- const union_type = mod.intern_pool.indexToKey(ty.ip_index).union_type;
+ const union_type = mod.intern_pool.indexToKey(ty.toIntern()).union_type;
const union_obj = mod.unionPtr(union_type.index);
return union_obj.getLayout(mod, union_type.hasTag());
}
pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto;
return struct_obj.layout;
@@ -2119,19 +2092,19 @@ pub const Type = struct {
/// Asserts that the type is an error union.
pub fn errorUnionPayload(ty: Type, mod: *Module) Type {
- return mod.intern_pool.indexToKey(ty.ip_index).error_union_type.payload_type.toType();
+ return mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.payload_type.toType();
}
/// Asserts that the type is an error union.
pub fn errorUnionSet(ty: Type, mod: *Module) Type {
- return mod.intern_pool.indexToKey(ty.ip_index).error_union_type.error_set_type.toType();
+ return mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.error_set_type.toType();
}
/// Returns false for unresolved inferred error sets.
pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool {
- return switch (ty.ip_index) {
+ return switch (ty.toIntern()) {
.anyerror_type => false,
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.error_set_type => |error_set_type| error_set_type.names.len == 0,
.inferred_error_set_type => |index| {
const inferred_error_set = mod.inferredErrorSetPtr(index);
@@ -2149,9 +2122,9 @@ pub const Type = struct {
/// Note that the result may be a false negative if the type did not get error set
/// resolution prior to this call.
pub fn isAnyError(ty: Type, mod: *Module) bool {
- return switch (ty.ip_index) {
+ return switch (ty.toIntern()) {
.anyerror_type => true,
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.inferred_error_set_type => |i| mod.inferredErrorSetPtr(i).is_anyerror,
else => false,
},
@@ -2194,9 +2167,9 @@ pub const Type = struct {
/// resolved yet.
pub fn errorSetHasField(ty: Type, name: []const u8, mod: *Module) bool {
const ip = &mod.intern_pool;
- return switch (ty.ip_index) {
+ return switch (ty.toIntern()) {
.anyerror_type => true,
- else => switch (ip.indexToKey(ty.ip_index)) {
+ else => switch (ip.indexToKey(ty.toIntern())) {
.error_set_type => |error_set_type| {
// If the string is not interned, then the field certainly is not present.
const field_name_interned = ip.getString(name).unwrap() orelse return false;
@@ -2220,7 +2193,7 @@ pub const Type = struct {
}
pub fn arrayLenIp(ty: Type, ip: InternPool) u64 {
- return switch (ip.indexToKey(ty.ip_index)) {
+ return switch (ip.indexToKey(ty.toIntern())) {
.vector_type => |vector_type| vector_type.len,
.array_type => |array_type| array_type.len,
.struct_type => |struct_type| {
@@ -2238,7 +2211,7 @@ pub const Type = struct {
}
pub fn vectorLen(ty: Type, mod: *const Module) u32 {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.vector_type => |vector_type| vector_type.len,
.anon_struct_type => |tuple| @intCast(u32, tuple.types.len),
else => unreachable,
@@ -2247,7 +2220,7 @@ pub const Type = struct {
/// Asserts the type is an array, pointer or vector.
pub fn sentinel(ty: Type, mod: *const Module) ?Value {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.vector_type,
.struct_type,
.anon_struct_type,
@@ -2267,10 +2240,9 @@ pub const Type = struct {
/// Returns true if and only if the type is a fixed-width, signed integer.
pub fn isSignedInt(ty: Type, mod: *const Module) bool {
- return switch (ty.ip_index) {
+ return switch (ty.toIntern()) {
.c_char_type, .isize_type, .c_short_type, .c_int_type, .c_long_type, .c_longlong_type => true,
- .none => false,
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => |int_type| int_type.signedness == .signed,
else => false,
},
@@ -2279,10 +2251,9 @@ pub const Type = struct {
/// Returns true if and only if the type is a fixed-width, unsigned integer.
pub fn isUnsignedInt(ty: Type, mod: *const Module) bool {
- return switch (ty.ip_index) {
+ return switch (ty.toIntern()) {
.usize_type, .c_ushort_type, .c_uint_type, .c_ulong_type, .c_ulonglong_type => true,
- .none => false,
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => |int_type| int_type.signedness == .unsigned,
else => false,
},
@@ -2304,7 +2275,7 @@ pub const Type = struct {
const target = mod.getTarget();
var ty = starting_ty;
- while (true) switch (ty.ip_index) {
+ while (true) switch (ty.toIntern()) {
.anyerror_type => {
// TODO revisit this when error sets support custom int types
return .{ .signedness = .unsigned, .bits = 16 };
@@ -2320,7 +2291,7 @@ pub const Type = struct {
.c_ulong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) },
.c_longlong_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) },
.c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) },
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => |int_type| return int_type,
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
@@ -2370,7 +2341,7 @@ pub const Type = struct {
}
pub fn isNamedInt(ty: Type) bool {
- return switch (ty.ip_index) {
+ return switch (ty.toIntern()) {
.usize_type,
.isize_type,
.c_char_type,
@@ -2390,7 +2361,7 @@ pub const Type = struct {
/// Returns `false` for `comptime_float`.
pub fn isRuntimeFloat(ty: Type) bool {
- return switch (ty.ip_index) {
+ return switch (ty.toIntern()) {
.f16_type,
.f32_type,
.f64_type,
@@ -2405,7 +2376,7 @@ pub const Type = struct {
/// Returns `true` for `comptime_float`.
pub fn isAnyFloat(ty: Type) bool {
- return switch (ty.ip_index) {
+ return switch (ty.toIntern()) {
.f16_type,
.f32_type,
.f64_type,
@@ -2422,7 +2393,7 @@ pub const Type = struct {
/// Asserts the type is a fixed-size float or comptime_float.
/// Returns 128 for comptime_float types.
pub fn floatBits(ty: Type, target: Target) u16 {
- return switch (ty.ip_index) {
+ return switch (ty.toIntern()) {
.f16_type => 16,
.f32_type => 32,
.f64_type => 64,
@@ -2440,7 +2411,7 @@ pub const Type = struct {
}
pub fn fnReturnTypeIp(ty: Type, ip: InternPool) Type {
- return switch (ip.indexToKey(ty.ip_index)) {
+ return switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ip.indexToKey(ptr_type.elem_type).func_type.return_type,
.func_type => |func_type| func_type.return_type,
else => unreachable,
@@ -2449,7 +2420,7 @@ pub const Type = struct {
/// Asserts the type is a function.
pub fn fnCallingConvention(ty: Type, mod: *Module) std.builtin.CallingConvention {
- return mod.intern_pool.indexToKey(ty.ip_index).func_type.cc;
+ return mod.intern_pool.indexToKey(ty.toIntern()).func_type.cc;
}
pub fn isValidParamType(self: Type, mod: *const Module) bool {
@@ -2468,11 +2439,11 @@ pub const Type = struct {
/// Asserts the type is a function.
pub fn fnIsVarArgs(ty: Type, mod: *Module) bool {
- return mod.intern_pool.indexToKey(ty.ip_index).func_type.is_var_args;
+ return mod.intern_pool.indexToKey(ty.toIntern()).func_type.is_var_args;
}
pub fn isNumeric(ty: Type, mod: *const Module) bool {
- return switch (ty.ip_index) {
+ return switch (ty.toIntern()) {
.f16_type,
.f32_type,
.f64_type,
@@ -2494,9 +2465,7 @@ pub const Type = struct {
.c_ulonglong_type,
=> true,
- .none => false,
-
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => true,
else => false,
},
@@ -2508,10 +2477,10 @@ pub const Type = struct {
pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
var ty = starting_type;
- while (true) switch (ty.ip_index) {
+ while (true) switch (ty.toIntern()) {
.empty_struct_type => return Value.empty_struct,
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
if (int_type.bits == 0) {
return try mod.intValue(ty, 0);
@@ -2530,13 +2499,13 @@ pub const Type = struct {
inline .array_type, .vector_type => |seq_type| {
if (seq_type.len == 0) return (try mod.intern(.{ .aggregate = .{
- .ty = ty.ip_index,
+ .ty = ty.toIntern(),
.storage = .{ .elems = &.{} },
} })).toValue();
if (try seq_type.child.toType().onePossibleValue(mod)) |opv| {
return (try mod.intern(.{ .aggregate = .{
- .ty = ty.ip_index,
- .storage = .{ .repeated_elem = opv.ip_index },
+ .ty = ty.toIntern(),
+ .storage = .{ .repeated_elem = opv.toIntern() },
} })).toValue();
}
return null;
@@ -2612,7 +2581,7 @@ pub const Type = struct {
// This TODO is repeated in the redundant implementation of
// one-possible-value logic in Sema.zig.
const empty = try mod.intern(.{ .aggregate = .{
- .ty = ty.ip_index,
+ .ty = ty.toIntern(),
.storage = .{ .elems = &.{} },
} });
return empty.toValue();
@@ -2625,7 +2594,7 @@ pub const Type = struct {
// In this case the struct has all comptime-known fields and
// therefore has one possible value.
return (try mod.intern(.{ .aggregate = .{
- .ty = ty.ip_index,
+ .ty = ty.toIntern(),
.storage = .{ .elems = tuple.values },
} })).toValue();
},
@@ -2637,9 +2606,9 @@ pub const Type = struct {
const only_field = union_obj.fields.values()[0];
const val_val = (try only_field.ty.onePossibleValue(mod)) orelse return null;
const only = try mod.intern(.{ .un = .{
- .ty = ty.ip_index,
- .tag = tag_val.ip_index,
- .val = val_val.ip_index,
+ .ty = ty.toIntern(),
+ .tag = tag_val.toIntern(),
+ .val = val_val.toIntern(),
} });
return only.toValue();
},
@@ -2650,8 +2619,8 @@ pub const Type = struct {
if (try enum_type.tag_ty.toType().onePossibleValue(mod)) |int_opv| {
const only = try mod.intern(.{ .enum_tag = .{
- .ty = ty.ip_index,
- .int = int_opv.ip_index,
+ .ty = ty.toIntern(),
+ .int = int_opv.toIntern(),
} });
return only.toValue();
}
@@ -2663,7 +2632,7 @@ pub const Type = struct {
1 => {
if (enum_type.values.len == 0) {
const only = try mod.intern(.{ .enum_tag = .{
- .ty = ty.ip_index,
+ .ty = ty.toIntern(),
.int = try mod.intern(.{ .int = .{
.ty = enum_type.tag_ty,
.storage = .{ .u64 = 0 },
@@ -2705,10 +2674,10 @@ pub const Type = struct {
/// TODO merge these implementations together with the "advanced" pattern seen
/// elsewhere in this file.
pub fn comptimeOnly(ty: Type, mod: *Module) bool {
- return switch (ty.ip_index) {
+ return switch (ty.toIntern()) {
.empty_struct_type => false,
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.int_type => false,
.ptr_type => |ptr_type| {
const child_ty = ptr_type.elem_type.toType();
@@ -2880,8 +2849,7 @@ pub const Type = struct {
/// Returns null if the type has no namespace.
pub fn getNamespaceIndex(ty: Type, mod: *Module) Module.Namespace.OptionalIndex {
- if (ty.ip_index == .none) return .none;
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
.struct_type => |struct_type| struct_type.namespace,
.union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(),
@@ -2900,8 +2868,8 @@ pub const Type = struct {
pub fn minInt(ty: Type, mod: *Module) !Value {
const scalar = try minIntScalar(ty.scalarType(mod), mod);
return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{
- .ty = ty.ip_index,
- .storage = .{ .repeated_elem = scalar.ip_index },
+ .ty = ty.toIntern(),
+ .storage = .{ .repeated_elem = scalar.toIntern() },
} })).toValue() else scalar;
}
@@ -2929,8 +2897,8 @@ pub const Type = struct {
pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty);
return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{
- .ty = ty.ip_index,
- .storage = .{ .repeated_elem = scalar.ip_index },
+ .ty = ty.toIntern(),
+ .storage = .{ .repeated_elem = scalar.toIntern() },
} })).toValue() else scalar;
}
@@ -2971,7 +2939,7 @@ pub const Type = struct {
/// Asserts the type is an enum or a union.
pub fn intTagType(ty: Type, mod: *Module) !Type {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.union_type => |union_type| mod.unionPtr(union_type.index).tag_ty.intTagType(mod),
.enum_type => |enum_type| enum_type.tag_ty.toType(),
else => unreachable,
@@ -2979,21 +2947,18 @@ pub const Type = struct {
}
pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool {
- return switch (ty.ip_index) {
- .none => false,
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .enum_type => |enum_type| switch (enum_type.tag_mode) {
- .nonexhaustive => true,
- .auto, .explicit => false,
- },
- else => false,
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+ .enum_type => |enum_type| switch (enum_type.tag_mode) {
+ .nonexhaustive => true,
+ .auto, .explicit => false,
},
+ else => false,
};
}
// Asserts that `ty` is an error set and not `anyerror`.
pub fn errorSetNames(ty: Type, mod: *Module) []const InternPool.NullTerminatedString {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.error_set_type => |x| x.names,
.inferred_error_set_type => |index| {
const inferred_error_set = mod.inferredErrorSetPtr(index);
@@ -3006,22 +2971,22 @@ pub const Type = struct {
}
pub fn enumFields(ty: Type, mod: *Module) []const InternPool.NullTerminatedString {
- return mod.intern_pool.indexToKey(ty.ip_index).enum_type.names;
+ return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names;
}
pub fn enumFieldCount(ty: Type, mod: *Module) usize {
- return mod.intern_pool.indexToKey(ty.ip_index).enum_type.names.len;
+ return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names.len;
}
pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) [:0]const u8 {
const ip = &mod.intern_pool;
- const field_name = ip.indexToKey(ty.ip_index).enum_type.names[field_index];
+ const field_name = ip.indexToKey(ty.toIntern()).enum_type.names[field_index];
return ip.stringToSlice(field_name);
}
pub fn enumFieldIndex(ty: Type, field_name: []const u8, mod: *Module) ?u32 {
const ip = &mod.intern_pool;
- const enum_type = ip.indexToKey(ty.ip_index).enum_type;
+ const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
// If the string is not interned, then the field certainly is not present.
const field_name_interned = ip.getString(field_name).unwrap() orelse return null;
return enum_type.nameIndex(ip, field_name_interned);
@@ -3032,9 +2997,9 @@ pub const Type = struct {
/// declaration order, or `null` if `enum_tag` does not match any field.
pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
const ip = &mod.intern_pool;
- const enum_type = ip.indexToKey(ty.ip_index).enum_type;
- const int_tag = switch (ip.indexToKey(enum_tag.ip_index)) {
- .int => enum_tag.ip_index,
+ const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
+ const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) {
+ .int => enum_tag.toIntern(),
.enum_tag => |info| info.int,
else => unreachable,
};
@@ -3043,7 +3008,7 @@ pub const Type = struct {
}
pub fn structFields(ty: Type, mod: *Module) Module.Struct.Fields {
- switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .{};
assert(struct_obj.haveFieldTypes());
@@ -3054,7 +3019,7 @@ pub const Type = struct {
}
pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) []const u8 {
- switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
assert(struct_obj.haveFieldTypes());
@@ -3069,7 +3034,7 @@ pub const Type = struct {
}
pub fn structFieldCount(ty: Type, mod: *Module) usize {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0;
assert(struct_obj.haveFieldTypes());
@@ -3082,7 +3047,7 @@ pub const Type = struct {
/// Supports structs and unions.
pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
return struct_obj.fields.values()[index].ty;
@@ -3097,7 +3062,7 @@ pub const Type = struct {
}
pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 {
- switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
assert(struct_obj.layout != .Packed);
@@ -3115,7 +3080,7 @@ pub const Type = struct {
}
pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value {
- switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
return struct_obj.fields.values()[index].default_val;
@@ -3131,7 +3096,7 @@ pub const Type = struct {
}
pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value {
- switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
const field = struct_obj.fields.values()[index];
@@ -3154,7 +3119,7 @@ pub const Type = struct {
}
pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
if (struct_obj.layout == .Packed) return false;
@@ -3167,7 +3132,7 @@ pub const Type = struct {
}
pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 {
- const struct_type = mod.intern_pool.indexToKey(ty.ip_index).struct_type;
+ const struct_type = mod.intern_pool.indexToKey(ty.toIntern()).struct_type;
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
assert(struct_obj.layout == .Packed);
comptime assert(Type.packed_struct_layout_version == 2);
@@ -3229,7 +3194,7 @@ pub const Type = struct {
/// Get an iterator that iterates over all the struct field, returning the field and
/// offset of that field. Asserts that the type is a non-packed struct.
pub fn iterateStructOffsets(ty: Type, mod: *Module) StructOffsetIterator {
- const struct_type = mod.intern_pool.indexToKey(ty.ip_index).struct_type;
+ const struct_type = mod.intern_pool.indexToKey(ty.toIntern()).struct_type;
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
assert(struct_obj.haveLayout());
assert(struct_obj.layout != .Packed);
@@ -3238,7 +3203,7 @@ pub const Type = struct {
/// Supports structs and unions.
pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
- switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
assert(struct_obj.haveLayout());
@@ -3296,7 +3261,7 @@ pub const Type = struct {
}
pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
return struct_obj.srcLoc(mod);
@@ -3316,7 +3281,7 @@ pub const Type = struct {
}
pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?Module.Decl.Index {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return null;
return struct_obj.owner_decl;
@@ -3332,33 +3297,30 @@ pub const Type = struct {
}
pub fn isGenericPoison(ty: Type) bool {
- return ty.ip_index == .generic_poison_type;
+ return ty.toIntern() == .generic_poison_type;
}
pub fn isTuple(ty: Type, mod: *Module) bool {
- return switch (ty.ip_index) {
- .none => false,
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .struct_type => |struct_type| {
- const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
- return struct_obj.is_tuple;
- },
- .anon_struct_type => |anon_struct| anon_struct.names.len == 0,
- else => false,
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
+ return struct_obj.is_tuple;
},
+ .anon_struct_type => |anon_struct| anon_struct.names.len == 0,
+ else => false,
};
}
pub fn isAnonStruct(ty: Type, mod: *Module) bool {
- if (ty.ip_index == .empty_struct_type) return true;
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ if (ty.toIntern() == .empty_struct_type) return true;
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0,
else => false,
};
}
pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
return struct_obj.is_tuple;
@@ -3369,14 +3331,14 @@ pub const Type = struct {
}
pub fn isSimpleTuple(ty: Type, mod: *Module) bool {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0,
else => false,
};
}
pub fn isSimpleTupleOrAnonStruct(ty: Type, mod: *Module) bool {
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.anon_struct_type => true,
else => false,
};
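The pub const Type hunks above all follow one pattern: direct reads of ty.ip_index become calls to the toIntern() accessor, and the dedicated ".none => false" switch arms are dropped alongside them. As a rough sketch of what that accessor presumably does (hypothetical, inferred from the removed .none arms rather than taken from this commit):

    // Hypothetical sketch: the accessor is assumed to assert that the type is backed
    // by the InternPool before handing out its index, which is what makes the explicit
    // `.none => false` arms in the old code redundant.
    pub fn toIntern(ty: Type) InternPool.Index {
        assert(ty.ip_index != .none);
        return ty.ip_index;
    }

Under that reading, any type reaching these functions is already interned, so the .none escape hatches can be deleted without changing behavior.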
src/value.zig
@@ -345,7 +345,7 @@ pub const Value = struct {
}
pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index {
- if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), ty.toIntern());
+ if (val.ip_index != .none) return (try mod.getCoerced(val, ty)).toIntern();
switch (val.tag()) {
.eu_payload => {
const pl = val.castTag(.eu_payload).?.data;
@@ -506,11 +506,7 @@ pub const Value = struct {
else => unreachable,
};
},
- .enum_type => |enum_type| (try ip.getCoerced(
- mod.gpa,
- val.toIntern(),
- enum_type.tag_ty,
- )).toValue(),
+ .enum_type => |enum_type| try mod.getCoerced(val, enum_type.tag_ty.toType()),
else => unreachable,
};
}
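The hunk above swaps direct intern_pool.getCoerced(mod.gpa, ...) calls for a mod.getCoerced(val, ty) helper that works on Value and Type rather than raw intern-pool indices. A minimal sketch of the wrapper these call sites appear to assume (an assumption inferred from the before/after call shapes, not code from this commit):

    // Assumed convenience wrapper on Module: coerce an already-interned value to a new
    // type using the module's general-purpose allocator, returning a Value instead of
    // an InternPool.Index.
    pub fn getCoerced(mod: *Module, val: Value, new_ty: Type) Allocator.Error!Value {
        return (try mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), new_ty.toIntern())).toValue();
    }

This keeps the allocator plumbing in one place and lets the call sites stay in terms of Value and Type.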
@@ -872,10 +868,15 @@ pub const Value = struct {
.Packed => {
var bits: u16 = 0;
const fields = ty.structFields(mod).values();
- const field_vals = val.castTag(.aggregate).?.data;
+ const storage = mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage;
for (fields, 0..) |field, i| {
const field_bits = @intCast(u16, field.ty.bitSize(mod));
- try field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
+ const field_val = switch (storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[i],
+ .repeated_elem => |elem| elem,
+ };
+ try field_val.toValue().writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
bits += field_bits;
}
},
@@ -2006,23 +2007,30 @@ pub const Value = struct {
}
pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool {
+ return val.ip_index != .none and switch (mod.intern_pool.indexToKey(val.toIntern())) {
+ .variable => false,
+ else => val.isPtrToThreadLocalInner(mod),
+ };
+ }
+
+ pub fn isPtrToThreadLocalInner(val: Value, mod: *Module) bool {
return val.ip_index != .none and switch (mod.intern_pool.indexToKey(val.toIntern())) {
.variable => |variable| variable.is_threadlocal,
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl_index| {
const decl = mod.declPtr(decl_index);
assert(decl.has_tv);
- return decl.val.isPtrToThreadLocal(mod);
+ return decl.val.isPtrToThreadLocalInner(mod);
},
.mut_decl => |mut_decl| {
const decl = mod.declPtr(mut_decl.decl);
assert(decl.has_tv);
- return decl.val.isPtrToThreadLocal(mod);
+ return decl.val.isPtrToThreadLocalInner(mod);
},
.int => false,
- .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocal(mod),
- .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocal(mod),
- .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocal(mod),
+ .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocalInner(mod),
+ .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocalInner(mod),
+ .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocalInner(mod),
},
else => false,
};
@@ -2045,7 +2053,18 @@ pub const Value = struct {
else => unreachable,
},
.aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{
- .ty = mod.intern_pool.typeOf(val.toIntern()),
+ .ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) {
+ .array_type => |array_type| try mod.arrayType(.{
+ .len = @intCast(u32, end - start),
+ .child = array_type.child,
+ .sentinel = if (end == array_type.len) array_type.sentinel else .none,
+ }),
+ .vector_type => |vector_type| try mod.vectorType(.{
+ .len = @intCast(u32, end - start),
+ .child = vector_type.child,
+ }),
+ else => unreachable,
+ }.toIntern(),
.storage = switch (aggregate.storage) {
.bytes => |bytes| .{ .bytes = bytes[start..end] },
.elems => |elems| .{ .elems = elems[start..end] },
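The final value.zig hunk derives a fresh array or vector type for the sliced aggregate instead of reusing the original type, keeping the array's sentinel only when the slice runs all the way to the original end (end == array_type.len). A small standalone test illustrating that rule via the language-level slicing semantics it mirrors (illustrative only, not part of this commit):

    const std = @import("std");

    test "slice keeps the sentinel only when it reaches the original end" {
        const full: [5:0]u8 = .{ 1, 2, 3, 4, 5 };
        // Slicing up to the original length can carry the sentinel in the result type...
        const to_end = full[1..5 :0];
        try std.testing.expect(@TypeOf(to_end) == *const [4:0]u8);
        // ...while an interior slice does not.
        const interior = full[1..3];
        try std.testing.expect(@TypeOf(interior) == *const [2]u8);
    }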
tools/lldb_pretty_printers.py
@@ -347,9 +347,15 @@ class TagAndPayload_SynthProvider:
except: return -1
def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index in range(2) else None
-def Inst_Ref_SummaryProvider(value, _=None):
+def Zir_Inst__Zir_Inst_Ref_SummaryProvider(value, _=None):
members = value.type.enum_members
- return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned - len(members))
+ # ignore .var_args_param_type and .none
+ return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned + 2 - len(members))
+
+def Air_Inst__Air_Inst_Ref_SummaryProvider(value, _=None):
+ members = value.type.enum_members
+ # ignore .none
+ return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned + 1 - len(members))
class Module_Decl__Module_Decl_Index_SynthProvider:
def __init__(self, value, _=None): self.value = value
@@ -676,8 +682,9 @@ def __lldb_init_module(debugger, _=None):
add(debugger, category='zig.stage2', type='Zir.Inst', identifier='TagAndPayload', synth=True, inline_children=True, summary=True)
add(debugger, category='zig.stage2', regex=True, type=MultiArrayList_Entry('Zir\\.Inst'), identifier='TagAndPayload', synth=True, inline_children=True, summary=True)
add(debugger, category='zig.stage2', regex=True, type='^Zir\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True)
- add(debugger, category='zig.stage2', type='Zir.Inst::Zir.Inst.Ref', identifier='Inst_Ref', summary=True)
+ add(debugger, category='zig.stage2', type='Zir.Inst::Zir.Inst.Ref', summary=True)
add(debugger, category='zig.stage2', type='Air.Inst', identifier='TagAndPayload', synth=True, inline_children=True, summary=True)
+ add(debugger, category='zig.stage2', type='Air.Inst::Air.Inst.Ref', summary=True)
add(debugger, category='zig.stage2', regex=True, type=MultiArrayList_Entry('Air\\.Inst'), identifier='TagAndPayload', synth=True, inline_children=True, summary=True)
add(debugger, category='zig.stage2', regex=True, type='^Air\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True)
add(debugger, category='zig.stage2', type='Module.Decl::Module.Decl.Index', synth=True)