Commit 975b859377
Changed files (16)
src/arch/wasm/abi.zig
@@ -76,7 +76,7 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
}
const layout = ty.unionGetLayout(mod);
assert(layout.tag_size == 0);
- if (union_obj.field_names.len > 1) return memory;
+ if (union_obj.field_types.len > 1) return memory;
const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
return classifyType(first_field_ty, mod);
},
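
Note: `field_names` no longer exists on the union type; the names moved to the union's tag enum (see `loadTagType` in the InternPool.zig hunks below), so the field count is now taken from `field_types`. A hedged restatement of the classification rule above, identifiers as in the hunk (not part of the commit):

    // Unions with more than one field are passed by memory; a single-field
    // union is classified like its only field.
    if (union_obj.field_types.len > 1) return memory;
    return classifyType(Type.fromInterned(union_obj.field_types.get(ip)[0]), mod);
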
src/arch/wasm/CodeGen.zig
@@ -3354,7 +3354,8 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
val.writeToMemory(ty, mod, &buf) catch unreachable;
return func.storeSimdImmd(buf);
},
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
// non-packed structs are not handled in this function because they
// are by-ref types.
assert(struct_type.layout == .Packed);
@@ -5411,7 +5412,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const layout = union_ty.unionGetLayout(mod);
const union_obj = mod.typeToUnion(union_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
- const field_name = union_obj.field_names.get(ip)[extra.field_index];
+ const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const tag_int = blk: {
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
src/arch/x86_64/CodeGen.zig
@@ -18183,7 +18183,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const dst_mcv = try self.allocRegOrMem(inst, false);
const union_obj = mod.typeToUnion(union_ty).?;
- const field_name = union_obj.field_names.get(ip)[extra.field_index];
+ const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const tag_ty = Type.fromInterned(union_obj.enum_tag_ty);
const field_index = tag_ty.enumFieldIndex(field_name, mod).?;
const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
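
Note: both backends above replace the removed `union_obj.field_names` slice with the names stored on the union's tag enum. A hedged sketch of the new lookup chain, identifiers as in the hunks (`ip`, `mod`, `tag_ty`, and `extra.field_index` assumed in scope):

    // Load the union's tag enum, then read the field name from it.
    const tag_info = union_obj.loadTagType(ip); // LoadedEnumType
    const field_name = tag_info.names.get(ip)[extra.field_index];
    // As above, the name still maps back to an enum field index via
    // tag_ty.enumFieldIndex(field_name, mod) when a tag constant is needed.
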
src/codegen/c/type.zig
@@ -1507,7 +1507,7 @@ pub const CType = extern union {
if (lookup.isMutable()) {
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(mod),
- .Union => mod.typeToUnion(ty).?.field_names.len,
+ .Union => mod.typeToUnion(ty).?.field_types.len,
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i, mod);
@@ -1589,7 +1589,7 @@ pub const CType = extern union {
var is_packed = false;
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(mod),
- .Union => mod.typeToUnion(ty).?.field_names.len,
+ .Union => mod.typeToUnion(ty).?.field_types.len,
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i, mod);
@@ -1940,7 +1940,7 @@ pub const CType = extern union {
const zig_ty_tag = ty.zigTypeTag(mod);
const fields_len = switch (zig_ty_tag) {
.Struct => ty.structFieldCount(mod),
- .Union => mod.typeToUnion(ty).?.field_names.len,
+ .Union => mod.typeToUnion(ty).?.field_types.len,
else => unreachable,
};
@@ -1967,7 +1967,7 @@ pub const CType = extern union {
else
arena.dupeZ(u8, ip.stringToSlice(switch (zig_ty_tag) {
.Struct => ty.legacyStructFieldName(field_i, mod),
- .Union => mod.typeToUnion(ty).?.field_names.get(ip)[field_i],
+ .Union => ip.loadUnionType(ty.toIntern()).loadTagType(ip).names.get(ip)[field_i],
else => unreachable,
})),
.type = store.set.typeToIndex(field_ty, mod, switch (kind) {
@@ -2097,7 +2097,7 @@ pub const CType = extern union {
var c_field_i: usize = 0;
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(mod),
- .Union => mod.typeToUnion(ty).?.field_names.len,
+ .Union => mod.typeToUnion(ty).?.field_types.len,
else => unreachable,
}) |field_i_usize| {
const field_i: u32 = @intCast(field_i_usize);
@@ -2120,7 +2120,7 @@ pub const CType = extern union {
else
ip.stringToSlice(switch (zig_ty_tag) {
.Struct => ty.legacyStructFieldName(field_i, mod),
- .Union => mod.typeToUnion(ty).?.field_names.get(ip)[field_i],
+ .Union => ip.loadUnionType(ty.toIntern()).loadTagType(ip).names.get(ip)[field_i],
else => unreachable,
}),
mem.span(c_field.name),
@@ -2226,7 +2226,7 @@ pub const CType = extern union {
const zig_ty_tag = ty.zigTypeTag(mod);
for (0..switch (ty.zigTypeTag(mod)) {
.Struct => ty.structFieldCount(mod),
- .Union => mod.typeToUnion(ty).?.field_names.len,
+ .Union => mod.typeToUnion(ty).?.field_types.len,
else => unreachable,
}) |field_i_usize| {
const field_i: u32 = @intCast(field_i_usize);
@@ -2245,7 +2245,7 @@ pub const CType = extern union {
else
mod.intern_pool.stringToSlice(switch (zig_ty_tag) {
.Struct => ty.legacyStructFieldName(field_i, mod),
- .Union => mod.typeToUnion(ty).?.field_names.get(ip)[field_i],
+ .Union => ip.loadUnionType(ty.toIntern()).loadTagType(ip).names.get(ip)[field_i],
else => unreachable,
}));
autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align");
src/codegen/c.zig
@@ -1475,13 +1475,10 @@ pub const DeclGen = struct {
var empty = true;
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
+ if (struct_type.fieldIsComptime(ip, field_index)) continue;
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- if (!empty) try writer.writeAll(" | ");
- try writer.writeByte('(');
- try dg.renderType(writer, ty);
- try writer.writeByte(')');
-
+ if (!empty) try writer.writeByte(',');
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
@@ -1490,6 +1487,7 @@ pub const DeclGen = struct {
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
+ try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), initializer_type);
if (bit_offset != 0) {
try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), .Other);
@@ -1503,7 +1501,7 @@ pub const DeclGen = struct {
bit_offset += field_ty.bitSize(mod);
empty = false;
}
- try writer.writeByte(')');
+ try writer.writeByte('}');
}
},
},
@@ -1547,7 +1545,7 @@ pub const DeclGen = struct {
const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
- const field_name = union_obj.field_names.get(ip)[field_index];
+ const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
if (union_obj.getLayout(ip) == .Packed) {
if (field_ty.hasRuntimeBits(mod)) {
if (field_ty.isPtrAtRuntime(mod)) {
@@ -5502,7 +5500,7 @@ fn fieldLocation(
.{ .field = .{ .identifier = "payload" } }
else
.begin;
- const field_name = union_obj.field_names.get(ip)[field_index];
+ const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
return .{ .field = if (container_ty.unionTagTypeSafety(mod)) |_|
.{ .payload_identifier = ip.stringToSlice(field_name) }
else
@@ -5735,8 +5733,8 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
else
.{ .identifier = ip.stringToSlice(struct_ty.legacyStructFieldName(extra.field_index, mod)) },
- .union_type => |union_type| field_name: {
- const union_obj = ip.loadUnionType(union_type);
+ .union_type => field_name: {
+ const union_obj = ip.loadUnionType(struct_ty.toIntern());
if (union_obj.flagsPtr(ip).layout == .Packed) {
const operand_lval = if (struct_byval == .constant) blk: {
const operand_local = try f.allocLocal(inst, struct_ty);
@@ -5762,8 +5760,8 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
} else {
- const name = union_obj.field_names.get(ip)[extra.field_index];
- break :field_name if (union_type.hasTag(ip)) .{
+ const name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
+ break :field_name if (union_obj.hasTag(ip)) .{
.payload_identifier = ip.stringToSlice(name),
} else .{
.identifier = ip.stringToSlice(name),
@@ -7171,7 +7169,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
const union_ty = f.typeOfIndex(inst);
const union_obj = mod.typeToUnion(union_ty).?;
- const field_name = union_obj.field_names.get(ip)[extra.field_index];
+ const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const payload_ty = f.typeOf(extra.init);
const payload = try f.resolveInst(extra.init);
try reap(f, inst, &.{extra.init});
src/codegen/llvm.zig
@@ -1997,7 +1997,7 @@ pub const Object = struct {
return debug_enum_type;
}
- const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
+ const enum_type = ip.loadEnumType(ty.toIntern());
const enumerators = try gpa.alloc(Builder.Metadata, enum_type.names.len);
defer gpa.free(enumerators);
@@ -2507,8 +2507,8 @@ pub const Object = struct {
try o.debug_type_map.put(gpa, ty, debug_struct_type);
return debug_struct_type;
},
- .struct_type => |struct_type| {
- if (!struct_type.haveFieldTypes(ip)) {
+ .struct_type => {
+ if (!ip.loadStructType(ty.toIntern()).haveFieldTypes(ip)) {
// This can happen if a struct type makes it all the way to
// flush() without ever being instantiated or referenced (even
// via pointer). The only reason we are hearing about it now is
@@ -2597,15 +2597,14 @@ pub const Object = struct {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
- const union_type = ip.indexToKey(ty.toIntern()).union_type;
+ const union_type = ip.loadUnionType(ty.toIntern());
if (!union_type.haveFieldTypes(ip) or !ty.hasRuntimeBitsIgnoreComptime(mod)) {
const debug_union_type = try o.makeEmptyNamespaceDebugType(owner_decl_index);
try o.debug_type_map.put(gpa, ty, debug_union_type);
return debug_union_type;
}
- const union_obj = ip.loadUnionType(union_type);
- const layout = mod.getUnionLayout(union_obj);
+ const layout = mod.getUnionLayout(union_type);
const debug_fwd_ref = try o.builder.debugForwardReference();
@@ -2622,7 +2621,7 @@ pub const Object = struct {
ty.abiSize(mod) * 8,
ty.abiAlignment(mod).toByteUnits(0) * 8,
try o.builder.debugTuple(
- &.{try o.lowerDebugType(Type.fromInterned(union_obj.enum_tag_ty))},
+ &.{try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty))},
),
);
@@ -2636,21 +2635,23 @@ pub const Object = struct {
var fields: std.ArrayListUnmanaged(Builder.Metadata) = .{};
defer fields.deinit(gpa);
- try fields.ensureUnusedCapacity(gpa, union_obj.field_names.len);
+ try fields.ensureUnusedCapacity(gpa, union_type.loadTagType(ip).names.len);
const debug_union_fwd_ref = if (layout.tag_size == 0)
debug_fwd_ref
else
try o.builder.debugForwardReference();
- for (0..union_obj.field_names.len) |field_index| {
- const field_ty = union_obj.field_types.get(ip)[field_index];
+    const tag_type = union_type.loadTagType(ip);
+
+ for (0..tag_type.names.len) |field_index| {
+ const field_ty = union_type.field_types.get(ip)[field_index];
if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_size = Type.fromInterned(field_ty).abiSize(mod);
- const field_align = mod.unionFieldNormalAlignment(union_obj, @intCast(field_index));
+ const field_align = mod.unionFieldNormalAlignment(union_type, @intCast(field_index));
- const field_name = union_obj.field_names.get(ip)[field_index];
+ const field_name = tag_type.names.get(ip)[field_index];
fields.appendAssumeCapacity(try o.builder.debugMemberType(
try o.builder.metadataString(ip.stringToSlice(field_name)),
.none, // File
@@ -2706,7 +2707,7 @@ pub const Object = struct {
.none, // File
debug_fwd_ref,
0, // Line
- try o.lowerDebugType(Type.fromInterned(union_obj.enum_tag_ty)),
+ try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty)),
layout.tag_size * 8,
layout.tag_align.toByteUnits(0) * 8,
tag_offset * 8,
@@ -3321,9 +3322,11 @@ pub const Object = struct {
return o.builder.structType(.normal, fields[0..fields_len]);
},
.simple_type => unreachable,
- .struct_type => |struct_type| {
+ .struct_type => {
if (o.type_map.get(t.toIntern())) |value| return value;
+ const struct_type = ip.loadStructType(t.toIntern());
+
if (struct_type.layout == .Packed) {
const int_ty = try o.lowerType(Type.fromInterned(struct_type.backingIntType(ip).*));
try o.type_map.put(o.gpa, t.toIntern(), int_ty);
@@ -3468,10 +3471,10 @@ pub const Object = struct {
}
return o.builder.structType(.normal, llvm_field_types.items);
},
- .union_type => |union_type| {
+ .union_type => {
if (o.type_map.get(t.toIntern())) |value| return value;
- const union_obj = ip.loadUnionType(union_type);
+ const union_obj = ip.loadUnionType(t.toIntern());
const layout = mod.getUnionLayout(union_obj);
if (union_obj.flagsPtr(ip).layout == .Packed) {
@@ -3555,7 +3558,7 @@ pub const Object = struct {
}
return gop.value_ptr.*;
},
- .enum_type => |enum_type| try o.lowerType(Type.fromInterned(enum_type.tag_ty)),
+ .enum_type => try o.lowerType(Type.fromInterned(ip.loadEnumType(t.toIntern()).tag_ty)),
.func_type => |func_type| try o.lowerTypeFn(func_type),
.error_set_type, .inferred_error_set_type => try o.errorIntType(),
// values, not types
@@ -4032,7 +4035,8 @@ pub const Object = struct {
else
struct_ty, vals);
},
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveLayout(ip));
const struct_ty = try o.lowerType(ty);
if (struct_type.layout == .Packed) {
@@ -4596,7 +4600,7 @@ pub const Object = struct {
fn getEnumTagNameFunction(o: *Object, enum_ty: Type) !Builder.Function.Index {
const zcu = o.module;
const ip = &zcu.intern_pool;
- const enum_type = ip.indexToKey(enum_ty.toIntern()).enum_type;
+ const enum_type = ip.loadEnumType(enum_ty.toIntern());
// TODO: detect when the type changes and re-emit this function.
const gop = try o.decl_map.getOrPut(o.gpa, enum_type.decl);
@@ -9620,7 +9624,7 @@ pub const FuncGen = struct {
fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index {
const o = self.dg.object;
const zcu = o.module;
- const enum_type = zcu.intern_pool.indexToKey(enum_ty.toIntern()).enum_type;
+ const enum_type = zcu.intern_pool.loadEnumType(enum_ty.toIntern());
// TODO: detect when the type changes and re-emit this function.
const gop = try o.named_enum_map.getOrPut(o.gpa, enum_type.decl);
@@ -10092,7 +10096,7 @@ pub const FuncGen = struct {
const tag_int = blk: {
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
- const union_field_name = union_obj.field_names.get(ip)[extra.field_index];
+ const union_field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?;
const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
const tag_int_val = try tag_val.intFromEnum(tag_ty, mod);
@@ -11154,7 +11158,8 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E
if (first_non_integer == null or classes[first_non_integer.?] == .none) {
assert(first_non_integer orelse classes.len == types_index);
switch (ip.indexToKey(return_type.toIntern())) {
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(return_type.toIntern());
assert(struct_type.haveLayout(ip));
const size: u64 = struct_type.size(ip).*;
assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
@@ -11446,7 +11451,8 @@ const ParamTypeIterator = struct {
return .byref;
}
switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveLayout(ip));
const size: u64 = struct_type.size(ip).*;
assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
@@ -11562,7 +11568,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
}
return false;
},
- .struct_type => |s| s,
+ .struct_type => ip.loadStructType(ty.toIntern()),
else => unreachable,
};
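
Note: the recurring change in this file is that the `indexToKey` switch cases for struct/union/enum types no longer capture a payload, because the keys are now minimal (owner Decl only); the full data is loaded on demand with `loadStructType`, `loadUnionType`, and `loadEnumType`. A hedged sketch of the resulting pattern (assuming `ip` is the module's intern pool and a `ty: Type` is in scope):

    switch (ip.indexToKey(ty.toIntern())) {
        .struct_type => {
            const struct_type = ip.loadStructType(ty.toIntern());
            _ = struct_type.haveLayout(ip);
        },
        .union_type => {
            const union_type = ip.loadUnionType(ty.toIntern());
            _ = union_type.hasTag(ip);
        },
        .enum_type => {
            const enum_type = ip.loadEnumType(ty.toIntern());
            _ = enum_type.names.len;
        },
        else => {},
    }
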
src/codegen/spirv.zig
@@ -1528,7 +1528,7 @@ const DeclGen = struct {
try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
return ty_ref;
},
- .struct_type => |struct_type| struct_type,
+ .struct_type => ip.loadStructType(ty.toIntern()),
else => unreachable,
};
@@ -3633,7 +3633,8 @@ const DeclGen = struct {
index += 1;
}
},
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(result_ty.toIntern());
var it = struct_type.iterateRuntimeOrder(ip);
for (elements, 0..) |element, i| {
const field_index = it.next().?;
@@ -3901,36 +3902,33 @@ const DeclGen = struct {
const mod = self.module;
const ip = &mod.intern_pool;
const union_ty = mod.typeToUnion(ty).?;
+ const tag_ty = Type.fromInterned(union_ty.enum_tag_ty);
if (union_ty.getLayout(ip) == .Packed) {
unreachable; // TODO
}
- const maybe_tag_ty = ty.unionTagTypeSafety(mod);
const layout = self.unionLayout(ty);
const tag_int = if (layout.tag_size != 0) blk: {
- const tag_ty = maybe_tag_ty.?;
- const union_field_name = union_ty.field_names.get(ip)[active_field];
- const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?;
- const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
+ const tag_val = try mod.enumValueFieldIndex(tag_ty, active_field);
const tag_int_val = try tag_val.intFromEnum(tag_ty, mod);
break :blk tag_int_val.toUnsignedInt(mod);
} else 0;
if (!layout.has_payload) {
- const tag_ty_ref = try self.resolveType(maybe_tag_ty.?, .direct);
+ const tag_ty_ref = try self.resolveType(tag_ty, .direct);
return try self.constInt(tag_ty_ref, tag_int);
}
const tmp_id = try self.alloc(ty, .{ .storage_class = .Function });
if (layout.tag_size != 0) {
- const tag_ty_ref = try self.resolveType(maybe_tag_ty.?, .direct);
- const tag_ptr_ty_ref = try self.ptrType(maybe_tag_ty.?, .Function);
+ const tag_ty_ref = try self.resolveType(tag_ty, .direct);
+ const tag_ptr_ty_ref = try self.ptrType(tag_ty, .Function);
const ptr_id = try self.accessChain(tag_ptr_ty_ref, tmp_id, &.{@as(u32, @intCast(layout.tag_index))});
const tag_id = try self.constInt(tag_ty_ref, tag_int);
- try self.store(maybe_tag_ty.?, ptr_id, tag_id, .{});
+ try self.store(tag_ty, ptr_id, tag_id, .{});
}
const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]);
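
Note: the SPIR-V backend now derives the tag value straight from `active_field` via `mod.enumValueFieldIndex(tag_ty, active_field)`, dropping the round trip through the field name; this relies on union fields and tag-enum names sharing declaration order. A hedged before/after sketch, identifiers as in the hunk:

    // Old: field index -> field name -> enum field index -> tag value.
    // New: the union field index is already the enum field index.
    const tag_val = try mod.enumValueFieldIndex(tag_ty, active_field);
    const tag_int = (try tag_val.intFromEnum(tag_ty, mod)).toUnsignedInt(mod);
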
src/link/Dwarf.zig
@@ -311,7 +311,8 @@ pub const DeclState = struct {
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
},
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
// DW.AT.name, DW.FORM.string
try ty.print(dbg_info_buffer.writer(), mod);
try dbg_info_buffer.append(0);
@@ -374,7 +375,7 @@ pub const DeclState = struct {
try ty.print(dbg_info_buffer.writer(), mod);
try dbg_info_buffer.append(0);
- const enum_type = ip.indexToKey(ty.ip_index).enum_type;
+ const enum_type = ip.loadEnumType(ty.ip_index);
for (enum_type.names.get(ip), 0..) |field_name_index, field_i| {
const field_name = ip.stringToSlice(field_name_index);
// DW.AT.enumerator
@@ -442,7 +443,7 @@ pub const DeclState = struct {
try dbg_info_buffer.append(0);
}
- for (union_obj.field_types.get(ip), union_obj.field_names.get(ip)) |field_ty, field_name| {
+ for (union_obj.field_types.get(ip), union_obj.loadTagType(ip).names.get(ip)) |field_ty, field_name| {
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
// DW.AT.member
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_member));
src/codegen.zig
@@ -510,88 +510,91 @@ pub fn generateSymbol(
}
}
},
- .struct_type => |struct_type| switch (struct_type.layout) {
- .Packed => {
- const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse
- return error.Overflow;
- const current_pos = code.items.len;
- try code.resize(current_pos + abi_size);
- var bits: u16 = 0;
-
- for (struct_type.field_types.get(ip), 0..) |field_ty, index| {
- const field_val = switch (aggregate.storage) {
- .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
- .ty = field_ty,
- .storage = .{ .u64 = bytes[index] },
- } }),
- .elems => |elems| elems[index],
- .repeated_elem => |elem| elem,
- };
+ .struct_type => {
+ const struct_type = ip.loadStructType(typed_value.ty.toIntern());
+ switch (struct_type.layout) {
+ .Packed => {
+ const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse
+ return error.Overflow;
+ const current_pos = code.items.len;
+ try code.resize(current_pos + abi_size);
+ var bits: u16 = 0;
+
+ for (struct_type.field_types.get(ip), 0..) |field_ty, index| {
+ const field_val = switch (aggregate.storage) {
+ .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
+ .ty = field_ty,
+ .storage = .{ .u64 = bytes[index] },
+ } }),
+ .elems => |elems| elems[index],
+ .repeated_elem => |elem| elem,
+ };
+
+ // pointer may point to a decl which must be marked used
+ // but can also result in a relocation. Therefore we handle those separately.
+ if (Type.fromInterned(field_ty).zigTypeTag(mod) == .Pointer) {
+ const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(mod)) orelse
+ return error.Overflow;
+ var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
+ defer tmp_list.deinit();
+ switch (try generateSymbol(bin_file, src_loc, .{
+ .ty = Type.fromInterned(field_ty),
+ .val = Value.fromInterned(field_val),
+ }, &tmp_list, debug_output, reloc_info)) {
+ .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
+ .fail => |em| return Result{ .fail = em },
+ }
+ } else {
+ Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), mod, code.items[current_pos..], bits) catch unreachable;
+ }
+ bits += @as(u16, @intCast(Type.fromInterned(field_ty).bitSize(mod)));
+ }
+ },
+ .Auto, .Extern => {
+ const struct_begin = code.items.len;
+ const field_types = struct_type.field_types.get(ip);
+ const offsets = struct_type.offsets.get(ip);
+
+ var it = struct_type.iterateRuntimeOrder(ip);
+ while (it.next()) |field_index| {
+ const field_ty = field_types[field_index];
+ if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
+
+ const field_val = switch (ip.indexToKey(typed_value.val.toIntern()).aggregate.storage) {
+ .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
+ .ty = field_ty,
+ .storage = .{ .u64 = bytes[field_index] },
+ } }),
+ .elems => |elems| elems[field_index],
+ .repeated_elem => |elem| elem,
+ };
+
+ const padding = math.cast(
+ usize,
+ offsets[field_index] - (code.items.len - struct_begin),
+ ) orelse return error.Overflow;
+ if (padding > 0) try code.appendNTimes(0, padding);
- // pointer may point to a decl which must be marked used
- // but can also result in a relocation. Therefore we handle those separately.
- if (Type.fromInterned(field_ty).zigTypeTag(mod) == .Pointer) {
- const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(mod)) orelse
- return error.Overflow;
- var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
- defer tmp_list.deinit();
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.fromInterned(field_ty),
.val = Value.fromInterned(field_val),
- }, &tmp_list, debug_output, reloc_info)) {
- .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
+ }, code, debug_output, reloc_info)) {
+ .ok => {},
.fail => |em| return Result{ .fail = em },
}
- } else {
- Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), mod, code.items[current_pos..], bits) catch unreachable;
}
- bits += @as(u16, @intCast(Type.fromInterned(field_ty).bitSize(mod)));
- }
- },
- .Auto, .Extern => {
- const struct_begin = code.items.len;
- const field_types = struct_type.field_types.get(ip);
- const offsets = struct_type.offsets.get(ip);
-
- var it = struct_type.iterateRuntimeOrder(ip);
- while (it.next()) |field_index| {
- const field_ty = field_types[field_index];
- if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
-
- const field_val = switch (ip.indexToKey(typed_value.val.toIntern()).aggregate.storage) {
- .bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
- .ty = field_ty,
- .storage = .{ .u64 = bytes[field_index] },
- } }),
- .elems => |elems| elems[field_index],
- .repeated_elem => |elem| elem,
- };
+
+ const size = struct_type.size(ip).*;
+ const alignment = struct_type.flagsPtr(ip).alignment.toByteUnitsOptional().?;
const padding = math.cast(
usize,
- offsets[field_index] - (code.items.len - struct_begin),
+ std.mem.alignForward(u64, size, @max(alignment, 1)) -
+ (code.items.len - struct_begin),
) orelse return error.Overflow;
if (padding > 0) try code.appendNTimes(0, padding);
-
- switch (try generateSymbol(bin_file, src_loc, .{
- .ty = Type.fromInterned(field_ty),
- .val = Value.fromInterned(field_val),
- }, code, debug_output, reloc_info)) {
- .ok => {},
- .fail => |em| return Result{ .fail = em },
- }
- }
-
- const size = struct_type.size(ip).*;
- const alignment = struct_type.flagsPtr(ip).alignment.toByteUnitsOptional().?;
-
- const padding = math.cast(
- usize,
- std.mem.alignForward(u64, size, @max(alignment, 1)) -
- (code.items.len - struct_begin),
- ) orelse return error.Overflow;
- if (padding > 0) try code.appendNTimes(0, padding);
- },
+ },
+ }
},
else => unreachable,
},
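
Note: the `.struct_type` branch above is re-indented under the new payload-less case, which loads the struct type explicitly; the emission logic itself is unchanged. For the Auto/Extern path, padding is still derived from the resolved field offsets and the struct's stored size, e.g. (hedged arithmetic sketch with illustrative numbers, identifiers as in the hunk):

    // Suppose 10 bytes were emitted since struct_begin, the resolved size is 11
    // and the alignment is 4: alignForward(11, 4) = 12, so 12 - 10 = 2 zero
    // bytes of trailing padding are appended.
    const trailing = std.mem.alignForward(u64, size, @max(alignment, 1)) -
        (code.items.len - struct_begin);
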
src/InternPool.zig
@@ -644,348 +644,6 @@ pub const Key = union(enum) {
child: Index,
};
- pub const OpaqueType = extern struct {
- /// The Decl that corresponds to the opaque itself.
- decl: DeclIndex,
- /// Represents the declarations inside this opaque.
- namespace: NamespaceIndex,
- zir_index: TrackedInst.Index.Optional,
- };
-
- /// Although packed structs and non-packed structs are encoded differently,
- /// this struct is used for both categories since they share some common
- /// functionality.
- pub const StructType = struct {
- extra_index: u32,
- /// `none` when the struct is `@TypeOf(.{})`.
- decl: OptionalDeclIndex,
- /// `none` when the struct has no declarations.
- namespace: OptionalNamespaceIndex,
- /// Index of the struct_decl ZIR instruction.
- zir_index: TrackedInst.Index.Optional,
- layout: std.builtin.Type.ContainerLayout,
- field_names: NullTerminatedString.Slice,
- field_types: Index.Slice,
- field_inits: Index.Slice,
- field_aligns: Alignment.Slice,
- runtime_order: RuntimeOrder.Slice,
- comptime_bits: ComptimeBits,
- offsets: Offsets,
- names_map: OptionalMapIndex,
-
- pub const ComptimeBits = struct {
- start: u32,
- /// This is the number of u32 elements, not the number of struct fields.
- len: u32,
-
- pub fn get(this: @This(), ip: *const InternPool) []u32 {
- return ip.extra.items[this.start..][0..this.len];
- }
-
- pub fn getBit(this: @This(), ip: *const InternPool, i: usize) bool {
- if (this.len == 0) return false;
- return @as(u1, @truncate(this.get(ip)[i / 32] >> @intCast(i % 32))) != 0;
- }
-
- pub fn setBit(this: @This(), ip: *const InternPool, i: usize) void {
- this.get(ip)[i / 32] |= @as(u32, 1) << @intCast(i % 32);
- }
-
- pub fn clearBit(this: @This(), ip: *const InternPool, i: usize) void {
- this.get(ip)[i / 32] &= ~(@as(u32, 1) << @intCast(i % 32));
- }
- };
-
- pub const Offsets = struct {
- start: u32,
- len: u32,
-
- pub fn get(this: @This(), ip: *const InternPool) []u32 {
- return @ptrCast(ip.extra.items[this.start..][0..this.len]);
- }
- };
-
- pub const RuntimeOrder = enum(u32) {
- /// Placeholder until layout is resolved.
- unresolved = std.math.maxInt(u32) - 0,
- /// Field not present at runtime
- omitted = std.math.maxInt(u32) - 1,
- _,
-
- pub const Slice = struct {
- start: u32,
- len: u32,
-
- pub fn get(slice: RuntimeOrder.Slice, ip: *const InternPool) []RuntimeOrder {
- return @ptrCast(ip.extra.items[slice.start..][0..slice.len]);
- }
- };
-
- pub fn toInt(i: @This()) ?u32 {
- return switch (i) {
- .omitted => null,
- .unresolved => unreachable,
- else => @intFromEnum(i),
- };
- }
- };
-
- /// Look up field index based on field name.
- pub fn nameIndex(self: StructType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
- const names_map = self.names_map.unwrap() orelse {
- const i = name.toUnsigned(ip) orelse return null;
- if (i >= self.field_types.len) return null;
- return i;
- };
- const map = &ip.maps.items[@intFromEnum(names_map)];
- const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) };
- const field_index = map.getIndexAdapted(name, adapter) orelse return null;
- return @intCast(field_index);
- }
-
- /// Returns the already-existing field with the same name, if any.
- pub fn addFieldName(
- self: @This(),
- ip: *InternPool,
- name: NullTerminatedString,
- ) ?u32 {
- return ip.addFieldName(self.names_map.unwrap().?, self.field_names.start, name);
- }
-
- pub fn fieldAlign(s: @This(), ip: *const InternPool, i: usize) Alignment {
- if (s.field_aligns.len == 0) return .none;
- return s.field_aligns.get(ip)[i];
- }
-
- pub fn fieldInit(s: @This(), ip: *const InternPool, i: usize) Index {
- if (s.field_inits.len == 0) return .none;
- assert(s.haveFieldInits(ip));
- return s.field_inits.get(ip)[i];
- }
-
- /// Returns `none` in the case the struct is a tuple.
- pub fn fieldName(s: @This(), ip: *const InternPool, i: usize) OptionalNullTerminatedString {
- if (s.field_names.len == 0) return .none;
- return s.field_names.get(ip)[i].toOptional();
- }
-
- pub fn fieldIsComptime(s: @This(), ip: *const InternPool, i: usize) bool {
- return s.comptime_bits.getBit(ip, i);
- }
-
- pub fn setFieldComptime(s: @This(), ip: *InternPool, i: usize) void {
- s.comptime_bits.setBit(ip, i);
- }
-
- /// Reads the non-opv flag calculated during AstGen. Used to short-circuit more
- /// complicated logic.
- pub fn knownNonOpv(s: @This(), ip: *InternPool) bool {
- return switch (s.layout) {
- .Packed => false,
- .Auto, .Extern => s.flagsPtr(ip).known_non_opv,
- };
- }
-
- /// The returned pointer expires with any addition to the `InternPool`.
- /// Asserts the struct is not packed.
- pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStruct.Flags {
- assert(self.layout != .Packed);
- const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
- return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
- }
-
- /// The returned pointer expires with any addition to the `InternPool`.
- /// Asserts that the struct is packed.
- pub fn packedFlagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStructPacked.Flags {
- assert(self.layout == .Packed);
- const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?;
- return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
- }
-
- pub fn assumeRuntimeBitsIfFieldTypesWip(s: @This(), ip: *InternPool) bool {
- if (s.layout == .Packed) return false;
- const flags_ptr = s.flagsPtr(ip);
- if (flags_ptr.field_types_wip) {
- flags_ptr.assumed_runtime_bits = true;
- return true;
- }
- return false;
- }
-
- pub fn setTypesWip(s: @This(), ip: *InternPool) bool {
- if (s.layout == .Packed) return false;
- const flags_ptr = s.flagsPtr(ip);
- if (flags_ptr.field_types_wip) return true;
- flags_ptr.field_types_wip = true;
- return false;
- }
-
- pub fn clearTypesWip(s: @This(), ip: *InternPool) void {
- if (s.layout == .Packed) return;
- s.flagsPtr(ip).field_types_wip = false;
- }
-
- pub fn setLayoutWip(s: @This(), ip: *InternPool) bool {
- if (s.layout == .Packed) return false;
- const flags_ptr = s.flagsPtr(ip);
- if (flags_ptr.layout_wip) return true;
- flags_ptr.layout_wip = true;
- return false;
- }
-
- pub fn clearLayoutWip(s: @This(), ip: *InternPool) void {
- if (s.layout == .Packed) return;
- s.flagsPtr(ip).layout_wip = false;
- }
-
- pub fn setAlignmentWip(s: @This(), ip: *InternPool) bool {
- if (s.layout == .Packed) return false;
- const flags_ptr = s.flagsPtr(ip);
- if (flags_ptr.alignment_wip) return true;
- flags_ptr.alignment_wip = true;
- return false;
- }
-
- pub fn clearAlignmentWip(s: @This(), ip: *InternPool) void {
- if (s.layout == .Packed) return;
- s.flagsPtr(ip).alignment_wip = false;
- }
-
- pub fn setInitsWip(s: @This(), ip: *InternPool) bool {
- switch (s.layout) {
- .Packed => {
- const flag = &s.packedFlagsPtr(ip).field_inits_wip;
- if (flag.*) return true;
- flag.* = true;
- return false;
- },
- .Auto, .Extern => {
- const flag = &s.flagsPtr(ip).field_inits_wip;
- if (flag.*) return true;
- flag.* = true;
- return false;
- },
- }
- }
-
- pub fn clearInitsWip(s: @This(), ip: *InternPool) void {
- switch (s.layout) {
- .Packed => s.packedFlagsPtr(ip).field_inits_wip = false,
- .Auto, .Extern => s.flagsPtr(ip).field_inits_wip = false,
- }
- }
-
- pub fn setFullyResolved(s: @This(), ip: *InternPool) bool {
- if (s.layout == .Packed) return true;
- const flags_ptr = s.flagsPtr(ip);
- if (flags_ptr.fully_resolved) return true;
- flags_ptr.fully_resolved = true;
- return false;
- }
-
- pub fn clearFullyResolved(s: @This(), ip: *InternPool) void {
- s.flagsPtr(ip).fully_resolved = false;
- }
-
- /// The returned pointer expires with any addition to the `InternPool`.
- /// Asserts the struct is not packed.
- pub fn size(self: @This(), ip: *InternPool) *u32 {
- assert(self.layout != .Packed);
- const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?;
- return @ptrCast(&ip.extra.items[self.extra_index + size_field_index]);
- }
-
- /// The backing integer type of the packed struct. Whether zig chooses
- /// this type or the user specifies it, it is stored here. This will be
- /// set to `none` until the layout is resolved.
- /// Asserts the struct is packed.
- pub fn backingIntType(s: @This(), ip: *const InternPool) *Index {
- assert(s.layout == .Packed);
- const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?;
- return @ptrCast(&ip.extra.items[s.extra_index + field_index]);
- }
-
- /// Asserts the struct is not packed.
- pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
- assert(s.layout != .Packed);
- const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?;
- ip.extra.items[s.extra_index + field_index] = @intFromEnum(new_zir_index);
- }
-
- pub fn haveFieldTypes(s: @This(), ip: *const InternPool) bool {
- const types = s.field_types.get(ip);
- return types.len == 0 or types[0] != .none;
- }
-
- pub fn haveFieldInits(s: @This(), ip: *const InternPool) bool {
- return switch (s.layout) {
- .Packed => s.packedFlagsPtr(ip).inits_resolved,
- .Auto, .Extern => s.flagsPtr(ip).inits_resolved,
- };
- }
-
- pub fn setHaveFieldInits(s: @This(), ip: *InternPool) void {
- switch (s.layout) {
- .Packed => s.packedFlagsPtr(ip).inits_resolved = true,
- .Auto, .Extern => s.flagsPtr(ip).inits_resolved = true,
- }
- }
-
- pub fn haveLayout(s: @This(), ip: *InternPool) bool {
- return switch (s.layout) {
- .Packed => s.backingIntType(ip).* != .none,
- .Auto, .Extern => s.flagsPtr(ip).layout_resolved,
- };
- }
-
- pub fn isTuple(s: @This(), ip: *InternPool) bool {
- return s.layout != .Packed and s.flagsPtr(ip).is_tuple;
- }
-
- pub fn hasReorderedFields(s: @This()) bool {
- return s.layout == .Auto;
- }
-
- pub const RuntimeOrderIterator = struct {
- ip: *InternPool,
- field_index: u32,
- struct_type: InternPool.Key.StructType,
-
- pub fn next(it: *@This()) ?u32 {
- var i = it.field_index;
-
- if (i >= it.struct_type.field_types.len)
- return null;
-
- if (it.struct_type.hasReorderedFields()) {
- it.field_index += 1;
- return it.struct_type.runtime_order.get(it.ip)[i].toInt();
- }
-
- while (it.struct_type.fieldIsComptime(it.ip, i)) {
- i += 1;
- if (i >= it.struct_type.field_types.len)
- return null;
- }
-
- it.field_index = i + 1;
- return i;
- }
- };
-
- /// Iterates over non-comptime fields in the order they are laid out in memory at runtime.
- /// May or may not include zero-bit fields.
- /// Asserts the struct is not packed.
- pub fn iterateRuntimeOrder(s: @This(), ip: *InternPool) RuntimeOrderIterator {
- assert(s.layout != .Packed);
- return .{
- .ip = ip,
- .field_index = 0,
- .struct_type = s,
- };
- }
- };
-
pub const AnonStructType = struct {
types: Index.Slice,
/// This may be empty, indicating this is a tuple.
@@ -1009,156 +667,28 @@ pub const Key = union(enum) {
}
};
- /// Serves two purposes:
- /// * Being the key in the InternPool hash map, which only requires the `decl` field.
- /// * Provide the other fields that do not require chasing the enum type.
- pub const UnionType = struct {
- /// The Decl that corresponds to the union itself.
- decl: DeclIndex,
- /// The index of the `Tag.TypeUnion` payload. Ignored by `get`,
- /// populated by `indexToKey`.
- extra_index: u32,
- namespace: NamespaceIndex,
- flags: Tag.TypeUnion.Flags,
- /// The enum that provides the list of field names and values.
- enum_tag_ty: Index,
- zir_index: TrackedInst.Index.Optional,
-
- /// The returned pointer expires with any addition to the `InternPool`.
- pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeUnion.Flags {
- const flags_field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?;
- return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
- }
-
- /// The returned pointer expires with any addition to the `InternPool`.
- pub fn size(self: @This(), ip: *InternPool) *u32 {
- const size_field_index = std.meta.fieldIndex(Tag.TypeUnion, "size").?;
- return &ip.extra.items[self.extra_index + size_field_index];
- }
-
- /// The returned pointer expires with any addition to the `InternPool`.
- pub fn padding(self: @This(), ip: *InternPool) *u32 {
- const padding_field_index = std.meta.fieldIndex(Tag.TypeUnion, "padding").?;
- return &ip.extra.items[self.extra_index + padding_field_index];
- }
-
- pub fn haveFieldTypes(self: @This(), ip: *const InternPool) bool {
- return self.flagsPtr(ip).status.haveFieldTypes();
- }
-
- pub fn hasTag(self: @This(), ip: *const InternPool) bool {
- return self.flagsPtr(ip).runtime_tag.hasTag();
- }
-
- pub fn getLayout(self: @This(), ip: *const InternPool) std.builtin.Type.ContainerLayout {
- return self.flagsPtr(ip).layout;
- }
-
- pub fn haveLayout(self: @This(), ip: *const InternPool) bool {
- return self.flagsPtr(ip).status.haveLayout();
- }
-
- /// Pointer to an enum type which is used for the tag of the union.
- /// This type is created even for untagged unions, even when the memory
- /// layout does not store the tag.
- /// Whether zig chooses this type or the user specifies it, it is stored here.
- /// This will be set to the null type until status is `have_field_types`.
- /// This accessor is provided so that the tag type can be mutated, and so that
- /// when it is mutated, the mutations are observed.
- /// The returned pointer is invalidated when something is added to the `InternPool`.
- pub fn tagTypePtr(self: @This(), ip: *const InternPool) *Index {
- const tag_ty_field_index = std.meta.fieldIndex(Tag.TypeUnion, "tag_ty").?;
- return @ptrCast(&ip.extra.items[self.extra_index + tag_ty_field_index]);
- }
+ /// This is the hashmap key. To fetch other data associated with the struct, see `loadStructType`.
+ pub const StructType = struct {
+ /// The struct's owner Decl. `none` when the struct is `@TypeOf(.{})`.
+ decl: OptionalDeclIndex,
+ };
- pub fn setFieldTypes(self: @This(), ip: *InternPool, types: []const Index) void {
- @memcpy((Index.Slice{
- .start = @intCast(self.extra_index + @typeInfo(Tag.TypeUnion).Struct.fields.len),
- .len = @intCast(types.len),
- }).get(ip), types);
- }
+ /// This is the hashmap key. To fetch other data associated with the opaque, see `loadOpaqueType`.
+ pub const OpaqueType = struct {
+ /// The opaque's owner Decl.
+ decl: DeclIndex,
+ };
- pub fn setFieldAligns(self: @This(), ip: *InternPool, aligns: []const Alignment) void {
- if (aligns.len == 0) return;
- assert(self.flagsPtr(ip).any_aligned_fields);
- @memcpy((Alignment.Slice{
- .start = @intCast(
- self.extra_index + @typeInfo(Tag.TypeUnion).Struct.fields.len + aligns.len,
- ),
- .len = @intCast(aligns.len),
- }).get(ip), aligns);
- }
+ /// This is the hashmap key. To fetch other data associated with the union, see `loadUnionType`.
+ pub const UnionType = struct {
+ /// The union's owner Decl.
+ decl: DeclIndex,
};
+ /// This is the hashmap key. To fetch other data associated with the enum, see `loadEnumType`.
pub const EnumType = struct {
- /// The Decl that corresponds to the enum itself.
+ /// The enum's owner Decl.
decl: DeclIndex,
- /// Represents the declarations inside this enum.
- namespace: OptionalNamespaceIndex,
- /// An integer type which is used for the numerical value of the enum.
- /// This field is present regardless of whether the enum has an
- /// explicitly provided tag type or auto-numbered.
- tag_ty: Index,
- /// Set of field names in declaration order.
- names: NullTerminatedString.Slice,
- /// Maps integer tag value to field index.
- /// Entries are in declaration order, same as `fields`.
- /// If this is empty, it means the enum tags are auto-numbered.
- values: Index.Slice,
- tag_mode: TagMode,
- /// This is ignored by `get` but will always be provided by `indexToKey`.
- names_map: OptionalMapIndex = .none,
- /// This is ignored by `get` but will be provided by `indexToKey` when
- /// a value map exists.
- values_map: OptionalMapIndex = .none,
- zir_index: TrackedInst.Index.Optional,
-
- pub const TagMode = enum {
- /// The integer tag type was auto-numbered by zig.
- auto,
- /// The integer tag type was provided by the enum declaration, and the enum
- /// is exhaustive.
- explicit,
- /// The integer tag type was provided by the enum declaration, and the enum
- /// is non-exhaustive.
- nonexhaustive,
- };
-
- /// Look up field index based on field name.
- pub fn nameIndex(self: EnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
- const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)];
- const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) };
- const field_index = map.getIndexAdapted(name, adapter) orelse return null;
- return @intCast(field_index);
- }
-
- /// Look up field index based on tag value.
- /// Asserts that `values_map` is not `none`.
- /// This function returns `null` when `tag_val` does not have the
- /// integer tag type of the enum.
- pub fn tagValueIndex(self: EnumType, ip: *const InternPool, tag_val: Index) ?u32 {
- assert(tag_val != .none);
- // TODO: we should probably decide a single interface for this function, but currently
- // it's being called with both tag values and underlying ints. Fix this!
- const int_tag_val = switch (ip.indexToKey(tag_val)) {
- .enum_tag => |enum_tag| enum_tag.int,
- .int => tag_val,
- else => unreachable,
- };
- if (self.values_map.unwrap()) |values_map| {
- const map = &ip.maps.items[@intFromEnum(values_map)];
- const adapter: Index.Adapter = .{ .indexes = self.values.get(ip) };
- const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null;
- return @intCast(field_index);
- }
- // Auto-numbered enum. Convert `int_tag_val` to field index.
- const field_index = switch (ip.indexToKey(int_tag_val).int.storage) {
- inline .u64, .i64 => |x| std.math.cast(u32, x) orelse return null,
- .big_int => |x| x.to(u32) catch return null,
- .lazy_align, .lazy_size => unreachable,
- };
- return if (field_index < self.names.len) field_index else null;
- }
};
pub const IncompleteEnumType = struct {
@@ -1173,12 +703,13 @@ pub const Key = union(enum) {
/// later when populating field values.
has_values: bool,
/// Same as corresponding `EnumType` field.
- tag_mode: EnumType.TagMode,
+ tag_mode: LoadedEnumType.TagMode,
/// This may be updated via `setTagType` later.
tag_ty: Index = .none,
zir_index: TrackedInst.Index.Optional,
- pub fn toEnumType(self: @This()) EnumType {
+ pub fn toEnumType(self: @This()) LoadedEnumType {
+ if (true) @compileError("AHHHH");
return .{
.decl = self.decl,
.namespace = self.namespace,
@@ -1193,7 +724,7 @@ pub const Key = union(enum) {
/// Only the decl is used for hashing and equality, so we can construct
/// this minimal key for use with `map`.
pub fn toKey(self: @This()) Key {
- return .{ .enum_type = self.toEnumType() };
+ return .{ .enum_type = .{ .decl = self.decl } };
}
};
@@ -2111,21 +1642,15 @@ pub const RequiresComptime = enum(u2) { no, yes, unknown, wip };
// Unlike `Tag.TypeUnion` which is an encoding, and `Key.UnionType` which is a
// minimal hashmap key, this type is a convenience type that contains info
// needed by semantic analysis.
-pub const UnionType = struct {
+pub const LoadedUnionType = struct {
+ /// The index of the `Tag.TypeUnion` payload.
+ extra_index: u32,
/// The Decl that corresponds to the union itself.
decl: DeclIndex,
/// Represents the declarations inside this union.
namespace: NamespaceIndex,
/// The enum tag type.
enum_tag_ty: Index,
- /// The integer tag type of the enum.
- int_tag_ty: Index,
- /// ABI size of the union, including padding
- size: u64,
- /// Trailing padding bytes
- padding: u32,
- /// List of field names in declaration order.
- field_names: NullTerminatedString.Slice,
/// List of field types in declaration order.
/// These are `none` until `status` is `have_field_types` or `have_layout`.
field_types: Index.Slice,
@@ -2135,10 +1660,6 @@ pub const UnionType = struct {
field_aligns: Alignment.Slice,
/// Index of the union_decl ZIR instruction.
zir_index: TrackedInst.Index.Optional,
- /// Index into extra array of the `flags` field.
- flags_index: u32,
- /// Copied from `enum_tag_ty`.
- names_map: OptionalMapIndex,
pub const RuntimeTag = enum(u2) {
none,
@@ -2193,68 +1714,92 @@ pub const UnionType = struct {
}
};
+ pub fn loadTagType(self: LoadedUnionType, ip: *InternPool) LoadedEnumType {
+ return ip.loadEnumType(self.enum_tag_ty);
+ }
+
+ /// Pointer to an enum type which is used for the tag of the union.
+ /// This type is created even for untagged unions, even when the memory
+ /// layout does not store the tag.
+ /// Whether zig chooses this type or the user specifies it, it is stored here.
+ /// This will be set to the null type until status is `have_field_types`.
+ /// This accessor is provided so that the tag type can be mutated, and so that
+ /// when it is mutated, the mutations are observed.
/// The returned pointer expires with any addition to the `InternPool`.
- pub fn flagsPtr(self: UnionType, ip: *const InternPool) *Tag.TypeUnion.Flags {
- return @ptrCast(&ip.extra.items[self.flags_index]);
+ pub fn tagTypePtr(self: LoadedUnionType, ip: *const InternPool) *Index {
+ const field_index = std.meta.fieldIndex(Tag.TypeUnion, "tag_ty").?;
+ return @ptrCast(&ip.extra.items[self.extra_index + field_index]);
}
- /// Look up field index based on field name.
- pub fn nameIndex(self: UnionType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
- const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)];
- const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) };
- const field_index = map.getIndexAdapted(name, adapter) orelse return null;
- return @intCast(field_index);
+ /// The returned pointer expires with any addition to the `InternPool`.
+ pub fn flagsPtr(self: LoadedUnionType, ip: *const InternPool) *Tag.TypeUnion.Flags {
+ const field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?;
+ return @ptrCast(&ip.extra.items[self.extra_index + field_index]);
+ }
+
+ /// The returned pointer expires with any addition to the `InternPool`.
+ pub fn size(self: LoadedUnionType, ip: *const InternPool) *u32 {
+ const field_index = std.meta.fieldIndex(Tag.TypeUnion, "size").?;
+ return &ip.extra.items[self.extra_index + field_index];
}
- pub fn hasTag(self: UnionType, ip: *const InternPool) bool {
+ /// The returned pointer expires with any addition to the `InternPool`.
+ pub fn padding(self: LoadedUnionType, ip: *const InternPool) *u32 {
+ const field_index = std.meta.fieldIndex(Tag.TypeUnion, "padding").?;
+ return &ip.extra.items[self.extra_index + field_index];
+ }
+
+ pub fn hasTag(self: LoadedUnionType, ip: *const InternPool) bool {
return self.flagsPtr(ip).runtime_tag.hasTag();
}
- pub fn haveFieldTypes(self: UnionType, ip: *const InternPool) bool {
+ pub fn haveFieldTypes(self: LoadedUnionType, ip: *const InternPool) bool {
return self.flagsPtr(ip).status.haveFieldTypes();
}
- pub fn haveLayout(self: UnionType, ip: *const InternPool) bool {
+ pub fn haveLayout(self: LoadedUnionType, ip: *const InternPool) bool {
return self.flagsPtr(ip).status.haveLayout();
}
- pub fn getLayout(self: UnionType, ip: *const InternPool) std.builtin.Type.ContainerLayout {
+ pub fn getLayout(self: LoadedUnionType, ip: *const InternPool) std.builtin.Type.ContainerLayout {
return self.flagsPtr(ip).layout;
}
- pub fn fieldAlign(self: UnionType, ip: *const InternPool, field_index: u32) Alignment {
+ pub fn fieldAlign(self: LoadedUnionType, ip: *const InternPool, field_index: u32) Alignment {
if (self.field_aligns.len == 0) return .none;
return self.field_aligns.get(ip)[field_index];
}
- /// This does not mutate the field of UnionType.
- pub fn setZirIndex(self: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
+ /// This does not mutate the field of LoadedUnionType.
+ pub fn setZirIndex(self: LoadedUnionType, ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
const flags_field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?;
const zir_index_field_index = std.meta.fieldIndex(Tag.TypeUnion, "zir_index").?;
const ptr: *TrackedInst.Index.Optional =
@ptrCast(&ip.extra.items[self.flags_index - flags_field_index + zir_index_field_index]);
ptr.* = new_zir_index;
}
+
+ pub fn setFieldTypes(self: LoadedUnionType, ip: *const InternPool, types: []const Index) void {
+ @memcpy(self.field_types.get(ip), types);
+ }
+
+ pub fn setFieldAligns(self: LoadedUnionType, ip: *const InternPool, aligns: []const Alignment) void {
+ if (aligns.len == 0) return;
+ assert(self.flagsPtr(ip).any_aligned_fields);
+ @memcpy(self.field_aligns.get(ip), aligns);
+ }
};
-/// Fetch all the interesting fields of a union type into a convenient data
-/// structure.
-/// This asserts that the union's enum tag type has been resolved.
-pub fn loadUnionType(ip: *InternPool, key: Key.UnionType) UnionType {
- const type_union = ip.extraDataTrail(Tag.TypeUnion, key.extra_index);
- const enum_ty = type_union.data.tag_ty;
- const enum_info = ip.indexToKey(enum_ty).enum_type;
- const fields_len: u32 = @intCast(enum_info.names.len);
+pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType {
+ const extra_index = ip.items.items(.data)[@intFromEnum(index)];
+ const type_union = ip.extraDataTrail(Tag.TypeUnion, extra_index);
+ const fields_len = type_union.data.fields_len;
return .{
+ .extra_index = extra_index,
.decl = type_union.data.decl,
.namespace = type_union.data.namespace,
- .enum_tag_ty = enum_ty,
- .int_tag_ty = enum_info.tag_ty,
- .size = type_union.data.size,
- .padding = type_union.data.padding,
- .field_names = enum_info.names,
- .names_map = enum_info.names_map,
+ .enum_tag_ty = type_union.data.tag_ty,
.field_types = .{
.start = type_union.end,
.len = fields_len,
@@ -2264,10 +1809,583 @@ pub fn loadUnionType(ip: *InternPool, key: Key.UnionType) UnionType {
.len = if (type_union.data.flags.any_aligned_fields) fields_len else 0,
},
.zir_index = type_union.data.zir_index,
- .flags_index = key.extra_index + std.meta.fieldIndex(Tag.TypeUnion, "flags").?,
};
}
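
Note: `loadUnionType` now takes an `Index` directly instead of a `Key.UnionType` carrying an `extra_index`, so callers no longer need to go through `indexToKey` before loading union data. A hedged usage sketch (assuming `ip` is the module's intern pool and `idx` is the index of a union type):

    const union_type = ip.loadUnionType(idx);
    const tag_info = union_type.loadTagType(ip); // field names/values live on the tag enum
    const fields_len = union_type.field_types.len; // should match tag_info.names.len
    const layout = union_type.getLayout(ip); // .Auto / .Extern / .Packed
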
+pub const LoadedStructType = struct {
+ /// The index of the `Tag.TypeStruct` or `Tag.TypeStructPacked` payload.
+ extra_index: u32,
+ /// The struct's owner Decl. `none` when the struct is `@TypeOf(.{})`.
+ decl: OptionalDeclIndex,
+ /// `none` when the struct has no declarations.
+ namespace: OptionalNamespaceIndex,
+ /// Index of the `struct_decl` ZIR instruction.
+ zir_index: TrackedInst.Index.Optional,
+ layout: std.builtin.Type.ContainerLayout,
+ field_names: NullTerminatedString.Slice,
+ field_types: Index.Slice,
+ field_inits: Index.Slice,
+ field_aligns: Alignment.Slice,
+ runtime_order: RuntimeOrder.Slice,
+ comptime_bits: ComptimeBits,
+ offsets: Offsets,
+ names_map: OptionalMapIndex,
+
+ pub const ComptimeBits = struct {
+ start: u32,
+ /// This is the number of u32 elements, not the number of struct fields.
+ len: u32,
+
+ pub fn get(this: ComptimeBits, ip: *const InternPool) []u32 {
+ return ip.extra.items[this.start..][0..this.len];
+ }
+
+ pub fn getBit(this: ComptimeBits, ip: *const InternPool, i: usize) bool {
+ if (this.len == 0) return false;
+ return @as(u1, @truncate(this.get(ip)[i / 32] >> @intCast(i % 32))) != 0;
+ }
+
+ pub fn setBit(this: ComptimeBits, ip: *const InternPool, i: usize) void {
+ this.get(ip)[i / 32] |= @as(u32, 1) << @intCast(i % 32);
+ }
+
+ pub fn clearBit(this: ComptimeBits, ip: *const InternPool, i: usize) void {
+ this.get(ip)[i / 32] &= ~(@as(u32, 1) << @intCast(i % 32));
+ }
+ };
+
+ pub const Offsets = struct {
+ start: u32,
+ len: u32,
+
+ pub fn get(this: Offsets, ip: *const InternPool) []u32 {
+ return @ptrCast(ip.extra.items[this.start..][0..this.len]);
+ }
+ };
+
+ pub const RuntimeOrder = enum(u32) {
+ /// Placeholder until layout is resolved.
+ unresolved = std.math.maxInt(u32) - 0,
+ /// Field not present at runtime
+ omitted = std.math.maxInt(u32) - 1,
+ _,
+
+ pub const Slice = struct {
+ start: u32,
+ len: u32,
+
+ pub fn get(slice: RuntimeOrder.Slice, ip: *const InternPool) []RuntimeOrder {
+ return @ptrCast(ip.extra.items[slice.start..][0..slice.len]);
+ }
+ };
+
+ pub fn toInt(i: RuntimeOrder) ?u32 {
+ return switch (i) {
+ .omitted => null,
+ .unresolved => unreachable,
+ else => @intFromEnum(i),
+ };
+ }
+ };
+
+ /// Look up field index based on field name.
+ pub fn nameIndex(self: LoadedStructType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
+ const names_map = self.names_map.unwrap() orelse {
+ const i = name.toUnsigned(ip) orelse return null;
+ if (i >= self.field_types.len) return null;
+ return i;
+ };
+ const map = &ip.maps.items[@intFromEnum(names_map)];
+ const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) };
+ const field_index = map.getIndexAdapted(name, adapter) orelse return null;
+ return @intCast(field_index);
+ }
+
+ /// Returns the already-existing field with the same name, if any.
+ pub fn addFieldName(
+ self: @This(),
+ ip: *InternPool,
+ name: NullTerminatedString,
+ ) ?u32 {
+ return ip.addFieldName(self.names_map.unwrap().?, self.field_names.start, name);
+ }
+
+ pub fn fieldAlign(s: @This(), ip: *const InternPool, i: usize) Alignment {
+ if (s.field_aligns.len == 0) return .none;
+ return s.field_aligns.get(ip)[i];
+ }
+
+ pub fn fieldInit(s: @This(), ip: *const InternPool, i: usize) Index {
+ if (s.field_inits.len == 0) return .none;
+ assert(s.haveFieldInits(ip));
+ return s.field_inits.get(ip)[i];
+ }
+
+ /// Returns `none` in the case the struct is a tuple.
+ pub fn fieldName(s: @This(), ip: *const InternPool, i: usize) OptionalNullTerminatedString {
+ if (s.field_names.len == 0) return .none;
+ return s.field_names.get(ip)[i].toOptional();
+ }
+
+ pub fn fieldIsComptime(s: @This(), ip: *const InternPool, i: usize) bool {
+ return s.comptime_bits.getBit(ip, i);
+ }
+
+ pub fn setFieldComptime(s: @This(), ip: *InternPool, i: usize) void {
+ s.comptime_bits.setBit(ip, i);
+ }
+
+ /// Reads the non-opv flag calculated during AstGen. Used to short-circuit more
+ /// complicated logic.
+ pub fn knownNonOpv(s: @This(), ip: *InternPool) bool {
+ return switch (s.layout) {
+ .Packed => false,
+ .Auto, .Extern => s.flagsPtr(ip).known_non_opv,
+ };
+ }
+
+ /// The returned pointer expires with any addition to the `InternPool`.
+ /// Asserts the struct is not packed.
+ pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStruct.Flags {
+ assert(self.layout != .Packed);
+ const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
+ return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
+ }
+
+ /// The returned pointer expires with any addition to the `InternPool`.
+ /// Asserts that the struct is packed.
+ pub fn packedFlagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStructPacked.Flags {
+ assert(self.layout == .Packed);
+ const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?;
+ return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
+ }
+
+ pub fn assumeRuntimeBitsIfFieldTypesWip(s: @This(), ip: *InternPool) bool {
+ if (s.layout == .Packed) return false;
+ const flags_ptr = s.flagsPtr(ip);
+ if (flags_ptr.field_types_wip) {
+ flags_ptr.assumed_runtime_bits = true;
+ return true;
+ }
+ return false;
+ }
+
+ pub fn setTypesWip(s: @This(), ip: *InternPool) bool {
+ if (s.layout == .Packed) return false;
+ const flags_ptr = s.flagsPtr(ip);
+ if (flags_ptr.field_types_wip) return true;
+ flags_ptr.field_types_wip = true;
+ return false;
+ }
+
+ pub fn clearTypesWip(s: @This(), ip: *InternPool) void {
+ if (s.layout == .Packed) return;
+ s.flagsPtr(ip).field_types_wip = false;
+ }
+
+ pub fn setLayoutWip(s: @This(), ip: *InternPool) bool {
+ if (s.layout == .Packed) return false;
+ const flags_ptr = s.flagsPtr(ip);
+ if (flags_ptr.layout_wip) return true;
+ flags_ptr.layout_wip = true;
+ return false;
+ }
+
+ pub fn clearLayoutWip(s: @This(), ip: *InternPool) void {
+ if (s.layout == .Packed) return;
+ s.flagsPtr(ip).layout_wip = false;
+ }
+
+ pub fn setAlignmentWip(s: @This(), ip: *InternPool) bool {
+ if (s.layout == .Packed) return false;
+ const flags_ptr = s.flagsPtr(ip);
+ if (flags_ptr.alignment_wip) return true;
+ flags_ptr.alignment_wip = true;
+ return false;
+ }
+
+ pub fn clearAlignmentWip(s: @This(), ip: *InternPool) void {
+ if (s.layout == .Packed) return;
+ s.flagsPtr(ip).alignment_wip = false;
+ }
+
+ pub fn setInitsWip(s: @This(), ip: *InternPool) bool {
+ switch (s.layout) {
+ .Packed => {
+ const flag = &s.packedFlagsPtr(ip).field_inits_wip;
+ if (flag.*) return true;
+ flag.* = true;
+ return false;
+ },
+ .Auto, .Extern => {
+ const flag = &s.flagsPtr(ip).field_inits_wip;
+ if (flag.*) return true;
+ flag.* = true;
+ return false;
+ },
+ }
+ }
+
+ pub fn clearInitsWip(s: @This(), ip: *InternPool) void {
+ switch (s.layout) {
+ .Packed => s.packedFlagsPtr(ip).field_inits_wip = false,
+ .Auto, .Extern => s.flagsPtr(ip).field_inits_wip = false,
+ }
+ }
+
+ pub fn setFullyResolved(s: @This(), ip: *InternPool) bool {
+ if (s.layout == .Packed) return true;
+ const flags_ptr = s.flagsPtr(ip);
+ if (flags_ptr.fully_resolved) return true;
+ flags_ptr.fully_resolved = true;
+ return false;
+ }
+
+ pub fn clearFullyResolved(s: @This(), ip: *InternPool) void {
+ s.flagsPtr(ip).fully_resolved = false;
+ }
+
+ /// The returned pointer expires with any addition to the `InternPool`.
+ /// Asserts the struct is not packed.
+ pub fn size(self: @This(), ip: *InternPool) *u32 {
+ assert(self.layout != .Packed);
+ const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?;
+ return @ptrCast(&ip.extra.items[self.extra_index + size_field_index]);
+ }
+
+ /// The backing integer type of the packed struct. Whether zig chooses
+ /// this type or the user specifies it, it is stored here. This will be
+ /// set to `none` until the layout is resolved.
+ /// Asserts the struct is packed.
+ pub fn backingIntType(s: @This(), ip: *const InternPool) *Index {
+ assert(s.layout == .Packed);
+ const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?;
+ return @ptrCast(&ip.extra.items[s.extra_index + field_index]);
+ }
+
+ /// Asserts the struct is not packed.
+ pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
+ assert(s.layout != .Packed);
+ const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?;
+ ip.extra.items[s.extra_index + field_index] = @intFromEnum(new_zir_index);
+ }
+
+ pub fn haveFieldTypes(s: @This(), ip: *const InternPool) bool {
+ const types = s.field_types.get(ip);
+ return types.len == 0 or types[0] != .none;
+ }
+
+ pub fn haveFieldInits(s: @This(), ip: *const InternPool) bool {
+ return switch (s.layout) {
+ .Packed => s.packedFlagsPtr(ip).inits_resolved,
+ .Auto, .Extern => s.flagsPtr(ip).inits_resolved,
+ };
+ }
+
+ pub fn setHaveFieldInits(s: @This(), ip: *InternPool) void {
+ switch (s.layout) {
+ .Packed => s.packedFlagsPtr(ip).inits_resolved = true,
+ .Auto, .Extern => s.flagsPtr(ip).inits_resolved = true,
+ }
+ }
+
+ pub fn haveLayout(s: @This(), ip: *InternPool) bool {
+ return switch (s.layout) {
+ .Packed => s.backingIntType(ip).* != .none,
+ .Auto, .Extern => s.flagsPtr(ip).layout_resolved,
+ };
+ }
+
+ pub fn isTuple(s: @This(), ip: *InternPool) bool {
+ return s.layout != .Packed and s.flagsPtr(ip).is_tuple;
+ }
+
+ pub fn hasReorderedFields(s: @This()) bool {
+ return s.layout == .Auto;
+ }
+
+ pub const RuntimeOrderIterator = struct {
+ ip: *InternPool,
+ field_index: u32,
+ struct_type: InternPool.LoadedStructType,
+
+ pub fn next(it: *@This()) ?u32 {
+ var i = it.field_index;
+
+ if (i >= it.struct_type.field_types.len)
+ return null;
+
+ if (it.struct_type.hasReorderedFields()) {
+ it.field_index += 1;
+ return it.struct_type.runtime_order.get(it.ip)[i].toInt();
+ }
+
+ while (it.struct_type.fieldIsComptime(it.ip, i)) {
+ i += 1;
+ if (i >= it.struct_type.field_types.len)
+ return null;
+ }
+
+ it.field_index = i + 1;
+ return i;
+ }
+ };
+
+ /// Iterates over non-comptime fields in the order they are laid out in memory at runtime.
+ /// May or may not include zero-bit fields.
+ /// Asserts the struct is not packed.
+ pub fn iterateRuntimeOrder(s: @This(), ip: *InternPool) RuntimeOrderIterator {
+ assert(s.layout != .Packed);
+ return .{
+ .ip = ip,
+ .field_index = 0,
+ .struct_type = s,
+ };
+ }
+};
+
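The `set*Wip`/`clear*Wip` accessors above follow a test-and-set convention: the setter returns true when that resolution step is already in progress, which callers treat as a dependency loop. A minimal sketch of the intended calling pattern (the `resolveStructFieldTypes` helper and the error name are illustrative, not part of this commit); note that, per the doc comment on `flagsPtr`, flag pointers must be re-fetched after anything is added to the InternPool:

    fn resolveStructFieldTypes(ip: *InternPool, s: InternPool.LoadedStructType) !void {
        // Returns true if field-type resolution is already underway: a dependency loop.
        if (s.setTypesWip(ip)) return error.DependencyLoop;
        defer s.clearTypesWip(ip);
        // ... fill in s.field_types here; any call that appends to ip.extra
        // invalidates pointers previously obtained from flagsPtr()/size().
    }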
+pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
+ const item = ip.items.get(@intFromEnum(index));
+ switch (item.tag) {
+ .type_struct => {
+ if (item.data == 0) return .{
+ .extra_index = 0,
+ .decl = .none,
+ .namespace = .none,
+ .zir_index = .none,
+ .layout = .Auto,
+ .field_names = .{ .start = 0, .len = 0 },
+ .field_types = .{ .start = 0, .len = 0 },
+ .field_inits = .{ .start = 0, .len = 0 },
+ .field_aligns = .{ .start = 0, .len = 0 },
+ .runtime_order = .{ .start = 0, .len = 0 },
+ .comptime_bits = .{ .start = 0, .len = 0 },
+ .offsets = .{ .start = 0, .len = 0 },
+ .names_map = .none,
+ };
+ const extra = ip.extraDataTrail(Tag.TypeStruct, item.data);
+ const fields_len = extra.data.fields_len;
+ var extra_index = extra.end + fields_len; // skip field types
+ const names_map: OptionalMapIndex, const names: NullTerminatedString.Slice = if (!extra.data.flags.is_tuple) n: {
+ const names_map: OptionalMapIndex = @enumFromInt(ip.extra.items[extra_index]);
+ extra_index += 1;
+ const names: NullTerminatedString.Slice = .{ .start = extra_index, .len = fields_len };
+ extra_index += fields_len;
+ break :n .{ names_map, names };
+ } else .{ .none, .{ .start = 0, .len = 0 } };
+ const inits: Index.Slice = if (extra.data.flags.any_default_inits) i: {
+ const inits: Index.Slice = .{ .start = extra_index, .len = fields_len };
+ extra_index += fields_len;
+ break :i inits;
+ } else .{ .start = 0, .len = 0 };
+ const namespace: OptionalNamespaceIndex = if (extra.data.flags.has_namespace) n: {
+ const n: NamespaceIndex = @enumFromInt(ip.extra.items[extra_index]);
+ extra_index += 1;
+ break :n n.toOptional();
+ } else .none;
+ const aligns: Alignment.Slice = if (extra.data.flags.any_aligned_fields) a: {
+ const a: Alignment.Slice = .{ .start = extra_index, .len = fields_len };
+ extra_index += std.math.divCeil(u32, fields_len, 4) catch unreachable;
+ break :a a;
+ } else .{ .start = 0, .len = 0 };
+ const comptime_bits: LoadedStructType.ComptimeBits = if (extra.data.flags.any_comptime_fields) c: {
+ const len = std.math.divCeil(u32, fields_len, 32) catch unreachable;
+ const c: LoadedStructType.ComptimeBits = .{ .start = extra_index, .len = len };
+ extra_index += len;
+ break :c c;
+ } else .{ .start = 0, .len = 0 };
+ const runtime_order: LoadedStructType.RuntimeOrder.Slice = if (!extra.data.flags.is_extern) ro: {
+ const ro: LoadedStructType.RuntimeOrder.Slice = .{ .start = extra_index, .len = fields_len };
+ extra_index += fields_len;
+ break :ro ro;
+ } else .{ .start = 0, .len = 0 };
+ const offsets: LoadedStructType.Offsets = o: {
+ const o: LoadedStructType.Offsets = .{ .start = extra_index, .len = fields_len };
+ extra_index += fields_len;
+ break :o o;
+ };
+ return .{
+ .extra_index = item.data,
+ .decl = extra.data.decl.toOptional(),
+ .namespace = namespace,
+ .zir_index = extra.data.zir_index,
+ .layout = if (extra.data.flags.is_extern) .Extern else .Auto,
+ .field_names = names,
+ .field_types = .{ .start = extra.end, .len = fields_len },
+ .field_inits = inits,
+ .field_aligns = aligns,
+ .runtime_order = runtime_order,
+ .comptime_bits = comptime_bits,
+ .offsets = offsets,
+ .names_map = names_map,
+ };
+ },
+ .type_struct_packed, .type_struct_packed_inits => {
+ const extra = ip.extraDataTrail(Tag.TypeStructPacked, item.data);
+ const has_inits = item.tag == .type_struct_packed_inits;
+ const fields_len = extra.data.fields_len;
+ return .{
+ .extra_index = item.data,
+ .decl = extra.data.decl.toOptional(),
+ .namespace = extra.data.namespace,
+ .zir_index = extra.data.zir_index,
+ .layout = .Packed,
+ .field_names = .{
+ .start = extra.end + fields_len,
+ .len = fields_len,
+ },
+ .field_types = .{
+ .start = extra.end,
+ .len = fields_len,
+ },
+ .field_inits = if (has_inits) .{
+ .start = extra.end + 2 * fields_len,
+ .len = fields_len,
+ } else .{ .start = 0, .len = 0 },
+ .field_aligns = .{ .start = 0, .len = 0 },
+ .runtime_order = .{ .start = 0, .len = 0 },
+ .comptime_bits = .{ .start = 0, .len = 0 },
+ .offsets = .{ .start = 0, .len = 0 },
+ .names_map = extra.data.names_map.toOptional(),
+ };
+ },
+ else => unreachable,
+ }
+}
+
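`loadStructType` replaces the removed `extraStructType`/`extraPackedStructType` helpers: the `Key` no longer carries the per-field slices, so call sites load them on demand from the type's index. A representative before/after of the call-site migration seen throughout this commit, assuming `ip: *const InternPool` and a struct type index `ty` (variable names are illustrative):

    // Before: the Key payload carried every per-field slice.
    // const struct_type = ip.indexToKey(ty).struct_type;
    // After: the Key only identifies the type by decl; load the rest lazily.
    const struct_type = ip.loadStructType(ty);
    const fields_len = struct_type.field_types.len;
    const first_field_ty = struct_type.field_types.get(ip)[0]; // assumes fields_len > 0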
+const LoadedEnumType = struct {
+ /// The Decl that corresponds to the enum itself.
+ decl: DeclIndex,
+ /// Represents the declarations inside this enum.
+ namespace: OptionalNamespaceIndex,
+ /// An integer type which is used for the numerical value of the enum.
+    /// This field is present regardless of whether the enum has an
+    /// explicitly provided tag type or is auto-numbered.
+ tag_ty: Index,
+ /// Set of field names in declaration order.
+ names: NullTerminatedString.Slice,
+ /// Maps integer tag value to field index.
+    /// Entries are in declaration order, same as `names`.
+ /// If this is empty, it means the enum tags are auto-numbered.
+ values: Index.Slice,
+ tag_mode: TagMode,
+ names_map: MapIndex,
+ /// This is guaranteed to not be `.none` if explicit values are provided.
+ values_map: OptionalMapIndex,
+ zir_index: TrackedInst.Index.Optional,
+
+ pub const TagMode = enum {
+ /// The integer tag type was auto-numbered by zig.
+ auto,
+ /// The integer tag type was provided by the enum declaration, and the enum
+ /// is exhaustive.
+ explicit,
+ /// The integer tag type was provided by the enum declaration, and the enum
+ /// is non-exhaustive.
+ nonexhaustive,
+ };
+
+ /// Look up field index based on field name.
+ pub fn nameIndex(self: LoadedEnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
+ const map = &ip.maps.items[@intFromEnum(self.names_map)];
+ const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) };
+ const field_index = map.getIndexAdapted(name, adapter) orelse return null;
+ return @intCast(field_index);
+ }
+
+ /// Look up field index based on tag value.
+ /// Asserts that `values_map` is not `none`.
+ /// This function returns `null` when `tag_val` does not have the
+ /// integer tag type of the enum.
+ pub fn tagValueIndex(self: LoadedEnumType, ip: *const InternPool, tag_val: Index) ?u32 {
+ assert(tag_val != .none);
+ // TODO: we should probably decide a single interface for this function, but currently
+ // it's being called with both tag values and underlying ints. Fix this!
+ const int_tag_val = switch (ip.indexToKey(tag_val)) {
+ .enum_tag => |enum_tag| enum_tag.int,
+ .int => tag_val,
+ else => unreachable,
+ };
+ if (self.values_map.unwrap()) |values_map| {
+ const map = &ip.maps.items[@intFromEnum(values_map)];
+ const adapter: Index.Adapter = .{ .indexes = self.values.get(ip) };
+ const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null;
+ return @intCast(field_index);
+ }
+ // Auto-numbered enum. Convert `int_tag_val` to field index.
+ const field_index = switch (ip.indexToKey(int_tag_val).int.storage) {
+ inline .u64, .i64 => |x| std.math.cast(u32, x) orelse return null,
+ .big_int => |x| x.to(u32) catch return null,
+ .lazy_align, .lazy_size => unreachable,
+ };
+ return if (field_index < self.names.len) field_index else null;
+ }
+};
+
+pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType {
+ const item = ip.items.get(@intFromEnum(index));
+ switch (item.tag) {
+ .type_enum_auto => {
+ const extra = ip.extraDataTrail(EnumAuto, item.data);
+ return .{
+ .decl = extra.data.decl,
+ .namespace = extra.data.namespace,
+ .tag_ty = extra.data.int_tag_type,
+ .names = .{
+ .start = @intCast(extra.end),
+ .len = extra.data.fields_len,
+ },
+ .values = .{ .start = 0, .len = 0 },
+ .tag_mode = .auto,
+ .names_map = extra.data.names_map,
+ .values_map = .none,
+ .zir_index = extra.data.zir_index,
+ };
+ },
+ .type_enum_explicit, .type_enum_nonexhaustive => {
+ const extra = ip.extraDataTrail(EnumExplicit, item.data);
+ return .{
+ .decl = extra.data.decl,
+ .namespace = extra.data.namespace,
+ .tag_ty = extra.data.int_tag_type,
+ .names = .{
+ .start = @intCast(extra.end),
+ .len = extra.data.fields_len,
+ },
+ .values = .{
+ .start = @intCast(extra.end + extra.data.fields_len),
+ .len = if (extra.data.values_map != .none) extra.data.fields_len else 0,
+ },
+ .tag_mode = switch (item.tag) {
+ .type_enum_explicit => .explicit,
+ .type_enum_nonexhaustive => .nonexhaustive,
+ else => unreachable,
+ },
+ .names_map = extra.data.names_map,
+ .values_map = extra.data.values_map,
+ .zir_index = extra.data.zir_index,
+ };
+ },
+ else => unreachable,
+ }
+}
+
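A short sketch of the two lookup directions `LoadedEnumType` supports, assuming `ty` is an interned enum type index and `field_name`/`tag_val` are already-interned values (names are illustrative):

    const enum_type = ip.loadEnumType(ty);
    // Field name -> declaration-order index, via names_map.
    const index_by_name: ?u32 = enum_type.nameIndex(ip, field_name);
    // Tag value -> field index; auto-numbered enums use the integer value directly.
    const index_by_value: ?u32 = enum_type.tagValueIndex(ip, tag_val);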
+/// Note that this type doubles as the payload for `Tag.type_opaque`.
+pub const LoadedOpaqueType = struct {
+ /// The opaque's owner Decl.
+ decl: DeclIndex,
+ /// Contains the declarations inside this opaque.
+ namespace: NamespaceIndex,
+ /// The index of the `opaque_decl` instruction.
+ zir_index: TrackedInst.Index.Optional,
+};
+
+pub fn loadOpaqueType(ip: *const InternPool, index: Index) LoadedOpaqueType {
+ assert(ip.items.items(.tag)[@intFromEnum(index)] == .type_opaque);
+ const extra_index = ip.items.items(.data)[@intFromEnum(index)];
+ return ip.extraData(LoadedOpaqueType, extra_index);
+}
+
pub const Item = struct {
tag: Tag,
/// The doc comments on the respective Tag explain how to interpret this.
@@ -2485,7 +2603,6 @@ pub const Index = enum(u32) {
simple_type: struct { data: SimpleType },
type_opaque: struct { data: *Key.OpaqueType },
type_struct: struct { data: *Tag.TypeStruct },
- type_struct_ns: struct { data: NamespaceIndex },
type_struct_anon: DataIsExtraIndexOfTypeStructAnon,
type_struct_packed: struct { data: *Tag.TypeStructPacked },
type_struct_packed_inits: struct { data: *Tag.TypeStructPacked },
@@ -2925,9 +3042,6 @@ pub const Tag = enum(u8) {
/// data is 0 or extra index of `TypeStruct`.
/// data == 0 represents `@TypeOf(.{})`.
type_struct,
- /// A non-packed struct type that has only a namespace; no fields.
- /// data is NamespaceIndex.
- type_struct_ns,
/// An AnonStructType which stores types, names, and values for fields.
/// data is extra index of `TypeStructAnon`.
type_struct_anon,
@@ -3125,7 +3239,7 @@ pub const Tag = enum(u8) {
memoized_call,
const ErrorUnionType = Key.ErrorUnionType;
- const OpaqueType = Key.OpaqueType;
+ const OpaqueType = LoadedOpaqueType;
const TypeValue = Key.TypeValue;
const Error = Key.Error;
const EnumTag = Key.EnumTag;
@@ -3154,7 +3268,6 @@ pub const Tag = enum(u8) {
.simple_type => unreachable,
.type_opaque => OpaqueType,
.type_struct => TypeStruct,
- .type_struct_ns => unreachable,
.type_struct_anon => TypeStructAnon,
.type_struct_packed, .type_struct_packed_inits => TypeStructPacked,
.type_tuple_anon => TypeStructAnon,
@@ -3310,12 +3423,15 @@ pub const Tag = enum(u8) {
};
};
- /// The number of fields is provided by the `tag_ty` field.
/// Trailing:
/// 0. field type: Index for each field; declaration order
/// 1. field align: Alignment for each field; declaration order
pub const TypeUnion = struct {
flags: Flags,
+ /// This could be provided through the tag type, but it is more convenient
+ /// to store it directly. This is also necessary for `dumpStatsFallible` to
+ /// work on unresolved types.
+ fields_len: u32,
/// Only valid after .have_layout
size: u32,
/// Only valid after .have_layout
@@ -3327,11 +3443,11 @@ pub const Tag = enum(u8) {
zir_index: TrackedInst.Index.Optional,
pub const Flags = packed struct(u32) {
- runtime_tag: UnionType.RuntimeTag,
+ runtime_tag: LoadedUnionType.RuntimeTag,
/// If false, the field alignment trailing data is omitted.
any_aligned_fields: bool,
layout: std.builtin.Type.ContainerLayout,
- status: UnionType.Status,
+ status: LoadedUnionType.Status,
requires_comptime: RequiresComptime,
assumed_runtime_bits: bool,
assumed_pointer_aligned: bool,
@@ -4074,65 +4190,27 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
.type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) },
.type_struct => .{ .struct_type = if (data == 0) .{
- .extra_index = 0,
- .namespace = .none,
.decl = .none,
- .zir_index = undefined,
- .layout = .Auto,
- .field_names = .{ .start = 0, .len = 0 },
- .field_types = .{ .start = 0, .len = 0 },
- .field_inits = .{ .start = 0, .len = 0 },
- .field_aligns = .{ .start = 0, .len = 0 },
- .runtime_order = .{ .start = 0, .len = 0 },
- .comptime_bits = .{ .start = 0, .len = 0 },
- .offsets = .{ .start = 0, .len = 0 },
- .names_map = undefined,
- } else extraStructType(ip, data) },
-
- .type_struct_ns => .{ .struct_type = .{
- .extra_index = 0,
- .namespace = @as(NamespaceIndex, @enumFromInt(data)).toOptional(),
- .decl = .none,
- .zir_index = undefined,
- .layout = .Auto,
- .field_names = .{ .start = 0, .len = 0 },
- .field_types = .{ .start = 0, .len = 0 },
- .field_inits = .{ .start = 0, .len = 0 },
- .field_aligns = .{ .start = 0, .len = 0 },
- .runtime_order = .{ .start = 0, .len = 0 },
- .comptime_bits = .{ .start = 0, .len = 0 },
- .offsets = .{ .start = 0, .len = 0 },
- .names_map = undefined,
+ } else .{
+ .decl = ip.extraData(Tag.TypeStruct, data).decl.toOptional(),
+ } },
+
+ .type_struct_packed, .type_struct_packed_inits => .{ .struct_type = .{
+ .decl = ip.extraData(Tag.TypeStructPacked, data).decl.toOptional(),
} },
.type_struct_anon => .{ .anon_struct_type = extraTypeStructAnon(ip, data) },
.type_tuple_anon => .{ .anon_struct_type = extraTypeTupleAnon(ip, data) },
- .type_struct_packed => .{ .struct_type = extraPackedStructType(ip, data, false) },
- .type_struct_packed_inits => .{ .struct_type = extraPackedStructType(ip, data, true) },
- .type_union => .{ .union_type = extraUnionType(ip, data) },
+ .type_union => .{ .union_type = .{
+ .decl = ip.extraData(Tag.TypeUnion, data).decl,
+ } },
- .type_enum_auto => {
- const enum_auto = ip.extraDataTrail(EnumAuto, data);
- return .{ .enum_type = .{
- .decl = enum_auto.data.decl,
- .namespace = enum_auto.data.namespace,
- .tag_ty = enum_auto.data.int_tag_type,
- .names = .{
- .start = @intCast(enum_auto.end),
- .len = enum_auto.data.fields_len,
- },
- .values = .{
- .start = 0,
- .len = 0,
- },
- .tag_mode = .auto,
- .names_map = enum_auto.data.names_map.toOptional(),
- .values_map = .none,
- .zir_index = enum_auto.data.zir_index,
- } };
- },
- .type_enum_explicit => ip.indexToKeyEnum(data, .explicit),
- .type_enum_nonexhaustive => ip.indexToKeyEnum(data, .nonexhaustive),
+ .type_enum_auto => .{ .enum_type = .{
+ .decl = ip.extraData(EnumAuto, data).decl,
+ } },
+ .type_enum_explicit, .type_enum_nonexhaustive => .{ .enum_type = .{
+ .decl = ip.extraData(EnumExplicit, data).decl,
+ } },
.type_function => .{ .func_type = ip.extraFuncType(data) },
.undef => .{ .undef = @as(Index, @enumFromInt(data)) },
@@ -4365,7 +4443,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
},
.type_array_small,
.type_vector,
- .type_struct_ns,
.type_struct_packed,
=> .{ .aggregate = .{
.ty = ty,
@@ -4374,16 +4451,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
// There is only one possible value precisely due to the
// fact that this values slice is fully populated!
- .type_struct => {
- const info = extraStructType(ip, ty_item.data);
- return .{ .aggregate = .{
- .ty = ty,
- .storage = .{ .elems = @ptrCast(info.field_inits.get(ip)) },
- } };
- },
-
- .type_struct_packed_inits => {
- const info = extraPackedStructType(ip, ty_item.data, true);
+ .type_struct, .type_struct_packed_inits => {
+ const info = loadStructType(ip, ty);
return .{ .aggregate = .{
.ty = ty,
.storage = .{ .elems = @ptrCast(info.field_inits.get(ip)) },
@@ -4475,18 +4544,6 @@ fn extraErrorSet(ip: *const InternPool, extra_index: u32) Key.ErrorSetType {
};
}
-fn extraUnionType(ip: *const InternPool, extra_index: u32) Key.UnionType {
- const type_union = ip.extraData(Tag.TypeUnion, extra_index);
- return .{
- .decl = type_union.decl,
- .namespace = type_union.namespace,
- .flags = type_union.flags,
- .enum_tag_ty = type_union.tag_ty,
- .zir_index = type_union.zir_index,
- .extra_index = extra_index,
- };
-}
-
fn extraTypeStructAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructType {
const type_struct_anon = ip.extraDataTrail(TypeStructAnon, extra_index);
const fields_len = type_struct_anon.data.fields_len;
@@ -4525,109 +4582,6 @@ fn extraTypeTupleAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructTyp
};
}
-fn extraStructType(ip: *const InternPool, extra_index: u32) Key.StructType {
- const s = ip.extraDataTrail(Tag.TypeStruct, extra_index);
- const fields_len = s.data.fields_len;
-
- var index = s.end;
-
- const field_types = t: {
- const types: Index.Slice = .{ .start = index, .len = fields_len };
- index += fields_len;
- break :t types;
- };
- const names_map, const field_names: NullTerminatedString.Slice = t: {
- if (s.data.flags.is_tuple) break :t .{ .none, .{ .start = 0, .len = 0 } };
- const names_map: MapIndex = @enumFromInt(ip.extra.items[index]);
- index += 1;
- const names: NullTerminatedString.Slice = .{ .start = index, .len = fields_len };
- index += fields_len;
- break :t .{ names_map.toOptional(), names };
- };
- const field_inits: Index.Slice = t: {
- if (!s.data.flags.any_default_inits) break :t .{ .start = 0, .len = 0 };
- const inits: Index.Slice = .{ .start = index, .len = fields_len };
- index += fields_len;
- break :t inits;
- };
- const namespace = t: {
- if (!s.data.flags.has_namespace) break :t .none;
- const namespace: NamespaceIndex = @enumFromInt(ip.extra.items[index]);
- index += 1;
- break :t namespace.toOptional();
- };
- const field_aligns: Alignment.Slice = t: {
- if (!s.data.flags.any_aligned_fields) break :t .{ .start = 0, .len = 0 };
- const aligns: Alignment.Slice = .{ .start = index, .len = fields_len };
- index += (fields_len + 3) / 4;
- break :t aligns;
- };
- const comptime_bits: Key.StructType.ComptimeBits = t: {
- if (!s.data.flags.any_comptime_fields) break :t .{ .start = 0, .len = 0 };
- const comptime_bits: Key.StructType.ComptimeBits = .{ .start = index, .len = fields_len };
- index += (fields_len + 31) / 32;
- break :t comptime_bits;
- };
- const runtime_order: Key.StructType.RuntimeOrder.Slice = t: {
- if (s.data.flags.is_extern) break :t .{ .start = 0, .len = 0 };
- const ro: Key.StructType.RuntimeOrder.Slice = .{ .start = index, .len = fields_len };
- index += fields_len;
- break :t ro;
- };
- const offsets = t: {
- const offsets: Key.StructType.Offsets = .{ .start = index, .len = fields_len };
- index += fields_len;
- break :t offsets;
- };
- return .{
- .extra_index = extra_index,
- .decl = s.data.decl.toOptional(),
- .zir_index = s.data.zir_index,
- .layout = if (s.data.flags.is_extern) .Extern else .Auto,
- .field_types = field_types,
- .names_map = names_map,
- .field_names = field_names,
- .field_inits = field_inits,
- .namespace = namespace,
- .field_aligns = field_aligns,
- .comptime_bits = comptime_bits,
- .runtime_order = runtime_order,
- .offsets = offsets,
- };
-}
-
-fn extraPackedStructType(ip: *const InternPool, extra_index: u32, inits: bool) Key.StructType {
- const type_struct_packed = ip.extraDataTrail(Tag.TypeStructPacked, extra_index);
- const fields_len = type_struct_packed.data.fields_len;
- return .{
- .extra_index = extra_index,
- .decl = type_struct_packed.data.decl.toOptional(),
- .namespace = type_struct_packed.data.namespace,
- .zir_index = type_struct_packed.data.zir_index,
- .layout = .Packed,
- .field_types = .{
- .start = type_struct_packed.end,
- .len = fields_len,
- },
- .field_names = .{
- .start = type_struct_packed.end + fields_len,
- .len = fields_len,
- },
- .field_inits = if (inits) .{
- .start = type_struct_packed.end + fields_len * 2,
- .len = fields_len,
- } else .{
- .start = 0,
- .len = 0,
- },
- .field_aligns = .{ .start = 0, .len = 0 },
- .runtime_order = .{ .start = 0, .len = 0 },
- .comptime_bits = .{ .start = 0, .len = 0 },
- .offsets = .{ .start = 0, .len = 0 },
- .names_map = type_struct_packed.data.names_map.toOptional(),
- };
-}
-
fn extraFuncType(ip: *const InternPool, extra_index: u32) Key.FuncType {
const type_function = ip.extraDataTrail(Tag.TypeFunction, extra_index);
var index: usize = type_function.end;
@@ -4719,28 +4673,6 @@ fn extraFuncCoerced(ip: *const InternPool, extra_index: u32) Key.Func {
return func;
}
-fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key {
- const enum_explicit = ip.extraDataTrail(EnumExplicit, data);
- const fields_len = enum_explicit.data.fields_len;
- return .{ .enum_type = .{
- .decl = enum_explicit.data.decl,
- .namespace = enum_explicit.data.namespace,
- .tag_ty = enum_explicit.data.int_tag_type,
- .names = .{
- .start = @intCast(enum_explicit.end),
- .len = fields_len,
- },
- .values = .{
- .start = @intCast(enum_explicit.end + fields_len),
- .len = if (enum_explicit.data.values_map != .none) fields_len else 0,
- },
- .tag_mode = tag_mode,
- .names_map = enum_explicit.data.names_map.toOptional(),
- .values_map = enum_explicit.data.values_map,
- .zir_index = enum_explicit.data.zir_index,
- } };
-}
-
fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key {
const int_info = ip.limbData(Int, limb_index);
return .{ .int = .{
@@ -4900,13 +4832,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.struct_type => unreachable, // use getStructType() instead
.anon_struct_type => unreachable, // use getAnonStructType() instead
.union_type => unreachable, // use getUnionType() instead
-
- .opaque_type => |opaque_type| {
- ip.items.appendAssumeCapacity(.{
- .tag = .type_opaque,
- .data = try ip.addExtra(gpa, opaque_type),
- });
- },
+ .opaque_type => unreachable, // use getOpaqueType() instead
.enum_type => unreachable, // use getEnum() or getIncompleteEnum() instead
.func_type => unreachable, // use getFuncType() instead
@@ -5026,14 +4952,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
assert(ptr.addr == .field);
assert(base_index.index < anon_struct_type.types.len);
},
- .struct_type => |struct_type| {
+ .struct_type => {
assert(ptr.addr == .field);
- assert(base_index.index < struct_type.field_types.len);
+ assert(base_index.index < ip.loadStructType(base_ptr_type.child).field_types.len);
},
- .union_type => |union_key| {
- const union_type = ip.loadUnionType(union_key);
+ .union_type => {
+ const union_type = ip.loadUnionType(base_ptr_type.child);
assert(ptr.addr == .field);
- assert(base_index.index < union_type.field_names.len);
+ assert(base_index.index < union_type.field_types.len);
},
.ptr_type => |slice_type| {
assert(ptr.addr == .field);
@@ -5304,7 +5230,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
assert(ip.isEnumType(enum_tag.ty));
switch (ip.indexToKey(enum_tag.ty)) {
.simple_type => assert(ip.isIntegerType(ip.typeOf(enum_tag.int))),
- .enum_type => |enum_type| assert(ip.typeOf(enum_tag.int) == enum_type.tag_ty),
+ .enum_type => assert(ip.typeOf(enum_tag.int) == ip.loadEnumType(enum_tag.ty).tag_ty),
else => unreachable,
}
ip.items.appendAssumeCapacity(.{
@@ -5397,8 +5323,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
assert(ip.typeOf(elem) == child);
}
},
- .struct_type => |t| {
- for (aggregate.storage.values(), t.field_types.get(ip)) |elem, field_ty| {
+ .struct_type => {
+ for (aggregate.storage.values(), ip.loadStructType(aggregate.ty).field_types.get(ip)) |elem, field_ty| {
assert(ip.typeOf(elem) == field_ty);
}
},
@@ -5596,6 +5522,7 @@ pub fn getUnionType(ip: *InternPool, gpa: Allocator, ini: UnionTypeInit) Allocat
const union_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeUnion{
.flags = ini.flags,
+ .fields_len = ini.fields_len,
.size = std.math.maxInt(u32),
.padding = std.math.maxInt(u32),
.decl = ini.decl,
@@ -5628,7 +5555,7 @@ pub fn getUnionType(ip: *InternPool, gpa: Allocator, ini: UnionTypeInit) Allocat
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, Key{
- .union_type = extraUnionType(ip, union_type_extra_index),
+ .union_type = .{ .decl = ini.decl },
}, adapter);
if (gop.found_existing) {
ip.extra.items.len = prev_extra_len;
@@ -5664,23 +5591,7 @@ pub fn getStructType(
) Allocator.Error!Index {
const adapter: KeyAdapter = .{ .intern_pool = ip };
const key: Key = .{
- .struct_type = .{
- // Only the decl matters for hashing and equality purposes.
- .decl = ini.decl.toOptional(),
-
- .extra_index = undefined,
- .namespace = undefined,
- .zir_index = undefined,
- .layout = undefined,
- .field_names = undefined,
- .field_types = undefined,
- .field_inits = undefined,
- .field_aligns = undefined,
- .runtime_order = undefined,
- .comptime_bits = undefined,
- .offsets = undefined,
- .names_map = undefined,
- },
+ .struct_type = .{ .decl = ini.decl.toOptional() },
};
const gop = try ip.map.getOrPutAdapted(gpa, key, adapter);
if (gop.found_existing) return @enumFromInt(gop.index);
@@ -5776,7 +5687,7 @@ pub fn getStructType(
ip.extra.appendNTimesAssumeCapacity(0, comptime_elements_len);
}
if (ini.layout == .Auto) {
- ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Key.StructType.RuntimeOrder.unresolved), ini.fields_len);
+ ip.extra.appendNTimesAssumeCapacity(@intFromEnum(LoadedStructType.RuntimeOrder.unresolved), ini.fields_len);
}
ip.extra.appendNTimesAssumeCapacity(std.math.maxInt(u32), ini.fields_len);
return @enumFromInt(ip.items.len - 1);
@@ -6579,26 +6490,14 @@ pub const GetEnumInit = struct {
tag_ty: Index,
names: []const NullTerminatedString,
values: []const Index,
- tag_mode: Key.EnumType.TagMode,
+ tag_mode: LoadedEnumType.TagMode,
zir_index: TrackedInst.Index.Optional,
};
pub fn getEnum(ip: *InternPool, gpa: Allocator, ini: GetEnumInit) Allocator.Error!Index {
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, Key{
- .enum_type = .{
- // Only the decl is used for hashing and equality.
- .decl = ini.decl,
-
- .namespace = undefined,
- .tag_ty = undefined,
- .names = undefined,
- .values = undefined,
- .tag_mode = undefined,
- .names_map = undefined,
- .values_map = undefined,
- .zir_index = undefined,
- },
+ .enum_type = .{ .decl = ini.decl },
}, adapter);
if (gop.found_existing) return @enumFromInt(gop.index);
errdefer _ = ip.map.pop();
@@ -6668,6 +6567,21 @@ pub fn finishGetEnum(
return @enumFromInt(ip.items.len - 1);
}
+pub fn getOpaqueType(ip: *InternPool, gpa: Allocator, key: LoadedOpaqueType) Allocator.Error!Index {
+ const adapter: KeyAdapter = .{ .intern_pool = ip };
+ try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(LoadedOpaqueType).Struct.fields.len);
+ try ip.items.ensureUnusedCapacity(gpa, 1);
+ const gop = try ip.map.getOrPutAdapted(gpa, Key{
+ .opaque_type = .{ .decl = key.decl },
+ }, adapter);
+ if (gop.found_existing) return @enumFromInt(gop.index);
+ ip.items.appendAssumeCapacity(.{
+ .tag = .type_opaque,
+ .data = ip.addExtraAssumeCapacity(key),
+ });
+ return @enumFromInt(gop.index);
+}
+
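Since `LoadedOpaqueType` doubles as the `type_opaque` extra payload, interning and reloading are symmetric `addExtraAssumeCapacity`/`extraData` operations. A sketch of the round trip under assumed surrounding declarations (`gpa`, `decl_index`, `namespace_index`):

    const opaque_ty = try ip.getOpaqueType(gpa, .{
        .decl = decl_index,
        .namespace = namespace_index,
        .zir_index = .none,
    });
    const loaded = ip.loadOpaqueType(opaque_ty);
    assert(loaded.decl == decl_index); // std.debug.assert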
pub fn getIfExists(ip: *const InternPool, key: Key) ?Index {
const adapter: KeyAdapter = .{ .intern_pool = ip };
const index = ip.map.getIndexAdapted(key, adapter) orelse return null;
@@ -7075,9 +6989,9 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.func => unreachable,
.int => |int| switch (ip.indexToKey(new_ty)) {
- .enum_type => |enum_type| return ip.get(gpa, .{ .enum_tag = .{
+ .enum_type => return ip.get(gpa, .{ .enum_tag = .{
.ty = new_ty,
- .int = try ip.getCoerced(gpa, val, enum_type.tag_ty),
+ .int = try ip.getCoerced(gpa, val, ip.loadEnumType(new_ty).tag_ty),
} }),
.ptr_type => return ip.get(gpa, .{ .ptr = .{
.ty = new_ty,
@@ -7106,7 +7020,8 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.enum_tag => |enum_tag| if (ip.isIntegerType(new_ty))
return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty),
.enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) {
- .enum_type => |enum_type| {
+ .enum_type => {
+ const enum_type = ip.loadEnumType(new_ty);
const index = enum_type.nameIndex(ip, enum_literal).?;
return ip.get(gpa, .{ .enum_tag = .{
.ty = new_ty,
@@ -7247,7 +7162,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
const new_elem_ty = switch (ip.indexToKey(new_ty)) {
inline .array_type, .vector_type => |seq_type| seq_type.child,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[i],
- .struct_type => |struct_type| struct_type.field_types.get(ip)[i],
+ .struct_type => ip.loadStructType(new_ty).field_types.get(ip)[i],
else => unreachable,
};
elem.* = try ip.getCoerced(gpa, elem.*, new_elem_ty);
@@ -7550,7 +7465,6 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
ints += info.fields_len; // offsets
break :b @sizeOf(u32) * ints;
},
- .type_struct_ns => @sizeOf(Module.Namespace),
.type_struct_anon => b: {
const info = ip.extraData(TypeStructAnon, data);
break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len);
@@ -7572,7 +7486,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
.type_union => b: {
const info = ip.extraData(Tag.TypeUnion, data);
- const enum_info = ip.indexToKey(info.tag_ty).enum_type;
+ const enum_info = ip.loadEnumType(info.tag_ty);
const fields_len: u32 = @intCast(enum_info.names.len);
const per_field = @sizeOf(u32); // field type
// 1 byte per field for alignment, rounded up to the nearest 4 bytes
@@ -7716,7 +7630,6 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
.type_enum_auto,
.type_opaque,
.type_struct,
- .type_struct_ns,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
@@ -8123,7 +8036,6 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.simple_type,
.type_opaque,
.type_struct,
- .type_struct_ns,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
@@ -8217,7 +8129,7 @@ pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E {
pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
return switch (ip.indexToKey(ty)) {
- .struct_type => |struct_type| struct_type.field_types.len,
+ .struct_type => ip.loadStructType(ty).field_types.len,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.array_type => |array_type| array_type.len,
.vector_type => |vector_type| vector_type.len,
@@ -8227,7 +8139,7 @@ pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 {
return switch (ip.indexToKey(ty)) {
- .struct_type => |struct_type| struct_type.field_types.len,
+ .struct_type => ip.loadStructType(ty).field_types.len,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.array_type => |array_type| array_type.len + @intFromBool(array_type.sentinel != .none),
.vector_type => |vector_type| vector_type.len,
@@ -8457,7 +8369,6 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.type_opaque => .Opaque,
.type_struct,
- .type_struct_ns,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
src/Liveness.zig
@@ -131,7 +131,7 @@ fn LivenessPassData(comptime pass: LivenessPass) type {
};
}
-pub fn analyze(gpa: Allocator, air: Air, intern_pool: *const InternPool) Allocator.Error!Liveness {
+pub fn analyze(gpa: Allocator, air: Air, intern_pool: *InternPool) Allocator.Error!Liveness {
const tracy = trace(@src());
defer tracy.end();
@@ -836,7 +836,7 @@ pub const BigTomb = struct {
const Analysis = struct {
gpa: Allocator,
air: Air,
- intern_pool: *const InternPool,
+ intern_pool: *InternPool,
tomb_bits: []usize,
special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32),
extra: std.ArrayListUnmanaged(u32),
src/Module.zig
@@ -527,7 +527,7 @@ pub const Decl = struct {
/// If the Decl owns its value and it is a union, return it,
/// otherwise null.
- pub fn getOwnedUnion(decl: Decl, zcu: *Zcu) ?InternPool.UnionType {
+ pub fn getOwnedUnion(decl: Decl, zcu: *Zcu) ?InternPool.LoadedUnionType {
if (!decl.owns_tv) return null;
if (decl.val.ip_index == .none) return null;
return zcu.typeToUnion(decl.val.toType());
@@ -563,14 +563,15 @@ pub const Decl = struct {
/// enum, or opaque.
pub fn getInnerNamespaceIndex(decl: Decl, zcu: *Zcu) Namespace.OptionalIndex {
if (!decl.has_tv) return .none;
+ const ip = &zcu.intern_pool;
return switch (decl.val.ip_index) {
.empty_struct_type => .none,
.none => .none,
- else => switch (zcu.intern_pool.indexToKey(decl.val.toIntern())) {
- .opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
- .struct_type => |struct_type| struct_type.namespace,
- .union_type => |union_type| union_type.namespace.toOptional(),
- .enum_type => |enum_type| enum_type.namespace,
+ else => switch (ip.indexToKey(decl.val.toIntern())) {
+ .opaque_type => ip.loadOpaqueType(decl.val.toIntern()).namespace.toOptional(),
+ .struct_type => ip.loadStructType(decl.val.toIntern()).namespace,
+ .union_type => ip.loadUnionType(decl.val.toIntern()).namespace.toOptional(),
+ .enum_type => ip.loadEnumType(decl.val.toIntern()).namespace,
else => .none,
},
};
@@ -5682,7 +5683,7 @@ pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Er
pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.Error!Value {
const ip = &mod.intern_pool;
const gpa = mod.gpa;
- const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
+ const enum_type = ip.loadEnumType(ty.toIntern());
if (enum_type.values.len == 0) {
// Auto-numbered fields.
@@ -5988,28 +5989,26 @@ pub fn declFileScope(mod: *Module, decl_index: Decl.Index) *File {
/// * `@TypeOf(.{})`
/// * A struct which has no fields (`struct {}`).
/// * Not a struct.
-pub fn typeToStruct(mod: *Module, ty: Type) ?InternPool.Key.StructType {
+pub fn typeToStruct(mod: *Module, ty: Type) ?InternPool.LoadedStructType {
if (ty.ip_index == .none) return null;
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .struct_type => |t| t,
+ const ip = &mod.intern_pool;
+ return switch (ip.indexToKey(ty.ip_index)) {
+ .struct_type => ip.loadStructType(ty.ip_index),
else => null,
};
}
-pub fn typeToPackedStruct(mod: *Module, ty: Type) ?InternPool.Key.StructType {
- if (ty.ip_index == .none) return null;
- return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .struct_type => |t| if (t.layout == .Packed) t else null,
- else => null,
- };
+pub fn typeToPackedStruct(mod: *Module, ty: Type) ?InternPool.LoadedStructType {
+ const s = mod.typeToStruct(ty) orelse return null;
+ if (s.layout != .Packed) return null;
+ return s;
}
-/// This asserts that the union's enum tag type has been resolved.
-pub fn typeToUnion(mod: *Module, ty: Type) ?InternPool.UnionType {
+pub fn typeToUnion(mod: *Module, ty: Type) ?InternPool.LoadedUnionType {
if (ty.ip_index == .none) return null;
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.ip_index)) {
- .union_type => |k| ip.loadUnionType(k),
+ .union_type => ip.loadUnionType(ty.ip_index),
else => null,
};
}
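With `typeToStruct`/`typeToPackedStruct`/`typeToUnion` now returning loaded views, per-field data is read through the loaded type rather than the key, and union field names come from the tag enum. A hypothetical helper illustrating the pattern (not part of the commit; assumes the union's tag type has been resolved):

    fn unionFirstFieldName(mod: *Module, ty: Type) ?InternPool.NullTerminatedString {
        const ip = &mod.intern_pool;
        const loaded_union = mod.typeToUnion(ty) orelse return null;
        if (loaded_union.field_types.len == 0) return null;
        // Field names now live on the union's tag enum type.
        return loaded_union.loadTagType(ip).names.get(ip)[0];
    }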
@@ -6111,7 +6110,7 @@ pub const UnionLayout = struct {
padding: u32,
};
-pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
+pub fn getUnionLayout(mod: *Module, u: InternPool.LoadedUnionType) UnionLayout {
const ip = &mod.intern_pool;
assert(u.haveLayout(ip));
var most_aligned_field: u32 = undefined;
@@ -6157,7 +6156,7 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
const tag_size = Type.fromInterned(u.enum_tag_ty).abiSize(mod);
const tag_align = Type.fromInterned(u.enum_tag_ty).abiAlignment(mod).max(.@"1");
return .{
- .abi_size = u.size,
+ .abi_size = u.size(ip).*,
.abi_align = tag_align.max(payload_align),
.most_aligned_field = most_aligned_field,
.most_aligned_field_size = most_aligned_field_size,
@@ -6166,16 +6165,16 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
.payload_align = payload_align,
.tag_align = tag_align,
.tag_size = tag_size,
- .padding = u.padding,
+ .padding = u.padding(ip).*,
};
}
-pub fn unionAbiSize(mod: *Module, u: InternPool.UnionType) u64 {
+pub fn unionAbiSize(mod: *Module, u: InternPool.LoadedUnionType) u64 {
return mod.getUnionLayout(u).abi_size;
}
/// Returns 0 if the union is represented with 0 bits at runtime.
-pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) Alignment {
+pub fn unionAbiAlignment(mod: *Module, u: InternPool.LoadedUnionType) Alignment {
const ip = &mod.intern_pool;
const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
var max_align: Alignment = .none;
@@ -6192,7 +6191,7 @@ pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) Alignment {
/// Returns the field alignment, assuming the union is not packed.
/// Keep implementation in sync with `Sema.unionFieldAlignment`.
/// Prefer to call that function instead of this one during Sema.
-pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.UnionType, field_index: u32) Alignment {
+pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.LoadedUnionType, field_index: u32) Alignment {
const ip = &mod.intern_pool;
const field_align = u.fieldAlign(ip, field_index);
if (field_align != .none) return field_align;
@@ -6201,12 +6200,11 @@ pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.UnionType, field_in
}
/// Returns the index of the active field, given the current tag value
-pub fn unionTagFieldIndex(mod: *Module, u: InternPool.UnionType, enum_tag: Value) ?u32 {
+pub fn unionTagFieldIndex(mod: *Module, u: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
const ip = &mod.intern_pool;
if (enum_tag.toIntern() == .none) return null;
assert(ip.typeOf(enum_tag.toIntern()) == u.enum_tag_ty);
- const enum_type = ip.indexToKey(u.enum_tag_ty).enum_type;
- return enum_type.tagValueIndex(ip, enum_tag.toIntern());
+ return u.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
}
/// Returns the field alignment of a non-packed struct in byte units.
@@ -6253,7 +6251,7 @@ pub fn structFieldAlignmentExtern(mod: *Module, field_ty: Type) Alignment {
/// projects.
pub fn structPackedFieldBitOffset(
mod: *Module,
- struct_type: InternPool.Key.StructType,
+ struct_type: InternPool.LoadedStructType,
field_index: u32,
) u16 {
const ip = &mod.intern_pool;
src/Sema.zig
@@ -3371,11 +3371,11 @@ fn zirOpaqueDecl(
});
errdefer mod.destroyNamespace(new_namespace_index);
- const opaque_ty = try mod.intern(.{ .opaque_type = .{
+ const opaque_ty = try mod.intern_pool.getOpaqueType(sema.gpa, .{
.decl = new_decl_index,
.namespace = new_namespace_index,
.zir_index = (try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst)).toOptional(),
- } });
+ });
// TODO: figure out InternPool removals for incremental compilation
//errdefer mod.intern_pool.remove(opaque_ty);
@@ -5371,7 +5371,7 @@ fn failWithBadMemberAccess(
fn failWithBadStructFieldAccess(
sema: *Sema,
block: *Block,
- struct_type: InternPool.Key.StructType,
+ struct_type: InternPool.LoadedStructType,
field_src: LazySrcLoc,
field_name: InternPool.NullTerminatedString,
) CompileError {
@@ -5397,7 +5397,7 @@ fn failWithBadStructFieldAccess(
fn failWithBadUnionFieldAccess(
sema: *Sema,
block: *Block,
- union_obj: InternPool.UnionType,
+ union_obj: InternPool.LoadedUnionType,
field_src: LazySrcLoc,
field_name: InternPool.NullTerminatedString,
) CompileError {
@@ -13348,7 +13348,7 @@ fn validateSwitchItemEnum(
const ip = &sema.mod.intern_pool;
const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, src_node_offset, switch_prong_src, .none);
const int = ip.indexToKey(item.val).enum_tag.int;
- const field_index = ip.indexToKey(ip.typeOf(item.val)).enum_type.tagValueIndex(ip, int) orelse {
+ const field_index = ip.loadEnumType(ip.typeOf(item.val)).tagValueIndex(ip, int) orelse {
const maybe_prev_src = try range_set.add(int, int, switch_prong_src);
try sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
return item.ref;
@@ -13628,15 +13628,15 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :hf field_index < ty.structFieldCount(mod);
}
},
- .struct_type => |struct_type| {
- break :hf struct_type.nameIndex(ip, field_name) != null;
+ .struct_type => {
+ break :hf ip.loadStructType(ty.toIntern()).nameIndex(ip, field_name) != null;
},
- .union_type => |union_type| {
- const union_obj = ip.loadUnionType(union_type);
- break :hf union_obj.nameIndex(ip, field_name) != null;
+ .union_type => {
+ const union_type = ip.loadUnionType(ty.toIntern());
+ break :hf union_type.loadTagType(ip).nameIndex(ip, field_name) != null;
},
- .enum_type => |enum_type| {
- break :hf enum_type.nameIndex(ip, field_name) != null;
+ .enum_type => {
+ break :hf ip.loadEnumType(ty.toIntern()).nameIndex(ip, field_name) != null;
},
.array_type => break :hf ip.stringEqlSlice(field_name, "len"),
else => {},
@@ -17942,7 +17942,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} })));
},
.Enum => {
- const is_exhaustive = Value.makeBool(ip.indexToKey(ty.toIntern()).enum_type.tag_mode != .nonexhaustive);
+ const is_exhaustive = Value.makeBool(ip.loadEnumType(ty.toIntern()).tag_mode != .nonexhaustive);
const enum_field_ty = t: {
const enum_field_ty_decl_index = (try sema.namespaceLookup(
@@ -17956,9 +17956,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :t enum_field_ty_decl.val.toType();
};
- const enum_field_vals = try sema.arena.alloc(InternPool.Index, ip.indexToKey(ty.toIntern()).enum_type.names.len);
+ const enum_field_vals = try sema.arena.alloc(InternPool.Index, ip.loadEnumType(ty.toIntern()).names.len);
for (enum_field_vals, 0..) |*field_val, i| {
- const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
+ const enum_type = ip.loadEnumType(ty.toIntern());
const value_val = if (enum_type.values.len > 0)
try mod.intern_pool.getCoercedInts(
mod.gpa,
@@ -18033,7 +18033,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} });
};
- const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ip.indexToKey(ty.toIntern()).enum_type.namespace);
+ const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ip.loadEnumType(ty.toIntern()).namespace);
const type_enum_ty = t: {
const type_enum_ty_decl_index = (try sema.namespaceLookup(
@@ -18049,7 +18049,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field_values = .{
// tag_type: type,
- ip.indexToKey(ty.toIntern()).enum_type.tag_ty,
+ ip.loadEnumType(ty.toIntern()).tag_ty,
// fields: []const EnumField,
fields_val,
// decls: []const Declaration,
@@ -18093,14 +18093,15 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
const union_obj = mod.typeToUnion(ty).?;
+ const tag_type = union_obj.loadTagType(ip);
const layout = union_obj.getLayout(ip);
- const union_field_vals = try gpa.alloc(InternPool.Index, union_obj.field_names.len);
+ const union_field_vals = try gpa.alloc(InternPool.Index, tag_type.names.len);
defer gpa.free(union_field_vals);
for (union_field_vals, 0..) |*field_val, i| {
// TODO: write something like getCoercedInts to avoid needing to dupe
- const name = try sema.arena.dupeZ(u8, ip.stringToSlice(union_obj.field_names.get(ip)[i]));
+ const name = try sema.arena.dupeZ(u8, ip.stringToSlice(tag_type.names.get(ip)[i]));
const name_val = v: {
const new_decl_ty = try mod.arrayType(.{
.len = name.len,
@@ -18302,7 +18303,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
break :fv;
},
- .struct_type => |s| s,
+ .struct_type => ip.loadStructType(ty.toIntern()),
else => unreachable,
};
struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len);
@@ -20067,7 +20068,8 @@ fn finishStructInit(
}
}
},
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(struct_ty.toIntern());
for (0..struct_type.field_types.len) |i| {
if (field_inits[i] != .none) {
// Coerce the init value to the field type.
@@ -20668,7 +20670,8 @@ fn fieldType(
try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src);
return Air.internedToRef(anon_struct.types.get(ip)[field_index]);
},
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(cur_ty.toIntern());
const field_index = struct_type.nameIndex(ip, field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_type, field_src, field_name);
const field_ty = struct_type.field_types.get(ip)[field_index];
@@ -20678,7 +20681,7 @@ fn fieldType(
},
.Union => {
const union_obj = mod.typeToUnion(cur_ty).?;
- const field_index = union_obj.nameIndex(ip, field_name) orelse
+ const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse
return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
const field_ty = union_obj.field_types.get(ip)[field_index];
return Air.internedToRef(field_ty);
@@ -21007,7 +21010,7 @@ fn zirReify(
.AnyFrame => return sema.failWithUseOfAsync(block, src),
.EnumLiteral => return .enum_literal_type,
.Int => {
- const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+ const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const signedness_val = try Value.fromInterned(union_val.val).fieldValue(
mod,
struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "signedness")).?,
@@ -21023,7 +21026,7 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Vector => {
- const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+ const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const len_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "len"),
@@ -21045,7 +21048,7 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Float => {
- const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+ const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const bits_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "bits"),
@@ -21063,7 +21066,7 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Pointer => {
- const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+ const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const size_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "size"),
@@ -21175,7 +21178,7 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Array => {
- const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+ const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const len_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "len"),
@@ -21204,7 +21207,7 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Optional => {
- const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+ const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const child_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "child"),
@@ -21216,7 +21219,7 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.ErrorUnion => {
- const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+ const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const error_set_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "error_set"),
@@ -21245,7 +21248,7 @@ fn zirReify(
try names.ensureUnusedCapacity(sema.arena, len);
for (0..len) |i| {
const elem_val = try payload_val.elemValue(mod, i);
- const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
+ const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern()));
const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "name"),
@@ -21265,7 +21268,7 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Struct => {
- const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+ const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const layout_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "layout"),
@@ -21301,7 +21304,7 @@ fn zirReify(
return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool());
},
.Enum => {
- const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+ const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const tag_type_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "tag_type"),
@@ -21366,7 +21369,7 @@ fn zirReify(
for (0..fields_len) |field_i| {
const elem_val = try fields_val.elemValue(mod, field_i);
- const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
+ const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern()));
const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "name"),
@@ -21417,7 +21420,7 @@ fn zirReify(
return decl_val;
},
.Opaque => {
- const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+ const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const decls_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "decls"),
@@ -21451,11 +21454,11 @@ fn zirReify(
});
errdefer mod.destroyNamespace(new_namespace_index);
- const opaque_ty = try mod.intern(.{ .opaque_type = .{
+ const opaque_ty = try ip.getOpaqueType(gpa, .{
.decl = new_decl_index,
.namespace = new_namespace_index,
.zir_index = .none,
- } });
+ });
// TODO: figure out InternPool removals for incremental compilation
//errdefer ip.remove(opaque_ty);
@@ -21467,7 +21470,7 @@ fn zirReify(
return decl_val;
},
.Union => {
- const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+ const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const layout_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "layout"),
@@ -21500,7 +21503,7 @@ fn zirReify(
enum_tag_ty = payload_val.toType().toIntern();
const enum_type = switch (ip.indexToKey(enum_tag_ty)) {
- .enum_type => |x| x,
+ .enum_type => ip.loadEnumType(enum_tag_ty),
else => return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}),
};
@@ -21521,7 +21524,7 @@ fn zirReify(
for (0..fields_len) |i| {
const elem_val = try fields_val.elemValue(mod, i);
- const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
+ const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern()));
const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "name"),
@@ -21542,7 +21545,7 @@ fn zirReify(
}
if (enum_tag_ty != .none) {
- const tag_info = ip.indexToKey(enum_tag_ty).enum_type;
+ const tag_info = ip.loadEnumType(enum_tag_ty);
const enum_index = tag_info.nameIndex(ip, field_name) orelse {
return sema.fail(block, src, "no field named '{}' in enum '{}'", .{
field_name.fmt(ip), Type.fromInterned(enum_tag_ty).fmt(mod),
@@ -21615,7 +21618,7 @@ fn zirReify(
}
if (enum_tag_ty != .none) {
- const tag_info = ip.indexToKey(enum_tag_ty).enum_type;
+ const tag_info = ip.loadEnumType(enum_tag_ty);
if (tag_info.names.len > fields_len) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "enum field(s) missing in union", .{});
@@ -21695,7 +21698,7 @@ fn zirReify(
return decl_val;
},
.Fn => {
- const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
+ const struct_type = ip.loadStructType(ip.typeOf(union_val.val));
const calling_convention_val = try Value.fromInterned(union_val.val).fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "calling_convention"),
@@ -21746,7 +21749,7 @@ fn zirReify(
var noalias_bits: u32 = 0;
for (param_types, 0..) |*param_type, i| {
const elem_val = try params_val.elemValue(mod, i);
- const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
+ const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern()));
const param_is_generic_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_generic"),
@@ -21849,7 +21852,7 @@ fn reifyStruct(
});
// TODO: figure out InternPool removals for incremental compilation
//errdefer ip.remove(ty);
- const struct_type = ip.indexToKey(ty).struct_type;
+ const struct_type = ip.loadStructType(ty);
new_decl.ty = Type.type;
new_decl.val = Value.fromInterned(ty);
@@ -21857,7 +21860,7 @@ fn reifyStruct(
// Fields
for (0..fields_len) |i| {
const elem_val = try fields_val.elemValue(mod, i);
- const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
+ const elem_struct_type = ip.loadStructType(ip.typeOf(elem_val.toIntern()));
const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "name"),
@@ -23228,7 +23231,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
switch (ty.containerLayout(mod)) {
.Packed => {
var bit_sum: u64 = 0;
- const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
+ const struct_type = ip.loadStructType(ty.toIntern());
for (0..struct_type.field_types.len) |i| {
if (i == field_index) {
return bit_sum;
@@ -27252,8 +27255,7 @@ fn fieldCallBind(
.Union => {
try sema.resolveTypeFields(concrete_ty);
const union_obj = mod.typeToUnion(concrete_ty).?;
- _ = union_obj.nameIndex(ip, field_name) orelse break :find_field;
-
+ _ = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse break :find_field;
const field_ptr = try unionFieldPtr(sema, block, src, object_ptr, field_name, field_name_src, concrete_ty, false);
return .{ .direct = try sema.analyzeLoad(block, src, field_ptr, src) };
},
@@ -27627,7 +27629,8 @@ fn structFieldVal(
try sema.resolveTypeFields(struct_ty);
switch (ip.indexToKey(struct_ty.toIntern())) {
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(struct_ty.toIntern());
if (struct_type.isTuple(ip))
return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);
@@ -27833,7 +27836,7 @@ fn unionFieldPtr(
try sema.requireRuntimeBlock(block, src, null);
if (!initializing and union_obj.getLayout(ip) == .Auto and block.wantSafety() and
- union_ty.unionTagTypeSafety(mod) != null and union_obj.field_names.len > 1)
+ union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1)
{
const wanted_tag_val = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
@@ -27911,7 +27914,7 @@ fn unionFieldVal(
try sema.requireRuntimeBlock(block, src, null);
if (union_obj.getLayout(ip) == .Auto and block.wantSafety() and
- union_ty.unionTagTypeSafety(mod) != null and union_obj.field_names.len > 1)
+ union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1)
{
const wanted_tag_val = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
const wanted_tag = Air.internedToRef(wanted_tag_val.toIntern());
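The `unionFieldPtr`/`unionFieldVal` safety checks above show the other recurring change: the loaded union view no longer carries `field_names`, so field counts come from `field_types` and names come from the loaded tag enum. A sketch, assuming `ip: *InternPool` and a loaded union handle `union_obj` as produced by `mod.typeToUnion(ty).?` in the hunks above:

    const field_count = union_obj.field_types.len;      // was union_obj.field_names.len
    const tag_type = union_obj.loadTagType(ip);          // field names now live on the tag enum
    const first_name = tag_type.names.get(ip)[0];
    const found = tag_type.nameIndex(ip, first_name);    // optional index, as in fieldCallBind above
    _ = field_count;
    _ = found;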
@@ -31670,7 +31673,7 @@ fn coerceEnumToUnion(
const msg = try sema.errMsg(block, inst_src, "cannot initialize 'noreturn' field of union", .{});
errdefer msg.destroy(sema.gpa);
- const field_name = union_obj.field_names.get(ip)[field_index];
+ const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{
field_name.fmt(ip),
});
@@ -31681,7 +31684,7 @@ fn coerceEnumToUnion(
}
const opv = (try sema.typeHasOnePossibleValue(field_ty)) orelse {
const msg = msg: {
- const field_name = union_obj.field_names.get(ip)[field_index];
+ const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
const msg = try sema.errMsg(block, inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{}'", .{
inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod),
field_ty.fmt(sema.mod), field_name.fmt(ip),
@@ -31753,8 +31756,8 @@ fn coerceEnumToUnion(
);
errdefer msg.destroy(sema.gpa);
- for (0..union_obj.field_names.len) |field_index| {
- const field_name = union_obj.field_names.get(ip)[field_index];
+ for (0..union_obj.field_types.len) |field_index| {
+ const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' has type '{}'", .{
@@ -31787,8 +31790,8 @@ fn coerceAnonStructToUnion(
.{ .name = anon_struct_type.names.get(ip)[0] }
else
.{ .count = anon_struct_type.names.len },
- .struct_type => |struct_type| name: {
- const field_names = struct_type.field_names.get(ip);
+ .struct_type => name: {
+ const field_names = ip.loadStructType(inst_ty.toIntern()).field_names.get(ip);
break :name if (field_names.len == 1)
.{ .name = field_names[0] }
else
@@ -32097,7 +32100,7 @@ fn coerceTupleToStruct(
var runtime_src: ?LazySrcLoc = null;
const field_count = switch (ip.indexToKey(inst_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
- .struct_type => |s| s.field_types.len,
+ .struct_type => ip.loadStructType(inst_ty.toIntern()).field_types.len,
else => unreachable,
};
for (0..field_count) |field_index_usize| {
@@ -32109,7 +32112,7 @@ fn coerceTupleToStruct(
anon_struct_type.names.get(ip)[field_i]
else
try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
- .struct_type => |s| s.field_names.get(ip)[field_i],
+ .struct_type => ip.loadStructType(inst_ty.toIntern()).field_names.get(ip)[field_i],
else => unreachable,
};
const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src);
@@ -32197,7 +32200,7 @@ fn coerceTupleToTuple(
const ip = &mod.intern_pool;
const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
- .struct_type => |struct_type| struct_type.field_types.len,
+ .struct_type => ip.loadStructType(tuple_ty.toIntern()).field_types.len,
else => unreachable,
};
const field_vals = try sema.arena.alloc(InternPool.Index, dest_field_count);
@@ -32207,7 +32210,7 @@ fn coerceTupleToTuple(
const inst_ty = sema.typeOf(inst);
const src_field_count = switch (ip.indexToKey(inst_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
- .struct_type => |struct_type| struct_type.field_types.len,
+ .struct_type => ip.loadStructType(inst_ty.toIntern()).field_types.len,
else => unreachable,
};
if (src_field_count > dest_field_count) return error.NotCoercible;
@@ -32222,10 +32225,14 @@ fn coerceTupleToTuple(
anon_struct_type.names.get(ip)[field_i]
else
try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
- .struct_type => |struct_type| if (struct_type.field_names.len > 0)
- struct_type.field_names.get(ip)[field_i]
- else
- try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
+ .struct_type => s: {
+ const struct_type = ip.loadStructType(inst_ty.toIntern());
+ if (struct_type.field_names.len > 0) {
+ break :s struct_type.field_names.get(ip)[field_i];
+ } else {
+ break :s try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i});
+ }
+ },
else => unreachable,
};
@@ -32234,12 +32241,12 @@ fn coerceTupleToTuple(
const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[field_index_usize],
- .struct_type => |struct_type| struct_type.field_types.get(ip)[field_index_usize],
+ .struct_type => ip.loadStructType(tuple_ty.toIntern()).field_types.get(ip)[field_index_usize],
else => unreachable,
};
const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[field_index_usize],
- .struct_type => |struct_type| struct_type.fieldInit(ip, field_index_usize),
+ .struct_type => ip.loadStructType(tuple_ty.toIntern()).fieldInit(ip, field_index_usize),
else => unreachable,
};
@@ -32278,7 +32285,7 @@ fn coerceTupleToTuple(
const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[i],
- .struct_type => |struct_type| struct_type.fieldInit(ip, i),
+ .struct_type => ip.loadStructType(tuple_ty.toIntern()).fieldInit(ip, i),
else => unreachable,
};
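In the tuple coercion paths above, field types and default initializers are likewise read off the loaded handle. A sketch, assuming `ip`, a loaded `struct_type`, and a field index `i`:

    const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
    const default_val = struct_type.fieldInit(ip, i); // `.none` when the field has no default
    _ = field_ty;
    _ = default_val;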
@@ -35518,7 +35525,7 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
pub fn resolveStructAlignment(
sema: *Sema,
ty: InternPool.Index,
- struct_type: InternPool.Key.StructType,
+ struct_type: InternPool.LoadedStructType,
) CompileError!Alignment {
const mod = sema.mod;
const ip = &mod.intern_pool;
@@ -35658,7 +35665,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
}
}
- const RuntimeOrder = InternPool.Key.StructType.RuntimeOrder;
+ const RuntimeOrder = InternPool.LoadedStructType.RuntimeOrder;
const AlignSortContext = struct {
aligns: []const Alignment,
@@ -35710,7 +35717,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
_ = try sema.typeRequiresComptime(ty);
}
-fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) CompileError!void {
+fn semaBackingIntType(mod: *Module, struct_type: InternPool.LoadedStructType) CompileError!void {
const gpa = mod.gpa;
const ip = &mod.intern_pool;
@@ -35869,7 +35876,7 @@ fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void
pub fn resolveUnionAlignment(
sema: *Sema,
ty: Type,
- union_type: InternPool.Key.UnionType,
+ union_type: InternPool.LoadedUnionType,
) CompileError!Alignment {
const mod = sema.mod;
const ip = &mod.intern_pool;
@@ -35889,13 +35896,12 @@ pub fn resolveUnionAlignment(
try sema.resolveTypeFieldsUnion(ty, union_type);
- const union_obj = ip.loadUnionType(union_type);
var max_align: Alignment = .@"1";
- for (0..union_obj.field_names.len) |field_index| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ for (0..union_type.field_types.len) |field_index| {
+ const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
- const explicit_align = union_obj.fieldAlign(ip, @intCast(field_index));
+ const explicit_align = union_type.fieldAlign(ip, @intCast(field_index));
const field_align = if (explicit_align != .none)
explicit_align
else
@@ -35913,16 +35919,17 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
const mod = sema.mod;
const ip = &mod.intern_pool;
- const union_type = ip.indexToKey(ty.ip_index).union_type;
- try sema.resolveTypeFieldsUnion(ty, union_type);
+ try sema.resolveTypeFieldsUnion(ty, ip.loadUnionType(ty.ip_index));
- const union_obj = ip.loadUnionType(union_type);
- switch (union_obj.flagsPtr(ip).status) {
+ // Load again, since the tag type might have changed due to resolution.
+ const union_type = ip.loadUnionType(ty.ip_index);
+
+ switch (union_type.flagsPtr(ip).status) {
.none, .have_field_types => {},
.field_types_wip, .layout_wip => {
const msg = try Module.ErrorMsg.create(
sema.gpa,
- mod.declPtr(union_obj.decl).srcLoc(mod),
+ mod.declPtr(union_type.decl).srcLoc(mod),
"union '{}' depends on itself",
.{ty.fmt(mod)},
);
@@ -35931,17 +35938,17 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
.have_layout, .fully_resolved_wip, .fully_resolved => return,
}
- const prev_status = union_obj.flagsPtr(ip).status;
- errdefer if (union_obj.flagsPtr(ip).status == .layout_wip) {
- union_obj.flagsPtr(ip).status = prev_status;
+ const prev_status = union_type.flagsPtr(ip).status;
+ errdefer if (union_type.flagsPtr(ip).status == .layout_wip) {
+ union_type.flagsPtr(ip).status = prev_status;
};
- union_obj.flagsPtr(ip).status = .layout_wip;
+ union_type.flagsPtr(ip).status = .layout_wip;
var max_size: u64 = 0;
var max_align: Alignment = .@"1";
- for (0..union_obj.field_names.len) |field_index| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ for (0..union_type.field_types.len) |field_index| {
+ const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
if (!(try sema.typeHasRuntimeBits(field_ty))) continue;
max_size = @max(max_size, sema.typeAbiSize(field_ty) catch |err| switch (err) {
@@ -35953,7 +35960,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
else => return err,
});
- const explicit_align = union_obj.fieldAlign(ip, @intCast(field_index));
+ const explicit_align = union_type.fieldAlign(ip, @intCast(field_index));
const field_align = if (explicit_align != .none)
explicit_align
else
@@ -35962,10 +35969,10 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
max_align = max_align.max(field_align);
}
- const flags = union_obj.flagsPtr(ip);
- const has_runtime_tag = flags.runtime_tag.hasTag() and try sema.typeHasRuntimeBits(Type.fromInterned(union_obj.enum_tag_ty));
+ const flags = union_type.flagsPtr(ip);
+ const has_runtime_tag = flags.runtime_tag.hasTag() and try sema.typeHasRuntimeBits(Type.fromInterned(union_type.enum_tag_ty));
const size, const alignment, const padding = if (has_runtime_tag) layout: {
- const enum_tag_type = Type.fromInterned(union_obj.enum_tag_ty);
+ const enum_tag_type = Type.fromInterned(union_type.enum_tag_ty);
const tag_align = try sema.typeAbiAlignment(enum_tag_type);
const tag_size = try sema.typeAbiSize(enum_tag_type);
@@ -35999,22 +36006,22 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
flags.alignment = alignment;
flags.status = .have_layout;
- if (union_obj.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
+ if (union_type.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
- mod.declPtr(union_obj.decl).srcLoc(mod),
+ mod.declPtr(union_type.decl).srcLoc(mod),
"union layout depends on it having runtime bits",
.{},
);
return sema.failWithOwnedErrorMsg(null, msg);
}
- if (union_obj.flagsPtr(ip).assumed_pointer_aligned and
+ if (union_type.flagsPtr(ip).assumed_pointer_aligned and
alignment.compareStrict(.neq, Alignment.fromByteUnits(@divExact(mod.getTarget().ptrBitWidth(), 8))))
{
const msg = try Module.ErrorMsg.create(
sema.gpa,
- mod.declPtr(union_obj.decl).srcLoc(mod),
+ mod.declPtr(union_type.decl).srcLoc(mod),
"union layout depends on being pointer aligned",
.{},
);
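`resolveUnionLayout` above also shows that a loaded handle can go stale across resolution: fields are resolved through one handle, then a fresh one is taken before flags are read (the "Load again" comment). A condensed sketch of that reload, assuming `sema: *Sema` and `ty: Type`:

    const ip = &sema.mod.intern_pool;
    try sema.resolveTypeFieldsUnion(ty, ip.loadUnionType(ty.ip_index));
    // Re-load: field resolution may have filled in the tag type.
    const union_type = ip.loadUnionType(ty.ip_index);
    _ = union_type.flagsPtr(ip).status;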
@@ -36202,12 +36209,11 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void {
else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) {
.type_struct,
- .type_struct_ns,
.type_struct_packed,
.type_struct_packed_inits,
- => try sema.resolveTypeFieldsStruct(ty_ip, ip.indexToKey(ty_ip).struct_type),
+ => try sema.resolveTypeFieldsStruct(ty_ip, ip.loadStructType(ty_ip)),
- .type_union => try sema.resolveTypeFieldsUnion(Type.fromInterned(ty_ip), ip.indexToKey(ty_ip).union_type),
+ .type_union => try sema.resolveTypeFieldsUnion(Type.fromInterned(ty_ip), ip.loadUnionType(ty_ip)),
.simple_type => try sema.resolveSimpleType(ip.indexToKey(ty_ip).simple_type),
else => {},
},
@@ -36239,7 +36245,7 @@ fn resolveSimpleType(sema: *Sema, simple_type: InternPool.SimpleType) CompileErr
pub fn resolveTypeFieldsStruct(
sema: *Sema,
ty: InternPool.Index,
- struct_type: InternPool.Key.StructType,
+ struct_type: InternPool.LoadedStructType,
) CompileError!void {
const mod = sema.mod;
const ip = &mod.intern_pool;
@@ -36299,7 +36305,7 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) CompileError!void {
struct_type.setHaveFieldInits(ip);
}
-pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Key.UnionType) CompileError!void {
+pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) CompileError!void {
const mod = sema.mod;
const ip = &mod.intern_pool;
const owner_decl = mod.declPtr(union_type.decl);
@@ -36530,7 +36536,7 @@ fn structZirInfo(zir: Zir, zir_index: Zir.Inst.Index) struct {
fn semaStructFields(
mod: *Module,
arena: Allocator,
- struct_type: InternPool.Key.StructType,
+ struct_type: InternPool.LoadedStructType,
) CompileError!void {
const gpa = mod.gpa;
const ip = &mod.intern_pool;
@@ -36797,7 +36803,7 @@ fn semaStructFields(
fn semaStructFieldInits(
mod: *Module,
arena: Allocator,
- struct_type: InternPool.Key.StructType,
+ struct_type: InternPool.LoadedStructType,
) CompileError!void {
const gpa = mod.gpa;
const ip = &mod.intern_pool;
@@ -36948,7 +36954,7 @@ fn semaStructFieldInits(
}
}
-fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.UnionType) CompileError!void {
+fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.LoadedUnionType) CompileError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -37080,7 +37086,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un
// The provided type is the enum tag type.
union_type.tagTypePtr(ip).* = provided_ty.toIntern();
const enum_type = switch (ip.indexToKey(provided_ty.toIntern())) {
- .enum_type => |x| x,
+ .enum_type => ip.loadEnumType(provided_ty.toIntern()),
else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{provided_ty.fmt(mod)}),
};
// The fields of the union must match the enum exactly.
@@ -37217,7 +37223,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un
}
if (explicit_tags_seen.len > 0) {
- const tag_info = ip.indexToKey(union_type.tagTypePtr(ip).*).enum_type;
+ const tag_info = ip.loadEnumType(union_type.tagTypePtr(ip).*);
const enum_index = tag_info.nameIndex(ip, field_name) orelse {
const ty_src = mod.fieldSrcLoc(union_type.decl, .{
.index = field_i,
@@ -37328,7 +37334,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un
union_type.setFieldAligns(ip, field_aligns.items);
if (explicit_tags_seen.len > 0) {
- const tag_info = ip.indexToKey(union_type.tagTypePtr(ip).*).enum_type;
+ const tag_info = ip.loadEnumType(union_type.tagTypePtr(ip).*);
if (tag_info.names.len > fields_len) {
const msg = msg: {
const msg = try sema.errMsg(&block_scope, src, "enum field(s) missing in union", .{});
@@ -37710,7 +37716,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.type_enum_explicit,
.type_enum_nonexhaustive,
.type_struct,
- .type_struct_ns,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
@@ -37733,8 +37738,9 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
return null;
},
- .struct_type => |struct_type| {
- try sema.resolveTypeFields(ty);
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
+ try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type);
if (struct_type.field_types.len == 0) {
// In this case the struct has no fields at all and
@@ -37792,10 +37798,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
} })));
},
- .union_type => |union_type| {
- try sema.resolveTypeFields(ty);
- const union_obj = ip.loadUnionType(union_type);
- const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.enum_tag_ty))) orelse
+ .union_type => {
+ const union_obj = ip.loadUnionType(ty.toIntern());
+ try sema.resolveTypeFieldsUnion(ty, union_obj);
+ const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypePtr(ip).*))) orelse
return null;
if (union_obj.field_types.len == 0) {
const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
@@ -37822,39 +37828,42 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
return Value.fromInterned(only);
},
- .enum_type => |enum_type| switch (enum_type.tag_mode) {
- .nonexhaustive => {
- if (enum_type.tag_ty == .comptime_int_type) return null;
+ .enum_type => {
+ const enum_type = ip.loadEnumType(ty.toIntern());
+ switch (enum_type.tag_mode) {
+ .nonexhaustive => {
+ if (enum_type.tag_ty == .comptime_int_type) return null;
- if (try sema.typeHasOnePossibleValue(Type.fromInterned(enum_type.tag_ty))) |int_opv| {
- const only = try mod.intern(.{ .enum_tag = .{
- .ty = ty.toIntern(),
- .int = int_opv.toIntern(),
- } });
- return Value.fromInterned(only);
- }
+ if (try sema.typeHasOnePossibleValue(Type.fromInterned(enum_type.tag_ty))) |int_opv| {
+ const only = try mod.intern(.{ .enum_tag = .{
+ .ty = ty.toIntern(),
+ .int = int_opv.toIntern(),
+ } });
+ return Value.fromInterned(only);
+ }
- return null;
- },
- .auto, .explicit => {
- if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;
-
- return Value.fromInterned(switch (enum_type.names.len) {
- 0 => try mod.intern(.{ .empty_enum_value = ty.toIntern() }),
- 1 => try mod.intern(.{ .enum_tag = .{
- .ty = ty.toIntern(),
- .int = if (enum_type.values.len == 0)
- (try mod.intValue(Type.fromInterned(enum_type.tag_ty), 0)).toIntern()
- else
- try mod.intern_pool.getCoercedInts(
- mod.gpa,
- mod.intern_pool.indexToKey(enum_type.values.get(ip)[0]).int,
- enum_type.tag_ty,
- ),
- } }),
- else => return null,
- });
- },
+ return null;
+ },
+ .auto, .explicit => {
+ if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;
+
+ return Value.fromInterned(switch (enum_type.names.len) {
+ 0 => try mod.intern(.{ .empty_enum_value = ty.toIntern() }),
+ 1 => try mod.intern(.{ .enum_tag = .{
+ .ty = ty.toIntern(),
+ .int = if (enum_type.values.len == 0)
+ (try mod.intValue(Type.fromInterned(enum_type.tag_ty), 0)).toIntern()
+ else
+ try mod.intern_pool.getCoercedInts(
+ mod.gpa,
+ mod.intern_pool.indexToKey(enum_type.values.get(ip)[0]).int,
+ enum_type.tag_ty,
+ ),
+ } }),
+ else => return null,
+ });
+ },
+ }
},
else => unreachable,
@@ -38186,7 +38195,7 @@ fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!Alignment {
/// Not valid to call for packed unions.
/// Keep implementation in sync with `Module.unionFieldNormalAlignment`.
-fn unionFieldAlignment(sema: *Sema, u: InternPool.UnionType, field_index: u32) !Alignment {
+fn unionFieldAlignment(sema: *Sema, u: InternPool.LoadedUnionType, field_index: u32) !Alignment {
const mod = sema.mod;
const ip = &mod.intern_pool;
const field_align = u.fieldAlign(ip, field_index);
@@ -38234,7 +38243,7 @@ fn unionFieldIndex(
const ip = &mod.intern_pool;
try sema.resolveTypeFields(union_ty);
const union_obj = mod.typeToUnion(union_ty).?;
- const field_index = union_obj.nameIndex(ip, field_name) orelse
+ const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse
return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
return @intCast(field_index);
}
@@ -38271,7 +38280,7 @@ fn anonStructFieldIndex(
.anon_struct_type => |anon_struct_type| for (anon_struct_type.names.get(ip), 0..) |name, i| {
if (name == field_name) return @intCast(i);
},
- .struct_type => |struct_type| if (struct_type.nameIndex(ip, field_name)) |i| return i,
+ .struct_type => if (ip.loadStructType(struct_ty.toIntern()).nameIndex(ip, field_name)) |i| return i,
else => unreachable,
}
return sema.fail(block, field_src, "no field named '{}' in anonymous struct '{}'", .{
@@ -38707,7 +38716,7 @@ fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool {
/// Asserts the type is an enum.
fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
const mod = sema.mod;
- const enum_type = mod.intern_pool.indexToKey(ty.toIntern()).enum_type;
+ const enum_type = mod.intern_pool.loadEnumType(ty.toIntern());
assert(enum_type.tag_mode != .nonexhaustive);
// The `tagValueIndex` function call below relies on the type being the integer tag type.
// `getCoerced` assumes the value will fit the new type.
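Enum accesses in Sema follow the same shape as the struct and union ones: where the key payload used to be captured, the code now calls `ip.loadEnumType`. A sketch, assuming `ip` and an interned enum type index `idx`:

    const enum_type = ip.loadEnumType(idx);   // was ip.indexToKey(idx).enum_type
    const tag_ty = Type.fromInterned(enum_type.tag_ty);
    const field_count = enum_type.names.len;
    _ = tag_ty;
    _ = field_count;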
src/type.zig
@@ -320,11 +320,12 @@ pub const Type = struct {
.generic_poison => unreachable,
},
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.decl.unwrap()) |decl_index| {
const decl = mod.declPtr(decl_index);
try decl.renderFullyQualifiedName(mod, writer);
- } else if (struct_type.namespace.unwrap()) |namespace_index| {
+ } else if (ip.loadStructType(ty.toIntern()).namespace.unwrap()) |namespace_index| {
const namespace = mod.namespacePtr(namespace_index);
try namespace.renderFullyQualifiedName(mod, .empty, writer);
} else {
@@ -573,7 +574,8 @@ pub const Type = struct {
.generic_poison => unreachable,
},
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) {
// In this case, we guess that hasRuntimeBits() for this type is true,
// and then later if our guess was incorrect, we emit a compile error.
@@ -601,7 +603,8 @@ pub const Type = struct {
return false;
},
- .union_type => |union_type| {
+ .union_type => {
+ const union_type = ip.loadUnionType(ty.toIntern());
switch (union_type.flagsPtr(ip).runtime_tag) {
.none => {
if (union_type.flagsPtr(ip).status == .field_types_wip) {
@@ -628,9 +631,8 @@ pub const Type = struct {
.lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes())
return error.NeedLazy,
}
- const union_obj = ip.loadUnionType(union_type);
- for (0..union_obj.field_types.len) |field_index| {
- const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
+ for (0..union_type.field_types.len) |field_index| {
+ const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
return true;
} else {
@@ -639,7 +641,7 @@ pub const Type = struct {
},
.opaque_type => true,
- .enum_type => |enum_type| Type.fromInterned(enum_type.tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
+ .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
// values, not types
.undef,
@@ -736,15 +738,19 @@ pub const Type = struct {
.generic_poison,
=> false,
},
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
// Struct with no fields have a well-defined layout of no bits.
return struct_type.layout != .Auto or struct_type.field_types.len == 0;
},
- .union_type => |union_type| switch (union_type.flagsPtr(ip).runtime_tag) {
- .none, .safety => union_type.flagsPtr(ip).layout != .Auto,
- .tagged => false,
+ .union_type => {
+ const union_type = ip.loadUnionType(ty.toIntern());
+ return switch (union_type.flagsPtr(ip).runtime_tag) {
+ .none, .safety => union_type.flagsPtr(ip).layout != .Auto,
+ .tagged => false,
+ };
},
- .enum_type => |enum_type| switch (enum_type.tag_mode) {
+ .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) {
.auto => false,
.explicit, .nonexhaustive => true,
},
@@ -1019,7 +1025,8 @@ pub const Type = struct {
.noreturn => unreachable,
.generic_poison => unreachable,
},
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .Packed) {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
@@ -1066,7 +1073,8 @@ pub const Type = struct {
}
return .{ .scalar = big_align };
},
- .union_type => |union_type| {
+ .union_type => {
+ const union_type = ip.loadUnionType(ty.toIntern());
const flags = union_type.flagsPtr(ip).*;
if (flags.alignment != .none) return .{ .scalar = flags.alignment };
@@ -1082,8 +1090,8 @@ pub const Type = struct {
return .{ .scalar = union_type.flagsPtr(ip).alignment };
},
.opaque_type => return .{ .scalar = .@"1" },
- .enum_type => |enum_type| return .{
- .scalar = Type.fromInterned(enum_type.tag_ty).abiAlignment(mod),
+ .enum_type => return .{
+ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(mod),
},
// values, not types
@@ -1394,7 +1402,8 @@ pub const Type = struct {
.noreturn => unreachable,
.generic_poison => unreachable,
},
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => switch (struct_type.layout) {
@@ -1439,7 +1448,8 @@ pub const Type = struct {
return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
},
- .union_type => |union_type| {
+ .union_type => {
+ const union_type = ip.loadUnionType(ty.toIntern());
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{
@@ -1455,7 +1465,7 @@ pub const Type = struct {
return .{ .scalar = union_type.size(ip).* };
},
.opaque_type => unreachable, // no size available
- .enum_type => |enum_type| return AbiSizeAdvanced{ .scalar = Type.fromInterned(enum_type.tag_ty).abiSize(mod) },
+ .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(mod) },
// values, not types
.undef,
@@ -1644,7 +1654,8 @@ pub const Type = struct {
.extern_options => unreachable,
.type_info => unreachable,
},
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
const is_packed = struct_type.layout == .Packed;
if (opt_sema) |sema| {
try sema.resolveTypeFields(ty);
@@ -1661,7 +1672,8 @@ pub const Type = struct {
return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
},
- .union_type => |union_type| {
+ .union_type => {
+ const union_type = ip.loadUnionType(ty.toIntern());
const is_packed = ty.containerLayout(mod) == .Packed;
if (opt_sema) |sema| {
try sema.resolveTypeFields(ty);
@@ -1670,19 +1682,18 @@ pub const Type = struct {
if (!is_packed) {
return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
}
- const union_obj = ip.loadUnionType(union_type);
- assert(union_obj.flagsPtr(ip).status.haveFieldTypes());
+ assert(union_type.flagsPtr(ip).status.haveFieldTypes());
var size: u64 = 0;
- for (0..union_obj.field_types.len) |field_index| {
- const field_ty = union_obj.field_types.get(ip)[field_index];
+ for (0..union_type.field_types.len) |field_index| {
+ const field_ty = union_type.field_types.get(ip)[field_index];
size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, opt_sema));
}
return size;
},
.opaque_type => unreachable,
- .enum_type => |enum_type| return bitSizeAdvanced(Type.fromInterned(enum_type.tag_ty), mod, opt_sema),
+ .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, opt_sema),
// values, not types
.undef,
@@ -1713,8 +1724,8 @@ pub const Type = struct {
pub fn layoutIsResolved(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => |struct_type| struct_type.haveLayout(ip),
- .union_type => |union_type| union_type.haveLayout(ip),
+ .struct_type => ip.loadStructType(ty.toIntern()).haveLayout(ip),
+ .union_type => ip.loadUnionType(ty.toIntern()).haveLayout(ip),
.array_type => |array_type| {
if ((array_type.len + @intFromBool(array_type.sentinel != .none)) == 0) return true;
return Type.fromInterned(array_type.child).layoutIsResolved(mod);
@@ -1914,16 +1925,18 @@ pub const Type = struct {
/// Otherwise, returns `null`.
pub fn unionTagType(ty: Type, mod: *Module) ?Type {
const ip = &mod.intern_pool;
- return switch (ip.indexToKey(ty.toIntern())) {
- .union_type => |union_type| switch (union_type.flagsPtr(ip).runtime_tag) {
- .tagged => {
- assert(union_type.flagsPtr(ip).status.haveFieldTypes());
- return Type.fromInterned(union_type.enum_tag_ty);
- },
- else => null,
+ switch (ip.indexToKey(ty.toIntern())) {
+ .union_type => {},
+ else => return null,
+ }
+ const union_type = ip.loadUnionType(ty.toIntern());
+ switch (union_type.flagsPtr(ip).runtime_tag) {
+ .tagged => {
+ assert(union_type.flagsPtr(ip).status.haveFieldTypes());
+ return Type.fromInterned(union_type.enum_tag_ty);
},
- else => null,
- };
+ else => return null,
+ }
}
/// Same as `unionTagType` but includes safety tag.
@@ -1931,7 +1944,8 @@ pub const Type = struct {
pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
- .union_type => |union_type| {
+ .union_type => {
+ const union_type = ip.loadUnionType(ty.toIntern());
if (!union_type.hasTag(ip)) return null;
assert(union_type.haveFieldTypes(ip));
return Type.fromInterned(union_type.enum_tag_ty);
@@ -1981,17 +1995,16 @@ pub const Type = struct {
pub fn unionGetLayout(ty: Type, mod: *Module) Module.UnionLayout {
const ip = &mod.intern_pool;
- const union_type = ip.indexToKey(ty.toIntern()).union_type;
- const union_obj = ip.loadUnionType(union_type);
+ const union_obj = ip.loadUnionType(ty.toIntern());
return mod.getUnionLayout(union_obj);
}
pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => |struct_type| struct_type.layout,
+ .struct_type => ip.loadStructType(ty.toIntern()).layout,
.anon_struct_type => .Auto,
- .union_type => |union_type| union_type.flagsPtr(ip).layout,
+ .union_type => ip.loadUnionType(ty.toIntern()).flagsPtr(ip).layout,
else => unreachable,
};
}
@@ -2095,22 +2108,15 @@ pub const Type = struct {
/// Asserts the type is an array or vector or struct.
pub fn arrayLen(ty: Type, mod: *const Module) u64 {
- return arrayLenIp(ty, &mod.intern_pool);
+ return ty.arrayLenIp(&mod.intern_pool);
}
pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 {
- return switch (ip.indexToKey(ty.toIntern())) {
- .vector_type => |vector_type| vector_type.len,
- .array_type => |array_type| array_type.len,
- .struct_type => |struct_type| struct_type.field_types.len,
- .anon_struct_type => |tuple| tuple.types.len,
-
- else => unreachable,
- };
+ return ip.aggregateTypeLen(ty.toIntern());
}
pub fn arrayLenIncludingSentinel(ty: Type, mod: *const Module) u64 {
- return ty.arrayLen(mod) + @intFromBool(ty.sentinel(mod) != null);
+ return mod.intern_pool.aggregateTypeLenIncludingSentinel(ty.toIntern());
}
pub fn vectorLen(ty: Type, mod: *const Module) u32 {
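`arrayLen` and `arrayLenIncludingSentinel` above stop switching on the key entirely and delegate to the pool's aggregate-length helpers. A sketch, assuming `ip: *const InternPool` and an aggregate type index `idx`:

    const len = ip.aggregateTypeLen(idx);
    const len_with_sentinel = ip.aggregateTypeLenIncludingSentinel(idx);
    _ = len;
    _ = len_with_sentinel;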
@@ -2199,8 +2205,8 @@ pub const Type = struct {
.c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) },
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| return int_type,
- .struct_type => |t| ty = Type.fromInterned(t.backingIntType(ip).*),
- .enum_type => |enum_type| ty = Type.fromInterned(enum_type.tag_ty),
+ .struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntType(ip).*),
+ .enum_type => ty = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty),
.vector_type => |vector_type| ty = Type.fromInterned(vector_type.child),
.error_set_type, .inferred_error_set_type => {
@@ -2463,7 +2469,8 @@ pub const Type = struct {
.generic_poison => unreachable,
},
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveFieldTypes(ip));
if (struct_type.knownNonOpv(ip))
return null;
@@ -2505,11 +2512,11 @@ pub const Type = struct {
} })));
},
- .union_type => |union_type| {
- const union_obj = ip.loadUnionType(union_type);
+ .union_type => {
+ const union_obj = ip.loadUnionType(ty.toIntern());
const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(mod)) orelse
return null;
- if (union_obj.field_names.len == 0) {
+ if (union_obj.field_types.len == 0) {
const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
return Value.fromInterned(only);
}
@@ -2524,45 +2531,48 @@ pub const Type = struct {
return Value.fromInterned(only);
},
.opaque_type => return null,
- .enum_type => |enum_type| switch (enum_type.tag_mode) {
- .nonexhaustive => {
- if (enum_type.tag_ty == .comptime_int_type) return null;
-
- if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| {
- const only = try mod.intern(.{ .enum_tag = .{
- .ty = ty.toIntern(),
- .int = int_opv.toIntern(),
- } });
- return Value.fromInterned(only);
- }
+ .enum_type => {
+ const enum_type = ip.loadEnumType(ty.toIntern());
+ switch (enum_type.tag_mode) {
+ .nonexhaustive => {
+ if (enum_type.tag_ty == .comptime_int_type) return null;
+
+ if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| {
+ const only = try mod.intern(.{ .enum_tag = .{
+ .ty = ty.toIntern(),
+ .int = int_opv.toIntern(),
+ } });
+ return Value.fromInterned(only);
+ }
- return null;
- },
- .auto, .explicit => {
- if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;
+ return null;
+ },
+ .auto, .explicit => {
+ if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;
- switch (enum_type.names.len) {
- 0 => {
- const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
- return Value.fromInterned(only);
- },
- 1 => {
- if (enum_type.values.len == 0) {
- const only = try mod.intern(.{ .enum_tag = .{
- .ty = ty.toIntern(),
- .int = try mod.intern(.{ .int = .{
- .ty = enum_type.tag_ty,
- .storage = .{ .u64 = 0 },
- } }),
- } });
+ switch (enum_type.names.len) {
+ 0 => {
+ const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
return Value.fromInterned(only);
- } else {
- return Value.fromInterned(enum_type.values.get(ip)[0]);
- }
- },
- else => return null,
- }
- },
+ },
+ 1 => {
+ if (enum_type.values.len == 0) {
+ const only = try mod.intern(.{ .enum_tag = .{
+ .ty = ty.toIntern(),
+ .int = try mod.intern(.{ .int = .{
+ .ty = enum_type.tag_ty,
+ .storage = .{ .u64 = 0 },
+ } }),
+ } });
+ return Value.fromInterned(only);
+ } else {
+ return Value.fromInterned(enum_type.values.get(ip)[0]);
+ }
+ },
+ else => return null,
+ }
+ },
+ }
},
// values, not types
@@ -2676,7 +2686,8 @@ pub const Type = struct {
.type_info,
=> true,
},
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
// packed structs cannot be comptime-only because they have a well-defined
// memory layout and every field has a well-defined bit pattern.
if (struct_type.layout == .Packed)
@@ -2726,38 +2737,40 @@ pub const Type = struct {
return false;
},
- .union_type => |union_type| switch (union_type.flagsPtr(ip).requires_comptime) {
- .no, .wip => false,
- .yes => true,
- .unknown => {
- // The type is not resolved; assert that we have a Sema.
- const sema = opt_sema.?;
+ .union_type => {
+ const union_type = ip.loadUnionType(ty.toIntern());
+ switch (union_type.flagsPtr(ip).requires_comptime) {
+ .no, .wip => return false,
+ .yes => return true,
+ .unknown => {
+ // The type is not resolved; assert that we have a Sema.
+ const sema = opt_sema.?;
- if (union_type.flagsPtr(ip).status == .field_types_wip)
- return false;
+ if (union_type.flagsPtr(ip).status == .field_types_wip)
+ return false;
- union_type.flagsPtr(ip).requires_comptime = .wip;
- errdefer union_type.flagsPtr(ip).requires_comptime = .unknown;
+ union_type.flagsPtr(ip).requires_comptime = .wip;
+ errdefer union_type.flagsPtr(ip).requires_comptime = .unknown;
- try sema.resolveTypeFieldsUnion(ty, union_type);
+ try sema.resolveTypeFieldsUnion(ty, union_type);
- const union_obj = ip.loadUnionType(union_type);
- for (0..union_obj.field_types.len) |field_idx| {
- const field_ty = union_obj.field_types.get(ip)[field_idx];
- if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) {
- union_obj.flagsPtr(ip).requires_comptime = .yes;
- return true;
+ for (0..union_type.field_types.len) |field_idx| {
+ const field_ty = union_type.field_types.get(ip)[field_idx];
+ if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) {
+ union_type.flagsPtr(ip).requires_comptime = .yes;
+ return true;
+ }
}
- }
- union_obj.flagsPtr(ip).requires_comptime = .no;
- return false;
- },
+ union_type.flagsPtr(ip).requires_comptime = .no;
+ return false;
+ },
+ }
},
.opaque_type => false,
- .enum_type => |enum_type| return Type.fromInterned(enum_type.tag_ty).comptimeOnlyAdvanced(mod, opt_sema),
+ .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, opt_sema),
// values, not types
.undef,
@@ -2830,11 +2843,12 @@ pub const Type = struct {
/// Returns null if the type has no namespace.
pub fn getNamespaceIndex(ty: Type, mod: *Module) InternPool.OptionalNamespaceIndex {
- return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
- .opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
- .struct_type => |struct_type| struct_type.namespace,
- .union_type => |union_type| union_type.namespace.toOptional(),
- .enum_type => |enum_type| enum_type.namespace,
+ const ip = &mod.intern_pool;
+ return switch (ip.indexToKey(ty.toIntern())) {
+ .opaque_type => ip.loadOpaqueType(ty.toIntern()).namespace.toOptional(),
+ .struct_type => ip.loadStructType(ty.toIntern()).namespace,
+ .union_type => ip.loadUnionType(ty.toIntern()).namespace.toOptional(),
+ .enum_type => ip.loadEnumType(ty.toIntern()).namespace,
else => .none,
};
@@ -2920,16 +2934,18 @@ pub const Type = struct {
/// Asserts the type is an enum or a union.
pub fn intTagType(ty: Type, mod: *Module) Type {
- return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
- .union_type => |union_type| Type.fromInterned(union_type.enum_tag_ty).intTagType(mod),
- .enum_type => |enum_type| Type.fromInterned(enum_type.tag_ty),
+ const ip = &mod.intern_pool;
+ return switch (ip.indexToKey(ty.toIntern())) {
+ .union_type => Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty).intTagType(mod),
+ .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty),
else => unreachable,
};
}
pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool {
- return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
- .enum_type => |enum_type| switch (enum_type.tag_mode) {
+ const ip = &mod.intern_pool;
+ return switch (ip.indexToKey(ty.toIntern())) {
+ .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) {
.nonexhaustive => true,
.auto, .explicit => false,
},
@@ -2953,21 +2969,21 @@ pub const Type = struct {
}
pub fn enumFields(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice {
- return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names;
+ return mod.intern_pool.loadEnumType(ty.toIntern()).names;
}
pub fn enumFieldCount(ty: Type, mod: *Module) usize {
- return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names.len;
+ return mod.intern_pool.loadEnumType(ty.toIntern()).names.len;
}
pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString {
const ip = &mod.intern_pool;
- return ip.indexToKey(ty.toIntern()).enum_type.names.get(ip)[field_index];
+ return ip.loadEnumType(ty.toIntern()).names.get(ip)[field_index];
}
pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, mod: *Module) ?u32 {
const ip = &mod.intern_pool;
- const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
+ const enum_type = ip.loadEnumType(ty.toIntern());
return enum_type.nameIndex(ip, field_name);
}
@@ -2976,7 +2992,7 @@ pub const Type = struct {
/// declaration order, or `null` if `enum_tag` does not match any field.
pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
const ip = &mod.intern_pool;
- const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
+ const enum_type = ip.loadEnumType(ty.toIntern());
const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) {
.int => enum_tag.toIntern(),
.enum_tag => |info| info.int,
@@ -2990,7 +3006,7 @@ pub const Type = struct {
pub fn structFieldName(ty: Type, field_index: u32, mod: *Module) InternPool.OptionalNullTerminatedString {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => |struct_type| struct_type.fieldName(ip, field_index),
+ .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, field_index),
.anon_struct_type => |anon_struct| anon_struct.fieldName(ip, field_index),
else => unreachable,
};
@@ -3010,7 +3026,7 @@ pub const Type = struct {
pub fn structFieldCount(ty: Type, mod: *Module) u32 {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => |struct_type| struct_type.field_types.len,
+ .struct_type => ip.loadStructType(ty.toIntern()).field_types.len,
.anon_struct_type => |anon_struct| anon_struct.types.len,
else => unreachable,
};
@@ -3020,9 +3036,9 @@ pub const Type = struct {
pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => |struct_type| Type.fromInterned(struct_type.field_types.get(ip)[index]),
- .union_type => |union_type| {
- const union_obj = ip.loadUnionType(union_type);
+ .struct_type => Type.fromInterned(ip.loadStructType(ty.toIntern()).field_types.get(ip)[index]),
+ .union_type => {
+ const union_obj = ip.loadUnionType(ty.toIntern());
return Type.fromInterned(union_obj.field_types.get(ip)[index]);
},
.anon_struct_type => |anon_struct| Type.fromInterned(anon_struct.types.get(ip)[index]),
@@ -3033,7 +3049,8 @@ pub const Type = struct {
pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) Alignment {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.layout != .Packed);
const explicit_align = struct_type.fieldAlign(ip, index);
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
@@ -3042,8 +3059,8 @@ pub const Type = struct {
.anon_struct_type => |anon_struct| {
return Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignment(mod);
},
- .union_type => |union_type| {
- const union_obj = ip.loadUnionType(union_type);
+ .union_type => {
+ const union_obj = ip.loadUnionType(ty.toIntern());
return mod.unionFieldNormalAlignment(union_obj, @intCast(index));
},
else => unreachable,
@@ -3053,7 +3070,8 @@ pub const Type = struct {
pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
const val = struct_type.fieldInit(ip, index);
// TODO: avoid using `unreachable` to indicate this.
if (val == .none) return Value.@"unreachable";
@@ -3072,7 +3090,8 @@ pub const Type = struct {
pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.fieldIsComptime(ip, index)) {
assert(struct_type.haveFieldInits(ip));
return Value.fromInterned(struct_type.field_inits.get(ip)[index]);
@@ -3095,7 +3114,7 @@ pub const Type = struct {
pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => |struct_type| struct_type.fieldIsComptime(ip, index),
+ .struct_type => ip.loadStructType(ty.toIntern()).fieldIsComptime(ip, index),
.anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none,
else => unreachable,
};
@@ -3110,7 +3129,8 @@ pub const Type = struct {
pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveLayout(ip));
assert(struct_type.layout != .Packed);
return struct_type.offsets.get(ip)[index];
@@ -3137,11 +3157,11 @@ pub const Type = struct {
return offset;
},
- .union_type => |union_type| {
+ .union_type => {
+ const union_type = ip.loadUnionType(ty.toIntern());
if (!union_type.hasTag(ip))
return 0;
- const union_obj = ip.loadUnionType(union_type);
- const layout = mod.getUnionLayout(union_obj);
+ const layout = mod.getUnionLayout(union_type);
if (layout.tag_align.compare(.gte, layout.payload_align)) {
// {Tag, Payload}
return layout.payload_align.forward(layout.tag_size);
@@ -3194,7 +3214,8 @@ pub const Type = struct {
pub fn isTuple(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .Packed) return false;
if (struct_type.decl == .none) return false;
return struct_type.flagsPtr(ip).is_tuple;
@@ -3215,7 +3236,8 @@ pub const Type = struct {
pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => |struct_type| {
+ .struct_type => {
+ const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .Packed) return false;
if (struct_type.decl == .none) return false;
return struct_type.flagsPtr(ip).is_tuple;
@@ -3262,12 +3284,12 @@ pub const Type = struct {
}
pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
- return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
- inline .struct_type,
- .union_type,
- .enum_type,
- .opaque_type,
- => |info| info.zir_index.unwrap(),
+ const ip = &zcu.intern_pool;
+ return switch (ip.indexToKey(ty.toIntern())) {
+ .struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(),
+ .union_type => ip.loadUnionType(ty.toIntern()).zir_index.unwrap(),
+ .enum_type => ip.loadEnumType(ty.toIntern()).zir_index.unwrap(),
+ .opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index.unwrap(),
else => null,
};
}
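Across src/type.zig the mechanical pattern is the same everywhere: the switch arm drops its payload capture and the body loads the handle itself. A sketch of one arm, assuming `ip` and a `Type` value `ty`:

    switch (ip.indexToKey(ty.toIntern())) {
        // Before: .struct_type => |struct_type| ...
        .struct_type => {
            const struct_type = ip.loadStructType(ty.toIntern());
            _ = struct_type.layout;
        },
        else => {},
    }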
src/TypedValue.zig
@@ -89,7 +89,7 @@ pub fn print(
if (payload.tag) |tag| {
try print(.{
- .ty = Type.fromInterned(ip.indexToKey(ty.toIntern()).union_type.enum_tag_ty),
+ .ty = Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty),
.val = tag,
}, writer, level - 1, mod);
try writer.writeAll(" = ");
@@ -247,7 +247,7 @@ pub fn print(
if (level == 0) {
return writer.writeAll("(enum)");
}
- const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
+ const enum_type = ip.loadEnumType(ty.toIntern());
if (enum_type.tagValueIndex(ip, val.toIntern())) |tag_index| {
try writer.print(".{i}", .{enum_type.names.get(ip)[tag_index].fmt(ip)});
return;
@@ -398,7 +398,7 @@ pub fn print(
}
},
.Union => {
- const field_name = mod.typeToUnion(container_ty).?.field_names.get(ip)[@intCast(field.index)];
+ const field_name = mod.typeToUnion(container_ty).?.loadTagType(ip).names.get(ip)[@intCast(field.index)];
try writer.print(".{i}", .{field_name.fmt(ip)});
},
.Pointer => {
@@ -482,11 +482,7 @@ fn printAggregate(
for (0..max_len) |i| {
if (i != 0) try writer.writeAll(", ");
- const field_name = switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => |x| x.fieldName(ip, i),
- .anon_struct_type => |x| if (x.isTuple()) .none else x.names.get(ip)[i].toOptional(),
- else => unreachable,
- };
+ const field_name = ty.structFieldName(@intCast(i), mod);
if (field_name.unwrap()) |name| try writer.print(".{} = ", .{name.fmt(ip)});
try print(.{
src/Value.zig
@@ -424,22 +424,28 @@ pub fn toType(self: Value) Type {
pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
const ip = &mod.intern_pool;
- return switch (ip.indexToKey(ip.typeOf(val.toIntern()))) {
+ const enum_ty = ip.typeOf(val.toIntern());
+ return switch (ip.indexToKey(enum_ty)) {
// Assume it is already an integer and return it directly.
.simple_type, .int_type => val,
.enum_literal => |enum_literal| {
const field_index = ty.enumFieldIndex(enum_literal, mod).?;
- return switch (ip.indexToKey(ty.toIntern())) {
+ switch (ip.indexToKey(ty.toIntern())) {
// Assume it is already an integer and return it directly.
- .simple_type, .int_type => val,
- .enum_type => |enum_type| if (enum_type.values.len != 0)
- Value.fromInterned(enum_type.values.get(ip)[field_index])
- else // Field index and integer values are the same.
- mod.intValue(Type.fromInterned(enum_type.tag_ty), field_index),
+ .simple_type, .int_type => return val,
+ .enum_type => {
+ const enum_type = ip.loadEnumType(ty.toIntern());
+ if (enum_type.values.len != 0) {
+ return Value.fromInterned(enum_type.values.get(ip)[field_index]);
+ } else {
+ // Field index and integer values are the same.
+ return mod.intValue(Type.fromInterned(enum_type.tag_ty), field_index);
+ }
+ },
else => unreachable,
- };
+ }
},
- .enum_type => |enum_type| try mod.getCoerced(val, Type.fromInterned(enum_type.tag_ty)),
+ .enum_type => try mod.getCoerced(val, Type.fromInterned(ip.loadEnumType(enum_ty).tag_ty)),
else => unreachable,
};
}
@@ -832,7 +838,7 @@ pub fn writeToPackedMemory(
}
},
.Struct => {
- const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
+ const struct_type = ip.loadStructType(ty.toIntern());
// Sema is supposed to have emitted a compile error already in the case of Auto,
// and Extern is handled in non-packed writeToMemory.
assert(struct_type.layout == .Packed);