Commit d18881de1b
Changed files (11)
src/arch/x86_64/CodeGen.zig
@@ -11411,7 +11411,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const union_obj = mod.typeToUnion(union_ty).?;
const field_name = union_obj.fields.keys()[extra.field_index];
const tag_ty = union_obj.tag_ty;
- const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?);
+ const field_index = tag_ty.enumFieldIndex(field_name, mod).?;
const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
const tag_int_val = try tag_val.enumToInt(tag_ty, mod);
const tag_int = tag_int_val.toUnsignedInt(mod);
src/codegen/c/type.zig
@@ -1951,7 +1951,7 @@ pub const CType = extern union {
defer c_field_i += 1;
fields_pl[c_field_i] = .{
- .name = try if (ty.isSimpleTuple())
+ .name = try if (ty.isSimpleTuple(mod))
std.fmt.allocPrintZ(arena, "f{}", .{field_i})
else
arena.dupeZ(u8, switch (zig_ty_tag) {
@@ -2102,7 +2102,7 @@ pub const CType = extern union {
.payload => unreachable,
}) or !mem.eql(
u8,
- if (ty.isSimpleTuple())
+ if (ty.isSimpleTuple(mod))
std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
else switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i, mod),
@@ -2224,7 +2224,7 @@ pub const CType = extern union {
.global => .global,
.payload => unreachable,
});
- hasher.update(if (ty.isSimpleTuple())
+ hasher.update(if (ty.isSimpleTuple(mod))
std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
else switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i, mod),
src/codegen/c.zig
@@ -3417,8 +3417,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
const op_inst = Air.refToIndex(un_op);
const op_ty = f.typeOf(un_op);
const ret_ty = if (is_ptr) op_ty.childType(mod) else op_ty;
- var lowered_ret_buf: LowerFnRetTyBuffer = undefined;
- const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod);
+ const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod);
if (op_inst != null and f.air.instructions.items(.tag)[op_inst.?] == .call_always_tail) {
try reap(f, inst, &.{un_op});
@@ -4115,8 +4114,7 @@ fn airCall(
}
resolved_arg.* = try f.resolveInst(arg);
if (arg_cty != try f.typeToIndex(arg_ty, .complete)) {
- var lowered_arg_buf: LowerFnRetTyBuffer = undefined;
- const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, mod);
+ const lowered_arg_ty = try lowerFnRetTy(arg_ty, mod);
const array_local = try f.allocLocal(inst, lowered_arg_ty);
try writer.writeAll("memcpy(");
@@ -4146,8 +4144,7 @@ fn airCall(
};
const ret_ty = fn_ty.fnReturnType();
- var lowered_ret_buf: LowerFnRetTyBuffer = undefined;
- const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod);
+ const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod);
const result_local = result: {
if (modifier == .always_tail) {
@@ -5200,7 +5197,7 @@ fn fieldLocation(
const field_ty = container_ty.structFieldType(next_field_index, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- break .{ .field = if (container_ty.isSimpleTuple())
+ break .{ .field = if (container_ty.isSimpleTuple(mod))
.{ .field = next_field_index }
else
.{ .identifier = container_ty.structFieldName(next_field_index, mod) } };
@@ -5395,16 +5392,11 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
const field_name: CValue = switch (struct_ty.ip_index) {
.none => switch (struct_ty.tag()) {
- .tuple, .anon_struct => if (struct_ty.isSimpleTuple())
- .{ .field = extra.field_index }
- else
- .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) },
-
else => unreachable,
},
else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
.struct_type => switch (struct_ty.containerLayout(mod)) {
- .Auto, .Extern => if (struct_ty.isSimpleTuple())
+ .Auto, .Extern => if (struct_ty.isSimpleTuple(mod))
.{ .field = extra.field_index }
else
.{ .identifier = struct_ty.structFieldName(extra.field_index, mod) },
@@ -5465,6 +5457,12 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
},
},
+
+ .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0)
+ .{ .field = extra.field_index }
+ else
+ .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) },
+
.union_type => |union_type| field_name: {
const union_obj = mod.unionPtr(union_type.index);
if (union_obj.layout == .Packed) {
@@ -6791,7 +6789,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const a = try Assignment.start(f, writer, field_ty);
- try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple())
+ try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple(mod))
.{ .field = field_i }
else
.{ .identifier = inst_ty.structFieldName(field_i, mod) });
@@ -7704,25 +7702,21 @@ const Vectorize = struct {
}
};
-const LowerFnRetTyBuffer = struct {
- names: [1][]const u8,
- types: [1]Type,
- values: [1]Value,
- payload: Type.Payload.AnonStruct,
-};
-fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *Module) Type {
- if (ret_ty.zigTypeTag(mod) == .NoReturn) return Type.noreturn;
+fn lowerFnRetTy(ret_ty: Type, mod: *Module) !Type {
+ if (ret_ty.ip_index == .noreturn_type) return Type.noreturn;
if (lowersToArray(ret_ty, mod)) {
- buffer.names = [1][]const u8{"array"};
- buffer.types = [1]Type{ret_ty};
- buffer.values = [1]Value{Value.@"unreachable"};
- buffer.payload = .{ .data = .{
- .names = &buffer.names,
- .types = &buffer.types,
- .values = &buffer.values,
- } };
- return Type.initPayload(&buffer.payload.base);
+ const names = [1]InternPool.NullTerminatedString{
+ try mod.intern_pool.getOrPutString(mod.gpa, "array"),
+ };
+ const types = [1]InternPool.Index{ret_ty.ip_index};
+ const values = [1]InternPool.Index{.none};
+ const interned = try mod.intern(.{ .anon_struct_type = .{
+ .names = &names,
+ .types = &types,
+ .values = &values,
+ } });
+ return interned.toType();
}
return if (ret_ty.hasRuntimeBitsIgnoreComptime(mod)) ret_ty else Type.void;
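Note: with `LowerFnRetTyBuffer` gone, the single-field wrapper used for array returns is interned instead of being built in caller-provided stack memory. A minimal sketch of the key this produces, assuming `array_ret_ty` is an already-resolved array `Type` (the C backend then emits the wrapper as a struct whose one member is named "array"):

    // Hedged sketch of the interned wrapper for a function returning an array.
    const wrapped = try mod.intern(.{ .anon_struct_type = .{
        .names = &.{try mod.intern_pool.getOrPutString(mod.gpa, "array")},
        .types = &.{array_ret_ty.ip_index}, // assumption: array_ret_ty: Type
        .values = &.{.none}, // .none marks the field as runtime-known
    } });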
src/codegen/llvm.zig
@@ -2009,83 +2009,84 @@ pub const Object = struct {
break :blk fwd_decl;
};
- if (ty.isSimpleTupleOrAnonStruct()) {
- const tuple = ty.tupleFields();
-
- var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
- defer di_fields.deinit(gpa);
-
- try di_fields.ensureUnusedCapacity(gpa, tuple.types.len);
-
- comptime assert(struct_layout_version == 2);
- var offset: u64 = 0;
-
- for (tuple.types, 0..) |field_ty, i| {
- const field_val = tuple.values[i];
- if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue;
-
- const field_size = field_ty.abiSize(mod);
- const field_align = field_ty.abiAlignment(mod);
- const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align);
- offset = field_offset + field_size;
-
- const field_name = if (ty.castTag(.anon_struct)) |payload|
- try gpa.dupeZ(u8, payload.data.names[i])
- else
- try std.fmt.allocPrintZ(gpa, "{d}", .{i});
- defer gpa.free(field_name);
+ switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .anon_struct_type => |tuple| {
+ var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
+ defer di_fields.deinit(gpa);
+
+ try di_fields.ensureUnusedCapacity(gpa, tuple.types.len);
+
+ comptime assert(struct_layout_version == 2);
+ var offset: u64 = 0;
+
+ for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
+ if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
+
+ const field_size = field_ty.toType().abiSize(mod);
+ const field_align = field_ty.toType().abiAlignment(mod);
+ const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ offset = field_offset + field_size;
+
+ const field_name = if (tuple.names.len != 0)
+ mod.intern_pool.stringToSlice(tuple.names[i])
+ else
+ try std.fmt.allocPrintZ(gpa, "{d}", .{i});
+ defer gpa.free(field_name);
+
+ try di_fields.append(gpa, dib.createMemberType(
+ fwd_decl.toScope(),
+ field_name,
+ null, // file
+ 0, // line
+ field_size * 8, // size in bits
+ field_align * 8, // align in bits
+ field_offset * 8, // offset in bits
+ 0, // flags
+ try o.lowerDebugType(field_ty.toType(), .full),
+ ));
+ }
- try di_fields.append(gpa, dib.createMemberType(
- fwd_decl.toScope(),
- field_name,
+ const full_di_ty = dib.createStructType(
+ compile_unit_scope,
+ name.ptr,
null, // file
0, // line
- field_size * 8, // size in bits
- field_align * 8, // align in bits
- field_offset * 8, // offset in bits
+ ty.abiSize(mod) * 8, // size in bits
+ ty.abiAlignment(mod) * 8, // align in bits
0, // flags
- try o.lowerDebugType(field_ty, .full),
- ));
- }
-
- const full_di_ty = dib.createStructType(
- compile_unit_scope,
- name.ptr,
- null, // file
- 0, // line
- ty.abiSize(mod) * 8, // size in bits
- ty.abiAlignment(mod) * 8, // align in bits
- 0, // flags
- null, // derived from
- di_fields.items.ptr,
- @intCast(c_int, di_fields.items.len),
- 0, // run time lang
- null, // vtable holder
- "", // unique id
- );
- dib.replaceTemporary(fwd_decl, full_di_ty);
- // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
- return full_di_ty;
- }
-
- if (mod.typeToStruct(ty)) |struct_obj| {
- if (!struct_obj.haveFieldTypes()) {
- // This can happen if a struct type makes it all the way to
- // flush() without ever being instantiated or referenced (even
- // via pointer). The only reason we are hearing about it now is
- // that it is being used as a namespace to put other debug types
- // into. Therefore we can satisfy this by making an empty namespace,
- // rather than changing the frontend to unnecessarily resolve the
- // struct field types.
- const owner_decl_index = ty.getOwnerDecl(mod);
- const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
- dib.replaceTemporary(fwd_decl, struct_di_ty);
- // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
- // means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module });
- return struct_di_ty;
- }
+ null, // derived from
+ di_fields.items.ptr,
+ @intCast(c_int, di_fields.items.len),
+ 0, // run time lang
+ null, // vtable holder
+ "", // unique id
+ );
+ dib.replaceTemporary(fwd_decl, full_di_ty);
+ // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
+ try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
+ return full_di_ty;
+ },
+ .struct_type => |struct_type| s: {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s;
+
+ if (!struct_obj.haveFieldTypes()) {
+ // This can happen if a struct type makes it all the way to
+ // flush() without ever being instantiated or referenced (even
+ // via pointer). The only reason we are hearing about it now is
+ // that it is being used as a namespace to put other debug types
+ // into. Therefore we can satisfy this by making an empty namespace,
+ // rather than changing the frontend to unnecessarily resolve the
+ // struct field types.
+ const owner_decl_index = ty.getOwnerDecl(mod);
+ const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
+ dib.replaceTemporary(fwd_decl, struct_di_ty);
+ // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
+ // means we can't use `gop` anymore.
+ try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module });
+ return struct_di_ty;
+ }
+ },
+ else => {},
}
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -2931,59 +2932,61 @@ pub const DeclGen = struct {
// reference, we need to copy it here.
gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());
- if (t.isSimpleTupleOrAnonStruct()) {
- const tuple = t.tupleFields();
- const llvm_struct_ty = dg.context.structCreateNamed("");
- gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
+ const struct_type = switch (mod.intern_pool.indexToKey(t.ip_index)) {
+ .anon_struct_type => |tuple| {
+ const llvm_struct_ty = dg.context.structCreateNamed("");
+ gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
- var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{};
- defer llvm_field_types.deinit(gpa);
+ var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{};
+ defer llvm_field_types.deinit(gpa);
- try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len);
+ try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len);
- comptime assert(struct_layout_version == 2);
- var offset: u64 = 0;
- var big_align: u32 = 0;
+ comptime assert(struct_layout_version == 2);
+ var offset: u64 = 0;
+ var big_align: u32 = 0;
- for (tuple.types, 0..) |field_ty, i| {
- const field_val = tuple.values[i];
- if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue;
+ for (tuple.types, tuple.values) |field_ty, field_val| {
+ if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
- const field_align = field_ty.abiAlignment(mod);
- big_align = @max(big_align, field_align);
- const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ const field_align = field_ty.toType().abiAlignment(mod);
+ big_align = @max(big_align, field_align);
+ const prev_offset = offset;
+ offset = std.mem.alignForwardGeneric(u64, offset, field_align);
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
- try llvm_field_types.append(gpa, llvm_array_ty);
- }
- const field_llvm_ty = try dg.lowerType(field_ty);
- try llvm_field_types.append(gpa, field_llvm_ty);
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ try llvm_field_types.append(gpa, llvm_array_ty);
+ }
+ const field_llvm_ty = try dg.lowerType(field_ty.toType());
+ try llvm_field_types.append(gpa, field_llvm_ty);
- offset += field_ty.abiSize(mod);
- }
- {
- const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, big_align);
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
- try llvm_field_types.append(gpa, llvm_array_ty);
+ offset += field_ty.toType().abiSize(mod);
+ }
+ {
+ const prev_offset = offset;
+ offset = std.mem.alignForwardGeneric(u64, offset, big_align);
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ try llvm_field_types.append(gpa, llvm_array_ty);
+ }
}
- }
- llvm_struct_ty.structSetBody(
- llvm_field_types.items.ptr,
- @intCast(c_uint, llvm_field_types.items.len),
- .False,
- );
+ llvm_struct_ty.structSetBody(
+ llvm_field_types.items.ptr,
+ @intCast(c_uint, llvm_field_types.items.len),
+ .False,
+ );
- return llvm_struct_ty;
- }
+ return llvm_struct_ty;
+ },
+ .struct_type => |struct_type| struct_type,
+ else => unreachable,
+ };
- const struct_obj = mod.typeToStruct(t).?;
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
if (struct_obj.layout == .Packed) {
assert(struct_obj.haveLayout());
@@ -3625,71 +3628,74 @@ pub const DeclGen = struct {
const field_vals = tv.val.castTag(.aggregate).?.data;
const gpa = dg.gpa;
- if (tv.ty.isSimpleTupleOrAnonStruct()) {
- const tuple = tv.ty.tupleFields();
- var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{};
- defer llvm_fields.deinit(gpa);
+ const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) {
+ .anon_struct_type => |tuple| {
+ var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{};
+ defer llvm_fields.deinit(gpa);
- try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len);
+ try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len);
- comptime assert(struct_layout_version == 2);
- var offset: u64 = 0;
- var big_align: u32 = 0;
- var need_unnamed = false;
+ comptime assert(struct_layout_version == 2);
+ var offset: u64 = 0;
+ var big_align: u32 = 0;
+ var need_unnamed = false;
- for (tuple.types, 0..) |field_ty, i| {
- if (tuple.values[i].ip_index != .unreachable_value) continue;
- if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
-
- const field_align = field_ty.abiAlignment(mod);
- big_align = @max(big_align, field_align);
- const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, field_align);
-
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
- // TODO make this and all other padding elsewhere in debug
- // builds be 0xaa not undef.
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
- }
+ for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
+ if (field_val != .none) continue;
+ if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
- const field_llvm_val = try dg.lowerValue(.{
- .ty = field_ty,
- .val = field_vals[i],
- });
+ const field_align = field_ty.toType().abiAlignment(mod);
+ big_align = @max(big_align, field_align);
+ const prev_offset = offset;
+ offset = std.mem.alignForwardGeneric(u64, offset, field_align);
- need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field_llvm_val);
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ // TODO make this and all other padding elsewhere in debug
+ // builds be 0xaa not undef.
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ }
- llvm_fields.appendAssumeCapacity(field_llvm_val);
+ const field_llvm_val = try dg.lowerValue(.{
+ .ty = field_ty.toType(),
+ .val = field_vals[i],
+ });
- offset += field_ty.abiSize(mod);
- }
- {
- const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, big_align);
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val);
+
+ llvm_fields.appendAssumeCapacity(field_llvm_val);
+
+ offset += field_ty.toType().abiSize(mod);
+ }
+ {
+ const prev_offset = offset;
+ offset = std.mem.alignForwardGeneric(u64, offset, big_align);
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
+ }
}
- }
- if (need_unnamed) {
- return dg.context.constStruct(
- llvm_fields.items.ptr,
- @intCast(c_uint, llvm_fields.items.len),
- .False,
- );
- } else {
- return llvm_struct_ty.constNamedStruct(
- llvm_fields.items.ptr,
- @intCast(c_uint, llvm_fields.items.len),
- );
- }
- }
+ if (need_unnamed) {
+ return dg.context.constStruct(
+ llvm_fields.items.ptr,
+ @intCast(c_uint, llvm_fields.items.len),
+ .False,
+ );
+ } else {
+ return llvm_struct_ty.constNamedStruct(
+ llvm_fields.items.ptr,
+ @intCast(c_uint, llvm_fields.items.len),
+ );
+ }
+ },
+ .struct_type => |struct_type| struct_type,
+ else => unreachable,
+ };
- const struct_obj = mod.typeToStruct(tv.ty).?;
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
if (struct_obj.layout == .Packed) {
assert(struct_obj.haveLayout());
@@ -4077,13 +4083,11 @@ pub const DeclGen = struct {
return field_addr.constIntToPtr(final_llvm_ty);
}
- var ty_buf: Type.Payload.Pointer = undefined;
-
const parent_llvm_ty = try dg.lowerType(parent_ty);
- if (llvmFieldIndex(parent_ty, field_index, mod, &ty_buf)) |llvm_field_index| {
+ if (llvmField(parent_ty, field_index, mod)) |llvm_field| {
const indices: [2]*llvm.Value = .{
llvm_u32.constInt(0, .False),
- llvm_u32.constInt(llvm_field_index, .False),
+ llvm_u32.constInt(llvm_field.index, .False),
};
return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
} else {
@@ -6006,8 +6010,7 @@ pub const FuncGen = struct {
return self.builder.buildTrunc(shifted_value, elem_llvm_ty, "");
},
else => {
- var ptr_ty_buf: Type.Payload.Pointer = undefined;
- const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
+ const llvm_field_index = llvmField(struct_ty, field_index, mod).?.index;
return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, "");
},
},
@@ -6035,16 +6038,22 @@ pub const FuncGen = struct {
switch (struct_ty.zigTypeTag(mod)) {
.Struct => {
assert(struct_ty.containerLayout(mod) != .Packed);
- var ptr_ty_buf: Type.Payload.Pointer = undefined;
- const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
+ const llvm_field = llvmField(struct_ty, field_index, mod).?;
const struct_llvm_ty = try self.dg.lowerType(struct_ty);
- const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field_index, "");
- const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
+ const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, "");
+ const field_ptr_ty = try mod.ptrType(.{
+ .elem_type = llvm_field.ty.ip_index,
+ .alignment = llvm_field.alignment,
+ });
if (isByRef(field_ty, mod)) {
if (canElideLoad(self, body_tail))
return field_ptr;
- return self.loadByRef(field_ptr, field_ty, ptr_ty_buf.data.alignment(mod), false);
+ const field_alignment = if (llvm_field.alignment != 0)
+ llvm_field.alignment
+ else
+ llvm_field.ty.abiAlignment(mod);
+ return self.loadByRef(field_ptr, field_ty, field_alignment, false);
} else {
return self.load(field_ptr, field_ptr_ty);
}
@@ -6912,12 +6921,14 @@ pub const FuncGen = struct {
const struct_ty = self.air.getRefType(ty_pl.ty);
const field_index = ty_pl.payload;
- var ptr_ty_buf: Type.Payload.Pointer = undefined;
const mod = self.dg.module;
- const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
+ const llvm_field = llvmField(struct_ty, field_index, mod).?;
const struct_llvm_ty = try self.dg.lowerType(struct_ty);
- const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field_index, "");
- const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
+ const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, "");
+ const field_ptr_ty = try mod.ptrType(.{
+ .elem_type = llvm_field.ty.ip_index,
+ .alignment = llvm_field.alignment,
+ });
return self.load(field_ptr, field_ptr_ty);
}
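Note: `mod.ptrType(...)` now replaces the old `Type.Payload.Pointer` stack buffers at these call sites. Judging from the `loadByRef` fallback above, an `alignment` of 0 appears to mean the element type's ABI alignment; that reading is an inference from this diff, not a documented contract:

    // Hedged sketch of the replacement pattern (elem_ty is a placeholder).
    const field_ptr_ty = try mod.ptrType(.{
        .elem_type = elem_ty.ip_index,
        .alignment = 0, // 0: fall back to elem_ty.abiAlignment(mod)
    });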
@@ -7430,9 +7441,8 @@ pub const FuncGen = struct {
const result = self.builder.buildExtractValue(result_struct, 0, "");
const overflow_bit = self.builder.buildExtractValue(result_struct, 1, "");
- var ty_buf: Type.Payload.Pointer = undefined;
- const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?;
- const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?;
+ const result_index = llvmField(dest_ty, 0, mod).?.index;
+ const overflow_index = llvmField(dest_ty, 1, mod).?.index;
if (isByRef(dest_ty, mod)) {
const result_alignment = dest_ty.abiAlignment(mod);
@@ -7736,9 +7746,8 @@ pub const FuncGen = struct {
const overflow_bit = self.builder.buildICmp(.NE, lhs, reconstructed, "");
- var ty_buf: Type.Payload.Pointer = undefined;
- const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?;
- const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?;
+ const result_index = llvmField(dest_ty, 0, mod).?.index;
+ const overflow_index = llvmField(dest_ty, 1, mod).?.index;
if (isByRef(dest_ty, mod)) {
const result_alignment = dest_ty.abiAlignment(mod);
@@ -9300,8 +9309,6 @@ pub const FuncGen = struct {
return running_int;
}
- var ptr_ty_buf: Type.Payload.Pointer = undefined;
-
if (isByRef(result_ty, mod)) {
const llvm_u32 = self.context.intType(32);
// TODO in debug builds init to undef so that the padding will be 0xaa
@@ -9313,7 +9320,7 @@ pub const FuncGen = struct {
if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
const llvm_elem = try self.resolveInst(elem);
- const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?;
+ const llvm_i = llvmField(result_ty, i, mod).?.index;
indices[1] = llvm_u32.constInt(llvm_i, .False);
const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
var field_ptr_payload: Type.Payload.Pointer = .{
@@ -9334,7 +9341,7 @@ pub const FuncGen = struct {
if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
const llvm_elem = try self.resolveInst(elem);
- const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?;
+ const llvm_i = llvmField(result_ty, i, mod).?.index;
result = self.builder.buildInsertValue(result, llvm_elem, llvm_i, "");
}
return result;
@@ -9796,9 +9803,8 @@ pub const FuncGen = struct {
else => {
const struct_llvm_ty = try self.dg.lowerPtrElemTy(struct_ty);
- var ty_buf: Type.Payload.Pointer = undefined;
- if (llvmFieldIndex(struct_ty, field_index, mod, &ty_buf)) |llvm_field_index| {
- return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field_index, "");
+ if (llvmField(struct_ty, field_index, mod)) |llvm_field| {
+ return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field.index, "");
} else {
// If we found no index then this means this is a zero sized field at the
// end of the struct. Treat our struct pointer as an array of two and get
@@ -10457,59 +10463,61 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ
};
}
+const LlvmField = struct {
+ index: c_uint,
+ ty: Type,
+ alignment: u32,
+};
+
/// Take into account 0 bit fields and padding. Returns null if an llvm
/// field could not be found.
/// This only happens if you want the field index of a zero sized field at
/// the end of the struct.
-fn llvmFieldIndex(
- ty: Type,
- field_index: usize,
- mod: *Module,
- ptr_pl_buf: *Type.Payload.Pointer,
-) ?c_uint {
+fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField {
// Detects where we inserted extra padding fields so that we can skip
// over them in this function.
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
var big_align: u32 = 0;
- if (ty.isSimpleTupleOrAnonStruct()) {
- const tuple = ty.tupleFields();
- var llvm_field_index: c_uint = 0;
- for (tuple.types, 0..) |field_ty, i| {
- if (tuple.values[i].ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue;
+ const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .anon_struct_type => |tuple| {
+ var llvm_field_index: c_uint = 0;
+ for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
+ if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
- const field_align = field_ty.abiAlignment(mod);
- big_align = @max(big_align, field_align);
- const prev_offset = offset;
- offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ const field_align = field_ty.toType().abiAlignment(mod);
+ big_align = @max(big_align, field_align);
+ const prev_offset = offset;
+ offset = std.mem.alignForwardGeneric(u64, offset, field_align);
- const padding_len = offset - prev_offset;
- if (padding_len > 0) {
- llvm_field_index += 1;
- }
+ const padding_len = offset - prev_offset;
+ if (padding_len > 0) {
+ llvm_field_index += 1;
+ }
- if (field_index <= i) {
- ptr_pl_buf.* = .{
- .data = .{
- .pointee_type = field_ty,
- .@"align" = field_align,
- .@"addrspace" = .generic,
- },
- };
- return llvm_field_index;
- }
+ if (field_index <= i) {
+ return .{
+ .index = llvm_field_index,
+ .ty = field_ty.toType(),
+ .alignment = field_align,
+ };
+ }
- llvm_field_index += 1;
- offset += field_ty.abiSize(mod);
- }
- return null;
- }
- const layout = ty.containerLayout(mod);
+ llvm_field_index += 1;
+ offset += field_ty.toType().abiSize(mod);
+ }
+ return null;
+ },
+ .struct_type => |s| s,
+ else => unreachable,
+ };
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ const layout = struct_obj.layout;
assert(layout != .Packed);
var llvm_field_index: c_uint = 0;
- var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod);
+ var it = struct_obj.runtimeFieldIterator(mod);
while (it.next()) |field_and_index| {
const field = field_and_index.field;
const field_align = field.alignment(mod, layout);
@@ -10523,14 +10531,11 @@ fn llvmFieldIndex(
}
if (field_index == field_and_index.index) {
- ptr_pl_buf.* = .{
- .data = .{
- .pointee_type = field.ty,
- .@"align" = field_align,
- .@"addrspace" = .generic,
- },
+ return .{
+ .index = llvm_field_index,
+ .ty = field.ty,
+ .alignment = field_align,
};
- return llvm_field_index;
}
llvm_field_index += 1;
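Note: the returned `LlvmField` bundles what callers previously reconstructed through a `Type.Payload.Pointer` out-parameter. A typical call site now reads (sketch assembled from the call sites in this diff):

    if (llvmField(struct_ty, field_index, mod)) |field| {
        const ptr = self.builder.buildStructGEP(struct_llvm_ty, base, field.index, "");
        // field.ty and field.alignment replace the old ptr_pl_buf payload.
        _ = ptr;
    }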
@@ -11089,21 +11094,24 @@ fn isByRef(ty: Type, mod: *Module) bool {
.Struct => {
// Packed structs are represented to LLVM as integers.
if (ty.containerLayout(mod) == .Packed) return false;
- if (ty.isSimpleTupleOrAnonStruct()) {
- const tuple = ty.tupleFields();
- var count: usize = 0;
- for (tuple.values, 0..) |field_val, i| {
- if (field_val.ip_index != .unreachable_value or !tuple.types[i].hasRuntimeBits(mod)) continue;
-
- count += 1;
- if (count > max_fields_byval) return true;
- if (isByRef(tuple.types[i], mod)) return true;
- }
- return false;
- }
+ const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .anon_struct_type => |tuple| {
+ var count: usize = 0;
+ for (tuple.types, tuple.values) |field_ty, field_val| {
+ if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
+
+ count += 1;
+ if (count > max_fields_byval) return true;
+ if (isByRef(field_ty.toType(), mod)) return true;
+ }
+ return false;
+ },
+ .struct_type => |s| s,
+ else => unreachable,
+ };
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
var count: usize = 0;
- const fields = ty.structFields(mod);
- for (fields.values()) |field| {
+ for (struct_obj.fields.values()) |field| {
if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
count += 1;
src/codegen/spirv.zig
@@ -682,7 +682,7 @@ pub const DeclGen = struct {
else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}),
},
.Struct => {
- if (ty.isSimpleTupleOrAnonStruct()) {
+ if (ty.isSimpleTupleOrAnonStruct(mod)) {
unreachable; // TODO
} else {
const struct_ty = mod.typeToStruct(ty).?;
@@ -1319,7 +1319,8 @@ pub const DeclGen = struct {
defer self.gpa.free(member_names);
var member_index: usize = 0;
- for (struct_ty.fields.values(), 0..) |field, i| {
+ const struct_obj = void; // TODO
+ for (struct_obj.fields.values(), 0..) |field, i| {
if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
member_types[member_index] = try self.resolveType(field.ty, .indirect);
@@ -1327,7 +1328,7 @@ pub const DeclGen = struct {
member_index += 1;
}
- const name = try struct_ty.getFullyQualifiedName(self.module);
+ const name = try struct_obj.getFullyQualifiedName(self.module);
defer self.module.gpa.free(name);
return try self.spv.resolve(.{ .struct_type = .{
@@ -2090,7 +2091,7 @@ pub const DeclGen = struct {
var i: usize = 0;
while (i < mask_len) : (i += 1) {
- const elem = try mask.elemValue(self.module, i);
+ const elem = try mask.elemValue(mod, i);
if (elem.isUndef(mod)) {
self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF);
} else {
@@ -2805,7 +2806,7 @@ pub const DeclGen = struct {
const value = try self.resolve(bin_op.rhs);
const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
- const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false;
+ const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false;
if (val_is_undef) {
const undef = try self.spv.constUndef(ptr_ty_ref);
try self.store(ptr_ty, ptr, undef);
src/link/Dwarf.zig
@@ -333,13 +333,12 @@ pub const DeclState = struct {
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
- switch (ty.tag()) {
- .tuple, .anon_struct => {
+ switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .anon_struct_type => |fields| {
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
- const fields = ty.tupleFields();
- for (fields.types, 0..) |field, field_index| {
+ for (fields.types, 0..) |field_ty, field_index| {
// DW.AT.member
try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member));
// DW.AT.name, DW.FORM.string
@@ -347,28 +346,30 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom_index, field, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.udata
const field_off = ty.structFieldOffset(field_index, mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
},
- else => {
+ .struct_type => |struct_type| s: {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s;
// DW.AT.name, DW.FORM.string
const struct_name = try ty.nameAllocArena(arena, mod);
try dbg_info_buffer.ensureUnusedCapacity(struct_name.len + 1);
dbg_info_buffer.appendSliceAssumeCapacity(struct_name);
dbg_info_buffer.appendAssumeCapacity(0);
- const struct_obj = mod.typeToStruct(ty).?;
if (struct_obj.layout == .Packed) {
log.debug("TODO implement .debug_info for packed structs", .{});
break :blk;
}
- const fields = ty.structFields(mod);
- for (fields.keys(), 0..) |field_name, field_index| {
- const field = fields.get(field_name).?;
+ for (
+ struct_obj.fields.keys(),
+ struct_obj.fields.values(),
+ 0..,
+ ) |field_name, field, field_index| {
if (!field.ty.hasRuntimeBits(mod)) continue;
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2);
@@ -385,6 +386,7 @@ pub const DeclState = struct {
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
},
+ else => unreachable,
}
// DW.AT.structure_type delimit children
src/InternPool.zig
@@ -137,9 +137,14 @@ pub const Key = union(enum) {
payload_type: Index,
},
simple_type: SimpleType,
- /// If `empty_struct_type` is handled separately, then this value may be
- /// safely assumed to never be `none`.
+ /// This represents a struct that has been explicitly declared in source code,
+ /// or was created with `@Type`. It is unique and based on a declaration.
+ /// It may be a tuple, if declared like this: `struct {A, B, C}`.
struct_type: StructType,
+ /// This is an anonymous struct or tuple type which has no corresponding
+ /// declaration. It is used for types that have no `struct` keyword in the
+ /// source code, and were not created via `@Type`.
+ anon_struct_type: AnonStructType,
union_type: UnionType,
opaque_type: OpaqueType,
enum_type: EnumType,
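Note: a quick illustration of the split these comments describe, assuming the usual mapping (the exact Key produced for each case is inferred from the comments and the `empty_struct_type` change below):

    const T1 = struct { u8, u16 };          // declared tuple: Key.struct_type
    const T2 = @TypeOf(.{ @as(u8, 1), 2 }); // anonymous tuple: Key.anon_struct_type
    const T3 = @TypeOf(.{});                // empty_struct_type, now also anon_struct_type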
@@ -168,7 +173,7 @@ pub const Key = union(enum) {
/// Each element/field stored as an `Index`.
/// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
/// so the slice length will be one more than the type's array length.
- aggregate: Aggregate,
+ aggregate: Key.Aggregate,
/// An instance of a union.
un: Union,
@@ -222,22 +227,25 @@ pub const Key = union(enum) {
namespace: Module.Namespace.Index,
};
- /// There are three possibilities here:
- /// * `@TypeOf(.{})` (untyped empty struct literal)
- /// - namespace == .none, index == .none
- /// * A struct which has a namepace, but no fields.
- /// - index == .none
- /// * A struct which has fields as well as a namepace.
pub const StructType = struct {
- /// The `none` tag is used to represent two cases:
- /// * `@TypeOf(.{})`, in which case `namespace` will also be `none`.
- /// * A struct with no fields, in which case `namespace` will be populated.
+ /// The `none` tag is used to represent a struct with no fields.
index: Module.Struct.OptionalIndex,
- /// This will be `none` only in the case of `@TypeOf(.{})`
- /// (`Index.empty_struct_type`).
+ /// May be `none` if the struct has no declarations.
namespace: Module.Namespace.OptionalIndex,
};
+ pub const AnonStructType = struct {
+ types: []const Index,
+ /// This may be empty, indicating this is a tuple.
+ names: []const NullTerminatedString,
+ /// These elements may be `none`, indicating runtime-known.
+ values: []const Index,
+
+ pub fn isTuple(self: AnonStructType) bool {
+ return self.names.len == 0;
+ }
+ };
+
pub const UnionType = struct {
index: Module.Union.Index,
runtime_tag: RuntimeTag,
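Note: since `names.len == 0` doubles as the tuple discriminator, consumers can branch on `isTuple` like this (hypothetical call site):

    switch (mod.intern_pool.indexToKey(ty.ip_index)) {
        .anon_struct_type => |info| if (info.isTuple()) {
            // fields are positional: f0, f1, ...
        } else {
            // fields carry interned names: info.names[i]
        },
        else => {},
    }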
@@ -498,6 +506,12 @@ pub const Key = union(enum) {
std.hash.autoHash(hasher, aggregate.ty);
for (aggregate.fields) |field| std.hash.autoHash(hasher, field);
},
+
+ .anon_struct_type => |anon_struct_type| {
+ for (anon_struct_type.types) |elem| std.hash.autoHash(hasher, elem);
+ for (anon_struct_type.values) |elem| std.hash.autoHash(hasher, elem);
+ for (anon_struct_type.names) |elem| std.hash.autoHash(hasher, elem);
+ },
}
}
@@ -650,6 +664,12 @@ pub const Key = union(enum) {
if (a_info.ty != b_info.ty) return false;
return std.mem.eql(Index, a_info.fields, b_info.fields);
},
+ .anon_struct_type => |a_info| {
+ const b_info = b.anon_struct_type;
+ return std.mem.eql(Index, a_info.types, b_info.types) and
+ std.mem.eql(Index, a_info.values, b_info.values) and
+ std.mem.eql(NullTerminatedString, a_info.names, b_info.names);
+ },
}
}
@@ -666,6 +686,7 @@ pub const Key = union(enum) {
.union_type,
.opaque_type,
.enum_type,
+ .anon_struct_type,
=> .type_type,
inline .ptr,
@@ -1020,9 +1041,10 @@ pub const static_keys = [_]Key{
.{ .simple_type = .var_args_param },
// empty_struct_type
- .{ .struct_type = .{
- .namespace = .none,
- .index = .none,
+ .{ .anon_struct_type = .{
+ .types = &.{},
+ .names = &.{},
+ .values = &.{},
} },
.{ .simple_value = .undefined },
@@ -1144,6 +1166,12 @@ pub const Tag = enum(u8) {
/// Module.Struct object allocated for it.
/// data is Module.Namespace.Index.
type_struct_ns,
+ /// An AnonStructType which stores types, names, and values for each field.
+ /// data is extra index of `TypeStructAnon`.
+ type_struct_anon,
+ /// An AnonStructType which has only types and values for each field.
+ /// data is extra index of `TypeStructAnon`.
+ type_tuple_anon,
/// A tagged union type.
/// `data` is `Module.Union.Index`.
type_union_tagged,
@@ -1249,6 +1277,26 @@ pub const Tag = enum(u8) {
only_possible_value,
/// data is extra index to Key.Union.
union_value,
+ /// An instance of a struct, array, or vector.
+ /// data is extra index to `Aggregate`.
+ aggregate,
+};
+
+/// Trailing:
+/// 0. element: Index for each len
+/// len is determined by the aggregate type.
+pub const Aggregate = struct {
+ /// The type of the aggregate.
+ ty: Index,
+};
+
+/// Trailing:
+/// 0. type: Index for each fields_len
+/// 1. value: Index for each fields_len
+/// 2. name: NullTerminatedString for each fields_len
+/// The set of field names is omitted when the `Tag` is `type_tuple_anon`.
+pub const TypeStructAnon = struct {
+ fields_len: u32,
};
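For reference, the `extra` layout this trailing scheme produces for one item (the names block is omitted when the tag is `type_tuple_anon`):

    // extra: [ fields_len | type_0 .. type_n-1 | value_0 .. value_n-1 | name_0 .. name_n-1 ]
    //          ^TypeStructAnon                                          ^type_struct_anon only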
/// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to
@@ -1572,6 +1620,7 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
}
pub fn indexToKey(ip: InternPool, index: Index) Key {
+ assert(index != .none);
const item = ip.items.get(@enumToInt(index));
const data = item.data;
return switch (item.tag) {
@@ -1659,6 +1708,30 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
.namespace = @intToEnum(Module.Namespace.Index, data).toOptional(),
} },
+ .type_struct_anon => {
+ const type_struct_anon = ip.extraDataTrail(TypeStructAnon, data);
+ const fields_len = type_struct_anon.data.fields_len;
+ const types = ip.extra.items[type_struct_anon.end..][0..fields_len];
+ const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len];
+ const names = ip.extra.items[type_struct_anon.end + 2 * fields_len ..][0..fields_len];
+ return .{ .anon_struct_type = .{
+ .types = @ptrCast([]const Index, types),
+ .values = @ptrCast([]const Index, values),
+ .names = @ptrCast([]const NullTerminatedString, names),
+ } };
+ },
+ .type_tuple_anon => {
+ const type_struct_anon = ip.extraDataTrail(TypeStructAnon, data);
+ const fields_len = type_struct_anon.data.fields_len;
+ const types = ip.extra.items[type_struct_anon.end..][0..fields_len];
+ const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len];
+ return .{ .anon_struct_type = .{
+ .types = @ptrCast([]const Index, types),
+ .values = @ptrCast([]const Index, values),
+ .names = &.{},
+ } };
+ },
+
.type_union_untagged => .{ .union_type = .{
.index = @intToEnum(Module.Union.Index, data),
.runtime_tag = .none,
@@ -1797,6 +1870,15 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
else => unreachable,
};
},
+ .aggregate => {
+ const extra = ip.extraDataTrail(Aggregate, data);
+ const len = @intCast(u32, ip.aggregateTypeLen(extra.data.ty));
+ const fields = @ptrCast([]const Index, ip.extra.items[extra.end..][0..len]);
+ return .{ .aggregate = .{
+ .ty = extra.data.ty,
+ .fields = fields,
+ } };
+ },
.union_value => .{ .un = ip.extraData(Key.Union, data) },
.enum_tag => .{ .enum_tag = ip.extraData(Key.EnumTag, data) },
};
@@ -1982,6 +2064,45 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
});
},
+ .anon_struct_type => |anon_struct_type| {
+ assert(anon_struct_type.types.len == anon_struct_type.values.len);
+ for (anon_struct_type.types) |elem| assert(elem != .none);
+
+ const fields_len = @intCast(u32, anon_struct_type.types.len);
+ if (anon_struct_type.names.len == 0) {
+ try ip.extra.ensureUnusedCapacity(
+ gpa,
+ @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 2),
+ );
+ ip.items.appendAssumeCapacity(.{
+ .tag = .type_tuple_anon,
+ .data = ip.addExtraAssumeCapacity(TypeStructAnon{
+ .fields_len = fields_len,
+ }),
+ });
+ ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types));
+ ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values));
+ return @intToEnum(Index, ip.items.len - 1);
+ }
+
+ assert(anon_struct_type.names.len == anon_struct_type.types.len);
+
+ try ip.extra.ensureUnusedCapacity(
+ gpa,
+ @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 3),
+ );
+ ip.items.appendAssumeCapacity(.{
+ .tag = .type_struct_anon,
+ .data = ip.addExtraAssumeCapacity(TypeStructAnon{
+ .fields_len = fields_len,
+ }),
+ });
+ ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types));
+ ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values));
+ ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.names));
+ return @intToEnum(Index, ip.items.len - 1);
+ },
+
.union_type => |union_type| {
ip.items.appendAssumeCapacity(.{
.tag = switch (union_type.runtime_tag) {
@@ -2269,6 +2390,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
},
.aggregate => |aggregate| {
+ assert(aggregate.ty != .none);
+ for (aggregate.fields) |elem| assert(elem != .none);
+ if (aggregate.fields.len != ip.aggregateTypeLen(aggregate.ty)) {
+ std.debug.print("aggregate fields len = {d}, type len = {d}\n", .{
+ aggregate.fields.len,
+ ip.aggregateTypeLen(aggregate.ty),
+ });
+ }
+ assert(aggregate.fields.len == ip.aggregateTypeLen(aggregate.ty));
+
if (aggregate.fields.len == 0) {
ip.items.appendAssumeCapacity(.{
.tag = .only_possible_value,
@@ -2276,7 +2407,19 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
});
return @intToEnum(Index, ip.items.len - 1);
}
- @panic("TODO");
+
+ try ip.extra.ensureUnusedCapacity(
+ gpa,
+ @typeInfo(Aggregate).Struct.fields.len + aggregate.fields.len,
+ );
+
+ ip.items.appendAssumeCapacity(.{
+ .tag = .aggregate,
+ .data = ip.addExtraAssumeCapacity(Aggregate{
+ .ty = aggregate.ty,
+ }),
+ });
+ ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.fields));
},
.un => |un| {
@@ -2913,6 +3056,14 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
.type_opaque => @sizeOf(Key.OpaqueType),
.type_struct => @sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl),
.type_struct_ns => @sizeOf(Module.Namespace),
+ .type_struct_anon => b: {
+ const info = ip.extraData(TypeStructAnon, data);
+ break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len);
+ },
+ .type_tuple_anon => b: {
+ const info = ip.extraData(TypeStructAnon, data);
+ break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len);
+ },
.type_union_tagged,
.type_union_untagged,
@@ -2942,6 +3093,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
},
.enum_tag => @sizeOf(Key.EnumTag),
+ .aggregate => b: {
+ const info = ip.extraData(Aggregate, data);
+ const fields_len = @intCast(u32, ip.aggregateTypeLen(info.ty));
+ break :b @sizeOf(Aggregate) + (@sizeOf(u32) * fields_len);
+ },
+
.float_f16 => 0,
.float_f32 => 0,
.float_f64 => @sizeOf(Float64),
@@ -3079,3 +3236,13 @@ pub fn toEnum(ip: InternPool, comptime E: type, i: Index) E {
const int = ip.indexToKey(i).enum_tag.int;
return @intToEnum(E, ip.indexToKey(int).int.storage.u64);
}
+
+pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 {
+ return switch (ip.indexToKey(ty)) {
+ .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(),
+ .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
+ .array_type => |array_type| array_type.len,
+ .vector_type => |vector_type| vector_type.len,
+ else => unreachable,
+ };
+}
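Note: combined with the new `aggregate` tag, interning an aggregate value now follows this pattern (hedged sketch; `tuple_ty` and `elems` are placeholders that must satisfy the length assert in `get`):

    const val = try mod.intern(.{ .aggregate = .{
        .ty = tuple_ty, // elems.len must equal ip.aggregateTypeLen(tuple_ty)
        .fields = elems,
    } });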
src/Sema.zig
@@ -7896,12 +7896,15 @@ fn resolveGenericInstantiationType(
}
fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
- if (!ty.isSimpleTupleOrAnonStruct()) return;
- const tuple = ty.tupleFields();
- for (tuple.values, 0..) |field_val, i| {
- try sema.resolveTupleLazyValues(block, src, tuple.types[i]);
- if (field_val.ip_index == .unreachable_value) continue;
- try sema.resolveLazyValue(field_val);
+ const mod = sema.mod;
+ const tuple = switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .anon_struct_type => |tuple| tuple,
+ else => return,
+ };
+ for (tuple.types, tuple.values) |field_ty, field_val| {
+ try sema.resolveTupleLazyValues(block, src, field_ty.toType());
+ if (field_val == .none) continue;
+ try sema.resolveLazyValue(field_val.toValue());
}
}
@@ -12038,31 +12041,49 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const unresolved_ty = try sema.resolveType(block, ty_src, extra.lhs);
const field_name = try sema.resolveConstString(block, name_src, extra.rhs, "field name must be comptime-known");
const ty = try sema.resolveTypeFields(unresolved_ty);
+ const ip = &mod.intern_pool;
const has_field = hf: {
- if (ty.isSlice(mod)) {
- if (mem.eql(u8, field_name, "ptr")) break :hf true;
- if (mem.eql(u8, field_name, "len")) break :hf true;
- break :hf false;
- }
- if (ty.castTag(.anon_struct)) |pl| {
- break :hf for (pl.data.names) |name| {
- if (mem.eql(u8, name, field_name)) break true;
- } else false;
- }
- if (ty.isTuple(mod)) {
- const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false;
- break :hf field_index < ty.structFieldCount(mod);
- }
- break :hf switch (ty.zigTypeTag(mod)) {
- .Struct => ty.structFields(mod).contains(field_name),
- .Union => ty.unionFields(mod).contains(field_name),
- .Enum => ty.enumFieldIndex(field_name, mod) != null,
- .Array => mem.eql(u8, field_name, "len"),
- else => return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{
- ty.fmt(sema.mod),
- }),
- };
+ switch (ip.indexToKey(ty.ip_index)) {
+ .ptr_type => |ptr_type| switch (ptr_type.size) {
+ .Slice => {
+ if (mem.eql(u8, field_name, "ptr")) break :hf true;
+ if (mem.eql(u8, field_name, "len")) break :hf true;
+ break :hf false;
+ },
+ else => {},
+ },
+ .anon_struct_type => |anon_struct| {
+ if (anon_struct.names.len != 0) {
+ // If the string is not interned, then the field certainly is not present.
+ const name_interned = ip.getString(field_name).unwrap() orelse break :hf false;
+ break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names, name_interned) != null;
+ } else {
+ const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false;
+ break :hf field_index < ty.structFieldCount(mod);
+ }
+ },
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :hf false;
+ assert(struct_obj.haveFieldTypes());
+ break :hf struct_obj.fields.contains(field_name);
+ },
+ .union_type => |union_type| {
+ const union_obj = mod.unionPtr(union_type.index);
+ assert(union_obj.haveFieldTypes());
+ break :hf union_obj.fields.contains(field_name);
+ },
+ .enum_type => |enum_type| {
+ // If the string is not interned, then the field certainly is not present.
+ const name_interned = ip.getString(field_name).unwrap() orelse break :hf false;
+ break :hf enum_type.nameIndex(ip, name_interned) != null;
+ },
+ .array_type => break :hf mem.eql(u8, field_name, "len"),
+ else => {},
+ }
+ return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{
+ ty.fmt(sema.mod),
+ });
};
if (has_field) {
return Air.Inst.Ref.bool_true;
@@ -12632,42 +12653,48 @@ fn analyzeTupleCat(
}
const final_len = try sema.usizeCast(block, rhs_src, dest_fields);
- const types = try sema.arena.alloc(Type, final_len);
- const values = try sema.arena.alloc(Value, final_len);
+ const types = try sema.arena.alloc(InternPool.Index, final_len);
+ const values = try sema.arena.alloc(InternPool.Index, final_len);
const opt_runtime_src = rs: {
var runtime_src: ?LazySrcLoc = null;
var i: u32 = 0;
while (i < lhs_len) : (i += 1) {
- types[i] = lhs_ty.structFieldType(i, mod);
+ types[i] = lhs_ty.structFieldType(i, mod).ip_index;
const default_val = lhs_ty.structFieldDefaultValue(i, mod);
- values[i] = default_val;
+ values[i] = default_val.ip_index;
const operand_src = lhs_src; // TODO better source location
if (default_val.ip_index == .unreachable_value) {
runtime_src = operand_src;
+ values[i] = .none;
}
}
i = 0;
while (i < rhs_len) : (i += 1) {
- types[i + lhs_len] = rhs_ty.structFieldType(i, mod);
+ types[i + lhs_len] = rhs_ty.structFieldType(i, mod).ip_index;
const default_val = rhs_ty.structFieldDefaultValue(i, mod);
- values[i + lhs_len] = default_val;
+ values[i + lhs_len] = default_val.ip_index;
const operand_src = rhs_src; // TODO better source location
if (default_val.ip_index == .unreachable_value) {
runtime_src = operand_src;
+ values[i + lhs_len] = .none;
}
}
break :rs runtime_src;
};
- const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{
+ const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
.types = types,
.values = values,
- });
+ .names = &.{},
+ } });
const runtime_src = opt_runtime_src orelse {
- const tuple_val = try Value.Tag.aggregate.create(sema.arena, values);
- return sema.addConstant(tuple_ty, tuple_val);
+ const tuple_val = try mod.intern(.{ .aggregate = .{
+ .ty = tuple_ty,
+ .fields = values,
+ } });
+ return sema.addConstant(tuple_ty.toType(), tuple_val.toValue());
};
try sema.requireRuntimeBlock(block, src, runtime_src);
@@ -12685,7 +12712,7 @@ fn analyzeTupleCat(
try sema.tupleFieldValByIndex(block, operand_src, rhs, i, rhs_ty);
}
- return block.addAggregateInit(tuple_ty, element_refs);
+ return block.addAggregateInit(tuple_ty.toType(), element_refs);
}
fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -12938,7 +12965,7 @@ fn analyzeTupleMul(
block: *Block,
src_node: i32,
operand: Air.Inst.Ref,
- factor: u64,
+ factor: usize,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const operand_ty = sema.typeOf(operand);
@@ -12947,44 +12974,45 @@ fn analyzeTupleMul(
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node };
const tuple_len = operand_ty.structFieldCount(mod);
- const final_len_u64 = std.math.mul(u64, tuple_len, factor) catch
+ const final_len = std.math.mul(usize, tuple_len, factor) catch
return sema.fail(block, rhs_src, "operation results in overflow", .{});
- if (final_len_u64 == 0) {
+ if (final_len == 0) {
return sema.addConstant(Type.empty_struct_literal, Value.empty_struct);
}
- const final_len = try sema.usizeCast(block, rhs_src, final_len_u64);
-
- const types = try sema.arena.alloc(Type, final_len);
- const values = try sema.arena.alloc(Value, final_len);
+ const types = try sema.arena.alloc(InternPool.Index, final_len);
+ const values = try sema.arena.alloc(InternPool.Index, final_len);
const opt_runtime_src = rs: {
var runtime_src: ?LazySrcLoc = null;
- var i: u32 = 0;
- while (i < tuple_len) : (i += 1) {
- types[i] = operand_ty.structFieldType(i, mod);
- values[i] = operand_ty.structFieldDefaultValue(i, mod);
+ for (0..tuple_len) |i| {
+ types[i] = operand_ty.structFieldType(i, mod).ip_index;
+ values[i] = operand_ty.structFieldDefaultValue(i, mod).ip_index;
const operand_src = lhs_src; // TODO better source location
- if (values[i].ip_index == .unreachable_value) {
+ if (values[i] == .unreachable_value) {
runtime_src = operand_src;
+ values[i] = .none; // TODO don't treat unreachable_value as special
}
}
- i = 0;
- while (i < factor) : (i += 1) {
- mem.copyForwards(Type, types[tuple_len * i ..], types[0..tuple_len]);
- mem.copyForwards(Value, values[tuple_len * i ..], values[0..tuple_len]);
+ for (0..factor) |i| {
+ mem.copyForwards(InternPool.Index, types[tuple_len * i ..], types[0..tuple_len]);
+ mem.copyForwards(InternPool.Index, values[tuple_len * i ..], values[0..tuple_len]);
}
break :rs runtime_src;
};
- const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{
+ const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
.types = types,
.values = values,
- });
+ .names = &.{},
+ } });
const runtime_src = opt_runtime_src orelse {
- const tuple_val = try Value.Tag.aggregate.create(sema.arena, values);
- return sema.addConstant(tuple_ty, tuple_val);
+ const tuple_val = try mod.intern(.{ .aggregate = .{
+ .ty = tuple_ty,
+ .fields = values,
+ } });
+ return sema.addConstant(tuple_ty.toType(), tuple_val.toValue());
};
try sema.requireRuntimeBlock(block, src, runtime_src);
@@ -13000,7 +13028,7 @@ fn analyzeTupleMul(
@memcpy(element_refs[tuple_len * i ..][0..tuple_len], element_refs[0..tuple_len]);
}
- return block.addAggregateInit(tuple_ty, element_refs);
+ return block.addAggregateInit(tuple_ty.toType(), element_refs);
}
fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -13020,7 +13048,8 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
if (lhs_ty.isTuple(mod)) {
// In `**` rhs must be comptime-known, but lhs can be runtime-known
const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, "array multiplication factor must be comptime-known");
- return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor);
+ const factor_casted = try sema.usizeCast(block, rhs_src, factor);
+ return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor_casted);
}
// Analyze the lhs first, to catch the case that someone tried to do exponentiation
@@ -14533,19 +14562,14 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
.child = .u1_type,
}) else Type.u1;
- const types = try sema.arena.alloc(Type, 2);
- const values = try sema.arena.alloc(Value, 2);
- const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{
- .types = types,
- .values = values,
- });
-
- types[0] = ty;
- types[1] = ov_ty;
- values[0] = Value.@"unreachable";
- values[1] = Value.@"unreachable";
-
- return tuple_ty;
+ const types = [2]InternPool.Index{ ty.ip_index, ov_ty.ip_index };
+ const values = [2]InternPool.Index{ .none, .none };
+ const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
+ .types = &types,
+ .values = &values,
+ .names = &.{},
+ } });
+ return tuple_ty.toType();
}
fn analyzeArithmetic(
@@ -16506,57 +16530,66 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const layout = struct_ty.containerLayout(mod);
const struct_field_vals = fv: {
- if (struct_ty.isSimpleTupleOrAnonStruct()) {
- const tuple = struct_ty.tupleFields();
- const field_types = tuple.types;
- const struct_field_vals = try fields_anon_decl.arena().alloc(Value, field_types.len);
- for (struct_field_vals, 0..) |*struct_field_val, i| {
- const field_ty = field_types[i];
- const name_val = v: {
- var anon_decl = try block.startAnonDecl();
- defer anon_decl.deinit();
- const bytes = if (struct_ty.castTag(.anon_struct)) |payload|
- try anon_decl.arena().dupeZ(u8, payload.data.names[i])
- else
- try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i});
- const new_decl = try anon_decl.finish(
- try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod),
- try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
- 0, // default alignment
- );
- break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{
- .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl),
- .len = try mod.intValue(Type.usize, bytes.len),
- });
- };
-
- const struct_field_fields = try fields_anon_decl.arena().create([5]Value);
- const field_val = tuple.values[i];
- const is_comptime = field_val.ip_index != .unreachable_value;
- const opt_default_val = if (is_comptime) field_val else null;
- const default_val_ptr = try sema.optRefValue(block, field_ty, opt_default_val);
- struct_field_fields.* = .{
- // name: []const u8,
- name_val,
- // type: type,
- try Value.Tag.ty.create(fields_anon_decl.arena(), field_ty),
- // default_value: ?*const anyopaque,
- try default_val_ptr.copy(fields_anon_decl.arena()),
- // is_comptime: bool,
- Value.makeBool(is_comptime),
- // alignment: comptime_int,
- try field_ty.lazyAbiAlignment(mod, fields_anon_decl.arena()),
- };
- struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields);
- }
- break :fv struct_field_vals;
- }
- const struct_fields = struct_ty.structFields(mod);
- const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_fields.count());
+ const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
+ .anon_struct_type => |tuple| {
+ const struct_field_vals = try fields_anon_decl.arena().alloc(Value, tuple.types.len);
+ for (
+ tuple.types,
+ tuple.values,
+ struct_field_vals,
+ 0..,
+ ) |field_ty, field_val, *struct_field_val, i| {
+ const name_val = v: {
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+ const bytes = if (tuple.names.len != 0)
+ // https://github.com/ziglang/zig/issues/15709
+ @as([]const u8, mod.intern_pool.stringToSlice(tuple.names[i]))
+ else
+ try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i});
+ const new_decl = try anon_decl.finish(
+ try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod),
+ try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
+ 0, // default alignment
+ );
+ break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{
+ .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl),
+ .len = try mod.intValue(Type.usize, bytes.len),
+ });
+ };
- for (struct_field_vals, 0..) |*field_val, i| {
- const field = struct_fields.values()[i];
- const name = struct_fields.keys()[i];
+ const struct_field_fields = try fields_anon_decl.arena().create([5]Value);
+ const is_comptime = field_val != .none;
+ const opt_default_val = if (is_comptime) field_val.toValue() else null;
+ const default_val_ptr = try sema.optRefValue(block, field_ty.toType(), opt_default_val);
+ struct_field_fields.* = .{
+ // name: []const u8,
+ name_val,
+ // type: type,
+ field_ty.toValue(),
+ // default_value: ?*const anyopaque,
+ try default_val_ptr.copy(fields_anon_decl.arena()),
+ // is_comptime: bool,
+ Value.makeBool(is_comptime),
+ // alignment: comptime_int,
+ try field_ty.toType().lazyAbiAlignment(mod, fields_anon_decl.arena()),
+ };
+ struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields);
+ }
+ break :fv struct_field_vals;
+ },
+ .struct_type => |s| s,
+ else => unreachable,
+ };
+ const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
+ break :fv &[0]Value{};
+ const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_obj.fields.count());
+
+ for (
+ struct_field_vals,
+ struct_obj.fields.keys(),
+ struct_obj.fields.values(),
+ ) |*field_val, name, field| {
const name_val = v: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
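Note: the five values assembled per field above are the members of `std.builtin.Type.StructField`. A sketch of what user code observes (the literal is a hypothetical example):

    const T = @TypeOf(.{ .x = @as(u32, 1) });
    inline for (@typeInfo(T).Struct.fields) |f| {
        // f.name: []const u8, f.type: type, f.default_value: ?*const anyopaque,
        // f.is_comptime: bool, f.alignment: comptime_int
        _ = f;
    }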
@@ -18013,7 +18046,7 @@ fn zirStructInit(
try sema.requireRuntimeBlock(block, src, null);
try sema.queueFullTypeResolution(resolved_ty);
return block.addUnionInit(resolved_ty, field_index, init_inst);
- } else if (resolved_ty.isAnonStruct()) {
+ } else if (resolved_ty.isAnonStruct(mod)) {
return sema.fail(block, src, "TODO anon struct init validation", .{});
}
unreachable;
@@ -18034,60 +18067,54 @@ fn finishStructInit(
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
- if (struct_ty.isAnonStruct()) {
- const struct_obj = struct_ty.castTag(.anon_struct).?.data;
- for (struct_obj.values, 0..) |default_val, i| {
- if (field_inits[i] != .none) continue;
-
- if (default_val.ip_index == .unreachable_value) {
- const field_name = struct_obj.names[i];
- const template = "missing struct field: {s}";
- const args = .{field_name};
- if (root_msg) |msg| {
- try sema.errNote(block, init_src, msg, template, args);
- } else {
- root_msg = try sema.errMsg(block, init_src, template, args);
- }
- } else {
- field_inits[i] = try sema.addConstant(struct_obj.types[i], default_val);
- }
- }
- } else if (struct_ty.isTuple(mod)) {
- var i: u32 = 0;
- const len = struct_ty.structFieldCount(mod);
- while (i < len) : (i += 1) {
- if (field_inits[i] != .none) continue;
+ switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
+ .anon_struct_type => |anon_struct| {
+ for (anon_struct.types, anon_struct.values, 0..) |field_ty, default_val, i| {
+ if (field_inits[i] != .none) continue;
- const default_val = struct_ty.structFieldDefaultValue(i, mod);
- if (default_val.ip_index == .unreachable_value) {
- const template = "missing tuple field with index {d}";
- if (root_msg) |msg| {
- try sema.errNote(block, init_src, msg, template, .{i});
+ if (default_val == .none) {
+ if (anon_struct.names.len == 0) {
+ const template = "missing tuple field with index {d}";
+ if (root_msg) |msg| {
+ try sema.errNote(block, init_src, msg, template, .{i});
+ } else {
+ root_msg = try sema.errMsg(block, init_src, template, .{i});
+ }
+ } else {
+ const field_name = mod.intern_pool.stringToSlice(anon_struct.names[i]);
+ const template = "missing struct field: {s}";
+ const args = .{field_name};
+ if (root_msg) |msg| {
+ try sema.errNote(block, init_src, msg, template, args);
+ } else {
+ root_msg = try sema.errMsg(block, init_src, template, args);
+ }
+ }
} else {
- root_msg = try sema.errMsg(block, init_src, template, .{i});
+ field_inits[i] = try sema.addConstant(field_ty.toType(), default_val.toValue());
}
- } else {
- field_inits[i] = try sema.addConstant(struct_ty.structFieldType(i, mod), default_val);
}
- }
- } else {
- const struct_obj = mod.typeToStruct(struct_ty).?;
- for (struct_obj.fields.values(), 0..) |field, i| {
- if (field_inits[i] != .none) continue;
+ },
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ for (struct_obj.fields.values(), 0..) |field, i| {
+ if (field_inits[i] != .none) continue;
- if (field.default_val.ip_index == .unreachable_value) {
- const field_name = struct_obj.fields.keys()[i];
- const template = "missing struct field: {s}";
- const args = .{field_name};
- if (root_msg) |msg| {
- try sema.errNote(block, init_src, msg, template, args);
+ if (field.default_val.ip_index == .unreachable_value) {
+ const field_name = struct_obj.fields.keys()[i];
+ const template = "missing struct field: {s}";
+ const args = .{field_name};
+ if (root_msg) |msg| {
+ try sema.errNote(block, init_src, msg, template, args);
+ } else {
+ root_msg = try sema.errMsg(block, init_src, template, args);
+ }
} else {
- root_msg = try sema.errMsg(block, init_src, template, args);
+ field_inits[i] = try sema.addConstant(field.ty, field.default_val);
}
- } else {
- field_inits[i] = try sema.addConstant(field.ty, field.default_val);
}
- }
+ },
+ else => unreachable,
}
if (root_msg) |msg| {
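Note: the prongs above emit the user-facing diagnostics for uninitialized fields; a sketch of what triggers them (hypothetical types):

    const S = struct { a: u32, b: u32 = 7 };
    const ok: S = .{ .a = 1 };      // .b is filled in from its default
    // const bad: S = .{ .b = 2 };  // error: missing struct field: a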
@@ -18159,31 +18186,33 @@ fn zirStructInitAnon(
is_ref: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
+ const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index);
- const types = try sema.arena.alloc(Type, extra.data.fields_len);
- const values = try sema.arena.alloc(Value, types.len);
- var fields = std.StringArrayHashMapUnmanaged(u32){};
- defer fields.deinit(sema.gpa);
- try fields.ensureUnusedCapacity(sema.gpa, types.len);
+ const types = try sema.arena.alloc(InternPool.Index, extra.data.fields_len);
+ const values = try sema.arena.alloc(InternPool.Index, types.len);
+ var fields = std.AutoArrayHashMap(InternPool.NullTerminatedString, u32).init(sema.arena);
+ try fields.ensureUnusedCapacity(types.len);
// Find which field forces the expression to be runtime, if any.
const opt_runtime_index = rs: {
var runtime_index: ?usize = null;
var extra_index = extra.end;
- for (types, 0..) |*field_ty, i| {
+ for (types, 0..) |*field_ty, i_usize| {
+ const i = @intCast(u32, i_usize);
const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index);
extra_index = item.end;
const name = sema.code.nullTerminatedString(item.data.field_name);
- const gop = fields.getOrPutAssumeCapacity(name);
+ const name_ip = try mod.intern_pool.getOrPutString(gpa, name);
+ const gop = fields.getOrPutAssumeCapacity(name_ip);
if (gop.found_existing) {
const msg = msg: {
const decl = sema.mod.declPtr(block.src_decl);
const field_src = mod.initSrc(src.node_offset.x, decl, i);
const msg = try sema.errMsg(block, field_src, "duplicate field", .{});
- errdefer msg.destroy(sema.gpa);
+ errdefer msg.destroy(gpa);
const prev_source = mod.initSrc(src.node_offset.x, decl, gop.value_ptr.*);
try sema.errNote(block, prev_source, msg, "other field here", .{});
@@ -18191,41 +18220,44 @@ fn zirStructInitAnon(
};
return sema.failWithOwnedErrorMsg(msg);
}
- gop.value_ptr.* = @intCast(u32, i);
+ gop.value_ptr.* = i;
const init = try sema.resolveInst(item.data.init);
- field_ty.* = sema.typeOf(init);
- if (types[i].zigTypeTag(mod) == .Opaque) {
+ field_ty.* = sema.typeOf(init).ip_index;
+ if (types[i].toType().zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const decl = sema.mod.declPtr(block.src_decl);
const field_src = mod.initSrc(src.node_offset.x, decl, i);
const msg = try sema.errMsg(block, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(sema.gpa);
- try sema.addDeclaredHereNote(msg, types[i]);
+ try sema.addDeclaredHereNote(msg, types[i].toType());
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (try sema.resolveMaybeUndefVal(init)) |init_val| {
- values[i] = init_val;
+ values[i] = init_val.ip_index;
} else {
- values[i] = Value.@"unreachable";
+ values[i] = .none;
runtime_index = i;
}
}
break :rs runtime_index;
};
- const tuple_ty = try Type.Tag.anon_struct.create(sema.arena, .{
- .names = try sema.arena.dupe([]const u8, fields.keys()),
+ const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
+ .names = fields.keys(),
.types = types,
.values = values,
- });
+ } });
const runtime_index = opt_runtime_index orelse {
- const tuple_val = try Value.Tag.aggregate.create(sema.arena, values);
- return sema.addConstantMaybeRef(block, tuple_ty, tuple_val, is_ref);
+ const tuple_val = try mod.intern(.{ .aggregate = .{
+ .ty = tuple_ty,
+ .fields = values,
+ } });
+ return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref);
};
sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) {
@@ -18241,7 +18273,7 @@ fn zirStructInitAnon(
if (is_ref) {
const target = sema.mod.getTarget();
const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
- .pointee_type = tuple_ty,
+ .pointee_type = tuple_ty.toType(),
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
const alloc = try block.addTy(.alloc, alloc_ty);
@@ -18254,9 +18286,9 @@ fn zirStructInitAnon(
const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.mutable = true,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
- .pointee_type = field_ty,
+ .pointee_type = field_ty.toType(),
});
- if (values[i].ip_index == .unreachable_value) {
+ if (values[i] == .none) {
const init = try sema.resolveInst(item.data.init);
const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty);
_ = try block.addBinOp(.store, field_ptr, init);
@@ -18274,7 +18306,7 @@ fn zirStructInitAnon(
element_refs[i] = try sema.resolveInst(item.data.init);
}
- return block.addAggregateInit(tuple_ty, element_refs);
+ return block.addAggregateInit(tuple_ty.toType(), element_refs);
}
fn zirArrayInit(
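Note: `zirStructInitAnon` is the lowering of an untyped struct literal; with this change its field names are interned as `InternPool.NullTerminatedString`. Sketch:

    const p = .{ .x = 1, .y = 2.0 };    // anon_struct_type with names "x", "y"
    // const dup = .{ .x = 1, .x = 2 }; // error: duplicate field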
@@ -18400,43 +18432,47 @@ fn zirArrayInitAnon(
const operands = sema.code.refSlice(extra.end, extra.data.operands_len);
const mod = sema.mod;
- const types = try sema.arena.alloc(Type, operands.len);
- const values = try sema.arena.alloc(Value, operands.len);
+ const types = try sema.arena.alloc(InternPool.Index, operands.len);
+ const values = try sema.arena.alloc(InternPool.Index, operands.len);
const opt_runtime_src = rs: {
var runtime_src: ?LazySrcLoc = null;
for (operands, 0..) |operand, i| {
const operand_src = src; // TODO better source location
const elem = try sema.resolveInst(operand);
- types[i] = sema.typeOf(elem);
- if (types[i].zigTypeTag(mod) == .Opaque) {
+ types[i] = sema.typeOf(elem).ip_index;
+ if (types[i].toType().zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(sema.gpa);
- try sema.addDeclaredHereNote(msg, types[i]);
+ try sema.addDeclaredHereNote(msg, types[i].toType());
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (try sema.resolveMaybeUndefVal(elem)) |val| {
- values[i] = val;
+ values[i] = val.ip_index;
} else {
- values[i] = Value.@"unreachable";
+ values[i] = .none;
runtime_src = operand_src;
}
}
break :rs runtime_src;
};
- const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{
+ const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
.types = types,
.values = values,
- });
+ .names = &.{},
+ } });
const runtime_src = opt_runtime_src orelse {
- const tuple_val = try Value.Tag.aggregate.create(sema.arena, values);
- return sema.addConstantMaybeRef(block, tuple_ty, tuple_val, is_ref);
+ const tuple_val = try mod.intern(.{ .aggregate = .{
+ .ty = tuple_ty,
+ .fields = values,
+ } });
+ return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref);
};
try sema.requireRuntimeBlock(block, src, runtime_src);
@@ -18444,7 +18480,7 @@ fn zirArrayInitAnon(
if (is_ref) {
const target = sema.mod.getTarget();
const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
- .pointee_type = tuple_ty,
+ .pointee_type = tuple_ty.toType(),
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
});
const alloc = try block.addTy(.alloc, alloc_ty);
@@ -18453,9 +18489,9 @@ fn zirArrayInitAnon(
const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
.mutable = true,
.@"addrspace" = target_util.defaultAddressSpace(target, .local),
- .pointee_type = types[i],
+ .pointee_type = types[i].toType(),
});
- if (values[i].ip_index == .unreachable_value) {
+ if (values[i] == .none) {
const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty);
_ = try block.addBinOp(.store, field_ptr, try sema.resolveInst(operand));
}
@@ -18469,7 +18505,7 @@ fn zirArrayInitAnon(
element_refs[i] = try sema.resolveInst(operand);
}
- return block.addAggregateInit(tuple_ty, element_refs);
+ return block.addAggregateInit(tuple_ty.toType(), element_refs);
}
fn addConstantMaybeRef(
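Note: `zirArrayInitAnon` is the lowering of an untyped list literal, which becomes a tuple, i.e. an `anon_struct_type` with `names.len == 0`. Sketch:

    const t = .{ @as(u32, 1), "two", 3.0 };
    // @TypeOf(t) is a tuple of u32, *const [3:0]u8, comptime_float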
@@ -18532,15 +18568,18 @@ fn fieldType(
const resolved_ty = try sema.resolveTypeFields(cur_ty);
cur_ty = resolved_ty;
switch (cur_ty.zigTypeTag(mod)) {
- .Struct => {
- if (cur_ty.isAnonStruct()) {
+ .Struct => switch (mod.intern_pool.indexToKey(cur_ty.ip_index)) {
+ .anon_struct_type => |anon_struct| {
const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src);
- return sema.addType(cur_ty.tupleFields().types[field_index]);
- }
- const struct_obj = mod.typeToStruct(cur_ty).?;
- const field = struct_obj.fields.get(field_name) orelse
- return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
- return sema.addType(field.ty);
+ return sema.addType(anon_struct.types[field_index].toType());
+ },
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ const field = struct_obj.fields.get(field_name) orelse
+ return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
+ return sema.addType(field.ty);
+ },
+ else => unreachable,
},
.Union => {
const union_obj = mod.typeToUnion(cur_ty).?;
@@ -24697,7 +24736,7 @@ fn structFieldPtr(
}
const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src);
return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing);
- } else if (struct_ty.isAnonStruct()) {
+ } else if (struct_ty.isAnonStruct(mod)) {
const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src);
return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing);
}
@@ -24721,11 +24760,11 @@ fn structFieldPtrByIndex(
struct_ty: Type,
initializing: bool,
) CompileError!Air.Inst.Ref {
- if (struct_ty.isAnonStruct()) {
+ const mod = sema.mod;
+ if (struct_ty.isAnonStruct(mod)) {
return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing);
}
- const mod = sema.mod;
const struct_obj = mod.typeToStruct(struct_ty).?;
const field = struct_obj.fields.values()[field_index];
const struct_ptr_ty = sema.typeOf(struct_ptr);
@@ -24830,45 +24869,42 @@ fn structFieldVal(
assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct);
const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty);
- switch (struct_ty.ip_index) {
- .empty_struct_type => return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty),
- .none => switch (struct_ty.tag()) {
- .tuple => return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty),
- .anon_struct => {
- const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src);
- return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty);
- },
- else => unreachable,
- },
- else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
- .struct_type => |struct_type| {
- const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
- if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);
+ switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);
- const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
- return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
- const field_index = @intCast(u32, field_index_usize);
- const field = struct_obj.fields.values()[field_index];
-
- if (field.is_comptime) {
- return sema.addConstant(field.ty, field.default_val);
- }
+ const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
+ return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
+ const field_index = @intCast(u32, field_index_usize);
+ const field = struct_obj.fields.values()[field_index];
- if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| {
- if (struct_val.isUndef(mod)) return sema.addConstUndef(field.ty);
- if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| {
- return sema.addConstant(field.ty, opv);
- }
+ if (field.is_comptime) {
+ return sema.addConstant(field.ty, field.default_val);
+ }
- const field_values = struct_val.castTag(.aggregate).?.data;
- return sema.addConstant(field.ty, field_values[field_index]);
+ if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| {
+ if (struct_val.isUndef(mod)) return sema.addConstUndef(field.ty);
+ if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| {
+ return sema.addConstant(field.ty, opv);
}
- try sema.requireRuntimeBlock(block, src, null);
- return block.addStructFieldVal(struct_byval, field_index, field.ty);
- },
- else => unreachable,
+ const field_values = struct_val.castTag(.aggregate).?.data;
+ return sema.addConstant(field.ty, field_values[field_index]);
+ }
+
+ try sema.requireRuntimeBlock(block, src, null);
+ return block.addStructFieldVal(struct_byval, field_index, field.ty);
+ },
+ .anon_struct_type => |anon_struct| {
+ if (anon_struct.names.len == 0) {
+ return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);
+ } else {
+ const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src);
+ return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty);
+ }
},
+ else => unreachable,
}
}
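Note: the `names.len == 0` check above is how tuples are now distinguished from named anonymous structs. Field-access sketch:

    const tup = .{ 10, 20 };
    const anon = .{ .a = 10 };
    _ = tup[1]; // tuple: positional access (equivalently tup.@"1")
    _ = anon.a; // named anon struct: resolved through anonStructFieldIndex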
@@ -25931,7 +25967,7 @@ fn coerceExtra(
.Union => {
// pointer to anonymous struct to pointer to union
if (inst_ty.isSinglePointer(mod) and
- inst_ty.childType(mod).isAnonStruct() and
+ inst_ty.childType(mod).isAnonStruct(mod) and
sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
{
return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
@@ -25940,7 +25976,7 @@ fn coerceExtra(
.Struct => {
// pointer to anonymous struct to pointer to struct
if (inst_ty.isSinglePointer(mod) and
- inst_ty.childType(mod).isAnonStruct() and
+ inst_ty.childType(mod).isAnonStruct(mod) and
sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
{
return sema.coerceAnonStructToStructPtrs(block, dest_ty, dest_ty_src, inst, inst_src) catch |err| switch (err) {
@@ -26231,7 +26267,7 @@ fn coerceExtra(
.Union => switch (inst_ty.zigTypeTag(mod)) {
.Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src),
.Struct => {
- if (inst_ty.isAnonStruct()) {
+ if (inst_ty.isAnonStruct(mod)) {
return sema.coerceAnonStructToUnion(block, dest_ty, dest_ty_src, inst, inst_src);
}
},
@@ -28771,8 +28807,8 @@ fn coerceAnonStructToUnion(
return sema.failWithOwnedErrorMsg(msg);
}
- const anon_struct = inst_ty.castTag(.anon_struct).?.data;
- const field_name = anon_struct.names[0];
+ const anon_struct = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type;
+ const field_name = mod.intern_pool.stringToSlice(anon_struct.names[0]);
const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty);
return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src);
}
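Note: this path handles a single-field anonymous struct literal coercing to a union. Sketch (hypothetical union):

    const U = union(enum) { a: u32, b: f32 };
    const u: U = .{ .a = 42 }; // exactly one field selects and initializes `a`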
@@ -29010,13 +29046,14 @@ fn coerceTupleToStruct(
@memset(field_refs, .none);
const inst_ty = sema.typeOf(inst);
+ const anon_struct = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type;
var runtime_src: ?LazySrcLoc = null;
- const field_count = inst_ty.structFieldCount(mod);
- var field_i: u32 = 0;
- while (field_i < field_count) : (field_i += 1) {
+ for (0..anon_struct.types.len) |field_index_usize| {
+ const field_i = @intCast(u32, field_index_usize);
const field_src = inst_src; // TODO better source location
- const field_name = if (inst_ty.castTag(.anon_struct)) |payload|
- payload.data.names[field_i]
+ const field_name = if (anon_struct.names.len != 0)
+ // https://github.com/ziglang/zig/issues/15709
+ @as([]const u8, mod.intern_pool.stringToSlice(anon_struct.names[field_i]))
else
try std.fmt.allocPrint(sema.arena, "{d}", .{field_i});
const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src);
@@ -29094,21 +29131,22 @@ fn coerceTupleToTuple(
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
- const dest_field_count = tuple_ty.structFieldCount(mod);
- const field_vals = try sema.arena.alloc(Value, dest_field_count);
+ const dest_tuple = mod.intern_pool.indexToKey(tuple_ty.ip_index).anon_struct_type;
+ const field_vals = try sema.arena.alloc(InternPool.Index, dest_tuple.types.len);
const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len);
@memset(field_refs, .none);
const inst_ty = sema.typeOf(inst);
- const inst_field_count = inst_ty.structFieldCount(mod);
- if (inst_field_count > dest_field_count) return error.NotCoercible;
+ const src_tuple = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type;
+ if (src_tuple.types.len > dest_tuple.types.len) return error.NotCoercible;
var runtime_src: ?LazySrcLoc = null;
- var field_i: u32 = 0;
- while (field_i < inst_field_count) : (field_i += 1) {
+ for (dest_tuple.types, dest_tuple.values, 0..) |field_ty, default_val, field_index_usize| {
+ const field_i = @intCast(u32, field_index_usize);
const field_src = inst_src; // TODO better source location
- const field_name = if (inst_ty.castTag(.anon_struct)) |payload|
- payload.data.names[field_i]
+ const field_name = if (src_tuple.names.len != 0)
+ // https://github.com/ziglang/zig/issues/15709
+ @as([]const u8, mod.intern_pool.stringToSlice(src_tuple.names[field_i]))
else
try std.fmt.allocPrint(sema.arena, "{d}", .{field_i});
@@ -29118,23 +29156,21 @@ fn coerceTupleToTuple(
const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src);
- const field_ty = tuple_ty.structFieldType(field_i, mod);
- const default_val = tuple_ty.structFieldDefaultValue(field_i, mod);
const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
- const coerced = try sema.coerce(block, field_ty, elem_ref, field_src);
+ const coerced = try sema.coerce(block, field_ty.toType(), elem_ref, field_src);
field_refs[field_index] = coerced;
- if (default_val.ip_index != .unreachable_value) {
+ if (default_val != .none) {
const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse {
return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known");
};
- if (!init_val.eql(default_val, field_ty, sema.mod)) {
+ if (!init_val.eql(default_val.toValue(), field_ty.toType(), sema.mod)) {
return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
}
}
if (runtime_src == null) {
if (try sema.resolveMaybeUndefVal(coerced)) |field_val| {
- field_vals[field_index] = field_val;
+ field_vals[field_index] = field_val.ip_index;
} else {
runtime_src = field_src;
}
@@ -29145,14 +29181,16 @@ fn coerceTupleToTuple(
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
- for (field_refs, 0..) |*field_ref, i| {
+ for (
+ dest_tuple.types,
+ dest_tuple.values,
+ field_refs,
+ 0..,
+ ) |field_ty, default_val, *field_ref, i| {
if (field_ref.* != .none) continue;
- const default_val = tuple_ty.structFieldDefaultValue(i, mod);
- const field_ty = tuple_ty.structFieldType(i, mod);
-
const field_src = inst_src; // TODO better source location
- if (default_val.ip_index == .unreachable_value) {
+ if (default_val == .none) {
if (tuple_ty.isTuple(mod)) {
const template = "missing tuple field: {d}";
if (root_msg) |msg| {
@@ -29174,7 +29212,7 @@ fn coerceTupleToTuple(
if (runtime_src == null) {
field_vals[i] = default_val;
} else {
- field_ref.* = try sema.addConstant(field_ty, default_val);
+ field_ref.* = try sema.addConstant(field_ty.toType(), default_val.toValue());
}
}
@@ -29191,7 +29229,10 @@ fn coerceTupleToTuple(
return sema.addConstant(
tuple_ty,
- try Value.Tag.aggregate.create(sema.arena, field_vals),
+ (try mod.intern(.{ .aggregate = .{
+ .ty = tuple_ty.ip_index,
+ .fields = field_vals,
+ } })).toValue(),
);
}
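Note: `coerceTupleToTuple` coerces element-by-element, fills omitted fields from defaults, and checks that values stored into comptime fields match. Sketch (hypothetical tuple types):

    const Dest = struct { u32, f64 }; // a tuple type
    const src = .{ @as(u8, 1), @as(f32, 2) };
    const dst: Dest = src; // u8 -> u32 and f32 -> f64, field by field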
@@ -31591,17 +31632,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
return sema.resolveTypeRequiresComptime(ty.optionalChild(mod));
},
- .tuple, .anon_struct => {
- const tuple = ty.tupleFields();
- for (tuple.types, 0..) |field_ty, i| {
- const have_comptime_val = tuple.values[i].ip_index != .unreachable_value;
- if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty)) {
- return true;
- }
- }
- return false;
- },
-
.error_union => return sema.resolveTypeRequiresComptime(ty.errorUnionPayload()),
.anyframe_T => {
const child_ty = ty.castTag(.anyframe_T).?.data;
@@ -31690,6 +31720,16 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
}
},
+ .anon_struct_type => |tuple| {
+ for (tuple.types, tuple.values) |field_ty, field_val| {
+ const have_comptime_val = field_val != .none;
+ if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty.toType())) {
+ return true;
+ }
+ }
+ return false;
+ },
+
.union_type => |union_type| {
const union_obj = mod.unionPtr(union_type.index);
switch (union_obj.requires_comptime) {
@@ -31740,20 +31780,16 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
return sema.resolveTypeFully(child_ty);
},
.Struct => switch (ty.ip_index) {
- .none => switch (ty.tag()) {
- .tuple, .anon_struct => {
- const tuple = ty.tupleFields();
-
+ .none => {}, // TODO make this unreachable when all types are migrated to InternPool
+ else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .struct_type => return sema.resolveStructFully(ty),
+ .anon_struct_type => |tuple| {
for (tuple.types) |field_ty| {
- try sema.resolveTypeFully(field_ty);
+ try sema.resolveTypeFully(field_ty.toType());
}
},
else => {},
},
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .struct_type => return sema.resolveStructFully(ty),
- else => {},
- },
},
.Union => return sema.resolveUnionFully(ty),
.Array => return sema.resolveTypeFully(ty.childType(mod)),
@@ -33038,17 +33074,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
}
},
- .tuple, .anon_struct => {
- const tuple = ty.tupleFields();
- for (tuple.values, 0..) |val, i| {
- const is_comptime = val.ip_index != .unreachable_value;
- if (is_comptime) continue;
- if ((try sema.typeHasOnePossibleValue(tuple.types[i])) != null) continue;
- return null;
- }
- return Value.empty_struct;
- },
-
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
@@ -33150,7 +33175,36 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
}
}
}
- // In this case the struct has no fields and therefore has one possible value.
+ // In this case the struct has no runtime-known fields and
+ // therefore has one possible value.
+
+ // TODO: this is incorrect for structs with comptime fields; I think
+ // we should use a temporary allocator to construct an aggregate that
+ // is populated with the comptime values and then intern that value here.
+ // This TODO is repeated for anon_struct_type below, as well as
+ // in the redundant implementation of one-possible-value in type.zig.
+ const empty = try mod.intern(.{ .aggregate = .{
+ .ty = ty.ip_index,
+ .fields = &.{},
+ } });
+ return empty.toValue();
+ },
+
+ .anon_struct_type => |tuple| {
+ for (tuple.types, tuple.values) |field_ty, val| {
+ const is_comptime = val != .none;
+ if (is_comptime) continue;
+ if ((try sema.typeHasOnePossibleValue(field_ty.toType())) != null) continue;
+ return null;
+ }
+ // In this case the struct has no runtime-known fields and
+ // therefore has one possible value.
+
+ // TODO: this is incorrect for structs with comptime fields; I think
+ // we should use a temporary allocator to construct an aggregate that
+ // is populated with the comptime values and then intern that value here.
+ // This TODO is repeated for struct_type above, as well as
+ // in the redundant implementation of one-possible-value in type.zig.
const empty = try mod.intern(.{ .aggregate = .{
.ty = ty.ip_index,
.fields = &.{},
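Note: per the TODO above, interning an empty aggregate is only fully correct when the struct has no fields at all; the comptime-field case it flags looks like this (hypothetical types):

    const Empty = struct {};                 // one possible value: .{}
    const C = struct { comptime x: u8 = 7 }; // single value should be .{ .x = 7 }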
@@ -33647,17 +33701,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
return sema.typeRequiresComptime(ty.optionalChild(mod));
},
- .tuple, .anon_struct => {
- const tuple = ty.tupleFields();
- for (tuple.types, 0..) |field_ty, i| {
- const have_comptime_val = tuple.values[i].ip_index != .unreachable_value;
- if (!have_comptime_val and try sema.typeRequiresComptime(field_ty)) {
- return true;
- }
- }
- return false;
- },
-
.error_union => return sema.typeRequiresComptime(ty.errorUnionPayload()),
.anyframe_T => {
const child_ty = ty.castTag(.anyframe_T).?.data;
@@ -33752,6 +33795,15 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
},
}
},
+ .anon_struct_type => |tuple| {
+ for (tuple.types, tuple.values) |field_ty, val| {
+ const have_comptime_val = val != .none;
+ if (!have_comptime_val and try sema.typeRequiresComptime(field_ty.toType())) {
+ return true;
+ }
+ }
+ return false;
+ },
.union_type => |union_type| {
const union_obj = mod.unionPtr(union_type.index);
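Note: a tuple field forces comptime only when it has no comptime value and its type is itself comptime-only. Sketch:

    const A = struct { type };     // comptime-only: a `type` field with no comptime value
    const B = struct { u32, f32 }; // usable at runtime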
@@ -33865,7 +33917,7 @@ fn structFieldIndex(
) !u32 {
const mod = sema.mod;
const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty);
- if (struct_ty.isAnonStruct()) {
+ if (struct_ty.isAnonStruct(mod)) {
return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src);
} else {
const struct_obj = mod.typeToStruct(struct_ty).?;
@@ -33882,9 +33934,10 @@ fn anonStructFieldIndex(
field_name: []const u8,
field_src: LazySrcLoc,
) !u32 {
- const anon_struct = struct_ty.castTag(.anon_struct).?.data;
+ const mod = sema.mod;
+ const anon_struct = mod.intern_pool.indexToKey(struct_ty.ip_index).anon_struct_type;
for (anon_struct.names, 0..) |name, i| {
- if (mem.eql(u8, name, field_name)) {
+ if (mem.eql(u8, mod.intern_pool.stringToSlice(name), field_name)) {
return @intCast(u32, i);
}
}
src/type.zig
@@ -54,10 +54,6 @@ pub const Type = struct {
.error_union => return .ErrorUnion,
.anyframe_T => return .AnyFrame,
-
- .tuple,
- .anon_struct,
- => return .Struct,
},
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.int_type => return .Int,
@@ -66,7 +62,7 @@ pub const Type = struct {
.vector_type => return .Vector,
.opt_type => return .Optional,
.error_union_type => return .ErrorUnion,
- .struct_type => return .Struct,
+ .struct_type, .anon_struct_type => return .Struct,
.union_type => return .Union,
.opaque_type => return .Opaque,
.enum_type => return .Enum,
@@ -465,76 +461,6 @@ pub const Type = struct {
if (b.zigTypeTag(mod) != .AnyFrame) return false;
return a.elemType2(mod).eql(b.elemType2(mod), mod);
},
-
- .tuple => {
- if (!b.isSimpleTuple()) return false;
-
- const a_tuple = a.tupleFields();
- const b_tuple = b.tupleFields();
-
- if (a_tuple.types.len != b_tuple.types.len) return false;
-
- for (a_tuple.types, 0..) |a_ty, i| {
- const b_ty = b_tuple.types[i];
- if (!eql(a_ty, b_ty, mod)) return false;
- }
-
- for (a_tuple.values, 0..) |a_val, i| {
- const ty = a_tuple.types[i];
- const b_val = b_tuple.values[i];
- if (a_val.ip_index == .unreachable_value) {
- if (b_val.ip_index == .unreachable_value) {
- continue;
- } else {
- return false;
- }
- } else {
- if (b_val.ip_index == .unreachable_value) {
- return false;
- } else {
- if (!Value.eql(a_val, b_val, ty, mod)) return false;
- }
- }
- }
-
- return true;
- },
- .anon_struct => {
- const a_struct_obj = a.castTag(.anon_struct).?.data;
- const b_struct_obj = (b.castTag(.anon_struct) orelse return false).data;
-
- if (a_struct_obj.types.len != b_struct_obj.types.len) return false;
-
- for (a_struct_obj.names, 0..) |a_name, i| {
- const b_name = b_struct_obj.names[i];
- if (!std.mem.eql(u8, a_name, b_name)) return false;
- }
-
- for (a_struct_obj.types, 0..) |a_ty, i| {
- const b_ty = b_struct_obj.types[i];
- if (!eql(a_ty, b_ty, mod)) return false;
- }
-
- for (a_struct_obj.values, 0..) |a_val, i| {
- const ty = a_struct_obj.types[i];
- const b_val = b_struct_obj.values[i];
- if (a_val.ip_index == .unreachable_value) {
- if (b_val.ip_index == .unreachable_value) {
- continue;
- } else {
- return false;
- }
- } else {
- if (b_val.ip_index == .unreachable_value) {
- return false;
- } else {
- if (!Value.eql(a_val, b_val, ty, mod)) return false;
- }
- }
- }
-
- return true;
- },
}
}
@@ -641,34 +567,6 @@ pub const Type = struct {
std.hash.autoHash(hasher, std.builtin.TypeId.AnyFrame);
hashWithHasher(ty.childType(mod), hasher, mod);
},
-
- .tuple => {
- std.hash.autoHash(hasher, std.builtin.TypeId.Struct);
-
- const tuple = ty.tupleFields();
- std.hash.autoHash(hasher, tuple.types.len);
-
- for (tuple.types, 0..) |field_ty, i| {
- hashWithHasher(field_ty, hasher, mod);
- const field_val = tuple.values[i];
- if (field_val.ip_index == .unreachable_value) continue;
- field_val.hash(field_ty, hasher, mod);
- }
- },
- .anon_struct => {
- const struct_obj = ty.castTag(.anon_struct).?.data;
- std.hash.autoHash(hasher, std.builtin.TypeId.Struct);
- std.hash.autoHash(hasher, struct_obj.types.len);
-
- for (struct_obj.types, 0..) |field_ty, i| {
- const field_name = struct_obj.names[i];
- const field_val = struct_obj.values[i];
- hasher.update(field_name);
- hashWithHasher(field_ty, hasher, mod);
- if (field_val.ip_index == .unreachable_value) continue;
- field_val.hash(field_ty, hasher, mod);
- }
- },
}
}
@@ -733,41 +631,6 @@ pub const Type = struct {
};
},
- .tuple => {
- const payload = self.castTag(.tuple).?.data;
- const types = try allocator.alloc(Type, payload.types.len);
- const values = try allocator.alloc(Value, payload.values.len);
- for (payload.types, 0..) |ty, i| {
- types[i] = try ty.copy(allocator);
- }
- for (payload.values, 0..) |val, i| {
- values[i] = try val.copy(allocator);
- }
- return Tag.tuple.create(allocator, .{
- .types = types,
- .values = values,
- });
- },
- .anon_struct => {
- const payload = self.castTag(.anon_struct).?.data;
- const names = try allocator.alloc([]const u8, payload.names.len);
- const types = try allocator.alloc(Type, payload.types.len);
- const values = try allocator.alloc(Value, payload.values.len);
- for (payload.names, 0..) |name, i| {
- names[i] = try allocator.dupe(u8, name);
- }
- for (payload.types, 0..) |ty, i| {
- types[i] = try ty.copy(allocator);
- }
- for (payload.values, 0..) |val, i| {
- values[i] = try val.copy(allocator);
- }
- return Tag.anon_struct.create(allocator, .{
- .names = names,
- .types = types,
- .values = values,
- });
- },
.function => {
const payload = self.castTag(.function).?.data;
const param_types = try allocator.alloc(Type, payload.param_types.len);
@@ -935,42 +798,6 @@ pub const Type = struct {
ty = return_type;
continue;
},
- .tuple => {
- const tuple = ty.castTag(.tuple).?.data;
- try writer.writeAll("tuple{");
- for (tuple.types, 0..) |field_ty, i| {
- if (i != 0) try writer.writeAll(", ");
- const val = tuple.values[i];
- if (val.ip_index != .unreachable_value) {
- try writer.writeAll("comptime ");
- }
- try field_ty.dump("", .{}, writer);
- if (val.ip_index != .unreachable_value) {
- try writer.print(" = {}", .{val.fmtDebug()});
- }
- }
- try writer.writeAll("}");
- return;
- },
- .anon_struct => {
- const anon_struct = ty.castTag(.anon_struct).?.data;
- try writer.writeAll("struct{");
- for (anon_struct.types, 0..) |field_ty, i| {
- if (i != 0) try writer.writeAll(", ");
- const val = anon_struct.values[i];
- if (val.ip_index != .unreachable_value) {
- try writer.writeAll("comptime ");
- }
- try writer.writeAll(anon_struct.names[i]);
- try writer.writeAll(": ");
- try field_ty.dump("", .{}, writer);
- if (val.ip_index != .unreachable_value) {
- try writer.print(" = {}", .{val.fmtDebug()});
- }
- }
- try writer.writeAll("}");
- return;
- },
.optional => {
const child_type = ty.castTag(.optional).?.data;
try writer.writeByte('?');
@@ -1131,45 +958,6 @@ pub const Type = struct {
try print(error_union.payload, writer, mod);
},
- .tuple => {
- const tuple = ty.castTag(.tuple).?.data;
-
- try writer.writeAll("tuple{");
- for (tuple.types, 0..) |field_ty, i| {
- if (i != 0) try writer.writeAll(", ");
- const val = tuple.values[i];
- if (val.ip_index != .unreachable_value) {
- try writer.writeAll("comptime ");
- }
- try print(field_ty, writer, mod);
- if (val.ip_index != .unreachable_value) {
- try writer.print(" = {}", .{val.fmtValue(field_ty, mod)});
- }
- }
- try writer.writeAll("}");
- },
- .anon_struct => {
- const anon_struct = ty.castTag(.anon_struct).?.data;
-
- try writer.writeAll("struct{");
- for (anon_struct.types, 0..) |field_ty, i| {
- if (i != 0) try writer.writeAll(", ");
- const val = anon_struct.values[i];
- if (val.ip_index != .unreachable_value) {
- try writer.writeAll("comptime ");
- }
- try writer.writeAll(anon_struct.names[i]);
- try writer.writeAll(": ");
-
- try print(field_ty, writer, mod);
-
- if (val.ip_index != .unreachable_value) {
- try writer.print(" = {}", .{val.fmtValue(field_ty, mod)});
- }
- }
- try writer.writeAll("}");
- },
-
.pointer => {
const info = ty.ptrInfo(mod);
@@ -1335,6 +1123,27 @@ pub const Type = struct {
try writer.writeAll("@TypeOf(.{})");
}
},
+ .anon_struct_type => |anon_struct| {
+ try writer.writeAll("struct{");
+ for (anon_struct.types, anon_struct.values, 0..) |field_ty, val, i| {
+ if (i != 0) try writer.writeAll(", ");
+ if (val != .none) {
+ try writer.writeAll("comptime ");
+ }
+ if (anon_struct.names.len != 0) {
+ const name = mod.intern_pool.stringToSlice(anon_struct.names[i]);
+ try writer.writeAll(name);
+ try writer.writeAll(": ");
+ }
+
+ try print(field_ty.toType(), writer, mod);
+
+ if (val != .none) {
+ try writer.print(" = {}", .{val.toValue().fmtValue(field_ty.toType(), mod)});
+ }
+ }
+ try writer.writeAll("}");
+ },
.union_type => |union_type| {
const union_obj = mod.unionPtr(union_type.index);
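Note: with the branch above, a named anonymous struct type renders as `struct{x: u32, y: f32}`, a tuple (`names.len == 0`) as `struct{u32, f32}`, and a comptime field includes its value, e.g. `struct{comptime x: comptime_int = 1}`.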
@@ -1443,16 +1252,6 @@ pub const Type = struct {
}
},
- .tuple, .anon_struct => {
- const tuple = ty.tupleFields();
- for (tuple.types, 0..) |field_ty, i| {
- const val = tuple.values[i];
- if (val.ip_index != .unreachable_value) continue; // comptime field
- if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true;
- }
- return false;
- },
-
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
@@ -1567,6 +1366,13 @@ pub const Type = struct {
return false;
}
},
+ .anon_struct_type => |tuple| {
+ for (tuple.types, tuple.values) |field_ty, val| {
+ if (val != .none) continue; // comptime field
+ if (try field_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true;
+ }
+ return false;
+ },
.union_type => |union_type| {
const union_obj = mod.unionPtr(union_type.index);
@@ -1634,8 +1440,6 @@ pub const Type = struct {
.function,
.error_union,
.anyframe_T,
- .tuple,
- .anon_struct,
=> false,
.inferred_alloc_mut => unreachable,
@@ -1705,6 +1509,7 @@ pub const Type = struct {
};
return struct_obj.layout != .Auto;
},
+ .anon_struct_type => false,
.union_type => |union_type| switch (union_type.runtime_tag) {
.none, .safety => mod.unionPtr(union_type.index).layout != .Auto,
.tagged => false,
@@ -1923,26 +1728,6 @@ pub const Type = struct {
.optional => return abiAlignmentAdvancedOptional(ty, mod, strat),
.error_union => return abiAlignmentAdvancedErrorUnion(ty, mod, strat),
- .tuple, .anon_struct => {
- const tuple = ty.tupleFields();
- var big_align: u32 = 0;
- for (tuple.types, 0..) |field_ty, i| {
- const val = tuple.values[i];
- if (val.ip_index != .unreachable_value) continue; // comptime field
- if (!(field_ty.hasRuntimeBits(mod))) continue;
-
- switch (try field_ty.abiAlignmentAdvanced(mod, strat)) {
- .scalar => |field_align| big_align = @max(big_align, field_align),
- .val => switch (strat) {
- .eager => unreachable, // field type alignment not resolved
- .sema => unreachable, // passed to abiAlignmentAdvanced above
- .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
- },
- }
- }
- return AbiAlignmentAdvanced{ .scalar = big_align };
- },
-
.inferred_alloc_const,
.inferred_alloc_mut,
=> unreachable,
@@ -2100,6 +1885,24 @@ pub const Type = struct {
}
return AbiAlignmentAdvanced{ .scalar = big_align };
},
+ .anon_struct_type => |tuple| {
+ var big_align: u32 = 0;
+ for (tuple.types, tuple.values) |field_ty, val| {
+ if (val != .none) continue; // comptime field
+ if (!(field_ty.toType().hasRuntimeBits(mod))) continue;
+
+ switch (try field_ty.toType().abiAlignmentAdvanced(mod, strat)) {
+ .scalar => |field_align| big_align = @max(big_align, field_align),
+ .val => switch (strat) {
+ .eager => unreachable, // field type alignment not resolved
+ .sema => unreachable, // passed to abiAlignmentAdvanced above
+ .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
+ },
+ }
+ }
+ return AbiAlignmentAdvanced{ .scalar = big_align };
+ },
+
.union_type => |union_type| {
const union_obj = mod.unionPtr(union_type.index);
return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag());
@@ -2287,18 +2090,6 @@ pub const Type = struct {
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
- .tuple, .anon_struct => {
- switch (strat) {
- .sema => |sema| try sema.resolveTypeLayout(ty),
- .lazy, .eager => {},
- }
- const field_count = ty.structFieldCount(mod);
- if (field_count == 0) {
- return AbiSizeAdvanced{ .scalar = 0 };
- }
- return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
- },
-
.anyframe_T => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
.pointer => switch (ty.castTag(.pointer).?.data.size) {
@@ -2496,6 +2287,18 @@ pub const Type = struct {
return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
},
},
+ .anon_struct_type => |tuple| {
+ switch (strat) {
+ .sema => |sema| try sema.resolveTypeLayout(ty),
+ .lazy, .eager => {},
+ }
+ const field_count = tuple.types.len;
+ if (field_count == 0) {
+ return AbiSizeAdvanced{ .scalar = 0 };
+ }
+ return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
+ },
+
.union_type => |union_type| {
const union_obj = mod.unionPtr(union_type.index);
return abiSizeAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag());
@@ -2609,18 +2412,6 @@ pub const Type = struct {
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
- .tuple, .anon_struct => {
- if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty);
- if (ty.containerLayout(mod) != .Packed) {
- return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
- }
- var total: u64 = 0;
- for (ty.tupleFields().types) |field_ty| {
- total += try bitSizeAdvanced(field_ty, mod, opt_sema);
- }
- return total;
- },
-
.anyframe_T => return target.ptrBitWidth(),
.pointer => switch (ty.castTag(.pointer).?.data.size) {
@@ -2724,6 +2515,11 @@ pub const Type = struct {
return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema);
},
+ .anon_struct_type => {
+ if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty);
+ return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
+ },
+
.union_type => |union_type| {
if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty);
if (ty.containerLayout(mod) != .Packed) {
@@ -3220,23 +3016,17 @@ pub const Type = struct {
}
pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout {
- return switch (ty.ip_index) {
- .empty_struct_type => .Auto,
- .none => switch (ty.tag()) {
- .tuple, .anon_struct => .Auto,
- else => unreachable,
+ return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto;
+ return struct_obj.layout;
},
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .struct_type => |struct_type| {
- const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto;
- return struct_obj.layout;
- },
- .union_type => |union_type| {
- const union_obj = mod.unionPtr(union_type.index);
- return union_obj.layout;
- },
- else => unreachable,
+ .anon_struct_type => .Auto,
+ .union_type => |union_type| {
+ const union_obj = mod.unionPtr(union_type.index);
+ return union_obj.layout;
},
+ else => unreachable,
};
}
@@ -3349,23 +3139,16 @@ pub const Type = struct {
}
pub fn arrayLenIp(ty: Type, ip: InternPool) u64 {
- return switch (ty.ip_index) {
- .empty_struct_type => 0,
- .none => switch (ty.tag()) {
- .tuple => ty.castTag(.tuple).?.data.types.len,
- .anon_struct => ty.castTag(.anon_struct).?.data.types.len,
-
- else => unreachable,
- },
- else => switch (ip.indexToKey(ty.ip_index)) {
- .vector_type => |vector_type| vector_type.len,
- .array_type => |array_type| array_type.len,
- .struct_type => |struct_type| {
- const struct_obj = ip.structPtrUnwrapConst(struct_type.index) orelse return 0;
- return struct_obj.fields.count();
- },
- else => unreachable,
+ return switch (ip.indexToKey(ty.ip_index)) {
+ .vector_type => |vector_type| vector_type.len,
+ .array_type => |array_type| array_type.len,
+ .struct_type => |struct_type| {
+ const struct_obj = ip.structPtrUnwrapConst(struct_type.index) orelse return 0;
+ return struct_obj.fields.count();
},
+ .anon_struct_type => |tuple| tuple.types.len,
+
+ else => unreachable,
};
}
@@ -3374,16 +3157,10 @@ pub const Type = struct {
}
pub fn vectorLen(ty: Type, mod: *const Module) u32 {
- return switch (ty.ip_index) {
- .none => switch (ty.tag()) {
- .tuple => @intCast(u32, ty.castTag(.tuple).?.data.types.len),
- .anon_struct => @intCast(u32, ty.castTag(.anon_struct).?.data.types.len),
- else => unreachable,
- },
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .vector_type => |vector_type| vector_type.len,
- else => unreachable,
- },
+ return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .vector_type => |vector_type| vector_type.len,
+ .anon_struct_type => |tuple| @intCast(u32, tuple.types.len),
+ else => unreachable,
};
}
@@ -3391,8 +3168,6 @@ pub const Type = struct {
pub fn sentinel(ty: Type, mod: *const Module) ?Value {
return switch (ty.ip_index) {
.none => switch (ty.tag()) {
- .tuple => null,
-
.pointer => ty.castTag(.pointer).?.data.sentinel,
else => unreachable,
@@ -3400,6 +3175,7 @@ pub const Type = struct {
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.vector_type,
.struct_type,
+ .anon_struct_type,
=> null,
.array_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null,
@@ -3486,10 +3262,12 @@ pub const Type = struct {
ty = struct_obj.backing_int_ty;
},
.enum_type => |enum_type| ty = enum_type.tag_ty.toType(),
+ .vector_type => |vector_type| ty = vector_type.child.toType(),
+
+ .anon_struct_type => unreachable,
.ptr_type => unreachable,
.array_type => unreachable,
- .vector_type => |vector_type| ty = vector_type.child.toType(),
.opt_type => unreachable,
.error_union_type => unreachable,
@@ -3711,17 +3489,6 @@ pub const Type = struct {
}
},
- .tuple, .anon_struct => {
- const tuple = ty.tupleFields();
- for (tuple.values, 0..) |val, i| {
- const is_comptime = val.ip_index != .unreachable_value;
- if (is_comptime) continue;
- if ((try tuple.types[i].onePossibleValue(mod)) != null) continue;
- return null;
- }
- return Value.empty_struct;
- },
-
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
},
@@ -3810,7 +3577,33 @@ pub const Type = struct {
return null;
}
}
- // In this case the struct has no fields and therefore has one possible value.
+ // In this case the struct has no runtime-known fields and
+ // therefore has one possible value.
+
+ // TODO: this is incorrect for structs with comptime fields; I think
+ // we should use a temporary allocator to construct an aggregate that
+ // is populated with the comptime values and then intern that value here.
+ // This TODO is repeated for anon_struct_type below, as well as in
+ // the redundant implementation of one-possible-value logic in Sema.zig.
+ const empty = try mod.intern(.{ .aggregate = .{
+ .ty = ty.ip_index,
+ .fields = &.{},
+ } });
+ return empty.toValue();
+ },
+
+ .anon_struct_type => |tuple| {
+ for (tuple.types, tuple.values) |field_ty, val| {
+ if (val != .none) continue; // comptime field
+ if ((try field_ty.toType().onePossibleValue(mod)) != null) continue;
+ return null;
+ }
+
+ // TODO: this is incorrect for structs with comptime fields; I think
+ // we should use a temporary allocator to construct an aggregate that
+ // is populated with the comptime values and then intern that value here.
+ // This TODO is repeated for struct_type above, as well as in
+ // the redundant implementation of one-possible-value logic in Sema.zig.
const empty = try mod.intern(.{ .aggregate = .{
.ty = ty.ip_index,
.fields = &.{},
@@ -3915,15 +3708,6 @@ pub const Type = struct {
return ty.optionalChild(mod).comptimeOnly(mod);
},
- .tuple, .anon_struct => {
- const tuple = ty.tupleFields();
- for (tuple.types, 0..) |field_ty, i| {
- const have_comptime_val = tuple.values[i].ip_index != .unreachable_value;
- if (!have_comptime_val and field_ty.comptimeOnly(mod)) return true;
- }
- return false;
- },
-
.error_union => return ty.errorUnionPayload().comptimeOnly(mod),
.anyframe_T => {
const child_ty = ty.castTag(.anyframe_T).?.data;
@@ -4007,6 +3791,14 @@ pub const Type = struct {
}
},
+ .anon_struct_type => |tuple| {
+ for (tuple.types, tuple.values) |field_ty, val| {
+ const have_comptime_val = val != .none;
+ if (!have_comptime_val and field_ty.toType().comptimeOnly(mod)) return true;
+ }
+ return false;
+ },
+
.union_type => |union_type| {
const union_obj = mod.unionPtr(union_type.index);
switch (union_obj.requires_comptime) {
@@ -4275,171 +4067,116 @@ pub const Type = struct {
}
pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) []const u8 {
- switch (ty.ip_index) {
- .none => switch (ty.tag()) {
- .anon_struct => return ty.castTag(.anon_struct).?.data.names[field_index],
- else => unreachable,
+ switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ assert(struct_obj.haveFieldTypes());
+ return struct_obj.fields.keys()[field_index];
},
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .struct_type => |struct_type| {
- const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
- assert(struct_obj.haveFieldTypes());
- return struct_obj.fields.keys()[field_index];
- },
- else => unreachable,
+ .anon_struct_type => |anon_struct| {
+ const name = anon_struct.names[field_index];
+ return mod.intern_pool.stringToSlice(name);
},
+ else => unreachable,
}
}
pub fn structFieldCount(ty: Type, mod: *Module) usize {
- return switch (ty.ip_index) {
- .empty_struct_type => 0,
- .none => switch (ty.tag()) {
- .tuple => ty.castTag(.tuple).?.data.types.len,
- .anon_struct => ty.castTag(.anon_struct).?.data.types.len,
- else => unreachable,
- },
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .struct_type => |struct_type| {
- const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0;
- assert(struct_obj.haveFieldTypes());
- return struct_obj.fields.count();
- },
- else => unreachable,
+ return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0;
+ assert(struct_obj.haveFieldTypes());
+ return struct_obj.fields.count();
},
+ .anon_struct_type => |anon_struct| anon_struct.types.len,
+ else => unreachable,
};
}
/// Supports structs and unions.
pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
- return switch (ty.ip_index) {
- .none => switch (ty.tag()) {
- .tuple => return ty.castTag(.tuple).?.data.types[index],
- .anon_struct => return ty.castTag(.anon_struct).?.data.types[index],
- else => unreachable,
+ return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ return struct_obj.fields.values()[index].ty;
},
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .struct_type => |struct_type| {
- const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
- return struct_obj.fields.values()[index].ty;
- },
- .union_type => |union_type| {
- const union_obj = mod.unionPtr(union_type.index);
- return union_obj.fields.values()[index].ty;
- },
- else => unreachable,
+ .union_type => |union_type| {
+ const union_obj = mod.unionPtr(union_type.index);
+ return union_obj.fields.values()[index].ty;
},
+ .anon_struct_type => |anon_struct| anon_struct.types[index].toType(),
+ else => unreachable,
};
}
pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 {
- switch (ty.ip_index) {
- .none => switch (ty.tag()) {
- .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(mod),
- .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(mod),
- else => unreachable,
+ switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ assert(struct_obj.layout != .Packed);
+ return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout);
},
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .struct_type => |struct_type| {
- const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
- assert(struct_obj.layout != .Packed);
- return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout);
- },
- .union_type => |union_type| {
- const union_obj = mod.unionPtr(union_type.index);
- return union_obj.fields.values()[index].normalAlignment(mod);
- },
- else => unreachable,
+ .anon_struct_type => |anon_struct| {
+ return anon_struct.types[index].toType().abiAlignment(mod);
},
+ .union_type => |union_type| {
+ const union_obj = mod.unionPtr(union_type.index);
+ return union_obj.fields.values()[index].normalAlignment(mod);
+ },
+ else => unreachable,
}
}
pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value {
- switch (ty.ip_index) {
- .none => switch (ty.tag()) {
- .tuple => {
- const tuple = ty.castTag(.tuple).?.data;
- return tuple.values[index];
- },
- .anon_struct => {
- const struct_obj = ty.castTag(.anon_struct).?.data;
- return struct_obj.values[index];
- },
- else => unreachable,
+ switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ return struct_obj.fields.values()[index].default_val;
},
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .struct_type => |struct_type| {
- const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
- return struct_obj.fields.values()[index].default_val;
- },
- else => unreachable,
+ .anon_struct_type => |anon_struct| {
+ const val = anon_struct.values[index];
+ // TODO: avoid using `unreachable` to indicate this.
+ if (val == .none) return Value.@"unreachable";
+ return val.toValue();
},
+ else => unreachable,
}
}
pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value {
- switch (ty.ip_index) {
- .none => switch (ty.tag()) {
- .tuple => {
- const tuple = ty.castTag(.tuple).?.data;
- const val = tuple.values[index];
- if (val.ip_index == .unreachable_value) {
- return tuple.types[index].onePossibleValue(mod);
- } else {
- return val;
- }
- },
- .anon_struct => {
- const anon_struct = ty.castTag(.anon_struct).?.data;
- const val = anon_struct.values[index];
- if (val.ip_index == .unreachable_value) {
- return anon_struct.types[index].onePossibleValue(mod);
- } else {
- return val;
- }
- },
- else => unreachable,
+ switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ const field = struct_obj.fields.values()[index];
+ if (field.is_comptime) {
+ return field.default_val;
+ } else {
+ return field.ty.onePossibleValue(mod);
+ }
},
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .struct_type => |struct_type| {
- const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
- const field = struct_obj.fields.values()[index];
- if (field.is_comptime) {
- return field.default_val;
- } else {
- return field.ty.onePossibleValue(mod);
- }
- },
- else => unreachable,
+ .anon_struct_type => |tuple| {
+ const val = tuple.values[index];
+ if (val == .none) {
+ return tuple.types[index].toType().onePossibleValue(mod);
+ } else {
+ return val.toValue();
+ }
},
+ else => unreachable,
}
}
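
Note: throughout these accessors, the interned encoding uses a field value of `.none` to mark a runtime-known tuple field, where the deleted legacy payloads used the `Value.@"unreachable"` sentinel (`val.ip_index == .unreachable_value`). A minimal model of the convention, with `null` standing in for `.none`:

    const std = @import("std");

    // null plays the role of `.none` (runtime-known field);
    // non-null is a comptime-known value.
    fn fieldIsComptime(values: []const ?u64, index: usize) bool {
        return values[index] != null;
    }

    fn fieldValueComptime(values: []const ?u64, index: usize) ?u64 {
        // null => runtime-known; the real code then falls back to
        // onePossibleValue on the field type.
        return values[index];
    }

    test "none sentinel" {
        const vals = [_]?u64{ 42, null };
        try std.testing.expect(fieldIsComptime(&vals, 0));
        try std.testing.expect(!fieldIsComptime(&vals, 1));
        try std.testing.expectEqual(@as(?u64, 42), fieldValueComptime(&vals, 0));
    }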
pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool {
- switch (ty.ip_index) {
- .none => switch (ty.tag()) {
- .tuple => {
- const tuple = ty.castTag(.tuple).?.data;
- const val = tuple.values[index];
- return val.ip_index != .unreachable_value;
- },
- .anon_struct => {
- const anon_struct = ty.castTag(.anon_struct).?.data;
- const val = anon_struct.values[index];
- return val.ip_index != .unreachable_value;
- },
- else => unreachable,
- },
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .struct_type => |struct_type| {
- const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
- if (struct_obj.layout == .Packed) return false;
- const field = struct_obj.fields.values()[index];
- return field.is_comptime;
- },
- else => unreachable,
+ return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ if (struct_obj.layout == .Packed) return false;
+ const field = struct_obj.fields.values()[index];
+ return field.is_comptime;
},
- }
+ .anon_struct_type => |anon_struct| anon_struct.values[index] != .none,
+ else => unreachable,
+ };
}
pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 {
@@ -4516,46 +4253,43 @@ pub const Type = struct {
pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
switch (ty.ip_index) {
.none => switch (ty.tag()) {
- .tuple, .anon_struct => {
- const tuple = ty.tupleFields();
+ else => unreachable,
+ },
+ else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ assert(struct_obj.haveLayout());
+ assert(struct_obj.layout != .Packed);
+ var it = ty.iterateStructOffsets(mod);
+ while (it.next()) |field_offset| {
+ if (index == field_offset.field)
+ return field_offset.offset;
+ }
+ return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1));
+ },
+
+ .anon_struct_type => |tuple| {
var offset: u64 = 0;
var big_align: u32 = 0;
- for (tuple.types, 0..) |field_ty, i| {
- const field_val = tuple.values[i];
- if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) {
+ for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
+ if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) {
// comptime field
if (i == index) return offset;
continue;
}
- const field_align = field_ty.abiAlignment(mod);
+ const field_align = field_ty.toType().abiAlignment(mod);
big_align = @max(big_align, field_align);
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
if (i == index) return offset;
- offset += field_ty.abiSize(mod);
+ offset += field_ty.toType().abiSize(mod);
}
offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1));
return offset;
},
- else => unreachable,
- },
- else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .struct_type => |struct_type| {
- const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
- assert(struct_obj.haveLayout());
- assert(struct_obj.layout != .Packed);
- var it = ty.iterateStructOffsets(mod);
- while (it.next()) |field_offset| {
- if (index == field_offset.field)
- return field_offset.offset;
- }
-
- return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1));
- },
-
.union_type => |union_type| {
if (!union_type.hasTag())
return 0;
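
Note: the `anon_struct_type` branch lays out tuple fields inline: comptime and zero-bit fields report the current offset but occupy no storage, runtime fields are aligned to their ABI alignment, and the largest alignment pads the total size. A standalone sketch with plain numbers in place of Module-resolved types; `Field` and `tupleFieldOffset` are illustrative names, not compiler API:

    const std = @import("std");

    const Field = struct { size: u64, alignment: u64, is_comptime: bool };

    // Mirrors the anon_struct_type branch above; is_comptime here also
    // covers zero-bit fields, which the real code detects via hasRuntimeBits.
    fn tupleFieldOffset(fields: []const Field, index: usize) u64 {
        var offset: u64 = 0;
        var big_align: u64 = 1;
        for (fields, 0..) |field, i| {
            if (field.is_comptime) {
                if (i == index) return offset; // no runtime storage
                continue;
            }
            big_align = @max(big_align, field.alignment);
            offset = std.mem.alignForwardGeneric(u64, offset, field.alignment);
            if (i == index) return offset;
            offset += field.size;
        }
        // index == fields.len yields the total (padded) size
        return std.mem.alignForwardGeneric(u64, offset, big_align);
    }

    test "offsets skip comptime fields" {
        const fields = [_]Field{
            .{ .size = 1, .alignment = 1, .is_comptime = false },
            .{ .size = 0, .alignment = 1, .is_comptime = true },
            .{ .size = 4, .alignment = 4, .is_comptime = false },
        };
        try std.testing.expectEqual(@as(u64, 0), tupleFieldOffset(&fields, 0));
        try std.testing.expectEqual(@as(u64, 1), tupleFieldOffset(&fields, 1)); // current offset, no storage
        try std.testing.expectEqual(@as(u64, 4), tupleFieldOffset(&fields, 2)); // padded up to align 4
    }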
@@ -4655,10 +4389,6 @@ pub const Type = struct {
inferred_alloc_const, // See last_no_payload_tag below.
// After this, the tag requires a payload.
- /// Possible Value tags for this: @"struct"
- tuple,
- /// Possible Value tags for this: @"struct"
- anon_struct,
pointer,
function,
optional,
@@ -4691,8 +4421,6 @@ pub const Type = struct {
.function => Payload.Function,
.error_union => Payload.ErrorUnion,
.error_set_single => Payload.Name,
- .tuple => Payload.Tuple,
- .anon_struct => Payload.AnonStruct,
};
}
@@ -4723,83 +4451,48 @@ pub const Type = struct {
pub fn isTuple(ty: Type, mod: *Module) bool {
return switch (ty.ip_index) {
- .none => switch (ty.tag()) {
- .tuple => true,
- else => false,
- },
- else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .none => false,
+ else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
return struct_obj.is_tuple;
},
+ .anon_struct_type => |anon_struct| anon_struct.names.len == 0,
else => false,
},
};
}
- pub fn isAnonStruct(ty: Type) bool {
- return switch (ty.ip_index) {
- .empty_struct_type => true,
- .none => switch (ty.tag()) {
- .anon_struct => true,
- else => false,
- },
+ pub fn isAnonStruct(ty: Type, mod: *Module) bool {
+ if (ty.ip_index == .empty_struct_type) return true;
+ return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0,
else => false,
};
}
pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
- return switch (ty.ip_index) {
- .empty_struct_type => true,
- .none => switch (ty.tag()) {
- .tuple, .anon_struct => true,
- else => false,
- },
- else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .struct_type => |struct_type| {
- const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
- return struct_obj.is_tuple;
- },
- else => false,
- },
- };
- }
-
- pub fn isSimpleTuple(ty: Type) bool {
- return switch (ty.ip_index) {
- .empty_struct_type => true,
- .none => switch (ty.tag()) {
- .tuple => true,
- else => false,
+ return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
+ return struct_obj.is_tuple;
},
+ .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0,
else => false,
};
}
- pub fn isSimpleTupleOrAnonStruct(ty: Type) bool {
- return switch (ty.ip_index) {
- .empty_struct_type => true,
- .none => switch (ty.tag()) {
- .tuple, .anon_struct => true,
- else => false,
- },
+ pub fn isSimpleTuple(ty: Type, mod: *Module) bool {
+ return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0,
else => false,
};
}
- // Only allowed for simple tuple types
- pub fn tupleFields(ty: Type) Payload.Tuple.Data {
- return switch (ty.ip_index) {
- .empty_struct_type => .{ .types = &.{}, .values = &.{} },
- .none => switch (ty.tag()) {
- .tuple => ty.castTag(.tuple).?.data,
- .anon_struct => .{
- .types = ty.castTag(.anon_struct).?.data.types,
- .values = ty.castTag(.anon_struct).?.data.values,
- },
- else => unreachable,
- },
- else => unreachable,
+ pub fn isSimpleTupleOrAnonStruct(ty: Type, mod: *Module) bool {
+ return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .anon_struct_type => true,
+ else => false,
};
}
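
Note: after this hunk a single interned key, `anon_struct_type`, carries both flavors: an empty `names` slice means a tuple, a non-empty one an anonymous struct. A toy version of that classification; the real code switches on InternPool keys rather than a bare slice:

    const std = @import("std");

    fn isTuple(names: []const []const u8) bool {
        return names.len == 0;
    }

    fn isAnonStruct(names: []const []const u8) bool {
        return names.len > 0;
    }

    test "names.len distinguishes the two" {
        try std.testing.expect(isTuple(&.{}));
        try std.testing.expect(isAnonStruct(&.{ "x", "y" }));
    }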
@@ -4947,29 +4640,6 @@ pub const Type = struct {
/// memory is owned by `Module`
data: []const u8,
};
-
- pub const Tuple = struct {
- base: Payload = .{ .tag = .tuple },
- data: Data,
-
- pub const Data = struct {
- types: []Type,
- /// unreachable_value elements are used to indicate runtime-known.
- values: []Value,
- };
- };
-
- pub const AnonStruct = struct {
- base: Payload = .{ .tag = .anon_struct },
- data: Data,
-
- pub const Data = struct {
- names: []const []const u8,
- types: []Type,
- /// unreachable_value elements are used to indicate runtime-known.
- values: []Value,
- };
- };
};
pub const @"u1": Type = .{ .ip_index = .u1_type, .legacy = undefined };
src/TypedValue.zig
@@ -177,13 +177,16 @@ pub fn print(
}
if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) {
- switch (field_ptr.container_ty.tag()) {
- .tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}),
- else => {
- const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index, mod);
- return writer.print(".{s}", .{field_name});
+ switch (mod.intern_pool.indexToKey(field_ptr.container_ty.ip_index)) {
+ .anon_struct_type => |anon_struct| {
+ if (anon_struct.names.len == 0) {
+ return writer.print(".@\"{d}\"", .{field_ptr.field_index});
+ }
},
+ else => {},
}
+ const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index, mod);
+ return writer.print(".{s}", .{field_name});
} else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) {
const field_name = field_ptr.container_ty.unionFields(mod).keys()[field_ptr.field_index];
return writer.print(".{s}", .{field_name});
@@ -396,12 +399,9 @@ fn printAggregate(
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
switch (ty.ip_index) {
- .none => switch (ty.tag()) {
- .anon_struct => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}),
- else => {},
- },
+ .none => {}, // TODO make this unreachable after finishing InternPool migration
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
- .struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}),
+ .struct_type, .anon_struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}),
else => {},
},
}
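
Note: tuple fields have no source-level names, so the printer falls back to the positional `.@"0"` spelling while named fields print as `.name`. A sketch of the two spellings; `printFieldPrefix` is illustrative, not the real printer:

    const std = @import("std");

    // `name` is null for positional (tuple) fields.
    fn printFieldPrefix(writer: anytype, name: ?[]const u8, index: usize) !void {
        if (name) |n| {
            try writer.print(".{s} = ", .{n});
        } else {
            try writer.print(".@\"{d}\" = ", .{index});
        }
    }

    test "tuple vs named field spelling" {
        var buf: [32]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&buf);
        try printFieldPrefix(fbs.writer(), null, 1);
        try std.testing.expectEqualStrings(".@\"1\" = ", fbs.getWritten());
    }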
src/value.zig
@@ -1889,26 +1889,28 @@ pub const Value = struct {
const b_field_vals = b.castTag(.aggregate).?.data;
assert(a_field_vals.len == b_field_vals.len);
- if (ty.isSimpleTupleOrAnonStruct()) {
- const types = ty.tupleFields().types;
- assert(types.len == a_field_vals.len);
- for (types, 0..) |field_ty, i| {
- if (!(try eqlAdvanced(a_field_vals[i], field_ty, b_field_vals[i], field_ty, mod, opt_sema))) {
- return false;
+ switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+ .anon_struct_type => |anon_struct| {
+ assert(anon_struct.types.len == a_field_vals.len);
+ for (anon_struct.types, 0..) |field_ty, i| {
+ if (!(try eqlAdvanced(a_field_vals[i], field_ty.toType(), b_field_vals[i], field_ty.toType(), mod, opt_sema))) {
+ return false;
+ }
}
- }
- return true;
- }
-
- if (ty.zigTypeTag(mod) == .Struct) {
- const fields = ty.structFields(mod).values();
- assert(fields.len == a_field_vals.len);
- for (fields, 0..) |field, i| {
- if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) {
- return false;
+ return true;
+ },
+ .struct_type => |struct_type| {
+ const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+ const fields = struct_obj.fields.values();
+ assert(fields.len == a_field_vals.len);
+ for (fields, 0..) |field, i| {
+ if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) {
+ return false;
+ }
}
- }
- return true;
+ return true;
+ },
+ else => {},
}
const elem_ty = ty.childType(mod);
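
Note: both branches reduce aggregate equality to a fieldwise walk over parallel arrays, short-circuiting on the first mismatch. A simplified model with `u64` standing in for interned field values and their recursive `eqlAdvanced` comparison:

    const std = @import("std");

    fn aggregatesEql(a_vals: []const u64, b_vals: []const u64) bool {
        std.debug.assert(a_vals.len == b_vals.len);
        for (a_vals, b_vals) |a, b| {
            if (a != b) return false; // first mismatch short-circuits
        }
        return true;
    }

    test "short-circuit fieldwise equality" {
        try std.testing.expect(aggregatesEql(&.{ 1, 2 }, &.{ 1, 2 }));
        try std.testing.expect(!aggregatesEql(&.{ 1, 2 }, &.{ 1, 3 }));
    }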
@@ -2017,20 +2019,6 @@ pub const Value = struct {
if ((try ty.onePossibleValue(mod)) != null) {
return true;
}
- if (a_ty.castTag(.anon_struct)) |payload| {
- const tuple = payload.data;
- if (tuple.values.len != 1) {
- return false;
- }
- const field_name = tuple.names[0];
- const union_obj = mod.typeToUnion(ty).?;
- const field_index = @intCast(u32, union_obj.fields.getIndex(field_name) orelse return false);
- const tag_and_val = b.castTag(.@"union").?.data;
- const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, field_index);
- const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod);
- if (!tag_matches) return false;
- return eqlAdvanced(tag_and_val.val, union_obj.tag_ty, tuple.values[0], tuple.types[0], mod, opt_sema);
- }
return false;
},
.Float => {