Commit 7f4bd247c7
Changed files (5)
src/Compilation.zig
@@ -2807,6 +2807,13 @@ const Header = extern struct {
limbs_len: u32,
string_bytes_len: u32,
tracked_insts_len: u32,
+ src_hash_deps_len: u32,
+ decl_val_deps_len: u32,
+ namespace_deps_len: u32,
+ namespace_name_deps_len: u32,
+ first_dependency_len: u32,
+ dep_entries_len: u32,
+ free_dep_entries_len: u32,
},
};
@@ -2814,7 +2821,7 @@ const Header = extern struct {
/// saved, such as the target and most CLI flags. A cache hit will only occur
/// when subsequent compiler invocations use the same set of flags.
pub fn saveState(comp: *Compilation) !void {
- var bufs_list: [7]std.os.iovec_const = undefined;
+ var bufs_list: [19]std.os.iovec_const = undefined;
var bufs_len: usize = 0;
const lf = comp.bin_file orelse return;
@@ -2828,6 +2835,13 @@ pub fn saveState(comp: *Compilation) !void {
.limbs_len = @intCast(ip.limbs.items.len),
.string_bytes_len = @intCast(ip.string_bytes.items.len),
.tracked_insts_len = @intCast(ip.tracked_insts.count()),
+ .src_hash_deps_len = @intCast(ip.src_hash_deps.count()),
+ .decl_val_deps_len = @intCast(ip.decl_val_deps.count()),
+ .namespace_deps_len = @intCast(ip.namespace_deps.count()),
+ .namespace_name_deps_len = @intCast(ip.namespace_name_deps.count()),
+ .first_dependency_len = @intCast(ip.first_dependency.count()),
+ .dep_entries_len = @intCast(ip.dep_entries.items.len),
+ .free_dep_entries_len = @intCast(ip.free_dep_entries.items.len),
},
};
addBuf(&bufs_list, &bufs_len, mem.asBytes(&header));
@@ -2838,6 +2852,20 @@ pub fn saveState(comp: *Compilation) !void {
addBuf(&bufs_list, &bufs_len, ip.string_bytes.items);
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.tracked_insts.keys()));
+ addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.keys()));
+ addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.values()));
+ addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.decl_val_deps.keys()));
+ addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.decl_val_deps.values()));
+ addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_deps.keys()));
+ addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_deps.values()));
+ addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_name_deps.keys()));
+ addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_name_deps.values()));
+
+ addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.first_dependency.keys()));
+ addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.first_dependency.values()));
+ addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.dep_entries.items));
+ addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.free_dep_entries.items));
+
// TODO: compilation errors
// TODO: files
// TODO: namespaces
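With these additions the original seven write buffers become nineteen: one for the header, the six that were already written, and twelve new slices (keys and values for each of the four dependency maps and for first_dependency, plus the dep_entries and free_dep_entries item arrays). Each addBuf call contributes one gather buffer whose element count is recorded in the header, so a future load path can partition the flat byte stream again. A minimal stand-in for the pattern (this addBuf is a local sketch, not the compiler's):

    const std = @import("std");

    // Local stand-in for Compilation.addBuf: one byte slice per serialized array.
    fn addBuf(list: [][]const u8, len: *usize, buf: []const u8) void {
        list[len.*] = buf;
        len.* += 1;
    }

    test "each serialized map contributes a keys buffer and a values buffer" {
        const keys = [_]u32{ 1, 2, 3 };
        const values = [_]u32{ 10, 20, 30 };
        var bufs: [19][]const u8 = undefined;
        var bufs_len: usize = 0;
        addBuf(&bufs, &bufs_len, std.mem.sliceAsBytes(&keys));
        addBuf(&bufs, &bufs_len, std.mem.sliceAsBytes(&values));
        try std.testing.expectEqual(@as(usize, 2), bufs_len);
        try std.testing.expectEqual(@as(usize, 3 * @sizeOf(u32)), bufs[0].len);
    }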
src/InternPool.zig
@@ -58,6 +58,38 @@ string_table: std.HashMapUnmanaged(
/// persists across incremental updates.
tracked_insts: std.AutoArrayHashMapUnmanaged(TrackedInst, void) = .{},
+/// Dependencies on the source code hash associated with a ZIR instruction.
+/// * For a `declaration`, this is the entire declaration body.
+/// * For a `struct_decl`, `union_decl`, etc, this is the source of the fields (but not declarations).
+/// * For a `func`, this is the source of the full function signature.
+/// These are also invalidated if tracking fails for this instruction.
+/// Value is index into `dep_entries` of the first dependency on this hash.
+src_hash_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .{},
+/// Dependencies on the value of a Decl.
+/// Value is index into `dep_entries` of the first dependency on this Decl value.
+decl_val_deps: std.AutoArrayHashMapUnmanaged(DeclIndex, DepEntry.Index) = .{},
+/// Dependencies on the full set of names in a ZIR namespace.
+/// Key refers to a `struct_decl`, `union_decl`, etc.
+/// Value is index into `dep_entries` of the first dependency on this namespace.
+namespace_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .{},
+/// Dependencies on the (non-)existence of some name in a namespace.
+/// Value is index into `dep_entries` of the first dependency on this name.
+namespace_name_deps: std.AutoArrayHashMapUnmanaged(NamespaceNameKey, DepEntry.Index) = .{},
+
+/// Given a `Depender`, points to an entry in `dep_entries` whose `depender`
+/// matches. The `next_dependee` field can be used to iterate all such entries
+/// and remove them from the corresponding lists.
+first_dependency: std.AutoArrayHashMapUnmanaged(Depender, DepEntry.Index) = .{},
+
+/// Stores dependency information. The hashmaps declared above are used to look
+/// up entries in this list as required. This is not stored in `extra` so that
+/// we can use `free_dep_entries` to track free indices, since dependencies are
+/// removed frequently.
+dep_entries: std.ArrayListUnmanaged(DepEntry) = .{},
+/// Stores unused indices in `dep_entries` which can be reused without a full
+/// garbage collection pass.
+free_dep_entries: std.ArrayListUnmanaged(DepEntry.Index) = .{},
+
pub const TrackedInst = extern struct {
path_digest: Cache.BinDigest,
inst: Zir.Inst.Index,
@@ -70,6 +102,19 @@ pub const TrackedInst = extern struct {
pub fn resolve(i: TrackedInst.Index, ip: *const InternPool) Zir.Inst.Index {
return ip.tracked_insts.keys()[@intFromEnum(i)].inst;
}
+ pub fn toOptional(i: TrackedInst.Index) Optional {
+ return @enumFromInt(@intFromEnum(i));
+ }
+ pub const Optional = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+ pub fn unwrap(opt: Optional) ?TrackedInst.Index {
+ return switch (opt) {
+ .none => null,
+ _ => @enumFromInt(@intFromEnum(opt)),
+ };
+ }
+ };
};
};
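The new Optional wrapper follows the existing InternPool convention: an optional 32-bit index fits in a single u32 by reserving maxInt(u32) as the null sentinel, which matters because these values are stored directly in `extra` and in the serialized maps. Depender.Optional and DepEntry.Index.Optional below repeat the same shape. A self-contained model of the pattern (the Index type here is illustrative):

    const std = @import("std");

    const Index = enum(u32) {
        _,
        pub fn toOptional(i: Index) Optional {
            return @enumFromInt(@intFromEnum(i));
        }
        pub const Optional = enum(u32) {
            none = std.math.maxInt(u32),
            _,
            pub fn unwrap(opt: Optional) ?Index {
                return switch (opt) {
                    .none => null,
                    _ => @enumFromInt(@intFromEnum(opt)),
                };
            }
        };
    };

    test "optional index costs no extra storage" {
        try std.testing.expectEqual(@sizeOf(u32), @sizeOf(Index.Optional));
        const i: Index = @enumFromInt(7);
        try std.testing.expectEqual(i, i.toOptional().unwrap().?);
        try std.testing.expect(Index.Optional.none.unwrap() == null);
    }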
@@ -82,6 +127,202 @@ pub fn trackZir(ip: *InternPool, gpa: Allocator, file: *Module.File, inst: Zir.I
return @enumFromInt(gop.index);
}
+/// Represents the "source" of a dependency edge, i.e. either a Decl or a
+/// runtime function (represented as an InternPool index).
+/// MSB is 0 for a Decl, 1 for a function.
+pub const Depender = enum(u32) {
+ _,
+ pub const Unwrapped = union(enum) {
+ decl: DeclIndex,
+ func: InternPool.Index,
+ };
+ pub fn unwrap(dep: Depender) Unwrapped {
+ const tag: u1 = @truncate(@intFromEnum(dep) >> 31);
+ const val: u31 = @truncate(@intFromEnum(dep));
+ return switch (tag) {
+ 0 => .{ .decl = @enumFromInt(val) },
+ 1 => .{ .func = @enumFromInt(val) },
+ };
+ }
+ pub fn wrap(raw: Unwrapped) Depender {
+ return @enumFromInt(switch (raw) {
+ .decl => |decl| @intFromEnum(decl),
+ .func => |func| (1 << 31) | @intFromEnum(func),
+ });
+ }
+ pub fn toOptional(dep: Depender) Optional {
+ return @enumFromInt(@intFromEnum(dep));
+ }
+ pub const Optional = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+ pub fn unwrap(opt: Optional) ?Depender {
+ return switch (opt) {
+ .none => null,
+ _ => @enumFromInt(@intFromEnum(opt)),
+ };
+ }
+ };
+};
+
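Depender packs a two-case union into 32 bits by using the most significant bit as the tag and the low 31 bits as the Decl or function index; this only works while both index spaces stay below 2^31. A standalone round-trip check of the same encoding (local names, not the compiler's):

    const std = @import("std");

    const Tagged = enum(u32) {
        _,
        fn wrapDecl(decl: u31) Tagged {
            return @enumFromInt(@as(u32, decl));
        }
        fn wrapFunc(func: u31) Tagged {
            return @enumFromInt((@as(u32, 1) << 31) | func);
        }
        fn isFunc(t: Tagged) bool {
            return (@intFromEnum(t) >> 31) != 0;
        }
        fn payload(t: Tagged) u31 {
            return @truncate(@intFromEnum(t));
        }
    };

    test "msb tag round-trips both cases" {
        const d = Tagged.wrapDecl(123);
        const f = Tagged.wrapFunc(123);
        try std.testing.expect(!d.isFunc());
        try std.testing.expect(f.isFunc());
        try std.testing.expectEqual(@as(u31, 123), d.payload());
        try std.testing.expectEqual(@as(u31, 123), f.payload());
    }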
+pub const Dependee = union(enum) {
+ src_hash: TrackedInst.Index,
+ decl_val: DeclIndex,
+ namespace: TrackedInst.Index,
+ namespace_name: NamespaceNameKey,
+};
+
+pub fn removeDependenciesForDepender(ip: *InternPool, gpa: Allocator, depender: Depender) void {
+ var opt_idx = (ip.first_dependency.fetchSwapRemove(depender) orelse return).value.toOptional();
+
+ while (opt_idx.unwrap()) |idx| {
+ const dep = ip.dep_entries.items[@intFromEnum(idx)];
+ opt_idx = dep.next_dependee;
+
+ const prev_idx = dep.prev.unwrap() orelse {
+ // This entry is the start of a list in some `*_deps`.
+ // We cannot easily remove this mapping, so this must remain as a dummy entry.
+ ip.dep_entries.items[@intFromEnum(idx)].depender = .none;
+ continue;
+ };
+
+ ip.dep_entries.items[@intFromEnum(prev_idx)].next = dep.next;
+ if (dep.next.unwrap()) |next_idx| {
+ ip.dep_entries.items[@intFromEnum(next_idx)].prev = dep.prev;
+ }
+
+ ip.free_dep_entries.append(gpa, idx) catch {
+ // This memory will be reclaimed on the next garbage collection.
+ // Thus, we do not need to propagate this error.
+ };
+ }
+}
+
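removeDependenciesForDepender walks the depender's chain via next_dependee and unlinks each entry from its dependee's doubly linked list; only a list head must survive as a `.none` dummy, because the owning `*_deps` map still stores its index. A toy model of the index-based unlink step (types here are illustrative):

    const std = @import("std");

    const Node = struct { prev: ?usize, next: ?usize, live: bool };

    test "unlink an interior entry by index" {
        var nodes = [_]Node{
            .{ .prev = null, .next = 1, .live = true }, // head; a *_deps map would hold index 0
            .{ .prev = 0, .next = 2, .live = true },
            .{ .prev = 1, .next = null, .live = true },
        };
        const n = nodes[1];
        if (n.prev) |p| nodes[p].next = n.next;
        if (n.next) |x| nodes[x].prev = n.prev;
        nodes[1].live = false; // index 1 would go onto free_dep_entries
        try std.testing.expectEqual(@as(?usize, 2), nodes[0].next);
        try std.testing.expectEqual(@as(?usize, 0), nodes[2].prev);
    }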
+pub const DependencyIterator = struct {
+ ip: *const InternPool,
+ next_entry: DepEntry.Index.Optional,
+ pub fn next(it: *DependencyIterator) ?Depender {
+ const idx = it.next_entry.unwrap() orelse return null;
+ const entry = it.ip.dep_entries.items[@intFromEnum(idx)];
+ it.next_entry = entry.next;
+ return entry.depender.unwrap().?;
+ }
+};
+
+pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyIterator {
+ const first_entry = switch (dependee) {
+ .src_hash => |x| ip.src_hash_deps.get(x),
+ .decl_val => |x| ip.decl_val_deps.get(x),
+ .namespace => |x| ip.namespace_deps.get(x),
+ .namespace_name => |x| ip.namespace_name_deps.get(x),
+ } orelse return .{
+ .ip = ip,
+ .next_entry = .none,
+ };
+ if (ip.dep_entries.items[@intFromEnum(first_entry)].depender == .none) return .{
+ .ip = ip,
+ .next_entry = .none,
+ };
+ return .{
+ .ip = ip,
+ .next_entry = first_entry.toOptional(),
+ };
+}
+
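A sketch of how the iterator is consumed, assuming a helper living alongside this file so that `Dependee`, `std`, and the index types are in scope; markDependeeOutdated in src/Module.zig follows the same shape:

    fn logDependers(ip: *const InternPool, dependee: Dependee) void {
        var it = ip.dependencyIterator(dependee);
        while (it.next()) |depender| {
            switch (depender.unwrap()) {
                .decl => |decl_index| std.log.debug("decl depender: {d}", .{@intFromEnum(decl_index)}),
                .func => |func_index| std.log.debug("func depender: {d}", .{@intFromEnum(func_index)}),
            }
        }
    }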
+pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: Depender, dependee: Dependee) Allocator.Error!void {
+ const first_depender_dep: DepEntry.Index.Optional = if (ip.first_dependency.get(depender)) |idx| dep: {
+ // The entry already exists, so there is capacity to overwrite it later.
+ break :dep idx.toOptional();
+ } else none: {
+ // Ensure there is capacity available to add this dependency later.
+ try ip.first_dependency.ensureUnusedCapacity(gpa, 1);
+ break :none .none;
+ };
+
+ // We're very likely to need space for a new entry - reserve it now to avoid
+ // the need for error cleanup logic.
+ if (ip.free_dep_entries.items.len == 0) {
+ try ip.dep_entries.ensureUnusedCapacity(gpa, 1);
+ }
+
+ // This block should allocate an entry and prepend it to the relevant `*_deps` list.
+ // The `next` field should be correctly initialized; all other fields may be undefined.
+ const new_index: DepEntry.Index = switch (dependee) {
+ inline else => |dependee_payload, tag| new_index: {
+ const gop = try switch (tag) {
+ .src_hash => &ip.src_hash_deps,
+ .decl_val => &ip.decl_val_deps,
+ .namespace => &ip.namespace_deps,
+ .namespace_name => &ip.namespace_name_deps,
+ }.getOrPut(gpa, dependee_payload);
+
+ if (gop.found_existing and ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].depender == .none) {
+ // Dummy entry, so we can reuse it rather than allocating a new one!
+ ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].next = .none;
+ break :new_index gop.value_ptr.*;
+ }
+
+ // Prepend a new dependency.
+ const new_index: DepEntry.Index, const ptr = if (ip.free_dep_entries.popOrNull()) |new_index| new: {
+ break :new .{ new_index, &ip.dep_entries.items[@intFromEnum(new_index)] };
+ } else .{ @enumFromInt(ip.dep_entries.items.len), ip.dep_entries.addOneAssumeCapacity() };
+ ptr.next = if (gop.found_existing) gop.value_ptr.*.toOptional() else .none;
+ gop.value_ptr.* = new_index;
+ break :new_index new_index;
+ },
+ };
+
+ ip.dep_entries.items[@intFromEnum(new_index)].depender = depender.toOptional();
+ ip.dep_entries.items[@intFromEnum(new_index)].prev = .none;
+ ip.dep_entries.items[@intFromEnum(new_index)].next_dependee = first_depender_dep;
+ ip.first_dependency.putAssumeCapacity(depender, new_index);
+}
+
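Putting the three entry points together, the life cycle of an edge looks roughly like this; the helper and its parameters are illustrative, not part of the commit:

    fn exampleRoundTrip(ip: *InternPool, gpa: Allocator, decl: DeclIndex, inst: TrackedInst.Index) !void {
        const depender = Depender.wrap(.{ .decl = decl });

        // The Decl records that it read the source behind `inst`.
        try ip.addDependency(gpa, depender, .{ .src_hash = inst });

        // When that source hash changes, the update loop can find the Decl again.
        var it = ip.dependencyIterator(.{ .src_hash = inst });
        std.debug.assert(it.next() != null);

        // Re-analysis of the Decl starts by dropping all of its old edges.
        ip.removeDependenciesForDepender(gpa, depender);
    }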
+/// String is the name whose existence the dependency is on.
+/// DepEntry.Index refers to the first such dependency.
+pub const NamespaceNameKey = struct {
+ /// The instruction (`struct_decl` etc) which owns the namespace in question.
+ namespace: TrackedInst.Index,
+ /// The name whose existence the dependency is on.
+ name: NullTerminatedString,
+};
+
+pub const DepEntry = extern struct {
+ /// If null, this is a dummy entry - all other fields are `undefined`. It is
+ /// the first and only entry in one of `intern_pool.*_deps`, and does not
+/// appear in any list referenced by `first_dependency`, but is not in
+ /// `free_dep_entries` since `*_deps` stores a reference to it.
+ depender: Depender.Optional,
+ /// Index into `dep_entries` forming a doubly linked list of all dependencies on this dependee.
+ /// Used to iterate all dependers for a given dependee during an update.
+ /// null if this is the end of the list.
+ next: DepEntry.Index.Optional,
+ /// The other link for `next`.
+ /// null if this is the start of the list.
+ prev: DepEntry.Index.Optional,
+ /// Index into `dep_entries` forming a singly linked list of dependencies *of* `depender`.
+ /// Used to efficiently remove all `DepEntry`s for a single `depender` when it is re-analyzed.
+ /// null if this is the end of the list.
+ next_dependee: DepEntry.Index.Optional,
+
+ pub const Index = enum(u32) {
+ _,
+ pub fn toOptional(dep: DepEntry.Index) Optional {
+ return @enumFromInt(@intFromEnum(dep));
+ }
+ pub const Optional = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+ pub fn unwrap(opt: Optional) ?DepEntry.Index {
+ return switch (opt) {
+ .none => null,
+ _ => @enumFromInt(@intFromEnum(opt)),
+ };
+ }
+ };
+ };
+};
+
const FieldMap = std.ArrayHashMapUnmanaged(void, void, std.array_hash_map.AutoContext(void), false);
const builtin = @import("builtin");
@@ -428,6 +669,7 @@ pub const Key = union(enum) {
decl: DeclIndex,
/// Represents the declarations inside this opaque.
namespace: NamespaceIndex,
+ zir_index: TrackedInst.Index.Optional,
};
/// Although packed structs and non-packed structs are encoded differently,
@@ -440,7 +682,7 @@ pub const Key = union(enum) {
/// `none` when the struct has no declarations.
namespace: OptionalNamespaceIndex,
/// Index of the struct_decl ZIR instruction.
- zir_index: TrackedInst.Index,
+ zir_index: TrackedInst.Index.Optional,
layout: std.builtin.Type.ContainerLayout,
field_names: NullTerminatedString.Slice,
field_types: Index.Slice,
@@ -684,7 +926,7 @@ pub const Key = union(enum) {
}
/// Asserts the struct is not packed.
- pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index) void {
+ pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
assert(s.layout != .Packed);
const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?;
ip.extra.items[s.extra_index + field_index] = @intFromEnum(new_zir_index);
@@ -800,7 +1042,7 @@ pub const Key = union(enum) {
flags: Tag.TypeUnion.Flags,
/// The enum that provides the list of field names and values.
enum_tag_ty: Index,
- zir_index: TrackedInst.Index,
+ zir_index: TrackedInst.Index.Optional,
/// The returned pointer expires with any addition to the `InternPool`.
pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeUnion.Flags {
@@ -889,6 +1131,7 @@ pub const Key = union(enum) {
/// This is ignored by `get` but will be provided by `indexToKey` when
/// a value map exists.
values_map: OptionalMapIndex = .none,
+ zir_index: TrackedInst.Index.Optional,
pub const TagMode = enum {
/// The integer tag type was auto-numbered by zig.
@@ -953,6 +1196,7 @@ pub const Key = union(enum) {
tag_mode: EnumType.TagMode,
/// This may be updated via `setTagType` later.
tag_ty: Index = .none,
+ zir_index: TrackedInst.Index.Optional,
pub fn toEnumType(self: @This()) EnumType {
return .{
@@ -962,6 +1206,7 @@ pub const Key = union(enum) {
.tag_mode = self.tag_mode,
.names = .{ .start = 0, .len = 0 },
.values = .{ .start = 0, .len = 0 },
+ .zir_index = self.zir_index,
};
}
@@ -1909,7 +2154,7 @@ pub const UnionType = struct {
/// If this slice has length 0 it means all elements are `none`.
field_aligns: Alignment.Slice,
/// Index of the union_decl ZIR instruction.
- zir_index: TrackedInst.Index,
+ zir_index: TrackedInst.Index.Optional,
/// Index into extra array of the `flags` field.
flags_index: u32,
/// Copied from `enum_tag_ty`.
@@ -2003,10 +2248,10 @@ pub const UnionType = struct {
}
/// This does not mutate the field of UnionType.
- pub fn setZirIndex(self: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index) void {
+ pub fn setZirIndex(self: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
const flags_field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?;
const zir_index_field_index = std.meta.fieldIndex(Tag.TypeUnion, "zir_index").?;
- const ptr: *TrackedInst.Index =
+ const ptr: *TrackedInst.Index.Optional =
@ptrCast(&ip.extra.items[self.flags_index - flags_field_index + zir_index_field_index]);
ptr.* = new_zir_index;
}
@@ -3099,7 +3344,7 @@ pub const Tag = enum(u8) {
namespace: NamespaceIndex,
/// The enum that provides the list of field names and values.
tag_ty: Index,
- zir_index: TrackedInst.Index,
+ zir_index: TrackedInst.Index.Optional,
pub const Flags = packed struct(u32) {
runtime_tag: UnionType.RuntimeTag,
@@ -3121,7 +3366,7 @@ pub const Tag = enum(u8) {
/// 2. init: Index for each fields_len // if tag is type_struct_packed_inits
pub const TypeStructPacked = struct {
decl: DeclIndex,
- zir_index: TrackedInst.Index,
+ zir_index: TrackedInst.Index.Optional,
fields_len: u32,
namespace: OptionalNamespaceIndex,
backing_int_ty: Index,
@@ -3168,7 +3413,7 @@ pub const Tag = enum(u8) {
/// 7. field_offset: u32 // for each field in declared order, undef until layout_resolved
pub const TypeStruct = struct {
decl: DeclIndex,
- zir_index: TrackedInst.Index,
+ zir_index: TrackedInst.Index.Optional,
fields_len: u32,
flags: Flags,
size: u32,
@@ -3523,6 +3768,7 @@ pub const EnumExplicit = struct {
/// If this is `none`, it means the trailing tag values are absent because
/// they are auto-numbered.
values_map: OptionalMapIndex,
+ zir_index: TrackedInst.Index.Optional,
};
/// Trailing:
@@ -3538,6 +3784,7 @@ pub const EnumAuto = struct {
fields_len: u32,
/// Maps field names to declaration index.
names_map: MapIndex,
+ zir_index: TrackedInst.Index.Optional,
};
pub const PackedU64 = packed struct(u64) {
@@ -3759,6 +4006,16 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
ip.tracked_insts.deinit(gpa);
+ ip.src_hash_deps.deinit(gpa);
+ ip.decl_val_deps.deinit(gpa);
+ ip.namespace_deps.deinit(gpa);
+ ip.namespace_name_deps.deinit(gpa);
+
+ ip.first_dependency.deinit(gpa);
+
+ ip.dep_entries.deinit(gpa);
+ ip.free_dep_entries.deinit(gpa);
+
ip.* = undefined;
}
@@ -3885,6 +4142,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
.tag_mode = .auto,
.names_map = enum_auto.data.names_map.toOptional(),
.values_map = .none,
+ .zir_index = enum_auto.data.zir_index,
} };
},
.type_enum_explicit => ip.indexToKeyEnum(data, .explicit),
@@ -4493,6 +4751,7 @@ fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMo
.tag_mode = tag_mode,
.names_map = enum_explicit.data.names_map.toOptional(),
.values_map = enum_explicit.data.values_map,
+ .zir_index = enum_explicit.data.zir_index,
} };
}
@@ -5329,7 +5588,7 @@ pub const UnionTypeInit = struct {
flags: Tag.TypeUnion.Flags,
decl: DeclIndex,
namespace: NamespaceIndex,
- zir_index: TrackedInst.Index,
+ zir_index: TrackedInst.Index.Optional,
fields_len: u32,
enum_tag_ty: Index,
/// May have length 0 which leaves the values unset until later.
@@ -5401,7 +5660,7 @@ pub const StructTypeInit = struct {
decl: DeclIndex,
namespace: OptionalNamespaceIndex,
layout: std.builtin.Type.ContainerLayout,
- zir_index: TrackedInst.Index,
+ zir_index: TrackedInst.Index.Optional,
fields_len: u32,
known_non_opv: bool,
requires_comptime: RequiresComptime,
@@ -6264,6 +6523,7 @@ fn getIncompleteEnumAuto(
.int_tag_type = int_tag_type,
.names_map = names_map,
.fields_len = enum_type.fields_len,
+ .zir_index = enum_type.zir_index,
});
ip.items.appendAssumeCapacity(.{
@@ -6314,6 +6574,7 @@ fn getIncompleteEnumExplicit(
.fields_len = enum_type.fields_len,
.names_map = names_map,
.values_map = values_map,
+ .zir_index = enum_type.zir_index,
});
ip.items.appendAssumeCapacity(.{
@@ -6339,6 +6600,7 @@ pub const GetEnumInit = struct {
names: []const NullTerminatedString,
values: []const Index,
tag_mode: Key.EnumType.TagMode,
+ zir_index: TrackedInst.Index.Optional,
};
pub fn getEnum(ip: *InternPool, gpa: Allocator, ini: GetEnumInit) Allocator.Error!Index {
@@ -6355,6 +6617,7 @@ pub fn getEnum(ip: *InternPool, gpa: Allocator, ini: GetEnumInit) Allocator.Erro
.tag_mode = undefined,
.names_map = undefined,
.values_map = undefined,
+ .zir_index = undefined,
},
}, adapter);
if (gop.found_existing) return @enumFromInt(gop.index);
@@ -6380,6 +6643,7 @@ pub fn getEnum(ip: *InternPool, gpa: Allocator, ini: GetEnumInit) Allocator.Erro
.int_tag_type = ini.tag_ty,
.names_map = names_map,
.fields_len = fields_len,
+ .zir_index = ini.zir_index,
}),
});
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names));
@@ -6416,6 +6680,7 @@ pub fn finishGetEnum(
.fields_len = fields_len,
.names_map = names_map,
.values_map = values_map,
+ .zir_index = ini.zir_index,
}),
});
ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names));
@@ -6507,6 +6772,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
OptionalNullTerminatedString,
Tag.TypePointer.VectorIndex,
TrackedInst.Index,
+ TrackedInst.Index.Optional,
=> @intFromEnum(@field(extra, field.name)),
u32,
@@ -6583,6 +6849,7 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct
OptionalNullTerminatedString,
Tag.TypePointer.VectorIndex,
TrackedInst.Index,
+ TrackedInst.Index.Optional,
=> @enumFromInt(int32),
u32,
src/Module.zig
@@ -149,6 +149,10 @@ error_limit: ErrorInt,
/// previous analysis.
generation: u32 = 0,
+/// Value is the number of PO dependencies of this Depender.
+potentially_outdated: std.AutoArrayHashMapUnmanaged(InternPool.Depender, u32) = .{},
+outdated: std.AutoArrayHashMapUnmanaged(InternPool.Depender, void) = .{},
+
stage1_flags: packed struct {
have_winmain: bool = false,
have_wwinmain: bool = false,
@@ -680,14 +684,6 @@ pub const Decl = struct {
return mod.namespacePtr(decl.src_namespace).file_scope;
}
- pub fn removeDependant(decl: *Decl, other: Decl.Index) void {
- assert(decl.dependants.swapRemove(other));
- }
-
- pub fn removeDependency(decl: *Decl, other: Decl.Index) void {
- assert(decl.dependencies.swapRemove(other));
- }
-
pub fn getExternDecl(decl: Decl, mod: *Module) OptionalIndex {
assert(decl.has_tv);
return switch (mod.intern_pool.indexToKey(decl.val.toIntern())) {
@@ -838,14 +834,6 @@ pub const File = struct {
/// undefined until `zir_loaded == true`.
path_digest: Cache.BinDigest = undefined,
- /// Used by change detection algorithm, after astgen, contains the
- /// set of decls that existed in the previous ZIR but not in the new one.
- deleted_decls: ArrayListUnmanaged(Decl.Index) = .{},
- /// Used by change detection algorithm, after astgen, contains the
- /// set of decls that existed both in the previous ZIR and in the new one,
- /// but their source code has been modified.
- outdated_decls: ArrayListUnmanaged(Decl.Index) = .{},
-
/// The most recent successful ZIR for this file, with no errors.
/// This is only populated when a previously successful ZIR
/// newly introduces compile errors during an update. When ZIR is
@@ -898,8 +886,6 @@ pub const File = struct {
gpa.free(file.sub_file_path);
file.unload(gpa);
}
- file.deleted_decls.deinit(gpa);
- file.outdated_decls.deinit(gpa);
file.references.deinit(gpa);
if (file.root_decl.unwrap()) |root_decl| {
mod.destroyDecl(root_decl);
@@ -2498,6 +2484,8 @@ pub fn deinit(zcu: *Zcu) void {
zcu.global_error_set.deinit(gpa);
+ zcu.potentially_outdated.deinit(gpa);
+ zcu.outdated.deinit(gpa);
+
zcu.test_functions.deinit(gpa);
for (zcu.global_assembly.values()) |s| {
@@ -2856,27 +2844,18 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
}
if (file.prev_zir) |prev_zir| {
- // Iterate over all Namespace objects contained within this File, looking at the
- // previous and new ZIR together and update the references to point
- // to the new one. For example, Decl name, Decl zir_decl_index, and Namespace
- // decl_table keys need to get updated to point to the new memory, even if the
- // underlying source code is unchanged.
- // We do not need to hold any locks at this time because all the Decl and Namespace
- // objects being touched are specific to this File, and the only other concurrent
- // tasks are touching other File objects.
try updateZirRefs(mod, file, prev_zir.*);
- // At this point, `file.outdated_decls` and `file.deleted_decls` are populated,
- // and semantic analysis will deal with them properly.
// No need to keep previous ZIR.
prev_zir.deinit(gpa);
gpa.destroy(prev_zir);
file.prev_zir = null;
- } else if (file.root_decl.unwrap()) |root_decl| {
- // This is an update, but it is the first time the File has succeeded
- // ZIR. We must mark it outdated since we have already tried to
- // semantically analyze it.
- try file.outdated_decls.resize(gpa, 1);
- file.outdated_decls.items[0] = root_decl;
+ }
+
+ if (file.root_decl.unwrap()) |root_decl| {
+ // The root of this file must be re-analyzed, since the file has changed.
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ try mod.outdated.put(gpa, InternPool.Depender.wrap(.{ .decl = root_decl }), {});
}
}
@@ -2950,25 +2929,142 @@ fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.File)
return zir;
}
+/// This is called from the AstGen thread pool, so must acquire
+/// the Compilation mutex when acting on shared state.
fn updateZirRefs(zcu: *Module, file: *File, old_zir: Zir) !void {
const gpa = zcu.gpa;
+ const new_zir = file.zir;
var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{};
defer inst_map.deinit(gpa);
- try mapOldZirToNew(gpa, old_zir, file.zir, &inst_map);
+ try mapOldZirToNew(gpa, old_zir, new_zir, &inst_map);
+
+ const old_tag = old_zir.instructions.items(.tag);
+ const old_data = old_zir.instructions.items(.data);
// TODO: this should be done after all AstGen workers complete, to avoid
// iterating over this full set for every updated file.
- for (zcu.intern_pool.tracked_insts.keys()) |*ti| {
+ for (zcu.intern_pool.tracked_insts.keys(), 0..) |*ti, idx_raw| {
+ const ti_idx: InternPool.TrackedInst.Index = @enumFromInt(idx_raw);
if (!std.mem.eql(u8, &ti.path_digest, &file.path_digest)) continue;
+ const old_inst = ti.inst;
ti.inst = inst_map.get(ti.inst) orelse {
- // TODO: invalidate this `TrackedInst` via the dependency mechanism
+ // Tracking failed for this instruction. Invalidate associated `src_hash` deps.
+ zcu.comp.mutex.lock();
+ defer zcu.comp.mutex.unlock();
+ try zcu.markDependeeOutdated(.{ .src_hash = ti_idx });
continue;
};
+
+ // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies.
+ const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) {
+ .extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) {
+ .struct_decl, .union_decl, .opaque_decl, .enum_decl => true,
+ else => false,
+ },
+ else => false,
+ };
+ if (!has_namespace) continue;
+
+ var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
+ defer old_names.deinit(zcu.gpa);
+ {
+ var it = old_zir.declIterator(old_inst);
+ while (it.next()) |decl_inst| {
+ const decl_name = old_zir.getDeclaration(decl_inst)[0].name;
+ switch (decl_name) {
+ .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
+ _ => if (decl_name.isNamedTest(old_zir)) continue,
+ }
+ const name_zir = decl_name.toString(old_zir).?;
+ const name_ip = try zcu.intern_pool.getOrPutString(
+ zcu.gpa,
+ old_zir.nullTerminatedString(name_zir),
+ );
+ try old_names.put(zcu.gpa, name_ip, {});
+ }
+ }
+ var any_change = false;
+ {
+ var it = new_zir.declIterator(ti.inst);
+ while (it.next()) |decl_inst| {
+ const decl_name = new_zir.getDeclaration(decl_inst)[0].name;
+ switch (decl_name) {
+ .@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
+ _ => if (decl_name.isNamedTest(new_zir)) continue,
+ }
+ const name_zir = decl_name.toString(new_zir).?;
+ const name_ip = try zcu.intern_pool.getOrPutString(
+ zcu.gpa,
+ new_zir.nullTerminatedString(name_zir),
+ );
+ if (old_names.swapRemove(name_ip)) continue;
+ // Name added
+ any_change = true;
+ zcu.comp.mutex.lock();
+ defer zcu.comp.mutex.unlock();
+ try zcu.markDependeeOutdated(.{ .namespace_name = .{
+ .namespace = ti_idx,
+ .name = name_ip,
+ } });
+ }
+ }
+ // The only elements remaining in `old_names` now are any names which were removed.
+ for (old_names.keys()) |name_ip| {
+ any_change = true;
+ zcu.comp.mutex.lock();
+ defer zcu.comp.mutex.unlock();
+ try zcu.markDependeeOutdated(.{ .namespace_name = .{
+ .namespace = ti_idx,
+ .name = name_ip,
+ } });
+ }
+
+ if (any_change) {
+ zcu.comp.mutex.lock();
+ defer zcu.comp.mutex.unlock();
+ try zcu.markDependeeOutdated(.{ .namespace = ti_idx });
+ }
+ }
+}
+
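The namespace diff above is a two-pass set difference: every old name goes into a set, each new name is removed from it as it is seen (a miss means the name was just added), and whatever survives the second pass was deleted; either kind of change invalidates the corresponding namespace_name dependees. A standalone model with plain strings (the real code keys on interned NullTerminatedString values):

    const std = @import("std");

    test "two-pass namespace name diff" {
        const gpa = std.testing.allocator;
        var old_names: std.StringArrayHashMapUnmanaged(void) = .{};
        defer old_names.deinit(gpa);
        for ([_][]const u8{ "a", "b", "c" }) |name| try old_names.put(gpa, name, {});

        var added: usize = 0;
        for ([_][]const u8{ "b", "c", "d" }) |name| {
            if (!old_names.swapRemove(name)) added += 1; // "d" is new
        }
        try std.testing.expectEqual(@as(usize, 1), added);

        // Whatever survives the second pass was removed: just "a".
        try std.testing.expectEqual(@as(usize, 1), old_names.count());
        try std.testing.expectEqualStrings("a", old_names.keys()[0]);
    }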
+pub fn markDependeeOutdated(zcu: *Zcu, dependee: InternPool.Dependee) !void {
+ var it = zcu.intern_pool.dependencyIterator(dependee);
+ while (it.next()) |depender| {
+ if (zcu.outdated.contains(depender)) continue;
+ const was_po = zcu.potentially_outdated.swapRemove(depender);
+ try zcu.outdated.putNoClobber(zcu.gpa, depender, {});
+ // If this is a Decl and was not previously PO, we must recursively
+ // mark dependencies on its tyval as PO.
+ if (!was_po) switch (depender.unwrap()) {
+ .decl => |decl_index| try zcu.markDeclDependenciesPotentiallyOutdated(decl_index),
+ .func => {},
+ };
}
}
+/// Given a Decl which is newly outdated or PO, mark all dependers which depend
+/// on its tyval as PO.
+fn markDeclDependenciesPotentiallyOutdated(zcu: *Zcu, decl_index: Decl.Index) !void {
+ var it = zcu.intern_pool.dependencyIterator(.{ .decl_val = decl_index });
+ while (it.next()) |po| {
+ if (zcu.potentially_outdated.getPtr(po)) |n| {
+ // There is now one more PO dependency.
+ n.* += 1;
+ continue;
+ }
+ try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1);
+ // If this is a Decl, we must recursively mark dependencies
+ // on its tyval as PO.
+ switch (po.unwrap()) {
+ .decl => |po_decl| try zcu.markDeclDependenciesPotentiallyOutdated(po_decl),
+ .func => {},
+ }
+ }
+ // TODO: repeat the above for `decl_ty` dependencies when they are introduced
+}
+
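Concretely: if B depends on A's value and C depends on B's value, then a change to A's source marks A outdated and, through markDeclDependenciesPotentiallyOutdated, marks B and C as potentially outdated (PO) with a count of 1 each; the count records how many PO dependees a Depender is still waiting on. A toy model of that counter, with plain integers standing in for Dependers (the decrement/re-analysis side is not part of this commit):

    const std = @import("std");

    test "po counter accumulates one increment per po dependee" {
        const gpa = std.testing.allocator;
        var po: std.AutoArrayHashMapUnmanaged(u32, u32) = .{};
        defer po.deinit(gpa);

        const depender: u32 = 42;
        // Two distinct dependees of this depender become PO.
        for (0..2) |_| {
            const gop = try po.getOrPut(gpa, depender);
            if (gop.found_existing) gop.value_ptr.* += 1 else gop.value_ptr.* = 1;
        }
        try std.testing.expectEqual(@as(u32, 2), po.get(depender).?);
    }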
pub fn mapOldZirToNew(
gpa: Allocator,
old_zir: Zir,
@@ -3535,6 +3631,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
break :blk .none;
};
+ mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.Depender.wrap(.{ .decl = decl_index }));
+
decl.analysis = .in_progress;
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
@@ -3564,6 +3662,9 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
};
defer sema.deinit();
+ // Every Decl has a dependency on its own source.
+ try sema.declareDependency(.{ .src_hash = try ip.trackZir(sema.gpa, decl.getFileScope(mod), decl.zir_decl_index.unwrap().?) });
+
assert(!mod.declIsRoot(decl_index));
var block_scope: Sema.Block = .{
@@ -4362,6 +4463,8 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
+ mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.Depender.wrap(.{ .func = func_index }));
+
var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
defer comptime_mutable_decls.deinit();
src/Sema.zig
@@ -2748,7 +2748,7 @@ pub fn getStructType(
const ty = try ip.getStructType(gpa, .{
.decl = decl,
.namespace = namespace.toOptional(),
- .zir_index = tracked_inst,
+ .zir_index = tracked_inst.toOptional(),
.layout = small.layout,
.known_non_opv = small.known_non_opv,
.is_tuple = small.is_tuple,
@@ -2789,6 +2789,12 @@ fn zirStructDecl(
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
+ try ip.addDependency(
+ sema.gpa,
+ InternPool.Depender.wrap(.{ .decl = new_decl_index }),
+ .{ .src_hash = try ip.trackZir(sema.gpa, block.getFileScope(mod), inst) },
+ );
+
const new_namespace_index = try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.ty = undefined,
@@ -2973,6 +2979,12 @@ fn zirEnumDecl(
new_decl.owns_tv = true;
errdefer if (!done) mod.abortAnonDecl(new_decl_index);
+ try mod.intern_pool.addDependency(
+ sema.gpa,
+ InternPool.Depender.wrap(.{ .decl = new_decl_index }),
+ .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) },
+ );
+
const new_namespace_index = try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.ty = undefined,
@@ -3008,6 +3020,7 @@ fn zirEnumDecl(
.auto
else
.explicit,
+ .zir_index = (try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst)).toOptional(),
});
if (sema.builtin_type_target_index != .none) {
mod.intern_pool.resolveBuiltinType(sema.builtin_type_target_index, incomplete_enum.index);
@@ -3225,6 +3238,12 @@ fn zirUnionDecl(
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
+ try mod.intern_pool.addDependency(
+ sema.gpa,
+ InternPool.Depender.wrap(.{ .decl = new_decl_index }),
+ .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) },
+ );
+
const new_namespace_index = try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.ty = undefined,
@@ -3254,7 +3273,7 @@ fn zirUnionDecl(
},
.decl = new_decl_index,
.namespace = new_namespace_index,
- .zir_index = try mod.intern_pool.trackZir(gpa, block.getFileScope(mod), inst),
+ .zir_index = (try mod.intern_pool.trackZir(gpa, block.getFileScope(mod), inst)).toOptional(),
.fields_len = fields_len,
.enum_tag_ty = .none,
.field_types = &.{},
@@ -3318,6 +3337,12 @@ fn zirOpaqueDecl(
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
+ try mod.intern_pool.addDependency(
+ sema.gpa,
+ InternPool.Depender.wrap(.{ .decl = new_decl_index }),
+ .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) },
+ );
+
const new_namespace_index = try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.ty = undefined,
@@ -3329,6 +3354,7 @@ fn zirOpaqueDecl(
const opaque_ty = try mod.intern(.{ .opaque_type = .{
.decl = new_decl_index,
.namespace = new_namespace_index,
+ .zir_index = (try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst)).toOptional(),
} });
// TODO: figure out InternPool removals for incremental compilation
//errdefer mod.intern_pool.remove(opaque_ty);
@@ -7890,6 +7916,8 @@ fn instantiateGenericCall(
const generic_owner_func = mod.intern_pool.indexToKey(generic_owner).func;
const generic_owner_ty_info = mod.typeToFunc(Type.fromInterned(generic_owner_func.ty)).?;
+ try sema.declareDependency(.{ .src_hash = generic_owner_func.zir_body_inst });
+
// Even though there may already be a generic instantiation corresponding
// to this callsite, we must evaluate the expressions of the generic
// function signature with the values of the callsite plugged in.
@@ -13594,6 +13622,12 @@ fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
});
try sema.checkNamespaceType(block, lhs_src, container_type);
+ if (container_type.typeDeclInst(mod)) |type_decl_inst| {
+ try sema.declareDependency(.{ .namespace_name = .{
+ .namespace = type_decl_inst,
+ .name = decl_name,
+ } });
+ }
const namespace = container_type.getNamespaceIndex(mod).unwrap() orelse
return .bool_false;
@@ -17447,6 +17481,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const type_info_ty = try sema.getBuiltinType("Type");
const type_info_tag_ty = type_info_ty.unionTagType(mod).?;
+ if (ty.typeDeclInst(mod)) |type_decl_inst| {
+ try sema.declareDependency(.{ .namespace = type_decl_inst });
+ }
+
switch (ty.zigTypeTag(mod)) {
.Type,
.Void,
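The two dependency kinds used here differ in granularity: zirHasDecl (and the field lookups later in this file) only observe whether a single name exists, so they record a namespace_name dependency, while zirTypeInfo exposes the whole declaration list and therefore records a namespace dependency. A user-level illustration of what each site observes:

    const std = @import("std");

    const S = struct {
        pub const x: u32 = 1;
    };

    test "name lookup vs whole-namespace observation" {
        // Sensitive only to the presence of one name (namespace_name).
        try std.testing.expect(@hasDecl(S, "x"));
        try std.testing.expect(!@hasDecl(S, "y"));
        // Sensitive to every declaration in the namespace (namespace).
        try std.testing.expectEqual(@as(usize, 1), @typeInfo(S).Struct.decls.len);
    }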
@@ -21313,6 +21351,7 @@ fn zirReify(
else
.explicit,
.tag_ty = int_tag_ty.toIntern(),
+ .zir_index = .none,
});
// TODO: figure out InternPool removals for incremental compilation
//errdefer ip.remove(incomplete_enum.index);
@@ -21410,6 +21449,7 @@ fn zirReify(
const opaque_ty = try mod.intern(.{ .opaque_type = .{
.decl = new_decl_index,
.namespace = new_namespace_index,
+ .zir_index = .none,
} });
// TODO: figure out InternPool removals for incremental compilation
//errdefer ip.remove(opaque_ty);
@@ -21628,7 +21668,7 @@ fn zirReify(
.namespace = new_namespace_index,
.enum_tag_ty = enum_tag_ty,
.fields_len = fields_len,
- .zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst), // TODO: should reified types be handled differently?
+ .zir_index = .none,
.flags = .{
.layout = layout,
.status = .have_field_types,
@@ -21796,7 +21836,7 @@ fn reifyStruct(
const ty = try ip.getStructType(gpa, .{
.decl = new_decl_index,
.namespace = .none,
- .zir_index = try mod.intern_pool.trackZir(gpa, block.getFileScope(mod), inst), // TODO: should reified types be handled differently?
+ .zir_index = .none,
.layout = layout,
.known_non_opv = false,
.fields_len = fields_len,
@@ -26416,6 +26456,7 @@ fn prepareSimplePanic(sema: *Sema, block: *Block) !void {
// owns the function.
try sema.ensureDeclAnalyzed(decl_index);
const tv = try mod.declPtr(decl_index).typedValue();
+ try sema.declareDependency(.{ .decl_val = decl_index });
assert(tv.ty.zigTypeTag(mod) == .Fn);
assert(try sema.fnHasRuntimeBits(tv.ty));
const func_index = tv.val.toIntern();
@@ -26837,6 +26878,13 @@ fn fieldVal(
const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?;
const child_type = val.toType();
+ if (child_type.typeDeclInst(mod)) |type_decl_inst| {
+ try sema.declareDependency(.{ .namespace_name = .{
+ .namespace = type_decl_inst,
+ .name = field_name,
+ } });
+ }
+
switch (try child_type.zigTypeTagOrPoison(mod)) {
.ErrorSet => {
switch (ip.indexToKey(child_type.toIntern())) {
@@ -27060,6 +27108,13 @@ fn fieldPtr(
const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?;
const child_type = val.toType();
+ if (child_type.typeDeclInst(mod)) |type_decl_inst| {
+ try sema.declareDependency(.{ .namespace_name = .{
+ .namespace = type_decl_inst,
+ .name = field_name,
+ } });
+ }
+
switch (child_type.zigTypeTag(mod)) {
.ErrorSet => {
switch (ip.indexToKey(child_type.toIntern())) {
@@ -31129,6 +31184,7 @@ fn beginComptimePtrLoad(
const is_mutable = ptr.addr == .mut_decl;
const decl = mod.declPtr(decl_index);
const decl_tv = try decl.typedValue();
+ try sema.declareDependency(.{ .decl_val = decl_index });
if (decl.val.getVariable(mod) != null) return error.RuntimeLoad;
const layout_defined = decl.ty.hasWellDefinedLayout(mod);
@@ -32382,6 +32438,8 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn
const decl = mod.declPtr(decl_index);
const decl_tv = try decl.typedValue();
+ // TODO: if this is a `decl_ref`, only depend on decl type
+ try sema.declareDependency(.{ .decl_val = decl_index });
const ptr_ty = try sema.ptrType(.{
.child = decl_tv.ty.toIntern(),
.flags = .{
@@ -35678,7 +35736,7 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) Comp
break :blk accumulator;
};
- const zir_index = struct_type.zir_index.resolve(ip);
+ const zir_index = struct_type.zir_index.unwrap().?.resolve(ip);
const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended;
assert(extended.opcode == .struct_decl);
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
@@ -36443,7 +36501,7 @@ fn semaStructFields(
const decl = mod.declPtr(decl_index);
const namespace_index = struct_type.namespace.unwrap() orelse decl.src_namespace;
const zir = mod.namespacePtr(namespace_index).file_scope.zir;
- const zir_index = struct_type.zir_index.resolve(ip);
+ const zir_index = struct_type.zir_index.unwrap().?.resolve(ip);
const fields_len, const small, var extra_index = structZirInfo(zir, zir_index);
@@ -36714,7 +36772,7 @@ fn semaStructFieldInits(
const decl = mod.declPtr(decl_index);
const namespace_index = struct_type.namespace.unwrap() orelse decl.src_namespace;
const zir = mod.namespacePtr(namespace_index).file_scope.zir;
- const zir_index = struct_type.zir_index.resolve(ip);
+ const zir_index = struct_type.zir_index.unwrap().?.resolve(ip);
const fields_len, const small, var extra_index = structZirInfo(zir, zir_index);
var comptime_mutable_decls = std.ArrayList(InternPool.DeclIndex).init(gpa);
@@ -36863,7 +36921,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un
const ip = &mod.intern_pool;
const decl_index = union_type.decl;
const zir = mod.namespacePtr(union_type.namespace).file_scope.zir;
- const zir_index = union_type.zir_index.resolve(ip);
+ const zir_index = union_type.zir_index.unwrap().?.resolve(ip);
const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended;
assert(extended.opcode == .union_decl);
const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
@@ -37307,6 +37365,7 @@ fn generateUnionTagTypeNumbered(
.names = enum_field_names,
.values = enum_field_vals,
.tag_mode = .explicit,
+ .zir_index = .none,
});
new_decl.ty = Type.type;
@@ -37357,6 +37416,7 @@ fn generateUnionTagTypeSimple(
.names = enum_field_names,
.values = &.{},
.tag_mode = .auto,
+ .zir_index = .none,
});
const new_decl = mod.declPtr(new_decl_index);
@@ -38870,3 +38930,13 @@ fn ptrType(sema: *Sema, info: InternPool.Key.PtrType) CompileError!Type {
}
return sema.mod.ptrType(info);
}
+
+pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void {
+ const depender = InternPool.Depender.wrap(
+ if (sema.owner_func_index != .none)
+ .{ .func = sema.owner_func_index }
+ else
+ .{ .decl = sema.owner_decl_index },
+ );
+ try sema.mod.intern_pool.addDependency(sema.gpa, depender, dependee);
+}
src/type.zig
@@ -4,6 +4,7 @@ const Value = @import("value.zig").Value;
const assert = std.debug.assert;
const Target = std.Target;
const Module = @import("Module.zig");
+const Zcu = Module;
const log = std.log.scoped(.Type);
const target_util = @import("target.zig");
const TypedValue = @import("TypedValue.zig");
@@ -3228,6 +3229,17 @@ pub const Type = struct {
};
}
+ pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
+ return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
+ inline .struct_type,
+ .union_type,
+ .enum_type,
+ .opaque_type,
+ => |info| info.zir_index.unwrap(),
+ else => null,
+ };
+ }
+
pub const @"u1": Type = .{ .ip_index = .u1_type };
pub const @"u8": Type = .{ .ip_index = .u8_type };
pub const @"u16": Type = .{ .ip_index = .u16_type };