Commit aa6c1c40ec
Changed files (4)
src/Zcu/PerThread.zig
@@ -360,7 +360,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
// Tracking failed for this instruction. Invalidate associated `src_hash` deps.
log.debug("tracking failed for %{d}", .{old_inst});
tracked_inst.inst = .lost;
- try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index });
+ try zcu.markDependeeOutdated(.not_marked_po, .{ .src_hash = tracked_inst_index });
continue;
};
tracked_inst.inst = InternPool.TrackedInst.MaybeLost.ZirIndex.wrap(new_inst);
@@ -383,7 +383,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
});
}
// The source hash associated with this instruction changed - invalidate relevant dependencies.
- try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index });
+ try zcu.markDependeeOutdated(.not_marked_po, .{ .src_hash = tracked_inst_index });
}
// If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies.
@@ -435,7 +435,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
if (!old_names.swapRemove(name_ip)) continue;
// Name added
any_change = true;
- try zcu.markDependeeOutdated(.{ .namespace_name = .{
+ try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace_name = .{
.namespace = tracked_inst_index,
.name = name_ip,
} });
@@ -444,14 +444,14 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
// The only elements remaining in `old_names` now are any names which were removed.
for (old_names.keys()) |name_ip| {
any_change = true;
- try zcu.markDependeeOutdated(.{ .namespace_name = .{
+ try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace_name = .{
.namespace = tracked_inst_index,
.name = name_ip,
} });
}
if (any_change) {
- try zcu.markDependeeOutdated(.{ .namespace = tracked_inst_index });
+ try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace = tracked_inst_index });
}
}
}
@@ -508,7 +508,7 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu
const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index });
const cau = ip.getCau(cau_index);
- log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)});
+ //log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)});
assert(!zcu.analysis_in_progress.contains(anal_unit));
@@ -527,8 +527,91 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu
if (cau_outdated) {
_ = zcu.outdated_ready.swapRemove(anal_unit);
+ } else {
+ // We can trust the current information about this `Cau`.
+ if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) {
+ return error.AnalysisFail;
+ }
+ // If it wasn't failed and wasn't marked outdated, then either...
+ // * it is a type and is up-to-date, or
+ // * it is a `comptime` decl and is up-to-date, or
+ // * it is another decl and is EITHER up-to-date OR never-referenced (so unresolved)
+ // We just need to check for that last case.
+ switch (cau.owner.unwrap()) {
+ .type, .none => return,
+ .nav => |nav| if (ip.getNav(nav).status == .resolved) return,
+ }
+ }
+
+ const sema_result: SemaCauResult, const analysis_fail = if (pt.ensureCauAnalyzedInner(cau_index, cau_outdated)) |result|
+ .{ result, false }
+ else |err| switch (err) {
+ error.AnalysisFail => res: {
+ if (!zcu.failed_analysis.contains(anal_unit)) {
+ // If this `Cau` caused the error, it would have an entry in `failed_analysis`.
+ // Since it does not, this must be a transitive failure.
+ try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
+ }
+ // We treat errors as up-to-date, since anything using this unit would just see a transitive error.
+ break :res .{ .{
+ .invalidate_decl_val = false,
+ .invalidate_decl_ref = false,
+ }, true };
+ },
+ error.OutOfMemory => res: {
+ try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
+ try zcu.retryable_failures.ensureUnusedCapacity(gpa, 1);
+ const msg = try Zcu.ErrorMsg.create(
+ gpa,
+ .{ .base_node_inst = cau.zir_index, .offset = Zcu.LazySrcLoc.Offset.nodeOffset(0) },
+ "unable to analyze: OutOfMemory",
+ .{},
+ );
+ zcu.retryable_failures.appendAssumeCapacity(anal_unit);
+ zcu.failed_analysis.putAssumeCapacityNoClobber(anal_unit, msg);
+ // We treat errors as up-to-date, since anything using this unit would just see a transitive error.
+ break :res .{ .{
+ .invalidate_decl_val = false,
+ .invalidate_decl_ref = false,
+ }, true };
+ },
+ };
+
+ if (cau_outdated) {
+ // TODO: we do not yet have separate dependencies for decl values vs types.
+ const invalidate = sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref;
+ const dependee: InternPool.Dependee = switch (cau.owner.unwrap()) {
+ .none => return, // there are no dependencies on a `comptime` decl!
+ .nav => |nav_index| .{ .nav_val = nav_index },
+ .type => |ty| .{ .interned = ty },
+ };
+
+ if (invalidate) {
+ // This dependee was marked as PO, meaning dependers were waiting
+ // on its analysis result, and it has turned out to be outdated.
+ // Update dependers accordingly.
+ try zcu.markDependeeOutdated(.marked_po, dependee);
+ } else {
+ // This dependee was previously PO, but turned out to be up-to-date.
+ // We do not need to queue any further analysis.
+ try zcu.markPoDependeeUpToDate(dependee);
+ }
}
+ if (analysis_fail) return error.AnalysisFail;
+}
+
+fn ensureCauAnalyzedInner(
+ pt: Zcu.PerThread,
+ cau_index: InternPool.Cau.Index,
+ cau_outdated: bool,
+) Zcu.SemaError!SemaCauResult {
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+
+ const cau = ip.getCau(cau_index);
+ const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index });
+
const inst_info = cau.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
// TODO: document this elsewhere mlugg!
@@ -550,22 +633,6 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu
return error.AnalysisFail;
}
- if (!cau_outdated) {
- // We can trust the current information about this `Cau`.
- if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) {
- return error.AnalysisFail;
- }
- // If it wasn't failed and wasn't marked outdated, then either...
- // * it is a type and is up-to-date, or
- // * it is a `comptime` decl and is up-to-date, or
- // * it is another decl and is EITHER up-to-date OR never-referenced (so unresolved)
- // We just need to check for that last case.
- switch (cau.owner.unwrap()) {
- .type, .none => return,
- .nav => |nav| if (ip.getNav(nav).status == .resolved) return,
- }
- }
-
// `cau_outdated` can be true in the initial update for `comptime` declarations,
// so this isn't a `dev.check`.
if (cau_outdated and dev.env.supports(.incremental)) {
@@ -573,76 +640,34 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu
// prior to re-analysis.
zcu.deleteUnitExports(anal_unit);
zcu.deleteUnitReferences(anal_unit);
- }
-
- const sema_result: SemaCauResult = res: {
- if (inst_info.inst == .main_struct_inst) {
- // Note that this is definitely a *recreation* due to outdated, because
- // this instruction indicates that `cau.owner` is a `type`, which only
- // reaches here if `cau_outdated`.
- try pt.recreateFileRoot(inst_info.file);
- break :res .{
- .invalidate_decl_val = true,
- .invalidate_decl_ref = true,
- };
+ if (zcu.failed_analysis.fetchSwapRemove(anal_unit)) |kv| {
+ kv.value.destroy(zcu.gpa);
}
+ _ = zcu.transitive_failed_analysis.swapRemove(anal_unit);
+ }
- const decl_prog_node = zcu.sema_prog_node.start(switch (cau.owner.unwrap()) {
- .nav => |nav| ip.getNav(nav).fqn.toSlice(ip),
- .type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip),
- .none => "comptime",
- }, 0);
- defer decl_prog_node.end();
-
- break :res pt.semaCau(cau_index) catch |err| switch (err) {
- error.AnalysisFail => {
- if (!zcu.failed_analysis.contains(anal_unit)) {
- // If this `Cau` caused the error, it would have an entry in `failed_analysis`.
- // Since it does not, this must be a transitive failure.
- try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
- }
- return error.AnalysisFail;
- },
- error.GenericPoison => unreachable,
- error.ComptimeBreak => unreachable,
- error.ComptimeReturn => unreachable,
- error.OutOfMemory => {
- try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
- try zcu.retryable_failures.append(gpa, anal_unit);
- zcu.failed_analysis.putAssumeCapacityNoClobber(anal_unit, try Zcu.ErrorMsg.create(
- gpa,
- .{ .base_node_inst = cau.zir_index, .offset = Zcu.LazySrcLoc.Offset.nodeOffset(0) },
- "unable to analyze: OutOfMemory",
- .{},
- ));
- return error.AnalysisFail;
- },
+ if (inst_info.inst == .main_struct_inst) {
+ // Note that this is definitely a *recreation* of an outdated type, because
+ // this instruction indicates that `cau.owner` is a `type`, and we only
+ // reach this point when `cau_outdated` is set.
+ try pt.recreateFileRoot(inst_info.file);
+ return .{
+ .invalidate_decl_val = true,
+ .invalidate_decl_ref = true,
};
- };
-
- if (!cau_outdated) {
- // We definitely don't need to do any dependency tracking, so our work is done.
- return;
}
- // TODO: we do not yet have separate dependencies for decl values vs types.
- const invalidate = sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref;
- const dependee: InternPool.Dependee = switch (cau.owner.unwrap()) {
- .none => return, // there are no dependencies on a `comptime` decl!
- .nav => |nav_index| .{ .nav_val = nav_index },
- .type => |ty| .{ .interned = ty },
- };
+ const decl_prog_node = zcu.sema_prog_node.start(switch (cau.owner.unwrap()) {
+ .nav => |nav| ip.getNav(nav).fqn.toSlice(ip),
+ .type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip),
+ .none => "comptime",
+ }, 0);
+ defer decl_prog_node.end();
- if (invalidate) {
- // This dependency was marked as PO, meaning dependees were waiting
- // on its analysis result, and it has turned out to be outdated.
- // Update dependees accordingly.
- try zcu.markDependeeOutdated(dependee);
- } else {
- // This dependency was previously PO, but turned out to be up-to-date.
- // We do not need to queue successive analysis.
- try zcu.markPoDependeeUpToDate(dependee);
- }
+ return pt.semaCau(cau_index) catch |err| switch (err) {
+ error.GenericPoison, error.ComptimeBreak, error.ComptimeReturn => unreachable,
+ error.AnalysisFail, error.OutOfMemory => |e| return e,
+ };
}
pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: InternPool.Index) Zcu.SemaError!void {
@@ -660,7 +685,64 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
const func = zcu.funcInfo(maybe_coerced_func_index);
- log.debug("ensureFuncBodyAnalyzed {d}", .{@intFromEnum(func_index)});
+ //log.debug("ensureFuncBodyAnalyzed {d}", .{@intFromEnum(func_index)});
+
+ const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index });
+ const func_outdated = zcu.outdated.swapRemove(anal_unit) or
+ zcu.potentially_outdated.swapRemove(anal_unit);
+
+ if (func_outdated) {
+ _ = zcu.outdated_ready.swapRemove(anal_unit);
+ } else {
+ // We can trust the current information about this function.
+ if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) {
+ return error.AnalysisFail;
+ }
+ switch (func.analysisUnordered(ip).state) {
+ .unreferenced => {}, // this is the first reference
+ .queued => {}, // we're waiting on first-time analysis
+ .analyzed => return, // up-to-date
+ }
+ }
+
+ const ies_outdated, const analysis_fail = if (pt.ensureFuncBodyAnalyzedInner(func_index, func_outdated)) |result|
+ .{ result.ies_outdated, false }
+ else |err| switch (err) {
+ error.AnalysisFail => res: {
+ if (!zcu.failed_analysis.contains(anal_unit)) {
+ // If this function caused the error, it would have an entry in `failed_analysis`.
+ // Since it does not, this must be a transitive failure.
+ try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
+ }
+ break :res .{ false, true }; // we treat errors as up-to-date IES, since those uses would just trigger a transitive error
+ },
+ error.OutOfMemory => return error.OutOfMemory, // TODO: graceful handling like `ensureCauAnalyzed`
+ };
+
+ if (func_outdated) {
+ if (ies_outdated) {
+ log.debug("func IES invalidated ('{d}')", .{@intFromEnum(func_index)});
+ try zcu.markDependeeOutdated(.marked_po, .{ .interned = func_index });
+ } else {
+ log.debug("func IES up-to-date ('{d}')", .{@intFromEnum(func_index)});
+ try zcu.markPoDependeeUpToDate(.{ .interned = func_index });
+ }
+ }
+
+ if (analysis_fail) return error.AnalysisFail;
+}
+
+fn ensureFuncBodyAnalyzedInner(
+ pt: Zcu.PerThread,
+ func_index: InternPool.Index,
+ func_outdated: bool,
+) Zcu.SemaError!struct { ies_outdated: bool } {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+ const ip = &zcu.intern_pool;
+
+ const func = zcu.funcInfo(func_index);
+ const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index });
// Here's an interesting question: is this function actually valid?
// Maybe the signature changed, so we'll end up creating a whole different `func`
@@ -681,7 +763,9 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
});
if (ip.isRemoved(func_index) or (func.generic_owner != .none and ip.isRemoved(func.generic_owner))) {
- try zcu.markDependeeOutdated(.{ .interned = func_index }); // IES
+ if (func_outdated) {
+ try zcu.markDependeeOutdated(.marked_po, .{ .interned = func_index }); // IES
+ }
ip.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index }));
ip.remove(pt.tid, func_index);
@panic("TODO: remove orphaned function from binary");
@@ -694,15 +778,14 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
else
.none;
- const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index });
- const func_outdated = zcu.outdated.swapRemove(anal_unit) or
- zcu.potentially_outdated.swapRemove(anal_unit);
-
if (func_outdated) {
- _ = zcu.outdated_ready.swapRemove(anal_unit);
dev.check(.incremental);
zcu.deleteUnitExports(anal_unit);
zcu.deleteUnitReferences(anal_unit);
+ if (zcu.failed_analysis.fetchSwapRemove(anal_unit)) |kv| {
+ kv.value.destroy(gpa);
+ }
+ _ = zcu.transitive_failed_analysis.swapRemove(anal_unit);
}
if (!func_outdated) {
@@ -713,7 +796,7 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
switch (func.analysisUnordered(ip).state) {
.unreferenced => {}, // this is the first reference
.queued => {}, // we're waiting on first-time analysis
- .analyzed => return, // up-to-date
+ .analyzed => return .{ .ies_outdated = false }, // up-to-date
}
}
@@ -722,28 +805,11 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
if (func_outdated) "outdated" else "never analyzed",
});
- var air = pt.analyzeFnBody(func_index) catch |err| switch (err) {
- error.AnalysisFail => {
- if (!zcu.failed_analysis.contains(anal_unit)) {
- // If this function caused the error, it would have an entry in `failed_analysis`.
- // Since it does not, this must be a transitive failure.
- try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
- }
- return error.AnalysisFail;
- },
- error.OutOfMemory => return error.OutOfMemory,
- };
+ var air = try pt.analyzeFnBody(func_index);
errdefer air.deinit(gpa);
- if (func_outdated) {
- if (!func.analysisUnordered(ip).inferred_error_set or func.resolvedErrorSetUnordered(ip) != old_resolved_ies) {
- log.debug("func IES invalidated ('{d}')", .{@intFromEnum(func_index)});
- try zcu.markDependeeOutdated(.{ .interned = func_index });
- } else {
- log.debug("func IES up-to-date ('{d}')", .{@intFromEnum(func_index)});
- try zcu.markPoDependeeUpToDate(.{ .interned = func_index });
- }
- }
+ const ies_outdated = func_outdated and
+ (!func.analysisUnordered(ip).inferred_error_set or func.resolvedErrorSetUnordered(ip) != old_resolved_ies);
const comp = zcu.comp;
@@ -752,13 +818,15 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) {
air.deinit(gpa);
- return;
+ return .{ .ies_outdated = ies_outdated };
}
try comp.queueJob(.{ .codegen_func = .{
.func = func_index,
.air = air,
} });
+
+ return .{ .ies_outdated = ies_outdated };
}
/// Takes ownership of `air`, even on error.
@@ -1935,6 +2003,8 @@ const ScanDeclIter = struct {
.@"comptime" => cau: {
const cau = existing_cau orelse try ip.createComptimeCau(gpa, pt.tid, tracked_inst, namespace_index);
+ try namespace.other_decls.append(gpa, cau);
+
// For a `comptime` declaration, whether to re-analyze is based solely on whether the
// `Cau` is outdated. So, add this one to `outdated` and `outdated_ready` if not already.
const unit = InternPool.AnalUnit.wrap(.{ .cau = cau });
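
The PerThread changes above split `ensureCauAnalyzed` and `ensureFuncBodyAnalyzed` into a thin wrapper plus an `...Inner` function, so that dependee bookkeeping (`markDependeeOutdated` / `markPoDependeeUpToDate`) runs on every exit path, including analysis failure. Below is a minimal standalone sketch of that shape, using hypothetical `Ctx` and `Result` types rather than the compiler's real `Zcu` state.

const std = @import("std");

const Result = struct { invalidate: bool };

const Ctx = struct {
    outdated: bool, // stand-in for membership in `zcu.outdated` / `zcu.potentially_outdated`
    fail_inner: bool, // force the inner analysis to fail, for demonstration
    marked: ?[]const u8 = null, // records which dependee-marking call ran

    fn ensureAnalyzed(ctx: *Ctx) error{AnalysisFail}!void {
        // The wrapper pops the unit from the outdated sets up front...
        const was_outdated = ctx.outdated;
        ctx.outdated = false;

        // ...runs the real work, converting failure into a flag instead of
        // returning early...
        const result, const failed = if (ctx.ensureAnalyzedInner()) |r|
            .{ r, false }
        else |err| switch (err) {
            // A failed unit is treated as up-to-date; dependers will see a
            // transitive error when they reference it.
            error.AnalysisFail => .{ Result{ .invalidate = false }, true },
        };

        // ...and only then updates dependers, on every outcome.
        if (was_outdated) {
            ctx.marked = if (result.invalidate)
                "markDependeeOutdated(.marked_po, ...)"
            else
                "markPoDependeeUpToDate(...)";
        }
        if (failed) return error.AnalysisFail;
    }

    fn ensureAnalyzedInner(ctx: *Ctx) error{AnalysisFail}!Result {
        if (ctx.fail_inner) return error.AnalysisFail;
        return .{ .invalidate = true };
    }
};

pub fn main() void {
    var ctx: Ctx = .{ .outdated = true, .fail_inner = true };
    // Even though analysis fails, the PO dependee still gets resolved, which a
    // plain early `return error.AnalysisFail` would have skipped.
    ctx.ensureAnalyzed() catch {};
    std.debug.print("{s}\n", .{ctx.marked.?});
}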
src/Compilation.zig
@@ -2300,7 +2300,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
zcu.intern_pool.dumpGenericInstances(gpa);
}
- if (comp.config.is_test and comp.totalErrorCount() == 0) {
+ if (comp.config.is_test and try comp.totalErrorCount() == 0) {
// The `test_functions` decl has been intentionally postponed until now,
// at which point we must populate it with the list of test functions that
// have been discovered and not filtered out.
@@ -2310,7 +2310,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
try pt.processExports();
}
- if (comp.totalErrorCount() != 0) {
+ if (try comp.totalErrorCount() != 0) {
// Skip flushing and keep source files loaded for error reporting.
comp.link_error_flags = .{};
return;
@@ -2394,7 +2394,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
}
try flush(comp, arena, .main, main_progress_node);
- if (comp.totalErrorCount() != 0) return;
+ if (try comp.totalErrorCount() != 0) return;
// Failure here only means an unnecessary cache miss.
man.writeManifest() catch |err| {
@@ -2411,7 +2411,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
},
.incremental => {
try flush(comp, arena, .main, main_progress_node);
- if (comp.totalErrorCount() != 0) return;
+ if (try comp.totalErrorCount() != 0) return;
},
}
}
@@ -3048,7 +3048,7 @@ fn addBuf(list: *std.ArrayList(std.posix.iovec_const), buf: []const u8) void {
}
/// This function is temporally single-threaded.
-pub fn totalErrorCount(comp: *Compilation) u32 {
+pub fn totalErrorCount(comp: *Compilation) Allocator.Error!u32 {
var total: usize =
comp.misc_failures.count() +
@intFromBool(comp.alloc_failure_occurred) +
@@ -3088,7 +3088,7 @@ pub fn totalErrorCount(comp: *Compilation) u32 {
// the previous parse success, including compile errors, but we cannot
// emit them until the file succeeds parsing.
for (zcu.failed_analysis.keys()) |anal_unit| {
- if (!all_references.contains(anal_unit)) continue;
+ if (comp.incremental and !all_references.contains(anal_unit)) continue;
const file_index = switch (anal_unit.unwrap()) {
.cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope,
.func => |ip_index| (zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip) orelse continue).file,
@@ -3225,7 +3225,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
if (err) |e| return e;
}
for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| {
- if (!all_references.contains(anal_unit)) continue;
+ if (comp.incremental and !all_references.contains(anal_unit)) continue;
const file_index = switch (anal_unit.unwrap()) {
.cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope,
@@ -3341,10 +3341,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
}
}
- assert(comp.totalErrorCount() == bundle.root_list.items.len);
+ assert(try comp.totalErrorCount() == bundle.root_list.items.len);
if (comp.module) |zcu| {
- if (bundle.root_list.items.len == 0) {
+ if (comp.incremental and bundle.root_list.items.len == 0) {
const should_have_error = for (zcu.transitive_failed_analysis.keys()) |failed_unit| {
if (all_references.contains(failed_unit)) break true;
} else false;
@@ -3448,6 +3448,7 @@ pub fn addModuleErrorMsg(
const span = try src.span(gpa);
const loc = std.zig.findLineColumn(source.bytes, span.main);
const rt_file_path = try src.file_scope.fullPath(gpa);
+ defer gpa.free(rt_file_path);
const name = switch (ref.referencer.unwrap()) {
.cau => |cau| switch (ip.getCau(cau).owner.unwrap()) {
.nav => |nav| ip.getNav(nav).name.toSlice(ip),
src/Sema.zig
@@ -112,6 +112,11 @@ exports: std.ArrayListUnmanaged(Zcu.Export) = .{},
references: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{},
type_references: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
+/// All dependencies registered so far by this `Sema`. This is a temporary duplicate
+/// of the main dependency data. It exists to avoid registering the same dependency
+/// for this `AnalUnit` multiple times.
+dependencies: std.AutoArrayHashMapUnmanaged(InternPool.Dependee, void) = .{},
+
const MaybeComptimeAlloc = struct {
/// The runtime index of the `alloc` instruction.
runtime_index: Value.RuntimeIndex,
@@ -879,6 +884,7 @@ pub fn deinit(sema: *Sema) void {
sema.exports.deinit(gpa);
sema.references.deinit(gpa);
sema.type_references.deinit(gpa);
+ sema.dependencies.deinit(gpa);
sema.* = undefined;
}
@@ -2740,7 +2746,7 @@ fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool {
_ = zcu.outdated_ready.swapRemove(cau_unit);
zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, cau_unit);
zcu.intern_pool.remove(pt.tid, ty);
- try zcu.markDependeeOutdated(.{ .interned = ty });
+ try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty });
return true;
}
@@ -6066,7 +6072,9 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
// That way, if this returns `error.AnalysisFail`, we have the dependency banked ready to
// trigger re-analysis later.
try pt.ensureFileAnalyzed(result.file_index);
- return Air.internedToRef(zcu.fileRootType(result.file_index));
+ const ty = zcu.fileRootType(result.file_index);
+ try sema.addTypeReferenceEntry(src, ty);
+ return Air.internedToRef(ty);
}
fn zirSuspendBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -6820,6 +6828,13 @@ fn lookupInNamespace(
const src_file = zcu.namespacePtr(block.namespace).file_scope;
+ if (Type.fromInterned(namespace.owner_type).typeDeclInst(zcu)) |type_decl_inst| {
+ try sema.declareDependency(.{ .namespace_name = .{
+ .namespace = type_decl_inst,
+ .name = ident_name,
+ } });
+ }
+
if (observe_usingnamespace and (namespace.pub_usingnamespace.items.len != 0 or namespace.priv_usingnamespace.items.len != 0)) {
const gpa = sema.gpa;
var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, void) = .{};
@@ -13981,12 +13996,6 @@ fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
});
try sema.checkNamespaceType(block, lhs_src, container_type);
- if (container_type.typeDeclInst(mod)) |type_decl_inst| {
- try sema.declareDependency(.{ .namespace_name = .{
- .namespace = type_decl_inst,
- .name = decl_name,
- } });
- }
const namespace = container_type.getNamespace(mod).unwrap() orelse return .bool_false;
if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |lookup| {
@@ -14026,7 +14035,9 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
// That way, if this returns `error.AnalysisFail`, we have the dependency banked ready to
// trigger re-analysis later.
try pt.ensureFileAnalyzed(result.file_index);
- return Air.internedToRef(zcu.fileRootType(result.file_index));
+ const ty = zcu.fileRootType(result.file_index);
+ try sema.addTypeReferenceEntry(operand_src, ty);
+ return Air.internedToRef(ty);
}
fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -27696,13 +27707,6 @@ fn fieldVal(
const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?;
const child_type = val.toType();
- if (child_type.typeDeclInst(mod)) |type_decl_inst| {
- try sema.declareDependency(.{ .namespace_name = .{
- .namespace = type_decl_inst,
- .name = field_name,
- } });
- }
-
switch (try child_type.zigTypeTagOrPoison(mod)) {
.ErrorSet => {
switch (ip.indexToKey(child_type.toIntern())) {
@@ -27934,13 +27938,6 @@ fn fieldPtr(
const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?;
const child_type = val.toType();
- if (child_type.typeDeclInst(mod)) |type_decl_inst| {
- try sema.declareDependency(.{ .namespace_name = .{
- .namespace = type_decl_inst,
- .name = field_name,
- } });
- }
-
switch (child_type.zigTypeTag(mod)) {
.ErrorSet => {
switch (ip.indexToKey(child_type.toIntern())) {
@@ -32260,7 +32257,7 @@ fn addReferenceEntry(
referenced_unit: AnalUnit,
) !void {
const zcu = sema.pt.zcu;
- if (zcu.comp.reference_trace == 0) return;
+ if (!zcu.comp.incremental and zcu.comp.reference_trace == 0) return;
const gop = try sema.references.getOrPut(sema.gpa, referenced_unit);
if (gop.found_existing) return;
// TODO: we need to figure out how to model inline calls here.
@@ -32275,7 +32272,7 @@ fn addTypeReferenceEntry(
referenced_type: InternPool.Index,
) !void {
const zcu = sema.pt.zcu;
- if (zcu.comp.reference_trace == 0) return;
+ if (!zcu.comp.incremental and zcu.comp.reference_trace == 0) return;
const gop = try sema.type_references.getOrPut(sema.gpa, referenced_type);
if (gop.found_existing) return;
try zcu.addTypeReference(sema.owner, referenced_type, src);
@@ -38272,6 +38269,9 @@ pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void {
const zcu = sema.pt.zcu;
if (!zcu.comp.incremental) return;
+ const gop = try sema.dependencies.getOrPut(sema.gpa, dependee);
+ if (gop.found_existing) return;
+
// Avoid creating dependencies on ourselves. This situation can arise when we analyze the fields
// of a type and they use `@This()`. This dependency would be unnecessary, and in fact would
// just result in over-analysis since `Zcu.findOutdatedToAnalyze` would never be able to resolve
src/Zcu.zig
@@ -10,7 +10,7 @@ const builtin = @import("builtin");
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
-const log = std.log.scoped(.module);
+const log = std.log.scoped(.zcu);
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Target = std.Target;
@@ -153,9 +153,11 @@ cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .
/// Maximum amount of distinct error values, set by --error-limit
error_limit: ErrorInt,
-/// Value is the number of PO or outdated Decls which this AnalUnit depends on.
+/// Value is the number of PO dependencies of this AnalUnit.
+/// This value will decrease as semantic analysis discovers which of those dependencies are actually outdated.
+/// If any of these PO dependencies turns out to be outdated, this AnalUnit is moved to `outdated`.
potentially_outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
-/// Value is the number of PO or outdated Decls which this AnalUnit depends on.
+/// Value is the number of PO dependencies of this AnalUnit.
/// Once this value drops to 0, the AnalUnit is a candidate for re-analysis.
outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
/// This contains all `AnalUnit`s in `outdated` whose PO dependency count is 0.
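
To illustrate the bookkeeping described in the doc comments above: a depender sits in `potentially_outdated` with a count of its PO dependencies; once one of those dependencies is confirmed outdated, the entry moves to `outdated` (mirroring the `.marked_po` path of `markDependeeOutdated` below), and when the count reaches zero the unit joins `outdated_ready`. A small self-contained sketch, with plain `u32` values standing in for `AnalUnit`s:

const std = @import("std");

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    // Keys are hypothetical `AnalUnit` ids; values are PO dependency counts.
    var potentially_outdated: std.AutoArrayHashMapUnmanaged(u32, u32) = .{};
    defer potentially_outdated.deinit(gpa);
    var outdated: std.AutoArrayHashMapUnmanaged(u32, u32) = .{};
    defer outdated.deinit(gpa);
    var outdated_ready: std.AutoArrayHashMapUnmanaged(u32, void) = .{};
    defer outdated_ready.deinit(gpa);

    // Unit 1 depends on two dependees that are currently PO.
    try potentially_outdated.put(gpa, 1, 2);

    // One dependee is confirmed outdated: unit 1 moves to `outdated`, keeping
    // the count of its *remaining* PO dependencies (2 - 1 = 1).
    const kv = potentially_outdated.fetchSwapRemove(1).?;
    try outdated.put(gpa, 1, kv.value - 1);

    // The other dependee turns out to be up-to-date: the count hits zero, so
    // unit 1 is a candidate for re-analysis.
    const count = outdated.getPtr(1).?;
    count.* -= 1;
    if (count.* == 0) try outdated_ready.put(gpa, 1, {});

    std.debug.print("ready for re-analysis: {}\n", .{outdated_ready.contains(1)});
}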
@@ -2276,55 +2278,90 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.F
return zir;
}
-pub fn markDependeeOutdated(zcu: *Zcu, dependee: InternPool.Dependee) !void {
- log.debug("outdated dependee: {}", .{dependee});
+pub fn markDependeeOutdated(
+ zcu: *Zcu,
+ /// When we are diffing ZIR and marking things as outdated, dependers have not yet counted this dependee as PO.
+ /// However, when analysis discovers that something is outdated, its dependers were already counting it
+ /// as a PO dependency, so we must decrement the PO dep count of each depender.
+ marked_po: enum { not_marked_po, marked_po },
+ dependee: InternPool.Dependee,
+) !void {
+ log.debug("outdated dependee: {}", .{fmtDependee(dependee, zcu)});
var it = zcu.intern_pool.dependencyIterator(dependee);
while (it.next()) |depender| {
- if (zcu.outdated.contains(depender)) {
- // We do not need to increment the PO dep count, as if the outdated
- // dependee is a Decl, we had already marked this as PO.
+ if (zcu.outdated.getPtr(depender)) |po_dep_count| {
+ switch (marked_po) {
+ .not_marked_po => {},
+ .marked_po => {
+ po_dep_count.* -= 1;
+ log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), po_dep_count.* });
+ if (po_dep_count.* == 0) {
+ log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)});
+ try zcu.outdated_ready.put(zcu.gpa, depender, {});
+ }
+ },
+ }
continue;
}
const opt_po_entry = zcu.potentially_outdated.fetchSwapRemove(depender);
+ const new_po_dep_count = switch (marked_po) {
+ .not_marked_po => if (opt_po_entry) |e| e.value else 0,
+ .marked_po => if (opt_po_entry) |e| e.value - 1 else {
+ // This dependency was registered during the depender's own in-progress analysis, so the unit is
+ // not currently in `potentially_outdated`. Nothing to do.
+ continue;
+ },
+ };
+ log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), new_po_dep_count });
try zcu.outdated.putNoClobber(
zcu.gpa,
depender,
- // We do not need to increment this count for the same reason as above.
- if (opt_po_entry) |e| e.value else 0,
+ new_po_dep_count,
);
- log.debug("outdated: {}", .{depender});
- if (opt_po_entry == null) {
- // This is a new entry with no PO dependencies.
+ log.debug("outdated: {}", .{fmtAnalUnit(depender, zcu)});
+ if (new_po_dep_count == 0) {
+ log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)});
try zcu.outdated_ready.put(zcu.gpa, depender, {});
}
// If this is a Decl and was not previously PO, we must recursively
// mark dependencies on its tyval as PO.
if (opt_po_entry == null) {
+ assert(marked_po == .not_marked_po);
try zcu.markTransitiveDependersPotentiallyOutdated(depender);
}
}
}
pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
+ log.debug("up-to-date dependee: {}", .{fmtDependee(dependee, zcu)});
var it = zcu.intern_pool.dependencyIterator(dependee);
while (it.next()) |depender| {
if (zcu.outdated.getPtr(depender)) |po_dep_count| {
// This depender is already outdated, but it now has one
// less PO dependency!
po_dep_count.* -= 1;
+ log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), po_dep_count.* });
if (po_dep_count.* == 0) {
+ log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)});
try zcu.outdated_ready.put(zcu.gpa, depender, {});
}
continue;
}
// This depender is definitely at least PO, because this Decl was just analyzed
// due to being outdated.
- const ptr = zcu.potentially_outdated.getPtr(depender).?;
+ const ptr = zcu.potentially_outdated.getPtr(depender) orelse {
+ // This dependency was registered during the depender's own in-progress analysis, so the unit is
+ // not currently in `potentially_outdated`. Nothing to do.
+ continue;
+ };
if (ptr.* > 1) {
ptr.* -= 1;
+ log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), ptr.* });
continue;
}
+ log.debug("up-to-date (po deps = 0): {}", .{fmtAnalUnit(depender, zcu)});
+
// This dependency is no longer PO, i.e. is known to be up-to-date.
assert(zcu.potentially_outdated.swapRemove(depender));
// If this is a Decl, we must recursively mark dependencies on its tyval
@@ -2344,14 +2381,16 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
/// in turn be PO, due to a dependency on the original AnalUnit's tyval or IES.
fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUnit) !void {
const ip = &zcu.intern_pool;
- var it = ip.dependencyIterator(switch (maybe_outdated.unwrap()) {
+ const dependee: InternPool.Dependee = switch (maybe_outdated.unwrap()) {
.cau => |cau| switch (ip.getCau(cau).owner.unwrap()) {
.nav => |nav| .{ .nav_val = nav }, // TODO: also `nav_ref` deps when introduced
- .none, .type => return, // analysis of this `Cau` can't outdate any dependencies
+ .type => |ty| .{ .interned = ty },
+ .none => return, // analysis of this `Cau` can't outdate any dependencies
},
.func => |func_index| .{ .interned = func_index }, // IES
- });
-
+ };
+ log.debug("marking dependee po: {}", .{fmtDependee(dependee, zcu)});
+ var it = ip.dependencyIterator(dependee);
while (it.next()) |po| {
if (zcu.outdated.getPtr(po)) |po_dep_count| {
// This dependency is already outdated, but it now has one more PO
@@ -2360,14 +2399,17 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
_ = zcu.outdated_ready.swapRemove(po);
}
po_dep_count.* += 1;
+ log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), po_dep_count.* });
continue;
}
if (zcu.potentially_outdated.getPtr(po)) |n| {
// There is now one more PO dependency.
n.* += 1;
+ log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), n.* });
continue;
}
try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1);
+ log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), 1 });
// This AnalUnit was not already PO, so we must recursively mark its dependers as also PO.
try zcu.markTransitiveDependersPotentiallyOutdated(po);
}
@@ -2391,13 +2433,9 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
// In this case, we must defer to more complex logic below.
if (zcu.outdated_ready.count() > 0) {
- log.debug("findOutdatedToAnalyze: trivial '{s} {d}'", .{
- @tagName(zcu.outdated_ready.keys()[0].unwrap()),
- switch (zcu.outdated_ready.keys()[0].unwrap()) {
- inline else => |x| @intFromEnum(x),
- },
- });
- return zcu.outdated_ready.keys()[0];
+ const unit = zcu.outdated_ready.keys()[0];
+ log.debug("findOutdatedToAnalyze: trivial {}", .{fmtAnalUnit(unit, zcu)});
+ return unit;
}
// There is no single AnalUnit which is ready for re-analysis. Instead, we must assume that some
@@ -2445,8 +2483,16 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
}
}
- log.debug("findOutdatedToAnalyze: heuristic returned Cau {d} ({d} dependers)", .{
- @intFromEnum(chosen_cau.?),
+ if (chosen_cau == null) {
+ for (zcu.outdated.keys(), zcu.outdated.values()) |o, opod| {
+ const func = o.unwrap().func;
+ const nav = zcu.funcInfo(func).owner_nav;
+ std.io.getStdErr().writer().print("outdated: func {}, nav {}, name '{}', [p]o deps {}\n", .{ func, nav, ip.getNav(nav).fqn.fmt(ip), opod }) catch {};
+ }
+ }
+
+ log.debug("findOutdatedToAnalyze: heuristic returned '{}' ({d} dependers)", .{
+ fmtAnalUnit(AnalUnit.wrap(.{ .cau = chosen_cau.? }), zcu),
chosen_cau_dependers,
});
@@ -3090,7 +3136,6 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve
});
defer gpa.free(resolved_path);
const file = zcu.import_table.get(resolved_path).?;
- if (zcu.fileByIndex(file).status != .success_zir) continue;
const root_ty = zcu.fileRootType(file);
if (root_ty == .none) continue;
type_queue.putAssumeCapacityNoClobber(root_ty, null);
@@ -3102,6 +3147,8 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve
const referencer = kv.value;
try checked_types.putNoClobber(gpa, ty, {});
+ log.debug("handle type '{}'", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)});
+
// If this type has a `Cau` for resolution, it's automatically referenced.
const resolution_cau: InternPool.Cau.Index.Optional = switch (ip.indexToKey(ty)) {
.struct_type => ip.loadStructType(ty).cau,
@@ -3132,13 +3179,14 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve
// Queue any decls within this type which would be automatically analyzed.
// Keep in sync with analysis queueing logic in `Zcu.PerThread.ScanDeclIter.scanDecl`.
- const ns = Type.fromInterned(ty).getNamespace(zcu).unwrap() orelse continue;
+ const ns = Type.fromInterned(ty).getNamespace(zcu).unwrap().?;
for (zcu.namespacePtr(ns).other_decls.items) |cau| {
// These are `comptime` and `test` declarations.
// `comptime` decls are always analyzed; `test` declarations are analyzed depending on the test filter.
const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue;
const file = zcu.fileByIndex(inst_info.file);
- const zir = file.zir;
+ // If the file failed AstGen, the TrackedInst refers to the old ZIR.
+ const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*;
const declaration = zir.getDeclaration(inst_info.inst)[0];
const want_analysis = switch (declaration.name) {
.@"usingnamespace" => unreachable,
@@ -3158,27 +3206,51 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve
};
if (want_analysis) {
const unit = AnalUnit.wrap(.{ .cau = cau });
- if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer);
+ if (!result.contains(unit)) {
+ log.debug("type '{}': ref cau %{}", .{
+ Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
+ @intFromEnum(inst_info.inst),
+ });
+ try unit_queue.put(gpa, unit, referencer);
+ }
}
}
for (zcu.namespacePtr(ns).pub_decls.keys()) |nav| {
// These are named declarations. They are analyzed only if marked `export`.
const cau = ip.getNav(nav).analysis_owner.unwrap().?;
const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue;
- const declaration = zcu.fileByIndex(inst_info.file).zir.getDeclaration(inst_info.inst)[0];
+ const file = zcu.fileByIndex(inst_info.file);
+ // If the file failed AstGen, the TrackedInst refers to the old ZIR.
+ const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*;
+ const declaration = zir.getDeclaration(inst_info.inst)[0];
if (declaration.flags.is_export) {
const unit = AnalUnit.wrap(.{ .cau = cau });
- if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer);
+ if (!result.contains(unit)) {
+ log.debug("type '{}': ref cau %{}", .{
+ Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
+ @intFromEnum(inst_info.inst),
+ });
+ try unit_queue.put(gpa, unit, referencer);
+ }
}
}
for (zcu.namespacePtr(ns).priv_decls.keys()) |nav| {
// These are named declarations. They are analyzed only if marked `export`.
const cau = ip.getNav(nav).analysis_owner.unwrap().?;
const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue;
- const declaration = zcu.fileByIndex(inst_info.file).zir.getDeclaration(inst_info.inst)[0];
+ const file = zcu.fileByIndex(inst_info.file);
+ // If the file failed AstGen, the TrackedInst refers to the old ZIR.
+ const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*;
+ const declaration = zir.getDeclaration(inst_info.inst)[0];
if (declaration.flags.is_export) {
const unit = AnalUnit.wrap(.{ .cau = cau });
- if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer);
+ if (!result.contains(unit)) {
+ log.debug("type '{}': ref cau %{}", .{
+ Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
+ @intFromEnum(inst_info.inst),
+ });
+ try unit_queue.put(gpa, unit, referencer);
+ }
}
}
// Incremental compilation does not support `usingnamespace`.
@@ -3199,15 +3271,23 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve
const unit = kv.key;
try result.putNoClobber(gpa, unit, kv.value);
+ log.debug("handle unit '{}'", .{fmtAnalUnit(unit, zcu)});
+
if (zcu.reference_table.get(unit)) |first_ref_idx| {
assert(first_ref_idx != std.math.maxInt(u32));
var ref_idx = first_ref_idx;
while (ref_idx != std.math.maxInt(u32)) {
const ref = zcu.all_references.items[ref_idx];
- if (!result.contains(ref.referenced)) try unit_queue.put(gpa, ref.referenced, .{
- .referencer = unit,
- .src = ref.src,
- });
+ if (!result.contains(ref.referenced)) {
+ log.debug("unit '{}': ref unit '{}'", .{
+ fmtAnalUnit(unit, zcu),
+ fmtAnalUnit(ref.referenced, zcu),
+ });
+ try unit_queue.put(gpa, ref.referenced, .{
+ .referencer = unit,
+ .src = ref.src,
+ });
+ }
ref_idx = ref.next;
}
}
@@ -3216,10 +3296,16 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve
var ref_idx = first_ref_idx;
while (ref_idx != std.math.maxInt(u32)) {
const ref = zcu.all_type_references.items[ref_idx];
- if (!checked_types.contains(ref.referenced)) try type_queue.put(gpa, ref.referenced, .{
- .referencer = unit,
- .src = ref.src,
- });
+ if (!checked_types.contains(ref.referenced)) {
+ log.debug("unit '{}': ref type '{}'", .{
+ fmtAnalUnit(unit, zcu),
+ Type.fromInterned(ref.referenced).containerTypeName(ip).fmt(ip),
+ });
+ try type_queue.put(gpa, ref.referenced, .{
+ .referencer = unit,
+ .src = ref.src,
+ });
+ }
ref_idx = ref.next;
}
}
@@ -3293,3 +3379,72 @@ pub fn cauFileScope(zcu: *Zcu, cau: InternPool.Cau.Index) *File {
const file_index = ip.getCau(cau).zir_index.resolveFile(ip);
return zcu.fileByIndex(file_index);
}
+
+fn fmtAnalUnit(unit: AnalUnit, zcu: *Zcu) std.fmt.Formatter(formatAnalUnit) {
+ return .{ .data = .{ .unit = unit, .zcu = zcu } };
+}
+fn fmtDependee(d: InternPool.Dependee, zcu: *Zcu) std.fmt.Formatter(formatDependee) {
+ return .{ .data = .{ .dependee = d, .zcu = zcu } };
+}
+
+fn formatAnalUnit(data: struct { unit: AnalUnit, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
+ _ = .{ fmt, options };
+ const zcu = data.zcu;
+ const ip = &zcu.intern_pool;
+ switch (data.unit.unwrap()) {
+ .cau => |cau_index| {
+ const cau = ip.getCau(cau_index);
+ switch (cau.owner.unwrap()) {
+ .nav => |nav| return writer.print("cau(decl='{}')", .{ip.getNav(nav).fqn.fmt(ip)}),
+ .type => |ty| return writer.print("cau(ty='{}')", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)}),
+ .none => if (cau.zir_index.resolveFull(ip)) |resolved| {
+ const file_path = zcu.fileByIndex(resolved.file).sub_file_path;
+ return writer.print("cau(inst=('{s}', %{}))", .{ file_path, @intFromEnum(resolved.inst) });
+ } else {
+ return writer.writeAll("cau(inst=<lost>)");
+ },
+ }
+ },
+ .func => |func| {
+ const nav = zcu.funcInfo(func).owner_nav;
+ return writer.print("func('{}')", .{ip.getNav(nav).fqn.fmt(ip)});
+ },
+ }
+}
+fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
+ _ = .{ fmt, options };
+ const zcu = data.zcu;
+ const ip = &zcu.intern_pool;
+ switch (data.dependee) {
+ .src_hash => |ti| {
+ const info = ti.resolveFull(ip) orelse {
+ return writer.writeAll("inst(<lost>)");
+ };
+ const file_path = zcu.fileByIndex(info.file).sub_file_path;
+ return writer.print("inst('{s}', %{d})", .{ file_path, @intFromEnum(info.inst) });
+ },
+ .nav_val => |nav| {
+ const fqn = ip.getNav(nav).fqn;
+ return writer.print("nav('{}')", .{fqn.fmt(ip)});
+ },
+ .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
+ .struct_type, .union_type, .enum_type => return writer.print("type('{}')", .{Type.fromInterned(ip_index).containerTypeName(ip).fmt(ip)}),
+ .func => |f| return writer.print("ies('{}')", .{ip.getNav(f.owner_nav).fqn.fmt(ip)}),
+ else => unreachable,
+ },
+ .namespace => |ti| {
+ const info = ti.resolveFull(ip) orelse {
+ return writer.writeAll("namespace(<lost>)");
+ };
+ const file_path = zcu.fileByIndex(info.file).sub_file_path;
+ return writer.print("namespace('{s}', %{d})", .{ file_path, @intFromEnum(info.inst) });
+ },
+ .namespace_name => |k| {
+ const info = k.namespace.resolveFull(ip) orelse {
+ return writer.print("namespace(<lost>, '{}')", .{k.name.fmt(ip)});
+ };
+ const file_path = zcu.fileByIndex(info.file).sub_file_path;
+ return writer.print("namespace('{s}', %{d}, '{}')", .{ file_path, @intFromEnum(info.inst), k.name.fmt(ip) });
+ },
+ }
+}
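
The `fmtAnalUnit` and `fmtDependee` helpers above use the standard `std.fmt.Formatter` idiom: a plain formatting function plus a small constructor that wraps its arguments in the `data` field, so a value can be printed lazily with `{}` inside `log.debug` without allocating. A stripped-down sketch of the same pattern, using a hypothetical pair formatter:

const std = @import("std");

fn formatPair(
    data: struct { a: u32, b: u32 },
    comptime fmt: []const u8,
    options: std.fmt.FormatOptions,
    writer: anytype,
) !void {
    _ = .{ fmt, options };
    try writer.print("pair({d}, {d})", .{ data.a, data.b });
}

// The returned `Formatter` carries the data and calls `formatPair` when it is
// formatted with `{}`.
fn fmtPair(a: u32, b: u32) std.fmt.Formatter(formatPair) {
    return .{ .data = .{ .a = a, .b = b } };
}

pub fn main() void {
    std.debug.print("{}\n", .{fmtPair(3, 4)});
}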