Commit a8e53801d0
Changed files (6)
src/Package/Module.zig
@@ -488,7 +488,6 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
.zir = null,
.zoir = null,
.status = .never_loaded,
- .prev_status = .never_loaded,
.mod = new,
};
break :b new;
src/Zcu/PerThread.zig
@@ -109,7 +109,7 @@ pub fn astGenFile(
break :lock .shared;
},
- .astgen_failure, .success_zir => lock: {
+ .astgen_failure, .success => lock: {
const unchanged_metadata =
stat.size == file.stat.size and
stat.mtime == file.stat.mtime and
@@ -214,8 +214,7 @@ pub fn astGenFile(
.inode = header.stat_inode,
.mtime = header.stat_mtime,
};
- file.prev_status = file.status;
- file.status = .success_zir;
+ file.status = .success;
log.debug("AstGen cached success: {s}", .{file.sub_file_path});
if (file.zir.?.hasCompileErrors()) {
@@ -248,19 +247,11 @@ pub fn astGenFile(
pt.lockAndClearFileCompileError(file);
- // Previous ZIR is kept for two reasons:
- //
- // 1. In case an update to the file causes a Parse or AstGen failure, we
- // need to compare two successful ZIR files in order to proceed with an
- // incremental update. This avoids needlessly tossing out semantic
- // analysis work when an error is temporarily introduced.
- //
- // 2. In order to detect updates, we need to iterate over the intern pool
- // values while comparing old ZIR to new ZIR. This is better done in a
- // single-threaded context, so we need to keep both versions around
- // until that point in the pipeline. Previous ZIR data is freed after
- // that.
- if (file.zir != null and !file.zir.?.loweringFailed()) {
+ // If `zir` is not null, and `prev_zir` is null, then `TrackedInst`s are associated with `zir`.
+ // We need to keep it around!
+ // As an optimization, also check `loweringFailed`; if lowering failed and `prev_zir == null`, then
+ // this file has never passed AstGen, so we need not cache the old ZIR.
+ if (file.zir != null and file.prev_zir == null and !file.zir.?.loweringFailed()) {
assert(file.prev_zir == null);
const prev_zir_ptr = try gpa.create(Zir);
file.prev_zir = prev_zir_ptr;
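For readers tracking the bookkeeping here: the tightened condition guarantees that at most one old ZIR is ever stashed per file, and only while `TrackedInst`s may still point into it. Below is a minimal sketch of that stash-once, consume-later ownership pattern; the names (`Snapshot`, `FileState`, `stashPrev`) are illustrative, not the compiler's types.

```zig
const std = @import("std");

const Snapshot = struct { data: u32 };

const FileState = struct {
    /// Current snapshot; may be replaced on every update.
    cur: ?Snapshot = null,
    /// First still-referenced old snapshot; kept until it has been remapped.
    prev: ?*Snapshot = null,

    /// Stash `cur` into `prev` only if nothing older is already stashed.
    fn stashPrev(fs: *FileState, gpa: std.mem.Allocator) !void {
        if (fs.cur != null and fs.prev == null) {
            const p = try gpa.create(Snapshot);
            p.* = fs.cur.?;
            fs.prev = p;
        }
    }

    /// Once the old snapshot has been consumed (remapped), free it.
    fn dropPrev(fs: *FileState, gpa: std.mem.Allocator) void {
        if (fs.prev) |p| {
            gpa.destroy(p);
            fs.prev = null;
        }
    }
};

test "stash at most one previous snapshot" {
    var fs: FileState = .{ .cur = .{ .data = 1 } };
    try fs.stashPrev(std.testing.allocator);
    fs.cur = .{ .data = 2 };
    try fs.stashPrev(std.testing.allocator); // no-op: prev is already populated
    try std.testing.expectEqual(@as(u32, 1), fs.prev.?.data);
    fs.dropPrev(std.testing.allocator);
}
```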
@@ -289,8 +280,7 @@ pub fn astGenFile(
// Any potential AST errors are converted to ZIR errors here.
file.zir = try AstGen.generate(gpa, file.tree.?);
- file.prev_status = file.status;
- file.status = .success_zir;
+ file.status = .success;
log.debug("AstGen fresh success: {s}", .{file.sub_file_path});
const safety_buffer = if (Zcu.data_has_safety_tag)
@@ -383,9 +373,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
defer cleanupUpdatedFiles(gpa, &updated_files);
for (zcu.import_table.values()) |file_index| {
const file = zcu.fileByIndex(file_index);
- if (file.prev_status != file.status and file.prev_status != .never_loaded) {
- try zcu.markDependeeOutdated(.not_marked_po, .{ .file = file_index });
- }
+ assert(file.status == .success);
const old_zir = file.prev_zir orelse continue;
const new_zir = file.zir.?;
const gop = try updated_files.getOrPut(gpa, file_index);
@@ -394,9 +382,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
.file = file,
.inst_map = .{},
};
- if (!new_zir.loweringFailed()) {
- try Zcu.mapOldZirToNew(gpa, old_zir.*, new_zir, &gop.value_ptr.inst_map);
- }
+ try Zcu.mapOldZirToNew(gpa, old_zir.*, new_zir, &gop.value_ptr.inst_map);
}
if (updated_files.count() == 0)
@@ -416,13 +402,9 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
.index = @intCast(tracked_inst_unwrapped_index),
}).wrap(ip);
const new_inst = updated_file.inst_map.get(old_inst) orelse {
- // Tracking failed for this instruction.
- // This may be due to changes in the ZIR, or AstGen might have failed due to a very broken file.
- // Either way, invalidate associated `src_hash` deps.
- log.debug("tracking failed for %{d}{s}", .{
- old_inst,
- if (file.zir.?.loweringFailed()) " due to AstGen failure" else "",
- });
+ // Tracking failed for this instruction due to changes in the ZIR.
+ // Invalidate associated `src_hash` deps.
+ log.debug("tracking failed for %{d}", .{old_inst});
tracked_inst.inst = .lost;
try zcu.markDependeeOutdated(.not_marked_po, .{ .src_hash = tracked_inst_index });
continue;
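The `orelse` branch above is essentially a hash-map probe with a "lost" fallback: any instruction the old-to-new mapping could not account for is marked `.lost` and its `src_hash` dependencies are invalidated. A self-contained sketch of that shape, using an illustrative `Tracked` union and plain `u32` indices rather than the InternPool's types:

```zig
const std = @import("std");

const Tracked = union(enum) {
    /// Still resolvable: index of the instruction in the new ZIR.
    inst: u32,
    /// Mapping failed; dependents must be re-analyzed.
    lost,
};

test "remap tracked instructions, marking unmapped ones lost" {
    const gpa = std.testing.allocator;

    // old instruction index -> new instruction index, produced by the mapping pass
    var inst_map: std.AutoHashMapUnmanaged(u32, u32) = .{};
    defer inst_map.deinit(gpa);
    try inst_map.put(gpa, 10, 12);

    var tracked = [_]Tracked{ .{ .inst = 10 }, .{ .inst = 11 } };
    for (&tracked) |*t| {
        const old = t.inst;
        if (inst_map.get(old)) |new| {
            t.* = .{ .inst = new };
        } else {
            t.* = .lost; // invalidate dependencies of this instruction
        }
    }
    try std.testing.expectEqual(@as(u32, 12), tracked[0].inst);
    try std.testing.expect(tracked[1] == .lost);
}
```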
@@ -527,23 +509,19 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
for (updated_files.keys(), updated_files.values()) |file_index, updated_file| {
const file = updated_file.file;
- if (file.zir.?.loweringFailed()) {
- // Keep `prev_zir` around: it's the last usable ZIR.
- // Don't update the namespace, as we have no new data to update *to*.
- } else {
- const prev_zir = file.prev_zir.?;
- file.prev_zir = null;
- prev_zir.deinit(gpa);
- gpa.destroy(prev_zir);
-
- // For every file which has changed, re-scan the namespace of the file's root struct type.
- // These types are special-cased because they don't have an enclosing declaration which will
- // be re-analyzed (causing the struct's namespace to be re-scanned). It's fine to do this
- // now because this work is fast (no actual Sema work is happening, we're just updating the
- // namespace contents). We must do this after updating ZIR refs above, since `scanNamespace`
- // will track some instructions.
- try pt.updateFileNamespace(file_index);
- }
+
+ const prev_zir = file.prev_zir.?;
+ file.prev_zir = null;
+ prev_zir.deinit(gpa);
+ gpa.destroy(prev_zir);
+
+ // For every file which has changed, re-scan the namespace of the file's root struct type.
+ // These types are special-cased because they don't have an enclosing declaration which will
+ // be re-analyzed (causing the struct's namespace to be re-scanned). It's fine to do this
+ // now because this work is fast (no actual Sema work is happening, we're just updating the
+ // namespace contents). We must do this after updating ZIR refs above, since `scanNamespace`
+ // will track some instructions.
+ try pt.updateFileNamespace(file_index);
}
}
@@ -745,6 +723,7 @@ pub fn ensureComptimeUnitUpToDate(pt: Zcu.PerThread, cu_id: InternPool.ComptimeU
kv.value.destroy(gpa);
}
_ = zcu.transitive_failed_analysis.swapRemove(anal_unit);
+ zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit);
}
} else {
// We can trust the current information about this unit.
@@ -796,15 +775,8 @@ fn analyzeComptimeUnit(pt: Zcu.PerThread, cu_id: InternPool.ComptimeUnit.Id) Zcu
const inst_resolved = comptime_unit.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_resolved.file);
- // TODO: stop the compiler ever reaching Sema if there are failed files. That way, this check is
- // unnecessary, and we can move the below `removeDependenciesForDepender` call up with its friends
- // in `ensureComptimeUnitUpToDate`.
- if (file.status != .success_zir) return error.AnalysisFail;
const zir = file.zir.?;
- // We are about to re-analyze this unit; drop its depenndencies.
- zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit);
-
try zcu.analysis_in_progress.put(gpa, anal_unit, {});
defer assert(zcu.analysis_in_progress.swapRemove(anal_unit));
@@ -923,6 +895,7 @@ pub fn ensureNavValUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu
kv.value.destroy(gpa);
}
_ = zcu.transitive_failed_analysis.swapRemove(anal_unit);
+ ip.removeDependenciesForDepender(gpa, anal_unit);
} else {
// We can trust the current information about this unit.
if (prev_failed) return error.AnalysisFail;
@@ -993,15 +966,8 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr
const inst_resolved = old_nav.analysis.?.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_resolved.file);
- // TODO: stop the compiler ever reaching Sema if there are failed files. That way, this check is
- // unnecessary, and we can move the below `removeDependenciesForDepender` call up with its friends
- // in `ensureComptimeUnitUpToDate`.
- if (file.status != .success_zir) return error.AnalysisFail;
const zir = file.zir.?;
- // We are about to re-analyze this unit; drop its depenndencies.
- zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit);
-
try zcu.analysis_in_progress.put(gpa, anal_unit, {});
errdefer _ = zcu.analysis_in_progress.swapRemove(anal_unit);
@@ -1301,6 +1267,7 @@ pub fn ensureNavTypeUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zc
kv.value.destroy(gpa);
}
_ = zcu.transitive_failed_analysis.swapRemove(anal_unit);
+ ip.removeDependenciesForDepender(gpa, anal_unit);
} else {
// We can trust the current information about this unit.
if (prev_failed) return error.AnalysisFail;
@@ -1371,15 +1338,8 @@ fn analyzeNavType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileEr
const inst_resolved = old_nav.analysis.?.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_resolved.file);
- // TODO: stop the compiler ever reaching Sema if there are failed files. That way, this check is
- // unnecessary, and we can move the below `removeDependenciesForDepender` call up with its friends
- // in `ensureComptimeUnitUpToDate`.
- if (file.status != .success_zir) return error.AnalysisFail;
const zir = file.zir.?;
- // We are about to re-analyze this unit; drop its depenndencies.
- zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit);
-
try zcu.analysis_in_progress.put(gpa, anal_unit, {});
defer _ = zcu.analysis_in_progress.swapRemove(anal_unit);
@@ -1828,7 +1788,6 @@ fn updateFileNamespace(pt: Zcu.PerThread, file_index: Zcu.File.Index) Allocator.
const zcu = pt.zcu;
const file = zcu.fileByIndex(file_index);
- assert(file.status == .success_zir);
const file_root_type = zcu.fileRootType(file_index);
if (file_root_type == .none) return;
@@ -1865,9 +1824,6 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
assert(file.getMode() == .zig);
assert(zcu.fileRootType(file_index) == .none);
- if (file.status != .success_zir) {
- return error.AnalysisFail;
- }
assert(file.zir != null);
const new_namespace_index = try pt.createNamespace(.{
@@ -1910,7 +1866,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
}
}
-pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult {
+pub fn importPkg(pt: Zcu.PerThread, mod: *Module) Allocator.Error!Zcu.ImportFileResult {
const zcu = pt.zcu;
const gpa = zcu.gpa;
@@ -1984,7 +1940,6 @@ pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult {
.zir = null,
.zoir = null,
.status = .never_loaded,
- .prev_status = .never_loaded,
.mod = mod,
};
@@ -1997,13 +1952,19 @@ pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult {
};
}
-/// Called from a worker thread during AstGen.
+/// Called from a worker thread during AstGen (with the Compilation mutex held).
/// Also called from Sema during semantic analysis.
+/// Does not attempt to load the file from disk; just returns a corresponding `*Zcu.File`.
pub fn importFile(
pt: Zcu.PerThread,
cur_file: *Zcu.File,
import_string: []const u8,
-) !Zcu.ImportFileResult {
+) error{
+ OutOfMemory,
+ ModuleNotFound,
+ ImportOutsideModulePath,
+ CurrentWorkingDirectoryUnlinked,
+}!Zcu.ImportFileResult {
const zcu = pt.zcu;
const mod = cur_file.mod;
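`importFile` now carries an explicit error set, and the next hunk narrows `std.fs.path.relative`'s wider set by asserting that `error.Unexpected` cannot occur. A standalone sketch of that narrowing pattern, with made-up functions (`lookup`, `lookupChecked`) standing in for the real calls:

```zig
const std = @import("std");

// Hypothetical callee with a wider error set than we want to expose.
fn lookup(i: usize) error{ OutOfMemory, Unexpected, NotFound }!usize {
    if (i == 0) return error.NotFound;
    return i;
}

/// Narrow the callee's error set: assert `Unexpected` is impossible for our inputs,
/// and let the remaining errors flow through, so the wrapper can declare a precise set.
fn lookupChecked(i: usize) error{ OutOfMemory, NotFound }!usize {
    return lookup(i) catch |err| switch (err) {
        error.Unexpected => unreachable, // asserted unreachable for these inputs
        else => |e| return e,
    };
}

test lookupChecked {
    try std.testing.expectError(error.NotFound, lookupChecked(0));
    try std.testing.expectEqual(@as(usize, 3), try lookupChecked(3));
}
```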
@@ -2061,7 +2022,10 @@ pub fn importFile(
defer gpa.free(resolved_root_path);
const sub_file_path = p: {
- const relative = try std.fs.path.relative(gpa, resolved_root_path, resolved_path);
+ const relative = std.fs.path.relative(gpa, resolved_root_path, resolved_path) catch |err| switch (err) {
+ error.Unexpected => unreachable,
+ else => |e| return e,
+ };
errdefer gpa.free(relative);
if (!isUpDir(relative) and !std.fs.path.isAbsolute(relative)) {
@@ -2089,13 +2053,15 @@ pub fn importFile(
gop.value_ptr.* = new_file_index;
new_file.* = .{
.sub_file_path = sub_file_path,
+
+ .status = .never_loaded,
.stat = undefined,
+
.source = null,
.tree = null,
.zir = null,
.zoir = null,
- .status = .never_loaded,
- .prev_status = .never_loaded,
+
.mod = mod,
};
@@ -2835,7 +2801,7 @@ pub fn getErrorValueFromSlice(pt: Zcu.PerThread, name: []const u8) Allocator.Err
/// `file.zir` must be unchanged from the last update, as it is used to determine if there is such an entry.
fn lockAndClearFileCompileError(pt: Zcu.PerThread, file: *Zcu.File) void {
const zir = file.zir orelse return;
- if (zir.hasCompileErrors()) return;
+ if (!zir.hasCompileErrors()) return;
pt.zcu.comp.mutex.lock();
defer pt.zcu.comp.mutex.unlock();
@@ -3196,6 +3162,7 @@ pub fn linkerUpdateLineNumber(pt: Zcu.PerThread, ti: InternPool.TrackedInst.Inde
}
}
+/// Sets `File.status` of `file_index` to `retryable_failure`, and stores an error in `pt.zcu.failed_files`.
pub fn reportRetryableAstGenError(
pt: Zcu.PerThread,
src: Zcu.AstGenSrc,
@@ -3231,13 +3198,18 @@ pub fn reportRetryableAstGenError(
});
errdefer err_msg.destroy(gpa);
- {
- zcu.comp.mutex.lock();
- defer zcu.comp.mutex.unlock();
- try zcu.failed_files.putNoClobber(gpa, file, err_msg);
+ zcu.comp.mutex.lock();
+ defer zcu.comp.mutex.unlock();
+ const gop = try zcu.failed_files.getOrPut(gpa, file);
+ if (gop.found_existing) {
+ if (gop.value_ptr.*) |old_err_msg| {
+ old_err_msg.destroy(gpa);
+ }
}
+ gop.value_ptr.* = err_msg;
}
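Switching from `putNoClobber` to `getOrPut` lets a file that already has a `failed_files` entry receive a new message: the old one is freed and replaced instead of tripping an assertion. A minimal sketch of the same pattern against a plain `std.AutoHashMapUnmanaged`, with illustrative key/value types rather than the real `failed_files` map:

```zig
const std = @import("std");

test "replace an existing failure entry, freeing the old message" {
    const gpa = std.testing.allocator;

    var failed: std.AutoHashMapUnmanaged(u32, ?[]u8) = .{};
    defer {
        var it = failed.valueIterator();
        while (it.next()) |v| if (v.*) |msg| gpa.free(msg);
        failed.deinit(gpa);
    }

    // First report for file 7.
    try failed.put(gpa, 7, try gpa.dupe(u8, "unable to stat file"));

    // A second report for the same file would make `putNoClobber` illegal, so
    // use `getOrPut` and free the message being replaced.
    const new_msg = try gpa.dupe(u8, "unable to open file");
    const gop = try failed.getOrPut(gpa, 7);
    if (gop.found_existing) {
        if (gop.value_ptr.*) |old| gpa.free(old);
    }
    gop.value_ptr.* = new_msg;

    try std.testing.expectEqualStrings("unable to open file", failed.get(7).?.?);
}
```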
+/// Sets `File.status` of `file_index` to `retryable_failure`, and stores an error in `pt.zcu.failed_files`.
pub fn reportRetryableFileError(
pt: Zcu.PerThread,
file_index: Zcu.File.Index,
@@ -3771,7 +3743,6 @@ fn recreateStructType(
const inst_info = key.zir_index.resolveFull(ip).?;
const file = zcu.fileByIndex(inst_info.file);
- assert(file.status == .success_zir); // otherwise inst tracking failed
const zir = file.zir.?;
assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
@@ -3844,7 +3815,6 @@ fn recreateUnionType(
const inst_info = key.zir_index.resolveFull(ip).?;
const file = zcu.fileByIndex(inst_info.file);
- assert(file.status == .success_zir); // otherwise inst tracking failed
const zir = file.zir.?;
assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
@@ -3931,7 +3901,6 @@ fn recreateEnumType(
const inst_info = key.zir_index.resolveFull(ip).?;
const file = zcu.fileByIndex(inst_info.file);
- assert(file.status == .success_zir); // otherwise inst tracking failed
const zir = file.zir.?;
assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
@@ -4075,7 +4044,6 @@ pub fn ensureNamespaceUpToDate(pt: Zcu.PerThread, namespace_index: Zcu.Namespace
const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_info.file);
- if (file.status != .success_zir) return error.AnalysisFail;
const zir = file.zir.?;
assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
src/Builtin.zig
@@ -299,7 +299,7 @@ pub fn populateFile(comp: *Compilation, mod: *Module, file: *File) !void {
file.zir = try AstGen.generate(comp.gpa, file.tree.?);
assert(!file.zir.?.hasCompileErrors()); // builtin.zig must not have astgen errors
- file.status = .success_zir;
+ file.status = .success;
// Note that whilst we set `zir` here, we populated `path_digest`
// all the way back in `Package.Module.create`.
}
src/Compilation.zig
@@ -3203,8 +3203,6 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
}
if (comp.zcu) |zcu| {
- const ip = &zcu.intern_pool;
-
for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| {
if (error_msg) |msg| {
try addModuleErrorMsg(zcu, &bundle, msg.*);
@@ -3277,20 +3275,6 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
if (!refs.contains(anal_unit)) continue;
}
- report_ok: {
- const file_index = switch (anal_unit.unwrap()) {
- .@"comptime" => |cu| ip.getComptimeUnit(cu).zir_index.resolveFile(ip),
- .nav_val, .nav_ty => |nav| ip.getNav(nav).analysis.?.zir_index.resolveFile(ip),
- .type => |ty| Type.fromInterned(ty).typeDeclInst(zcu).?.resolveFile(ip),
- .func => |ip_index| zcu.funcInfo(ip_index).zir_body_inst.resolveFile(ip),
- .memoized_state => break :report_ok, // always report std.builtin errors
- };
-
- // Skip errors for AnalUnits within files that had a parse failure.
- // We'll try again once parsing succeeds.
- if (!zcu.fileByIndex(file_index).okToReportErrors()) continue;
- }
-
std.log.scoped(.zcu).debug("analysis error '{s}' reported from unit '{}'", .{
error_msg.msg,
zcu.fmtAnalUnit(anal_unit),
@@ -3318,12 +3302,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
}
}
}
- for (zcu.failed_codegen.keys(), zcu.failed_codegen.values()) |nav, error_msg| {
- if (!zcu.navFileScope(nav).okToReportErrors()) continue;
+ for (zcu.failed_codegen.values()) |error_msg| {
try addModuleErrorMsg(zcu, &bundle, error_msg.*);
}
- for (zcu.failed_types.keys(), zcu.failed_types.values()) |ty_index, error_msg| {
- if (!zcu.typeFileScope(ty_index).okToReportErrors()) continue;
+ for (zcu.failed_types.values()) |error_msg| {
try addModuleErrorMsg(zcu, &bundle, error_msg.*);
}
for (zcu.failed_exports.values()) |value| {
@@ -3827,12 +3809,35 @@ fn performAllTheWorkInner(
if (comp.zcu) |zcu| {
const pt: Zcu.PerThread = .activate(zcu, .main);
defer pt.deactivate();
+
+ try reportMultiModuleErrors(pt);
+
+ const any_fatal_files = for (zcu.import_table.values()) |file_index| {
+ const file = zcu.fileByIndex(file_index);
+ if (file.getMode() == .zon) continue;
+ switch (file.status) {
+ .never_loaded => unreachable, // everything is loaded by the workers
+ .retryable_failure, .astgen_failure => break true,
+ .success => {},
+ }
+ } else false;
+
+ if (any_fatal_files) {
+ // We give up right now! No updating of ZIR refs, no nothing. The idea is that this prevents
+ // us from invalidating lots of incremental dependencies due to files with e.g. parse errors.
+ // However, this means our analysis data is invalid, so we want to omit all analysis errors.
+ // To do that, let's just clear the analysis roots!
+
+ assert(zcu.failed_files.count() > 0); // we will get an error
+ zcu.analysis_roots.clear(); // no analysis happened
+ return;
+ }
+
if (comp.incremental) {
const update_zir_refs_node = main_progress_node.start("Update ZIR References", 0);
defer update_zir_refs_node.end();
try pt.updateZirRefs();
}
- try reportMultiModuleErrors(pt);
try zcu.flushRetryableFailures();
zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
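The early bail-out uses Zig's `for` loop in expression position: `break true` on the first fatal status, with the `else` branch supplying `false` when the loop completes. A tiny standalone example of the construct, with a trimmed-down status enum:

```zig
const std = @import("std");

const Status = enum { retryable_failure, astgen_failure, success };

test "for-else scan: did any file fail fatally?" {
    const statuses = [_]Status{ .success, .astgen_failure, .success };

    // `for` used as an expression: `break true` on the first fatal status,
    // otherwise fall through to the `else` branch, which yields `false`.
    const any_fatal = for (statuses) |s| {
        switch (s) {
            .retryable_failure, .astgen_failure => break true,
            .success => {},
        }
    } else false;

    try std.testing.expect(any_fatal);
}
```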
@@ -4294,11 +4299,12 @@ fn workerAstGenFile(
pt.astGenFile(file, path_digest) catch |err| switch (err) {
error.AnalysisFail => return,
else => {
- file.status = .retryable_failure;
pt.reportRetryableAstGenError(src, file_index, err) catch |oom| switch (oom) {
- // Swallowing this error is OK because it's implied to be OOM when
- // there is a missing `failed_files` error message.
- error.OutOfMemory => {},
+ error.OutOfMemory => {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ comp.setAllocFailure();
+ },
};
return;
},
src/main.zig
@@ -6134,7 +6134,6 @@ fn cmdAstCheck(
var file: Zcu.File = .{
.status = .never_loaded,
- .prev_status = .never_loaded,
.sub_file_path = undefined,
.stat = undefined,
.source = null,
@@ -6512,7 +6511,6 @@ fn cmdDumpZir(
var file: Zcu.File = .{
.status = .never_loaded,
- .prev_status = .never_loaded,
.sub_file_path = undefined,
.stat = undefined,
.source = null,
@@ -6578,7 +6576,6 @@ fn cmdChangelist(
var file: Zcu.File = .{
.status = .never_loaded,
- .prev_status = .never_loaded,
.sub_file_path = old_source_file,
.stat = .{
.size = stat.size,
src/Zcu.zig
@@ -658,11 +658,27 @@ pub const Namespace = struct {
};
pub const File = struct {
- status: Status,
- prev_status: Status,
/// Relative to the owning package's root source directory.
/// Memory is stored in gpa, owned by File.
sub_file_path: []const u8,
+
+ status: enum {
+ /// We have not yet attempted to load this file.
+ /// `stat` is not populated and may be `undefined`.
+ never_loaded,
+ /// A filesystem access failed. It should be retried on the next update.
+ /// There is a `failed_files` entry containing a non-`null` message.
+ /// `stat` is not populated and may be `undefined`.
+ retryable_failure,
+ /// Parsing/AstGen/ZonGen of this file has failed.
+ /// There is an error in `zir` or `zoir`.
+ /// There is a `failed_files` entry (with a `null` message).
+ /// `stat` is populated.
+ astgen_failure,
+ /// Parsing and AstGen/ZonGen of this file have succeeded.
+ /// `stat` is populated.
+ success,
+ },
/// Whether this is populated depends on `status`.
stat: Cache.File.Stat,
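The status type is now declared inline on the field (the separate `pub const Status` is removed in a later hunk). A trimmed-down sketch of that layout, showing that the unnamed enum can still be switched on exhaustively; the names and invariants here are simplified stand-ins, not the real `Zcu.File`:

```zig
const std = @import("std");

// Illustrative only: a cut-down version of the new layout, where the status enum is
// declared inline in the struct together with its invariants.
const File = struct {
    sub_file_path: []const u8,
    status: enum {
        /// Not yet loaded; `stat` may be `undefined`.
        never_loaded,
        /// A filesystem access failed; retry on the next update.
        retryable_failure,
        /// Parsing/AstGen failed; `stat` is populated.
        astgen_failure,
        /// Parsing and AstGen succeeded; `stat` is populated.
        success,
    } = .never_loaded,

    /// The inline enum has no separate name, but callers can still switch on it
    /// exhaustively, so adding a status forces every switch to be revisited.
    fn loadedSuccessfully(file: File) bool {
        return switch (file.status) {
            .never_loaded, .retryable_failure, .astgen_failure => false,
            .success => true,
        };
    }
};

test File {
    var f: File = .{ .sub_file_path = "foo.zig" };
    try std.testing.expect(!f.loadedSuccessfully());
    f.status = .success;
    try std.testing.expect(f.loadedSuccessfully());
}
```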
@@ -678,19 +694,17 @@ pub const File = struct {
/// List of references to this file, used for multi-package errors.
references: std.ArrayListUnmanaged(File.Reference) = .empty,
- /// The most recent successful ZIR for this file, with no errors.
- /// This is only populated when a previously successful ZIR
- /// newly introduces compile errors during an update. When ZIR is
- /// successful, this field is unloaded.
+ /// The ZIR for this file from the last update with no file failures. As such, this ZIR is never
+ /// failed (although it may have compile errors).
+ ///
+ /// Because updates with file failures do not perform ZIR mapping or semantic analysis, we keep
+ /// this around so we have the "old" ZIR to map when an update is ready to do so. Once such an
+ /// update occurs, this field is unloaded, since it is no longer necessary.
+ ///
+ /// In other words, if `TrackedInst`s are tied to ZIR other than what's in the `zir` field, this
+ /// field is populated with that old ZIR.
prev_zir: ?*Zir = null,
- pub const Status = enum {
- never_loaded,
- retryable_failure,
- astgen_failure,
- success_zir,
- };
-
/// A single reference to a file.
pub const Reference = union(enum) {
/// The file is imported directly (i.e. not as a package) with @import.
@@ -763,7 +777,7 @@ pub const File = struct {
return error.FileTooBig;
const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
- defer gpa.free(source);
+ errdefer gpa.free(source);
const amt = try f.readAll(source);
if (amt != stat.size)
@@ -773,8 +787,9 @@ pub const File = struct {
// used for error reporting. We need to keep the stat fields stale so that
// astGenFile can know to regenerate ZIR.
- errdefer comptime unreachable; // don't error after populating `source`
file.source = source;
+ errdefer comptime unreachable; // don't error after populating `source`
+
return .{
.bytes = source,
.stat = .{
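Two things change in `getSource` above: the buffer is now freed with `errdefer` rather than `defer`, so it survives on success, and `errdefer comptime unreachable` moves below the point where ownership is handed to `file.source`, since it forbids any later fallible operation. A self-contained sketch of the idiom with hypothetical names (`readInto`), not the real `getSource`:

```zig
const std = @import("std");

/// Illustrative sketch: the buffer is freed with `errdefer`, so it survives on success;
/// once it has been stored in `out` the caller owns it, and `errdefer comptime unreachable`
/// makes any later fallible operation a compile error rather than a potential leak or
/// double-free.
fn readInto(gpa: std.mem.Allocator, out: *?[]u8, reader: anytype, size: usize) ![]u8 {
    const buf = try gpa.alloc(u8, size);
    errdefer gpa.free(buf); // runs only if a later `try` in this function fails

    const amt = try reader.readAll(buf);
    if (amt != size) return error.UnexpectedEndOfFile;

    out.* = buf; // ownership handed off to `out`
    errdefer comptime unreachable; // from here on, returning an error is a compile error
    return buf;
}

test readInto {
    const gpa = std.testing.allocator;
    var stream = std.io.fixedBufferStream("hello");
    var stored: ?[]u8 = null;
    defer if (stored) |s| gpa.free(s);
    const got = try readInto(gpa, &stored, stream.reader(), 5);
    try std.testing.expectEqualStrings("hello", got);
}
```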
@@ -849,13 +864,6 @@ pub const File = struct {
std.debug.print("{s}:{d}:{d}\n", .{ file.sub_file_path, loc.line + 1, loc.column + 1 });
}
- pub fn okToReportErrors(file: File) bool {
- return switch (file.status) {
- .astgen_failure => false,
- else => true,
- };
- }
-
/// Add a reference to this file during AstGen.
pub fn addReference(file: *File, zcu: *Zcu, ref: File.Reference) !void {
// Don't add the same module root twice. Note that since we always add module roots at the
@@ -3295,19 +3303,6 @@ pub fn optimizeMode(zcu: *const Zcu) std.builtin.OptimizeMode {
return zcu.root_mod.optimize_mode;
}
-fn lockAndClearFileCompileError(zcu: *Zcu, file: *File) void {
- switch (file.status) {
- .success_zir, .retryable_failure => {},
- .never_loaded, .astgen_failure => {
- zcu.comp.mutex.lock();
- defer zcu.comp.mutex.unlock();
- if (zcu.failed_files.fetchSwapRemove(file)) |kv| {
- if (kv.value) |msg| msg.destroy(zcu.gpa); // Delete previous error message.
- }
- },
- }
-}
-
pub fn handleUpdateExports(
zcu: *Zcu,
export_indices: []const Export.Index,
@@ -3662,9 +3657,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
// `test` declarations are analyzed depending on the test filter.
const inst_info = nav.analysis.?.zir_index.resolveFull(ip) orelse continue;
const file = zcu.fileByIndex(inst_info.file);
- // If the file failed AstGen, the TrackedInst refers to the old ZIR.
- const zir = if (file.status == .success_zir) file.zir.? else file.prev_zir.?.*;
- const decl = zir.getDeclaration(inst_info.inst);
+ const decl = file.zir.?.getDeclaration(inst_info.inst);
if (!comp.config.is_test or file.mod != zcu.main_mod) continue;
@@ -3694,9 +3687,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
// These are named declarations. They are analyzed only if marked `export`.
const inst_info = ip.getNav(nav).analysis.?.zir_index.resolveFull(ip) orelse continue;
const file = zcu.fileByIndex(inst_info.file);
- // If the file failed AstGen, the TrackedInst refers to the old ZIR.
- const zir = if (file.status == .success_zir) file.zir.? else file.prev_zir.?.*;
- const decl = zir.getDeclaration(inst_info.inst);
+ const decl = file.zir.?.getDeclaration(inst_info.inst);
if (decl.linkage == .@"export") {
const unit: AnalUnit = .wrap(.{ .nav_val = nav });
if (!result.contains(unit)) {
@@ -3712,9 +3703,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
// These are named declarations. They are analyzed only if marked `export`.
const inst_info = ip.getNav(nav).analysis.?.zir_index.resolveFull(ip) orelse continue;
const file = zcu.fileByIndex(inst_info.file);
- // If the file failed AstGen, the TrackedInst refers to the old ZIR.
- const zir = if (file.status == .success_zir) file.zir.? else file.prev_zir.?.*;
- const decl = zir.getDeclaration(inst_info.inst);
+ const decl = file.zir.?.getDeclaration(inst_info.inst);
if (decl.linkage == .@"export") {
const unit: AnalUnit = .wrap(.{ .nav_val = nav });
if (!result.contains(unit)) {