//! Zig Compilation Unit
//!
//! Compilation of all Zig source code is represented by one `Zcu`.
//!
//! Each `Compilation` has either one `Zcu` or none, depending on whether
//! any Zig source code is present.
const Zcu = @This();
const builtin = @import("builtin");

const std = @import("std");
const Io = std.Io;
const Writer = std.Io.Writer;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.zcu);
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Target = std.Target;
const Ast = std.zig.Ast;

const Compilation = @import("Compilation.zig");
const Cache = std.Build.Cache;
pub const Value = @import("Value.zig");
pub const Type = @import("Type.zig");
const Package = @import("Package.zig");
const link = @import("link.zig");
const Air = @import("Air.zig");
const Zir = std.zig.Zir;
const trace = @import("tracy.zig").trace;
const AstGen = std.zig.AstGen;
const Sema = @import("Sema.zig");
const target_util = @import("target.zig");
const build_options = @import("build_options");
const isUpDir = @import("introspect.zig").isUpDir;
const InternPool = @import("InternPool.zig");
const Alignment = InternPool.Alignment;
const AnalUnit = InternPool.AnalUnit;
const BuiltinFn = std.zig.BuiltinFn;
const LlvmObject = @import("codegen/llvm.zig").Object;
const dev = @import("dev.zig");
const Zoir = std.zig.Zoir;
const ZonGen = std.zig.ZonGen;

comptime {
    @setEvalBranchQuota(4000);
    for (
        @typeInfo(Zir.Inst.Ref).@"enum".fields,
        @typeInfo(Air.Inst.Ref).@"enum".fields,
        @typeInfo(InternPool.Index).@"enum".fields,
    ) |zir_field, air_field, ip_field| {
        assert(mem.eql(u8, zir_field.name, ip_field.name));
        assert(mem.eql(u8, air_field.name, ip_field.name));
    }
}
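
// The assertions above keep the named tags of `Zir.Inst.Ref`, `Air.Inst.Ref`,
// and `InternPool.Index` in lockstep, which is what makes conversions between
// them simple integer casts. Illustrative sketch (not part of the build):
//
//     const ip_index: InternPool.Index = @enumFromInt(@intFromEnum(Zir.Inst.Ref.bool_true));
//     assert(ip_index == .bool_true);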

/// General-purpose allocator. Used for both temporary and long-term storage.
gpa: Allocator,
comp: *Compilation,
/// If the ZCU is emitting an LLVM object (i.e. we are using the LLVM backend), then this is the
/// `LlvmObject` we are emitting to.
llvm_object: ?LlvmObject.Ptr,

/// Pointer to externally managed resource.
root_mod: *Package.Module,
/// Normally, `main_mod` and `root_mod` are the same. The exception is `zig test`, in which
/// `root_mod` is the test runner, and `main_mod` is the user's source file which has the tests.
main_mod: *Package.Module,
std_mod: *Package.Module,
sema_prog_node: std.Progress.Node = .none,
codegen_prog_node: std.Progress.Node = .none,
/// The number of codegen jobs which are pending or in-progress. Whichever thread drops this value
/// to 0 is responsible for ending `codegen_prog_node`. While semantic analysis is happening, this
/// value bottoms out at 1 instead of 0, to ensure that it can only drop to 0 after analysis is
/// completed (since semantic analysis could trigger more codegen work).
pending_codegen_jobs: std.atomic.Value(u32) = .init(0),

/// This is the progress node *under* `sema_prog_node` which is currently running.
/// When we have to pause to analyze something else, we just temporarily rename this node.
/// Eventually, when we thread semantic analysis, we will want one of these per thread.
cur_sema_prog_node: std.Progress.Node = .none,

/// Used by AstGen worker to load and store ZIR cache.
global_zir_cache: Cache.Directory,
/// Used by AstGen worker to load and store ZIR cache.
local_zir_cache: Cache.Directory,

/// This is where all `Export` values are stored. Not all values here are necessarily valid exports;
/// to enumerate all exports, `single_exports` and `multi_exports` must be consulted.
all_exports: std.ArrayList(Export) = .empty,
/// This is a list of free indices in `all_exports`. These indices may be reused by exports from
/// future semantic analysis.
free_exports: std.ArrayList(Export.Index) = .empty,
/// Maps from an `AnalUnit` which performs a single export, to the index into `all_exports` of
/// the export it performs. Note that the key is not the `Decl` being exported, but the `AnalUnit`
/// whose analysis triggered the export.
single_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, Export.Index) = .empty,
/// Like `single_exports`, but for `AnalUnit`s which perform multiple exports.
/// The exports are `all_exports.items[index..][0..len]`.
multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
    index: u32,
    len: u32,
}) = .{},
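
// Illustrative sketch of enumerating the exports performed by one `AnalUnit`,
// per the conventions documented above (not part of the build):
//
//     if (zcu.single_exports.get(unit)) |idx| {
//         // exactly one export: `idx.ptr(zcu)`
//     } else if (zcu.multi_exports.get(unit)) |m| {
//         // several exports: `zcu.all_exports.items[m.index..][0..m.len]`
//     }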

/// Key is the digest returned by `Builtin.hash`; value is the corresponding module.
builtin_modules: std.AutoArrayHashMapUnmanaged(Cache.BinDigest, *Package.Module) = .empty,

/// Populated as soon as the `Compilation` is created. Guaranteed to contain all modules, even builtin ones.
/// Modules whose root file is not a Zig or ZON file have the value `.none`.
module_roots: std.AutoArrayHashMapUnmanaged(*Package.Module, File.Index.Optional) = .empty,

/// The set of all the Zig source files in the Zig Compilation Unit. Tracked in
/// order to iterate over it and check which source files have been modified on
/// the file system when an update is requested, as well as to cache `@import`
/// results.
///
/// Always accessed through `ImportTableAdapter`, where keys are fully resolved
/// file paths in order to ensure files are properly deduplicated. This table owns
/// the keys and values.
///
/// Protected by Compilation's mutex.
///
/// Not serialized. This state is reconstructed during the first call to
/// `Compilation.update` of the process for a given `Compilation`.
import_table: std.ArrayHashMapUnmanaged(
    File.Index,
    void,
    struct {
        pub const hash = @compileError("all accesses should be through ImportTableAdapter");
        pub const eql = @compileError("all accesses should be through ImportTableAdapter");
    },
    true, // This is necessary! Without it, the map tries to use its Context to rehash. #21918
) = .empty,

/// The set of all files in `import_table` which are "alive" this update, meaning
/// they are reachable by traversing imports starting from an analysis root. This
/// is usually all files in `import_table`, but some could be omitted if an incremental
/// update removes an import, or if a module specified on the CLI is never imported.
/// Reconstructed on every update, after AstGen and before Sema.
/// Value is why the file is alive.
alive_files: std.AutoArrayHashMapUnmanaged(File.Index, File.Reference) = .empty,

/// If this is populated, a "file exists in multiple modules" error should be emitted.
/// This causes file errors to not be shown, because we don't really know which files
/// should be alive (because the user has messed up their imports somewhere!).
/// Cleared and recomputed every update, after AstGen and before Sema.
multi_module_err: ?struct {
    file: File.Index,
    modules: [2]*Package.Module,
    refs: [2]File.Reference,
} = null,

/// The set of all the files which have been loaded with `@embedFile` in the Module.
/// We keep track of this in order to iterate over it and check which files have been
/// modified on the file system when an update is requested, as well as to cache
/// `@embedFile` results.
///
/// Like `import_table`, this is accessed through `EmbedTableAdapter`, so that it is keyed
/// on the `Compilation.Path` of the `EmbedFile`.
///
/// This table owns all of the `*EmbedFile` memory, which is allocated into gpa.
embed_table: std.ArrayHashMapUnmanaged(
    *EmbedFile,
    void,
    struct {
        pub const hash = @compileError("all accesses should be through EmbedTableAdapter");
        pub const eql = @compileError("all accesses should be through EmbedTableAdapter");
    },
    true, // This is necessary! Without it, the map tries to use its Context to rehash. #21918
) = .empty,

/// Stores all Type and Value objects.
/// The idea is that this will be periodically garbage-collected, but such logic
/// is not yet implemented.
intern_pool: InternPool = .empty,

analysis_in_progress: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty,
/// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator.
failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, *ErrorMsg) = .empty,
/// This `AnalUnit` failed semantic analysis because it required analysis of another `AnalUnit` which itself failed.
transitive_failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty,
/// This `Nav` succeeded analysis, but failed codegen.
/// This may be a simple "value" `Nav`, or it may be a function.
/// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator.
/// While multiple threads are active (most of the time!), this is guarded by `zcu.comp.mutex`, as
/// codegen and linking run on a separate thread.
failed_codegen: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, *ErrorMsg) = .empty,
failed_types: std.AutoArrayHashMapUnmanaged(InternPool.Index, *ErrorMsg) = .empty,
/// Keep track of `@compileLog`s per `AnalUnit`.
/// We track the source location of the first `@compileLog` call, and all logged lines as a linked list.
/// The list is singly linked, but we do track its tail for fast appends (optimizing many logs in one unit).
compile_logs: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
    base_node_inst: InternPool.TrackedInst.Index,
    node_offset: Ast.Node.Offset,
    first_line: CompileLogLine.Index,
    last_line: CompileLogLine.Index,
    pub fn src(self: @This()) LazySrcLoc {
        return .{
            .base_node_inst = self.base_node_inst,
            .offset = LazySrcLoc.Offset.nodeOffset(self.node_offset),
        };
    }
}) = .empty,
compile_log_lines: std.ArrayList(CompileLogLine) = .empty,
free_compile_log_lines: std.ArrayList(CompileLogLine.Index) = .empty,
/// This tracks files which triggered errors when generating AST/ZIR/ZOIR.
/// If not `null`, the value is a retryable error (the file status is guaranteed
/// to be `.retryable_failure`). Otherwise, the file status is `.astgen_failure`
/// or `.success`, and there are ZIR/ZOIR errors which should be printed.
/// We just store a `[]u8` instead of a full `*ErrorMsg`, because the source
/// location is always the entire file. The `[]u8` memory is owned by the map
/// and allocated into `gpa`.
failed_files: std.AutoArrayHashMapUnmanaged(File.Index, ?[]u8) = .empty,
/// AstGen is not aware of modules, and so cannot determine whether an import
/// string makes sense. That is the job of a traversal after AstGen.
///
/// There are several ways in which an import can fail:
///
/// * It is an import of a file which does not exist. This case is not handled
///   by this field, but with a `failed_files` entry on the *imported* file.
/// * It is an import of a module which does not exist in the current module's
///   dependency table. This happens at `Sema` time, so is not tracked by this
///   field.
/// * It is an import which reaches outside of the current module's root
///   directory. This is tracked by this field.
/// * It is an import which reaches into an "illegal import directory". Right now,
///   the only such directory is 'global_cache/b/', but in general, these are
///   directories the compiler treats specially. This is tracked by this field.
///
/// This is a flat array containing all of the relevant errors. It is cleared and
/// recomputed on every update. The errors here are fatal, i.e. they block any
/// semantic analysis this update.
///
/// Allocated into gpa.
failed_imports: std.ArrayList(struct {
    file_index: File.Index,
    import_string: Zir.NullTerminatedString,
    import_token: Ast.TokenIndex,
    kind: enum { file_outside_module_root, illegal_zig_import },
}) = .empty,
failed_exports: std.AutoArrayHashMapUnmanaged(Export.Index, *ErrorMsg) = .empty,
/// If analysis failed due to a cimport error, the corresponding Clang errors
/// are stored here.
cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .empty,

/// Maximum number of distinct error values, set by --error-limit
error_limit: ErrorInt,

/// Value is the number of PO dependencies of this AnalUnit.
/// This value will decrease as we perform semantic analysis to learn what is outdated.
/// If any of these PO deps is outdated, the unit will be moved to `outdated`.
potentially_outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
/// Value is the number of PO dependencies of this AnalUnit.
/// Once this value drops to 0, the AnalUnit is a candidate for re-analysis.
outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
/// This contains all `AnalUnit`s in `outdated` whose PO dependency count is 0.
/// Such `AnalUnit`s are ready for immediate re-analysis.
/// See `findOutdatedToAnalyze` for details.
outdated_ready: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty,
/// This contains a list of `AnalUnit`s whose analysis or codegen failed, but the
/// failure was something like running out of disk space, and trying again may
/// succeed. On the next update, we will flush this list, marking all members of
/// it as outdated.
retryable_failures: std.ArrayList(AnalUnit) = .empty,
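
// Worked example of the bookkeeping above: if unit `U` has two PO dependencies
// `A` and `B`, then `potentially_outdated` maps `U` to 2. If `A` is re-analyzed
// and found up-to-date, the count drops to 1; if instead `A` turns out to be
// outdated, `U` moves to `outdated` with a count of 1 (only `B` still pending).
// Once an entry in `outdated` reaches a count of 0, the unit is added to
// `outdated_ready` and may be re-analyzed immediately.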

func_body_analysis_queued: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .empty,
nav_val_analysis_queued: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .empty,

/// These are the modules which we initially queue for analysis in `Compilation.update`.
/// `resolveReferences` will use these as the root of its reachability traversal.
analysis_roots_buffer: [4]*Package.Module,
analysis_roots_len: usize = 0,
/// This is the cached result of `Zcu.resolveReferences`. It is computed on-demand, and
/// reset to `null` when any semantic analysis occurs (since this invalidates the data).
/// Allocated into `gpa`.
resolved_references: ?std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) = null,

/// If `true`, then semantic analysis must not occur on this update due to AstGen errors.
/// Essentially the entire pipeline after AstGen, including Sema, codegen, and link, is skipped.
/// Reset to `false` at the start of each update in `Compilation.update`.
skip_analysis_this_update: bool = false,

test_functions: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .empty,

global_assembly: std.AutoArrayHashMapUnmanaged(AnalUnit, []u8) = .empty,

/// Key is the `AnalUnit` *performing* the reference. This representation allows
/// incremental updates to quickly delete references caused by a specific `AnalUnit`.
/// Value is index into `all_references` of the first reference triggered by the unit.
/// The `next` field on the `Reference` forms a linked list of all references
/// triggered by the key `AnalUnit`.
reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
all_references: std.ArrayList(Reference) = .empty,
/// Freelist of indices in `all_references`.
free_references: std.ArrayList(u32) = .empty,

inline_reference_frames: std.ArrayList(InlineReferenceFrame) = .empty,
free_inline_reference_frames: std.ArrayList(InlineReferenceFrame.Index) = .empty,

/// Key is the `AnalUnit` *performing* the reference. This representation allows
/// incremental updates to quickly delete references caused by a specific `AnalUnit`.
/// Value is index into `all_type_references` of the first reference triggered by the unit.
/// The `next` field on the `TypeReference` forms a linked list of all type references
/// triggered by the key `AnalUnit`.
type_reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
all_type_references: std.ArrayList(TypeReference) = .empty,
/// Freelist of indices in `all_type_references`.
free_type_references: std.ArrayList(u32) = .empty,

/// Populated by analysis of `AnalUnit.wrap(.{ .memoized_state = s })`, where `s` depends on the element.
builtin_decl_values: BuiltinDecl.Memoized = .initFill(.none),

incremental_debug_state: if (build_options.enable_debug_extensions) IncrementalDebugState else void =
    if (build_options.enable_debug_extensions) .init else {},

/// Times semantic analysis of the current `AnalUnit`. When we pause to analyze a different unit,
/// this timer must be temporarily paused and resumed later.
cur_analysis_timer: ?Compilation.Timer = null,

generation: u32 = 0,

pub const IncrementalDebugState = struct {
    /// All container types in the ZCU, even dead ones.
    /// Value is the generation the type was created on.
    types: std.AutoArrayHashMapUnmanaged(InternPool.Index, u32),
    /// All `Nav`s in the ZCU, even dead ones.
    /// Value is the generation the `Nav` was created on.
    navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, u32),
    /// All `AnalUnit`s in the ZCU, even dead ones.
    units: std.AutoArrayHashMapUnmanaged(AnalUnit, UnitInfo),

    pub const init: IncrementalDebugState = .{
        .types = .empty,
        .navs = .empty,
        .units = .empty,
    };
    pub fn deinit(ids: *IncrementalDebugState, gpa: Allocator) void {
        for (ids.units.values()) |*unit_info| {
            unit_info.deps.deinit(gpa);
        }
        ids.types.deinit(gpa);
        ids.navs.deinit(gpa);
        ids.units.deinit(gpa);
    }

    pub const UnitInfo = struct {
        last_update_gen: u32,
        /// This information isn't easily recoverable from `InternPool`'s dependency storage format.
        deps: std.ArrayList(InternPool.Dependee),
    };
    pub fn getUnitInfo(ids: *IncrementalDebugState, gpa: Allocator, unit: AnalUnit) Allocator.Error!*UnitInfo {
        const gop = try ids.units.getOrPut(gpa, unit);
        if (!gop.found_existing) gop.value_ptr.* = .{
            .last_update_gen = std.math.maxInt(u32),
            .deps = .empty,
        };
        return gop.value_ptr;
    }
    pub fn newType(ids: *IncrementalDebugState, zcu: *Zcu, ty: InternPool.Index) Allocator.Error!void {
        try ids.types.putNoClobber(zcu.gpa, ty, zcu.generation);
    }
    pub fn newNav(ids: *IncrementalDebugState, zcu: *Zcu, nav: InternPool.Nav.Index) Allocator.Error!void {
        try ids.navs.putNoClobber(zcu.gpa, nav, zcu.generation);
    }
};

pub const PerThread = @import("Zcu/PerThread.zig");

pub const ImportTableAdapter = struct {
    zcu: *const Zcu,
    pub fn hash(ctx: ImportTableAdapter, path: Compilation.Path) u32 {
        _ = ctx;
        return @truncate(std.hash.Wyhash.hash(@intFromEnum(path.root), path.sub_path));
    }
    pub fn eql(ctx: ImportTableAdapter, a_path: Compilation.Path, b_file: File.Index, b_index: usize) bool {
        _ = b_index;
        const b_path = ctx.zcu.fileByIndex(b_file).path;
        return a_path.root == b_path.root and mem.eql(u8, a_path.sub_path, b_path.sub_path);
    }
};
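
// Minimal usage sketch for the adapter, assuming a fully resolved
// `Compilation.Path` named `path` (not part of the build):
//
//     const adapter: ImportTableAdapter = .{ .zcu = zcu };
//     if (zcu.import_table.getKeyAdapted(path, adapter)) |file_index| {
//         // `path` was imported before; reuse `file_index`.
//     }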

pub const EmbedTableAdapter = struct {
    pub fn hash(ctx: EmbedTableAdapter, path: Compilation.Path) u32 {
        _ = ctx;
        return @truncate(std.hash.Wyhash.hash(@intFromEnum(path.root), path.sub_path));
    }
    pub fn eql(ctx: EmbedTableAdapter, a_path: Compilation.Path, b_file: *EmbedFile, b_index: usize) bool {
        _ = ctx;
        _ = b_index;
        const b_path = b_file.path;
        return a_path.root == b_path.root and mem.eql(u8, a_path.sub_path, b_path.sub_path);
    }
};

/// Names of declarations in `std.builtin` whose values are memoized in a `BuiltinDecl.Memoized`.
/// The name must exactly match the declaration name, as comptime logic is used to compute the namespace accesses.
/// Parent namespaces must be before their children in this enum. For instance, `.Type` must be before `.@"Type.Fn"`.
/// Additionally, parent namespaces must be resolved in the same stage as their children; see `BuiltinDecl.stage`.
pub const BuiltinDecl = enum {
    Signedness,
    AddressSpace,
    CallingConvention,
    returnError,
    StackTrace,
    SourceLocation,
    CallModifier,
    AtomicOrder,
    AtomicRmwOp,
    ReduceOp,
    FloatMode,
    PrefetchOptions,
    ExportOptions,
    ExternOptions,
    BranchHint,

    Type,
    @"Type.Fn",
    @"Type.Fn.Param",
    @"Type.Fn.Param.Attributes",
    @"Type.Fn.Attributes",
    @"Type.Int",
    @"Type.Float",
    @"Type.Pointer",
    @"Type.Pointer.Size",
    @"Type.Pointer.Attributes",
    @"Type.Array",
    @"Type.Vector",
    @"Type.Optional",
    @"Type.Error",
    @"Type.ErrorUnion",
    @"Type.EnumField",
    @"Type.Enum",
    @"Type.Enum.Mode",
    @"Type.Union",
    @"Type.UnionField",
    @"Type.UnionField.Attributes",
    @"Type.Struct",
    @"Type.StructField",
    @"Type.StructField.Attributes",
    @"Type.ContainerLayout",
    @"Type.Opaque",
    @"Type.Declaration",

    panic,
    @"panic.call",
    @"panic.sentinelMismatch",
    @"panic.unwrapError",
    @"panic.outOfBounds",
    @"panic.startGreaterThanEnd",
    @"panic.inactiveUnionField",
    @"panic.sliceCastLenRemainder",
    @"panic.reachedUnreachable",
    @"panic.unwrapNull",
    @"panic.castToNull",
    @"panic.incorrectAlignment",
    @"panic.invalidErrorCode",
    @"panic.integerOutOfBounds",
    @"panic.integerOverflow",
    @"panic.shlOverflow",
    @"panic.shrOverflow",
    @"panic.divideByZero",
    @"panic.exactDivisionRemainder",
    @"panic.integerPartOutOfBounds",
    @"panic.corruptSwitch",
    @"panic.shiftRhsTooBig",
    @"panic.invalidEnumValue",
    @"panic.forLenMismatch",
    @"panic.copyLenMismatch",
    @"panic.memcpyAlias",
    @"panic.noreturnReturned",

    VaList,

    assembly,
    @"assembly.Clobbers",

    /// Determines what kind of validation will be done to the decl's value.
    pub fn kind(decl: BuiltinDecl) enum { type, func, string } {
        return switch (decl) {
            .returnError => .func,

            .StackTrace,
            .CallingConvention,
            .SourceLocation,
            .Signedness,
            .AddressSpace,
            .VaList,
            .CallModifier,
            .AtomicOrder,
            .AtomicRmwOp,
            .ReduceOp,
            .FloatMode,
            .PrefetchOptions,
            .ExportOptions,
            .ExternOptions,
            .BranchHint,
            .assembly,
            .@"assembly.Clobbers",
            => .type,

            .Type,
            .@"Type.Fn",
            .@"Type.Fn.Param",
            .@"Type.Fn.Param.Attributes",
            .@"Type.Fn.Attributes",
            .@"Type.Int",
            .@"Type.Float",
            .@"Type.Pointer",
            .@"Type.Pointer.Size",
            .@"Type.Pointer.Attributes",
            .@"Type.Array",
            .@"Type.Vector",
            .@"Type.Optional",
            .@"Type.Error",
            .@"Type.ErrorUnion",
            .@"Type.EnumField",
            .@"Type.Enum",
            .@"Type.Enum.Mode",
            .@"Type.Union",
            .@"Type.UnionField",
            .@"Type.UnionField.Attributes",
            .@"Type.Struct",
            .@"Type.StructField",
            .@"Type.StructField.Attributes",
            .@"Type.ContainerLayout",
            .@"Type.Opaque",
            .@"Type.Declaration",
            => .type,

            .panic => .type,

            .@"panic.call",
            .@"panic.sentinelMismatch",
            .@"panic.unwrapError",
            .@"panic.outOfBounds",
            .@"panic.startGreaterThanEnd",
            .@"panic.inactiveUnionField",
            .@"panic.sliceCastLenRemainder",
            .@"panic.reachedUnreachable",
            .@"panic.unwrapNull",
            .@"panic.castToNull",
            .@"panic.incorrectAlignment",
            .@"panic.invalidErrorCode",
            .@"panic.integerOutOfBounds",
            .@"panic.integerOverflow",
            .@"panic.shlOverflow",
            .@"panic.shrOverflow",
            .@"panic.divideByZero",
            .@"panic.exactDivisionRemainder",
            .@"panic.integerPartOutOfBounds",
            .@"panic.corruptSwitch",
            .@"panic.shiftRhsTooBig",
            .@"panic.invalidEnumValue",
            .@"panic.forLenMismatch",
            .@"panic.copyLenMismatch",
            .@"panic.memcpyAlias",
            .@"panic.noreturnReturned",
            => .func,
        };
    }

    /// Resolution of these values is done in four distinct stages:
    /// * Resolution of `std.builtin.panic` and everything under it
    /// * Resolution of `VaList`
    /// * Resolution of `assembly`
    /// * Everything else
    ///
    /// Panics are separated because they are provided by the user, so must be able to use
    /// things like reification.
    ///
    /// `VaList` is separate because its value depends on the target, so it needs some reflection
    /// machinery to work; additionally, it is `@compileError` on some targets, so must be referenced
    /// by itself.
    ///
    /// `assembly` is separate because its value depends on the target.
    pub fn stage(decl: BuiltinDecl) InternPool.MemoizedStateStage {
        return switch (decl) {
            .VaList => .va_list,
            .assembly, .@"assembly.Clobbers" => .assembly,
            else => {
                if (@intFromEnum(decl) <= @intFromEnum(BuiltinDecl.@"Type.Declaration")) {
                    return .main;
                } else {
                    return .panic;
                }
            },
        };
    }

    /// Based on the tag name, determines how to access this decl; either as a direct child of the
    /// `std.builtin` namespace, or as a child of some preceding `BuiltinDecl` value.
    pub fn access(decl: BuiltinDecl) union(enum) {
        direct: []const u8,
        nested: struct { BuiltinDecl, []const u8 },
    } {
        @setEvalBranchQuota(2000);
        return switch (decl) {
            inline else => |tag| {
                const name = @tagName(tag);
                const split = (comptime std.mem.lastIndexOfScalar(u8, name, '.')) orelse return .{ .direct = name };
                const parent = @field(BuiltinDecl, name[0..split]);
                comptime assert(@intFromEnum(parent) < @intFromEnum(tag)); // dependencies ordered correctly
                return .{ .nested = .{ parent, name[split + 1 ..] } };
            },
        };
    }

    const Memoized = std.enums.EnumArray(BuiltinDecl, InternPool.Index);
};
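
// For example, `BuiltinDecl.@"Type.Fn".access()` evaluates to
// `.{ .nested = .{ .Type, "Fn" } }`, while a top-level name such as
// `.StackTrace` evaluates to `.{ .direct = "StackTrace" }`.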

pub const SimplePanicId = enum {
    reached_unreachable,
    unwrap_null,
    cast_to_null,
    incorrect_alignment,
    invalid_error_code,
    integer_out_of_bounds,
    integer_overflow,
    shl_overflow,
    shr_overflow,
    divide_by_zero,
    exact_division_remainder,
    integer_part_out_of_bounds,
    corrupt_switch,
    shift_rhs_too_big,
    invalid_enum_value,
    for_len_mismatch,
    copy_len_mismatch,
    memcpy_alias,
    noreturn_returned,

    pub fn toBuiltin(id: SimplePanicId) BuiltinDecl {
        return switch (id) {
            // zig fmt: off
            .reached_unreachable        => .@"panic.reachedUnreachable",
            .unwrap_null                => .@"panic.unwrapNull",
            .cast_to_null               => .@"panic.castToNull",
            .incorrect_alignment        => .@"panic.incorrectAlignment",
            .invalid_error_code         => .@"panic.invalidErrorCode",
            .integer_out_of_bounds      => .@"panic.integerOutOfBounds",
            .integer_overflow           => .@"panic.integerOverflow",
            .shl_overflow               => .@"panic.shlOverflow",
            .shr_overflow               => .@"panic.shrOverflow",
            .divide_by_zero             => .@"panic.divideByZero",
            .exact_division_remainder   => .@"panic.exactDivisionRemainder",
            .integer_part_out_of_bounds => .@"panic.integerPartOutOfBounds",
            .corrupt_switch             => .@"panic.corruptSwitch",
            .shift_rhs_too_big          => .@"panic.shiftRhsTooBig",
            .invalid_enum_value         => .@"panic.invalidEnumValue",
            .for_len_mismatch           => .@"panic.forLenMismatch",
            .copy_len_mismatch          => .@"panic.copyLenMismatch",
            .memcpy_alias               => .@"panic.memcpyAlias",
            .noreturn_returned          => .@"panic.noreturnReturned",
            // zig fmt: on
        };
    }
};

pub const GlobalErrorSet = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);

pub const CImportError = struct {
    offset: u32,
    line: u32,
    column: u32,
    path: ?[*:0]u8,
    source_line: ?[*:0]u8,
    msg: [*:0]u8,

    pub fn deinit(err: CImportError, gpa: Allocator) void {
        if (err.path) |some| gpa.free(std.mem.span(some));
        if (err.source_line) |some| gpa.free(std.mem.span(some));
        gpa.free(std.mem.span(err.msg));
    }
};

pub const ErrorInt = u32;

pub const Exported = union(enum) {
    /// The Nav being exported. Note this is *not* the Nav corresponding to the AnalUnit performing the export.
    nav: InternPool.Nav.Index,
    /// Constant value being exported.
    uav: InternPool.Index,

    pub fn getValue(exported: Exported, zcu: *Zcu) Value {
        return switch (exported) {
            .nav => |nav| zcu.navValue(nav),
            .uav => |uav| Value.fromInterned(uav),
        };
    }

    pub fn getAlign(exported: Exported, zcu: *Zcu) Alignment {
        return switch (exported) {
            .nav => |nav| switch (zcu.intern_pool.getNav(nav).status) {
                .unresolved => unreachable,
                .type_resolved => |r| r.alignment,
                .fully_resolved => |r| r.alignment,
            },
            .uav => .none,
        };
    }
};

pub const Export = struct {
    opts: Options,
    src: LazySrcLoc,
    exported: Exported,
    status: enum {
        in_progress,
        failed,
        /// Indicates that the failure was due to a temporary issue, such as an I/O error
        /// when writing to the output file. Retrying the export may succeed.
        failed_retryable,
        complete,
    },

    pub const Options = struct {
        name: InternPool.NullTerminatedString,
        linkage: std.builtin.GlobalLinkage = .strong,
        section: InternPool.OptionalNullTerminatedString = .none,
        visibility: std.builtin.SymbolVisibility = .default,
    };

    /// Index into `all_exports`.
    pub const Index = enum(u32) {
        _,

        pub fn ptr(i: Index, zcu: *const Zcu) *Export {
            return &zcu.all_exports.items[@intFromEnum(i)];
        }
    };
};

pub const CompileLogLine = struct {
    next: Index.Optional,
    /// Does *not* include the trailing newline.
    data: InternPool.NullTerminatedString,
    pub const Index = enum(u32) {
        _,
        pub fn get(idx: Index, zcu: *Zcu) *CompileLogLine {
            return &zcu.compile_log_lines.items[@intFromEnum(idx)];
        }
        pub fn toOptional(idx: Index) Optional {
            return @enumFromInt(@intFromEnum(idx));
        }
        pub const Optional = enum(u32) {
            none = std.math.maxInt(u32),
            _,
            pub fn unwrap(opt: Optional) ?Index {
                return switch (opt) {
                    .none => null,
                    _ => @enumFromInt(@intFromEnum(opt)),
                };
            }
        };
    };
};
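
// Illustrative sketch of draining one unit's `@compileLog` output by walking
// the singly linked list described at `compile_logs` (not part of the build):
//
//     var it = zcu.compile_logs.get(unit).?.first_line.toOptional();
//     while (it.unwrap()) |idx| {
//         const line = idx.get(zcu);
//         // `line.data` holds one logged line, without a trailing newline.
//         it = line.next;
//     }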

pub const Reference = struct {
    /// The `AnalUnit` whose semantic analysis was triggered by this reference.
    referenced: AnalUnit,
    /// Index into `all_references` of the next `Reference` triggered by the same `AnalUnit`.
    /// `std.math.maxInt(u32)` is the sentinel.
    next: u32,
    /// The source location of the reference.
    src: LazySrcLoc,
    /// If not `.none`, this is the index of the `InlineReferenceFrame` which should appear
    /// between the referencer and `referenced` in the reference trace. These frames represent
    /// inline calls, which do not create actual references (since they happen in the caller's
    /// `AnalUnit`), but do show in the reference trace.
    inline_frame: InlineReferenceFrame.Index.Optional,
};
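
// Illustrative sketch of visiting every reference performed by one `AnalUnit`,
// following the intrusive list described above (not part of the build):
//
//     var i = zcu.reference_table.get(unit).?;
//     while (i != std.math.maxInt(u32)) {
//         const ref = zcu.all_references.items[i];
//         // `ref.referenced` was referenced from source location `ref.src`.
//         i = ref.next;
//     }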

pub const InlineReferenceFrame = struct {
    /// The inline *callee*; that is, the function which was called inline.
    /// The *caller* is either `parent`, or else the unit causing the original `Reference`.
    callee: InternPool.Index,
    /// The source location of the inline call, in the *caller*.
    call_src: LazySrcLoc,
    /// If not `.none`, a frame which should appear directly below this one.
    /// This will be the "parent" inline call; this frame's `callee` is our caller.
    parent: InlineReferenceFrame.Index.Optional,

    pub const Index = enum(u32) {
        _,
        pub fn ptr(idx: Index, zcu: *Zcu) *InlineReferenceFrame {
            return &zcu.inline_reference_frames.items[@intFromEnum(idx)];
        }
        pub fn toOptional(idx: Index) Optional {
            return @enumFromInt(@intFromEnum(idx));
        }
        pub const Optional = enum(u32) {
            none = std.math.maxInt(u32),
            _,
            pub fn unwrap(opt: Optional) ?Index {
                return switch (opt) {
                    .none => null,
                    _ => @enumFromInt(@intFromEnum(opt)),
                };
            }
        };
    };
};

pub const TypeReference = struct {
    /// The container type which was referenced.
    referenced: InternPool.Index,
    /// Index into `all_type_references` of the next `TypeReference` triggered by the same `AnalUnit`.
    /// `std.math.maxInt(u32)` is the sentinel.
    next: u32,
    /// The source location of the reference.
    src: LazySrcLoc,
};

/// The container that structs, enums, unions, and opaques have.
pub const Namespace = struct {
    parent: OptionalIndex,
    file_scope: File.Index,
    generation: u32,
    /// Will be a struct, enum, union, or opaque.
    owner_type: InternPool.Index,
    /// Members of the namespace which are marked `pub`.
    pub_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .empty,
    /// Members of the namespace which are *not* marked `pub`.
    priv_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .empty,
    /// All `comptime` declarations in this namespace. We store these purely so that incremental
    /// compilation can re-use the existing `ComptimeUnit`s when a namespace changes.
    comptime_decls: std.ArrayList(InternPool.ComptimeUnit.Id) = .empty,
    /// All `test` declarations in this namespace. We store these purely so that incremental
    /// compilation can re-use the existing `Nav`s when a namespace changes.
    test_decls: std.ArrayList(InternPool.Nav.Index) = .empty,

    pub const Index = InternPool.NamespaceIndex;
    pub const OptionalIndex = InternPool.OptionalNamespaceIndex;

    const NavNameContext = struct {
        zcu: *Zcu,

        pub fn hash(ctx: NavNameContext, nav: InternPool.Nav.Index) u32 {
            const name = ctx.zcu.intern_pool.getNav(nav).name;
            return std.hash.int(@intFromEnum(name));
        }

        pub fn eql(ctx: NavNameContext, a_nav: InternPool.Nav.Index, b_nav: InternPool.Nav.Index, b_index: usize) bool {
            _ = b_index;
            const a_name = ctx.zcu.intern_pool.getNav(a_nav).name;
            const b_name = ctx.zcu.intern_pool.getNav(b_nav).name;
            return a_name == b_name;
        }
    };

    pub const NameAdapter = struct {
        zcu: *Zcu,

        pub fn hash(ctx: NameAdapter, s: InternPool.NullTerminatedString) u32 {
            _ = ctx;
            return std.hash.int(@intFromEnum(s));
        }

        pub fn eql(ctx: NameAdapter, a: InternPool.NullTerminatedString, b_nav: InternPool.Nav.Index, b_index: usize) bool {
            _ = b_index;
            return a == ctx.zcu.intern_pool.getNav(b_nav).name;
        }
    };

    pub fn fileScope(ns: Namespace, zcu: *Zcu) *File {
        return zcu.fileByIndex(ns.file_scope);
    }

    pub fn fileScopeIp(ns: Namespace, ip: *InternPool) *File {
        return ip.filePtr(ns.file_scope);
    }

    /// This renders e.g. "std/fs.zig:Dir.OpenOptions"
    pub fn renderFullyQualifiedDebugName(
        ns: Namespace,
        zcu: *Zcu,
        name: InternPool.NullTerminatedString,
        writer: anytype,
    ) @TypeOf(writer).Error!void {
        const sep: u8 = if (ns.parent.unwrap()) |parent| sep: {
            try zcu.namespacePtr(parent).renderFullyQualifiedDebugName(
                zcu,
                zcu.declPtr(ns.decl_index).name,
                writer,
            );
            break :sep '.';
        } else sep: {
            try ns.fileScope(zcu).renderFullyQualifiedDebugName(writer);
            break :sep ':';
        };
        if (name != .empty) try writer.print("{c}{f}", .{ sep, name.fmt(&zcu.intern_pool) });
    }

    pub fn internFullyQualifiedName(
        ns: Namespace,
        ip: *InternPool,
        gpa: Allocator,
        tid: Zcu.PerThread.Id,
        name: InternPool.NullTerminatedString,
    ) !InternPool.NullTerminatedString {
        const ns_name = Type.fromInterned(ns.owner_type).containerTypeName(ip);
        if (name == .empty) return ns_name;
        return ip.getOrPutStringFmt(gpa, tid, "{f}.{f}", .{ ns_name.fmt(ip), name.fmt(ip) }, .no_embedded_nulls);
    }
};

pub const File = struct {
    status: enum {
        /// We have not yet attempted to load this file.
        /// `stat` is not populated and may be `undefined`.
        never_loaded,
        /// A filesystem access failed. It should be retried on the next update.
        /// There is guaranteed to be a `failed_files` entry with at least one message.
        /// ZIR/ZOIR errors should not be emitted as `zir`/`zoir` is not up-to-date.
        /// `stat` is not populated and may be `undefined`.
        retryable_failure,
        /// This file has failed parsing, AstGen, or ZonGen.
        /// There is guaranteed to be a `failed_files` entry, which may or may not have messages.
        /// ZIR/ZOIR errors *should* be emitted as `zir`/`zoir` is up-to-date.
        /// `stat` is populated.
        astgen_failure,
        /// Parsing and AstGen/ZonGen of this file has succeeded.
        /// There may still be a `failed_files` entry, e.g. for non-fatal AstGen errors.
        /// `stat` is populated.
        success,
    },
    /// Whether this is populated depends on `status`.
    stat: Cache.File.Stat,

    /// Whether this file is the generated file of a "builtin" module. This matters because those
    /// files are generated and stored in-memory rather than being read off-disk. The rest of the
    /// pipeline generally shouldn't care about this.
    is_builtin: bool,

    /// The path of this file. It is important that this path has a "canonical form" because files
    /// are deduplicated based on path; `Compilation.Path` guarantees this. Owned by this `File`,
    /// allocated into `gpa`.
    path: Compilation.Path,

    /// Populated only when emitting error messages; see `getSource`.
    source: ?[:0]const u8,
    /// Populated only when emitting error messages; see `getTree`.
    tree: ?Ast,

    zir: ?Zir,
    zoir: ?Zoir,

    /// Module that this file is a part of, managed externally.
    /// This is initially `null`. After AstGen, a pass is run to determine which module each
    /// file belongs to, at which point this field is set. It is never set to `null` again;
    /// this is so that if the file starts belonging to a different module instead, we can
    /// tell, and invalidate dependencies as needed (see `module_changed`).
    /// During semantic analysis, this is always non-`null` for alive files (i.e. those which
    /// have imports targeting them).
    mod: ?*Package.Module,
    /// Relative to the root directory of `mod`. If `mod == null`, this field is `undefined`.
    /// This memory is managed externally and must not be directly freed.
    /// Its lifetime is at least equal to that of this `File`.
    sub_file_path: []const u8,

    /// If this file's module identity changes on an incremental update, this flag is set to signal
    /// to `Zcu.updateZirRefs` that all references to this file must be invalidated. This matters
    /// because changing your module changes things like your optimization mode and codegen flags,
    /// so everything needs to be re-done. `updateZirRefs` is responsible for resetting this flag.
    module_changed: bool,

    /// The ZIR for this file from the last update with no file failures. As such, this ZIR is never
    /// failed (although it may have compile errors).
    ///
    /// Because updates with file failures do not perform ZIR mapping or semantic analysis, we keep
    /// this around so we have the "old" ZIR to map when an update is ready to do so. Once such an
    /// update occurs, this field is unloaded, since it is no longer necessary.
    ///
    /// In other words, if `TrackedInst`s are tied to ZIR other than what's in the `zir` field, this
    /// field is populated with that old ZIR.
    prev_zir: ?*Zir,

    /// This field serves a similar purpose to `prev_zir`, but for ZOIR. However, since we do not
    /// need to map old ZOIR to new ZOIR -- instead only invalidating dependencies if the ZOIR
    /// changed -- this field is just a simple boolean.
    ///
    /// When `zoir` is updated, this field is set to `true`. In `updateZirRefs`, if this is `true`,
    /// we invalidate the corresponding `zon_file` dependency, and reset it to `false`.
    zoir_invalidated: bool,

    pub const Path = struct {
        root: enum {
            cwd,
            fs_root,
            local_cache,
            global_cache,
            lib_dir,
        },
    };

    /// A single reference to a file.
    pub const Reference = union(enum) {
        analysis_root: *Package.Module,
        import: struct {
            importer: Zcu.File.Index,
            tok: Ast.TokenIndex,
            /// If the file is imported as the root of a module, this is that module.
            /// `null` means the file was imported directly by path.
            module: ?*Package.Module,
        },
    };

    pub fn getMode(self: File) Ast.Mode {
        // We never create a `File` whose path doesn't give a mode.
        return modeFromPath(self.path.sub_path).?;
    }

    pub fn modeFromPath(path: []const u8) ?Ast.Mode {
        if (std.mem.endsWith(u8, path, ".zon")) {
            return .zon;
        } else if (std.mem.endsWith(u8, path, ".zig")) {
            return .zig;
        } else {
            return null;
        }
    }
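
    // For example, `modeFromPath("foo/bar.zig")` returns `.zig`,
    // `modeFromPath("build.zig.zon")` returns `.zon`, and any other
    // extension yields `null`.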

    pub fn unload(file: *File, gpa: Allocator) void {
        if (file.zoir) |zoir| zoir.deinit(gpa);
        file.unloadTree(gpa);
        file.unloadSource(gpa);
        file.unloadZir(gpa);
    }

    pub fn unloadTree(file: *File, gpa: Allocator) void {
        if (file.tree) |*tree| {
            tree.deinit(gpa);
            file.tree = null;
        }
    }

    pub fn unloadSource(file: *File, gpa: Allocator) void {
        if (file.source) |source| {
            gpa.free(source);
            file.source = null;
        }
    }

    pub fn unloadZir(file: *File, gpa: Allocator) void {
        if (file.zir) |*zir| {
            zir.deinit(gpa);
            file.zir = null;
        }
    }

    pub const GetSourceError = error{
        OutOfMemory,
        FileChanged,
    } || std.Io.File.OpenError || std.Io.File.Reader.Error;

    /// This must only be called in error conditions where `stat` *is* populated. It returns the
    /// contents of the source file, assuming the stat has not changed since it was originally
    /// loaded.
    pub fn getSource(file: *File, zcu: *const Zcu) GetSourceError![:0]const u8 {
        const gpa = zcu.gpa;
        const io = zcu.comp.io;

        if (file.source) |source| return source;

        switch (file.status) {
            .never_loaded => unreachable, // stat must be populated
            .retryable_failure => unreachable, // stat must be populated
            .astgen_failure, .success => {},
        }

        assert(file.stat.size <= std.math.maxInt(u32)); // `PerThread.updateFile` checks this

        var f = f: {
            const dir, const sub_path = file.path.openInfo(zcu.comp.dirs);
            break :f try dir.openFile(sub_path, .{});
        };
        defer f.close();

        const stat = f.stat() catch |err| switch (err) {
            error.Streaming => {
                // Since `file.stat` is populated, this was previously a file stream; since it is
                // now not a file stream, it must have changed.
                return error.FileChanged;
            },
            else => |e| return e,
        };

        if (stat.inode != file.stat.inode or
            stat.size != file.stat.size or
            stat.mtime.nanoseconds != file.stat.mtime.nanoseconds)
        {
            return error.FileChanged;
        }

        const source = try gpa.allocSentinel(u8, @intCast(file.stat.size), 0);
        errdefer gpa.free(source);

        var file_reader = f.reader(io, &.{});
        file_reader.size = stat.size;
        file_reader.interface.readSliceAll(source) catch return file_reader.err.?;

        file.source = source;
        errdefer comptime unreachable; // don't error after populating `source`

        return source;
    }

    /// This must only be called in error conditions where `stat` *is* populated. It returns the
    /// parsed AST of the source file, assuming the stat has not changed since it was originally
    /// loaded.
    pub fn getTree(file: *File, zcu: *const Zcu) GetSourceError!*const Ast {
        if (file.tree) |*tree| return tree;

        const source = try file.getSource(zcu);
        file.tree = try .parse(zcu.gpa, source, file.getMode());
        return &file.tree.?;
    }

    pub fn fullyQualifiedNameLen(file: File) usize {
        const ext = std.fs.path.extension(file.sub_file_path);
        return file.sub_file_path.len - ext.len;
    }

    pub fn renderFullyQualifiedName(file: File, writer: anytype) !void {
        // Convert all the slashes into dots and truncate the extension.
        const ext = std.fs.path.extension(file.sub_file_path);
        const noext = file.sub_file_path[0 .. file.sub_file_path.len - ext.len];
        for (noext) |byte| switch (byte) {
            '/', '\\' => try writer.writeByte('.'),
            else => try writer.writeByte(byte),
        };
    }
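
    // For example, a `sub_file_path` of "std/fs/Dir.zig" renders as "std.fs.Dir".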
1133
1134 pub fn renderFullyQualifiedDebugName(file: File, writer: anytype) !void {
1135 for (file.sub_file_path) |byte| switch (byte) {
1136 '/', '\\' => try writer.writeByte('/'),
1137 else => try writer.writeByte(byte),
1138 };
1139 }
1140
1141 pub fn internFullyQualifiedName(file: File, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
1142 const gpa = pt.zcu.gpa;
1143 const ip = &pt.zcu.intern_pool;
1144 const string_bytes = ip.getLocal(pt.tid).getMutableStringBytes(gpa);
1145 var w: Writer = .fixed((try string_bytes.addManyAsSlice(file.fullyQualifiedNameLen()))[0]);
1146 file.renderFullyQualifiedName(&w) catch unreachable;
1147 assert(w.end == w.buffer.len);
1148 return ip.getOrPutTrailingString(gpa, pt.tid, @intCast(w.end), .no_embedded_nulls);
1149 }
1150
1151 pub const Index = InternPool.FileIndex;
1152
1153 pub fn errorBundleWholeFileSrc(
1154 file: *File,
1155 zcu: *const Zcu,
1156 eb: *std.zig.ErrorBundle.Wip,
1157 ) Allocator.Error!std.zig.ErrorBundle.SourceLocationIndex {
1158 return eb.addSourceLocation(.{
1159 .src_path = try eb.printString("{f}", .{file.path.fmt(zcu.comp)}),
1160 .span_start = 0,
1161 .span_main = 0,
1162 .span_end = 0,
1163 .line = 0,
1164 .column = 0,
1165 .source_line = 0,
1166 });
1167 }
1168 /// Asserts that the tree has already been loaded with `getTree`.
1169 pub fn errorBundleTokenSrc(
1170 file: *File,
1171 tok: Ast.TokenIndex,
1172 zcu: *const Zcu,
1173 eb: *std.zig.ErrorBundle.Wip,
1174 ) Allocator.Error!std.zig.ErrorBundle.SourceLocationIndex {
1175 const tree = &file.tree.?;
1176 const start = tree.tokenStart(tok);
1177 const end = start + tree.tokenSlice(tok).len;
1178 const loc = std.zig.findLineColumn(file.source.?, start);
1179 return eb.addSourceLocation(.{
1180 .src_path = try eb.printString("{f}", .{file.path.fmt(zcu.comp)}),
1181 .span_start = start,
1182 .span_main = start,
1183 .span_end = @intCast(end),
1184 .line = @intCast(loc.line),
1185 .column = @intCast(loc.column),
1186 .source_line = try eb.addString(loc.source_line),
1187 });
1188 }
1189};
1190
1191/// Represents the contents of a file loaded with `@embedFile`.
1192pub const EmbedFile = struct {
1193 path: Compilation.Path,
1194 /// `.none` means the file was not loaded, so `stat` is undefined.
1195 val: InternPool.Index,
1196 /// If this is `null` and `val` is `.none`, the file has never been loaded.
1197 err: ?(std.fs.File.OpenError || std.fs.File.StatError || std.fs.File.ReadError || error{UnexpectedEof}),
1198 stat: Cache.File.Stat,
1199
1200 pub const Index = enum(u32) {
1201 _,
1202 pub fn get(idx: Index, zcu: *const Zcu) *EmbedFile {
1203 return zcu.embed_table.keys()[@intFromEnum(idx)];
1204 }
1205 };
1206};
1207
1208/// This struct holds data necessary to construct API-facing `AllErrors.Message`.
1209/// Its memory is managed with the general purpose allocator so that they
1210/// can be created and destroyed in response to incremental updates.
1211pub const ErrorMsg = struct {
1212 src_loc: LazySrcLoc,
1213 msg: []const u8,
1214 notes: []ErrorMsg = &.{},
1215 reference_trace_root: AnalUnit.Optional = .none,
1216
1217 pub fn create(
1218 gpa: Allocator,
1219 src_loc: LazySrcLoc,
1220 comptime format: []const u8,
1221 args: anytype,
1222 ) !*ErrorMsg {
1223 assert(src_loc.offset != .unneeded);
1224 const err_msg = try gpa.create(ErrorMsg);
1225 errdefer gpa.destroy(err_msg);
1226 err_msg.* = try ErrorMsg.init(gpa, src_loc, format, args);
1227 return err_msg;
1228 }
1229
1230 /// Assumes the ErrorMsg struct and msg were both allocated with `gpa`,
1231 /// as well as all notes.
1232 pub fn destroy(err_msg: *ErrorMsg, gpa: Allocator) void {
1233 err_msg.deinit(gpa);
1234 gpa.destroy(err_msg);
1235 }
1236
1237 pub fn init(gpa: Allocator, src_loc: LazySrcLoc, comptime format: []const u8, args: anytype) !ErrorMsg {
1238 return .{
1239 .src_loc = src_loc,
1240 .msg = try std.fmt.allocPrint(gpa, format, args),
1241 };
1242 }
1243
1244 pub fn deinit(err_msg: *ErrorMsg, gpa: Allocator) void {
1245 for (err_msg.notes) |*note| {
1246 note.deinit(gpa);
1247 }
1248 gpa.free(err_msg.notes);
1249 gpa.free(err_msg.msg);
1250 err_msg.* = undefined;
1251 }
1252};
1253
1254pub const AstGenSrc = union(enum) {
1255 root,
1256 import: struct {
1257 importing_file: Zcu.File.Index,
1258 import_tok: std.zig.Ast.TokenIndex,
1259 },
1260};
1261
1262/// Canonical reference to a position within a source file.
1263pub const SrcLoc = struct {
1264 file_scope: *File,
1265 base_node: Ast.Node.Index,
1266 /// Relative to `base_node`.
1267 lazy: LazySrcLoc.Offset,
1268
1269 pub fn baseSrcToken(src_loc: SrcLoc) Ast.TokenIndex {
1270 const tree = src_loc.file_scope.tree.?;
1271 return tree.firstToken(src_loc.base_node);
1272 }
1273
1274 pub const Span = Ast.Span;
1275
1276 pub fn span(src_loc: SrcLoc, zcu: *const Zcu) !Span {
1277 switch (src_loc.lazy) {
1278 .unneeded => unreachable,
1279
1280 .byte_abs => |byte_index| return Span{ .start = byte_index, .end = byte_index + 1, .main = byte_index },
1281
1282 .token_abs => |tok_index| {
1283 const tree = try src_loc.file_scope.getTree(zcu);
1284 const start = tree.tokenStart(tok_index);
1285 const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
1286 return Span{ .start = start, .end = end, .main = start };
1287 },
1288 .node_abs => |node| {
1289 const tree = try src_loc.file_scope.getTree(zcu);
1290 return tree.nodeToSpan(node);
1291 },
1292 .byte_offset => |byte_off| {
1293 const tree = try src_loc.file_scope.getTree(zcu);
1294 const tok_index = src_loc.baseSrcToken();
1295 const start = tree.tokenStart(tok_index) + byte_off;
1296 const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
1297 return Span{ .start = start, .end = end, .main = start };
1298 },
1299 .token_offset => |tok_off| {
1300 const tree = try src_loc.file_scope.getTree(zcu);
1301 const tok_index = tok_off.toAbsolute(src_loc.baseSrcToken());
1302 const start = tree.tokenStart(tok_index);
1303 const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
1304 return Span{ .start = start, .end = end, .main = start };
1305 },
1306 .node_offset => |traced_off| {
1307 const node_off = traced_off.x;
1308 const tree = try src_loc.file_scope.getTree(zcu);
1309 const node = node_off.toAbsolute(src_loc.base_node);
1310 return tree.nodeToSpan(node);
1311 },
1312 .node_offset_main_token => |node_off| {
1313 const tree = try src_loc.file_scope.getTree(zcu);
1314 const node = node_off.toAbsolute(src_loc.base_node);
1315 const main_token = tree.nodeMainToken(node);
1316 return tree.tokensToSpan(main_token, main_token, main_token);
1317 },
1318 .node_offset_bin_op => |node_off| {
1319 const tree = try src_loc.file_scope.getTree(zcu);
1320 const node = node_off.toAbsolute(src_loc.base_node);
1321 return tree.nodeToSpan(node);
1322 },
1323 .node_offset_initializer => |node_off| {
1324 const tree = try src_loc.file_scope.getTree(zcu);
1325 const node = node_off.toAbsolute(src_loc.base_node);
1326 return tree.tokensToSpan(
1327 tree.firstToken(node) - 3,
1328 tree.lastToken(node),
1329 tree.nodeMainToken(node) - 2,
1330 );
1331 },
1332 .node_offset_var_decl_ty => |node_off| {
1333 const tree = try src_loc.file_scope.getTree(zcu);
1334 const node = node_off.toAbsolute(src_loc.base_node);
1335 const full = switch (tree.nodeTag(node)) {
1336 .global_var_decl,
1337 .local_var_decl,
1338 .simple_var_decl,
1339 .aligned_var_decl,
1340 => tree.fullVarDecl(node).?,
1341 else => unreachable,
1342 };
1343 if (full.ast.type_node.unwrap()) |type_node| {
1344 return tree.nodeToSpan(type_node);
1345 }
1346 const tok_index = full.ast.mut_token + 1; // the name token
1347 const start = tree.tokenStart(tok_index);
1348 const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
1349 return Span{ .start = start, .end = end, .main = start };
1350 },
1351 .node_offset_var_decl_align => |node_off| {
1352 const tree = try src_loc.file_scope.getTree(zcu);
1353 const node = node_off.toAbsolute(src_loc.base_node);
1354 var buf: [1]Ast.Node.Index = undefined;
1355 const align_node = if (tree.fullVarDecl(node)) |v|
1356 v.ast.align_node.unwrap().?
1357 else if (tree.fullFnProto(&buf, node)) |f|
1358 f.ast.align_expr.unwrap().?
1359 else
1360 unreachable;
1361 return tree.nodeToSpan(align_node);
1362 },
1363 .node_offset_var_decl_section => |node_off| {
1364 const tree = try src_loc.file_scope.getTree(zcu);
1365 const node = node_off.toAbsolute(src_loc.base_node);
1366 var buf: [1]Ast.Node.Index = undefined;
1367 const section_node = if (tree.fullVarDecl(node)) |v|
1368 v.ast.section_node.unwrap().?
1369 else if (tree.fullFnProto(&buf, node)) |f|
1370 f.ast.section_expr.unwrap().?
1371 else
1372 unreachable;
1373 return tree.nodeToSpan(section_node);
1374 },
1375 .node_offset_var_decl_addrspace => |node_off| {
1376 const tree = try src_loc.file_scope.getTree(zcu);
1377 const node = node_off.toAbsolute(src_loc.base_node);
1378 var buf: [1]Ast.Node.Index = undefined;
1379 const addrspace_node = if (tree.fullVarDecl(node)) |v|
1380 v.ast.addrspace_node.unwrap().?
1381 else if (tree.fullFnProto(&buf, node)) |f|
1382 f.ast.addrspace_expr.unwrap().?
1383 else
1384 unreachable;
1385 return tree.nodeToSpan(addrspace_node);
1386 },
1387 .node_offset_var_decl_init => |node_off| {
1388 const tree = try src_loc.file_scope.getTree(zcu);
1389 const node = node_off.toAbsolute(src_loc.base_node);
1390 const init_node = switch (tree.nodeTag(node)) {
1391 .global_var_decl,
1392 .local_var_decl,
1393 .aligned_var_decl,
1394 .simple_var_decl,
1395 => tree.fullVarDecl(node).?.ast.init_node.unwrap().?,
1396 .assign_destructure => tree.assignDestructure(node).ast.value_expr,
1397 else => unreachable,
1398 };
1399 return tree.nodeToSpan(init_node);
1400 },
1401 .node_offset_builtin_call_arg => |builtin_arg| {
1402 const tree = try src_loc.file_scope.getTree(zcu);
1403 const node = builtin_arg.builtin_call_node.toAbsolute(src_loc.base_node);
1404 var buf: [2]Ast.Node.Index = undefined;
1405 const params = tree.builtinCallParams(&buf, node).?;
1406 return tree.nodeToSpan(params[builtin_arg.arg_index]);
1407 },
1408 .node_offset_ptrcast_operand => |node_off| {
1409 const tree = try src_loc.file_scope.getTree(zcu);
1410
1411 var node = node_off.toAbsolute(src_loc.base_node);
                while (true) {
                    switch (tree.nodeTag(node)) {
                        .builtin_call_two, .builtin_call_two_comma => {},
                        else => break,
                    }

                    const first_arg, const second_arg = tree.nodeData(node).opt_node_and_opt_node;
                    if (first_arg == .none) break; // 0 args
                    if (second_arg != .none) break; // 2 args

                    const builtin_token = tree.nodeMainToken(node);
                    const builtin_name = tree.tokenSlice(builtin_token);
                    const info = BuiltinFn.list.get(builtin_name) orelse break;

                    switch (info.tag) {
                        else => break,
                        .ptr_cast,
                        .align_cast,
                        .addrspace_cast,
                        .const_cast,
                        .volatile_cast,
                        => {},
                    }

                    node = first_arg.unwrap().?;
                }

                return tree.nodeToSpan(node);
            },
            .node_offset_array_access_index => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                return tree.nodeToSpan(tree.nodeData(node).node_and_node[1]);
            },
            .node_offset_slice_ptr,
            .node_offset_slice_start,
            .node_offset_slice_end,
            .node_offset_slice_sentinel,
            => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                const full = tree.fullSlice(node).?;
                const part_node = switch (src_loc.lazy) {
                    .node_offset_slice_ptr => full.ast.sliced,
                    .node_offset_slice_start => full.ast.start,
                    .node_offset_slice_end => full.ast.end.unwrap().?,
                    .node_offset_slice_sentinel => full.ast.sentinel.unwrap().?,
                    else => unreachable,
                };
                return tree.nodeToSpan(part_node);
            },
            .node_offset_call_func => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                var buf: [1]Ast.Node.Index = undefined;
                const full = tree.fullCall(&buf, node).?;
                return tree.nodeToSpan(full.ast.fn_expr);
            },
            .node_offset_field_name => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                var buf: [1]Ast.Node.Index = undefined;
                const tok_index = switch (tree.nodeTag(node)) {
                    .field_access => tree.nodeData(node).node_and_token[1],
                    .call_one,
                    .call_one_comma,
                    .call,
                    .call_comma,
                    => blk: {
                        const full = tree.fullCall(&buf, node).?;
                        break :blk tree.lastToken(full.ast.fn_expr);
                    },
                    else => tree.firstToken(node) - 2,
                };
                const start = tree.tokenStart(tok_index);
                const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
                return Span{ .start = start, .end = end, .main = start };
            },
            .node_offset_field_name_init => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
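                // `.name = expr` initializer: the tokens are `.`, the field name,
                // `=`, then the expression, so the name identifier is two tokens
                // before the expression's first token.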
                const tok_index = tree.firstToken(node) - 2;
                const start = tree.tokenStart(tok_index);
                const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
                return Span{ .start = start, .end = end, .main = start };
            },
            .node_offset_deref_ptr => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                return tree.nodeToSpan(node);
            },
            .node_offset_asm_source => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                const full = tree.fullAsm(node).?;
                return tree.nodeToSpan(full.ast.template);
            },
            .node_offset_asm_ret_ty => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                const full = tree.fullAsm(node).?;
                const asm_output = full.outputs[0];
                return tree.nodeToSpan(tree.nodeData(asm_output).opt_node_and_token[0].unwrap().?);
            },

            .node_offset_if_cond => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                const src_node = switch (tree.nodeTag(node)) {
                    .if_simple,
                    .@"if",
                    => tree.fullIf(node).?.ast.cond_expr,

                    .while_simple,
                    .while_cont,
                    .@"while",
                    => tree.fullWhile(node).?.ast.cond_expr,

                    .for_simple,
                    .@"for",
                    => {
                        const inputs = tree.fullFor(node).?.ast.inputs;
                        const start = tree.firstToken(inputs[0]);
                        const end = tree.lastToken(inputs[inputs.len - 1]);
                        return tree.tokensToSpan(start, end, start);
                    },

                    .@"orelse" => node,
                    .@"catch" => node,
                    else => unreachable,
                };
                return tree.nodeToSpan(src_node);
            },
            .for_input => |for_input| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = for_input.for_node_offset.toAbsolute(src_loc.base_node);
                const for_full = tree.fullFor(node).?;
                const src_node = for_full.ast.inputs[for_input.input_index];
                return tree.nodeToSpan(src_node);
            },
            .for_capture_from_input => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const input_node = node_off.toAbsolute(src_loc.base_node);
                // We have to actually linear scan the whole AST to find the for loop
                // that contains this input.
                const node_tags = tree.nodes.items(.tag);
                for (node_tags, 0..) |node_tag, node_usize| {
                    const node: Ast.Node.Index = @enumFromInt(node_usize);
                    switch (node_tag) {
                        .for_simple, .@"for" => {
                            const for_full = tree.fullFor(node).?;
                            for (for_full.ast.inputs, 0..) |input, input_index| {
                                if (input_node == input) {
                                    var count = input_index;
                                    var tok = for_full.payload_token;
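                                    // Walk the capture list in lockstep with the
                                    // inputs: captures are identifiers (optionally
                                    // preceded by `*` for by-reference captures)
                                    // separated by commas; `count` is how many
                                    // commas remain before the capture matching
                                    // this input.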
                                    while (true) {
                                        switch (tree.tokenTag(tok)) {
                                            .comma => {
                                                count -= 1;
                                                tok += 1;
                                            },
                                            .identifier => {
                                                if (count == 0)
                                                    return tree.tokensToSpan(tok, tok + 1, tok);
                                                tok += 1;
                                            },
                                            .asterisk => {
                                                if (count == 0)
                                                    return tree.tokensToSpan(tok, tok + 2, tok);
                                                tok += 1;
                                            },
                                            else => unreachable,
                                        }
                                    }
                                }
                            }
                        },
                        else => continue,
                    }
                } else unreachable;
            },
            .call_arg => |call_arg| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = call_arg.call_node_offset.toAbsolute(src_loc.base_node);
                var buf: [2]Ast.Node.Index = undefined;
                const call_full = tree.fullCall(buf[0..1], node) orelse {
                    assert(tree.nodeTag(node) == .builtin_call);
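                    // Not a regular call, so this is a builtin call (in practice
                    // `@call`): its last parameter is the args tuple, which is the
                    // final node of the call's extra data range.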
                    const call_args_node: Ast.Node.Index = @enumFromInt(tree.extra_data[@intFromEnum(tree.nodeData(node).extra_range.end) - 1]);
                    switch (tree.nodeTag(call_args_node)) {
                        .array_init_one,
                        .array_init_one_comma,
                        .array_init_dot_two,
                        .array_init_dot_two_comma,
                        .array_init_dot,
                        .array_init_dot_comma,
                        .array_init,
                        .array_init_comma,
                        => {
                            const full = tree.fullArrayInit(&buf, call_args_node).?.ast.elements;
                            return tree.nodeToSpan(full[call_arg.arg_index]);
                        },
                        .struct_init_one,
                        .struct_init_one_comma,
                        .struct_init_dot_two,
                        .struct_init_dot_two_comma,
                        .struct_init_dot,
                        .struct_init_dot_comma,
                        .struct_init,
                        .struct_init_comma,
                        => {
                            const full = tree.fullStructInit(&buf, call_args_node).?.ast.fields;
                            return tree.nodeToSpan(full[call_arg.arg_index]);
                        },
                        else => return tree.nodeToSpan(call_args_node),
                    }
                };
                return tree.nodeToSpan(call_full.ast.params[call_arg.arg_index]);
            },
            .fn_proto_param, .fn_proto_param_type => |fn_proto_param| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = fn_proto_param.fn_proto_node_offset.toAbsolute(src_loc.base_node);
                var buf: [1]Ast.Node.Index = undefined;
                const full = tree.fullFnProto(&buf, node).?;
                var it = full.iterate(tree);
                var i: usize = 0;
                while (it.next()) |param| : (i += 1) {
                    if (i != fn_proto_param.param_index) continue;

                    switch (src_loc.lazy) {
                        .fn_proto_param_type => if (param.anytype_ellipsis3) |tok| {
                            return tree.tokenToSpan(tok);
                        } else {
                            return tree.nodeToSpan(param.type_expr.?);
                        },
                        .fn_proto_param => if (param.anytype_ellipsis3) |tok| {
                            const first = param.comptime_noalias orelse param.name_token orelse tok;
                            return tree.tokensToSpan(first, tok, first);
                        } else {
                            const first = param.comptime_noalias orelse param.name_token orelse tree.firstToken(param.type_expr.?);
                            return tree.tokensToSpan(first, tree.lastToken(param.type_expr.?), first);
                        },
                        else => unreachable,
                    }
                }
                unreachable;
            },
            .node_offset_bin_lhs => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                return tree.nodeToSpan(tree.nodeData(node).node_and_node[0]);
            },
            .node_offset_bin_rhs => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                return tree.nodeToSpan(tree.nodeData(node).node_and_node[1]);
            },
            .array_cat_lhs, .array_cat_rhs => |cat| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = cat.array_cat_offset.toAbsolute(src_loc.base_node);
                const arr_node = if (src_loc.lazy == .array_cat_lhs)
                    tree.nodeData(node).node_and_node[0]
                else
                    tree.nodeData(node).node_and_node[1];

                var buf: [2]Ast.Node.Index = undefined;
                switch (tree.nodeTag(arr_node)) {
                    .array_init_one,
                    .array_init_one_comma,
                    .array_init_dot_two,
                    .array_init_dot_two_comma,
                    .array_init_dot,
                    .array_init_dot_comma,
                    .array_init,
                    .array_init_comma,
                    => {
                        const full = tree.fullArrayInit(&buf, arr_node).?.ast.elements;
                        return tree.nodeToSpan(full[cat.elem_index]);
                    },
                    else => return tree.nodeToSpan(arr_node),
                }
            },

            .node_offset_try_operand => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                return tree.nodeToSpan(tree.nodeData(node).node);
            },

            .node_offset_switch_operand => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                const condition, _ = tree.nodeData(node).node_and_extra;
                return tree.nodeToSpan(condition);
            },

            .node_offset_switch_else_prong => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const switch_node = node_off.toAbsolute(src_loc.base_node);
                _, const extra_index = tree.nodeData(switch_node).node_and_extra;
                const case_nodes = tree.extraDataSlice(tree.extraData(extra_index, Ast.Node.SubRange), Ast.Node.Index);
                for (case_nodes) |case_node| {
                    const case = tree.fullSwitchCase(case_node).?;
                    if (case.ast.values.len == 0) {
                        return tree.nodeToSpan(case_node);
                    }
                } else unreachable;
            },

            .node_offset_switch_under_prong => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const switch_node = node_off.toAbsolute(src_loc.base_node);
                _, const extra_index = tree.nodeData(switch_node).node_and_extra;
                const case_nodes = tree.extraDataSlice(tree.extraData(extra_index, Ast.Node.SubRange), Ast.Node.Index);
                for (case_nodes) |case_node| {
                    const case = tree.fullSwitchCase(case_node).?;
                    for (case.ast.values) |val| {
                        if (tree.nodeTag(val) == .identifier and
                            mem.eql(u8, tree.tokenSlice(tree.nodeMainToken(val)), "_"))
                        {
                            return tree.tokensToSpan(
                                tree.firstToken(case_node),
                                tree.lastToken(case_node),
                                tree.nodeMainToken(val),
                            );
                        }
                    }
                } else unreachable;
            },

            .node_offset_switch_range => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const switch_node = node_off.toAbsolute(src_loc.base_node);
                _, const extra_index = tree.nodeData(switch_node).node_and_extra;
                const case_nodes = tree.extraDataSlice(tree.extraData(extra_index, Ast.Node.SubRange), Ast.Node.Index);
                for (case_nodes) |case_node| {
                    const case = tree.fullSwitchCase(case_node).?;
                    for (case.ast.values) |item_node| {
                        if (tree.nodeTag(item_node) == .switch_range) {
                            return tree.nodeToSpan(item_node);
                        }
                    }
                } else unreachable;
            },
            .node_offset_fn_type_align => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                var buf: [1]Ast.Node.Index = undefined;
                const full = tree.fullFnProto(&buf, node).?;
                return tree.nodeToSpan(full.ast.align_expr.unwrap() orelse node);
            },
            .node_offset_fn_type_addrspace => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                var buf: [1]Ast.Node.Index = undefined;
                const full = tree.fullFnProto(&buf, node).?;
                return tree.nodeToSpan(full.ast.addrspace_expr.unwrap() orelse node);
            },
            .node_offset_fn_type_section => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                var buf: [1]Ast.Node.Index = undefined;
                const full = tree.fullFnProto(&buf, node).?;
                return tree.nodeToSpan(full.ast.section_expr.unwrap() orelse node);
            },
            .node_offset_fn_type_cc => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                var buf: [1]Ast.Node.Index = undefined;
                const full = tree.fullFnProto(&buf, node).?;
                return tree.nodeToSpan(full.ast.callconv_expr.unwrap() orelse node);
            },

            .node_offset_fn_type_ret_ty => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                var buf: [1]Ast.Node.Index = undefined;
                const full = tree.fullFnProto(&buf, node).?;
                return tree.nodeToSpan(full.ast.return_type.unwrap().?);
            },
            .node_offset_param => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);

                var first_tok = tree.firstToken(node);
                while (true) switch (tree.tokenTag(first_tok - 1)) {
                    .colon, .identifier, .keyword_comptime, .keyword_noalias => first_tok -= 1,
                    else => break,
                };
                return tree.tokensToSpan(
                    first_tok,
                    tree.lastToken(node),
                    first_tok,
                );
            },
            .token_offset_param => |token_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const main_token = tree.nodeMainToken(src_loc.base_node);
                const tok_index = token_off.toAbsolute(main_token);

                var first_tok = tok_index;
                while (true) switch (tree.tokenTag(first_tok - 1)) {
                    .colon, .identifier, .keyword_comptime, .keyword_noalias => first_tok -= 1,
                    else => break,
                };
                return tree.tokensToSpan(
                    first_tok,
                    tok_index,
                    first_tok,
                );
            },

            .node_offset_anyframe_type => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const parent_node = node_off.toAbsolute(src_loc.base_node);
                _, const child_type = tree.nodeData(parent_node).token_and_node;
                return tree.nodeToSpan(child_type);
            },

            .node_offset_lib_name => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const parent_node = node_off.toAbsolute(src_loc.base_node);
                var buf: [1]Ast.Node.Index = undefined;
                const full = tree.fullFnProto(&buf, parent_node).?;
                const tok_index = full.lib_name.?;
                const start = tree.tokenStart(tok_index);
                const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
                return Span{ .start = start, .end = end, .main = start };
            },

            .node_offset_array_type_len => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const parent_node = node_off.toAbsolute(src_loc.base_node);

                const full = tree.fullArrayType(parent_node).?;
                return tree.nodeToSpan(full.ast.elem_count);
            },
            .node_offset_array_type_sentinel => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const parent_node = node_off.toAbsolute(src_loc.base_node);

                const full = tree.fullArrayType(parent_node).?;
                return tree.nodeToSpan(full.ast.sentinel.unwrap().?);
            },
            .node_offset_array_type_elem => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const parent_node = node_off.toAbsolute(src_loc.base_node);

                const full = tree.fullArrayType(parent_node).?;
                return tree.nodeToSpan(full.ast.elem_type);
            },
            .node_offset_un_op => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = node_off.toAbsolute(src_loc.base_node);
                return tree.nodeToSpan(tree.nodeData(node).node);
            },
            .node_offset_ptr_elem => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const parent_node = node_off.toAbsolute(src_loc.base_node);

                const full = tree.fullPtrType(parent_node).?;
                return tree.nodeToSpan(full.ast.child_type);
            },
            .node_offset_ptr_sentinel => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const parent_node = node_off.toAbsolute(src_loc.base_node);

                const full = tree.fullPtrType(parent_node).?;
                return tree.nodeToSpan(full.ast.sentinel.unwrap().?);
            },
            .node_offset_ptr_align => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const parent_node = node_off.toAbsolute(src_loc.base_node);

                const full = tree.fullPtrType(parent_node).?;
                return tree.nodeToSpan(full.ast.align_node.unwrap().?);
            },
            .node_offset_ptr_addrspace => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const parent_node = node_off.toAbsolute(src_loc.base_node);

                const full = tree.fullPtrType(parent_node).?;
                return tree.nodeToSpan(full.ast.addrspace_node.unwrap().?);
            },
            .node_offset_ptr_bitoffset => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const parent_node = node_off.toAbsolute(src_loc.base_node);

                const full = tree.fullPtrType(parent_node).?;
                return tree.nodeToSpan(full.ast.bit_range_start.unwrap().?);
            },
            .node_offset_ptr_hostsize => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const parent_node = node_off.toAbsolute(src_loc.base_node);

                const full = tree.fullPtrType(parent_node).?;
                return tree.nodeToSpan(full.ast.bit_range_end.unwrap().?);
            },
            .node_offset_container_tag => |node_off| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const parent_node = node_off.toAbsolute(src_loc.base_node);

                switch (tree.nodeTag(parent_node)) {
                    .container_decl_arg, .container_decl_arg_trailing => {
                        const full = tree.containerDeclArg(parent_node);
                        const arg_node = full.ast.arg.unwrap().?;
                        return tree.nodeToSpan(arg_node);
                    },
                    .tagged_union_enum_tag, .tagged_union_enum_tag_trailing => {
                        const full = tree.taggedUnionEnumTag(parent_node);
                        const arg_node = full.ast.arg.unwrap().?;

                        return tree.tokensToSpan(
                            tree.firstToken(arg_node) - 2,
                            tree.lastToken(arg_node) + 1,
                            tree.nodeMainToken(arg_node),
                        );
                    },
                    else => unreachable,
                }
            },
1931 },
1932 .node_offset_field_default => |node_off| {
1933 const tree = try src_loc.file_scope.getTree(zcu);
1934 const parent_node = node_off.toAbsolute(src_loc.base_node);
1935
1936 const full: Ast.full.ContainerField = switch (tree.nodeTag(parent_node)) {
1937 .container_field => tree.containerField(parent_node),
1938 .container_field_init => tree.containerFieldInit(parent_node),
1939 else => unreachable,
1940 };
1941 return tree.nodeToSpan(full.ast.value_expr.unwrap().?);
1942 },
1943 .node_offset_init_ty => |node_off| {
1944 const tree = try src_loc.file_scope.getTree(zcu);
1945 const parent_node = node_off.toAbsolute(src_loc.base_node);
1946
1947 var buf: [2]Ast.Node.Index = undefined;
1948 const type_expr = if (tree.fullArrayInit(&buf, parent_node)) |array_init|
1949 array_init.ast.type_expr.unwrap().?
1950 else
1951 tree.fullStructInit(&buf, parent_node).?.ast.type_expr.unwrap().?;
1952 return tree.nodeToSpan(type_expr);
1953 },
1954 .node_offset_store_ptr => |node_off| {
1955 const tree = try src_loc.file_scope.getTree(zcu);
1956 const node = node_off.toAbsolute(src_loc.base_node);
1957
1958 switch (tree.nodeTag(node)) {
1959 .assign,
1960 .assign_mul,
1961 .assign_div,
1962 .assign_mod,
1963 .assign_add,
1964 .assign_sub,
1965 .assign_shl,
1966 .assign_shl_sat,
1967 .assign_shr,
1968 .assign_bit_and,
1969 .assign_bit_xor,
1970 .assign_bit_or,
1971 .assign_mul_wrap,
1972 .assign_add_wrap,
1973 .assign_sub_wrap,
1974 .assign_mul_sat,
1975 .assign_add_sat,
1976 .assign_sub_sat,
1977 => return tree.nodeToSpan(tree.nodeData(node).node_and_node[0]),
1978 else => return tree.nodeToSpan(node),
1979 }
1980 },
1981 .node_offset_store_operand => |node_off| {
1982 const tree = try src_loc.file_scope.getTree(zcu);
1983 const node = node_off.toAbsolute(src_loc.base_node);
1984
1985 switch (tree.nodeTag(node)) {
1986 .assign,
1987 .assign_mul,
1988 .assign_div,
1989 .assign_mod,
1990 .assign_add,
1991 .assign_sub,
1992 .assign_shl,
1993 .assign_shl_sat,
1994 .assign_shr,
1995 .assign_bit_and,
1996 .assign_bit_xor,
1997 .assign_bit_or,
1998 .assign_mul_wrap,
1999 .assign_add_wrap,
2000 .assign_sub_wrap,
2001 .assign_mul_sat,
2002 .assign_add_sat,
2003 .assign_sub_sat,
2004 => return tree.nodeToSpan(tree.nodeData(node).node_and_node[1]),
2005 else => return tree.nodeToSpan(node),
2006 }
2007 },
2008 .node_offset_return_operand => |node_off| {
2009 const tree = try src_loc.file_scope.getTree(zcu);
2010 const node = node_off.toAbsolute(src_loc.base_node);
2011 if (tree.nodeTag(node) == .@"return") {
2012 if (tree.nodeData(node).opt_node.unwrap()) |lhs| {
2013 return tree.nodeToSpan(lhs);
2014 }
2015 }
2016 return tree.nodeToSpan(node);
2017 },
            .container_field_name,
            .container_field_value,
            .container_field_type,
            .container_field_align,
            => |field_idx| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = src_loc.base_node;
                var buf: [2]Ast.Node.Index = undefined;
                const container_decl = tree.fullContainerDecl(&buf, node) orelse
                    return tree.nodeToSpan(node);

                var cur_field_idx: usize = 0;
                for (container_decl.ast.members) |member_node| {
                    const field = tree.fullContainerField(member_node) orelse continue;
                    if (cur_field_idx < field_idx) {
                        cur_field_idx += 1;
                        continue;
                    }
                    const field_component_node = switch (src_loc.lazy) {
                        .container_field_name => .none,
                        .container_field_value => field.ast.value_expr,
                        .container_field_type => field.ast.type_expr,
                        .container_field_align => field.ast.align_expr,
                        else => unreachable,
                    };
                    if (field_component_node.unwrap()) |component_node| {
                        return tree.nodeToSpan(component_node);
                    } else {
                        return tree.tokenToSpan(field.ast.main_token);
                    }
                } else unreachable;
            },
            .tuple_field_type, .tuple_field_init => |field_info| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = field_info.tuple_decl_node_offset.toAbsolute(src_loc.base_node);
                var buf: [2]Ast.Node.Index = undefined;
                const container_decl = tree.fullContainerDecl(&buf, node) orelse
                    return tree.nodeToSpan(node);

                const field = tree.fullContainerField(container_decl.ast.members[field_info.elem_index]).?;
                return tree.nodeToSpan(switch (src_loc.lazy) {
                    .tuple_field_type => field.ast.type_expr.unwrap().?,
                    .tuple_field_init => field.ast.value_expr.unwrap().?,
                    else => unreachable,
                });
            },
            .init_elem => |init_elem| {
                const tree = try src_loc.file_scope.getTree(zcu);
                const init_node = init_elem.init_node_offset.toAbsolute(src_loc.base_node);
                var buf: [2]Ast.Node.Index = undefined;
                if (tree.fullArrayInit(&buf, init_node)) |full| {
                    const elem_node = full.ast.elements[init_elem.elem_index];
                    return tree.nodeToSpan(elem_node);
                } else if (tree.fullStructInit(&buf, init_node)) |full| {
                    const field_node = full.ast.fields[init_elem.elem_index];
                    return tree.tokensToSpan(
                        tree.firstToken(field_node) - 3,
                        tree.lastToken(field_node),
                        tree.nodeMainToken(field_node) - 2,
                    );
                } else unreachable;
            },
            .init_field_name,
            .init_field_linkage,
            .init_field_section,
            .init_field_visibility,
            .init_field_rw,
            .init_field_locality,
            .init_field_cache,
            .init_field_library,
            .init_field_thread_local,
            .init_field_dll_import,
            .init_field_relocation,
            => |builtin_call_node| {
                const wanted = switch (src_loc.lazy) {
                    .init_field_name => "name",
                    .init_field_linkage => "linkage",
                    .init_field_section => "section",
                    .init_field_visibility => "visibility",
                    .init_field_rw => "rw",
                    .init_field_locality => "locality",
                    .init_field_cache => "cache",
                    .init_field_library => "library",
                    .init_field_thread_local => "thread_local",
                    .init_field_dll_import => "dll_import",
                    .init_field_relocation => "relocation",
                    else => unreachable,
                };
                const tree = try src_loc.file_scope.getTree(zcu);
                const node = builtin_call_node.toAbsolute(src_loc.base_node);
                var builtin_buf: [2]Ast.Node.Index = undefined;
                const args = tree.builtinCallParams(&builtin_buf, node).?;
                const arg_node = args[1];
                var buf: [2]Ast.Node.Index = undefined;
                const full = tree.fullStructInit(&buf, arg_node) orelse
                    return tree.nodeToSpan(arg_node);
                for (full.ast.fields) |field_node| {
                    // . IDENTIFIER = field_node
                    const name_token = tree.firstToken(field_node) - 2;
                    const name = tree.tokenSlice(name_token);
                    if (std.mem.eql(u8, name, wanted)) {
                        return tree.tokensToSpan(
                            name_token - 1,
                            tree.lastToken(field_node),
                            tree.nodeMainToken(field_node) - 2,
                        );
                    }
                }
                return tree.nodeToSpan(arg_node);
            },
            .switch_case_item,
            .switch_case_item_range_first,
            .switch_case_item_range_last,
            .switch_capture,
            .switch_tag_capture,
            => {
                const switch_node_offset, const want_case_idx = switch (src_loc.lazy) {
                    .switch_case_item,
                    .switch_case_item_range_first,
                    .switch_case_item_range_last,
                    => |x| .{ x.switch_node_offset, x.case_idx },
                    .switch_capture,
                    .switch_tag_capture,
                    => |x| .{ x.switch_node_offset, x.case_idx },
                    else => unreachable,
                };

                const tree = try src_loc.file_scope.getTree(zcu);
                const switch_node = switch_node_offset.toAbsolute(src_loc.base_node);
                _, const extra_index = tree.nodeData(switch_node).node_and_extra;
                const case_nodes = tree.extraDataSlice(tree.extraData(extra_index, Ast.Node.SubRange), Ast.Node.Index);

                var multi_i: u32 = 0;
                var scalar_i: u32 = 0;
                var underscore_node: Ast.Node.OptionalIndex = .none;
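                // `want_case_idx` identifies a case as (kind, index), with scalar
                // and multi prongs counted independently, so keep a separate
                // running index for each kind while scanning the cases in order.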
                const case = case: for (case_nodes) |case_node| {
                    const case = tree.fullSwitchCase(case_node).?;
                    if (case.ast.values.len == 0) {
                        if (want_case_idx == LazySrcLoc.Offset.SwitchCaseIndex.special_else) {
                            break :case case;
                        }
                        continue :case;
                    }
                    if (underscore_node == .none) for (case.ast.values) |val_node| {
                        if (tree.nodeTag(val_node) == .identifier and
                            mem.eql(u8, tree.tokenSlice(tree.nodeMainToken(val_node)), "_"))
                        {
                            underscore_node = val_node.toOptional();
                            if (want_case_idx == LazySrcLoc.Offset.SwitchCaseIndex.special_under) {
                                break :case case;
                            }
                            continue :case;
                        }
                    };

                    const is_multi = case.ast.values.len != 1 or
                        tree.nodeTag(case.ast.values[0]) == .switch_range;

                    switch (want_case_idx.kind) {
                        .scalar => if (!is_multi and want_case_idx.index == scalar_i)
                            break :case case,
                        .multi => if (is_multi and want_case_idx.index == multi_i)
                            break :case case,
                    }

                    if (is_multi) {
                        multi_i += 1;
                    } else {
                        scalar_i += 1;
                    }
                } else unreachable;

                const want_item = switch (src_loc.lazy) {
                    .switch_case_item,
                    .switch_case_item_range_first,
                    .switch_case_item_range_last,
                    => |x| item_idx: {
                        assert(want_case_idx != LazySrcLoc.Offset.SwitchCaseIndex.special_else);
                        break :item_idx x.item_idx;
                    },
                    .switch_capture, .switch_tag_capture => {
                        const start = switch (src_loc.lazy) {
                            .switch_capture => case.payload_token.?,
                            .switch_tag_capture => tok: {
                                var tok = case.payload_token.?;
                                if (tree.tokenTag(tok) == .asterisk) tok += 1;
                                tok += 2; // skip the value capture and the comma
                                break :tok tok;
                            },
                            else => unreachable,
                        };
                        const end = switch (tree.tokenTag(start)) {
                            .asterisk => start + 1,
                            else => start,
                        };
                        return tree.tokensToSpan(start, end, start);
                    },
                    else => unreachable,
                };

                switch (want_item.kind) {
                    .single => {
                        var item_i: u32 = 0;
                        for (case.ast.values) |item_node| {
                            if (item_node.toOptional() == underscore_node or
                                tree.nodeTag(item_node) == .switch_range)
                            {
                                continue;
                            }
                            if (item_i != want_item.index) {
                                item_i += 1;
                                continue;
                            }
                            return tree.nodeToSpan(item_node);
                        } else unreachable;
                    },
                    .range => {
                        var range_i: u32 = 0;
                        for (case.ast.values) |item_node| {
                            if (tree.nodeTag(item_node) != .switch_range) {
                                continue;
                            }
                            if (range_i != want_item.index) {
                                range_i += 1;
                                continue;
                            }
                            const first, const last = tree.nodeData(item_node).node_and_node;
                            return switch (src_loc.lazy) {
                                .switch_case_item => tree.nodeToSpan(item_node),
                                .switch_case_item_range_first => tree.nodeToSpan(first),
                                .switch_case_item_range_last => tree.nodeToSpan(last),
                                else => unreachable,
                            };
                        } else unreachable;
                    },
                }
            },
            .func_decl_param_comptime => |param_idx| {
                const tree = try src_loc.file_scope.getTree(zcu);
                var buf: [1]Ast.Node.Index = undefined;
                const full = tree.fullFnProto(&buf, src_loc.base_node).?;
                var param_it = full.iterate(tree);
                for (0..param_idx) |_| assert(param_it.next() != null);
                const param = param_it.next().?;
                return tree.tokenToSpan(param.comptime_noalias.?);
            },
            .func_decl_param_ty => |param_idx| {
                const tree = try src_loc.file_scope.getTree(zcu);
                var buf: [1]Ast.Node.Index = undefined;
                const full = tree.fullFnProto(&buf, src_loc.base_node).?;
                var param_it = full.iterate(tree);
                for (0..param_idx) |_| assert(param_it.next() != null);
                const param = param_it.next().?;
                return tree.nodeToSpan(param.type_expr.?);
            },
        }
    }
};

pub const LazySrcLoc = struct {
    /// This instruction provides the source node that locations are resolved relative to.
    /// It is a `declaration`, `struct_decl`, `union_decl`, `enum_decl`, or `opaque_decl`.
    /// This must be valid even if `offset` is an absolute value, since it is required to
    /// determine the file which the `LazySrcLoc` refers to.
    base_node_inst: InternPool.TrackedInst.Index,
    /// This field determines the source location relative to `base_node_inst`.
    offset: Offset,

    pub const Offset = union(enum) {
        /// When this tag is set, the code that constructed this `LazySrcLoc` is asserting
        /// that all code paths which would need to resolve the source location are
        /// unreachable. If you are debugging a case where this tag is incorrectly set,
        /// consider using reverse-continue with a memory watchpoint to see where the
        /// value is being set to this tag.
        /// `base_node_inst` is unused.
        unneeded,
        /// The source location points to a byte offset within a source file,
        /// offset from 0. The source file is determined contextually.
        byte_abs: u32,
        /// The source location points to a token within a source file,
        /// offset from 0. The source file is determined contextually.
        token_abs: Ast.TokenIndex,
        /// The source location points to an AST node within a source file,
        /// offset from 0. The source file is determined contextually.
        node_abs: Ast.Node.Index,
        /// The source location points to a byte offset within a source file,
        /// offset from the byte offset of the base node within the file.
        byte_offset: u32,
        /// This data is the offset into the token list from the base node's first token.
        token_offset: Ast.TokenOffset,
        /// The source location points to an AST node, which is this value offset
        /// from its containing base node AST index.
        node_offset: TracedOffset,
        /// The source location points to the main token of an AST node, found
        /// by taking this AST node index offset from the containing base node.
        node_offset_main_token: Ast.Node.Offset,
        /// The source location points to the beginning of a struct initializer.
        node_offset_initializer: Ast.Node.Offset,
        /// The source location points to a variable declaration type expression,
        /// found by taking this AST node index offset from the containing
        /// base node, which points to a variable declaration AST node. Next, navigate
        /// to the type expression.
        node_offset_var_decl_ty: Ast.Node.Offset,
        /// The source location points to the alignment expression of a var decl.
        node_offset_var_decl_align: Ast.Node.Offset,
        /// The source location points to the linksection expression of a var decl.
        node_offset_var_decl_section: Ast.Node.Offset,
        /// The source location points to the addrspace expression of a var decl.
        node_offset_var_decl_addrspace: Ast.Node.Offset,
        /// The source location points to the initializer of a var decl.
        node_offset_var_decl_init: Ast.Node.Offset,
        /// The source location points to the given argument of a builtin function call.
        /// `builtin_call_node` points to the builtin call.
        /// `arg_index` is the index of the argument which the source location refers to.
        node_offset_builtin_call_arg: struct {
            builtin_call_node: Ast.Node.Offset,
            arg_index: u32,
        },
        /// Like `node_offset_builtin_call_arg` but recurses through arbitrarily many calls
        /// to pointer cast builtins (taking the first argument of the most nested).
        node_offset_ptrcast_operand: Ast.Node.Offset,
        /// The source location points to the index expression of an array access
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to an array access AST node. Next, navigate
        /// to the index expression.
        node_offset_array_access_index: Ast.Node.Offset,
        /// The source location points to the sliced operand (LHS) of a slice
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to a slice AST node. Next, navigate
        /// to the sliced expression.
        node_offset_slice_ptr: Ast.Node.Offset,
        /// The source location points to the start expression of a slice
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to a slice AST node. Next, navigate
        /// to the start expression.
        node_offset_slice_start: Ast.Node.Offset,
        /// The source location points to the end expression of a slice
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to a slice AST node. Next, navigate
        /// to the end expression.
        node_offset_slice_end: Ast.Node.Offset,
        /// The source location points to the sentinel expression of a slice
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to a slice AST node. Next, navigate
        /// to the sentinel expression.
        node_offset_slice_sentinel: Ast.Node.Offset,
        /// The source location points to the callee expression of a function
        /// call expression, found by taking this AST node index offset from the containing
        /// base node, which points to a function call AST node. Next, navigate
        /// to the callee expression.
        node_offset_call_func: Ast.Node.Offset,
        /// The payload is offset from the containing base node.
        /// The source location points to the field name of:
        ///  * a field access expression (`a.b`), or
        ///  * the callee of a method call (`a.b()`)
        node_offset_field_name: Ast.Node.Offset,
        /// The payload is offset from the containing base node and points at the
        /// operand ("b" node) of a field initialization expression (`.a = b`).
        /// The source location points to the field name ("a").
        node_offset_field_name_init: Ast.Node.Offset,
        /// The source location points to the pointer of a pointer deref expression,
        /// found by taking this AST node index offset from the containing
        /// base node, which points to a pointer deref AST node. Next, navigate
        /// to the pointer expression.
        node_offset_deref_ptr: Ast.Node.Offset,
        /// The source location points to the assembly source code of an inline assembly
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to an inline assembly AST node. Next, navigate
        /// to the asm template source code.
        node_offset_asm_source: Ast.Node.Offset,
        /// The source location points to the return type of an inline assembly
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to an inline assembly AST node. Next, navigate
        /// to the return type expression.
        node_offset_asm_ret_ty: Ast.Node.Offset,
        /// The source location points to the condition expression of an if
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to an if expression AST node. Next, navigate
        /// to the condition expression.
        node_offset_if_cond: Ast.Node.Offset,
        /// The source location points to a binary expression, such as `a + b`, found
        /// by taking this AST node index offset from the containing base node.
        node_offset_bin_op: Ast.Node.Offset,
        /// The source location points to the LHS of a binary expression, found
        /// by taking this AST node index offset from the containing base node,
        /// which points to a binary expression AST node. Next, navigate to the LHS.
        node_offset_bin_lhs: Ast.Node.Offset,
        /// The source location points to the RHS of a binary expression, found
        /// by taking this AST node index offset from the containing base node,
        /// which points to a binary expression AST node. Next, navigate to the RHS.
        node_offset_bin_rhs: Ast.Node.Offset,
        /// The source location points to the operand of a try expression, found
        /// by taking this AST node index offset from the containing base node,
        /// which points to a try expression AST node. Next, navigate to the
        /// operand expression.
        node_offset_try_operand: Ast.Node.Offset,
        /// The source location points to the operand of a switch expression, found
        /// by taking this AST node index offset from the containing base node,
        /// which points to a switch expression AST node. Next, navigate to the operand.
        node_offset_switch_operand: Ast.Node.Offset,
        /// The source location points to the else prong of a switch expression, found
        /// by taking this AST node index offset from the containing base node,
        /// which points to a switch expression AST node. Next, navigate to the else prong.
        node_offset_switch_else_prong: Ast.Node.Offset,
        /// The source location points to the `_` prong of a switch expression, found
        /// by taking this AST node index offset from the containing base node,
        /// which points to a switch expression AST node. Next, navigate to the `_` prong.
        node_offset_switch_under_prong: Ast.Node.Offset,
        /// The source location points to all the ranges of a switch expression, found
        /// by taking this AST node index offset from the containing base node,
        /// which points to a switch expression AST node. Next, navigate to any of the
        /// range nodes. The error applies to all of them.
        node_offset_switch_range: Ast.Node.Offset,
        /// The source location points to the align expr of a function type
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to a function type AST node. Next, navigate to
        /// the align expression node.
        node_offset_fn_type_align: Ast.Node.Offset,
        /// The source location points to the addrspace expr of a function type
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to a function type AST node. Next, navigate to
        /// the addrspace expression node.
        node_offset_fn_type_addrspace: Ast.Node.Offset,
        /// The source location points to the linksection expr of a function type
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to a function type AST node. Next, navigate to
        /// the linksection expression node.
        node_offset_fn_type_section: Ast.Node.Offset,
        /// The source location points to the calling convention of a function type
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to a function type AST node. Next, navigate to
        /// the calling convention node.
        node_offset_fn_type_cc: Ast.Node.Offset,
        /// The source location points to the return type of a function type
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to a function type AST node. Next, navigate to
        /// the return type node.
        node_offset_fn_type_ret_ty: Ast.Node.Offset,
        node_offset_param: Ast.Node.Offset,
        token_offset_param: Ast.TokenOffset,
        /// The source location points to the type expression of an `anyframe->T`
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to an `anyframe->T` expression AST node. Next, navigate
        /// to the type expression.
        node_offset_anyframe_type: Ast.Node.Offset,
        /// The source location points to the string literal of `extern "foo"`, found
        /// by taking this AST node index offset from the containing
        /// base node, which points to a function prototype or variable declaration
        /// expression AST node. Next, navigate to the string literal of the `extern "foo"`.
        node_offset_lib_name: Ast.Node.Offset,
        /// The source location points to the len expression of an `[N:S]T`
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to an `[N:S]T` expression AST node. Next, navigate
        /// to the len expression.
        node_offset_array_type_len: Ast.Node.Offset,
        /// The source location points to the sentinel expression of an `[N:S]T`
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to an `[N:S]T` expression AST node. Next, navigate
        /// to the sentinel expression.
        node_offset_array_type_sentinel: Ast.Node.Offset,
        /// The source location points to the elem expression of an `[N:S]T`
        /// expression, found by taking this AST node index offset from the containing
        /// base node, which points to an `[N:S]T` expression AST node. Next, navigate
        /// to the elem expression.
        node_offset_array_type_elem: Ast.Node.Offset,
        /// The source location points to the operand of a unary expression.
        node_offset_un_op: Ast.Node.Offset,
        /// The source location points to the elem type of a pointer.
        node_offset_ptr_elem: Ast.Node.Offset,
        /// The source location points to the sentinel of a pointer.
        node_offset_ptr_sentinel: Ast.Node.Offset,
        /// The source location points to the align expr of a pointer.
        node_offset_ptr_align: Ast.Node.Offset,
        /// The source location points to the addrspace expr of a pointer.
        node_offset_ptr_addrspace: Ast.Node.Offset,
        /// The source location points to the bit-offset of a pointer.
        node_offset_ptr_bitoffset: Ast.Node.Offset,
        /// The source location points to the host size of a pointer.
        node_offset_ptr_hostsize: Ast.Node.Offset,
        /// The source location points to the tag type of a union or an enum.
        node_offset_container_tag: Ast.Node.Offset,
        /// The source location points to the default value of a field.
        node_offset_field_default: Ast.Node.Offset,
        /// The source location points to the type of an array or struct initializer.
        node_offset_init_ty: Ast.Node.Offset,
        /// The source location points to the LHS of an assignment (or assign-op, e.g. `+=`).
        node_offset_store_ptr: Ast.Node.Offset,
        /// The source location points to the RHS of an assignment (or assign-op, e.g. `+=`).
        node_offset_store_operand: Ast.Node.Offset,
        /// The source location points to the operand of a `return` statement, or
        /// the `return` itself if there is no explicit operand.
        node_offset_return_operand: Ast.Node.Offset,
        /// The source location points to a for loop input.
        for_input: struct {
            /// Points to the for loop AST node.
            for_node_offset: Ast.Node.Offset,
            /// Picks one of the inputs from the condition.
            input_index: u32,
        },
        /// The source location points to one of the captures of a for loop, found
        /// by taking this AST node index offset from the containing
        /// base node, which points to one of the input nodes of a for loop.
        /// Next, navigate to the corresponding capture.
        for_capture_from_input: Ast.Node.Offset,
        /// The source location points to the argument node of a function call.
        call_arg: struct {
            /// Points to the function call AST node.
            call_node_offset: Ast.Node.Offset,
            /// The index of the argument the source location points to.
            arg_index: u32,
        },
        fn_proto_param: FnProtoParam,
        fn_proto_param_type: FnProtoParam,
        array_cat_lhs: ArrayCat,
        array_cat_rhs: ArrayCat,
        /// The source location points to the name of the field at the given index
        /// of the container type declaration at the base node.
        container_field_name: u32,
        /// Like `container_field_name`, but points at the field's default value.
        container_field_value: u32,
        /// Like `container_field_name`, but points at the field's type.
        container_field_type: u32,
        /// Like `container_field_name`, but points at the field's alignment.
        container_field_align: u32,
        /// The source location points to the type of the field at the given index
        /// of the tuple type declaration at `tuple_decl_node_offset`.
        tuple_field_type: TupleField,
        /// The source location points to the default init of the field at the given index
        /// of the tuple type declaration at `tuple_decl_node_offset`.
        tuple_field_init: TupleField,
        /// The source location points to the given element/field of a struct or
        /// array initialization expression.
        init_elem: struct {
            /// Points to the AST node of the initialization expression.
            init_node_offset: Ast.Node.Offset,
            /// The index of the field/element the source location points to.
            elem_index: u32,
        },
        // The following source locations are like `init_elem`, but refer to a
        // field with a specific name. If such a field is not given, the entire
        // initialization expression is used instead.
        // The `Ast.Node.Offset` points to the AST node of a builtin call, whose *second*
        // argument is the init expression.
        init_field_name: Ast.Node.Offset,
        init_field_linkage: Ast.Node.Offset,
        init_field_section: Ast.Node.Offset,
        init_field_visibility: Ast.Node.Offset,
        init_field_rw: Ast.Node.Offset,
        init_field_locality: Ast.Node.Offset,
        init_field_cache: Ast.Node.Offset,
        init_field_library: Ast.Node.Offset,
        init_field_thread_local: Ast.Node.Offset,
        init_field_dll_import: Ast.Node.Offset,
        init_field_relocation: Ast.Node.Offset,
        /// The source location points to the value of an item in a specific
        /// case of a `switch`.
        switch_case_item: SwitchItem,
        /// The source location points to the "first" value of a range item in
        /// a specific case of a `switch`.
        switch_case_item_range_first: SwitchItem,
        /// The source location points to the "last" value of a range item in
        /// a specific case of a `switch`.
        switch_case_item_range_last: SwitchItem,
        /// The source location points to the main capture of a specific case of
        /// a `switch`.
        switch_capture: SwitchCapture,
        /// The source location points to the "tag" capture (second capture) of
        /// a specific case of a `switch`.
        switch_tag_capture: SwitchCapture,
        /// The source location points to the `comptime` token on the given comptime parameter,
        /// where the base node is a function declaration. The value is the parameter index.
        func_decl_param_comptime: u32,
        /// The source location points to the type annotation on the given function parameter,
        /// where the base node is a function declaration. The value is the parameter index.
        func_decl_param_ty: u32,

        pub const FnProtoParam = struct {
            /// The offset of the function prototype AST node.
            fn_proto_node_offset: Ast.Node.Offset,
            /// The index of the parameter the source location points to.
            param_index: u32,
        };

        pub const SwitchItem = struct {
            /// The offset of the switch AST node.
            switch_node_offset: Ast.Node.Offset,
            /// The index of the case to point to within this switch.
            case_idx: SwitchCaseIndex,
            /// The index of the item to point to within this case.
            item_idx: SwitchItemIndex,
        };

        pub const SwitchCapture = struct {
            /// The offset of the switch AST node.
            switch_node_offset: Ast.Node.Offset,
            /// The index of the case whose capture to point to.
            case_idx: SwitchCaseIndex,
        };

        pub const SwitchCaseIndex = packed struct(u32) {
            kind: enum(u1) { scalar, multi },
            index: u31,

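            // The special indices below are the two largest bit patterns of the
            // packed representation (maximum `index` with each `kind`), so they
            // cannot collide with any real case index.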
            pub const special_else: SwitchCaseIndex = @bitCast(@as(u32, std.math.maxInt(u32)));
            pub const special_under: SwitchCaseIndex = @bitCast(@as(u32, std.math.maxInt(u32) - 1));
        };

        pub const SwitchItemIndex = packed struct(u32) {
            kind: enum(u1) { single, range },
            index: u31,
        };

        pub const ArrayCat = struct {
            /// Points to the array concat AST node.
            array_cat_offset: Ast.Node.Offset,
            /// The index of the element the source location points to.
            elem_index: u32,
        };

        pub const TupleField = struct {
            /// Points to the AST node of the tuple type declaration.
            tuple_decl_node_offset: Ast.Node.Offset,
            /// The index of the tuple field the source location points to.
            elem_index: u32,
        };

        pub const nodeOffset = if (TracedOffset.want_tracing) nodeOffsetDebug else nodeOffsetRelease;

        noinline fn nodeOffsetDebug(node_offset: Ast.Node.Offset) Offset {
            var result: Offset = .{ .node_offset = .{ .x = node_offset } };
            result.node_offset.trace.addAddr(@returnAddress(), "init");
            return result;
        }

        fn nodeOffsetRelease(node_offset: Ast.Node.Offset) Offset {
            return .{ .node_offset = .{ .x = node_offset } };
        }

        /// This wraps a simple integer in debug builds so that later on we can find out
        /// where in semantic analysis the value got set.
        pub const TracedOffset = struct {
            x: Ast.Node.Offset,
            trace: std.debug.Trace = std.debug.Trace.init,

            const want_tracing = false;
        };
    };

    pub const unneeded: LazySrcLoc = .{
        .base_node_inst = undefined,
        .offset = .unneeded,
    };

    /// Returns `null` if the ZIR instruction has been lost across incremental updates.
    pub fn resolveBaseNode(base_node_inst: InternPool.TrackedInst.Index, zcu: *Zcu) ?struct { *File, Ast.Node.Index } {
        comptime assert(Zir.inst_tracking_version == 0);
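        // The assert ties this function to the ZIR instruction-tracking scheme:
        // the `switch` below hard-codes which instructions may be tracked, so
        // `inst_tracking_version` is expected to be bumped whenever that set
        // changes, forcing this code to be audited.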

        const ip = &zcu.intern_pool;
        const file_index, const zir_inst = inst: {
            const info = base_node_inst.resolveFull(ip) orelse return null;
            break :inst .{ info.file, info.inst };
        };
        const file = zcu.fileByIndex(file_index);

        // If we're relative to .main_struct_inst, we know the AST node is the root and don't need to resolve the ZIR,
        // which may not exist e.g. in the case of errors in ZON files.
        if (zir_inst == .main_struct_inst) return .{ file, .root };

        // Otherwise, make sure ZIR is loaded.
        const zir = file.zir.?;

        const inst = zir.instructions.get(@intFromEnum(zir_inst));
        const base_node: Ast.Node.Index = switch (inst.tag) {
            .declaration => inst.data.declaration.src_node,
            .struct_init, .struct_init_ref => zir.extraData(Zir.Inst.StructInit, inst.data.pl_node.payload_index).data.abs_node,
            .struct_init_anon => zir.extraData(Zir.Inst.StructInitAnon, inst.data.pl_node.payload_index).data.abs_node,
            .extended => switch (inst.data.extended.opcode) {
                .struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_node,
                .union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_node,
                .enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_node,
                .opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_node,
                .reify_enum => zir.extraData(Zir.Inst.ReifyEnum, inst.data.extended.operand).data.node,
                .reify_struct => zir.extraData(Zir.Inst.ReifyStruct, inst.data.extended.operand).data.node,
                .reify_union => zir.extraData(Zir.Inst.ReifyUnion, inst.data.extended.operand).data.node,
                else => unreachable,
            },
            else => unreachable,
        };
        return .{ file, base_node };
    }

    /// Resolve the file and AST node of `base_node_inst` to get a resolved `SrcLoc`.
    /// The resulting `SrcLoc` should only be used ephemerally, as it is not correct across incremental updates.
    pub fn upgrade(lazy: LazySrcLoc, zcu: *Zcu) SrcLoc {
        return lazy.upgradeOrLost(zcu).?;
    }

    /// Like `upgrade`, but returns `null` if the source location has been lost across incremental updates.
    pub fn upgradeOrLost(lazy: LazySrcLoc, zcu: *Zcu) ?SrcLoc {
        const file, const base_node: Ast.Node.Index = resolveBaseNode(lazy.base_node_inst, zcu) orelse return null;
        return .{
            .file_scope = file,
            .base_node = base_node,
            .lazy = lazy.offset,
        };
    }
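
    // Typical use (sketch): error rendering resolves a lazy location on demand.
    // `upgrade` asserts the tracked instruction still exists; `upgradeOrLost`
    // is the incremental-safe variant:
    //
    //     const src: SrcLoc = lazy.upgrade(zcu);
    //     const span = try src.span(zcu);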

    /// Used to sort error messages, so that they're printed in a consistent order.
    /// If an error is returned, a file could not be read in order to resolve a source location.
    /// In that case, `bad_file_out` is populated, and sorting is impossible.
    pub fn lessThan(lhs_lazy: LazySrcLoc, rhs_lazy: LazySrcLoc, zcu: *Zcu, bad_file_out: **Zcu.File) File.GetSourceError!bool {
        const lhs_src = lhs_lazy.upgradeOrLost(zcu) orelse {
            // LHS source location lost, so should never be referenced. Just sort it to the end.
            return false;
        };
        const rhs_src = rhs_lazy.upgradeOrLost(zcu) orelse {
            // RHS source location lost, so should never be referenced. Just sort it to the end.
            return true;
        };
        if (lhs_src.file_scope != rhs_src.file_scope) {
            const lhs_path = lhs_src.file_scope.path;
            const rhs_path = rhs_src.file_scope.path;
            if (lhs_path.root != rhs_path.root) {
                return @intFromEnum(lhs_path.root) < @intFromEnum(rhs_path.root);
            }
            return std.mem.order(u8, lhs_path.sub_path, rhs_path.sub_path).compare(.lt);
        }

        const lhs_span = lhs_src.span(zcu) catch |err| {
            bad_file_out.* = lhs_src.file_scope;
            return err;
        };
        const rhs_span = rhs_src.span(zcu) catch |err| {
            bad_file_out.* = rhs_src.file_scope;
            return err;
        };
        return lhs_span.main < rhs_span.main;
    }
};

pub const SemaError = error{ OutOfMemory, Canceled, AnalysisFail };
pub const CompileError = error{
    OutOfMemory,
    /// The compilation update is no longer desired.
    Canceled,
    /// When this is returned, the compile error for the failure has already been recorded.
    AnalysisFail,
    /// In a comptime scope, a return instruction was encountered. This error is only seen when
    /// doing a comptime function call.
    ComptimeReturn,
    /// In a comptime scope, a break instruction was encountered. This error is only seen when
    /// evaluating a comptime block.
    ComptimeBreak,
};

pub fn init(zcu: *Zcu, thread_count: usize) !void {
    const gpa = zcu.gpa;
    try zcu.intern_pool.init(gpa, thread_count);
}

pub fn deinit(zcu: *Zcu) void {
    const gpa = zcu.gpa;
    {
        const pt: Zcu.PerThread = .activate(zcu, .main);
        defer pt.deactivate();

        if (zcu.llvm_object) |llvm_object| llvm_object.deinit();

        zcu.builtin_modules.deinit(gpa);
        zcu.module_roots.deinit(gpa);
        for (zcu.import_table.keys()) |file_index| {
            pt.destroyFile(file_index);
        }
        zcu.import_table.deinit(gpa);
        zcu.alive_files.deinit(gpa);

        for (zcu.embed_table.keys()) |embed_file| {
            embed_file.path.deinit(gpa);
            gpa.destroy(embed_file);
        }
        zcu.embed_table.deinit(gpa);

        zcu.local_zir_cache.handle.close();
        zcu.global_zir_cache.handle.close();

        for (zcu.failed_analysis.values()) |value| value.destroy(gpa);
        for (zcu.failed_codegen.values()) |value| value.destroy(gpa);
        for (zcu.failed_types.values()) |value| value.destroy(gpa);
        zcu.analysis_in_progress.deinit(gpa);
        zcu.failed_analysis.deinit(gpa);
        zcu.transitive_failed_analysis.deinit(gpa);
        zcu.failed_codegen.deinit(gpa);
        zcu.failed_types.deinit(gpa);

        for (zcu.failed_files.values()) |value| {
            if (value) |msg| gpa.free(msg);
        }
        zcu.failed_files.deinit(gpa);
        zcu.failed_imports.deinit(gpa);

        for (zcu.failed_exports.values()) |value| {
            value.destroy(gpa);
        }
        zcu.failed_exports.deinit(gpa);

        for (zcu.cimport_errors.values()) |*errs| {
            errs.deinit(gpa);
        }
        zcu.cimport_errors.deinit(gpa);

        zcu.compile_logs.deinit(gpa);
        zcu.compile_log_lines.deinit(gpa);
        zcu.free_compile_log_lines.deinit(gpa);

        zcu.all_exports.deinit(gpa);
        zcu.free_exports.deinit(gpa);
        zcu.single_exports.deinit(gpa);
        zcu.multi_exports.deinit(gpa);

        zcu.potentially_outdated.deinit(gpa);
        zcu.outdated.deinit(gpa);
        zcu.outdated_ready.deinit(gpa);
        zcu.retryable_failures.deinit(gpa);

        zcu.func_body_analysis_queued.deinit(gpa);
        zcu.nav_val_analysis_queued.deinit(gpa);

        zcu.test_functions.deinit(gpa);

        for (zcu.global_assembly.values()) |s| {
            gpa.free(s);
        }
        zcu.global_assembly.deinit(gpa);

        zcu.reference_table.deinit(gpa);
        zcu.all_references.deinit(gpa);
        zcu.free_references.deinit(gpa);

        zcu.inline_reference_frames.deinit(gpa);
        zcu.free_inline_reference_frames.deinit(gpa);

        zcu.type_reference_table.deinit(gpa);
        zcu.all_type_references.deinit(gpa);
        zcu.free_type_references.deinit(gpa);

        if (zcu.resolved_references) |*r| r.deinit(gpa);

        if (zcu.comp.debugIncremental()) {
            zcu.incremental_debug_state.deinit(gpa);
        }
    }
    zcu.intern_pool.deinit(gpa);
}
2871
2872pub fn namespacePtr(zcu: *Zcu, index: Namespace.Index) *Namespace {
2873 return zcu.intern_pool.namespacePtr(index);
2874}
2875
2876pub fn namespacePtrUnwrap(zcu: *Zcu, index: Namespace.OptionalIndex) ?*Namespace {
2877 return zcu.namespacePtr(index.unwrap() orelse return null);
2878}
2879
2880// TODO https://github.com/ziglang/zig/issues/8643
2881pub const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8;
2882pub const HackDataLayout = extern struct {
2883 data: [8]u8 align(@alignOf(Zir.Inst.Data)),
2884 safety_tag: u8,
2885};
2886comptime {
2887 if (data_has_safety_tag) {
2888 assert(@sizeOf(HackDataLayout) == @sizeOf(Zir.Inst.Data));
2889 }
2890}
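
// A sketch of the round trip this hack enables: in memory, `Zir.Inst.Data` may carry a
// safety tag (when `data_has_safety_tag`), but the cache file stores only the bare 8
// payload bytes. `saveZirCache` below copies `HackDataLayout.data` out of each
// instruction before writing, and `loadZirCacheBody` reconstructs the tag from
// `Zir.Inst.Tag.data_tags` when reading back.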

pub fn loadZirCache(gpa: Allocator, io: Io, cache_file: std.fs.File) !Zir {
    var buffer: [2000]u8 = undefined;
    var file_reader = cache_file.reader(io, &buffer);
    return result: {
        const header = file_reader.interface.takeStructPointer(Zir.Header) catch |err| break :result err;
        break :result loadZirCacheBody(gpa, header.*, &file_reader.interface);
    } catch |err| switch (err) {
        error.ReadFailed => return file_reader.err.?,
        else => |e| return e,
    };
}

pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *Io.Reader) !Zir {
    var instructions: std.MultiArrayList(Zir.Inst) = .{};
    errdefer instructions.deinit(gpa);

    try instructions.setCapacity(gpa, header.instructions_len);
    instructions.len = header.instructions_len;

    var zir: Zir = .{
        .instructions = instructions.toOwnedSlice(),
        .string_bytes = &.{},
        .extra = &.{},
    };
    errdefer zir.deinit(gpa);

    zir.string_bytes = try gpa.alloc(u8, header.string_bytes_len);
    zir.extra = try gpa.alloc(u32, header.extra_len);

    const safety_buffer = if (data_has_safety_tag)
        try gpa.alloc([8]u8, header.instructions_len)
    else
        undefined;
    defer if (data_has_safety_tag) gpa.free(safety_buffer);

    var vecs = [_][]u8{
        @ptrCast(zir.instructions.items(.tag)),
        if (data_has_safety_tag)
            @ptrCast(safety_buffer)
        else
            @ptrCast(zir.instructions.items(.data)),
        zir.string_bytes,
        @ptrCast(zir.extra),
    };
    try cache_br.readVecAll(&vecs);
    if (data_has_safety_tag) {
        const tags = zir.instructions.items(.tag);
        for (zir.instructions.items(.data), 0..) |*data, i| {
            const union_tag = Zir.Inst.Tag.data_tags[@intFromEnum(tags[i])];
            const as_struct = @as(*HackDataLayout, @ptrCast(data));
            as_struct.* = .{
                .safety_tag = @intFromEnum(union_tag),
                .data = safety_buffer[i],
            };
        }
    }
    return zir;
}

pub fn saveZirCache(gpa: Allocator, cache_file: std.fs.File, stat: std.fs.File.Stat, zir: Zir) (std.fs.File.WriteError || Allocator.Error)!void {
    const safety_buffer = if (data_has_safety_tag)
        try gpa.alloc([8]u8, zir.instructions.len)
    else
        undefined;
    defer if (data_has_safety_tag) gpa.free(safety_buffer);

    if (data_has_safety_tag) {
        // The `Data` union has a safety tag but in the file format we store it without.
        for (zir.instructions.items(.data), 0..) |*data, i| {
            const as_struct: *const HackDataLayout = @ptrCast(data);
            safety_buffer[i] = as_struct.data;
        }
    }

    const header: Zir.Header = .{
        .instructions_len = @intCast(zir.instructions.len),
        .string_bytes_len = @intCast(zir.string_bytes.len),
        .extra_len = @intCast(zir.extra.len),

        .stat_size = stat.size,
        .stat_inode = stat.inode,
        .stat_mtime = stat.mtime.toNanoseconds(),
    };
    var vecs = [_][]const u8{
        @ptrCast((&header)[0..1]),
        @ptrCast(zir.instructions.items(.tag)),
        if (data_has_safety_tag)
            @ptrCast(safety_buffer)
        else
            @ptrCast(zir.instructions.items(.data)),
        zir.string_bytes,
        @ptrCast(zir.extra),
    };
    var cache_fw = cache_file.writer(&.{});
    cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) {
        error.WriteFailed => return cache_fw.err.?,
    };
}
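
// On-disk layout written above and consumed by `loadZirCache` (a descriptive note only;
// the authoritative order is the write/read vectors in the two functions):
//
//     [Zir.Header][instruction tags][instruction data, sans safety tag][string_bytes][extra]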

pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir) std.fs.File.WriteError!void {
    const header: Zoir.Header = .{
        .nodes_len = @intCast(zoir.nodes.len),
        .extra_len = @intCast(zoir.extra.len),
        .limbs_len = @intCast(zoir.limbs.len),
        .string_bytes_len = @intCast(zoir.string_bytes.len),
        .compile_errors_len = @intCast(zoir.compile_errors.len),
        .error_notes_len = @intCast(zoir.error_notes.len),

        .stat_size = stat.size,
        .stat_inode = stat.inode,
        .stat_mtime = stat.mtime.toNanoseconds(),
    };
    var vecs = [_][]const u8{
        @ptrCast((&header)[0..1]),
        @ptrCast(zoir.nodes.items(.tag)),
        @ptrCast(zoir.nodes.items(.data)),
        @ptrCast(zoir.nodes.items(.ast_node)),
        @ptrCast(zoir.extra),
        @ptrCast(zoir.limbs),
        zoir.string_bytes,
        @ptrCast(zoir.compile_errors),
        @ptrCast(zoir.error_notes),
    };
    var cache_fw = cache_file.writer(&.{});
    cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) {
        error.WriteFailed => return cache_fw.err.?,
    };
}

pub fn loadZoirCacheBody(gpa: Allocator, header: Zoir.Header, cache_br: *Io.Reader) !Zoir {
    var zoir: Zoir = .{
        .nodes = .empty,
        .extra = &.{},
        .limbs = &.{},
        .string_bytes = &.{},
        .compile_errors = &.{},
        .error_notes = &.{},
    };
    errdefer zoir.deinit(gpa);

    zoir.nodes = nodes: {
        var nodes: std.MultiArrayList(Zoir.Node.Repr) = .empty;
        defer nodes.deinit(gpa);
        try nodes.setCapacity(gpa, header.nodes_len);
        nodes.len = header.nodes_len;
        break :nodes nodes.toOwnedSlice();
    };

    zoir.extra = try gpa.alloc(u32, header.extra_len);
    zoir.limbs = try gpa.alloc(std.math.big.Limb, header.limbs_len);
    zoir.string_bytes = try gpa.alloc(u8, header.string_bytes_len);

    zoir.compile_errors = try gpa.alloc(Zoir.CompileError, header.compile_errors_len);
    zoir.error_notes = try gpa.alloc(Zoir.CompileError.Note, header.error_notes_len);

    var vecs = [_][]u8{
        @ptrCast(zoir.nodes.items(.tag)),
        @ptrCast(zoir.nodes.items(.data)),
        @ptrCast(zoir.nodes.items(.ast_node)),
        @ptrCast(zoir.extra),
        @ptrCast(zoir.limbs),
        zoir.string_bytes,
        @ptrCast(zoir.compile_errors),
        @ptrCast(zoir.error_notes),
    };
    try cache_br.readVecAll(&vecs);
    return zoir;
}

pub fn markDependeeOutdated(
    zcu: *Zcu,
    /// When we are diffing ZIR and marking things as outdated, we won't yet have marked the dependencies as PO.
    /// However, when we discover during analysis that something was outdated, the `Dependee` was already
    /// marked as PO (and hence counted in each depender's PO dep count), so we need to decrement the PO dep count for each depender.
    marked_po: enum { not_marked_po, marked_po },
    dependee: InternPool.Dependee,
) !void {
    log.debug("outdated dependee: {f}", .{zcu.fmtDependee(dependee)});
    var it = zcu.intern_pool.dependencyIterator(dependee);
    while (it.next()) |depender| {
        if (zcu.outdated.getPtr(depender)) |po_dep_count| {
            switch (marked_po) {
                .not_marked_po => {},
                .marked_po => {
                    po_dep_count.* -= 1;
                    log.debug("outdated {f} => already outdated {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
                    if (po_dep_count.* == 0) {
                        log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
                        try zcu.outdated_ready.put(zcu.gpa, depender, {});
                    }
                },
            }
            continue;
        }
        const opt_po_entry = zcu.potentially_outdated.fetchSwapRemove(depender);
        const new_po_dep_count = switch (marked_po) {
            .not_marked_po => if (opt_po_entry) |e| e.value else 0,
            .marked_po => if (opt_po_entry) |e| e.value - 1 else {
                // This `AnalUnit` has already been re-analyzed this update, and registered a dependency
                // on this thing, but already has sufficiently up-to-date information. Nothing to do.
                continue;
            },
        };
        try zcu.outdated.putNoClobber(
            zcu.gpa,
            depender,
            new_po_dep_count,
        );
        log.debug("outdated {f} => new outdated {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), new_po_dep_count });
        if (new_po_dep_count == 0) {
            log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
            try zcu.outdated_ready.put(zcu.gpa, depender, {});
        }
        // If this is a Decl and was not previously PO, we must recursively
        // mark dependencies on its tyval as PO.
        if (opt_po_entry == null) {
            assert(marked_po == .not_marked_po);
            try zcu.markTransitiveDependersPotentiallyOutdated(depender);
        }
    }
}
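
// Worked example (hypothetical units): let unit `A` depend on `dependee`, and unit `B`
// depend on `A`'s value. `zcu.markDependeeOutdated(.not_marked_po, dependee)` moves `A`
// into `outdated` with po_deps=0 (hence also into `outdated_ready`), then recursively
// marks `B` as potentially outdated via `markTransitiveDependersPotentiallyOutdated`.
// If `A`'s re-analysis later finds its value unchanged, `markPoDependeeUpToDate` below
// drops `B`'s PO count back to zero, and `B` is never re-analyzed.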

pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
    log.debug("up-to-date dependee: {f}", .{zcu.fmtDependee(dependee)});
    var it = zcu.intern_pool.dependencyIterator(dependee);
    while (it.next()) |depender| {
        if (zcu.outdated.getPtr(depender)) |po_dep_count| {
            // This depender is already outdated, but it now has one
            // less PO dependency!
            po_dep_count.* -= 1;
            log.debug("up-to-date {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
            if (po_dep_count.* == 0) {
                log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
                try zcu.outdated_ready.put(zcu.gpa, depender, {});
            }
            continue;
        }
        // This depender is definitely at least PO, because this Decl was just analyzed
        // due to being outdated.
        const ptr = zcu.potentially_outdated.getPtr(depender) orelse {
            // This dependency has been registered during in-progress analysis, but the unit is
            // not in `potentially_outdated` because analysis is in-progress. Nothing to do.
            continue;
        };
        if (ptr.* > 1) {
            ptr.* -= 1;
            log.debug("up-to-date {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), ptr.* });
            continue;
        }

        log.debug("up-to-date {f} => {f} po_deps=0 (up-to-date)", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender) });

        // This dependency is no longer PO, i.e. is known to be up-to-date.
        assert(zcu.potentially_outdated.swapRemove(depender));
        // If this is a Decl, we must recursively mark dependencies on its tyval
        // as no longer PO.
        switch (depender.unwrap()) {
            .@"comptime" => {},
            .nav_val => |nav| try zcu.markPoDependeeUpToDate(.{ .nav_val = nav }),
            .nav_ty => |nav| try zcu.markPoDependeeUpToDate(.{ .nav_ty = nav }),
            .type => |ty| try zcu.markPoDependeeUpToDate(.{ .interned = ty }),
            .func => |func| try zcu.markPoDependeeUpToDate(.{ .interned = func }),
            .memoized_state => |stage| try zcu.markPoDependeeUpToDate(.{ .memoized_state = stage }),
        }
    }
}

/// Given an `AnalUnit` which is newly outdated or PO, mark all AnalUnits which may
/// in turn be PO, due to a dependency on the original AnalUnit's tyval or IES.
fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUnit) !void {
    const ip = &zcu.intern_pool;
    const dependee: InternPool.Dependee = switch (maybe_outdated.unwrap()) {
        .@"comptime" => return, // analysis of a comptime decl can't outdate any dependencies
        .nav_val => |nav| .{ .nav_val = nav },
        .nav_ty => |nav| .{ .nav_ty = nav },
        .type => |ty| .{ .interned = ty },
        .func => |func_index| .{ .interned = func_index }, // IES
        .memoized_state => |stage| .{ .memoized_state = stage },
    };
    log.debug("potentially outdated dependee: {f}", .{zcu.fmtDependee(dependee)});
    var it = ip.dependencyIterator(dependee);
    while (it.next()) |po| {
        if (zcu.outdated.getPtr(po)) |po_dep_count| {
            // This dependency is already outdated, but it now has one more PO
            // dependency.
            if (po_dep_count.* == 0) {
                _ = zcu.outdated_ready.swapRemove(po);
            }
            po_dep_count.* += 1;
            log.debug("po {f} => {f} [outdated] po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), po_dep_count.* });
            continue;
        }
        if (zcu.potentially_outdated.getPtr(po)) |n| {
            // There is now one more PO dependency.
            n.* += 1;
            log.debug("po {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), n.* });
            continue;
        }
        try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1);
        log.debug("po {f} => {f} po_deps=1", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po) });
        // This AnalUnit was not already PO, so we must recursively mark its dependers as also PO.
        try zcu.markTransitiveDependersPotentiallyOutdated(po);
    }
}

pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
    if (!zcu.comp.config.incremental) return null;

    if (zcu.outdated.count() == 0) {
        // Any units in `potentially_outdated` must just be stuck in loops with one another: none of those
        // units have had any outdated dependencies so far, and all of their remaining PO deps are triggered
        // by other units in `potentially_outdated`. So, we can safely assume those units are up-to-date.
        zcu.potentially_outdated.clearRetainingCapacity();
        log.debug("findOutdatedToAnalyze: no outdated depender", .{});
        return null;
    }

    // Our goal is to find an outdated AnalUnit which itself has no outdated or
    // PO dependencies. Most of the time, such an AnalUnit will exist - we track
    // them in the `outdated_ready` set for efficiency. However, this is not
    // necessarily the case, since the Decl dependency graph may contain loops
    // via mutually recursive definitions:
    //   pub const A = struct { b: *B };
    //   pub const B = struct { b: *A };
    // In this case, we must defer to more complex logic below.

    if (zcu.outdated_ready.count() > 0) {
        const unit = zcu.outdated_ready.keys()[0];
        log.debug("findOutdatedToAnalyze: trivial {f}", .{zcu.fmtAnalUnit(unit)});
        return unit;
    }

    // There is no single AnalUnit which is ready for re-analysis. Instead, we must assume that some
    // AnalUnit with PO dependencies is outdated -- e.g. in the above example we arbitrarily pick one of
    // A or B. We should definitely not select a function, since a function can't be responsible for the
    // loop (IES dependencies can't have loops). We should also, of course, not select a `comptime`
    // declaration, since you can't depend on those!

    // The choice of this unit could have a big impact on how much total analysis we perform, since
    // if analysis concludes any dependencies on its result are up-to-date, then other PO AnalUnits
    // may be resolved as up-to-date. To hopefully avoid doing too much work, let's find a unit
    // which the most things depend on - the idea is that this will resolve a lot of loops (but this
    // is only a heuristic).

    log.debug("findOutdatedToAnalyze: no trivial ready, using heuristic; {d} outdated, {d} PO", .{
        zcu.outdated.count(),
        zcu.potentially_outdated.count(),
    });

    const ip = &zcu.intern_pool;

    var chosen_unit: ?AnalUnit = null;
    var chosen_unit_dependers: u32 = undefined;

    inline for (.{ zcu.outdated.keys(), zcu.potentially_outdated.keys() }) |outdated_units| {
        for (outdated_units) |unit| {
            var n: u32 = 0;
            var it = ip.dependencyIterator(switch (unit.unwrap()) {
                .func => continue, // a `func` definitely can't be causing the loop so it is a bad choice
                .@"comptime" => continue, // a `comptime` block can't even be depended on so it is a terrible choice
                .type => |ty| .{ .interned = ty },
                .nav_val => |nav| .{ .nav_val = nav },
                .nav_ty => |nav| .{ .nav_ty = nav },
                .memoized_state => {
                    // If we've hit a loop and some `.memoized_state` is outdated, we should make that choice eagerly.
                    // In general, it's good to resolve this early on, since -- for instance -- almost every function
                    // references the panic handler.
                    return unit;
                },
            });
            while (it.next()) |_| n += 1;

            if (chosen_unit == null or n > chosen_unit_dependers) {
                chosen_unit = unit;
                chosen_unit_dependers = n;
            }
        }
    }

    log.debug("findOutdatedToAnalyze: heuristic returned '{f}' ({d} dependers)", .{
        zcu.fmtAnalUnit(chosen_unit.?),
        chosen_unit_dependers,
    });

    return chosen_unit.?;
}

/// During an incremental update, before semantic analysis, call this to flush all values from
/// `retryable_failures` and mark them as outdated so they get re-analyzed.
pub fn flushRetryableFailures(zcu: *Zcu) !void {
    const gpa = zcu.gpa;
    for (zcu.retryable_failures.items) |depender| {
        if (zcu.outdated.contains(depender)) continue;
        if (zcu.potentially_outdated.fetchSwapRemove(depender)) |kv| {
            // This AnalUnit was already PO, but we now consider it outdated.
            // Any transitive dependencies are already marked PO.
            try zcu.outdated.put(gpa, depender, kv.value);
            continue;
        }
        // This AnalUnit was not marked PO, but is now outdated. Mark it as
        // such, then recursively mark transitive dependencies as PO.
        try zcu.outdated.put(gpa, depender, 0);
        try zcu.markTransitiveDependersPotentiallyOutdated(depender);
    }
    zcu.retryable_failures.clearRetainingCapacity();
}

pub fn mapOldZirToNew(
    gpa: Allocator,
    old_zir: Zir,
    new_zir: Zir,
    inst_map: *std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index),
) Allocator.Error!void {
    // Contains ZIR indexes of namespace declaration instructions, e.g. struct_decl, union_decl, etc.
    // Not `declaration`, as this does not create a namespace.
    const MatchedZirDecl = struct {
        old_inst: Zir.Inst.Index,
        new_inst: Zir.Inst.Index,
    };
    var match_stack: std.ArrayList(MatchedZirDecl) = .empty;
    defer match_stack.deinit(gpa);

    // Used as temporary buffers for namespace declaration instructions
    var old_contents: Zir.DeclContents = .init;
    defer old_contents.deinit(gpa);
    var new_contents: Zir.DeclContents = .init;
    defer new_contents.deinit(gpa);

    // Map the main struct inst (and anything in its fields)
    {
        try old_zir.findTrackableRoot(gpa, &old_contents);
        try new_zir.findTrackableRoot(gpa, &new_contents);

        assert(old_contents.explicit_types.items[0] == .main_struct_inst);
        assert(new_contents.explicit_types.items[0] == .main_struct_inst);

        assert(old_contents.func_decl == null);
        assert(new_contents.func_decl == null);

        // We don't have any smart way of matching up these instructions, so we correlate them based on source order
        // in their respective arrays.

        const num_explicit_types = @min(old_contents.explicit_types.items.len, new_contents.explicit_types.items.len);
        try match_stack.ensureUnusedCapacity(gpa, @intCast(num_explicit_types));
        for (
            old_contents.explicit_types.items[0..num_explicit_types],
            new_contents.explicit_types.items[0..num_explicit_types],
        ) |old_inst, new_inst| {
            // Here we use `match_stack`, so that we will recursively consider declarations on these types.
            match_stack.appendAssumeCapacity(.{ .old_inst = old_inst, .new_inst = new_inst });
        }

        const num_other = @min(old_contents.other.items.len, new_contents.other.items.len);
        try inst_map.ensureUnusedCapacity(gpa, @intCast(num_other));
        for (
            old_contents.other.items[0..num_other],
            new_contents.other.items[0..num_other],
        ) |old_inst, new_inst| {
            // These instructions don't have declarations, so we just modify `inst_map` directly.
            inst_map.putAssumeCapacity(old_inst, new_inst);
        }
    }

    while (match_stack.pop()) |match_item| {
        // First, a check: if the number of captures of this type has changed, we can't map it, because
        // we wouldn't know how to correlate type information with the last update.
        // Synchronizes with logic in `Zcu.PerThread.recreateStructType` etc.
        if (old_zir.typeCapturesLen(match_item.old_inst) != new_zir.typeCapturesLen(match_item.new_inst)) {
            // Don't map this type or anything within it.
            continue;
        }

        // Match the namespace declaration itself
        try inst_map.put(gpa, match_item.old_inst, match_item.new_inst);

        // Maps decl name to `declaration` instruction.
        var named_decls: std.StringHashMapUnmanaged(Zir.Inst.Index) = .empty;
        defer named_decls.deinit(gpa);
        // Maps test name to `declaration` instruction.
        var named_tests: std.StringHashMapUnmanaged(Zir.Inst.Index) = .empty;
        defer named_tests.deinit(gpa);
        // Maps test name to `declaration` instruction.
        var named_decltests: std.StringHashMapUnmanaged(Zir.Inst.Index) = .empty;
        defer named_decltests.deinit(gpa);
        // All unnamed tests, in order, for a best-effort match.
        var unnamed_tests: std.ArrayList(Zir.Inst.Index) = .empty;
        defer unnamed_tests.deinit(gpa);
        // All comptime declarations, in order, for a best-effort match.
        var comptime_decls: std.ArrayList(Zir.Inst.Index) = .empty;
        defer comptime_decls.deinit(gpa);

        {
            var old_decl_it = old_zir.declIterator(match_item.old_inst);
            while (old_decl_it.next()) |old_decl_inst| {
                const old_decl = old_zir.getDeclaration(old_decl_inst);
                switch (old_decl.kind) {
                    .@"comptime" => try comptime_decls.append(gpa, old_decl_inst),
                    .unnamed_test => try unnamed_tests.append(gpa, old_decl_inst),
                    .@"test" => try named_tests.put(gpa, old_zir.nullTerminatedString(old_decl.name), old_decl_inst),
                    .decltest => try named_decltests.put(gpa, old_zir.nullTerminatedString(old_decl.name), old_decl_inst),
                    .@"const", .@"var" => try named_decls.put(gpa, old_zir.nullTerminatedString(old_decl.name), old_decl_inst),
                }
            }
        }

        var unnamed_test_idx: u32 = 0;
        var comptime_decl_idx: u32 = 0;

        var new_decl_it = new_zir.declIterator(match_item.new_inst);
        while (new_decl_it.next()) |new_decl_inst| {
            const new_decl = new_zir.getDeclaration(new_decl_inst);
            // Attempt to match this to a declaration in the old ZIR:
            // * For named declarations (`const`/`var`/`fn`), we match based on name.
            // * For named tests (`test "foo"`) and decltests (`test foo`), we also match based on name.
            // * For unnamed tests, we match based on order.
            // * For comptime blocks, we match based on order.
            // If we cannot match this declaration, we can't match anything nested inside of it either, so we just `continue`.
            const old_decl_inst = switch (new_decl.kind) {
                .@"comptime" => inst: {
                    if (comptime_decl_idx == comptime_decls.items.len) continue;
                    defer comptime_decl_idx += 1;
                    break :inst comptime_decls.items[comptime_decl_idx];
                },
                .unnamed_test => inst: {
                    if (unnamed_test_idx == unnamed_tests.items.len) continue;
                    defer unnamed_test_idx += 1;
                    break :inst unnamed_tests.items[unnamed_test_idx];
                },
                .@"test" => inst: {
                    const name = new_zir.nullTerminatedString(new_decl.name);
                    break :inst named_tests.get(name) orelse continue;
                },
                .decltest => inst: {
                    const name = new_zir.nullTerminatedString(new_decl.name);
                    break :inst named_decltests.get(name) orelse continue;
                },
                .@"const", .@"var" => inst: {
                    const name = new_zir.nullTerminatedString(new_decl.name);
                    break :inst named_decls.get(name) orelse continue;
                },
            };

            // Match the `declaration` instruction
            try inst_map.put(gpa, old_decl_inst, new_decl_inst);

            // Find trackable instructions within this declaration
            try old_zir.findTrackable(gpa, &old_contents, old_decl_inst);
            try new_zir.findTrackable(gpa, &new_contents, new_decl_inst);

            // We don't have any smart way of matching up these instructions, so we correlate them based on source order
            // in their respective arrays.

            const num_explicit_types = @min(old_contents.explicit_types.items.len, new_contents.explicit_types.items.len);
            try match_stack.ensureUnusedCapacity(gpa, @intCast(num_explicit_types));
            for (
                old_contents.explicit_types.items[0..num_explicit_types],
                new_contents.explicit_types.items[0..num_explicit_types],
            ) |old_inst, new_inst| {
                // Here we use `match_stack`, so that we will recursively consider declarations on these types.
                match_stack.appendAssumeCapacity(.{ .old_inst = old_inst, .new_inst = new_inst });
            }

            const num_other = @min(old_contents.other.items.len, new_contents.other.items.len);
            try inst_map.ensureUnusedCapacity(gpa, @intCast(num_other));
            for (
                old_contents.other.items[0..num_other],
                new_contents.other.items[0..num_other],
            ) |old_inst, new_inst| {
                // These instructions don't have declarations, so we just modify `inst_map` directly.
                inst_map.putAssumeCapacity(old_inst, new_inst);
            }

            if (old_contents.func_decl) |old_func_inst| {
                if (new_contents.func_decl) |new_func_inst| {
                    // There are no declarations on a function either, so again, we just directly add it to `inst_map`.
                    try inst_map.put(gpa, old_func_inst, new_func_inst);
                }
            }
        }
    }
}
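
// Illustrative call site (a sketch; `old_zir` and `new_zir` are assumed to be the
// previous and freshly generated ZIR for one file):
//
//     var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .empty;
//     defer inst_map.deinit(gpa);
//     try Zcu.mapOldZirToNew(gpa, old_zir, new_zir, &inst_map);
//     // `inst_map.get(old_inst)` now yields the matching instruction in `new_zir`,
//     // or `null` if the declaration could not be matched across the update.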

/// Ensure this function's body is or will be analyzed and emitted. This should
/// be called whenever a potential runtime call of a function is seen.
///
/// The caller is responsible for ensuring the function decl itself is already
/// analyzed, and for ensuring it can exist at runtime (see
/// `Type.fnHasRuntimeBitsSema`). This function does *not* guarantee that the body
/// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`.
pub fn ensureFuncBodyAnalysisQueued(zcu: *Zcu, func_index: InternPool.Index) !void {
    const ip = &zcu.intern_pool;

    const func = zcu.funcInfo(func_index);

    assert(func.ty == func.uncoerced_ty); // analyze the body of the original function, not a coerced one

    if (zcu.func_body_analysis_queued.contains(func_index)) return;

    if (func.analysisUnordered(ip).is_analyzed) {
        if (!zcu.outdated.contains(.wrap(.{ .func = func_index })) and
            !zcu.potentially_outdated.contains(.wrap(.{ .func = func_index })))
        {
            // This function has been analyzed before and is definitely up-to-date.
            return;
        }
    }

    try zcu.func_body_analysis_queued.ensureUnusedCapacity(zcu.gpa, 1);
    try zcu.comp.queueJob(.{ .analyze_func = func_index });
    zcu.func_body_analysis_queued.putAssumeCapacityNoClobber(func_index, {});
}

pub fn ensureNavValAnalysisQueued(zcu: *Zcu, nav_id: InternPool.Nav.Index) !void {
    const ip = &zcu.intern_pool;

    if (zcu.nav_val_analysis_queued.contains(nav_id)) return;

    if (ip.getNav(nav_id).status == .fully_resolved) {
        if (!zcu.outdated.contains(.wrap(.{ .nav_val = nav_id })) and
            !zcu.potentially_outdated.contains(.wrap(.{ .nav_val = nav_id })))
        {
            // This `Nav` has been analyzed before and is definitely up-to-date.
            return;
        }
    }

    try zcu.nav_val_analysis_queued.ensureUnusedCapacity(zcu.gpa, 1);
    try zcu.comp.queueJob(.{ .analyze_comptime_unit = .wrap(.{ .nav_val = nav_id }) });
    zcu.nav_val_analysis_queued.putAssumeCapacityNoClobber(nav_id, {});
}

pub const ImportResult = struct {
    /// Whether `file` has been newly created; in other words, whether this is the first import of
    /// this file. This should only be `true` when importing files during AstGen. After that, all
    /// files should have already been discovered.
    is_new: bool,

    /// `file.mod` is not populated by this function, so if `is_new`, then it is `undefined`.
    file: *Zcu.File,
    file_index: File.Index,

    /// If this import was a simple file path, this is `null`; the imported file should exist within
    /// the importer's module. Otherwise, it's the module which the import resolved to. This module
    /// could match the module of `cur_file`, since a module can depend on itself.
    module: ?*Package.Module,
};

/// Delete all the Export objects that are caused by this `AnalUnit`. Re-analysis of
/// this `AnalUnit` will cause them to be re-created (or not).
pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void {
    const gpa = zcu.gpa;

    const exports_base, const exports_len = if (zcu.single_exports.fetchSwapRemove(anal_unit)) |kv|
        .{ @intFromEnum(kv.value), 1 }
    else if (zcu.multi_exports.fetchSwapRemove(anal_unit)) |info|
        .{ info.value.index, info.value.len }
    else
        return;

    const exports = zcu.all_exports.items[exports_base..][0..exports_len];

    // In an only-c build, we're guaranteed to never use incremental compilation, so there are
    // guaranteed not to be any exports in the output file that need deleting (since we only call
    // `updateExports` on flush).
    // This case is needed because in some rare edge cases, `Sema` wants to add and delete exports
    // within a single update.
    if (dev.env.supports(.incremental)) {
        for (exports, exports_base..) |exp, export_index_usize| {
            const export_idx: Export.Index = @enumFromInt(export_index_usize);
            if (zcu.comp.bin_file) |lf| {
                lf.deleteExport(exp.exported, exp.opts.name);
            }
            if (zcu.failed_exports.fetchSwapRemove(export_idx)) |failed_kv| {
                failed_kv.value.destroy(gpa);
            }
        }
    }

    zcu.free_exports.ensureUnusedCapacity(gpa, exports_len) catch {
        // This space will be reused eventually, so we need not propagate this error.
        // Just leak it for now, and let GC reclaim it later on.
        return;
    };
    for (exports_base..exports_base + exports_len) |export_idx| {
        zcu.free_exports.appendAssumeCapacity(@enumFromInt(export_idx));
    }
}

/// Delete all references in `reference_table` which are caused by this `AnalUnit`.
/// Re-analysis of the `AnalUnit` will cause appropriate references to be recreated.
pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void {
    const gpa = zcu.gpa;

    zcu.clearCachedResolvedReferences();

    unit_refs: {
        const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse break :unit_refs;
        var idx = kv.value;

        while (idx != std.math.maxInt(u32)) {
            const ref = zcu.all_references.items[idx];
            zcu.free_references.append(gpa, idx) catch {
                // This space will be reused eventually, so we need not propagate this error.
                // Just leak it for now, and let GC reclaim it later on.
                break :unit_refs;
            };
            idx = ref.next;

            var opt_inline_frame = ref.inline_frame;
            while (opt_inline_frame.unwrap()) |inline_frame| {
                // The same inline frame could be used multiple times by one unit. We need to
                // detect this case to avoid adding it to `free_inline_reference_frames` more
                // than once. We do that by setting `parent` to itself as a marker.
                if (inline_frame.ptr(zcu).parent == inline_frame.toOptional()) break;
                zcu.free_inline_reference_frames.append(gpa, inline_frame) catch {
                    // This space will be reused eventually, so we need not propagate this error.
                    // Just leak it for now, and let GC reclaim it later on.
                    break :unit_refs;
                };
                opt_inline_frame = inline_frame.ptr(zcu).parent;
                inline_frame.ptr(zcu).parent = inline_frame.toOptional(); // signal to code above
            }
        }
    }

    type_refs: {
        const kv = zcu.type_reference_table.fetchSwapRemove(anal_unit) orelse break :type_refs;
        var idx = kv.value;

        while (idx != std.math.maxInt(u32)) {
            zcu.free_type_references.append(gpa, idx) catch {
                // This space will be reused eventually, so we need not propagate this error.
                // Just leak it for now, and let GC reclaim it later on.
                break :type_refs;
            };
            idx = zcu.all_type_references.items[idx].next;
        }
    }
}

/// Delete all compile logs performed by this `AnalUnit`.
/// Re-analysis of the `AnalUnit` will cause logs to be rediscovered.
pub fn deleteUnitCompileLogs(zcu: *Zcu, anal_unit: AnalUnit) void {
    const kv = zcu.compile_logs.fetchSwapRemove(anal_unit) orelse return;
    const gpa = zcu.gpa;
    var opt_line_idx = kv.value.first_line.toOptional();
    while (opt_line_idx.unwrap()) |line_idx| {
        zcu.free_compile_log_lines.append(gpa, line_idx) catch {
            // This space will be reused eventually, so we need not propagate this error.
            // Just leak it for now, and let GC reclaim it later on.
            return;
        };
        opt_line_idx = line_idx.get(zcu).next;
    }
}

pub fn addInlineReferenceFrame(zcu: *Zcu, frame: InlineReferenceFrame) Allocator.Error!Zcu.InlineReferenceFrame.Index {
    const frame_idx: InlineReferenceFrame.Index = zcu.free_inline_reference_frames.pop() orelse idx: {
        _ = try zcu.inline_reference_frames.addOne(zcu.gpa);
        break :idx @enumFromInt(zcu.inline_reference_frames.items.len - 1);
    };
    frame_idx.ptr(zcu).* = frame;
    return frame_idx;
}

pub fn addUnitReference(
    zcu: *Zcu,
    src_unit: AnalUnit,
    referenced_unit: AnalUnit,
    ref_src: LazySrcLoc,
    inline_frame: InlineReferenceFrame.Index.Optional,
) Allocator.Error!void {
    const gpa = zcu.gpa;

    zcu.clearCachedResolvedReferences();

    try zcu.reference_table.ensureUnusedCapacity(gpa, 1);

    const ref_idx = zcu.free_references.pop() orelse idx: {
        _ = try zcu.all_references.addOne(gpa);
        break :idx zcu.all_references.items.len - 1;
    };

    errdefer comptime unreachable;

    const gop = zcu.reference_table.getOrPutAssumeCapacity(src_unit);

    zcu.all_references.items[ref_idx] = .{
        .referenced = referenced_unit,
        .next = if (gop.found_existing) gop.value_ptr.* else std.math.maxInt(u32),
        .src = ref_src,
        .inline_frame = inline_frame,
    };

    gop.value_ptr.* = @intCast(ref_idx);
}
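
// Storage note: `all_references` acts as an arena of singly linked nodes chained through
// `next` (with `std.math.maxInt(u32)` as the end-of-list sentinel); `reference_table`
// maps each unit to the head of its chain, and `free_references` recycles indices that
// `deleteUnitReferences` releases. `addTypeReference` below follows the same scheme.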

pub fn addTypeReference(zcu: *Zcu, src_unit: AnalUnit, referenced_type: InternPool.Index, ref_src: LazySrcLoc) Allocator.Error!void {
    const gpa = zcu.gpa;

    zcu.clearCachedResolvedReferences();

    try zcu.type_reference_table.ensureUnusedCapacity(gpa, 1);

    const ref_idx = zcu.free_type_references.pop() orelse idx: {
        _ = try zcu.all_type_references.addOne(gpa);
        break :idx zcu.all_type_references.items.len - 1;
    };

    errdefer comptime unreachable;

    const gop = zcu.type_reference_table.getOrPutAssumeCapacity(src_unit);

    zcu.all_type_references.items[ref_idx] = .{
        .referenced = referenced_type,
        .next = if (gop.found_existing) gop.value_ptr.* else std.math.maxInt(u32),
        .src = ref_src,
    };

    gop.value_ptr.* = @intCast(ref_idx);
}

fn clearCachedResolvedReferences(zcu: *Zcu) void {
    if (zcu.resolved_references) |*r| r.deinit(zcu.gpa);
    zcu.resolved_references = null;
}

pub fn errorSetBits(zcu: *const Zcu) u16 {
    const target = zcu.getTarget();

    if (zcu.error_limit == 0) return 0;
    if (target.cpu.arch.isSpirV()) {
        // As expected by https://github.com/Snektron/zig-spirv-test-executor
        if (zcu.comp.config.is_test) return 32;
    }

    return @as(u16, std.math.log2_int(ErrorInt, zcu.error_limit)) + 1;
}
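
// For example (values chosen for illustration): `error_limit == 1` yields
// `log2(1) + 1 == 1` bit, and `error_limit == 1000` yields `log2(1000) + 1 == 10` bits,
// i.e. enough to represent every error value in `1..error_limit` alongside the zero
// "no error" value.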

pub fn errNote(
    zcu: *Zcu,
    src_loc: LazySrcLoc,
    parent: *ErrorMsg,
    comptime format: []const u8,
    args: anytype,
) error{OutOfMemory}!void {
    const msg = try std.fmt.allocPrint(zcu.gpa, format, args);
    errdefer zcu.gpa.free(msg);

    parent.notes = try zcu.gpa.realloc(parent.notes, parent.notes.len + 1);
    parent.notes[parent.notes.len - 1] = .{
        .src_loc = src_loc,
        .msg = msg,
    };
}
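
// Typical usage (a sketch mirroring `Sema` call sites; `decl_src` and `msg` are assumed
// to be a source location and a previously created `*ErrorMsg`):
//
//     try zcu.errNote(decl_src, msg, "declared here", .{});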

/// Deprecated. There is no global target for a Zig Compilation Unit. Instead,
/// look up the target based on the Module that contains the source code being
/// analyzed.
pub fn getTarget(zcu: *const Zcu) *const Target {
    return &zcu.root_mod.resolved_target.result;
}

/// Deprecated. There is no global optimization mode for a Zig Compilation
/// Unit. Instead, look up the optimization mode based on the Module that
/// contains the source code being analyzed.
pub fn optimizeMode(zcu: *const Zcu) std.builtin.OptimizeMode {
    return zcu.root_mod.optimize_mode;
}

pub fn handleUpdateExports(
    zcu: *Zcu,
    export_indices: []const Export.Index,
    result: link.File.UpdateExportsError!void,
) Allocator.Error!void {
    const gpa = zcu.gpa;
    result catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.AnalysisFail => {
            const export_idx = export_indices[0];
            const new_export = export_idx.ptr(zcu);
            new_export.status = .failed_retryable;
            try zcu.failed_exports.ensureUnusedCapacity(gpa, 1);
            const msg = try ErrorMsg.create(gpa, new_export.src, "unable to export: {s}", .{@errorName(err)});
            zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg);
        },
    };
}

pub fn addGlobalAssembly(zcu: *Zcu, unit: AnalUnit, source: []const u8) !void {
    const gpa = zcu.gpa;
    const gop = try zcu.global_assembly.getOrPut(gpa, unit);
    if (gop.found_existing) {
        const new_value = try std.fmt.allocPrint(gpa, "{s}\n{s}", .{ gop.value_ptr.*, source });
        gpa.free(gop.value_ptr.*);
        gop.value_ptr.* = new_value;
    } else {
        gop.value_ptr.* = try gpa.dupe(u8, source);
    }
}
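
// For example (hypothetical inputs): two calls for the same `unit` with sources
// ".globl foo" and "foo: ret" leave `global_assembly` holding ".globl foo\nfoo: ret"
// for that unit, since later sources are appended with a newline separator.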

pub const Feature = enum {
    /// When this feature is enabled, Sema will emit calls to
    /// `std.builtin.panic` functions for things like safety checks and
    /// unreachables. Otherwise traps will be emitted.
    panic_fn,
    /// When this feature is enabled, Sema will insert tracer functions for gathering a stack
    /// trace for error returns.
    error_return_trace,
    /// When this feature is enabled, Sema will emit the `is_named_enum_value` AIR instruction
    /// and use it to check for corrupt switches. Backends currently need to implement their own
    /// logic to determine whether an enum value is in the set of named values.
    is_named_enum_value,
    error_set_has_value,
    field_reordering,
    /// In theory, backends are supposed to work like this:
    ///
    /// * The AIR emitted by `Sema` is converted into MIR by `codegen.generateFunction`. This pass
    ///   is "pure", in that it does not depend on or modify any external mutable state.
    ///
    /// * That MIR is sent to the linker, which calls `codegen.emitFunction` to convert the MIR to
    ///   finalized machine code. This process is permitted to query and modify linker state.
    ///
    /// * The linker stores the resulting machine code in the binary as needed.
    ///
    /// The first stage described above can run in parallel to the rest of the compiler, and even to
    /// other code generation work; we can run as many codegen threads as we want in parallel
    /// because this pass is pure. Emit and link must be single-threaded, but are generally
    /// very fast, so that isn't a problem.
    ///
    /// Unfortunately, some code generation implementations currently query and/or mutate linker state
    /// or even (in the case of the LLVM backend) semantic analysis state. Such backends cannot be run
    /// in parallel with each other, with linking, or (potentially) with semantic analysis.
    ///
    /// Additionally, some backends continue to need the AIR in the "emit" stage, despite this pass
    /// operating on MIR. This complicates memory management under the threading model above.
    ///
    /// These are both **bugs** in backend implementations, left over from legacy code. However, they
    /// are difficult to fix. So, this `Feature` currently guards correct threading of code generation:
    ///
    /// * With this feature enabled, the backend is threaded as described above. The "emit" stage does
    ///   not have access to AIR (it will be `undefined`; see `codegen.emitFunction`).
    ///
    /// * With this feature disabled, semantic analysis, code generation, and linking all occur on the
    ///   same thread, and the "emit" stage has access to AIR.
    separate_thread,
};

pub fn backendSupportsFeature(zcu: *const Zcu, comptime feature: Feature) bool {
    const backend = target_util.zigBackend(&zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
    return target_util.backendSupportsFeature(backend, feature);
}
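
// Typical query site (a sketch of how a lowering strategy might be chosen, per the
// `panic_fn` documentation above):
//
//     if (zcu.backendSupportsFeature(.panic_fn)) {
//         // lower safety failures to calls into `std.builtin.panic` functions
//     } else {
//         // lower safety failures to trap instructions
//     }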

pub const AtomicPtrAlignmentError = error{
    FloatTooBig,
    IntTooBig,
    BadType,
    OutOfMemory,
};

pub const AtomicPtrAlignmentDiagnostics = struct {
    bits: u16 = undefined,
    max_bits: u16 = undefined,
};

/// If the ABI alignment of `ty` is OK for atomic operations, returns `.none`.
/// Otherwise returns an error; for `FloatTooBig` and `IntTooBig`, `diags` is
/// populated with the offending and maximum supported bit counts.
// TODO this function does not take into account CPU features, which can affect
// this value. Audit this!
pub fn atomicPtrAlignment(
    zcu: *Zcu,
    ty: Type,
    diags: *AtomicPtrAlignmentDiagnostics,
) AtomicPtrAlignmentError!Alignment {
    const target = zcu.getTarget();
    const max_atomic_bits: u16 = switch (target.cpu.arch) {
        .aarch64,
        .aarch64_be,
        => 128,

        .mips64,
        .mips64el,
        => 64, // N32 should be 64, not 32.

        .x86_64 => if (target.cpu.has(.x86, .cx16)) 128 else 64, // x32 should be 64 or 128, not 32.

        else => target.ptrBitWidth(),
    };

    if (ty.toIntern() == .bool_type) return .none;
    if (ty.isRuntimeFloat()) {
        const bit_count = ty.floatBits(target);
        if (bit_count > max_atomic_bits) {
            diags.* = .{
                .bits = bit_count,
                .max_bits = max_atomic_bits,
            };
            return error.FloatTooBig;
        }
        return .none;
    }
    if (switch (ty.zigTypeTag(zcu)) {
        .int, .@"enum" => true,
        .@"struct" => ty.containerLayout(zcu) == .@"packed",
        else => false,
    }) {
        assert(ty.isAbiInt(zcu));
        const bit_count = ty.intInfo(zcu).bits;
        if (bit_count > max_atomic_bits) {
            diags.* = .{
                .bits = bit_count,
                .max_bits = max_atomic_bits,
            };
            return error.IntTooBig;
        }
        return .none;
    }
    if (ty.isPtrAtRuntime(zcu)) return .none;
    return error.BadType;
}
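
// Worked example (assuming an x86_64 target): with the `cx16` CPU feature,
// `max_atomic_bits` is 128, so an atomic `f128` is accepted; without it, the same type
// fails with `error.FloatTooBig` and `diags` set to `.{ .bits = 128, .max_bits = 64 }`.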

/// Returns null in the following cases:
/// * Not a struct.
pub fn typeToStruct(zcu: *const Zcu, ty: Type) ?InternPool.LoadedStructType {
    if (ty.ip_index == .none) return null;
    const ip = &zcu.intern_pool;
    return switch (ip.indexToKey(ty.ip_index)) {
        .struct_type => ip.loadStructType(ty.ip_index),
        else => null,
    };
}

pub fn typeToPackedStruct(zcu: *const Zcu, ty: Type) ?InternPool.LoadedStructType {
    const s = zcu.typeToStruct(ty) orelse return null;
    if (s.layout != .@"packed") return null;
    return s;
}

/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
/// into the packed struct InternPool data rather than computing this on the
/// fly; however, it was found to perform worse when measured on real-world
/// projects.
pub fn structPackedFieldBitOffset(
    zcu: *Zcu,
    struct_type: InternPool.LoadedStructType,
    field_index: u32,
) u16 {
    const ip = &zcu.intern_pool;
    assert(struct_type.layout == .@"packed");
    assert(struct_type.haveLayout(ip));
    var bit_sum: u64 = 0;
    for (0..struct_type.field_types.len) |i| {
        if (i == field_index) {
            return @intCast(bit_sum);
        }
        const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
        bit_sum += field_ty.bitSize(zcu);
    }
    unreachable; // index out of bounds
}
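
// Worked example (hypothetical type): for `packed struct { a: u3, b: u7, c: u6 }`,
// field index 0 is at bit offset 0, index 1 at 3, and index 2 at 10: each offset is
// the sum of the bit sizes of the fields preceding it.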

pub fn typeToUnion(zcu: *const Zcu, ty: Type) ?InternPool.LoadedUnionType {
    if (ty.ip_index == .none) return null;
    const ip = &zcu.intern_pool;
    return switch (ip.indexToKey(ty.ip_index)) {
        .union_type => ip.loadUnionType(ty.ip_index),
        else => null,
    };
}

pub fn typeToFunc(zcu: *const Zcu, ty: Type) ?InternPool.Key.FuncType {
    if (ty.ip_index == .none) return null;
    return zcu.intern_pool.indexToFuncType(ty.toIntern());
}

pub fn iesFuncIndex(zcu: *const Zcu, ies_index: InternPool.Index) InternPool.Index {
    return zcu.intern_pool.iesFuncIndex(ies_index);
}

pub fn funcInfo(zcu: *const Zcu, func_index: InternPool.Index) InternPool.Key.Func {
    return zcu.intern_pool.toFunc(func_index);
}

pub const UnionLayout = struct {
    abi_size: u64,
    abi_align: Alignment,
    most_aligned_field: u32,
    most_aligned_field_size: u64,
    biggest_field: u32,
    payload_size: u64,
    payload_align: Alignment,
    tag_align: Alignment,
    tag_size: u64,
    padding: u32,

    pub fn tagOffset(layout: UnionLayout) u64 {
        return if (layout.tag_align.compare(.lt, layout.payload_align)) layout.payload_size else 0;
    }

    pub fn payloadOffset(layout: UnionLayout) u64 {
        return if (layout.tag_align.compare(.lt, layout.payload_align)) 0 else layout.tag_size;
    }
};
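
// Worked example (hypothetical layout): for a union with an 8-byte payload aligned to 8
// and a `u8` tag aligned to 1, the tag is less aligned than the payload, so
// `payloadOffset() == 0` and `tagOffset() == 8`: the tag trails the payload.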

/// Returns the index of the active field, given the current tag value
pub fn unionTagFieldIndex(zcu: *const Zcu, loaded_union: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
    const ip = &zcu.intern_pool;
    if (enum_tag.toIntern() == .none) return null;
    assert(ip.typeOf(enum_tag.toIntern()) == loaded_union.enum_tag_ty);
    return loaded_union.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
}

pub const ResolvedReference = struct {
    referencer: AnalUnit,
    /// If `inline_frame` is not `.none`, this is the *deepest* source location in the chain of
    /// inline calls. For source locations further up the inline call stack, consult `inline_frame`.
    src: LazySrcLoc,
    inline_frame: InlineReferenceFrame.Index.Optional,
};

/// Returns a mapping from an `AnalUnit` to where it is referenced.
/// If the value is `null`, the `AnalUnit` is a root of analysis.
/// If an `AnalUnit` is not in the returned map, it is unreferenced.
/// The returned hashmap is owned by the `Zcu`, so should not be freed by the caller.
/// This hashmap is cached, so repeated calls to this function are cheap.
pub fn resolveReferences(zcu: *Zcu) !*const std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) {
    if (zcu.resolved_references == null) {
        zcu.resolved_references = try zcu.resolveReferencesInner();
    }
    return &zcu.resolved_references.?;
}
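
// Example consumer (a sketch; `unit` is an arbitrary `AnalUnit`):
//
//     const map = try zcu.resolveReferences();
//     if (map.get(unit)) |opt_ref| {
//         if (opt_ref) |ref| {
//             // referenced: `ref.referencer` points at `unit` from `ref.src`
//         } else {
//             // `unit` is a root of analysis
//         }
//     } else {
//         // `unit` is unreferenced this update
//     }
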
fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) {
    const gpa = zcu.gpa;
    const comp = zcu.comp;
    const ip = &zcu.intern_pool;

    var units: std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .empty;
    var types: std.AutoArrayHashMapUnmanaged(InternPool.Index, ?ResolvedReference) = .empty;
    defer {
        units.deinit(gpa);
        types.deinit(gpa);
    }

    // This is not a sufficient size, but an approximate lower bound.
    try units.ensureTotalCapacity(gpa, @intCast(zcu.reference_table.count()));

    try types.ensureTotalCapacity(gpa, zcu.analysis_roots_len);
    for (zcu.analysisRoots()) |mod| {
        const file = zcu.module_roots.get(mod).?.unwrap() orelse continue;
        const root_ty = zcu.fileRootType(file);
        if (root_ty == .none) continue;
        types.putAssumeCapacityNoClobber(root_ty, null);
    }

    var unit_idx: usize = 0;
    var type_idx: usize = 0;
    while (true) {
        if (type_idx < types.count()) {
            const ty = types.keys()[type_idx];
            const referencer = types.values()[type_idx];
            type_idx += 1;

            log.debug("handle type '{f}'", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)});

            // If this type undergoes type resolution, the corresponding `AnalUnit` is automatically referenced.
            const has_resolution: bool = switch (ip.indexToKey(ty)) {
                .struct_type, .union_type => true,
                .enum_type => |k| k != .generated_tag,
                .opaque_type => false,
                else => unreachable,
            };
            if (has_resolution) {
                // this should only be referenced by the type
                const unit: AnalUnit = .wrap(.{ .type = ty });
                try units.putNoClobber(gpa, unit, referencer);
            }

            // If this is a union with a generated tag, its tag type is automatically referenced.
            // We don't add this reference for non-generated tags, as those will already be referenced via the union's type resolution, with a better source location.
            if (zcu.typeToUnion(Type.fromInterned(ty))) |union_obj| {
                const tag_ty = union_obj.enum_tag_ty;
                if (tag_ty != .none) {
                    if (ip.indexToKey(tag_ty).enum_type == .generated_tag) {
                        const gop = try types.getOrPut(gpa, tag_ty);
                        if (!gop.found_existing) gop.value_ptr.* = referencer;
                    }
                }
            }

            // Queue any decls within this type which would be automatically analyzed.
            // Keep in sync with analysis queueing logic in `Zcu.PerThread.ScanDeclIter.scanDecl`.
            const ns = Type.fromInterned(ty).getNamespace(zcu).unwrap().?;
            for (zcu.namespacePtr(ns).comptime_decls.items) |cu| {
                // `comptime` decls are always analyzed.
                const unit: AnalUnit = .wrap(.{ .@"comptime" = cu });
                const gop = try units.getOrPut(gpa, unit);
                if (!gop.found_existing) {
                    log.debug("type '{f}': ref comptime %{}", .{
                        Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
                        @intFromEnum(ip.getComptimeUnit(cu).zir_index.resolve(ip) orelse continue),
                    });
                    gop.value_ptr.* = referencer;
                }
            }
            for (zcu.namespacePtr(ns).test_decls.items) |nav_id| {
                const nav = ip.getNav(nav_id);
                // `test` declarations are analyzed depending on the test filter.
                const inst_info = nav.analysis.?.zir_index.resolveFull(ip) orelse continue;
                const file = zcu.fileByIndex(inst_info.file);
                const decl = file.zir.?.getDeclaration(inst_info.inst);

                if (!comp.config.is_test or file.mod != zcu.main_mod) continue;

                const want_analysis = switch (decl.kind) {
                    .@"const", .@"var" => unreachable,
                    .@"comptime" => unreachable,
                    .unnamed_test => true,
                    .@"test", .decltest => a: {
                        const fqn_slice = nav.fqn.toSlice(ip);
                        if (comp.test_filters.len > 0) {
                            for (comp.test_filters) |test_filter| {
                                if (std.mem.indexOf(u8, fqn_slice, test_filter) != null) break;
                            } else break :a false;
                        }
                        break :a true;
                    },
                };
                if (want_analysis) {
                    {
                        const gop = try units.getOrPut(gpa, .wrap(.{ .nav_val = nav_id }));
                        if (!gop.found_existing) {
                            log.debug("type '{f}': ref test %{}", .{
                                Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
                                @intFromEnum(inst_info.inst),
                            });
                            gop.value_ptr.* = referencer;
                        }
                    }
                    // Non-fatal AstGen errors could mean this test decl failed
                    if (nav.status == .fully_resolved) {
                        const gop = try units.getOrPut(gpa, .wrap(.{ .func = nav.status.fully_resolved.val }));
                        if (!gop.found_existing) gop.value_ptr.* = referencer;
                    }
                }
            }
            for (zcu.namespacePtr(ns).pub_decls.keys()) |nav| {
                // These are named declarations. They are analyzed only if marked `export`.
                const inst_info = ip.getNav(nav).analysis.?.zir_index.resolveFull(ip) orelse continue;
                const file = zcu.fileByIndex(inst_info.file);
                const decl = file.zir.?.getDeclaration(inst_info.inst);
                if (decl.linkage == .@"export") {
                    const unit: AnalUnit = .wrap(.{ .nav_val = nav });
                    const gop = try units.getOrPut(gpa, unit);
                    if (!gop.found_existing) {
                        log.debug("type '{f}': ref named %{}", .{
                            Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
                            @intFromEnum(inst_info.inst),
                        });
                        gop.value_ptr.* = referencer;
                    }
                }
            }
            for (zcu.namespacePtr(ns).priv_decls.keys()) |nav| {
                // These are named declarations. They are analyzed only if marked `export`.
                const inst_info = ip.getNav(nav).analysis.?.zir_index.resolveFull(ip) orelse continue;
                const file = zcu.fileByIndex(inst_info.file);
                const decl = file.zir.?.getDeclaration(inst_info.inst);
                if (decl.linkage == .@"export") {
                    const unit: AnalUnit = .wrap(.{ .nav_val = nav });
                    const gop = try units.getOrPut(gpa, unit);
                    if (!gop.found_existing) {
                        log.debug("type '{f}': ref named %{}", .{
                            Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
                            @intFromEnum(inst_info.inst),
                        });
                        gop.value_ptr.* = referencer;
                    }
                }
            }
            continue;
        }
        if (unit_idx < units.count()) {
            const unit = units.keys()[unit_idx];
            unit_idx += 1;

            // `nav_val` and `nav_ty` reference each other *implicitly* to save memory.
            queue_paired: {
                const other: AnalUnit = .wrap(switch (unit.unwrap()) {
                    .nav_val => |n| .{ .nav_ty = n },
                    .nav_ty => |n| .{ .nav_val = n },
                    .@"comptime", .type, .func, .memoized_state => break :queue_paired,
                });
                const gop = try units.getOrPut(gpa, other);
                if (gop.found_existing) break :queue_paired;
                gop.value_ptr.* = units.values()[unit_idx - 1]; // same reference location as `unit` (read before the increment above)
4188 }
4189
4190 log.debug("handle unit '{f}'", .{zcu.fmtAnalUnit(unit)});
4191
4192 if (zcu.reference_table.get(unit)) |first_ref_idx| {
4193 assert(first_ref_idx != std.math.maxInt(u32));
4194 var ref_idx = first_ref_idx;
4195 while (ref_idx != std.math.maxInt(u32)) {
4196 const ref = zcu.all_references.items[ref_idx];
4197 const gop = try units.getOrPut(gpa, ref.referenced);
4198 if (!gop.found_existing) {
4199 log.debug("unit '{f}': ref unit '{f}'", .{
4200 zcu.fmtAnalUnit(unit),
4201 zcu.fmtAnalUnit(ref.referenced),
4202 });
4203 gop.value_ptr.* = .{
4204 .referencer = unit,
4205 .src = ref.src,
4206 .inline_frame = ref.inline_frame,
4207 };
4208 }
4209 ref_idx = ref.next;
4210 }
4211 }
4212 if (zcu.type_reference_table.get(unit)) |first_ref_idx| {
4213 assert(first_ref_idx != std.math.maxInt(u32));
4214 var ref_idx = first_ref_idx;
4215 while (ref_idx != std.math.maxInt(u32)) {
4216 const ref = zcu.all_type_references.items[ref_idx];
4217 const gop = try types.getOrPut(gpa, ref.referenced);
4218 if (!gop.found_existing) {
4219 log.debug("unit '{f}': ref type '{f}'", .{
4220 zcu.fmtAnalUnit(unit),
4221 Type.fromInterned(ref.referenced).containerTypeName(ip).fmt(ip),
4222 });
4223 gop.value_ptr.* = .{
4224 .referencer = unit,
4225 .src = ref.src,
4226 .inline_frame = .none,
4227 };
4228 }
4229 ref_idx = ref.next;
4230 }
4231 }
4232 continue;
4233 }
4234 break;
4235 }
4236
4237 return units.move();
4238}
4239
4240pub fn analysisRoots(zcu: *Zcu) []*Package.Module {
4241 return zcu.analysis_roots_buffer[0..zcu.analysis_roots_len];
4242}
4243
pub fn fileByIndex(zcu: *const Zcu, file_index: File.Index) *File {
    return zcu.intern_pool.filePtr(file_index);
}

/// Returns the struct that represents this `File`.
/// If the struct has not been created, returns `.none`.
pub fn fileRootType(zcu: *const Zcu, file_index: File.Index) InternPool.Index {
    const ip = &zcu.intern_pool;
    const file_index_unwrapped = file_index.unwrap(ip);
    const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire();
    return files.view().items(.root_type)[file_index_unwrapped.index];
}

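/// Sets the struct type which represents the root of `file_index`. Counterpart to `fileRootType`.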
pub fn setFileRootType(zcu: *Zcu, file_index: File.Index, root_type: InternPool.Index) void {
    const ip = &zcu.intern_pool;
    const file_index_unwrapped = file_index.unwrap(ip);
    const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire();
    files.view().items(.root_type)[file_index_unwrapped.index] = root_type;
}

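/// Returns a `LazySrcLoc` pointing at the AST node which declares this `Nav`.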
pub fn navSrcLoc(zcu: *const Zcu, nav_index: InternPool.Nav.Index) LazySrcLoc {
    const ip = &zcu.intern_pool;
    return .{
        .base_node_inst = ip.getNav(nav_index).srcInst(ip),
        .offset = LazySrcLoc.Offset.nodeOffset(.zero),
    };
}

pub fn typeSrcLoc(zcu: *const Zcu, ty_index: InternPool.Index) LazySrcLoc {
    _ = zcu;
    _ = ty_index;
    @panic("TODO");
}

pub fn typeFileScope(zcu: *Zcu, ty_index: InternPool.Index) *File {
    _ = zcu;
    _ = ty_index;
    @panic("TODO");
}

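/// Returns the source line on which this `Nav` is declared. Asserts that the `Nav`'s ZIR
/// instruction is still tracked and that the file's ZIR is loaded.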
pub fn navSrcLine(zcu: *Zcu, nav_index: InternPool.Nav.Index) u32 {
    const ip = &zcu.intern_pool;
    const inst_info = ip.getNav(nav_index).srcInst(ip).resolveFull(ip).?;
    const zir = zcu.fileByIndex(inst_info.file).zir;
    return zir.?.getDeclaration(inst_info.inst).src_line;
}

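/// Returns the resolved `Value` of this `Nav`. Asserts that the `Nav` is fully resolved.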
pub fn navValue(zcu: *const Zcu, nav_index: InternPool.Nav.Index) Value {
    return Value.fromInterned(zcu.intern_pool.getNav(nav_index).status.fully_resolved.val);
}

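/// Returns the index of the `File` in which this `Nav` is declared.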
pub fn navFileScopeIndex(zcu: *Zcu, nav: InternPool.Nav.Index) File.Index {
    const ip = &zcu.intern_pool;
    return ip.getNav(nav).srcInst(ip).resolveFile(ip);
}

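/// Like `navFileScopeIndex`, but returns the `File` itself.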
pub fn navFileScope(zcu: *Zcu, nav: InternPool.Nav.Index) *File {
    return zcu.fileByIndex(zcu.navFileScopeIndex(nav));
}

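/// Returns a formatter which prints a human-readable description of `unit` for debug logging.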
pub fn fmtAnalUnit(zcu: *Zcu, unit: AnalUnit) std.fmt.Alt(FormatAnalUnit, formatAnalUnit) {
    return .{ .data = .{ .unit = unit, .zcu = zcu } };
}
pub fn fmtDependee(zcu: *Zcu, d: InternPool.Dependee) std.fmt.Alt(FormatDependee, formatDependee) {
    return .{ .data = .{ .dependee = d, .zcu = zcu } };
}

const FormatAnalUnit = struct {
    unit: AnalUnit,
    zcu: *Zcu,
};

fn formatAnalUnit(data: FormatAnalUnit, writer: *Io.Writer) Io.Writer.Error!void {
    const zcu = data.zcu;
    const ip = &zcu.intern_pool;
    switch (data.unit.unwrap()) {
        .@"comptime" => |cu_id| {
            const cu = ip.getComptimeUnit(cu_id);
            if (cu.zir_index.resolveFull(ip)) |resolved| {
                const file_path = zcu.fileByIndex(resolved.file).path;
                return writer.print("comptime(inst=('{f}', %{}) [{}])", .{ file_path.fmt(zcu.comp), @intFromEnum(resolved.inst), @intFromEnum(cu_id) });
            } else {
                return writer.print("comptime(inst=<lost> [{}])", .{@intFromEnum(cu_id)});
            }
        },
        .nav_val => |nav| return writer.print("nav_val('{f}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
        .nav_ty => |nav| return writer.print("nav_ty('{f}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
        .type => |ty| return writer.print("ty('{f}' [{}])", .{ Type.fromInterned(ty).containerTypeName(ip).fmt(ip), @intFromEnum(ty) }),
        .func => |func| {
            const nav = zcu.funcInfo(func).owner_nav;
            return writer.print("func('{f}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(func) });
        },
        .memoized_state => return writer.writeAll("memoized_state"),
    }
}

const FormatDependee = struct { dependee: InternPool.Dependee, zcu: *Zcu };

fn formatDependee(data: FormatDependee, writer: *Io.Writer) Io.Writer.Error!void {
    const zcu = data.zcu;
    const ip = &zcu.intern_pool;
    switch (data.dependee) {
        .src_hash => |ti| {
            const info = ti.resolveFull(ip) orelse {
                return writer.writeAll("inst(<lost>)");
            };
            const file_path = zcu.fileByIndex(info.file).path;
            return writer.print("inst('{f}', %{d})", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst) });
        },
        .nav_val => |nav| {
            const fqn = ip.getNav(nav).fqn;
            return writer.print("nav_val('{f}')", .{fqn.fmt(ip)});
        },
        .nav_ty => |nav| {
            const fqn = ip.getNav(nav).fqn;
            return writer.print("nav_ty('{f}')", .{fqn.fmt(ip)});
        },
        .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
            .struct_type, .union_type, .enum_type => return writer.print("type('{f}')", .{Type.fromInterned(ip_index).containerTypeName(ip).fmt(ip)}),
            .func => |f| return writer.print("ies('{f}')", .{ip.getNav(f.owner_nav).fqn.fmt(ip)}),
            else => unreachable,
        },
        .zon_file => |file| {
            const file_path = zcu.fileByIndex(file).path;
            return writer.print("zon_file('{f}')", .{file_path.fmt(zcu.comp)});
        },
        .embed_file => |ef_idx| {
            const ef = ef_idx.get(zcu);
            return writer.print("embed_file('{f}')", .{ef.path.fmt(zcu.comp)});
        },
        .namespace => |ti| {
            const info = ti.resolveFull(ip) orelse {
                return writer.writeAll("namespace(<lost>)");
            };
            const file_path = zcu.fileByIndex(info.file).path;
            return writer.print("namespace('{f}', %{d})", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst) });
        },
        .namespace_name => |k| {
            const info = k.namespace.resolveFull(ip) orelse {
                return writer.print("namespace(<lost>, '{f}')", .{k.name.fmt(ip)});
            };
            const file_path = zcu.fileByIndex(info.file).path;
            return writer.print("namespace('{f}', %{d}, '{f}')", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst), k.name.fmt(ip) });
        },
        .memoized_state => return writer.writeAll("memoized_state"),
    }
}

/// Given the `InternPool.Index` of a function, set its resolved IES to `.none` if it
/// may be outdated. `Sema` should do this before ever loading a resolved IES.
pub fn maybeUnresolveIes(zcu: *Zcu, func_index: InternPool.Index) !void {
    const unit = AnalUnit.wrap(.{ .func = func_index });
    if (zcu.outdated.contains(unit) or zcu.potentially_outdated.contains(unit)) {
        // We're consulting the resolved IES now, but the function is outdated, so its
        // IES may have changed. We have to assume the IES is outdated and set the resolved
        // set back to `.none`.
        //
        // This will cause `PerThread.analyzeFnBody` to mark the IES as outdated when it's
        // eventually hit.
        //
        // Since the IES needs to be resolved, the function body will now definitely need
        // re-analysis (even if the IES turns out to be the same!), so mark it as
        // definitely-outdated if it's only PO.
        if (zcu.potentially_outdated.fetchSwapRemove(unit)) |kv| {
            const gpa = zcu.gpa;
            try zcu.outdated.putNoClobber(gpa, unit, kv.value);
            if (kv.value == 0) {
                try zcu.outdated_ready.put(gpa, unit, {});
            }
        }
        zcu.intern_pool.funcSetIesResolved(func_index, .none);
    }
}

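/// Determines whether `cc` can be used for a function on the current target with the backend
/// in use. On failure, the result carries either the architectures which do allow `cc`, or
/// the backend which lacks support for it.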
pub fn callconvSupported(zcu: *Zcu, cc: std.builtin.CallingConvention) union(enum) {
    ok,
    bad_arch: []const std.Target.Cpu.Arch, // value is allowed archs for cc
    bad_backend: std.builtin.CompilerBackend, // value is current backend
} {
    const target = zcu.getTarget();
    const backend = target_util.zigBackend(target, zcu.comp.config.use_llvm);
    switch (cc) {
        .auto, .@"inline" => return .ok,
        .async => return .{ .bad_backend = backend }, // nothing supports async currently
        .naked => {}, // depends only on backend
        else => for (cc.archs()) |allowed_arch| {
            if (allowed_arch == target.cpu.arch) break;
        } else return .{ .bad_arch = cc.archs() },
    }
    const backend_ok = switch (backend) {
        .stage1 => unreachable,
        .other => unreachable,
        _ => unreachable,

        .stage2_llvm => @import("codegen/llvm.zig").toLlvmCallConv(cc, target) != null,
        .stage2_c => ok: {
            if (target.cCallingConvention()) |default_c| {
                if (cc.eql(default_c)) {
                    break :ok true;
                }
            }
            break :ok switch (cc) {
                .x86_16_cdecl,
                .x86_16_stdcall,
                .x86_16_regparmcall,
                .x86_16_interrupt,
                .x86_64_sysv,
                .x86_64_win,
                .x86_64_vectorcall,
                .x86_64_regcall_v3_sysv,
                .x86_64_regcall_v4_win,
                .x86_64_interrupt,
                .x86_fastcall,
                .x86_thiscall,
                .x86_vectorcall,
                .x86_regcall_v3,
                .x86_regcall_v4_win,
                .x86_interrupt,
                .aarch64_vfabi,
                .aarch64_vfabi_sve,
                .arm_aapcs,
                .csky_interrupt,
                .riscv64_lp64_v,
                .riscv32_ilp32_v,
                .m68k_rtd,
                .m68k_interrupt,
                .msp430_interrupt,
                => |opts| opts.incoming_stack_alignment == null,

                .arm_aapcs_vfp,
                => |opts| opts.incoming_stack_alignment == null,

                .arc_interrupt,
                => |opts| opts.incoming_stack_alignment == null,

                .arm_interrupt,
                => |opts| opts.incoming_stack_alignment == null,

                .microblaze_interrupt,
                => |opts| opts.incoming_stack_alignment == null,

                .mips_interrupt,
                .mips64_interrupt,
                => |opts| opts.incoming_stack_alignment == null,

                .riscv32_interrupt,
                .riscv64_interrupt,
                => |opts| opts.incoming_stack_alignment == null,

                .sh_interrupt,
                => |opts| opts.incoming_stack_alignment == null,

                .x86_sysv,
                .x86_win,
                .x86_stdcall,
                => |opts| opts.incoming_stack_alignment == null and opts.register_params == 0,

                .avr_interrupt,
                .avr_signal,
                => true,

                .naked => true,

                else => false,
            };
        },
        .stage2_wasm => switch (cc) {
            .wasm_mvp => |opts| opts.incoming_stack_alignment == null,
            else => false,
        },
        .stage2_arm => switch (cc) {
            .arm_aapcs => |opts| opts.incoming_stack_alignment == null,
            .naked => true,
            else => false,
        },
        .stage2_x86_64 => switch (cc) {
            .x86_64_sysv, .x86_64_win, .naked => true, // incoming stack alignment supported
            else => false,
        },
        .stage2_aarch64 => switch (cc) {
            .aarch64_aapcs, .aarch64_aapcs_darwin, .naked => true,
            else => false,
        },
        .stage2_x86 => switch (cc) {
            .x86_sysv,
            .x86_win,
            => |opts| opts.incoming_stack_alignment == null and opts.register_params == 0,
            .naked => true,
            else => false,
        },
        .stage2_powerpc => switch (target.cpu.arch) {
            .powerpc, .powerpcle => switch (cc) {
                .powerpc_sysv,
                .powerpc_sysv_altivec,
                .powerpc_aix,
                .powerpc_aix_altivec,
                .naked,
                => true,
                else => false,
            },
            .powerpc64, .powerpc64le => switch (cc) {
                .powerpc64_elf,
                .powerpc64_elf_altivec,
                .powerpc64_elf_v2,
                .naked,
                => true,
                else => false,
            },
            else => unreachable,
        },
        .stage2_riscv64 => switch (cc) {
            .riscv64_lp64 => |opts| opts.incoming_stack_alignment == null,
            .naked => true,
            else => false,
        },
        .stage2_sparc64 => switch (cc) {
            .sparc64_sysv => |opts| opts.incoming_stack_alignment == null,
            .naked => true,
            else => false,
        },
        .stage2_spirv => switch (cc) {
            .spirv_device, .spirv_kernel => true,
            .spirv_fragment, .spirv_vertex => target.os.tag == .vulkan or target.os.tag == .opengl,
            else => false,
        },
    };
    if (!backend_ok) return .{ .bad_backend = backend };
    return .ok;
}

pub const CodegenFailError = error{
    /// Indicates the error message has already been stored at `Zcu.failed_codegen`.
    CodegenFail,
    OutOfMemory,
};

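/// Formats an error message at the source location of `nav_index` and records it in
/// `zcu.failed_codegen`. See `codegenFailMsg`.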
pub fn codegenFail(
    zcu: *Zcu,
    nav_index: InternPool.Nav.Index,
    comptime format: []const u8,
    args: anytype,
) CodegenFailError {
    const msg = try Zcu.ErrorMsg.create(zcu.gpa, zcu.navSrcLoc(nav_index), format, args);
    return zcu.codegenFailMsg(nav_index, msg);
}

/// Takes ownership of `msg`, even on OOM.
pub fn codegenFailMsg(zcu: *Zcu, nav_index: InternPool.Nav.Index, msg: *ErrorMsg) CodegenFailError {
    const gpa = zcu.gpa;
    {
        zcu.comp.mutex.lock();
        defer zcu.comp.mutex.unlock();
        errdefer msg.deinit(gpa);
        try zcu.failed_codegen.putNoClobber(gpa, nav_index, msg);
    }
    return error.CodegenFail;
}

/// Asserts that `zcu.failed_codegen` contains the key `nav`, with the necessary lock held.
pub fn assertCodegenFailed(zcu: *Zcu, nav: InternPool.Nav.Index) void {
    zcu.comp.mutex.lock();
    defer zcu.comp.mutex.unlock();
    assert(zcu.failed_codegen.contains(nav));
}

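/// Like `codegenFail`, but the error is attributed to a type rather than a `Nav`, and is
/// recorded in `zcu.failed_types`.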
pub fn codegenFailType(
    zcu: *Zcu,
    ty_index: InternPool.Index,
    comptime format: []const u8,
    args: anytype,
) CodegenFailError {
    const gpa = zcu.gpa;
    try zcu.failed_types.ensureUnusedCapacity(gpa, 1);
    const msg = try Zcu.ErrorMsg.create(gpa, zcu.typeSrcLoc(ty_index), format, args);
    zcu.failed_types.putAssumeCapacityNoClobber(ty_index, msg);
    return error.CodegenFail;
}

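/// Like `codegenFailMsg`, but for a type. Takes ownership of `msg`, even on OOM.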
pub fn codegenFailTypeMsg(zcu: *Zcu, ty_index: InternPool.Index, msg: *ErrorMsg) CodegenFailError {
    const gpa = zcu.gpa;
    {
        errdefer msg.deinit(gpa);
        try zcu.failed_types.ensureUnusedCapacity(gpa, 1);
    }
    zcu.failed_types.putAssumeCapacityNoClobber(ty_index, msg);
    return error.CodegenFail;
}

/// Asserts that `zcu.multi_module_err != null`.
pub fn addFileInMultipleModulesError(
    zcu: *Zcu,
    eb: *std.zig.ErrorBundle.Wip,
) Allocator.Error!void {
    const gpa = zcu.gpa;

    const info = zcu.multi_module_err.?;
    const file = info.file;

    // error: file exists in modules 'root.foo' and 'root.bar'
    // note: files must belong to only one module
    // note: file is imported here
    // note: which is imported here
    // note: which is the root of module 'root.foo' imported here
    // note: file is the root of module 'root.bar' imported here

    const file_src = try zcu.fileByIndex(file).errorBundleWholeFileSrc(zcu, eb);
    const root_msg = try eb.printString("file exists in modules '{s}' and '{s}'", .{
        info.modules[0].fully_qualified_name,
        info.modules[1].fully_qualified_name,
    });

    var notes: std.ArrayList(std.zig.ErrorBundle.MessageIndex) = .empty;
    defer notes.deinit(gpa);

    try notes.append(gpa, try eb.addErrorMessage(.{
        .msg = try eb.addString("files must belong to only one module"),
        .src_loc = file_src,
    }));

    try zcu.explainWhyFileIsInModule(eb, &notes, file, info.modules[0], info.refs[0]);
    try zcu.explainWhyFileIsInModule(eb, &notes, file, info.modules[1], info.refs[1]);

    try eb.addRootErrorMessage(.{
        .msg = root_msg,
        .src_loc = file_src,
        .notes_len = @intCast(notes.items.len),
    });
    const notes_start = try eb.reserveNotes(@intCast(notes.items.len));
    const notes_slice: []std.zig.ErrorBundle.MessageIndex = @ptrCast(eb.extra.items[notes_start..]);
    @memcpy(notes_slice, notes.items);
}

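/// Appends notes to `notes_out` explaining, via the chain of imports starting at `ref`, why
/// `file` is considered to belong to `in_module`.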
fn explainWhyFileIsInModule(
    zcu: *Zcu,
    eb: *std.zig.ErrorBundle.Wip,
    notes_out: *std.ArrayList(std.zig.ErrorBundle.MessageIndex),
    file: File.Index,
    in_module: *Package.Module,
    ref: File.Reference,
) Allocator.Error!void {
    const gpa = zcu.gpa;

    // error: file is the root of module 'foo'
    //
    // error: file is imported here by the root of module 'foo'
    //
    // error: file is imported here
    // note: which is imported here
    // note: which is imported here by the root of module 'foo'

    var import = switch (ref) {
        .analysis_root => |mod| {
            assert(mod == in_module);
            try notes_out.append(gpa, try eb.addErrorMessage(.{
                .msg = try eb.printString("file is the root of module '{s}'", .{mod.fully_qualified_name}),
                .src_loc = try zcu.fileByIndex(file).errorBundleWholeFileSrc(zcu, eb),
            }));
            return;
        },
        .import => |import| if (import.module) |mod| {
            assert(mod == in_module);
            try notes_out.append(gpa, try eb.addErrorMessage(.{
                .msg = try eb.printString("file is the root of module '{s}'", .{mod.fully_qualified_name}),
                .src_loc = try zcu.fileByIndex(file).errorBundleWholeFileSrc(zcu, eb),
            }));
            return;
        } else import,
    };

    var is_first = true;
    while (true) {
        const thing: []const u8 = if (is_first) "file" else "which";
        is_first = false;

        const importer_file = zcu.fileByIndex(import.importer);
        // `errorBundleTokenSrc` expects the tree to be loaded
        _ = importer_file.getTree(zcu) catch |err| {
            try Compilation.unableToLoadZcuFile(zcu, eb, importer_file, err);
            return; // stop the explanation early
        };
        const import_src = try importer_file.errorBundleTokenSrc(import.tok, zcu, eb);

        const importer_ref = zcu.alive_files.get(import.importer).?;
        const importer_root: ?*Package.Module = switch (importer_ref) {
            .analysis_root => |mod| mod,
            .import => |i| i.module,
        };

        if (importer_root) |m| {
            try notes_out.append(gpa, try eb.addErrorMessage(.{
                .msg = try eb.printString("{s} is imported here by the root of module '{s}'", .{ thing, m.fully_qualified_name }),
                .src_loc = import_src,
            }));
            return;
        }

        try notes_out.append(gpa, try eb.addErrorMessage(.{
            .msg = try eb.printString("{s} is imported here", .{thing}),
            .src_loc = import_src,
        }));

        import = importer_ref.import;
    }
}

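/// State tracking the progress node and analysis timer for one unit of semantic analysis.
/// Created by `trackUnitSema`; callers must invoke `end` when analysis of the unit finishes.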
const TrackedUnitSema = struct {
    /// `null` means we created the node, so should end it.
    old_name: ?[std.Progress.Node.max_name_len]u8,
    old_analysis_timer: ?Compilation.Timer,
    analysis_timer_decl: ?InternPool.TrackedInst.Index,
    pub fn end(tus: TrackedUnitSema, zcu: *Zcu) void {
        const comp = zcu.comp;
        if (tus.old_name) |old_name| {
            zcu.sema_prog_node.completeOne(); // we're just renaming, but it's effectively completion
            zcu.cur_sema_prog_node.setName(&old_name);
        } else {
            zcu.cur_sema_prog_node.end();
            zcu.cur_sema_prog_node = .none;
        }
        report_time: {
            const sema_ns = zcu.cur_analysis_timer.?.finish() orelse break :report_time;
            const zir_decl = tus.analysis_timer_decl orelse break :report_time;
            comp.mutex.lock();
            defer comp.mutex.unlock();
            comp.time_report.?.stats.cpu_ns_sema += sema_ns;
            const gop = comp.time_report.?.decl_sema_info.getOrPut(comp.gpa, zir_decl) catch |err| switch (err) {
                error.OutOfMemory => {
                    comp.setAllocFailure();
                    break :report_time;
                },
            };
            if (!gop.found_existing) gop.value_ptr.* = .{ .ns = 0, .count = 0 };
            gop.value_ptr.ns += sema_ns;
            gop.value_ptr.count += 1;
        }
        zcu.cur_analysis_timer = tus.old_analysis_timer;
        if (zcu.cur_analysis_timer) |*t| t.@"resume"();
    }
};
pub fn trackUnitSema(zcu: *Zcu, name: []const u8, zir_inst: ?InternPool.TrackedInst.Index) TrackedUnitSema {
    if (zcu.cur_analysis_timer) |*t| t.pause();
    const old_analysis_timer = zcu.cur_analysis_timer;
    zcu.cur_analysis_timer = zcu.comp.startTimer();
    const old_name: ?[std.Progress.Node.max_name_len]u8 = old_name: {
        if (zcu.cur_sema_prog_node.index == .none) {
            zcu.cur_sema_prog_node = zcu.sema_prog_node.start(name, 0);
            break :old_name null;
        }
        const old_name = zcu.cur_sema_prog_node.getName();
        zcu.cur_sema_prog_node.setName(name);
        break :old_name old_name;
    };
    return .{
        .old_name = old_name,
        .old_analysis_timer = old_analysis_timer,
        .analysis_timer_decl = zir_inst,
    };
}