//! This type provides a wrapper around a `*Zcu` for uses which require a thread `Id`.
//! Any operation which mutates `InternPool` state lives here rather than on `Zcu`.

const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const Ast = std.zig.Ast;
const AstGen = std.zig.AstGen;
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Cache = std.Build.Cache;
const log = std.log.scoped(.zcu);
const mem = std.mem;
const Zir = std.zig.Zir;
const Zoir = std.zig.Zoir;
const ZonGen = std.zig.ZonGen;
const Io = std.Io;

const Air = @import("../Air.zig");
const Builtin = @import("../Builtin.zig");
const build_options = @import("build_options");
const builtin = @import("builtin");
const dev = @import("../dev.zig");
const InternPool = @import("../InternPool.zig");
const AnalUnit = InternPool.AnalUnit;
const introspect = @import("../introspect.zig");
const Module = @import("../Package.zig").Module;
const Sema = @import("../Sema.zig");
const target_util = @import("../target.zig");
const trace = @import("../tracy.zig").trace;
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const Zcu = @import("../Zcu.zig");
const Compilation = @import("../Compilation.zig");
const codegen = @import("../codegen.zig");
const crash_report = @import("../crash_report.zig");

zcu: *Zcu,

/// Dense, per-thread unique index.
tid: Id,

pub const IdBacking = u7;
pub const Id = if (InternPool.single_threaded) enum { main } else enum(IdBacking) { main, _ };
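// Note: with `IdBacking = u7`, at most 128 distinct thread ids (including `main`) can
// exist at once; in single-threaded builds `Id` collapses to just `main`.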

pub fn activate(zcu: *Zcu, tid: Id) Zcu.PerThread {
    zcu.intern_pool.activate();
    return .{ .zcu = zcu, .tid = tid };
}

pub fn deactivate(pt: Zcu.PerThread) void {
    pt.zcu.intern_pool.deactivate();
}
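
// A minimal usage sketch (hypothetical caller; the real call sites live elsewhere in the
// compiler and pair every `activate` with a `deactivate`):
//
//     const pt = Zcu.PerThread.activate(zcu, .main);
//     defer pt.deactivate();
//     // ... perform InternPool-mutating work through `pt` ...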

fn deinitFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const file = zcu.fileByIndex(file_index);
    log.debug("deinit File {f}", .{file.path.fmt(zcu.comp)});
    file.path.deinit(gpa);
    file.unload(gpa);
    if (file.prev_zir) |prev_zir| {
        prev_zir.deinit(gpa);
        gpa.destroy(prev_zir);
    }
    file.* = undefined;
}

pub fn destroyFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) void {
    const gpa = pt.zcu.gpa;
    const file = pt.zcu.fileByIndex(file_index);
    pt.deinitFile(file_index);
    gpa.destroy(file);
}

/// Ensures that `file` has up-to-date ZIR. If not, loads the ZIR cache or runs
/// AstGen as needed. Also updates `file.status`. Does not assume that `file.mod`
/// is populated. Does not return `error.AnalysisFail` on AstGen failures.
pub fn updateFile(
    pt: Zcu.PerThread,
    file_index: Zcu.File.Index,
    file: *Zcu.File,
) !void {
    dev.check(.ast_gen);

    const tracy = trace(@src());
    defer tracy.end();

    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = zcu.gpa;
    const io = comp.io;

    // In any case we need to examine the stat of the file to determine the course of action.
    var source_file = f: {
        const dir, const sub_path = file.path.openInfo(comp.dirs);
        break :f try dir.openFile(sub_path, .{});
    };
    defer source_file.close();

    const stat = try source_file.stat();

    const want_local_cache = switch (file.path.root) {
        .none, .local_cache => true,
        .global_cache, .zig_lib => false,
    };

    const hex_digest: Cache.HexDigest = d: {
        var h: Cache.HashHelper = .{};
        // As well as the file path, we also include the compiler version in case of backwards-incompatible ZIR changes.
        file.path.addToHasher(&h.hasher);
        h.addBytes(build_options.version);
        h.add(builtin.zig_backend);
        break :d h.final();
    };

    const cache_directory = if (want_local_cache) zcu.local_zir_cache else zcu.global_zir_cache;
    const zir_dir = cache_directory.handle;

    // Determine whether we need to reload the file from disk and redo parsing and AstGen.
    var lock: std.fs.File.Lock = switch (file.status) {
        .never_loaded, .retryable_failure => lock: {
            // First, load the cached ZIR code, if any.
            log.debug("AstGen checking cache: {f} (local={}, digest={s})", .{
                file.path.fmt(comp), want_local_cache, &hex_digest,
            });

            break :lock .shared;
        },
        .astgen_failure, .success => lock: {
            const unchanged_metadata =
                stat.size == file.stat.size and
                stat.mtime.nanoseconds == file.stat.mtime.nanoseconds and
                stat.inode == file.stat.inode;

            if (unchanged_metadata) {
                log.debug("unmodified metadata of file: {f}", .{file.path.fmt(comp)});
                return;
            }

            log.debug("metadata changed: {f}", .{file.path.fmt(comp)});

            break :lock .exclusive;
        },
    };

    // The old compile error, if any, is no longer relevant.
    pt.lockAndClearFileCompileError(file_index, file);

    // If `zir` is not null, and `prev_zir` is null, then `TrackedInst`s are associated with `zir`.
    // We need to keep it around!
    // As an optimization, also check `loweringFailed`; if true, but `prev_zir == null`, then this
    // file has never passed AstGen, so we actually need not cache the old ZIR.
    if (file.zir != null and file.prev_zir == null and !file.zir.?.loweringFailed()) {
        assert(file.prev_zir == null);
        const prev_zir_ptr = try gpa.create(Zir);
        file.prev_zir = prev_zir_ptr;
        prev_zir_ptr.* = file.zir.?;
        file.zir = null;
    }

    // If ZOIR is changing, then we need to invalidate dependencies on it
    if (file.zoir != null) file.zoir_invalidated = true;

    // We're going to re-load everything, so unload source, AST, ZIR, ZOIR.
    file.unload(gpa);

    // We ask for a lock in order to coordinate with other zig processes.
    // If another process is already working on this file, we will get the cached
    // version. Likewise if we're working on AstGen and another process asks for
    // the cached file, they'll get it.
    const cache_file = while (true) {
        break zir_dir.createFile(&hex_digest, .{
            .read = true,
            .truncate = false,
            .lock = lock,
        }) catch |err| switch (err) {
            error.NotDir => unreachable, // no dir components
            error.BadPathName => unreachable, // it's a hex encoded name
            error.NameTooLong => unreachable, // it's a fixed size name
            error.PipeBusy => unreachable, // it's not a pipe
            error.NoDevice => unreachable, // it's not a pipe
            error.WouldBlock => unreachable, // not asking for non-blocking I/O
            error.FileNotFound => {
                // There are no dir components, so the only possibility should
                // be that the directory behind the handle has been deleted,
                // however we have observed on macOS two processes racing to do
                // openat() with O_CREAT manifest in ENOENT.
                //
                // As a workaround, we retry with exclusive=true which
                // disambiguates by returning EEXIST, indicating original
                // failure was a race, or ENOENT, indicating deletion of the
                // directory of our open handle.
                if (!builtin.os.tag.isDarwin()) {
                    std.process.fatal("cache directory '{f}' unexpectedly removed during compiler execution", .{
                        cache_directory,
                    });
                }
                break zir_dir.createFile(&hex_digest, .{
                    .read = true,
                    .truncate = false,
                    .lock = lock,
                    .exclusive = true,
                }) catch |excl_err| switch (excl_err) {
                    error.PathAlreadyExists => continue,
                    error.FileNotFound => {
                        std.process.fatal("cache directory '{f}' unexpectedly removed during compiler execution", .{
                            cache_directory,
                        });
                    },
                    else => |e| return e,
                };
            },

            else => |e| return e, // Retryable errors are handled at callsite.
        };
    };
    defer cache_file.close();

    // Under `--time-report`, ignore cache hits; do the work anyway for those juicy numbers.
    const ignore_hit = comp.time_report != null;

    const need_update = while (true) {
        const result = switch (file.getMode()) {
            inline else => |mode| try loadZirZoirCache(zcu, cache_file, stat, file, mode),
        };
        switch (result) {
            .success => if (!ignore_hit) {
                log.debug("AstGen cached success: {f}", .{file.path.fmt(comp)});
                break false;
            },
            .invalid => {},
            .truncated => log.warn("unexpected EOF reading cached ZIR for {f}", .{file.path.fmt(comp)}),
            .stale => log.debug("AstGen cache stale: {f}", .{file.path.fmt(comp)}),
        }

        // If we already have the exclusive lock then it is our job to update.
        if (builtin.os.tag == .wasi or lock == .exclusive) break true;
        // Otherwise, unlock to give someone a chance to get the exclusive lock
        // and then upgrade to an exclusive lock.
        cache_file.unlock();
        lock = .exclusive;
        try cache_file.lock(lock);
    };

    if (need_update) {
        // The cache is definitely stale so delete the contents to avoid an underwrite later.
        cache_file.setEndPos(0) catch |err| switch (err) {
            error.FileTooBig => unreachable, // 0 is not too big
            else => |e| return e,
        };
        try cache_file.seekTo(0);

        if (stat.size > std.math.maxInt(u32))
            return error.FileTooBig;

        const source = try gpa.allocSentinel(u8, @intCast(stat.size), 0);
        defer if (file.source == null) gpa.free(source);
        var source_fr = source_file.reader(io, &.{});
        source_fr.size = stat.size;
        source_fr.interface.readSliceAll(source) catch |err| switch (err) {
            error.ReadFailed => return source_fr.err.?,
            error.EndOfStream => return error.UnexpectedEndOfFile,
        };

        file.source = source;

        var timer = comp.startTimer();
        // Any potential AST errors are converted to ZIR errors when we run AstGen/ZonGen.
        file.tree = try Ast.parse(gpa, source, file.getMode());
        if (timer.finish()) |ns_parse| {
            comp.mutex.lock();
            defer comp.mutex.unlock();
            comp.time_report.?.stats.cpu_ns_parse += ns_parse;
        }

        timer = comp.startTimer();
        switch (file.getMode()) {
            .zig => {
                file.zir = try AstGen.generate(gpa, file.tree.?);
                Zcu.saveZirCache(gpa, cache_file, stat, file.zir.?) catch |err| switch (err) {
                    error.OutOfMemory => |e| return e,
                    else => log.warn("unable to write cached ZIR code for {f} to {f}{s}: {s}", .{
                        file.path.fmt(comp), cache_directory, &hex_digest, @errorName(err),
                    }),
                };
            },
            .zon => {
                file.zoir = try ZonGen.generate(gpa, file.tree.?, .{});
                Zcu.saveZoirCache(cache_file, stat, file.zoir.?) catch |err| {
                    log.warn("unable to write cached ZOIR code for {f} to {f}{s}: {s}", .{
                        file.path.fmt(comp), cache_directory, &hex_digest, @errorName(err),
                    });
                };
            },
        }
        if (timer.finish()) |ns_astgen| {
            comp.mutex.lock();
            defer comp.mutex.unlock();
            comp.time_report.?.stats.cpu_ns_astgen += ns_astgen;
        }

        log.debug("AstGen fresh success: {f}", .{file.path.fmt(comp)});
    }

    file.stat = .{
        .size = stat.size,
        .inode = stat.inode,
        .mtime = stat.mtime,
    };

    // Now, `zir` or `zoir` is definitely populated and up-to-date.
    // Mark file successes/failures as needed.

    switch (file.getMode()) {
        .zig => {
            if (file.zir.?.hasCompileErrors()) {
                comp.mutex.lock();
                defer comp.mutex.unlock();
                try zcu.failed_files.putNoClobber(gpa, file_index, null);
            }
            if (file.zir.?.loweringFailed()) {
                file.status = .astgen_failure;
            } else {
                file.status = .success;
            }
        },
        .zon => {
            if (file.zoir.?.hasCompileErrors()) {
                file.status = .astgen_failure;
                comp.mutex.lock();
                defer comp.mutex.unlock();
                try zcu.failed_files.putNoClobber(gpa, file_index, null);
            } else {
                file.status = .success;
            }
        },
    }

    switch (file.status) {
        .never_loaded => unreachable,
        .retryable_failure => unreachable,
        .astgen_failure, .success => {},
    }
}
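
// A rough sketch of how `updateFile` fits into an update, assuming a single-threaded driver
// with an activated `pt` (the real orchestration lives elsewhere in the compiler):
//
//     try pt.updateFile(file_index, file);   // reload source / ZIR cache, re-run AstGen if stale
//     try pt.updateZirRefs();                // remap `TrackedInst`s, mark dependees outdated
//     try pt.ensureFileAnalyzed(file_index); // re-run Sema on the file's root type if needed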

fn loadZirZoirCache(
    zcu: *Zcu,
    cache_file: std.fs.File,
    stat: std.fs.File.Stat,
    file: *Zcu.File,
    comptime mode: Ast.Mode,
) !enum { success, invalid, truncated, stale } {
    assert(file.getMode() == mode);

    const gpa = zcu.gpa;
    const io = zcu.comp.io;

    const Header = switch (mode) {
        .zig => Zir.Header,
        .zon => Zoir.Header,
    };

    var buffer: [2000]u8 = undefined;
    var cache_fr = cache_file.reader(io, &buffer);
    cache_fr.size = stat.size;
    const cache_br = &cache_fr.interface;

    // First we read the header to determine the lengths of arrays.
    const header = (cache_br.takeStructPointer(Header) catch |err| switch (err) {
        error.ReadFailed => return cache_fr.err.?,
        // This can happen if Zig bails out of this function between creating
        // the cached file and writing it.
        error.EndOfStream => return .invalid,
        else => |e| return e,
    }).*;

    const unchanged_metadata =
        stat.size == header.stat_size and
        stat.mtime.nanoseconds == header.stat_mtime and
        stat.inode == header.stat_inode;

    if (!unchanged_metadata) {
        return .stale;
    }

    switch (mode) {
        .zig => file.zir = Zcu.loadZirCacheBody(gpa, header, cache_br) catch |err| switch (err) {
            error.ReadFailed => return cache_fr.err.?,
            error.EndOfStream => return .truncated,
            else => |e| return e,
        },
        .zon => file.zoir = Zcu.loadZoirCacheBody(gpa, header, cache_br) catch |err| switch (err) {
            error.ReadFailed => return cache_fr.err.?,
            error.EndOfStream => return .truncated,
            else => |e| return e,
        },
    }

    return .success;
}

const UpdatedFile = struct {
    file: *Zcu.File,
    inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index),
};

fn cleanupUpdatedFiles(gpa: Allocator, updated_files: *std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile)) void {
    for (updated_files.values()) |*elem| elem.inst_map.deinit(gpa);
    updated_files.deinit(gpa);
}

pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
    assert(pt.tid == .main);
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const ip = &zcu.intern_pool;
    const gpa = zcu.gpa;

    // We need to visit every updated File for every TrackedInst in InternPool.
    // This only includes Zig files; ZON files are omitted.
    var updated_files: std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile) = .empty;
    defer cleanupUpdatedFiles(gpa, &updated_files);

    for (zcu.import_table.keys()) |file_index| {
        if (!zcu.alive_files.contains(file_index)) continue;
        const file = zcu.fileByIndex(file_index);
        assert(file.status == .success);
        if (file.module_changed) {
            try updated_files.putNoClobber(gpa, file_index, .{
                .file = file,
                // We intentionally don't map any instructions here; that's the point, the whole file is outdated!
                .inst_map = .{},
            });
            continue;
        }
        switch (file.getMode()) {
            .zig => {}, // logic below
            .zon => {
                if (file.zoir_invalidated) {
                    try zcu.markDependeeOutdated(.not_marked_po, .{ .zon_file = file_index });
                    file.zoir_invalidated = false;
                }
                continue;
            },
        }
        const old_zir = file.prev_zir orelse continue;
        const new_zir = file.zir.?;
        const gop = try updated_files.getOrPut(gpa, file_index);
        assert(!gop.found_existing);
        gop.value_ptr.* = .{
            .file = file,
            .inst_map = .{},
        };
        try Zcu.mapOldZirToNew(gpa, old_zir.*, new_zir, &gop.value_ptr.inst_map);
    }

    if (updated_files.count() == 0)
        return;

    for (ip.locals, 0..) |*local, tid| {
        const tracked_insts_list = local.getMutableTrackedInsts(gpa);
        for (tracked_insts_list.viewAllowEmpty().items(.@"0"), 0..) |*tracked_inst, tracked_inst_unwrapped_index| {
            const file_index = tracked_inst.file;
            const updated_file = updated_files.get(file_index) orelse continue;

            const file = updated_file.file;

            const old_inst = tracked_inst.inst.unwrap() orelse continue; // we can't continue tracking lost insts
            const tracked_inst_index = (InternPool.TrackedInst.Index.Unwrapped{
                .tid = @enumFromInt(tid),
                .index = @intCast(tracked_inst_unwrapped_index),
            }).wrap(ip);
            const new_inst = updated_file.inst_map.get(old_inst) orelse {
                // Tracking failed for this instruction due to changes in the ZIR.
                // Invalidate associated `src_hash` deps.
                log.debug("tracking failed for %{d}", .{old_inst});
                tracked_inst.inst = .lost;
                try zcu.markDependeeOutdated(.not_marked_po, .{ .src_hash = tracked_inst_index });
                continue;
            };
            tracked_inst.inst = InternPool.TrackedInst.MaybeLost.ZirIndex.wrap(new_inst);

            const old_zir = file.prev_zir.?.*;
            const new_zir = file.zir.?;
            const old_tag = old_zir.instructions.items(.tag)[@intFromEnum(old_inst)];
            const old_data = old_zir.instructions.items(.data)[@intFromEnum(old_inst)];

            switch (old_tag) {
                .declaration => {
                    const old_line = old_zir.getDeclaration(old_inst).src_line;
                    const new_line = new_zir.getDeclaration(new_inst).src_line;
                    if (old_line != new_line) {
                        try comp.queueJob(.{ .update_line_number = tracked_inst_index });
                    }
                },
                else => {},
            }

            if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: {
                if (new_zir.getAssociatedSrcHash(new_inst)) |new_hash| {
                    if (std.zig.srcHashEql(old_hash, new_hash)) {
                        break :hash_changed;
                    }
                    log.debug("hash for (%{d} -> %{d}) changed: {x} -> {x}", .{
                        old_inst, new_inst, &old_hash, &new_hash,
                    });
                }
                // The source hash associated with this instruction changed - invalidate relevant dependencies.
                try zcu.markDependeeOutdated(.not_marked_po, .{ .src_hash = tracked_inst_index });
            }

            // If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies.
            const has_namespace = switch (old_tag) {
                .extended => switch (old_data.extended.opcode) {
                    .struct_decl, .union_decl, .opaque_decl, .enum_decl => true,
                    else => false,
                },
                else => false,
            };
            if (!has_namespace) continue;

            // Value is whether the declaration is `pub`.
            var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, bool) = .empty;
            defer old_names.deinit(zcu.gpa);
            {
                var it = old_zir.declIterator(old_inst);
                while (it.next()) |decl_inst| {
                    const old_decl = old_zir.getDeclaration(decl_inst);
                    if (old_decl.name == .empty) continue;
                    const name_ip = try zcu.intern_pool.getOrPutString(
                        zcu.gpa,
                        pt.tid,
                        old_zir.nullTerminatedString(old_decl.name),
                        .no_embedded_nulls,
                    );
                    try old_names.put(zcu.gpa, name_ip, old_decl.is_pub);
                }
            }
            var any_change = false;
            {
                var it = new_zir.declIterator(new_inst);
                while (it.next()) |decl_inst| {
                    const new_decl = new_zir.getDeclaration(decl_inst);
                    if (new_decl.name == .empty) continue;
                    const name_ip = try zcu.intern_pool.getOrPutString(
                        zcu.gpa,
                        pt.tid,
                        new_zir.nullTerminatedString(new_decl.name),
                        .no_embedded_nulls,
                    );
                    if (old_names.fetchSwapRemove(name_ip)) |kv| {
                        if (kv.value == new_decl.is_pub) continue;
                    }
                    // Name added, or changed whether it's pub
                    any_change = true;
                    try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace_name = .{
                        .namespace = tracked_inst_index,
                        .name = name_ip,
                    } });
                }
            }
            // The only elements remaining in `old_names` now are any names which were removed.
            for (old_names.keys()) |name_ip| {
                any_change = true;
                try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace_name = .{
                    .namespace = tracked_inst_index,
                    .name = name_ip,
                } });
            }
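            // For example (a hypothetical edit): renaming `pub const foo` to `pub const bar` in a
            // struct body shows up here as `bar` added and `foo` removed, so the `namespace_name`
            // dependees for both names are invalidated, along with the whole namespace below.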

            if (any_change) {
                try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace = tracked_inst_index });
            }
        }
    }

    try ip.rehashTrackedInsts(gpa, pt.tid);

    for (updated_files.keys(), updated_files.values()) |file_index, updated_file| {
        const file = updated_file.file;

        if (file.prev_zir) |prev_zir| {
            prev_zir.deinit(gpa);
            gpa.destroy(prev_zir);
            file.prev_zir = null;
        }
        file.module_changed = false;

        // For every file which has changed, re-scan the namespace of the file's root struct type.
        // These types are special-cased because they don't have an enclosing declaration which will
        // be re-analyzed (causing the struct's namespace to be re-scanned). It's fine to do this
        // now because this work is fast (no actual Sema work is happening, we're just updating the
        // namespace contents). We must do this after updating ZIR refs above, since `scanNamespace`
        // will track some instructions.
        try pt.updateFileNamespace(file_index);
    }
}

/// Ensures that `zcu.fileRootType` on this `file_index` gives an up-to-date answer.
/// Returns `error.AnalysisFail` if the file has an error.
pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
    const file_root_type = pt.zcu.fileRootType(file_index);
    if (file_root_type != .none) {
        if (pt.ensureTypeUpToDate(file_root_type)) |_| {
            return;
        } else |err| switch (err) {
            error.AnalysisFail => {
                // The file's root `struct_decl` has, at some point, been lost, because the file failed AstGen.
                // Clear `file_root_type`, and try the `semaFile` call below, in case the instruction has since
                // been discovered under a new `TrackedInst.Index`.
                pt.zcu.setFileRootType(file_index, .none);
            },
            else => |e| return e,
        }
    }
    return pt.semaFile(file_index);
}

/// Ensures that all memoized state on `Zcu` is up-to-date, performing re-analysis if necessary.
/// Returns `error.AnalysisFail` if an analysis error is encountered; the caller is free to ignore
/// this, since the error is already registered, but it must not use the value of memoized fields.
pub fn ensureMemoizedStateUpToDate(pt: Zcu.PerThread, stage: InternPool.MemoizedStateStage) Zcu.SemaError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const zcu = pt.zcu;
    const gpa = zcu.gpa;

    const unit: AnalUnit = .wrap(.{ .memoized_state = stage });

    log.debug("ensureMemoizedStateUpToDate", .{});

    assert(!zcu.analysis_in_progress.contains(unit));

    const was_outdated = zcu.outdated.swapRemove(unit) or zcu.potentially_outdated.swapRemove(unit);
    const prev_failed = zcu.failed_analysis.contains(unit) or zcu.transitive_failed_analysis.contains(unit);

    if (was_outdated) {
        dev.check(.incremental);
        _ = zcu.outdated_ready.swapRemove(unit);
        // No need for `deleteUnitExports` because we never export anything.
        zcu.deleteUnitReferences(unit);
        zcu.deleteUnitCompileLogs(unit);
        if (zcu.failed_analysis.fetchSwapRemove(unit)) |kv| {
            kv.value.destroy(gpa);
        }
        _ = zcu.transitive_failed_analysis.swapRemove(unit);
    } else {
        if (prev_failed) return error.AnalysisFail;
        // We use an arbitrary element to check if the state has been resolved yet.
        const to_check: Zcu.BuiltinDecl = switch (stage) {
            .main => .Type,
            .panic => .panic,
            .va_list => .VaList,
            .assembly => .assembly,
        };
        if (zcu.builtin_decl_values.get(to_check) != .none) return;
    }

    if (zcu.comp.debugIncremental()) {
        const info = try zcu.incremental_debug_state.getUnitInfo(gpa, unit);
        info.last_update_gen = zcu.generation;
        info.deps.clearRetainingCapacity();
    }

    const any_changed: bool, const new_failed: bool = if (pt.analyzeMemoizedState(stage)) |any_changed|
        .{ any_changed or prev_failed, false }
    else |err| switch (err) {
        error.AnalysisFail => res: {
            if (!zcu.failed_analysis.contains(unit)) {
                // If this unit caused the error, it would have an entry in `failed_analysis`.
                // Since it does not, this must be a transitive failure.
                try zcu.transitive_failed_analysis.put(gpa, unit, {});
                log.debug("mark transitive analysis failure for {f}", .{zcu.fmtAnalUnit(unit)});
            }
            break :res .{ !prev_failed, true };
        },
        error.OutOfMemory => {
            // TODO: same as for `ensureComptimeUnitUpToDate` etc
            return error.OutOfMemory;
        },
        error.Canceled => |e| return e,
        error.ComptimeReturn => unreachable,
        error.ComptimeBreak => unreachable,
    };

    if (was_outdated) {
        const dependee: InternPool.Dependee = .{ .memoized_state = stage };
        if (any_changed) {
            try zcu.markDependeeOutdated(.marked_po, dependee);
        } else {
            try zcu.markPoDependeeUpToDate(dependee);
        }
    }

    if (new_failed) return error.AnalysisFail;
}

fn analyzeMemoizedState(pt: Zcu.PerThread, stage: InternPool.MemoizedStateStage) Zcu.CompileError!bool {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const gpa = zcu.gpa;

    const unit: AnalUnit = .wrap(.{ .memoized_state = stage });

    try zcu.analysis_in_progress.putNoClobber(gpa, unit, {});
    defer assert(zcu.analysis_in_progress.swapRemove(unit));

    // Before we begin, collect:
    // * The type `std`, and its namespace
    // * The type `std.builtin`, and its namespace
    // * A semi-reasonable source location
    const std_file_index = zcu.module_roots.get(zcu.std_mod).?.unwrap().?;
    try pt.ensureFileAnalyzed(std_file_index);
    const std_type: Type = .fromInterned(zcu.fileRootType(std_file_index));
    const std_namespace = std_type.getNamespaceIndex(zcu);
    try pt.ensureNamespaceUpToDate(std_namespace);
    const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls);
    const builtin_nav = zcu.namespacePtr(std_namespace).pub_decls.getKeyAdapted(builtin_str, Zcu.Namespace.NameAdapter{ .zcu = zcu }) orelse
        @panic("lib/std.zig is corrupt and missing 'builtin'");
    try pt.ensureNavValUpToDate(builtin_nav);
    const builtin_type: Type = .fromInterned(ip.getNav(builtin_nav).status.fully_resolved.val);
    const builtin_namespace = builtin_type.getNamespaceIndex(zcu);
    try pt.ensureNamespaceUpToDate(builtin_namespace);
    const src: Zcu.LazySrcLoc = .{
        .base_node_inst = builtin_type.typeDeclInst(zcu).?,
        .offset = .{ .byte_abs = 0 },
    };

    var analysis_arena: std.heap.ArenaAllocator = .init(gpa);
    defer analysis_arena.deinit();

    var comptime_err_ret_trace: std.array_list.Managed(Zcu.LazySrcLoc) = .init(gpa);
    defer comptime_err_ret_trace.deinit();

    var sema: Sema = .{
        .pt = pt,
        .gpa = gpa,
        .arena = analysis_arena.allocator(),
        .code = .{ .instructions = .empty, .string_bytes = &.{}, .extra = &.{} },
        .owner = unit,
        .func_index = .none,
        .func_is_naked = false,
        .fn_ret_ty = .void,
        .fn_ret_ty_ies = null,
        .comptime_err_ret_trace = &comptime_err_ret_trace,
    };
    defer sema.deinit();

    var block: Sema.Block = .{
        .parent = null,
        .sema = &sema,
        .namespace = std_namespace,
        .instructions = .{},
        .inlining = null,
        .comptime_reason = .{ .reason = .{
            .src = src,
            .r = .{ .simple = .type },
        } },
        .src_base_inst = src.base_node_inst,
        .type_name_ctx = .empty,
    };
    defer block.instructions.deinit(gpa);

    return sema.analyzeMemoizedState(&block, src, builtin_namespace, stage);
}

/// Ensures that the state of the given `ComptimeUnit` is fully up-to-date, performing re-analysis
/// if necessary. Returns `error.AnalysisFail` if an analysis error is encountered; the caller is
/// free to ignore this, since the error is already registered.
pub fn ensureComptimeUnitUpToDate(pt: Zcu.PerThread, cu_id: InternPool.ComptimeUnit.Id) Zcu.SemaError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const zcu = pt.zcu;
    const gpa = zcu.gpa;

    const anal_unit: AnalUnit = .wrap(.{ .@"comptime" = cu_id });

    log.debug("ensureComptimeUnitUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});

    assert(!zcu.analysis_in_progress.contains(anal_unit));

    // Determine whether or not this `ComptimeUnit` is outdated. For this kind of `AnalUnit`, that's
    // the only indicator as to whether or not analysis is required; when a `ComptimeUnit` is first
    // created, it's marked as outdated.
    //
    // Note that if the unit is PO, we pessimistically assume that it *does* require re-analysis, to
    // ensure that the unit is definitely up-to-date when this function returns. This mechanism could
    // result in over-analysis if analysis occurs in a poor order; we do our best to avoid this by
    // carefully choosing which units to re-analyze. See `Zcu.findOutdatedToAnalyze`.

    const was_outdated = zcu.outdated.swapRemove(anal_unit) or
        zcu.potentially_outdated.swapRemove(anal_unit);

    if (was_outdated) {
        _ = zcu.outdated_ready.swapRemove(anal_unit);
        // `was_outdated` can be true in the initial update for comptime units, so this isn't a `dev.check`.
        if (dev.env.supports(.incremental)) {
            zcu.deleteUnitExports(anal_unit);
            zcu.deleteUnitReferences(anal_unit);
            zcu.deleteUnitCompileLogs(anal_unit);
            if (zcu.failed_analysis.fetchSwapRemove(anal_unit)) |kv| {
                kv.value.destroy(gpa);
            }
            _ = zcu.transitive_failed_analysis.swapRemove(anal_unit);
            zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit);
        }
    } else {
        // We can trust the current information about this unit.
        if (zcu.failed_analysis.contains(anal_unit)) return error.AnalysisFail;
        if (zcu.transitive_failed_analysis.contains(anal_unit)) return error.AnalysisFail;
        return;
    }

    if (zcu.comp.debugIncremental()) {
        const info = try zcu.incremental_debug_state.getUnitInfo(gpa, anal_unit);
        info.last_update_gen = zcu.generation;
        info.deps.clearRetainingCapacity();
    }

    const unit_tracking = zcu.trackUnitSema(
        "comptime",
        zcu.intern_pool.getComptimeUnit(cu_id).zir_index,
    );
    defer unit_tracking.end(zcu);

    return pt.analyzeComptimeUnit(cu_id) catch |err| switch (err) {
        error.AnalysisFail => {
            if (!zcu.failed_analysis.contains(anal_unit)) {
                // If this unit caused the error, it would have an entry in `failed_analysis`.
                // Since it does not, this must be a transitive failure.
                try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
                log.debug("mark transitive analysis failure for {f}", .{zcu.fmtAnalUnit(anal_unit)});
            }
            return error.AnalysisFail;
        },
        error.OutOfMemory => {
            // TODO: it's unclear how to gracefully handle this.
            // To report the error cleanly, we need to add a message to `failed_analysis` and a
            // corresponding entry to `retryable_failures`; but either of these things is quite
            // likely to OOM at this point.
            // If that happens, what do we do? Perhaps we could have a special field on `Zcu`
            // for reporting OOM errors without allocating.
            return error.OutOfMemory;
        },
        error.Canceled => |e| return e,
        error.ComptimeReturn => unreachable,
        error.ComptimeBreak => unreachable,
    };
}

/// Re-analyzes a `ComptimeUnit`. The unit has already been determined to be out-of-date, and old
/// side effects (exports/references/etc) have been dropped. If semantic analysis fails, this
/// function will return `error.AnalysisFail`, and it is the caller's responsibility to add an entry
/// to `transitive_failed_analysis` if necessary.
fn analyzeComptimeUnit(pt: Zcu.PerThread, cu_id: InternPool.ComptimeUnit.Id) Zcu.CompileError!void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;

    const anal_unit: AnalUnit = .wrap(.{ .@"comptime" = cu_id });
    const comptime_unit = ip.getComptimeUnit(cu_id);

    log.debug("analyzeComptimeUnit {f}", .{zcu.fmtAnalUnit(anal_unit)});

    const inst_resolved = comptime_unit.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
    const file = zcu.fileByIndex(inst_resolved.file);
    const zir = file.zir.?;

    try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, {});
    defer assert(zcu.analysis_in_progress.swapRemove(anal_unit));

    var analysis_arena: std.heap.ArenaAllocator = .init(gpa);
    defer analysis_arena.deinit();

    var comptime_err_ret_trace: std.array_list.Managed(Zcu.LazySrcLoc) = .init(gpa);
    defer comptime_err_ret_trace.deinit();

    var sema: Sema = .{
        .pt = pt,
        .gpa = gpa,
        .arena = analysis_arena.allocator(),
        .code = zir,
        .owner = anal_unit,
        .func_index = .none,
        .func_is_naked = false,
        .fn_ret_ty = .void,
        .fn_ret_ty_ies = null,
        .comptime_err_ret_trace = &comptime_err_ret_trace,
    };
    defer sema.deinit();

    // The comptime unit declares a dependency on the source of the corresponding `comptime` declaration.
    try sema.declareDependency(.{ .src_hash = comptime_unit.zir_index });

    var block: Sema.Block = .{
        .parent = null,
        .sema = &sema,
        .namespace = comptime_unit.namespace,
        .instructions = .{},
        .inlining = null,
        .comptime_reason = .{ .reason = .{
            .src = .{
                .base_node_inst = comptime_unit.zir_index,
                .offset = .{ .token_offset = .zero },
            },
            .r = .{ .simple = .comptime_keyword },
        } },
        .src_base_inst = comptime_unit.zir_index,
        .type_name_ctx = try ip.getOrPutStringFmt(gpa, pt.tid, "{f}.comptime", .{
            Type.fromInterned(zcu.namespacePtr(comptime_unit.namespace).owner_type).containerTypeName(ip).fmt(ip),
        }, .no_embedded_nulls),
    };
    defer block.instructions.deinit(gpa);

    const zir_decl = zir.getDeclaration(inst_resolved.inst);
    assert(zir_decl.kind == .@"comptime");
    assert(zir_decl.type_body == null);
    assert(zir_decl.align_body == null);
    assert(zir_decl.linksection_body == null);
    assert(zir_decl.addrspace_body == null);
    const value_body = zir_decl.value_body.?;

    const result_ref = try sema.resolveInlineBody(&block, value_body, inst_resolved.inst);
    assert(result_ref == .void_value); // AstGen should always uphold this

    // Nothing else to do -- for a comptime decl, all we care about are the side effects.
    // Just make sure to `flushExports`.
    try sema.flushExports();
}
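
// For reference, a source-level block like `comptime { ... }` (whose body must evaluate to
// `void`) is what `analyzeComptimeUnit` handles; only its side effects, such as compile
// errors and exports, are of interest.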

/// Ensures that the resolved value of the given `Nav` is fully up-to-date, performing re-analysis
/// if necessary. Returns `error.AnalysisFail` if an analysis error is encountered; the caller is
/// free to ignore this, since the error is already registered.
pub fn ensureNavValUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.SemaError!void {
    const tracy = trace(@src());
    defer tracy.end();

    // TODO: document this elsewhere mlugg!
    // For my own benefit, here's how a namespace update for a normal (non-file-root) type works:
    // `const S = struct { ... };`
    // We are adding or removing a declaration within this `struct`.
    // * `S` registers a dependency on `.{ .src_hash = (declaration of S) }`
    // * Any change to the `struct` body -- including changing a declaration -- invalidates this
    // * `S` is re-analyzed, but notes:
    //   * there is an existing struct instance (at this `TrackedInst` with these captures)
    //   * the struct's resolution is up-to-date (because nothing about the fields changed)
    // * so, it uses the same `struct`
    // * but this doesn't stop it from updating the namespace!
    //   * we basically do `scanDecls`, updating the namespace as needed
    // * so everyone lived happily ever after

    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;

    _ = zcu.nav_val_analysis_queued.swapRemove(nav_id);

    const anal_unit: AnalUnit = .wrap(.{ .nav_val = nav_id });
    const nav = ip.getNav(nav_id);

    log.debug("ensureNavValUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});

    assert(!zcu.analysis_in_progress.contains(anal_unit));

    // Determine whether or not this `Nav`'s value is outdated. This also includes checking if the
    // status is `.unresolved`, which indicates that the value is outdated because it has *never*
    // been analyzed so far.
    //
    // Note that if the unit is PO, we pessimistically assume that it *does* require re-analysis, to
    // ensure that the unit is definitely up-to-date when this function returns. This mechanism could
    // result in over-analysis if analysis occurs in a poor order; we do our best to avoid this by
    // carefully choosing which units to re-analyze. See `Zcu.findOutdatedToAnalyze`.

    const was_outdated = zcu.outdated.swapRemove(anal_unit) or
        zcu.potentially_outdated.swapRemove(anal_unit);

    const prev_failed = zcu.failed_analysis.contains(anal_unit) or
        zcu.transitive_failed_analysis.contains(anal_unit);

    if (was_outdated) {
        dev.check(.incremental);
        _ = zcu.outdated_ready.swapRemove(anal_unit);
        zcu.deleteUnitExports(anal_unit);
        zcu.deleteUnitReferences(anal_unit);
        zcu.deleteUnitCompileLogs(anal_unit);
        if (zcu.failed_analysis.fetchSwapRemove(anal_unit)) |kv| {
            kv.value.destroy(gpa);
        }
        _ = zcu.transitive_failed_analysis.swapRemove(anal_unit);
        ip.removeDependenciesForDepender(gpa, anal_unit);
    } else {
        // We can trust the current information about this unit.
        if (prev_failed) return error.AnalysisFail;
        switch (nav.status) {
            .unresolved, .type_resolved => {},
            .fully_resolved => return,
        }
    }

    if (zcu.comp.debugIncremental()) {
        const info = try zcu.incremental_debug_state.getUnitInfo(gpa, anal_unit);
        info.last_update_gen = zcu.generation;
        info.deps.clearRetainingCapacity();
    }

    const unit_tracking = zcu.trackUnitSema(nav.fqn.toSlice(ip), nav.srcInst(ip));
    defer unit_tracking.end(zcu);

    const invalidate_value: bool, const new_failed: bool = if (pt.analyzeNavVal(nav_id)) |result| res: {
        break :res .{
            // If the unit has gone from failed to success, we still need to invalidate the dependencies.
            result.val_changed or prev_failed,
            false,
        };
    } else |err| switch (err) {
        error.AnalysisFail => res: {
            if (!zcu.failed_analysis.contains(anal_unit)) {
                // If this unit caused the error, it would have an entry in `failed_analysis`.
                // Since it does not, this must be a transitive failure.
                try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
                log.debug("mark transitive analysis failure for {f}", .{zcu.fmtAnalUnit(anal_unit)});
            }
            break :res .{ !prev_failed, true };
        },
        error.OutOfMemory => {
            // TODO: it's unclear how to gracefully handle this.
            // To report the error cleanly, we need to add a message to `failed_analysis` and a
            // corresponding entry to `retryable_failures`; but either of these things is quite
            // likely to OOM at this point.
            // If that happens, what do we do? Perhaps we could have a special field on `Zcu`
            // for reporting OOM errors without allocating.
            return error.OutOfMemory;
        },
        error.Canceled => |e| return e,
        error.ComptimeReturn => unreachable,
        error.ComptimeBreak => unreachable,
    };

    if (was_outdated) {
        const dependee: InternPool.Dependee = .{ .nav_val = nav_id };
        if (invalidate_value) {
            // This dependency was marked as PO, meaning dependees were waiting
            // on its analysis result, and it has turned out to be outdated.
            // Update dependees accordingly.
            try zcu.markDependeeOutdated(.marked_po, dependee);
        } else {
            // This dependency was previously PO, but turned out to be up-to-date.
            // We do not need to queue successive analysis.
            try zcu.markPoDependeeUpToDate(dependee);
        }
    }

    // If there isn't a type annotation, then we have also just resolved the type. That means
    // the type is up-to-date, so it won't have the chance to mark its own dependency on the value;
    // we must do that ourselves.
    type_deps_on_val: {
        const inst_resolved = nav.analysis.?.zir_index.resolveFull(ip) orelse break :type_deps_on_val;
        const file = zcu.fileByIndex(inst_resolved.file);
        const zir_decl = file.zir.?.getDeclaration(inst_resolved.inst);
        if (zir_decl.type_body != null) break :type_deps_on_val;
        // The type does indeed depend on the value. We are responsible for populating all state of
        // the `nav_ty`, including exports, references, errors, and dependencies.
        const ty_unit: AnalUnit = .wrap(.{ .nav_ty = nav_id });
        const ty_was_outdated = zcu.outdated.swapRemove(ty_unit) or
            zcu.potentially_outdated.swapRemove(ty_unit);
        if (ty_was_outdated) {
            _ = zcu.outdated_ready.swapRemove(ty_unit);
            zcu.deleteUnitExports(ty_unit);
            zcu.deleteUnitReferences(ty_unit);
            zcu.deleteUnitCompileLogs(ty_unit);
            if (zcu.failed_analysis.fetchSwapRemove(ty_unit)) |kv| {
                kv.value.destroy(gpa);
            }
            _ = zcu.transitive_failed_analysis.swapRemove(ty_unit);
            ip.removeDependenciesForDepender(gpa, ty_unit);
        }
        try pt.addDependency(ty_unit, .{ .nav_val = nav_id });
        if (new_failed) try zcu.transitive_failed_analysis.put(gpa, ty_unit, {});
        if (ty_was_outdated) try zcu.markDependeeOutdated(.marked_po, .{ .nav_ty = nav_id });
    }

    if (new_failed) return error.AnalysisFail;
}

fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileError!struct { val_changed: bool } {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;

    const anal_unit: AnalUnit = .wrap(.{ .nav_val = nav_id });
    const old_nav = ip.getNav(nav_id);

    log.debug("analyzeNavVal {f}", .{zcu.fmtAnalUnit(anal_unit)});

    const inst_resolved = old_nav.analysis.?.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
    const file = zcu.fileByIndex(inst_resolved.file);
    const zir = file.zir.?;
    const zir_decl = zir.getDeclaration(inst_resolved.inst);

    try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, {});
    errdefer _ = zcu.analysis_in_progress.swapRemove(anal_unit);

    // If there's no type body, we are also resolving the type here.
    if (zir_decl.type_body == null) {
        try zcu.analysis_in_progress.putNoClobber(gpa, .wrap(.{ .nav_ty = nav_id }), {});
    }
    errdefer if (zir_decl.type_body == null) {
        _ = zcu.analysis_in_progress.swapRemove(.wrap(.{ .nav_ty = nav_id }));
    };

    var analysis_arena: std.heap.ArenaAllocator = .init(gpa);
    defer analysis_arena.deinit();

    var comptime_err_ret_trace: std.array_list.Managed(Zcu.LazySrcLoc) = .init(gpa);
    defer comptime_err_ret_trace.deinit();

    var sema: Sema = .{
        .pt = pt,
        .gpa = gpa,
        .arena = analysis_arena.allocator(),
        .code = zir,
        .owner = anal_unit,
        .func_index = .none,
        .func_is_naked = false,
        .fn_ret_ty = .void,
        .fn_ret_ty_ies = null,
        .comptime_err_ret_trace = &comptime_err_ret_trace,
    };
    defer sema.deinit();

    // Every `Nav` declares a dependency on the source of the corresponding declaration.
    try sema.declareDependency(.{ .src_hash = old_nav.analysis.?.zir_index });

    // In theory, we would also add a reference to the corresponding `nav_val` unit here: there are
    // always references in both directions between a `nav_val` and `nav_ty`. However, to save memory,
    // these references are known implicitly. See logic in `Zcu.resolveReferences`.

    var block: Sema.Block = .{
        .parent = null,
        .sema = &sema,
        .namespace = old_nav.analysis.?.namespace,
        .instructions = .{},
        .inlining = null,
        .comptime_reason = undefined, // set below
        .src_base_inst = old_nav.analysis.?.zir_index,
        .type_name_ctx = old_nav.fqn,
    };
    defer block.instructions.deinit(gpa);

    const ty_src = block.src(.{ .node_offset_var_decl_ty = .zero });
    const init_src = block.src(.{ .node_offset_var_decl_init = .zero });
    const align_src = block.src(.{ .node_offset_var_decl_align = .zero });
    const section_src = block.src(.{ .node_offset_var_decl_section = .zero });
    const addrspace_src = block.src(.{ .node_offset_var_decl_addrspace = .zero });

    block.comptime_reason = .{ .reason = .{
        .src = init_src,
        .r = .{ .simple = .container_var_init },
    } };
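
    // Roughly speaking: a declaration like `const x: u32 = 123;` has both a type body (`u32`)
    // and a value body (`123`), while `const y = foo();` has only a value body, in which case
    // the type is inferred from the resolved value below.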

    const maybe_ty: ?Type = if (zir_decl.type_body != null) ty: {
        // Since we have a type body, the type is resolved separately!
        // Of course, we need to make sure we depend on it properly.
        try sema.declareDependency(.{ .nav_ty = nav_id });
        try pt.ensureNavTypeUpToDate(nav_id);
        break :ty .fromInterned(ip.getNav(nav_id).typeOf(ip));
    } else null;

    const final_val: ?Value = if (zir_decl.value_body) |value_body| val: {
        if (maybe_ty) |ty| {
            // Put the resolved type into `inst_map` to be used as the result type of the init.
            try sema.inst_map.ensureSpaceForInstructions(gpa, &.{inst_resolved.inst});
            sema.inst_map.putAssumeCapacity(inst_resolved.inst, Air.internedToRef(ty.toIntern()));
            const uncoerced_result_ref = try sema.resolveInlineBody(&block, value_body, inst_resolved.inst);
            assert(sema.inst_map.remove(inst_resolved.inst));

            const result_ref = try sema.coerce(&block, ty, uncoerced_result_ref, init_src);
            break :val try sema.resolveFinalDeclValue(&block, init_src, result_ref);
        } else {
            // Just analyze the value; we have no type to offer.
            const result_ref = try sema.resolveInlineBody(&block, value_body, inst_resolved.inst);
            break :val try sema.resolveFinalDeclValue(&block, init_src, result_ref);
        }
    } else null;

    const nav_ty: Type = maybe_ty orelse final_val.?.typeOf(zcu);

    // At this point, the declaration's type has been resolved: from the type body if one is present,
    // or otherwise by analyzing the value body, populating `final_val` in the process.
1193
1194    const is_const = is_const: switch (zir_decl.kind) {
1195        .@"comptime" => unreachable, // this is not a Nav
1196        .unnamed_test, .@"test", .decltest => {
1197            assert(nav_ty.zigTypeTag(zcu) == .@"fn");
1198            break :is_const true;
1199        },
1200        .@"const" => true,
1201        .@"var" => {
1202            try sema.validateVarType(
1203                &block,
1204                if (zir_decl.type_body != null) ty_src else init_src,
1205                nav_ty,
1206                zir_decl.linkage == .@"extern",
1207            );
1208            break :is_const false;
1209        },
1210    };
1211
1212    // Now that we know the type, we can evaluate the alignment, linksection, and addrspace, to determine
1213    // the full pointer type of this declaration.
1214
1215    const modifiers: Sema.NavPtrModifiers = if (zir_decl.type_body != null) m: {
1216        // `analyzeNavType` (from the `ensureNavTypeUpToDate` call above) has already populated this data into
1217        // the `Nav`. Load the new one, and pull the modifiers out.
1218        switch (ip.getNav(nav_id).status) {
1219            .unresolved => unreachable, // `analyzeNavType` will never leave us in this state
1220            inline .type_resolved, .fully_resolved => |r| break :m .{
1221                .alignment = r.alignment,
1222                .@"linksection" = r.@"linksection",
1223                .@"addrspace" = r.@"addrspace",
1224            },
1225        }
1226    } else m: {
1227        // `analyzeNavType` is essentially a stub which calls us. We are responsible for resolving this data.
1228        break :m try sema.resolveNavPtrModifiers(&block, zir_decl, inst_resolved.inst, nav_ty);
1229    };
1230
1231    // Lastly, we must figure out the actual interned value to store to the `Nav`.
1232    // This isn't necessarily the same as `final_val`!
1233
1234    const nav_val: Value = switch (zir_decl.linkage) {
1235        .normal, .@"export" => switch (zir_decl.kind) {
1236            .@"var" => .fromInterned(try pt.intern(.{ .variable = .{
1237                .ty = nav_ty.toIntern(),
1238                .init = final_val.?.toIntern(),
1239                .owner_nav = nav_id,
1240                .is_threadlocal = zir_decl.is_threadlocal,
1241            } })),
1242            else => final_val.?,
1243        },
1244        .@"extern" => val: {
1245            assert(final_val == null); // extern decls do not have a value body
1246            const lib_name: ?[]const u8 = if (zir_decl.lib_name != .empty) l: {
1247                break :l zir.nullTerminatedString(zir_decl.lib_name);
1248            } else null;
1249            if (lib_name) |l| {
1250                const lib_name_src = block.src(.{ .node_offset_lib_name = .zero });
1251                try sema.handleExternLibName(&block, lib_name_src, l);
1252            }
1253            break :val .fromInterned(try pt.getExtern(.{
1254                .name = old_nav.name,
1255                .ty = nav_ty.toIntern(),
1256                .lib_name = try ip.getOrPutStringOpt(gpa, pt.tid, lib_name, .no_embedded_nulls),
1257                .is_threadlocal = zir_decl.is_threadlocal,
1258                .linkage = .strong,
1259                .visibility = .default,
1260                .is_dll_import = false,
1261                .relocation = .any,
1262                .is_const = is_const,
1263                .alignment = modifiers.alignment,
1264                .@"addrspace" = modifiers.@"addrspace",
1265                .zir_index = old_nav.analysis.?.zir_index, // `declaration` instruction
1266                .owner_nav = undefined, // ignored by `getExtern`
1267                .source = .syntax,
1268            }));
1269        },
1270    };
1271
1272    switch (nav_val.toIntern()) {
1273        .unreachable_value => unreachable, // assertion failure
1274        else => {},
1275    }
1276
1277    // This resolves the type of the resolved value, not that value itself. If `nav_val` is a struct type,
1278    // this resolves the type `type` (which needs no resolution), not the struct itself.
1279    try nav_ty.resolveLayout(pt);
1280
1281    const queue_linker_work, const is_owned_fn = switch (ip.indexToKey(nav_val.toIntern())) {
1282        .func => |f| .{ true, f.owner_nav == nav_id }, // note that this lets function aliases reach codegen
1283        .variable => |v| .{ v.owner_nav == nav_id, false },
1284        .@"extern" => |e| .{
1285            false,
1286            Type.fromInterned(e.ty).zigTypeTag(zcu) == .@"fn" and zir_decl.linkage == .@"extern",
1287        },
1288        else => .{ true, false },
1289    };
1290
1291    if (is_owned_fn) {
1292        // linksection etc. are legal, except that some targets do not support function alignment.
1293        if (zir_decl.align_body != null and !target_util.supportsFunctionAlignment(zcu.getTarget())) {
1294            return sema.fail(&block, align_src, "target does not support function alignment", .{});
1295        }
1296    } else if (try nav_ty.comptimeOnlySema(pt)) {
1297        // alignment, linksection, addrspace annotations are not allowed for comptime-only types.
1298        const reason: []const u8 = switch (ip.indexToKey(nav_val.toIntern())) {
1299            .func => "function alias", // slightly clearer message, since you *can* specify these on function *declarations*
1300            else => "comptime-only type",
1301        };
1302        if (zir_decl.align_body != null) {
1303            return sema.fail(&block, align_src, "cannot specify alignment of {s}", .{reason});
1304        }
1305        if (zir_decl.linksection_body != null) {
1306            return sema.fail(&block, section_src, "cannot specify linksection of {s}", .{reason});
1307        }
1308        if (zir_decl.addrspace_body != null) {
1309            return sema.fail(&block, addrspace_src, "cannot specify addrspace of {s}", .{reason});
1310        }
1311    }
1312
1313    ip.resolveNavValue(nav_id, .{
1314        .val = nav_val.toIntern(),
1315        .is_const = is_const,
1316        .alignment = modifiers.alignment,
1317        .@"linksection" = modifiers.@"linksection",
1318        .@"addrspace" = modifiers.@"addrspace",
1319    });
1320
1321    // Mark the unit as completed before evaluating the export!
1322    assert(zcu.analysis_in_progress.swapRemove(anal_unit));
1323    if (zir_decl.type_body == null) {
1324        assert(zcu.analysis_in_progress.swapRemove(.wrap(.{ .nav_ty = nav_id })));
1325    }
1326
1327    if (zir_decl.linkage == .@"export") {
1328        const export_src = block.src(.{ .token_offset = @enumFromInt(@intFromBool(zir_decl.is_pub)) });
1329        const name_slice = zir.nullTerminatedString(zir_decl.name);
1330        const name_ip = try ip.getOrPutString(gpa, pt.tid, name_slice, .no_embedded_nulls);
1331        try sema.analyzeExport(&block, export_src, .{ .name = name_ip }, nav_id);
1332    }
1333
1334    try sema.flushExports();
1335
1336    queue_codegen: {
1337        if (!queue_linker_work) break :queue_codegen;
1338
1339        if (!try nav_ty.hasRuntimeBitsSema(pt)) {
1340            if (zcu.comp.config.use_llvm) break :queue_codegen;
1341            if (file.mod.?.strip) break :queue_codegen;
1342        }
1343
1344        // This job depends on any resolve_type_fully jobs queued up before it.
1345        zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
1346        try zcu.comp.queueJob(.{ .link_nav = nav_id });
1347    }
1348
1349    switch (old_nav.status) {
1350        .unresolved, .type_resolved => return .{ .val_changed = true },
1351        .fully_resolved => |old| return .{ .val_changed = old.val != nav_val.toIntern() },
1352    }
1353}
1354
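/// Ensures that the type of the `Nav` referenced by `nav_id` is up-to-date, performing analysis
/// if the `nav_ty` unit is outdated or has never been resolved. If the declaration has no type
/// body, the type is only known by resolving the value, so this defers to `ensureNavValUpToDate`,
/// whose logic also populates the type information.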
1355pub fn ensureNavTypeUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.SemaError!void {
1356    const tracy = trace(@src());
1357    defer tracy.end();
1358
1359    const zcu = pt.zcu;
1360    const gpa = zcu.gpa;
1361    const ip = &zcu.intern_pool;
1362
1363    const anal_unit: AnalUnit = .wrap(.{ .nav_ty = nav_id });
1364    const nav = ip.getNav(nav_id);
1365
1366    log.debug("ensureNavTypeUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});
1367
1368    assert(!zcu.analysis_in_progress.contains(anal_unit));
1369
1370    const type_resolved_by_value: bool = from_val: {
1371        const analysis = nav.analysis orelse break :from_val false;
1372        const inst_resolved = analysis.zir_index.resolveFull(ip) orelse break :from_val false;
1373        const file = zcu.fileByIndex(inst_resolved.file);
1374        const zir_decl = file.zir.?.getDeclaration(inst_resolved.inst);
1375        break :from_val zir_decl.type_body == null;
1376    };
1377    if (type_resolved_by_value) {
1378        // Logic at the end of `ensureNavValUpToDate` is directly responsible for populating our state.
1379        return pt.ensureNavValUpToDate(nav_id);
1380    }
1381
1382    // Determine whether or not this `Nav`'s type is outdated. This also includes checking if the
1383    // status is `.unresolved`, which indicates that the value is outdated because it has *never*
1384    // been analyzed so far.
1385    //
1386    // Note that if the unit is PO, we pessimistically assume that it *does* require re-analysis, to
1387    // ensure that the unit is definitely up-to-date when this function returns. This mechanism could
1388    // result in over-analysis if analysis occurs in a poor order; we do our best to avoid this by
1389    // carefully choosing which units to re-analyze. See `Zcu.findOutdatedToAnalyze`.
1390
1391    const was_outdated = zcu.outdated.swapRemove(anal_unit) or
1392        zcu.potentially_outdated.swapRemove(anal_unit);
1393
1394    const prev_failed = zcu.failed_analysis.contains(anal_unit) or
1395        zcu.transitive_failed_analysis.contains(anal_unit);
1396
1397    if (was_outdated) {
1398        dev.check(.incremental);
1399        _ = zcu.outdated_ready.swapRemove(anal_unit);
1400        zcu.deleteUnitExports(anal_unit);
1401        zcu.deleteUnitReferences(anal_unit);
1402        zcu.deleteUnitCompileLogs(anal_unit);
1403        if (zcu.failed_analysis.fetchSwapRemove(anal_unit)) |kv| {
1404            kv.value.destroy(gpa);
1405        }
1406        _ = zcu.transitive_failed_analysis.swapRemove(anal_unit);
1407        ip.removeDependenciesForDepender(gpa, anal_unit);
1408    } else {
1409        // We can trust the current information about this unit.
1410        if (prev_failed) return error.AnalysisFail;
1411        switch (nav.status) {
1412            .unresolved => {},
1413            .type_resolved, .fully_resolved => return,
1414        }
1415    }
1416
1417    if (zcu.comp.debugIncremental()) {
1418        const info = try zcu.incremental_debug_state.getUnitInfo(gpa, anal_unit);
1419        info.last_update_gen = zcu.generation;
1420        info.deps.clearRetainingCapacity();
1421    }
1422
1423    const unit_tracking = zcu.trackUnitSema(nav.fqn.toSlice(ip), nav.srcInst(ip));
1424    defer unit_tracking.end(zcu);
1425
1426    const invalidate_type: bool, const new_failed: bool = if (pt.analyzeNavType(nav_id)) |result| res: {
1427        break :res .{
1428            // If the unit has gone from failed to success, we still need to invalidate the dependencies.
1429            result.type_changed or prev_failed,
1430            false,
1431        };
1432    } else |err| switch (err) {
1433        error.AnalysisFail => res: {
1434            if (!zcu.failed_analysis.contains(anal_unit)) {
1435                // If this unit caused the error, it would have an entry in `failed_analysis`.
1436                // Since it does not, this must be a transitive failure.
1437                try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
1438                log.debug("mark transitive analysis failure for {f}", .{zcu.fmtAnalUnit(anal_unit)});
1439            }
1440            break :res .{ !prev_failed, true };
1441        },
1442        error.OutOfMemory => {
1443            // TODO: it's unclear how to gracefully handle this.
1444            // To report the error cleanly, we need to add a message to `failed_analysis` and a
1445            // corresponding entry to `retryable_failures`; but either of these things is quite
1446            // likely to OOM at this point.
1447            // If that happens, what do we do? Perhaps we could have a special field on `Zcu`
1448            // for reporting OOM errors without allocating.
1449            return error.OutOfMemory;
1450        },
1451        error.Canceled => |e| return e,
1452        error.ComptimeReturn => unreachable,
1453        error.ComptimeBreak => unreachable,
1454    };
1455
1456    if (was_outdated) {
1457        const dependee: InternPool.Dependee = .{ .nav_ty = nav_id };
1458        if (invalidate_type) {
1459            // This dependency was marked as PO, meaning dependees were waiting
1460            // on its analysis result, and it has turned out to be outdated.
1461            // Update dependees accordingly.
1462            try zcu.markDependeeOutdated(.marked_po, dependee);
1463        } else {
1464            // This dependency was previously PO, but turned out to be up-to-date.
1465            // We do not need to queue successive analysis.
1466            try zcu.markPoDependeeUpToDate(dependee);
1467        }
1468    }
1469
1470    if (new_failed) return error.AnalysisFail;
1471}
1472
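/// Performs type analysis for a `Nav` whose declaration has an explicit type body: resolves the
/// annotated type and the pointer modifiers (alignment, linksection, addrspace), updating the
/// `Nav` only if any of them changed. Returns whether the type information changed.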
1473fn analyzeNavType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileError!struct { type_changed: bool } {
1474    const zcu = pt.zcu;
1475    const gpa = zcu.gpa;
1476    const ip = &zcu.intern_pool;
1477
1478    const anal_unit: AnalUnit = .wrap(.{ .nav_ty = nav_id });
1479    const old_nav = ip.getNav(nav_id);
1480
1481    log.debug("analyzeNavType {f}", .{zcu.fmtAnalUnit(anal_unit)});
1482
1483    const inst_resolved = old_nav.analysis.?.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
1484    const file = zcu.fileByIndex(inst_resolved.file);
1485    const zir = file.zir.?;
1486
1487    try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, {});
1488    defer assert(zcu.analysis_in_progress.swapRemove(anal_unit));
1489
1490    const zir_decl = zir.getDeclaration(inst_resolved.inst);
1491    const type_body = zir_decl.type_body.?;
1492
1493    var analysis_arena: std.heap.ArenaAllocator = .init(gpa);
1494    defer analysis_arena.deinit();
1495
1496    var comptime_err_ret_trace: std.array_list.Managed(Zcu.LazySrcLoc) = .init(gpa);
1497    defer comptime_err_ret_trace.deinit();
1498
1499    var sema: Sema = .{
1500        .pt = pt,
1501        .gpa = gpa,
1502        .arena = analysis_arena.allocator(),
1503        .code = zir,
1504        .owner = anal_unit,
1505        .func_index = .none,
1506        .func_is_naked = false,
1507        .fn_ret_ty = .void,
1508        .fn_ret_ty_ies = null,
1509        .comptime_err_ret_trace = &comptime_err_ret_trace,
1510    };
1511    defer sema.deinit();
1512
1513    // Every `Nav` declares a dependency on the source of the corresponding declaration.
1514    try sema.declareDependency(.{ .src_hash = old_nav.analysis.?.zir_index });
1515
1516    // In theory, we would also add a reference to the corresponding `nav_val` unit here: there are
1517    // always references in both directions between a `nav_val` and `nav_ty`. However, to save memory,
1518    // these references are known implicitly. See logic in `Zcu.resolveReferences`.
1519
1520    var block: Sema.Block = .{
1521        .parent = null,
1522        .sema = &sema,
1523        .namespace = old_nav.analysis.?.namespace,
1524        .instructions = .{},
1525        .inlining = null,
1526        .comptime_reason = undefined, // set below
1527        .src_base_inst = old_nav.analysis.?.zir_index,
1528        .type_name_ctx = old_nav.fqn,
1529    };
1530    defer block.instructions.deinit(gpa);
1531
1532    const ty_src = block.src(.{ .node_offset_var_decl_ty = .zero });
1533
1534    block.comptime_reason = .{ .reason = .{
1535        .src = ty_src,
1536        .r = .{ .simple = .type },
1537    } };
1538
1539    const resolved_ty: Type = ty: {
1540        const uncoerced_type_ref = try sema.resolveInlineBody(&block, type_body, inst_resolved.inst);
1541        const type_ref = try sema.coerce(&block, .type, uncoerced_type_ref, ty_src);
1542        break :ty .fromInterned(type_ref.toInterned().?);
1543    };
1544
1545    try resolved_ty.resolveLayout(pt);
1546
1547    // In the case where the type is specified, this function is also responsible for resolving
1548    // the pointer modifiers, i.e. alignment, linksection, addrspace.
1549    const modifiers = try sema.resolveNavPtrModifiers(&block, zir_decl, inst_resolved.inst, resolved_ty);
1550
1551    const is_const = switch (zir_decl.kind) {
1552        .@"comptime" => unreachable,
1553        .unnamed_test, .@"test", .decltest, .@"const" => true,
1554        .@"var" => false,
1555    };
1556
1557    const is_extern_decl = zir_decl.linkage == .@"extern";
1558
1559    // Now for the question of the day: are the type and modifiers the same as before?
1560    // If they are, then we should actually keep the `Nav` as `fully_resolved` if it currently is.
1561    // That's because `analyzeNavVal` will later want to look at the resolved value to figure out
1562    // whether it's changed: if we threw that data away now, it would have to assume that the value
1563    // had changed, potentially spinning off loads of unnecessary re-analysis!
1564    const changed = switch (old_nav.status) {
1565        .unresolved => true,
1566        .type_resolved => |r| r.type != resolved_ty.toIntern() or
1567            r.alignment != modifiers.alignment or
1568            r.@"linksection" != modifiers.@"linksection" or
1569            r.@"addrspace" != modifiers.@"addrspace" or
1570            r.is_const != is_const or
1571            r.is_extern_decl != is_extern_decl,
1572        .fully_resolved => |r| ip.typeOf(r.val) != resolved_ty.toIntern() or
1573            r.alignment != modifiers.alignment or
1574            r.@"linksection" != modifiers.@"linksection" or
1575            r.@"addrspace" != modifiers.@"addrspace" or
1576            r.is_const != is_const or
1577            (old_nav.getExtern(ip) != null) != is_extern_decl,
1578    };
1579
1580    if (!changed) return .{ .type_changed = false };
1581
1582    ip.resolveNavType(nav_id, .{
1583        .type = resolved_ty.toIntern(),
1584        .is_const = is_const,
1585        .alignment = modifiers.alignment,
1586        .@"linksection" = modifiers.@"linksection",
1587        .@"addrspace" = modifiers.@"addrspace",
1588        .is_threadlocal = zir_decl.is_threadlocal,
1589        .is_extern_decl = is_extern_decl,
1590    });
1591
1592    return .{ .type_changed = true };
1593}
1594
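/// Ensures that the body of the function `func_index` has up-to-date analysis, re-running
/// `analyzeFuncBody` if the `func` unit is outdated or has never been analyzed. On incremental
/// updates, marks the function as an outdated or up-to-date dependee depending on whether its
/// inferred error set changed.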
1595pub fn ensureFuncBodyUpToDate(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError!void {
1596    dev.check(.sema);
1597
1598    const tracy = trace(@src());
1599    defer tracy.end();
1600
1601    const zcu = pt.zcu;
1602    const gpa = zcu.gpa;
1603    const ip = &zcu.intern_pool;
1604
1605    _ = zcu.func_body_analysis_queued.swapRemove(func_index);
1606
1607    const anal_unit: AnalUnit = .wrap(.{ .func = func_index });
1608
1609    log.debug("ensureFuncBodyUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});
1610
1611    assert(!zcu.analysis_in_progress.contains(anal_unit));
1612
1613    const func = zcu.funcInfo(func_index);
1614
1615    assert(func.ty == func.uncoerced_ty); // analyze the body of the original function, not a coerced one
1616
1617    const was_outdated = zcu.outdated.swapRemove(anal_unit) or
1618        zcu.potentially_outdated.swapRemove(anal_unit);
1619
1620    const prev_failed = zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit);
1621
1622    if (was_outdated) {
1623        dev.check(.incremental);
1624        _ = zcu.outdated_ready.swapRemove(anal_unit);
1625        zcu.deleteUnitExports(anal_unit);
1626        zcu.deleteUnitReferences(anal_unit);
1627        zcu.deleteUnitCompileLogs(anal_unit);
1628        if (zcu.failed_analysis.fetchSwapRemove(anal_unit)) |kv| {
1629            kv.value.destroy(gpa);
1630        }
1631        _ = zcu.transitive_failed_analysis.swapRemove(anal_unit);
1632    } else {
1633        // We can trust the current information about this function.
1634        if (prev_failed) {
1635            return error.AnalysisFail;
1636        }
1637        if (func.analysisUnordered(ip).is_analyzed) return;
1638    }
1639
1640    if (zcu.comp.debugIncremental()) {
1641        const info = try zcu.incremental_debug_state.getUnitInfo(gpa, anal_unit);
1642        info.last_update_gen = zcu.generation;
1643        info.deps.clearRetainingCapacity();
1644    }
1645
1646    const owner_nav = ip.getNav(func.owner_nav);
1647    const unit_tracking = zcu.trackUnitSema(
1648        owner_nav.fqn.toSlice(ip),
1649        owner_nav.srcInst(ip),
1650    );
1651    defer unit_tracking.end(zcu);
1652
1653    const ies_outdated, const new_failed = if (pt.analyzeFuncBody(func_index)) |result|
1654        .{ prev_failed or result.ies_outdated, false }
1655    else |err| switch (err) {
1656        error.AnalysisFail => res: {
1657            if (!zcu.failed_analysis.contains(anal_unit)) {
1658                // If this function caused the error, it would have an entry in `failed_analysis`.
1659                // Since it does not, this must be a transitive failure.
1660                try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
1661                log.debug("mark transitive analysis failure for {f}", .{zcu.fmtAnalUnit(anal_unit)});
1662            }
1663            // We consider the IES to be outdated if the function previously succeeded analysis; in this case,
1664            // we need to re-analyze dependents to ensure they hit a transitive error here, rather than reporting
1665            // a different error later (which may now be invalid).
1666            break :res .{ !prev_failed, true };
1667        },
1668        error.OutOfMemory => {
1669            // TODO: it's unclear how to gracefully handle this.
1670            // To report the error cleanly, we need to add a message to `failed_analysis` and a
1671            // corresponding entry to `retryable_failures`; but either of these things is quite
1672            // likely to OOM at this point.
1673            // If that happens, what do we do? Perhaps we could have a special field on `Zcu`
1674            // for reporting OOM errors without allocating.
1675            return error.OutOfMemory;
1676        },
1677        error.Canceled => |e| return e,
1678    };
1679
1680    if (was_outdated) {
1681        if (ies_outdated) {
1682            try zcu.markDependeeOutdated(.marked_po, .{ .interned = func_index });
1683        } else {
1684            try zcu.markPoDependeeUpToDate(.{ .interned = func_index });
1685        }
1686    }
1687
1688    if (new_failed) return error.AnalysisFail;
1689}
1690
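/// Analyzes the body of `func_index` via `analyzeFnBodyInner`, producing AIR and queueing a
/// `codegen_func` job unless no backend or AIR/LLVM dump needs it. Returns whether the resolved
/// inferred error set changed, so that the caller can invalidate dependents.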
1691fn analyzeFuncBody(
1692    pt: Zcu.PerThread,
1693    func_index: InternPool.Index,
1694) Zcu.SemaError!struct { ies_outdated: bool } {
1695    const zcu = pt.zcu;
1696    const gpa = zcu.gpa;
1697    const ip = &zcu.intern_pool;
1698
1699    const func = zcu.funcInfo(func_index);
1700    const anal_unit = AnalUnit.wrap(.{ .func = func_index });
1701
1702    // Make sure that this function is still owned by the same `Nav`. Otherwise, analyzing
1703    // it would be a waste of time in the best case, and could cause codegen to give bogus
1704    // results in the worst case.
1705
1706    if (func.generic_owner == .none) {
1707        // Among other things, this ensures that the function's `zir_body_inst` is correct.
1708        try pt.ensureNavValUpToDate(func.owner_nav);
1709        if (ip.getNav(func.owner_nav).status.fully_resolved.val != func_index) {
1710            // This function is no longer referenced! There's no point in re-analyzing it.
1711            // Just mark a transitive failure and move on.
1712            return error.AnalysisFail;
1713        }
1714    } else {
1715        const go_nav = zcu.funcInfo(func.generic_owner).owner_nav;
1716        // Among other things, this ensures that the function's `zir_body_inst` is correct.
1717        try pt.ensureNavValUpToDate(go_nav);
1718        if (ip.getNav(go_nav).status.fully_resolved.val != func.generic_owner) {
1719            // The generic owner is no longer referenced, so this function is also unreferenced.
1720            // There's no point in re-analyzing it. Just mark a transitive failure and move on.
1721            return error.AnalysisFail;
1722        }
1723    }
1724
1725    // We'll want to remember what the IES used to be before the update for
1726    // dependency invalidation purposes.
1727    const old_resolved_ies = if (func.analysisUnordered(ip).inferred_error_set)
1728        func.resolvedErrorSetUnordered(ip)
1729    else
1730        .none;
1731
1732    log.debug("analyze and generate fn body {f}", .{zcu.fmtAnalUnit(anal_unit)});
1733
1734    var air = try pt.analyzeFnBodyInner(func_index);
1735    errdefer air.deinit(gpa);
1736
1737    const ies_outdated = !func.analysisUnordered(ip).inferred_error_set or
1738        func.resolvedErrorSetUnordered(ip) != old_resolved_ies;
1739
1740    const comp = zcu.comp;
1741
1742    const dump_air = build_options.enable_debug_extensions and comp.verbose_air;
1743    const dump_llvm_ir = build_options.enable_debug_extensions and (comp.verbose_llvm_ir != null or comp.verbose_llvm_bc != null);
1744
1745    if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) {
1746        air.deinit(gpa);
1747        return .{ .ies_outdated = ies_outdated };
1748    }
1749
1750    // This job depends on any resolve_type_fully jobs queued up before it.
1751    zcu.codegen_prog_node.increaseEstimatedTotalItems(1);
1752    comp.link_prog_node.increaseEstimatedTotalItems(1);
1753    try comp.queueJob(.{ .codegen_func = .{
1754        .func = func_index,
1755        .air = air,
1756    } });
1757
1758    return .{ .ies_outdated = ies_outdated };
1759}
1760
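/// Performs semantic analysis of the root source file of `mod`, but only if its root struct type
/// has not yet been created.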
1761pub fn semaMod(pt: Zcu.PerThread, mod: *Module) !void {
1762    dev.check(.sema);
1763    const file_index = pt.zcu.module_roots.get(mod).?.unwrap().?;
1764    const root_type = pt.zcu.fileRootType(file_index);
1765    if (root_type == .none) {
1766        return pt.semaFile(file_index);
1767    }
1768}
1769
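/// Creates the struct type corresponding to the root of the file `file_index`, scans its
/// namespace, queues type resolution and linker work, and registers it as the file's root type.
/// Asserts that the root ZIR instruction is a plain `struct_decl` (no captures, no backing
/// integer, automatic layout).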
1770fn createFileRootStruct(
1771    pt: Zcu.PerThread,
1772    file_index: Zcu.File.Index,
1773    namespace_index: Zcu.Namespace.Index,
1774    replace_existing: bool,
1775) Allocator.Error!InternPool.Index {
1776    const zcu = pt.zcu;
1777    const gpa = zcu.gpa;
1778    const ip = &zcu.intern_pool;
1779    const file = zcu.fileByIndex(file_index);
1780    const extended = file.zir.?.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended;
1781    assert(extended.opcode == .struct_decl);
1782    const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
1783    assert(!small.has_captures_len);
1784    assert(!small.has_backing_int);
1785    assert(small.layout == .auto);
1786    var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).@"struct".fields.len;
1787    const fields_len = if (small.has_fields_len) blk: {
1788        const fields_len = file.zir.?.extra[extra_index];
1789        extra_index += 1;
1790        break :blk fields_len;
1791    } else 0;
1792    const decls_len = if (small.has_decls_len) blk: {
1793        const decls_len = file.zir.?.extra[extra_index];
1794        extra_index += 1;
1795        break :blk decls_len;
1796    } else 0;
1797    const decls = file.zir.?.bodySlice(extra_index, decls_len);
1798    extra_index += decls_len;
1799
1800    const tracked_inst = try ip.trackZir(gpa, pt.tid, .{
1801        .file = file_index,
1802        .inst = .main_struct_inst,
1803    });
1804    const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{
1805        .layout = .auto,
1806        .fields_len = fields_len,
1807        .known_non_opv = small.known_non_opv,
1808        .requires_comptime = if (small.known_comptime_only) .yes else .unknown,
1809        .any_comptime_fields = small.any_comptime_fields,
1810        .any_default_inits = small.any_default_inits,
1811        .inits_resolved = false,
1812        .any_aligned_fields = small.any_aligned_fields,
1813        .key = .{ .declared = .{
1814            .zir_index = tracked_inst,
1815            .captures = &.{},
1816        } },
1817    }, replace_existing)) {
1818        .existing => unreachable, // we wouldn't be analyzing the file root if this type existed
1819        .wip => |wip| wip,
1820    };
1821    errdefer wip_ty.cancel(ip, pt.tid);
1822
1823    wip_ty.setName(ip, try file.internFullyQualifiedName(pt), .none);
1824    ip.namespacePtr(namespace_index).owner_type = wip_ty.index;
1825
1826    if (zcu.comp.config.incremental) {
1827        try pt.addDependency(.wrap(.{ .type = wip_ty.index }), .{ .src_hash = tracked_inst });
1828    }
1829
1830    try pt.scanNamespace(namespace_index, decls);
1831    try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
1832    codegen_type: {
1833        if (file.mod.?.strip) break :codegen_type;
1834        // This job depends on any resolve_type_fully jobs queued up before it.
1835        zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
1836        try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
1837    }
1838    zcu.setFileRootType(file_index, wip_ty.index);
1839    if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip_ty.index);
1840    return wip_ty.finish(ip, namespace_index);
1841}
1842
1843/// Re-scan the namespace of a file's root struct type on an incremental update.
1844/// The file must have successfully populated ZIR.
1845/// If the file's root struct type is not populated (the file is unreferenced), nothing is done.
1846/// This is called by `updateZirRefs` for all updated files before the main work loop.
1847/// This function does not perform any semantic analysis.
1848fn updateFileNamespace(pt: Zcu.PerThread, file_index: Zcu.File.Index) Allocator.Error!void {
1849    const zcu = pt.zcu;
1850
1851    const file = zcu.fileByIndex(file_index);
1852    const file_root_type = zcu.fileRootType(file_index);
1853    if (file_root_type == .none) return;
1854
1855    log.debug("updateFileNamespace mod={s} sub_file_path={s}", .{
1856        file.mod.?.fully_qualified_name,
1857        file.sub_file_path,
1858    });
1859
1860    const namespace_index = Type.fromInterned(file_root_type).getNamespaceIndex(zcu);
1861    const decls = decls: {
1862        const extended = file.zir.?.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended;
1863        const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
1864
1865        var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).@"struct".fields.len;
1866        extra_index += @intFromBool(small.has_fields_len);
1867        const decls_len = if (small.has_decls_len) blk: {
1868            const decls_len = file.zir.?.extra[extra_index];
1869            extra_index += 1;
1870            break :blk decls_len;
1871        } else 0;
1872        break :decls file.zir.?.bodySlice(extra_index, decls_len);
1873    };
1874    try pt.scanNamespace(namespace_index, decls);
1875    zcu.namespacePtr(namespace_index).generation = zcu.generation;
1876}
1877
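/// Creates the root namespace and root struct type for a Zig file which has not been semantically
/// analyzed before. Asserts that the file has populated ZIR and no root type yet.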
1878fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
1879    const tracy = trace(@src());
1880    defer tracy.end();
1881
1882    const zcu = pt.zcu;
1883    const file = zcu.fileByIndex(file_index);
1884    assert(file.getMode() == .zig);
1885    assert(zcu.fileRootType(file_index) == .none);
1886
1887    assert(file.zir != null);
1888
1889    const new_namespace_index = try pt.createNamespace(.{
1890        .parent = .none,
1891        .owner_type = undefined, // set in `createFileRootStruct`
1892        .file_scope = file_index,
1893        .generation = zcu.generation,
1894    });
1895    const struct_ty = try pt.createFileRootStruct(file_index, new_namespace_index, false);
1896    errdefer zcu.intern_pool.remove(pt.tid, struct_ty);
1897
1898    if (zcu.comp.time_report) |*tr| {
1899        tr.stats.n_imported_files += 1;
1900    }
1901}
1902
1903/// Called by AstGen worker threads when an import is seen. If `new_file` is returned, the caller is
1904/// then responsible for queueing a new AstGen job for the new file.
1905/// Assumes that `comp.mutex` is NOT locked. It will be locked by this function where necessary.
1906pub fn discoverImport(
1907    pt: Zcu.PerThread,
1908    importer_path: Compilation.Path,
1909    import_string: []const u8,
1910) Allocator.Error!union(enum) {
1911    module,
1912    existing_file: Zcu.File.Index,
1913    new_file: struct {
1914        index: Zcu.File.Index,
1915        file: *Zcu.File,
1916    },
1917} {
1918    const zcu = pt.zcu;
1919    const gpa = zcu.gpa;
1920
1921    if (!mem.endsWith(u8, import_string, ".zig") and !mem.endsWith(u8, import_string, ".zon")) {
1922        return .module;
1923    }
1924
1925    const new_path = try importer_path.upJoin(gpa, zcu.comp.dirs, import_string);
1926    errdefer new_path.deinit(gpa);
1927
1928    // We're about to do a GOP on `import_table`, so we need the mutex.
1929    zcu.comp.mutex.lock();
1930    defer zcu.comp.mutex.unlock();
1931
1932    const gop = try zcu.import_table.getOrPutAdapted(gpa, new_path, Zcu.ImportTableAdapter{ .zcu = zcu });
1933    errdefer _ = zcu.import_table.pop();
1934    if (gop.found_existing) {
1935        new_path.deinit(gpa); // we didn't need it for `File.path`
1936        return .{ .existing_file = gop.key_ptr.* };
1937    }
1938
1939    zcu.import_table.lockPointers();
1940    defer zcu.import_table.unlockPointers();
1941
1942    const new_file = try gpa.create(Zcu.File);
1943    errdefer gpa.destroy(new_file);
1944
1945    const new_file_index = try zcu.intern_pool.createFile(gpa, pt.tid, .{
1946        .bin_digest = new_path.digest(),
1947        .file = new_file,
1948        .root_type = .none,
1949    });
1950    errdefer comptime unreachable; // because we don't remove the file from the `InternPool`
1951
1952    gop.key_ptr.* = new_file_index;
1953    new_file.* = .{
1954        .status = .never_loaded,
1955        .path = new_path,
1956        .stat = undefined,
1957        .is_builtin = false,
1958        .source = null,
1959        .tree = null,
1960        .zir = null,
1961        .zoir = null,
1962        .mod = null,
1963        .sub_file_path = undefined,
1964        .module_changed = false,
1965        .prev_zir = null,
1966        .zoir_invalidated = false,
1967    };
1968
1969    return .{ .new_file = .{
1970        .index = new_file_index,
1971        .file = new_file,
1972    } };
1973}
1974
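/// Resolves an import string relative to `importer`. Module imports resolve to the module's root
/// file; file imports resolve to the `File` already registered by `discoverImport`. This function
/// never creates new `File` entries.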
1975pub fn doImport(
1976    pt: Zcu.PerThread,
1977    /// This file must have its `mod` populated.
1978    importer: *Zcu.File,
1979    import_string: []const u8,
1980) error{
1981    OutOfMemory,
1982    ModuleNotFound,
1983    IllegalZigImport,
1984}!struct {
1985    file: Zcu.File.Index,
1986    module_root: ?*Module,
1987} {
1988    const zcu = pt.zcu;
1989    const gpa = zcu.gpa;
1990    const imported_mod: ?*Module = m: {
1991        if (mem.eql(u8, import_string, "std")) break :m zcu.std_mod;
1992        if (mem.eql(u8, import_string, "root")) break :m zcu.root_mod;
1993        if (mem.eql(u8, import_string, "builtin")) {
1994            const opts = importer.mod.?.getBuiltinOptions(zcu.comp.config);
1995            break :m zcu.builtin_modules.get(opts.hash()).?;
1996        }
1997        break :m importer.mod.?.deps.get(import_string);
1998    };
1999    if (imported_mod) |mod| {
2000        if (zcu.module_roots.get(mod).?.unwrap()) |file_index| {
2001            return .{
2002                .file = file_index,
2003                .module_root = mod,
2004            };
2005        }
2006    }
2007    if (!std.mem.endsWith(u8, import_string, ".zig") and
2008        !std.mem.endsWith(u8, import_string, ".zon"))
2009    {
2010        return error.ModuleNotFound;
2011    }
2012    const path = try importer.path.upJoin(gpa, zcu.comp.dirs, import_string);
2013    defer path.deinit(gpa);
2014    if (try path.isIllegalZigImport(gpa, zcu.comp.dirs)) {
2015        return error.IllegalZigImport;
2016    }
2017    return .{
2018        .file = zcu.import_table.getKeyAdapted(path, Zcu.ImportTableAdapter{ .zcu = zcu }).?,
2019        .module_root = null,
2020    };
2021}
2022/// This is called once during `Compilation.create` and never again. "builtin" modules don't yet
2023/// exist, so are not added to `module_roots` here. They must be added when they are created.
2024pub fn populateModuleRootTable(pt: Zcu.PerThread) error{
2025    OutOfMemory,
2026    /// One of the specified modules had its root source file at an illegal path.
2027    IllegalZigImport,
2028}!void {
2029    const zcu = pt.zcu;
2030    const gpa = zcu.gpa;
2031
2032    // We'll initially add [mod, undefined] pairs, and when we reach the pair while
2033    // iterating, rewrite the undefined value.
2034    const roots = &zcu.module_roots;
2035    roots.clearRetainingCapacity();
2036
2037    // Start with:
2038    // * `std_mod`, which is the main root of analysis
2039    // * `root_mod`, which is `@import("root")`
2040    // * `main_mod`, which is a special analysis root in tests (and otherwise equal to `root_mod`)
2041    // All other modules will be found by traversing their dependency tables.
2042    try roots.ensureTotalCapacity(gpa, 3);
2043    roots.putAssumeCapacity(zcu.std_mod, undefined);
2044    roots.putAssumeCapacity(zcu.root_mod, undefined);
2045    roots.putAssumeCapacity(zcu.main_mod, undefined);
2046    var i: usize = 0;
2047    while (i < roots.count()) {
2048        const mod = roots.keys()[i];
2049        try roots.ensureUnusedCapacity(gpa, mod.deps.count());
2050        for (mod.deps.values()) |dep| {
2051            const gop = roots.getOrPutAssumeCapacity(dep);
2052            _ = gop; // we want to leave the value undefined if it was added
2053        }
2054
2055        const root_file_out = &roots.values()[i];
2056        roots.lockPointers();
2057        defer roots.unlockPointers();
2058
2059        i += 1;
2060
2061        if (Zcu.File.modeFromPath(mod.root_src_path) == null) {
2062            root_file_out.* = .none;
2063            continue;
2064        }
2065
2066        const path = try mod.root.join(gpa, zcu.comp.dirs, mod.root_src_path);
2067        errdefer path.deinit(gpa);
2068
2069        if (try path.isIllegalZigImport(gpa, zcu.comp.dirs)) {
2070            return error.IllegalZigImport;
2071        }
2072
2073        const gop = try zcu.import_table.getOrPutAdapted(gpa, path, Zcu.ImportTableAdapter{ .zcu = zcu });
2074        errdefer _ = zcu.import_table.pop();
2075
2076        if (gop.found_existing) {
2077            path.deinit(gpa);
2078            root_file_out.* = gop.key_ptr.*.toOptional();
2079            continue;
2080        }
2081
2082        zcu.import_table.lockPointers();
2083        defer zcu.import_table.unlockPointers();
2084
2085        const new_file = try gpa.create(Zcu.File);
2086        errdefer gpa.destroy(new_file);
2087
2088        const new_file_index = try zcu.intern_pool.createFile(gpa, pt.tid, .{
2089            .bin_digest = path.digest(),
2090            .file = new_file,
2091            .root_type = .none,
2092        });
2093        errdefer comptime unreachable; // because we don't remove the file from the `InternPool`
2094
2095        gop.key_ptr.* = new_file_index;
2096        root_file_out.* = new_file_index.toOptional();
2097        new_file.* = .{
2098            .status = .never_loaded,
2099            .path = path,
2100            .stat = undefined,
2101            .is_builtin = false,
2102            .source = null,
2103            .tree = null,
2104            .zir = null,
2105            .zoir = null,
2106            .mod = null,
2107            .sub_file_path = undefined,
2108            .module_changed = false,
2109            .prev_zir = null,
2110            .zoir_invalidated = false,
2111        };
2112    }
2113}
2114
2115/// Clears and re-populates `pt.zcu.alive_files`, and determines the module identity of every alive
2116/// file. If a file's module changes, its `module_changed` flag is set for `updateZirRefs` to see.
2117/// Also clears and re-populates `failed_imports` and `multi_module_err` based on the set of alive
2118/// files.
2119///
2120/// Live files are also added as file system inputs if necessary.
2121///
2122/// Returns whether any live file is in a failed state. However, this function does *not*
2123/// modify `pt.zcu.skip_analysis_this_update`.
2124///
2125/// If an error is returned, `pt.zcu.alive_files` might contain undefined values.
2126pub fn computeAliveFiles(pt: Zcu.PerThread) Allocator.Error!bool {
2127    const zcu = pt.zcu;
2128    const comp = zcu.comp;
2129    const gpa = zcu.gpa;
2130
2131    var any_fatal_files = false;
2132    zcu.multi_module_err = null;
2133    zcu.failed_imports.clearRetainingCapacity();
2134    zcu.alive_files.clearRetainingCapacity();
2135
2136    // This function will iterate the keys of `alive_files`, adding new entries as it discovers
2137    // imports. Once a file is in `alive_files`, it has its `mod` field up-to-date. If conflicting
2138    // imports are discovered for a file, we will set `multi_module_err`. Crucially, this traversal
2139    // is single-threaded, and depends only on the order of the imports map from AstGen, which makes
2140    // its behavior (in terms of which multi module errors are discovered) entirely consistent in a
2141    // multi-threaded environment (where things like file indices could differ between compiler runs).
2142
2143    // The roots of our file liveness analysis will be the analysis roots.
2144    const analysis_roots = zcu.analysisRoots();
2145    try zcu.alive_files.ensureTotalCapacity(gpa, analysis_roots.len);
2146    for (analysis_roots) |mod| {
2147        const file_index = zcu.module_roots.get(mod).?.unwrap() orelse continue;
2148        const file = zcu.fileByIndex(file_index);
2149
2150        file.mod = mod;
2151        file.sub_file_path = mod.root_src_path;
2152
2153        zcu.alive_files.putAssumeCapacityNoClobber(file_index, .{ .analysis_root = mod });
2154    }
2155
2156    var live_check_idx: usize = 0;
2157    while (live_check_idx < zcu.alive_files.count()) {
2158        const file_idx = zcu.alive_files.keys()[live_check_idx];
2159        const file = zcu.fileByIndex(file_idx);
2160        live_check_idx += 1;
2161
2162        switch (file.status) {
2163            .never_loaded => unreachable, // everything reachable is loaded by the AstGen workers
2164            .retryable_failure, .astgen_failure => any_fatal_files = true,
2165            .success => {},
2166        }
2167
2168        try comp.appendFileSystemInput(file.path);
2169
2170        switch (file.getMode()) {
2171            .zig => {}, // continue to logic below
2172            .zon => continue, // ZON can't import anything
2173        }
2174
2175        if (file.status != .success) continue; // ZIR not valid if there was a file failure
2176
2177        const zir = file.zir.?;
2178        const imports_index = zir.extra[@intFromEnum(Zir.ExtraIndex.imports)];
2179        if (imports_index == 0) continue; // this Zig file has no imports
2180        const extra = zir.extraData(Zir.Inst.Imports, imports_index);
2181        var extra_index = extra.end;
2182        try zcu.alive_files.ensureUnusedCapacity(gpa, extra.data.imports_len);
2183        for (0..extra.data.imports_len) |_| {
2184            const item = zir.extraData(Zir.Inst.Imports.Item, extra_index);
2185            extra_index = item.end;
2186            const import_path = zir.nullTerminatedString(item.data.name);
2187
2188            if (std.mem.eql(u8, import_path, "builtin")) {
2189                // We've not necessarily generated builtin modules yet, so `doImport` could fail. Instead,
2190                // create the module here. Then, since we know that `builtin.zig` doesn't have an error and
2191                // has no imports other than 'std', we can just continue onto the next import.
2192                try pt.updateBuiltinModule(file.mod.?.getBuiltinOptions(comp.config));
2193                continue;
2194            }
2195
2196            const res = pt.doImport(file, import_path) catch |err| switch (err) {
2197                error.OutOfMemory => |e| return e,
2198                error.ModuleNotFound => {
2199                    // It'd be nice if this were a file-level error, but allowing this turns out to
2200                    // be quite important in practice, e.g. for optional dependencies whose import
2201                    // is behind a comptime condition. So, the error here happens in `Sema` instead.
2202                    continue;
2203                },
2204                error.IllegalZigImport => {
2205                    try zcu.failed_imports.append(gpa, .{
2206                        .file_index = file_idx,
2207                        .import_string = item.data.name,
2208                        .import_token = item.data.token,
2209                        .kind = .illegal_zig_import,
2210                    });
2211                    continue;
2212                },
2213            };
2214
2215            // If the import was not of a module, we propagate our own module.
2216            const imported_mod = res.module_root orelse file.mod.?;
2217            const imported_file = zcu.fileByIndex(res.file);
2218
2219            const imported_ref: Zcu.File.Reference = .{ .import = .{
2220                .importer = file_idx,
2221                .tok = item.data.token,
2222                .module = res.module_root,
2223            } };
2224
2225            const gop = zcu.alive_files.getOrPutAssumeCapacity(res.file);
2226            if (gop.found_existing) {
2227                // This means `imported_file.mod` is already populated. If it doesn't match
2228                // `imported_mod`, then this file exists in multiple modules.
2229                if (imported_file.mod.? != imported_mod) {
2230                    // We only report the first multi-module error we see. Thanks to this traversal
2231                    // being deterministic, this doesn't raise consistency issues. Moreover, it's a
2232                    // useful behavior; we know that this error can be reached *without* realizing
2233                    // that any other files are multi-module, so it's probably approximately where
2234                    // the problem "begins". Any compilation with a multi-module file is likely to
2235                    // have a huge number of them by transitive imports, so just reporting this one
2236                    // hopefully keeps the error focused.
2237                    zcu.multi_module_err = .{
2238                        .file = file_idx,
2239                        .modules = .{ imported_file.mod.?, imported_mod },
2240                        .refs = .{ gop.value_ptr.*, imported_ref },
2241                    };
2242                    // If we discover a multi-module error, it's the only error which matters, and we
2243                    // can't discern any useful information about the file's own imports; so just do
2244                    // an early exit now we've populated `zcu.multi_module_err`.
2245                    return any_fatal_files;
2246                }
2247                continue;
2248            }
2249            // We're the first thing we've found referencing `res.file`.
2250            gop.value_ptr.* = imported_ref;
2251            if (imported_file.mod) |m| {
2252                if (m == imported_mod) {
2253                    // Great, the module and sub path are already populated correctly.
2254                    continue;
2255                }
2256            }
2257            // We need to set the file's module, meaning we also need to compute its sub path.
2258            // This string is externally managed and has a lifetime at least equal to the
2259            // lifetime of `imported_file`. `null` means the file is outside its module root.
2260            switch (imported_file.path.isNested(imported_mod.root)) {
2261                .yes => |sub_path| {
2262                    if (imported_file.mod != null) {
2263                        // There was a module from a previous update; instruct `updateZirRefs` to
2264                        // invalidate everything.
2265                        imported_file.module_changed = true;
2266                    }
2267                    imported_file.mod = imported_mod;
2268                    imported_file.sub_file_path = sub_path;
2269                },
2270                .different_roots, .no => {
2271                    try zcu.failed_imports.append(gpa, .{
2272                        .file_index = file_idx,
2273                        .import_string = item.data.name,
2274                        .import_token = item.data.token,
2275                        .kind = .file_outside_module_root,
2276                    });
2277                    _ = zcu.alive_files.pop(); // we failed to populate `mod`/`sub_file_path`
2278                },
2279            }
2280        }
2281    }
2282
2283    return any_fatal_files;
2284}
2285
2286/// Ensures that the `@import("builtin")` module corresponding to `opts` is available in
2287/// `builtin_modules`, and that its file is populated. Also ensures the file on disk is
2288/// up-to-date, setting a misc failure if updating it fails.
2289/// Asserts that the imported `builtin.zig` has no ZIR errors, and that it has only one
2290/// import, which is 'std'.
2291pub fn updateBuiltinModule(pt: Zcu.PerThread, opts: Builtin) Allocator.Error!void {
2292    const zcu = pt.zcu;
2293    const comp = zcu.comp;
2294    const gpa = zcu.gpa;
2295
2296    const gop = try zcu.builtin_modules.getOrPut(gpa, opts.hash());
2297    if (gop.found_existing) return; // the `File` is up-to-date
2298    errdefer _ = zcu.builtin_modules.pop();
2299
2300    const mod: *Module = try .createBuiltin(comp.arena, opts, comp.dirs);
2301    assert(std.mem.eql(u8, &mod.getBuiltinOptions(comp.config).hash(), gop.key_ptr)); // builtin is its own builtin
2302
2303    const path = try mod.root.join(gpa, comp.dirs, "builtin.zig");
2304    errdefer path.deinit(gpa);
2305
2306    const file_gop = try zcu.import_table.getOrPutAdapted(gpa, path, Zcu.ImportTableAdapter{ .zcu = zcu });
2307    // File creation for this path is guarded by the `Compilation.Path.isIllegalZigImport`
2308    // checks, so there isn't an `import_table` entry for it yet.
2309    assert(!file_gop.found_existing);
2310    errdefer _ = zcu.import_table.pop();
2311
2312    try zcu.module_roots.ensureUnusedCapacity(gpa, 1);
2313
2314    const file = try gpa.create(Zcu.File);
2315    errdefer gpa.destroy(file);
2316
2317    file.* = .{
2318        .status = .never_loaded,
2319        .stat = undefined,
2320        .path = path,
2321        .is_builtin = true,
2322        .source = null,
2323        .tree = null,
2324        .zir = null,
2325        .zoir = null,
2326        .mod = mod,
2327        .sub_file_path = "builtin.zig",
2328        .module_changed = false,
2329        .prev_zir = null,
2330        .zoir_invalidated = false,
2331    };
2332
2333    const file_index = try zcu.intern_pool.createFile(gpa, pt.tid, .{
2334        .bin_digest = path.digest(),
2335        .file = file,
2336        .root_type = .none,
2337    });
2338
2339    gop.value_ptr.* = mod;
2340    file_gop.key_ptr.* = file_index;
2341    zcu.module_roots.putAssumeCapacityNoClobber(mod, file_index.toOptional());
2342    try opts.populateFile(gpa, file);
2343
2344    assert(file.status == .success);
2345    assert(!file.zir.?.hasCompileErrors());
2346    {
2347        // Check that it has only one import, which is 'std'.
2348        const imports_idx = file.zir.?.extra[@intFromEnum(Zir.ExtraIndex.imports)];
2349        assert(imports_idx != 0); // there is an import
2350        const extra = file.zir.?.extraData(Zir.Inst.Imports, imports_idx);
2351        assert(extra.data.imports_len == 1); // there is exactly one import
2352        const item = file.zir.?.extraData(Zir.Inst.Imports.Item, extra.end);
2353        const import_path = file.zir.?.nullTerminatedString(item.data.name);
2354        assert(mem.eql(u8, import_path, "std")); // the single import is of 'std'
2355    }
2356
2357    Builtin.updateFileOnDisk(file, comp) catch |err| comp.setMiscFailure(
2358        .write_builtin_zig,
2359        "unable to write '{f}': {s}",
2360        .{ file.path.fmt(comp), @errorName(err) },
2361    );
2362}
2363
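/// Resolves an `@embedFile` operand relative to `cur_file`, creating a `Zcu.EmbedFile` and loading
/// its contents if this path has not been embedded before. Returns an error if the embedded file
/// lies outside the importing module's root.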
2364pub fn embedFile(
2365    pt: Zcu.PerThread,
2366    cur_file: *Zcu.File,
2367    import_string: []const u8,
2368) error{
2369    OutOfMemory,
2370    Canceled,
2371    ImportOutsideModulePath,
2372    CurrentWorkingDirectoryUnlinked,
2373}!Zcu.EmbedFile.Index {
2374    const zcu = pt.zcu;
2375    const gpa = zcu.gpa;
2376
2377    const opt_mod: ?*Module = m: {
2378        if (mem.eql(u8, import_string, "std")) break :m zcu.std_mod;
2379        if (mem.eql(u8, import_string, "root")) break :m zcu.root_mod;
2380        if (mem.eql(u8, import_string, "builtin")) {
2381            const opts = cur_file.mod.?.getBuiltinOptions(zcu.comp.config);
2382            break :m zcu.builtin_modules.get(opts.hash()).?;
2383        }
2384        break :m cur_file.mod.?.deps.get(import_string);
2385    };
2386    if (opt_mod) |mod| {
2387        const path = try mod.root.join(gpa, zcu.comp.dirs, mod.root_src_path);
2388        errdefer path.deinit(gpa);
2389
2390        const gop = try zcu.embed_table.getOrPutAdapted(gpa, path, Zcu.EmbedTableAdapter{});
2391        if (gop.found_existing) {
2392            path.deinit(gpa); // we're not using this key
2393            return @enumFromInt(gop.index);
2394        }
2395        errdefer _ = zcu.embed_table.pop();
2396        gop.key_ptr.* = try pt.newEmbedFile(path);
2397        return @enumFromInt(gop.index);
2398    }
2399
2400    const embed_file: *Zcu.EmbedFile, const embed_file_idx: Zcu.EmbedFile.Index = ef: {
2401        const path = try cur_file.path.upJoin(gpa, zcu.comp.dirs, import_string);
2402        errdefer path.deinit(gpa);
2403        const gop = try zcu.embed_table.getOrPutAdapted(gpa, path, Zcu.EmbedTableAdapter{});
2404        if (gop.found_existing) {
2405            path.deinit(gpa); // we're not using this key
2406            break :ef .{ gop.key_ptr.*, @enumFromInt(gop.index) };
2407        } else {
2408            errdefer _ = zcu.embed_table.pop();
2409            gop.key_ptr.* = try pt.newEmbedFile(path);
2410            break :ef .{ gop.key_ptr.*, @enumFromInt(gop.index) };
2411        }
2412    };
2413
2414    switch (embed_file.path.isNested(cur_file.mod.?.root)) {
2415        .yes => {},
2416        .different_roots, .no => return error.ImportOutsideModulePath,
2417    }
2418
2419    return embed_file_idx;
2420}
2421
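/// (Re)loads the contents of `ef` from disk, interning the bytes and updating `ef.val` and
/// `ef.stat`. On failure other than OOM, the error is recorded in `ef.err` rather than returned.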
2422pub fn updateEmbedFile(
2423    pt: Zcu.PerThread,
2424    ef: *Zcu.EmbedFile,
2425    /// If not `null`, the interned file data is stored here, if it was loaded.
2426    /// `newEmbedFile` uses this to add the file to the `whole` cache manifest.
2427    ip_str_out: ?*?InternPool.String,
2428) Allocator.Error!void {
2429    pt.updateEmbedFileInner(ef, ip_str_out) catch |err| switch (err) {
2430        error.OutOfMemory => |e| return e,
2431        else => |e| {
2432            ef.val = .none;
2433            ef.err = e;
2434            ef.stat = undefined;
2435        },
2436    };
2437}
2438
2439fn updateEmbedFileInner(
2440    pt: Zcu.PerThread,
2441    ef: *Zcu.EmbedFile,
2442    ip_str_out: ?*?InternPool.String,
2443) !void {
2444    const tid = pt.tid;
2445    const zcu = pt.zcu;
2446    const gpa = zcu.gpa;
2447    const io = zcu.comp.io;
2448    const ip = &zcu.intern_pool;
2449
2450    var file = f: {
2451        const dir, const sub_path = ef.path.openInfo(zcu.comp.dirs);
2452        break :f try dir.openFile(sub_path, .{});
2453    };
2454    defer file.close();
2455
2456    const stat: Cache.File.Stat = .fromFs(try file.stat());
2457
2458    if (ef.val != .none) {
2459        const old_stat = ef.stat;
2460        const unchanged_metadata =
2461            stat.size == old_stat.size and
2462            stat.mtime.nanoseconds == old_stat.mtime.nanoseconds and
2463            stat.inode == old_stat.inode;
2464        if (unchanged_metadata) return;
2465    }
2466
2467    const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig;
2468    const size_plus_one = std.math.add(usize, size, 1) catch return error.FileTooBig;
2469
2470    // The loaded bytes of the file, including a sentinel 0 byte.
2471    const ip_str: InternPool.String = str: {
2472        const string_bytes = ip.getLocal(tid).getMutableStringBytes(gpa);
2473        const old_len = string_bytes.mutate.len;
2474        errdefer string_bytes.shrinkRetainingCapacity(old_len);
2475        const bytes = (try string_bytes.addManyAsSlice(size_plus_one))[0];
2476        var fr = file.reader(io, &.{});
2477        fr.size = stat.size;
2478        fr.interface.readSliceAll(bytes[0..size]) catch |err| switch (err) {
2479            error.ReadFailed => return fr.err.?,
2480            error.EndOfStream => return error.UnexpectedEof,
2481        };
2482        bytes[size] = 0;
2483        break :str try ip.getOrPutTrailingString(gpa, tid, @intCast(bytes.len), .maybe_embedded_nulls);
2484    };
2485    if (ip_str_out) |p| p.* = ip_str;
2486
2487    const array_ty = try pt.arrayType(.{
2488        .len = size,
2489        .sentinel = .zero_u8,
2490        .child = .u8_type,
2491    });
2492    const ptr_ty = try pt.singleConstPtrType(array_ty);
2493
2494    const array_val = try pt.intern(.{ .aggregate = .{
2495        .ty = array_ty.toIntern(),
2496        .storage = .{ .bytes = ip_str },
2497    } });
2498    const ptr_val = try pt.intern(.{ .ptr = .{
2499        .ty = ptr_ty.toIntern(),
2500        .base_addr = .{ .uav = .{
2501            .val = array_val,
2502            .orig_ty = ptr_ty.toIntern(),
2503        } },
2504        .byte_offset = 0,
2505    } });
2506
2507    ef.val = ptr_val;
2508    ef.err = null;
2509    ef.stat = stat;
2510}
2511
2512/// Assumes that `path` is allocated into `gpa`. Takes ownership of `path` on success.
2513fn newEmbedFile(
2514    pt: Zcu.PerThread,
2515    path: Compilation.Path,
2516) !*Zcu.EmbedFile {
2517    const zcu = pt.zcu;
2518    const comp = zcu.comp;
2519    const gpa = zcu.gpa;
2520    const ip = &zcu.intern_pool;
2521
2522    const new_file = try gpa.create(Zcu.EmbedFile);
2523    errdefer gpa.destroy(new_file);
2524
2525    new_file.* = .{
2526        .path = path,
2527        .val = .none,
2528        .err = null,
2529        .stat = undefined,
2530    };
2531
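        // Note that `updateEmbedFile` records any read failure in `new_file.err` rather than
        // returning it; only OOM propagates from the call below.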
2532    var opt_ip_str: ?InternPool.String = null;
2533    try pt.updateEmbedFile(new_file, &opt_ip_str);
2534
2535    try comp.appendFileSystemInput(path);
2536
2537    // Add the file contents to the `whole` cache manifest if necessary.
2538    cache: {
2539        const whole = switch (zcu.comp.cache_use) {
2540            .whole => |whole| whole,
2541            .incremental, .none => break :cache,
2542        };
2543        const man = whole.cache_manifest orelse break :cache;
2544        const ip_str = opt_ip_str orelse break :cache; // this will be a compile error
2545
2546        const array_len = Value.fromInterned(new_file.val).typeOf(zcu).childType(zcu).arrayLen(zcu);
2547        const contents = ip_str.toSlice(array_len, ip);
2548
2549        const path_str = try path.toAbsolute(comp.dirs, gpa);
2550        defer gpa.free(path_str);
2551
2552        whole.cache_manifest_mutex.lock();
2553        defer whole.cache_manifest_mutex.unlock();
2554
2555        man.addFilePostContents(path_str, contents, new_file.stat) catch |err| switch (err) {
2556            error.Unexpected => unreachable,
2557            else => |e| return e,
2558        };
2559    }
2560
2561    return new_file;
2562}
2563
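    /// Scans the declarations of the container owning `namespace_index`, repopulating the namespace's
    /// decl maps and queueing analysis for newly discovered `comptime` units, qualifying tests, and
    /// exported declarations.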
2564pub fn scanNamespace(
2565    pt: Zcu.PerThread,
2566    namespace_index: Zcu.Namespace.Index,
2567    decls: []const Zir.Inst.Index,
2568) Allocator.Error!void {
2569    const tracy = trace(@src());
2570    defer tracy.end();
2571
2572    const zcu = pt.zcu;
2573    const ip = &zcu.intern_pool;
2574    const gpa = zcu.gpa;
2575    const namespace = zcu.namespacePtr(namespace_index);
2576
2577    const tracked_unit = zcu.trackUnitSema(
2578        Type.fromInterned(namespace.owner_type).containerTypeName(ip).toSlice(ip),
2579        null,
2580    );
2581    defer tracked_unit.end(zcu);
2582
2583    // For incremental updates, `scanDecl` wants to look up existing decls by their ZIR index rather
2584    // than their name. We'll build an efficient mapping now, then discard the current `decls`.
2585    // We map to the `AnalUnit`, since not every declaration has a `Nav`.
2586    var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, InternPool.AnalUnit) = .empty;
2587    defer existing_by_inst.deinit(gpa);
2588
2589    try existing_by_inst.ensureTotalCapacity(gpa, @intCast(
2590        namespace.pub_decls.count() + namespace.priv_decls.count() +
2591            namespace.comptime_decls.items.len +
2592            namespace.test_decls.items.len,
2593    ));
2594
2595    for (namespace.pub_decls.keys()) |nav| {
2596        const zir_index = ip.getNav(nav).analysis.?.zir_index;
2597        existing_by_inst.putAssumeCapacityNoClobber(zir_index, .wrap(.{ .nav_val = nav }));
2598    }
2599    for (namespace.priv_decls.keys()) |nav| {
2600        const zir_index = ip.getNav(nav).analysis.?.zir_index;
2601        existing_by_inst.putAssumeCapacityNoClobber(zir_index, .wrap(.{ .nav_val = nav }));
2602    }
2603    for (namespace.comptime_decls.items) |cu| {
2604        const zir_index = ip.getComptimeUnit(cu).zir_index;
2605        existing_by_inst.putAssumeCapacityNoClobber(zir_index, .wrap(.{ .@"comptime" = cu }));
2606    }
2607    for (namespace.test_decls.items) |nav| {
2608        const zir_index = ip.getNav(nav).analysis.?.zir_index;
2609        existing_by_inst.putAssumeCapacityNoClobber(zir_index, .wrap(.{ .nav_val = nav }));
2610        // This test will be re-added to `test_functions` later on if it's still alive. Remove it for now.
2611        _ = zcu.test_functions.swapRemove(nav);
2612    }
2613
2614    var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .empty;
2615    defer seen_decls.deinit(gpa);
2616
2617    namespace.pub_decls.clearRetainingCapacity();
2618    namespace.priv_decls.clearRetainingCapacity();
2619    namespace.comptime_decls.clearRetainingCapacity();
2620    namespace.test_decls.clearRetainingCapacity();
2621
2622    var scan_decl_iter: ScanDeclIter = .{
2623        .pt = pt,
2624        .namespace_index = namespace_index,
2625        .seen_decls = &seen_decls,
2626        .existing_by_inst = &existing_by_inst,
2627        .pass = .named,
2628    };
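        // First pass handles named (`const`/`var`) decls so their names are claimed first; the second
        // pass handles comptime decls and tests, whose generated names are adjusted to avoid clashes.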
2629    for (decls) |decl_inst| {
2630        try scan_decl_iter.scanDecl(decl_inst);
2631    }
2632    scan_decl_iter.pass = .unnamed;
2633    for (decls) |decl_inst| {
2634        try scan_decl_iter.scanDecl(decl_inst);
2635    }
2636}
2637
2638const ScanDeclIter = struct {
2639    pt: Zcu.PerThread,
2640    namespace_index: Zcu.Namespace.Index,
2641    seen_decls: *std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void),
2642    existing_by_inst: *const std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, InternPool.AnalUnit),
2643    /// Decl scanning is run in two passes, so that we can detect when a generated
2644    /// name would clash with an explicit name and use a different one.
2645    pass: enum { named, unnamed },
2646    unnamed_test_index: usize = 0,
2647
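        /// Builds a name from `fmt`/`args`, appending numeric suffixes until it doesn't collide with an
        /// already-seen decl name (e.g. a clash on `test_0` yields `test_0_0`, then `test_0_0_1`, ...).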
2648    fn avoidNameConflict(iter: *ScanDeclIter, comptime fmt: []const u8, args: anytype) !InternPool.NullTerminatedString {
2649        const pt = iter.pt;
2650        const gpa = pt.zcu.gpa;
2651        const ip = &pt.zcu.intern_pool;
2652        var name = try ip.getOrPutStringFmt(gpa, pt.tid, fmt, args, .no_embedded_nulls);
2653        var gop = try iter.seen_decls.getOrPut(gpa, name);
2654        var next_suffix: u32 = 0;
2655        while (gop.found_existing) {
2656            name = try ip.getOrPutStringFmt(gpa, pt.tid, "{f}_{d}", .{ name.fmt(ip), next_suffix }, .no_embedded_nulls);
2657            gop = try iter.seen_decls.getOrPut(gpa, name);
2658            next_suffix += 1;
2659        }
2660        return name;
2661    }
2662
2663    fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void {
2664        const tracy = trace(@src());
2665        defer tracy.end();
2666
2667        const pt = iter.pt;
2668        const zcu = pt.zcu;
2669        const comp = zcu.comp;
2670        const namespace_index = iter.namespace_index;
2671        const namespace = zcu.namespacePtr(namespace_index);
2672        const gpa = zcu.gpa;
2673        const file = namespace.fileScope(zcu);
2674        const zir = file.zir.?;
2675        const ip = &zcu.intern_pool;
2676
2677        const decl = zir.getDeclaration(decl_inst);
2678
2679        const maybe_name: InternPool.OptionalNullTerminatedString = switch (decl.kind) {
2680            .@"comptime" => name: {
2681                if (iter.pass != .unnamed) return;
2682                break :name .none;
2683            },
2684            .unnamed_test => name: {
2685                if (iter.pass != .unnamed) return;
2686                const i = iter.unnamed_test_index;
2687                iter.unnamed_test_index += 1;
2688                break :name (try iter.avoidNameConflict("test_{d}", .{i})).toOptional();
2689            },
2690            .@"test", .decltest => |kind| name: {
2691                // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary.
2692                if (iter.pass != .unnamed) return;
2693                const prefix = @tagName(kind);
2694                break :name (try iter.avoidNameConflict("{s}.{s}", .{ prefix, zir.nullTerminatedString(decl.name) })).toOptional();
2695            },
2696            .@"const", .@"var" => name: {
2697                if (iter.pass != .named) return;
2698                const name = try ip.getOrPutString(
2699                    gpa,
2700                    pt.tid,
2701                    zir.nullTerminatedString(decl.name),
2702                    .no_embedded_nulls,
2703                );
2704                try iter.seen_decls.putNoClobber(gpa, name, {});
2705                break :name name.toOptional();
2706            },
2707        };
2708
2709        const tracked_inst = try ip.trackZir(gpa, pt.tid, .{
2710            .file = namespace.file_scope,
2711            .inst = decl_inst,
2712        });
2713
2714        const existing_unit = iter.existing_by_inst.get(tracked_inst);
2715
2716        const unit, const want_analysis = switch (decl.kind) {
2717            .@"comptime" => unit: {
2718                const cu = if (existing_unit) |eu|
2719                    eu.unwrap().@"comptime"
2720                else
2721                    try ip.createComptimeUnit(gpa, pt.tid, tracked_inst, namespace_index);
2722
2723                const unit: AnalUnit = .wrap(.{ .@"comptime" = cu });
2724
2725                try namespace.comptime_decls.append(gpa, cu);
2726
2727                if (existing_unit == null) {
2728                    // For a `comptime` declaration, whether to analyze is based solely on whether the unit
2729                    // is outdated. So, add this fresh one to `outdated` and `outdated_ready`.
2730                    try zcu.outdated.ensureUnusedCapacity(gpa, 1);
2731                    try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1);
2732                    zcu.outdated.putAssumeCapacityNoClobber(unit, 0);
2733                    zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {});
2734                }
2735
2736                break :unit .{ unit, true };
2737            },
2738            else => unit: {
2739                const name = maybe_name.unwrap().?;
2740                const fqn = try namespace.internFullyQualifiedName(ip, gpa, pt.tid, name);
2741                const nav = if (existing_unit) |eu| eu.unwrap().nav_val else nav: {
2742                    const nav = try ip.createDeclNav(gpa, pt.tid, name, fqn, tracked_inst, namespace_index);
2743                    if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newNav(zcu, nav);
2744                    break :nav nav;
2745                };
2746
2747                const unit: AnalUnit = .wrap(.{ .nav_val = nav });
2748
2749                assert(ip.getNav(nav).name == name);
2750                assert(ip.getNav(nav).fqn == fqn);
2751
2752                const want_analysis = switch (decl.kind) {
2753                    .@"comptime" => unreachable,
2754                    .unnamed_test, .@"test", .decltest => a: {
2755                        const is_named = decl.kind != .unnamed_test;
2756                        try namespace.test_decls.append(gpa, nav);
2757                        // TODO: incremental compilation!
2758                        // * remove from `test_functions` if no longer matching filter
2759                        // * add to `test_functions` if newly passing filter
2760                        // This logic is unaware of incremental compilation: we'll end up with duplicates.
2761                        // Perhaps we should add all tests indiscriminately and filter at the end of the update.
2762                        if (!comp.config.is_test) break :a false;
2763                        if (file.mod != zcu.main_mod) break :a false;
2764                        if (is_named and comp.test_filters.len > 0) {
2765                            const fqn_slice = fqn.toSlice(ip);
2766                            for (comp.test_filters) |test_filter| {
2767                                if (std.mem.indexOf(u8, fqn_slice, test_filter) != null) break;
2768                            } else break :a false;
2769                        }
2770                        try zcu.test_functions.put(gpa, nav, {});
2771                        break :a true;
2772                    },
2773                    .@"const", .@"var" => a: {
2774                        if (decl.is_pub) {
2775                            try namespace.pub_decls.putContext(gpa, nav, {}, .{ .zcu = zcu });
2776                        } else {
2777                            try namespace.priv_decls.putContext(gpa, nav, {}, .{ .zcu = zcu });
2778                        }
2779                        break :a false;
2780                    },
2781                };
2782                break :unit .{ unit, want_analysis };
2783            },
2784        };
2785
2786        if (existing_unit == null and (want_analysis or decl.linkage == .@"export")) {
2787            log.debug(
2788                "scanDecl queue analyze_comptime_unit file='{s}' unit={f}",
2789                .{ namespace.fileScope(zcu).sub_file_path, zcu.fmtAnalUnit(unit) },
2790            );
2791            try comp.queueJob(.{ .analyze_comptime_unit = unit });
2792        }
2793    }
2794};
2795
2796fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError!Air {
2797    const tracy = trace(@src());
2798    defer tracy.end();
2799
2800    const zcu = pt.zcu;
2801    const gpa = zcu.gpa;
2802    const ip = &zcu.intern_pool;
2803
2804    const anal_unit = AnalUnit.wrap(.{ .func = func_index });
2805    const func = zcu.funcInfo(func_index);
2806    const inst_info = func.zir_body_inst.resolveFull(ip) orelse return error.AnalysisFail;
2807    const file = zcu.fileByIndex(inst_info.file);
2808    const zir = file.zir.?;
2809
2810    try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, {});
2811    errdefer _ = zcu.analysis_in_progress.swapRemove(anal_unit);
2812
2813    func.setAnalyzed(ip);
2814    if (func.analysisUnordered(ip).inferred_error_set) {
2815        func.setResolvedErrorSet(ip, .none);
2816    }
2817
2818    if (zcu.comp.time_report) |*tr| {
2819        if (func.generic_owner != .none) {
2820            tr.stats.n_generic_instances += 1;
2821        }
2822    }
2823
2824    // This is the `Nav` corresponding to the `declaration` instruction which the function or its generic owner originates from.
2825    const decl_nav = ip.getNav(if (func.generic_owner == .none)
2826        func.owner_nav
2827    else
2828        zcu.funcInfo(func.generic_owner).owner_nav);
2829
2830    const func_nav = ip.getNav(func.owner_nav);
2831
2832    zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit);
2833
2834    var analysis_arena = std.heap.ArenaAllocator.init(gpa);
2835    defer analysis_arena.deinit();
2836
2837    var comptime_err_ret_trace = std.array_list.Managed(Zcu.LazySrcLoc).init(gpa);
2838    defer comptime_err_ret_trace.deinit();
2839
2840    // In the case of a generic function instance, this is the type of the
2841    // instance, which has comptime parameters elided. In other words, it is
2842    // the runtime-known parameters only, not to be confused with the
2843    // generic_owner function type, which potentially has more parameters,
2844    // including comptime parameters.
2845    const fn_ty = Type.fromInterned(func.ty);
2846    const fn_ty_info = zcu.typeToFunc(fn_ty).?;
2847
2848    var sema: Sema = .{
2849        .pt = pt,
2850        .gpa = gpa,
2851        .arena = analysis_arena.allocator(),
2852        .code = zir,
2853        .owner = anal_unit,
2854        .func_index = func_index,
2855        .func_is_naked = fn_ty_info.cc == .naked,
2856        .fn_ret_ty = Type.fromInterned(fn_ty_info.return_type),
2857        .fn_ret_ty_ies = null,
2858        .branch_quota = @max(func.branchQuotaUnordered(ip), Sema.default_branch_quota),
2859        .comptime_err_ret_trace = &comptime_err_ret_trace,
2860    };
2861    defer sema.deinit();
2862
2863    // Every runtime function has a dependency on the source of the Decl it originates from.
2864    // It also depends on the value of its owner Decl.
2865    try sema.declareDependency(.{ .src_hash = decl_nav.analysis.?.zir_index });
2866    try sema.declareDependency(.{ .nav_val = func.owner_nav });
2867
2868    if (func.analysisUnordered(ip).inferred_error_set) {
2869        const ies = try analysis_arena.allocator().create(Sema.InferredErrorSet);
2870        ies.* = .{ .func = func_index };
2871        sema.fn_ret_ty_ies = ies;
2872    }
2873
2874    // reset in case calls to errorable functions are removed.
2875    ip.funcSetHasErrorTrace(func_index, fn_ty_info.cc == .auto);
2876
2877    // First few indexes of extra are reserved and set at the end.
2878    const reserved_count = @typeInfo(Air.ExtraIndex).@"enum".fields.len;
2879    try sema.air_extra.ensureTotalCapacity(gpa, reserved_count);
2880    sema.air_extra.items.len += reserved_count;
2881
2882    var inner_block: Sema.Block = .{
2883        .parent = null,
2884        .sema = &sema,
2885        .namespace = decl_nav.analysis.?.namespace,
2886        .instructions = .{},
2887        .inlining = null,
2888        .comptime_reason = null,
2889        .src_base_inst = decl_nav.analysis.?.zir_index,
2890        .type_name_ctx = func_nav.fqn,
2891    };
2892    defer inner_block.instructions.deinit(gpa);
2893
2894    const fn_info = sema.code.getFnInfo(func.zirBodyInstUnordered(ip).resolve(ip) orelse return error.AnalysisFail);
2895
2896    // Here we are performing "runtime semantic analysis" for a function body, which means
2897    // we must map the parameter ZIR instructions to `arg` AIR instructions.
2898    // AIR requires the `arg` parameters to be the first N instructions.
2899    // This could be a generic function instantiation, however, in which case we need to
2900    // map the comptime parameters to constant values and only emit arg AIR instructions
2901    // for the runtime ones.
2902    const runtime_params_len = fn_ty_info.param_types.len;
2903    try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len);
2904    try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len);
2905    try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
2906
2907    // In the case of a generic function instance, pre-populate all the comptime args.
2908    if (func.comptime_args.len != 0) {
2909        for (
2910            fn_info.param_body[0..func.comptime_args.len],
2911            func.comptime_args.get(ip),
2912        ) |inst, comptime_arg| {
2913            if (comptime_arg == .none) continue;
2914            sema.inst_map.putAssumeCapacityNoClobber(inst, Air.internedToRef(comptime_arg));
2915        }
2916    }
2917
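        // Number of source parameters to iterate: for a generic instance this is `comptime_args.len`
        // (covering comptime and runtime parameters alike); otherwise it is the runtime parameter count.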
2918    const src_params_len = if (func.comptime_args.len != 0)
2919        func.comptime_args.len
2920    else
2921        runtime_params_len;
2922
2923    var runtime_param_index: usize = 0;
2924    for (fn_info.param_body[0..src_params_len], 0..) |inst, zir_param_index| {
2925        const gop = sema.inst_map.getOrPutAssumeCapacity(inst);
2926        if (gop.found_existing) continue; // provided above by comptime arg
2927
2928        const param_ty = fn_ty_info.param_types.get(ip)[runtime_param_index];
2929        runtime_param_index += 1;
2930
2931        const opt_opv = sema.typeHasOnePossibleValue(Type.fromInterned(param_ty)) catch |err| switch (err) {
2932            error.ComptimeReturn => unreachable,
2933            error.ComptimeBreak => unreachable,
2934            else => |e| return e,
2935        };
2936        if (opt_opv) |opv| {
2937            gop.value_ptr.* = Air.internedToRef(opv.toIntern());
2938            continue;
2939        }
2940        const arg_index: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
2941        gop.value_ptr.* = arg_index.toRef();
2942        inner_block.instructions.appendAssumeCapacity(arg_index);
2943        sema.air_instructions.appendAssumeCapacity(.{
2944            .tag = .arg,
2945            .data = .{ .arg = .{
2946                .ty = Air.internedToRef(param_ty),
2947                .zir_param_index = @intCast(zir_param_index),
2948            } },
2949        });
2950    }
2951
2952    const last_arg_index = inner_block.instructions.items.len;
2953
2954    // Save the error trace as our first action in the function.
2955    // If this is unnecessary after all, Liveness will clean it up for us.
2956    const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&inner_block);
2957    sema.error_return_trace_index_on_fn_entry = error_return_trace_index;
2958    inner_block.error_return_trace_index = error_return_trace_index;
2959
2960    sema.analyzeFnBody(&inner_block, fn_info.body) catch |err| switch (err) {
2961        error.ComptimeReturn => unreachable,
2962        else => |e| return e,
2963    };
2964
2965    for (sema.unresolved_inferred_allocs.keys()) |ptr_inst| {
2966        // The lack of a resolve_inferred_alloc means that this instruction
2967        // is unused, so it just has to be a no-op.
2968        sema.air_instructions.set(@intFromEnum(ptr_inst), .{
2969            .tag = .alloc,
2970            .data = .{ .ty = .ptr_const_comptime_int },
2971        });
2972    }
2973
2974    func.setBranchHint(ip, sema.branch_hint orelse .none);
2975
2976    if (zcu.comp.config.any_error_tracing and func.analysisUnordered(ip).has_error_trace and fn_ty_info.cc != .auto) {
2977        // We're using an error trace, but didn't start out with one from the caller.
2978        // We'll have to create it at the start of the function.
2979        sema.setupErrorReturnTrace(&inner_block, last_arg_index) catch |err| switch (err) {
2980            error.ComptimeReturn => unreachable,
2981            error.ComptimeBreak => unreachable,
2982            else => |e| return e,
2983        };
2984    }
2985
2986    // Copy the block into place and mark that as the main block.
2987    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).@"struct".fields.len +
2988        inner_block.instructions.items.len);
2989    const main_block_index = sema.addExtraAssumeCapacity(Air.Block{
2990        .body_len = @intCast(inner_block.instructions.items.len),
2991    });
2992    sema.air_extra.appendSliceAssumeCapacity(@ptrCast(inner_block.instructions.items));
2993    sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index;
2994
2995    // Resolving inferred error sets is done *before* setting the function
2996    // state to success, so that "unable to resolve inferred error set" errors
2997    // can be emitted here.
2998    if (sema.fn_ret_ty_ies) |ies| {
2999        sema.resolveInferredErrorSetPtr(&inner_block, .{
3000            .base_node_inst = inner_block.src_base_inst,
3001            .offset = Zcu.LazySrcLoc.Offset.nodeOffset(.zero),
3002        }, ies) catch |err| switch (err) {
3003            error.ComptimeReturn => unreachable,
3004            error.ComptimeBreak => unreachable,
3005            else => |e| return e,
3006        };
3007        assert(ies.resolved != .none);
3008        func.setResolvedErrorSet(ip, ies.resolved);
3009    }
3010
3011    assert(zcu.analysis_in_progress.swapRemove(anal_unit));
3012
3013    // Finally we must resolve the return type and parameter types so that backends
3014    // have full access to type information.
3015    // Crucially, this happens *after* we set the function state to success above,
3016    // so that dependencies on the function body will now be satisfied rather than
3017    // result in circular dependency errors.
3018    // TODO: this can go away once we fix backends having to resolve `StackTrace`.
3019    // The codegen timing guarantees that the parameter types will be populated.
3020    sema.resolveFnTypes(fn_ty, inner_block.nodeOffset(.zero)) catch |err| switch (err) {
3021        error.ComptimeReturn => unreachable,
3022        error.ComptimeBreak => unreachable,
3023        else => |e| return e,
3024    };
3025
3026    try sema.flushExports();
3027
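        // Once the return value below has captured the AIR buffers, reset them so that the earlier
        // `defer sema.deinit()` does not free memory now owned by the returned `Air`.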
3028    defer {
3029        sema.air_instructions = .empty;
3030        sema.air_extra = .empty;
3031    }
3032    return .{
3033        .instructions = sema.air_instructions.slice(),
3034        .extra = sema.air_extra,
3035    };
3036}
3037
3038pub fn createNamespace(pt: Zcu.PerThread, initialization: Zcu.Namespace) !Zcu.Namespace.Index {
3039    return pt.zcu.intern_pool.createNamespace(pt.zcu.gpa, pt.tid, initialization);
3040}
3041
3042pub fn destroyNamespace(pt: Zcu.PerThread, namespace_index: Zcu.Namespace.Index) void {
3043    return pt.zcu.intern_pool.destroyNamespace(pt.tid, namespace_index);
3044}
3045
3046pub fn getErrorValue(
3047    pt: Zcu.PerThread,
3048    name: InternPool.NullTerminatedString,
3049) Allocator.Error!Zcu.ErrorInt {
3050    return pt.zcu.intern_pool.getErrorValue(pt.zcu.gpa, pt.tid, name);
3051}
3052
3053pub fn getErrorValueFromSlice(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Zcu.ErrorInt {
3054    return pt.getErrorValue(try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, name));
3055}
3056
3057/// Removes any entry from `Zcu.failed_files` associated with `file`. Acquires `Compilation.mutex` as needed.
3058/// `file.zir` must be unchanged from the last update, as it is used to determine if there is such an entry.
3059fn lockAndClearFileCompileError(pt: Zcu.PerThread, file_index: Zcu.File.Index, file: *Zcu.File) void {
3060    const maybe_has_error = switch (file.status) {
3061        .never_loaded => false,
3062        .retryable_failure => true,
3063        .astgen_failure => true,
3064        .success => switch (file.getMode()) {
3065            .zig => has_error: {
3066                const zir = file.zir orelse break :has_error false;
3067                break :has_error zir.hasCompileErrors();
3068            },
3069            .zon => has_error: {
3070                const zoir = file.zoir orelse break :has_error false;
3071                break :has_error zoir.hasCompileErrors();
3072            },
3073        },
3074    };
3075
3076    // If runtime safety is on, let's quickly lock the mutex and check anyway.
3077    if (!maybe_has_error and !std.debug.runtime_safety) {
3078        return;
3079    }
3080
3081    pt.zcu.comp.mutex.lock();
3082    defer pt.zcu.comp.mutex.unlock();
3083    if (pt.zcu.failed_files.fetchSwapRemove(file_index)) |kv| {
3084        assert(maybe_has_error); // the runtime safety case above
3085        if (kv.value) |msg| pt.zcu.gpa.free(msg); // delete previous error message
3086    }
3087}
3088
3089/// Called from `Compilation.update`, after everything is done, just before
3090/// reporting compile errors. In this function we emit exported symbol collision
3091/// errors and communicate exported symbols to the linker backend.
3092pub fn processExports(pt: Zcu.PerThread) !void {
3093    const zcu = pt.zcu;
3094    const gpa = zcu.gpa;
3095
3096    if (zcu.single_exports.count() == 0 and zcu.multi_exports.count() == 0) {
3097        // We can avoid a call to `resolveReferences` in this case.
3098        return;
3099    }
3100
3101    // First, construct a mapping of every exported value and Nav to the indices of all its different exports.
3102    var nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, std.ArrayList(Zcu.Export.Index)) = .empty;
3103    var uav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, std.ArrayList(Zcu.Export.Index)) = .empty;
3104    defer {
3105        for (nav_exports.values()) |*exports| {
3106            exports.deinit(gpa);
3107        }
3108        nav_exports.deinit(gpa);
3109        for (uav_exports.values()) |*exports| {
3110            exports.deinit(gpa);
3111        }
3112        uav_exports.deinit(gpa);
3113    }
3114
3115    // We note as a heuristic:
3116    // * It is rare to export a value.
3117    // * It is rare for one Nav to be exported multiple times.
3118    // So, this ensureTotalCapacity serves as a reasonable (albeit very approximate) optimization.
3119    try nav_exports.ensureTotalCapacity(gpa, zcu.single_exports.count() + zcu.multi_exports.count());
3120
3121    const unit_references = try zcu.resolveReferences();
3122
3123    for (zcu.single_exports.keys(), zcu.single_exports.values()) |exporter, export_idx| {
3124        const exp = export_idx.ptr(zcu);
3125        if (!unit_references.contains(exporter)) {
3126            // This export might already have been sent to the linker on a previous update, in which case we need to delete it.
3127            // The linker export API should be modified to eliminate this call. #23616
3128            if (zcu.comp.bin_file) |lf| {
3129                if (zcu.llvm_object == null) {
3130                    lf.deleteExport(exp.exported, exp.opts.name);
3131                }
3132            }
3133            continue;
3134        }
3135        const value_ptr, const found_existing = switch (exp.exported) {
3136            .nav => |nav| gop: {
3137                const gop = try nav_exports.getOrPut(gpa, nav);
3138                break :gop .{ gop.value_ptr, gop.found_existing };
3139            },
3140            .uav => |uav| gop: {
3141                const gop = try uav_exports.getOrPut(gpa, uav);
3142                break :gop .{ gop.value_ptr, gop.found_existing };
3143            },
3144        };
3145        if (!found_existing) value_ptr.* = .{};
3146        try value_ptr.append(gpa, export_idx);
3147    }
3148
3149    for (zcu.multi_exports.keys(), zcu.multi_exports.values()) |exporter, info| {
3150        const exports = zcu.all_exports.items[info.index..][0..info.len];
3151        if (!unit_references.contains(exporter)) {
3152            // This export might already have been sent to the linker on a previous update, in which case we need to delete it.
3153            // The linker export API should be modified to eliminate this loop. #23616
3154            if (zcu.comp.bin_file) |lf| {
3155                if (zcu.llvm_object == null) {
3156                    for (exports) |exp| {
3157                        lf.deleteExport(exp.exported, exp.opts.name);
3158                    }
3159                }
3160            }
3161            continue;
3162        }
3163        for (exports, info.index..) |exp, export_idx| {
3164            const value_ptr, const found_existing = switch (exp.exported) {
3165                .nav => |nav| gop: {
3166                    const gop = try nav_exports.getOrPut(gpa, nav);
3167                    break :gop .{ gop.value_ptr, gop.found_existing };
3168                },
3169                .uav => |uav| gop: {
3170                    const gop = try uav_exports.getOrPut(gpa, uav);
3171                    break :gop .{ gop.value_ptr, gop.found_existing };
3172                },
3173            };
3174            if (!found_existing) value_ptr.* = .{};
3175            try value_ptr.append(gpa, @enumFromInt(export_idx));
3176        }
3177    }
3178
3179    // If there are compile errors, we won't call `updateExports`. Not only would it be redundant
3180    // work, but the linker may not have seen an exported `Nav` due to a compile error, so linker
3181    // implementations would have to handle that case. Setting this flag (checked in `processExportsInner`) avoids that.
3182    const skip_linker_work = zcu.comp.anyErrors();
3183
3184    // Map symbol names to `Export` for name collision detection.
3185    var symbol_exports: SymbolExports = .{};
3186    defer symbol_exports.deinit(gpa);
3187
3188    for (nav_exports.keys(), nav_exports.values()) |exported_nav, exports_list| {
3189        const exported: Zcu.Exported = .{ .nav = exported_nav };
3190        try pt.processExportsInner(&symbol_exports, exported, exports_list.items, skip_linker_work);
3191    }
3192
3193    for (uav_exports.keys(), uav_exports.values()) |exported_uav, exports_list| {
3194        const exported: Zcu.Exported = .{ .uav = exported_uav };
3195        try pt.processExportsInner(&symbol_exports, exported, exports_list.items, skip_linker_work);
3196    }
3197}
3198
3199const SymbolExports = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Zcu.Export.Index);
3200
3201fn processExportsInner(
3202    pt: Zcu.PerThread,
3203    symbol_exports: *SymbolExports,
3204    exported: Zcu.Exported,
3205    export_indices: []const Zcu.Export.Index,
3206    skip_linker_work: bool,
3207) error{OutOfMemory}!void {
3208    const zcu = pt.zcu;
3209    const gpa = zcu.gpa;
3210    const ip = &zcu.intern_pool;
3211
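        // Symbol name collision detection: the first export to claim a name wins; any later export of
        // the same name is marked failed, with an error note pointing at the winning export.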
3212    for (export_indices) |export_idx| {
3213        const new_export = export_idx.ptr(zcu);
3214        const gop = try symbol_exports.getOrPut(gpa, new_export.opts.name);
3215        if (gop.found_existing) {
3216            new_export.status = .failed_retryable;
3217            try zcu.failed_exports.ensureUnusedCapacity(gpa, 1);
3218            const msg = try Zcu.ErrorMsg.create(gpa, new_export.src, "exported symbol collision: {f}", .{
3219                new_export.opts.name.fmt(ip),
3220            });
3221            errdefer msg.destroy(gpa);
3222            const other_export = gop.value_ptr.ptr(zcu);
3223            try zcu.errNote(other_export.src, msg, "other symbol here", .{});
3224            zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg);
3225            new_export.status = .failed;
3226        } else {
3227            gop.value_ptr.* = export_idx;
3228        }
3229    }
3230
3231    switch (exported) {
3232        .nav => |nav_index| if (failed: {
3233            const nav = ip.getNav(nav_index);
3234            if (zcu.failed_codegen.contains(nav_index)) break :failed true;
3235            if (nav.analysis != null) {
3236                const unit: AnalUnit = .wrap(.{ .nav_val = nav_index });
3237                if (zcu.failed_analysis.contains(unit)) break :failed true;
3238                if (zcu.transitive_failed_analysis.contains(unit)) break :failed true;
3239            }
3240            const val = switch (nav.status) {
3241                .unresolved, .type_resolved => break :failed true,
3242                .fully_resolved => |r| Value.fromInterned(r.val),
3243            };
3244            // If the value is a function, we also need to check if that function succeeded analysis.
3245            if (val.typeOf(zcu).zigTypeTag(zcu) == .@"fn") {
3246                const func_unit = AnalUnit.wrap(.{ .func = val.toIntern() });
3247                if (zcu.failed_analysis.contains(func_unit)) break :failed true;
3248                if (zcu.transitive_failed_analysis.contains(func_unit)) break :failed true;
3249            }
3250            break :failed false;
3251        }) {
3252            // This `Nav` is failed, so was never sent to codegen. There should be a compile error.
3253            assert(skip_linker_work);
3254        },
3255        .uav => {},
3256    }
3257
3258    if (skip_linker_work) return;
3259
3260    if (zcu.llvm_object) |llvm_object| {
3261        try zcu.handleUpdateExports(export_indices, llvm_object.updateExports(pt, exported, export_indices));
3262    } else if (zcu.comp.bin_file) |lf| {
3263        try zcu.handleUpdateExports(export_indices, lf.updateExports(pt, exported, export_indices));
3264    }
3265}
3266
3267pub fn populateTestFunctions(pt: Zcu.PerThread) Allocator.Error!void {
3268    const zcu = pt.zcu;
3269    const gpa = zcu.gpa;
3270    const ip = &zcu.intern_pool;
3271
3272    // Our job is to correctly set the value of the `test_functions` declaration if it has been
3273    // analyzed and sent to codegen, It usually will have been, because the test runner will
3274    // analyzed and sent to codegen. It usually will have been, because the test runner will
3275    // analyzed, we will just terminate early, since clearly the test runner hasn't referenced
3276    // `test_functions` so there's no point populating it. More to the the point, we potentially
3277    // `test_functions`, so there's no point populating it. More to the point, we potentially
3278    // the past here.
3279
3280    const builtin_mod = zcu.builtin_modules.get(zcu.root_mod.getBuiltinOptions(zcu.comp.config).hash()).?;
3281    const builtin_file_index = zcu.module_roots.get(builtin_mod).?.unwrap().?;
3282    const builtin_root_type = zcu.fileRootType(builtin_file_index);
3283    if (builtin_root_type == .none) return; // `@import("builtin")` never analyzed
3284    const builtin_namespace = Type.fromInterned(builtin_root_type).getNamespace(zcu).unwrap().?;
3285    // We know that the namespace has a `test_functions`...
3286    const nav_index = zcu.namespacePtr(builtin_namespace).pub_decls.getKeyAdapted(
3287        try ip.getOrPutString(gpa, pt.tid, "test_functions", .no_embedded_nulls),
3288        Zcu.Namespace.NameAdapter{ .zcu = zcu },
3289    ).?;
3290    // ...but it might not be populated, so let's check that!
3291    if (zcu.failed_analysis.contains(.wrap(.{ .nav_val = nav_index })) or
3292        zcu.transitive_failed_analysis.contains(.wrap(.{ .nav_val = nav_index })) or
3293        ip.getNav(nav_index).status != .fully_resolved)
3294    {
3295        // The value of `builtin.test_functions` was either never referenced, or failed analysis.
3296        // Either way, we don't need to do anything.
3297        return;
3298    }
3299
3300    // Okay, `builtin.test_functions` is (potentially) referenced and valid. Our job now is to swap
3301    // its placeholder `&.{}` value for the actual list of all test functions.
3302
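        // `builtin.test_functions` is a slice of `std.builtin.TestFn`; peel off the slice and its
        // pointer type to recover the element type.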
3303    const test_fns_val = zcu.navValue(nav_index);
3304    const test_fn_ty = test_fns_val.typeOf(zcu).slicePtrFieldType(zcu).childType(zcu);
3305
3306    const array_anon_decl: InternPool.Key.Ptr.BaseAddr.Uav = array: {
3307        // Add zcu.test_functions to an array decl, then make the test_functions
3308        // decl reference it as a slice.
3309        const test_fn_vals = try gpa.alloc(InternPool.Index, zcu.test_functions.count());
3310        defer gpa.free(test_fn_vals);
3311
3312        for (test_fn_vals, zcu.test_functions.keys()) |*test_fn_val, test_nav_index| {
3313            const test_nav = ip.getNav(test_nav_index);
3314
3315            {
3316                // The test declaration might have failed; if that's the case, just return, as we'll
3317                // be emitting a compile error anyway.
3318                const anal_unit: AnalUnit = .wrap(.{ .nav_val = test_nav_index });
3319                if (zcu.failed_analysis.contains(anal_unit) or
3320                    zcu.transitive_failed_analysis.contains(anal_unit))
3321                {
3322                    return;
3323                }
3324            }
3325
3326            const test_nav_name = test_nav.fqn;
3327            const test_nav_name_len = test_nav_name.length(ip);
3328            const test_name_anon_decl: InternPool.Key.Ptr.BaseAddr.Uav = n: {
3329                const test_name_ty = try pt.arrayType(.{
3330                    .len = test_nav_name_len,
3331                    .child = .u8_type,
3332                });
3333                const test_name_val = try pt.intern(.{ .aggregate = .{
3334                    .ty = test_name_ty.toIntern(),
3335                    .storage = .{ .bytes = test_nav_name.toString() },
3336                } });
3337                break :n .{
3338                    .orig_ty = (try pt.singleConstPtrType(test_name_ty)).toIntern(),
3339                    .val = test_name_val,
3340                };
3341            };
3342
3343            const test_fn_fields = .{
3344                // name
3345                try pt.intern(.{ .slice = .{
3346                    .ty = .slice_const_u8_type,
3347                    .ptr = try pt.intern(.{ .ptr = .{
3348                        .ty = .manyptr_const_u8_type,
3349                        .base_addr = .{ .uav = test_name_anon_decl },
3350                        .byte_offset = 0,
3351                    } }),
3352                    .len = try pt.intern(.{ .int = .{
3353                        .ty = .usize_type,
3354                        .storage = .{ .u64 = test_nav_name_len },
3355                    } }),
3356                } }),
3357                // func
3358                try pt.intern(.{ .ptr = .{
3359                    .ty = (try pt.navPtrType(test_nav_index)).toIntern(),
3360                    .base_addr = .{ .nav = test_nav_index },
3361                    .byte_offset = 0,
3362                } }),
3363            };
3364            test_fn_val.* = (try pt.aggregateValue(test_fn_ty, &test_fn_fields)).toIntern();
3365        }
3366
3367        const array_ty = try pt.arrayType(.{
3368            .len = test_fn_vals.len,
3369            .child = test_fn_ty.toIntern(),
3370            .sentinel = .none,
3371        });
3372        break :array .{
3373            .orig_ty = (try pt.singleConstPtrType(array_ty)).toIntern(),
3374            .val = (try pt.aggregateValue(array_ty, test_fn_vals)).toIntern(),
3375        };
3376    };
3377
3378    {
3379        const new_ty = try pt.ptrType(.{
3380            .child = test_fn_ty.toIntern(),
3381            .flags = .{
3382                .is_const = true,
3383                .size = .slice,
3384            },
3385        });
3386        const new_init = try pt.intern(.{ .slice = .{
3387            .ty = new_ty.toIntern(),
3388            .ptr = try pt.intern(.{ .ptr = .{
3389                .ty = new_ty.slicePtrFieldType(zcu).toIntern(),
3390                .base_addr = .{ .uav = array_anon_decl },
3391                .byte_offset = 0,
3392            } }),
3393            .len = (try pt.intValue(Type.usize, zcu.test_functions.count())).toIntern(),
3394        } });
3395        ip.mutateVarInit(test_fns_val.toIntern(), new_init);
3396    }
3397    // The linker thread is not running, so we actually need to dispatch this task directly.
3398    @import("../link.zig").linkTestFunctionsNav(pt, nav_index);
3399}
3400
3401/// Stores an error in `pt.zcu.failed_files` for this file, and sets the file
3402/// status to `retryable_failure`.
3403pub fn reportRetryableFileError(
3404    pt: Zcu.PerThread,
3405    file_index: Zcu.File.Index,
3406    comptime format: []const u8,
3407    args: anytype,
3408) error{OutOfMemory}!void {
3409    const zcu = pt.zcu;
3410    const gpa = zcu.gpa;
3411
3412    const file = zcu.fileByIndex(file_index);
3413
3414    file.status = .retryable_failure;
3415
3416    const msg = try std.fmt.allocPrint(gpa, format, args);
3417    errdefer gpa.free(msg);
3418
3419    const old_msg: ?[]u8 = old_msg: {
3420        zcu.comp.mutex.lock();
3421        defer zcu.comp.mutex.unlock();
3422
3423        const gop = try zcu.failed_files.getOrPut(gpa, file_index);
3424        const old: ?[]u8 = if (gop.found_existing) old: {
3425            break :old gop.value_ptr.*;
3426        } else null;
3427        gop.value_ptr.* = msg;
3428
3429        break :old_msg old;
3430    };
3431    if (old_msg) |m| gpa.free(m);
3432}
3433
3434/// Shortcut for calling `intern_pool.get`.
3435pub fn intern(pt: Zcu.PerThread, key: InternPool.Key) Allocator.Error!InternPool.Index {
3436    return pt.zcu.intern_pool.get(pt.zcu.gpa, pt.tid, key);
3437}
3438
3439/// Shortcut for calling `intern_pool.getUnion`.
3440pub fn internUnion(pt: Zcu.PerThread, un: InternPool.Key.Union) Allocator.Error!InternPool.Index {
3441    return pt.zcu.intern_pool.getUnion(pt.zcu.gpa, pt.tid, un);
3442}
3443
3444/// Essentially a shortcut for calling `intern_pool.getCoerced`.
3445/// However, this function also allows coercing `extern`s. The `InternPool` function can't do
3446/// this because it requires potentially pushing to the job queue.
3447pub fn getCoerced(pt: Zcu.PerThread, val: Value, new_ty: Type) Allocator.Error!Value {
3448    const ip = &pt.zcu.intern_pool;
3449    switch (ip.indexToKey(val.toIntern())) {
3450        .@"extern" => |e| {
3451            const coerced = try pt.getExtern(.{
3452                .name = e.name,
3453                .ty = new_ty.toIntern(),
3454                .lib_name = e.lib_name,
3455                .is_const = e.is_const,
3456                .is_threadlocal = e.is_threadlocal,
3457                .linkage = e.linkage,
3458                .visibility = e.visibility,
3459                .is_dll_import = e.is_dll_import,
3460                .relocation = e.relocation,
3461                .alignment = e.alignment,
3462                .@"addrspace" = e.@"addrspace",
3463                .zir_index = e.zir_index,
3464                .owner_nav = undefined, // ignored by `getExtern`.
3465                .source = e.source,
3466            });
3467            return Value.fromInterned(coerced);
3468        },
3469        else => {},
3470    }
3471    return Value.fromInterned(try ip.getCoerced(pt.zcu.gpa, pt.tid, val.toIntern(), new_ty.toIntern()));
3472}
3473
3474pub fn intType(pt: Zcu.PerThread, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type {
3475    return Type.fromInterned(try pt.intern(.{ .int_type = .{
3476        .signedness = signedness,
3477        .bits = bits,
3478    } }));
3479}
3480
3481pub fn errorIntType(pt: Zcu.PerThread) std.mem.Allocator.Error!Type {
3482    return pt.intType(.unsigned, pt.zcu.errorSetBits());
3483}
3484
3485pub fn arrayType(pt: Zcu.PerThread, info: InternPool.Key.ArrayType) Allocator.Error!Type {
3486    return Type.fromInterned(try pt.intern(.{ .array_type = info }));
3487}
3488
3489pub fn vectorType(pt: Zcu.PerThread, info: InternPool.Key.VectorType) Allocator.Error!Type {
3490    return Type.fromInterned(try pt.intern(.{ .vector_type = info }));
3491}
3492
3493pub fn optionalType(pt: Zcu.PerThread, child_type: InternPool.Index) Allocator.Error!Type {
3494    return Type.fromInterned(try pt.intern(.{ .opt_type = child_type }));
3495}
3496
3497pub fn ptrType(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Allocator.Error!Type {
3498    var canon_info = info;
3499
3500    if (info.flags.size == .c) canon_info.flags.is_allowzero = true;
3501
3502    // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee
3503    // type, we change it to 0 here. If this causes an assertion trip because the
3504    // pointee type needs to be resolved more, that needs to be done before calling
3505    // this ptrType() function.
3506    if (info.flags.alignment != .none and
3507        info.flags.alignment == Type.fromInterned(info.child).abiAlignment(pt.zcu))
3508    {
3509        canon_info.flags.alignment = .none;
3510    }
3511
3512    switch (info.flags.vector_index) {
3513        // Canonicalize host_size. If it matches the bit size of the pointee type,
3514        // we change it to 0 here. If this causes an assertion trip, the pointee type
3515        // needs to be resolved before calling this ptrType() function.
3516        .none => if (info.packed_offset.host_size != 0) {
3517            const elem_bit_size = Type.fromInterned(info.child).bitSize(pt.zcu);
3518            assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8);
3519            if (info.packed_offset.host_size * 8 == elem_bit_size) {
3520                canon_info.packed_offset.host_size = 0;
3521            }
3522        },
3523        _ => assert(@intFromEnum(info.flags.vector_index) < info.packed_offset.host_size),
3524    }
3525
3526    return Type.fromInterned(try pt.intern(.{ .ptr_type = canon_info }));
3527}
3528
3529/// Like `ptrType`, but if `info` specifies an `alignment`, first ensures the pointer
3530/// child type's alignment is resolved so that an invalid alignment is not used.
3531/// In general, prefer this function during semantic analysis.
3532pub fn ptrTypeSema(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Zcu.SemaError!Type {
3533    if (info.flags.alignment != .none) {
3534        _ = try Type.fromInterned(info.child).abiAlignmentSema(pt);
3535    }
3536    return pt.ptrType(info);
3537}
3538
3539pub fn singleMutPtrType(pt: Zcu.PerThread, child_type: Type) Allocator.Error!Type {
3540    return pt.ptrType(.{ .child = child_type.toIntern() });
3541}
3542
3543pub fn singleConstPtrType(pt: Zcu.PerThread, child_type: Type) Allocator.Error!Type {
3544    return pt.ptrType(.{
3545        .child = child_type.toIntern(),
3546        .flags = .{
3547            .is_const = true,
3548        },
3549    });
3550}
3551
3552pub fn manyConstPtrType(pt: Zcu.PerThread, child_type: Type) Allocator.Error!Type {
3553    return pt.ptrType(.{
3554        .child = child_type.toIntern(),
3555        .flags = .{
3556            .size = .many,
3557            .is_const = true,
3558        },
3559    });
3560}
3561
3562pub fn adjustPtrTypeChild(pt: Zcu.PerThread, ptr_ty: Type, new_child: Type) Allocator.Error!Type {
3563    var info = ptr_ty.ptrInfo(pt.zcu);
3564    info.child = new_child.toIntern();
3565    return pt.ptrType(info);
3566}
3567
3568pub fn funcType(pt: Zcu.PerThread, key: InternPool.GetFuncTypeKey) Allocator.Error!Type {
3569    return Type.fromInterned(try pt.zcu.intern_pool.getFuncType(pt.zcu.gpa, pt.tid, key));
3570}
3571
3572/// Use this for `anyframe->T` only.
3573/// For `anyframe`, use the `InternPool.Index.anyframe` tag directly.
3574pub fn anyframeType(pt: Zcu.PerThread, payload_ty: Type) Allocator.Error!Type {
3575    return Type.fromInterned(try pt.intern(.{ .anyframe_type = payload_ty.toIntern() }));
3576}
3577
3578pub fn errorUnionType(pt: Zcu.PerThread, error_set_ty: Type, payload_ty: Type) Allocator.Error!Type {
3579    return Type.fromInterned(try pt.intern(.{ .error_union_type = .{
3580        .error_set_type = error_set_ty.toIntern(),
3581        .payload_type = payload_ty.toIntern(),
3582    } }));
3583}
3584
3585pub fn singleErrorSetType(pt: Zcu.PerThread, name: InternPool.NullTerminatedString) Allocator.Error!Type {
3586    const names: *const [1]InternPool.NullTerminatedString = &name;
3587    return Type.fromInterned(try pt.zcu.intern_pool.getErrorSetType(pt.zcu.gpa, pt.tid, names));
3588}
3589
3590/// Sorts `names` in place.
3591pub fn errorSetFromUnsortedNames(
3592    pt: Zcu.PerThread,
3593    names: []InternPool.NullTerminatedString,
3594) Allocator.Error!Type {
3595    std.mem.sort(
3596        InternPool.NullTerminatedString,
3597        names,
3598        {},
3599        InternPool.NullTerminatedString.indexLessThan,
3600    );
3601    const new_ty = try pt.zcu.intern_pool.getErrorSetType(pt.zcu.gpa, pt.tid, names);
3602    return Type.fromInterned(new_ty);
3603}
3604
3605/// Supports only pointers, not pointer-like optionals.
3606pub fn ptrIntValue(pt: Zcu.PerThread, ty: Type, x: u64) Allocator.Error!Value {
3607    const zcu = pt.zcu;
3608    assert(ty.zigTypeTag(zcu) == .pointer and !ty.isSlice(zcu));
3609    assert(x != 0 or ty.isAllowzeroPtr(zcu));
3610    return Value.fromInterned(try pt.intern(.{ .ptr = .{
3611        .ty = ty.toIntern(),
3612        .base_addr = .int,
3613        .byte_offset = x,
3614    } }));
3615}
3616
3617/// Creates an enum tag value based on the integer tag value.
3618pub fn enumValue(pt: Zcu.PerThread, ty: Type, tag_int: InternPool.Index) Allocator.Error!Value {
3619    if (std.debug.runtime_safety) {
3620        const tag = ty.zigTypeTag(pt.zcu);
3621        assert(tag == .@"enum");
3622    }
3623    return Value.fromInterned(try pt.intern(.{ .enum_tag = .{
3624        .ty = ty.toIntern(),
3625        .int = tag_int,
3626    } }));
3627}
3628
3629/// Creates an enum tag value based on the field index according to source code
3630/// declaration order.
3631pub fn enumValueFieldIndex(pt: Zcu.PerThread, ty: Type, field_index: u32) Allocator.Error!Value {
3632    const ip = &pt.zcu.intern_pool;
3633    const enum_type = ip.loadEnumType(ty.toIntern());
3634
3635    if (enum_type.values.len == 0) {
3636        // Auto-numbered fields.
3637        return Value.fromInterned(try pt.intern(.{ .enum_tag = .{
3638            .ty = ty.toIntern(),
3639            .int = try pt.intern(.{ .int = .{
3640                .ty = enum_type.tag_ty,
3641                .storage = .{ .u64 = field_index },
3642            } }),
3643        } }));
3644    }
3645
3646    return Value.fromInterned(try pt.intern(.{ .enum_tag = .{
3647        .ty = ty.toIntern(),
3648        .int = enum_type.values.get(ip)[field_index],
3649    } }));
3650}
3651
3652pub fn undefValue(pt: Zcu.PerThread, ty: Type) Allocator.Error!Value {
3653    return Value.fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
3654}
3655
3656pub fn undefRef(pt: Zcu.PerThread, ty: Type) Allocator.Error!Air.Inst.Ref {
3657    return Air.internedToRef((try pt.undefValue(ty)).toIntern());
3658}
3659
3660pub fn intValue(pt: Zcu.PerThread, ty: Type, x: anytype) Allocator.Error!Value {
3661    if (std.math.cast(u64, x)) |casted| return pt.intValue_u64(ty, casted);
3662    if (std.math.cast(i64, x)) |casted| return pt.intValue_i64(ty, casted);
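        // Neither fast path applied; build the value as a big integer using a small stack limb buffer.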
3663    var limbs_buffer: [4]usize = undefined;
3664    var big_int = BigIntMutable.init(&limbs_buffer, x);
3665    return pt.intValue_big(ty, big_int.toConst());
3666}
3667
3668pub fn intRef(pt: Zcu.PerThread, ty: Type, x: anytype) Allocator.Error!Air.Inst.Ref {
3669    return Air.internedToRef((try pt.intValue(ty, x)).toIntern());
3670}
3671
3672pub fn intValue_big(pt: Zcu.PerThread, ty: Type, x: BigIntConst) Allocator.Error!Value {
3673    if (ty.toIntern() != .comptime_int_type) {
3674        const int_info = ty.intInfo(pt.zcu);
3675        assert(x.fitsInTwosComp(int_info.signedness, int_info.bits));
3676    }
3677    return .fromInterned(try pt.intern(.{ .int = .{
3678        .ty = ty.toIntern(),
3679        .storage = .{ .big_int = x },
3680    } }));
3681}
3682
3683pub fn intValue_u64(pt: Zcu.PerThread, ty: Type, x: u64) Allocator.Error!Value {
3684    if (ty.toIntern() != .comptime_int_type and x != 0) {
3685        const int_info = ty.intInfo(pt.zcu);
3686        const unsigned_bits = int_info.bits - @intFromBool(int_info.signedness == .signed);
3687        assert(unsigned_bits >= std.math.log2(x) + 1);
3688    }
3689    return .fromInterned(try pt.intern(.{ .int = .{
3690        .ty = ty.toIntern(),
3691        .storage = .{ .u64 = x },
3692    } }));
3693}
3694
3695pub fn intValue_i64(pt: Zcu.PerThread, ty: Type, x: i64) Allocator.Error!Value {
3696    if (ty.toIntern() != .comptime_int_type and x != 0) {
3697        const int_info = ty.intInfo(pt.zcu);
3698        const unsigned_bits = int_info.bits - @intFromBool(int_info.signedness == .signed);
3699        if (x > 0) {
3700            assert(unsigned_bits >= std.math.log2(x) + 1);
3701        } else {
3702            assert(int_info.signedness == .signed);
3703            assert(unsigned_bits >= std.math.log2_int_ceil(u64, @abs(x)));
3704        }
3705    }
3706    return .fromInterned(try pt.intern(.{ .int = .{
3707        .ty = ty.toIntern(),
3708        .storage = .{ .i64 = x },
3709    } }));
3710}
3711
3712pub fn unionValue(pt: Zcu.PerThread, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value {
3713    const zcu = pt.zcu;
3714    return Value.fromInterned(try zcu.intern_pool.getUnion(zcu.gpa, pt.tid, .{
3715        .ty = union_ty.toIntern(),
3716        .tag = tag.toIntern(),
3717        .val = val.toIntern(),
3718    }));
3719}
3720
3721pub fn aggregateValue(pt: Zcu.PerThread, ty: Type, elems: []const InternPool.Index) Allocator.Error!Value {
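        // If there is at least one element and every element is undef, collapse the whole
        // aggregate into a single undef value.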
3722    for (elems) |elem| {
3723        if (!Value.fromInterned(elem).isUndef(pt.zcu)) break;
3724    } else if (elems.len > 0) {
3725        return pt.undefValue(ty); // all-undef
3726    }
3727    return .fromInterned(try pt.intern(.{ .aggregate = .{
3728        .ty = ty.toIntern(),
3729        .storage = .{ .elems = elems },
3730    } }));
3731}
3732
3733/// Asserts that `ty` is either an array or a vector.
3734pub fn aggregateSplatValue(pt: Zcu.PerThread, ty: Type, repeated_elem: Value) Allocator.Error!Value {
3735    switch (ty.zigTypeTag(pt.zcu)) {
3736        .array, .vector => {},
3737        else => unreachable,
3738    }
3739    if (repeated_elem.isUndef(pt.zcu)) return pt.undefValue(ty);
3740    return .fromInterned(try pt.intern(.{ .aggregate = .{
3741        .ty = ty.toIntern(),
3742        .storage = .{ .repeated_elem = repeated_elem.toIntern() },
3743    } }));
3744}
3745
3746/// Casts the float representation down to the representation of `ty`, potentially losing
3747/// precision if the value cannot be represented exactly in that type.
3748pub fn floatValue(pt: Zcu.PerThread, ty: Type, x: anytype) Allocator.Error!Value {
3749    const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(pt.zcu.getTarget())) {
3750        16 => .{ .f16 = @as(f16, @floatCast(x)) },
3751        32 => .{ .f32 = @as(f32, @floatCast(x)) },
3752        64 => .{ .f64 = @as(f64, @floatCast(x)) },
3753        80 => .{ .f80 = @as(f80, @floatCast(x)) },
3754        128 => .{ .f128 = @as(f128, @floatCast(x)) },
3755        else => unreachable,
3756    };
3757    return Value.fromInterned(try pt.intern(.{ .float = .{
3758        .ty = ty.toIntern(),
3759        .storage = storage,
3760    } }));
3761}
3762
3763pub fn nullValue(pt: Zcu.PerThread, opt_ty: Type) Allocator.Error!Value {
3764    assert(pt.zcu.intern_pool.isOptionalType(opt_ty.toIntern()));
3765    return Value.fromInterned(try pt.intern(.{ .opt = .{
3766        .ty = opt_ty.toIntern(),
3767        .val = .none,
3768    } }));
3769}
3770
3771/// `ty` is an integer or a vector of integers.
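    /// The result is a tuple of `ty` and the overflow-bit type: `u1`, or a vector of `u1` of the same length.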
3772pub fn overflowArithmeticTupleType(pt: Zcu.PerThread, ty: Type) !Type {
3773    const zcu = pt.zcu;
3774    const ip = &zcu.intern_pool;
3775    const ov_ty: Type = if (ty.zigTypeTag(zcu) == .vector) try pt.vectorType(.{
3776        .len = ty.vectorLen(zcu),
3777        .child = .u1_type,
3778    }) else .u1;
3779    const tuple_ty = try ip.getTupleType(zcu.gpa, pt.tid, .{
3780        .types = &.{ ty.toIntern(), ov_ty.toIntern() },
3781        .values = &.{ .none, .none },
3782    });
3783    return .fromInterned(tuple_ty);
3784}
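// Sketch of the resulting tuple shapes (hypothetical operand types):
//
//     const pair = try pt.overflowArithmeticTupleType(Type.u32);
//     // => struct { u32, u1 }, the result type of e.g. `@addWithOverflow` on `u32`s.
//     // For a `@Vector(4, u32)` operand, the overflow component becomes `@Vector(4, u1)`.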
3785
3786pub fn smallestUnsignedInt(pt: Zcu.PerThread, max: u64) Allocator.Error!Type {
3787    return pt.intType(.unsigned, Type.smallestUnsignedBits(max));
3788}
3789
3790/// Returns the smallest possible integer type containing both `min` and
3791/// `max`. Asserts that neither value is undef.
3792/// TODO: if #3806 is implemented, this becomes trivial
3793pub fn intFittingRange(pt: Zcu.PerThread, min: Value, max: Value) !Type {
3794    const zcu = pt.zcu;
3795    assert(!min.isUndef(zcu));
3796    assert(!max.isUndef(zcu));
3797
3798    if (std.debug.runtime_safety) {
3799        assert(Value.order(min, max, zcu).compare(.lte));
3800    }
3801
3802    const sign = min.orderAgainstZero(zcu) == .lt;
3803
3804    const min_val_bits = pt.intBitsForValue(min, sign);
3805    const max_val_bits = pt.intBitsForValue(max, sign);
3806
3807    return pt.intType(
3808        if (sign) .signed else .unsigned,
3809        @max(min_val_bits, max_val_bits),
3810    );
3811}
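// Worked examples (hypothetical `Value`s; assumes `neg_one` and `hundred` are interned
// integers holding -1 and 100 respectively):
//
//     _ = try pt.smallestUnsignedInt(255);          // u8
//     _ = try pt.smallestUnsignedInt(256);          // u9
//     _ = try pt.intFittingRange(neg_one, hundred); // i8: signed, and 100 needs 7+1 bits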
3812
3813/// Given a value representing an integer, returns the number of bits necessary to represent
3814/// this value in an integer. If `sign` is true, returns the number of bits necessary in a
3815/// twos-complement integer; otherwise in an unsigned integer.
3816/// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true.
3817pub fn intBitsForValue(pt: Zcu.PerThread, val: Value, sign: bool) u16 {
3818    const zcu = pt.zcu;
3819    assert(!val.isUndef(zcu));
3820
3821    const key = zcu.intern_pool.indexToKey(val.toIntern());
3822    switch (key.int.storage) {
3823        .i64 => |x| {
3824            if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted) + @intFromBool(sign);
3825            assert(sign);
3826            // Protect against overflow in the following negation.
3827            if (x == std.math.minInt(i64)) return 64;
3828            return Type.smallestUnsignedBits(@as(u64, @intCast(-(x + 1)))) + 1;
3829        },
3830        .u64 => |x| {
3831            return Type.smallestUnsignedBits(x) + @intFromBool(sign);
3832        },
3833        .big_int => |big| {
3834            if (big.positive) return @as(u16, @intCast(big.bitCountAbs() + @intFromBool(sign)));
3835
3836            // Zero is still a possibility, in which case unsigned is fine
3837            if (big.eqlZero()) return 0;
3838
3839            return @as(u16, @intCast(big.bitCountTwosComp()));
3840        },
3841        .lazy_align => |lazy_ty| {
3842            return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(pt.zcu).toByteUnits() orelse 0) + @intFromBool(sign);
3843        },
3844        .lazy_size => |lazy_ty| {
3845            return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiSize(pt.zcu)) + @intFromBool(sign);
3846        },
3847    }
3848}
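// Worked examples for the `.i64` branch above (values chosen for illustration):
//
//     x = 100, sign = true => smallestUnsignedBits(100) + 1 = 8 (fits i8)
//     x = -3,  sign = true => smallestUnsignedBits(-(-3 + 1)) + 1 = smallestUnsignedBits(2) + 1 = 3 (fits i3)
//     x = minInt(i64)      => 64, handled specially to avoid overflow when negating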
3849
3850pub fn navPtrType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Allocator.Error!Type {
3851    const zcu = pt.zcu;
3852    const ip = &zcu.intern_pool;
3853    const ty, const alignment, const @"addrspace", const is_const = switch (ip.getNav(nav_id).status) {
3854        .unresolved => unreachable,
3855        .type_resolved => |r| .{ r.type, r.alignment, r.@"addrspace", r.is_const },
3856        .fully_resolved => |r| .{ ip.typeOf(r.val), r.alignment, r.@"addrspace", r.is_const },
3857    };
3858    return pt.ptrType(.{
3859        .child = ty,
3860        .flags = .{
3861            .alignment = if (alignment == Type.fromInterned(ty).abiAlignment(zcu))
3862                .none
3863            else
3864                alignment,
3865            .address_space = @"addrspace",
3866            .is_const = is_const,
3867        },
3868    });
3869}
3870
3871/// Intern an `.@"extern"`, creating a corresponding owner `Nav` if necessary.
3872/// If necessary, the new `Nav` is queued for codegen.
3873/// `key.owner_nav` is ignored and may be `undefined`.
3874pub fn getExtern(pt: Zcu.PerThread, key: InternPool.Key.Extern) Allocator.Error!InternPool.Index {
3875    const result = try pt.zcu.intern_pool.getExtern(pt.zcu.gpa, pt.tid, key);
3876    if (result.new_nav.unwrap()) |nav| {
3877        // This job depends on any resolve_type_fully jobs queued up before it.
3878        pt.zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
3879        try pt.zcu.comp.queueJob(.{ .link_nav = nav });
3880        if (pt.zcu.comp.debugIncremental()) try pt.zcu.incremental_debug_state.newNav(pt.zcu, nav);
3881    }
3882    return result.index;
3883}
3884
3885// TODO: this shouldn't need a `PerThread`! Fix the signature of `Type.abiAlignment`.
3886pub fn navAlignment(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) InternPool.Alignment {
3887    const zcu = pt.zcu;
3888    const ty: Type, const alignment = switch (zcu.intern_pool.getNav(nav_index).status) {
3889        .unresolved => unreachable,
3890        .type_resolved => |r| .{ .fromInterned(r.type), r.alignment },
3891        .fully_resolved => |r| .{ Value.fromInterned(r.val).typeOf(zcu), r.alignment },
3892    };
3893    if (alignment != .none) return alignment;
3894    return ty.abiAlignment(zcu);
3895}
3896
3897/// `ty` is a container type requiring resolution (struct, union, or enum).
3898/// If `ty` is outdated, it is recreated at a new `InternPool.Index`, which is returned.
3899/// If the type cannot be recreated because it has been lost, `error.AnalysisFail` is returned.
3900/// If `ty` is not outdated, that same `InternPool.Index` is returned.
3901/// If `ty` has already been replaced by this function, the new index will not be returned again.
3902/// Also, if `ty` is an enum, this function will resolve the new type if needed, and the call site
3903/// is responsible for checking `[transitive_]failed_analysis` to detect resolution failures.
3904pub fn ensureTypeUpToDate(pt: Zcu.PerThread, ty: InternPool.Index) Zcu.SemaError!InternPool.Index {
3905    const zcu = pt.zcu;
3906    const gpa = zcu.gpa;
3907    const ip = &zcu.intern_pool;
3908
3909    const anal_unit: AnalUnit = .wrap(.{ .type = ty });
3910    const outdated = zcu.outdated.swapRemove(anal_unit) or
3911        zcu.potentially_outdated.swapRemove(anal_unit);
3912
3913    if (outdated) {
3914        _ = zcu.outdated_ready.swapRemove(anal_unit);
3915        try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty });
3916    }
3917
3918    const ty_key = switch (ip.indexToKey(ty)) {
3919        .struct_type, .union_type, .enum_type => |key| key,
3920        else => unreachable,
3921    };
3922    const declared_ty_key = switch (ty_key) {
3923        .reified => unreachable, // never outdated
3924        .generated_tag => unreachable, // never outdated
3925        .declared => |d| d,
3926    };
3927
3928    if (declared_ty_key.zir_index.resolve(ip) == null) {
3929        // The instruction has been lost -- this type is dead.
3930        return error.AnalysisFail;
3931    }
3932
3933    if (!outdated) return ty;
3934
3935    // We will recreate the type at a new `InternPool.Index`.
3936
3937    // Delete old state which is no longer in use. Technically, this is not necessary: these exports,
3938    // references, etc, will be ignored because the type itself is unreferenced. However, it allows
3939    // reusing the memory which is currently being used to track this state.
3940    zcu.deleteUnitExports(anal_unit);
3941    zcu.deleteUnitReferences(anal_unit);
3942    zcu.deleteUnitCompileLogs(anal_unit);
3943    if (zcu.failed_analysis.fetchSwapRemove(anal_unit)) |kv| {
3944        kv.value.destroy(gpa);
3945    }
3946    _ = zcu.transitive_failed_analysis.swapRemove(anal_unit);
3947    zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit);
3948
3949    if (zcu.comp.debugIncremental()) {
3950        const info = try zcu.incremental_debug_state.getUnitInfo(gpa, anal_unit);
3951        info.last_update_gen = zcu.generation;
3952        info.deps.clearRetainingCapacity();
3953    }
3954
3955    switch (ip.indexToKey(ty)) {
3956        .struct_type => return pt.recreateStructType(ty, declared_ty_key),
3957        .union_type => return pt.recreateUnionType(ty, declared_ty_key),
3958        .enum_type => return pt.recreateEnumType(ty, declared_ty_key),
3959        else => unreachable,
3960    }
3961}
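// Typical call-site shape (hedged sketch; `maybe_outdated_ty` is an assumed
// `InternPool.Index` of a declared struct/union/enum type):
//
//     const live_ty = try pt.ensureTypeUpToDate(maybe_outdated_ty);
//     // If the declaration was outdated, `live_ty` is a freshly recreated index; otherwise
//     // it equals `maybe_outdated_ty`. For enums, also consult `[transitive_]failed_analysis`
//     // afterwards, as documented above.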
3962
3963fn recreateStructType(
3964    pt: Zcu.PerThread,
3965    old_ty: InternPool.Index,
3966    key: InternPool.Key.NamespaceType.Declared,
3967) Allocator.Error!InternPool.Index {
3968    const zcu = pt.zcu;
3969    const gpa = zcu.gpa;
3970    const ip = &zcu.intern_pool;
3971
3972    const inst_info = key.zir_index.resolveFull(ip).?;
3973    const file = zcu.fileByIndex(inst_info.file);
3974    const zir = file.zir.?;
3975
3976    assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
3977    const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended;
3978    assert(extended.opcode == .struct_decl);
3979    const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
3980    const extra = zir.extraData(Zir.Inst.StructDecl, extended.operand);
3981    var extra_index = extra.end;
3982
3983    const captures_len = if (small.has_captures_len) blk: {
3984        const captures_len = zir.extra[extra_index];
3985        extra_index += 1;
3986        break :blk captures_len;
3987    } else 0;
3988    const fields_len = if (small.has_fields_len) blk: {
3989        const fields_len = zir.extra[extra_index];
3990        extra_index += 1;
3991        break :blk fields_len;
3992    } else 0;
3993
3994    assert(captures_len == key.captures.owned.len); // synchronises with logic in `Zcu.mapOldZirToNew`
3995
3996    const struct_obj = ip.loadStructType(old_ty);
3997
3998    const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{
3999        .layout = small.layout,
4000        .fields_len = fields_len,
4001        .known_non_opv = small.known_non_opv,
4002        .requires_comptime = if (small.known_comptime_only) .yes else .unknown,
4003        .any_comptime_fields = small.any_comptime_fields,
4004        .any_default_inits = small.any_default_inits,
4005        .inits_resolved = false,
4006        .any_aligned_fields = small.any_aligned_fields,
4007        .key = .{ .declared_owned_captures = .{
4008            .zir_index = key.zir_index,
4009            .captures = key.captures.owned,
4010        } },
4011    }, true)) {
4012        .wip => |wip| wip,
4013        .existing => unreachable, // we passed `replace_existing`
4014    };
4015    errdefer wip_ty.cancel(ip, pt.tid);
4016
4017    wip_ty.setName(ip, struct_obj.name, struct_obj.name_nav);
4018    try pt.addDependency(.wrap(.{ .type = wip_ty.index }), .{ .src_hash = key.zir_index });
4019    zcu.namespacePtr(struct_obj.namespace).owner_type = wip_ty.index;
4020    // No need to re-scan the namespace -- `zirStructDecl` will ultimately do that if the type is still alive.
4021    try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
4022
4023    codegen_type: {
4024        if (file.mod.?.strip) break :codegen_type;
4025        // This job depends on any resolve_type_fully jobs queued up before it.
4026        zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
4027        try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
4028    }
4029
4030    if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip_ty.index);
4031    const new_ty = wip_ty.finish(ip, struct_obj.namespace);
4032    if (inst_info.inst == .main_struct_inst) {
4033        // This is the root type of a file! Update the reference.
4034        zcu.setFileRootType(inst_info.file, new_ty);
4035    }
4036    return new_ty;
4037}
4038
4039fn recreateUnionType(
4040    pt: Zcu.PerThread,
4041    old_ty: InternPool.Index,
4042    key: InternPool.Key.NamespaceType.Declared,
4043) Allocator.Error!InternPool.Index {
4044    const zcu = pt.zcu;
4045    const gpa = zcu.gpa;
4046    const ip = &zcu.intern_pool;
4047
4048    const inst_info = key.zir_index.resolveFull(ip).?;
4049    const file = zcu.fileByIndex(inst_info.file);
4050    const zir = file.zir.?;
4051
4052    assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
4053    const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended;
4054    assert(extended.opcode == .union_decl);
4055    const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
4056    const extra = zir.extraData(Zir.Inst.UnionDecl, extended.operand);
4057    var extra_index = extra.end;
4058
4059    extra_index += @intFromBool(small.has_tag_type);
4060    const captures_len = if (small.has_captures_len) blk: {
4061        const captures_len = zir.extra[extra_index];
4062        extra_index += 1;
4063        break :blk captures_len;
4064    } else 0;
4065    extra_index += @intFromBool(small.has_body_len);
4066    const fields_len = if (small.has_fields_len) blk: {
4067        const fields_len = zir.extra[extra_index];
4068        extra_index += 1;
4069        break :blk fields_len;
4070    } else 0;
4071
4072    assert(captures_len == key.captures.owned.len); // synchronises with logic in `Zcu.mapOldZirToNew`
4073
4074    const union_obj = ip.loadUnionType(old_ty);
4075
4076    const namespace_index = union_obj.namespace;
4077
4078    const wip_ty = switch (try ip.getUnionType(gpa, pt.tid, .{
4079        .flags = .{
4080            .layout = small.layout,
4081            .status = .none,
4082            .runtime_tag = if (small.has_tag_type or small.auto_enum_tag)
4083                .tagged
4084            else if (small.layout != .auto)
4085                .none
4086            else switch (true) { // TODO
4087                true => .safety,
4088                false => .none,
4089            },
4090            .any_aligned_fields = small.any_aligned_fields,
4091            .requires_comptime = .unknown,
4092            .assumed_runtime_bits = false,
4093            .assumed_pointer_aligned = false,
4094            .alignment = .none,
4095        },
4096        .fields_len = fields_len,
4097        .enum_tag_ty = .none, // set later
4098        .field_types = &.{}, // set later
4099        .field_aligns = &.{}, // set later
4100        .key = .{ .declared_owned_captures = .{
4101            .zir_index = key.zir_index,
4102            .captures = key.captures.owned,
4103        } },
4104    }, true)) {
4105        .wip => |wip| wip,
4106        .existing => unreachable, // we passed `replace_existing`
4107    };
4108    errdefer wip_ty.cancel(ip, pt.tid);
4109
4110    wip_ty.setName(ip, union_obj.name, union_obj.name_nav);
4111    try pt.addDependency(.wrap(.{ .type = wip_ty.index }), .{ .src_hash = key.zir_index });
4112    zcu.namespacePtr(namespace_index).owner_type = wip_ty.index;
4113    // No need to re-scan the namespace -- `zirUnionDecl` will ultimately do that if the type is still alive.
4114    try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
4115
4116    codegen_type: {
4117        if (file.mod.?.strip) break :codegen_type;
4118        // This job depends on any resolve_type_fully jobs queued up before it.
4119        zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
4120        try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
4121    }
4122
4123    if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip_ty.index);
4124    return wip_ty.finish(ip, namespace_index);
4125}
4126
4127/// This *does* call `Sema.resolveDeclaredEnum`, but errors from it are not propagated.
4128/// Call sites are responsible for checking `[transitive_]failed_analysis` after `ensureTypeUpToDate`
4129/// returns in order to detect resolution failures.
4130fn recreateEnumType(
4131    pt: Zcu.PerThread,
4132    old_ty: InternPool.Index,
4133    key: InternPool.Key.NamespaceType.Declared,
4134) (Allocator.Error || Io.Cancelable)!InternPool.Index {
4135    const zcu = pt.zcu;
4136    const gpa = zcu.gpa;
4137    const ip = &zcu.intern_pool;
4138
4139    const inst_info = key.zir_index.resolveFull(ip).?;
4140    const file = zcu.fileByIndex(inst_info.file);
4141    const zir = file.zir.?;
4142
4143    assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
4144    const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended;
4145    assert(extended.opcode == .enum_decl);
4146    const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
4147    const extra = zir.extraData(Zir.Inst.EnumDecl, extended.operand);
4148    var extra_index = extra.end;
4149
4150    const tag_type_ref = if (small.has_tag_type) blk: {
4151        const tag_type_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]);
4152        extra_index += 1;
4153        break :blk tag_type_ref;
4154    } else .none;
4155
4156    const captures_len = if (small.has_captures_len) blk: {
4157        const captures_len = zir.extra[extra_index];
4158        extra_index += 1;
4159        break :blk captures_len;
4160    } else 0;
4161
4162    const body_len = if (small.has_body_len) blk: {
4163        const body_len = zir.extra[extra_index];
4164        extra_index += 1;
4165        break :blk body_len;
4166    } else 0;
4167
4168    const fields_len = if (small.has_fields_len) blk: {
4169        const fields_len = zir.extra[extra_index];
4170        extra_index += 1;
4171        break :blk fields_len;
4172    } else 0;
4173
4174    const decls_len = if (small.has_decls_len) blk: {
4175        const decls_len = zir.extra[extra_index];
4176        extra_index += 1;
4177        break :blk decls_len;
4178    } else 0;
4179
4180    assert(captures_len == key.captures.owned.len); // synchronises with logic in `Zcu.mapOldZirToNew`
4181
4182    extra_index += captures_len * 2;
4183    extra_index += decls_len;
4184
4185    const body = zir.bodySlice(extra_index, body_len);
4186    extra_index += body.len;
4187
4188    const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;
4189    const body_end = extra_index;
4190    extra_index += bit_bags_count;
4191
4192    const any_values = for (zir.extra[body_end..][0..bit_bags_count]) |bag| {
4193        if (bag != 0) break true;
4194    } else false;
4195
4196    const enum_obj = ip.loadEnumType(old_ty);
4197
4198    const namespace_index = enum_obj.namespace;
4199
4200    const wip_ty = switch (try ip.getEnumType(gpa, pt.tid, .{
4201        .has_values = any_values,
4202        .tag_mode = if (small.nonexhaustive)
4203            .nonexhaustive
4204        else if (tag_type_ref == .none)
4205            .auto
4206        else
4207            .explicit,
4208        .fields_len = fields_len,
4209        .key = .{ .declared_owned_captures = .{
4210            .zir_index = key.zir_index,
4211            .captures = key.captures.owned,
4212        } },
4213    }, true)) {
4214        .wip => |wip| wip,
4215        .existing => unreachable, // we passed `replace_existing`
4216    };
4217    var done = false; // flipped after `wip_ty.prepare` below; until then, any error must cancel the WIP type
4218    errdefer if (!done) wip_ty.cancel(ip, pt.tid);
4219
4220    wip_ty.setName(ip, enum_obj.name, enum_obj.name_nav);
4221
4222    zcu.namespacePtr(namespace_index).owner_type = wip_ty.index;
4223    // No need to re-scan the namespace -- `zirEnumDecl` will ultimately do that if the type is still alive.
4224
4225    if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip_ty.index);
4226    wip_ty.prepare(ip, namespace_index);
4227    done = true;
4228
4229    Sema.resolveDeclaredEnum(
4230        pt,
4231        wip_ty,
4232        inst_info.inst,
4233        key.zir_index,
4234        namespace_index,
4235        enum_obj.name,
4236        small,
4237        body,
4238        tag_type_ref,
4239        any_values,
4240        fields_len,
4241        zir,
4242        body_end,
4243    ) catch |err| switch (err) {
4244        error.OutOfMemory => |e| return e,
4245        error.Canceled => |e| return e,
4246        error.AnalysisFail => {}, // call sites are responsible for checking `[transitive_]failed_analysis` to detect this
4247    };
4248
4249    return wip_ty.index;
4250}
4251
4252/// Given a namespace, re-scan its declarations from the type definition if they have not
4253/// yet been re-scanned on this update.
4254/// If the type declaration instruction has been lost, returns `error.AnalysisFail`.
4255/// This will effectively short-circuit the caller, which will be semantic analysis of a
4256/// guaranteed-unreferenced `AnalUnit`, to trigger a transitive analysis error.
4257pub fn ensureNamespaceUpToDate(pt: Zcu.PerThread, namespace_index: Zcu.Namespace.Index) Zcu.SemaError!void {
4258    const zcu = pt.zcu;
4259    const ip = &zcu.intern_pool;
4260    const namespace = zcu.namespacePtr(namespace_index);
4261
4262    if (namespace.generation == zcu.generation) return;
4263
4264    const Container = enum { @"struct", @"union", @"enum", @"opaque" };
4265    const container: Container, const full_key = switch (ip.indexToKey(namespace.owner_type)) {
4266        .struct_type => |k| .{ .@"struct", k },
4267        .union_type => |k| .{ .@"union", k },
4268        .enum_type => |k| .{ .@"enum", k },
4269        .opaque_type => |k| .{ .@"opaque", k },
4270        else => unreachable, // namespaces are owned by a container type
4271    };
4272
4273    const key = switch (full_key) {
4274        .reified, .generated_tag => {
4275            // Namespace always empty, so up-to-date.
4276            namespace.generation = zcu.generation;
4277            return;
4278        },
4279        .declared => |d| d,
4280    };
4281
4282    // Namespace outdated -- re-scan the type if necessary.
4283
4284    const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
4285    const file = zcu.fileByIndex(inst_info.file);
4286    const zir = file.zir.?;
4287
4288    assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
4289    const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended;
4290
4291    const decls = switch (container) {
4292        .@"struct" => decls: {
4293            assert(extended.opcode == .struct_decl);
4294            const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
4295            const extra = zir.extraData(Zir.Inst.StructDecl, extended.operand);
4296            var extra_index = extra.end;
4297            const captures_len = if (small.has_captures_len) blk: {
4298                const captures_len = zir.extra[extra_index];
4299                extra_index += 1;
4300                break :blk captures_len;
4301            } else 0;
4302            extra_index += @intFromBool(small.has_fields_len);
4303            const decls_len = if (small.has_decls_len) blk: {
4304                const decls_len = zir.extra[extra_index];
4305                extra_index += 1;
4306                break :blk decls_len;
4307            } else 0;
4308            extra_index += captures_len * 2;
4309            if (small.has_backing_int) {
4310                const backing_int_body_len = zir.extra[extra_index];
4311                extra_index += 1; // backing_int_body_len
4312                if (backing_int_body_len == 0) {
4313                    extra_index += 1; // backing_int_ref
4314                } else {
4315                    extra_index += backing_int_body_len; // backing_int_body_inst
4316                }
4317            }
4318            break :decls zir.bodySlice(extra_index, decls_len);
4319        },
4320        .@"union" => decls: {
4321            assert(extended.opcode == .union_decl);
4322            const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
4323            const extra = zir.extraData(Zir.Inst.UnionDecl, extended.operand);
4324            var extra_index = extra.end;
4325            extra_index += @intFromBool(small.has_tag_type);
4326            const captures_len = if (small.has_captures_len) blk: {
4327                const captures_len = zir.extra[extra_index];
4328                extra_index += 1;
4329                break :blk captures_len;
4330            } else 0;
4331            extra_index += @intFromBool(small.has_body_len);
4332            extra_index += @intFromBool(small.has_fields_len);
4333            const decls_len = if (small.has_decls_len) blk: {
4334                const decls_len = zir.extra[extra_index];
4335                extra_index += 1;
4336                break :blk decls_len;
4337            } else 0;
4338            extra_index += captures_len * 2;
4339            break :decls zir.bodySlice(extra_index, decls_len);
4340        },
4341        .@"enum" => decls: {
4342            assert(extended.opcode == .enum_decl);
4343            const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
4344            const extra = zir.extraData(Zir.Inst.EnumDecl, extended.operand);
4345            var extra_index = extra.end;
4346            extra_index += @intFromBool(small.has_tag_type);
4347            const captures_len = if (small.has_captures_len) blk: {
4348                const captures_len = zir.extra[extra_index];
4349                extra_index += 1;
4350                break :blk captures_len;
4351            } else 0;
4352            extra_index += @intFromBool(small.has_body_len);
4353            extra_index += @intFromBool(small.has_fields_len);
4354            const decls_len = if (small.has_decls_len) blk: {
4355                const decls_len = zir.extra[extra_index];
4356                extra_index += 1;
4357                break :blk decls_len;
4358            } else 0;
4359            extra_index += captures_len * 2;
4360            break :decls zir.bodySlice(extra_index, decls_len);
4361        },
4362        .@"opaque" => decls: {
4363            assert(extended.opcode == .opaque_decl);
4364            const small: Zir.Inst.OpaqueDecl.Small = @bitCast(extended.small);
4365            const extra = zir.extraData(Zir.Inst.OpaqueDecl, extended.operand);
4366            var extra_index = extra.end;
4367            const captures_len = if (small.has_captures_len) blk: {
4368                const captures_len = zir.extra[extra_index];
4369                extra_index += 1;
4370                break :blk captures_len;
4371            } else 0;
4372            const decls_len = if (small.has_decls_len) blk: {
4373                const decls_len = zir.extra[extra_index];
4374                extra_index += 1;
4375                break :blk decls_len;
4376            } else 0;
4377            extra_index += captures_len * 2;
4378            break :decls zir.bodySlice(extra_index, decls_len);
4379        },
4380    };
4381
4382    try pt.scanNamespace(namespace_index, decls);
4383    namespace.generation = zcu.generation;
4384}
4385
4386pub fn refValue(pt: Zcu.PerThread, val: InternPool.Index) Zcu.SemaError!InternPool.Index {
4387    const ptr_ty = (try pt.ptrTypeSema(.{
4388        .child = pt.zcu.intern_pool.typeOf(val),
4389        .flags = .{
4390            .alignment = .none,
4391            .is_const = true,
4392            .address_space = .generic,
4393        },
4394    })).toIntern();
4395    return pt.intern(.{ .ptr = .{
4396        .ty = ptr_ty,
4397        .base_addr = .{ .uav = .{
4398            .val = val,
4399            .orig_ty = ptr_ty,
4400        } },
4401        .byte_offset = 0,
4402    } });
4403}
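// Sketch (assumes `some_val` is an already-interned value of some type `T`); the result
// is a constant pointer in the generic address space whose base is an unnamed constant:
//
//     const ptr_index = try pt.refValue(some_val); // conceptually a comptime-known `*const T`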
4404
4405pub fn addDependency(pt: Zcu.PerThread, unit: AnalUnit, dependee: InternPool.Dependee) Allocator.Error!void {
4406    const zcu = pt.zcu;
4407    const gpa = zcu.gpa;
4408    try zcu.intern_pool.addDependency(gpa, unit, dependee);
4409    if (zcu.comp.debugIncremental()) {
4410        const info = try zcu.incremental_debug_state.getUnitInfo(gpa, unit);
4411        try info.deps.append(gpa, dependee);
4412    }
4413}
4414
4415/// Performs code generation, which comes after `Sema` but before `link` in the pipeline.
4416/// This part of the pipeline is self-contained/"pure", so can be run in parallel with most
4417/// other code. This function is currently run either on the main thread, or on a separate
4418/// codegen thread, depending on whether the backend supports `Zcu.Feature.separate_thread`.
4419pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air, out: *@import("../link.zig").ZcuTask.LinkFunc.SharedMir) void {
4420    const zcu = pt.zcu;
4421
4422    crash_report.CodegenFunc.start(zcu, func_index);
4423    defer crash_report.CodegenFunc.stop(func_index);
4424
4425    var timer = zcu.comp.startTimer();
4426
4427    const success: bool = if (runCodegenInner(pt, func_index, air)) |mir| success: {
4428        out.value = mir;
4429        break :success true;
4430    } else |err| success: {
4431        switch (err) {
4432            error.OutOfMemory => zcu.comp.setAllocFailure(),
4433            error.CodegenFail => zcu.assertCodegenFailed(zcu.funcInfo(func_index).owner_nav),
4434            error.NoLinkFile => assert(zcu.comp.bin_file == null),
4435            error.BackendDoesNotProduceMir => switch (target_util.zigBackend(
4436                &zcu.root_mod.resolved_target.result,
4437                zcu.comp.config.use_llvm,
4438            )) {
4439                else => unreachable, // assertion failure
4440                .stage2_spirv,
4441                .stage2_llvm,
4442                => {},
4443            },
4444        }
4445        break :success false;
4446    };
4447
4448    if (timer.finish()) |ns_codegen| report_time: {
4449        const ip = &zcu.intern_pool;
4450        const nav = ip.indexToKey(func_index).func.owner_nav;
4451        const zir_decl = ip.getNav(nav).srcInst(ip);
4452        zcu.comp.mutex.lock();
4453        defer zcu.comp.mutex.unlock();
4454        const tr = &zcu.comp.time_report.?;
4455        tr.stats.cpu_ns_codegen += ns_codegen;
4456        const gop = tr.decl_codegen_ns.getOrPut(zcu.gpa, zir_decl) catch |err| switch (err) {
4457            error.OutOfMemory => {
4458                zcu.comp.setAllocFailure();
4459                break :report_time;
4460            },
4461        };
4462        if (!gop.found_existing) gop.value_ptr.* = 0;
4463        gop.value_ptr.* += ns_codegen;
4464    }
4465
4466    // release `out.value` with this store; synchronizes with acquire loads in `link`
4467    out.status.store(if (success) .ready else .failed, .release);
4468    zcu.comp.link_task_queue.mirReady(zcu.comp, func_index, out);
4469    if (zcu.pending_codegen_jobs.rmw(.Sub, 1, .monotonic) == 1) {
4470        // Decremented to 0, so all done.
4471        zcu.codegen_prog_node.end();
4472        zcu.codegen_prog_node = .none;
4473    }
4474}
4475fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) error{
4476    OutOfMemory,
4477    CodegenFail,
4478    NoLinkFile,
4479    BackendDoesNotProduceMir,
4480}!codegen.AnyMir {
4481    const zcu = pt.zcu;
4482    const gpa = zcu.gpa;
4483    const ip = &zcu.intern_pool;
4484    const comp = zcu.comp;
4485
4486    const nav = zcu.funcInfo(func_index).owner_nav;
4487    const fqn = ip.getNav(nav).fqn;
4488
4489    const codegen_prog_node = zcu.codegen_prog_node.start(fqn.toSlice(ip), 0);
4490    defer codegen_prog_node.end();
4491
4492    if (codegen.legalizeFeatures(pt, nav)) |features| {
4493        try air.legalize(pt, features);
4494    }
4495
4496    var liveness: ?Air.Liveness = if (codegen.wantsLiveness(pt, nav))
4497        try .analyze(zcu, air.*, ip)
4498    else
4499        null;
4500    defer if (liveness) |*l| l.deinit(gpa);
4501
4502    if (build_options.enable_debug_extensions and comp.verbose_air) {
4503        const stderr, _ = std.debug.lockStderrWriter(&.{});
4504        defer std.debug.unlockStderrWriter();
4505        stderr.print("# Begin Function AIR: {f}:\n", .{fqn.fmt(ip)}) catch {};
4506        air.write(stderr, pt, liveness);
4507        stderr.print("# End Function AIR: {f}\n\n", .{fqn.fmt(ip)}) catch {};
4508    }
4509
4510    if (std.debug.runtime_safety) verify_liveness: {
4511        var verify: Air.Liveness.Verify = .{
4512            .gpa = gpa,
4513            .zcu = zcu,
4514            .air = air.*,
4515            .liveness = liveness orelse break :verify_liveness,
4516            .intern_pool = ip,
4517        };
4518        defer verify.deinit();
4519
4520        verify.verify() catch |err| switch (err) {
4521            error.OutOfMemory => return error.OutOfMemory,
4522            else => return zcu.codegenFail(nav, "invalid liveness: {s}", .{@errorName(err)}),
4523        };
4524    }
4525
4526    // The LLVM backend is special, because we only need to do codegen. There is no equivalent to the
4527    // "emit" step because LLVM does not support incremental linking. Our linker (LLD or self-hosted)
4528    // will just see the ZCU object file which LLVM ultimately emits.
4529    if (zcu.llvm_object) |llvm_object| {
4530        assert(pt.tid == .main); // LLVM has a lot of shared state
4531        try llvm_object.updateFunc(pt, func_index, air, &liveness);
4532        return error.BackendDoesNotProduceMir;
4533    }
4534
4535    const lf = comp.bin_file orelse return error.NoLinkFile;
4536
4537    // Just like LLVM, the SPIR-V backend can't be multi-threaded due to SPIR-V design limitations.
4538    if (lf.cast(.spirv)) |spirv_file| {
4539        assert(pt.tid == .main); // SPIR-V has a lot of shared state
4540        spirv_file.updateFunc(pt, func_index, air, &liveness) catch |err| {
4541            switch (err) {
4542                error.OutOfMemory => comp.link_diags.setAllocFailure(),
4543            }
4544            return error.CodegenFail;
4545        };
4546        return error.BackendDoesNotProduceMir;
4547    }
4548
4549    return codegen.generateFunction(lf, pt, zcu.navSrcLoc(nav), func_index, air, &liveness) catch |err| switch (err) {
4550        error.OutOfMemory,
4551        error.CodegenFail,
4552        => |e| return e,
4553        error.Overflow,
4554        error.RelocationNotByteAligned,
4555        => return zcu.codegenFail(nav, "unable to codegen: {s}", .{@errorName(err)}),
4556    };
4557}