    1//! All interned objects have both a value and a type.
    2//! This data structure is self-contained.
    3
    4const builtin = @import("builtin");
    5const std = @import("std");
    6const Allocator = std.mem.Allocator;
    7const assert = std.debug.assert;
    8const BigIntConst = std.math.big.int.Const;
    9const BigIntMutable = std.math.big.int.Mutable;
   10const Cache = std.Build.Cache;
   11const Limb = std.math.big.Limb;
   12const Hash = std.hash.Wyhash;
   13
   14const InternPool = @This();
   15const Zcu = @import("Zcu.zig");
   16const Zir = std.zig.Zir;
   17
/// One item per thread, indexed by `tid`, which is dense and unique per thread.
locals: []Local,
/// Length must be a power of two and represents the number of simultaneous
/// writers that can mutate any single sharded data structure.
shards: []Shard,
/// Key is the error name, index is the error tag value. Index 0 has a length-0 string.
global_error_set: GlobalErrorSet,
/// Cached number of active bits in a `tid`.
tid_width: if (single_threaded) u0 else std.math.Log2Int(u32),
/// Cached shift amount to put a `tid` in the top bits of a 30-bit value.
tid_shift_30: if (single_threaded) u0 else std.math.Log2Int(u32),
/// Cached shift amount to put a `tid` in the top bits of a 31-bit value.
tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32),
/// Cached shift amount to put a `tid` in the top bits of a 32-bit value.
tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32),

// Incremental-compilation dependency tables. Each table maps a dependee to the
// index in `dep_entries` of the first entry in that dependee's dependency list.

/// Dependencies on the source code hash associated with a ZIR instruction.
/// * For a `declaration`, this is the entire declaration body.
/// * For a `struct_decl`, `union_decl`, etc, this is the source of the fields (but not declarations).
/// * For a `func`, this is the source of the full function signature.
/// These are also invalidated if tracking fails for this instruction.
/// Value is index into `dep_entries` of the first dependency on this hash.
src_hash_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index),
/// Dependencies on the value of a Nav.
/// Value is index into `dep_entries` of the first dependency on this Nav value.
nav_val_deps: std.AutoArrayHashMapUnmanaged(Nav.Index, DepEntry.Index),
/// Dependencies on the type of a Nav.
/// Value is index into `dep_entries` of the first dependency on this Nav type.
nav_ty_deps: std.AutoArrayHashMapUnmanaged(Nav.Index, DepEntry.Index),
/// Dependencies on an interned value, either:
/// * a runtime function (invalidated when its IES changes)
/// * a container type requiring resolution (invalidated when the type must be recreated at a new index)
/// Value is index into `dep_entries` of the first dependency on this interned value.
interned_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index),
/// Dependencies on a ZON file. Triggered by `@import` of ZON.
/// Value is index into `dep_entries` of the first dependency on this ZON file.
zon_file_deps: std.AutoArrayHashMapUnmanaged(FileIndex, DepEntry.Index),
/// Dependencies on an embedded file.
/// Introduced by `@embedFile`; invalidated when the file changes.
/// Value is index into `dep_entries` of the first dependency on this `Zcu.EmbedFile`.
embed_file_deps: std.AutoArrayHashMapUnmanaged(Zcu.EmbedFile.Index, DepEntry.Index),
/// Dependencies on the full set of names in a ZIR namespace.
/// Key refers to a `struct_decl`, `union_decl`, etc.
/// Value is index into `dep_entries` of the first dependency on this namespace.
namespace_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index),
/// Dependencies on the (non-)existence of some name in a namespace.
/// Value is index into `dep_entries` of the first dependency on this name.
namespace_name_deps: std.AutoArrayHashMapUnmanaged(NamespaceNameKey, DepEntry.Index),
// Dependencies on the value of fields memoized on `Zcu` (`panic_messages` etc).
// If set, these are indices into `dep_entries` of the first dependency on this state.
memoized_state_main_deps: DepEntry.Index.Optional,
memoized_state_panic_deps: DepEntry.Index.Optional,
memoized_state_va_list_deps: DepEntry.Index.Optional,
memoized_state_assembly_deps: DepEntry.Index.Optional,

/// Given a `Depender`, points to an entry in `dep_entries` whose `depender`
/// matches. The `next_dependee` field can be used to iterate all such entries
/// and remove them from the corresponding lists.
first_dependency: std.AutoArrayHashMapUnmanaged(AnalUnit, DepEntry.Index),

/// Stores dependency information. The hashmaps declared above are used to look
/// up entries in this list as required. This is not stored in `extra` so that
/// we can use `free_dep_entries` to track free indices, since dependencies are
/// removed frequently.
dep_entries: std.ArrayList(DepEntry),
/// Stores unused indices in `dep_entries` which can be reused without a full
/// garbage collection pass.
free_dep_entries: std.ArrayList(DepEntry.Index),

/// Whether a multi-threaded intern pool is useful.
/// Currently `false` until the intern pool is actually accessed
/// from multiple threads to reduce the cost of this data structure.
const want_multi_threaded = true;

/// Whether a single-threaded intern pool impl is in use.
pub const single_threaded = builtin.single_threaded or !want_multi_threaded;
   94
/// An `InternPool` with no allocated backing memory; the valid starting state
/// before any locals or shards exist.
pub const empty: InternPool = .{
    .locals = &.{},
    .shards = &.{},
    .global_error_set = .empty,
    .tid_width = 0,
    // NOTE(review): in the multi-threaded case these start at 31, apparently a
    // placeholder until `tid_width` is computed elsewhere — confirm against the
    // code that initializes the shards.
    .tid_shift_30 = if (single_threaded) 0 else 31,
    .tid_shift_31 = if (single_threaded) 0 else 31,
    .tid_shift_32 = if (single_threaded) 0 else 31,
    .src_hash_deps = .empty,
    .nav_val_deps = .empty,
    .nav_ty_deps = .empty,
    .interned_deps = .empty,
    .zon_file_deps = .empty,
    .embed_file_deps = .empty,
    .namespace_deps = .empty,
    .namespace_name_deps = .empty,
    .memoized_state_main_deps = .none,
    .memoized_state_panic_deps = .none,
    .memoized_state_va_list_deps = .none,
    .memoized_state_assembly_deps = .none,
    .first_dependency = .empty,
    .dep_entries = .empty,
    .free_dep_entries = .empty,
};
  119
/// A `TrackedInst.Index` provides a single, unchanging reference to a ZIR instruction across a whole
/// compilation. From this index, you can acquire a `TrackedInst`, which contains a reference to both
/// the file which the instruction lives in, and the instruction index itself, which is updated on
/// incremental updates by `Zcu.updateZirRefs`.
pub const TrackedInst = extern struct {
    file: FileIndex,
    inst: Zir.Inst.Index,

    /// It is possible on an incremental update that we "lose" a ZIR instruction: some tracked `%x` in
    /// the old ZIR failed to map to any `%y` in the new ZIR. For this reason, we actually store values
    /// of type `MaybeLost`, which uses `ZirIndex.lost` to represent this case. `Index.resolve` etc
    /// return `null` when the `TrackedInst` being resolved has been lost.
    pub const MaybeLost = extern struct {
        file: FileIndex,
        inst: ZirIndex,
        pub const ZirIndex = enum(u32) {
            /// Tracking failed for this ZIR instruction. Uses of it should fail.
            lost = std.math.maxInt(u32),
            _,
            /// Returns the live instruction index, or `null` if tracking was lost.
            pub fn unwrap(inst: ZirIndex) ?Zir.Inst.Index {
                return switch (inst) {
                    .lost => null,
                    _ => @enumFromInt(@intFromEnum(inst)),
                };
            }
            /// Wraps a live instruction index. The `lost` encoding is never produced here.
            pub fn wrap(inst: Zir.Inst.Index) ZirIndex {
                return @enumFromInt(@intFromEnum(inst));
            }
        };
        comptime {
            // The fields should be tightly packed. See also serialization logic in `Compilation.saveState`.
            assert(@sizeOf(@This()) == @sizeOf(FileIndex) + @sizeOf(ZirIndex));
        }
    };

    /// Packed (tid, per-thread index) reference into a `Local`'s `tracked_insts` list.
    pub const Index = enum(u32) {
        _,
        /// Returns the file and current ZIR instruction for this tracked index,
        /// or `null` if the instruction was lost on an incremental update.
        pub fn resolveFull(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) ?TrackedInst {
            const tracked_inst_unwrapped = tracked_inst_index.unwrap(ip);
            // Acquire pairs with the release in the code that appends to `tracked_insts`.
            const tracked_insts = ip.getLocalShared(tracked_inst_unwrapped.tid).tracked_insts.acquire();
            const maybe_lost = tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index];
            return .{
                .file = maybe_lost.file,
                .inst = maybe_lost.inst.unwrap() orelse return null,
            };
        }
        /// Like `resolveFull`, but only returns the file; succeeds even for lost instructions.
        pub fn resolveFile(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) FileIndex {
            const tracked_inst_unwrapped = tracked_inst_index.unwrap(ip);
            const tracked_insts = ip.getLocalShared(tracked_inst_unwrapped.tid).tracked_insts.acquire();
            const maybe_lost = tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index];
            return maybe_lost.file;
        }
        /// Returns only the current ZIR instruction index, or `null` if lost.
        pub fn resolve(i: TrackedInst.Index, ip: *const InternPool) ?Zir.Inst.Index {
            return (i.resolveFull(ip) orelse return null).inst;
        }

        pub fn toOptional(i: TrackedInst.Index) Optional {
            return @enumFromInt(@intFromEnum(i));
        }
        pub const Optional = enum(u32) {
            none = std.math.maxInt(u32),
            _,
            pub fn unwrap(opt: Optional) ?TrackedInst.Index {
                return switch (opt) {
                    .none => null,
                    _ => @enumFromInt(@intFromEnum(opt)),
                };
            }

            const debug_state = InternPool.debug_state;
        };

        pub const Unwrapped = struct {
            tid: Zcu.PerThread.Id,
            index: u32,

            /// Packs `tid` into the bits above the per-thread index.
            /// Asserts that both components fit in their allotted bit ranges.
            pub fn wrap(unwrapped: Unwrapped, ip: *const InternPool) TrackedInst.Index {
                assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask());
                assert(unwrapped.index <= ip.getIndexMask(u32));
                return @enumFromInt(@shlExact(@as(u32, @intFromEnum(unwrapped.tid)), ip.tid_shift_32) |
                    unwrapped.index);
            }
        };
        /// Inverse of `Unwrapped.wrap`: splits the packed value back into (tid, index).
        pub fn unwrap(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) Unwrapped {
            return .{
                .tid = @enumFromInt(@intFromEnum(tracked_inst_index) >> ip.tid_shift_32 & ip.getTidMask()),
                .index = @intFromEnum(tracked_inst_index) & ip.getIndexMask(u32),
            };
        }

        const debug_state = InternPool.debug_state;
    };
};
  213
/// Returns the unique `TrackedInst.Index` for `key`, interning it into the
/// sharded tracked-instruction map if it is not already present.
/// Thread-safe: performs an optimistic lock-free lookup first, and only takes
/// the shard mutex when an insert (and possibly a map growth) is required.
pub fn trackZir(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    key: TrackedInst,
) Allocator.Error!TrackedInst.Index {
    // Hash the key in its stored `MaybeLost` representation so the hash agrees
    // with how `rehashTrackedInsts` hashes the entries already in the lists.
    const maybe_lost_key: TrackedInst.MaybeLost = .{
        .file = key.file,
        .inst = TrackedInst.MaybeLost.ZirIndex.wrap(key.inst),
    };
    const full_hash = Hash.hash(0, std.mem.asBytes(&maybe_lost_key));
    // Low bits pick the shard (shard count is a power of two); upper 32 bits
    // are the in-shard probe hash.
    const hash: u32 = @truncate(full_hash >> 32);
    const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
    // Optimistic lock-free probe of the currently-published map.
    var map = shard.shared.tracked_inst_map.acquire();
    const Map = @TypeOf(map);
    var map_mask = map.header().mask();
    var map_index = hash;
    while (true) : (map_index += 1) {
        map_index &= map_mask;
        const entry = &map.entries[map_index];
        // An empty slot terminates the linear-probe sequence: the key is absent.
        const index = entry.acquire().unwrap() orelse break;
        if (entry.hash != hash) continue;
        if (std.meta.eql(index.resolveFull(ip) orelse continue, key)) return index;
    }
    // Not found lock-free; take the shard's writer lock and probe again, since
    // another thread may have inserted the key or replaced the map meanwhile.
    shard.mutate.tracked_inst_map.mutex.lock();
    defer shard.mutate.tracked_inst_map.mutex.unlock();
    if (map.entries != shard.shared.tracked_inst_map.entries) {
        // The map was replaced (grown) since our lock-free probe; restart on the new map.
        map = shard.shared.tracked_inst_map;
        map_mask = map.header().mask();
        map_index = hash;
    }
    while (true) : (map_index += 1) {
        map_index &= map_mask;
        const entry = &map.entries[map_index];
        const index = entry.acquire().unwrap() orelse break;
        if (entry.hash != hash) continue;
        if (std.meta.eql(index.resolveFull(ip) orelse continue, key)) return index;
    }
    // We are definitely inserting; `map_index` holds the empty slot found above.
    // NOTE(review): this defer also runs if `ensureUnusedCapacity` below returns
    // OOM, leaving `len` overcounted without an insert — confirm intended.
    defer shard.mutate.tracked_inst_map.len += 1;
    const local = ip.getLocal(tid);
    const list = local.getMutableTrackedInsts(gpa);
    try list.ensureUnusedCapacity(1);
    const map_header = map.header().*;
    if (shard.mutate.tracked_inst_map.len < map_header.capacity * 3 / 5) {
        // Below the 3/5 load factor: insert in place. Store `hash` first, then
        // publish the value with a release store so lock-free readers that
        // acquire the value also observe the hash.
        const entry = &map.entries[map_index];
        entry.hash = hash;
        const index = (TrackedInst.Index.Unwrapped{
            .tid = tid,
            .index = list.mutate.len,
        }).wrap(ip);
        list.appendAssumeCapacity(.{maybe_lost_key});
        entry.release(index.toOptional());
        return index;
    }
    // At or above the load factor: grow to double capacity. The new buffer comes
    // from this thread's arena; the old map is left intact for concurrent
    // lock-free readers still holding a pointer to it.
    const arena_state = &local.mutate.arena;
    var arena = arena_state.promote(gpa);
    defer arena_state.* = arena.state;
    const new_map_capacity = map_header.capacity * 2;
    const new_map_buf = try arena.allocator().alignedAlloc(
        u8,
        .fromByteUnits(Map.alignment),
        Map.entries_offset + new_map_capacity * @sizeOf(Map.Entry),
    );
    const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };
    new_map.header().* = .{ .capacity = new_map_capacity };
    @memset(new_map.entries[0..new_map_capacity], .{ .value = .none, .hash = undefined });
    // Re-insert every existing entry into the new, larger map.
    const new_map_mask = new_map.header().mask();
    map_index = 0;
    while (map_index < map_header.capacity) : (map_index += 1) {
        const entry = &map.entries[map_index];
        const index = entry.value.unwrap() orelse continue;
        const item_hash = entry.hash;
        var new_map_index = item_hash;
        while (true) : (new_map_index += 1) {
            new_map_index &= new_map_mask;
            const new_entry = &new_map.entries[new_map_index];
            if (new_entry.value != .none) continue;
            new_entry.* = .{
                .value = index.toOptional(),
                .hash = item_hash,
            };
            break;
        }
    }
    // Find a free slot for the new key in the grown map, append the tracked
    // instruction, then publish the new map to lock-free readers.
    map = new_map;
    map_index = hash;
    while (true) : (map_index += 1) {
        map_index &= new_map_mask;
        if (map.entries[map_index].value == .none) break;
    }
    const index = (TrackedInst.Index.Unwrapped{
        .tid = tid,
        .index = list.mutate.len,
    }).wrap(ip);
    list.appendAssumeCapacity(.{maybe_lost_key});
    map.entries[map_index] = .{ .value = index.toOptional(), .hash = hash };
    shard.shared.tracked_inst_map.release(new_map);
    return index;
}
  313
/// At the start of an incremental update, we update every entry in `tracked_insts` to include
/// the new ZIR index. Once this is done, we must update the hashmap metadata so that lookups
/// return correct entries where they already exist.
pub fn rehashTrackedInsts(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
) Allocator.Error!void {
    assert(tid == .main); // we shouldn't have any other threads active right now

    // TODO: this function doesn't handle OOM well. What should it do?

    // We don't lock anything, as this function assumes that no other thread is
    // accessing `tracked_insts`. This is necessary because we're going to be
    // iterating the `TrackedInst`s in each `Local`, so we have to know that
    // none will be added as we work.

    // Pass 1: figure out how big each shard needs to be and store it in its mutate `len`.
    for (ip.shards) |*shard| shard.mutate.tracked_inst_map.len = 0;
    for (ip.locals) |*local| {
        // `getMutableTrackedInsts` is okay only because no other thread is currently active.
        // We need the `mutate` for the len.
        for (local.getMutableTrackedInsts(gpa).viewAllowEmpty().items(.@"0")) |tracked_inst| {
            if (tracked_inst.inst == .lost) continue; // we can ignore this one!
            // Shard selection must match `trackZir`: low bits of the full hash.
            const full_hash = Hash.hash(0, std.mem.asBytes(&tracked_inst));
            const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
            shard.mutate.tracked_inst_map.len += 1;
        }
    }

    const Map = Shard.Map(TrackedInst.Index.Optional);

    const arena_state = &ip.getLocal(tid).mutate.arena;

    // Pass 2: we know how big each shard must be, so ensure we have the capacity we need.
    for (ip.shards) |*shard| {
        const want_capacity = if (shard.mutate.tracked_inst_map.len == 0) 0 else cap: {
            // We need to return a capacity of at least 2 to make sure we don't have the `Map(...).empty` value.
            // For this reason, note the `+ 1` in the below expression. This matches the behavior of `trackZir`.
            break :cap std.math.ceilPowerOfTwo(u32, shard.mutate.tracked_inst_map.len * 5 / 3 + 1) catch unreachable;
        };
        const have_capacity = shard.shared.tracked_inst_map.header().capacity; // no acquire because we hold the mutex
        if (have_capacity >= want_capacity) {
            if (have_capacity == 1) {
                // The map is `.empty` -- we can't memset the entries, or we'll segfault, because
                // the buffer is secretly constant.
            } else {
                // Reuse the existing buffer: wipe all entries so pass 3 starts from scratch.
                @memset(shard.shared.tracked_inst_map.entries[0..have_capacity], .{ .value = .none, .hash = undefined });
            }
            continue;
        }
        // Existing map is too small: allocate a fresh one from the arena.
        var arena = arena_state.promote(gpa);
        defer arena_state.* = arena.state;
        const new_map_buf = try arena.allocator().alignedAlloc(
            u8,
            .fromByteUnits(Map.alignment),
            Map.entries_offset + want_capacity * @sizeOf(Map.Entry),
        );
        const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };
        new_map.header().* = .{ .capacity = want_capacity };
        @memset(new_map.entries[0..want_capacity], .{ .value = .none, .hash = undefined });
        shard.shared.tracked_inst_map.release(new_map);
    }

    // Pass 3: now, actually insert the items.
    for (ip.locals, 0..) |*local, local_tid| {
        // `getMutableTrackedInsts` is okay only because no other thread is currently active.
        // We need the `mutate` for the len.
        for (local.getMutableTrackedInsts(gpa).viewAllowEmpty().items(.@"0"), 0..) |tracked_inst, local_inst_index| {
            if (tracked_inst.inst == .lost) continue; // we can ignore this one!
            const full_hash = Hash.hash(0, std.mem.asBytes(&tracked_inst));
            const hash: u32 = @truncate(full_hash >> 32);
            const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
            const map = shard.shared.tracked_inst_map; // no acquire because we hold the mutex
            const map_mask = map.header().mask();
            var map_index = hash;
            // Linear-probe for the first empty slot; capacity was sized in pass 2,
            // so a free slot always exists.
            const entry = while (true) : (map_index += 1) {
                map_index &= map_mask;
                const entry = &map.entries[map_index];
                if (entry.acquire() == .none) break entry;
            };
            const index = TrackedInst.Index.Unwrapped.wrap(.{
                .tid = @enumFromInt(local_tid),
                .index = @intCast(local_inst_index),
            }, ip);
            entry.hash = hash;
            entry.release(index.toOptional());
        }
    }
}
  404
  405/// Analysis Unit. Represents a single entity which undergoes semantic analysis.
  406/// This is the "source" of an incremental dependency edge.
  407pub const AnalUnit = packed struct(u64) {
  408    kind: Kind,
  409    id: u32,
  410
  411    pub const Kind = enum(u32) {
  412        @"comptime",
  413        nav_val,
  414        nav_ty,
  415        type,
  416        func,
  417        memoized_state,
  418    };
  419
  420    pub const Unwrapped = union(Kind) {
  421        /// This `AnalUnit` analyzes the body of the given `comptime` declaration.
  422        @"comptime": ComptimeUnit.Id,
  423        /// This `AnalUnit` resolves the value of the given `Nav`.
  424        nav_val: Nav.Index,
  425        /// This `AnalUnit` resolves the type of the given `Nav`.
  426        nav_ty: Nav.Index,
  427        /// This `AnalUnit` resolves the given `struct`/`union`/`enum` type.
  428        /// Generated tag enums are never used here (they do not undergo type resolution).
  429        type: InternPool.Index,
  430        /// This `AnalUnit` analyzes the body of the given runtime function.
  431        func: InternPool.Index,
  432        /// This `AnalUnit` resolves all state which is memoized in fields on `Zcu`.
  433        memoized_state: MemoizedStateStage,
  434    };
  435
  436    pub fn unwrap(au: AnalUnit) Unwrapped {
  437        return switch (au.kind) {
  438            inline else => |tag| @unionInit(
  439                Unwrapped,
  440                @tagName(tag),
  441                @enumFromInt(au.id),
  442            ),
  443        };
  444    }
  445    pub fn wrap(raw: Unwrapped) AnalUnit {
  446        return switch (raw) {
  447            inline else => |id, tag| .{
  448                .kind = tag,
  449                .id = @intFromEnum(id),
  450            },
  451        };
  452    }
  453
  454    pub fn toOptional(as: AnalUnit) Optional {
  455        return @enumFromInt(@as(u64, @bitCast(as)));
  456    }
  457    pub const Optional = enum(u64) {
  458        none = std.math.maxInt(u64),
  459        _,
  460        pub fn unwrap(opt: Optional) ?AnalUnit {
  461            return switch (opt) {
  462                .none => null,
  463                _ => @bitCast(@intFromEnum(opt)),
  464            };
  465        }
  466    };
  467};
  468
/// Identifies which subset of `Zcu`-memoized state an `AnalUnit.memoized_state`
/// unit resolves (see the `memoized_state_*_deps` fields above).
pub const MemoizedStateStage = enum(u32) {
    /// Everything other than panics and `VaList`.
    main,
    /// Everything within `std.builtin.Panic`.
    /// Since the panic handler is user-provided, this must be able to reference the other memoized state.
    panic,
    /// Specifically `std.builtin.VaList`. See `Zcu.BuiltinDecl.stage`.
    va_list,
    /// Everything within `std.builtin.assembly`. See `Zcu.BuiltinDecl.stage`.
    assembly,
};
  480
  481pub const ComptimeUnit = extern struct {
  482    zir_index: TrackedInst.Index,
  483    namespace: NamespaceIndex,
  484
  485    comptime {
  486        assert(std.meta.hasUniqueRepresentation(ComptimeUnit));
  487    }
  488
  489    pub const Id = enum(u32) {
  490        _,
  491        const Unwrapped = struct {
  492            tid: Zcu.PerThread.Id,
  493            index: u32,
  494            fn wrap(unwrapped: Unwrapped, ip: *const InternPool) ComptimeUnit.Id {
  495                assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask());
  496                assert(unwrapped.index <= ip.getIndexMask(u32));
  497                return @enumFromInt(@shlExact(@as(u32, @intFromEnum(unwrapped.tid)), ip.tid_shift_32) |
  498                    unwrapped.index);
  499            }
  500        };
  501        fn unwrap(id: Id, ip: *const InternPool) Unwrapped {
  502            return .{
  503                .tid = @enumFromInt(@intFromEnum(id) >> ip.tid_shift_32 & ip.getTidMask()),
  504                .index = @intFromEnum(id) & ip.getIndexMask(u31),
  505            };
  506        }
  507
  508        const debug_state = InternPool.debug_state;
  509    };
  510};
  511
/// Named Addressable Value. Represents a global value with a name and address. This name may be
/// generated, and the type (and hence address) may be comptime-only. A `Nav` whose type has runtime
/// bits is sent to the linker to be emitted to the binary.
///
/// * Every ZIR `declaration` which is not a `comptime` declaration has a `Nav` (post-instantiation)
///   which stores the declaration's resolved value.
/// * Generic instances have a `Nav` corresponding to the instantiated function.
/// * `@extern` calls create a `Nav` whose value is a `.@"extern"`.
///
/// This data structure is optimized for the `analysis_info != null` case, because this is much more
/// common in practice; the other case is used only for externs and for generic instances. At the time
/// of writing, in the compiler itself, around 74% of all `Nav`s have `analysis_info != null`.
/// (Specifically, 104225 / 140923)
///
/// `Nav.Repr` is the in-memory representation.
pub const Nav = struct {
    /// The unqualified name of this `Nav`. Namespace lookups use this name, and error messages may use it.
    /// Additionally, extern `Nav`s (i.e. those whose value is an `extern`) use this name.
    name: NullTerminatedString,
    /// The fully-qualified name of this `Nav`.
    fqn: NullTerminatedString,
    /// This field is populated iff this `Nav` is resolved by semantic analysis.
    /// If this is `null`, then `status == .fully_resolved` always.
    analysis: ?struct {
        namespace: NamespaceIndex,
        zir_index: TrackedInst.Index,
    },
    /// Resolution state of this `Nav`. Most accessors below assert `!= .unresolved`.
    status: union(enum) {
        /// This `Nav` is pending semantic analysis.
        unresolved,
        /// The type of this `Nav` is resolved; the value is queued for resolution.
        type_resolved: struct {
            type: InternPool.Index,
            is_const: bool,
            alignment: Alignment,
            @"linksection": OptionalNullTerminatedString,
            @"addrspace": std.builtin.AddressSpace,
            is_threadlocal: bool,
            /// This field is whether this `Nav` is a literal `extern` definition.
            /// It does *not* tell you whether this might alias an extern fn (see #21027).
            is_extern_decl: bool,
        },
        /// The value of this `Nav` is resolved.
        fully_resolved: struct {
            val: InternPool.Index,
            is_const: bool,
            alignment: Alignment,
            @"linksection": OptionalNullTerminatedString,
            @"addrspace": std.builtin.AddressSpace,
        },
    },
  563
  564    /// Asserts that `status != .unresolved`.
  565    pub fn typeOf(nav: Nav, ip: *const InternPool) InternPool.Index {
  566        return switch (nav.status) {
  567            .unresolved => unreachable,
  568            .type_resolved => |r| r.type,
  569            .fully_resolved => |r| ip.typeOf(r.val),
  570        };
  571    }
  572
  573    /// This function is intended to be used by code generation, since semantic
  574    /// analysis will ensure that any `Nav` which is potentially `extern` is
  575    /// fully resolved.
  576    /// Asserts that `status == .fully_resolved`.
  577    pub fn getResolvedExtern(nav: Nav, ip: *const InternPool) ?Key.Extern {
  578        assert(nav.status == .fully_resolved);
  579        return nav.getExtern(ip);
  580    }
  581
  582    /// Always returns `null` for `status == .type_resolved`. This function is inteded
  583    /// to be used by code generation, since semantic analysis will ensure that any `Nav`
  584    /// which is potentially `extern` is fully resolved.
  585    /// Asserts that `status != .unresolved`.
  586    pub fn getExtern(nav: Nav, ip: *const InternPool) ?Key.Extern {
  587        return switch (nav.status) {
  588            .unresolved => unreachable,
  589            .type_resolved => null,
  590            .fully_resolved => |r| switch (ip.indexToKey(r.val)) {
  591                .@"extern" => |e| e,
  592                else => null,
  593            },
  594        };
  595    }
  596
  597    /// Asserts that `status != .unresolved`.
  598    pub fn getAddrspace(nav: Nav) std.builtin.AddressSpace {
  599        return switch (nav.status) {
  600            .unresolved => unreachable,
  601            .type_resolved => |r| r.@"addrspace",
  602            .fully_resolved => |r| r.@"addrspace",
  603        };
  604    }
  605
  606    /// Asserts that `status != .unresolved`.
  607    pub fn getAlignment(nav: Nav) Alignment {
  608        return switch (nav.status) {
  609            .unresolved => unreachable,
  610            .type_resolved => |r| r.alignment,
  611            .fully_resolved => |r| r.alignment,
  612        };
  613    }
  614
  615    /// Asserts that `status != .unresolved`.
  616    pub fn getLinkSection(nav: Nav) OptionalNullTerminatedString {
  617        return switch (nav.status) {
  618            .unresolved => unreachable,
  619            .type_resolved => |r| r.@"linksection",
  620            .fully_resolved => |r| r.@"linksection",
  621        };
  622    }
  623
  624    /// Asserts that `status != .unresolved`.
  625    pub fn isThreadlocal(nav: Nav, ip: *const InternPool) bool {
  626        return switch (nav.status) {
  627            .unresolved => unreachable,
  628            .type_resolved => |r| r.is_threadlocal,
  629            .fully_resolved => |r| switch (ip.indexToKey(r.val)) {
  630                .@"extern" => |e| e.is_threadlocal,
  631                .variable => |v| v.is_threadlocal,
  632                else => false,
  633            },
  634        };
  635    }
  636
  637    pub fn isFn(nav: Nav, ip: *const InternPool) bool {
  638        return switch (nav.status) {
  639            .unresolved => unreachable,
  640            .type_resolved => |r| {
  641                const tag = ip.zigTypeTag(r.type);
  642                return tag == .@"fn";
  643            },
  644            .fully_resolved => |r| {
  645                const tag = ip.zigTypeTag(ip.typeOf(r.val));
  646                return tag == .@"fn";
  647            },
  648        };
  649    }
  650
  651    /// If this returns `true`, then a pointer to this `Nav` might actually be encoded as a pointer
  652    /// to some other `Nav` due to an extern definition or extern alias (see #21027).
  653    /// This query is valid on `Nav`s for whom only the type is resolved.
  654    /// Asserts that `status != .unresolved`.
  655    pub fn isExternOrFn(nav: Nav, ip: *const InternPool) bool {
  656        return switch (nav.status) {
  657            .unresolved => unreachable,
  658            .type_resolved => |r| {
  659                if (r.is_extern_decl) return true;
  660                const tag = ip.zigTypeTag(r.type);
  661                if (tag == .@"fn") return true;
  662                return false;
  663            },
  664            .fully_resolved => |r| {
  665                if (ip.indexToKey(r.val) == .@"extern") return true;
  666                const tag = ip.zigTypeTag(ip.typeOf(r.val));
  667                if (tag == .@"fn") return true;
  668                return false;
  669            },
  670        };
  671    }
  672
  673    /// Get the ZIR instruction corresponding to this `Nav`, used to resolve source locations.
  674    /// This is a `declaration`.
  675    pub fn srcInst(nav: Nav, ip: *const InternPool) TrackedInst.Index {
  676        if (nav.analysis) |a| {
  677            return a.zir_index;
  678        }
  679        // A `Nav` which does not undergo analysis always has a resolved value.
  680        return switch (ip.indexToKey(nav.status.fully_resolved.val)) {
  681            .func => |func| {
  682                // Since `analysis` was not populated, this must be an instantiation.
  683                // Go up to the generic owner and consult *its* `analysis` field.
  684                const go_nav = ip.getNav(ip.indexToKey(func.generic_owner).func.owner_nav);
  685                return go_nav.analysis.?.zir_index;
  686            },
  687            .@"extern" => |@"extern"| @"extern".zir_index, // extern / @extern
  688            else => unreachable,
  689        };
  690    }
  691
    /// A handle to a `Nav`. The owning thread's `tid` is packed into the top
    /// bits and the index into that thread's `navs` list into the low bits
    /// (see `Unwrapped` and the cached `tid_shift_32`).
    pub const Index = enum(u32) {
        _,
        /// Like `Index`, but reserves `maxInt(u32)` to mean "no value".
        pub const Optional = enum(u32) {
            none = std.math.maxInt(u32),
            _,
            /// Converts to `?Nav.Index`, mapping `.none` to `null`.
            pub fn unwrap(opt: Optional) ?Nav.Index {
                return switch (opt) {
                    .none => null,
                    _ => @enumFromInt(@intFromEnum(opt)),
                };
            }

            const debug_state = InternPool.debug_state;
        };
        pub fn toOptional(i: Nav.Index) Optional {
            return @enumFromInt(@intFromEnum(i));
        }
        /// Decomposed form of a `Nav.Index`: which thread's list, and where in it.
        const Unwrapped = struct {
            tid: Zcu.PerThread.Id,
            index: u32,

            /// Packs `tid` into the high bits and `index` into the low bits.
            /// Asserts both fit within their masks.
            fn wrap(unwrapped: Unwrapped, ip: *const InternPool) Nav.Index {
                assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask());
                assert(unwrapped.index <= ip.getIndexMask(u32));
                return @enumFromInt(@shlExact(@as(u32, @intFromEnum(unwrapped.tid)), ip.tid_shift_32) |
                    unwrapped.index);
            }
        };
        /// Inverse of `Unwrapped.wrap`.
        fn unwrap(nav_index: Nav.Index, ip: *const InternPool) Unwrapped {
            return .{
                .tid = @enumFromInt(@intFromEnum(nav_index) >> ip.tid_shift_32 & ip.getTidMask()),
                .index = @intFromEnum(nav_index) & ip.getIndexMask(u32),
            };
        }

        const debug_state = InternPool.debug_state;
    };
  729
    /// The compact in-memory representation of a `Nav`.
    /// 26 bytes.
    const Repr = struct {
        name: NullTerminatedString,
        fqn: NullTerminatedString,
        // The following two fields are either both populated, or both `.none`.
        analysis_namespace: OptionalNamespaceIndex,
        analysis_zir_index: TrackedInst.Index.Optional,
        /// Populated only if `bits.status != .unresolved`.
        /// Holds the type for the `type_resolved*` states and the value for `fully_resolved`.
        type_or_val: InternPool.Index,
        /// Populated only if `bits.status != .unresolved`.
        @"linksection": OptionalNullTerminatedString,
        bits: Bits,

        /// Flag bits packed into 16 bits. The `is_extern_decl` flag of the
        /// `type_resolved` state is folded into `status` rather than getting
        /// its own bit.
        const Bits = packed struct(u16) {
            status: enum(u2) { unresolved, type_resolved, fully_resolved, type_resolved_extern_decl },
            /// Populated only if `bits.status != .unresolved`.
            is_const: bool,
            /// Populated only if `bits.status != .unresolved`.
            alignment: Alignment,
            /// Populated only if `bits.status != .unresolved`.
            @"addrspace": std.builtin.AddressSpace,
            /// Populated only if `bits.status == .type_resolved`.
            is_threadlocal: bool,
            _: u1 = 0,
        };

        /// Expands the compact representation back into a `Nav`.
        /// Inverse of `Nav.pack`.
        fn unpack(repr: Repr) Nav {
            return .{
                .name = repr.name,
                .fqn = repr.fqn,
                .analysis = if (repr.analysis_namespace.unwrap()) |namespace| .{
                    .namespace = namespace,
                    .zir_index = repr.analysis_zir_index.unwrap().?,
                } else a: {
                    // The two analysis fields must be populated together.
                    assert(repr.analysis_zir_index == .none);
                    break :a null;
                },
                .status = switch (repr.bits.status) {
                    .unresolved => .unresolved,
                    .type_resolved, .type_resolved_extern_decl => .{ .type_resolved = .{
                        .type = repr.type_or_val,
                        .is_const = repr.bits.is_const,
                        .alignment = repr.bits.alignment,
                        .@"linksection" = repr.@"linksection",
                        .@"addrspace" = repr.bits.@"addrspace",
                        .is_threadlocal = repr.bits.is_threadlocal,
                        .is_extern_decl = repr.bits.status == .type_resolved_extern_decl,
                    } },
                    .fully_resolved => .{ .fully_resolved = .{
                        .val = repr.type_or_val,
                        .is_const = repr.bits.is_const,
                        .alignment = repr.bits.alignment,
                        .@"linksection" = repr.@"linksection",
                        .@"addrspace" = repr.bits.@"addrspace",
                    } },
                },
            };
        }
    };
  790
    /// Converts this `Nav` to its compact representation for storage.
    /// Inverse of `Repr.unpack`.
    fn pack(nav: Nav) Repr {
        // Note that in the `unresolved` case, we do not mark fields as `undefined`, even though they should not be used.
        // This is to avoid writing undefined bytes to disk when serializing buffers.
        return .{
            .name = nav.name,
            .fqn = nav.fqn,
            .analysis_namespace = if (nav.analysis) |a| a.namespace.toOptional() else .none,
            .analysis_zir_index = if (nav.analysis) |a| a.zir_index.toOptional() else .none,
            .type_or_val = switch (nav.status) {
                .unresolved => .none,
                .type_resolved => |r| r.type,
                .fully_resolved => |r| r.val,
            },
            .@"linksection" = switch (nav.status) {
                .unresolved => .none,
                .type_resolved => |r| r.@"linksection",
                .fully_resolved => |r| r.@"linksection",
            },
            .bits = switch (nav.status) {
                .unresolved => .{
                    .status = .unresolved,
                    .is_const = false,
                    .alignment = .none,
                    .@"addrspace" = .generic,
                    .is_threadlocal = false,
                },
                .type_resolved => |r| .{
                    // `is_extern_decl` is encoded as a distinct status tag.
                    .status = if (r.is_extern_decl) .type_resolved_extern_decl else .type_resolved,
                    .is_const = r.is_const,
                    .alignment = r.alignment,
                    .@"addrspace" = r.@"addrspace",
                    .is_threadlocal = r.is_threadlocal,
                },
                .fully_resolved => |r| .{
                    .status = .fully_resolved,
                    .is_const = r.is_const,
                    .alignment = r.alignment,
                    .@"addrspace" = r.@"addrspace",
                    // `is_threadlocal` is only meaningful for `type_resolved`.
                    .is_threadlocal = false,
                },
            },
        };
    }
  834};
  835
/// The thing a dependency is *on*. Each variant maps to one of the `*_deps`
/// tables (or, for `memoized_state`, one of the `memoized_state_*_deps`
/// heads); see `dependencyIterator` and `addDependency` for the mapping.
pub const Dependee = union(enum) {
    src_hash: TrackedInst.Index,
    nav_val: Nav.Index,
    nav_ty: Nav.Index,
    interned: Index,
    zon_file: FileIndex,
    embed_file: Zcu.EmbedFile.Index,
    namespace: TrackedInst.Index,
    namespace_name: NamespaceNameKey,
    memoized_state: MemoizedStateStage,
};
  847
/// Removes every dependency registered for `depender`, walking its singly
/// linked `next_dependee` chain and unlinking each entry from its dependee's
/// doubly linked depender list. Unlinked entries are recycled through
/// `free_dep_entries` when possible.
pub fn removeDependenciesForDepender(ip: *InternPool, gpa: Allocator, depender: AnalUnit) void {
    var opt_idx = (ip.first_dependency.fetchSwapRemove(depender) orelse return).value.toOptional();

    while (opt_idx.unwrap()) |idx| {
        // Copy the entry before mutating the backing array below.
        const dep = ip.dep_entries.items[@intFromEnum(idx)];
        opt_idx = dep.next_dependee;

        const prev_idx = dep.prev.unwrap() orelse {
            // This entry is the start of a list in some `*_deps`.
            // We cannot easily remove this mapping, so this must remain as a dummy entry.
            ip.dep_entries.items[@intFromEnum(idx)].depender = .none;
            continue;
        };

        // Unlink from the middle/end of the dependee's doubly linked list.
        ip.dep_entries.items[@intFromEnum(prev_idx)].next = dep.next;
        if (dep.next.unwrap()) |next_idx| {
            ip.dep_entries.items[@intFromEnum(next_idx)].prev = dep.prev;
        }

        ip.free_dep_entries.append(gpa, idx) catch {
            // This memory will be reclaimed on the next garbage collection.
            // Thus, we do not need to propagate this error.
        };
    }
}
  873
  874pub const DependencyIterator = struct {
  875    ip: *const InternPool,
  876    next_entry: DepEntry.Index.Optional,
  877    pub fn next(it: *DependencyIterator) ?AnalUnit {
  878        while (true) {
  879            const idx = it.next_entry.unwrap() orelse return null;
  880            const entry = it.ip.dep_entries.items[@intFromEnum(idx)];
  881            it.next_entry = entry.next;
  882            if (entry.depender.unwrap()) |depender| return depender;
  883        }
  884    }
  885};
  886
  887pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyIterator {
  888    const first_entry = switch (dependee) {
  889        .src_hash => |x| ip.src_hash_deps.get(x),
  890        .nav_val => |x| ip.nav_val_deps.get(x),
  891        .nav_ty => |x| ip.nav_ty_deps.get(x),
  892        .interned => |x| ip.interned_deps.get(x),
  893        .zon_file => |x| ip.zon_file_deps.get(x),
  894        .embed_file => |x| ip.embed_file_deps.get(x),
  895        .namespace => |x| ip.namespace_deps.get(x),
  896        .namespace_name => |x| ip.namespace_name_deps.get(x),
  897        .memoized_state => |stage| switch (stage) {
  898            .main => ip.memoized_state_main_deps.unwrap(),
  899            .panic => ip.memoized_state_panic_deps.unwrap(),
  900            .va_list => ip.memoized_state_va_list_deps.unwrap(),
  901            .assembly => ip.memoized_state_assembly_deps.unwrap(),
  902        },
  903    } orelse return .{
  904        .ip = ip,
  905        .next_entry = .none,
  906    };
  907    return .{
  908        .ip = ip,
  909        .next_entry = first_entry.toOptional(),
  910    };
  911}
  912
/// Registers a dependency of `depender` on `dependee`.
/// The new entry is prepended both to the dependee's doubly linked depender
/// list and to `depender`'s singly linked `first_dependency` chain.
/// All capacity is reserved up front so no error cleanup is needed.
pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, dependee: Dependee) Allocator.Error!void {
    const first_depender_dep: DepEntry.Index.Optional = if (ip.first_dependency.get(depender)) |idx| dep: {
        // The entry already exists, so there is capacity to overwrite it later.
        break :dep idx.toOptional();
    } else none: {
        // Ensure there is capacity available to add this dependency later.
        try ip.first_dependency.ensureUnusedCapacity(gpa, 1);
        break :none .none;
    };

    // We're very likely to need space for a new entry - reserve it now to avoid
    // the need for error cleanup logic.
    if (ip.free_dep_entries.items.len == 0) {
        try ip.dep_entries.ensureUnusedCapacity(gpa, 1);
    }

    // This block should allocate an entry and prepend it to the relevant `*_deps` list.
    // The `next` field should be correctly initialized; all other fields may be undefined.
    const new_index: DepEntry.Index = switch (dependee) {
        .memoized_state => |stage| new_index: {
            // Memoized-state dependencies use a dedicated per-stage list head
            // rather than a hash map.
            const deps = switch (stage) {
                .main => &ip.memoized_state_main_deps,
                .panic => &ip.memoized_state_panic_deps,
                .va_list => &ip.memoized_state_va_list_deps,
                .assembly => &ip.memoized_state_assembly_deps,
            };

            if (deps.unwrap()) |first| {
                if (ip.dep_entries.items[@intFromEnum(first)].depender == .none) {
                    // Dummy entry, so we can reuse it rather than allocating a new one!
                    break :new_index first;
                }
            }

            // Prepend a new dependency.
            const new_index: DepEntry.Index, const ptr = if (ip.free_dep_entries.pop()) |new_index| new: {
                break :new .{ new_index, &ip.dep_entries.items[@intFromEnum(new_index)] };
            } else .{ @enumFromInt(ip.dep_entries.items.len), ip.dep_entries.addOneAssumeCapacity() };
            if (deps.unwrap()) |old_first| {
                ptr.next = old_first.toOptional();
                ip.dep_entries.items[@intFromEnum(old_first)].prev = new_index.toOptional();
            } else {
                ptr.next = .none;
            }
            deps.* = new_index.toOptional();
            break :new_index new_index;
        },
        inline else => |dependee_payload, tag| new_index: {
            // All other dependee kinds live in a hash map keyed by the payload.
            const gop = try switch (tag) {
                .src_hash => ip.src_hash_deps,
                .nav_val => ip.nav_val_deps,
                .nav_ty => ip.nav_ty_deps,
                .interned => ip.interned_deps,
                .zon_file => ip.zon_file_deps,
                .embed_file => ip.embed_file_deps,
                .namespace => ip.namespace_deps,
                .namespace_name => ip.namespace_name_deps,
                .memoized_state => comptime unreachable,
            }.getOrPut(gpa, dependee_payload);

            if (gop.found_existing and ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].depender == .none) {
                // Dummy entry, so we can reuse it rather than allocating a new one!
                break :new_index gop.value_ptr.*;
            }

            // Prepend a new dependency.
            const new_index: DepEntry.Index, const ptr = if (ip.free_dep_entries.pop()) |new_index| new: {
                break :new .{ new_index, &ip.dep_entries.items[@intFromEnum(new_index)] };
            } else .{ @enumFromInt(ip.dep_entries.items.len), ip.dep_entries.addOneAssumeCapacity() };
            if (gop.found_existing) {
                ptr.next = gop.value_ptr.*.toOptional();
                ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].prev = new_index.toOptional();
            } else {
                ptr.next = .none;
            }
            gop.value_ptr.* = new_index;
            break :new_index new_index;
        },
    };

    // Fill in the remaining fields and hook the entry into `depender`'s chain.
    ip.dep_entries.items[@intFromEnum(new_index)].depender = depender.toOptional();
    ip.dep_entries.items[@intFromEnum(new_index)].prev = .none;
    ip.dep_entries.items[@intFromEnum(new_index)].next_dependee = first_depender_dep;
    ip.first_dependency.putAssumeCapacity(depender, new_index);
}
  998
/// Key for `namespace_name_deps`: identifies a (namespace, name) pair whose
/// existence a depender relies on. The map's value (a `DepEntry.Index`)
/// refers to the first such dependency.
pub const NamespaceNameKey = struct {
    /// The instruction (`struct_decl` etc) which owns the namespace in question.
    namespace: TrackedInst.Index,
    /// The name whose existence the dependency is on.
    name: NullTerminatedString,
};
 1007
/// One node in the dependency graph storage: simultaneously a member of a
/// doubly linked list of all dependers on one dependee (`next`/`prev`), and a
/// member of a singly linked list of all dependencies of one depender
/// (`next_dependee`).
pub const DepEntry = extern struct {
    /// If null, this is a dummy entry. `next_dependee` is undefined. This is the first
    /// entry in one of `*_deps`, and does not appear in any list by `first_dependency`,
    /// but is not in `free_dep_entries` since `*_deps` stores a reference to it.
    depender: AnalUnit.Optional,
    /// Index into `dep_entries` forming a doubly linked list of all dependencies on this dependee.
    /// Used to iterate all dependers for a given dependee during an update.
    /// null if this is the end of the list.
    next: DepEntry.Index.Optional,
    /// The other link for `next`.
    /// null if this is the start of the list.
    prev: DepEntry.Index.Optional,
    /// Index into `dep_entries` forming a singly linked list of dependencies *of* `depender`.
    /// Used to efficiently remove all `DepEntry`s for a single `depender` when it is re-analyzed.
    /// null if this is the end of the list.
    next_dependee: DepEntry.Index.Optional,

    /// Index into `dep_entries`.
    pub const Index = enum(u32) {
        _,
        pub fn toOptional(dep: DepEntry.Index) Optional {
            return @enumFromInt(@intFromEnum(dep));
        }
        /// Like `Index`, but reserves `maxInt(u32)` to mean "no entry".
        pub const Optional = enum(u32) {
            none = std.math.maxInt(u32),
            _,
            /// Converts to `?DepEntry.Index`, mapping `.none` to `null`.
            pub fn unwrap(opt: Optional) ?DepEntry.Index {
                return switch (opt) {
                    .none => null,
                    _ => @enumFromInt(@intFromEnum(opt)),
                };
            }
        };
    };
};
 1042
 1043const Local = struct {
 1044    /// These fields can be accessed from any thread by calling `acquire`.
 1045    /// They are only modified by the owning thread.
 1046    shared: Shared align(std.atomic.cache_line),
 1047    /// This state is fully local to the owning thread and does not require any
 1048    /// atomic access.
 1049    mutate: struct {
 1050        /// When we need to allocate any long-lived buffer for mutating the `InternPool`, it is
 1051        /// allocated into this `arena` (for the `Id` of the thread performing the mutation). An
 1052        /// arena is used to avoid contention on the GPA, and to ensure that any code which retains
 1053        /// references to old state remains valid. For instance, when reallocing hashmap metadata,
 1054        /// a racing lookup on another thread may still retain a handle to the old metadata pointer,
 1055        /// so it must remain valid.
 1056        /// This arena's lifetime is tied to that of `Compilation`, although it can be cleared on
 1057        /// garbage collection (currently vaporware).
 1058        arena: std.heap.ArenaAllocator.State,
 1059
 1060        items: ListMutate,
 1061        extra: ListMutate,
 1062        limbs: ListMutate,
 1063        strings: ListMutate,
 1064        string_bytes: ListMutate,
 1065        tracked_insts: ListMutate,
 1066        files: ListMutate,
 1067        maps: ListMutate,
 1068        navs: ListMutate,
 1069        comptime_units: ListMutate,
 1070
 1071        namespaces: BucketListMutate,
 1072    } align(std.atomic.cache_line),
 1073
    /// The cross-thread-readable portion of a `Local`; see the `shared` field.
    /// Each list must be `acquire`d before access from a non-owning thread.
    const Shared = struct {
        items: List(Item),
        extra: Extra,
        limbs: Limbs,
        strings: Strings,
        string_bytes: StringBytes,
        tracked_insts: TrackedInsts,
        files: List(File),
        maps: Maps,
        navs: Navs,
        comptime_units: ComptimeUnits,

        namespaces: Namespaces,

        /// Returns the (acquired) list holding big-int limbs. On hosts with
        /// 32-bit limbs, limb storage is shared with `extra`; with 64-bit
        /// limbs it has its own dedicated list.
        pub fn getLimbs(shared: *const Local.Shared) Limbs {
            return switch (@sizeOf(Limb)) {
                @sizeOf(u32) => shared.extra,
                @sizeOf(u64) => shared.limbs,
                else => @compileError("unsupported host"),
            }.acquire();
        }
    };
 1096
    /// Flat `u32` storage for trailing data of interned items.
    const Extra = List(struct { u32 });
    /// Big-int limb storage. When limbs are 32-bit they share storage with
    /// `Extra`; when 64-bit they get their own element type.
    const Limbs = switch (@sizeOf(Limb)) {
        @sizeOf(u32) => Extra,
        @sizeOf(u64) => List(struct { u64 }),
        else => @compileError("unsupported host"),
    };
    const Strings = List(struct { u32 });
    const StringBytes = List(struct { u8 });
    const TrackedInsts = List(struct { TrackedInst.MaybeLost });
    const Maps = List(struct { FieldMap });
    const Navs = List(Nav.Repr);
    const ComptimeUnits = List(struct { ComptimeUnit });

    // Namespaces are stored in fixed-size buckets of 2^8 entries; the list
    // holds pointers to the buckets, so namespace pointers remain stable.
    const namespaces_bucket_width = 8;
    const namespaces_bucket_mask = (1 << namespaces_bucket_width) - 1;
    /// Name of the `Zcu.Namespace` field reused as the intrusive free-list
    /// link for freed namespace slots.
    const namespace_next_free_field = "owner_type";
    const Namespaces = List(struct { *[1 << namespaces_bucket_width]Zcu.Namespace });
 1114
    /// Owning-thread-only state of a `List`: the current length, plus a mutex
    /// held while publishing a reallocated buffer (see `List.Mutable.setCapacity`).
    const ListMutate = struct {
        mutex: std.Thread.Mutex,
        len: u32,

        const empty: ListMutate = .{
            .mutex = .{},
            .len = 0,
        };
    };
 1124
    /// Owning-thread-only state of a bucketed list (see `Namespaces`): the
    /// fill level of the final bucket, the bucket list itself, and the head of
    /// a free list of recycled slots (threaded through the slots via
    /// `namespace_next_free_field` — see its declaration).
    const BucketListMutate = struct {
        last_bucket_len: u32,
        buckets_list: ListMutate,
        /// Head of the free list; `free_list_sentinel` when empty.
        free_list: u32,

        const free_list_sentinel = std.math.maxInt(u32);

        const empty: BucketListMutate = .{
            .last_bucket_len = 0,
            .buckets_list = ListMutate.empty,
            .free_list = free_list_sentinel,
        };
    };
 1138
 1139    fn List(comptime Elem: type) type {
 1140        assert(@typeInfo(Elem) == .@"struct");
 1141        return struct {
 1142            bytes: [*]align(@alignOf(Elem)) u8,
 1143
 1144            const ListSelf = @This();
 1145            const Mutable = struct {
 1146                gpa: Allocator,
 1147                arena: *std.heap.ArenaAllocator.State,
 1148                mutate: *ListMutate,
 1149                list: *ListSelf,
 1150
                /// Field tags of `Elem`, used to iterate the parallel arrays.
                const fields = std.enums.values(std.meta.FieldEnum(Elem));

                /// Returns a struct type mirroring `Elem`, where each field is
                /// a pointer to an array of `len` values of that field's type.
                /// Preserves tuple-ness of `Elem`.
                fn PtrArrayElem(comptime len: usize) type {
                    const elem_info = @typeInfo(Elem).@"struct";
                    const elem_fields = elem_info.fields;
                    var new_names: [elem_fields.len][]const u8 = undefined;
                    var new_types: [elem_fields.len]type = undefined;
                    for (elem_fields, &new_names, &new_types) |elem_field, *new_name, *NewType| {
                        new_name.* = elem_field.name;
                        NewType.* = *[len]elem_field.type;
                    }
                    if (elem_info.is_tuple) {
                        return @Tuple(&new_types);
                    } else {
                        return @Struct(.auto, null, &new_names, &new_types, &@splat(.{}));
                    }
                }
                /// Returns a struct type mirroring `Elem`, where each field is
                /// a pointer (of the requested size and constness) to that
                /// field's type. Preserves tuple-ness of `Elem`.
                fn PtrElem(comptime opts: struct {
                    size: std.builtin.Type.Pointer.Size,
                    is_const: bool = false,
                }) type {
                    const elem_info = @typeInfo(Elem).@"struct";
                    const elem_fields = elem_info.fields;
                    var new_names: [elem_fields.len][]const u8 = undefined;
                    var new_types: [elem_fields.len]type = undefined;
                    for (elem_fields, &new_names, &new_types) |elem_field, *new_name, *NewType| {
                        new_name.* = elem_field.name;
                        NewType.* = @Pointer(opts.size, .{ .@"const" = opts.is_const }, elem_field.type, null);
                    }
                    if (elem_info.is_tuple) {
                        return @Tuple(&new_types);
                    } else {
                        return @Struct(.auto, null, &new_names, &new_types, &@splat(.{}));
                    }
                }
 1186
                /// Appends one uninitialized element, growing if needed, and
                /// returns per-field pointers to it.
                pub fn addOne(mutable: Mutable) Allocator.Error!PtrElem(.{ .size = .one }) {
                    try mutable.ensureUnusedCapacity(1);
                    return mutable.addOneAssumeCapacity();
                }

                /// Like `addOne`, but asserts that capacity is available.
                pub fn addOneAssumeCapacity(mutable: Mutable) PtrElem(.{ .size = .one }) {
                    const index = mutable.mutate.len;
                    assert(index < mutable.list.header().capacity);
                    mutable.mutate.len = index + 1;
                    const mutable_view = mutable.view().slice();
                    var ptr: PtrElem(.{ .size = .one }) = undefined;
                    inline for (fields) |field| {
                        @field(ptr, @tagName(field)) = &mutable_view.items(field)[index];
                    }
                    return ptr;
                }
 1203
                /// Appends `elem`, growing if needed.
                pub fn append(mutable: Mutable, elem: Elem) Allocator.Error!void {
                    try mutable.ensureUnusedCapacity(1);
                    mutable.appendAssumeCapacity(elem);
                }

                /// Like `append`, but asserts that capacity is available.
                pub fn appendAssumeCapacity(mutable: Mutable, elem: Elem) void {
                    var mutable_view = mutable.view();
                    // Write back the length the `MultiArrayList` view produced.
                    defer mutable.mutate.len = @intCast(mutable_view.len);
                    mutable_view.appendAssumeCapacity(elem);
                }
 1214
                /// Appends a struct-of-slices worth of elements; asserts that
                /// capacity is available and that all field slices have equal length.
                pub fn appendSliceAssumeCapacity(
                    mutable: Mutable,
                    slice: PtrElem(.{ .size = .slice, .is_const = true }),
                ) void {
                    if (fields.len == 0) return;
                    const start = mutable.mutate.len;
                    // All field slices must be the same length; use the first as reference.
                    const slice_len = @field(slice, @tagName(fields[0])).len;
                    assert(slice_len <= mutable.list.header().capacity - start);
                    mutable.mutate.len = @intCast(start + slice_len);
                    const mutable_view = mutable.view().slice();
                    inline for (fields) |field| {
                        const field_slice = @field(slice, @tagName(field));
                        assert(field_slice.len == slice_len);
                        @memcpy(mutable_view.items(field)[start..][0..slice_len], field_slice);
                    }
                }
 1231
                /// Appends `len` copies of `elem`, growing if needed.
                pub fn appendNTimes(mutable: Mutable, elem: Elem, len: usize) Allocator.Error!void {
                    try mutable.ensureUnusedCapacity(len);
                    mutable.appendNTimesAssumeCapacity(elem, len);
                }

                /// Like `appendNTimes`, but asserts that capacity is available.
                pub fn appendNTimesAssumeCapacity(mutable: Mutable, elem: Elem, len: usize) void {
                    const start = mutable.mutate.len;
                    assert(len <= mutable.list.header().capacity - start);
                    mutable.mutate.len = @intCast(start + len);
                    const mutable_view = mutable.view().slice();
                    inline for (fields) |field| {
                        @memset(mutable_view.items(field)[start..][0..len], @field(elem, @tagName(field)));
                    }
                }
 1246
                /// Appends `len` uninitialized elements (comptime-known count),
                /// growing if needed, and returns per-field array pointers.
                pub fn addManyAsArray(mutable: Mutable, comptime len: usize) Allocator.Error!PtrArrayElem(len) {
                    try mutable.ensureUnusedCapacity(len);
                    return mutable.addManyAsArrayAssumeCapacity(len);
                }

                /// Like `addManyAsArray`, but asserts that capacity is available.
                pub fn addManyAsArrayAssumeCapacity(mutable: Mutable, comptime len: usize) PtrArrayElem(len) {
                    const start = mutable.mutate.len;
                    assert(len <= mutable.list.header().capacity - start);
                    mutable.mutate.len = @intCast(start + len);
                    const mutable_view = mutable.view().slice();
                    var ptr_array: PtrArrayElem(len) = undefined;
                    inline for (fields) |field| {
                        @field(ptr_array, @tagName(field)) = mutable_view.items(field)[start..][0..len];
                    }
                    return ptr_array;
                }
 1263
                /// Appends `len` uninitialized elements (runtime count),
                /// growing if needed, and returns per-field slices.
                pub fn addManyAsSlice(mutable: Mutable, len: usize) Allocator.Error!PtrElem(.{ .size = .slice }) {
                    try mutable.ensureUnusedCapacity(len);
                    return mutable.addManyAsSliceAssumeCapacity(len);
                }

                /// Like `addManyAsSlice`, but asserts that capacity is available.
                pub fn addManyAsSliceAssumeCapacity(mutable: Mutable, len: usize) PtrElem(.{ .size = .slice }) {
                    const start = mutable.mutate.len;
                    assert(len <= mutable.list.header().capacity - start);
                    mutable.mutate.len = @intCast(start + len);
                    const mutable_view = mutable.view().slice();
                    var slice: PtrElem(.{ .size = .slice }) = undefined;
                    inline for (fields) |field| {
                        @field(slice, @tagName(field)) = mutable_view.items(field)[start..][0..len];
                    }
                    return slice;
                }
 1280
                /// Reduces the length to `len` without touching capacity or storage.
                /// Asserts `len` does not exceed the current length.
                pub fn shrinkRetainingCapacity(mutable: Mutable, len: usize) void {
                    assert(len <= mutable.mutate.len);
                    mutable.mutate.len = @intCast(len);
                }

                /// Ensures room for `unused_capacity` more elements beyond the current length.
                pub fn ensureUnusedCapacity(mutable: Mutable, unused_capacity: usize) Allocator.Error!void {
                    try mutable.ensureTotalCapacity(@intCast(mutable.mutate.len + unused_capacity));
                }
 1289
                /// Grows the backing storage to hold at least `total_capacity`
                /// elements, using a geometric growth schedule. No-op when the
                /// current capacity already suffices.
                pub fn ensureTotalCapacity(mutable: Mutable, total_capacity: usize) Allocator.Error!void {
                    const old_capacity = mutable.list.header().capacity;
                    if (old_capacity >= total_capacity) return;
                    var new_capacity = old_capacity;
                    while (new_capacity < total_capacity) new_capacity = (new_capacity + 10) * 2;
                    try mutable.setCapacity(new_capacity);
                }
 1297
                /// Reallocates the backing buffer to hold `capacity` elements and
                /// publishes it. The new buffer comes from the thread-local arena
                /// and the old one is not freed here, so racing readers holding the
                /// old pointer remain valid (see `Local.mutate.arena` docs). The
                /// pointer swap happens via a release store under the mutex.
                fn setCapacity(mutable: Mutable, capacity: u32) Allocator.Error!void {
                    var arena = mutable.arena.promote(mutable.gpa);
                    defer mutable.arena.* = arena.state;
                    // One allocation holds the header followed by the element arrays.
                    const buf = try arena.allocator().alignedAlloc(
                        u8,
                        .fromByteUnits(alignment),
                        bytes_offset + View.capacityInBytes(capacity),
                    );
                    var new_list: ListSelf = .{ .bytes = @ptrCast(buf[bytes_offset..].ptr) };
                    new_list.header().* = .{ .capacity = capacity };
                    const len = mutable.mutate.len;
                    // this cold, quickly predictable, condition enables
                    // the `MultiArrayList` optimization in `view`
                    if (len > 0) {
                        const old_slice = mutable.list.view().slice();
                        const new_slice = new_list.view().slice();
                        inline for (fields) |field| @memcpy(new_slice.items(field)[0..len], old_slice.items(field)[0..len]);
                    }
                    mutable.mutate.mutex.lock();
                    defer mutable.mutate.mutex.unlock();
                    mutable.list.release(new_list);
                }
 1320
                /// Like `view`, but valid even when nothing has ever been allocated
                /// (capacity 0), forgoing the `MultiArrayList` optimization that
                /// `view` asserts.
                pub fn viewAllowEmpty(mutable: Mutable) View {
                    const capacity = mutable.list.header().capacity;
                    return .{
                        .bytes = mutable.list.bytes,
                        .len = mutable.mutate.len,
                        .capacity = capacity,
                    };
                }
                /// Returns a `MultiArrayList` view over the live (`mutate.len`)
                /// elements. Asserts capacity > 0, i.e. `setCapacity` has run at
                /// least once; use `viewAllowEmpty` otherwise.
                pub fn view(mutable: Mutable) View {
                    const capacity = mutable.list.header().capacity;
                    assert(capacity > 0); // optimizes `MultiArrayList.Slice.items`
                    return .{
                        .bytes = mutable.list.bytes,
                        .len = mutable.mutate.len,
                        .capacity = capacity,
                    };
                }
 1338            };
 1339
            /// A statically allocated zero-capacity list that lists start out
            /// pointing at, so `header()` is valid before any allocation. The
            /// `@constCast` is sound because capacity 0 forces any growth path
            /// through `setCapacity`, which replaces the pointer rather than
            /// writing through it.
            const empty: ListSelf = .{ .bytes = @constCast(&(extern struct {
                header: Header,
                bytes: [0]u8 align(@alignOf(Elem)),
            }{
                .header = .{ .capacity = 0 },
                .bytes = .{},
            }).bytes) };

            /// Alignment of the combined header-plus-elements allocation.
            const alignment = @max(@alignOf(Header), @alignOf(Elem));
            /// Byte offset from the start of the allocation to the element data.
            const bytes_offset = std.mem.alignForward(usize, @sizeOf(Header), @alignOf(Elem));
            const View = std.MultiArrayList(Elem);
 1351
            /// Must be called when accessing from another thread.
            /// The acquire load pairs with the release store in `release`,
            /// making the pointed-to header and copied elements visible.
            pub fn acquire(list: *const ListSelf) ListSelf {
                return .{ .bytes = @atomicLoad([*]align(@alignOf(Elem)) u8, &list.bytes, .acquire) };
            }
            /// Atomically publishes `new_list` so that threads loading via
            /// `acquire` observe fully initialized contents.
            fn release(list: *ListSelf, new_list: ListSelf) void {
                @atomicStore([*]align(@alignOf(Elem)) u8, &list.bytes, new_list.bytes, .release);
            }
 1359
            /// Metadata stored in the same allocation as the elements, at a
            /// fixed negative offset (`bytes_offset`) from `bytes`.
            const Header = extern struct {
                capacity: u32,
            };
            fn header(list: ListSelf) *Header {
                return @ptrCast(@alignCast(list.bytes - bytes_offset));
            }
            /// Returns a view over the full capacity. Note `len` is set to
            /// `capacity`: a shared (non-`Mutable`) list carries no length, so
            /// callers bound their reads themselves (see `toOverlongSlice`).
            pub fn view(list: ListSelf) View {
                const capacity = list.header().capacity;
                assert(capacity > 0); // optimizes `MultiArrayList.Slice.items`
                return .{
                    .bytes = list.bytes,
                    .len = capacity,
                    .capacity = capacity,
                };
            }
 1375        };
 1376    }
 1377
    /// Returns a `Mutable` handle over this thread-local `items` list,
    /// pairing the shared list with its per-thread mutation state.
    pub fn getMutableItems(local: *Local, gpa: Allocator) List(Item).Mutable {
        return .{
            .gpa = gpa,
            .arena = &local.mutate.arena,
            .mutate = &local.mutate.items,
            .list = &local.shared.items,
        };
    }

    /// Returns a `Mutable` handle over this thread-local `extra` list.
    pub fn getMutableExtra(local: *Local, gpa: Allocator) Extra.Mutable {
        return .{
            .gpa = gpa,
            .arena = &local.mutate.arena,
            .mutate = &local.mutate.extra,
            .list = &local.shared.extra,
        };
    }
 1395
    /// On 32-bit systems, this array is ignored and extra is used for everything.
    /// On 64-bit systems, this array is used for big integers and associated metadata.
    /// Use the helper methods instead of accessing this directly in order to not
    /// violate the above mechanism.
    pub fn getMutableLimbs(local: *Local, gpa: Allocator) Limbs.Mutable {
        // `Limb` tracks the host word size, so switching on its size
        // implements the 32/64-bit split described above.
        return switch (@sizeOf(Limb)) {
            @sizeOf(u32) => local.getMutableExtra(gpa),
            @sizeOf(u64) => .{
                .gpa = gpa,
                .arena = &local.mutate.arena,
                .mutate = &local.mutate.limbs,
                .list = &local.shared.limbs,
            },
            else => @compileError("unsupported host"),
        };
    }
 1412
    /// A list of offsets into `string_bytes` for each string.
    /// Returns a `Mutable` handle over this thread-local `strings` list.
    pub fn getMutableStrings(local: *Local, gpa: Allocator) Strings.Mutable {
        return .{
            .gpa = gpa,
            .arena = &local.mutate.arena,
            .mutate = &local.mutate.strings,
            .list = &local.shared.strings,
        };
    }

    /// In order to store references to strings in fewer bytes, we copy all
    /// string bytes into here. String bytes can be null. It is up to whomever
    /// is referencing the data here whether they want to store both index and length,
    /// thus allowing null bytes, or store only index, and use null-termination. The
    /// `string_bytes` array is agnostic to either usage.
    pub fn getMutableStringBytes(local: *Local, gpa: Allocator) StringBytes.Mutable {
        return .{
            .gpa = gpa,
            .arena = &local.mutate.arena,
            .mutate = &local.mutate.string_bytes,
            .list = &local.shared.string_bytes,
        };
    }

    /// An index into `tracked_insts` gives a reference to a single ZIR instruction which
    /// persists across incremental updates.
    /// Returns a `Mutable` handle over this thread-local `tracked_insts` list.
    pub fn getMutableTrackedInsts(local: *Local, gpa: Allocator) TrackedInsts.Mutable {
        return .{
            .gpa = gpa,
            .arena = &local.mutate.arena,
            .mutate = &local.mutate.tracked_insts,
            .list = &local.shared.tracked_insts,
        };
    }
 1447
    /// Elements are ordered identically to the `import_table` field of `Zcu`.
    ///
    /// Unlike `import_table`, this data is serialized as part of incremental
    /// compilation state.
    ///
    /// Key is the hash of the path to this file, used to store
    /// `InternPool.TrackedInst`.
    ///
    /// Returns a `Mutable` handle over this thread-local `files` list.
    pub fn getMutableFiles(local: *Local, gpa: Allocator) List(File).Mutable {
        return .{
            .gpa = gpa,
            .arena = &local.mutate.arena,
            .mutate = &local.mutate.files,
            .list = &local.shared.files,
        };
    }
 1463
    /// Some types such as enums, structs, and unions need to store mappings from field names
    /// to field index, or value to field index. In such cases, they will store the underlying
    /// field names and values directly, relying on one of these maps, stored separately,
    /// to provide lookup.
    /// These are not serialized; it is computed upon deserialization.
    pub fn getMutableMaps(local: *Local, gpa: Allocator) Maps.Mutable {
        return .{
            .gpa = gpa,
            .arena = &local.mutate.arena,
            .mutate = &local.mutate.maps,
            .list = &local.shared.maps,
        };
    }

    /// Returns a `Mutable` handle over this thread-local `navs` list.
    pub fn getMutableNavs(local: *Local, gpa: Allocator) Navs.Mutable {
        return .{
            .gpa = gpa,
            .arena = &local.mutate.arena,
            .mutate = &local.mutate.navs,
            .list = &local.shared.navs,
        };
    }

    /// Returns a `Mutable` handle over this thread-local `comptime_units` list.
    pub fn getMutableComptimeUnits(local: *Local, gpa: Allocator) ComptimeUnits.Mutable {
        return .{
            .gpa = gpa,
            .arena = &local.mutate.arena,
            .mutate = &local.mutate.comptime_units,
            .list = &local.shared.comptime_units,
        };
    }
 1495
    /// Rather than allocating Namespace objects with an Allocator, we instead allocate
    /// them with this BucketList. This provides four advantages:
    ///  * Stable memory so that one thread can access a Namespace object while another
    ///    thread allocates additional Namespace objects from this list.
    ///  * It allows us to use u32 indexes to reference Namespace objects rather than
    ///    pointers, saving memory in types.
    ///  * Using integers to reference Namespace objects rather than pointers makes
    ///    serialization trivial.
    ///  * It provides a unique integer to be used for anonymous symbol names, avoiding
    ///    multi-threaded contention on an atomic counter.
    ///
    /// Returns a `Mutable` handle over this thread-local namespaces bucket list.
    pub fn getMutableNamespaces(local: *Local, gpa: Allocator) Namespaces.Mutable {
        return .{
            .gpa = gpa,
            .arena = &local.mutate.arena,
            .mutate = &local.mutate.namespaces.buckets_list,
            .list = &local.shared.namespaces,
        };
    }
 1514};
 1515
/// Returns the per-thread state for `tid`, including its mutable half.
pub fn getLocal(ip: *InternPool, tid: Zcu.PerThread.Id) *Local {
    return &ip.locals[@intFromEnum(tid)];
}

/// Returns only the shared half of thread `tid`'s state; cross-thread
/// readers go through its `acquire` methods.
pub fn getLocalShared(ip: *const InternPool, tid: Zcu.PerThread.Id) *const Local.Shared {
    return &ip.locals[@intFromEnum(tid)].shared;
}
 1523
/// One shard of the global deduplication maps. Work is spread across
/// `ip.shards` so simultaneous writers contend on different shards; the
/// cache-line alignment below keeps unrelated shards from false sharing.
const Shard = struct {
    shared: struct {
        map: Map(Index),
        string_map: Map(OptionalNullTerminatedString),
        tracked_inst_map: Map(TrackedInst.Index.Optional),
    } align(std.atomic.cache_line),
    mutate: struct {
        // TODO: measure cost of sharing unrelated mutate state
        map: Mutate align(std.atomic.cache_line),
        string_map: Mutate align(std.atomic.cache_line),
        tracked_inst_map: Mutate align(std.atomic.cache_line),
    },

    /// Writer-side state for one map: a recursive mutex serializing
    /// writers, plus the current number of occupied entries.
    const Mutate = struct {
        mutex: std.Thread.Mutex.Recursive,
        len: u32,

        const empty: Mutate = .{
            .mutex = std.Thread.Mutex.Recursive.init,
            .len = 0,
        };
    };

    /// A hash map whose readers use only atomic loads. `Value` must be a
    /// `u32`-backed enum with a `.none` tag; `.none` marks a vacant slot.
    fn Map(comptime Value: type) type {
        comptime assert(@typeInfo(Value).@"enum".tag_type == u32);
        _ = @as(Value, .none); // expected .none key
        return struct {
            /// header: Header,
            /// entries: [header.capacity]Entry,
            entries: [*]Entry,

            /// Statically allocated one-slot empty map that maps start out
            /// pointing at, so `header()` is valid before any allocation.
            const empty: @This() = .{ .entries = @constCast(&(extern struct {
                header: Header,
                entries: [1]Entry,
            }{
                .header = .{ .capacity = 1 },
                .entries = .{.{ .value = .none, .hash = undefined }},
            }).entries) };

            const alignment = @max(@alignOf(Header), @alignOf(Entry));
            /// Byte offset from the start of the allocation to `entries`.
            const entries_offset = std.mem.alignForward(usize, @sizeOf(Header), @alignOf(Entry));

            /// Must be called unless the mutate mutex is locked.
            /// Pairs with the release store in `release`.
            fn acquire(map: *const @This()) @This() {
                return .{ .entries = @atomicLoad([*]Entry, &map.entries, .acquire) };
            }
            /// Atomically publishes a replacement entries array to
            /// concurrent readers.
            fn release(map: *@This(), new_map: @This()) void {
                @atomicStore([*]Entry, &map.entries, new_map.entries, .release);
            }

            /// Stored at a fixed negative offset before `entries`, in the
            /// same allocation.
            const Header = extern struct {
                capacity: u32,

                /// Capacity is always a power of two, so `hash & mask()`
                /// yields a valid slot index.
                fn mask(head: *const Header) u32 {
                    assert(std.math.isPowerOfTwo(head.capacity));
                    return head.capacity - 1;
                }
            };
            fn header(map: @This()) *Header {
                return @ptrCast(@alignCast(@as([*]u8, @ptrCast(map.entries)) - entries_offset));
            }

            /// One slot: the cached hash plus the value; `.none` means
            /// vacant. The value is stored with release ordering so a
            /// reader that `acquire`s it also sees prior writes (e.g. the
            /// `hash` field).
            const Entry = extern struct {
                value: Value,
                hash: u32,

                fn acquire(entry: *const Entry) Value {
                    return @atomicLoad(Value, &entry.value, .acquire);
                }
                fn release(entry: *Entry, value: Value) void {
                    assert(value != .none);
                    @atomicStore(Value, &entry.value, value, .release);
                }
                fn resetUnordered(entry: *Entry) void {
                    @atomicStore(Value, &entry.value, .none, .unordered);
                }
            };
        };
    }
};
 1604
/// Returns a mask with the low `tid_width` bits set, used to extract a
/// thread id after shifting a packed index down by a `tid_shift_*` amount.
fn getTidMask(ip: *const InternPool) u32 {
    return @shlExact(@as(u32, 1), ip.tid_width) - 1;
}

/// Returns the mask covering the index (non-tid) bits of a value packed
/// into `BackingInt`, i.e. everything below the tid bits.
fn getIndexMask(ip: *const InternPool, comptime BackingInt: type) u32 {
    return @as(u32, std.math.maxInt(BackingInt)) >> ip.tid_width;
}
 1612
/// Set-like adapted hash map: key and value are both `void`, so the map
/// only stores insertion-ordered hashes; actual key data lives elsewhere
/// and lookups go through adapters (see `NullTerminatedString.Adapter`).
const FieldMap = std.ArrayHashMapUnmanaged(void, void, std.array_hash_map.AutoContext(void), false);
 1614
 1615/// An index into `maps` which might be `none`.
 1616pub const OptionalMapIndex = enum(u32) {
 1617    none = std.math.maxInt(u32),
 1618    _,
 1619
 1620    pub fn unwrap(oi: OptionalMapIndex) ?MapIndex {
 1621        if (oi == .none) return null;
 1622        return @enumFromInt(@intFromEnum(oi));
 1623    }
 1624};
 1625
/// An index into `maps`.
pub const MapIndex = enum(u32) {
    _,

    /// Resolves this packed (tid, index) handle to the `FieldMap` stored
    /// in the owning thread's `maps` list.
    pub fn get(map_index: MapIndex, ip: *const InternPool) *FieldMap {
        const unwrapped_map_index = map_index.unwrap(ip);
        const maps = ip.getLocalShared(unwrapped_map_index.tid).maps.acquire();
        return &maps.view().items(.@"0")[unwrapped_map_index.index];
    }

    pub fn toOptional(i: MapIndex) OptionalMapIndex {
        return @enumFromInt(@intFromEnum(i));
    }

    /// Decoded form: the owning thread and the element index within that
    /// thread's `maps` list.
    const Unwrapped = struct {
        tid: Zcu.PerThread.Id,
        index: u32,

        /// Packs (tid, index) into 32 bits with the tid in the top bits.
        fn wrap(unwrapped: Unwrapped, ip: *const InternPool) MapIndex {
            assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask());
            assert(unwrapped.index <= ip.getIndexMask(u32));
            return @enumFromInt(@shlExact(@as(u32, @intFromEnum(unwrapped.tid)), ip.tid_shift_32) |
                unwrapped.index);
        }
    };
    /// Inverse of `Unwrapped.wrap`.
    fn unwrap(map_index: MapIndex, ip: *const InternPool) Unwrapped {
        return .{
            .tid = @enumFromInt(@intFromEnum(map_index) >> ip.tid_shift_32 & ip.getTidMask()),
            .index = @intFromEnum(map_index) & ip.getIndexMask(u32),
        };
    }
};
 1658
/// Identifies a comptime allocation. A plain opaque id — unlike the other
/// index types here, no tid is packed into it.
pub const ComptimeAllocIndex = enum(u32) { _ };
 1660
/// An index referencing a `Namespace` stored in a per-thread bucket list;
/// packs (tid, bucket, slot) into 32 bits. See `getMutableNamespaces`.
pub const NamespaceIndex = enum(u32) {
    _,

    /// Decoded form: owning thread, bucket within that thread's namespaces
    /// bucket list, and slot within the bucket.
    const Unwrapped = struct {
        tid: Zcu.PerThread.Id,
        bucket_index: u32,
        index: u32,

        /// Packs the three components: tid in the top `tid_width` bits, the
        /// slot in the low `Local.namespaces_bucket_width` bits, and the
        /// bucket index in between.
        fn wrap(unwrapped: Unwrapped, ip: *const InternPool) NamespaceIndex {
            assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask());
            assert(unwrapped.bucket_index <= ip.getIndexMask(u32) >> Local.namespaces_bucket_width);
            assert(unwrapped.index <= Local.namespaces_bucket_mask);
            return @enumFromInt(@shlExact(@as(u32, @intFromEnum(unwrapped.tid)), ip.tid_shift_32) |
                unwrapped.bucket_index << Local.namespaces_bucket_width |
                unwrapped.index);
        }
    };
    /// Inverse of `Unwrapped.wrap`.
    fn unwrap(namespace_index: NamespaceIndex, ip: *const InternPool) Unwrapped {
        const index = @intFromEnum(namespace_index) & ip.getIndexMask(u32);
        return .{
            .tid = @enumFromInt(@intFromEnum(namespace_index) >> ip.tid_shift_32 & ip.getTidMask()),
            .bucket_index = index >> Local.namespaces_bucket_width,
            .index = index & Local.namespaces_bucket_mask,
        };
    }

    pub fn toOptional(i: NamespaceIndex) OptionalNamespaceIndex {
        return @enumFromInt(@intFromEnum(i));
    }
};
 1691
 1692pub const OptionalNamespaceIndex = enum(u32) {
 1693    none = std.math.maxInt(u32),
 1694    _,
 1695
 1696    pub fn init(oi: ?NamespaceIndex) OptionalNamespaceIndex {
 1697        return @enumFromInt(@intFromEnum(oi orelse return .none));
 1698    }
 1699
 1700    pub fn unwrap(oi: OptionalNamespaceIndex) ?NamespaceIndex {
 1701        if (oi == .none) return null;
 1702        return @enumFromInt(@intFromEnum(oi));
 1703    }
 1704};
 1705
/// An index referencing an element of a per-thread `files` list; packs
/// (tid, index) into 32 bits.
pub const FileIndex = enum(u32) {
    _,

    /// Decoded form: the owning thread and the element index within that
    /// thread's `files` list.
    const Unwrapped = struct {
        tid: Zcu.PerThread.Id,
        index: u32,

        /// Packs (tid, index) into 32 bits with the tid in the top bits.
        fn wrap(unwrapped: Unwrapped, ip: *const InternPool) FileIndex {
            assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask());
            assert(unwrapped.index <= ip.getIndexMask(u32));
            return @enumFromInt(@shlExact(@as(u32, @intFromEnum(unwrapped.tid)), ip.tid_shift_32) |
                unwrapped.index);
        }
    };
    /// Inverse of `Unwrapped.wrap`.
    pub fn unwrap(file_index: FileIndex, ip: *const InternPool) Unwrapped {
        return .{
            .tid = @enumFromInt(@intFromEnum(file_index) >> ip.tid_shift_32 & ip.getTidMask()),
            .index = @intFromEnum(file_index) & ip.getIndexMask(u32),
        };
    }
    pub fn toOptional(i: FileIndex) Optional {
        return @enumFromInt(@intFromEnum(i));
    }
    /// A `FileIndex` which might be `none`.
    pub const Optional = enum(u32) {
        none = std.math.maxInt(u32),
        _,
        pub fn unwrap(opt: Optional) ?FileIndex {
            return switch (opt) {
                .none => null,
                _ => @enumFromInt(@intFromEnum(opt)),
            };
        }
    };
};
 1740
/// Per-file state tracked by the pool; the element type of the `files`
/// lists (see `getMutableFiles`).
const File = struct {
    /// Hash of the path to this file (see `getMutableFiles` doc comment).
    bin_digest: Cache.BinDigest,
    file: *Zcu.File,
    /// `.none` means no type has been created yet.
    root_type: InternPool.Index,
};
 1747
/// An index into `strings`.
pub const String = enum(u32) {
    /// An empty string.
    empty = 0,
    _,

    /// Returns the string's first `len` bytes. The caller supplies the
    /// length; a plain `String` does not store one.
    pub fn toSlice(string: String, len: u64, ip: *const InternPool) []const u8 {
        return string.toOverlongSlice(ip)[0..@intCast(len)];
    }

    /// Returns the byte at offset `index` within the string's storage.
    pub fn at(string: String, index: u64, ip: *const InternPool) u8 {
        return string.toOverlongSlice(ip)[@intCast(index)];
    }

    /// Reinterprets as a `NullTerminatedString` of logical length `len`.
    /// Asserts there are no interior null bytes and that a null terminator
    /// immediately follows the `len` bytes.
    pub fn toNullTerminatedString(string: String, len: u64, ip: *const InternPool) NullTerminatedString {
        assert(std.mem.indexOfScalar(u8, string.toSlice(len, ip), 0) == null);
        assert(string.at(len, ip) == 0);
        return @enumFromInt(@intFromEnum(string));
    }

    /// Decoded form: the owning thread and the element index within that
    /// thread's `strings` list.
    const Unwrapped = struct {
        tid: Zcu.PerThread.Id,
        index: u32,

        /// Packs (tid, index) into 32 bits with the tid in the top bits.
        fn wrap(unwrapped: Unwrapped, ip: *const InternPool) String {
            assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask());
            assert(unwrapped.index <= ip.getIndexMask(u32));
            return @enumFromInt(@shlExact(@as(u32, @intFromEnum(unwrapped.tid)), ip.tid_shift_32) |
                unwrapped.index);
        }
    };
    /// Inverse of `Unwrapped.wrap`.
    fn unwrap(string: String, ip: *const InternPool) Unwrapped {
        return .{
            .tid = @enumFromInt(@intFromEnum(string) >> ip.tid_shift_32 & ip.getTidMask()),
            .index = @intFromEnum(string) & ip.getIndexMask(u32),
        };
    }

    /// Returns all bytes from this string's start offset to the end of the
    /// owning thread's `string_bytes` buffer — i.e. generally longer than
    /// the logical string. Callers bound the result (see `toSlice`).
    fn toOverlongSlice(string: String, ip: *const InternPool) []const u8 {
        const unwrapped = string.unwrap(ip);
        const local_shared = ip.getLocalShared(unwrapped.tid);
        const strings = local_shared.strings.acquire().view().items(.@"0");
        const string_bytes = local_shared.string_bytes.acquire().view().items(.@"0");
        return string_bytes[strings[unwrapped.index]..];
    }

    const debug_state = InternPool.debug_state;
};
 1796
 1797/// An index into `strings` which might be `none`.
 1798pub const OptionalString = enum(u32) {
 1799    /// This is distinct from `none` - it is a valid index that represents empty string.
 1800    empty = 0,
 1801    none = std.math.maxInt(u32),
 1802    _,
 1803
 1804    pub fn unwrap(string: OptionalString) ?String {
 1805        return if (string != .none) @enumFromInt(@intFromEnum(string)) else null;
 1806    }
 1807
 1808    pub fn toSlice(string: OptionalString, len: u64, ip: *const InternPool) ?[]const u8 {
 1809        return (string.unwrap() orelse return null).toSlice(len, ip);
 1810    }
 1811
 1812    const debug_state = InternPool.debug_state;
 1813};
 1814
/// An index into `strings`.
pub const NullTerminatedString = enum(u32) {
    /// An empty string.
    empty = 0,
    _,

    /// An array of `NullTerminatedString` existing within the `extra` array.
    /// This type exists to provide a struct with lifetime that is
    /// not invalidated when items are added to the `InternPool`.
    pub const Slice = struct {
        tid: Zcu.PerThread.Id,
        start: u32,
        len: u32,

        pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 };

        /// Resolves the (tid, start, len) triple into a live slice of the
        /// owning thread's `extra` storage.
        pub fn get(slice: Slice, ip: *const InternPool) []NullTerminatedString {
            const extra = ip.getLocalShared(slice.tid).extra.acquire();
            return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]);
        }
    };

    /// Reinterprets as a plain `String` (same index space).
    pub fn toString(self: NullTerminatedString) String {
        return @enumFromInt(@intFromEnum(self));
    }

    pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString {
        return @enumFromInt(@intFromEnum(self));
    }

    /// Returns the string's bytes, excluding the null terminator. The
    /// length is derived from the start offset of the *next* entry in
    /// `strings`, so entry `index + 1` must exist.
    pub fn toSlice(string: NullTerminatedString, ip: *const InternPool) [:0]const u8 {
        const unwrapped = string.toString().unwrap(ip);
        const local_shared = ip.getLocalShared(unwrapped.tid);
        const strings = local_shared.strings.acquire().view().items(.@"0");
        const string_bytes = local_shared.string_bytes.acquire().view().items(.@"0");
        return string_bytes[strings[unwrapped.index] .. strings[unwrapped.index + 1] - 1 :0];
    }

    /// Length in bytes, excluding the null terminator; computed from
    /// adjacent `strings` offsets without touching the bytes themselves.
    pub fn length(string: NullTerminatedString, ip: *const InternPool) u32 {
        const unwrapped = string.toString().unwrap(ip);
        const local_shared = ip.getLocalShared(unwrapped.tid);
        const strings = local_shared.strings.acquire().view().items(.@"0");
        return strings[unwrapped.index + 1] - 1 - strings[unwrapped.index];
    }

    /// True if the interned string's contents equal `slice` (the null
    /// terminator is checked for position but not included in `slice`).
    pub fn eqlSlice(string: NullTerminatedString, slice: []const u8, ip: *const InternPool) bool {
        const overlong_slice = string.toString().toOverlongSlice(ip);
        return overlong_slice.len > slice.len and
            std.mem.eql(u8, overlong_slice[0..slice.len], slice) and
            overlong_slice[slice.len] == 0;
    }

    /// Array-hash-map adapter keyed on interned identity (the integer
    /// value), not the string contents.
    const Adapter = struct {
        strings: []const NullTerminatedString,

        pub fn eql(ctx: @This(), a: NullTerminatedString, b_void: void, b_map_index: usize) bool {
            _ = b_void;
            return a == ctx.strings[b_map_index];
        }

        pub fn hash(ctx: @This(), a: NullTerminatedString) u32 {
            _ = ctx;
            return std.hash.int(@intFromEnum(a));
        }
    };

    /// Compare based on integer value alone, ignoring the string contents.
    pub fn indexLessThan(ctx: void, a: NullTerminatedString, b: NullTerminatedString) bool {
        _ = ctx;
        return @intFromEnum(a) < @intFromEnum(b);
    }

    /// Parses the string as a canonical base-10 `u32`: rejects leading
    /// zeroes (except "0" itself), digit separators, and anything
    /// `std.fmt.parseUnsigned` rejects (including overflow).
    pub fn toUnsigned(string: NullTerminatedString, ip: *const InternPool) ?u32 {
        const slice = string.toSlice(ip);
        if (slice.len > 1 and slice[0] == '0') return null;
        if (std.mem.indexOfScalar(u8, slice, '_')) |_| return null;
        return std.fmt.parseUnsigned(u32, slice, 10) catch null;
    }

    const FormatData = struct {
        string: NullTerminatedString,
        ip: *const InternPool,
        id: bool,
    };
    /// When `id` is set, prints via `std.zig.fmtIdP` (identifier
    /// escaping); otherwise writes the raw bytes.
    fn format(data: FormatData, writer: *std.Io.Writer) std.Io.Writer.Error!void {
        const slice = data.string.toSlice(data.ip);
        if (!data.id) {
            try writer.writeAll(slice);
        } else {
            try writer.print("{f}", .{std.zig.fmtIdP(slice)});
        }
    }

    pub fn fmt(string: NullTerminatedString, ip: *const InternPool) std.fmt.Alt(FormatData, format) {
        return .{ .data = .{ .string = string, .ip = ip, .id = false } };
    }

    pub fn fmtId(string: NullTerminatedString, ip: *const InternPool) std.fmt.Alt(FormatData, format) {
        return .{ .data = .{ .string = string, .ip = ip, .id = true } };
    }

    const debug_state = InternPool.debug_state;
};
 1918
 1919/// An index into `strings` which might be `none`.
 1920pub const OptionalNullTerminatedString = enum(u32) {
 1921    /// This is distinct from `none` - it is a valid index that represents empty string.
 1922    empty = 0,
 1923    none = std.math.maxInt(u32),
 1924    _,
 1925
 1926    pub fn unwrap(string: OptionalNullTerminatedString) ?NullTerminatedString {
 1927        return if (string != .none) @enumFromInt(@intFromEnum(string)) else null;
 1928    }
 1929
 1930    pub fn toSlice(string: OptionalNullTerminatedString, ip: *const InternPool) ?[:0]const u8 {
 1931        return (string.unwrap() orelse return null).toSlice(ip);
 1932    }
 1933
 1934    const debug_state = InternPool.debug_state;
 1935};
 1936
/// A single value captured in the closure of a namespace type. This is not a plain
/// `Index` because we must differentiate between the following cases:
/// * runtime-known value (where we store the type)
/// * comptime-known value (where we store the value)
/// * `Nav` val (so that we can analyze the value lazily)
/// * `Nav` ref (so that we can analyze the reference lazily)
pub const CaptureValue = packed struct(u32) {
    /// Discriminates how `idx` is interpreted; see `Unwrapped`.
    tag: enum(u2) { @"comptime", runtime, nav_val, nav_ref },
    /// The payload (an `Index` or `Nav.Index`) squeezed into 30 bits.
    idx: u30,

    /// Packs an `Unwrapped` into 32 bits. The `@intCast`s are
    /// safety-checked: the payload index must fit in 30 bits.
    pub fn wrap(val: Unwrapped) CaptureValue {
        return switch (val) {
            .@"comptime" => |i| .{ .tag = .@"comptime", .idx = @intCast(@intFromEnum(i)) },
            .runtime => |i| .{ .tag = .runtime, .idx = @intCast(@intFromEnum(i)) },
            .nav_val => |i| .{ .tag = .nav_val, .idx = @intCast(@intFromEnum(i)) },
            .nav_ref => |i| .{ .tag = .nav_ref, .idx = @intCast(@intFromEnum(i)) },
        };
    }
    /// Inverse of `wrap`.
    pub fn unwrap(val: CaptureValue) Unwrapped {
        return switch (val.tag) {
            .@"comptime" => .{ .@"comptime" = @enumFromInt(val.idx) },
            .runtime => .{ .runtime = @enumFromInt(val.idx) },
            .nav_val => .{ .nav_val = @enumFromInt(val.idx) },
            .nav_ref => .{ .nav_ref = @enumFromInt(val.idx) },
        };
    }

    pub const Unwrapped = union(enum) {
        /// Index refers to the value.
        @"comptime": Index,
        /// Index refers to the type.
        runtime: Index,
        nav_val: Nav.Index,
        nav_ref: Nav.Index,
    };

    /// A contiguous run of `CaptureValue`s stored in a thread's `extra`
    /// list; stable across pool growth (stores offsets, not pointers).
    pub const Slice = struct {
        tid: Zcu.PerThread.Id,
        start: u32,
        len: u32,

        pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 };

        /// Resolves to a live slice of the owning thread's `extra` storage.
        pub fn get(slice: Slice, ip: *const InternPool) []CaptureValue {
            const extra = ip.getLocalShared(slice.tid).extra.acquire();
            return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]);
        }
    };
};
 1986
 1987pub const Key = union(enum) {
 1988    int_type: IntType,
 1989    ptr_type: PtrType,
 1990    array_type: ArrayType,
 1991    vector_type: VectorType,
 1992    opt_type: Index,
 1993    /// `anyframe->T`. The payload is the child type, which may be `none` to indicate
 1994    /// `anyframe`.
 1995    anyframe_type: Index,
 1996    error_union_type: ErrorUnionType,
 1997    simple_type: SimpleType,
 1998    /// This represents a struct that has been explicitly declared in source code,
 1999    /// or was created with `@Struct`. It is unique and based on a declaration.
 2000    struct_type: NamespaceType,
 2001    /// This is a tuple type. Tuples are logically similar to structs, but have some
 2002    /// important differences in semantics; they do not undergo staged type resolution,
 2003    /// so cannot be self-referential, and they are not considered container/namespace
 2004    /// types, so cannot have declarations and have structural equality properties.
 2005    tuple_type: TupleType,
 2006    union_type: NamespaceType,
 2007    opaque_type: NamespaceType,
 2008    enum_type: NamespaceType,
 2009    func_type: FuncType,
 2010    error_set_type: ErrorSetType,
 2011    /// The payload is the function body, either a `func_decl` or `func_instance`.
 2012    inferred_error_set_type: Index,
 2013
 2014    /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented
 2015    /// via `simple_value` and has a named `Index` tag for it.
 2016    undef: Index,
 2017    simple_value: SimpleValue,
 2018    variable: Variable,
 2019    @"extern": Extern,
 2020    func: Func,
 2021    int: Key.Int,
 2022    err: Error,
 2023    error_union: ErrorUnion,
 2024    enum_literal: NullTerminatedString,
 2025    /// A specific enum tag, indicated by the integer tag value.
 2026    enum_tag: EnumTag,
 2027    /// An empty enum or union. TODO: this value's existence is strange, because such a type in
 2028    /// reality has no values. See #15909.
 2029    /// Payload is the type for which we are an empty value.
 2030    empty_enum_value: Index,
 2031    float: Float,
 2032    ptr: Ptr,
 2033    slice: Slice,
 2034    opt: Opt,
 2035    /// An instance of a struct, array, or vector.
 2036    /// Each element/field stored as an `Index`.
 2037    /// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
 2038    /// so the slice length will be one more than the type's array length.
 2039    /// There must be at least one element which is not `undefined`. If all elements are
 2040    /// undefined, instead create an undefined value of the aggregate type.
 2041    aggregate: Aggregate,
 2042    /// An instance of a union.
 2043    un: Union,
 2044
 2045    /// A comptime function call with a memoized result.
 2046    memoized_call: Key.MemoizedCall,
 2047
    /// A (type, value) pair of `Index`es. Extern layout — presumably for the
    /// same memory-reinterpretation hashing as `ErrorUnionType` below.
    pub const TypeValue = extern struct {
        ty: Index,
        val: Index,
    };

    pub const IntType = std.builtin.Type.Int;

    /// Extern for hashing via memory reinterpretation.
    pub const ErrorUnionType = extern struct {
        error_set_type: Index,
        payload_type: Index,
    };
 2060
    pub const ErrorSetType = struct {
        /// Set of error names, sorted by null terminated string index.
        names: NullTerminatedString.Slice,
        /// This is ignored by `get` but will always be provided by `indexToKey`.
        names_map: OptionalMapIndex = .none,

        /// Look up field index based on field name.
        /// Asserts `names_map` is populated (non-`none`); returns `null`
        /// when `name` is not a member of this error set.
        pub fn nameIndex(self: ErrorSetType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
            const map = self.names_map.unwrap().?.get(ip);
            // Identity-based adapter: `name` is interned, so equality is an
            // integer compare against the stored names.
            const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) };
            const field_index = map.getIndexAdapted(name, adapter) orelse return null;
            return @intCast(field_index);
        }
    };
 2075
    /// Extern layout so it can be hashed with `std.mem.asBytes`.
    /// Do not reorder fields: the byte representation is the hash input.
    pub const PtrType = extern struct {
        /// The pointee type.
        child: Index,
        /// Sentinel value, or `.none` if the pointer has no sentinel.
        sentinel: Index = .none,
        flags: Flags = .{},
        packed_offset: PackedOffset = .{ .bit_offset = 0, .host_size = 0 },

        /// NOTE(review): presumably the element index when this pointer refers
        /// into a vector, with `none` meaning "not a vector element pointer" —
        /// confirm against users of `flags.vector_index`.
        pub const VectorIndex = enum(u16) {
            none = std.math.maxInt(u16),
            _,
        };

        pub const Flags = packed struct(u32) {
            size: Size = .one,
            /// `none` indicates the ABI alignment of the pointee_type. In this
            /// case, this field *must* be set to `none`, otherwise the
            /// `InternPool` equality and hashing functions will return incorrect
            /// results.
            alignment: Alignment = .none,
            is_const: bool = false,
            is_volatile: bool = false,
            is_allowzero: bool = false,
            /// See src/target.zig defaultAddressSpace function for how to obtain
            /// an appropriate value for this field.
            address_space: AddressSpace = .generic,
            vector_index: VectorIndex = .none,
        };

        pub const PackedOffset = packed struct(u32) {
            /// If this is non-zero it means the pointer points to a sub-byte
            /// range of data, which is backed by a "host integer" with this
            /// number of bytes.
            /// When host_size=pointee_abi_size and bit_offset=0, this must be
            /// represented with host_size=0 instead.
            host_size: u16,
            bit_offset: u16,
        };

        pub const Size = std.builtin.Type.Pointer.Size;
        pub const AddressSpace = std.builtin.AddressSpace;
    };
 2117
 2118    /// Extern so that hashing can be done via memory reinterpreting.
 2119    pub const ArrayType = extern struct {
 2120        len: u64,
 2121        child: Index,
 2122        sentinel: Index = .none,
 2123
 2124        pub fn lenIncludingSentinel(array_type: ArrayType) u64 {
 2125            return array_type.len + @intFromBool(array_type.sentinel != .none);
 2126        }
 2127    };
 2128
    /// Extern so that hashing can be done via memory reinterpreting.
    pub const VectorType = extern struct {
        /// Number of vector elements.
        len: u32,
        /// Element type.
        child: Index,
    };
 2134
    /// A tuple type: per-field types and (optionally) comptime-known values.
    pub const TupleType = struct {
        /// The type of each tuple field.
        types: Index.Slice,
        /// These elements may be `none`, indicating runtime-known.
        values: Index.Slice,
    };
 2140
    /// This is the hashmap key. To fetch other data associated with the type, see:
    /// * `loadStructType`
    /// * `loadUnionType`
    /// * `loadEnumType`
    /// * `loadOpaqueType`
    /// Note: `hash64` hashes the active tag of this union, so variant identity
    /// is part of the key.
    pub const NamespaceType = union(enum) {
        /// This type corresponds to an actual source declaration, e.g. `struct { ... }`.
        /// It is hashed based on its ZIR instruction index and set of captures.
        declared: Declared,
        /// This type is an automatically-generated enum tag type for a union.
        /// It is hashed based on the index of the union type it corresponds to.
        generated_tag: struct {
            /// The union for which this is a tag type.
            union_type: Index,
        },
        /// This type originates from a reification via `@Enum`, `@Struct`, `@Union` or from an anonymous initialization.
        /// It is hashed based on its ZIR instruction index and fields, attributes, etc.
        /// To avoid making this key overly complex, the type-specific data is hashed by Sema.
        reified: struct {
            /// A `reify`, `struct_init`, `struct_init_ref`, or `struct_init_anon` instruction.
            /// Alternatively, this is `main_struct_inst` of a ZON file.
            zir_index: TrackedInst.Index,
            /// A hash of this type's attributes, fields, etc, generated by Sema.
            type_hash: u64,
        },

        pub const Declared = struct {
            /// A `struct_decl`, `union_decl`, `enum_decl`, or `opaque_decl` instruction.
            zir_index: TrackedInst.Index,
            /// The captured values of this type. These values must be fully resolved per the language spec.
            captures: union(enum) {
                /// Captures stored inside this `InternPool` (see `CaptureValue.Slice.get`).
                owned: CaptureValue.Slice,
                /// Captures referenced from memory the pool does not own —
                /// NOTE(review): presumably used for lookups before the slice
                /// has been interned; confirm with callers.
                external: []const CaptureValue,
            },
        };
    };
 2177
 2178    pub const FuncType = struct {
 2179        param_types: Index.Slice,
 2180        return_type: Index,
 2181        /// Tells whether a parameter is comptime. See `paramIsComptime` helper
 2182        /// method for accessing this.
 2183        comptime_bits: u32,
 2184        /// Tells whether a parameter is noalias. See `paramIsNoalias` helper
 2185        /// method for accessing this.
 2186        noalias_bits: u32,
 2187        cc: std.builtin.CallingConvention,
 2188        is_var_args: bool,
 2189        is_generic: bool,
 2190        is_noinline: bool,
 2191
 2192        pub fn paramIsComptime(self: @This(), i: u5) bool {
 2193            assert(i < self.param_types.len);
 2194            return @as(u1, @truncate(self.comptime_bits >> i)) != 0;
 2195        }
 2196
 2197        pub fn paramIsNoalias(self: @This(), i: u5) bool {
 2198            assert(i < self.param_types.len);
 2199            return @as(u1, @truncate(self.noalias_bits >> i)) != 0;
 2200        }
 2201
 2202        pub fn eql(a: FuncType, b: FuncType, ip: *const InternPool) bool {
 2203            return std.mem.eql(Index, a.param_types.get(ip), b.param_types.get(ip)) and
 2204                a.return_type == b.return_type and
 2205                a.comptime_bits == b.comptime_bits and
 2206                a.noalias_bits == b.noalias_bits and
 2207                a.is_var_args == b.is_var_args and
 2208                a.is_generic == b.is_generic and
 2209                a.is_noinline == b.is_noinline and
 2210                std.meta.eql(a.cc, b.cc);
 2211        }
 2212
 2213        pub fn hash(self: FuncType, hasher: *Hash, ip: *const InternPool) void {
 2214            for (self.param_types.get(ip)) |param_type| {
 2215                std.hash.autoHash(hasher, param_type);
 2216            }
 2217            std.hash.autoHash(hasher, self.return_type);
 2218            std.hash.autoHash(hasher, self.comptime_bits);
 2219            std.hash.autoHash(hasher, self.noalias_bits);
 2220            std.hash.autoHash(hasher, self.cc);
 2221            std.hash.autoHash(hasher, self.is_var_args);
 2222            std.hash.autoHash(hasher, self.is_generic);
 2223            std.hash.autoHash(hasher, self.is_noinline);
 2224        }
 2225    };
 2226
    /// A runtime variable defined in this `Zcu`.
    pub const Variable = struct {
        /// The variable's type.
        ty: Index,
        /// The variable's initializer value.
        init: Index,
        /// The `Nav` which owns this variable.
        owner_nav: Nav.Index,
        is_threadlocal: bool,
    };
 2234
    /// An `extern` symbol (e.g. `extern fn ...` / `extern var ...`).
    pub const Extern = struct {
        /// The name of the extern symbol.
        name: NullTerminatedString,
        /// The type of the extern symbol itself.
        /// This may be `.anyopaque_type`, in which case the value may not be loaded.
        ty: Index,
        /// Library name if specified.
        /// For example `extern "c" fn write(...) usize` would have 'c' as library name.
        /// Index into the string table bytes.
        lib_name: OptionalNullTerminatedString,
        linkage: std.builtin.GlobalLinkage,
        visibility: std.builtin.SymbolVisibility,
        is_threadlocal: bool,
        is_dll_import: bool,
        relocation: std.builtin.ExternOptions.Relocation,
        is_const: bool,
        /// Requested alignment of the symbol; `.none` for natural alignment.
        alignment: Alignment,
        @"addrspace": std.builtin.AddressSpace,
        /// The ZIR instruction which created this extern; used only for source locations.
        /// This is a `declaration`.
        zir_index: TrackedInst.Index,
        /// The `Nav` corresponding to this extern symbol.
        /// This is ignored by hashing and equality.
        owner_nav: Nav.Index,
        source: Tag.Extern.Flags.Source,
    };
 2261
    /// A function body. The mutable per-function words (`FuncAnalysis`,
    /// `zir_body_inst`, branch quota, resolved error set) live in the owning
    /// thread's `extra` array; the `*_extra_index` fields locate them so they
    /// can be updated in place. Writers take the per-thread extra mutex and
    /// publish with `.release` atomic stores; readers use `.unordered` loads.
    pub const Func = struct {
        /// Identifies which thread's local `extra` array holds this function's
        /// mutable data (see `analysisPtr` et al).
        tid: Zcu.PerThread.Id,
        /// In the case of a generic function, this type will potentially have fewer parameters
        /// than the generic owner's type, because the comptime parameters will be deleted.
        ty: Index,
        /// If this is a function body that has been coerced to a different type, for example
        /// ```
        /// fn f2() !void {}
        /// const f: fn()anyerror!void = f2;
        /// ```
        /// then it contains the original type of the function body.
        uncoerced_ty: Index,
        /// Index into extra array of the `FuncAnalysis` corresponding to this function.
        /// Used for mutating that data.
        analysis_extra_index: u32,
        /// Index into extra array of the `zir_body_inst` corresponding to this function.
        /// Used for mutating that data.
        zir_body_inst_extra_index: u32,
        /// Index into extra array of the resolved inferred error set for this function.
        /// Used for mutating that data.
        /// 0 when the function does not have an inferred error set.
        resolved_error_set_extra_index: u32,
        /// When a generic function is instantiated, branch_quota is inherited from the
        /// active Sema context. Importantly, this value is also updated when an existing
        /// generic function instantiation is found and called.
        /// This field contains the index into the extra array of this value,
        /// so that it can be mutated.
        /// This will be 0 when the function is not a generic function instantiation.
        branch_quota_extra_index: u32,
        /// The `Nav` which owns this function.
        owner_nav: Nav.Index,
        /// The ZIR instruction that is a function instruction. Use this to find
        /// the body. We store this rather than the body directly so that when ZIR
        /// is regenerated on update(), we can map this to the new corresponding
        /// ZIR instruction.
        zir_body_inst: TrackedInst.Index,
        /// Relative to owner Decl.
        lbrace_line: u32,
        /// Relative to owner Decl.
        rbrace_line: u32,
        /// Column of the function body's opening brace.
        lbrace_column: u32,
        /// Column of the function body's closing brace.
        rbrace_column: u32,

        /// The `func_decl` which is the generic function from whence this instance was spawned.
        /// If this is `none` it means the function is not a generic instantiation.
        generic_owner: Index,
        /// If this is a generic function instantiation, this will be non-empty.
        /// Corresponds to the parameters of the `generic_owner` type, which
        /// may have more parameters than `ty`.
        /// Each element is the comptime-known value the generic function was instantiated with,
        /// or `none` if the element is runtime-known.
        /// TODO: as a follow-up optimization, don't store `none` values here since that data
        /// is redundant with `comptime_bits` stored elsewhere.
        comptime_args: Index.Slice,

        /// Returns a pointer that becomes invalid after any additions to the `InternPool`.
        /// Points at the `FuncAnalysis` word inside the owning thread's `extra` array.
        fn analysisPtr(func: Func, ip: *const InternPool) *FuncAnalysis {
            const extra = ip.getLocalShared(func.tid).extra.acquire();
            return @ptrCast(&extra.view().items(.@"0")[func.analysis_extra_index]);
        }

        /// Read the current `FuncAnalysis` with an unordered atomic load.
        pub fn analysisUnordered(func: Func, ip: *const InternPool) FuncAnalysis {
            return @atomicLoad(FuncAnalysis, func.analysisPtr(ip), .unordered);
        }

        /// Update only the `branch_hint` field of this function's `FuncAnalysis`.
        /// The extra mutex serializes the read-modify-write; the new word is
        /// published with a `.release` atomic store.
        pub fn setBranchHint(func: Func, ip: *InternPool, hint: std.builtin.BranchHint) void {
            const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex;
            extra_mutex.lock();
            defer extra_mutex.unlock();

            const analysis_ptr = func.analysisPtr(ip);
            var analysis = analysis_ptr.*;
            analysis.branch_hint = hint;
            @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release);
        }

        /// Mark this function's `FuncAnalysis` as analyzed.
        /// Same locking/publication protocol as `setBranchHint`.
        pub fn setAnalyzed(func: Func, ip: *InternPool) void {
            const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex;
            extra_mutex.lock();
            defer extra_mutex.unlock();

            const analysis_ptr = func.analysisPtr(ip);
            var analysis = analysis_ptr.*;
            analysis.is_analyzed = true;
            @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release);
        }

        /// Returns a pointer that becomes invalid after any additions to the `InternPool`.
        fn zirBodyInstPtr(func: Func, ip: *const InternPool) *TrackedInst.Index {
            const extra = ip.getLocalShared(func.tid).extra.acquire();
            return @ptrCast(&extra.view().items(.@"0")[func.zir_body_inst_extra_index]);
        }

        /// Read the tracked body instruction with an unordered atomic load.
        pub fn zirBodyInstUnordered(func: Func, ip: *const InternPool) TrackedInst.Index {
            return @atomicLoad(TrackedInst.Index, func.zirBodyInstPtr(ip), .unordered);
        }

        /// Returns a pointer that becomes invalid after any additions to the `InternPool`.
        /// Only meaningful for generic instantiations (see `branch_quota_extra_index`).
        fn branchQuotaPtr(func: Func, ip: *const InternPool) *u32 {
            const extra = ip.getLocalShared(func.tid).extra.acquire();
            return &extra.view().items(.@"0")[func.branch_quota_extra_index];
        }

        /// Read the stored branch quota with an unordered atomic load.
        pub fn branchQuotaUnordered(func: Func, ip: *const InternPool) u32 {
            return @atomicLoad(u32, func.branchQuotaPtr(ip), .unordered);
        }

        /// Raise the stored branch quota to at least `new_branch_quota`
        /// (monotonic max), under the extra mutex, published with `.release`.
        pub fn maxBranchQuota(func: Func, ip: *InternPool, new_branch_quota: u32) void {
            const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex;
            extra_mutex.lock();
            defer extra_mutex.unlock();

            const branch_quota_ptr = func.branchQuotaPtr(ip);
            @atomicStore(u32, branch_quota_ptr, @max(branch_quota_ptr.*, new_branch_quota), .release);
        }

        /// Returns a pointer that becomes invalid after any additions to the `InternPool`.
        /// Asserts that this function actually has an inferred error set.
        fn resolvedErrorSetPtr(func: Func, ip: *const InternPool) *Index {
            const extra = ip.getLocalShared(func.tid).extra.acquire();
            assert(func.analysisUnordered(ip).inferred_error_set);
            return @ptrCast(&extra.view().items(.@"0")[func.resolved_error_set_extra_index]);
        }

        /// Read the resolved inferred error set with an unordered atomic load.
        pub fn resolvedErrorSetUnordered(func: Func, ip: *const InternPool) Index {
            return @atomicLoad(Index, func.resolvedErrorSetPtr(ip), .unordered);
        }

        /// Store the resolved inferred error set, under the extra mutex,
        /// published with `.release`.
        pub fn setResolvedErrorSet(func: Func, ip: *InternPool, ies: Index) void {
            const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex;
            extra_mutex.lock();
            defer extra_mutex.unlock();

            @atomicStore(Index, func.resolvedErrorSetPtr(ip), ies, .release);
        }
    };
 2396
 2397    pub const Int = struct {
 2398        ty: Index,
 2399        storage: Storage,
 2400
 2401        pub const Storage = union(enum) {
 2402            u64: u64,
 2403            i64: i64,
 2404            big_int: BigIntConst,
 2405            lazy_align: Index,
 2406            lazy_size: Index,
 2407
 2408            /// Big enough to fit any non-BigInt value
 2409            pub const BigIntSpace = struct {
 2410                /// The +1 is headroom so that operations such as incrementing once
 2411                /// or decrementing once are possible without using an allocator.
 2412                limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb,
 2413            };
 2414
 2415            pub fn toBigInt(storage: Storage, space: *BigIntSpace) BigIntConst {
 2416                return switch (storage) {
 2417                    .big_int => |x| x,
 2418                    inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(),
 2419                    .lazy_align, .lazy_size => unreachable,
 2420                };
 2421            }
 2422        };
 2423    };
 2424
    /// A single error value.
    pub const Error = extern struct {
        /// The type of this error value.
        ty: Index,
        /// The error's name.
        name: NullTerminatedString,
    };
 2429
    /// A value of an error union type: either an error or a payload.
    pub const ErrorUnion = struct {
        /// The error union type.
        ty: Index,
        val: Value,

        pub const Value = union(enum) {
            /// The error case: name of the active error.
            err_name: NullTerminatedString,
            /// The success case: the payload value.
            payload: Index,
        };
    };
 2439
    /// An enum tag value. Extern so it can be hashed by reinterpreting its
    /// bytes (see the `.enum_tag` case in `hash64`).
    pub const EnumTag = extern struct {
        /// The enum type.
        ty: Index,
        /// The integer tag value which has the integer tag type of the enum.
        int: Index,
    };
 2446
    /// A floating point value.
    pub const Float = struct {
        /// The float type.
        ty: Index,
        /// The storage used must match the size of the float type being represented.
        storage: Storage,

        pub const Storage = union(enum) {
            f16: f16,
            f32: f32,
            f64: f64,
            f80: f80,
            f128: f128,
        };
    };
 2460
    /// A pointer value, represented as a base address plus a byte offset.
    /// Note: the variant order of `BaseAddr` feeds `@intFromEnum` in `hash64`;
    /// reordering variants changes hash values.
    pub const Ptr = struct {
        /// This is the pointer type, not the element type.
        ty: Index,
        /// The base address which this pointer is offset from.
        base_addr: BaseAddr,
        /// The offset of this pointer from `base_addr` in bytes.
        byte_offset: u64,

        pub const BaseAddr = union(enum) {
            const Tag = @typeInfo(BaseAddr).@"union".tag_type.?;

            /// Points to the value of a single `Nav`, which may be constant or a `variable`.
            nav: Nav.Index,

            /// Points to the value of a single comptime alloc stored in `Sema`.
            comptime_alloc: ComptimeAllocIndex,

            /// Points to a single unnamed constant value.
            uav: Uav,

            /// Points to a comptime field of a struct. Index is the field's value.
            ///
            /// TODO: this exists because these fields are semantically mutable. We
            /// should probably change the language so that this isn't the case.
            comptime_field: Index,

            /// A pointer with a fixed integer address, usually from `@ptrFromInt`.
            ///
            /// The address is stored entirely by `byte_offset`, which will be positive
            /// and in-range of a `usize`. The base address is, for all intents and purposes, 0.
            int,

            /// A pointer to the payload of an error union. Index is the error union pointer.
            /// To ensure a canonical representation, the type of the base pointer must:
            /// * be a one-pointer
            /// * be `const`, `volatile` and `allowzero`
            /// * have alignment 1
            /// * have the same address space as this pointer
            /// * have a host size, bit offset, and vector index of 0
            /// See `Value.canonicalizeBasePtr` which enforces these properties.
            eu_payload: Index,

            /// A pointer to the payload of a non-pointer-like optional. Index is the
            /// optional pointer. To ensure a canonical representation, the base
            /// pointer is subject to the same restrictions as in `eu_payload`.
            opt_payload: Index,

            /// A pointer to a field of a slice, or of an auto-layout struct or union. Slice fields
            /// are referenced according to `Value.slice_ptr_index` and `Value.slice_len_index`.
            /// Base is the aggregate pointer, which is subject to the same restrictions as
            /// in `eu_payload`.
            field: BaseIndex,

            /// A pointer to an element of a comptime-only array. Base is the
            /// many-pointer we are indexing into. It is subject to the same restrictions
            /// as in `eu_payload`, except it must be a many-pointer rather than a one-pointer.
            ///
            /// The element type of the base pointer must NOT be an array. Additionally, the
            /// base pointer is guaranteed to not be an `arr_elem` into a pointer with the
            /// same child type. Thus, since there are no two comptime-only types which are
            /// IMC to one another, the only case where the base pointer may also be an
            /// `arr_elem` is when this pointer is semantically invalid (e.g. it reinterprets
            /// a `type` as a `comptime_int`). These restrictions are in place to ensure
            /// a canonical representation.
            ///
            /// This kind of base address differs from others in that it may refer to any
            /// sequence of values; for instance, an `arr_elem` at index 2 may refer to
            /// any number of elements starting from index 2.
            ///
            /// Index must not be 0. To refer to the element at index 0, simply reinterpret
            /// the aggregate pointer.
            arr_elem: BaseIndex,

            /// A base pointer together with an element/field index.
            pub const BaseIndex = struct {
                base: Index,
                index: u64,
            };
            pub const Uav = extern struct {
                val: Index,
                /// Contains the canonical pointer type of the anonymous
                /// declaration. This may equal `ty` of the `Ptr` or it may be
                /// different. Importantly, when lowering the anonymous decl,
                /// the original pointer type alignment must be used.
                orig_ty: Index,
            };

            /// Equality: active tags must match, then payloads are compared
            /// per-variant (`int` carries no payload, so tag match suffices).
            pub fn eql(a: BaseAddr, b: BaseAddr) bool {
                if (@as(Key.Ptr.BaseAddr.Tag, a) != @as(Key.Ptr.BaseAddr.Tag, b)) return false;

                return switch (a) {
                    .nav => |a_nav| a_nav == b.nav,
                    .comptime_alloc => |a_alloc| a_alloc == b.comptime_alloc,
                    .uav => |ad| ad.val == b.uav.val and
                        ad.orig_ty == b.uav.orig_ty,
                    .int => true,
                    .eu_payload => |a_eu_payload| a_eu_payload == b.eu_payload,
                    .opt_payload => |a_opt_payload| a_opt_payload == b.opt_payload,
                    .comptime_field => |a_comptime_field| a_comptime_field == b.comptime_field,
                    .arr_elem => |a_elem| std.meta.eql(a_elem, b.arr_elem),
                    .field => |a_field| std.meta.eql(a_field, b.field),
                };
            }
        };
    };
 2565
    /// A slice value: a many-pointer plus a length.
    pub const Slice = struct {
        /// This is the slice type, not the element type.
        ty: Index,
        /// The slice's `ptr` field. Must be a many-ptr with the same properties as `ty`.
        ptr: Index,
        /// The slice's `len` field. Must be a `usize`.
        len: Index,
    };
 2574
    /// An optional value.
    /// `null` is represented by the `val` field being `none`.
    pub const Opt = extern struct {
        /// This is the optional type; not the payload type.
        ty: Index,
        /// This could be `none`, indicating the optional is `null`.
        val: Index,
    };
 2582
    /// An instance of a union: an active tag (possibly unknown) and a value.
    pub const Union = extern struct {
        /// This is the union type; not the field type.
        ty: Index,
        /// Indicates the active field. This could be `none`, which indicates the tag is not known. `none` is only a valid value for extern and packed unions.
        /// In those cases, the type of `val` is:
        ///   extern: a u8 array of the same byte length as the union
        ///   packed: an unsigned integer with the same bit size as the union
        tag: Index,
        /// The value of the active field.
        val: Index,
    };
 2594
 2595    pub const Aggregate = struct {
 2596        ty: Index,
 2597        storage: Storage,
 2598
 2599        pub const Storage = union(enum) {
 2600            bytes: String,
 2601            elems: []const Index,
 2602            repeated_elem: Index,
 2603
 2604            pub fn values(self: *const Storage) []const Index {
 2605                return switch (self.*) {
 2606                    .bytes => &.{},
 2607                    .elems => |elems| elems,
 2608                    .repeated_elem => |*elem| @as(*const [1]Index, elem),
 2609                };
 2610            }
 2611        };
 2612    };
 2613
    /// A comptime function call with a memoized result.
    pub const MemoizedCall = struct {
        /// The function that was called.
        func: Index,
        /// The argument values the call was made with.
        arg_values: []const Index,
        /// The memoized result value of the call.
        result: Index,
        // NOTE(review): presumably the number of branches the call consumed
        // (cf. branch quota handling in `Func`) — confirm against Sema.
        branch_count: u32,
    };
 2620
 2621    pub fn hash32(key: Key, ip: *const InternPool) u32 {
 2622        return @truncate(key.hash64(ip));
 2623    }
 2624
 2625    pub fn hash64(key: Key, ip: *const InternPool) u64 {
 2626        const asBytes = std.mem.asBytes;
 2627        const KeyTag = @typeInfo(Key).@"union".tag_type.?;
 2628        const seed = @intFromEnum(@as(KeyTag, key));
 2629        return switch (key) {
 2630            // TODO: assert no padding in these types
 2631            inline .ptr_type,
 2632            .array_type,
 2633            .vector_type,
 2634            .opt_type,
 2635            .anyframe_type,
 2636            .error_union_type,
 2637            .simple_type,
 2638            .simple_value,
 2639            .opt,
 2640            .undef,
 2641            .err,
 2642            .enum_literal,
 2643            .enum_tag,
 2644            .empty_enum_value,
 2645            .inferred_error_set_type,
 2646            .un,
 2647            => |x| Hash.hash(seed, asBytes(&x)),
 2648
 2649            .int_type => |x| Hash.hash(seed + @intFromEnum(x.signedness), asBytes(&x.bits)),
 2650
 2651            .error_union => |x| switch (x.val) {
 2652                .err_name => |y| Hash.hash(seed + 0, asBytes(&x.ty) ++ asBytes(&y)),
 2653                .payload => |y| Hash.hash(seed + 1, asBytes(&x.ty) ++ asBytes(&y)),
 2654            },
 2655
 2656            .variable => |variable| Hash.hash(seed, asBytes(&variable.owner_nav)),
 2657
 2658            .opaque_type,
 2659            .enum_type,
 2660            .union_type,
 2661            .struct_type,
 2662            => |namespace_type| {
 2663                var hasher = Hash.init(seed);
 2664                std.hash.autoHash(&hasher, std.meta.activeTag(namespace_type));
 2665                switch (namespace_type) {
 2666                    .declared => |declared| {
 2667                        std.hash.autoHash(&hasher, declared.zir_index);
 2668                        const captures = switch (declared.captures) {
 2669                            .owned => |cvs| cvs.get(ip),
 2670                            .external => |cvs| cvs,
 2671                        };
 2672                        for (captures) |cv| {
 2673                            std.hash.autoHash(&hasher, cv);
 2674                        }
 2675                    },
 2676                    .generated_tag => |generated_tag| {
 2677                        std.hash.autoHash(&hasher, generated_tag.union_type);
 2678                    },
 2679                    .reified => |reified| {
 2680                        std.hash.autoHash(&hasher, reified.zir_index);
 2681                        std.hash.autoHash(&hasher, reified.type_hash);
 2682                    },
 2683                }
 2684                return hasher.final();
 2685            },
 2686
 2687            .int => |int| {
 2688                var hasher = Hash.init(seed);
 2689                // Canonicalize all integers by converting them to BigIntConst.
 2690                switch (int.storage) {
 2691                    .u64, .i64, .big_int => {
 2692                        var buffer: Key.Int.Storage.BigIntSpace = undefined;
 2693                        const big_int = int.storage.toBigInt(&buffer);
 2694
 2695                        std.hash.autoHash(&hasher, int.ty);
 2696                        std.hash.autoHash(&hasher, big_int.positive);
 2697                        for (big_int.limbs) |limb| std.hash.autoHash(&hasher, limb);
 2698                    },
 2699                    .lazy_align, .lazy_size => |lazy_ty| {
 2700                        std.hash.autoHash(
 2701                            &hasher,
 2702                            @as(@typeInfo(Key.Int.Storage).@"union".tag_type.?, int.storage),
 2703                        );
 2704                        std.hash.autoHash(&hasher, lazy_ty);
 2705                    },
 2706                }
 2707                return hasher.final();
 2708            },
 2709
 2710            .float => |float| {
 2711                var hasher = Hash.init(seed);
 2712                std.hash.autoHash(&hasher, float.ty);
 2713                switch (float.storage) {
 2714                    inline else => |val| std.hash.autoHash(
 2715                        &hasher,
 2716                        @as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))), @bitCast(val)),
 2717                    ),
 2718                }
 2719                return hasher.final();
 2720            },
 2721
 2722            .slice => |slice| Hash.hash(seed, asBytes(&slice.ty) ++ asBytes(&slice.ptr) ++ asBytes(&slice.len)),
 2723
 2724            .ptr => |ptr| {
 2725                // Int-to-ptr pointers are hashed separately than decl-referencing pointers.
 2726                // This is sound due to pointer provenance rules.
 2727                const addr_tag: Key.Ptr.BaseAddr.Tag = ptr.base_addr;
 2728                const seed2 = seed + @intFromEnum(addr_tag);
 2729                const big_offset: i128 = ptr.byte_offset;
 2730                const common = asBytes(&ptr.ty) ++ asBytes(&big_offset);
 2731                return switch (ptr.base_addr) {
 2732                    inline .nav,
 2733                    .comptime_alloc,
 2734                    .uav,
 2735                    .int,
 2736                    .eu_payload,
 2737                    .opt_payload,
 2738                    .comptime_field,
 2739                    => |x| Hash.hash(seed2, common ++ asBytes(&x)),
 2740
 2741                    .arr_elem, .field => |x| Hash.hash(
 2742                        seed2,
 2743                        common ++ asBytes(&x.base) ++ asBytes(&x.index),
 2744                    ),
 2745                };
 2746            },
 2747
 2748            .aggregate => |aggregate| {
 2749                var hasher = Hash.init(seed);
 2750                std.hash.autoHash(&hasher, aggregate.ty);
 2751                const len = ip.aggregateTypeLen(aggregate.ty);
 2752                const child = switch (ip.indexToKey(aggregate.ty)) {
 2753                    .array_type => |array_type| array_type.child,
 2754                    .vector_type => |vector_type| vector_type.child,
 2755                    .tuple_type, .struct_type => .none,
 2756                    else => unreachable,
 2757                };
 2758
 2759                if (child == .u8_type) {
 2760                    switch (aggregate.storage) {
 2761                        .bytes => |bytes| for (bytes.toSlice(len, ip)) |byte| {
 2762                            std.hash.autoHash(&hasher, KeyTag.int);
 2763                            std.hash.autoHash(&hasher, byte);
 2764                        },
 2765                        .elems => |elems| for (elems[0..@intCast(len)]) |elem| {
 2766                            const elem_key = ip.indexToKey(elem);
 2767                            std.hash.autoHash(&hasher, @as(KeyTag, elem_key));
 2768                            switch (elem_key) {
 2769                                .undef => {},
 2770                                .int => |int| std.hash.autoHash(
 2771                                    &hasher,
 2772                                    @as(u8, @intCast(int.storage.u64)),
 2773                                ),
 2774                                else => unreachable,
 2775                            }
 2776                        },
 2777                        .repeated_elem => |elem| {
 2778                            const elem_key = ip.indexToKey(elem);
 2779                            var remaining = len;
 2780                            while (remaining > 0) : (remaining -= 1) {
 2781                                std.hash.autoHash(&hasher, @as(KeyTag, elem_key));
 2782                                switch (elem_key) {
 2783                                    .undef => {},
 2784                                    .int => |int| std.hash.autoHash(
 2785                                        &hasher,
 2786                                        @as(u8, @intCast(int.storage.u64)),
 2787                                    ),
 2788                                    else => unreachable,
 2789                                }
 2790                            }
 2791                        },
 2792                    }
 2793                    return hasher.final();
 2794                }
 2795
 2796                switch (aggregate.storage) {
 2797                    .bytes => unreachable,
 2798                    .elems => |elems| for (elems[0..@intCast(len)]) |elem|
 2799                        std.hash.autoHash(&hasher, elem),
 2800                    .repeated_elem => |elem| {
 2801                        var remaining = len;
 2802                        while (remaining > 0) : (remaining -= 1) std.hash.autoHash(&hasher, elem);
 2803                    },
 2804                }
 2805                return hasher.final();
 2806            },
 2807
 2808            .error_set_type => |x| Hash.hash(seed, std.mem.sliceAsBytes(x.names.get(ip))),
 2809
 2810            .tuple_type => |tuple_type| {
 2811                var hasher = Hash.init(seed);
 2812                for (tuple_type.types.get(ip)) |elem| std.hash.autoHash(&hasher, elem);
 2813                for (tuple_type.values.get(ip)) |elem| std.hash.autoHash(&hasher, elem);
 2814                return hasher.final();
 2815            },
 2816
 2817            .func_type => |func_type| {
 2818                var hasher = Hash.init(seed);
 2819                func_type.hash(&hasher, ip);
 2820                return hasher.final();
 2821            },
 2822
 2823            .memoized_call => |memoized_call| {
 2824                var hasher = Hash.init(seed);
 2825                std.hash.autoHash(&hasher, memoized_call.func);
 2826                for (memoized_call.arg_values) |arg| std.hash.autoHash(&hasher, arg);
 2827                return hasher.final();
 2828            },
 2829
 2830            .func => |func| {
 2831                // In the case of a function with an inferred error set, we
 2832                // must not include the inferred error set type in the hash,
 2833                // otherwise we would get false negatives for interning generic
 2834                // function instances which have inferred error sets.
 2835
 2836                if (func.generic_owner == .none and func.resolved_error_set_extra_index == 0) {
 2837                    const bytes = asBytes(&func.owner_nav) ++ asBytes(&func.ty) ++
 2838                        [1]u8{@intFromBool(func.uncoerced_ty == func.ty)};
 2839                    return Hash.hash(seed, bytes);
 2840                }
 2841
 2842                var hasher = Hash.init(seed);
 2843                std.hash.autoHash(&hasher, func.generic_owner);
 2844                std.hash.autoHash(&hasher, func.uncoerced_ty == func.ty);
 2845                for (func.comptime_args.get(ip)) |arg| std.hash.autoHash(&hasher, arg);
 2846                if (func.resolved_error_set_extra_index == 0) {
 2847                    std.hash.autoHash(&hasher, func.ty);
 2848                } else {
 2849                    var ty_info = ip.indexToFuncType(func.ty).?;
 2850                    ty_info.return_type = ip.errorUnionPayload(ty_info.return_type);
 2851                    ty_info.hash(&hasher, ip);
 2852                }
 2853                return hasher.final();
 2854            },
 2855
 2856            .@"extern" => |e| Hash.hash(seed, asBytes(&e.name) ++
 2857                asBytes(&e.ty) ++ asBytes(&e.lib_name) ++
 2858                asBytes(&e.linkage) ++ asBytes(&e.visibility) ++
 2859                asBytes(&e.is_threadlocal) ++ asBytes(&e.is_dll_import) ++
 2860                asBytes(&e.relocation) ++
 2861                asBytes(&e.is_const) ++ asBytes(&e.alignment) ++ asBytes(&e.@"addrspace") ++
 2862                asBytes(&e.zir_index) ++ &[1]u8{@intFromEnum(e.source)}),
 2863        };
 2864    }
 2865
    /// Deep equality between two interned `Key`s.
    /// Keys with different active tags are never equal; otherwise the payloads
    /// are compared tag-by-tag, mirroring the hashing switch above.
    pub fn eql(a: Key, b: Key, ip: *const InternPool) bool {
        const KeyTag = @typeInfo(Key).@"union".tag_type.?;
        const a_tag: KeyTag = a;
        const b_tag: KeyTag = b;
        // Different kinds of key are never equal to one another.
        if (a_tag != b_tag) return false;
        switch (a) {
            .int_type => |a_info| {
                const b_info = b.int_type;
                return std.meta.eql(a_info, b_info);
            },
            .ptr_type => |a_info| {
                const b_info = b.ptr_type;
                return std.meta.eql(a_info, b_info);
            },
            .array_type => |a_info| {
                const b_info = b.array_type;
                return std.meta.eql(a_info, b_info);
            },
            .vector_type => |a_info| {
                const b_info = b.vector_type;
                return std.meta.eql(a_info, b_info);
            },
            .opt_type => |a_info| {
                const b_info = b.opt_type;
                return a_info == b_info;
            },
            .anyframe_type => |a_info| {
                const b_info = b.anyframe_type;
                return a_info == b_info;
            },
            .error_union_type => |a_info| {
                const b_info = b.error_union_type;
                return std.meta.eql(a_info, b_info);
            },
            .simple_type => |a_info| {
                const b_info = b.simple_type;
                return a_info == b_info;
            },
            .simple_value => |a_info| {
                const b_info = b.simple_value;
                return a_info == b_info;
            },
            .undef => |a_info| {
                const b_info = b.undef;
                return a_info == b_info;
            },
            .opt => |a_info| {
                const b_info = b.opt;
                return std.meta.eql(a_info, b_info);
            },
            .un => |a_info| {
                const b_info = b.un;
                return std.meta.eql(a_info, b_info);
            },
            .err => |a_info| {
                const b_info = b.err;
                return std.meta.eql(a_info, b_info);
            },
            .error_union => |a_info| {
                const b_info = b.error_union;
                return std.meta.eql(a_info, b_info);
            },
            .enum_literal => |a_info| {
                const b_info = b.enum_literal;
                return a_info == b_info;
            },
            .enum_tag => |a_info| {
                const b_info = b.enum_tag;
                return std.meta.eql(a_info, b_info);
            },
            .empty_enum_value => |a_info| {
                const b_info = b.empty_enum_value;
                return a_info == b_info;
            },

            .variable => |a_info| {
                const b_info = b.variable;
                // NOTE(review): only these four fields participate in equality —
                // confirm remaining `Variable` fields are derived/irrelevant to identity.
                return a_info.ty == b_info.ty and
                    a_info.init == b_info.init and
                    a_info.owner_nav == b_info.owner_nav and
                    a_info.is_threadlocal == b_info.is_threadlocal;
            },
            .@"extern" => |a_info| {
                const b_info = b.@"extern";
                return a_info.name == b_info.name and
                    a_info.ty == b_info.ty and
                    a_info.lib_name == b_info.lib_name and
                    a_info.linkage == b_info.linkage and
                    a_info.visibility == b_info.visibility and
                    a_info.is_threadlocal == b_info.is_threadlocal and
                    a_info.is_dll_import == b_info.is_dll_import and
                    a_info.relocation == b_info.relocation and
                    a_info.is_const == b_info.is_const and
                    a_info.alignment == b_info.alignment and
                    a_info.@"addrspace" == b_info.@"addrspace" and
                    a_info.zir_index == b_info.zir_index and
                    a_info.source == b_info.source;
            },
            .func => |a_info| {
                const b_info = b.func;

                if (a_info.generic_owner != b_info.generic_owner)
                    return false;

                // Non-generic functions are identified by their owner `Nav`;
                // generic instances by their comptime argument values.
                if (a_info.generic_owner == .none) {
                    if (a_info.owner_nav != b_info.owner_nav)
                        return false;
                } else {
                    if (!std.mem.eql(
                        Index,
                        a_info.comptime_args.get(ip),
                        b_info.comptime_args.get(ip),
                    )) return false;
                }

                // Whether the type was coerced must agree between the two.
                if ((a_info.ty == a_info.uncoerced_ty) !=
                    (b_info.ty == b_info.uncoerced_ty))
                {
                    return false;
                }

                if (a_info.ty == b_info.ty)
                    return true;

                // There is one case where the types may be inequal but we
                // still want to find the same function body instance. In the
                // case of the functions having an inferred error set, the key
                // used to find an existing function body will necessarily have
                // a unique inferred error set type, because it refers to the
                // function body InternPool Index. To make this case work we
                // omit the inferred error set from the equality check.
                if (a_info.resolved_error_set_extra_index == 0 or
                    b_info.resolved_error_set_extra_index == 0)
                {
                    return false;
                }
                var a_ty_info = ip.indexToFuncType(a_info.ty).?;
                a_ty_info.return_type = ip.errorUnionPayload(a_ty_info.return_type);
                var b_ty_info = ip.indexToFuncType(b_info.ty).?;
                b_ty_info.return_type = ip.errorUnionPayload(b_ty_info.return_type);
                return a_ty_info.eql(b_ty_info, ip);
            },

            .slice => |a_info| {
                const b_info = b.slice;
                if (a_info.ty != b_info.ty) return false;
                if (a_info.ptr != b_info.ptr) return false;
                if (a_info.len != b_info.len) return false;
                return true;
            },

            .ptr => |a_info| {
                const b_info = b.ptr;
                if (a_info.ty != b_info.ty) return false;
                if (a_info.byte_offset != b_info.byte_offset) return false;
                if (!a_info.base_addr.eql(b_info.base_addr)) return false;
                return true;
            },

            .int => |a_info| {
                const b_info = b.int;

                if (a_info.ty != b_info.ty)
                    return false;

                // Integers compare by numeric value regardless of storage
                // representation, except lazy values which only match themselves.
                return switch (a_info.storage) {
                    .u64 => |aa| switch (b_info.storage) {
                        .u64 => |bb| aa == bb,
                        .i64 => |bb| aa == bb,
                        .big_int => |bb| bb.orderAgainstScalar(aa) == .eq,
                        .lazy_align, .lazy_size => false,
                    },
                    .i64 => |aa| switch (b_info.storage) {
                        .u64 => |bb| aa == bb,
                        .i64 => |bb| aa == bb,
                        .big_int => |bb| bb.orderAgainstScalar(aa) == .eq,
                        .lazy_align, .lazy_size => false,
                    },
                    .big_int => |aa| switch (b_info.storage) {
                        .u64 => |bb| aa.orderAgainstScalar(bb) == .eq,
                        .i64 => |bb| aa.orderAgainstScalar(bb) == .eq,
                        .big_int => |bb| aa.eql(bb),
                        .lazy_align, .lazy_size => false,
                    },
                    .lazy_align => |aa| switch (b_info.storage) {
                        .u64, .i64, .big_int, .lazy_size => false,
                        .lazy_align => |bb| aa == bb,
                    },
                    .lazy_size => |aa| switch (b_info.storage) {
                        .u64, .i64, .big_int, .lazy_align => false,
                        .lazy_size => |bb| aa == bb,
                    },
                };
            },

            .float => |a_info| {
                const b_info = b.float;

                if (a_info.ty != b_info.ty)
                    return false;

                if (a_info.ty == .c_longdouble_type and a_info.storage != .f80) {
                    // These are strange: we'll sometimes represent them as f128, even if the
                    // underlying type is smaller. f80 is an exception: see float_c_longdouble_f80.
                    const a_val: u128 = switch (a_info.storage) {
                        inline else => |val| @bitCast(@as(f128, @floatCast(val))),
                    };
                    const b_val: u128 = switch (b_info.storage) {
                        inline else => |val| @bitCast(@as(f128, @floatCast(val))),
                    };
                    return a_val == b_val;
                }

                const StorageTag = @typeInfo(Key.Float.Storage).@"union".tag_type.?;
                assert(@as(StorageTag, a_info.storage) == @as(StorageTag, b_info.storage));

                // Compare bit patterns rather than float values so that
                // distinct NaN payloads and signed zeroes are distinguished.
                switch (a_info.storage) {
                    inline else => |val, tag| {
                        const Bits = std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val)));
                        const a_bits: Bits = @bitCast(val);
                        const b_bits: Bits = @bitCast(@field(b_info.storage, @tagName(tag)));
                        return a_bits == b_bits;
                    },
                }
            },

            inline .opaque_type, .enum_type, .union_type, .struct_type => |a_info, a_tag_ct| {
                const b_info = @field(b, @tagName(a_tag_ct));
                // Declared, generated, and reified types are distinct kinds of
                // provenance and never compare equal across kinds.
                if (std.meta.activeTag(a_info) != b_info) return false;
                switch (a_info) {
                    .declared => |a_d| {
                        const b_d = b_info.declared;
                        if (a_d.zir_index != b_d.zir_index) return false;
                        const a_captures = switch (a_d.captures) {
                            .owned => |s| s.get(ip),
                            .external => |cvs| cvs,
                        };
                        const b_captures = switch (b_d.captures) {
                            .owned => |s| s.get(ip),
                            .external => |cvs| cvs,
                        };
                        return std.mem.eql(u32, @ptrCast(a_captures), @ptrCast(b_captures));
                    },
                    .generated_tag => |a_gt| return a_gt.union_type == b_info.generated_tag.union_type,
                    .reified => |a_r| {
                        const b_r = b_info.reified;
                        return a_r.zir_index == b_r.zir_index and
                            a_r.type_hash == b_r.type_hash;
                    },
                }
            },
            .aggregate => |a_info| {
                const b_info = b.aggregate;
                if (a_info.ty != b_info.ty) return false;

                const len = ip.aggregateTypeLen(a_info.ty);
                const StorageTag = @typeInfo(Key.Aggregate.Storage).@"union".tag_type.?;
                // Different storage representations: fall back to
                // element-by-element comparison of interned indices.
                if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) {
                    for (0..@intCast(len)) |elem_index| {
                        const a_elem = switch (a_info.storage) {
                            // If this byte value was never interned as a u8,
                            // it cannot equal the other side's element index.
                            .bytes => |bytes| ip.getIfExists(.{ .int = .{
                                .ty = .u8_type,
                                .storage = .{ .u64 = bytes.at(elem_index, ip) },
                            } }) orelse return false,
                            .elems => |elems| elems[elem_index],
                            .repeated_elem => |elem| elem,
                        };
                        const b_elem = switch (b_info.storage) {
                            .bytes => |bytes| ip.getIfExists(.{ .int = .{
                                .ty = .u8_type,
                                .storage = .{ .u64 = bytes.at(elem_index, ip) },
                            } }) orelse return false,
                            .elems => |elems| elems[elem_index],
                            .repeated_elem => |elem| elem,
                        };
                        if (a_elem != b_elem) return false;
                    }
                    return true;
                }

                // Same storage representation: compare directly.
                switch (a_info.storage) {
                    .bytes => |a_bytes| {
                        const b_bytes = b_info.storage.bytes;
                        return a_bytes == b_bytes or
                            std.mem.eql(u8, a_bytes.toSlice(len, ip), b_bytes.toSlice(len, ip));
                    },
                    .elems => |a_elems| {
                        const b_elems = b_info.storage.elems;
                        return std.mem.eql(
                            Index,
                            a_elems[0..@intCast(len)],
                            b_elems[0..@intCast(len)],
                        );
                    },
                    .repeated_elem => |a_elem| {
                        const b_elem = b_info.storage.repeated_elem;
                        return a_elem == b_elem;
                    },
                }
            },
            .tuple_type => |a_info| {
                const b_info = b.tuple_type;
                return std.mem.eql(Index, a_info.types.get(ip), b_info.types.get(ip)) and
                    std.mem.eql(Index, a_info.values.get(ip), b_info.values.get(ip));
            },
            .error_set_type => |a_info| {
                const b_info = b.error_set_type;
                return std.mem.eql(NullTerminatedString, a_info.names.get(ip), b_info.names.get(ip));
            },
            .inferred_error_set_type => |a_info| {
                const b_info = b.inferred_error_set_type;
                return a_info == b_info;
            },

            .func_type => |a_info| {
                const b_info = b.func_type;
                return Key.FuncType.eql(a_info, b_info, ip);
            },

            .memoized_call => |a_info| {
                const b_info = b.memoized_call;
                return a_info.func == b_info.func and
                    std.mem.eql(Index, a_info.arg_values, b_info.arg_values);
            },
        }
    }
 3192
    /// Returns the `Index` of the type of the value this key represents.
    /// All type keys have type `.type_type`; most value keys carry their type
    /// in a `ty` field; `memoized_call` is not a value and must not be queried.
    pub fn typeOf(key: Key) Index {
        return switch (key) {
            .int_type,
            .ptr_type,
            .array_type,
            .vector_type,
            .opt_type,
            .anyframe_type,
            .error_union_type,
            .error_set_type,
            .inferred_error_set_type,
            .simple_type,
            .struct_type,
            .union_type,
            .opaque_type,
            .enum_type,
            .tuple_type,
            .func_type,
            => .type_type,

            // `inline` so that `x.ty` resolves per payload type at comptime.
            inline .ptr,
            .slice,
            .int,
            .float,
            .opt,
            .variable,
            .@"extern",
            .func,
            .err,
            .error_union,
            .enum_tag,
            .aggregate,
            .un,
            => |x| x.ty,

            .enum_literal => .enum_literal_type,

            // For these, the payload itself is the type index.
            .undef => |x| x,
            .empty_enum_value => |x| x,

            .simple_value => |s| switch (s) {
                .undefined => .undefined_type,
                .void => .void_type,
                .null => .null_type,
                .false, .true => .bool_type,
                .empty_tuple => .empty_tuple_type,
                .@"unreachable" => .noreturn_type,
            },

            .memoized_call => unreachable,
        };
    }
 3245};
 3246
 3247pub const RequiresComptime = enum(u2) { no, yes, unknown, wip };
 3248
 3249// Unlike `Tag.TypeUnion` which is an encoding, and `Key.UnionType` which is a
 3250// minimal hashmap key, this type is a convenience type that contains info
 3251// needed by semantic analysis.
 3252pub const LoadedUnionType = struct {
 3253    tid: Zcu.PerThread.Id,
 3254    /// The index of the `Tag.TypeUnion` payload.
 3255    extra_index: u32,
 3256    // TODO: the non-fqn will be needed by the new dwarf structure
 3257    /// The name of this union type.
 3258    name: NullTerminatedString,
 3259    /// Represents the declarations inside this union.
 3260    namespace: NamespaceIndex,
 3261    /// If this is a declared type with the `.parent` name strategy, this is the `Nav` it was named after.
 3262    /// Otherwise, this is `.none`.
 3263    name_nav: Nav.Index.Optional,
 3264    /// The enum tag type.
 3265    enum_tag_ty: Index,
 3266    /// List of field types in declaration order.
 3267    /// These are `none` until `status` is `have_field_types` or `have_layout`.
 3268    field_types: Index.Slice,
 3269    /// List of field alignments in declaration order.
 3270    /// `none` means the ABI alignment of the type.
 3271    /// If this slice has length 0 it means all elements are `none`.
 3272    field_aligns: Alignment.Slice,
 3273    /// Index of the union_decl or reify ZIR instruction.
 3274    zir_index: TrackedInst.Index,
 3275    captures: CaptureValue.Slice,
 3276
 3277    pub const RuntimeTag = enum(u2) {
 3278        none,
 3279        safety,
 3280        tagged,
 3281
 3282        pub fn hasTag(self: RuntimeTag) bool {
 3283            return switch (self) {
 3284                .none => false,
 3285                .tagged, .safety => true,
 3286            };
 3287        }
 3288    };
 3289
 3290    pub const Status = enum(u3) {
 3291        none,
 3292        field_types_wip,
 3293        have_field_types,
 3294        layout_wip,
 3295        have_layout,
 3296        fully_resolved_wip,
 3297        /// The types and all its fields have had their layout resolved.
 3298        /// Even through pointer, which `have_layout` does not ensure.
 3299        fully_resolved,
 3300
 3301        pub fn haveFieldTypes(status: Status) bool {
 3302            return switch (status) {
 3303                .none,
 3304                .field_types_wip,
 3305                => false,
 3306                .have_field_types,
 3307                .layout_wip,
 3308                .have_layout,
 3309                .fully_resolved_wip,
 3310                .fully_resolved,
 3311                => true,
 3312            };
 3313        }
 3314
 3315        pub fn haveLayout(status: Status) bool {
 3316            return switch (status) {
 3317                .none,
 3318                .field_types_wip,
 3319                .have_field_types,
 3320                .layout_wip,
 3321                => false,
 3322                .have_layout,
 3323                .fully_resolved_wip,
 3324                .fully_resolved,
 3325                => true,
 3326            };
 3327        }
 3328    };
 3329
    /// Loads the `LoadedEnumType` for `enum_tag_ty`, the enum used as this union's tag.
    pub fn loadTagType(self: LoadedUnionType, ip: *const InternPool) LoadedEnumType {
        return ip.loadEnumType(self.enum_tag_ty);
    }
 3333
    /// Pointer to an enum type which is used for the tag of the union.
    /// This type is created even for untagged unions, even when the memory
    /// layout does not store the tag.
    /// Whether zig chooses this type or the user specifies it, it is stored here.
    /// This will be set to the null type until status is `have_field_types`.
    /// This accessor is provided so that the tag type can be mutated, and so that
    /// when it is mutated, the mutations are observed.
    /// The returned pointer expires with any addition to the `InternPool`.
    fn tagTypePtr(self: LoadedUnionType, ip: *const InternPool) *Index {
        const extra = ip.getLocalShared(self.tid).extra.acquire();
        // Locate the `tag_ty` field within this union's `Tag.TypeUnion` payload.
        const field_index = std.meta.fieldIndex(Tag.TypeUnion, "tag_ty").?;
        return @ptrCast(&extra.view().items(.@"0")[self.extra_index + field_index]);
    }
 3347
    /// Atomically loads the union's tag type with `.unordered` ordering.
    /// See `tagTypePtr` for when this value is meaningful.
    pub fn tagTypeUnordered(u: LoadedUnionType, ip: *const InternPool) Index {
        return @atomicLoad(Index, u.tagTypePtr(ip), .unordered);
    }
 3351
    /// Stores the union's tag type. The local `extra` mutex serializes this
    /// against other mutations of the same payload; the `.release` store makes
    /// the new value visible to atomic loads from other threads.
    pub fn setTagType(u: LoadedUnionType, ip: *InternPool, tag_type: Index) void {
        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        @atomicStore(Index, u.tagTypePtr(ip), tag_type, .release);
    }
 3359
    /// Pointer to the `flags` word of this union's `Tag.TypeUnion` payload.
    /// The returned pointer expires with any addition to the `InternPool`.
    fn flagsPtr(self: LoadedUnionType, ip: *const InternPool) *Tag.TypeUnion.Flags {
        const extra = ip.getLocalShared(self.tid).extra.acquire();
        // Locate the `flags` field within the `Tag.TypeUnion` payload.
        const field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?;
        return @ptrCast(&extra.view().items(.@"0")[self.extra_index + field_index]);
    }
 3366
    /// Atomically loads the flags word with `.unordered` ordering.
    pub fn flagsUnordered(u: LoadedUnionType, ip: *const InternPool) Tag.TypeUnion.Flags {
        return @atomicLoad(Tag.TypeUnion.Flags, u.flagsPtr(ip), .unordered);
    }
 3370
 3371    pub fn setStatus(u: LoadedUnionType, ip: *InternPool, status: Status) void {
 3372        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
 3373        extra_mutex.lock();
 3374        defer extra_mutex.unlock();
 3375
 3376        const flags_ptr = u.flagsPtr(ip);
 3377        var flags = flags_ptr.*;
 3378        flags.status = status;
 3379        @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
 3380    }
 3381
    /// Sets `status` only if it is currently `.layout_wip`; otherwise the
    /// flags are left unchanged (the word is still unconditionally re-stored).
    pub fn setStatusIfLayoutWip(u: LoadedUnionType, ip: *InternPool, status: Status) void {
        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = u.flagsPtr(ip);
        var flags = flags_ptr.*;
        if (flags.status == .layout_wip) flags.status = status;
        @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
    }
 3392
 3393    pub fn setAlignment(u: LoadedUnionType, ip: *InternPool, alignment: Alignment) void {
 3394        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
 3395        extra_mutex.lock();
 3396        defer extra_mutex.unlock();
 3397
 3398        const flags_ptr = u.flagsPtr(ip);
 3399        var flags = flags_ptr.*;
 3400        flags.alignment = alignment;
 3401        @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
 3402    }
 3403
    /// If field type resolution is currently in progress (`.field_types_wip`),
    /// records that runtime bits were assumed and returns `true`; otherwise
    /// returns `false` without modifying the flags.
    pub fn assumeRuntimeBitsIfFieldTypesWip(u: LoadedUnionType, ip: *InternPool) bool {
        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = u.flagsPtr(ip);
        var flags = flags_ptr.*;
        // The `defer` performs the conditional store after the return value is
        // computed, while the mutex is still held.
        defer if (flags.status == .field_types_wip) {
            flags.assumed_runtime_bits = true;
            @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
        };
        return flags.status == .field_types_wip;
    }
 3417
    /// Reads `requires_comptime` from the flags via an unordered atomic load.
    pub fn requiresComptime(u: LoadedUnionType, ip: *const InternPool) RequiresComptime {
        return u.flagsUnordered(ip).requires_comptime;
    }
 3421
    /// Returns the current `requires_comptime` state; if it was `.unknown`,
    /// additionally marks it `.wip`. The return value is always the
    /// pre-transition state.
    pub fn setRequiresComptimeWip(u: LoadedUnionType, ip: *InternPool) RequiresComptime {
        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = u.flagsPtr(ip);
        var flags = flags_ptr.*;
        // Store happens after the prior value is returned, under the mutex.
        defer if (flags.requires_comptime == .unknown) {
            flags.requires_comptime = .wip;
            @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
        };
        return flags.requires_comptime;
    }
 3435
    /// Records a final `requires_comptime` result. `.wip` is not a valid
    /// argument here; the wip transition is done by `setRequiresComptimeWip`.
    pub fn setRequiresComptime(u: LoadedUnionType, ip: *InternPool, requires_comptime: RequiresComptime) void {
        assert(requires_comptime != .wip); // see setRequiresComptimeWip

        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = u.flagsPtr(ip);
        var flags = flags_ptr.*;
        flags.requires_comptime = requires_comptime;
        @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
    }
 3448
    /// If field type resolution is in progress, records that this union's
    /// alignment was assumed to be pointer alignment (`ptr_align`) and
    /// returns `true`; otherwise returns `false` without mutating flags.
    pub fn assumePointerAlignedIfFieldTypesWip(u: LoadedUnionType, ip: *InternPool, ptr_align: Alignment) bool {
        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = u.flagsPtr(ip);
        var flags = flags_ptr.*;
        // Deferred so the flags update is published after the return value is
        // computed from the snapshot.
        defer if (flags.status == .field_types_wip) {
            flags.alignment = ptr_align;
            flags.assumed_pointer_aligned = true;
            @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
        };
        return flags.status == .field_types_wip;
    }
 3463
    /// Returns a pointer to the `size` word of the `Tag.TypeUnion` payload
    /// inside this thread's extra array.
    /// The returned pointer expires with any addition to the `InternPool`.
    fn sizePtr(self: LoadedUnionType, ip: *const InternPool) *u32 {
        const extra = ip.getLocalShared(self.tid).extra.acquire();
        const field_index = std.meta.fieldIndex(Tag.TypeUnion, "size").?;
        return &extra.view().items(.@"0")[self.extra_index + field_index];
    }
 3470
 3471    pub fn sizeUnordered(u: LoadedUnionType, ip: *const InternPool) u32 {
 3472        return @atomicLoad(u32, u.sizePtr(ip), .unordered);
 3473    }
 3474
    /// Returns a pointer to the `padding` word of the `Tag.TypeUnion` payload
    /// inside this thread's extra array.
    /// The returned pointer expires with any addition to the `InternPool`.
    fn paddingPtr(self: LoadedUnionType, ip: *const InternPool) *u32 {
        const extra = ip.getLocalShared(self.tid).extra.acquire();
        const field_index = std.meta.fieldIndex(Tag.TypeUnion, "padding").?;
        return &extra.view().items(.@"0")[self.extra_index + field_index];
    }
 3481
 3482    pub fn paddingUnordered(u: LoadedUnionType, ip: *const InternPool) u32 {
 3483        return @atomicLoad(u32, u.paddingPtr(ip), .unordered);
 3484    }
 3485
 3486    pub fn hasTag(self: LoadedUnionType, ip: *const InternPool) bool {
 3487        return self.flagsUnordered(ip).runtime_tag.hasTag();
 3488    }
 3489
 3490    pub fn haveFieldTypes(self: LoadedUnionType, ip: *const InternPool) bool {
 3491        return self.flagsUnordered(ip).status.haveFieldTypes();
 3492    }
 3493
 3494    pub fn haveLayout(self: LoadedUnionType, ip: *const InternPool) bool {
 3495        return self.flagsUnordered(ip).status.haveLayout();
 3496    }
 3497
    /// Publishes the resolved layout of this union: stores size and padding,
    /// then updates alignment and advances `status` to `.have_layout` in one
    /// flags store. The flags store uses .release ordering so a reader that
    /// observes `.have_layout` also observes the size/padding stores.
    pub fn setHaveLayout(u: LoadedUnionType, ip: *InternPool, size: u32, padding: u32, alignment: Alignment) void {
        const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        @atomicStore(u32, u.sizePtr(ip), size, .unordered);
        @atomicStore(u32, u.paddingPtr(ip), padding, .unordered);
        const flags_ptr = u.flagsPtr(ip);
        var flags = flags_ptr.*;
        flags.alignment = alignment;
        flags.status = .have_layout;
        @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release);
    }
 3511
 3512    pub fn fieldAlign(self: LoadedUnionType, ip: *const InternPool, field_index: usize) Alignment {
 3513        if (self.field_aligns.len == 0) return .none;
 3514        return self.field_aligns.get(ip)[field_index];
 3515    }
 3516
 3517    /// This does not mutate the field of LoadedUnionType.
 3518    pub fn setZirIndex(self: LoadedUnionType, ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
 3519        const flags_field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?;
 3520        const zir_index_field_index = std.meta.fieldIndex(Tag.TypeUnion, "zir_index").?;
 3521        const ptr: *TrackedInst.Index.Optional =
 3522            @ptrCast(&ip.extra_.items[self.flags_index - flags_field_index + zir_index_field_index]);
 3523        ptr.* = new_zir_index;
 3524    }
 3525
 3526    pub fn setFieldTypes(self: LoadedUnionType, ip: *const InternPool, types: []const Index) void {
 3527        @memcpy(self.field_types.get(ip), types);
 3528    }
 3529
 3530    pub fn setFieldAligns(self: LoadedUnionType, ip: *const InternPool, aligns: []const Alignment) void {
 3531        if (aligns.len == 0) return;
 3532        assert(self.flagsUnordered(ip).any_aligned_fields);
 3533        @memcpy(self.field_aligns.get(ip), aligns);
 3534    }
 3535};
 3536
/// Decodes the `Tag.TypeUnion` payload at `index` into a `LoadedUnionType`
/// view. The returned slices reference (do not copy) the thread-local extra
/// array identified by the index's `tid`.
pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType {
    const unwrapped_index = index.unwrap(ip);
    const extra_list = unwrapped_index.getExtra(ip);
    const data = unwrapped_index.getData(ip);
    const type_union = extraDataTrail(extra_list, Tag.TypeUnion, data);
    const fields_len = type_union.data.fields_len;

    // Walk the variable-length trailing data that follows the fixed payload.
    // The decode order here must match the encode order: [captures_len]
    // captures, optional reified type hash, field types, packed alignments.
    var extra_index = type_union.end;
    const captures_len = if (type_union.data.flags.any_captures) c: {
        const len = extra_list.view().items(.@"0")[extra_index];
        extra_index += 1;
        break :c len;
    } else 0;

    const captures: CaptureValue.Slice = .{
        .tid = unwrapped_index.tid,
        .start = extra_index,
        .len = captures_len,
    };
    extra_index += captures_len;
    if (type_union.data.flags.is_reified) {
        extra_index += 2; // PackedU64
    }

    const field_types: Index.Slice = .{
        .tid = unwrapped_index.tid,
        .start = extra_index,
        .len = fields_len,
    };
    extra_index += fields_len;

    const field_aligns = if (type_union.data.flags.any_aligned_fields) a: {
        const a: Alignment.Slice = .{
            .tid = unwrapped_index.tid,
            .start = extra_index,
            .len = fields_len,
        };
        // Alignments are packed four per u32 element of the extra array.
        extra_index += std.math.divCeil(u32, fields_len, 4) catch unreachable;
        break :a a;
    } else Alignment.Slice.empty;

    return .{
        .tid = unwrapped_index.tid,
        .extra_index = data,
        .name = type_union.data.name,
        .name_nav = type_union.data.name_nav,
        .namespace = type_union.data.namespace,
        .enum_tag_ty = type_union.data.tag_ty,
        .field_types = field_types,
        .field_aligns = field_aligns,
        .zir_index = type_union.data.zir_index,
        .captures = captures,
    };
}
 3591
/// A decoded view of a struct type stored in the `InternPool`. The contained
/// slices index into the thread-local `extra` array identified by `tid`;
/// mutators serialize packed-flag read-modify-writes through that thread's
/// extra mutex and publish with `.release` atomic stores.
pub const LoadedStructType = struct {
    tid: Zcu.PerThread.Id,
    /// The index of the `Tag.TypeStruct` or `Tag.TypeStructPacked` payload.
    extra_index: u32,
    // TODO: the non-fqn will be needed by the new dwarf structure
    /// The name of this struct type.
    name: NullTerminatedString,
    namespace: NamespaceIndex,
    /// If this is a declared type with the `.parent` name strategy, this is the `Nav` it was named after.
    /// Otherwise, or if this is a file's root struct type, this is `.none`.
    name_nav: Nav.Index.Optional,
    /// Index of the `struct_decl` or `reify` ZIR instruction.
    zir_index: TrackedInst.Index,
    layout: std.builtin.Type.ContainerLayout,
    field_names: NullTerminatedString.Slice,
    field_types: Index.Slice,
    field_inits: Index.Slice,
    field_aligns: Alignment.Slice,
    /// Runtime layout order of fields; meaningful only for `.auto` layout
    /// (see `hasReorderedFields`).
    runtime_order: RuntimeOrder.Slice,
    /// One bit per field; set when the field is a comptime field.
    comptime_bits: ComptimeBits,
    offsets: Offsets,
    names_map: OptionalMapIndex,
    captures: CaptureValue.Slice,

    /// A bit set stored as a span of `u32` words in the extra array.
    pub const ComptimeBits = struct {
        tid: Zcu.PerThread.Id,
        start: u32,
        /// This is the number of u32 elements, not the number of struct fields.
        len: u32,

        pub const empty: ComptimeBits = .{ .tid = .main, .start = 0, .len = 0 };

        /// Returns the live backing words. The slice expires with any
        /// addition to the `InternPool`.
        pub fn get(this: ComptimeBits, ip: *const InternPool) []u32 {
            const extra = ip.getLocalShared(this.tid).extra.acquire();
            return extra.view().items(.@"0")[this.start..][0..this.len];
        }

        /// Returns whether bit `i` is set; an empty set reports all bits clear.
        pub fn getBit(this: ComptimeBits, ip: *const InternPool, i: usize) bool {
            if (this.len == 0) return false;
            return @as(u1, @truncate(this.get(ip)[i / 32] >> @intCast(i % 32))) != 0;
        }

        pub fn setBit(this: ComptimeBits, ip: *const InternPool, i: usize) void {
            this.get(ip)[i / 32] |= @as(u32, 1) << @intCast(i % 32);
        }

        pub fn clearBit(this: ComptimeBits, ip: *const InternPool, i: usize) void {
            this.get(ip)[i / 32] &= ~(@as(u32, 1) << @intCast(i % 32));
        }
    };

    /// A span of `u32` words in the extra array, one per field.
    pub const Offsets = struct {
        tid: Zcu.PerThread.Id,
        start: u32,
        len: u32,

        pub const empty: Offsets = .{ .tid = .main, .start = 0, .len = 0 };

        /// The returned slice expires with any addition to the `InternPool`.
        pub fn get(this: Offsets, ip: *const InternPool) []u32 {
            const extra = ip.getLocalShared(this.tid).extra.acquire();
            return @ptrCast(extra.view().items(.@"0")[this.start..][0..this.len]);
        }
    };

    /// Runtime position of a field after layout resolution. Ordinary values
    /// are runtime indices; the two top values are reserved markers.
    pub const RuntimeOrder = enum(u32) {
        /// Placeholder until layout is resolved.
        unresolved = std.math.maxInt(u32) - 0,
        /// Field not present at runtime
        omitted = std.math.maxInt(u32) - 1,
        _,

        pub const Slice = struct {
            tid: Zcu.PerThread.Id,
            start: u32,
            len: u32,

            pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 };

            /// The returned slice expires with any addition to the `InternPool`.
            pub fn get(slice: RuntimeOrder.Slice, ip: *const InternPool) []RuntimeOrder {
                const extra = ip.getLocalShared(slice.tid).extra.acquire();
                return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]);
            }
        };

        /// Returns the runtime index, or `null` for an omitted field.
        /// Asserts layout has been resolved (value is not `.unresolved`).
        pub fn toInt(i: RuntimeOrder) ?u32 {
            return switch (i) {
                .omitted => null,
                .unresolved => unreachable,
                else => @intFromEnum(i),
            };
        }
    };

    /// Look up field index based on field name.
    pub fn nameIndex(s: LoadedStructType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
        const names_map = s.names_map.unwrap() orelse {
            // No names map: tuple-like case where field "names" are integers.
            const i = name.toUnsigned(ip) orelse return null;
            if (i >= s.field_types.len) return null;
            return i;
        };
        const map = names_map.get(ip);
        const adapter: NullTerminatedString.Adapter = .{ .strings = s.field_names.get(ip) };
        const field_index = map.getIndexAdapted(name, adapter) orelse return null;
        return @intCast(field_index);
    }

    /// Returns the already-existing field with the same name, if any.
    pub fn addFieldName(
        s: LoadedStructType,
        ip: *InternPool,
        name: NullTerminatedString,
    ) ?u32 {
        const extra = ip.getLocalShared(s.tid).extra.acquire();
        return ip.addFieldName(extra, s.names_map.unwrap().?, s.field_names.start, name);
    }

    /// Returns the explicit alignment of field `i`, or `.none` when no field
    /// of this struct has an explicit alignment.
    pub fn fieldAlign(s: LoadedStructType, ip: *const InternPool, i: usize) Alignment {
        if (s.field_aligns.len == 0) return .none;
        return s.field_aligns.get(ip)[i];
    }

    /// Returns the default init of field `i`, or `.none` when this struct has
    /// no field inits. Asserts the inits have been resolved.
    pub fn fieldInit(s: LoadedStructType, ip: *const InternPool, i: usize) Index {
        if (s.field_inits.len == 0) return .none;
        assert(s.haveFieldInits(ip));
        return s.field_inits.get(ip)[i];
    }

    /// Returns the name of field `i`.
    pub fn fieldName(s: LoadedStructType, ip: *const InternPool, i: usize) NullTerminatedString {
        return s.field_names.get(ip)[i];
    }

    /// Returns whether field `i` is a comptime field.
    pub fn fieldIsComptime(s: LoadedStructType, ip: *const InternPool, i: usize) bool {
        return s.comptime_bits.getBit(ip, i);
    }

    /// Marks field `i` as a comptime field.
    pub fn setFieldComptime(s: LoadedStructType, ip: *InternPool, i: usize) void {
        s.comptime_bits.setBit(ip, i);
    }

    /// The returned pointer expires with any addition to the `InternPool`.
    /// Asserts the struct is not packed.
    fn flagsPtr(s: LoadedStructType, ip: *const InternPool) *Tag.TypeStruct.Flags {
        assert(s.layout != .@"packed");
        const extra = ip.getLocalShared(s.tid).extra.acquire();
        const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
        return @ptrCast(&extra.view().items(.@"0")[s.extra_index + flags_field_index]);
    }

    /// Atomically loads the flags word. Asserts the struct is not packed.
    pub fn flagsUnordered(s: LoadedStructType, ip: *const InternPool) Tag.TypeStruct.Flags {
        return @atomicLoad(Tag.TypeStruct.Flags, s.flagsPtr(ip), .unordered);
    }

    /// The returned pointer expires with any addition to the `InternPool`.
    /// Asserts that the struct is packed.
    fn packedFlagsPtr(s: LoadedStructType, ip: *const InternPool) *Tag.TypeStructPacked.Flags {
        assert(s.layout == .@"packed");
        const extra = ip.getLocalShared(s.tid).extra.acquire();
        const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?;
        return @ptrCast(&extra.view().items(.@"0")[s.extra_index + flags_field_index]);
    }

    /// Atomically loads the packed-struct flags word. Asserts the struct is packed.
    pub fn packedFlagsUnordered(s: LoadedStructType, ip: *const InternPool) Tag.TypeStructPacked.Flags {
        return @atomicLoad(Tag.TypeStructPacked.Flags, s.packedFlagsPtr(ip), .unordered);
    }

    /// Reads the non-opv flag calculated during AstGen. Used to short-circuit more
    /// complicated logic.
    pub fn knownNonOpv(s: LoadedStructType, ip: *const InternPool) bool {
        return switch (s.layout) {
            .@"packed" => false,
            .auto, .@"extern" => s.flagsUnordered(ip).known_non_opv,
        };
    }

    /// Returns the cached comptime-requirement resolution state.
    pub fn requiresComptime(s: LoadedStructType, ip: *const InternPool) RequiresComptime {
        return s.flagsUnordered(ip).requires_comptime;
    }

    /// Returns the current `requires_comptime` state. If it was `.unknown`,
    /// transitions it to `.wip`, making the caller responsible for finishing
    /// resolution via `setRequiresComptime`.
    pub fn setRequiresComptimeWip(s: LoadedStructType, ip: *InternPool) RequiresComptime {
        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = s.flagsPtr(ip);
        var flags = flags_ptr.*;
        // Deferred so the pre-transition value is what gets returned.
        defer if (flags.requires_comptime == .unknown) {
            flags.requires_comptime = .wip;
            @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
        };
        return flags.requires_comptime;
    }

    /// Stores the final `requires_comptime` resolution. `.wip` is not a valid
    /// final state; that transition happens only in `setRequiresComptimeWip`.
    pub fn setRequiresComptime(s: LoadedStructType, ip: *InternPool, requires_comptime: RequiresComptime) void {
        assert(requires_comptime != .wip); // see setRequiresComptimeWip

        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = s.flagsPtr(ip);
        var flags = flags_ptr.*;
        flags.requires_comptime = requires_comptime;
        @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
    }

    /// If field type resolution is in progress, records that runtime bits
    /// were assumed and returns `true`; otherwise returns `false` without
    /// mutating flags. Packed structs never have this state.
    pub fn assumeRuntimeBitsIfFieldTypesWip(s: LoadedStructType, ip: *InternPool) bool {
        if (s.layout == .@"packed") return false;

        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = s.flagsPtr(ip);
        var flags = flags_ptr.*;
        defer if (flags.field_types_wip) {
            flags.assumed_runtime_bits = true;
            @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
        };
        return flags.field_types_wip;
    }

    /// Sets the field-types-wip flag, returning its previous value. Packed
    /// structs always report `false` and are left unchanged.
    pub fn setFieldTypesWip(s: LoadedStructType, ip: *InternPool) bool {
        if (s.layout == .@"packed") return false;

        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = s.flagsPtr(ip);
        var flags = flags_ptr.*;
        // Deferred so the previous value is what gets returned.
        defer {
            flags.field_types_wip = true;
            @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
        }
        return flags.field_types_wip;
    }

    /// Clears the field-types-wip flag. No-op for packed structs.
    pub fn clearFieldTypesWip(s: LoadedStructType, ip: *InternPool) void {
        if (s.layout == .@"packed") return;

        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = s.flagsPtr(ip);
        var flags = flags_ptr.*;
        flags.field_types_wip = false;
        @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
    }

    /// Sets the layout-wip flag, returning its previous value. Packed structs
    /// always report `false` and are left unchanged.
    pub fn setLayoutWip(s: LoadedStructType, ip: *InternPool) bool {
        if (s.layout == .@"packed") return false;

        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = s.flagsPtr(ip);
        var flags = flags_ptr.*;
        defer {
            flags.layout_wip = true;
            @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
        }
        return flags.layout_wip;
    }

    /// Clears the layout-wip flag. No-op for packed structs.
    pub fn clearLayoutWip(s: LoadedStructType, ip: *InternPool) void {
        if (s.layout == .@"packed") return;

        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = s.flagsPtr(ip);
        var flags = flags_ptr.*;
        flags.layout_wip = false;
        @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
    }

    /// Stores the resolved alignment into the flags word.
    /// Asserts (via `flagsPtr`) that the struct is not packed.
    pub fn setAlignment(s: LoadedStructType, ip: *InternPool, alignment: Alignment) void {
        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = s.flagsPtr(ip);
        var flags = flags_ptr.*;
        flags.alignment = alignment;
        @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
    }

    /// If field type resolution is in progress, records that this struct's
    /// alignment was assumed to be pointer alignment (`ptr_align`) and
    /// returns `true`; otherwise returns `false` without mutating flags.
    pub fn assumePointerAlignedIfFieldTypesWip(s: LoadedStructType, ip: *InternPool, ptr_align: Alignment) bool {
        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = s.flagsPtr(ip);
        var flags = flags_ptr.*;
        defer if (flags.field_types_wip) {
            flags.alignment = ptr_align;
            flags.assumed_pointer_aligned = true;
            @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
        };
        return flags.field_types_wip;
    }

    /// If alignment resolution is already in progress, assumes pointer
    /// alignment and returns `true`; otherwise marks alignment resolution as
    /// in progress and returns `false`.
    pub fn assumePointerAlignedIfWip(s: LoadedStructType, ip: *InternPool, ptr_align: Alignment) bool {
        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = s.flagsPtr(ip);
        var flags = flags_ptr.*;
        defer {
            if (flags.alignment_wip) {
                flags.alignment = ptr_align;
                flags.assumed_pointer_aligned = true;
            } else flags.alignment_wip = true;
            @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
        }
        return flags.alignment_wip;
    }

    /// Clears the alignment-wip flag. No-op for packed structs.
    pub fn clearAlignmentWip(s: LoadedStructType, ip: *InternPool) void {
        if (s.layout == .@"packed") return;

        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = s.flagsPtr(ip);
        var flags = flags_ptr.*;
        flags.alignment_wip = false;
        @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
    }

    /// Sets the field-inits-wip flag (in the layout-appropriate flags word),
    /// returning its previous value.
    pub fn setInitsWip(s: LoadedStructType, ip: *InternPool) bool {
        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        switch (s.layout) {
            .@"packed" => {
                const flags_ptr = s.packedFlagsPtr(ip);
                var flags = flags_ptr.*;
                defer {
                    flags.field_inits_wip = true;
                    @atomicStore(Tag.TypeStructPacked.Flags, flags_ptr, flags, .release);
                }
                return flags.field_inits_wip;
            },
            .auto, .@"extern" => {
                const flags_ptr = s.flagsPtr(ip);
                var flags = flags_ptr.*;
                defer {
                    flags.field_inits_wip = true;
                    @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
                }
                return flags.field_inits_wip;
            },
        }
    }

    /// Clears the field-inits-wip flag (in the layout-appropriate flags word).
    pub fn clearInitsWip(s: LoadedStructType, ip: *InternPool) void {
        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        switch (s.layout) {
            .@"packed" => {
                const flags_ptr = s.packedFlagsPtr(ip);
                var flags = flags_ptr.*;
                flags.field_inits_wip = false;
                @atomicStore(Tag.TypeStructPacked.Flags, flags_ptr, flags, .release);
            },
            .auto, .@"extern" => {
                const flags_ptr = s.flagsPtr(ip);
                var flags = flags_ptr.*;
                flags.field_inits_wip = false;
                @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
            },
        }
    }

    /// Sets the fully-resolved flag, returning its previous value. Packed
    /// structs are considered always fully resolved.
    pub fn setFullyResolved(s: LoadedStructType, ip: *InternPool) bool {
        if (s.layout == .@"packed") return true;

        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = s.flagsPtr(ip);
        var flags = flags_ptr.*;
        defer {
            flags.fully_resolved = true;
            @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
        }
        return flags.fully_resolved;
    }

    /// Clears the fully-resolved flag.
    /// Asserts (via `flagsPtr`) that the struct is not packed.
    pub fn clearFullyResolved(s: LoadedStructType, ip: *InternPool) void {
        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        const flags_ptr = s.flagsPtr(ip);
        var flags = flags_ptr.*;
        flags.fully_resolved = false;
        @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
    }

    /// The returned pointer expires with any addition to the `InternPool`.
    /// Asserts the struct is not packed.
    fn sizePtr(s: LoadedStructType, ip: *const InternPool) *u32 {
        assert(s.layout != .@"packed");
        const extra = ip.getLocalShared(s.tid).extra.acquire();
        const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?;
        return @ptrCast(&extra.view().items(.@"0")[s.extra_index + size_field_index]);
    }

    /// Atomically reads the struct's size word. Asserts the struct is not packed.
    pub fn sizeUnordered(s: LoadedStructType, ip: *const InternPool) u32 {
        return @atomicLoad(u32, s.sizePtr(ip), .unordered);
    }

    /// The backing integer type of the packed struct. Whether zig chooses
    /// this type or the user specifies it, it is stored here. This will be
    /// set to `none` until the layout is resolved.
    /// Asserts the struct is packed.
    fn backingIntTypePtr(s: LoadedStructType, ip: *const InternPool) *Index {
        assert(s.layout == .@"packed");
        const extra = ip.getLocalShared(s.tid).extra.acquire();
        const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?;
        return @ptrCast(&extra.view().items(.@"0")[s.extra_index + field_index]);
    }

    /// Atomically reads the backing integer type. Asserts the struct is packed.
    pub fn backingIntTypeUnordered(s: LoadedStructType, ip: *const InternPool) Index {
        return @atomicLoad(Index, s.backingIntTypePtr(ip), .unordered);
    }

    /// Stores the backing integer type. Asserts (via `backingIntTypePtr`)
    /// that the struct is packed.
    pub fn setBackingIntType(s: LoadedStructType, ip: *InternPool, backing_int_ty: Index) void {
        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        @atomicStore(Index, s.backingIntTypePtr(ip), backing_int_ty, .release);
    }

    /// Asserts the struct is not packed.
    pub fn setZirIndex(s: LoadedStructType, ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
        assert(s.layout != .@"packed");
        const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?;
        // NOTE(review): plain (non-atomic, unlocked) store, unlike the other
        // mutators in this type — confirm callers are single-threaded here.
        ip.extra_.items[s.extra_index + field_index] = @intFromEnum(new_zir_index);
    }

    /// Field types are resolved in order; the last slot being set implies all
    /// are. Zero-field structs trivially have resolved field types.
    pub fn haveFieldTypes(s: LoadedStructType, ip: *const InternPool) bool {
        const types = s.field_types.get(ip);
        return types.len == 0 or types[types.len - 1] != .none;
    }

    /// Returns whether field default inits have been resolved.
    pub fn haveFieldInits(s: LoadedStructType, ip: *const InternPool) bool {
        return switch (s.layout) {
            .@"packed" => s.packedFlagsUnordered(ip).inits_resolved,
            .auto, .@"extern" => s.flagsUnordered(ip).inits_resolved,
        };
    }

    /// Marks field default inits as resolved (in the layout-appropriate flags word).
    pub fn setHaveFieldInits(s: LoadedStructType, ip: *InternPool) void {
        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        switch (s.layout) {
            .@"packed" => {
                const flags_ptr = s.packedFlagsPtr(ip);
                var flags = flags_ptr.*;
                flags.inits_resolved = true;
                @atomicStore(Tag.TypeStructPacked.Flags, flags_ptr, flags, .release);
            },
            .auto, .@"extern" => {
                const flags_ptr = s.flagsPtr(ip);
                var flags = flags_ptr.*;
                flags.inits_resolved = true;
                @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
            },
        }
    }

    /// Returns whether layout has been resolved. For packed structs this is
    /// signaled by the backing integer type being set.
    pub fn haveLayout(s: LoadedStructType, ip: *const InternPool) bool {
        return switch (s.layout) {
            .@"packed" => s.backingIntTypeUnordered(ip) != .none,
            .auto, .@"extern" => s.flagsUnordered(ip).layout_resolved,
        };
    }

    /// Publishes the resolved layout: stores the size, then alignment and the
    /// `layout_resolved` flag in one .release flags store so a reader that
    /// observes the flag also observes the size.
    pub fn setLayoutResolved(s: LoadedStructType, ip: *InternPool, size: u32, alignment: Alignment) void {
        const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex;
        extra_mutex.lock();
        defer extra_mutex.unlock();

        @atomicStore(u32, s.sizePtr(ip), size, .unordered);
        const flags_ptr = s.flagsPtr(ip);
        var flags = flags_ptr.*;
        flags.alignment = alignment;
        flags.layout_resolved = true;
        @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
    }

    /// Only `.auto` layout may reorder fields at runtime.
    pub fn hasReorderedFields(s: LoadedStructType) bool {
        return s.layout == .auto;
    }

    /// Forward iterator over runtime (non-comptime, non-omitted) fields in
    /// memory-layout order. See `iterateRuntimeOrder`.
    pub const RuntimeOrderIterator = struct {
        ip: *InternPool,
        field_index: u32,
        struct_type: InternPool.LoadedStructType,

        pub fn next(it: *@This()) ?u32 {
            var i = it.field_index;

            if (i >= it.struct_type.field_types.len)
                return null;

            // Reordered (.auto) layout: consult the runtime_order table.
            if (it.struct_type.hasReorderedFields()) {
                it.field_index += 1;
                return it.struct_type.runtime_order.get(it.ip)[i].toInt();
            }

            // Declaration-order layout: skip comptime fields.
            while (it.struct_type.fieldIsComptime(it.ip, i)) {
                i += 1;
                if (i >= it.struct_type.field_types.len)
                    return null;
            }

            it.field_index = i + 1;
            return i;
        }
    };

    /// Iterates over non-comptime fields in the order they are laid out in memory at runtime.
    /// May or may not include zero-bit fields.
    /// Asserts the struct is not packed.
    pub fn iterateRuntimeOrder(s: LoadedStructType, ip: *InternPool) RuntimeOrderIterator {
        assert(s.layout != .@"packed");
        return .{
            .ip = ip,
            .field_index = 0,
            .struct_type = s,
        };
    }

    /// Reverse counterpart of `RuntimeOrderIterator`.
    /// See `iterateRuntimeOrderReverse`.
    pub const ReverseRuntimeOrderIterator = struct {
        ip: *InternPool,
        last_index: u32,
        struct_type: InternPool.LoadedStructType,

        pub fn next(it: *@This()) ?u32 {
            if (it.last_index == 0)
                return null;

            if (it.struct_type.hasReorderedFields()) {
                it.last_index -= 1;
                const order = it.struct_type.runtime_order.get(it.ip);
                while (order[it.last_index] == .omitted) {
                    it.last_index -= 1;
                    if (it.last_index == 0)
                        return null;
                }
                return order[it.last_index].toInt();
            }

            it.last_index -= 1;
            while (it.struct_type.fieldIsComptime(it.ip, it.last_index)) {
                it.last_index -= 1;
                if (it.last_index == 0)
                    return null;
            }

            return it.last_index;
        }
    };

    /// Iterates runtime fields in reverse memory-layout order.
    /// Asserts the struct is not packed.
    pub fn iterateRuntimeOrderReverse(s: LoadedStructType, ip: *InternPool) ReverseRuntimeOrderIterator {
        assert(s.layout != .@"packed");
        return .{
            .ip = ip,
            .last_index = s.field_types.len,
            .struct_type = s,
        };
    }
};
 4181
/// Loads a `LoadedStructType` view for `index`, which must refer to a
/// `type_struct`, `type_struct_packed`, or `type_struct_packed_inits` item.
/// Decodes the fixed-size payload header, then walks the variable-length
/// trailing data in order, recording where each trailing slice begins.
/// The returned slices reference the pool's `extra` storage by position
/// (tid + start + len), so they stay valid as new items are interned.
pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
    const unwrapped_index = index.unwrap(ip);
    const extra_list = unwrapped_index.getExtra(ip);
    const extra_items = extra_list.view().items(.@"0");
    const item = unwrapped_index.getItem(ip);
    switch (item.tag) {
        .type_struct => {
            // Fixed fields of `Tag.TypeStruct`, addressed by field offset.
            const name: NullTerminatedString = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "name").?]);
            const name_nav: Nav.Index.Optional = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "name_nav").?]);
            const namespace: NamespaceIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "namespace").?]);
            const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?]);
            const fields_len = extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "fields_len").?];
            // Flags may be updated in place by other threads, hence the
            // atomic load.
            const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .unordered));
            // Trailing data begins immediately after the fixed fields.
            var extra_index = item.data + @as(u32, @typeInfo(Tag.TypeStruct).@"struct".fields.len);
            // trailing: captures_len: u32 (only present when any_captures)
            const captures_len = if (flags.any_captures) c: {
                const len = extra_list.view().items(.@"0")[extra_index];
                extra_index += 1;
                break :c len;
            } else 0;
            // trailing: captures: [captures_len]CaptureValue
            const captures: CaptureValue.Slice = .{
                .tid = unwrapped_index.tid,
                .start = extra_index,
                .len = captures_len,
            };
            extra_index += captures_len;
            if (flags.is_reified) {
                extra_index += 2; // type_hash: PackedU64
            }
            // trailing: field_types: [fields_len]Index
            const field_types: Index.Slice = .{
                .tid = unwrapped_index.tid,
                .start = extra_index,
                .len = fields_len,
            };
            extra_index += fields_len;
            // trailing: names_map: u32, then names: [fields_len]NullTerminatedString
            const names_map: OptionalMapIndex, const names = n: {
                const names_map: OptionalMapIndex = @enumFromInt(extra_list.view().items(.@"0")[extra_index]);
                extra_index += 1;
                const names: NullTerminatedString.Slice = .{
                    .tid = unwrapped_index.tid,
                    .start = extra_index,
                    .len = fields_len,
                };
                extra_index += fields_len;
                break :n .{ names_map, names };
            };
            // trailing: inits: [fields_len]Index (only when any_default_inits)
            const inits: Index.Slice = if (flags.any_default_inits) i: {
                const inits: Index.Slice = .{
                    .tid = unwrapped_index.tid,
                    .start = extra_index,
                    .len = fields_len,
                };
                extra_index += fields_len;
                break :i inits;
            } else Index.Slice.empty;
            // trailing: aligns, packed four per u32 (only when any_aligned_fields)
            const aligns: Alignment.Slice = if (flags.any_aligned_fields) a: {
                const a: Alignment.Slice = .{
                    .tid = unwrapped_index.tid,
                    .start = extra_index,
                    .len = fields_len,
                };
                extra_index += std.math.divCeil(u32, fields_len, 4) catch unreachable;
                break :a a;
            } else Alignment.Slice.empty;
            // trailing: comptime bits, one bit per field packed into u32s
            // (only when any_comptime_fields)
            const comptime_bits: LoadedStructType.ComptimeBits = if (flags.any_comptime_fields) c: {
                const len = std.math.divCeil(u32, fields_len, 32) catch unreachable;
                const c: LoadedStructType.ComptimeBits = .{
                    .tid = unwrapped_index.tid,
                    .start = extra_index,
                    .len = len,
                };
                extra_index += len;
                break :c c;
            } else LoadedStructType.ComptimeBits.empty;
            // trailing: runtime field order (extern structs keep
            // declaration order, so it is omitted for them)
            const runtime_order: LoadedStructType.RuntimeOrder.Slice = if (!flags.is_extern) ro: {
                const ro: LoadedStructType.RuntimeOrder.Slice = .{
                    .tid = unwrapped_index.tid,
                    .start = extra_index,
                    .len = fields_len,
                };
                extra_index += fields_len;
                break :ro ro;
            } else LoadedStructType.RuntimeOrder.Slice.empty;
            // trailing: field offsets (always present)
            const offsets: LoadedStructType.Offsets = o: {
                const o: LoadedStructType.Offsets = .{
                    .tid = unwrapped_index.tid,
                    .start = extra_index,
                    .len = fields_len,
                };
                extra_index += fields_len;
                break :o o;
            };
            return .{
                .tid = unwrapped_index.tid,
                .extra_index = item.data,
                .name = name,
                .name_nav = name_nav,
                .namespace = namespace,
                .zir_index = zir_index,
                .layout = if (flags.is_extern) .@"extern" else .auto,
                .field_names = names,
                .field_types = field_types,
                .field_inits = inits,
                .field_aligns = aligns,
                .runtime_order = runtime_order,
                .comptime_bits = comptime_bits,
                .offsets = offsets,
                .names_map = names_map,
                .captures = captures,
            };
        },
        .type_struct_packed, .type_struct_packed_inits => {
            // Fixed fields of `Tag.TypeStructPacked`.
            const name: NullTerminatedString = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "name").?]);
            const name_nav: Nav.Index.Optional = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "name_nav").?]);
            const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "zir_index").?]);
            const fields_len = extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "fields_len").?];
            const namespace: NamespaceIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").?]);
            const names_map: MapIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "names_map").?]);
            // Atomic for the same reason as the `type_struct` case above.
            const flags: Tag.TypeStructPacked.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?], .unordered));
            var extra_index = item.data + @as(u32, @typeInfo(Tag.TypeStructPacked).@"struct".fields.len);
            // Only the `_inits` variant stores default field inits.
            const has_inits = item.tag == .type_struct_packed_inits;
            // trailing: captures_len: u32 (only present when any_captures)
            const captures_len = if (flags.any_captures) c: {
                const len = extra_list.view().items(.@"0")[extra_index];
                extra_index += 1;
                break :c len;
            } else 0;
            // trailing: captures: [captures_len]CaptureValue
            const captures: CaptureValue.Slice = .{
                .tid = unwrapped_index.tid,
                .start = extra_index,
                .len = captures_len,
            };
            extra_index += captures_len;
            if (flags.is_reified) {
                extra_index += 2; // PackedU64
            }
            // trailing: field_types: [fields_len]Index
            const field_types: Index.Slice = .{
                .tid = unwrapped_index.tid,
                .start = extra_index,
                .len = fields_len,
            };
            extra_index += fields_len;
            // trailing: field_names: [fields_len]NullTerminatedString
            const field_names: NullTerminatedString.Slice = .{
                .tid = unwrapped_index.tid,
                .start = extra_index,
                .len = fields_len,
            };
            extra_index += fields_len;
            // trailing: inits: [fields_len]Index (type_struct_packed_inits only)
            const field_inits: Index.Slice = if (has_inits) inits: {
                const i: Index.Slice = .{
                    .tid = unwrapped_index.tid,
                    .start = extra_index,
                    .len = fields_len,
                };
                extra_index += fields_len;
                break :inits i;
            } else Index.Slice.empty;
            // Packed structs have no per-field alignment, runtime order,
            // comptime fields, or byte offsets.
            return .{
                .tid = unwrapped_index.tid,
                .extra_index = item.data,
                .name = name,
                .name_nav = name_nav,
                .namespace = namespace,
                .zir_index = zir_index,
                .layout = .@"packed",
                .field_names = field_names,
                .field_types = field_types,
                .field_inits = field_inits,
                .field_aligns = Alignment.Slice.empty,
                .runtime_order = LoadedStructType.RuntimeOrder.Slice.empty,
                .comptime_bits = LoadedStructType.ComptimeBits.empty,
                .offsets = LoadedStructType.Offsets.empty,
                .names_map = names_map.toOptional(),
                .captures = captures,
            };
        },
        else => unreachable,
    }
}
 4359
/// A decoded view of an interned enum type; see `loadEnumType`.
pub const LoadedEnumType = struct {
    // TODO: the non-fqn will be needed by the new dwarf structure
    /// The name of this enum type.
    name: NullTerminatedString,
    /// Represents the declarations inside this enum.
    namespace: NamespaceIndex,
    /// If this is a declared type with the `.parent` name strategy, this is the `Nav` it was named after.
    /// Otherwise, this is `.none`.
    name_nav: Nav.Index.Optional,
    /// An integer type which is used for the numerical value of the enum.
    /// This field is present regardless of whether the enum has an
    /// explicitly provided tag type or auto-numbered.
    tag_ty: Index,
    /// Set of field names in declaration order.
    names: NullTerminatedString.Slice,
    /// Maps integer tag value to field index.
    /// Entries are in declaration order, same as `fields`.
    /// If this is empty, it means the enum tags are auto-numbered.
    values: Index.Slice,
    tag_mode: TagMode,
    /// Maps field name to field index; used by `nameIndex`.
    names_map: MapIndex,
    /// This is guaranteed to not be `.none` if explicit values are provided.
    values_map: OptionalMapIndex,
    /// This is `none` only if this is a generated tag type.
    zir_index: TrackedInst.Index.Optional,
    /// Values captured from the surrounding scope, if any.
    captures: CaptureValue.Slice,

    pub const TagMode = enum {
        /// The integer tag type was auto-numbered by zig.
        auto,
        /// The integer tag type was provided by the enum declaration, and the enum
        /// is exhaustive.
        explicit,
        /// The integer tag type was provided by the enum declaration, and the enum
        /// is non-exhaustive.
        nonexhaustive,
    };

    /// Look up field index based on field name.
    /// Returns `null` when no field has the given name.
    pub fn nameIndex(self: LoadedEnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
        const map = self.names_map.get(ip);
        const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) };
        const field_index = map.getIndexAdapted(name, adapter) orelse return null;
        return @intCast(field_index);
    }

    /// Look up field index based on tag value.
    /// Asserts that `values_map` is not `none`.
    /// This function returns `null` when `tag_val` does not have the
    /// integer tag type of the enum.
    pub fn tagValueIndex(self: LoadedEnumType, ip: *const InternPool, tag_val: Index) ?u32 {
        assert(tag_val != .none);
        // TODO: we should probably decide a single interface for this function, but currently
        // it's being called with both tag values and underlying ints. Fix this!
        const int_tag_val = switch (ip.indexToKey(tag_val)) {
            .enum_tag => |enum_tag| enum_tag.int,
            .int => tag_val,
            else => unreachable,
        };
        // Explicit values: consult the value-to-index map.
        if (self.values_map.unwrap()) |values_map| {
            const map = values_map.get(ip);
            const adapter: Index.Adapter = .{ .indexes = self.values.get(ip) };
            const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null;
            return @intCast(field_index);
        }
        // Auto-numbered enum. Convert `int_tag_val` to field index.
        const field_index = switch (ip.indexToKey(int_tag_val).int.storage) {
            inline .u64, .i64 => |x| std.math.cast(u32, x) orelse return null,
            .big_int => |x| x.toInt(u32) catch return null,
            // Lazy values never appear as enum tag ints here.
            .lazy_align, .lazy_size => unreachable,
        };
        return if (field_index < self.names.len) field_index else null;
    }
};
 4434
/// Loads a `LoadedEnumType` view for `index`, which must refer to a
/// `type_enum_auto`, `type_enum_explicit`, or `type_enum_nonexhaustive`
/// item. Returned slices reference the pool's `extra` storage by position.
pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType {
    const unwrapped_index = index.unwrap(ip);
    const extra_list = unwrapped_index.getExtra(ip);
    const item = unwrapped_index.getItem(ip);
    const tag_mode: LoadedEnumType.TagMode = switch (item.tag) {
        .type_enum_auto => {
            // Auto-numbered enums use the `EnumAuto` payload, which stores
            // no explicit values; this prong returns directly.
            const extra = extraDataTrail(extra_list, EnumAuto, item.data);
            var extra_index: u32 = @intCast(extra.end);
            // A `none` zir_index means a generated tag type; an extra
            // trailing word follows in that case.
            if (extra.data.zir_index == .none) {
                extra_index += 1; // owner_union
            }
            // captures_len == maxInt(u32) is the sentinel for a reified
            // type: no captures, but a trailing type hash instead.
            const captures_len = if (extra.data.captures_len == std.math.maxInt(u32)) c: {
                extra_index += 2; // type_hash: PackedU64
                break :c 0;
            } else extra.data.captures_len;
            return .{
                .name = extra.data.name,
                .name_nav = extra.data.name_nav,
                .namespace = extra.data.namespace,
                .tag_ty = extra.data.int_tag_type,
                // Trailing layout: captures first, then field names.
                .names = .{
                    .tid = unwrapped_index.tid,
                    .start = extra_index + captures_len,
                    .len = extra.data.fields_len,
                },
                .values = Index.Slice.empty,
                .tag_mode = .auto,
                .names_map = extra.data.names_map,
                .values_map = .none,
                .zir_index = extra.data.zir_index,
                .captures = .{
                    .tid = unwrapped_index.tid,
                    .start = extra_index,
                    .len = captures_len,
                },
            };
        },
        .type_enum_explicit => .explicit,
        .type_enum_nonexhaustive => .nonexhaustive,
        else => unreachable,
    };
    // Explicit and non-exhaustive enums share the `EnumExplicit` payload.
    const extra = extraDataTrail(extra_list, EnumExplicit, item.data);
    var extra_index: u32 = @intCast(extra.end);
    // Same trailing-word and sentinel handling as the auto case above.
    if (extra.data.zir_index == .none) {
        extra_index += 1; // owner_union
    }
    const captures_len = if (extra.data.captures_len == std.math.maxInt(u32)) c: {
        extra_index += 2; // type_hash: PackedU64
        break :c 0;
    } else extra.data.captures_len;
    return .{
        .name = extra.data.name,
        .name_nav = extra.data.name_nav,
        .namespace = extra.data.namespace,
        .tag_ty = extra.data.int_tag_type,
        // Trailing layout: captures, then names, then (optionally) values.
        .names = .{
            .tid = unwrapped_index.tid,
            .start = extra_index + captures_len,
            .len = extra.data.fields_len,
        },
        // Values trail the names and are only stored when a values map
        // exists (i.e. values were explicitly provided).
        .values = .{
            .tid = unwrapped_index.tid,
            .start = extra_index + captures_len + extra.data.fields_len,
            .len = if (extra.data.values_map != .none) extra.data.fields_len else 0,
        },
        .tag_mode = tag_mode,
        .names_map = extra.data.names_map,
        .values_map = extra.data.values_map,
        .zir_index = extra.data.zir_index,
        .captures = .{
            .tid = unwrapped_index.tid,
            .start = extra_index,
            .len = captures_len,
        },
    };
}
 4511
 4512/// Note that this type doubles as the payload for `Tag.type_opaque`.
 4513pub const LoadedOpaqueType = struct {
 4514    /// Contains the declarations inside this opaque.
 4515    namespace: NamespaceIndex,
 4516    // TODO: the non-fqn will be needed by the new dwarf structure
 4517    /// The name of this opaque type.
 4518    name: NullTerminatedString,
 4519    /// If this is a declared type with the `.parent` name strategy, this is the `Nav` it was named after.
 4520    /// Otherwise, this is `.none`.
 4521    name_nav: Nav.Index.Optional,
 4522    /// Index of the `opaque_decl` or `reify` instruction.
 4523    zir_index: TrackedInst.Index,
 4524    captures: CaptureValue.Slice,
 4525};
 4526
 4527pub fn loadOpaqueType(ip: *const InternPool, index: Index) LoadedOpaqueType {
 4528    const unwrapped_index = index.unwrap(ip);
 4529    const item = unwrapped_index.getItem(ip);
 4530    assert(item.tag == .type_opaque);
 4531    const extra = extraDataTrail(unwrapped_index.getExtra(ip), Tag.TypeOpaque, item.data);
 4532    const captures_len = if (extra.data.captures_len == std.math.maxInt(u32))
 4533        0
 4534    else
 4535        extra.data.captures_len;
 4536    return .{
 4537        .name = extra.data.name,
 4538        .name_nav = extra.data.name_nav,
 4539        .namespace = extra.data.namespace,
 4540        .zir_index = extra.data.zir_index,
 4541        .captures = .{
 4542            .tid = unwrapped_index.tid,
 4543            .start = extra.end,
 4544            .len = captures_len,
 4545        },
 4546    };
 4547}
 4548
/// One entry in a thread-local `items` list: a tag plus 32 bits of data
/// whose meaning depends on the tag.
pub const Item = struct {
    tag: Tag,
    /// The doc comments on the respective Tag explain how to interpret this.
    data: u32,
};
 4554
 4555/// Represents an index into `map`. It represents the canonical index
 4556/// of a `Value` within this `InternPool`. The values are typed.
 4557/// Two values which have the same type can be equality compared simply
 4558/// by checking if their indexes are equal, provided they are both in
 4559/// the same `InternPool`.
 4560/// When adding a tag to this enum, consider adding a corresponding entry to
 4561/// `primitives` in AstGen.zig.
 4562pub const Index = enum(u32) {
 4563    pub const first_type: Index = .u0_type;
 4564    pub const last_type: Index = .empty_tuple_type;
 4565    pub const first_value: Index = .undef;
 4566    pub const last_value: Index = .empty_tuple;
 4567
 4568    u0_type,
 4569    i0_type,
 4570    u1_type,
 4571    u8_type,
 4572    i8_type,
 4573    u16_type,
 4574    i16_type,
 4575    u29_type,
 4576    u32_type,
 4577    i32_type,
 4578    u64_type,
 4579    i64_type,
 4580    u80_type,
 4581    u128_type,
 4582    i128_type,
 4583    u256_type,
 4584    usize_type,
 4585    isize_type,
 4586    c_char_type,
 4587    c_short_type,
 4588    c_ushort_type,
 4589    c_int_type,
 4590    c_uint_type,
 4591    c_long_type,
 4592    c_ulong_type,
 4593    c_longlong_type,
 4594    c_ulonglong_type,
 4595    c_longdouble_type,
 4596    f16_type,
 4597    f32_type,
 4598    f64_type,
 4599    f80_type,
 4600    f128_type,
 4601    anyopaque_type,
 4602    bool_type,
 4603    void_type,
 4604    type_type,
 4605    anyerror_type,
 4606    comptime_int_type,
 4607    comptime_float_type,
 4608    noreturn_type,
 4609    anyframe_type,
 4610    null_type,
 4611    undefined_type,
 4612    enum_literal_type,
 4613
 4614    ptr_usize_type,
 4615    ptr_const_comptime_int_type,
 4616    manyptr_u8_type,
 4617    manyptr_const_u8_type,
 4618    manyptr_const_u8_sentinel_0_type,
 4619    slice_const_u8_type,
 4620    slice_const_u8_sentinel_0_type,
 4621
 4622    manyptr_const_slice_const_u8_type,
 4623    slice_const_slice_const_u8_type,
 4624
 4625    optional_type_type,
 4626    manyptr_const_type_type,
 4627    slice_const_type_type,
 4628
 4629    vector_8_i8_type,
 4630    vector_16_i8_type,
 4631    vector_32_i8_type,
 4632    vector_64_i8_type,
 4633    vector_1_u8_type,
 4634    vector_2_u8_type,
 4635    vector_4_u8_type,
 4636    vector_8_u8_type,
 4637    vector_16_u8_type,
 4638    vector_32_u8_type,
 4639    vector_64_u8_type,
 4640    vector_2_i16_type,
 4641    vector_4_i16_type,
 4642    vector_8_i16_type,
 4643    vector_16_i16_type,
 4644    vector_32_i16_type,
 4645    vector_4_u16_type,
 4646    vector_8_u16_type,
 4647    vector_16_u16_type,
 4648    vector_32_u16_type,
 4649    vector_2_i32_type,
 4650    vector_4_i32_type,
 4651    vector_8_i32_type,
 4652    vector_16_i32_type,
 4653    vector_4_u32_type,
 4654    vector_8_u32_type,
 4655    vector_16_u32_type,
 4656    vector_2_i64_type,
 4657    vector_4_i64_type,
 4658    vector_8_i64_type,
 4659    vector_2_u64_type,
 4660    vector_4_u64_type,
 4661    vector_8_u64_type,
 4662    vector_1_u128_type,
 4663    vector_2_u128_type,
 4664    vector_1_u256_type,
 4665    vector_4_f16_type,
 4666    vector_8_f16_type,
 4667    vector_16_f16_type,
 4668    vector_32_f16_type,
 4669    vector_2_f32_type,
 4670    vector_4_f32_type,
 4671    vector_8_f32_type,
 4672    vector_16_f32_type,
 4673    vector_2_f64_type,
 4674    vector_4_f64_type,
 4675    vector_8_f64_type,
 4676
 4677    optional_noreturn_type,
 4678    anyerror_void_error_union_type,
 4679    /// Used for the inferred error set of inline/comptime function calls.
 4680    adhoc_inferred_error_set_type,
 4681    /// Represents a type which is unknown.
 4682    /// This is used in functions to represent generic parameter/return types, and
 4683    /// during semantic analysis to represent unknown result types (i.e. where AstGen
 4684    /// thought we would have a result type, but we do not).
 4685    generic_poison_type,
 4686    /// `@TypeOf(.{})`; a tuple with zero elements.
 4687    /// This is not the same as `struct {}`, since that is a struct rather than a tuple.
 4688    empty_tuple_type,
 4689
 4690    /// `undefined` (untyped)
 4691    undef,
 4692    /// `@as(bool, undefined)`
 4693    undef_bool,
 4694    /// `@as(usize, undefined)`
 4695    undef_usize,
 4696    /// `@as(u1, undefined)`
 4697    undef_u1,
 4698    /// `0` (comptime_int)
 4699    zero,
 4700    /// `@as(usize, 0)`
 4701    zero_usize,
 4702    /// `@as(u1, 0)`
 4703    zero_u1,
 4704    /// `@as(u8, 0)`
 4705    zero_u8,
 4706    /// `1` (comptime_int)
 4707    one,
 4708    /// `@as(usize, 1)`
 4709    one_usize,
 4710    /// `@as(u1, 1)`
 4711    one_u1,
 4712    /// `@as(u8, 1)`
 4713    one_u8,
 4714    /// `@as(u8, 4)`
 4715    four_u8,
 4716    /// `-1` (comptime_int)
 4717    negative_one,
 4718    /// `{}`
 4719    void_value,
 4720    /// `unreachable` (noreturn type)
 4721    unreachable_value,
 4722    /// `null` (untyped)
 4723    null_value,
 4724    /// `true`
 4725    bool_true,
 4726    /// `false`
 4727    bool_false,
 4728    /// `.{}`
 4729    empty_tuple,
 4730
 4731    /// Used by Air/Sema only.
 4732    none = std.math.maxInt(u32),
 4733
 4734    _,
 4735
    /// An array of `Index` existing within the `extra` array.
    /// This type exists to provide a struct with lifetime that is
    /// not invalidated when items are added to the `InternPool`:
    /// the elements are addressed by (tid, start, len) rather than by
    /// pointer, and re-fetched from the owning thread's list on `get`.
    pub const Slice = struct {
        tid: Zcu.PerThread.Id,
        start: u32,
        len: u32,

        pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 };

        /// Resolves the slice against the owning thread's current `extra`
        /// list. The `acquire` synchronizes with publication of that list.
        pub fn get(slice: Slice, ip: *const InternPool) []Index {
            const extra = ip.getLocalShared(slice.tid).extra.acquire();
            return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]);
        }
    };
 4751
    /// Used for a map of `Index` values to the index within a list of `Index` values.
    /// Hash-map adapter: keys are `Index` values, map entries are positions
    /// into the `indexes` list.
    const Adapter = struct {
        indexes: []const Index,

        /// True when key `a` equals the `Index` stored at `b_map_index`.
        pub fn eql(ctx: @This(), a: Index, b_void: void, b_map_index: usize) bool {
            _ = b_void;
            return a == ctx.indexes[b_map_index];
        }

        /// Hashes the raw 32-bit enum value of the key.
        pub fn hash(ctx: @This(), a: Index) u32 {
            _ = ctx;
            return std.hash.int(@intFromEnum(a));
        }
    };
 4766
    /// An `Index` decomposed into its owning thread id and the position
    /// within that thread's local `items` list.
    const Unwrapped = struct {
        tid: Zcu.PerThread.Id,
        index: u32,

        /// Recombines `tid` and `index` into a packed `Index`.
        /// Asserts both components fit within their bit ranges.
        fn wrap(unwrapped: Unwrapped, ip: *const InternPool) Index {
            assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask());
            assert(unwrapped.index <= ip.getIndexMask(u30));
            return @enumFromInt(@shlExact(@as(u32, @intFromEnum(unwrapped.tid)), ip.tid_shift_30) |
                unwrapped.index);
        }

        /// Returns the owning thread's `extra` list; `acquire` synchronizes
        /// with publication of a replacement list.
        pub fn getExtra(unwrapped: Unwrapped, ip: *const InternPool) Local.Extra {
            return ip.getLocalShared(unwrapped.tid).extra.acquire();
        }

        /// Loads the item's tag and data. The tag is loaded with `acquire`
        /// so that, once a tag is observed, the data it describes is
        /// visible too.
        pub fn getItem(unwrapped: Unwrapped, ip: *const InternPool) Item {
            const item_ptr = unwrapped.itemPtr(ip);
            const tag = @atomicLoad(Tag, item_ptr.tag_ptr, .acquire);
            return .{ .tag = tag, .data = item_ptr.data_ptr.* };
        }

        /// Loads only the item's tag (atomically, as in `getItem`).
        pub fn getTag(unwrapped: Unwrapped, ip: *const InternPool) Tag {
            const item_ptr = unwrapped.itemPtr(ip);
            return @atomicLoad(Tag, item_ptr.tag_ptr, .acquire);
        }

        /// Loads only the item's data word.
        pub fn getData(unwrapped: Unwrapped, ip: *const InternPool) u32 {
            return unwrapped.getItem(ip).data;
        }

        /// Pointers into the owning thread's `items` multi-array for one item.
        const ItemPtr = struct {
            tag_ptr: *Tag,
            data_ptr: *u32,
        };
        /// Locates the tag/data pointers for this item within the owning
        /// thread's `items` list.
        fn itemPtr(unwrapped: Unwrapped, ip: *const InternPool) ItemPtr {
            const slice = ip.getLocalShared(unwrapped.tid).items.acquire().view().slice();
            return .{
                .tag_ptr = &slice.items(.tag)[unwrapped.index],
                .data_ptr = &slice.items(.data)[unwrapped.index],
            };
        }

        const debug_state = InternPool.debug_state;
    };
    /// Splits an `Index` into its owning thread id and per-thread index.
    /// In single-threaded builds the entire 32-bit value is the index and
    /// the tid is always `.main`; otherwise the tid occupies the top bits
    /// per the cached `tid_shift_30`.
    pub fn unwrap(index: Index, ip: *const InternPool) Unwrapped {
        return if (single_threaded) .{
            .tid = .main,
            .index = @intFromEnum(index),
        } else .{
            .tid = @enumFromInt(@intFromEnum(index) >> ip.tid_shift_30 & ip.getTidMask()),
            .index = @intFromEnum(index) & ip.getIndexMask(u30),
        };
    }
 4820
 4821    /// This function is used in the debugger pretty formatters in tools/ to fetch the
 4822    /// Tag to encoding mapping to facilitate fancy debug printing for this type.
 4823    fn dbHelper(self: *Index, tag_to_encoding_map: *struct {
 4824        const DataIsIndex = struct { data: Index };
 4825        const DataIsExtraIndexOfEnumExplicit = struct {
 4826            const @"data.fields_len" = opaque {};
 4827            data: *EnumExplicit,
 4828            @"trailing.names.len": *@"data.fields_len",
 4829            @"trailing.values.len": *@"data.fields_len",
 4830            trailing: struct {
 4831                names: []NullTerminatedString,
 4832                values: []Index,
 4833            },
 4834        };
 4835        const DataIsExtraIndexOfTypeTuple = struct {
 4836            const @"data.fields_len" = opaque {};
 4837            data: *TypeTuple,
 4838            @"trailing.types.len": *@"data.fields_len",
 4839            @"trailing.values.len": *@"data.fields_len",
 4840            trailing: struct {
 4841                types: []Index,
 4842                values: []Index,
 4843            },
 4844        };
 4845
 4846        removed: void,
 4847        type_int_signed: struct { data: u32 },
 4848        type_int_unsigned: struct { data: u32 },
 4849        type_array_big: struct { data: *Array },
 4850        type_array_small: struct { data: *Vector },
 4851        type_vector: struct { data: *Vector },
 4852        type_pointer: struct { data: *Tag.TypePointer },
 4853        type_slice: DataIsIndex,
 4854        type_optional: DataIsIndex,
 4855        type_anyframe: DataIsIndex,
 4856        type_error_union: struct { data: *Key.ErrorUnionType },
 4857        type_anyerror_union: DataIsIndex,
 4858        type_error_set: struct {
 4859            const @"data.names_len" = opaque {};
 4860            data: *Tag.ErrorSet,
 4861            @"trailing.names.len": *@"data.names_len",
 4862            trailing: struct { names: []NullTerminatedString },
 4863        },
 4864        type_inferred_error_set: DataIsIndex,
 4865        type_enum_auto: struct {
 4866            const @"data.fields_len" = opaque {};
 4867            data: *EnumAuto,
 4868            @"trailing.names.len": *@"data.fields_len",
 4869            trailing: struct { names: []NullTerminatedString },
 4870        },
 4871        type_enum_explicit: DataIsExtraIndexOfEnumExplicit,
 4872        type_enum_nonexhaustive: DataIsExtraIndexOfEnumExplicit,
 4873        simple_type: void,
 4874        type_opaque: struct { data: *Tag.TypeOpaque },
 4875        type_struct: struct { data: *Tag.TypeStruct },
 4876        type_struct_packed: struct { data: *Tag.TypeStructPacked },
 4877        type_struct_packed_inits: struct { data: *Tag.TypeStructPacked },
 4878        type_tuple: DataIsExtraIndexOfTypeTuple,
 4879        type_union: struct { data: *Tag.TypeUnion },
 4880        type_function: struct {
 4881            const @"data.flags.has_comptime_bits" = opaque {};
 4882            const @"data.flags.has_noalias_bits" = opaque {};
 4883            const @"data.params_len" = opaque {};
 4884            data: *Tag.TypeFunction,
 4885            @"trailing.comptime_bits.len": *@"data.flags.has_comptime_bits",
 4886            @"trailing.noalias_bits.len": *@"data.flags.has_noalias_bits",
 4887            @"trailing.param_types.len": *@"data.params_len",
 4888            trailing: struct { comptime_bits: []u32, noalias_bits: []u32, param_types: []Index },
 4889        },
 4890
 4891        undef: DataIsIndex,
 4892        simple_value: void,
 4893        ptr_nav: struct { data: *PtrNav },
 4894        ptr_comptime_alloc: struct { data: *PtrComptimeAlloc },
 4895        ptr_uav: struct { data: *PtrUav },
 4896        ptr_uav_aligned: struct { data: *PtrUavAligned },
 4897        ptr_comptime_field: struct { data: *PtrComptimeField },
 4898        ptr_int: struct { data: *PtrInt },
 4899        ptr_eu_payload: struct { data: *PtrBase },
 4900        ptr_opt_payload: struct { data: *PtrBase },
 4901        ptr_elem: struct { data: *PtrBaseIndex },
 4902        ptr_field: struct { data: *PtrBaseIndex },
 4903        ptr_slice: struct { data: *PtrSlice },
 4904        opt_payload: struct { data: *Tag.TypeValue },
 4905        opt_null: DataIsIndex,
 4906        int_u8: struct { data: u8 },
 4907        int_u16: struct { data: u16 },
 4908        int_u32: struct { data: u32 },
 4909        int_i32: struct { data: i32 },
 4910        int_usize: struct { data: u32 },
 4911        int_comptime_int_u32: struct { data: u32 },
 4912        int_comptime_int_i32: struct { data: i32 },
 4913        int_small: struct { data: *IntSmall },
 4914        int_positive: struct { data: u32 },
 4915        int_negative: struct { data: u32 },
 4916        int_lazy_align: struct { data: *IntLazy },
 4917        int_lazy_size: struct { data: *IntLazy },
 4918        error_set_error: struct { data: *Key.Error },
 4919        error_union_error: struct { data: *Key.Error },
 4920        error_union_payload: struct { data: *Tag.TypeValue },
 4921        enum_literal: struct { data: NullTerminatedString },
 4922        enum_tag: struct { data: *Tag.EnumTag },
 4923        float_f16: struct { data: f16 },
 4924        float_f32: struct { data: f32 },
 4925        float_f64: struct { data: *Float64 },
 4926        float_f80: struct { data: *Float80 },
 4927        float_f128: struct { data: *Float128 },
 4928        float_c_longdouble_f80: struct { data: *Float80 },
 4929        float_c_longdouble_f128: struct { data: *Float128 },
 4930        float_comptime_float: struct { data: *Float128 },
 4931        variable: struct { data: *Tag.Variable },
 4932        threadlocal_variable: struct { data: *Tag.Variable },
 4933        @"extern": struct { data: *Tag.Extern },
 4934        func_decl: struct {
 4935            const @"data.analysis.inferred_error_set" = opaque {};
 4936            data: *Tag.FuncDecl,
 4937            @"trailing.resolved_error_set.len": *@"data.analysis.inferred_error_set",
 4938            trailing: struct { resolved_error_set: []Index },
 4939        },
 4940        func_instance: struct {
 4941            const @"data.analysis.inferred_error_set" = opaque {};
 4942            const @"data.generic_owner.data.ty.data.params_len" = opaque {};
 4943            data: *Tag.FuncInstance,
 4944            @"trailing.resolved_error_set.len": *@"data.analysis.inferred_error_set",
 4945            @"trailing.comptime_args.len": *@"data.generic_owner.data.ty.data.params_len",
 4946            trailing: struct { resolved_error_set: []Index, comptime_args: []Index },
 4947        },
 4948        func_coerced: struct {
 4949            data: *Tag.FuncCoerced,
 4950        },
 4951        only_possible_value: DataIsIndex,
 4952        union_value: struct { data: *Key.Union },
 4953        bytes: struct { data: *Bytes },
 4954        aggregate: struct {
 4955            const @"data.ty.data.len orelse data.ty.data.fields_len" = opaque {};
 4956            data: *Tag.Aggregate,
 4957            @"trailing.element_values.len": *@"data.ty.data.len orelse data.ty.data.fields_len",
 4958            trailing: struct { element_values: []Index },
 4959        },
 4960        repeated: struct { data: *Repeated },
 4961
 4962        memoized_call: struct {
 4963            const @"data.args_len" = opaque {};
 4964            data: *MemoizedCall,
 4965            @"trailing.arg_values.len": *@"data.args_len",
 4966            trailing: struct { arg_values: []Index },
 4967        },
 4968    }) void {
 4969        _ = self;
 4970        const map_fields = @typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).pointer.child).@"struct".fields;
 4971        @setEvalBranchQuota(2_000);
 4972        inline for (@typeInfo(Tag).@"enum".fields, 0..) |tag, start| {
 4973            inline for (0..map_fields.len) |offset| {
 4974                if (comptime std.mem.eql(u8, tag.name, map_fields[(start + offset) % map_fields.len].name)) break;
 4975            } else {
 4976                @compileError(@typeName(Tag) ++ "." ++ tag.name ++ " missing dbHelper tag_to_encoding_map entry");
 4977            }
 4978        }
 4979    }
    comptime {
        // Debugger-support validation. Only relevant when debug info is kept.
        if (!builtin.strip_debug_info) switch (builtin.zig_backend) {
            // The LLVM backend keeps `dbHelper` alive by referencing it, so its
            // `tag_to_encoding_map` parameter type (and the completeness check in
            // its body) is emitted into the debug info.
            .stage2_llvm => _ = &dbHelper,
            // The self-hosted x86_64 backend instead validates `Tag.encodings`
            // directly: every `Tag` must have an entry, and each entry's
            // `trailing` fields must be well-formed and matched by `config`.
            .stage2_x86_64 => for (@typeInfo(Tag).@"enum".fields) |tag| {
                // `@compileLog` (rather than `@compileError`) reports *all*
                // missing encodings before the `@field` below fails the build.
                if (!@hasField(@TypeOf(Tag.encodings), tag.name)) @compileLog("missing: " ++ @typeName(Tag) ++ ".encodings." ++ tag.name);
                const encoding = @field(Tag.encodings, tag.name);
                if (@hasField(@TypeOf(encoding), "trailing")) for (@typeInfo(encoding.trailing).@"struct".fields) |field| {
                    struct {
                        // Asserts `encoding.config` has an enum-literal entry named `name`.
                        fn checkConfig(name: []const u8) void {
                            if (!@hasField(@TypeOf(encoding.config), name)) @compileError("missing field: " ++ @typeName(Tag) ++ ".encodings." ++ tag.name ++ ".config.@\"" ++ name ++ "\"");
                            const FieldType = @TypeOf(@field(encoding.config, name));
                            if (@typeInfo(FieldType) != .enum_literal) @compileError("expected enum literal: " ++ @typeName(Tag) ++ ".encodings." ++ tag.name ++ ".config.@\"" ++ name ++ "\": " ++ @typeName(FieldType));
                        }
                        // Recursively validates a trailing field's type: ints and
                        // enums are fine as-is; structs must be packed; optionals
                        // need a `.?` config entry; slices need a `.len` config
                        // entry. Anything else is rejected at compile time.
                        fn checkField(name: []const u8, Type: type) void {
                            switch (@typeInfo(Type)) {
                                .int => {},
                                .@"enum" => {},
                                .@"struct" => |info| assert(info.layout == .@"packed"),
                                .optional => |info| {
                                    checkConfig(name ++ ".?");
                                    checkField(name ++ ".?", info.child);
                                },
                                .pointer => |info| {
                                    assert(info.size == .slice);
                                    checkConfig(name ++ ".len");
                                    checkField(name ++ "[0]", info.child);
                                },
                                else => @compileError("unsupported type: " ++ @typeName(Tag) ++ ".encodings." ++ tag.name ++ "." ++ name ++ ": " ++ @typeName(Type)),
                            }
                        }
                    }.checkField("trailing." ++ field.name, field.type);
                };
            },
            else => {},
        };
    }
 5016};
 5017
/// The keys of all statically-known interned values, in order.
/// Order is significant: each entry's position in this array determines which
/// statically-known `Index` it corresponds to (see `static_len`). Entries
/// refer to other statically-known values by their `Index` names (e.g.
/// `.usize_type`, `.zero_u8`, `.slice_const_u8_type`).
pub const static_keys: [static_len]Key = .{
    // u0
    .{ .int_type = .{
        .signedness = .unsigned,
        .bits = 0,
    } },

    // i0
    .{ .int_type = .{
        .signedness = .signed,
        .bits = 0,
    } },

    // u1
    .{ .int_type = .{
        .signedness = .unsigned,
        .bits = 1,
    } },

    // u8
    .{ .int_type = .{
        .signedness = .unsigned,
        .bits = 8,
    } },

    // i8
    .{ .int_type = .{
        .signedness = .signed,
        .bits = 8,
    } },

    // u16
    .{ .int_type = .{
        .signedness = .unsigned,
        .bits = 16,
    } },

    // i16
    .{ .int_type = .{
        .signedness = .signed,
        .bits = 16,
    } },

    // u29
    .{ .int_type = .{
        .signedness = .unsigned,
        .bits = 29,
    } },

    // u32
    .{ .int_type = .{
        .signedness = .unsigned,
        .bits = 32,
    } },

    // i32
    .{ .int_type = .{
        .signedness = .signed,
        .bits = 32,
    } },

    // u64
    .{ .int_type = .{
        .signedness = .unsigned,
        .bits = 64,
    } },

    // i64
    .{ .int_type = .{
        .signedness = .signed,
        .bits = 64,
    } },

    // u80
    .{ .int_type = .{
        .signedness = .unsigned,
        .bits = 80,
    } },

    // u128
    .{ .int_type = .{
        .signedness = .unsigned,
        .bits = 128,
    } },

    // i128
    .{ .int_type = .{
        .signedness = .signed,
        .bits = 128,
    } },

    // u256
    .{ .int_type = .{
        .signedness = .unsigned,
        .bits = 256,
    } },

    .{ .simple_type = .usize },
    .{ .simple_type = .isize },
    .{ .simple_type = .c_char },
    .{ .simple_type = .c_short },
    .{ .simple_type = .c_ushort },
    .{ .simple_type = .c_int },
    .{ .simple_type = .c_uint },
    .{ .simple_type = .c_long },
    .{ .simple_type = .c_ulong },
    .{ .simple_type = .c_longlong },
    .{ .simple_type = .c_ulonglong },
    .{ .simple_type = .c_longdouble },
    .{ .simple_type = .f16 },
    .{ .simple_type = .f32 },
    .{ .simple_type = .f64 },
    .{ .simple_type = .f80 },
    .{ .simple_type = .f128 },
    .{ .simple_type = .anyopaque },
    .{ .simple_type = .bool },
    .{ .simple_type = .void },
    .{ .simple_type = .type },
    .{ .simple_type = .anyerror },
    .{ .simple_type = .comptime_int },
    .{ .simple_type = .comptime_float },
    .{ .simple_type = .noreturn },
    // `anyframe` (a child of `.none` means plain `anyframe`, not `anyframe->T`)
    .{ .anyframe_type = .none },
    .{ .simple_type = .null },
    .{ .simple_type = .undefined },
    .{ .simple_type = .enum_literal },

    // *usize
    .{ .ptr_type = .{
        .child = .usize_type,
        .flags = .{},
    } },

    // *const comptime_int
    .{ .ptr_type = .{
        .child = .comptime_int_type,
        .flags = .{
            .is_const = true,
        },
    } },

    // [*]u8
    .{ .ptr_type = .{
        .child = .u8_type,
        .flags = .{
            .size = .many,
        },
    } },

    // [*]const u8
    .{ .ptr_type = .{
        .child = .u8_type,
        .flags = .{
            .size = .many,
            .is_const = true,
        },
    } },

    // [*:0]const u8
    .{ .ptr_type = .{
        .child = .u8_type,
        .sentinel = .zero_u8,
        .flags = .{
            .size = .many,
            .is_const = true,
        },
    } },

    // []const u8
    .{ .ptr_type = .{
        .child = .u8_type,
        .flags = .{
            .size = .slice,
            .is_const = true,
        },
    } },

    // [:0]const u8
    .{ .ptr_type = .{
        .child = .u8_type,
        .sentinel = .zero_u8,
        .flags = .{
            .size = .slice,
            .is_const = true,
        },
    } },

    // [*]const []const u8
    .{ .ptr_type = .{
        .child = .slice_const_u8_type,
        .flags = .{
            .size = .many,
            .is_const = true,
        },
    } },

    // []const []const u8
    .{ .ptr_type = .{
        .child = .slice_const_u8_type,
        .flags = .{
            .size = .slice,
            .is_const = true,
        },
    } },

    // ?type
    .{ .opt_type = .type_type },

    // [*]const type
    .{ .ptr_type = .{
        .child = .type_type,
        .flags = .{
            .size = .many,
            .is_const = true,
        },
    } },

    // []const type
    .{ .ptr_type = .{
        .child = .type_type,
        .flags = .{
            .size = .slice,
            .is_const = true,
        },
    } },

    // @Vector(8, i8)
    .{ .vector_type = .{ .len = 8, .child = .i8_type } },
    // @Vector(16, i8)
    .{ .vector_type = .{ .len = 16, .child = .i8_type } },
    // @Vector(32, i8)
    .{ .vector_type = .{ .len = 32, .child = .i8_type } },
    // @Vector(64, i8)
    .{ .vector_type = .{ .len = 64, .child = .i8_type } },
    // @Vector(1, u8)
    .{ .vector_type = .{ .len = 1, .child = .u8_type } },
    // @Vector(2, u8)
    .{ .vector_type = .{ .len = 2, .child = .u8_type } },
    // @Vector(4, u8)
    .{ .vector_type = .{ .len = 4, .child = .u8_type } },
    // @Vector(8, u8)
    .{ .vector_type = .{ .len = 8, .child = .u8_type } },
    // @Vector(16, u8)
    .{ .vector_type = .{ .len = 16, .child = .u8_type } },
    // @Vector(32, u8)
    .{ .vector_type = .{ .len = 32, .child = .u8_type } },
    // @Vector(64, u8)
    .{ .vector_type = .{ .len = 64, .child = .u8_type } },
    // @Vector(2, i16)
    .{ .vector_type = .{ .len = 2, .child = .i16_type } },
    // @Vector(4, i16)
    .{ .vector_type = .{ .len = 4, .child = .i16_type } },
    // @Vector(8, i16)
    .{ .vector_type = .{ .len = 8, .child = .i16_type } },
    // @Vector(16, i16)
    .{ .vector_type = .{ .len = 16, .child = .i16_type } },
    // @Vector(32, i16)
    .{ .vector_type = .{ .len = 32, .child = .i16_type } },
    // @Vector(4, u16)
    .{ .vector_type = .{ .len = 4, .child = .u16_type } },
    // @Vector(8, u16)
    .{ .vector_type = .{ .len = 8, .child = .u16_type } },
    // @Vector(16, u16)
    .{ .vector_type = .{ .len = 16, .child = .u16_type } },
    // @Vector(32, u16)
    .{ .vector_type = .{ .len = 32, .child = .u16_type } },
    // @Vector(2, i32)
    .{ .vector_type = .{ .len = 2, .child = .i32_type } },
    // @Vector(4, i32)
    .{ .vector_type = .{ .len = 4, .child = .i32_type } },
    // @Vector(8, i32)
    .{ .vector_type = .{ .len = 8, .child = .i32_type } },
    // @Vector(16, i32)
    .{ .vector_type = .{ .len = 16, .child = .i32_type } },
    // @Vector(4, u32)
    .{ .vector_type = .{ .len = 4, .child = .u32_type } },
    // @Vector(8, u32)
    .{ .vector_type = .{ .len = 8, .child = .u32_type } },
    // @Vector(16, u32)
    .{ .vector_type = .{ .len = 16, .child = .u32_type } },
    // @Vector(2, i64)
    .{ .vector_type = .{ .len = 2, .child = .i64_type } },
    // @Vector(4, i64)
    .{ .vector_type = .{ .len = 4, .child = .i64_type } },
    // @Vector(8, i64)
    .{ .vector_type = .{ .len = 8, .child = .i64_type } },
    // @Vector(2, u64)
    .{ .vector_type = .{ .len = 2, .child = .u64_type } },
    // @Vector(4, u64)
    .{ .vector_type = .{ .len = 4, .child = .u64_type } },
    // @Vector(8, u64)
    .{ .vector_type = .{ .len = 8, .child = .u64_type } },
    // @Vector(1, u128)
    .{ .vector_type = .{ .len = 1, .child = .u128_type } },
    // @Vector(2, u128)
    .{ .vector_type = .{ .len = 2, .child = .u128_type } },
    // @Vector(1, u256)
    .{ .vector_type = .{ .len = 1, .child = .u256_type } },
    // @Vector(4, f16)
    .{ .vector_type = .{ .len = 4, .child = .f16_type } },
    // @Vector(8, f16)
    .{ .vector_type = .{ .len = 8, .child = .f16_type } },
    // @Vector(16, f16)
    .{ .vector_type = .{ .len = 16, .child = .f16_type } },
    // @Vector(32, f16)
    .{ .vector_type = .{ .len = 32, .child = .f16_type } },
    // @Vector(2, f32)
    .{ .vector_type = .{ .len = 2, .child = .f32_type } },
    // @Vector(4, f32)
    .{ .vector_type = .{ .len = 4, .child = .f32_type } },
    // @Vector(8, f32)
    .{ .vector_type = .{ .len = 8, .child = .f32_type } },
    // @Vector(16, f32)
    .{ .vector_type = .{ .len = 16, .child = .f32_type } },
    // @Vector(2, f64)
    .{ .vector_type = .{ .len = 2, .child = .f64_type } },
    // @Vector(4, f64)
    .{ .vector_type = .{ .len = 4, .child = .f64_type } },
    // @Vector(8, f64)
    .{ .vector_type = .{ .len = 8, .child = .f64_type } },

    // ?noreturn
    .{ .opt_type = .noreturn_type },

    // anyerror!void
    .{ .error_union_type = .{
        .error_set_type = .anyerror_type,
        .payload_type = .void_type,
    } },

    // adhoc_inferred_error_set_type
    .{ .simple_type = .adhoc_inferred_error_set },
    // generic_poison_type
    .{ .simple_type = .generic_poison },

    // empty_tuple_type
    .{ .tuple_type = .{
        .types = .empty,
        .values = .empty,
    } },

    // untyped `undefined`
    .{ .simple_value = .undefined },
    // @as(bool, undefined)
    .{ .undef = .bool_type },
    // @as(usize, undefined)
    .{ .undef = .usize_type },
    // @as(u1, undefined)
    .{ .undef = .u1_type },

    // 0 (comptime_int)
    .{ .int = .{
        .ty = .comptime_int_type,
        .storage = .{ .u64 = 0 },
    } },

    // 0 (usize)
    .{ .int = .{
        .ty = .usize_type,
        .storage = .{ .u64 = 0 },
    } },

    // 0 (u1)
    .{ .int = .{
        .ty = .u1_type,
        .storage = .{ .u64 = 0 },
    } },

    // 0 (u8)
    .{ .int = .{
        .ty = .u8_type,
        .storage = .{ .u64 = 0 },
    } },

    // 1 (comptime_int)
    .{ .int = .{
        .ty = .comptime_int_type,
        .storage = .{ .u64 = 1 },
    } },

    // 1 (usize)
    .{ .int = .{
        .ty = .usize_type,
        .storage = .{ .u64 = 1 },
    } },

    // 1 (u1)
    .{ .int = .{
        .ty = .u1_type,
        .storage = .{ .u64 = 1 },
    } },

    // 1 (u8)
    .{ .int = .{
        .ty = .u8_type,
        .storage = .{ .u64 = 1 },
    } },

    // 4 (u8)
    .{ .int = .{
        .ty = .u8_type,
        .storage = .{ .u64 = 4 },
    } },

    // -1 (comptime_int)
    .{ .int = .{
        .ty = .comptime_int_type,
        .storage = .{ .i64 = -1 },
    } },

    .{ .simple_value = .void },
    .{ .simple_value = .@"unreachable" },
    .{ .simple_value = .null },
    .{ .simple_value = .true },
    .{ .simple_value = .false },
    .{ .simple_value = .empty_tuple },
};
 5405
/// How many items in the InternPool are statically known.
/// Forwarded from `Zir.Inst.Ref.static_len`, where it is specified with an
/// integer literal and a corresponding comptime assert, to break an
/// unfortunate and arguably incorrect dependency loop when compiling.
pub const static_len = Zir.Inst.Ref.static_len;
 5411
 5412pub const Tag = enum(u8) {
 5413    /// This special tag represents a value which was removed from this pool via
 5414    /// `InternPool.remove`. The item remains allocated to preserve indices, but
 5415    /// lookups will consider it not equal to any other item, and all queries
 5416    /// assert not this tag. `data` is unused.
 5417    removed,
 5418
 5419    /// An integer type.
 5420    /// data is number of bits
 5421    type_int_signed,
 5422    /// An integer type.
 5423    /// data is number of bits
 5424    type_int_unsigned,
 5425    /// An array type whose length requires 64 bits or which has a sentinel.
 5426    /// data is payload to Array.
 5427    type_array_big,
 5428    /// An array type that has no sentinel and whose length fits in 32 bits.
 5429    /// data is payload to Vector.
 5430    type_array_small,
 5431    /// A vector type.
 5432    /// data is payload to Vector.
 5433    type_vector,
 5434    /// A fully explicitly specified pointer type.
 5435    type_pointer,
 5436    /// A slice type.
 5437    /// data is Index of underlying pointer type.
 5438    type_slice,
 5439    /// An optional type.
 5440    /// data is the child type.
 5441    type_optional,
 5442    /// The type `anyframe->T`.
 5443    /// data is the child type.
 5444    /// If the child type is `none`, the type is `anyframe`.
 5445    type_anyframe,
 5446    /// An error union type.
 5447    /// data is payload to `Key.ErrorUnionType`.
 5448    type_error_union,
 5449    /// An error union type of the form `anyerror!T`.
 5450    /// data is `Index` of payload type.
 5451    type_anyerror_union,
 5452    /// An error set type.
 5453    /// data is payload to `ErrorSet`.
 5454    type_error_set,
 5455    /// The inferred error set type of a function.
 5456    /// data is `Index` of a `func_decl` or `func_instance`.
 5457    type_inferred_error_set,
 5458    /// An enum type with auto-numbered tag values.
 5459    /// The enum is exhaustive.
 5460    /// data is payload index to `EnumAuto`.
 5461    type_enum_auto,
 5462    /// An enum type with an explicitly provided integer tag type.
 5463    /// The enum is exhaustive.
 5464    /// data is payload index to `EnumExplicit`.
 5465    type_enum_explicit,
 5466    /// An enum type with an explicitly provided integer tag type.
 5467    /// The enum is non-exhaustive.
 5468    /// data is payload index to `EnumExplicit`.
 5469    type_enum_nonexhaustive,
 5470    /// A type that can be represented with only an enum tag.
 5471    simple_type,
 5472    /// An opaque type.
 5473    /// data is index of Tag.TypeOpaque in extra.
 5474    type_opaque,
 5475    /// A non-packed struct type.
 5476    /// data is 0 or extra index of `TypeStruct`.
 5477    type_struct,
 5478    /// A packed struct, no fields have any init values.
 5479    /// data is extra index of `TypeStructPacked`.
 5480    type_struct_packed,
 5481    /// A packed struct, one or more fields have init values.
 5482    /// data is extra index of `TypeStructPacked`.
 5483    type_struct_packed_inits,
 5484    /// A `TupleType`.
 5485    /// data is extra index of `TypeTuple`.
 5486    type_tuple,
 5487    /// A union type.
 5488    /// `data` is extra index of `TypeUnion`.
 5489    type_union,
 5490    /// A function body type.
 5491    /// `data` is extra index to `TypeFunction`.
 5492    type_function,
 5493
 5494    /// Typed `undefined`.
 5495    /// `data` is `Index` of the type.
 5496    /// Untyped `undefined` is stored instead via `simple_value`.
 5497    undef,
 5498    /// A value that can be represented with only an enum tag.
 5499    simple_value,
 5500    /// A pointer to a `Nav`.
 5501    /// data is extra index of `PtrNav`, which contains the type and address.
 5502    ptr_nav,
 5503    /// A pointer to a decl that can be mutated at comptime.
 5504    /// data is extra index of `PtrComptimeAlloc`, which contains the type and address.
 5505    ptr_comptime_alloc,
 5506    /// A pointer to an anonymous addressable value.
 5507    /// data is extra index of `PtrUav`, which contains the pointer type and decl value.
 5508    /// The alignment of the uav is communicated via the pointer type.
 5509    ptr_uav,
 5510    /// A pointer to an unnamed addressable value.
 5511    /// data is extra index of `PtrUavAligned`, which contains the pointer
 5512    /// type and decl value.
 5513    /// The original pointer type is also provided, which will be different than `ty`.
 5514    /// This encoding is only used when a pointer to a Uav is
 5515    /// coerced to a different pointer type with a different alignment.
 5516    ptr_uav_aligned,
 5517    /// data is extra index of `PtrComptimeField`, which contains the pointer type and field value.
 5518    ptr_comptime_field,
 5519    /// A pointer with an integer value.
 5520    /// data is extra index of `PtrInt`, which contains the type and address (byte offset from 0).
 5521    /// Only pointer types are allowed to have this encoding. Optional types must use
 5522    /// `opt_payload` or `opt_null`.
 5523    ptr_int,
 5524    /// A pointer to the payload of an error union.
 5525    /// data is extra index of `PtrBase`, which contains the type and base pointer.
 5526    ptr_eu_payload,
 5527    /// A pointer to the payload of an optional.
 5528    /// data is extra index of `PtrBase`, which contains the type and base pointer.
 5529    ptr_opt_payload,
 5530    /// A pointer to an array element.
 5531    /// data is extra index of PtrBaseIndex, which contains the base array and element index.
 5532    /// In order to use this encoding, one must ensure that the `InternPool`
 5533    /// already contains the elem pointer type corresponding to this payload.
 5534    ptr_elem,
 5535    /// A pointer to a container field.
 5536    /// data is extra index of PtrBaseIndex, which contains the base container and field index.
 5537    ptr_field,
 5538    /// A slice.
 5539    /// data is extra index of PtrSlice, which contains the ptr and len values
 5540    ptr_slice,
 5541    /// An optional value that is non-null.
 5542    /// data is extra index of `TypeValue`.
 5543    /// The type is the optional type (not the payload type).
 5544    opt_payload,
 5545    /// An optional value that is null.
 5546    /// data is Index of the optional type.
 5547    opt_null,
 5548    /// Type: u8
 5549    /// data is integer value
 5550    int_u8,
 5551    /// Type: u16
 5552    /// data is integer value
 5553    int_u16,
 5554    /// Type: u32
 5555    /// data is integer value
 5556    int_u32,
 5557    /// Type: i32
 5558    /// data is integer value bitcasted to u32.
 5559    int_i32,
 5560    /// A usize that fits in 32 bits.
 5561    /// data is integer value.
 5562    int_usize,
 5563    /// A comptime_int that fits in a u32.
 5564    /// data is integer value.
 5565    int_comptime_int_u32,
 5566    /// A comptime_int that fits in an i32.
 5567    /// data is integer value bitcasted to u32.
 5568    int_comptime_int_i32,
 5569    /// An integer value that fits in 32 bits with an explicitly provided type.
 5570    /// data is extra index of `IntSmall`.
 5571    int_small,
 5572    /// A positive integer value.
 5573    /// data is a limbs index to `Int`.
 5574    int_positive,
 5575    /// A negative integer value.
 5576    /// data is a limbs index to `Int`.
 5577    int_negative,
 5578    /// The ABI alignment of a lazy type.
 5579    /// data is extra index of `IntLazy`.
 5580    int_lazy_align,
 5581    /// The ABI size of a lazy type.
 5582    /// data is extra index of `IntLazy`.
 5583    int_lazy_size,
 5584    /// An error value.
 5585    /// data is extra index of `Key.Error`.
 5586    error_set_error,
 5587    /// An error union error.
 5588    /// data is extra index of `Key.Error`.
 5589    error_union_error,
 5590    /// An error union payload.
 5591    /// data is extra index of `TypeValue`.
 5592    error_union_payload,
 5593    /// An enum literal value.
 5594    /// data is `NullTerminatedString` of the error name.
 5595    enum_literal,
 5596    /// An enum tag value.
 5597    /// data is extra index of `EnumTag`.
 5598    enum_tag,
 5599    /// An f16 value.
 5600    /// data is float value bitcasted to u16 and zero-extended.
 5601    float_f16,
 5602    /// An f32 value.
 5603    /// data is float value bitcasted to u32.
 5604    float_f32,
 5605    /// An f64 value.
 5606    /// data is extra index to Float64.
 5607    float_f64,
 5608    /// An f80 value.
 5609    /// data is extra index to Float80.
 5610    float_f80,
 5611    /// An f128 value.
 5612    /// data is extra index to Float128.
 5613    float_f128,
 5614    /// A c_longdouble value of 80 bits.
 5615    /// data is extra index to Float80.
 5616    /// This is used when a c_longdouble value is provided as an f80, because f80 has unnormalized
 5617    /// values which cannot be losslessly represented as f128. It should only be used when the type
 5618    /// underlying c_longdouble for the target is 80 bits.
 5619    float_c_longdouble_f80,
 5620    /// A c_longdouble value of 128 bits.
 5621    /// data is extra index to Float128.
 5622    /// This is used when a c_longdouble value is provided as any type other than an f80, since all
 5623    /// other float types can be losslessly converted to and from f128.
 5624    float_c_longdouble_f128,
 5625    /// A comptime_float value.
 5626    /// data is extra index to Float128.
 5627    float_comptime_float,
 5628    /// A global variable.
 5629    /// data is extra index to Variable.
 5630    variable,
 5631    /// A global threadlocal variable.
 5632    /// data is extra index to Variable.
 5633    threadlocal_variable,
 5634    /// An extern function or variable.
 5635    /// data is extra index to Extern.
 5636    /// Some parts of the key are stored in `owner_nav`.
 5637    @"extern",
 5638    /// A non-extern function corresponding directly to the AST node from whence it originated.
 5639    /// data is extra index to `FuncDecl`.
 5640    /// Only the owner Decl is used for hashing and equality because the other
 5641    /// fields can get patched up during incremental compilation.
 5642    func_decl,
 5643    /// A generic function instantiation.
 5644    /// data is extra index to `FuncInstance`.
 5645    func_instance,
 5646    /// A `func_decl` or a `func_instance` that has been coerced to a different type.
 5647    /// data is extra index to `FuncCoerced`.
 5648    func_coerced,
 5649    /// This represents the only possible value for *some* types which have
 5650    /// only one possible value. Not all only-possible-values are encoded this way;
 5651    /// for example structs which have all comptime fields are not encoded this way.
 5652    /// The set of values that are encoded this way is:
 5653    /// * An array or vector which has length 0.
 5654    /// * A struct which has all fields comptime-known.
 5655    /// * An empty enum or union. TODO: this value's existence is strange, because such a type in reality has no values. See #15909
 5656    /// data is Index of the type, which is known to be zero bits at runtime.
 5657    only_possible_value,
 5658    /// data is extra index to Key.Union.
 5659    union_value,
 5660    /// An array of bytes.
 5661    /// data is extra index to `Bytes`.
 5662    bytes,
 5663    /// An instance of a struct, array, or vector.
 5664    /// data is extra index to `Aggregate`.
 5665    aggregate,
 5666    /// An instance of an array or vector with every element being the same value.
 5667    /// data is extra index to `Repeated`.
 5668    repeated,
 5669
 5670    /// A memoized comptime function call result.
 5671    /// data is extra index to `MemoizedCall`
 5672    memoized_call,
 5673
 5674    const ErrorUnionType = Key.ErrorUnionType;
 5675    const TypeValue = Key.TypeValue;
 5676    const Error = Key.Error;
 5677    const EnumTag = Key.EnumTag;
 5678    const Union = Key.Union;
 5679    const TypePointer = Key.PtrType;
 5680
 5681    const enum_explicit_encoding = .{
 5682        .summary = .@"{.payload.name%summary#\"}",
 5683        .payload = EnumExplicit,
 5684        .trailing = struct {
 5685            owner_union: Index,
 5686            captures: ?[]CaptureValue,
 5687            type_hash: ?u64,
 5688            field_names: []NullTerminatedString,
 5689            tag_values: []Index,
 5690        },
 5691        .config = .{
 5692            .@"trailing.owner_union.?" = .@"payload.zir_index == .none",
 5693            .@"trailing.cau.?" = .@"payload.zir_index != .none",
 5694            .@"trailing.captures.?" = .@"payload.captures_len < 0xffffffff",
 5695            .@"trailing.captures.?.len" = .@"payload.captures_len",
 5696            .@"trailing.type_hash.?" = .@"payload.captures_len == 0xffffffff",
 5697            .@"trailing.field_names.len" = .@"payload.fields_len",
 5698            .@"trailing.tag_values.len" = .@"payload.fields_len",
 5699        },
 5700    };
 5701    const encodings = .{
 5702        .removed = .{},
 5703
 5704        .type_int_signed = .{ .summary = .@"i{.data%value}", .data = u32 },
 5705        .type_int_unsigned = .{ .summary = .@"u{.data%value}", .data = u32 },
 5706        .type_array_big = .{
 5707            .summary = .@"[{.payload.len1%value} << 32 | {.payload.len0%value}:{.payload.sentinel%summary}]{.payload.child%summary}",
 5708            .payload = Array,
 5709        },
 5710        .type_array_small = .{ .summary = .@"[{.payload.len%value}]{.payload.child%summary}", .payload = Vector },
 5711        .type_vector = .{ .summary = .@"@Vector({.payload.len%value}, {.payload.child%summary})", .payload = Vector },
 5712        .type_pointer = .{ .summary = .@"*... {.payload.child%summary}", .payload = TypePointer },
 5713        .type_slice = .{ .summary = .@"[]... {.data.unwrapped.payload.child%summary}", .data = Index },
 5714        .type_optional = .{ .summary = .@"?{.data%summary}", .data = Index },
 5715        .type_anyframe = .{ .summary = .@"anyframe->{.data%summary}", .data = Index },
 5716        .type_error_union = .{
 5717            .summary = .@"{.payload.error_set_type%summary}!{.payload.payload_type%summary}",
 5718            .payload = ErrorUnionType,
 5719        },
 5720        .type_anyerror_union = .{ .summary = .@"anyerror!{.data%summary}", .data = Index },
 5721        .type_error_set = .{ .summary = .@"error{...}", .payload = ErrorSet },
 5722        .type_inferred_error_set = .{
 5723            .summary = .@"@typeInfo(@typeInfo(@TypeOf({.data%summary})).@\"fn\".return_type.?).error_union.error_set",
 5724            .data = Index,
 5725        },
 5726        .type_enum_auto = .{
 5727            .summary = .@"{.payload.name%summary#\"}",
 5728            .payload = EnumAuto,
 5729            .trailing = struct {
 5730                owner_union: ?Index,
 5731                captures: ?[]CaptureValue,
 5732                type_hash: ?u64,
 5733                field_names: []NullTerminatedString,
 5734            },
 5735            .config = .{
 5736                .@"trailing.owner_union.?" = .@"payload.zir_index == .none",
 5737                .@"trailing.cau.?" = .@"payload.zir_index != .none",
 5738                .@"trailing.captures.?" = .@"payload.captures_len < 0xffffffff",
 5739                .@"trailing.captures.?.len" = .@"payload.captures_len",
 5740                .@"trailing.type_hash.?" = .@"payload.captures_len == 0xffffffff",
 5741                .@"trailing.field_names.len" = .@"payload.fields_len",
 5742            },
 5743        },
 5744        .type_enum_explicit = enum_explicit_encoding,
 5745        .type_enum_nonexhaustive = enum_explicit_encoding,
 5746        .simple_type = .{ .summary = .@"{.index%value#.}", .index = SimpleType },
 5747        .type_opaque = .{
 5748            .summary = .@"{.payload.name%summary#\"}",
 5749            .payload = TypeOpaque,
 5750            .trailing = struct { captures: []CaptureValue },
 5751            .config = .{ .@"trailing.captures.len" = .@"payload.captures_len" },
 5752        },
 5753        .type_struct = .{
 5754            .summary = .@"{.payload.name%summary#\"}",
 5755            .payload = TypeStruct,
 5756            .trailing = struct {
 5757                captures_len: ?u32,
 5758                captures: ?[]CaptureValue,
 5759                type_hash: ?u64,
 5760                field_types: []Index,
 5761                field_names_map: OptionalMapIndex,
 5762                field_names: []NullTerminatedString,
 5763                field_inits: ?[]Index,
 5764                field_aligns: ?[]Alignment,
 5765                field_is_comptime_bits: ?[]u32,
 5766                field_index: ?[]LoadedStructType.RuntimeOrder,
 5767                field_offset: []u32,
 5768            },
 5769            .config = .{
 5770                .@"trailing.captures_len.?" = .@"payload.flags.any_captures",
 5771                .@"trailing.captures.?" = .@"payload.flags.any_captures",
 5772                .@"trailing.captures.?.len" = .@"trailing.captures_len.?",
 5773                .@"trailing.type_hash.?" = .@"payload.flags.is_reified",
 5774                .@"trailing.field_types.len" = .@"payload.fields_len",
 5775                .@"trailing.field_names.len" = .@"payload.fields_len",
 5776                .@"trailing.field_inits.?" = .@"payload.flags.any_default_inits",
 5777                .@"trailing.field_inits.?.len" = .@"payload.fields_len",
 5778                .@"trailing.field_aligns.?" = .@"payload.flags.any_aligned_fields",
 5779                .@"trailing.field_aligns.?.len" = .@"payload.fields_len",
 5780                .@"trailing.field_is_comptime_bits.?" = .@"payload.flags.any_comptime_fields",
 5781                .@"trailing.field_is_comptime_bits.?.len" = .@"(payload.fields_len + 31) / 32",
 5782                .@"trailing.field_index.?" = .@"!payload.flags.is_extern",
 5783                .@"trailing.field_index.?.len" = .@"payload.fields_len",
 5784                .@"trailing.field_offset.len" = .@"payload.fields_len",
 5785            },
 5786        },
 5787        .type_struct_packed = .{
 5788            .summary = .@"{.payload.name%summary#\"}",
 5789            .payload = TypeStructPacked,
 5790            .trailing = struct {
 5791                captures_len: ?u32,
 5792                captures: ?[]CaptureValue,
 5793                type_hash: ?u64,
 5794                field_types: []Index,
 5795                field_names: []NullTerminatedString,
 5796            },
 5797            .config = .{
 5798                .@"trailing.captures_len.?" = .@"payload.flags.any_captures",
 5799                .@"trailing.captures.?" = .@"payload.flags.any_captures",
 5800                .@"trailing.captures.?.len" = .@"trailing.captures_len.?",
 5801                .@"trailing.type_hash.?" = .@"payload.is_flags.is_reified",
 5802                .@"trailing.field_types.len" = .@"payload.fields_len",
 5803                .@"trailing.field_names.len" = .@"payload.fields_len",
 5804            },
 5805        },
 5806        .type_struct_packed_inits = .{
 5807            .summary = .@"{.payload.name%summary#\"}",
 5808            .payload = TypeStructPacked,
 5809            .trailing = struct {
 5810                captures_len: ?u32,
 5811                captures: ?[]CaptureValue,
 5812                type_hash: ?u64,
 5813                field_types: []Index,
 5814                field_names: []NullTerminatedString,
 5815                field_inits: []Index,
 5816            },
 5817            .config = .{
 5818                .@"trailing.captures_len.?" = .@"payload.flags.any_captures",
 5819                .@"trailing.captures.?" = .@"payload.flags.any_captures",
 5820                .@"trailing.captures.?.len" = .@"trailing.captures_len.?",
 5821                .@"trailing.type_hash.?" = .@"payload.is_flags.is_reified",
 5822                .@"trailing.field_types.len" = .@"payload.fields_len",
 5823                .@"trailing.field_names.len" = .@"payload.fields_len",
 5824                .@"trailing.field_inits.len" = .@"payload.fields_len",
 5825            },
 5826        },
 5827        .type_tuple = .{
 5828            .summary = .@"struct {...}",
 5829            .payload = TypeTuple,
 5830            .trailing = struct {
 5831                field_types: []Index,
 5832                field_values: []Index,
 5833            },
 5834            .config = .{
 5835                .@"trailing.field_types.len" = .@"payload.fields_len",
 5836                .@"trailing.field_values.len" = .@"payload.fields_len",
 5837            },
 5838        },
 5839        .type_union = .{
 5840            .summary = .@"{.payload.name%summary#\"}",
 5841            .payload = TypeUnion,
 5842            .trailing = struct {
 5843                captures_len: ?u32,
 5844                captures: ?[]CaptureValue,
 5845                type_hash: ?u64,
 5846                field_types: []Index,
 5847                field_aligns: []Alignment,
 5848            },
 5849            .config = .{
 5850                .@"trailing.captures_len.?" = .@"payload.flags.any_captures",
 5851                .@"trailing.captures.?" = .@"payload.flags.any_captures",
 5852                .@"trailing.captures.?.len" = .@"trailing.captures_len.?",
 5853                .@"trailing.type_hash.?" = .@"payload.is_flags.is_reified",
 5854                .@"trailing.field_types.len" = .@"payload.fields_len",
 5855                .@"trailing.field_aligns.len" = .@"payload.fields_len",
 5856            },
 5857        },
 5858        .type_function = .{
 5859            .summary = .@"fn (...) ... {.payload.return_type%summary}",
 5860            .payload = TypeFunction,
 5861            .trailing = struct {
 5862                param_comptime_bits: ?[]u32,
 5863                param_noalias_bits: ?[]u32,
 5864                param_type: []Index,
 5865            },
 5866            .config = .{
 5867                .@"trailing.param_comptime_bits.?" = .@"payload.flags.has_comptime_bits",
 5868                .@"trailing.param_comptime_bits.?.len" = .@"(payload.params_len + 31) / 32",
 5869                .@"trailing.param_noalias_bits.?" = .@"payload.flags.has_noalias_bits",
 5870                .@"trailing.param_noalias_bits.?.len" = .@"(payload.params_len + 31) / 32",
 5871                .@"trailing.param_type.len" = .@"payload.params_len",
 5872            },
 5873        },
 5874
 5875        .undef = .{ .summary = .@"@as({.data%summary}, undefined)", .data = Index },
 5876        .simple_value = .{ .summary = .@"{.index%value#.}", .index = SimpleValue },
 5877        .ptr_nav = .{
 5878            .summary = .@"@as({.payload.ty%summary}, @ptrFromInt(@intFromPtr(&{.payload.nav.fqn%summary#\"}) + ({.payload.byte_offset_a%value} << 32 | {.payload.byte_offset_b%value})))",
 5879            .payload = PtrNav,
 5880        },
 5881        .ptr_comptime_alloc = .{
 5882            .summary = .@"@as({.payload.ty%summary}, @ptrFromInt(@intFromPtr(&comptime_allocs[{.payload.index%summary}]) + ({.payload.byte_offset_a%value} << 32 | {.payload.byte_offset_b%value})))",
 5883            .payload = PtrComptimeAlloc,
 5884        },
 5885        .ptr_uav = .{
 5886            .summary = .@"@as({.payload.ty%summary}, @ptrFromInt(@intFromPtr(&{.payload.val%summary}) + ({.payload.byte_offset_a%value} << 32 | {.payload.byte_offset_b%value})))",
 5887            .payload = PtrUav,
 5888        },
 5889        .ptr_uav_aligned = .{
 5890            .summary = .@"@as({.payload.ty%summary}, @ptrFromInt(@intFromPtr(@as({.payload.orig_ty%summary}, &{.payload.val%summary})) + ({.payload.byte_offset_a%value} << 32 | {.payload.byte_offset_b%value})))",
 5891            .payload = PtrUavAligned,
 5892        },
 5893        .ptr_comptime_field = .{
 5894            .summary = .@"@as({.payload.ty%summary}, @ptrFromInt(@intFromPtr(&{.payload.field_val%summary}) + ({.payload.byte_offset_a%value} << 32 | {.payload.byte_offset_b%value})))",
 5895            .payload = PtrComptimeField,
 5896        },
 5897        .ptr_int = .{
 5898            .summary = .@"@as({.payload.ty%summary}, @ptrFromInt({.payload.byte_offset_a%value} << 32 | {.payload.byte_offset_b%value}))",
 5899            .payload = PtrInt,
 5900        },
 5901        .ptr_eu_payload = .{
 5902            .summary = .@"@as({.payload.ty%summary}, @ptrFromInt(@intFromPtr(&({.payload.base%summary} catch unreachable)) + ({.payload.byte_offset_a%value} << 32 | {.payload.byte_offset_b%value})))",
 5903            .payload = PtrBase,
 5904        },
 5905        .ptr_opt_payload = .{
 5906            .summary = .@"@as({.payload.ty%summary}, @ptrFromInt(@intFromPtr(&{.payload.base%summary}.?) + ({.payload.byte_offset_a%value} << 32 | {.payload.byte_offset_b%value})))",
 5907            .payload = PtrBase,
 5908        },
 5909        .ptr_elem = .{
 5910            .summary = .@"@as({.payload.ty%summary}, @ptrFromInt(@intFromPtr(&{.payload.base%summary}[{.payload.index%summary}]) + ({.payload.byte_offset_a%value} << 32 | {.payload.byte_offset_b%value})))",
 5911            .payload = PtrBaseIndex,
 5912        },
 5913        .ptr_field = .{
 5914            .summary = .@"@as({.payload.ty%summary}, @ptrFromInt(@intFromPtr(&{.payload.base%summary}[{.payload.index%summary}]) + ({.payload.byte_offset_a%value} << 32 | {.payload.byte_offset_b%value})))",
 5915            .payload = PtrBaseIndex,
 5916        },
 5917        .ptr_slice = .{
 5918            .summary = .@"{.payload.ptr%summary}[0..{.payload.len%summary}]",
 5919            .payload = PtrSlice,
 5920        },
 5921        .opt_payload = .{ .summary = .@"@as({.payload.ty%summary}, {.payload.val%summary})", .payload = TypeValue },
 5922        .opt_null = .{ .summary = .@"@as({.data%summary}, null)", .data = Index },
 5923        .int_u8 = .{ .summary = .@"@as(u8, {.data%value})", .data = u8 },
 5924        .int_u16 = .{ .summary = .@"@as(u16, {.data%value})", .data = u16 },
 5925        .int_u32 = .{ .summary = .@"@as(u32, {.data%value})", .data = u32 },
 5926        .int_i32 = .{ .summary = .@"@as(i32, {.data%value})", .data = i32 },
 5927        .int_usize = .{ .summary = .@"@as(usize, {.data%value})", .data = u32 },
 5928        .int_comptime_int_u32 = .{ .summary = .@"{.data%value}", .data = u32 },
 5929        .int_comptime_int_i32 = .{ .summary = .@"{.data%value}", .data = i32 },
 5930        .int_small = .{ .summary = .@"@as({.payload.ty%summary}, {.payload.value%value})", .payload = IntSmall },
 5931        .int_positive = .{},
 5932        .int_negative = .{},
 5933        .int_lazy_align = .{ .summary = .@"@as({.payload.ty%summary}, @alignOf({.payload.lazy_ty%summary}))", .payload = IntLazy },
 5934        .int_lazy_size = .{ .summary = .@"@as({.payload.ty%summary}, @sizeOf({.payload.lazy_ty%summary}))", .payload = IntLazy },
 5935        .error_set_error = .{ .summary = .@"@as({.payload.ty%summary}, error.@{.payload.name%summary})", .payload = Error },
 5936        .error_union_error = .{ .summary = .@"@as({.payload.ty%summary}, error.@{.payload.name%summary})", .payload = Error },
 5937        .error_union_payload = .{ .summary = .@"@as({.payload.ty%summary}, {.payload.val%summary})", .payload = TypeValue },
 5938        .enum_literal = .{ .summary = .@".@{.data%summary}", .data = NullTerminatedString },
 5939        .enum_tag = .{ .summary = .@"@as({.payload.ty%summary}, @enumFromInt({.payload.int%summary}))", .payload = EnumTag },
 5940        .float_f16 = .{ .summary = .@"@as(f16, {.data%value})", .data = f16 },
 5941        .float_f32 = .{ .summary = .@"@as(f32, {.data%value})", .data = f32 },
 5942        .float_f64 = .{ .summary = .@"@as(f64, {.payload%value})", .payload = f64 },
 5943        .float_f80 = .{ .summary = .@"@as(f80, {.payload%value})", .payload = f80 },
 5944        .float_f128 = .{ .summary = .@"@as(f128, {.payload%value})", .payload = f128 },
 5945        .float_c_longdouble_f80 = .{ .summary = .@"@as(c_longdouble, {.payload%value})", .payload = f80 },
 5946        .float_c_longdouble_f128 = .{ .summary = .@"@as(c_longdouble, {.payload%value})", .payload = f128 },
 5947        .float_comptime_float = .{ .summary = .@"{.payload%value}", .payload = f128 },
 5948        .variable = .{ .summary = .@"{.payload.owner_nav.fqn%summary#\"}", .payload = Variable },
 5949        .threadlocal_variable = .{ .summary = .@"{.payload.owner_nav.fqn%summary#\"}", .payload = Variable },
 5950        .@"extern" = .{ .summary = .@"{.payload.owner_nav.fqn%summary#\"}", .payload = Extern },
 5951        .func_decl = .{
 5952            .summary = .@"{.payload.owner_nav.fqn%summary#\"}",
 5953            .payload = FuncDecl,
 5954            .trailing = struct { inferred_error_set: ?Index },
 5955            .config = .{ .@"trailing.inferred_error_set.?" = .@"payload.analysis.inferred_error_set" },
 5956        },
 5957        .func_instance = .{
 5958            .summary = .@"{.payload.owner_nav.fqn%summary#\"}",
 5959            .payload = FuncInstance,
 5960            .trailing = struct {
 5961                inferred_error_set: ?Index,
 5962                param_values: []Index,
 5963            },
 5964            .config = .{
 5965                .@"trailing.inferred_error_set.?" = .@"payload.analysis.inferred_error_set",
 5966                .@"trailing.param_values.len" = .@"payload.ty.payload.params_len",
 5967            },
 5968        },
 5969        .func_coerced = .{
 5970            .summary = .@"@as(*const {.payload.ty%summary}, @ptrCast(&{.payload.func%summary})).*",
 5971            .payload = FuncCoerced,
 5972        },
 5973        .only_possible_value = .{ .summary = .@"@as({.data%summary}, undefined)", .data = Index },
 5974        .union_value = .{ .summary = .@"@as({.payload.ty%summary}, {})", .payload = Union },
 5975        .bytes = .{ .summary = .@"@as({.payload.ty%summary}, {.payload.bytes%summary}.*)", .payload = Bytes },
 5976        .aggregate = .{
 5977            .summary = .@"@as({.payload.ty%summary}, .{...})",
 5978            .payload = Aggregate,
 5979            .trailing = struct { elements: []Index },
 5980            .config = .{ .@"trailing.elements.len" = .@"payload.ty.payload.fields_len" },
 5981        },
 5982        .repeated = .{ .summary = .@"@as({.payload.ty%summary}, @splat({.payload.elem_val%summary}))", .payload = Repeated },
 5983
 5984        .memoized_call = .{
 5985            .summary = .@"@memoize({.payload.func%summary})",
 5986            .payload = MemoizedCall,
 5987            .trailing = struct { arg_values: []Index },
 5988            .config = .{ .@"trailing.arg_values.len" = .@"payload.args_len" },
 5989        },
 5990    };
 5991    fn Payload(comptime tag: Tag) type {
 5992        return @field(encodings, @tagName(tag)).payload;
 5993    }
 5994
    /// Payload of the `variable` and `threadlocal_variable` tags (see `encodings`).
    pub const Variable = struct {
        /// The type of the variable.
        ty: Index,
        /// The initializer value. May be `none`.
        init: Index,
        /// The `Nav` that owns this variable; its fully-qualified name is the
        /// dump summary for this item (see `encodings.variable`).
        owner_nav: Nav.Index,
    };
 6001
    /// Payload of the `extern` tag (see `encodings`).
    pub const Extern = struct {
        // name, is_const, alignment, addrspace come from `owner_nav`.
        /// The type of the extern symbol.
        ty: Index,
        /// Optional library name associated with the extern.
        lib_name: OptionalNullTerminatedString,
        flags: Flags,
        owner_nav: Nav.Index,
        zir_index: TrackedInst.Index,

        pub const Flags = packed struct(u32) {
            linkage: std.builtin.GlobalLinkage,
            visibility: std.builtin.SymbolVisibility,
            is_threadlocal: bool,
            is_dll_import: bool,
            relocation: std.builtin.ExternOptions.Relocation,
            /// Whether this extern originated from a builtin or from `extern` syntax.
            source: Source,
            _: u24 = 0,

            pub const Source = enum(u1) { builtin, syntax };
        };
    };
 6022
    /// Payload of the `aggregate` tag (see `encodings`).
    ///
    /// Trailing:
    /// 0. element: Index for each len
    /// len is determined by the aggregate type.
    pub const Aggregate = struct {
        /// The type of the aggregate.
        ty: Index,
    };
 6030
    /// Payload of the `func_decl` tag (see `encodings`).
    ///
    /// Trailing:
    /// 0. If `analysis.inferred_error_set` is `true`, `Index` of an `error_set` which
    ///    is a regular error set corresponding to the finished inferred error set.
    ///    A `none` value marks that the inferred error set is not resolved yet.
    pub const FuncDecl = struct {
        /// State mutated during semantic analysis; see `FuncAnalysis`.
        analysis: FuncAnalysis,
        owner_nav: Nav.Index,
        /// The function type.
        ty: Index,
        zir_body_inst: TrackedInst.Index,
        /// Source line of the function body's opening brace.
        lbrace_line: u32,
        /// Source line of the function body's closing brace.
        rbrace_line: u32,
        lbrace_column: u32,
        rbrace_column: u32,
    };
 6045
    /// Payload of the `func_instance` tag (see `encodings`): an instantiation
    /// of the generic function `generic_owner`.
    ///
    /// Trailing:
    /// 0. If `analysis.inferred_error_set` is `true`, `Index` of an `error_set` which
    ///    is a regular error set corresponding to the finished inferred error set.
    ///    A `none` value marks that the inferred error set is not resolved yet.
    /// 1. For each parameter of generic_owner: `Index` if comptime, otherwise `none`
    pub const FuncInstance = struct {
        /// State mutated during semantic analysis; see `FuncAnalysis`.
        analysis: FuncAnalysis,
        // Needed by the linker for codegen. Not part of hashing or equality.
        owner_nav: Nav.Index,
        /// The function type of this instantiation.
        ty: Index,
        branch_quota: u32,
        /// Points to a `FuncDecl`.
        generic_owner: Index,
    };
 6060
    /// Payload of the `func_coerced` tag (see `encodings`): the function
    /// `func` viewed as type `ty`.
    pub const FuncCoerced = struct {
        ty: Index,
        func: Index,
    };
 6065
    /// Payload of the `type_error_set` tag (see `encodings`).
    ///
    /// Trailing:
    /// 0. name: NullTerminatedString for each names_len
    pub const ErrorSet = struct {
        /// Number of errors in the set; determines the trailing `name` count.
        names_len: u32,
        /// Maps error names to declaration index.
        names_map: MapIndex,
    };
 6073
    /// Payload of the `type_function` tag (see `encodings`).
    ///
    /// Trailing:
    /// 0. comptime_bits: u32, // if has_comptime_bits
    /// 1. noalias_bits: u32, // if has_noalias_bits
    /// 2. param_type: Index for each params_len
    pub const TypeFunction = struct {
        /// Number of parameters; determines the trailing `param_type` count
        /// and the size of the comptime/noalias bitsets.
        params_len: u32,
        return_type: Index,
        flags: Flags,

        pub const Flags = packed struct(u32) {
            cc: PackedCallingConvention,
            is_var_args: bool,
            is_generic: bool,
            /// Whether the trailing `comptime_bits` data is present.
            has_comptime_bits: bool,
            /// Whether the trailing `noalias_bits` data is present.
            has_noalias_bits: bool,
            is_noinline: bool,
            _: u9 = 0,
        };
    };
 6093
    /// Payload of the `type_union` tag (see `encodings`).
    ///
    /// Trailing:
    /// 0. captures_len: u32 // if `any_captures`
    /// 1. capture: CaptureValue // for each `captures_len`
    /// 2. type_hash: PackedU64 // if `is_reified`
    /// 3. field type: Index for each field; declaration order
    /// 4. field align: Alignment for each field; declaration order
    pub const TypeUnion = struct {
        name: NullTerminatedString,
        name_nav: Nav.Index.Optional,
        flags: Flags,
        /// This could be provided through the tag type, but it is more convenient
        /// to store it directly. This is also necessary for `dumpStatsFallible` to
        /// work on unresolved types.
        fields_len: u32,
        /// Only valid after .have_layout
        size: u32,
        /// Only valid after .have_layout
        padding: u32,
        namespace: NamespaceIndex,
        /// The enum that provides the list of field names and values.
        tag_ty: Index,
        zir_index: TrackedInst.Index,

        pub const Flags = packed struct(u32) {
            /// Whether the trailing `captures_len`/`capture` data is present.
            any_captures: bool,
            runtime_tag: LoadedUnionType.RuntimeTag,
            /// If false, the field alignment trailing data is omitted.
            any_aligned_fields: bool,
            layout: std.builtin.Type.ContainerLayout,
            status: LoadedUnionType.Status,
            requires_comptime: RequiresComptime,
            assumed_runtime_bits: bool,
            assumed_pointer_aligned: bool,
            alignment: Alignment,
            /// Whether the trailing `type_hash` data is present.
            is_reified: bool,
            _: u12 = 0,
        };
    };
 6132
    /// Payload of the `type_struct_packed` and `type_struct_packed_inits`
    /// tags (see `encodings`).
    ///
    /// Trailing:
    /// 0. captures_len: u32 // if `any_captures`
    /// 1. capture: CaptureValue // for each `captures_len`
    /// 2. type_hash: PackedU64 // if `is_reified`
    /// 3. type: Index for each fields_len
    /// 4. name: NullTerminatedString for each fields_len
    /// 5. init: Index for each fields_len // if tag is type_struct_packed_inits
    pub const TypeStructPacked = struct {
        name: NullTerminatedString,
        name_nav: Nav.Index.Optional,
        zir_index: TrackedInst.Index,
        fields_len: u32,
        namespace: NamespaceIndex,
        backing_int_ty: Index,
        /// Maps field names to field index.
        names_map: MapIndex,
        flags: Flags,

        pub const Flags = packed struct(u32) {
            /// Whether the trailing `captures_len`/`capture` data is present.
            any_captures: bool = false,
            /// Dependency loop detection when resolving field inits.
            field_inits_wip: bool = false,
            inits_resolved: bool = false,
            /// Whether the trailing `type_hash` data is present.
            is_reified: bool = false,
            _: u28 = 0,
        };
    };
 6159
    /// Payload of the `type_struct` tag (see `encodings`).
    ///
    /// At first I thought of storing the denormalized data externally, such as...
    ///
    /// * runtime field order
    /// * calculated field offsets
    /// * size and alignment of the struct
    ///
    /// ...since these can be computed based on the other data here. However,
    /// this data does need to be memoized, and therefore stored in memory
    /// while the compiler is running, in order to avoid O(N^2) logic in many
    /// places. Since the data can be stored compactly in the InternPool
    /// representation, it is better for memory usage to store denormalized data
    /// here, and potentially also better for performance as well. It's also simpler
    /// than coming up with some other scheme for the data.
    ///
    /// Trailing:
    /// 0. captures_len: u32 // if `any_captures`
    /// 1. capture: CaptureValue // for each `captures_len`
    /// 2. type_hash: PackedU64 // if `is_reified`
    /// 3. type: Index for each field in declared order
    /// 4. if any_default_inits:
    ///    init: Index // for each field in declared order
    /// 5. if any_aligned_fields:
    ///    align: Alignment // for each field in declared order
    /// 6. if any_comptime_fields:
    ///    field_is_comptime_bits: u32 // minimal number of u32s needed, LSB is field 0
    /// 7. if not is_extern:
    ///    field_index: RuntimeOrder // for each field in runtime order
    /// 8. field_offset: u32 // for each field in declared order, undef until layout_resolved
    pub const TypeStruct = struct {
        name: NullTerminatedString,
        name_nav: Nav.Index.Optional,
        zir_index: TrackedInst.Index,
        namespace: NamespaceIndex,
        fields_len: u32,
        flags: Flags,
        size: u32,

        pub const Flags = packed struct(u32) {
            /// Whether the trailing `captures_len`/`capture` data is present.
            any_captures: bool = false,
            is_extern: bool = false,
            known_non_opv: bool = false,
            requires_comptime: RequiresComptime = @enumFromInt(0),
            assumed_runtime_bits: bool = false,
            assumed_pointer_aligned: bool = false,
            any_comptime_fields: bool = false,
            any_default_inits: bool = false,
            any_aligned_fields: bool = false,
            /// `.none` until layout_resolved
            alignment: Alignment = @enumFromInt(0),
            /// Dependency loop detection when resolving struct alignment.
            alignment_wip: bool = false,
            /// Dependency loop detection when resolving field types.
            field_types_wip: bool = false,
            /// Dependency loop detection when resolving struct layout.
            layout_wip: bool = false,
            /// Indicates whether `size`, `alignment`, runtime field order, and
            /// field offsets are populated.
            layout_resolved: bool = false,
            /// Dependency loop detection when resolving field inits.
            field_inits_wip: bool = false,
            /// Indicates whether `field_inits` has been resolved.
            inits_resolved: bool = false,
            /// The type and all its fields have had their layout resolved, even
            /// through pointers, which `layout_resolved` does not ensure.
            fully_resolved: bool = false,
            /// Whether the trailing `type_hash` data is present.
            is_reified: bool = false,
            _: u8 = 0,
        };
    };
 6229
    /// Payload of the `type_opaque` tag (see `encodings`).
    ///
    /// Trailing:
    /// 0. capture: CaptureValue // for each `captures_len`
    pub const TypeOpaque = struct {
        name: NullTerminatedString,
        name_nav: Nav.Index.Optional,
        /// Contains the declarations inside this opaque.
        namespace: NamespaceIndex,
        /// The index of the `opaque_decl` instruction.
        zir_index: TrackedInst.Index,
        /// `std.math.maxInt(u32)` indicates this type is reified.
        captures_len: u32,
    };
 6242};
 6243
/// State that is mutable during semantic analysis. This data is not used for
/// equality or hashing, except for `inferred_error_set` which is considered
/// to be part of the type of the function.
/// Stored in the `analysis` field of `FuncDecl` and `FuncInstance`.
pub const FuncAnalysis = packed struct(u32) {
    is_analyzed: bool,
    branch_hint: std.builtin.BranchHint,
    is_noinline: bool,
    has_error_trace: bool,
    /// True if this function has an inferred error set.
    /// When set, a trailing `inferred_error_set: Index` follows the payload
    /// (see `encodings.func_decl` and `encodings.func_instance`).
    inferred_error_set: bool,
    disable_instrumentation: bool,
    disable_intrinsics: bool,

    _: u23 = 0,
};
 6259
/// Payload of the `bytes` tag (see `encodings`).
pub const Bytes = struct {
    /// The type of the aggregate
    ty: Index,
    /// Index into strings, of len ip.aggregateTypeLen(ty)
    bytes: String,
};
 6266
/// Payload of the `repeated` tag (see `encodings`): an aggregate in which
/// every element has the same value.
pub const Repeated = struct {
    /// The type of the aggregate.
    ty: Index,
    /// The value of every element.
    elem_val: Index,
};
 6273
/// Payload of the `type_tuple` tag (see `encodings`).
///
/// Trailing:
/// 0. type: Index for each fields_len
/// 1. value: Index for each fields_len
pub const TypeTuple = struct {
    fields_len: u32,
};
 6280
/// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to
/// implement logic that only wants to deal with types because the logic can
/// ignore all simple values. Note that technically, types are values.
///
/// Each variant's integer value is identical to the corresponding well-known
/// `Index` value, so the two representations can be converted directly.
pub const SimpleType = enum(u32) {
    f16 = @intFromEnum(Index.f16_type),
    f32 = @intFromEnum(Index.f32_type),
    f64 = @intFromEnum(Index.f64_type),
    f80 = @intFromEnum(Index.f80_type),
    f128 = @intFromEnum(Index.f128_type),
    usize = @intFromEnum(Index.usize_type),
    isize = @intFromEnum(Index.isize_type),
    c_char = @intFromEnum(Index.c_char_type),
    c_short = @intFromEnum(Index.c_short_type),
    c_ushort = @intFromEnum(Index.c_ushort_type),
    c_int = @intFromEnum(Index.c_int_type),
    c_uint = @intFromEnum(Index.c_uint_type),
    c_long = @intFromEnum(Index.c_long_type),
    c_ulong = @intFromEnum(Index.c_ulong_type),
    c_longlong = @intFromEnum(Index.c_longlong_type),
    c_ulonglong = @intFromEnum(Index.c_ulonglong_type),
    c_longdouble = @intFromEnum(Index.c_longdouble_type),
    anyopaque = @intFromEnum(Index.anyopaque_type),
    bool = @intFromEnum(Index.bool_type),
    void = @intFromEnum(Index.void_type),
    type = @intFromEnum(Index.type_type),
    anyerror = @intFromEnum(Index.anyerror_type),
    comptime_int = @intFromEnum(Index.comptime_int_type),
    comptime_float = @intFromEnum(Index.comptime_float_type),
    noreturn = @intFromEnum(Index.noreturn_type),
    null = @intFromEnum(Index.null_type),
    undefined = @intFromEnum(Index.undefined_type),
    enum_literal = @intFromEnum(Index.enum_literal_type),

    adhoc_inferred_error_set = @intFromEnum(Index.adhoc_inferred_error_set_type),
    generic_poison = @intFromEnum(Index.generic_poison_type),
};
 6317
/// See `SimpleType` for the rationale of keeping simple types and simple
/// values in separate enums. Each variant's integer value is identical to the
/// corresponding well-known `Index` value.
pub const SimpleValue = enum(u32) {
    /// This is untyped `undefined`.
    undefined = @intFromEnum(Index.undef),
    void = @intFromEnum(Index.void_value),
    /// This is untyped `null`.
    null = @intFromEnum(Index.null_value),
    /// This is the untyped empty struct/array literal: `.{}`
    empty_tuple = @intFromEnum(Index.empty_tuple),
    true = @intFromEnum(Index.bool_true),
    false = @intFromEnum(Index.bool_false),
    @"unreachable" = @intFromEnum(Index.unreachable_value),
};
 6330
 6331/// Stored as a power-of-two, with one special value to indicate none.
 6332pub const Alignment = enum(u6) {
 6333    @"1" = 0,
 6334    @"2" = 1,
 6335    @"4" = 2,
 6336    @"8" = 3,
 6337    @"16" = 4,
 6338    @"32" = 5,
 6339    @"64" = 6,
 6340    none = std.math.maxInt(u6),
 6341    _,
 6342
 6343    pub fn toByteUnits(a: Alignment) ?u64 {
 6344        return switch (a) {
 6345            .none => null,
 6346            else => @as(u64, 1) << @intFromEnum(a),
 6347        };
 6348    }
 6349
 6350    pub fn fromByteUnits(n: u64) Alignment {
 6351        if (n == 0) return .none;
 6352        assert(std.math.isPowerOfTwo(n));
 6353        return @enumFromInt(@ctz(n));
 6354    }
 6355
 6356    pub fn fromNonzeroByteUnits(n: u64) Alignment {
 6357        assert(n != 0);
 6358        return fromByteUnits(n);
 6359    }
 6360
 6361    pub fn toLog2Units(a: Alignment) u6 {
 6362        assert(a != .none);
 6363        return @intFromEnum(a);
 6364    }
 6365
 6366    /// This is just a glorified `@enumFromInt` but using it can help
 6367    /// document the intended conversion.
 6368    /// The parameter uses a u32 for convenience at the callsite.
 6369    pub fn fromLog2Units(a: u32) Alignment {
 6370        assert(a != @intFromEnum(Alignment.none));
 6371        return @enumFromInt(a);
 6372    }
 6373
 6374    pub fn order(lhs: Alignment, rhs: Alignment) std.math.Order {
 6375        assert(lhs != .none);
 6376        assert(rhs != .none);
 6377        return std.math.order(@intFromEnum(lhs), @intFromEnum(rhs));
 6378    }
 6379
 6380    /// Relaxed comparison. We have this as default because a lot of callsites
 6381    /// were upgraded from directly using comparison operators on byte units,
 6382    /// with the `none` value represented by zero.
 6383    /// Prefer `compareStrict` if possible.
 6384    pub fn compare(lhs: Alignment, op: std.math.CompareOperator, rhs: Alignment) bool {
 6385        return std.math.compare(lhs.toRelaxedCompareUnits(), op, rhs.toRelaxedCompareUnits());
 6386    }
 6387
 6388    pub fn compareStrict(lhs: Alignment, op: std.math.CompareOperator, rhs: Alignment) bool {
 6389        assert(lhs != .none);
 6390        assert(rhs != .none);
 6391        return std.math.compare(@intFromEnum(lhs), op, @intFromEnum(rhs));
 6392    }
 6393
 6394    /// Treats `none` as zero.
 6395    /// This matches previous behavior of using `@max` directly on byte units.
 6396    /// Prefer `maxStrict` if possible.
 6397    pub fn max(lhs: Alignment, rhs: Alignment) Alignment {
 6398        if (lhs == .none) return rhs;
 6399        if (rhs == .none) return lhs;
 6400        return maxStrict(lhs, rhs);
 6401    }
 6402
 6403    pub fn maxStrict(lhs: Alignment, rhs: Alignment) Alignment {
 6404        assert(lhs != .none);
 6405        assert(rhs != .none);
 6406        return @enumFromInt(@max(@intFromEnum(lhs), @intFromEnum(rhs)));
 6407    }
 6408
 6409    /// Treats `none` as zero.
 6410    /// This matches previous behavior of using `@min` directly on byte units.
 6411    /// Prefer `minStrict` if possible.
 6412    pub fn min(lhs: Alignment, rhs: Alignment) Alignment {
 6413        if (lhs == .none) return lhs;
 6414        if (rhs == .none) return rhs;
 6415        return minStrict(lhs, rhs);
 6416    }
 6417
 6418    pub fn minStrict(lhs: Alignment, rhs: Alignment) Alignment {
 6419        assert(lhs != .none);
 6420        assert(rhs != .none);
 6421        return @enumFromInt(@min(@intFromEnum(lhs), @intFromEnum(rhs)));
 6422    }
 6423
 6424    /// Align an address forwards to this alignment.
 6425    pub fn forward(a: Alignment, addr: u64) u64 {
 6426        assert(a != .none);
 6427        const x = (@as(u64, 1) << @intFromEnum(a)) - 1;
 6428        return (addr + x) & ~x;
 6429    }
 6430
 6431    /// Align an address backwards to this alignment.
 6432    pub fn backward(a: Alignment, addr: u64) u64 {
 6433        assert(a != .none);
 6434        const x = (@as(u64, 1) << @intFromEnum(a)) - 1;
 6435        return addr & ~x;
 6436    }
 6437
 6438    /// Check if an address is aligned to this amount.
 6439    pub fn check(a: Alignment, addr: u64) bool {
 6440        assert(a != .none);
 6441        return @ctz(addr) >= @intFromEnum(a);
 6442    }
 6443
 6444    /// An array of `Alignment` objects existing within the `extra` array.
 6445    /// This type exists to provide a struct with lifetime that is
 6446    /// not invalidated when items are added to the `InternPool`.
 6447    pub const Slice = struct {
 6448        tid: Zcu.PerThread.Id,
 6449        start: u32,
 6450        /// This is the number of alignment values, not the number of u32 elements.
 6451        len: u32,
 6452
 6453        pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 };
 6454
 6455        pub fn get(slice: Slice, ip: *const InternPool) []Alignment {
 6456            // TODO: implement @ptrCast between slices changing the length
 6457            const extra = ip.getLocalShared(slice.tid).extra.acquire();
 6458            //const bytes: []u8 = @ptrCast(extra.view().items(.@"0")[slice.start..]);
 6459            const bytes: []u8 = std.mem.sliceAsBytes(extra.view().items(.@"0")[slice.start..]);
 6460            return @ptrCast(bytes[0..slice.len]);
 6461        }
 6462    };
 6463
 6464    pub fn toRelaxedCompareUnits(a: Alignment) u8 {
 6465        const n: u8 = @intFromEnum(a);
 6466        assert(n <= @intFromEnum(Alignment.none));
 6467        if (n == @intFromEnum(Alignment.none)) return 0;
 6468        return n + 1;
 6469    }
 6470
 6471    pub fn toStdMem(a: Alignment) std.mem.Alignment {
 6472        assert(a != .none);
 6473        return @enumFromInt(@intFromEnum(a));
 6474    }
 6475
 6476    pub fn fromStdMem(a: std.mem.Alignment) Alignment {
 6477        const r: Alignment = @enumFromInt(@intFromEnum(a));
 6478        assert(r != .none);
 6479        return r;
 6480    }
 6481
 6482    const LlvmBuilderAlignment = std.zig.llvm.Builder.Alignment;
 6483
 6484    pub fn toLlvm(a: Alignment) LlvmBuilderAlignment {
 6485        return @enumFromInt(@intFromEnum(a));
 6486    }
 6487
 6488    pub fn fromLlvm(a: LlvmBuilderAlignment) Alignment {
 6489        return @enumFromInt(@intFromEnum(a));
 6490    }
 6491};
 6492
/// Used for non-sentineled arrays that have length fitting in u32, as well as
/// vectors.
pub const Vector = struct {
    /// Number of elements.
    len: u32,
    /// Element type.
    child: Index,
};
 6499
 6500pub const Array = struct {
 6501    len0: u32,
 6502    len1: u32,
 6503    child: Index,
 6504    sentinel: Index,
 6505
 6506    pub const Length = PackedU64;
 6507
 6508    pub fn getLength(a: Array) u64 {
 6509        return (PackedU64{
 6510            .a = a.len0,
 6511            .b = a.len1,
 6512        }).get();
 6513    }
 6514};
 6515
/// Extra data for an enum whose integer tag type was explicitly provided.
/// Trailing:
/// 0. owner_union: Index // if `zir_index == .none`
/// 1. capture: CaptureValue // for each `captures_len`
/// 2. type_hash: PackedU64 // if reified (`captures_len == std.math.maxInt(u32)`)
/// 3. field name: NullTerminatedString for each fields_len; declaration order
/// 4. tag value: Index for each fields_len; declaration order
pub const EnumExplicit = struct {
    /// The type's name.
    name: NullTerminatedString,
    /// NOTE(review): presumably the `Nav` through which this type is named,
    /// if any -- confirm against users of this field.
    name_nav: Nav.Index.Optional,
    /// `std.math.maxInt(u32)` indicates this type is reified.
    captures_len: u32,
    namespace: NamespaceIndex,
    /// An integer type which is used for the numerical value of the enum, which
    /// has been explicitly provided by the enum declaration.
    int_tag_type: Index,
    /// Number of trailing field names (and tag values, when present).
    fields_len: u32,
    /// Maps field names to declaration index.
    names_map: MapIndex,
    /// Maps field values to declaration index.
    /// If this is `none`, it means the trailing tag values are absent because
    /// they are auto-numbered.
    values_map: OptionalMapIndex,
    /// `none` means this is a generated tag type.
    /// There will be a trailing union type for which this is a tag.
    zir_index: TrackedInst.Index.Optional,
};
 6542
/// Extra data for an enum whose integer tag type was inferred automatically.
/// Unlike `EnumExplicit`, there are no trailing tag values: fields are
/// auto-numbered in declaration order.
/// Trailing:
/// 0. owner_union: Index // if `zir_index == .none`
/// 1. capture: CaptureValue // for each `captures_len`
/// 2. type_hash: PackedU64 // if reified (`captures_len == std.math.maxInt(u32)`)
/// 3. field name: NullTerminatedString for each fields_len; declaration order
pub const EnumAuto = struct {
    /// The type's name.
    name: NullTerminatedString,
    /// NOTE(review): presumably the `Nav` through which this type is named,
    /// if any -- confirm against users of this field.
    name_nav: Nav.Index.Optional,
    /// `std.math.maxInt(u32)` indicates this type is reified.
    captures_len: u32,
    namespace: NamespaceIndex,
    /// An integer type which is used for the numerical value of the enum, which
    /// was inferred by Zig based on the number of tags.
    int_tag_type: Index,
    /// Number of trailing field names.
    fields_len: u32,
    /// Maps field names to declaration index.
    names_map: MapIndex,
    /// `none` means this is a generated tag type.
    /// There will be a trailing union type for which this is a tag.
    zir_index: TrackedInst.Index.Optional,
};
 6564
 6565pub const PackedU64 = packed struct(u64) {
 6566    a: u32,
 6567    b: u32,
 6568
 6569    pub fn get(x: PackedU64) u64 {
 6570        return @bitCast(x);
 6571    }
 6572
 6573    pub fn init(x: u64) PackedU64 {
 6574        return @bitCast(x);
 6575    }
 6576};
 6577
 6578pub const PtrNav = struct {
 6579    ty: Index,
 6580    nav: Nav.Index,
 6581    byte_offset_a: u32,
 6582    byte_offset_b: u32,
 6583    fn init(ty: Index, nav: Nav.Index, byte_offset: u64) @This() {
 6584        return .{
 6585            .ty = ty,
 6586            .nav = nav,
 6587            .byte_offset_a = @intCast(byte_offset >> 32),
 6588            .byte_offset_b = @truncate(byte_offset),
 6589        };
 6590    }
 6591    fn byteOffset(data: @This()) u64 {
 6592        return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
 6593    }
 6594};
 6595
 6596pub const PtrUav = struct {
 6597    ty: Index,
 6598    val: Index,
 6599    byte_offset_a: u32,
 6600    byte_offset_b: u32,
 6601    fn init(ty: Index, val: Index, byte_offset: u64) @This() {
 6602        return .{
 6603            .ty = ty,
 6604            .val = val,
 6605            .byte_offset_a = @intCast(byte_offset >> 32),
 6606            .byte_offset_b = @truncate(byte_offset),
 6607        };
 6608    }
 6609    fn byteOffset(data: @This()) u64 {
 6610        return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
 6611    }
 6612};
 6613
 6614pub const PtrUavAligned = struct {
 6615    ty: Index,
 6616    val: Index,
 6617    /// Must be nonequal to `ty`. Only the alignment from this value is important.
 6618    orig_ty: Index,
 6619    byte_offset_a: u32,
 6620    byte_offset_b: u32,
 6621    fn init(ty: Index, val: Index, orig_ty: Index, byte_offset: u64) @This() {
 6622        return .{
 6623            .ty = ty,
 6624            .val = val,
 6625            .orig_ty = orig_ty,
 6626            .byte_offset_a = @intCast(byte_offset >> 32),
 6627            .byte_offset_b = @truncate(byte_offset),
 6628        };
 6629    }
 6630    fn byteOffset(data: @This()) u64 {
 6631        return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
 6632    }
 6633};
 6634
 6635pub const PtrComptimeAlloc = struct {
 6636    ty: Index,
 6637    index: ComptimeAllocIndex,
 6638    byte_offset_a: u32,
 6639    byte_offset_b: u32,
 6640    fn init(ty: Index, index: ComptimeAllocIndex, byte_offset: u64) @This() {
 6641        return .{
 6642            .ty = ty,
 6643            .index = index,
 6644            .byte_offset_a = @intCast(byte_offset >> 32),
 6645            .byte_offset_b = @truncate(byte_offset),
 6646        };
 6647    }
 6648    fn byteOffset(data: @This()) u64 {
 6649        return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
 6650    }
 6651};
 6652
 6653pub const PtrComptimeField = struct {
 6654    ty: Index,
 6655    field_val: Index,
 6656    byte_offset_a: u32,
 6657    byte_offset_b: u32,
 6658    fn init(ty: Index, field_val: Index, byte_offset: u64) @This() {
 6659        return .{
 6660            .ty = ty,
 6661            .field_val = field_val,
 6662            .byte_offset_a = @intCast(byte_offset >> 32),
 6663            .byte_offset_b = @truncate(byte_offset),
 6664        };
 6665    }
 6666    fn byteOffset(data: @This()) u64 {
 6667        return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
 6668    }
 6669};
 6670
 6671pub const PtrBase = struct {
 6672    ty: Index,
 6673    base: Index,
 6674    byte_offset_a: u32,
 6675    byte_offset_b: u32,
 6676    fn init(ty: Index, base: Index, byte_offset: u64) @This() {
 6677        return .{
 6678            .ty = ty,
 6679            .base = base,
 6680            .byte_offset_a = @intCast(byte_offset >> 32),
 6681            .byte_offset_b = @truncate(byte_offset),
 6682        };
 6683    }
 6684    fn byteOffset(data: @This()) u64 {
 6685        return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
 6686    }
 6687};
 6688
 6689pub const PtrBaseIndex = struct {
 6690    ty: Index,
 6691    base: Index,
 6692    index: Index,
 6693    byte_offset_a: u32,
 6694    byte_offset_b: u32,
 6695    fn init(ty: Index, base: Index, index: Index, byte_offset: u64) @This() {
 6696        return .{
 6697            .ty = ty,
 6698            .base = base,
 6699            .index = index,
 6700            .byte_offset_a = @intCast(byte_offset >> 32),
 6701            .byte_offset_b = @truncate(byte_offset),
 6702        };
 6703    }
 6704    fn byteOffset(data: @This()) u64 {
 6705        return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
 6706    }
 6707};
 6708
 6709pub const PtrInt = struct {
 6710    ty: Index,
 6711    byte_offset_a: u32,
 6712    byte_offset_b: u32,
 6713    fn init(ty: Index, byte_offset: u64) @This() {
 6714        return .{
 6715            .ty = ty,
 6716            .byte_offset_a = @intCast(byte_offset >> 32),
 6717            .byte_offset_b = @truncate(byte_offset),
 6718        };
 6719    }
 6720    fn byteOffset(data: @This()) u64 {
 6721        return @as(u64, data.byte_offset_a) << 32 | data.byte_offset_b;
 6722    }
 6723};
 6724
/// A slice value: a many-pointer paired with a length.
pub const PtrSlice = struct {
    /// The slice type.
    ty: Index,
    /// A many pointer value.
    ptr: Index,
    /// A usize value.
    len: Index,
};
 6733
/// A big integer value.
/// Trailing: Limb for every limbs_len
pub const Int = packed struct {
    ty: Index,
    /// Number of trailing limbs.
    limbs_len: u32,

    // How many `Limb` elements the `Int` header itself occupies; `@divExact`
    // asserts at comptime that the header size is a multiple of a limb.
    const limbs_items_len = @divExact(@sizeOf(Int), @sizeOf(Limb));
};
 6741
/// An integer value whose magnitude fits directly in a u32.
pub const IntSmall = struct {
    ty: Index,
    value: u32,
};
 6746
/// An integer value that is lazily derived from the type `lazy_ty`
/// (NOTE(review): presumably e.g. a not-yet-resolved size/alignment -- confirm
/// against users of this struct).
pub const IntLazy = struct {
    ty: Index,
    lazy_ty: Index,
};
 6751
 6752/// A f64 value, broken up into 2 u32 parts.
 6753pub const Float64 = struct {
 6754    piece0: u32,
 6755    piece1: u32,
 6756
 6757    pub fn get(self: Float64) f64 {
 6758        const int_bits = @as(u64, self.piece0) | (@as(u64, self.piece1) << 32);
 6759        return @bitCast(int_bits);
 6760    }
 6761
 6762    fn pack(val: f64) Float64 {
 6763        const bits: u64 = @bitCast(val);
 6764        return .{
 6765            .piece0 = @truncate(bits),
 6766            .piece1 = @truncate(bits >> 32),
 6767        };
 6768    }
 6769};
 6770
 6771/// A f80 value, broken up into 2 u32 parts and a u16 part zero-padded to a u32.
 6772pub const Float80 = struct {
 6773    piece0: u32,
 6774    piece1: u32,
 6775    piece2: u32, // u16 part, top bits
 6776
 6777    pub fn get(self: Float80) f80 {
 6778        const int_bits = @as(u80, self.piece0) |
 6779            (@as(u80, self.piece1) << 32) |
 6780            (@as(u80, self.piece2) << 64);
 6781        return @bitCast(int_bits);
 6782    }
 6783
 6784    fn pack(val: f80) Float80 {
 6785        const bits: u80 = @bitCast(val);
 6786        return .{
 6787            .piece0 = @truncate(bits),
 6788            .piece1 = @truncate(bits >> 32),
 6789            .piece2 = @truncate(bits >> 64),
 6790        };
 6791    }
 6792};
 6793
 6794/// A f128 value, broken up into 4 u32 parts.
 6795pub const Float128 = struct {
 6796    piece0: u32,
 6797    piece1: u32,
 6798    piece2: u32,
 6799    piece3: u32,
 6800
 6801    pub fn get(self: Float128) f128 {
 6802        const int_bits = @as(u128, self.piece0) |
 6803            (@as(u128, self.piece1) << 32) |
 6804            (@as(u128, self.piece2) << 64) |
 6805            (@as(u128, self.piece3) << 96);
 6806        return @bitCast(int_bits);
 6807    }
 6808
 6809    fn pack(val: f128) Float128 {
 6810        const bits: u128 = @bitCast(val);
 6811        return .{
 6812            .piece0 = @truncate(bits),
 6813            .piece1 = @truncate(bits >> 32),
 6814            .piece2 = @truncate(bits >> 64),
 6815            .piece3 = @truncate(bits >> 96),
 6816        };
 6817    }
 6818};
 6819
/// The cached result of a comptime function call.
/// Trailing:
/// 0. arg value: Index for each args_len
pub const MemoizedCall = struct {
    /// The function that was called.
    func: Index,
    /// Number of trailing argument values.
    args_len: u32,
    /// The memoized result of the call.
    result: Index,
    /// NOTE(review): presumably the backwards-branch count consumed by the
    /// call, for comptime branch quota accounting -- confirm.
    branch_count: u32,
};
 6828
/// Initializes the pool for up to `available_threads` concurrent writers:
/// allocates per-thread `locals` and a power-of-two number of `shards`,
/// computes the cached `tid` width/shift values, then interns the empty string
/// and all statically-known keys so each lands at its expected `Index`.
/// Asserts the pool is empty and `available_threads` is in [1, 255].
/// On error, everything allocated so far is freed via `deinit`.
pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
    errdefer ip.deinit(gpa);
    assert(ip.locals.len == 0 and ip.shards.len == 0);
    assert(available_threads > 0 and available_threads <= std.math.maxInt(u8));

    const used_threads = if (single_threaded) 1 else available_threads;
    ip.locals = try gpa.alloc(Local, used_threads);
    // All per-thread state starts out empty.
    @memset(ip.locals, .{
        .shared = .{
            .items = .empty,
            .extra = .empty,
            .limbs = .empty,
            .strings = .empty,
            .string_bytes = .empty,
            .tracked_insts = .empty,
            .files = .empty,
            .maps = .empty,
            .navs = .empty,
            .comptime_units = .empty,

            .namespaces = .empty,
        },
        .mutate = .{
            .arena = .{},

            .items = .empty,
            .extra = .empty,
            .limbs = .empty,
            .strings = .empty,
            .string_bytes = .empty,
            .tracked_insts = .empty,
            .files = .empty,
            .maps = .empty,
            .navs = .empty,
            .comptime_units = .empty,

            .namespaces = .empty,
        },
    });
    // Seed each thread's `strings` list with a single zero entry
    // (NOTE(review): presumably reserving offset 0 -- confirm against the
    // string accessors).
    for (ip.locals) |*local| try local.getMutableStrings(gpa).append(.{0});

    // Cache the number of tid bits and the shift amounts used to place a tid
    // in the top bits of 30/31/32-bit values (see the field documentation).
    ip.tid_width = @intCast(std.math.log2_int_ceil(usize, used_threads));
    ip.tid_shift_30 = if (single_threaded) 0 else 30 - ip.tid_width;
    ip.tid_shift_31 = if (single_threaded) 0 else 31 - ip.tid_width;
    // Saturating add: when `tid_width` is 0, `tid_shift_31` is already the
    // maximum of its type (31), so `+| 1` stays there instead of overflowing.
    ip.tid_shift_32 = if (single_threaded) 0 else ip.tid_shift_31 +| 1;
    // Shard count must be a power of two (see the `shards` field doc).
    ip.shards = try gpa.alloc(Shard, @as(usize, 1) << ip.tid_width);
    @memset(ip.shards, .{
        .shared = .{
            .map = Shard.Map(Index).empty,
            .string_map = Shard.Map(OptionalNullTerminatedString).empty,
            .tracked_inst_map = Shard.Map(TrackedInst.Index.Optional).empty,
        },
        .mutate = .{
            .map = Shard.Mutate.empty,
            .string_map = Shard.Mutate.empty,
            .tracked_inst_map = Shard.Mutate.empty,
        },
    });

    // Reserve string index 0 for an empty string.
    assert((try ip.getOrPutString(gpa, .main, "", .no_embedded_nulls)) == .empty);

    // This inserts all the statically-known values into the intern pool in the
    // order expected.
    for (&static_keys, 0..) |key, key_index| switch (@as(Index, @enumFromInt(key_index))) {
        // The empty tuple type is created via `getTupleType` rather than `get`.
        .empty_tuple_type => assert(try ip.getTupleType(gpa, .main, .{
            .types = &.{},
            .values = &.{},
        }) == .empty_tuple_type),
        else => |expected_index| assert(try ip.get(gpa, .main, key) == expected_index),
    };

    if (std.debug.runtime_safety) {
        // Sanity check.
        assert(ip.indexToKey(.bool_true).simple_value == .true);
        assert(ip.indexToKey(.bool_false).simple_value == .false);
    }
}
 6907
/// Frees all memory owned by the pool: the dependency tables, the shard
/// array, and every per-thread `Local` (namespaces, maps, and arena).
/// Leaves `ip` undefined. Deallocation cannot fail.
pub fn deinit(ip: *InternPool, gpa: Allocator) void {
    if (debug_state.enable_checks) std.debug.assert(debug_state.intern_pool == null);

    // Dependency-tracking tables.
    ip.src_hash_deps.deinit(gpa);
    ip.nav_val_deps.deinit(gpa);
    ip.nav_ty_deps.deinit(gpa);
    ip.interned_deps.deinit(gpa);
    ip.zon_file_deps.deinit(gpa);
    ip.embed_file_deps.deinit(gpa);
    ip.namespace_deps.deinit(gpa);
    ip.namespace_name_deps.deinit(gpa);

    ip.first_dependency.deinit(gpa);

    ip.dep_entries.deinit(gpa);
    ip.free_dep_entries.deinit(gpa);

    gpa.free(ip.shards);
    for (ip.locals) |*local| {
        const buckets_len = local.mutate.namespaces.buckets_list.len;
        // Every bucket except the last is fully populated; the last holds only
        // `last_bucket_len` live namespaces.
        if (buckets_len > 0) for (
            local.shared.namespaces.view().items(.@"0")[0..buckets_len],
            0..,
        ) |namespace_bucket, buckets_index| {
            for (namespace_bucket[0..if (buckets_index < buckets_len - 1)
                namespace_bucket.len
            else
                local.mutate.namespaces.last_bucket_len]) |*namespace|
            {
                namespace.pub_decls.deinit(gpa);
                namespace.priv_decls.deinit(gpa);
                namespace.comptime_decls.deinit(gpa);
                namespace.test_decls.deinit(gpa);
            }
        };
        const maps = local.getMutableMaps(gpa);
        if (maps.mutate.len > 0) for (maps.view().items(.@"0")) |*map| map.deinit(gpa);
        // Everything else owned by this `Local` lives in its arena.
        local.mutate.arena.promote(gpa).deinit();
    }
    gpa.free(ip.locals);

    ip.* = undefined;
}
 6951
/// Records `ip` as this thread's active pool in `debug_state` so it can be
/// found from a debugger. No-op unless `debug_state.enable` is set.
/// When checks are enabled, asserts no other pool is already active.
pub fn activate(ip: *const InternPool) void {
    if (!debug_state.enable) return;
    // Reference the per-type `debug_state` decls so they are analyzed despite
    // being otherwise unused (NOTE(review): presumably so a debugger can see
    // them -- confirm).
    _ = Index.Unwrapped.debug_state;
    _ = String.debug_state;
    _ = OptionalString.debug_state;
    _ = NullTerminatedString.debug_state;
    _ = OptionalNullTerminatedString.debug_state;
    _ = TrackedInst.Index.debug_state;
    _ = TrackedInst.Index.Optional.debug_state;
    _ = Nav.Index.debug_state;
    _ = Nav.Index.Optional.debug_state;
    if (debug_state.enable_checks) std.debug.assert(debug_state.intern_pool == null);
    debug_state.intern_pool = ip;
}
 6966
/// Clears this thread's active pool previously set by `activate`.
/// No-op unless `debug_state.enable` is set; asserts `ip` is the active pool.
pub fn deactivate(ip: *const InternPool) void {
    if (!debug_state.enable) return;
    std.debug.assert(debug_state.intern_pool == ip);
    if (debug_state.enable_checks) debug_state.intern_pool = null;
}
 6972
/// For debugger access only.
const debug_state = struct {
    // Master switch; when false, `activate`/`deactivate` are no-ops.
    const enable = false;
    // Assertions that only one pool is active per thread; never enabled in
    // single-threaded builds.
    const enable_checks = enable and !builtin.single_threaded;
    // The pool most recently passed to `activate` on this thread.
    threadlocal var intern_pool: ?*const InternPool = null;
};
 6979
 6980pub fn indexToKey(ip: *const InternPool, index: Index) Key {
 6981    assert(index != .none);
 6982    const unwrapped_index = index.unwrap(ip);
 6983    const item = unwrapped_index.getItem(ip);
 6984    const data = item.data;
 6985    return switch (item.tag) {
 6986        .removed => unreachable,
 6987        .type_int_signed => .{
 6988            .int_type = .{
 6989                .signedness = .signed,
 6990                .bits = @intCast(data),
 6991            },
 6992        },
 6993        .type_int_unsigned => .{
 6994            .int_type = .{
 6995                .signedness = .unsigned,
 6996                .bits = @intCast(data),
 6997            },
 6998        },
 6999        .type_array_big => {
 7000            const array_info = extraData(unwrapped_index.getExtra(ip), Array, data);
 7001            return .{ .array_type = .{
 7002                .len = array_info.getLength(),
 7003                .child = array_info.child,
 7004                .sentinel = array_info.sentinel,
 7005            } };
 7006        },
 7007        .type_array_small => {
 7008            const array_info = extraData(unwrapped_index.getExtra(ip), Vector, data);
 7009            return .{ .array_type = .{
 7010                .len = array_info.len,
 7011                .child = array_info.child,
 7012                .sentinel = .none,
 7013            } };
 7014        },
 7015        .simple_type => .{ .simple_type = @enumFromInt(@intFromEnum(index)) },
 7016        .simple_value => .{ .simple_value = @enumFromInt(@intFromEnum(index)) },
 7017
 7018        .type_vector => {
 7019            const vector_info = extraData(unwrapped_index.getExtra(ip), Vector, data);
 7020            return .{ .vector_type = .{
 7021                .len = vector_info.len,
 7022                .child = vector_info.child,
 7023            } };
 7024        },
 7025
 7026        .type_pointer => .{ .ptr_type = extraData(unwrapped_index.getExtra(ip), Tag.TypePointer, data) },
 7027
 7028        .type_slice => {
 7029            const many_ptr_index: Index = @enumFromInt(data);
 7030            const many_ptr_unwrapped = many_ptr_index.unwrap(ip);
 7031            const many_ptr_item = many_ptr_unwrapped.getItem(ip);
 7032            assert(many_ptr_item.tag == .type_pointer);
 7033            var ptr_info = extraData(many_ptr_unwrapped.getExtra(ip), Tag.TypePointer, many_ptr_item.data);
 7034            ptr_info.flags.size = .slice;
 7035            return .{ .ptr_type = ptr_info };
 7036        },
 7037
 7038        .type_optional => .{ .opt_type = @enumFromInt(data) },
 7039        .type_anyframe => .{ .anyframe_type = @enumFromInt(data) },
 7040
 7041        .type_error_union => .{ .error_union_type = extraData(unwrapped_index.getExtra(ip), Key.ErrorUnionType, data) },
 7042        .type_anyerror_union => .{ .error_union_type = .{
 7043            .error_set_type = .anyerror_type,
 7044            .payload_type = @enumFromInt(data),
 7045        } },
 7046        .type_error_set => .{ .error_set_type = extraErrorSet(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) },
 7047        .type_inferred_error_set => .{
 7048            .inferred_error_set_type = @enumFromInt(data),
 7049        },
 7050
 7051        .type_opaque => .{ .opaque_type = ns: {
 7052            const extra = extraDataTrail(unwrapped_index.getExtra(ip), Tag.TypeOpaque, data);
 7053            if (extra.data.captures_len == std.math.maxInt(u32)) {
 7054                break :ns .{ .reified = .{
 7055                    .zir_index = extra.data.zir_index,
 7056                    .type_hash = 0,
 7057                } };
 7058            }
 7059            break :ns .{ .declared = .{
 7060                .zir_index = extra.data.zir_index,
 7061                .captures = .{ .owned = .{
 7062                    .tid = unwrapped_index.tid,
 7063                    .start = extra.end,
 7064                    .len = extra.data.captures_len,
 7065                } },
 7066            } };
 7067        } },
 7068
 7069        .type_struct => .{ .struct_type = ns: {
 7070            const extra_list = unwrapped_index.getExtra(ip);
 7071            const extra_items = extra_list.view().items(.@"0");
 7072            const zir_index: TrackedInst.Index = @enumFromInt(extra_items[data + std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?]);
 7073            const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .unordered));
 7074            const end_extra_index = data + @as(u32, @typeInfo(Tag.TypeStruct).@"struct".fields.len);
 7075            if (flags.is_reified) {
 7076                assert(!flags.any_captures);
 7077                break :ns .{ .reified = .{
 7078                    .zir_index = zir_index,
 7079                    .type_hash = extraData(extra_list, PackedU64, end_extra_index).get(),
 7080                } };
 7081            }
 7082            break :ns .{ .declared = .{
 7083                .zir_index = zir_index,
 7084                .captures = .{ .owned = if (flags.any_captures) .{
 7085                    .tid = unwrapped_index.tid,
 7086                    .start = end_extra_index + 1,
 7087                    .len = extra_list.view().items(.@"0")[end_extra_index],
 7088                } else CaptureValue.Slice.empty },
 7089            } };
 7090        } },
 7091
 7092        .type_struct_packed, .type_struct_packed_inits => .{ .struct_type = ns: {
 7093            const extra_list = unwrapped_index.getExtra(ip);
 7094            const extra_items = extra_list.view().items(.@"0");
 7095            const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "zir_index").?]);
 7096            const flags: Tag.TypeStructPacked.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?], .unordered));
 7097            const end_extra_index = data + @as(u32, @typeInfo(Tag.TypeStructPacked).@"struct".fields.len);
 7098            if (flags.is_reified) {
 7099                assert(!flags.any_captures);
 7100                break :ns .{ .reified = .{
 7101                    .zir_index = zir_index,
 7102                    .type_hash = extraData(extra_list, PackedU64, end_extra_index).get(),
 7103                } };
 7104            }
 7105            break :ns .{ .declared = .{
 7106                .zir_index = zir_index,
 7107                .captures = .{ .owned = if (flags.any_captures) .{
 7108                    .tid = unwrapped_index.tid,
 7109                    .start = end_extra_index + 1,
 7110                    .len = extra_items[end_extra_index],
 7111                } else CaptureValue.Slice.empty },
 7112            } };
 7113        } },
 7114        .type_tuple => .{ .tuple_type = extraTypeTuple(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) },
 7115        .type_union => .{ .union_type = ns: {
 7116            const extra_list = unwrapped_index.getExtra(ip);
 7117            const extra = extraDataTrail(extra_list, Tag.TypeUnion, data);
 7118            if (extra.data.flags.is_reified) {
 7119                assert(!extra.data.flags.any_captures);
 7120                break :ns .{ .reified = .{
 7121                    .zir_index = extra.data.zir_index,
 7122                    .type_hash = extraData(extra_list, PackedU64, extra.end).get(),
 7123                } };
 7124            }
 7125            break :ns .{ .declared = .{
 7126                .zir_index = extra.data.zir_index,
 7127                .captures = .{ .owned = if (extra.data.flags.any_captures) .{
 7128                    .tid = unwrapped_index.tid,
 7129                    .start = extra.end + 1,
 7130                    .len = extra_list.view().items(.@"0")[extra.end],
 7131                } else CaptureValue.Slice.empty },
 7132            } };
 7133        } },
 7134
 7135        .type_enum_auto => .{ .enum_type = ns: {
 7136            const extra_list = unwrapped_index.getExtra(ip);
 7137            const extra = extraDataTrail(extra_list, EnumAuto, data);
 7138            const zir_index = extra.data.zir_index.unwrap() orelse {
 7139                assert(extra.data.captures_len == 0);
 7140                break :ns .{ .generated_tag = .{
 7141                    .union_type = @enumFromInt(extra_list.view().items(.@"0")[extra.end]),
 7142                } };
 7143            };
 7144            if (extra.data.captures_len == std.math.maxInt(u32)) {
 7145                break :ns .{ .reified = .{
 7146                    .zir_index = zir_index,
 7147                    .type_hash = extraData(extra_list, PackedU64, extra.end).get(),
 7148                } };
 7149            }
 7150            break :ns .{ .declared = .{
 7151                .zir_index = zir_index,
 7152                .captures = .{ .owned = .{
 7153                    .tid = unwrapped_index.tid,
 7154                    .start = extra.end,
 7155                    .len = extra.data.captures_len,
 7156                } },
 7157            } };
 7158        } },
 7159        .type_enum_explicit, .type_enum_nonexhaustive => .{ .enum_type = ns: {
 7160            const extra_list = unwrapped_index.getExtra(ip);
 7161            const extra = extraDataTrail(extra_list, EnumExplicit, data);
 7162            const zir_index = extra.data.zir_index.unwrap() orelse {
 7163                assert(extra.data.captures_len == 0);
 7164                break :ns .{ .generated_tag = .{
 7165                    .union_type = @enumFromInt(extra_list.view().items(.@"0")[extra.end]),
 7166                } };
 7167            };
 7168            if (extra.data.captures_len == std.math.maxInt(u32)) {
 7169                break :ns .{ .reified = .{
 7170                    .zir_index = zir_index,
 7171                    .type_hash = extraData(extra_list, PackedU64, extra.end).get(),
 7172                } };
 7173            }
 7174            break :ns .{ .declared = .{
 7175                .zir_index = zir_index,
 7176                .captures = .{ .owned = .{
 7177                    .tid = unwrapped_index.tid,
 7178                    .start = extra.end,
 7179                    .len = extra.data.captures_len,
 7180                } },
 7181            } };
 7182        } },
 7183        .type_function => .{ .func_type = extraFuncType(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) },
 7184
 7185        .undef => .{ .undef = @enumFromInt(data) },
 7186        .opt_null => .{ .opt = .{
 7187            .ty = @enumFromInt(data),
 7188            .val = .none,
 7189        } },
 7190        .opt_payload => {
 7191            const extra = extraData(unwrapped_index.getExtra(ip), Tag.TypeValue, data);
 7192            return .{ .opt = .{
 7193                .ty = extra.ty,
 7194                .val = extra.val,
 7195            } };
 7196        },
 7197        .ptr_nav => {
 7198            const info = extraData(unwrapped_index.getExtra(ip), PtrNav, data);
 7199            return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .nav = info.nav }, .byte_offset = info.byteOffset() } };
 7200        },
 7201        .ptr_comptime_alloc => {
 7202            const info = extraData(unwrapped_index.getExtra(ip), PtrComptimeAlloc, data);
 7203            return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .comptime_alloc = info.index }, .byte_offset = info.byteOffset() } };
 7204        },
 7205        .ptr_uav => {
 7206            const info = extraData(unwrapped_index.getExtra(ip), PtrUav, data);
 7207            return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .uav = .{
 7208                .val = info.val,
 7209                .orig_ty = info.ty,
 7210            } }, .byte_offset = info.byteOffset() } };
 7211        },
 7212        .ptr_uav_aligned => {
 7213            const info = extraData(unwrapped_index.getExtra(ip), PtrUavAligned, data);
 7214            return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .uav = .{
 7215                .val = info.val,
 7216                .orig_ty = info.orig_ty,
 7217            } }, .byte_offset = info.byteOffset() } };
 7218        },
 7219        .ptr_comptime_field => {
 7220            const info = extraData(unwrapped_index.getExtra(ip), PtrComptimeField, data);
 7221            return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .comptime_field = info.field_val }, .byte_offset = info.byteOffset() } };
 7222        },
 7223        .ptr_int => {
 7224            const info = extraData(unwrapped_index.getExtra(ip), PtrInt, data);
 7225            return .{ .ptr = .{
 7226                .ty = info.ty,
 7227                .base_addr = .int,
 7228                .byte_offset = info.byteOffset(),
 7229            } };
 7230        },
 7231        .ptr_eu_payload => {
 7232            const info = extraData(unwrapped_index.getExtra(ip), PtrBase, data);
 7233            return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .eu_payload = info.base }, .byte_offset = info.byteOffset() } };
 7234        },
 7235        .ptr_opt_payload => {
 7236            const info = extraData(unwrapped_index.getExtra(ip), PtrBase, data);
 7237            return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .opt_payload = info.base }, .byte_offset = info.byteOffset() } };
 7238        },
 7239        .ptr_elem => {
 7240            // Avoid `indexToKey` recursion by asserting the tag encoding.
 7241            const info = extraData(unwrapped_index.getExtra(ip), PtrBaseIndex, data);
 7242            const index_item = info.index.unwrap(ip).getItem(ip);
 7243            return switch (index_item.tag) {
 7244                .int_usize => .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .arr_elem = .{
 7245                    .base = info.base,
 7246                    .index = index_item.data,
 7247                } }, .byte_offset = info.byteOffset() } },
 7248                .int_positive => @panic("TODO"), // implement along with behavior test coverage
 7249                else => unreachable,
 7250            };
 7251        },
 7252        .ptr_field => {
 7253            // Avoid `indexToKey` recursion by asserting the tag encoding.
 7254            const info = extraData(unwrapped_index.getExtra(ip), PtrBaseIndex, data);
 7255            const index_item = info.index.unwrap(ip).getItem(ip);
 7256            return switch (index_item.tag) {
 7257                .int_usize => .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .field = .{
 7258                    .base = info.base,
 7259                    .index = index_item.data,
 7260                } }, .byte_offset = info.byteOffset() } },
 7261                .int_positive => @panic("TODO"), // implement along with behavior test coverage
 7262                else => unreachable,
 7263            };
 7264        },
 7265        .ptr_slice => {
 7266            const info = extraData(unwrapped_index.getExtra(ip), PtrSlice, data);
 7267            return .{ .slice = .{
 7268                .ty = info.ty,
 7269                .ptr = info.ptr,
 7270                .len = info.len,
 7271            } };
 7272        },
 7273        .int_u8 => .{ .int = .{
 7274            .ty = .u8_type,
 7275            .storage = .{ .u64 = data },
 7276        } },
 7277        .int_u16 => .{ .int = .{
 7278            .ty = .u16_type,
 7279            .storage = .{ .u64 = data },
 7280        } },
 7281        .int_u32 => .{ .int = .{
 7282            .ty = .u32_type,
 7283            .storage = .{ .u64 = data },
 7284        } },
 7285        .int_i32 => .{ .int = .{
 7286            .ty = .i32_type,
 7287            .storage = .{ .i64 = @as(i32, @bitCast(data)) },
 7288        } },
 7289        .int_usize => .{ .int = .{
 7290            .ty = .usize_type,
 7291            .storage = .{ .u64 = data },
 7292        } },
 7293        .int_comptime_int_u32 => .{ .int = .{
 7294            .ty = .comptime_int_type,
 7295            .storage = .{ .u64 = data },
 7296        } },
 7297        .int_comptime_int_i32 => .{ .int = .{
 7298            .ty = .comptime_int_type,
 7299            .storage = .{ .i64 = @as(i32, @bitCast(data)) },
 7300        } },
 7301        .int_positive => ip.indexToKeyBigInt(unwrapped_index.tid, data, true),
 7302        .int_negative => ip.indexToKeyBigInt(unwrapped_index.tid, data, false),
 7303        .int_small => {
 7304            const info = extraData(unwrapped_index.getExtra(ip), IntSmall, data);
 7305            return .{ .int = .{
 7306                .ty = info.ty,
 7307                .storage = .{ .u64 = info.value },
 7308            } };
 7309        },
 7310        .int_lazy_align, .int_lazy_size => |tag| {
 7311            const info = extraData(unwrapped_index.getExtra(ip), IntLazy, data);
 7312            return .{ .int = .{
 7313                .ty = info.ty,
 7314                .storage = switch (tag) {
 7315                    .int_lazy_align => .{ .lazy_align = info.lazy_ty },
 7316                    .int_lazy_size => .{ .lazy_size = info.lazy_ty },
 7317                    else => unreachable,
 7318                },
 7319            } };
 7320        },
 7321        .float_f16 => .{ .float = .{
 7322            .ty = .f16_type,
 7323            .storage = .{ .f16 = @bitCast(@as(u16, @intCast(data))) },
 7324        } },
 7325        .float_f32 => .{ .float = .{
 7326            .ty = .f32_type,
 7327            .storage = .{ .f32 = @bitCast(data) },
 7328        } },
 7329        .float_f64 => .{ .float = .{
 7330            .ty = .f64_type,
 7331            .storage = .{ .f64 = extraData(unwrapped_index.getExtra(ip), Float64, data).get() },
 7332        } },
 7333        .float_f80 => .{ .float = .{
 7334            .ty = .f80_type,
 7335            .storage = .{ .f80 = extraData(unwrapped_index.getExtra(ip), Float80, data).get() },
 7336        } },
 7337        .float_f128 => .{ .float = .{
 7338            .ty = .f128_type,
 7339            .storage = .{ .f128 = extraData(unwrapped_index.getExtra(ip), Float128, data).get() },
 7340        } },
 7341        .float_c_longdouble_f80 => .{ .float = .{
 7342            .ty = .c_longdouble_type,
 7343            .storage = .{ .f80 = extraData(unwrapped_index.getExtra(ip), Float80, data).get() },
 7344        } },
 7345        .float_c_longdouble_f128 => .{ .float = .{
 7346            .ty = .c_longdouble_type,
 7347            .storage = .{ .f128 = extraData(unwrapped_index.getExtra(ip), Float128, data).get() },
 7348        } },
 7349        .float_comptime_float => .{ .float = .{
 7350            .ty = .comptime_float_type,
 7351            .storage = .{ .f128 = extraData(unwrapped_index.getExtra(ip), Float128, data).get() },
 7352        } },
 7353        .variable, .threadlocal_variable => {
 7354            const extra = extraData(unwrapped_index.getExtra(ip), Tag.Variable, data);
 7355            return .{ .variable = .{
 7356                .ty = extra.ty,
 7357                .init = extra.init,
 7358                .owner_nav = extra.owner_nav,
 7359                .is_threadlocal = switch (item.tag) {
 7360                    else => unreachable,
 7361                    .variable => false,
 7362                    .threadlocal_variable => true,
 7363                },
 7364            } };
 7365        },
 7366        .@"extern" => {
 7367            const extra = extraData(unwrapped_index.getExtra(ip), Tag.Extern, data);
 7368            const nav = ip.getNav(extra.owner_nav);
 7369            return .{ .@"extern" = .{
 7370                .name = nav.name,
 7371                .ty = extra.ty,
 7372                .lib_name = extra.lib_name,
 7373                .linkage = extra.flags.linkage,
 7374                .visibility = extra.flags.visibility,
 7375                .is_threadlocal = extra.flags.is_threadlocal,
 7376                .is_dll_import = extra.flags.is_dll_import,
 7377                .relocation = extra.flags.relocation,
 7378                .is_const = nav.status.fully_resolved.is_const,
 7379                .alignment = nav.status.fully_resolved.alignment,
 7380                .@"addrspace" = nav.status.fully_resolved.@"addrspace",
 7381                .zir_index = extra.zir_index,
 7382                .owner_nav = extra.owner_nav,
 7383                .source = extra.flags.source,
 7384            } };
 7385        },
 7386        .func_instance => .{ .func = ip.extraFuncInstance(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) },
 7387        .func_decl => .{ .func = extraFuncDecl(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) },
 7388        .func_coerced => .{ .func = ip.extraFuncCoerced(unwrapped_index.getExtra(ip), data) },
 7389        .only_possible_value => {
 7390            const ty: Index = @enumFromInt(data);
 7391            const ty_unwrapped = ty.unwrap(ip);
 7392            const ty_extra = ty_unwrapped.getExtra(ip);
 7393            const ty_item = ty_unwrapped.getItem(ip);
 7394            return switch (ty_item.tag) {
 7395                .type_array_big => {
 7396                    const sentinel = @as(
 7397                        *const [1]Index,
 7398                        @ptrCast(&ty_extra.view().items(.@"0")[ty_item.data + std.meta.fieldIndex(Array, "sentinel").?]),
 7399                    );
 7400                    return .{ .aggregate = .{
 7401                        .ty = ty,
 7402                        .storage = .{ .elems = sentinel[0..@intFromBool(sentinel[0] != .none)] },
 7403                    } };
 7404                },
 7405                .type_array_small,
 7406                .type_vector,
 7407                .type_struct_packed,
 7408                => .{ .aggregate = .{
 7409                    .ty = ty,
 7410                    .storage = .{ .elems = &.{} },
 7411                } },
 7412
 7413                // There is only one possible value precisely due to the
 7414                // fact that this values slice is fully populated!
 7415                .type_struct, .type_struct_packed_inits => {
 7416                    const info = loadStructType(ip, ty);
 7417                    return .{ .aggregate = .{
 7418                        .ty = ty,
 7419                        .storage = .{ .elems = @ptrCast(info.field_inits.get(ip)) },
 7420                    } };
 7421                },
 7422
 7423                // There is only one possible value precisely due to the
 7424                // fact that this values slice is fully populated!
 7425                .type_tuple => {
 7426                    const type_tuple = extraDataTrail(ty_extra, TypeTuple, ty_item.data);
 7427                    const fields_len = type_tuple.data.fields_len;
 7428                    const values = ty_extra.view().items(.@"0")[type_tuple.end + fields_len ..][0..fields_len];
 7429                    return .{ .aggregate = .{
 7430                        .ty = ty,
 7431                        .storage = .{ .elems = @ptrCast(values) },
 7432                    } };
 7433                },
 7434
 7435                .type_enum_auto,
 7436                .type_enum_explicit,
 7437                .type_union,
 7438                => .{ .empty_enum_value = ty },
 7439
 7440                else => unreachable,
 7441            };
 7442        },
 7443        .bytes => {
 7444            const extra = extraData(unwrapped_index.getExtra(ip), Bytes, data);
 7445            return .{ .aggregate = .{
 7446                .ty = extra.ty,
 7447                .storage = .{ .bytes = extra.bytes },
 7448            } };
 7449        },
 7450        .aggregate => {
 7451            const extra_list = unwrapped_index.getExtra(ip);
 7452            const extra = extraDataTrail(extra_list, Tag.Aggregate, data);
 7453            const len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(extra.data.ty));
 7454            const fields: []const Index = @ptrCast(extra_list.view().items(.@"0")[extra.end..][0..len]);
 7455            return .{ .aggregate = .{
 7456                .ty = extra.data.ty,
 7457                .storage = .{ .elems = fields },
 7458            } };
 7459        },
 7460        .repeated => {
 7461            const extra = extraData(unwrapped_index.getExtra(ip), Repeated, data);
 7462            return .{ .aggregate = .{
 7463                .ty = extra.ty,
 7464                .storage = .{ .repeated_elem = extra.elem_val },
 7465            } };
 7466        },
 7467        .union_value => .{ .un = extraData(unwrapped_index.getExtra(ip), Key.Union, data) },
 7468        .error_set_error => .{ .err = extraData(unwrapped_index.getExtra(ip), Key.Error, data) },
 7469        .error_union_error => {
 7470            const extra = extraData(unwrapped_index.getExtra(ip), Key.Error, data);
 7471            return .{ .error_union = .{
 7472                .ty = extra.ty,
 7473                .val = .{ .err_name = extra.name },
 7474            } };
 7475        },
 7476        .error_union_payload => {
 7477            const extra = extraData(unwrapped_index.getExtra(ip), Tag.TypeValue, data);
 7478            return .{ .error_union = .{
 7479                .ty = extra.ty,
 7480                .val = .{ .payload = extra.val },
 7481            } };
 7482        },
 7483        .enum_literal => .{ .enum_literal = @enumFromInt(data) },
 7484        .enum_tag => .{ .enum_tag = extraData(unwrapped_index.getExtra(ip), Tag.EnumTag, data) },
 7485
 7486        .memoized_call => {
 7487            const extra_list = unwrapped_index.getExtra(ip);
 7488            const extra = extraDataTrail(extra_list, MemoizedCall, data);
 7489            return .{ .memoized_call = .{
 7490                .func = extra.data.func,
 7491                .arg_values = @ptrCast(extra_list.view().items(.@"0")[extra.end..][0..extra.data.args_len]),
 7492                .result = extra.data.result,
 7493                .branch_count = extra.data.branch_count,
 7494            } };
 7495        },
 7496    };
 7497}
 7498
 7499fn extraErrorSet(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.ErrorSetType {
 7500    const error_set = extraDataTrail(extra, Tag.ErrorSet, extra_index);
 7501    return .{
 7502        .names = .{
 7503            .tid = tid,
 7504            .start = @intCast(error_set.end),
 7505            .len = error_set.data.names_len,
 7506        },
 7507        .names_map = error_set.data.names_map.toOptional(),
 7508    };
 7509}
 7510
 7511fn extraTypeTuple(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.TupleType {
 7512    const type_tuple = extraDataTrail(extra, TypeTuple, extra_index);
 7513    const fields_len = type_tuple.data.fields_len;
 7514    return .{
 7515        .types = .{
 7516            .tid = tid,
 7517            .start = type_tuple.end,
 7518            .len = fields_len,
 7519        },
 7520        .values = .{
 7521            .tid = tid,
 7522            .start = type_tuple.end + fields_len,
 7523            .len = fields_len,
 7524        },
 7525    };
 7526}
 7527
 7528fn extraFuncType(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.FuncType {
 7529    const type_function = extraDataTrail(extra, Tag.TypeFunction, extra_index);
 7530    var trail_index: usize = type_function.end;
 7531    const comptime_bits: u32 = if (!type_function.data.flags.has_comptime_bits) 0 else b: {
 7532        const x = extra.view().items(.@"0")[trail_index];
 7533        trail_index += 1;
 7534        break :b x;
 7535    };
 7536    const noalias_bits: u32 = if (!type_function.data.flags.has_noalias_bits) 0 else b: {
 7537        const x = extra.view().items(.@"0")[trail_index];
 7538        trail_index += 1;
 7539        break :b x;
 7540    };
 7541    return .{
 7542        .param_types = .{
 7543            .tid = tid,
 7544            .start = @intCast(trail_index),
 7545            .len = type_function.data.params_len,
 7546        },
 7547        .return_type = type_function.data.return_type,
 7548        .comptime_bits = comptime_bits,
 7549        .noalias_bits = noalias_bits,
 7550        .cc = type_function.data.flags.cc.unpack(),
 7551        .is_var_args = type_function.data.flags.is_var_args,
 7552        .is_noinline = type_function.data.flags.is_noinline,
 7553        .is_generic = type_function.data.flags.is_generic,
 7554    };
 7555}
 7556
 7557fn extraFuncDecl(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.Func {
 7558    const P = Tag.FuncDecl;
 7559    const func_decl = extraDataTrail(extra, P, extra_index);
 7560    return .{
 7561        .tid = tid,
 7562        .ty = func_decl.data.ty,
 7563        .uncoerced_ty = func_decl.data.ty,
 7564        .analysis_extra_index = extra_index + std.meta.fieldIndex(P, "analysis").?,
 7565        .zir_body_inst_extra_index = extra_index + std.meta.fieldIndex(P, "zir_body_inst").?,
 7566        .resolved_error_set_extra_index = if (func_decl.data.analysis.inferred_error_set) func_decl.end else 0,
 7567        .branch_quota_extra_index = 0,
 7568        .owner_nav = func_decl.data.owner_nav,
 7569        .zir_body_inst = func_decl.data.zir_body_inst,
 7570        .lbrace_line = func_decl.data.lbrace_line,
 7571        .rbrace_line = func_decl.data.rbrace_line,
 7572        .lbrace_column = func_decl.data.lbrace_column,
 7573        .rbrace_column = func_decl.data.rbrace_column,
 7574        .generic_owner = .none,
 7575        .comptime_args = Index.Slice.empty,
 7576    };
 7577}
 7578
/// Decodes the `Tag.FuncInstance` payload at `extra_index` into a `Key.Func`.
/// Source-location fields (brace lines/columns) and the ZIR body instruction
/// are taken from the generic owner's `FuncDecl` rather than stored per
/// instance. Trailing data after the fixed fields: an optional resolved error
/// set word (when `analysis.inferred_error_set`), then the comptime args.
fn extraFuncInstance(ip: *const InternPool, tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.Func {
    const extra_items = extra.view().items(.@"0");
    const analysis_extra_index = extra_index + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?;
    // The analysis word may be mutated concurrently, so it is loaded atomically.
    const analysis: FuncAnalysis = @bitCast(@atomicLoad(u32, &extra_items[analysis_extra_index], .unordered));
    const owner_nav: Nav.Index = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "owner_nav").?]);
    const ty: Index = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "ty").?]);
    const generic_owner: Index = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "generic_owner").?]);
    const func_decl = ip.funcDeclInfo(generic_owner);
    // First extra index past the fixed-size `Tag.FuncInstance` fields; the
    // trailing data described above begins here.
    const end_extra_index = extra_index + @as(u32, @typeInfo(Tag.FuncInstance).@"struct".fields.len);
    return .{
        .tid = tid,
        .ty = ty,
        .uncoerced_ty = ty,
        .analysis_extra_index = analysis_extra_index,
        .zir_body_inst_extra_index = func_decl.zir_body_inst_extra_index,
        // 0 means "no resolved error set word present".
        .resolved_error_set_extra_index = if (analysis.inferred_error_set) end_extra_index else 0,
        .branch_quota_extra_index = extra_index + std.meta.fieldIndex(Tag.FuncInstance, "branch_quota").?,
        .owner_nav = owner_nav,
        .zir_body_inst = func_decl.zir_body_inst,
        .lbrace_line = func_decl.lbrace_line,
        .rbrace_line = func_decl.rbrace_line,
        .lbrace_column = func_decl.lbrace_column,
        .rbrace_column = func_decl.rbrace_column,
        .generic_owner = generic_owner,
        .comptime_args = .{
            .tid = tid,
            // Comptime args start after the optional resolved error set word.
            .start = end_extra_index + @intFromBool(analysis.inferred_error_set),
            .len = ip.funcTypeParamsLen(func_decl.ty),
        },
    };
}
 7610
 7611fn extraFuncCoerced(ip: *const InternPool, extra: Local.Extra, extra_index: u32) Key.Func {
 7612    const func_coerced = extraData(extra, Tag.FuncCoerced, extra_index);
 7613    const func_unwrapped = func_coerced.func.unwrap(ip);
 7614    const sub_item = func_unwrapped.getItem(ip);
 7615    const func_extra = func_unwrapped.getExtra(ip);
 7616    var func: Key.Func = switch (sub_item.tag) {
 7617        .func_instance => ip.extraFuncInstance(func_unwrapped.tid, func_extra, sub_item.data),
 7618        .func_decl => extraFuncDecl(func_unwrapped.tid, func_extra, sub_item.data),
 7619        else => unreachable,
 7620    };
 7621    func.ty = func_coerced.ty;
 7622    return func;
 7623}
 7624
/// Reconstructs a big-integer `Key` from the limbs list of thread `tid`.
/// Storage layout at `limb_index`: a fixed-size `Int` header (type + limb
/// count) occupying `Int.limbs_items_len` limb slots, followed by the limbs.
/// The sign is encoded in the item tag (`int_positive`/`int_negative`) and
/// passed in via `positive`, not stored with the limbs.
fn indexToKeyBigInt(ip: *const InternPool, tid: Zcu.PerThread.Id, limb_index: u32, positive: bool) Key {
    const limbs_items = ip.getLocalShared(tid).getLimbs().view().items(.@"0");
    // Reinterpret the header slots as the `Int` struct.
    const int: Int = @bitCast(limbs_items[limb_index..][0..Int.limbs_items_len].*);
    const big_int: BigIntConst = .{
        .limbs = limbs_items[limb_index + Int.limbs_items_len ..][0..int.limbs_len],
        .positive = positive,
    };
    return .{ .int = .{
        .ty = int.ty,
        // Prefer the compact u64/i64 storage forms when the value fits;
        // otherwise reference the limbs in place as a big int.
        .storage = if (big_int.toInt(u64)) |x|
            .{ .u64 = x }
        else |_| if (big_int.toInt(i64)) |x|
            .{ .i64 = x }
        else |_|
            .{ .big_int = big_int },
    } };
}
 7642
/// Result of `getOrPutKey` and friends. In the `.new` state, the owning
/// shard's map mutex is HELD and a map slot (with its hash already written)
/// is reserved at `map_index`; the caller must resolve the state via exactly
/// one of `put`/`putTentative`+`putFinal`, `cancel`, or `deinit` to release
/// the mutex. In the `.existing` state no lock is held.
const GetOrPutKey = union(enum) {
    existing: Index,
    new: struct {
        ip: *InternPool,
        tid: Zcu.PerThread.Id,
        shard: *Shard,
        map_index: u32,
    },

    /// Publishes the most recently appended item of this thread's local list
    /// as the value for the reserved slot, then returns its index.
    /// Asserts the `.new` state.
    fn put(gop: *GetOrPutKey) Index {
        switch (gop.*) {
            .existing => unreachable,
            .new => |*info| {
                // The item was appended by the caller just before this call,
                // so it is the last element of the local items list.
                const index = Index.Unwrapped.wrap(.{
                    .tid = info.tid,
                    .index = info.ip.getLocal(info.tid).mutate.items.len - 1,
                }, info.ip);
                gop.putTentative(index);
                gop.putFinal(index);
                return index;
            },
        }
    }

    /// Stores `index` into the reserved slot with release ordering so
    /// concurrent lock-free readers can observe it, but does not yet bump the
    /// map length or release the mutex. Asserts the `.new` state.
    fn putTentative(gop: *GetOrPutKey, index: Index) void {
        assert(index != .none);
        switch (gop.*) {
            .existing => unreachable,
            .new => |*info| gop.new.shard.shared.map.entries[info.map_index].release(index),
        }
    }

    /// Completes a `putTentative`: commits the slot (bumps the map length),
    /// releases the shard mutex, and transitions to `.existing`.
    /// Asserts the slot already holds `index`.
    fn putFinal(gop: *GetOrPutKey, index: Index) void {
        assert(index != .none);
        switch (gop.*) {
            .existing => unreachable,
            .new => |info| {
                assert(info.shard.shared.map.entries[info.map_index].value == index);
                info.shard.mutate.map.len += 1;
                info.shard.mutate.map.mutex.unlock();
                gop.* = .{ .existing = index };
            },
        }
    }

    /// Abandons a reservation without storing anything: releases the mutex
    /// (if held) and leaves the gop in an inert state. Safe in either state.
    fn cancel(gop: *GetOrPutKey) void {
        switch (gop.*) {
            .existing => {},
            .new => |info| info.shard.mutate.map.mutex.unlock(),
        }
        gop.* = .{ .existing = undefined };
    }

    /// Like `cancel`, but also clears a tentatively-published slot value so
    /// readers no longer observe it. Invalidates the gop.
    fn deinit(gop: *GetOrPutKey) void {
        switch (gop.*) {
            .existing => {},
            .new => |info| info.shard.shared.map.entries[info.map_index].resetUnordered(),
        }
        gop.cancel();
        gop.* = undefined;
    }
};
 7705fn getOrPutKey(
 7706    ip: *InternPool,
 7707    gpa: Allocator,
 7708    tid: Zcu.PerThread.Id,
 7709    key: Key,
 7710) Allocator.Error!GetOrPutKey {
 7711    return ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, tid, key, 0);
 7712}
/// Looks up `key` in its shard's hash map, reserving a slot for insertion if
/// absent. On the `.new` path the shard mutex is left HELD for the caller to
/// release via the returned `GetOrPutKey`. `additional_capacity` extra entries
/// are guaranteed to fit without another grow while the reservation is live.
fn getOrPutKeyEnsuringAdditionalCapacity(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    key: Key,
    additional_capacity: u32,
) Allocator.Error!GetOrPutKey {
    const full_hash = key.hash64(ip);
    // Top 32 bits select the probe position; low bits select the shard.
    const hash: u32 = @truncate(full_hash >> 32);
    const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
    // First pass: optimistic lock-free lookup using acquire loads.
    var map = shard.shared.map.acquire();
    const Map = @TypeOf(map);
    var map_mask = map.header().mask();
    var map_index = hash;
    while (true) : (map_index += 1) {
        map_index &= map_mask;
        const entry = &map.entries[map_index];
        const index = entry.acquire();
        if (index == .none) break;
        if (entry.hash != hash) continue;
        // Entries replaced by `putKeyReplace` remain in the items list but
        // must never be returned from lookups.
        if (index.unwrap(ip).getTag(ip) == .removed) continue;
        if (ip.indexToKey(index).eql(key, ip)) return .{ .existing = index };
    }
    // Not found lock-free; take the writer lock and re-check, since another
    // thread may have inserted (or grown the map) in the meantime.
    shard.mutate.map.mutex.lock();
    errdefer shard.mutate.map.mutex.unlock();
    if (map.entries != shard.shared.map.entries) {
        // The map was reallocated while we probed; restart on the new one.
        map = shard.shared.map;
        map_mask = map.header().mask();
        map_index = hash;
    }
    while (true) : (map_index += 1) {
        map_index &= map_mask;
        const entry = &map.entries[map_index];
        const index = entry.value;
        if (index == .none) break;
        if (entry.hash != hash) continue;
        if (ip.indexToKey(index).eql(key, ip)) {
            defer shard.mutate.map.mutex.unlock();
            return .{ .existing = index };
        }
    }
    const map_header = map.header().*;
    const required = shard.mutate.map.len + additional_capacity;
    // Grow when load factor would reach 3/5. The old allocation is kept alive
    // in the arena so lock-free readers holding it stay valid.
    if (required >= map_header.capacity * 3 / 5) {
        const arena_state = &ip.getLocal(tid).mutate.arena;
        var arena = arena_state.promote(gpa);
        defer arena_state.* = arena.state;
        var new_map_capacity = map_header.capacity;
        while (true) {
            new_map_capacity *= 2;
            if (required < new_map_capacity * 3 / 5) break;
        }
        const new_map_buf = try arena.allocator().alignedAlloc(
            u8,
            .fromByteUnits(Map.alignment),
            Map.entries_offset + new_map_capacity * @sizeOf(Map.Entry),
        );
        const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };
        new_map.header().* = .{ .capacity = new_map_capacity };
        @memset(new_map.entries[0..new_map_capacity], .{ .value = .none, .hash = undefined });
        // Rehash all live entries into the new table.
        const new_map_mask = new_map.header().mask();
        map_index = 0;
        while (map_index < map_header.capacity) : (map_index += 1) {
            const entry = &map.entries[map_index];
            const index = entry.value;
            if (index == .none) continue;
            const item_hash = entry.hash;
            var new_map_index = item_hash;
            while (true) : (new_map_index += 1) {
                new_map_index &= new_map_mask;
                const new_entry = &new_map.entries[new_map_index];
                if (new_entry.value != .none) continue;
                new_entry.* = .{
                    .value = index,
                    .hash = item_hash,
                };
                break;
            }
        }
        // Find the free slot for `key` in the new table.
        map = new_map;
        map_index = hash;
        while (true) : (map_index += 1) {
            map_index &= new_map_mask;
            if (map.entries[map_index].value == .none) break;
        }
        // Publish the new map to lock-free readers.
        shard.shared.map.release(new_map);
    }
    // Reserve the slot: write the hash now; the value is published later by
    // `GetOrPutKey.put`/`putTentative` while the mutex is still held.
    map.entries[map_index].hash = hash;
    return .{ .new = .{
        .ip = ip,
        .tid = tid,
        .shard = shard,
        .map_index = map_index,
    } };
}
 7808/// Like `getOrPutKey`, but asserts that the key already exists, and prepares to replace
 7809/// its shard entry with a new `Index` anyway. After finalizing this, the old index remains
 7810/// valid (in that `indexToKey` and similar queries will behave as before), but it will
 7811/// never be returned from a lookup (`getOrPutKey` etc).
 7812/// This is used by incremental compilation when an existing container type is outdated. In
 7813/// this case, the type must be recreated at a new `InternPool.Index`, but the old index must
 7814/// remain valid since now-unreferenced `AnalUnit`s may retain references to it. The old index
 7815/// will be cleaned up when the `Zcu` undergoes garbage collection.
fn putKeyReplace(
    ip: *InternPool,
    tid: Zcu.PerThread.Id,
    key: Key,
) GetOrPutKey {
    const full_hash = key.hash64(ip);
    // Same hash split as `getOrPutKeyEnsuringAdditionalCapacity`: top 32 bits
    // probe within the shard, low bits pick the shard.
    const hash: u32 = @truncate(full_hash >> 32);
    const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
    // The mutex stays held on return (inside the `.new` result); the caller
    // releases it via `putFinal`/`cancel` on the returned `GetOrPutKey`.
    shard.mutate.map.mutex.lock();
    errdefer shard.mutate.map.mutex.unlock();
    const map = shard.shared.map;
    const map_mask = map.header().mask();
    var map_index = hash;
    while (true) : (map_index += 1) {
        map_index &= map_mask;
        const entry = &map.entries[map_index];
        const index = entry.value;
        assert(index != .none); // key not present
        if (entry.hash == hash and ip.indexToKey(index).eql(key, ip)) {
            break; // we found the entry to replace
        }
    }
    // Note: no map growth here — the slot already exists and is reused, so
    // `shard.mutate.map.len` is unchanged aside from `putFinal`'s bump.
    return .{ .new = .{
        .ip = ip,
        .tid = tid,
        .shard = shard,
        .map_index = map_index,
    } };
}
 7845
 7846pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) Allocator.Error!Index {
 7847    var gop = try ip.getOrPutKey(gpa, tid, key);
 7848    defer gop.deinit();
 7849    if (gop == .existing) return gop.existing;
 7850    const local = ip.getLocal(tid);
 7851    const items = local.getMutableItems(gpa);
 7852    const extra = local.getMutableExtra(gpa);
 7853    try items.ensureUnusedCapacity(1);
 7854    switch (key) {
 7855        .int_type => |int_type| {
 7856            const t: Tag = switch (int_type.signedness) {
 7857                .signed => .type_int_signed,
 7858                .unsigned => .type_int_unsigned,
 7859            };
 7860            items.appendAssumeCapacity(.{
 7861                .tag = t,
 7862                .data = int_type.bits,
 7863            });
 7864        },
 7865        .ptr_type => |ptr_type| {
 7866            assert(ptr_type.child != .none);
 7867            assert(ptr_type.sentinel == .none or ip.typeOf(ptr_type.sentinel) == ptr_type.child);
 7868
 7869            if (ptr_type.flags.size == .slice) {
 7870                gop.cancel();
 7871                var new_key = key;
 7872                new_key.ptr_type.flags.size = .many;
 7873                const ptr_type_index = try ip.get(gpa, tid, new_key);
 7874                gop = try ip.getOrPutKey(gpa, tid, key);
 7875
 7876                try items.ensureUnusedCapacity(1);
 7877                items.appendAssumeCapacity(.{
 7878                    .tag = .type_slice,
 7879                    .data = @intFromEnum(ptr_type_index),
 7880                });
 7881                return gop.put();
 7882            }
 7883
 7884            var ptr_type_adjusted = ptr_type;
 7885            if (ptr_type.flags.size == .c) ptr_type_adjusted.flags.is_allowzero = true;
 7886
 7887            items.appendAssumeCapacity(.{
 7888                .tag = .type_pointer,
 7889                .data = try addExtra(extra, ptr_type_adjusted),
 7890            });
 7891        },
 7892        .array_type => |array_type| {
 7893            assert(array_type.child != .none);
 7894            assert(array_type.sentinel == .none or ip.typeOf(array_type.sentinel) == array_type.child);
 7895
 7896            if (std.math.cast(u32, array_type.len)) |len| {
 7897                if (array_type.sentinel == .none) {
 7898                    items.appendAssumeCapacity(.{
 7899                        .tag = .type_array_small,
 7900                        .data = try addExtra(extra, Vector{
 7901                            .len = len,
 7902                            .child = array_type.child,
 7903                        }),
 7904                    });
 7905                    return gop.put();
 7906                }
 7907            }
 7908
 7909            const length = Array.Length.init(array_type.len);
 7910            items.appendAssumeCapacity(.{
 7911                .tag = .type_array_big,
 7912                .data = try addExtra(extra, Array{
 7913                    .len0 = length.a,
 7914                    .len1 = length.b,
 7915                    .child = array_type.child,
 7916                    .sentinel = array_type.sentinel,
 7917                }),
 7918            });
 7919        },
 7920        .vector_type => |vector_type| {
 7921            items.appendAssumeCapacity(.{
 7922                .tag = .type_vector,
 7923                .data = try addExtra(extra, Vector{
 7924                    .len = vector_type.len,
 7925                    .child = vector_type.child,
 7926                }),
 7927            });
 7928        },
 7929        .opt_type => |payload_type| {
 7930            assert(payload_type != .none);
 7931            items.appendAssumeCapacity(.{
 7932                .tag = .type_optional,
 7933                .data = @intFromEnum(payload_type),
 7934            });
 7935        },
 7936        .anyframe_type => |payload_type| {
 7937            // payload_type might be none, indicating the type is `anyframe`.
 7938            items.appendAssumeCapacity(.{
 7939                .tag = .type_anyframe,
 7940                .data = @intFromEnum(payload_type),
 7941            });
 7942        },
 7943        .error_union_type => |error_union_type| {
 7944            items.appendAssumeCapacity(if (error_union_type.error_set_type == .anyerror_type) .{
 7945                .tag = .type_anyerror_union,
 7946                .data = @intFromEnum(error_union_type.payload_type),
 7947            } else .{
 7948                .tag = .type_error_union,
 7949                .data = try addExtra(extra, error_union_type),
 7950            });
 7951        },
 7952        .error_set_type => |error_set_type| {
 7953            assert(error_set_type.names_map == .none);
 7954            assert(std.sort.isSorted(NullTerminatedString, error_set_type.names.get(ip), {}, NullTerminatedString.indexLessThan));
 7955            const names = error_set_type.names.get(ip);
 7956            const names_map = try ip.addMap(gpa, tid, names.len);
 7957            ip.addStringsToMap(names_map, names);
 7958            const names_len = error_set_type.names.len;
 7959            try extra.ensureUnusedCapacity(@typeInfo(Tag.ErrorSet).@"struct".fields.len + names_len);
 7960            items.appendAssumeCapacity(.{
 7961                .tag = .type_error_set,
 7962                .data = addExtraAssumeCapacity(extra, Tag.ErrorSet{
 7963                    .names_len = names_len,
 7964                    .names_map = names_map,
 7965                }),
 7966            });
 7967            extra.appendSliceAssumeCapacity(.{@ptrCast(error_set_type.names.get(ip))});
 7968        },
 7969        .inferred_error_set_type => |ies_index| {
 7970            items.appendAssumeCapacity(.{
 7971                .tag = .type_inferred_error_set,
 7972                .data = @intFromEnum(ies_index),
 7973            });
 7974        },
 7975        .simple_type => |simple_type| {
 7976            assert(@intFromEnum(simple_type) == items.mutate.len);
 7977            items.appendAssumeCapacity(.{
 7978                .tag = .simple_type,
 7979                .data = 0, // avoid writing `undefined` bits to a file
 7980            });
 7981        },
 7982        .simple_value => |simple_value| {
 7983            assert(@intFromEnum(simple_value) == items.mutate.len);
 7984            items.appendAssumeCapacity(.{
 7985                .tag = .simple_value,
 7986                .data = 0, // avoid writing `undefined` bits to a file
 7987            });
 7988        },
 7989        .undef => |ty| {
 7990            assert(ty != .none);
 7991            items.appendAssumeCapacity(.{
 7992                .tag = .undef,
 7993                .data = @intFromEnum(ty),
 7994            });
 7995        },
 7996
 7997        .struct_type => unreachable, // use getStructType() instead
 7998        .tuple_type => unreachable, // use getTupleType() instead
 7999        .union_type => unreachable, // use getUnionType() instead
 8000        .opaque_type => unreachable, // use getOpaqueType() instead
 8001
 8002        .enum_type => unreachable, // use getEnumType() instead
 8003        .func_type => unreachable, // use getFuncType() instead
 8004        .@"extern" => unreachable, // use getExtern() instead
 8005        .func => unreachable, // use getFuncInstance() or getFuncDecl() instead
 8006        .un => unreachable, // use getUnion instead
 8007
 8008        .variable => |variable| {
 8009            const has_init = variable.init != .none;
 8010            if (has_init) assert(variable.ty == ip.typeOf(variable.init));
 8011            items.appendAssumeCapacity(.{
 8012                .tag = switch (variable.is_threadlocal) {
 8013                    false => .variable,
 8014                    true => .threadlocal_variable,
 8015                },
 8016                .data = try addExtra(extra, Tag.Variable{
 8017                    .ty = variable.ty,
 8018                    .init = variable.init,
 8019                    .owner_nav = variable.owner_nav,
 8020                }),
 8021            });
 8022        },
 8023
 8024        .slice => |slice| {
 8025            assert(ip.indexToKey(slice.ty).ptr_type.flags.size == .slice);
 8026            assert(ip.indexToKey(ip.typeOf(slice.ptr)).ptr_type.flags.size == .many);
 8027            items.appendAssumeCapacity(.{
 8028                .tag = .ptr_slice,
 8029                .data = try addExtra(extra, PtrSlice{
 8030                    .ty = slice.ty,
 8031                    .ptr = slice.ptr,
 8032                    .len = slice.len,
 8033                }),
 8034            });
 8035        },
 8036
 8037        .ptr => |ptr| {
 8038            const ptr_type = ip.indexToKey(ptr.ty).ptr_type;
 8039            assert(ptr_type.flags.size != .slice);
 8040            items.appendAssumeCapacity(switch (ptr.base_addr) {
 8041                .nav => |nav| .{
 8042                    .tag = .ptr_nav,
 8043                    .data = try addExtra(extra, PtrNav.init(ptr.ty, nav, ptr.byte_offset)),
 8044                },
 8045                .comptime_alloc => |alloc_index| .{
 8046                    .tag = .ptr_comptime_alloc,
 8047                    .data = try addExtra(extra, PtrComptimeAlloc.init(ptr.ty, alloc_index, ptr.byte_offset)),
 8048                },
 8049                .uav => |uav| if (ptrsHaveSameAlignment(ip, ptr.ty, ptr_type, uav.orig_ty)) item: {
 8050                    if (ptr.ty != uav.orig_ty) {
 8051                        gop.cancel();
 8052                        var new_key = key;
 8053                        new_key.ptr.base_addr.uav.orig_ty = ptr.ty;
 8054                        gop = try ip.getOrPutKey(gpa, tid, new_key);
 8055                        if (gop == .existing) return gop.existing;
 8056                    }
 8057                    break :item .{
 8058                        .tag = .ptr_uav,
 8059                        .data = try addExtra(extra, PtrUav.init(ptr.ty, uav.val, ptr.byte_offset)),
 8060                    };
 8061                } else .{
 8062                    .tag = .ptr_uav_aligned,
 8063                    .data = try addExtra(extra, PtrUavAligned.init(ptr.ty, uav.val, uav.orig_ty, ptr.byte_offset)),
 8064                },
 8065                .comptime_field => |field_val| item: {
 8066                    assert(field_val != .none);
 8067                    break :item .{
 8068                        .tag = .ptr_comptime_field,
 8069                        .data = try addExtra(extra, PtrComptimeField.init(ptr.ty, field_val, ptr.byte_offset)),
 8070                    };
 8071                },
 8072                .eu_payload, .opt_payload => |base| item: {
 8073                    switch (ptr.base_addr) {
 8074                        .eu_payload => assert(ip.indexToKey(
 8075                            ip.indexToKey(ip.typeOf(base)).ptr_type.child,
 8076                        ) == .error_union_type),
 8077                        .opt_payload => assert(ip.indexToKey(
 8078                            ip.indexToKey(ip.typeOf(base)).ptr_type.child,
 8079                        ) == .opt_type),
 8080                        else => unreachable,
 8081                    }
 8082                    break :item .{
 8083                        .tag = switch (ptr.base_addr) {
 8084                            .eu_payload => .ptr_eu_payload,
 8085                            .opt_payload => .ptr_opt_payload,
 8086                            else => unreachable,
 8087                        },
 8088                        .data = try addExtra(extra, PtrBase.init(ptr.ty, base, ptr.byte_offset)),
 8089                    };
 8090                },
 8091                .int => .{
 8092                    .tag = .ptr_int,
 8093                    .data = try addExtra(extra, PtrInt.init(ptr.ty, ptr.byte_offset)),
 8094                },
 8095                .arr_elem, .field => |base_index| {
 8096                    const base_ptr_type = ip.indexToKey(ip.typeOf(base_index.base)).ptr_type;
 8097                    switch (ptr.base_addr) {
 8098                        .arr_elem => assert(base_ptr_type.flags.size == .many),
 8099                        .field => {
 8100                            assert(base_ptr_type.flags.size == .one);
 8101                            switch (ip.indexToKey(base_ptr_type.child)) {
 8102                                .tuple_type => |tuple_type| {
 8103                                    assert(ptr.base_addr == .field);
 8104                                    assert(base_index.index < tuple_type.types.len);
 8105                                },
 8106                                .struct_type => {
 8107                                    assert(ptr.base_addr == .field);
 8108                                    assert(base_index.index < ip.loadStructType(base_ptr_type.child).field_types.len);
 8109                                },
 8110                                .union_type => {
 8111                                    const union_type = ip.loadUnionType(base_ptr_type.child);
 8112                                    assert(ptr.base_addr == .field);
 8113                                    assert(base_index.index < union_type.field_types.len);
 8114                                },
 8115                                .ptr_type => |slice_type| {
 8116                                    assert(ptr.base_addr == .field);
 8117                                    assert(slice_type.flags.size == .slice);
 8118                                    assert(base_index.index < 2);
 8119                                },
 8120                                else => unreachable,
 8121                            }
 8122                        },
 8123                        else => unreachable,
 8124                    }
 8125                    gop.cancel();
 8126                    const index_index = try ip.get(gpa, tid, .{ .int = .{
 8127                        .ty = .usize_type,
 8128                        .storage = .{ .u64 = base_index.index },
 8129                    } });
 8130                    gop = try ip.getOrPutKey(gpa, tid, key);
 8131                    try items.ensureUnusedCapacity(1);
 8132                    items.appendAssumeCapacity(.{
 8133                        .tag = switch (ptr.base_addr) {
 8134                            .arr_elem => .ptr_elem,
 8135                            .field => .ptr_field,
 8136                            else => unreachable,
 8137                        },
 8138                        .data = try addExtra(extra, PtrBaseIndex.init(ptr.ty, base_index.base, index_index, ptr.byte_offset)),
 8139                    });
 8140                    return gop.put();
 8141                },
 8142            });
 8143        },
 8144
 8145        .opt => |opt| {
 8146            assert(ip.isOptionalType(opt.ty));
 8147            assert(opt.val == .none or ip.indexToKey(opt.ty).opt_type == ip.typeOf(opt.val));
 8148            items.appendAssumeCapacity(if (opt.val == .none) .{
 8149                .tag = .opt_null,
 8150                .data = @intFromEnum(opt.ty),
 8151            } else .{
 8152                .tag = .opt_payload,
 8153                .data = try addExtra(extra, Tag.TypeValue{
 8154                    .ty = opt.ty,
 8155                    .val = opt.val,
 8156                }),
 8157            });
 8158        },
 8159
 8160        .int => |int| b: {
 8161            assert(ip.isIntegerType(int.ty));
 8162            switch (int.storage) {
 8163                .u64, .i64, .big_int => {},
 8164                .lazy_align, .lazy_size => |lazy_ty| {
 8165                    items.appendAssumeCapacity(.{
 8166                        .tag = switch (int.storage) {
 8167                            else => unreachable,
 8168                            .lazy_align => .int_lazy_align,
 8169                            .lazy_size => .int_lazy_size,
 8170                        },
 8171                        .data = try addExtra(extra, IntLazy{
 8172                            .ty = int.ty,
 8173                            .lazy_ty = lazy_ty,
 8174                        }),
 8175                    });
 8176                    return gop.put();
 8177                },
 8178            }
 8179            switch (int.ty) {
 8180                .u8_type => switch (int.storage) {
 8181                    .big_int => |big_int| {
 8182                        items.appendAssumeCapacity(.{
 8183                            .tag = .int_u8,
 8184                            .data = big_int.toInt(u8) catch unreachable,
 8185                        });
 8186                        break :b;
 8187                    },
 8188                    inline .u64, .i64 => |x| {
 8189                        items.appendAssumeCapacity(.{
 8190                            .tag = .int_u8,
 8191                            .data = @as(u8, @intCast(x)),
 8192                        });
 8193                        break :b;
 8194                    },
 8195                    .lazy_align, .lazy_size => unreachable,
 8196                },
 8197                .u16_type => switch (int.storage) {
 8198                    .big_int => |big_int| {
 8199                        items.appendAssumeCapacity(.{
 8200                            .tag = .int_u16,
 8201                            .data = big_int.toInt(u16) catch unreachable,
 8202                        });
 8203                        break :b;
 8204                    },
 8205                    inline .u64, .i64 => |x| {
 8206                        items.appendAssumeCapacity(.{
 8207                            .tag = .int_u16,
 8208                            .data = @as(u16, @intCast(x)),
 8209                        });
 8210                        break :b;
 8211                    },
 8212                    .lazy_align, .lazy_size => unreachable,
 8213                },
 8214                .u32_type => switch (int.storage) {
 8215                    .big_int => |big_int| {
 8216                        items.appendAssumeCapacity(.{
 8217                            .tag = .int_u32,
 8218                            .data = big_int.toInt(u32) catch unreachable,
 8219                        });
 8220                        break :b;
 8221                    },
 8222                    inline .u64, .i64 => |x| {
 8223                        items.appendAssumeCapacity(.{
 8224                            .tag = .int_u32,
 8225                            .data = @as(u32, @intCast(x)),
 8226                        });
 8227                        break :b;
 8228                    },
 8229                    .lazy_align, .lazy_size => unreachable,
 8230                },
 8231                .i32_type => switch (int.storage) {
 8232                    .big_int => |big_int| {
 8233                        const casted = big_int.toInt(i32) catch unreachable;
 8234                        items.appendAssumeCapacity(.{
 8235                            .tag = .int_i32,
 8236                            .data = @as(u32, @bitCast(casted)),
 8237                        });
 8238                        break :b;
 8239                    },
 8240                    inline .u64, .i64 => |x| {
 8241                        items.appendAssumeCapacity(.{
 8242                            .tag = .int_i32,
 8243                            .data = @as(u32, @bitCast(@as(i32, @intCast(x)))),
 8244                        });
 8245                        break :b;
 8246                    },
 8247                    .lazy_align, .lazy_size => unreachable,
 8248                },
 8249                .usize_type => switch (int.storage) {
 8250                    .big_int => |big_int| {
 8251                        if (big_int.toInt(u32)) |casted| {
 8252                            items.appendAssumeCapacity(.{
 8253                                .tag = .int_usize,
 8254                                .data = casted,
 8255                            });
 8256                            break :b;
 8257                        } else |_| {}
 8258                    },
 8259                    inline .u64, .i64 => |x| {
 8260                        if (std.math.cast(u32, x)) |casted| {
 8261                            items.appendAssumeCapacity(.{
 8262                                .tag = .int_usize,
 8263                                .data = casted,
 8264                            });
 8265                            break :b;
 8266                        }
 8267                    },
 8268                    .lazy_align, .lazy_size => unreachable,
 8269                },
 8270                .comptime_int_type => switch (int.storage) {
 8271                    .big_int => |big_int| {
 8272                        if (big_int.toInt(u32)) |casted| {
 8273                            items.appendAssumeCapacity(.{
 8274                                .tag = .int_comptime_int_u32,
 8275                                .data = casted,
 8276                            });
 8277                            break :b;
 8278                        } else |_| {}
 8279                        if (big_int.toInt(i32)) |casted| {
 8280                            items.appendAssumeCapacity(.{
 8281                                .tag = .int_comptime_int_i32,
 8282                                .data = @as(u32, @bitCast(casted)),
 8283                            });
 8284                            break :b;
 8285                        } else |_| {}
 8286                    },
 8287                    inline .u64, .i64 => |x| {
 8288                        if (std.math.cast(u32, x)) |casted| {
 8289                            items.appendAssumeCapacity(.{
 8290                                .tag = .int_comptime_int_u32,
 8291                                .data = casted,
 8292                            });
 8293                            break :b;
 8294                        }
 8295                        if (std.math.cast(i32, x)) |casted| {
 8296                            items.appendAssumeCapacity(.{
 8297                                .tag = .int_comptime_int_i32,
 8298                                .data = @as(u32, @bitCast(casted)),
 8299                            });
 8300                            break :b;
 8301                        }
 8302                    },
 8303                    .lazy_align, .lazy_size => unreachable,
 8304                },
 8305                else => {},
 8306            }
 8307            switch (int.storage) {
 8308                .big_int => |big_int| {
 8309                    if (big_int.toInt(u32)) |casted| {
 8310                        items.appendAssumeCapacity(.{
 8311                            .tag = .int_small,
 8312                            .data = try addExtra(extra, IntSmall{
 8313                                .ty = int.ty,
 8314                                .value = casted,
 8315                            }),
 8316                        });
 8317                        return gop.put();
 8318                    } else |_| {}
 8319
 8320                    const tag: Tag = if (big_int.positive) .int_positive else .int_negative;
 8321                    try addInt(ip, gpa, tid, int.ty, tag, big_int.limbs);
 8322                },
 8323                inline .u64, .i64 => |x| {
 8324                    if (std.math.cast(u32, x)) |casted| {
 8325                        items.appendAssumeCapacity(.{
 8326                            .tag = .int_small,
 8327                            .data = try addExtra(extra, IntSmall{
 8328                                .ty = int.ty,
 8329                                .value = casted,
 8330                            }),
 8331                        });
 8332                        return gop.put();
 8333                    }
 8334
 8335                    var buf: [2]Limb = undefined;
 8336                    const big_int = BigIntMutable.init(&buf, x).toConst();
 8337                    const tag: Tag = if (big_int.positive) .int_positive else .int_negative;
 8338                    try addInt(ip, gpa, tid, int.ty, tag, big_int.limbs);
 8339                },
 8340                .lazy_align, .lazy_size => unreachable,
 8341            }
 8342        },
 8343
 8344        .err => |err| {
 8345            assert(ip.isErrorSetType(err.ty));
 8346            items.appendAssumeCapacity(.{
 8347                .tag = .error_set_error,
 8348                .data = try addExtra(extra, err),
 8349            });
 8350        },
 8351
 8352        .error_union => |error_union| {
 8353            assert(ip.isErrorUnionType(error_union.ty));
 8354            items.appendAssumeCapacity(switch (error_union.val) {
 8355                .err_name => |err_name| .{
 8356                    .tag = .error_union_error,
 8357                    .data = try addExtra(extra, Key.Error{
 8358                        .ty = error_union.ty,
 8359                        .name = err_name,
 8360                    }),
 8361                },
 8362                .payload => |payload| .{
 8363                    .tag = .error_union_payload,
 8364                    .data = try addExtra(extra, Tag.TypeValue{
 8365                        .ty = error_union.ty,
 8366                        .val = payload,
 8367                    }),
 8368                },
 8369            });
 8370        },
 8371
 8372        .enum_literal => |enum_literal| items.appendAssumeCapacity(.{
 8373            .tag = .enum_literal,
 8374            .data = @intFromEnum(enum_literal),
 8375        }),
 8376
 8377        .enum_tag => |enum_tag| {
 8378            assert(ip.isEnumType(enum_tag.ty));
 8379            switch (ip.indexToKey(enum_tag.ty)) {
 8380                .simple_type => assert(ip.isIntegerType(ip.typeOf(enum_tag.int))),
 8381                .enum_type => assert(ip.typeOf(enum_tag.int) == ip.loadEnumType(enum_tag.ty).tag_ty),
 8382                else => unreachable,
 8383            }
 8384            items.appendAssumeCapacity(.{
 8385                .tag = .enum_tag,
 8386                .data = try addExtra(extra, enum_tag),
 8387            });
 8388        },
 8389
 8390        .empty_enum_value => |enum_or_union_ty| items.appendAssumeCapacity(.{
 8391            .tag = .only_possible_value,
 8392            .data = @intFromEnum(enum_or_union_ty),
 8393        }),
 8394
 8395        .float => |float| {
 8396            switch (float.ty) {
 8397                .f16_type => items.appendAssumeCapacity(.{
 8398                    .tag = .float_f16,
 8399                    .data = @as(u16, @bitCast(float.storage.f16)),
 8400                }),
 8401                .f32_type => items.appendAssumeCapacity(.{
 8402                    .tag = .float_f32,
 8403                    .data = @as(u32, @bitCast(float.storage.f32)),
 8404                }),
 8405                .f64_type => items.appendAssumeCapacity(.{
 8406                    .tag = .float_f64,
 8407                    .data = try addExtra(extra, Float64.pack(float.storage.f64)),
 8408                }),
 8409                .f80_type => items.appendAssumeCapacity(.{
 8410                    .tag = .float_f80,
 8411                    .data = try addExtra(extra, Float80.pack(float.storage.f80)),
 8412                }),
 8413                .f128_type => items.appendAssumeCapacity(.{
 8414                    .tag = .float_f128,
 8415                    .data = try addExtra(extra, Float128.pack(float.storage.f128)),
 8416                }),
 8417                .c_longdouble_type => switch (float.storage) {
 8418                    .f80 => |x| items.appendAssumeCapacity(.{
 8419                        .tag = .float_c_longdouble_f80,
 8420                        .data = try addExtra(extra, Float80.pack(x)),
 8421                    }),
 8422                    inline .f16, .f32, .f64, .f128 => |x| items.appendAssumeCapacity(.{
 8423                        .tag = .float_c_longdouble_f128,
 8424                        .data = try addExtra(extra, Float128.pack(x)),
 8425                    }),
 8426                },
 8427                .comptime_float_type => items.appendAssumeCapacity(.{
 8428                    .tag = .float_comptime_float,
 8429                    .data = try addExtra(extra, Float128.pack(float.storage.f128)),
 8430                }),
 8431                else => unreachable,
 8432            }
 8433        },
 8434
 8435        .aggregate => |aggregate| {
 8436            const ty_key = ip.indexToKey(aggregate.ty);
 8437            const len = ip.aggregateTypeLen(aggregate.ty);
 8438            const child = switch (ty_key) {
 8439                .array_type => |array_type| array_type.child,
 8440                .vector_type => |vector_type| vector_type.child,
 8441                .tuple_type, .struct_type => .none,
 8442                else => unreachable,
 8443            };
 8444            const sentinel = switch (ty_key) {
 8445                .array_type => |array_type| array_type.sentinel,
 8446                .vector_type, .tuple_type, .struct_type => .none,
 8447                else => unreachable,
 8448            };
 8449            const len_including_sentinel = len + @intFromBool(sentinel != .none);
 8450            switch (aggregate.storage) {
 8451                .bytes => |bytes| {
 8452                    assert(child == .u8_type);
 8453                    if (sentinel != .none) {
 8454                        assert(bytes.at(@intCast(len), ip) == ip.indexToKey(sentinel).int.storage.u64);
 8455                    }
 8456                },
 8457                .elems => |elems| {
 8458                    if (elems.len != len) {
 8459                        assert(elems.len == len_including_sentinel);
 8460                        assert(elems[@intCast(len)] == sentinel);
 8461                    }
 8462                },
 8463                .repeated_elem => |elem| {
 8464                    assert(sentinel == .none or elem == sentinel);
 8465                },
 8466            }
 8467            if (aggregate.storage.values().len > 0) switch (ty_key) {
 8468                .array_type, .vector_type => {
 8469                    var any_defined = false;
 8470                    for (aggregate.storage.values()) |elem| {
 8471                        if (!ip.isUndef(elem)) any_defined = true;
 8472                        assert(ip.typeOf(elem) == child);
 8473                    }
 8474                    assert(any_defined); // aggregate fields must not be all undefined
 8475                },
 8476                .struct_type => {
 8477                    var any_defined = false;
 8478                    for (aggregate.storage.values(), ip.loadStructType(aggregate.ty).field_types.get(ip)) |elem, field_ty| {
 8479                        if (!ip.isUndef(elem)) any_defined = true;
 8480                        assert(ip.typeOf(elem) == field_ty);
 8481                    }
 8482                    assert(any_defined); // aggregate fields must not be all undefined
 8483                },
 8484                .tuple_type => |tuple_type| {
 8485                    var any_defined = false;
 8486                    for (aggregate.storage.values(), tuple_type.types.get(ip)) |elem, ty| {
 8487                        if (!ip.isUndef(elem)) any_defined = true;
 8488                        assert(ip.typeOf(elem) == ty);
 8489                    }
 8490                    assert(any_defined); // aggregate fields must not be all undefined
 8491                },
 8492                else => unreachable,
 8493            };
 8494
 8495            if (len == 0) {
 8496                items.appendAssumeCapacity(.{
 8497                    .tag = .only_possible_value,
 8498                    .data = @intFromEnum(aggregate.ty),
 8499                });
 8500                return gop.put();
 8501            }
 8502
 8503            switch (ty_key) {
 8504                .tuple_type => |tuple_type| opv: {
 8505                    switch (aggregate.storage) {
 8506                        .bytes => |bytes| for (tuple_type.values.get(ip), bytes.at(0, ip)..) |value, byte| {
 8507                            if (value == .none) break :opv;
 8508                            switch (ip.indexToKey(value)) {
 8509                                .undef => break :opv,
 8510                                .int => |int| switch (int.storage) {
 8511                                    .u64 => |x| if (x != byte) break :opv,
 8512                                    else => break :opv,
 8513                                },
 8514                                else => unreachable,
 8515                            }
 8516                        },
 8517                        .elems => |elems| if (!std.mem.eql(
 8518                            Index,
 8519                            tuple_type.values.get(ip),
 8520                            elems,
 8521                        )) break :opv,
 8522                        .repeated_elem => |elem| for (tuple_type.values.get(ip)) |value| {
 8523                            if (value != elem) break :opv;
 8524                        },
 8525                    }
 8526                    // This encoding works thanks to the fact that, as we just verified,
 8527                    // the type itself contains a slice of values that can be provided
 8528                    // in the aggregate fields.
 8529                    items.appendAssumeCapacity(.{
 8530                        .tag = .only_possible_value,
 8531                        .data = @intFromEnum(aggregate.ty),
 8532                    });
 8533                    return gop.put();
 8534                },
 8535                else => {},
 8536            }
 8537
 8538            repeated: {
 8539                switch (aggregate.storage) {
 8540                    .bytes => |bytes| for (bytes.toSlice(len, ip)[1..]) |byte|
 8541                        if (byte != bytes.at(0, ip)) break :repeated,
 8542                    .elems => |elems| for (elems[1..@intCast(len)]) |elem|
 8543                        if (elem != elems[0]) break :repeated,
 8544                    .repeated_elem => {},
 8545                }
 8546                const elem = switch (aggregate.storage) {
 8547                    .bytes => |bytes| elem: {
 8548                        gop.cancel();
 8549                        const elem = try ip.get(gpa, tid, .{ .int = .{
 8550                            .ty = .u8_type,
 8551                            .storage = .{ .u64 = bytes.at(0, ip) },
 8552                        } });
 8553                        gop = try ip.getOrPutKey(gpa, tid, key);
 8554                        try items.ensureUnusedCapacity(1);
 8555                        break :elem elem;
 8556                    },
 8557                    .elems => |elems| elems[0],
 8558                    .repeated_elem => |elem| elem,
 8559                };
 8560
 8561                try extra.ensureUnusedCapacity(@typeInfo(Repeated).@"struct".fields.len);
 8562                items.appendAssumeCapacity(.{
 8563                    .tag = .repeated,
 8564                    .data = addExtraAssumeCapacity(extra, Repeated{
 8565                        .ty = aggregate.ty,
 8566                        .elem_val = elem,
 8567                    }),
 8568                });
 8569                return gop.put();
 8570            }
 8571
 8572            if (child == .u8_type) bytes: {
 8573                const string_bytes = ip.getLocal(tid).getMutableStringBytes(gpa);
 8574                const start = string_bytes.mutate.len;
 8575                try string_bytes.ensureUnusedCapacity(@intCast(len_including_sentinel + 1));
 8576                try extra.ensureUnusedCapacity(@typeInfo(Bytes).@"struct".fields.len);
 8577                switch (aggregate.storage) {
 8578                    .bytes => |bytes| string_bytes.appendSliceAssumeCapacity(.{bytes.toSlice(len, ip)}),
 8579                    .elems => |elems| for (elems[0..@intCast(len)]) |elem| switch (ip.indexToKey(elem)) {
 8580                        .undef => {
 8581                            string_bytes.shrinkRetainingCapacity(start);
 8582                            break :bytes;
 8583                        },
 8584                        .int => |int| string_bytes.appendAssumeCapacity(.{@intCast(int.storage.u64)}),
 8585                        else => unreachable,
 8586                    },
 8587                    .repeated_elem => |elem| switch (ip.indexToKey(elem)) {
 8588                        .undef => break :bytes,
 8589                        .int => |int| @memset(
 8590                            string_bytes.addManyAsSliceAssumeCapacity(@intCast(len))[0],
 8591                            @intCast(int.storage.u64),
 8592                        ),
 8593                        else => unreachable,
 8594                    },
 8595                }
 8596                if (sentinel != .none) string_bytes.appendAssumeCapacity(.{
 8597                    @intCast(ip.indexToKey(sentinel).int.storage.u64),
 8598                });
 8599                const string = try ip.getOrPutTrailingString(
 8600                    gpa,
 8601                    tid,
 8602                    @intCast(len_including_sentinel),
 8603                    .maybe_embedded_nulls,
 8604                );
 8605                items.appendAssumeCapacity(.{
 8606                    .tag = .bytes,
 8607                    .data = addExtraAssumeCapacity(extra, Bytes{
 8608                        .ty = aggregate.ty,
 8609                        .bytes = string,
 8610                    }),
 8611                });
 8612                return gop.put();
 8613            }
 8614
 8615            try extra.ensureUnusedCapacity(
 8616                @typeInfo(Tag.Aggregate).@"struct".fields.len + @as(usize, @intCast(len_including_sentinel + 1)),
 8617            );
 8618            items.appendAssumeCapacity(.{
 8619                .tag = .aggregate,
 8620                .data = addExtraAssumeCapacity(extra, Tag.Aggregate{
 8621                    .ty = aggregate.ty,
 8622                }),
 8623            });
 8624            extra.appendSliceAssumeCapacity(.{@ptrCast(aggregate.storage.elems)});
 8625            if (sentinel != .none) extra.appendAssumeCapacity(.{@intFromEnum(sentinel)});
 8626        },
 8627
 8628        .memoized_call => |memoized_call| {
 8629            for (memoized_call.arg_values) |arg| assert(arg != .none);
 8630            try extra.ensureUnusedCapacity(@typeInfo(MemoizedCall).@"struct".fields.len +
 8631                memoized_call.arg_values.len);
 8632            items.appendAssumeCapacity(.{
 8633                .tag = .memoized_call,
 8634                .data = addExtraAssumeCapacity(extra, MemoizedCall{
 8635                    .func = memoized_call.func,
 8636                    .args_len = @intCast(memoized_call.arg_values.len),
 8637                    .result = memoized_call.result,
 8638                    .branch_count = memoized_call.branch_count,
 8639                }),
 8640            });
 8641            extra.appendSliceAssumeCapacity(.{@ptrCast(memoized_call.arg_values)});
 8642        },
 8643    }
 8644    return gop.put();
 8645}
 8646
 8647pub fn getUnion(
 8648    ip: *InternPool,
 8649    gpa: Allocator,
 8650    tid: Zcu.PerThread.Id,
 8651    un: Key.Union,
 8652) Allocator.Error!Index {
 8653    var gop = try ip.getOrPutKey(gpa, tid, .{ .un = un });
 8654    defer gop.deinit();
 8655    if (gop == .existing) return gop.existing;
 8656    const local = ip.getLocal(tid);
 8657    const items = local.getMutableItems(gpa);
 8658    const extra = local.getMutableExtra(gpa);
 8659    try items.ensureUnusedCapacity(1);
 8660
 8661    assert(un.ty != .none);
 8662    assert(un.val != .none);
 8663    items.appendAssumeCapacity(.{
 8664        .tag = .union_value,
 8665        .data = try addExtra(extra, un),
 8666    });
 8667
 8668    return gop.put();
 8669}
 8670
/// Input to `getUnionType`, describing the union type to be interned.
pub const UnionTypeInit = struct {
    flags: packed struct {
        runtime_tag: LoadedUnionType.RuntimeTag,
        any_aligned_fields: bool,
        layout: std.builtin.Type.ContainerLayout,
        status: LoadedUnionType.Status,
        requires_comptime: RequiresComptime,
        assumed_runtime_bits: bool,
        assumed_pointer_aligned: bool,
        alignment: Alignment,
    },
    /// Number of fields in the union.
    fields_len: u32,
    /// The enum used as this union's tag; stored verbatim into `Tag.TypeUnion.tag_ty`.
    enum_tag_ty: Index,
    /// May have length 0 which leaves the values unset until later.
    field_types: []const Index,
    /// May have length 0 which leaves the values unset until later.
    /// The logic for `any_aligned_fields` is asserted to have been done before
    /// calling this function.
    field_aligns: []const Alignment,
    key: union(enum) {
        /// A source-declared union whose captures are borrowed from the caller.
        declared: struct {
            zir_index: TrackedInst.Index,
            captures: []const CaptureValue,
        },
        /// A source-declared union whose captures are already owned by the pool.
        declared_owned_captures: struct {
            zir_index: TrackedInst.Index,
            captures: CaptureValue.Slice,
        },
        /// A reified type, deduplicated by `type_hash` rather than by captures.
        reified: struct {
            zir_index: TrackedInst.Index,
            type_hash: u64,
        },
    },
};
 8705
/// Interns the union type described by `ini`.
/// On a cache miss, returns a `wip` handle; the caller must then call
/// `WipNamespaceType.setName` and `finish` to fill in the fields left
/// `undefined` below, or `cancel` to back out.
pub fn getUnionType(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    ini: UnionTypeInit,
    /// If it is known that there is an existing type with this key which is outdated,
    /// this is passed as `true`, and the type is replaced with one at a fresh index.
    replace_existing: bool,
) Allocator.Error!WipNamespaceType.Result {
    // Externally-borrowed and pool-owned captures intern to the same
    // `declared` key form; they differ only in who owns the capture memory.
    const key: Key = .{ .union_type = switch (ini.key) {
        .declared => |d| .{ .declared = .{
            .zir_index = d.zir_index,
            .captures = .{ .external = d.captures },
        } },
        .declared_owned_captures => |d| .{ .declared = .{
            .zir_index = d.zir_index,
            .captures = .{ .owned = d.captures },
        } },
        .reified => |r| .{ .reified = .{
            .zir_index = r.zir_index,
            .type_hash = r.type_hash,
        } },
    } };
    var gop = if (replace_existing)
        ip.putKeyReplace(tid, key)
    else
        try ip.getOrPutKey(gpa, tid, key);
    defer gop.deinit();
    if (gop == .existing) return .{ .existing = gop.existing };

    const local = ip.getLocal(tid);
    const items = local.getMutableItems(gpa);
    try items.ensureUnusedCapacity(1);
    const extra = local.getMutableExtra(gpa);

    // Field alignments are packed one byte each, four per `u32` element.
    const align_elements_len = if (ini.flags.any_aligned_fields) (ini.fields_len + 3) / 4 else 0;
    const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4);
    // Reserve room for the fixed `Tag.TypeUnion` header plus all trailing data
    // appended below: captures (or the reified type hash), field types, and
    // packed field alignments.
    try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeUnion).@"struct".fields.len +
        // TODO: fmt bug
        // zig fmt: off
        switch (ini.key) {
            inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
            .reified => 2, // type_hash: PackedU64
        } +
        // zig fmt: on
        ini.fields_len + // field types
        align_elements_len);

    const extra_index = addExtraAssumeCapacity(extra, Tag.TypeUnion{
        .flags = .{
            .any_captures = switch (ini.key) {
                inline .declared, .declared_owned_captures => |d| d.captures.len != 0,
                .reified => false,
            },
            .runtime_tag = ini.flags.runtime_tag,
            .any_aligned_fields = ini.flags.any_aligned_fields,
            .layout = ini.flags.layout,
            .status = ini.flags.status,
            .requires_comptime = ini.flags.requires_comptime,
            .assumed_runtime_bits = ini.flags.assumed_runtime_bits,
            .assumed_pointer_aligned = ini.flags.assumed_pointer_aligned,
            .alignment = ini.flags.alignment,
            .is_reified = switch (ini.key) {
                .declared, .declared_owned_captures => false,
                .reified => true,
            },
        },
        .fields_len = ini.fields_len,
        // Size and padding are not yet resolved; `maxInt` marks them unset.
        .size = std.math.maxInt(u32),
        .padding = std.math.maxInt(u32),
        .name = undefined, // set by `finish`
        .name_nav = undefined, // set by `finish`
        .namespace = undefined, // set by `finish`
        .tag_ty = ini.enum_tag_ty,
        .zir_index = switch (ini.key) {
            inline else => |x| x.zir_index,
        },
    });

    items.appendAssumeCapacity(.{
        .tag = .type_union,
        .data = extra_index,
    });

    // Trailing captures: a length prefix followed by the capture values, or
    // the 64-bit type hash for reified types.
    switch (ini.key) {
        .declared => |d| if (d.captures.len != 0) {
            extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
            extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)});
        },
        .declared_owned_captures => |d| if (d.captures.len != 0) {
            extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
            extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))});
        },
        .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)),
    }

    // field types
    if (ini.field_types.len > 0) {
        assert(ini.field_types.len == ini.fields_len);
        extra.appendSliceAssumeCapacity(.{@ptrCast(ini.field_types)});
    } else {
        // Length 0 means "not yet known"; fill with `none` placeholders.
        extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len);
    }

    // field alignments
    if (ini.flags.any_aligned_fields) {
        extra.appendNTimesAssumeCapacity(.{align_element}, align_elements_len);
        if (ini.field_aligns.len > 0) {
            assert(ini.field_aligns.len == ini.fields_len);
            // Overwrite the placeholder bytes just appended with the provided
            // per-field alignments.
            @memcpy((Alignment.Slice{
                .tid = tid,
                .start = @intCast(extra.mutate.len - align_elements_len),
                .len = @intCast(ini.field_aligns.len),
            }).get(ip), ini.field_aligns);
        }
    } else {
        assert(ini.field_aligns.len == 0);
    }

    // The returned indices locate the `undefined` header fields so that
    // `setName`/`finish` can patch them in place later.
    return .{ .wip = .{
        .tid = tid,
        .index = gop.put(),
        .type_name_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "name").?,
        .name_nav_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "name_nav").?,
        .namespace_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "namespace").?,
    } };
}
 8833
/// Handle to a namespace type (struct, union, ...) that has been reserved in
/// the pool but whose name and namespace fields are still `undefined`.
pub const WipNamespaceType = struct {
    tid: Zcu.PerThread.Id,
    /// The already-reserved `Index` of the type under construction.
    index: Index,
    /// Offset into `extra` of the `name` field, patched by `setName`.
    type_name_extra_index: u32,
    /// Offset into `extra` of the `namespace` field, patched by `finish`.
    namespace_extra_index: u32,
    /// Offset into `extra` of the `name_nav` field, patched by `setName`.
    name_nav_extra_index: u32,

    /// Stores the type's name (and optionally the `Nav` it is named after)
    /// into the reserved `extra` slots.
    pub fn setName(
        wip: WipNamespaceType,
        ip: *InternPool,
        type_name: NullTerminatedString,
        /// This should be the `Nav` we are named after if we use the `.parent` name strategy; `.none` otherwise.
        /// This is also `.none` if we use `.parent` because we are the root struct type for a file.
        name_nav: Nav.Index.Optional,
    ) void {
        // Acquire-load so writes to `extra` made on the owning thread are visible.
        const extra = ip.getLocalShared(wip.tid).extra.acquire();
        const extra_items = extra.view().items(.@"0");
        extra_items[wip.type_name_extra_index] = @intFromEnum(type_name);
        extra_items[wip.name_nav_extra_index] = @intFromEnum(name_nav);
    }

    /// Stores the type's namespace, completing construction, and returns the
    /// final `Index`.
    pub fn finish(
        wip: WipNamespaceType,
        ip: *InternPool,
        namespace: NamespaceIndex,
    ) Index {
        const extra = ip.getLocalShared(wip.tid).extra.acquire();
        const extra_items = extra.view().items(.@"0");

        extra_items[wip.namespace_extra_index] = @intFromEnum(namespace);

        return wip.index;
    }

    /// Backs out of construction, removing the reserved type from the pool.
    pub fn cancel(wip: WipNamespaceType, ip: *InternPool, tid: Zcu.PerThread.Id) void {
        ip.remove(tid, wip.index);
    }

    pub const Result = union(enum) {
        /// No equivalent type existed; the caller must complete this WIP type.
        wip: WipNamespaceType,
        /// An equivalent type was already interned at this index.
        existing: Index,
    };
};
 8877
/// Input to `getStructType`, describing the struct type to be interned.
pub const StructTypeInit = struct {
    layout: std.builtin.Type.ContainerLayout,
    /// Number of fields in the struct.
    fields_len: u32,
    known_non_opv: bool,
    requires_comptime: RequiresComptime,
    any_comptime_fields: bool,
    any_default_inits: bool,
    /// Whether the field default initializers are already resolved.
    inits_resolved: bool,
    any_aligned_fields: bool,
    key: union(enum) {
        /// A source-declared struct whose captures are borrowed from the caller.
        declared: struct {
            zir_index: TrackedInst.Index,
            captures: []const CaptureValue,
        },
        /// A source-declared struct whose captures are already owned by the pool.
        declared_owned_captures: struct {
            zir_index: TrackedInst.Index,
            captures: CaptureValue.Slice,
        },
        /// A reified type, deduplicated by `type_hash` rather than by captures.
        reified: struct {
            zir_index: TrackedInst.Index,
            type_hash: u64,
        },
    },
};
 8902
/// Interns the struct type described by `ini`.
/// On a cache miss, returns a `wip` handle; the caller must then call
/// `WipNamespaceType.setName` and `finish` to fill in the fields left
/// `undefined` below, or `cancel` to back out.
pub fn getStructType(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    ini: StructTypeInit,
    /// If it is known that there is an existing type with this key which is outdated,
    /// this is passed as `true`, and the type is replaced with one at a fresh index.
    replace_existing: bool,
) Allocator.Error!WipNamespaceType.Result {
    // Externally-borrowed and pool-owned captures intern to the same
    // `declared` key form; they differ only in who owns the capture memory.
    const key: Key = .{ .struct_type = switch (ini.key) {
        .declared => |d| .{ .declared = .{
            .zir_index = d.zir_index,
            .captures = .{ .external = d.captures },
        } },
        .declared_owned_captures => |d| .{ .declared = .{
            .zir_index = d.zir_index,
            .captures = .{ .owned = d.captures },
        } },
        .reified => |r| .{ .reified = .{
            .zir_index = r.zir_index,
            .type_hash = r.type_hash,
        } },
    } };
    var gop = if (replace_existing)
        ip.putKeyReplace(tid, key)
    else
        try ip.getOrPutKey(gpa, tid, key);
    defer gop.deinit();
    if (gop == .existing) return .{ .existing = gop.existing };

    const local = ip.getLocal(tid);
    const items = local.getMutableItems(gpa);
    const extra = local.getMutableExtra(gpa);

    // Map for field-name lookup, sized for `fields_len` entries.
    const names_map = try ip.addMap(gpa, tid, ini.fields_len);
    // Undo `addMap` if anything below fails.
    errdefer local.mutate.maps.len -= 1;

    const zir_index = switch (ini.key) {
        inline else => |x| x.zir_index,
    };

    // Packed structs use the dedicated `Tag.TypeStructPacked` encoding and
    // return directly out of this switch; `.auto`/`.extern` fall through to
    // the `Tag.TypeStruct` encoding below.
    const is_extern = switch (ini.layout) {
        .auto => false,
        .@"extern" => true,
        .@"packed" => {
            // Header plus trailing data: captures (or reified type hash),
            // then per-field types, names, and (optionally used) inits.
            try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeStructPacked).@"struct".fields.len +
                // TODO: fmt bug
                // zig fmt: off
                switch (ini.key) {
                    inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
                    .reified => 2, // type_hash: PackedU64
                } +
                // zig fmt: on
                ini.fields_len + // types
                ini.fields_len + // names
                ini.fields_len); // inits
            const extra_index = addExtraAssumeCapacity(extra, Tag.TypeStructPacked{
                .name = undefined, // set by `finish`
                .name_nav = undefined, // set by `finish`
                .zir_index = zir_index,
                .fields_len = ini.fields_len,
                .namespace = undefined, // set by `finish`
                .backing_int_ty = .none,
                .names_map = names_map,
                .flags = .{
                    .any_captures = switch (ini.key) {
                        inline .declared, .declared_owned_captures => |d| d.captures.len != 0,
                        .reified => false,
                    },
                    .field_inits_wip = false,
                    .inits_resolved = ini.inits_resolved,
                    .is_reified = switch (ini.key) {
                        .declared, .declared_owned_captures => false,
                        .reified => true,
                    },
                },
            });
            try items.append(.{
                .tag = if (ini.any_default_inits) .type_struct_packed_inits else .type_struct_packed,
                .data = extra_index,
            });
            // Trailing captures: a length prefix followed by the capture
            // values, or the 64-bit type hash for reified types.
            switch (ini.key) {
                .declared => |d| if (d.captures.len != 0) {
                    extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
                    extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)});
                },
                .declared_owned_captures => |d| if (d.captures.len != 0) {
                    extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
                    extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))});
                },
                .reified => |r| {
                    _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash));
                },
            }
            // Field types and names start out as `none` placeholders, to be
            // resolved later; inits are only stored when any field has one.
            extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len);
            extra.appendNTimesAssumeCapacity(.{@intFromEnum(OptionalNullTerminatedString.none)}, ini.fields_len);
            if (ini.any_default_inits) {
                extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len);
            }
            return .{ .wip = .{
                .tid = tid,
                .index = gop.put(),
                .type_name_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "name").?,
                .name_nav_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "name_nav").?,
                .namespace_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").?,
            } };
        },
    };

    // Field alignments are packed one byte each, four per `u32` element;
    // comptime-field bits are packed 32 per `u32` element.
    const align_elements_len = if (ini.any_aligned_fields) (ini.fields_len + 3) / 4 else 0;
    const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4);
    const comptime_elements_len = if (ini.any_comptime_fields) (ini.fields_len + 31) / 32 else 0;

    try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeStruct).@"struct".fields.len +
        // TODO: fmt bug
        // zig fmt: off
        switch (ini.key) {
            inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
            .reified => 2, // type_hash: PackedU64
        } +
        // zig fmt: on
        (ini.fields_len * 5) + // types, names, inits, runtime order, offsets
        align_elements_len + comptime_elements_len +
        1); // names_map
    const extra_index = addExtraAssumeCapacity(extra, Tag.TypeStruct{
        .name = undefined, // set by `finish`
        .name_nav = undefined, // set by `finish`
        .zir_index = zir_index,
        .namespace = undefined, // set by `finish`
        .fields_len = ini.fields_len,
        // Size is not yet resolved; `maxInt` marks it unset.
        .size = std.math.maxInt(u32),
        .flags = .{
            .any_captures = switch (ini.key) {
                inline .declared, .declared_owned_captures => |d| d.captures.len != 0,
                .reified => false,
            },
            .is_extern = is_extern,
            .known_non_opv = ini.known_non_opv,
            .requires_comptime = ini.requires_comptime,
            .assumed_runtime_bits = false,
            .assumed_pointer_aligned = false,
            .any_comptime_fields = ini.any_comptime_fields,
            .any_default_inits = ini.any_default_inits,
            .any_aligned_fields = ini.any_aligned_fields,
            .alignment = .none,
            .alignment_wip = false,
            .field_types_wip = false,
            .layout_wip = false,
            .layout_resolved = false,
            .field_inits_wip = false,
            .inits_resolved = ini.inits_resolved,
            .fully_resolved = false,
            .is_reified = switch (ini.key) {
                .declared, .declared_owned_captures => false,
                .reified => true,
            },
        },
    });
    try items.append(.{
        .tag = .type_struct,
        .data = extra_index,
    });
    // Trailing captures: a length prefix followed by the capture values, or
    // the 64-bit type hash for reified types.
    switch (ini.key) {
        .declared => |d| if (d.captures.len != 0) {
            extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
            extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)});
        },
        .declared_owned_captures => |d| if (d.captures.len != 0) {
            extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
            extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))});
        },
        .reified => |r| {
            _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash));
        },
    }
    // Trailing layout: field types (placeholders), names_map, field names
    // (placeholders), then the optional init/alignment/comptime/runtime-order
    // sections, ending with field offsets marked unresolved via `maxInt`.
    extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len);
    extra.appendAssumeCapacity(.{@intFromEnum(names_map)});
    extra.appendNTimesAssumeCapacity(.{@intFromEnum(OptionalNullTerminatedString.none)}, ini.fields_len);
    if (ini.any_default_inits) {
        extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len);
    }
    if (ini.any_aligned_fields) {
        extra.appendNTimesAssumeCapacity(.{align_element}, align_elements_len);
    }
    if (ini.any_comptime_fields) {
        extra.appendNTimesAssumeCapacity(.{0}, comptime_elements_len);
    }
    if (ini.layout == .auto) {
        extra.appendNTimesAssumeCapacity(.{@intFromEnum(LoadedStructType.RuntimeOrder.unresolved)}, ini.fields_len);
    }
    extra.appendNTimesAssumeCapacity(.{std.math.maxInt(u32)}, ini.fields_len);
    return .{ .wip = .{
        .tid = tid,
        .index = gop.put(),
        .type_name_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "name").?,
        .name_nav_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "name_nav").?,
        .namespace_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "namespace").?,
    } };
}
 9102
/// Input to `getTupleType`. `types` and `values` must have equal length.
pub const TupleTypeInit = struct {
    /// The type of each tuple field; all must be non-`none`.
    types: []const Index,
    /// These elements may be `none`, indicating runtime-known.
    values: []const Index,
};
 9108
/// Interns the tuple type described by `ini`, returning its `Index`.
pub fn getTupleType(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    ini: TupleTypeInit,
) Allocator.Error!Index {
    assert(ini.types.len == ini.values.len);
    for (ini.types) |elem| assert(elem != .none);

    const local = ip.getLocal(tid);
    const items = local.getMutableItems(gpa);
    const extra = local.getMutableExtra(gpa);

    // Strategy: append the encoded tuple to `extra` unconditionally, then ask
    // whether an equal tuple already exists, and if so revert `extra` to its
    // previous length. This mirrors `getFuncType` and `getOrPutTrailingString`.
    const prev_extra_len = extra.mutate.len;
    const fields_len: u32 = @intCast(ini.types.len);

    try items.ensureUnusedCapacity(1);
    // NOTE(review): capacity is reserved for `fields_len * 3` trailing
    // elements, but only two slices (`types`, `values`) of `fields_len` each
    // are appended below — presumably a harmless over-reservation; confirm.
    try extra.ensureUnusedCapacity(
        @typeInfo(TypeTuple).@"struct".fields.len + (fields_len * 3),
    );

    const extra_index = addExtraAssumeCapacity(extra, TypeTuple{
        .fields_len = fields_len,
    });
    extra.appendSliceAssumeCapacity(.{@ptrCast(ini.types)});
    extra.appendSliceAssumeCapacity(.{@ptrCast(ini.values)});
    errdefer extra.mutate.len = prev_extra_len;

    var gop = try ip.getOrPutKey(gpa, tid, .{ .tuple_type = extraTypeTuple(tid, extra.list.*, extra_index) });
    defer gop.deinit();
    if (gop == .existing) {
        // Already interned: revert the speculative `extra` append.
        extra.mutate.len = prev_extra_len;
        return gop.existing;
    }

    items.appendAssumeCapacity(.{
        .tag = .type_tuple,
        .data = extra_index,
    });
    return gop.put();
}
 9150
/// This is equivalent to `Key.FuncType` but adjusted to have a slice for `param_types`.
pub const GetFuncTypeKey = struct {
    /// Types of the parameters; all must be non-`none`.
    param_types: []const Index,
    return_type: Index,
    /// Stored in trailing data only when nonzero.
    comptime_bits: u32 = 0,
    /// Stored in trailing data only when nonzero.
    noalias_bits: u32 = 0,
    /// `null` means generic.
    cc: ?std.builtin.CallingConvention = .auto,
    is_var_args: bool = false,
    is_generic: bool = false,
    is_noinline: bool = false,
    section_is_generic: bool = false,
    addrspace_is_generic: bool = false,
};
 9165
/// Interns the function type described by `key`, returning its `Index`.
pub fn getFuncType(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    key: GetFuncTypeKey,
) Allocator.Error!Index {
    // Validate input parameters.
    assert(key.return_type != .none);
    for (key.param_types) |param_type| assert(param_type != .none);

    const local = ip.getLocal(tid);
    const items = local.getMutableItems(gpa);
    try items.ensureUnusedCapacity(1);
    const extra = local.getMutableExtra(gpa);

    // The strategy here is to add the function type unconditionally, then to
    // ask if it already exists, and if so, revert the lengths of the mutated
    // arrays. This is similar to what `getOrPutTrailingString` does.
    const prev_extra_len = extra.mutate.len;
    const params_len: u32 = @intCast(key.param_types.len);

    // Header plus trailing data: optional comptime bits, optional noalias
    // bits, then the parameter types.
    try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeFunction).@"struct".fields.len +
        @intFromBool(key.comptime_bits != 0) +
        @intFromBool(key.noalias_bits != 0) +
        params_len);

    const func_type_extra_index = addExtraAssumeCapacity(extra, Tag.TypeFunction{
        .params_len = params_len,
        .return_type = key.return_type,
        .flags = .{
            // A null `cc` (meaning generic) is packed as `.auto`; the
            // `is_generic` flag below is carried separately.
            .cc = .pack(key.cc orelse .auto),
            .is_var_args = key.is_var_args,
            .has_comptime_bits = key.comptime_bits != 0,
            .has_noalias_bits = key.noalias_bits != 0,
            .is_generic = key.is_generic,
            .is_noinline = key.is_noinline,
        },
    });

    if (key.comptime_bits != 0) extra.appendAssumeCapacity(.{key.comptime_bits});
    if (key.noalias_bits != 0) extra.appendAssumeCapacity(.{key.noalias_bits});
    extra.appendSliceAssumeCapacity(.{@ptrCast(key.param_types)});
    errdefer extra.mutate.len = prev_extra_len;

    var gop = try ip.getOrPutKey(gpa, tid, .{
        .func_type = extraFuncType(tid, extra.list.*, func_type_extra_index),
    });
    defer gop.deinit();
    if (gop == .existing) {
        // Already interned: revert the speculative `extra` append.
        extra.mutate.len = prev_extra_len;
        return gop.existing;
    }

    items.appendAssumeCapacity(.{
        .tag = .type_function,
        .data = func_type_extra_index,
    });
    return gop.put();
}
 9225
 9226/// Intern an `.@"extern"`, creating a corresponding owner `Nav` if necessary.
 9227/// This will *not* queue the extern for codegen: see `Zcu.PerThread.getExtern` for a wrapper which does.
pub fn getExtern(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    /// `key.owner_nav` is ignored.
    key: Key.Extern,
) Allocator.Error!struct {
    index: Index,
    /// Only set if the `Nav` was newly created.
    new_nav: Nav.Index.Optional,
} {
    // Fast path: an identical extern was already interned; no new `Nav` is created.
    var gop = try ip.getOrPutKey(gpa, tid, .{ .@"extern" = key });
    defer gop.deinit();
    if (gop == .existing) return .{
        .index = gop.existing,
        .new_nav = .none,
    };

    const local = ip.getLocal(tid);
    const items = local.getMutableItems(gpa);
    const extra = local.getMutableExtra(gpa);
    // Reserve all capacity up front so that no fallible operation happens after
    // we start mutating state; the `catch unreachable` below depends on this.
    try items.ensureUnusedCapacity(1);
    try extra.ensureUnusedCapacity(@typeInfo(Tag.Extern).@"struct".fields.len);
    try local.getMutableNavs(gpa).ensureUnusedCapacity(1);

    // Predict the index the `@"extern" will live at, so we can construct the owner `Nav` before releasing the shard's mutex.
    const extern_index = Index.Unwrapped.wrap(.{
        .tid = tid,
        .index = items.mutate.len,
    }, ip);
    // The new `Nav`'s value points at the not-yet-appended extern item.
    const owner_nav = ip.createNav(gpa, tid, .{
        .name = key.name,
        .fqn = key.name,
        .val = extern_index,
        .is_const = key.is_const,
        .alignment = key.alignment,
        .@"linksection" = .none,
        .@"addrspace" = key.@"addrspace",
    }) catch unreachable; // capacity asserted above
    const extra_index = addExtraAssumeCapacity(extra, Tag.Extern{
        .ty = key.ty,
        .lib_name = key.lib_name,
        .flags = .{
            .linkage = key.linkage,
            .visibility = key.visibility,
            .is_threadlocal = key.is_threadlocal,
            .is_dll_import = key.is_dll_import,
            .relocation = key.relocation,
            .source = key.source,
        },
        .zir_index = key.zir_index,
        .owner_nav = owner_nav,
    });
    items.appendAssumeCapacity(.{
        .tag = .@"extern",
        .data = extra_index,
    });
    // `put` must hand back exactly the index we predicted above.
    assert(gop.put() == extern_index);

    return .{
        .index = extern_index,
        .new_nav = owner_nav.toOptional(),
    };
}
 9292
/// Input to `getFuncDecl`: identifies a non-inferred-error-set function
/// declaration by its owner `Nav`, type, and source location.
pub const GetFuncDeclKey = struct {
    owner_nav: Nav.Index,
    ty: Index,
    zir_body_inst: TrackedInst.Index,
    lbrace_line: u32,
    rbrace_line: u32,
    lbrace_column: u32,
    rbrace_column: u32,
    /// null means generic.
    cc: ?std.builtin.CallingConvention,
    is_noinline: bool,
};
 9304
/// Intern a function declaration whose error set is *not* inferred.
/// Returns the existing `Index` if an equivalent declaration is already present.
pub fn getFuncDecl(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    key: GetFuncDeclKey,
) Allocator.Error!Index {
    const local = ip.getLocal(tid);
    const items = local.getMutableItems(gpa);
    try items.ensureUnusedCapacity(1);
    const extra = local.getMutableExtra(gpa);

    // The strategy here is to add the function type unconditionally, then to
    // ask if it already exists, and if so, revert the lengths of the mutated
    // arrays. This is similar to what `getOrPutTrailingString` does.
    const prev_extra_len = extra.mutate.len;

    try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncDecl).@"struct".fields.len);

    const func_decl_extra_index = addExtraAssumeCapacity(extra, Tag.FuncDecl{
        .analysis = .{
            .is_analyzed = false,
            .branch_hint = .none,
            .is_noinline = key.is_noinline,
            .has_error_trace = false,
            .inferred_error_set = false,
            .disable_instrumentation = false,
            .disable_intrinsics = false,
        },
        .owner_nav = key.owner_nav,
        .ty = key.ty,
        .zir_body_inst = key.zir_body_inst,
        .lbrace_line = key.lbrace_line,
        .rbrace_line = key.rbrace_line,
        .lbrace_column = key.lbrace_column,
        .rbrace_column = key.rbrace_column,
    });
    errdefer extra.mutate.len = prev_extra_len;

    var gop = try ip.getOrPutKey(gpa, tid, .{
        .func = extraFuncDecl(tid, extra.list.*, func_decl_extra_index),
    });
    defer gop.deinit();
    if (gop == .existing) {
        // Speculative addition was redundant; roll back the extra array.
        extra.mutate.len = prev_extra_len;

        const zir_body_inst_ptr = ip.funcDeclInfo(gop.existing).zirBodyInstPtr(ip);
        if (zir_body_inst_ptr.* != key.zir_body_inst) {
            // Since this function's `owner_nav` matches `key`, this *is* the function we're talking
            // about. The only way it could have a different ZIR `func` instruction is if the old
            // instruction has been lost and replaced with a new `TrackedInst.Index`.
            assert(zir_body_inst_ptr.resolve(ip) == null);
            zir_body_inst_ptr.* = key.zir_body_inst;
        }

        return gop.existing;
    }

    items.appendAssumeCapacity(.{
        .tag = .func_decl,
        .data = func_decl_extra_index,
    });
    return gop.put();
}
 9368
/// Input to `getFuncDeclIes`: identifies a function declaration with an
/// inferred error set; the function type is constructed alongside the decl.
pub const GetFuncDeclIesKey = struct {
    owner_nav: Nav.Index,
    param_types: []Index,
    noalias_bits: u32,
    comptime_bits: u32,
    /// Payload type of the return; the error set is the function's own
    /// inferred error set.
    bare_return_type: Index,
    /// null means generic.
    cc: ?std.builtin.CallingConvention,
    is_var_args: bool,
    is_generic: bool,
    is_noinline: bool,
    zir_body_inst: TrackedInst.Index,
    lbrace_line: u32,
    rbrace_line: u32,
    lbrace_column: u32,
    rbrace_column: u32,
};
 9386
/// Intern a function declaration with an inferred error set. This creates four
/// mutually-referential items at once: the `func_decl`, the error-union return
/// type, the inferred error set type, and the function type.
pub fn getFuncDeclIes(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    key: GetFuncDeclIesKey,
) Allocator.Error!Index {
    // Validate input parameters.
    assert(key.bare_return_type != .none);
    for (key.param_types) |param_type| assert(param_type != .none);

    const local = ip.getLocal(tid);
    const items = local.getMutableItems(gpa);
    try items.ensureUnusedCapacity(4);
    const extra = local.getMutableExtra(gpa);

    // The strategy here is to add the function decl unconditionally, then to
    // ask if it already exists, and if so, revert the lengths of the mutated
    // arrays. This is similar to what `getOrPutTrailingString` does.
    const prev_extra_len = extra.mutate.len;
    const params_len: u32 = @intCast(key.param_types.len);

    // Reserve extra capacity for all four items in one go.
    try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncDecl).@"struct".fields.len +
        1 + // inferred_error_set
        @typeInfo(Tag.ErrorUnionType).@"struct".fields.len +
        @typeInfo(Tag.TypeFunction).@"struct".fields.len +
        @intFromBool(key.comptime_bits != 0) +
        @intFromBool(key.noalias_bits != 0) +
        params_len);

    // Predict the indices of the four consecutive items appended below so
    // they can reference each other before being appended.
    const func_index = Index.Unwrapped.wrap(.{
        .tid = tid,
        .index = items.mutate.len + 0,
    }, ip);
    const error_union_type = Index.Unwrapped.wrap(.{
        .tid = tid,
        .index = items.mutate.len + 1,
    }, ip);
    const error_set_type = Index.Unwrapped.wrap(.{
        .tid = tid,
        .index = items.mutate.len + 2,
    }, ip);
    const func_ty = Index.Unwrapped.wrap(.{
        .tid = tid,
        .index = items.mutate.len + 3,
    }, ip);

    const func_decl_extra_index = addExtraAssumeCapacity(extra, Tag.FuncDecl{
        .analysis = .{
            .is_analyzed = false,
            .branch_hint = .none,
            .is_noinline = key.is_noinline,
            .has_error_trace = false,
            .inferred_error_set = true,
            .disable_instrumentation = false,
            .disable_intrinsics = false,
        },
        .owner_nav = key.owner_nav,
        .ty = func_ty,
        .zir_body_inst = key.zir_body_inst,
        .lbrace_line = key.lbrace_line,
        .rbrace_line = key.rbrace_line,
        .lbrace_column = key.lbrace_column,
        .rbrace_column = key.rbrace_column,
    });
    // Trailing word: the resolved inferred error set, initially `.none`.
    extra.appendAssumeCapacity(.{@intFromEnum(Index.none)});

    const func_type_extra_index = addExtraAssumeCapacity(extra, Tag.TypeFunction{
        .params_len = params_len,
        .return_type = error_union_type,
        .flags = .{
            .cc = .pack(key.cc orelse .auto),
            .is_var_args = key.is_var_args,
            .has_comptime_bits = key.comptime_bits != 0,
            .has_noalias_bits = key.noalias_bits != 0,
            .is_generic = key.is_generic,
            .is_noinline = key.is_noinline,
        },
    });
    // Optional trailing words, present only when the corresponding flag is set.
    if (key.comptime_bits != 0) extra.appendAssumeCapacity(.{key.comptime_bits});
    if (key.noalias_bits != 0) extra.appendAssumeCapacity(.{key.noalias_bits});
    extra.appendSliceAssumeCapacity(.{@ptrCast(key.param_types)});

    items.appendSliceAssumeCapacity(.{
        .tag = &.{
            .func_decl,
            .type_error_union,
            .type_inferred_error_set,
            .type_function,
        },
        .data = &.{
            func_decl_extra_index,
            addExtraAssumeCapacity(extra, Tag.ErrorUnionType{
                .error_set_type = error_set_type,
                .payload_type = key.bare_return_type,
            }),
            @intFromEnum(func_index),
            func_type_extra_index,
        },
    });
    errdefer {
        items.mutate.len -= 4;
        extra.mutate.len = prev_extra_len;
    }

    var func_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, tid, .{
        .func = extraFuncDecl(tid, extra.list.*, func_decl_extra_index),
    }, 3);
    defer func_gop.deinit();
    if (func_gop == .existing) {
        // An existing function type was found; undo the additions to our two arrays.
        items.mutate.len -= 4;
        extra.mutate.len = prev_extra_len;

        const zir_body_inst_ptr = ip.funcDeclInfo(func_gop.existing).zirBodyInstPtr(ip);
        if (zir_body_inst_ptr.* != key.zir_body_inst) {
            // Since this function's `owner_nav` matches `key`, this *is* the function we're talking
            // about. The only way it could have a different ZIR `func` instruction is if the old
            // instruction has been lost and replaced with a new `TrackedInst.Index`.
            assert(zir_body_inst_ptr.resolve(ip) == null);
            zir_body_inst_ptr.* = key.zir_body_inst;
        }

        return func_gop.existing;
    }
    // Tentatively publish all four keys, then finalize them together so the
    // group becomes visible as a unit.
    func_gop.putTentative(func_index);
    var error_union_type_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, tid, .{ .error_union_type = .{
        .error_set_type = error_set_type,
        .payload_type = key.bare_return_type,
    } }, 2);
    defer error_union_type_gop.deinit();
    error_union_type_gop.putTentative(error_union_type);
    var error_set_type_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, tid, .{
        .inferred_error_set_type = func_index,
    }, 1);
    defer error_set_type_gop.deinit();
    error_set_type_gop.putTentative(error_set_type);
    var func_ty_gop = try ip.getOrPutKey(gpa, tid, .{
        .func_type = extraFuncType(tid, extra.list.*, func_type_extra_index),
    });
    defer func_ty_gop.deinit();
    func_ty_gop.putTentative(func_ty);

    func_gop.putFinal(func_index);
    error_union_type_gop.putFinal(error_union_type);
    error_set_type_gop.putFinal(error_set_type);
    func_ty_gop.putFinal(func_ty);
    return func_index;
}
 9535
 9536pub fn getErrorSetType(
 9537    ip: *InternPool,
 9538    gpa: Allocator,
 9539    tid: Zcu.PerThread.Id,
 9540    names: []const NullTerminatedString,
 9541) Allocator.Error!Index {
 9542    assert(std.sort.isSorted(NullTerminatedString, names, {}, NullTerminatedString.indexLessThan));
 9543
 9544    const local = ip.getLocal(tid);
 9545    const items = local.getMutableItems(gpa);
 9546    const extra = local.getMutableExtra(gpa);
 9547    try extra.ensureUnusedCapacity(@typeInfo(Tag.ErrorSet).@"struct".fields.len + names.len);
 9548
 9549    const names_map = try ip.addMap(gpa, tid, names.len);
 9550    errdefer local.mutate.maps.len -= 1;
 9551
 9552    // The strategy here is to add the type unconditionally, then to ask if it
 9553    // already exists, and if so, revert the lengths of the mutated arrays.
 9554    // This is similar to what `getOrPutTrailingString` does.
 9555    const prev_extra_len = extra.mutate.len;
 9556    errdefer extra.mutate.len = prev_extra_len;
 9557
 9558    const error_set_extra_index = addExtraAssumeCapacity(extra, Tag.ErrorSet{
 9559        .names_len = @intCast(names.len),
 9560        .names_map = names_map,
 9561    });
 9562    extra.appendSliceAssumeCapacity(.{@ptrCast(names)});
 9563    errdefer extra.mutate.len = prev_extra_len;
 9564
 9565    var gop = try ip.getOrPutKey(gpa, tid, .{
 9566        .error_set_type = extraErrorSet(tid, extra.list.*, error_set_extra_index),
 9567    });
 9568    defer gop.deinit();
 9569    if (gop == .existing) {
 9570        extra.mutate.len = prev_extra_len;
 9571        return gop.existing;
 9572    }
 9573
 9574    try items.append(.{
 9575        .tag = .type_error_set,
 9576        .data = error_set_extra_index,
 9577    });
 9578    errdefer items.mutate.len -= 1;
 9579
 9580    ip.addStringsToMap(names_map, names);
 9581
 9582    return gop.put();
 9583}
 9584
/// Input to `getFuncInstance` / `getFuncInstanceIes`: identifies a generic
/// function instantiation by its generic owner and comptime arguments.
pub const GetFuncInstanceKey = struct {
    /// Has the length of the instance function (may be lesser than
    /// comptime_args).
    param_types: []Index,
    /// Has the length of generic_owner's parameters (may be greater than
    /// param_types).
    comptime_args: []const Index,
    noalias_bits: u32,
    /// Payload return type; when `inferred_error_set` is set, the actual
    /// return type becomes an error union with the instance's inferred set.
    bare_return_type: Index,
    is_noinline: bool,
    generic_owner: Index,
    /// When true, dispatches to `getFuncInstanceIes`.
    inferred_error_set: bool,
};
 9598
/// Intern a generic function instantiation. Delegates to `getFuncInstanceIes`
/// when the instance has an inferred error set. Returns the existing `Index`
/// if an equivalent instance is already present.
pub fn getFuncInstance(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    arg: GetFuncInstanceKey,
) Allocator.Error!Index {
    if (arg.inferred_error_set)
        return getFuncInstanceIes(ip, gpa, tid, arg);

    const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner);
    const generic_owner_ty = ip.indexToKey(ip.funcDeclInfo(generic_owner).ty).func_type;

    // The instance's function type inherits the owner's calling convention.
    const func_ty = try ip.getFuncType(gpa, tid, .{
        .param_types = arg.param_types,
        .return_type = arg.bare_return_type,
        .noalias_bits = arg.noalias_bits,
        .cc = generic_owner_ty.cc,
        .is_noinline = arg.is_noinline,
    });

    const local = ip.getLocal(tid);
    const items = local.getMutableItems(gpa);
    const extra = local.getMutableExtra(gpa);
    try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncInstance).@"struct".fields.len +
        arg.comptime_args.len);

    assert(arg.comptime_args.len == ip.funcTypeParamsLen(ip.typeOf(generic_owner)));

    // Add speculatively; roll back if an equivalent instance already exists.
    const prev_extra_len = extra.mutate.len;
    errdefer extra.mutate.len = prev_extra_len;

    const func_extra_index = addExtraAssumeCapacity(extra, Tag.FuncInstance{
        .analysis = .{
            .is_analyzed = false,
            .branch_hint = .none,
            .is_noinline = arg.is_noinline,
            .has_error_trace = false,
            .inferred_error_set = false,
            .disable_instrumentation = false,
            .disable_intrinsics = false,
        },
        // This is populated after we create the Nav below. It is not read
        // by equality or hashing functions.
        .owner_nav = undefined,
        .ty = func_ty,
        .branch_quota = 0,
        .generic_owner = generic_owner,
    });
    extra.appendSliceAssumeCapacity(.{@ptrCast(arg.comptime_args)});

    var gop = try ip.getOrPutKey(gpa, tid, .{
        .func = ip.extraFuncInstance(tid, extra.list.*, func_extra_index),
    });
    defer gop.deinit();
    if (gop == .existing) {
        extra.mutate.len = prev_extra_len;
        return gop.existing;
    }

    const func_index = Index.Unwrapped.wrap(.{ .tid = tid, .index = items.mutate.len }, ip);
    try items.append(.{
        .tag = .func_instance,
        .data = func_extra_index,
    });
    errdefer items.mutate.len -= 1;
    // Creates the owner `Nav` and backfills the `owner_nav` field.
    try finishFuncInstance(
        ip,
        gpa,
        tid,
        extra,
        generic_owner,
        func_index,
        func_extra_index,
    );
    return gop.put();
}
 9675
/// This function exists separately from `getFuncInstance` because it needs to
/// create 4 new items in the InternPool atomically before it can look for an
/// existing item in the map.
pub fn getFuncInstanceIes(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    arg: GetFuncInstanceKey,
) Allocator.Error!Index {
    // Validate input parameters.
    assert(arg.inferred_error_set);
    assert(arg.bare_return_type != .none);
    for (arg.param_types) |param_type| assert(param_type != .none);

    const local = ip.getLocal(tid);
    const items = local.getMutableItems(gpa);
    const extra = local.getMutableExtra(gpa);
    try items.ensureUnusedCapacity(4);

    const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner);
    const generic_owner_ty = ip.indexToKey(ip.funcDeclInfo(generic_owner).ty).func_type;

    // The strategy here is to add the function decl unconditionally, then to
    // ask if it already exists, and if so, revert the lengths of the mutated
    // arrays. This is similar to what `getOrPutTrailingString` does.
    const prev_extra_len = extra.mutate.len;
    const params_len: u32 = @intCast(arg.param_types.len);

    // Reserve extra capacity for all four items in one go.
    try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncInstance).@"struct".fields.len +
        1 + // inferred_error_set
        arg.comptime_args.len +
        @typeInfo(Tag.ErrorUnionType).@"struct".fields.len +
        @typeInfo(Tag.TypeFunction).@"struct".fields.len +
        @intFromBool(arg.noalias_bits != 0) +
        params_len);

    // Predict the indices of the four consecutive items appended below so
    // they can reference each other before being appended.
    const func_index = Index.Unwrapped.wrap(.{
        .tid = tid,
        .index = items.mutate.len + 0,
    }, ip);
    const error_union_type = Index.Unwrapped.wrap(.{
        .tid = tid,
        .index = items.mutate.len + 1,
    }, ip);
    const error_set_type = Index.Unwrapped.wrap(.{
        .tid = tid,
        .index = items.mutate.len + 2,
    }, ip);
    const func_ty = Index.Unwrapped.wrap(.{
        .tid = tid,
        .index = items.mutate.len + 3,
    }, ip);

    const func_extra_index = addExtraAssumeCapacity(extra, Tag.FuncInstance{
        .analysis = .{
            .is_analyzed = false,
            .branch_hint = .none,
            .is_noinline = arg.is_noinline,
            .has_error_trace = false,
            .inferred_error_set = true,
            .disable_instrumentation = false,
            .disable_intrinsics = false,
        },
        // This is populated after we create the Nav below. It is not read
        // by equality or hashing functions.
        .owner_nav = undefined,
        .ty = func_ty,
        .branch_quota = 0,
        .generic_owner = generic_owner,
    });
    extra.appendAssumeCapacity(.{@intFromEnum(Index.none)}); // resolved error set
    extra.appendSliceAssumeCapacity(.{@ptrCast(arg.comptime_args)});

    const func_type_extra_index = addExtraAssumeCapacity(extra, Tag.TypeFunction{
        .params_len = params_len,
        .return_type = error_union_type,
        .flags = .{
            .cc = .pack(generic_owner_ty.cc),
            .is_var_args = false,
            .has_comptime_bits = false,
            .has_noalias_bits = arg.noalias_bits != 0,
            .is_generic = false,
            .is_noinline = arg.is_noinline,
        },
    });
    // no comptime_bits because has_comptime_bits is false
    if (arg.noalias_bits != 0) extra.appendAssumeCapacity(.{arg.noalias_bits});
    extra.appendSliceAssumeCapacity(.{@ptrCast(arg.param_types)});

    items.appendSliceAssumeCapacity(.{
        .tag = &.{
            .func_instance,
            .type_error_union,
            .type_inferred_error_set,
            .type_function,
        },
        .data = &.{
            func_extra_index,
            addExtraAssumeCapacity(extra, Tag.ErrorUnionType{
                .error_set_type = error_set_type,
                .payload_type = arg.bare_return_type,
            }),
            @intFromEnum(func_index),
            func_type_extra_index,
        },
    });
    errdefer {
        items.mutate.len -= 4;
        extra.mutate.len = prev_extra_len;
    }

    var func_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, tid, .{
        .func = ip.extraFuncInstance(tid, extra.list.*, func_extra_index),
    }, 3);
    defer func_gop.deinit();
    if (func_gop == .existing) {
        // Hot path: undo the additions to our two arrays.
        items.mutate.len -= 4;
        extra.mutate.len = prev_extra_len;
        return func_gop.existing;
    }
    // Tentatively publish all four keys, then finalize them together so the
    // group becomes visible as a unit.
    func_gop.putTentative(func_index);
    var error_union_type_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, tid, .{ .error_union_type = .{
        .error_set_type = error_set_type,
        .payload_type = arg.bare_return_type,
    } }, 2);
    defer error_union_type_gop.deinit();
    error_union_type_gop.putTentative(error_union_type);
    var error_set_type_gop = try ip.getOrPutKeyEnsuringAdditionalCapacity(gpa, tid, .{
        .inferred_error_set_type = func_index,
    }, 1);
    defer error_set_type_gop.deinit();
    error_set_type_gop.putTentative(error_set_type);
    var func_ty_gop = try ip.getOrPutKey(gpa, tid, .{
        .func_type = extraFuncType(tid, extra.list.*, func_type_extra_index),
    });
    defer func_ty_gop.deinit();
    func_ty_gop.putTentative(func_ty);
    // Creates the owner `Nav` and backfills the `owner_nav` field.
    try finishFuncInstance(
        ip,
        gpa,
        tid,
        extra,
        generic_owner,
        func_index,
        func_extra_index,
    );

    func_gop.putFinal(func_index);
    error_union_type_gop.putFinal(error_union_type);
    error_set_type_gop.putFinal(error_set_type);
    func_ty_gop.putFinal(func_ty);
    return func_index;
}
 9830
/// Create the owner `Nav` for a freshly-interned function instance and
/// backfill the `owner_nav` field of its `Tag.FuncInstance` extra data,
/// which was left `undefined` by the caller. The new `Nav` inherits its
/// attributes from the generic owner's `Nav`.
fn finishFuncInstance(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    extra: Local.Extra.Mutable,
    generic_owner: Index,
    func_index: Index,
    func_extra_index: u32,
) Allocator.Error!void {
    const fn_owner_nav = ip.getNav(ip.funcDeclInfo(generic_owner).owner_nav);
    const fn_namespace = fn_owner_nav.analysis.?.namespace;

    // TODO: improve this name
    const nav_name = try ip.getOrPutStringFmt(gpa, tid, "{f}__anon_{d}", .{
        fn_owner_nav.name.fmt(ip), @intFromEnum(func_index),
    }, .no_embedded_nulls);
    const nav_index = try ip.createNav(gpa, tid, .{
        .name = nav_name,
        .fqn = try ip.namespacePtr(fn_namespace).internFullyQualifiedName(ip, gpa, tid, nav_name),
        .val = func_index,
        .is_const = fn_owner_nav.status.fully_resolved.is_const,
        .alignment = fn_owner_nav.status.fully_resolved.alignment,
        .@"linksection" = fn_owner_nav.status.fully_resolved.@"linksection",
        .@"addrspace" = fn_owner_nav.status.fully_resolved.@"addrspace",
    });

    // Populate the owner_nav field which was left undefined until now.
    extra.view().items(.@"0")[
        func_extra_index + std.meta.fieldIndex(Tag.FuncInstance, "owner_nav").?
    ] = @intFromEnum(nav_index);
}
 9862
/// Input to `getEnumType`, describing how an enum type is identified and how
/// many fields/values to reserve space for.
pub const EnumTypeInit = struct {
    /// Whether explicit tag values will be provided via `WipEnumType.nextField`.
    has_values: bool,
    tag_mode: LoadedEnumType.TagMode,
    fields_len: u32,
    /// How the type is keyed for interning purposes.
    key: union(enum) {
        /// A source-declared enum whose captures are borrowed from the caller.
        declared: struct {
            zir_index: TrackedInst.Index,
            captures: []const CaptureValue,
        },
        /// A source-declared enum whose captures are already owned by the pool.
        declared_owned_captures: struct {
            zir_index: TrackedInst.Index,
            captures: CaptureValue.Slice,
        },
        /// An enum created via `@Type`, keyed by a hash of its full contents.
        reified: struct {
            zir_index: TrackedInst.Index,
            type_hash: u64,
        },
    },
};
 9882
/// Handle to an enum type that has been interned but not yet fully populated.
/// The caller fills in the name, namespace, tag type, and fields through the
/// methods below, or calls `cancel` to remove the partially-built type.
pub const WipEnumType = struct {
    tid: Zcu.PerThread.Id,
    /// The `Index` at which the enum type was interned.
    index: Index,
    // The remaining `*_extra_index` fields locate the extra-array words that
    // were left `undefined` at creation time and are patched in later.
    tag_ty_index: u32,
    type_name_extra_index: u32,
    namespace_extra_index: u32,
    name_nav_extra_index: u32,
    names_map: MapIndex,
    names_start: u32,
    /// `.none` when the enum is automatically numbered (no explicit values).
    values_map: OptionalMapIndex,
    values_start: u32,

    /// Patch in the type's name and (optionally) the `Nav` it is named after.
    pub fn setName(
        wip: WipEnumType,
        ip: *InternPool,
        type_name: NullTerminatedString,
        /// This should be the `Nav` we are named after if we use the `.parent` name strategy; `.none` otherwise.
        name_nav: Nav.Index.Optional,
    ) void {
        const extra = ip.getLocalShared(wip.tid).extra.acquire();
        const extra_items = extra.view().items(.@"0");
        extra_items[wip.type_name_extra_index] = @intFromEnum(type_name);
        extra_items[wip.name_nav_extra_index] = @intFromEnum(name_nav);
    }

    /// Patch in the namespace that was left undefined at creation time.
    pub fn prepare(
        wip: WipEnumType,
        ip: *InternPool,
        namespace: NamespaceIndex,
    ) void {
        const extra = ip.getLocalShared(wip.tid).extra.acquire();
        const extra_items = extra.view().items(.@"0");

        extra_items[wip.namespace_extra_index] = @intFromEnum(namespace);
    }

    /// Patch in the integer tag type; must be an integer type.
    pub fn setTagTy(wip: WipEnumType, ip: *InternPool, tag_ty: Index) void {
        assert(ip.isIntegerType(tag_ty));
        const extra = ip.getLocalShared(wip.tid).extra.acquire();
        extra.view().items(.@"0")[wip.tag_ty_index] = @intFromEnum(tag_ty);
    }

    /// Describes a collision reported by `nextField`: which aspect collided
    /// and the index of the previously-added field it collided with.
    pub const FieldConflict = struct {
        kind: enum { name, value },
        prev_field_idx: u32,
    };

    /// Returns the already-existing field with the same name or value, if any.
    /// If the enum is automatically numbered, `value` must be `.none`.
    /// Otherwise, the type of `value` must be the integer tag type of the enum.
    pub fn nextField(wip: WipEnumType, ip: *InternPool, name: NullTerminatedString, value: Index) ?FieldConflict {
        const unwrapped_index = wip.index.unwrap(ip);
        const extra_list = ip.getLocalShared(unwrapped_index.tid).extra.acquire();
        const extra_items = extra_list.view().items(.@"0");
        // Name collision check happens first; a name conflict is reported
        // even if the value would also have conflicted.
        if (ip.addFieldName(extra_list, wip.names_map, wip.names_start, name)) |conflict| {
            return .{ .kind = .name, .prev_field_idx = conflict };
        }
        if (value == .none) {
            assert(wip.values_map == .none);
            return null;
        }
        assert(ip.typeOf(value) == @as(Index, @enumFromInt(extra_items[wip.tag_ty_index])));
        const map = wip.values_map.unwrap().?.get(ip);
        const field_index = map.count();
        const indexes = extra_items[wip.values_start..][0..field_index];
        const adapter: Index.Adapter = .{ .indexes = @ptrCast(indexes) };
        const gop = map.getOrPutAssumeCapacityAdapted(value, adapter);
        if (gop.found_existing) {
            return .{ .kind = .value, .prev_field_idx = @intCast(gop.index) };
        }
        // No conflict: record the value in the trailing extra words.
        extra_items[wip.values_start + field_index] = @intFromEnum(value);
        return null;
    }

    /// Abandon the in-progress type, removing it from the pool.
    pub fn cancel(wip: WipEnumType, ip: *InternPool, tid: Zcu.PerThread.Id) void {
        ip.remove(tid, wip.index);
    }

    pub const Result = union(enum) {
        wip: WipEnumType,
        existing: Index,
    };
};
 9966
 9967pub fn getEnumType(
 9968    ip: *InternPool,
 9969    gpa: Allocator,
 9970    tid: Zcu.PerThread.Id,
 9971    ini: EnumTypeInit,
 9972    /// If it is known that there is an existing type with this key which is outdated,
 9973    /// this is passed as `true`, and the type is replaced with one at a fresh index.
 9974    replace_existing: bool,
 9975) Allocator.Error!WipEnumType.Result {
 9976    const key: Key = .{ .enum_type = switch (ini.key) {
 9977        .declared => |d| .{ .declared = .{
 9978            .zir_index = d.zir_index,
 9979            .captures = .{ .external = d.captures },
 9980        } },
 9981        .declared_owned_captures => |d| .{ .declared = .{
 9982            .zir_index = d.zir_index,
 9983            .captures = .{ .owned = d.captures },
 9984        } },
 9985        .reified => |r| .{ .reified = .{
 9986            .zir_index = r.zir_index,
 9987            .type_hash = r.type_hash,
 9988        } },
 9989    } };
 9990    var gop = if (replace_existing)
 9991        ip.putKeyReplace(tid, key)
 9992    else
 9993        try ip.getOrPutKey(gpa, tid, key);
 9994    defer gop.deinit();
 9995    if (gop == .existing) return .{ .existing = gop.existing };
 9996
 9997    const local = ip.getLocal(tid);
 9998    const items = local.getMutableItems(gpa);
 9999    try items.ensureUnusedCapacity(1);
10000    const extra = local.getMutableExtra(gpa);
10001
10002    const names_map = try ip.addMap(gpa, tid, ini.fields_len);
10003    errdefer local.mutate.maps.len -= 1;
10004
10005    switch (ini.tag_mode) {
10006        .auto => {
10007            assert(!ini.has_values);
10008            try extra.ensureUnusedCapacity(@typeInfo(EnumAuto).@"struct".fields.len +
10009                // TODO: fmt bug
10010                // zig fmt: off
10011                switch (ini.key) {
10012                    inline .declared, .declared_owned_captures => |d| d.captures.len,
10013                    .reified => 2, // type_hash: PackedU64
10014                } +
10015                // zig fmt: on
10016                ini.fields_len); // field types
10017
10018            const extra_index = addExtraAssumeCapacity(extra, EnumAuto{
10019                .name = undefined, // set by `prepare`
10020                .name_nav = undefined, // set by `prepare`
10021                .captures_len = switch (ini.key) {
10022                    inline .declared, .declared_owned_captures => |d| @intCast(d.captures.len),
10023                    .reified => std.math.maxInt(u32),
10024                },
10025                .namespace = undefined, // set by `prepare`
10026                .int_tag_type = .none, // set by `prepare`
10027                .fields_len = ini.fields_len,
10028                .names_map = names_map,
10029                .zir_index = switch (ini.key) {
10030                    inline else => |x| x.zir_index,
10031                }.toOptional(),
10032            });
10033            items.appendAssumeCapacity(.{
10034                .tag = .type_enum_auto,
10035                .data = extra_index,
10036            });
10037            switch (ini.key) {
10038                .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}),
10039                .declared_owned_captures => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}),
10040                .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)),
10041            }
10042            const names_start = extra.mutate.len;
10043            _ = extra.addManyAsSliceAssumeCapacity(ini.fields_len);
10044            return .{ .wip = .{
10045                .tid = tid,
10046                .index = gop.put(),
10047                .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?,
10048                .type_name_extra_index = extra_index + std.meta.fieldIndex(EnumAuto, "name").?,
10049                .name_nav_extra_index = extra_index + std.meta.fieldIndex(EnumAuto, "name_nav").?,
10050                .namespace_extra_index = extra_index + std.meta.fieldIndex(EnumAuto, "namespace").?,
10051                .names_map = names_map,
10052                .names_start = @intCast(names_start),
10053                .values_map = .none,
10054                .values_start = undefined,
10055            } };
10056        },
10057        .explicit, .nonexhaustive => {
10058            const values_map: OptionalMapIndex = if (!ini.has_values) .none else m: {
10059                const values_map = try ip.addMap(gpa, tid, ini.fields_len);
10060                break :m values_map.toOptional();
10061            };
10062            errdefer if (ini.has_values) {
10063                local.mutate.maps.len -= 1;
10064            };
10065
10066            try extra.ensureUnusedCapacity(@typeInfo(EnumExplicit).@"struct".fields.len +
10067                // TODO: fmt bug
10068                // zig fmt: off
10069                switch (ini.key) {
10070                    inline .declared, .declared_owned_captures => |d| d.captures.len,
10071                    .reified => 2, // type_hash: PackedU64
10072                } +
10073                // zig fmt: on
10074                ini.fields_len + // field types
10075                ini.fields_len * @intFromBool(ini.has_values)); // field values
10076
10077            const extra_index = addExtraAssumeCapacity(extra, EnumExplicit{
10078                .name = undefined, // set by `prepare`
10079                .name_nav = undefined, // set by `prepare`
10080                .captures_len = switch (ini.key) {
10081                    inline .declared, .declared_owned_captures => |d| @intCast(d.captures.len),
10082                    .reified => std.math.maxInt(u32),
10083                },
10084                .namespace = undefined, // set by `prepare`
10085                .int_tag_type = .none, // set by `prepare`
10086                .fields_len = ini.fields_len,
10087                .names_map = names_map,
10088                .values_map = values_map,
10089                .zir_index = switch (ini.key) {
10090                    inline else => |x| x.zir_index,
10091                }.toOptional(),
10092            });
10093            items.appendAssumeCapacity(.{
10094                .tag = switch (ini.tag_mode) {
10095                    .auto => unreachable,
10096                    .explicit => .type_enum_explicit,
10097                    .nonexhaustive => .type_enum_nonexhaustive,
10098                },
10099                .data = extra_index,
10100            });
10101            switch (ini.key) {
10102                .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}),
10103                .declared_owned_captures => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}),
10104                .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)),
10105            }
10106            const names_start = extra.mutate.len;
10107            _ = extra.addManyAsSliceAssumeCapacity(ini.fields_len);
10108            const values_start = extra.mutate.len;
10109            if (ini.has_values) {
10110                _ = extra.addManyAsSliceAssumeCapacity(ini.fields_len);
10111            }
10112            return .{ .wip = .{
10113                .tid = tid,
10114                .index = gop.put(),
10115                .tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?,
10116                .type_name_extra_index = extra_index + std.meta.fieldIndex(EnumExplicit, "name").?,
10117                .name_nav_extra_index = extra_index + std.meta.fieldIndex(EnumExplicit, "name_nav").?,
10118                .namespace_extra_index = extra_index + std.meta.fieldIndex(EnumExplicit, "namespace").?,
10119                .names_map = names_map,
10120                .names_start = @intCast(names_start),
10121                .values_map = values_map,
10122                .values_start = @intCast(values_start),
10123            } };
10124        },
10125    }
10126}
10127
/// Parameters for `getGeneratedTagEnumType`.
const GeneratedTagEnumTypeInit = struct {
    /// Name of the generated enum type.
    name: NullTerminatedString,
    /// The union type this enum was generated for. Must be a union type.
    owner_union_ty: Index,
    /// Integer type backing the enum tag. Must be an integer type.
    tag_ty: Index,
    /// Field names of the enum; `names.len` determines the field count.
    names: []const NullTerminatedString,
    /// Explicit tag values, each of type `tag_ty`; may be empty (auto-numbered).
    values: []const Index,
    tag_mode: LoadedEnumType.TagMode,
    /// Namespace the enum's own namespace will be created inside of.
    parent_namespace: NamespaceIndex,
};
10137
/// Creates an enum type which was automatically-generated as the tag type of a
/// `union` with no explicit tag type. Since this is only called once per union
/// type, it asserts that no matching type yet exists.
pub fn getGeneratedTagEnumType(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    ini: GeneratedTagEnumTypeInit,
) Allocator.Error!Index {
    assert(ip.isUnion(ini.owner_union_ty));
    assert(ip.isIntegerType(ini.tag_ty));
    for (ini.values) |val| assert(ip.typeOf(val) == ini.tag_ty);

    const local = ip.getLocal(tid);
    const items = local.getMutableItems(gpa);
    try items.ensureUnusedCapacity(1);
    const extra = local.getMutableExtra(gpa);

    const names_map = try ip.addMap(gpa, tid, ini.names.len);
    errdefer local.mutate.maps.len -= 1;
    ip.addStringsToMap(names_map, ini.names);

    const fields_len: u32 = @intCast(ini.names.len);

    // Predict the index the enum will live at so we can construct the namespace before releasing the shard's mutex.
    const enum_index = Index.Unwrapped.wrap(.{
        .tid = tid,
        .index = items.mutate.len,
    }, ip);
    const parent_namespace = ip.namespacePtr(ini.parent_namespace);
    const namespace = try ip.createNamespace(gpa, tid, .{
        .parent = ini.parent_namespace.toOptional(),
        .owner_type = enum_index,
        .file_scope = parent_namespace.file_scope,
        .generation = parent_namespace.generation,
    });
    errdefer ip.destroyNamespace(tid, namespace);

    const prev_extra_len = extra.mutate.len;
    switch (ini.tag_mode) {
        .auto => {
            try extra.ensureUnusedCapacity(@typeInfo(EnumAuto).@"struct".fields.len +
                1 + // owner_union
                fields_len); // field names
            items.appendAssumeCapacity(.{
                .tag = .type_enum_auto,
                .data = addExtraAssumeCapacity(extra, EnumAuto{
                    .name = ini.name,
                    .name_nav = .none,
                    .captures_len = 0,
                    .namespace = namespace,
                    .int_tag_type = ini.tag_ty,
                    .fields_len = fields_len,
                    .names_map = names_map,
                    .zir_index = .none,
                }),
            });
            extra.appendAssumeCapacity(.{@intFromEnum(ini.owner_union_ty)});
            extra.appendSliceAssumeCapacity(.{@ptrCast(ini.names)});
        },
        .explicit, .nonexhaustive => {
            try extra.ensureUnusedCapacity(@typeInfo(EnumExplicit).@"struct".fields.len +
                1 + // owner_union
                fields_len + // field names
                ini.values.len); // field values

            const values_map: OptionalMapIndex = if (ini.values.len != 0) m: {
                const map = try ip.addMap(gpa, tid, ini.values.len);
                ip.addIndexesToMap(map, ini.values);
                break :m map.toOptional();
            } else .none;
            // If `addMap` itself fails, it rolls back its own map reservation.
            // Once it succeeds, the `errdefer switch` after this switch pops
            // the values map on the remaining error paths.

            items.appendAssumeCapacity(.{
                .tag = switch (ini.tag_mode) {
                    .explicit => .type_enum_explicit,
                    .nonexhaustive => .type_enum_nonexhaustive,
                    .auto => unreachable,
                },
                .data = addExtraAssumeCapacity(extra, EnumExplicit{
                    .name = ini.name,
                    .name_nav = .none,
                    .captures_len = 0,
                    .namespace = namespace,
                    .int_tag_type = ini.tag_ty,
                    .fields_len = fields_len,
                    .names_map = names_map,
                    .values_map = values_map,
                    .zir_index = .none,
                }),
            });
            extra.appendAssumeCapacity(.{@intFromEnum(ini.owner_union_ty)});
            extra.appendSliceAssumeCapacity(.{@ptrCast(ini.names)});
            extra.appendSliceAssumeCapacity(.{@ptrCast(ini.values)});
        },
    }
    errdefer extra.mutate.len = prev_extra_len;
    errdefer switch (ini.tag_mode) {
        .auto => {},
        .explicit, .nonexhaustive => if (ini.values.len != 0) {
            local.mutate.maps.len -= 1;
        },
    };

    var gop = try ip.getOrPutKey(gpa, tid, .{ .enum_type = .{
        .generated_tag = .{ .union_type = ini.owner_union_ty },
    } });
    defer gop.deinit();
    assert(gop.put() == enum_index);
    return enum_index;
}
10250
/// Parameters for `getOpaqueType`.
pub const OpaqueTypeInit = struct {
    /// Tracked ZIR instruction this opaque type was declared by.
    zir_index: TrackedInst.Index,
    /// Captured values for the declaration; owned by the caller and copied
    /// into the pool's `extra` array by `getOpaqueType`.
    captures: []const CaptureValue,
};
10255
/// Interns an opaque type declared by `ini.zir_index` with the given captures.
/// Returns `.existing` if an equivalent type was already interned; otherwise
/// returns a `.wip` handle whose `name`/`name_nav`/`namespace` fields must be
/// filled in by the caller (see the `set by \`finish\`` placeholders below).
pub fn getOpaqueType(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    ini: OpaqueTypeInit,
) Allocator.Error!WipNamespaceType.Result {
    // Deduplicate first: look the key up (or reserve a slot for it) before
    // mutating any thread-local storage.
    var gop = try ip.getOrPutKey(gpa, tid, .{ .opaque_type = .{ .declared = .{
        .zir_index = ini.zir_index,
        .captures = .{ .external = ini.captures },
    } } });
    defer gop.deinit();
    if (gop == .existing) return .{ .existing = gop.existing };

    const local = ip.getLocal(tid);
    const items = local.getMutableItems(gpa);
    const extra = local.getMutableExtra(gpa);
    try items.ensureUnusedCapacity(1);

    // One `TypeOpaque` header, immediately followed by the capture values.
    try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeOpaque).@"struct".fields.len + ini.captures.len);
    const extra_index = addExtraAssumeCapacity(extra, Tag.TypeOpaque{
        .name = undefined, // set by `finish`
        .name_nav = undefined, // set by `finish`
        .namespace = undefined, // set by `finish`
        .zir_index = ini.zir_index,
        .captures_len = @intCast(ini.captures.len),
    });
    items.appendAssumeCapacity(.{
        .tag = .type_opaque,
        .data = extra_index,
    });
    extra.appendSliceAssumeCapacity(.{@ptrCast(ini.captures)});
    // The returned extra offsets tell the caller exactly where to patch in the
    // deferred `name`/`name_nav`/`namespace` values.
    return .{
        .wip = .{
            .tid = tid,
            .index = gop.put(),
            .type_name_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "name").?,
            .name_nav_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "name_nav").?,
            .namespace_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "namespace").?,
        },
    };
}
10297
/// Returns the index at which `key` is already interned, or `null` if it is
/// not present. Never inserts. Reads the shard map with acquire loads, so it
/// is safe to call concurrently with writers publishing new entries.
pub fn getIfExists(ip: *const InternPool, key: Key) ?Index {
    const full_hash = key.hash64(ip);
    // High 32 bits pick the slot within the shard's map; low bits pick the
    // shard (shard count is a power of two, per the `shards` field docs).
    const hash: u32 = @truncate(full_hash >> 32);
    const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
    const map = shard.shared.map.acquire();
    const map_mask = map.header().mask();
    var map_index = hash;
    // Linear probing: an empty (`.none`) slot terminates the probe sequence.
    while (true) : (map_index += 1) {
        map_index &= map_mask;
        const entry = &map.entries[map_index];
        const index = entry.acquire();
        if (index == .none) return null;
        if (entry.hash != hash) continue;
        if (ip.indexToKey(index).eql(key, ip)) return index;
    }
}
10314
/// Inserts every name in `strings` into the map at `map_index`.
/// Asserts that capacity was already reserved and that no name is duplicated.
fn addStringsToMap(
    ip: *InternPool,
    map_index: MapIndex,
    strings: []const NullTerminatedString,
) void {
    const map = map_index.get(ip);
    const adapter: NullTerminatedString.Adapter = .{ .strings = strings };
    var i: usize = 0;
    while (i < strings.len) : (i += 1) {
        const entry = map.getOrPutAssumeCapacityAdapted(strings[i], adapter);
        assert(!entry.found_existing);
    }
}
10327
/// Inserts every value in `indexes` into the map at `map_index`.
/// Asserts that capacity was already reserved and that no value is duplicated.
fn addIndexesToMap(
    ip: *InternPool,
    map_index: MapIndex,
    indexes: []const Index,
) void {
    const map = map_index.get(ip);
    const adapter: Index.Adapter = .{ .indexes = indexes };
    var i: usize = 0;
    while (i < indexes.len) : (i += 1) {
        const entry = map.getOrPutAssumeCapacityAdapted(indexes[i], adapter);
        assert(!entry.found_existing);
    }
}
10340
/// Appends a fresh, empty map with capacity for `cap` entries to this
/// thread's map list and returns its pool-wide index.
fn addMap(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, cap: usize) Allocator.Error!MapIndex {
    const maps = ip.getLocal(tid).getMutableMaps(gpa);
    // The new map will live at the current length.
    const new_index = maps.mutate.len;
    const ptr = try maps.addOne();
    errdefer maps.mutate.len = new_index;
    ptr[0].* = .{};
    try ptr[0].ensureTotalCapacity(gpa, cap);
    const unwrapped: MapIndex.Unwrapped = .{ .tid = tid, .index = new_index };
    return unwrapped.wrap(ip);
}
10350
/// This operation only happens under compile error conditions.
/// Leak the index until the next garbage collection.
/// Invalidates all references to this index.
pub fn remove(ip: *InternPool, tid: Zcu.PerThread.Id, index: Index) void {
    const unwrapped_index = index.unwrap(ip);

    // Only the owning thread may shrink its own items list, and only when the
    // item is the most recently added one.
    if (unwrapped_index.tid == tid) {
        const items_len = &ip.getLocal(unwrapped_index.tid).mutate.items.len;
        if (unwrapped_index.index == items_len.* - 1) {
            // Happy case - we can just drop the item without affecting any other indices.
            items_len.* -= 1;
            return;
        }
    }

    // We must preserve the item so that indices following it remain valid.
    // Thus, we will rewrite the tag to `removed`, leaking the item until
    // next GC but causing `KeyAdapter` to ignore it.
    const items = ip.getLocalShared(unwrapped_index.tid).items.acquire().view();
    @atomicStore(Tag, &items.items(.tag)[unwrapped_index.index], .removed, .unordered);
}
10372
/// Appends a big-integer item of type `ty`: the item's `data` points into the
/// per-thread limbs list, where an `Int` header is immediately followed by
/// the `limbs` payload.
/// NOTE(review): `items_list.appendAssumeCapacity` assumes the caller already
/// reserved item capacity — confirm against call sites.
fn addInt(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    ty: Index,
    tag: Tag,
    limbs: []const Limb,
) !void {
    const local = ip.getLocal(tid);
    const items_list = local.getMutableItems(gpa);
    const limbs_list = local.getMutableLimbs(gpa);
    const limbs_len: u32 = @intCast(limbs.len);
    // Reserve header + payload up front so the appends below cannot fail.
    try limbs_list.ensureUnusedCapacity(Int.limbs_items_len + limbs_len);
    items_list.appendAssumeCapacity(.{
        .tag = tag,
        .data = limbs_list.mutate.len, // offset of the `Int` header in limbs
    });
    limbs_list.addManyAsArrayAssumeCapacity(Int.limbs_items_len)[0].* = @bitCast(Int{
        .ty = ty,
        .limbs_len = limbs_len,
    });
    limbs_list.appendSliceAssumeCapacity(.{limbs});
}
10396
/// Reserves space for all fields of `item`, then serializes it into `extra`.
/// Returns the extra index of the first word written.
fn addExtra(extra: Local.Extra.Mutable, item: anytype) Allocator.Error!u32 {
    const field_count = @typeInfo(@TypeOf(item)).@"struct".fields.len;
    try extra.ensureUnusedCapacity(field_count);
    return addExtraAssumeCapacity(extra, item);
}
10402
/// Serializes `item` (a struct whose every field is 32 bits wide) into
/// `extra`, one `u32` per field in declaration order, and returns the extra
/// index of the first word. Asserts capacity was already reserved.
/// The inverse operation is `extraDataTrail`.
fn addExtraAssumeCapacity(extra: Local.Extra.Mutable, item: anytype) u32 {
    const result: u32 = extra.mutate.len;
    inline for (@typeInfo(@TypeOf(item)).@"struct".fields) |field| {
        extra.appendAssumeCapacity(.{switch (field.type) {
            // Enum-backed index types are stored as their integer tag value.
            Index,
            Nav.Index,
            Nav.Index.Optional,
            NamespaceIndex,
            OptionalNamespaceIndex,
            MapIndex,
            OptionalMapIndex,
            String,
            NullTerminatedString,
            OptionalNullTerminatedString,
            Tag.TypePointer.VectorIndex,
            TrackedInst.Index,
            TrackedInst.Index.Optional,
            ComptimeAllocIndex,
            => @intFromEnum(@field(item, field.name)),

            // Plain integers and packed flag structs are stored bit-for-bit.
            u32,
            i32,
            FuncAnalysis,
            Tag.Extern.Flags,
            Tag.TypePointer.Flags,
            Tag.TypeFunction.Flags,
            Tag.TypePointer.PackedOffset,
            Tag.TypeUnion.Flags,
            Tag.TypeStruct.Flags,
            Tag.TypeStructPacked.Flags,
            => @bitCast(@field(item, field.name)),

            else => @compileError("bad field type: " ++ @typeName(field.type)),
        }});
    }
    return result;
}
10440
/// Like `addExtraAssumeCapacity`, but targets the limbs array: on hosts with
/// 64-bit limbs, consecutive u32 fields are packed pairwise into one limb.
/// NOTE(review): the 64-bit path references `ip.limbs`, which does not appear
/// among this `InternPool`'s fields — this function looks stale/dead; confirm
/// it still has callers before relying on it.
fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
    switch (@sizeOf(Limb)) {
        // 32-bit limbs: layout is identical to `extra`, so delegate.
        @sizeOf(u32) => return addExtraAssumeCapacity(ip, extra),
        @sizeOf(u64) => {},
        else => @compileError("unsupported host"),
    }
    const result: u32 = @intCast(ip.limbs.items.len);
    inline for (@typeInfo(@TypeOf(extra)).@"struct".fields, 0..) |field, i| {
        const new: u32 = switch (field.type) {
            u32 => @field(extra, field.name),
            Index => @intFromEnum(@field(extra, field.name)),
            else => @compileError("bad field type: " ++ @typeName(field.type)),
        };
        // Even-numbered fields start a new limb; odd-numbered fields fill the
        // upper 32 bits of the limb just appended.
        if (i % 2 == 0) {
            ip.limbs.appendAssumeCapacity(new);
        } else {
            ip.limbs.items[ip.limbs.items.len - 1] |= @as(u64, new) << 32;
        }
    }
    return result;
}
10462
/// Deserializes a `T` from `extra` starting at `index` — the inverse of
/// `addExtraAssumeCapacity`. Also returns `end`, the index one past the last
/// word read, so callers can read trailing variable-length data.
fn extraDataTrail(extra: Local.Extra, comptime T: type, index: u32) struct { data: T, end: u32 } {
    const extra_items = extra.view().items(.@"0");
    var result: T = undefined;
    const fields = @typeInfo(T).@"struct".fields;
    inline for (fields, index..) |field, extra_index| {
        const extra_item = extra_items[extra_index];
        @field(result, field.name) = switch (field.type) {
            // Enum-backed index types were stored as their integer tag value.
            Index,
            Nav.Index,
            Nav.Index.Optional,
            NamespaceIndex,
            OptionalNamespaceIndex,
            MapIndex,
            OptionalMapIndex,
            String,
            NullTerminatedString,
            OptionalNullTerminatedString,
            Tag.TypePointer.VectorIndex,
            TrackedInst.Index,
            TrackedInst.Index.Optional,
            ComptimeAllocIndex,
            => @enumFromInt(extra_item),

            // Plain integers and packed flag structs were stored bit-for-bit.
            u32,
            i32,
            Tag.Extern.Flags,
            Tag.TypePointer.Flags,
            Tag.TypeFunction.Flags,
            Tag.TypePointer.PackedOffset,
            Tag.TypeUnion.Flags,
            Tag.TypeStruct.Flags,
            Tag.TypeStructPacked.Flags,
            FuncAnalysis,
            => @bitCast(extra_item),

            else => @compileError("bad field type: " ++ @typeName(field.type)),
        };
    }
    return .{
        .data = result,
        .end = @intCast(index + fields.len),
    };
}
10506
/// Reads a `T` out of `extra` starting at `index`, discarding the trailing
/// end position reported by `extraDataTrail`.
fn extraData(extra: Local.Extra, comptime T: type, index: u32) T {
    const trail = extraDataTrail(extra, T, index);
    return trail.data;
}
10510
test "basic usage" {
    const gpa = std.testing.allocator;

    var ip: InternPool = .empty;
    try ip.init(gpa, 1);
    defer ip.deinit(gpa);

    // Intern a couple of distinct types.
    const i32_type = try ip.get(gpa, .main, .{ .int_type = .{
        .signedness = .signed,
        .bits = 32,
    } });
    const array_i32 = try ip.get(gpa, .main, .{ .array_type = .{
        .len = 10,
        .child = i32_type,
        .sentinel = .none,
    } });

    // Interning the same key again must return the same index.
    const another_i32_type = try ip.get(gpa, .main, .{ .int_type = .{
        .signedness = .signed,
        .bits = 32,
    } });
    try std.testing.expect(another_i32_type == i32_type);

    const another_array_i32 = try ip.get(gpa, .main, .{ .array_type = .{
        .len = 10,
        .child = i32_type,
        .sentinel = .none,
    } });
    try std.testing.expect(another_array_i32 == array_i32);
}
10541
/// Returns the child (element/payload) type of a pointer, vector, array,
/// optional, or anyframe type. Asserts `i` is one of those kinds.
pub fn childType(ip: *const InternPool, i: Index) Index {
    switch (ip.indexToKey(i)) {
        .ptr_type => |ptr_type| return ptr_type.child,
        .vector_type => |vector_type| return vector_type.child,
        .array_type => |array_type| return array_type.child,
        .opt_type, .anyframe_type => |child| return child,
        else => unreachable,
    }
}
10551
/// Given a slice type, returns the type of the ptr field.
pub fn slicePtrType(ip: *const InternPool, index: Index) Index {
    // Well-known slice types map to hard-coded many-pointer counterparts.
    const well_known: ?Index = switch (index) {
        .slice_const_u8_type => .manyptr_const_u8_type,
        .slice_const_u8_sentinel_0_type => .manyptr_const_u8_sentinel_0_type,
        .slice_const_slice_const_u8_type => .manyptr_const_slice_const_u8_type,
        .slice_const_type_type => .manyptr_const_type_type,
        else => null,
    };
    if (well_known) |ptr_ty| return ptr_ty;
    const item = index.unwrap(ip).getItem(ip);
    return switch (item.tag) {
        .type_slice => @enumFromInt(item.data),
        else => unreachable, // not a slice type
    };
}
10567
/// Given a slice value, returns the value of the ptr field.
pub fn slicePtr(ip: *const InternPool, index: Index) Index {
    const unwrapped = index.unwrap(ip);
    const item = unwrapped.getItem(ip);
    assert(item.tag == .ptr_slice); // not a slice value otherwise
    return extraData(unwrapped.getExtra(ip), PtrSlice, item.data).ptr;
}
10577
/// Given a slice value, returns the value of the len field.
pub fn sliceLen(ip: *const InternPool, index: Index) Index {
    const unwrapped = index.unwrap(ip);
    const item = unwrapped.getItem(ip);
    assert(item.tag == .ptr_slice); // not a slice value otherwise
    return extraData(unwrapped.getExtra(ip), PtrSlice, item.data).len;
}
10587
10588/// Given an existing value, returns the same value but with the supplied type.
10589/// Only some combinations are allowed:
10590/// * identity coercion
10591/// * undef => any
10592/// * int <=> int
10593/// * int <=> enum
10594/// * enum_literal => enum
10595/// * float <=> float
10596/// * ptr <=> ptr
10597/// * opt ptr <=> ptr
10598/// * opt ptr <=> opt ptr
10599/// * int <=> ptr
10600/// * null_value => opt
10601/// * payload => opt
10602/// * error set <=> error set
10603/// * error union <=> error union
10604/// * error set => error union
10605/// * payload => error union
10606/// * fn <=> fn
10607/// * aggregate <=> aggregate (where children can also be coerced)
10608pub fn getCoerced(
10609    ip: *InternPool,
10610    gpa: Allocator,
10611    tid: Zcu.PerThread.Id,
10612    val: Index,
10613    new_ty: Index,
10614) Allocator.Error!Index {
10615    const old_ty = ip.typeOf(val);
10616    if (old_ty == new_ty) return val;
10617
10618    switch (val) {
10619        .undef => return ip.get(gpa, tid, .{ .undef = new_ty }),
10620        .null_value => {
10621            if (ip.isOptionalType(new_ty)) return ip.get(gpa, tid, .{ .opt = .{
10622                .ty = new_ty,
10623                .val = .none,
10624            } });
10625
10626            if (ip.isPointerType(new_ty)) switch (ip.indexToKey(new_ty).ptr_type.flags.size) {
10627                .one, .many, .c => return ip.get(gpa, tid, .{ .ptr = .{
10628                    .ty = new_ty,
10629                    .base_addr = .int,
10630                    .byte_offset = 0,
10631                } }),
10632                .slice => return ip.get(gpa, tid, .{ .slice = .{
10633                    .ty = new_ty,
10634                    .ptr = try ip.get(gpa, tid, .{ .ptr = .{
10635                        .ty = ip.slicePtrType(new_ty),
10636                        .base_addr = .int,
10637                        .byte_offset = 0,
10638                    } }),
10639                    .len = .undef_usize,
10640                } }),
10641            };
10642        },
10643        else => {
10644            const unwrapped_val = val.unwrap(ip);
10645            const val_item = unwrapped_val.getItem(ip);
10646            switch (val_item.tag) {
10647                .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty),
10648                .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty),
10649                .func_coerced => {
10650                    const func: Index = @enumFromInt(unwrapped_val.getExtra(ip).view().items(.@"0")[
10651                        val_item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").?
10652                    ]);
10653                    switch (func.unwrap(ip).getTag(ip)) {
10654                        .func_decl => return getCoercedFuncDecl(ip, gpa, tid, val, new_ty),
10655                        .func_instance => return getCoercedFuncInstance(ip, gpa, tid, val, new_ty),
10656                        else => unreachable,
10657                    }
10658                },
10659                else => {},
10660            }
10661        },
10662    }
10663
10664    switch (ip.indexToKey(val)) {
10665        .undef => return ip.get(gpa, tid, .{ .undef = new_ty }),
10666        .func => unreachable,
10667
10668        .int => |int| switch (ip.indexToKey(new_ty)) {
10669            .enum_type => return ip.get(gpa, tid, .{ .enum_tag = .{
10670                .ty = new_ty,
10671                .int = try ip.getCoerced(gpa, tid, val, ip.loadEnumType(new_ty).tag_ty),
10672            } }),
10673            .ptr_type => switch (int.storage) {
10674                inline .u64, .i64 => |int_val| return ip.get(gpa, tid, .{ .ptr = .{
10675                    .ty = new_ty,
10676                    .base_addr = .int,
10677                    .byte_offset = @intCast(int_val),
10678                } }),
10679                .big_int => unreachable, // must be a usize
10680                .lazy_align, .lazy_size => {},
10681            },
10682            else => if (ip.isIntegerType(new_ty))
10683                return ip.getCoercedInts(gpa, tid, int, new_ty),
10684        },
10685        .float => |float| switch (ip.indexToKey(new_ty)) {
10686            .simple_type => |simple| switch (simple) {
10687                .f16,
10688                .f32,
10689                .f64,
10690                .f80,
10691                .f128,
10692                .c_longdouble,
10693                .comptime_float,
10694                => return ip.get(gpa, tid, .{ .float = .{
10695                    .ty = new_ty,
10696                    .storage = float.storage,
10697                } }),
10698                else => {},
10699            },
10700            else => {},
10701        },
10702        .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty))
10703            return ip.getCoercedInts(gpa, tid, ip.indexToKey(enum_tag.int).int, new_ty),
10704        .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) {
10705            .enum_type => {
10706                const enum_type = ip.loadEnumType(new_ty);
10707                const index = enum_type.nameIndex(ip, enum_literal).?;
10708                return ip.get(gpa, tid, .{ .enum_tag = .{
10709                    .ty = new_ty,
10710                    .int = if (enum_type.values.len != 0)
10711                        enum_type.values.get(ip)[index]
10712                    else
10713                        try ip.get(gpa, tid, .{ .int = .{
10714                            .ty = enum_type.tag_ty,
10715                            .storage = .{ .u64 = index },
10716                        } }),
10717                } });
10718            },
10719            else => {},
10720        },
10721        .slice => |slice| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size == .slice)
10722            return ip.get(gpa, tid, .{ .slice = .{
10723                .ty = new_ty,
10724                .ptr = try ip.getCoerced(gpa, tid, slice.ptr, ip.slicePtrType(new_ty)),
10725                .len = slice.len,
10726            } })
10727        else if (ip.isIntegerType(new_ty))
10728            return ip.getCoerced(gpa, tid, slice.ptr, new_ty),
10729        .ptr => |ptr| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size != .slice)
10730            return ip.get(gpa, tid, .{ .ptr = .{
10731                .ty = new_ty,
10732                .base_addr = ptr.base_addr,
10733                .byte_offset = ptr.byte_offset,
10734            } })
10735        else if (ip.isIntegerType(new_ty))
10736            switch (ptr.base_addr) {
10737                .int => return ip.get(gpa, tid, .{ .int = .{
10738                    .ty = .usize_type,
10739                    .storage = .{ .u64 = @intCast(ptr.byte_offset) },
10740                } }),
10741                else => {},
10742            },
10743        .opt => |opt| switch (ip.indexToKey(new_ty)) {
10744            .ptr_type => |ptr_type| return switch (opt.val) {
10745                .none => switch (ptr_type.flags.size) {
10746                    .one, .many, .c => try ip.get(gpa, tid, .{ .ptr = .{
10747                        .ty = new_ty,
10748                        .base_addr = .int,
10749                        .byte_offset = 0,
10750                    } }),
10751                    .slice => try ip.get(gpa, tid, .{ .slice = .{
10752                        .ty = new_ty,
10753                        .ptr = try ip.get(gpa, tid, .{ .ptr = .{
10754                            .ty = ip.slicePtrType(new_ty),
10755                            .base_addr = .int,
10756                            .byte_offset = 0,
10757                        } }),
10758                        .len = .undef_usize,
10759                    } }),
10760                },
10761                else => |payload| try ip.getCoerced(gpa, tid, payload, new_ty),
10762            },
10763            .opt_type => |child_type| return try ip.get(gpa, tid, .{ .opt = .{
10764                .ty = new_ty,
10765                .val = switch (opt.val) {
10766                    .none => .none,
10767                    else => try ip.getCoerced(gpa, tid, opt.val, child_type),
10768                },
10769            } }),
10770            else => {},
10771        },
10772        .err => |err| if (ip.isErrorSetType(new_ty))
10773            return ip.get(gpa, tid, .{ .err = .{
10774                .ty = new_ty,
10775                .name = err.name,
10776            } })
10777        else if (ip.isErrorUnionType(new_ty))
10778            return ip.get(gpa, tid, .{ .error_union = .{
10779                .ty = new_ty,
10780                .val = .{ .err_name = err.name },
10781            } }),
10782        .error_union => |error_union| if (ip.isErrorUnionType(new_ty))
10783            return ip.get(gpa, tid, .{ .error_union = .{
10784                .ty = new_ty,
10785                .val = error_union.val,
10786            } }),
10787        .aggregate => |aggregate| {
10788            const new_len: usize = @intCast(ip.aggregateTypeLen(new_ty));
10789            direct: {
10790                const old_ty_child = switch (ip.indexToKey(old_ty)) {
10791                    inline .array_type, .vector_type => |seq_type| seq_type.child,
10792                    .tuple_type, .struct_type => break :direct,
10793                    else => unreachable,
10794                };
10795                const new_ty_child = switch (ip.indexToKey(new_ty)) {
10796                    inline .array_type, .vector_type => |seq_type| seq_type.child,
10797                    .tuple_type, .struct_type => break :direct,
10798                    else => unreachable,
10799                };
10800                if (old_ty_child != new_ty_child) break :direct;
10801                switch (aggregate.storage) {
10802                    .bytes => |bytes| return ip.get(gpa, tid, .{ .aggregate = .{
10803                        .ty = new_ty,
10804                        .storage = .{ .bytes = bytes },
10805                    } }),
10806                    .elems => |elems| {
10807                        const elems_copy = try gpa.dupe(Index, elems[0..new_len]);
10808                        defer gpa.free(elems_copy);
10809                        return ip.get(gpa, tid, .{ .aggregate = .{
10810                            .ty = new_ty,
10811                            .storage = .{ .elems = elems_copy },
10812                        } });
10813                    },
10814                    .repeated_elem => |elem| {
10815                        return ip.get(gpa, tid, .{ .aggregate = .{
10816                            .ty = new_ty,
10817                            .storage = .{ .repeated_elem = elem },
10818                        } });
10819                    },
10820                }
10821            }
10822            // Direct approach failed - we must recursively coerce elems
10823            const agg_elems = try gpa.alloc(Index, new_len);
10824            defer gpa.free(agg_elems);
10825            // First, fill the vector with the uncoerced elements. We do this to avoid key
10826            // lifetime issues, since it'll allow us to avoid referencing `aggregate` after we
10827            // begin interning elems.
10828            switch (aggregate.storage) {
10829                .bytes => |bytes| {
10830                    // We have to intern each value here, so unfortunately we can't easily avoid
10831                    // the repeated indexToKey calls.
10832                    for (agg_elems, 0..) |*elem, index| {
10833                        elem.* = try ip.get(gpa, tid, .{ .int = .{
10834                            .ty = .u8_type,
10835                            .storage = .{ .u64 = bytes.at(index, ip) },
10836                        } });
10837                    }
10838                },
10839                .elems => |elems| @memcpy(agg_elems, elems[0..new_len]),
10840                .repeated_elem => |elem| @memset(agg_elems, elem),
10841            }
10842            // Now, coerce each element to its new type.
10843            for (agg_elems, 0..) |*elem, i| {
10844                const new_elem_ty = switch (ip.indexToKey(new_ty)) {
10845                    inline .array_type, .vector_type => |seq_type| seq_type.child,
10846                    .tuple_type => |tuple_type| tuple_type.types.get(ip)[i],
10847                    .struct_type => ip.loadStructType(new_ty).field_types.get(ip)[i],
10848                    else => unreachable,
10849                };
10850                elem.* = try ip.getCoerced(gpa, tid, elem.*, new_elem_ty);
10851            }
10852            return ip.get(gpa, tid, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .elems = agg_elems } } });
10853        },
10854        else => {},
10855    }
10856
10857    switch (ip.indexToKey(new_ty)) {
10858        .opt_type => |child_type| switch (val) {
10859            .null_value => return ip.get(gpa, tid, .{ .opt = .{
10860                .ty = new_ty,
10861                .val = .none,
10862            } }),
10863            else => return ip.get(gpa, tid, .{ .opt = .{
10864                .ty = new_ty,
10865                .val = try ip.getCoerced(gpa, tid, val, child_type),
10866            } }),
10867        },
10868        .error_union_type => |error_union_type| return ip.get(gpa, tid, .{ .error_union = .{
10869            .ty = new_ty,
10870            .val = .{ .payload = try ip.getCoerced(gpa, tid, val, error_union_type.payload_type) },
10871        } }),
10872        else => {},
10873    }
10874    if (std.debug.runtime_safety) {
10875        std.debug.panic("InternPool.getCoerced of {s} not implemented from {s} to {s}", .{
10876            @tagName(ip.indexToKey(val)),
10877            @tagName(ip.indexToKey(old_ty)),
10878            @tagName(ip.indexToKey(new_ty)),
10879        });
10880    }
10881    unreachable;
10882}
10883
/// Coerces a `func_decl` value to `new_ty`, returning `val` unchanged when the
/// declared function type already matches. Otherwise defers to `getCoercedFunc`
/// to intern a `func_coerced` wrapper.
fn getCoercedFuncDecl(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    val: Index,
    new_ty: Index,
) Allocator.Error!Index {
    const unwrapped = val.unwrap(ip);
    // Read the `ty` field directly out of the item's `Tag.FuncDecl` extra data.
    const ty_word_offset = std.meta.fieldIndex(Tag.FuncDecl, "ty").?;
    const extra_words = unwrapped.getExtra(ip).view().items(.@"0");
    const existing_ty: Index = @enumFromInt(extra_words[unwrapped.getData(ip) + ty_word_offset]);
    if (existing_ty == new_ty) return val;
    return getCoercedFunc(ip, gpa, tid, val, new_ty);
}
10898
/// Coerces a `func_instance` value to `new_ty`, returning `val` unchanged when
/// the instance's function type already matches. Otherwise defers to
/// `getCoercedFunc` to intern a `func_coerced` wrapper.
fn getCoercedFuncInstance(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    val: Index,
    new_ty: Index,
) Allocator.Error!Index {
    const unwrapped = val.unwrap(ip);
    // Read the `ty` field directly out of the item's `Tag.FuncInstance` extra data.
    const ty_word_offset = std.meta.fieldIndex(Tag.FuncInstance, "ty").?;
    const extra_words = unwrapped.getExtra(ip).view().items(.@"0");
    const existing_ty: Index = @enumFromInt(extra_words[unwrapped.getData(ip) + ty_word_offset]);
    if (existing_ty == new_ty) return val;
    return getCoercedFunc(ip, gpa, tid, val, new_ty);
}
10913
/// Interns a `func_coerced` item that wraps `func` with the type `ty`.
/// If an identical coercion already exists in the pool, the speculatively
/// written extra data is rolled back and the existing index is returned.
fn getCoercedFunc(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    func: Index,
    ty: Index,
) Allocator.Error!Index {
    const local = ip.getLocal(tid);
    const items = local.getMutableItems(gpa);
    // Reserve the item slot up front so the append below cannot fail after
    // extra data has been committed.
    try items.ensureUnusedCapacity(1);
    const extra = local.getMutableExtra(gpa);

    // Remember the extra length so we can roll back on error or on a lookup hit.
    const prev_extra_len = extra.mutate.len;
    try extra.ensureUnusedCapacity(@typeInfo(Tag.FuncCoerced).@"struct".fields.len);

    // The extra data must be written before the key lookup because the key
    // below is built from the just-written `FuncCoerced` record.
    const extra_index = addExtraAssumeCapacity(extra, Tag.FuncCoerced{
        .ty = ty,
        .func = func,
    });
    errdefer extra.mutate.len = prev_extra_len;

    var gop = try ip.getOrPutKey(gpa, tid, .{
        .func = ip.extraFuncCoerced(extra.list.*, extra_index),
    });
    defer gop.deinit();
    if (gop == .existing) {
        // Duplicate: discard the speculative extra data and reuse the match.
        extra.mutate.len = prev_extra_len;
        return gop.existing;
    }

    items.appendAssumeCapacity(.{
        .tag = .func_coerced,
        .data = extra_index,
    });
    return gop.put();
}
10950
/// Re-interns the integer value `int` with its storage unchanged but with the
/// type `new_ty`. Assumes `new_ty` is an integer type; the storage itself is
/// not range-checked here.
pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, int: Key.Int, new_ty: Index) Allocator.Error!Index {
    const retyped: Key = .{ .int = .{
        .ty = new_ty,
        .storage = int.storage,
    } };
    return ip.get(gpa, tid, retyped);
}
10959
/// Returns the `FuncType` key for `val` when it is an interned function type,
/// or `null` for any other item.
pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType {
    const unwrapped = val.unwrap(ip);
    const item = unwrapped.getItem(ip);
    if (item.tag != .type_function) return null;
    return extraFuncType(unwrapped.tid, unwrapped.getExtra(ip), item.data);
}
10968
/// Reports whether `ty` is an integer type. Includes `.comptime_int_type`
/// and the C integer types, as well as arbitrary-width `iN`/`uN` types.
pub fn isIntegerType(ip: *const InternPool, ty: Index) bool {
    // Fast path: well-known integer types have fixed indices.
    switch (ty) {
        .usize_type,
        .isize_type,
        .c_char_type,
        .c_short_type,
        .c_ushort_type,
        .c_int_type,
        .c_uint_type,
        .c_long_type,
        .c_ulong_type,
        .c_longlong_type,
        .c_ulonglong_type,
        .comptime_int_type,
        => return true,
        else => {},
    }
    // Otherwise inspect the item tag for arbitrary-width integer types.
    return switch (ty.unwrap(ip).getTag(ip)) {
        .type_int_signed, .type_int_unsigned => true,
        else => false,
    };
}
10993
/// Reports whether `ty` is an enum type. Does not include `.enum_literal_type`.
pub fn isEnumType(ip: *const InternPool, ty: Index) bool {
    return switch (ip.indexToKey(ty)) {
        .enum_type => true,
        else => false,
    };
}
10998
/// Reports whether `ty` is a union type.
pub fn isUnion(ip: *const InternPool, ty: Index) bool {
    return switch (ip.indexToKey(ty)) {
        .union_type => true,
        else => false,
    };
}
11002
/// Reports whether `ty` is a function type.
pub fn isFunctionType(ip: *const InternPool, ty: Index) bool {
    return switch (ip.indexToKey(ty)) {
        .func_type => true,
        else => false,
    };
}
11006
/// Reports whether `ty` is a pointer type (including slices).
pub fn isPointerType(ip: *const InternPool, ty: Index) bool {
    return switch (ip.indexToKey(ty)) {
        .ptr_type => true,
        else => false,
    };
}
11010
/// Reports whether `ty` is an optional type.
pub fn isOptionalType(ip: *const InternPool, ty: Index) bool {
    return switch (ip.indexToKey(ty)) {
        .opt_type => true,
        else => false,
    };
}
11014
/// Reports whether `ty` is an error set type. Includes `anyerror`, the ad-hoc
/// inferred error set, explicit error sets, and inferred error set types.
pub fn isErrorSetType(ip: *const InternPool, ty: Index) bool {
    // These two well-known indices need no key lookup.
    if (ty == .anyerror_type or ty == .adhoc_inferred_error_set_type) return true;
    return switch (ip.indexToKey(ty)) {
        .error_set_type, .inferred_error_set_type => true,
        else => false,
    };
}
11025
/// Reports whether `ty` is an inferred error set type, including the ad-hoc
/// inferred error set.
pub fn isInferredErrorSetType(ip: *const InternPool, ty: Index) bool {
    if (ty == .adhoc_inferred_error_set_type) return true;
    return ip.indexToKey(ty) == .inferred_error_set_type;
}
11029
/// Reports whether `ty` is an error union type.
pub fn isErrorUnionType(ip: *const InternPool, ty: Index) bool {
    return switch (ip.indexToKey(ty)) {
        .error_union_type => true,
        else => false,
    };
}
11033
/// Reports whether `ty` is an aggregate type: array, vector, tuple, or struct.
pub fn isAggregateType(ip: *const InternPool, ty: Index) bool {
    switch (ip.indexToKey(ty)) {
        .array_type, .vector_type, .tuple_type, .struct_type => return true,
        else => return false,
    }
}
11040
/// Returns the error set component of the error union type `ty`.
/// Asserts `ty` is an error union type.
pub fn errorUnionSet(ip: *const InternPool, ty: Index) Index {
    const info = ip.indexToKey(ty).error_union_type;
    return info.error_set_type;
}
11044
/// Returns the payload component of the error union type `ty`.
/// Asserts `ty` is an error union type.
pub fn errorUnionPayload(ip: *const InternPool, ty: Index) Index {
    const info = ip.indexToKey(ty).error_union_type;
    return info.payload_type;
}
11048
/// This is only legal because the initializer is not part of the hash.
/// Atomically overwrites the `init` field of the `variable` item at `index`.
pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void {
    const unwrapped_index = index.unwrap(ip);

    // Hold the owning thread-local extra mutex while mutating extra data.
    const local = ip.getLocal(unwrapped_index.tid);
    local.mutate.extra.mutex.lock();
    defer local.mutate.extra.mutex.unlock();

    const extra_items = local.shared.extra.view().items(.@"0");
    const item = unwrapped_index.getItem(ip);
    assert(item.tag == .variable);
    // Release ordering so readers that acquire the extra list observe the new init.
    @atomicStore(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.Variable, "init").?], @intFromEnum(init_index), .release);
}
11062
/// Debug helper: prints pool statistics followed by every interned item to
/// stderr. Any failure silently aborts the dump.
pub fn dump(ip: *const InternPool) void {
    const gpa = std.heap.page_allocator;
    dumpStatsFallible(ip, gpa) catch return;
    dumpAllFallible(ip) catch return;
}
11067
/// Prints (to stderr) aggregate memory statistics for the pool: total item,
/// extra, and limb counts across all thread-local segments, then a per-tag
/// breakdown of the 50 tags consuming the most bytes.
/// `arena` is used only for the temporary per-tag counting map.
fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
    // Sum raw lengths over every thread-local segment.
    var items_len: usize = 0;
    var extra_len: usize = 0;
    var limbs_len: usize = 0;
    for (ip.locals) |*local| {
        items_len += local.mutate.items.len;
        extra_len += local.mutate.extra.len;
        limbs_len += local.mutate.limbs.len;
    }
    // Each item is a 1-byte tag plus a 4-byte data word; extra entries are
    // u32s and limbs are 8 bytes each.
    const items_size = (1 + 4) * items_len;
    const extra_size = 4 * extra_len;
    const limbs_size = 8 * limbs_len;

    // TODO: map overhead size is not taken into account
    const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size;

    std.debug.print(
        \\InternPool size: {d} bytes
        \\  {d} items: {d} bytes
        \\  {d} extra: {d} bytes
        \\  {d} limbs: {d} bytes
        \\
    , .{
        total_size,
        items_len,
        items_size,
        extra_len,
        extra_size,
        limbs_len,
        limbs_size,
    });

    const TagStats = struct {
        count: usize = 0,
        bytes: usize = 0,
    };
    var counts = std.AutoArrayHashMap(Tag, TagStats).init(arena);
    for (ip.locals) |*local| {
        const items = local.shared.items.view().slice();
        const extra_list = local.shared.extra;
        const extra_items = extra_list.view().items(.@"0");
        for (
            items.items(.tag)[0..local.mutate.items.len],
            items.items(.data)[0..local.mutate.items.len],
        ) |tag, data| {
            const gop = try counts.getOrPut(tag);
            if (!gop.found_existing) gop.value_ptr.* = .{};
            gop.value_ptr.count += 1;
            // Estimate per-item bytes: 1 tag byte + 4 data bytes, plus the
            // tag-specific trailing extra/limb data computed below.
            gop.value_ptr.bytes += 1 + 4 + @as(usize, switch (tag) {
                // Note that in this case, we have technically leaked some extra data
                // bytes which we do not account for here.
                .removed => 0,

                .type_int_signed => 0,
                .type_int_unsigned => 0,
                .type_array_small => @sizeOf(Vector),
                .type_array_big => @sizeOf(Array),
                .type_vector => @sizeOf(Vector),
                .type_pointer => @sizeOf(Tag.TypePointer),
                .type_slice => 0,
                .type_optional => 0,
                .type_anyframe => 0,
                .type_error_union => @sizeOf(Key.ErrorUnionType),
                .type_anyerror_union => 0,
                .type_error_set => b: {
                    const info = extraData(extra_list, Tag.ErrorSet, data);
                    break :b @sizeOf(Tag.ErrorSet) + (@sizeOf(u32) * info.names_len);
                },
                .type_inferred_error_set => 0,
                .type_enum_explicit, .type_enum_nonexhaustive => b: {
                    const info = extraData(extra_list, EnumExplicit, data);
                    var ints = @typeInfo(EnumExplicit).@"struct".fields.len;
                    if (info.zir_index == .none) ints += 1;
                    ints += if (info.captures_len != std.math.maxInt(u32))
                        info.captures_len
                    else
                        @typeInfo(PackedU64).@"struct".fields.len;
                    ints += info.fields_len;
                    if (info.values_map != .none) ints += info.fields_len;
                    break :b @sizeOf(u32) * ints;
                },
                .type_enum_auto => b: {
                    const info = extraData(extra_list, EnumAuto, data);
                    const ints = @typeInfo(EnumAuto).@"struct".fields.len + info.captures_len + info.fields_len;
                    break :b @sizeOf(u32) * ints;
                },
                .type_opaque => b: {
                    const info = extraData(extra_list, Tag.TypeOpaque, data);
                    const ints = @typeInfo(Tag.TypeOpaque).@"struct".fields.len + info.captures_len;
                    break :b @sizeOf(u32) * ints;
                },
                .type_struct => b: {
                    const extra = extraDataTrail(extra_list, Tag.TypeStruct, data);
                    const info = extra.data;
                    var ints: usize = @typeInfo(Tag.TypeStruct).@"struct".fields.len;
                    if (info.flags.any_captures) {
                        const captures_len = extra_items[extra.end];
                        ints += 1 + captures_len;
                    }
                    ints += info.fields_len; // types
                    ints += 1; // names_map
                    ints += info.fields_len; // names
                    if (info.flags.any_default_inits)
                        ints += info.fields_len; // inits
                    if (info.flags.any_aligned_fields)
                        ints += (info.fields_len + 3) / 4; // aligns
                    if (info.flags.any_comptime_fields)
                        ints += (info.fields_len + 31) / 32; // comptime bits
                    if (!info.flags.is_extern)
                        ints += info.fields_len; // runtime order
                    ints += info.fields_len; // offsets
                    break :b @sizeOf(u32) * ints;
                },
                .type_struct_packed => b: {
                    const extra = extraDataTrail(extra_list, Tag.TypeStructPacked, data);
                    const captures_len = if (extra.data.flags.any_captures)
                        extra_items[extra.end]
                    else
                        0;
                    break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).@"struct".fields.len +
                        @intFromBool(extra.data.flags.any_captures) + captures_len +
                        extra.data.fields_len * 2);
                },
                .type_struct_packed_inits => b: {
                    const extra = extraDataTrail(extra_list, Tag.TypeStructPacked, data);
                    const captures_len = if (extra.data.flags.any_captures)
                        extra_items[extra.end]
                    else
                        0;
                    break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).@"struct".fields.len +
                        @intFromBool(extra.data.flags.any_captures) + captures_len +
                        extra.data.fields_len * 3);
                },
                .type_tuple => b: {
                    const info = extraData(extra_list, TypeTuple, data);
                    break :b @sizeOf(TypeTuple) + (@sizeOf(u32) * 2 * info.fields_len);
                },

                .type_union => b: {
                    const extra = extraDataTrail(extra_list, Tag.TypeUnion, data);
                    const captures_len = if (extra.data.flags.any_captures)
                        extra_items[extra.end]
                    else
                        0;
                    const per_field = @sizeOf(u32); // field type
                    // 1 byte per field for alignment, rounded up to the nearest 4 bytes
                    const alignments = if (extra.data.flags.any_aligned_fields)
                        ((extra.data.fields_len + 3) / 4) * 4
                    else
                        0;
                    break :b @sizeOf(Tag.TypeUnion) +
                        4 * (@intFromBool(extra.data.flags.any_captures) + captures_len) +
                        (extra.data.fields_len * per_field) + alignments;
                },

                .type_function => b: {
                    const info = extraData(extra_list, Tag.TypeFunction, data);
                    break :b @sizeOf(Tag.TypeFunction) +
                        (@sizeOf(Index) * info.params_len) +
                        (@as(u32, 4) * @intFromBool(info.flags.has_comptime_bits)) +
                        (@as(u32, 4) * @intFromBool(info.flags.has_noalias_bits));
                },

                .undef => 0,
                .simple_type => 0,
                .simple_value => 0,
                .ptr_nav => @sizeOf(PtrNav),
                .ptr_comptime_alloc => @sizeOf(PtrComptimeAlloc),
                .ptr_uav => @sizeOf(PtrUav),
                .ptr_uav_aligned => @sizeOf(PtrUavAligned),
                .ptr_comptime_field => @sizeOf(PtrComptimeField),
                .ptr_int => @sizeOf(PtrInt),
                .ptr_eu_payload => @sizeOf(PtrBase),
                .ptr_opt_payload => @sizeOf(PtrBase),
                .ptr_elem => @sizeOf(PtrBaseIndex),
                .ptr_field => @sizeOf(PtrBaseIndex),
                .ptr_slice => @sizeOf(PtrSlice),
                .opt_null => 0,
                .opt_payload => @sizeOf(Tag.TypeValue),
                .int_u8 => 0,
                .int_u16 => 0,
                .int_u32 => 0,
                .int_i32 => 0,
                .int_usize => 0,
                .int_comptime_int_u32 => 0,
                .int_comptime_int_i32 => 0,
                .int_small => @sizeOf(IntSmall),

                .int_positive,
                .int_negative,
                => b: {
                    // Big ints store an `Int` header followed by their limbs
                    // in the limbs list rather than in extra data.
                    const limbs_list = local.shared.getLimbs();
                    const int: Int = @bitCast(limbs_list.view().items(.@"0")[data..][0..Int.limbs_items_len].*);
                    break :b @sizeOf(Int) + int.limbs_len * @sizeOf(Limb);
                },

                .int_lazy_align, .int_lazy_size => @sizeOf(IntLazy),

                .error_set_error, .error_union_error => @sizeOf(Key.Error),
                .error_union_payload => @sizeOf(Tag.TypeValue),
                .enum_literal => 0,
                .enum_tag => @sizeOf(Tag.EnumTag),

                .bytes => b: {
                    const info = extraData(extra_list, Bytes, data);
                    const len: usize = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty));
                    break :b @sizeOf(Bytes) + len + @intFromBool(info.bytes.at(len - 1, ip) != 0);
                },
                .aggregate => b: {
                    const info = extraData(extra_list, Tag.Aggregate, data);
                    const fields_len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty));
                    break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len);
                },
                .repeated => @sizeOf(Repeated),

                .float_f16 => 0,
                .float_f32 => 0,
                .float_f64 => @sizeOf(Float64),
                .float_f80 => @sizeOf(Float80),
                .float_f128 => @sizeOf(Float128),
                .float_c_longdouble_f80 => @sizeOf(Float80),
                .float_c_longdouble_f128 => @sizeOf(Float128),
                .float_comptime_float => @sizeOf(Float128),
                .variable, .threadlocal_variable => @sizeOf(Tag.Variable),
                .@"extern" => @sizeOf(Tag.Extern),
                .func_decl => @sizeOf(Tag.FuncDecl),
                .func_instance => b: {
                    const info = extraData(extra_list, Tag.FuncInstance, data);
                    const ty = ip.typeOf(info.generic_owner);
                    const params_len = ip.indexToKey(ty).func_type.param_types.len;
                    break :b @sizeOf(Tag.FuncInstance) + @sizeOf(Index) * params_len;
                },
                .func_coerced => @sizeOf(Tag.FuncCoerced),
                .only_possible_value => 0,
                .union_value => @sizeOf(Key.Union),

                .memoized_call => b: {
                    const info = extraData(extra_list, MemoizedCall, data);
                    break :b @sizeOf(MemoizedCall) + (@sizeOf(Index) * info.args_len);
                },
            });
        }
    }
    // Sort tags by total bytes (descending) and print the top 50.
    const SortContext = struct {
        map: *std.AutoArrayHashMap(Tag, TagStats),
        pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
            const values = ctx.map.values();
            return values[a_index].bytes > values[b_index].bytes;
            //return values[a_index].count > values[b_index].count;
        }
    };
    counts.sort(SortContext{ .map = &counts });
    const len = @min(50, counts.count());
    std.debug.print("  top 50 tags:\n", .{});
    for (counts.keys()[0..len], counts.values()[0..len]) |tag, stats| {
        std.debug.print("    {s}: {d} occurrences, {d} total bytes\n", .{
            @tagName(tag), stats.count, stats.bytes,
        });
    }
}
11328
/// Prints every interned item in every thread-local segment to stderr, one
/// line per item, in the form `$index = tag(payload)`.
fn dumpAllFallible(ip: *const InternPool) anyerror!void {
    var buffer: [4096]u8 = undefined;
    // Hold the stderr lock across the whole dump so output is not interleaved.
    const stderr_bw, _ = std.debug.lockStderrWriter(&buffer);
    defer std.debug.unlockStderrWriter();
    for (ip.locals, 0..) |*local, tid| {
        const items = local.shared.items.view();
        for (
            items.items(.tag)[0..local.mutate.items.len],
            items.items(.data)[0..local.mutate.items.len],
            0..,
        ) |tag, data, index| {
            const i = Index.Unwrapped.wrap(.{ .tid = @enumFromInt(tid), .index = @intCast(index) }, ip);
            try stderr_bw.print("${d} = {s}(", .{ i, @tagName(tag) });
            switch (tag) {
                .removed => {},

                // Simple types/values encode their payload in the index itself.
                .simple_type => try stderr_bw.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(@intFromEnum(i))))}),
                .simple_value => try stderr_bw.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(@intFromEnum(i))))}),

                // These tags carry a plain integer payload (typically an
                // extra-data or limb offset); print it as a raw number.
                .type_int_signed,
                .type_int_unsigned,
                .type_array_small,
                .type_array_big,
                .type_vector,
                .type_pointer,
                .type_optional,
                .type_anyframe,
                .type_error_union,
                .type_anyerror_union,
                .type_error_set,
                .type_inferred_error_set,
                .type_enum_explicit,
                .type_enum_nonexhaustive,
                .type_enum_auto,
                .type_opaque,
                .type_struct,
                .type_struct_packed,
                .type_struct_packed_inits,
                .type_tuple,
                .type_union,
                .type_function,
                .undef,
                .ptr_nav,
                .ptr_comptime_alloc,
                .ptr_uav,
                .ptr_uav_aligned,
                .ptr_comptime_field,
                .ptr_int,
                .ptr_eu_payload,
                .ptr_opt_payload,
                .ptr_elem,
                .ptr_field,
                .ptr_slice,
                .opt_payload,
                .int_u8,
                .int_u16,
                .int_u32,
                .int_i32,
                .int_usize,
                .int_comptime_int_u32,
                .int_comptime_int_i32,
                .int_small,
                .int_positive,
                .int_negative,
                .int_lazy_align,
                .int_lazy_size,
                .error_set_error,
                .error_union_error,
                .error_union_payload,
                .enum_literal,
                .enum_tag,
                .bytes,
                .aggregate,
                .repeated,
                .float_f16,
                .float_f32,
                .float_f64,
                .float_f80,
                .float_f128,
                .float_c_longdouble_f80,
                .float_c_longdouble_f128,
                .float_comptime_float,
                .variable,
                .threadlocal_variable,
                .@"extern",
                .func_decl,
                .func_instance,
                .func_coerced,
                .union_value,
                .memoized_call,
                => try stderr_bw.print("{d}", .{data}),

                // These tags store another `Index` in `data`; print with `$`
                // to mark it as an index reference.
                .opt_null,
                .type_slice,
                .only_possible_value,
                => try stderr_bw.print("${d}", .{data}),
            }
            try stderr_bw.writeAll(")\n");
        }
    }
}
11430
/// Debug helper: prints all generic function instances grouped by their
/// generic owner. Any failure silently aborts the dump.
pub fn dumpGenericInstances(ip: *const InternPool, allocator: Allocator) void {
    dumpGenericInstancesFallible(ip, allocator) catch return;
}
11434
/// Groups every `func_instance` item by its generic owner, sorts owners by
/// instance count (descending), and prints each owner with its instances and
/// their comptime arguments to stderr. All temporary memory comes from a
/// local arena backed by `allocator`.
pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) anyerror!void {
    var arena_allocator = std.heap.ArenaAllocator.init(allocator);
    defer arena_allocator.deinit();
    const arena = arena_allocator.allocator();

    // Map: generic owner -> list of its instance indices.
    var instances: std.AutoArrayHashMapUnmanaged(Index, std.ArrayList(Index)) = .empty;
    for (ip.locals, 0..) |*local, tid| {
        const items = local.shared.items.view().slice();
        const extra_list = local.shared.extra;
        for (
            items.items(.tag)[0..local.mutate.items.len],
            items.items(.data)[0..local.mutate.items.len],
            0..,
        ) |tag, data, index| {
            if (tag != .func_instance) continue;
            const info = extraData(extra_list, Tag.FuncInstance, data);

            const gop = try instances.getOrPut(arena, info.generic_owner);
            if (!gop.found_existing) gop.value_ptr.* = .{};

            try gop.value_ptr.append(
                arena,
                Index.Unwrapped.wrap(.{ .tid = @enumFromInt(tid), .index = @intCast(index) }, ip),
            );
        }
    }

    // Hold the stderr lock for the whole report so output is not interleaved.
    var buffer: [4096]u8 = undefined;
    const stderr_bw, _ = std.debug.lockStderrWriter(&buffer);
    defer std.debug.unlockStderrWriter();

    // Owners with more instances print first.
    const SortContext = struct {
        values: []std.ArrayList(Index),
        pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
            return ctx.values[a_index].items.len > ctx.values[b_index].items.len;
        }
    };

    instances.sort(SortContext{ .values = instances.values() });
    var it = instances.iterator();
    while (it.next()) |entry| {
        const generic_fn_owner_nav = ip.getNav(ip.funcDeclInfo(entry.key_ptr.*).owner_nav);
        try stderr_bw.print("{f} ({d}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len });
        for (entry.value_ptr.items) |index| {
            const unwrapped_index = index.unwrap(ip);
            const func = ip.extraFuncInstance(unwrapped_index.tid, unwrapped_index.getExtra(ip), unwrapped_index.getData(ip));
            const owner_nav = ip.getNav(func.owner_nav);
            try stderr_bw.print("  {f}: (", .{owner_nav.name.fmt(ip)});
            // Only comptime-known arguments are printed; `.none` slots are runtime args.
            for (func.comptime_args.get(ip)) |arg| {
                if (arg != .none) {
                    const key = ip.indexToKey(arg);
                    try stderr_bw.print(" {} ", .{key});
                }
            }
            try stderr_bw.writeAll(")\n");
        }
    }
}
11493
/// Fetch the `Nav` stored at `index`, acquire-loading the owning thread's
/// shared list so concurrently appended entries are safely visible.
pub fn getNav(ip: *const InternPool, index: Nav.Index) Nav {
    const unwrapped = index.unwrap(ip);
    const shared_navs = ip.getLocalShared(unwrapped.tid).navs.acquire();
    const packed_nav = shared_navs.view().get(unwrapped.index);
    return packed_nav.unpack();
}
11499
/// Return a stable pointer to the `Zcu.Namespace` identified by
/// `namespace_index` (namespaces live in fixed-size buckets).
pub fn namespacePtr(ip: *InternPool, namespace_index: NamespaceIndex) *Zcu.Namespace {
    const unwrapped = namespace_index.unwrap(ip);
    const buckets = ip.getLocalShared(unwrapped.tid).namespaces.acquire().view().items(.@"0");
    return &buckets[unwrapped.bucket_index][unwrapped.index];
}
11506
/// Create a `ComptimeUnit`, forming an `AnalUnit` for a `comptime` declaration.
pub fn createComptimeUnit(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    zir_index: TrackedInst.Index,
    namespace: NamespaceIndex,
) Allocator.Error!ComptimeUnit.Id {
    const comptime_units = ip.getLocal(tid).getMutableComptimeUnits(gpa);
    // The new unit occupies the next slot; capture the index before appending.
    const id_unwrapped: ComptimeUnit.Id.Unwrapped = .{
        .tid = tid,
        .index = comptime_units.mutate.len,
    };
    try comptime_units.append(.{.{
        .zir_index = zir_index,
        .namespace = namespace,
    }});
    return id_unwrapped.wrap(ip);
}
11526
/// Fetch the `ComptimeUnit` identified by `id` from the owning thread's
/// acquire-loaded shared list.
pub fn getComptimeUnit(ip: *const InternPool, id: ComptimeUnit.Id) ComptimeUnit {
    const unwrapped = id.unwrap(ip);
    const shared_units = ip.getLocalShared(unwrapped.tid).comptime_units.acquire();
    const units = shared_units.view().items(.@"0");
    return units[unwrapped.index];
}
11532
/// Create a `Nav` which does not undergo semantic analysis.
/// Since it is never analyzed, the `Nav`'s value must be known at creation time.
pub fn createNav(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    opts: struct {
        name: NullTerminatedString,
        fqn: NullTerminatedString,
        val: InternPool.Index,
        is_const: bool,
        alignment: Alignment,
        @"linksection": OptionalNullTerminatedString,
        @"addrspace": std.builtin.AddressSpace,
    },
) Allocator.Error!Nav.Index {
    const navs = ip.getLocal(tid).getMutableNavs(gpa);
    // The new `Nav` occupies the next slot; capture the index before appending.
    const index_unwrapped: Nav.Index.Unwrapped = .{
        .tid = tid,
        .index = navs.mutate.len,
    };
    // No analysis owner (`analysis = null`), and the status is fully
    // resolved from the start since the value is already known.
    try navs.append(Nav.pack(.{
        .name = opts.name,
        .fqn = opts.fqn,
        .analysis = null,
        .status = .{ .fully_resolved = .{
            .val = opts.val,
            .is_const = opts.is_const,
            .alignment = opts.alignment,
            .@"linksection" = opts.@"linksection",
            .@"addrspace" = opts.@"addrspace",
        } },
    }));
    return index_unwrapped.wrap(ip);
}
11568
/// Create a `Nav` which undergoes semantic analysis because it corresponds to a source declaration.
/// The value of the `Nav` is initially unresolved.
pub fn createDeclNav(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    name: NullTerminatedString,
    fqn: NullTerminatedString,
    zir_index: TrackedInst.Index,
    namespace: NamespaceIndex,
) Allocator.Error!Nav.Index {
    const navs = ip.getLocal(tid).getMutableNavs(gpa);

    // Reserve first so the append below cannot fail after the index is taken.
    try navs.ensureUnusedCapacity(1);

    const nav = Nav.Index.Unwrapped.wrap(.{
        .tid = tid,
        .index = navs.mutate.len,
    }, ip);

    navs.appendAssumeCapacity(Nav.pack(.{
        .name = name,
        .fqn = fqn,
        // The analysis owner: which namespace and which ZIR declaration will
        // later resolve this `Nav`.
        .analysis = .{
            .namespace = namespace,
            .zir_index = zir_index,
        },
        .status = .unresolved,
    }));

    return nav;
}
11601
/// Resolve the type of a `Nav` with an analysis owner.
/// If its status is already `resolved`, the old value is discarded.
pub fn resolveNavType(
    ip: *InternPool,
    nav: Nav.Index,
    resolved: struct {
        type: InternPool.Index,
        is_const: bool,
        alignment: Alignment,
        @"linksection": OptionalNullTerminatedString,
        @"addrspace": std.builtin.AddressSpace,
        is_threadlocal: bool,
        is_extern_decl: bool,
    },
) void {
    const unwrapped = nav.unwrap(ip);

    // Serialize with other writers to this thread's nav data.
    const local = ip.getLocal(unwrapped.tid);
    local.mutate.extra.mutex.lock();
    defer local.mutate.extra.mutex.unlock();

    const navs = local.shared.navs.view();

    const nav_analysis_namespace = navs.items(.analysis_namespace);
    const nav_analysis_zir_index = navs.items(.analysis_zir_index);
    const nav_types = navs.items(.type_or_val);
    const nav_linksections = navs.items(.@"linksection");
    const nav_bits = navs.items(.bits);

    // Only a `Nav` with an analysis owner may have its type resolved here.
    assert(nav_analysis_namespace[unwrapped.index] != .none);
    assert(nav_analysis_zir_index[unwrapped.index] != .none);

    // Release stores so lock-free readers observe fully-written fields.
    @atomicStore(InternPool.Index, &nav_types[unwrapped.index], resolved.type, .release);
    @atomicStore(OptionalNullTerminatedString, &nav_linksections[unwrapped.index], resolved.@"linksection", .release);

    // `bits` is packed: modify a local copy, then publish it last as a unit
    // with release ordering so the status change follows the stores above.
    var bits = nav_bits[unwrapped.index];
    bits.status = if (resolved.is_extern_decl) .type_resolved_extern_decl else .type_resolved;
    bits.is_const = resolved.is_const;
    bits.alignment = resolved.alignment;
    bits.@"addrspace" = resolved.@"addrspace";
    bits.is_threadlocal = resolved.is_threadlocal;
    @atomicStore(Nav.Repr.Bits, &nav_bits[unwrapped.index], bits, .release);
}
11645
/// Resolve the value of a `Nav` with an analysis owner.
/// If its status is already `resolved`, the old value is discarded.
pub fn resolveNavValue(
    ip: *InternPool,
    nav: Nav.Index,
    resolved: struct {
        val: InternPool.Index,
        is_const: bool,
        alignment: Alignment,
        @"linksection": OptionalNullTerminatedString,
        @"addrspace": std.builtin.AddressSpace,
    },
) void {
    const unwrapped = nav.unwrap(ip);

    // Serialize with other writers to this thread's nav data.
    const local = ip.getLocal(unwrapped.tid);
    local.mutate.extra.mutex.lock();
    defer local.mutate.extra.mutex.unlock();

    const navs = local.shared.navs.view();

    const nav_analysis_namespace = navs.items(.analysis_namespace);
    const nav_analysis_zir_index = navs.items(.analysis_zir_index);
    const nav_vals = navs.items(.type_or_val);
    const nav_linksections = navs.items(.@"linksection");
    const nav_bits = navs.items(.bits);

    // Only a `Nav` with an analysis owner may have its value resolved here.
    assert(nav_analysis_namespace[unwrapped.index] != .none);
    assert(nav_analysis_zir_index[unwrapped.index] != .none);

    // Release stores so lock-free readers observe fully-written fields.
    @atomicStore(InternPool.Index, &nav_vals[unwrapped.index], resolved.val, .release);
    @atomicStore(OptionalNullTerminatedString, &nav_linksections[unwrapped.index], resolved.@"linksection", .release);

    // Publish the packed `bits` last, as a unit, with release ordering.
    var bits = nav_bits[unwrapped.index];
    bits.status = .fully_resolved;
    bits.is_const = resolved.is_const;
    bits.alignment = resolved.alignment;
    bits.@"addrspace" = resolved.@"addrspace";
    @atomicStore(Nav.Repr.Bits, &nav_bits[unwrapped.index], bits, .release);
}
11686
/// Allocate a new `Zcu.Namespace` on thread `tid`, preferring to recycle a
/// slot from the free list populated by `destroyNamespace`.
pub fn createNamespace(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    initialization: Zcu.Namespace,
) Allocator.Error!NamespaceIndex {
    const local = ip.getLocal(tid);
    // Fast path: pop a previously destroyed namespace off the free list.
    const free_list_next = local.mutate.namespaces.free_list;
    if (free_list_next != Local.BucketListMutate.free_list_sentinel) {
        const reused_namespace_index: NamespaceIndex = @enumFromInt(free_list_next);
        const reused_namespace = ip.namespacePtr(reused_namespace_index);
        // The next free index is stored inside the recycled namespace itself.
        local.mutate.namespaces.free_list =
            @intFromEnum(@field(reused_namespace, Local.namespace_next_free_field));
        reused_namespace.* = initialization;
        return reused_namespace_index;
    }
    const namespaces = local.getMutableNamespaces(gpa);
    const last_bucket_len = local.mutate.namespaces.last_bucket_len & Local.namespaces_bucket_mask;
    // A wrapped-to-zero length means the last bucket is full (or absent):
    // allocate a fresh fixed-size bucket from the local arena.
    if (last_bucket_len == 0) {
        try namespaces.ensureUnusedCapacity(1);
        var arena = namespaces.arena.promote(namespaces.gpa);
        defer namespaces.arena.* = arena.state;
        namespaces.appendAssumeCapacity(.{try arena.allocator().create(
            [1 << Local.namespaces_bucket_width]Zcu.Namespace,
        )});
    }
    const unwrapped_namespace_index: NamespaceIndex.Unwrapped = .{
        .tid = tid,
        .bucket_index = namespaces.mutate.len - 1,
        .index = last_bucket_len,
    };
    local.mutate.namespaces.last_bucket_len = last_bucket_len + 1;
    const namespace_index = unwrapped_namespace_index.wrap(ip);
    ip.namespacePtr(namespace_index).* = initialization;
    return namespace_index;
}
11723
/// Invalidate `namespace_index` and push it onto this thread's free list so
/// `createNamespace` can reuse the slot. The namespace contents become undefined.
pub fn destroyNamespace(
    ip: *InternPool,
    tid: Zcu.PerThread.Id,
    namespace_index: NamespaceIndex,
) void {
    const local = ip.getLocal(tid);
    const namespace = ip.namespacePtr(namespace_index);
    namespace.* = .{
        .parent = undefined,
        .file_scope = undefined,
        .owner_type = undefined,
        .generation = undefined,
    };
    // Push onto the free list: the destroyed namespace stores the previous
    // list head, and the head becomes this namespace's index.
    @field(namespace, Local.namespace_next_free_field) =
        @enumFromInt(local.mutate.namespaces.free_list);
    local.mutate.namespaces.free_list = @intFromEnum(namespace_index);
}
11741
/// Return the `Zcu.File` pointer stored for `file_index` in the owning
/// thread's acquire-loaded shared file list.
pub fn filePtr(ip: *const InternPool, file_index: FileIndex) *Zcu.File {
    const unwrapped = file_index.unwrap(ip);
    const shared_files = ip.getLocalShared(unwrapped.tid).files.acquire();
    return shared_files.view().items(.file)[unwrapped.index];
}
11747
/// Append `file` to thread `tid`'s file list and return its wrapped index.
pub fn createFile(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    file: File,
) Allocator.Error!FileIndex {
    const files = ip.getLocal(tid).getMutableFiles(gpa);
    // Capture the index of the slot about to be filled, then append.
    const unwrapped: FileIndex.Unwrapped = .{ .tid = tid, .index = files.mutate.len };
    try files.append(file);
    return unwrapped.wrap(ip);
}
11762
/// Comptime selector used by the string-interning API to choose between the
/// string types that may or may not contain embedded null bytes.
const EmbeddedNulls = enum {
    no_embedded_nulls,
    maybe_embedded_nulls,

    /// The non-optional string type corresponding to this variant.
    fn StringType(comptime embedded_nulls: EmbeddedNulls) type {
        return if (embedded_nulls == .no_embedded_nulls) NullTerminatedString else String;
    }

    /// The optional string type corresponding to this variant.
    fn OptionalStringType(comptime embedded_nulls: EmbeddedNulls) type {
        return if (embedded_nulls == .no_embedded_nulls) OptionalNullTerminatedString else OptionalString;
    }
};
11781
/// Intern `slice` (with a NUL terminator appended internally) and return its
/// string index, deduplicating against previously interned strings.
pub fn getOrPutString(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    slice: []const u8,
    comptime embedded_nulls: EmbeddedNulls,
) Allocator.Error!embedded_nulls.StringType() {
    const string_bytes = ip.getLocal(tid).getMutableStringBytes(gpa);
    // Reserve up front so both appends below are infallible.
    try string_bytes.ensureUnusedCapacity(slice.len + 1);
    string_bytes.appendSliceAssumeCapacity(.{slice});
    string_bytes.appendAssumeCapacity(.{0});
    // The just-appended bytes (including the NUL) form the lookup key.
    return ip.getOrPutTrailingString(gpa, tid, @intCast(slice.len + 1), embedded_nulls);
}
11795
/// Format `args` per `format` directly into the string bytes buffer and
/// intern the result; equivalent to `getOrPutString` on the formatted text.
pub fn getOrPutStringFmt(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    comptime format: []const u8,
    args: anytype,
    comptime embedded_nulls: EmbeddedNulls,
) Allocator.Error!embedded_nulls.StringType() {
    // ensure that references to strings in args do not get invalidated
    const format_z = format ++ .{0};
    const len: u32 = @intCast(std.fmt.count(format_z, args));
    const string_bytes = ip.getLocal(tid).getMutableStringBytes(gpa);
    const slice = try string_bytes.addManyAsSlice(len);
    // `count` above measured exactly `len` bytes, so `bufPrint` cannot fail.
    assert((std.fmt.bufPrint(slice[0], format_z, args) catch unreachable).len == len);
    return ip.getOrPutTrailingString(gpa, tid, len, embedded_nulls);
}
11812
/// Like `getOrPutString`, but maps a null slice to `.none`.
pub fn getOrPutStringOpt(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    slice: ?[]const u8,
    comptime embedded_nulls: EmbeddedNulls,
) Allocator.Error!embedded_nulls.OptionalStringType() {
    const bytes = slice orelse return .none;
    const interned = try ip.getOrPutString(gpa, tid, bytes, embedded_nulls);
    return interned.toOptional();
}
11823
/// Uses the last len bytes of strings as the key.
/// Deduplicates against the sharded lock-free string map: on a hit the
/// trailing bytes are discarded and the existing index returned; on a miss
/// the bytes are committed and a new entry published. The map uses open
/// addressing with linear probing, optimistic lock-free reads, and a mutex
/// only on the insert path.
pub fn getOrPutTrailingString(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    len: u32,
    comptime embedded_nulls: EmbeddedNulls,
) Allocator.Error!embedded_nulls.StringType() {
    const local = ip.getLocal(tid);
    const strings = local.getMutableStrings(gpa);
    try strings.ensureUnusedCapacity(1);
    const string_bytes = local.getMutableStringBytes(gpa);
    const start: u32 = @intCast(string_bytes.mutate.len - len);
    // Temporarily drop a trailing NUL so it is not part of the hash key;
    // otherwise reserve space so the NUL can be appended infallibly later.
    if (len > 0 and string_bytes.view().items(.@"0")[string_bytes.mutate.len - 1] == 0) {
        string_bytes.mutate.len -= 1;
    } else {
        try string_bytes.ensureUnusedCapacity(1);
    }
    const key: []const u8 = string_bytes.view().items(.@"0")[start..];
    const value: embedded_nulls.StringType() = @enumFromInt(@intFromEnum((String.Unwrapped{
        .tid = tid,
        .index = strings.mutate.len - 1,
    }).wrap(ip)));
    const has_embedded_null = std.mem.indexOfScalar(u8, key, 0) != null;
    switch (embedded_nulls) {
        .no_embedded_nulls => assert(!has_embedded_null),
        // Strings with embedded nulls can never match a null-terminated map
        // key, so skip the map entirely and commit directly.
        .maybe_embedded_nulls => if (has_embedded_null) {
            string_bytes.appendAssumeCapacity(.{0});
            strings.appendAssumeCapacity(.{string_bytes.mutate.len});
            return value;
        },
    }

    const full_hash = Hash.hash(0, key);
    const hash: u32 = @truncate(full_hash >> 32);
    const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
    var map = shard.shared.string_map.acquire();
    const Map = @TypeOf(map);
    var map_mask = map.header().mask();
    var map_index = hash;
    // Optimistic lock-free probe: if the key is already present, roll back
    // the appended bytes and return the existing index.
    while (true) : (map_index += 1) {
        map_index &= map_mask;
        const entry = &map.entries[map_index];
        const index = entry.acquire().unwrap() orelse break;
        if (entry.hash != hash) continue;
        if (!index.eqlSlice(key, ip)) continue;
        string_bytes.shrinkRetainingCapacity(start);
        return @enumFromInt(@intFromEnum(index));
    }
    shard.mutate.string_map.mutex.lock();
    defer shard.mutate.string_map.mutex.unlock();
    // Another thread may have grown the map before we took the lock; if so,
    // restart the probe against the new table.
    if (map.entries != shard.shared.string_map.entries) {
        map = shard.shared.string_map;
        map_mask = map.header().mask();
        map_index = hash;
    }
    // Second probe, now under the mutex: catches a racing insert of this key.
    while (true) : (map_index += 1) {
        map_index &= map_mask;
        const entry = &map.entries[map_index];
        const index = entry.acquire().unwrap() orelse break;
        if (entry.hash != hash) continue;
        if (!index.eqlSlice(key, ip)) continue;
        string_bytes.shrinkRetainingCapacity(start);
        return @enumFromInt(@intFromEnum(index));
    }
    defer shard.mutate.string_map.len += 1;
    const map_header = map.header().*;
    // Under the load-factor limit: commit the bytes and publish into the
    // empty slot found by the probe above.
    if (shard.mutate.string_map.len < map_header.capacity * 3 / 5) {
        string_bytes.appendAssumeCapacity(.{0});
        strings.appendAssumeCapacity(.{string_bytes.mutate.len});
        const entry = &map.entries[map_index];
        entry.hash = hash;
        entry.release(@enumFromInt(@intFromEnum(value)));
        return value;
    }
    // Over the load factor: allocate a doubled table from the local arena.
    const arena_state = &local.mutate.arena;
    var arena = arena_state.promote(gpa);
    defer arena_state.* = arena.state;
    const new_map_capacity = map_header.capacity * 2;
    const new_map_buf = try arena.allocator().alignedAlloc(
        u8,
        .fromByteUnits(Map.alignment),
        Map.entries_offset + new_map_capacity * @sizeOf(Map.Entry),
    );
    const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };
    new_map.header().* = .{ .capacity = new_map_capacity };
    @memset(new_map.entries[0..new_map_capacity], .{ .value = .none, .hash = undefined });
    // Rehash every occupied entry from the old table into the new one.
    const new_map_mask = new_map.header().mask();
    map_index = 0;
    while (map_index < map_header.capacity) : (map_index += 1) {
        const entry = &map.entries[map_index];
        const index = entry.value.unwrap() orelse continue;
        const item_hash = entry.hash;
        var new_map_index = item_hash;
        while (true) : (new_map_index += 1) {
            new_map_index &= new_map_mask;
            const new_entry = &new_map.entries[new_map_index];
            if (new_entry.value != .none) continue;
            new_entry.* = .{
                .value = index.toOptional(),
                .hash = item_hash,
            };
            break;
        }
    }
    // Find a slot for the new key in the new table, commit, then publish the
    // whole table with a release store.
    map = new_map;
    map_index = hash;
    while (true) : (map_index += 1) {
        map_index &= new_map_mask;
        if (map.entries[map_index].value == .none) break;
    }
    string_bytes.appendAssumeCapacity(.{0});
    strings.appendAssumeCapacity(.{string_bytes.mutate.len});
    map.entries[map_index] = .{
        .value = @enumFromInt(@intFromEnum(value)),
        .hash = hash,
    };
    shard.shared.string_map.release(new_map);
    return value;
}
11944
/// Look up an already-interned null-terminated string without inserting.
/// Returns `.none` if `key` is not interned.
/// Safe to call concurrently with writers: the map pointer and each entry are
/// acquire-loaded, pairing with the release stores in `getOrPutTrailingString`.
pub fn getString(ip: *InternPool, key: []const u8) OptionalNullTerminatedString {
    const full_hash = Hash.hash(0, key);
    const hash: u32 = @truncate(full_hash >> 32);
    const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
    const map = shard.shared.string_map.acquire();
    const map_mask = map.header().mask();
    var map_index = hash;
    while (true) : (map_index += 1) {
        map_index &= map_mask;
        const entry = &map.entries[map_index];
        // Acquire-load the entry, matching the probe loops in
        // `getOrPutTrailingString`: a plain read of `entry.value` would race
        // with a concurrent writer's `entry.release` and could observe the
        // index before the string bytes it refers to become visible.
        const index = entry.acquire().unwrap() orelse return .none;
        if (entry.hash != hash) continue;
        if (index.eqlSlice(key, ip)) return index.toOptional();
    }
}
11960
/// Return the type of the value at `index`. Hot path: static (pre-interned)
/// indices are answered by the switch directly; dynamic indices decode only
/// the item tag, never a full `Key`.
pub fn typeOf(ip: *const InternPool, index: Index) Index {
    // This optimization of static keys is required so that typeOf can be called
    // on static keys that haven't been added yet during static key initialization.
    // An alternative would be to topological sort the static keys, but this would
    // mean that the range of type indices would not be dense.
    return switch (index) {
        // Every statically-known type has type `type`.
        .u0_type,
        .i0_type,
        .u1_type,
        .u8_type,
        .i8_type,
        .u16_type,
        .i16_type,
        .u29_type,
        .u32_type,
        .i32_type,
        .u64_type,
        .i64_type,
        .u80_type,
        .u128_type,
        .i128_type,
        .u256_type,
        .usize_type,
        .isize_type,
        .c_char_type,
        .c_short_type,
        .c_ushort_type,
        .c_int_type,
        .c_uint_type,
        .c_long_type,
        .c_ulong_type,
        .c_longlong_type,
        .c_ulonglong_type,
        .c_longdouble_type,
        .f16_type,
        .f32_type,
        .f64_type,
        .f80_type,
        .f128_type,
        .anyopaque_type,
        .bool_type,
        .void_type,
        .type_type,
        .anyerror_type,
        .comptime_int_type,
        .comptime_float_type,
        .noreturn_type,
        .anyframe_type,
        .null_type,
        .undefined_type,
        .enum_literal_type,
        .ptr_usize_type,
        .ptr_const_comptime_int_type,
        .manyptr_u8_type,
        .manyptr_const_u8_type,
        .manyptr_const_u8_sentinel_0_type,
        .manyptr_const_slice_const_u8_type,
        .slice_const_u8_type,
        .slice_const_u8_sentinel_0_type,
        .slice_const_slice_const_u8_type,
        .optional_type_type,
        .manyptr_const_type_type,
        .slice_const_type_type,
        .vector_8_i8_type,
        .vector_16_i8_type,
        .vector_32_i8_type,
        .vector_64_i8_type,
        .vector_1_u8_type,
        .vector_2_u8_type,
        .vector_4_u8_type,
        .vector_8_u8_type,
        .vector_16_u8_type,
        .vector_32_u8_type,
        .vector_64_u8_type,
        .vector_2_i16_type,
        .vector_4_i16_type,
        .vector_8_i16_type,
        .vector_16_i16_type,
        .vector_32_i16_type,
        .vector_4_u16_type,
        .vector_8_u16_type,
        .vector_16_u16_type,
        .vector_32_u16_type,
        .vector_2_i32_type,
        .vector_4_i32_type,
        .vector_8_i32_type,
        .vector_16_i32_type,
        .vector_4_u32_type,
        .vector_8_u32_type,
        .vector_16_u32_type,
        .vector_2_i64_type,
        .vector_4_i64_type,
        .vector_8_i64_type,
        .vector_2_u64_type,
        .vector_4_u64_type,
        .vector_8_u64_type,
        .vector_1_u128_type,
        .vector_2_u128_type,
        .vector_1_u256_type,
        .vector_4_f16_type,
        .vector_8_f16_type,
        .vector_16_f16_type,
        .vector_32_f16_type,
        .vector_2_f32_type,
        .vector_4_f32_type,
        .vector_8_f32_type,
        .vector_16_f32_type,
        .vector_2_f64_type,
        .vector_4_f64_type,
        .vector_8_f64_type,
        .optional_noreturn_type,
        .anyerror_void_error_union_type,
        .adhoc_inferred_error_set_type,
        .generic_poison_type,
        .empty_tuple_type,
        => .type_type,

        // Statically-known values map directly to their known types.
        .undef => .undefined_type,
        .zero, .one, .negative_one => .comptime_int_type,
        .undef_usize, .zero_usize, .one_usize => .usize_type,
        .undef_u1, .zero_u1, .one_u1 => .u1_type,
        .zero_u8, .one_u8, .four_u8 => .u8_type,
        .void_value => .void_type,
        .unreachable_value => .noreturn_type,
        .null_value => .null_type,
        .undef_bool, .bool_true, .bool_false => .bool_type,
        .empty_tuple => .empty_tuple_type,

        // This optimization on tags is needed so that indexToKey can call
        // typeOf without being recursive.
        _ => {
            const unwrapped_index = index.unwrap(ip);
            const item = unwrapped_index.getItem(ip);
            return switch (item.tag) {
                .removed => unreachable,

                // All type encodings have type `type`.
                .type_int_signed,
                .type_int_unsigned,
                .type_array_big,
                .type_array_small,
                .type_vector,
                .type_pointer,
                .type_slice,
                .type_optional,
                .type_anyframe,
                .type_error_union,
                .type_anyerror_union,
                .type_error_set,
                .type_inferred_error_set,
                .type_enum_auto,
                .type_enum_explicit,
                .type_enum_nonexhaustive,
                .type_opaque,
                .type_struct,
                .type_struct_packed,
                .type_struct_packed_inits,
                .type_tuple,
                .type_union,
                .type_function,
                => .type_type,

                // These tags store the type directly in `data`.
                .undef,
                .opt_null,
                .only_possible_value,
                => @enumFromInt(item.data),

                .simple_type, .simple_value => unreachable, // handled via Index above

                // These tags all have a `ty` field in their extra payload;
                // read it directly from the extra data at its field offset.
                inline .ptr_nav,
                .ptr_comptime_alloc,
                .ptr_uav,
                .ptr_uav_aligned,
                .ptr_comptime_field,
                .ptr_int,
                .ptr_eu_payload,
                .ptr_opt_payload,
                .ptr_elem,
                .ptr_field,
                .ptr_slice,
                .opt_payload,
                .error_union_payload,
                .int_small,
                .int_lazy_align,
                .int_lazy_size,
                .error_set_error,
                .error_union_error,
                .enum_tag,
                .variable,
                .threadlocal_variable,
                .@"extern",
                .func_decl,
                .func_instance,
                .func_coerced,
                .union_value,
                .bytes,
                .aggregate,
                .repeated,
                => |t| {
                    const extra_list = unwrapped_index.getExtra(ip);
                    return @enumFromInt(extra_list.view().items(.@"0")[item.data + std.meta.fieldIndex(t.Payload(), "ty").?]);
                },

                .int_u8 => .u8_type,
                .int_u16 => .u16_type,
                .int_u32 => .u32_type,
                .int_i32 => .i32_type,
                .int_usize => .usize_type,

                .int_comptime_int_u32,
                .int_comptime_int_i32,
                => .comptime_int_type,

                // Note these are stored in limbs data, not extra data.
                .int_positive,
                .int_negative,
                => {
                    const limbs_list = ip.getLocalShared(unwrapped_index.tid).getLimbs();
                    const int: Int = @bitCast(limbs_list.view().items(.@"0")[item.data..][0..Int.limbs_items_len].*);
                    return int.ty;
                },

                .enum_literal => .enum_literal_type,
                .float_f16 => .f16_type,
                .float_f32 => .f32_type,
                .float_f64 => .f64_type,
                .float_f80 => .f80_type,
                .float_f128 => .f128_type,

                .float_c_longdouble_f80,
                .float_c_longdouble_f128,
                => .c_longdouble_type,

                .float_comptime_float => .comptime_float_type,

                .memoized_call => unreachable,
            };
        },

        .none => unreachable,
    };
}
12202
/// Assumes that the enum's field indexes equal its value tags.
pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E {
    const tag_int = ip.indexToKey(i).enum_tag.int;
    const tag_value = ip.indexToKey(tag_int).int.storage.u64;
    return @enumFromInt(tag_value);
}
12208
/// Asserts that `i` refers to a function value and returns its `Key.Func`.
pub fn toFunc(ip: *const InternPool, i: Index) Key.Func {
    return ip.indexToKey(i).func;
}
12212
/// Number of fields/elements of the aggregate type `ty`, excluding any array
/// sentinel. Asserts `ty` is a struct, tuple, array, or vector type.
pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
    switch (ip.indexToKey(ty)) {
        .array_type => |array_type| return array_type.len,
        .vector_type => |vector_type| return vector_type.len,
        .struct_type => return ip.loadStructType(ty).field_types.len,
        .tuple_type => |tuple_type| return tuple_type.types.len,
        else => unreachable,
    }
}
12222
/// Like `aggregateTypeLen`, but counts an array's sentinel element when
/// present. Asserts `ty` is a struct, tuple, array, or vector type.
pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 {
    switch (ip.indexToKey(ty)) {
        .array_type => |array_type| return array_type.lenIncludingSentinel(),
        .vector_type => |vector_type| return vector_type.len,
        .struct_type => return ip.loadStructType(ty).field_types.len,
        .tuple_type => |tuple_type| return tuple_type.types.len,
        else => unreachable,
    }
}
12232
/// Return the return type of `ty`, which must be a function type or a pointer
/// type whose child is a function type.
pub fn funcTypeReturnType(ip: *const InternPool, ty: Index) Index {
    const unwrapped_ty = ty.unwrap(ip);
    const ty_extra = unwrapped_ty.getExtra(ip);
    const ty_item = unwrapped_ty.getItem(ip);
    const child_extra, const child_item = switch (ty_item.tag) {
        // For a pointer, look through to the pointee, which must be a function type.
        .type_pointer => child: {
            const child_index: Index = @enumFromInt(ty_extra.view().items(.@"0")[
                ty_item.data + std.meta.fieldIndex(Tag.TypePointer, "child").?
            ]);
            const unwrapped_child = child_index.unwrap(ip);
            break :child .{ unwrapped_child.getExtra(ip), unwrapped_child.getItem(ip) };
        },
        .type_function => .{ ty_extra, ty_item },
        else => unreachable,
    };
    assert(child_item.tag == .type_function);
    // Read the `return_type` field directly out of the extra data.
    return @enumFromInt(child_extra.view().items(.@"0")[
        child_item.data + std.meta.fieldIndex(Tag.TypeFunction, "return_type").?
    ]);
}
12253
/// Whether `ty` is `noreturn`, or an error set with zero names (which has no
/// possible values).
pub fn isNoReturn(ip: *const InternPool, ty: Index) bool {
    if (ty == .noreturn_type) return true;
    const unwrapped_ty = ty.unwrap(ip);
    const ty_item = unwrapped_ty.getItem(ip);
    if (ty_item.tag != .type_error_set) return false;
    const names_len = unwrapped_ty.getExtra(ip).view().items(.@"0")[ty_item.data + std.meta.fieldIndex(Tag.ErrorSet, "names_len").?];
    return names_len == 0;
}
12267
/// Returns whether `val` is an undefined value: either the canonical `.undef`
/// index or any interned item whose tag is `.undef`.
pub fn isUndef(ip: *const InternPool, val: Index) bool {
    if (val == .undef) return true;
    return val.unwrap(ip).getTag(ip) == .undef;
}
12271
/// Returns whether `val` is an interned `variable` value.
pub fn isVariable(ip: *const InternPool, val: Index) bool {
    return switch (val.unwrap(ip).getTag(ip)) {
        .variable => true,
        else => false,
    };
}
12275
/// Returns the kind of base address that the pointer value `val` is ultimately
/// derived from, or `null` if `val` is not an interned pointer value.
/// Walks through derived pointers (error-union/optional payload, element,
/// field, and slice pointers) until a terminal base address is reached.
pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.BaseAddr.Tag {
    var base = val;
    while (true) {
        const unwrapped_base = base.unwrap(ip);
        const base_item = unwrapped_base.getItem(ip);
        switch (base_item.tag) {
            // Terminal base addresses: report their kind directly.
            .ptr_nav => return .nav,
            .ptr_comptime_alloc => return .comptime_alloc,
            .ptr_uav,
            .ptr_uav_aligned,
            => return .uav,
            .ptr_comptime_field => return .comptime_field,
            .ptr_int => return .int,
            // Derived pointers: load the `base` field of the tag's payload
            // struct from the extra array and keep walking.
            inline .ptr_eu_payload,
            .ptr_opt_payload,
            .ptr_elem,
            .ptr_field,
            => |tag| base = @enumFromInt(unwrapped_base.getExtra(ip).view().items(.@"0")[
                base_item.data + std.meta.fieldIndex(tag.Payload(), "base").?
            ]),
            // Slices: continue with the many-item pointer stored in the
            // payload's `ptr` field.
            inline .ptr_slice => |tag| base = @enumFromInt(unwrapped_base.getExtra(ip).view().items(.@"0")[
                base_item.data + std.meta.fieldIndex(tag.Payload(), "ptr").?
            ]),
            else => return null,
        }
    }
}
12303
/// This is a particularly hot function, so we operate directly on encodings
/// rather than the more straightforward implementation of calling `indexToKey`.
/// Asserts `index` is not `.generic_poison_type`.
/// Asserts `index` is a type (not a value); value indices hit `unreachable`.
pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
    return switch (index) {
        // Statically-interned integer types.
        .u0_type,
        .i0_type,
        .u1_type,
        .u8_type,
        .i8_type,
        .u16_type,
        .i16_type,
        .u29_type,
        .u32_type,
        .i32_type,
        .u64_type,
        .i64_type,
        .u80_type,
        .u128_type,
        .i128_type,
        .u256_type,
        .usize_type,
        .isize_type,
        .c_char_type,
        .c_short_type,
        .c_ushort_type,
        .c_int_type,
        .c_uint_type,
        .c_long_type,
        .c_ulong_type,
        .c_longlong_type,
        .c_ulonglong_type,
        => .int,

        // Statically-interned float types.
        .c_longdouble_type,
        .f16_type,
        .f32_type,
        .f64_type,
        .f80_type,
        .f128_type,
        => .float,

        .anyopaque_type => .@"opaque",
        .bool_type => .bool,
        .void_type => .void,
        .type_type => .type,
        .anyerror_type, .adhoc_inferred_error_set_type => .error_set,
        .comptime_int_type => .comptime_int,
        .comptime_float_type => .comptime_float,
        .noreturn_type => .noreturn,
        .anyframe_type => .@"anyframe",
        .null_type => .null,
        .undefined_type => .undefined,
        .enum_literal_type => .enum_literal,

        // Statically-interned pointer and slice types.
        .ptr_usize_type,
        .ptr_const_comptime_int_type,
        .manyptr_u8_type,
        .manyptr_const_u8_type,
        .manyptr_const_u8_sentinel_0_type,
        .manyptr_const_slice_const_u8_type,
        .slice_const_u8_type,
        .slice_const_u8_sentinel_0_type,
        .slice_const_slice_const_u8_type,
        .manyptr_const_type_type,
        .slice_const_type_type,
        => .pointer,

        // Statically-interned vector types.
        .vector_8_i8_type,
        .vector_16_i8_type,
        .vector_32_i8_type,
        .vector_64_i8_type,
        .vector_1_u8_type,
        .vector_2_u8_type,
        .vector_4_u8_type,
        .vector_8_u8_type,
        .vector_16_u8_type,
        .vector_32_u8_type,
        .vector_64_u8_type,
        .vector_2_i16_type,
        .vector_4_i16_type,
        .vector_8_i16_type,
        .vector_16_i16_type,
        .vector_32_i16_type,
        .vector_4_u16_type,
        .vector_8_u16_type,
        .vector_16_u16_type,
        .vector_32_u16_type,
        .vector_2_i32_type,
        .vector_4_i32_type,
        .vector_8_i32_type,
        .vector_16_i32_type,
        .vector_4_u32_type,
        .vector_8_u32_type,
        .vector_16_u32_type,
        .vector_2_i64_type,
        .vector_4_i64_type,
        .vector_8_i64_type,
        .vector_2_u64_type,
        .vector_4_u64_type,
        .vector_8_u64_type,
        .vector_1_u128_type,
        .vector_2_u128_type,
        .vector_1_u256_type,
        .vector_4_f16_type,
        .vector_8_f16_type,
        .vector_16_f16_type,
        .vector_32_f16_type,
        .vector_2_f32_type,
        .vector_4_f32_type,
        .vector_8_f32_type,
        .vector_16_f32_type,
        .vector_2_f64_type,
        .vector_4_f64_type,
        .vector_8_f64_type,
        => .vector,

        .optional_type_type => .optional,
        .optional_noreturn_type => .optional,
        .anyerror_void_error_union_type => .error_union,
        .empty_tuple_type => .@"struct",

        .generic_poison_type => unreachable,

        // values, not types
        .undef => unreachable,
        .undef_bool => unreachable,
        .undef_usize => unreachable,
        .undef_u1 => unreachable,
        .zero => unreachable,
        .zero_usize => unreachable,
        .zero_u1 => unreachable,
        .zero_u8 => unreachable,
        .one => unreachable,
        .one_usize => unreachable,
        .one_u1 => unreachable,
        .one_u8 => unreachable,
        .four_u8 => unreachable,
        .negative_one => unreachable,
        .void_value => unreachable,
        .unreachable_value => unreachable,
        .null_value => unreachable,
        .bool_true => unreachable,
        .bool_false => unreachable,
        .empty_tuple => unreachable,

        // Dynamically-interned indices: dispatch on the item's encoding tag.
        _ => switch (index.unwrap(ip).getTag(ip)) {
            .removed => unreachable,

            .type_int_signed,
            .type_int_unsigned,
            => .int,

            .type_array_big,
            .type_array_small,
            => .array,

            .type_vector => .vector,

            .type_pointer,
            .type_slice,
            => .pointer,

            .type_optional => .optional,
            .type_anyframe => .@"anyframe",

            .type_error_union,
            .type_anyerror_union,
            => .error_union,

            .type_error_set,
            .type_inferred_error_set,
            => .error_set,

            .type_enum_auto,
            .type_enum_explicit,
            .type_enum_nonexhaustive,
            => .@"enum",

            .simple_type => unreachable, // handled via Index tag above

            .type_opaque => .@"opaque",

            .type_struct,
            .type_struct_packed,
            .type_struct_packed_inits,
            .type_tuple,
            => .@"struct",

            .type_union => .@"union",

            .type_function => .@"fn",

            // values, not types
            .undef,
            .simple_value,
            .ptr_nav,
            .ptr_comptime_alloc,
            .ptr_uav,
            .ptr_uav_aligned,
            .ptr_comptime_field,
            .ptr_int,
            .ptr_eu_payload,
            .ptr_opt_payload,
            .ptr_elem,
            .ptr_field,
            .ptr_slice,
            .opt_payload,
            .opt_null,
            .int_u8,
            .int_u16,
            .int_u32,
            .int_i32,
            .int_usize,
            .int_comptime_int_u32,
            .int_comptime_int_i32,
            .int_small,
            .int_positive,
            .int_negative,
            .int_lazy_align,
            .int_lazy_size,
            .error_set_error,
            .error_union_error,
            .error_union_payload,
            .enum_literal,
            .enum_tag,
            .float_f16,
            .float_f32,
            .float_f64,
            .float_f80,
            .float_f128,
            .float_c_longdouble_f80,
            .float_c_longdouble_f128,
            .float_comptime_float,
            .variable,
            .threadlocal_variable,
            .@"extern",
            .func_decl,
            .func_instance,
            .func_coerced,
            .only_possible_value,
            .union_value,
            .bytes,
            .aggregate,
            .repeated,
            // memoization, not types
            .memoized_call,
            => unreachable,
        },
        .none => unreachable, // special tag
    };
}
12556
/// Returns whether `func` is an interned function body
/// (declaration, generic instantiation, or coerced function).
pub fn isFuncBody(ip: *const InternPool, func: Index) bool {
    switch (func.unwrap(ip).getTag(ip)) {
        .func_decl, .func_instance, .func_coerced => return true,
        else => return false,
    }
}
12563
/// Returns a pointer to the `FuncAnalysis` word of `func`, which must be a
/// `func_decl`, `func_instance`, or `func_coerced` item. For a coerced
/// function, the pointer refers to the underlying function's analysis word.
/// The pointer aliases the extra array; callers load/store it atomically
/// (see `funcAnalysisUnordered` and the `funcSet*` helpers).
fn funcAnalysisPtr(ip: *const InternPool, func: Index) *FuncAnalysis {
    const unwrapped_func = func.unwrap(ip);
    const extra = unwrapped_func.getExtra(ip);
    const item = unwrapped_func.getItem(ip);
    const extra_index = switch (item.tag) {
        .func_decl => item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?,
        .func_instance => item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?,
        .func_coerced => {
            // Follow the `func` field of the FuncCoerced payload to the
            // underlying function, which may live in a different thread's
            // extra array.
            const extra_index = item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").?;
            const coerced_func_index: Index = @enumFromInt(extra.view().items(.@"0")[extra_index]);
            const unwrapped_coerced_func = coerced_func_index.unwrap(ip);
            const coerced_func_item = unwrapped_coerced_func.getItem(ip);
            return @ptrCast(&unwrapped_coerced_func.getExtra(ip).view().items(.@"0")[
                switch (coerced_func_item.tag) {
                    .func_decl => coerced_func_item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?,
                    .func_instance => coerced_func_item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?,
                    // A func_coerced never wraps another func_coerced.
                    else => unreachable,
                }
            ]);
        },
        else => unreachable,
    };
    return @ptrCast(&extra.view().items(.@"0")[extra_index]);
}
12588
/// Atomically loads the `FuncAnalysis` state of `func` with unordered
/// (monotonic-free) semantics.
pub fn funcAnalysisUnordered(ip: *const InternPool, func: Index) FuncAnalysis {
    const analysis_ptr = ip.funcAnalysisPtr(func);
    return @atomicLoad(FuncAnalysis, analysis_ptr, .unordered);
}
12592
/// Sets the `has_error_trace` flag of the function's analysis word.
/// Serialized against other writers via the owning thread's extra mutex;
/// the final store is atomic so readers may load without the lock.
pub fn funcSetHasErrorTrace(ip: *InternPool, func: Index, has_error_trace: bool) void {
    const unwrapped = func.unwrap(ip);
    const mutex = &ip.getLocal(unwrapped.tid).mutate.extra.mutex;
    mutex.lock();
    defer mutex.unlock();

    const analysis_ptr = ip.funcAnalysisPtr(func);
    var updated = analysis_ptr.*;
    updated.has_error_trace = has_error_trace;
    @atomicStore(FuncAnalysis, analysis_ptr, updated, .release);
}
12604
/// Sets the `disable_instrumentation` flag of the function's analysis word.
/// Serialized against other writers via the owning thread's extra mutex;
/// the final store is atomic so readers may load without the lock.
pub fn funcSetDisableInstrumentation(ip: *InternPool, func: Index) void {
    const unwrapped = func.unwrap(ip);
    const mutex = &ip.getLocal(unwrapped.tid).mutate.extra.mutex;
    mutex.lock();
    defer mutex.unlock();

    const analysis_ptr = ip.funcAnalysisPtr(func);
    var updated = analysis_ptr.*;
    updated.disable_instrumentation = true;
    @atomicStore(FuncAnalysis, analysis_ptr, updated, .release);
}
12616
/// Sets the `disable_intrinsics` flag of the function's analysis word.
/// Serialized against other writers via the owning thread's extra mutex;
/// the final store is atomic so readers may load without the lock.
pub fn funcSetDisableIntrinsics(ip: *InternPool, func: Index) void {
    const unwrapped = func.unwrap(ip);
    const mutex = &ip.getLocal(unwrapped.tid).mutate.extra.mutex;
    mutex.lock();
    defer mutex.unlock();

    const analysis_ptr = ip.funcAnalysisPtr(func);
    var updated = analysis_ptr.*;
    updated.disable_intrinsics = true;
    @atomicStore(FuncAnalysis, analysis_ptr, updated, .release);
}
12628
/// Returns the tracked ZIR instruction of the function's body.
/// For a generic instantiation this resolves through the generic owner's
/// declaration; for a coerced function it recurses on the wrapped function.
pub fn funcZirBodyInst(ip: *const InternPool, func: Index) TrackedInst.Index {
    const unwrapped_func = func.unwrap(ip);
    const item = unwrapped_func.getItem(ip);
    const item_extra = unwrapped_func.getExtra(ip);
    const zir_body_inst_field_index = std.meta.fieldIndex(Tag.FuncDecl, "zir_body_inst").?;
    switch (item.tag) {
        .func_decl => return @enumFromInt(item_extra.view().items(.@"0")[item.data + zir_body_inst_field_index]),
        .func_instance => {
            // An instantiation has no ZIR body of its own; read the
            // `zir_body_inst` field off its generic owner's func_decl.
            const generic_owner_field_index = std.meta.fieldIndex(Tag.FuncInstance, "generic_owner").?;
            const func_decl_index: Index = @enumFromInt(item_extra.view().items(.@"0")[item.data + generic_owner_field_index]);
            const unwrapped_func_decl = func_decl_index.unwrap(ip);
            const func_decl_item = unwrapped_func_decl.getItem(ip);
            const func_decl_extra = unwrapped_func_decl.getExtra(ip);
            assert(func_decl_item.tag == .func_decl);
            return @enumFromInt(func_decl_extra.view().items(.@"0")[func_decl_item.data + zir_body_inst_field_index]);
        },
        .func_coerced => {
            // Look through the coercion and resolve on the wrapped function.
            const uncoerced_func_index: Index = @enumFromInt(item_extra.view().items(.@"0")[
                item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").?
            ]);
            return ip.funcZirBodyInst(uncoerced_func_index);
        },
        else => unreachable,
    }
}
12654
/// Returns the function whose inferred error set is `ies_index`.
/// Asserts `ies_index` is a `type_inferred_error_set` and that the stored
/// function is a `func_decl` or `func_instance`.
pub fn iesFuncIndex(ip: *const InternPool, ies_index: Index) Index {
    const ies_item = ies_index.unwrap(ip).getItem(ip);
    assert(ies_item.tag == .type_inferred_error_set);
    // The item's data word is the backing function's index.
    const func_index: Index = @enumFromInt(ies_item.data);
    const func_tag = func_index.unwrap(ip).getTag(ip);
    assert(func_tag == .func_decl or func_tag == .func_instance);
    return func_index;
}
12665
/// Returns a mutable pointer to the resolved error set type of an inferred
/// error set function. The returned pointer is invalidated when anything is
/// added to `ip`.
fn iesResolvedPtr(ip: *InternPool, ies_index: Index) *Index {
    // An `Index` must be unwrapped before its item can be read; the previous
    // code called `getItem` on the wrapped index directly (cf. `iesFuncIndex`).
    const unwrapped_ies = ies_index.unwrap(ip);
    const ies_item = unwrapped_ies.getItem(ip);
    assert(ies_item.tag == .type_inferred_error_set);
    // `ies_item.data` is the raw `u32` of the backing function's `Index`;
    // convert it before delegating (same pattern as `iesFuncIndex`).
    return ip.funcIesResolvedPtr(@enumFromInt(ies_item.data));
}
12674
/// Returns a mutable pointer to the resolved error set type of an inferred
/// error set function. The returned pointer is invalidated when anything is
/// added to `ip`.
/// Asserts the function has an inferred error set.
fn funcIesResolvedPtr(ip: *const InternPool, func_index: Index) *Index {
    assert(ip.funcAnalysisUnordered(func_index).inferred_error_set);
    const unwrapped_func = func_index.unwrap(ip);
    const func_extra = unwrapped_func.getExtra(ip);
    const func_item = unwrapped_func.getItem(ip);
    // The resolved error set is stored as a trailing word immediately after
    // the fixed fields of the FuncDecl/FuncInstance payload, hence the
    // `fields.len` offsets below.
    const extra_index = switch (func_item.tag) {
        .func_decl => func_item.data + @typeInfo(Tag.FuncDecl).@"struct".fields.len,
        .func_instance => func_item.data + @typeInfo(Tag.FuncInstance).@"struct".fields.len,
        .func_coerced => {
            // Look through the coercion to the underlying function, which may
            // live in a different thread's extra array.
            const uncoerced_func_index: Index = @enumFromInt(func_extra.view().items(.@"0")[
                func_item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").?
            ]);
            const unwrapped_uncoerced_func = uncoerced_func_index.unwrap(ip);
            const uncoerced_func_item = unwrapped_uncoerced_func.getItem(ip);
            return @ptrCast(&unwrapped_uncoerced_func.getExtra(ip).view().items(.@"0")[
                switch (uncoerced_func_item.tag) {
                    .func_decl => uncoerced_func_item.data + @typeInfo(Tag.FuncDecl).@"struct".fields.len,
                    .func_instance => uncoerced_func_item.data + @typeInfo(Tag.FuncInstance).@"struct".fields.len,
                    // A func_coerced never wraps another func_coerced.
                    else => unreachable,
                }
            ]);
        },
        else => unreachable,
    };
    return @ptrCast(&func_extra.view().items(.@"0")[extra_index]);
}
12704
/// Atomically loads the resolved inferred error set of `index` with
/// unordered semantics.
pub fn funcIesResolvedUnordered(ip: *const InternPool, index: Index) Index {
    const resolved_ptr = ip.funcIesResolvedPtr(index);
    return @atomicLoad(Index, resolved_ptr, .unordered);
}
12708
/// Stores `ies` as the resolved inferred error set of the function `index`.
/// Serialized against other writers via the owning thread's extra mutex;
/// the store is atomic so readers may load without the lock.
pub fn funcSetIesResolved(ip: *InternPool, index: Index, ies: Index) void {
    const unwrapped = index.unwrap(ip);
    const mutex = &ip.getLocal(unwrapped.tid).mutate.extra.mutex;
    mutex.lock();
    defer mutex.unlock();
    @atomicStore(Index, ip.funcIesResolvedPtr(index), ies, .release);
}
12717
/// Decodes the `Key.Func` for `index`, asserting it is a `func_decl`.
pub fn funcDeclInfo(ip: *const InternPool, index: Index) Key.Func {
    const unwrapped = index.unwrap(ip);
    const func_item = unwrapped.getItem(ip);
    assert(func_item.tag == .func_decl);
    return extraFuncDecl(unwrapped.tid, unwrapped.getExtra(ip), func_item.data);
}
12724
/// Returns the parameter count of the function type `index`, read directly
/// from the `params_len` field of its `Tag.TypeFunction` payload.
pub fn funcTypeParamsLen(ip: *const InternPool, index: Index) u32 {
    const unwrapped = index.unwrap(ip);
    const item = unwrapped.getItem(ip);
    assert(item.tag == .type_function);
    const params_len_index = item.data + std.meta.fieldIndex(Tag.TypeFunction, "params_len").?;
    return unwrapped.getExtra(ip).view().items(.@"0")[params_len_index];
}
12732
/// Returns the underlying function of `index`: the wrapped function if
/// `index` is a `func_coerced`, otherwise `index` itself.
/// Asserts `index` is a function body.
pub fn unwrapCoercedFunc(ip: *const InternPool, index: Index) Index {
    const unwrapped = index.unwrap(ip);
    const item = unwrapped.getItem(ip);
    switch (item.tag) {
        .func_instance, .func_decl => return index,
        .func_coerced => {
            const func_field_index = item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").?;
            return @enumFromInt(unwrapped.getExtra(ip).view().items(.@"0")[func_field_index]);
        },
        else => unreachable,
    }
}
12744
/// Returns the already-existing field with the same name, if any.
/// Otherwise appends `name` at the next field slot (starting at `names_start`
/// in `extra`) and returns `null`. The caller must have already reserved
/// capacity in both the map and the extra array.
pub fn addFieldName(
    ip: *InternPool,
    extra: Local.Extra,
    names_map: MapIndex,
    names_start: u32,
    name: NullTerminatedString,
) ?u32 {
    const extra_items = extra.view().items(.@"0");
    const map = names_map.get(ip);
    // The next field index equals the number of names inserted so far.
    const field_index = map.count();
    // The names already written to the extra array back the adapted lookup.
    const strings = extra_items[names_start..][0..field_index];
    const adapter: NullTerminatedString.Adapter = .{ .strings = @ptrCast(strings) };
    const gop = map.getOrPutAssumeCapacityAdapted(name, adapter);
    if (gop.found_existing) return @intCast(gop.index);
    // New name: record it in the extra array at its field slot.
    extra_items[names_start + field_index] = @intFromEnum(name);
    return null;
}
12763
/// Used only by `get` for pointer values, and mainly intended to use `Tag.ptr_uav`
/// encoding instead of `Tag.ptr_uav_aligned` when possible.
fn ptrsHaveSameAlignment(ip: *InternPool, a_ty: Index, a_info: Key.PtrType, b_ty: Index) bool {
    if (a_ty == b_ty) return true;
    const b_info = ip.indexToKey(b_ty).ptr_type;
    if (a_info.flags.alignment != b_info.flags.alignment) return false;
    // With equal explicit (non-default) alignments, the child types need not
    // match; with default alignment, they must.
    return a_info.child == b_info.child or a_info.flags.alignment != .none;
}
12772
/// Thread-safe set of all error names in the program. Each name is assigned a
/// stable, dense `Index` (the global error tag value); index 0 is reserved for
/// the empty name. Lookups are lock-free; insertions take a mutex.
const GlobalErrorSet = struct {
    // Data read by any thread without locking; fields are published with
    // release stores and read with acquire loads. Cache-line aligned,
    // presumably to avoid false sharing with `mutate` — TODO confirm.
    shared: struct {
        names: Names,
        map: Shard.Map(GlobalErrorSet.Index),
    } align(std.atomic.cache_line),
    // Writer-side state; `map.mutex` serializes insertions.
    mutate: struct {
        names: Local.ListMutate,
        map: struct { mutex: std.Thread.Mutex },
    } align(std.atomic.cache_line),

    const Names = Local.List(struct { NullTerminatedString });

    const empty: GlobalErrorSet = .{
        .shared = .{
            .names = Names.empty,
            .map = Shard.Map(GlobalErrorSet.Index).empty,
        },
        .mutate = .{
            .names = Local.ListMutate.empty,
            .map = .{ .mutex = .{} },
        },
    };

    // 1-based error tag value; `none` (0) corresponds to the empty name.
    const Index = enum(Zcu.ErrorInt) {
        none = 0,
        _,
    };

    /// Not thread-safe, may only be called from the main thread.
    pub fn getNamesFromMainThread(ges: *const GlobalErrorSet) []const NullTerminatedString {
        const len = ges.mutate.names.len;
        return if (len > 0) ges.shared.names.view().items(.@"0")[0..len] else &.{};
    }

    /// Returns the index for `name`, interning it if it is not yet present.
    /// Safe to call from multiple threads concurrently.
    fn getErrorValue(
        ges: *GlobalErrorSet,
        gpa: Allocator,
        arena_state: *std.heap.ArenaAllocator.State,
        name: NullTerminatedString,
    ) Allocator.Error!GlobalErrorSet.Index {
        if (name == .empty) return .none;
        const hash = std.hash.int(@intFromEnum(name));
        // Optimistic, lock-free probe: linear scan from the hash slot until
        // an empty entry (not found) or a matching name (found).
        var map = ges.shared.map.acquire();
        const Map = @TypeOf(map);
        var map_mask = map.header().mask();
        const names = ges.shared.names.acquire();
        var map_index = hash;
        while (true) : (map_index += 1) {
            map_index &= map_mask;
            const entry = &map.entries[map_index];
            const index = entry.acquire();
            if (index == .none) break;
            if (entry.hash != hash) continue;
            if (names.view().items(.@"0")[@intFromEnum(index) - 1] == name) return index;
        }
        // Not found: take the writer lock and re-check, since another thread
        // may have inserted the name (or replaced the map) in the meantime.
        ges.mutate.map.mutex.lock();
        defer ges.mutate.map.mutex.unlock();
        if (map.entries != ges.shared.map.entries) {
            // The map was reallocated since our lock-free probe; restart the
            // probe against the current map.
            map = ges.shared.map;
            map_mask = map.header().mask();
            map_index = hash;
        }
        while (true) : (map_index += 1) {
            map_index &= map_mask;
            const entry = &map.entries[map_index];
            const index = entry.value;
            if (index == .none) break;
            if (entry.hash != hash) continue;
            if (names.view().items(.@"0")[@intFromEnum(index) - 1] == name) return index;
        }
        const mutable_names: Names.Mutable = .{
            .gpa = gpa,
            .arena = arena_state,
            .mutate = &ges.mutate.names,
            .list = &ges.shared.names,
        };
        try mutable_names.ensureUnusedCapacity(1);
        const map_header = map.header().*;
        // Fast path: room left under the load factor (3/5); publish the new
        // entry in place via a release store.
        if (ges.mutate.names.len < map_header.capacity * 3 / 5) {
            mutable_names.appendAssumeCapacity(.{name});
            const index: GlobalErrorSet.Index = @enumFromInt(mutable_names.mutate.len);
            const entry = &map.entries[map_index];
            entry.hash = hash;
            entry.release(index);
            return index;
        }
        // Slow path: grow the map (double capacity), rehash every live entry,
        // then publish the new map with a release store. The old map stays
        // valid for concurrent readers because it is arena-allocated.
        var arena = arena_state.promote(gpa);
        defer arena_state.* = arena.state;
        const new_map_capacity = map_header.capacity * 2;
        const new_map_buf = try arena.allocator().alignedAlloc(
            u8,
            .fromByteUnits(Map.alignment),
            Map.entries_offset + new_map_capacity * @sizeOf(Map.Entry),
        );
        const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };
        new_map.header().* = .{ .capacity = new_map_capacity };
        @memset(new_map.entries[0..new_map_capacity], .{ .value = .none, .hash = undefined });
        const new_map_mask = new_map.header().mask();
        map_index = 0;
        while (map_index < map_header.capacity) : (map_index += 1) {
            const entry = &map.entries[map_index];
            const index = entry.value;
            if (index == .none) continue;
            const item_hash = entry.hash;
            var new_map_index = item_hash;
            while (true) : (new_map_index += 1) {
                new_map_index &= new_map_mask;
                const new_entry = &new_map.entries[new_map_index];
                if (new_entry.value != .none) continue;
                new_entry.* = .{
                    .value = index,
                    .hash = item_hash,
                };
                break;
            }
        }
        // Find a free slot for the new name in the grown map.
        map = new_map;
        map_index = hash;
        while (true) : (map_index += 1) {
            map_index &= new_map_mask;
            if (map.entries[map_index].value == .none) break;
        }
        mutable_names.appendAssumeCapacity(.{name});
        const index: GlobalErrorSet.Index = @enumFromInt(mutable_names.mutate.len);
        map.entries[map_index] = .{ .value = index, .hash = hash };
        ges.shared.map.release(new_map);
        return index;
    }

    /// Lock-free lookup of `name`; returns `null` if it was never interned.
    fn getErrorValueIfExists(
        ges: *const GlobalErrorSet,
        name: NullTerminatedString,
    ) ?GlobalErrorSet.Index {
        if (name == .empty) return .none;
        const hash = std.hash.int(@intFromEnum(name));
        const map = ges.shared.map.acquire();
        const map_mask = map.header().mask();
        const names_items = ges.shared.names.acquire().view().items(.@"0");
        var map_index = hash;
        while (true) : (map_index += 1) {
            map_index &= map_mask;
            const entry = &map.entries[map_index];
            const index = entry.acquire();
            if (index == .none) return null;
            if (entry.hash != hash) continue;
            if (names_items[@intFromEnum(index) - 1] == name) return index;
        }
    }
};
12922
/// Returns the global error tag value for `name`, interning it if needed.
pub fn getErrorValue(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    name: NullTerminatedString,
) Allocator.Error!Zcu.ErrorInt {
    const arena_state = &ip.getLocal(tid).mutate.arena;
    const index = try ip.global_error_set.getErrorValue(gpa, arena_state, name);
    return @intFromEnum(index);
}
12931
/// Returns the global error tag value for `name`, or `null` if it was never
/// interned. Lock-free.
pub fn getErrorValueIfExists(ip: *const InternPool, name: NullTerminatedString) ?Zcu.ErrorInt {
    const index = ip.global_error_set.getErrorValueIfExists(name) orelse return null;
    return @intFromEnum(index);
}
12935
/// Compact 18-bit encoding of `std.builtin.CallingConvention` for storage in
/// the intern pool. `pack`/`unpack` are lossless inverses for every variant
/// handled below.
const PackedCallingConvention = packed struct(u18) {
    tag: std.builtin.CallingConvention.Tag,
    /// May be ignored depending on `tag`.
    incoming_stack_alignment: Alignment,
    /// Interpretation depends on `tag`.
    extra: u4,

    // Dispatches at comptime on the payload type of each union variant so new
    // variants with an unhandled payload type fail to compile (`else =>
    // comptime unreachable`).
    fn pack(cc: std.builtin.CallingConvention) PackedCallingConvention {
        return switch (cc) {
            inline else => |pl, tag| switch (@TypeOf(pl)) {
                void => .{
                    .tag = tag,
                    .incoming_stack_alignment = .none, // unused
                    .extra = 0, // unused
                },
                std.builtin.CallingConvention.CommonOptions => .{
                    .tag = tag,
                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
                    .extra = 0, // unused
                },
                std.builtin.CallingConvention.X86RegparmOptions => .{
                    .tag = tag,
                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
                    .extra = pl.register_params,
                },
                std.builtin.CallingConvention.ArcInterruptOptions => .{
                    .tag = tag,
                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
                    .extra = @intFromEnum(pl.type),
                },
                std.builtin.CallingConvention.ArmInterruptOptions => .{
                    .tag = tag,
                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
                    .extra = @intFromEnum(pl.type),
                },
                std.builtin.CallingConvention.MicroblazeInterruptOptions => .{
                    .tag = tag,
                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
                    .extra = @intFromEnum(pl.type),
                },
                std.builtin.CallingConvention.MipsInterruptOptions => .{
                    .tag = tag,
                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
                    .extra = @intFromEnum(pl.mode),
                },
                std.builtin.CallingConvention.RiscvInterruptOptions => .{
                    .tag = tag,
                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
                    .extra = @intFromEnum(pl.mode),
                },
                std.builtin.CallingConvention.ShInterruptOptions => .{
                    .tag = tag,
                    .incoming_stack_alignment = .fromByteUnits(pl.incoming_stack_alignment orelse 0),
                    .extra = @intFromEnum(pl.save),
                },
                else => comptime unreachable,
            },
        };
    }

    // Inverse of `pack`: reconstructs the union variant from the stored tag,
    // re-expanding `extra` into the variant-specific field.
    fn unpack(cc: PackedCallingConvention) std.builtin.CallingConvention {
        return switch (cc.tag) {
            inline else => |tag| @unionInit(
                std.builtin.CallingConvention,
                @tagName(tag),
                switch (@FieldType(std.builtin.CallingConvention, @tagName(tag))) {
                    void => {},
                    std.builtin.CallingConvention.CommonOptions => .{
                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
                    },
                    std.builtin.CallingConvention.X86RegparmOptions => .{
                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
                        .register_params = @intCast(cc.extra),
                    },
                    std.builtin.CallingConvention.ArcInterruptOptions => .{
                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
                        .type = @enumFromInt(cc.extra),
                    },
                    std.builtin.CallingConvention.ArmInterruptOptions => .{
                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
                        .type = @enumFromInt(cc.extra),
                    },
                    std.builtin.CallingConvention.MicroblazeInterruptOptions => .{
                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
                        .type = @enumFromInt(cc.extra),
                    },
                    std.builtin.CallingConvention.MipsInterruptOptions => .{
                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
                        .mode = @enumFromInt(cc.extra),
                    },
                    std.builtin.CallingConvention.RiscvInterruptOptions => .{
                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
                        .mode = @enumFromInt(cc.extra),
                    },
                    std.builtin.CallingConvention.ShInterruptOptions => .{
                        .incoming_stack_alignment = cc.incoming_stack_alignment.toByteUnits(),
                        .save = @enumFromInt(cc.extra),
                    },
                    else => comptime unreachable,
                },
            ),
        };
    }
};