   1//! Both types and values are canonically represented by a single 32-bit integer
   2//! which is an index into an `InternPool` data structure.
   3//! This struct abstracts around this storage by providing methods only
   4//! applicable to types rather than values in general.
   5
   6const std = @import("std");
   7const builtin = @import("builtin");
   8const Allocator = std.mem.Allocator;
   9const Value = @import("Value.zig");
  10const assert = std.debug.assert;
  11const Target = std.Target;
  12const Zcu = @import("Zcu.zig");
  13const log = std.log.scoped(.Type);
  14const target_util = @import("target.zig");
  15const Sema = @import("Sema.zig");
  16const InternPool = @import("InternPool.zig");
  17const Alignment = InternPool.Alignment;
  18const Zir = std.zig.Zir;
  19const Type = @This();
  20const SemaError = Zcu.SemaError;
  21
  22ip_index: InternPool.Index,
  23
  24pub fn zigTypeTag(ty: Type, zcu: *const Zcu) std.builtin.TypeId {
  25    return zcu.intern_pool.zigTypeTag(ty.toIntern());
  26}
  27
  28pub fn baseZigTypeTag(self: Type, mod: *Zcu) std.builtin.TypeId {
  29    return switch (self.zigTypeTag(mod)) {
  30        .error_union => self.errorUnionPayload(mod).baseZigTypeTag(mod),
  31        .optional => {
  32            return self.optionalChild(mod).baseZigTypeTag(mod);
  33        },
  34        else => |t| t,
  35    };
  36}
  37
/// Asserts the type is resolved.
/// Whether values of this type can be compared against other values of the
/// same type. `is_equality_cmp` distinguishes `==`/`!=` (true) from ordered
/// comparisons such as `<` and `>=` (false).
pub fn isSelfComparable(ty: Type, zcu: *const Zcu, is_equality_cmp: bool) bool {
    return switch (ty.zigTypeTag(zcu)) {
        // Numeric types support both equality and ordered comparison.
        .int,
        .float,
        .comptime_float,
        .comptime_int,
        => true,

        // A vector compares element-wise; defer to the element type.
        .vector => ty.elemType2(zcu).isSelfComparable(zcu, is_equality_cmp),

        // These types support equality comparison only.
        .bool,
        .type,
        .void,
        .error_set,
        .@"fn",
        .@"opaque",
        .@"anyframe",
        .@"enum",
        .enum_literal,
        => is_equality_cmp,

        // Never directly comparable.
        .noreturn,
        .array,
        .undefined,
        .null,
        .error_union,
        .@"union",
        .frame,
        => false,

        // Struct equality is only supported for packed structs.
        .@"struct" => is_equality_cmp and ty.containerLayout(zcu) == .@"packed",
        // Slices are excluded; C pointers additionally allow ordered comparison.
        .pointer => !ty.isSlice(zcu) and (is_equality_cmp or ty.isCPtr(zcu)),
        // An optional is equality-comparable when its child is.
        .optional => {
            if (!is_equality_cmp) return false;
            return ty.optionalChild(zcu).isSelfComparable(zcu, is_equality_cmp);
        },
    };
}
  77
  78/// If it is a function pointer, returns the function type. Otherwise returns null.
  79pub fn castPtrToFn(ty: Type, zcu: *const Zcu) ?Type {
  80    if (ty.zigTypeTag(zcu) != .pointer) return null;
  81    const elem_ty = ty.childType(zcu);
  82    if (elem_ty.zigTypeTag(zcu) != .@"fn") return null;
  83    return elem_ty;
  84}
  85
  86/// Asserts the type is a pointer.
  87pub fn ptrIsMutable(ty: Type, zcu: *const Zcu) bool {
  88    return !zcu.intern_pool.indexToKey(ty.toIntern()).ptr_type.flags.is_const;
  89}
  90
/// Aggregated information about an array type, as returned by `arrayInfo`.
pub const ArrayInfo = struct {
    // The array's element type.
    elem_type: Type,
    // The sentinel value terminating the array, if any.
    sentinel: ?Value = null,
    // Number of elements, not including any sentinel.
    len: u64,
};
  96
  97pub fn arrayInfo(self: Type, zcu: *const Zcu) ArrayInfo {
  98    return .{
  99        .len = self.arrayLen(zcu),
 100        .sentinel = self.sentinel(zcu),
 101        .elem_type = self.childType(zcu),
 102    };
 103}
 104
 105pub fn ptrInfo(ty: Type, zcu: *const Zcu) InternPool.Key.PtrType {
 106    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
 107        .ptr_type => |p| p,
 108        .opt_type => |child| switch (zcu.intern_pool.indexToKey(child)) {
 109            .ptr_type => |p| p,
 110            else => unreachable,
 111        },
 112        else => unreachable,
 113    };
 114}
 115
 116pub fn eql(a: Type, b: Type, zcu: *const Zcu) bool {
 117    _ = zcu; // TODO: remove this parameter
 118    // The InternPool data structure hashes based on Key to make interned objects
 119    // unique. An Index can be treated simply as u32 value for the
 120    // purpose of Type/Value hashing and equality.
 121    return a.toIntern() == b.toIntern();
 122}
 123
/// Compile-time trap: a `Type` cannot be formatted directly because printing
/// it requires `Zcu` access. Use `fmt` or `fmtDebug` instead.
pub fn format(ty: Type, writer: *std.Io.Writer) !void {
    _ = ty;
    _ = writer;
    @compileError("do not format types directly; use either ty.fmtDebug() or ty.fmt()");
}
 129
/// Formatter adapter which routes printing through `Format.default`.
pub const Formatter = std.fmt.Alt(Format, Format.default);

/// Returns a formatter that prints this type with full `Zcu` access,
/// suitable for use in `std.fmt` format strings.
pub fn fmt(ty: Type, pt: Zcu.PerThread) Formatter {
    return .{ .data = .{
        .ty = ty,
        .pt = pt,
    } };
}
 138
/// Context captured by `fmt`: the type to print paired with the per-thread
/// `Zcu` handle needed to print it.
const Format = struct {
    ty: Type,
    pt: Zcu.PerThread,

    // Invoked by `std.fmt.Alt`; delegates to `print` with no dedupe context.
    fn default(f: Format, writer: *std.Io.Writer) std.Io.Writer.Error!void {
        return print(f.ty, writer, f.pt, null);
    }
};
 147
/// Returns a formatter that prints only the raw intern pool index (see `dump`);
/// usable without a `Zcu`, so it is suitable for debugging.
pub fn fmtDebug(ty: Type) std.fmt.Alt(Type, dump) {
    return .{ .data = ty };
}
 151
 152/// This is a debug function. In order to print types in a meaningful way
 153/// we also need access to the module.
 154pub fn dump(start_type: Type, writer: *std.Io.Writer) std.Io.Writer.Error!void {
 155    return writer.print("{any}", .{start_type.ip_index});
 156}
 157
/// Prints a name suitable for `@typeName`.
/// TODO: take an `opt_sema` to pass to `fmtValue` when printing sentinels.
pub fn print(ty: Type, writer: *std.Io.Writer, pt: Zcu.PerThread, ctx: ?*Comparison) std.Io.Writer.Error!void {
    // When a dedupe context is supplied, a repeated type may be replaced by a
    // placeholder instead of being printed in full.
    if (ctx) |c| {
        const should_dedupe = shouldDedupeType(ty, c, pt) catch |err| switch (err) {
            error.OutOfMemory => return error.WriteFailed,
        };
        switch (should_dedupe) {
            .dont_dedupe => {},
            .dedupe => |placeholder| return placeholder.format(writer),
        }
    }

    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .undef => return writer.writeAll("@as(type, undefined)"),
        .int_type => |int_type| {
            // Integers print as `iN` / `uN`.
            const sign_char: u8 = switch (int_type.signedness) {
                .signed => 'i',
                .unsigned => 'u',
            };
            return writer.print("{c}{d}", .{ sign_char, int_type.bits });
        },
        .ptr_type => {
            const info = ty.ptrInfo(zcu);

            // Size prefix, with the sentinel folded in when present.
            // Single-item and C pointers cannot carry a sentinel.
            if (info.sentinel != .none) switch (info.flags.size) {
                .one, .c => unreachable,
                .many => try writer.print("[*:{f}]", .{Value.fromInterned(info.sentinel).fmtValue(pt)}),
                .slice => try writer.print("[:{f}]", .{Value.fromInterned(info.sentinel).fmtValue(pt)}),
            } else switch (info.flags.size) {
                .one => try writer.writeAll("*"),
                .many => try writer.writeAll("[*]"),
                .c => try writer.writeAll("[*c]"),
                .slice => try writer.writeAll("[]"),
            }
            if (info.flags.is_allowzero and info.flags.size != .c) try writer.writeAll("allowzero ");
            // An `align(...)` clause is needed when the alignment, the packed
            // host size, or a vector index deviates from the default.
            if (info.flags.alignment != .none or
                info.packed_offset.host_size != 0 or
                info.flags.vector_index != .none)
            {
                // When no explicit alignment was requested, print the child's
                // ABI alignment so the extended `align(a:b:c)` form stays valid.
                const alignment = if (info.flags.alignment != .none)
                    info.flags.alignment
                else
                    Type.fromInterned(info.child).abiAlignment(pt.zcu);
                try writer.print("align({d}", .{alignment.toByteUnits() orelse 0});

                if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) {
                    try writer.print(":{d}:{d}", .{
                        info.packed_offset.bit_offset, info.packed_offset.host_size,
                    });
                }
                if (info.flags.vector_index != .none) {
                    try writer.print(":{d}", .{@intFromEnum(info.flags.vector_index)});
                }
                try writer.writeAll(") ");
            }
            if (info.flags.address_space != .generic) {
                try writer.print("addrspace(.{s}) ", .{@tagName(info.flags.address_space)});
            }
            if (info.flags.is_const) try writer.writeAll("const ");
            if (info.flags.is_volatile) try writer.writeAll("volatile ");

            // Finally, recurse into the pointee type.
            try print(Type.fromInterned(info.child), writer, pt, ctx);
            return;
        },
        .array_type => |array_type| {
            if (array_type.sentinel == .none) {
                try writer.print("[{d}]", .{array_type.len});
                try print(Type.fromInterned(array_type.child), writer, pt, ctx);
            } else {
                try writer.print("[{d}:{f}]", .{
                    array_type.len,
                    Value.fromInterned(array_type.sentinel).fmtValue(pt),
                });
                try print(Type.fromInterned(array_type.child), writer, pt, ctx);
            }
            return;
        },
        .vector_type => |vector_type| {
            try writer.print("@Vector({d}, ", .{vector_type.len});
            try print(Type.fromInterned(vector_type.child), writer, pt, ctx);
            try writer.writeAll(")");
            return;
        },
        .opt_type => |child| {
            try writer.writeByte('?');
            return print(Type.fromInterned(child), writer, pt, ctx);
        },
        .error_union_type => |error_union_type| {
            try print(Type.fromInterned(error_union_type.error_set_type), writer, pt, ctx);
            try writer.writeByte('!');
            if (error_union_type.payload_type == .generic_poison_type) {
                try writer.writeAll("anytype");
            } else {
                try print(Type.fromInterned(error_union_type.payload_type), writer, pt, ctx);
            }
            return;
        },
        .inferred_error_set_type => |func_index| {
            // An inferred error set has no name of its own; print an expression
            // that derives it from the owning function's type.
            const func_nav = ip.getNav(zcu.funcInfo(func_index).owner_nav);
            try writer.print("@typeInfo(@typeInfo(@TypeOf({f})).@\"fn\".return_type.?).error_union.error_set", .{
                func_nav.fqn.fmt(ip),
            });
        },
        .error_set_type => |error_set_type| {
            // Sort a copy of the error names so output is deterministic.
            // On OOM, fall back to a placeholder rather than failing the print.
            const NullTerminatedString = InternPool.NullTerminatedString;
            const sorted_names = zcu.gpa.dupe(NullTerminatedString, error_set_type.names.get(ip)) catch {
                zcu.comp.setAllocFailure();
                return writer.writeAll("error{...}");
            };
            defer zcu.gpa.free(sorted_names);

            std.mem.sortUnstable(NullTerminatedString, sorted_names, ip, struct {
                fn lessThan(ip_: *InternPool, lhs: NullTerminatedString, rhs: NullTerminatedString) bool {
                    const lhs_slice = lhs.toSlice(ip_);
                    const rhs_slice = rhs.toSlice(ip_);
                    return std.mem.lessThan(u8, lhs_slice, rhs_slice);
                }
            }.lessThan);

            try writer.writeAll("error{");
            for (sorted_names, 0..) |name, i| {
                if (i != 0) try writer.writeByte(',');
                try writer.print("{f}", .{name.fmt(ip)});
            }
            try writer.writeAll("}");
        },
        .simple_type => |s| switch (s) {
            // These spell exactly like their tag name.
            .f16,
            .f32,
            .f64,
            .f80,
            .f128,
            .usize,
            .isize,
            .c_char,
            .c_short,
            .c_ushort,
            .c_int,
            .c_uint,
            .c_long,
            .c_ulong,
            .c_longlong,
            .c_ulonglong,
            .c_longdouble,
            .anyopaque,
            .bool,
            .void,
            .type,
            .anyerror,
            .comptime_int,
            .comptime_float,
            .noreturn,
            .adhoc_inferred_error_set,
            => return writer.writeAll(@tagName(s)),

            // `@TypeOf(null)` / `@TypeOf(undefined)` have no direct spelling.
            .null,
            .undefined,
            => try writer.print("@TypeOf({s})", .{@tagName(s)}),

            .enum_literal => try writer.writeAll("@EnumLiteral()"),

            .generic_poison => unreachable,
        },
        // Named container types print their fully-qualified name.
        .struct_type => {
            const name = ip.loadStructType(ty.toIntern()).name;
            try writer.print("{f}", .{name.fmt(ip)});
        },
        .tuple_type => |tuple| {
            if (tuple.types.len == 0) {
                return writer.writeAll("@TypeOf(.{})");
            }
            // Print as an anonymous struct body, marking comptime fields and
            // their comptime-known values.
            try writer.writeAll("struct {");
            for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, val, i| {
                try writer.writeAll(if (i == 0) " " else ", ");
                if (val != .none) try writer.writeAll("comptime ");
                try print(Type.fromInterned(field_ty), writer, pt, ctx);
                if (val != .none) try writer.print(" = {f}", .{Value.fromInterned(val).fmtValue(pt)});
            }
            try writer.writeAll(" }");
        },

        .union_type => {
            const name = ip.loadUnionType(ty.toIntern()).name;
            try writer.print("{f}", .{name.fmt(ip)});
        },
        .opaque_type => {
            const name = ip.loadOpaqueType(ty.toIntern()).name;
            try writer.print("{f}", .{name.fmt(ip)});
        },
        .enum_type => {
            const name = ip.loadEnumType(ty.toIntern()).name;
            try writer.print("{f}", .{name.fmt(ip)});
        },
        .func_type => |fn_info| {
            if (fn_info.is_noinline) {
                try writer.writeAll("noinline ");
            }
            try writer.writeAll("fn (");
            const param_types = fn_info.param_types.get(&zcu.intern_pool);
            for (param_types, 0..) |param_ty, i| {
                if (i != 0) try writer.writeAll(", ");
                // comptime/noalias bits are only tracked for the first 32
                // parameters, hence the u5 cast.
                if (std.math.cast(u5, i)) |index| {
                    if (fn_info.paramIsComptime(index)) {
                        try writer.writeAll("comptime ");
                    }
                    if (fn_info.paramIsNoalias(index)) {
                        try writer.writeAll("noalias ");
                    }
                }
                if (param_ty == .generic_poison_type) {
                    try writer.writeAll("anytype");
                } else {
                    try print(Type.fromInterned(param_ty), writer, pt, ctx);
                }
            }
            if (fn_info.is_var_args) {
                if (param_types.len != 0) {
                    try writer.writeAll(", ");
                }
                try writer.writeAll("...");
            }
            try writer.writeAll(") ");
            if (fn_info.cc != .auto) print_cc: {
                // Prefer the `.c` shorthand when the convention matches the
                // target's C calling convention.
                if (zcu.getTarget().cCallingConvention()) |ccc| {
                    if (fn_info.cc.eql(ccc)) {
                        try writer.writeAll("callconv(.c) ");
                        break :print_cc;
                    }
                }
                switch (fn_info.cc) {
                    .auto, .async, .naked, .@"inline" => try writer.print("callconv(.{f}) ", .{
                        std.zig.fmtId(@tagName(fn_info.cc)),
                    }),
                    else => try writer.print("callconv({any}) ", .{fn_info.cc}),
                }
            }
            if (fn_info.return_type == .generic_poison_type) {
                try writer.writeAll("anytype");
            } else {
                try print(Type.fromInterned(fn_info.return_type), writer, pt, ctx);
            }
        },
        .anyframe_type => |child| {
            if (child == .none) return writer.writeAll("anyframe");
            try writer.writeAll("anyframe->");
            return print(Type.fromInterned(child), writer, pt, ctx);
        },

        // values, not types
        .simple_value,
        .variable,
        .@"extern",
        .func,
        .int,
        .err,
        .error_union,
        .enum_literal,
        .enum_tag,
        .empty_enum_value,
        .float,
        .ptr,
        .slice,
        .opt,
        .aggregate,
        .un,
        // memoization, not types
        .memoized_call,
        => unreachable,
    }
}
 431
 432pub fn fromInterned(i: InternPool.Index) Type {
 433    assert(i != .none);
 434    return .{ .ip_index = i };
 435}
 436
 437pub fn toIntern(ty: Type) InternPool.Index {
 438    assert(ty.ip_index != .none);
 439    return ty.ip_index;
 440}
 441
 442pub fn toValue(self: Type) Value {
 443    return Value.fromInterned(self.toIntern());
 444}
 445
// `error.NeedLazy` is surfaced by the `.lazy` resolve strategy when answering
// would require type resolution that has not yet happened.
const RuntimeBitsError = SemaError || error{NeedLazy};
 447
 448pub fn hasRuntimeBits(ty: Type, zcu: *const Zcu) bool {
 449    return hasRuntimeBitsInner(ty, false, .eager, zcu, {}) catch unreachable;
 450}
 451
/// Sema variant of `hasRuntimeBits`: may perform type resolution, so it can
/// fail with a `SemaError`.
pub fn hasRuntimeBitsSema(ty: Type, pt: Zcu.PerThread) SemaError!bool {
    return hasRuntimeBitsInner(ty, false, .sema, pt.zcu, pt.tid) catch |err| switch (err) {
        error.NeedLazy => unreachable, // this would require a resolve strat of lazy
        else => |e| return e,
    };
}
 458
 459pub fn hasRuntimeBitsIgnoreComptime(ty: Type, zcu: *const Zcu) bool {
 460    return hasRuntimeBitsInner(ty, true, .eager, zcu, {}) catch unreachable;
 461}
 462
/// Sema variant of `hasRuntimeBitsIgnoreComptime`: may perform type resolution,
/// so it can fail with a `SemaError`.
pub fn hasRuntimeBitsIgnoreComptimeSema(ty: Type, pt: Zcu.PerThread) SemaError!bool {
    return hasRuntimeBitsInner(ty, true, .sema, pt.zcu, pt.tid) catch |err| switch (err) {
        error.NeedLazy => unreachable, // this would require a resolve strat of lazy
        else => |e| return e,
    };
}
 469
/// true if and only if the type takes up space in memory at runtime.
/// There are two reasons a type will return false:
/// * the type is a comptime-only type. For example, the type `type` itself.
///   - note, however, that a struct can have mixed fields and only the non-comptime-only
///     fields will count towards the ABI size. For example, `struct {T: type, x: i32}`
///     hasRuntimeBits()=true and abiSize()=4
/// * the type has only one possible value, making its ABI size 0.
///   - an enum with an explicit tag type has the ABI size of the integer tag type,
///     making it one-possible-value only if the integer tag type has 0 bits.
/// When `ignore_comptime_only` is true, then types that are comptime-only
/// may return false positives.
pub fn hasRuntimeBitsInner(
    ty: Type,
    ignore_comptime_only: bool,
    comptime strat: ResolveStratLazy,
    zcu: strat.ZcuPtr(),
    tid: strat.Tid(),
) RuntimeBitsError!bool {
    const ip = &zcu.intern_pool;
    return switch (ty.toIntern()) {
        .empty_tuple_type => false,
        else => switch (ip.indexToKey(ty.toIntern())) {
            .int_type => |int_type| int_type.bits != 0,
            .ptr_type => {
                // Pointers to zero-bit types still have a runtime address; however, pointers
                // to comptime-only types do not, with the exception of function pointers.
                if (ignore_comptime_only) return true;
                return switch (strat) {
                    .sema => {
                        const pt = strat.pt(zcu, tid);
                        return !try ty.comptimeOnlySema(pt);
                    },
                    .eager => !ty.comptimeOnly(zcu),
                    .lazy => error.NeedLazy,
                };
            },
            .anyframe_type => true,
            // Arrays and vectors have runtime bits only when nonempty with a
            // runtime element type.
            .array_type => |array_type| return array_type.lenIncludingSentinel() > 0 and
                try Type.fromInterned(array_type.child).hasRuntimeBitsInner(ignore_comptime_only, strat, zcu, tid),
            .vector_type => |vector_type| return vector_type.len > 0 and
                try Type.fromInterned(vector_type.child).hasRuntimeBitsInner(ignore_comptime_only, strat, zcu, tid),
            .opt_type => |child| {
                const child_ty = Type.fromInterned(child);
                if (child_ty.isNoReturn(zcu)) {
                    // Then the optional is comptime-known to be null.
                    return false;
                }
                if (ignore_comptime_only) return true;
                return switch (strat) {
                    .sema => !try child_ty.comptimeOnlyInner(.sema, zcu, tid),
                    .eager => !child_ty.comptimeOnly(zcu),
                    .lazy => error.NeedLazy,
                };
            },
            .error_union_type,
            .error_set_type,
            .inferred_error_set_type,
            => true,

            // These are function *bodies*, not pointers.
            // They return false here because they are comptime-only types.
            // Special exceptions have to be made when emitting functions due to
            // this returning false.
            .func_type => false,

            .simple_type => |t| switch (t) {
                .f16,
                .f32,
                .f64,
                .f80,
                .f128,
                .usize,
                .isize,
                .c_char,
                .c_short,
                .c_ushort,
                .c_int,
                .c_uint,
                .c_long,
                .c_ulong,
                .c_longlong,
                .c_ulonglong,
                .c_longdouble,
                .bool,
                .anyerror,
                .adhoc_inferred_error_set,
                .anyopaque,
                => true,

                // These are false because they are comptime-only types.
                .void,
                .type,
                .comptime_int,
                .comptime_float,
                .noreturn,
                .null,
                .undefined,
                .enum_literal,
                => false,

                .generic_poison => unreachable,
            },
            .struct_type => {
                const struct_type = ip.loadStructType(ty.toIntern());
                if (strat != .eager and struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) {
                    // In this case, we guess that hasRuntimeBits() for this type is true,
                    // and then later if our guess was incorrect, we emit a compile error.
                    return true;
                }
                // Field types must be known before we can inspect them; how we
                // get there depends on the resolve strategy.
                switch (strat) {
                    .sema => try ty.resolveFields(strat.pt(zcu, tid)),
                    .eager => assert(struct_type.haveFieldTypes(ip)),
                    .lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy,
                }
                // A struct has runtime bits iff any non-comptime field does.
                for (0..struct_type.field_types.len) |i| {
                    if (struct_type.comptime_bits.getBit(ip, i)) continue;
                    const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
                    if (try field_ty.hasRuntimeBitsInner(ignore_comptime_only, strat, zcu, tid))
                        return true;
                } else {
                    return false;
                }
            },
            .tuple_type => |tuple| {
                // A tuple has runtime bits iff any non-comptime field does.
                for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
                    if (val != .none) continue; // comptime field
                    if (try Type.fromInterned(field_ty).hasRuntimeBitsInner(
                        ignore_comptime_only,
                        strat,
                        zcu,
                        tid,
                    )) return true;
                }
                return false;
            },

            .union_type => {
                const union_type = ip.loadUnionType(ty.toIntern());
                const union_flags = union_type.flagsUnordered(ip);
                switch (union_flags.runtime_tag) {
                    .none => if (strat != .eager) {
                        // In this case, we guess that hasRuntimeBits() for this type is true,
                        // and then later if our guess was incorrect, we emit a compile error.
                        if (union_type.assumeRuntimeBitsIfFieldTypesWip(ip)) return true;
                    },
                    .safety, .tagged => {},
                }
                // Field types must be known before we can inspect them.
                switch (strat) {
                    .sema => try ty.resolveFields(strat.pt(zcu, tid)),
                    .eager => assert(union_flags.status.haveFieldTypes()),
                    .lazy => if (!union_flags.status.haveFieldTypes())
                        return error.NeedLazy,
                }
                // A runtime tag with runtime bits makes the whole union runtime.
                switch (union_flags.runtime_tag) {
                    .none => {},
                    .safety, .tagged => {
                        const tag_ty = union_type.tagTypeUnordered(ip);
                        assert(tag_ty != .none); // tag_ty should have been resolved above
                        if (try Type.fromInterned(tag_ty).hasRuntimeBitsInner(
                            ignore_comptime_only,
                            strat,
                            zcu,
                            tid,
                        )) {
                            return true;
                        }
                    },
                }
                // Otherwise, check whether any payload field has runtime bits.
                for (0..union_type.field_types.len) |field_index| {
                    const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
                    if (try field_ty.hasRuntimeBitsInner(ignore_comptime_only, strat, zcu, tid))
                        return true;
                } else {
                    return false;
                }
            },

            .opaque_type => true,
            // An enum has the runtime bits of its integer tag type.
            .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsInner(
                ignore_comptime_only,
                strat,
                zcu,
                tid,
            ),

            // values, not types
            .undef,
            .simple_value,
            .variable,
            .@"extern",
            .func,
            .int,
            .err,
            .error_union,
            .enum_literal,
            .enum_tag,
            .empty_enum_value,
            .float,
            .ptr,
            .slice,
            .opt,
            .aggregate,
            .un,
            // memoization, not types
            .memoized_call,
            => unreachable,
        },
    };
}
 679
/// true if and only if the type has a well-defined memory layout
/// readFrom/writeToMemory are supported only for types with a well-
/// defined memory layout
pub fn hasWellDefinedLayout(ty: Type, zcu: *const Zcu) bool {
    const ip = &zcu.intern_pool;
    return switch (ip.indexToKey(ty.toIntern())) {
        .int_type,
        .vector_type,
        => true,

        .error_union_type,
        .error_set_type,
        .inferred_error_set_type,
        .tuple_type,
        .opaque_type,
        .anyframe_type,
        // These are function bodies, not function pointers.
        .func_type,
        => false,

        // An array inherits its element type's layout guarantee.
        .array_type => |array_type| Type.fromInterned(array_type.child).hasWellDefinedLayout(zcu),
        // Only pointer-like optionals (e.g. `?*T`) have a defined layout.
        .opt_type => ty.isPtrLikeOptional(zcu),
        // All pointers except slices (which are ptr+len pairs) are well-defined.
        .ptr_type => |ptr_type| ptr_type.flags.size != .slice,

        .simple_type => |t| switch (t) {
            .f16,
            .f32,
            .f64,
            .f80,
            .f128,
            .usize,
            .isize,
            .c_char,
            .c_short,
            .c_ushort,
            .c_int,
            .c_uint,
            .c_long,
            .c_ulong,
            .c_longlong,
            .c_ulonglong,
            .c_longdouble,
            .bool,
            .void,
            => true,

            .anyerror,
            .adhoc_inferred_error_set,
            .anyopaque,
            .type,
            .comptime_int,
            .comptime_float,
            .noreturn,
            .null,
            .undefined,
            .enum_literal,
            .generic_poison,
            => false,
        },
        // `extern` and `packed` layouts are defined; `auto` is not.
        .struct_type => ip.loadStructType(ty.toIntern()).layout != .auto,
        .union_type => {
            const union_type = ip.loadUnionType(ty.toIntern());
            // A union with a runtime tag never has a defined layout.
            return switch (union_type.flagsUnordered(ip).runtime_tag) {
                .none, .safety => union_type.flagsUnordered(ip).layout != .auto,
                .tagged => false,
            };
        },
        // Enums have a defined layout when the tag type is explicit
        // (or the enum is non-exhaustive).
        .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) {
            .auto => false,
            .explicit, .nonexhaustive => true,
        },

        // values, not types
        .undef,
        .simple_value,
        .variable,
        .@"extern",
        .func,
        .int,
        .err,
        .error_union,
        .enum_literal,
        .enum_tag,
        .empty_enum_value,
        .float,
        .ptr,
        .slice,
        .opt,
        .aggregate,
        .un,
        // memoization, not types
        .memoized_call,
        => unreachable,
    };
}
 775
 776pub fn fnHasRuntimeBits(ty: Type, zcu: *Zcu) bool {
 777    return ty.fnHasRuntimeBitsInner(.normal, zcu, {}) catch unreachable;
 778}
 779
/// Sema variant of `fnHasRuntimeBits`: may perform type resolution, so it can
/// fail with a `SemaError`.
pub fn fnHasRuntimeBitsSema(ty: Type, pt: Zcu.PerThread) SemaError!bool {
    return try ty.fnHasRuntimeBitsInner(.sema, pt.zcu, pt.tid);
}
 783
/// Determines whether a function type has runtime bits, i.e. whether a
/// function with this type can exist at runtime.
/// Asserts that `ty` is a function type.
pub fn fnHasRuntimeBitsInner(
    ty: Type,
    comptime strat: ResolveStrat,
    zcu: strat.ZcuPtr(),
    tid: strat.Tid(),
) SemaError!bool {
    const fn_info = zcu.typeToFunc(ty).?;
    // Generic functions only exist at comptime.
    if (fn_info.is_generic) return false;
    // Var-args functions are always runtime.
    if (fn_info.is_var_args) return true;
    // `callconv(.@"inline")` functions are never materialized as runtime code.
    if (fn_info.cc == .@"inline") return false;
    // Otherwise, runtime unless the return type forces comptime-only calls.
    return !try Type.fromInterned(fn_info.return_type).comptimeOnlyInner(strat, zcu, tid);
}
 799
 800pub fn isFnOrHasRuntimeBits(ty: Type, zcu: *Zcu) bool {
 801    switch (ty.zigTypeTag(zcu)) {
 802        .@"fn" => return ty.fnHasRuntimeBits(zcu),
 803        else => return ty.hasRuntimeBits(zcu),
 804    }
 805}
 806
 807/// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive.
 808pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, zcu: *Zcu) bool {
 809    return switch (ty.zigTypeTag(zcu)) {
 810        .@"fn" => true,
 811        else => return ty.hasRuntimeBitsIgnoreComptime(zcu),
 812    };
 813}
 814
 815pub fn isNoReturn(ty: Type, zcu: *const Zcu) bool {
 816    return zcu.intern_pool.isNoReturn(ty.toIntern());
 817}
 818
 819/// Never returns `none`. Asserts that all necessary type resolution is already done.
 820pub fn ptrAlignment(ty: Type, zcu: *Zcu) Alignment {
 821    return ptrAlignmentInner(ty, .normal, zcu, {}) catch unreachable;
 822}
 823
 824pub fn ptrAlignmentSema(ty: Type, pt: Zcu.PerThread) SemaError!Alignment {
 825    return try ty.ptrAlignmentInner(.sema, pt.zcu, pt.tid);
 826}
 827
 828pub fn ptrAlignmentInner(
 829    ty: Type,
 830    comptime strat: ResolveStrat,
 831    zcu: strat.ZcuPtr(),
 832    tid: strat.Tid(),
 833) !Alignment {
 834    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
 835        .ptr_type => |ptr_type| {
 836            if (ptr_type.flags.alignment != .none) return ptr_type.flags.alignment;
 837            const res = try Type.fromInterned(ptr_type.child).abiAlignmentInner(strat.toLazy(), zcu, tid);
 838            return res.scalar;
 839        },
 840        .opt_type => |child| Type.fromInterned(child).ptrAlignmentInner(strat, zcu, tid),
 841        else => unreachable,
 842    };
 843}
 844
 845pub fn ptrAddressSpace(ty: Type, zcu: *const Zcu) std.builtin.AddressSpace {
 846    return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
 847        .ptr_type => |ptr_type| ptr_type.flags.address_space,
 848        .opt_type => |child| zcu.intern_pool.indexToKey(child).ptr_type.flags.address_space,
 849        else => unreachable,
 850    };
 851}
 852
 853/// May capture a reference to `ty`.
 854/// Returned value has type `comptime_int`.
 855pub fn lazyAbiAlignment(ty: Type, pt: Zcu.PerThread) !Value {
 856    switch (try ty.abiAlignmentInner(.lazy, pt.zcu, pt.tid)) {
 857        .val => |val| return val,
 858        .scalar => |x| return pt.intValue(Type.comptime_int, x.toByteUnits() orelse 0),
 859    }
 860}
 861
/// Result of `abiAlignmentInner`: either a fully-resolved alignment, or a
/// lazy `comptime_int` value (capturing the type) to be resolved later.
pub const AbiAlignmentInner = union(enum) {
    scalar: Alignment,
    val: Value,
};
 866
 867pub const ResolveStratLazy = enum {
 868    /// Return a `lazy_size` or `lazy_align` value if necessary.
 869    /// This value can be resolved later using `Value.resolveLazy`.
 870    lazy,
 871    /// Return a scalar result, expecting all necessary type resolution to be completed.
 872    /// Backends should typically use this, since they must not perform type resolution.
 873    eager,
 874    /// Return a scalar result, performing type resolution as necessary.
 875    /// This should typically be used from semantic analysis.
 876    sema,
 877
 878    pub fn Tid(strat: ResolveStratLazy) type {
 879        return switch (strat) {
 880            .lazy, .sema => Zcu.PerThread.Id,
 881            .eager => void,
 882        };
 883    }
 884
 885    pub fn ZcuPtr(strat: ResolveStratLazy) type {
 886        return switch (strat) {
 887            .eager => *const Zcu,
 888            .sema, .lazy => *Zcu,
 889        };
 890    }
 891
 892    pub fn pt(
 893        comptime strat: ResolveStratLazy,
 894        zcu: strat.ZcuPtr(),
 895        tid: strat.Tid(),
 896    ) switch (strat) {
 897        .lazy, .sema => Zcu.PerThread,
 898        .eager => void,
 899    } {
 900        return switch (strat) {
 901            .lazy, .sema => .{ .tid = tid, .zcu = zcu },
 902            else => {},
 903        };
 904    }
 905};
 906
 907/// The chosen strategy can be easily optimized away in release builds.
 908/// However, in debug builds, it helps to avoid accidentally resolving types in backends.
 909pub const ResolveStrat = enum {
 910    /// Assert that all necessary resolution is completed.
 911    /// Backends should typically use this, since they must not perform type resolution.
 912    normal,
 913    /// Perform type resolution as necessary using `Zcu`.
 914    /// This should typically be used from semantic analysis.
 915    sema,
 916
 917    pub fn Tid(strat: ResolveStrat) type {
 918        return switch (strat) {
 919            .sema => Zcu.PerThread.Id,
 920            .normal => void,
 921        };
 922    }
 923
 924    pub fn ZcuPtr(strat: ResolveStrat) type {
 925        return switch (strat) {
 926            .normal => *const Zcu,
 927            .sema => *Zcu,
 928        };
 929    }
 930
 931    pub fn pt(comptime strat: ResolveStrat, zcu: strat.ZcuPtr(), tid: strat.Tid()) switch (strat) {
 932        .sema => Zcu.PerThread,
 933        .normal => void,
 934    } {
 935        return switch (strat) {
 936            .sema => .{ .tid = tid, .zcu = zcu },
 937            .normal => {},
 938        };
 939    }
 940
 941    pub inline fn toLazy(strat: ResolveStrat) ResolveStratLazy {
 942        return switch (strat) {
 943            .normal => .eager,
 944            .sema => .sema,
 945        };
 946    }
 947};
 948
 949/// Never returns `none`. Asserts that all necessary type resolution is already done.
 950pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment {
 951    return (ty.abiAlignmentInner(.eager, zcu, {}) catch unreachable).scalar;
 952}
 953
 954pub fn abiAlignmentSema(ty: Type, pt: Zcu.PerThread) SemaError!Alignment {
 955    return (try ty.abiAlignmentInner(.sema, pt.zcu, pt.tid)).scalar;
 956}
 957
/// If you pass `eager` you will get back `scalar` and assert the type is resolved.
/// In this case there will be no error, guaranteed.
/// If you pass `lazy` you may get back `scalar` or `val`.
/// If `val` is returned, a reference to `ty` has been captured.
/// If you pass `sema` you will get back `scalar` and resolve the type if
/// necessary, possibly returning a CompileError.
pub fn abiAlignmentInner(
    ty: Type,
    comptime strat: ResolveStratLazy,
    zcu: strat.ZcuPtr(),
    tid: strat.Tid(),
) SemaError!AbiAlignmentInner {
    // `pt` is `void` for the eager strategy; it is only used in lazy/sema branches.
    const pt = strat.pt(zcu, tid);
    const target = zcu.getTarget();
    const ip = &zcu.intern_pool;

    switch (ty.toIntern()) {
        .empty_tuple_type => return .{ .scalar = .@"1" },
        else => switch (ip.indexToKey(ty.toIntern())) {
            .int_type => |int_type| {
                // Zero-bit integers still get alignment 1.
                if (int_type.bits == 0) return .{ .scalar = .@"1" };
                return .{ .scalar = .fromByteUnits(std.zig.target.intAlignment(target, int_type.bits)) };
            },
            .ptr_type, .anyframe_type => {
                return .{ .scalar = ptrAbiAlignment(target) };
            },
            .array_type => |array_type| {
                // An array is aligned as its element type.
                return Type.fromInterned(array_type.child).abiAlignmentInner(strat, zcu, tid);
            },
            .vector_type => |vector_type| {
                if (vector_type.len == 0) return .{ .scalar = .@"1" };
                // Vector alignment is backend-specific.
                switch (zcu.comp.getZigBackend()) {
                    else => {
                        // This is fine because the child type of a vector always has a bit-size known
                        // without needing any type resolution.
                        const elem_bits: u32 = @intCast(Type.fromInterned(vector_type.child).bitSize(zcu));
                        if (elem_bits == 0) return .{ .scalar = .@"1" };
                        // Round the total bit count up to whole bytes, then up to a power of two.
                        const bytes = ((elem_bits * vector_type.len) + 7) / 8;
                        const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
                        return .{ .scalar = Alignment.fromByteUnits(alignment) };
                    },
                    .stage2_c => {
                        // The C backend aligns a vector like its element type.
                        return Type.fromInterned(vector_type.child).abiAlignmentInner(strat, zcu, tid);
                    },
                    .stage2_x86_64 => {
                        if (vector_type.child == .bool_type) {
                            // Bool vectors are bit-packed: `len` bools occupy `len` bits.
                            if (vector_type.len > 256 and target.cpu.has(.x86, .avx512f)) return .{ .scalar = .@"64" };
                            if (vector_type.len > 128 and target.cpu.has(.x86, .avx)) return .{ .scalar = .@"32" };
                            if (vector_type.len > 64) return .{ .scalar = .@"16" };
                            const bytes = std.math.divCeil(u32, vector_type.len, 8) catch unreachable;
                            const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
                            return .{ .scalar = Alignment.fromByteUnits(alignment) };
                        }
                        const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeInner(strat, zcu, tid)).scalar);
                        if (elem_bytes == 0) return .{ .scalar = .@"1" };
                        const bytes = elem_bytes * vector_type.len;
                        // Choose the widest available SIMD register class that the vector exceeds.
                        if (bytes > 32 and target.cpu.has(.x86, .avx512f)) return .{ .scalar = .@"64" };
                        if (bytes > 16 and target.cpu.has(.x86, .avx)) return .{ .scalar = .@"32" };
                        return .{ .scalar = .@"16" };
                    },
                }
            },

            .opt_type => return ty.abiAlignmentInnerOptional(strat, zcu, tid),
            .error_union_type => |info| return ty.abiAlignmentInnerErrorUnion(
                strat,
                zcu,
                tid,
                Type.fromInterned(info.payload_type),
            ),

            .error_set_type, .inferred_error_set_type => {
                // Error sets are integers of `zcu.errorSetBits()` bits.
                const bits = zcu.errorSetBits();
                if (bits == 0) return .{ .scalar = .@"1" };
                return .{ .scalar = .fromByteUnits(std.zig.target.intAlignment(target, bits)) };
            },

            // represents machine code; not a pointer
            .func_type => return .{ .scalar = target_util.minFunctionAlignment(target) },

            .simple_type => |t| switch (t) {
                .bool,
                .anyopaque,
                => return .{ .scalar = .@"1" },

                .usize,
                .isize,
                => return .{ .scalar = .fromByteUnits(std.zig.target.intAlignment(target, target.ptrBitWidth())) },

                // C types defer to the target's C ABI.
                .c_char => return .{ .scalar = cTypeAlign(target, .char) },
                .c_short => return .{ .scalar = cTypeAlign(target, .short) },
                .c_ushort => return .{ .scalar = cTypeAlign(target, .ushort) },
                .c_int => return .{ .scalar = cTypeAlign(target, .int) },
                .c_uint => return .{ .scalar = cTypeAlign(target, .uint) },
                .c_long => return .{ .scalar = cTypeAlign(target, .long) },
                .c_ulong => return .{ .scalar = cTypeAlign(target, .ulong) },
                .c_longlong => return .{ .scalar = cTypeAlign(target, .longlong) },
                .c_ulonglong => return .{ .scalar = cTypeAlign(target, .ulonglong) },
                .c_longdouble => return .{ .scalar = cTypeAlign(target, .longdouble) },

                .f16 => return .{ .scalar = .@"2" },
                .f32 => return .{ .scalar = cTypeAlign(target, .float) },
                // Floats borrow C-type alignment when the C type has the matching
                // bit width; otherwise use a fixed fallback.
                .f64 => switch (target.cTypeBitSize(.double)) {
                    64 => return .{ .scalar = cTypeAlign(target, .double) },
                    else => return .{ .scalar = .@"8" },
                },
                .f80 => switch (target.cTypeBitSize(.longdouble)) {
                    80 => return .{ .scalar = cTypeAlign(target, .longdouble) },
                    else => return .{ .scalar = Type.u80.abiAlignment(zcu) },
                },
                .f128 => switch (target.cTypeBitSize(.longdouble)) {
                    128 => return .{ .scalar = cTypeAlign(target, .longdouble) },
                    else => return .{ .scalar = .@"16" },
                },

                .anyerror, .adhoc_inferred_error_set => {
                    const bits = zcu.errorSetBits();
                    if (bits == 0) return .{ .scalar = .@"1" };
                    return .{ .scalar = .fromByteUnits(std.zig.target.intAlignment(target, bits)) };
                },

                // Zero-bit / comptime-only types get alignment 1.
                .void,
                .type,
                .comptime_int,
                .comptime_float,
                .null,
                .undefined,
                .enum_literal,
                => return .{ .scalar = .@"1" },

                .noreturn => unreachable,
                .generic_poison => unreachable,
            },
            .struct_type => {
                const struct_type = ip.loadStructType(ty.toIntern());
                if (struct_type.layout == .@"packed") {
                    // A packed struct is aligned as its backing integer type.
                    switch (strat) {
                        .sema => try ty.resolveLayout(pt),
                        // Backing int not determined yet: return a lazy value capturing `ty`.
                        .lazy => if (struct_type.backingIntTypeUnordered(ip) == .none) return .{
                            .val = Value.fromInterned(try pt.intern(.{ .int = .{
                                .ty = .comptime_int_type,
                                .storage = .{ .lazy_align = ty.toIntern() },
                            } })),
                        },
                        .eager => {},
                    }
                    return .{ .scalar = Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).abiAlignment(zcu) };
                }

                if (struct_type.flagsUnordered(ip).alignment == .none) switch (strat) {
                    .eager => unreachable, // struct alignment not resolved
                    .sema => try ty.resolveStructAlignment(pt),
                    .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
                        .ty = .comptime_int_type,
                        .storage = .{ .lazy_align = ty.toIntern() },
                    } })) },
                };

                return .{ .scalar = struct_type.flagsUnordered(ip).alignment };
            },
            .tuple_type => |tuple| {
                // A tuple is aligned as the max alignment over its runtime fields.
                var big_align: Alignment = .@"1";
                for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
                    if (val != .none) continue; // comptime field
                    switch (try Type.fromInterned(field_ty).abiAlignmentInner(strat, zcu, tid)) {
                        .scalar => |field_align| big_align = big_align.max(field_align),
                        .val => switch (strat) {
                            .eager => unreachable, // field type alignment not resolved
                            .sema => unreachable, // passed to abiAlignmentInner above
                            .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
                                .ty = .comptime_int_type,
                                .storage = .{ .lazy_align = ty.toIntern() },
                            } })) },
                        },
                    }
                }
                return .{ .scalar = big_align };
            },
            .union_type => {
                const union_type = ip.loadUnionType(ty.toIntern());

                if (union_type.flagsUnordered(ip).alignment == .none) switch (strat) {
                    .eager => unreachable, // union layout not resolved
                    .sema => try ty.resolveUnionAlignment(pt),
                    .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
                        .ty = .comptime_int_type,
                        .storage = .{ .lazy_align = ty.toIntern() },
                    } })) },
                };

                return .{ .scalar = union_type.flagsUnordered(ip).alignment };
            },
            .opaque_type => return .{ .scalar = .@"1" },
            .enum_type => return .{
                // An enum is aligned as its integer tag type.
                .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(zcu),
            },

            // values, not types
            .undef,
            .simple_value,
            .variable,
            .@"extern",
            .func,
            .int,
            .err,
            .error_union,
            .enum_literal,
            .enum_tag,
            .empty_enum_value,
            .float,
            .ptr,
            .slice,
            .opt,
            .aggregate,
            .un,
            // memoization, not types
            .memoized_call,
            => unreachable,
        },
    }
}
1179
/// ABI alignment of an error union type: the error-code alignment, raised to
/// the payload's alignment when the payload has runtime bits.
fn abiAlignmentInnerErrorUnion(
    ty: Type,
    comptime strat: ResolveStratLazy,
    zcu: strat.ZcuPtr(),
    tid: strat.Tid(),
    payload_ty: Type,
) SemaError!AbiAlignmentInner {
    // This code needs to be kept in sync with the equivalent switch prong
    // in abiSizeInner.
    const code_align = Type.anyerror.abiAlignment(zcu);
    switch (strat) {
        .eager, .sema => {
            if (!(payload_ty.hasRuntimeBitsInner(false, strat, zcu, tid) catch |err| switch (err) {
                // `strat == .lazy` is comptime-false in this branch, so
                // `error.NeedLazy` cannot actually occur here.
                error.NeedLazy => if (strat == .lazy) {
                    const pt = strat.pt(zcu, tid);
                    return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
                        .ty = .comptime_int_type,
                        .storage = .{ .lazy_align = ty.toIntern() },
                    } })) };
                } else unreachable,
                else => |e| return e,
            })) {
                // Zero-bit payload: only the error code contributes.
                return .{ .scalar = code_align };
            }
            return .{ .scalar = code_align.max(
                (try payload_ty.abiAlignmentInner(strat, zcu, tid)).scalar,
            ) };
        },
        .lazy => {
            const pt = strat.pt(zcu, tid);
            switch (try payload_ty.abiAlignmentInner(strat, zcu, tid)) {
                .scalar => |payload_align| return .{ .scalar = code_align.max(payload_align) },
                .val => {},
            }
            // Payload alignment not resolvable yet: return a lazy value capturing `ty`.
            return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
                .ty = .comptime_int_type,
                .storage = .{ .lazy_align = ty.toIntern() },
            } })) };
        },
    }
}
1221
/// ABI alignment of an optional type, derived from its child type.
fn abiAlignmentInnerOptional(
    ty: Type,
    comptime strat: ResolveStratLazy,
    zcu: strat.ZcuPtr(),
    tid: strat.Tid(),
) SemaError!AbiAlignmentInner {
    const pt = strat.pt(zcu, tid);
    const target = zcu.getTarget();
    const child_type = ty.optionalChild(zcu);

    // Special-cased children whose alignment is known without resolving layout.
    switch (child_type.zigTypeTag(zcu)) {
        .pointer => return .{ .scalar = ptrAbiAlignment(target) },
        .error_set => return Type.anyerror.abiAlignmentInner(strat, zcu, tid),
        .noreturn => return .{ .scalar = .@"1" },
        else => {},
    }

    switch (strat) {
        .eager, .sema => {
            if (!(child_type.hasRuntimeBitsInner(false, strat, zcu, tid) catch |err| switch (err) {
                // `strat == .lazy` is comptime-false in this branch, so
                // `error.NeedLazy` cannot actually occur here.
                error.NeedLazy => if (strat == .lazy) {
                    return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
                        .ty = .comptime_int_type,
                        .storage = .{ .lazy_align = ty.toIntern() },
                    } })) };
                } else unreachable,
                else => |e| return e,
            })) {
                // Zero-bit child: only the null flag remains.
                return .{ .scalar = .@"1" };
            }
            return child_type.abiAlignmentInner(strat, zcu, tid);
        },
        .lazy => switch (try child_type.abiAlignmentInner(strat, zcu, tid)) {
            // Result is at least 1 even for a zero-alignment child.
            .scalar => |x| return .{ .scalar = x.max(.@"1") },
            // Child alignment not resolvable yet: return a lazy value capturing `ty`.
            .val => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
                .ty = .comptime_int_type,
                .storage = .{ .lazy_align = ty.toIntern() },
            } })) },
        },
    }
}
1263
/// Result of `abiSizeInner`: either a fully-resolved byte size, or a lazy
/// `comptime_int` value (capturing the type) to be resolved later.
const AbiSizeInner = union(enum) {
    scalar: u64,
    val: Value,
};
1268
/// Asserts the type has the ABI size already resolved.
/// Types that return false for hasRuntimeBits() return 0.
pub fn abiSize(ty: Type, zcu: *const Zcu) u64 {
    const result = abiSizeInner(ty, .eager, zcu, {}) catch unreachable;
    return result.scalar;
}
1274
/// May capture a reference to `ty`.
pub fn abiSizeLazy(ty: Type, pt: Zcu.PerThread) !Value {
    return switch (try ty.abiSizeInner(.lazy, pt.zcu, pt.tid)) {
        // Already a (possibly lazy) value — pass it through.
        .val => |val| val,
        // Resolved eagerly; wrap the byte count in a comptime_int value.
        .scalar => |x| try pt.intValue(Type.comptime_int, x),
    };
}
1282
/// Like `abiSize`, but performs any required type resolution via Sema.
pub fn abiSizeSema(ty: Type, pt: Zcu.PerThread) SemaError!u64 {
    const result = try abiSizeInner(ty, .sema, pt.zcu, pt.tid);
    return result.scalar;
}
1286
1287/// If you pass `eager` you will get back `scalar` and assert the type is resolved.
1288/// In this case there will be no error, guaranteed.
1289/// If you pass `lazy` you may get back `scalar` or `val`.
1290/// If `val` is returned, a reference to `ty` has been captured.
1291/// If you pass `sema` you will get back `scalar` and resolve the type if
1292/// necessary, possibly returning a CompileError.
1293pub fn abiSizeInner(
1294    ty: Type,
1295    comptime strat: ResolveStratLazy,
1296    zcu: strat.ZcuPtr(),
1297    tid: strat.Tid(),
1298) SemaError!AbiSizeInner {
1299    const target = zcu.getTarget();
1300    const ip = &zcu.intern_pool;
1301
1302    switch (ty.toIntern()) {
1303        .empty_tuple_type => return .{ .scalar = 0 },
1304
1305        else => switch (ip.indexToKey(ty.toIntern())) {
1306            .int_type => |int_type| {
1307                if (int_type.bits == 0) return .{ .scalar = 0 };
1308                return .{ .scalar = std.zig.target.intByteSize(target, int_type.bits) };
1309            },
1310            .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
1311                .slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 },
1312                else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) },
1313            },
1314            .anyframe_type => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) },
1315
1316            .array_type => |array_type| {
1317                const len = array_type.lenIncludingSentinel();
1318                if (len == 0) return .{ .scalar = 0 };
1319                switch (try Type.fromInterned(array_type.child).abiSizeInner(strat, zcu, tid)) {
1320                    .scalar => |elem_size| return .{ .scalar = len * elem_size },
1321                    .val => switch (strat) {
1322                        .sema, .eager => unreachable,
1323                        .lazy => {
1324                            const pt = strat.pt(zcu, tid);
1325                            return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
1326                                .ty = .comptime_int_type,
1327                                .storage = .{ .lazy_size = ty.toIntern() },
1328                            } })) };
1329                        },
1330                    },
1331                }
1332            },
1333            .vector_type => |vector_type| {
1334                const sub_strat: ResolveStrat = switch (strat) {
1335                    .sema => .sema,
1336                    .eager => .normal,
1337                    .lazy => {
1338                        const pt = strat.pt(zcu, tid);
1339                        return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
1340                            .ty = .comptime_int_type,
1341                            .storage = .{ .lazy_size = ty.toIntern() },
1342                        } })) };
1343                    },
1344                };
1345                const alignment = (try ty.abiAlignmentInner(strat, zcu, tid)).scalar;
1346                const total_bytes = switch (zcu.comp.getZigBackend()) {
1347                    else => total_bytes: {
1348                        const elem_bits = try Type.fromInterned(vector_type.child).bitSizeInner(sub_strat, zcu, tid);
1349                        const total_bits = elem_bits * vector_type.len;
1350                        break :total_bytes (total_bits + 7) / 8;
1351                    },
1352                    .stage2_c => total_bytes: {
1353                        const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeInner(strat, zcu, tid)).scalar);
1354                        break :total_bytes elem_bytes * vector_type.len;
1355                    },
1356                    .stage2_x86_64 => total_bytes: {
1357                        if (vector_type.child == .bool_type) break :total_bytes std.math.divCeil(u32, vector_type.len, 8) catch unreachable;
1358                        const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeInner(strat, zcu, tid)).scalar);
1359                        break :total_bytes elem_bytes * vector_type.len;
1360                    },
1361                };
1362                return .{ .scalar = alignment.forward(total_bytes) };
1363            },
1364
1365            .opt_type => return ty.abiSizeInnerOptional(strat, zcu, tid),
1366
1367            .error_set_type, .inferred_error_set_type => {
1368                const bits = zcu.errorSetBits();
1369                if (bits == 0) return .{ .scalar = 0 };
1370                return .{ .scalar = std.zig.target.intByteSize(target, bits) };
1371            },
1372
1373            .error_union_type => |error_union_type| {
1374                const payload_ty = Type.fromInterned(error_union_type.payload_type);
1375                // This code needs to be kept in sync with the equivalent switch prong
1376                // in abiAlignmentInner.
1377                const code_size = Type.anyerror.abiSize(zcu);
1378                if (!(payload_ty.hasRuntimeBitsInner(false, strat, zcu, tid) catch |err| switch (err) {
1379                    error.NeedLazy => if (strat == .lazy) {
1380                        const pt = strat.pt(zcu, tid);
1381                        return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
1382                            .ty = .comptime_int_type,
1383                            .storage = .{ .lazy_size = ty.toIntern() },
1384                        } })) };
1385                    } else unreachable,
1386                    else => |e| return e,
1387                })) {
1388                    // Same as anyerror.
1389                    return .{ .scalar = code_size };
1390                }
1391                const code_align = Type.anyerror.abiAlignment(zcu);
1392                const payload_align = (try payload_ty.abiAlignmentInner(strat, zcu, tid)).scalar;
1393                const payload_size = switch (try payload_ty.abiSizeInner(strat, zcu, tid)) {
1394                    .scalar => |elem_size| elem_size,
1395                    .val => switch (strat) {
1396                        .sema => unreachable,
1397                        .eager => unreachable,
1398                        .lazy => {
1399                            const pt = strat.pt(zcu, tid);
1400                            return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
1401                                .ty = .comptime_int_type,
1402                                .storage = .{ .lazy_size = ty.toIntern() },
1403                            } })) };
1404                        },
1405                    },
1406                };
1407
1408                var size: u64 = 0;
1409                if (code_align.compare(.gt, payload_align)) {
1410                    size += code_size;
1411                    size = payload_align.forward(size);
1412                    size += payload_size;
1413                    size = code_align.forward(size);
1414                } else {
1415                    size += payload_size;
1416                    size = code_align.forward(size);
1417                    size += code_size;
1418                    size = payload_align.forward(size);
1419                }
1420                return .{ .scalar = size };
1421            },
1422            .func_type => unreachable, // represents machine code; not a pointer
1423            .simple_type => |t| switch (t) {
1424                .bool => return .{ .scalar = 1 },
1425
1426                .f16 => return .{ .scalar = 2 },
1427                .f32 => return .{ .scalar = 4 },
1428                .f64 => return .{ .scalar = 8 },
1429                .f128 => return .{ .scalar = 16 },
1430                .f80 => switch (target.cTypeBitSize(.longdouble)) {
1431                    80 => return .{ .scalar = target.cTypeByteSize(.longdouble) },
1432                    else => return .{ .scalar = Type.u80.abiSize(zcu) },
1433                },
1434
1435                .usize,
1436                .isize,
1437                => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) },
1438
1439                .c_char => return .{ .scalar = target.cTypeByteSize(.char) },
1440                .c_short => return .{ .scalar = target.cTypeByteSize(.short) },
1441                .c_ushort => return .{ .scalar = target.cTypeByteSize(.ushort) },
1442                .c_int => return .{ .scalar = target.cTypeByteSize(.int) },
1443                .c_uint => return .{ .scalar = target.cTypeByteSize(.uint) },
1444                .c_long => return .{ .scalar = target.cTypeByteSize(.long) },
1445                .c_ulong => return .{ .scalar = target.cTypeByteSize(.ulong) },
1446                .c_longlong => return .{ .scalar = target.cTypeByteSize(.longlong) },
1447                .c_ulonglong => return .{ .scalar = target.cTypeByteSize(.ulonglong) },
1448                .c_longdouble => return .{ .scalar = target.cTypeByteSize(.longdouble) },
1449
1450                .anyopaque,
1451                .void,
1452                .type,
1453                .comptime_int,
1454                .comptime_float,
1455                .null,
1456                .undefined,
1457                .enum_literal,
1458                => return .{ .scalar = 0 },
1459
1460                .anyerror, .adhoc_inferred_error_set => {
1461                    const bits = zcu.errorSetBits();
1462                    if (bits == 0) return .{ .scalar = 0 };
1463                    return .{ .scalar = std.zig.target.intByteSize(target, bits) };
1464                },
1465
1466                .noreturn => unreachable,
1467                .generic_poison => unreachable,
1468            },
1469            .struct_type => {
1470                const struct_type = ip.loadStructType(ty.toIntern());
1471                switch (strat) {
1472                    .sema => try ty.resolveLayout(strat.pt(zcu, tid)),
1473                    .lazy => {
1474                        const pt = strat.pt(zcu, tid);
1475                        switch (struct_type.layout) {
1476                            .@"packed" => {
1477                                if (struct_type.backingIntTypeUnordered(ip) == .none) return .{
1478                                    .val = Value.fromInterned(try pt.intern(.{ .int = .{
1479                                        .ty = .comptime_int_type,
1480                                        .storage = .{ .lazy_size = ty.toIntern() },
1481                                    } })),
1482                                };
1483                            },
1484                            .auto, .@"extern" => {
1485                                if (!struct_type.haveLayout(ip)) return .{
1486                                    .val = Value.fromInterned(try pt.intern(.{ .int = .{
1487                                        .ty = .comptime_int_type,
1488                                        .storage = .{ .lazy_size = ty.toIntern() },
1489                                    } })),
1490                                };
1491                            },
1492                        }
1493                    },
1494                    .eager => {},
1495                }
1496                switch (struct_type.layout) {
1497                    .@"packed" => return .{
1498                        .scalar = Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).abiSize(zcu),
1499                    },
1500                    .auto, .@"extern" => {
1501                        assert(struct_type.haveLayout(ip));
1502                        return .{ .scalar = struct_type.sizeUnordered(ip) };
1503                    },
1504                }
1505            },
1506            .tuple_type => |tuple| {
1507                switch (strat) {
1508                    .sema => try ty.resolveLayout(strat.pt(zcu, tid)),
1509                    .lazy, .eager => {},
1510                }
1511                const field_count = tuple.types.len;
1512                if (field_count == 0) {
1513                    return .{ .scalar = 0 };
1514                }
1515                return .{ .scalar = ty.structFieldOffset(field_count, zcu) };
1516            },
1517
1518            .union_type => {
1519                const union_type = ip.loadUnionType(ty.toIntern());
1520                switch (strat) {
1521                    .sema => try ty.resolveLayout(strat.pt(zcu, tid)),
1522                    .lazy => {
1523                        const pt = strat.pt(zcu, tid);
1524                        if (!union_type.flagsUnordered(ip).status.haveLayout()) return .{
1525                            .val = Value.fromInterned(try pt.intern(.{ .int = .{
1526                                .ty = .comptime_int_type,
1527                                .storage = .{ .lazy_size = ty.toIntern() },
1528                            } })),
1529                        };
1530                    },
1531                    .eager => {},
1532                }
1533
1534                assert(union_type.haveLayout(ip));
1535                return .{ .scalar = union_type.sizeUnordered(ip) };
1536            },
1537            .opaque_type => unreachable, // no size available
1538            .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(zcu) },
1539
1540            // values, not types
1541            .undef,
1542            .simple_value,
1543            .variable,
1544            .@"extern",
1545            .func,
1546            .int,
1547            .err,
1548            .error_union,
1549            .enum_literal,
1550            .enum_tag,
1551            .empty_enum_value,
1552            .float,
1553            .ptr,
1554            .slice,
1555            .opt,
1556            .aggregate,
1557            .un,
1558            // memoization, not types
1559            .memoized_call,
1560            => unreachable,
1561        },
1562    }
1563}
1564
/// Computes the ABI byte size of the optional type `ty`.
/// Helper for `abiSizeInner`; see that function for the meaning of `strat`.
/// With `strat == .lazy`, may return a `.val` carrying a `lazy_size` value
/// when the child type's size cannot be computed yet.
fn abiSizeInnerOptional(
    ty: Type,
    comptime strat: ResolveStratLazy,
    zcu: strat.ZcuPtr(),
    tid: strat.Tid(),
) SemaError!AbiSizeInner {
    const child_ty = ty.optionalChild(zcu);

    // `?noreturn` can only ever be null, so it needs no storage.
    if (child_ty.isNoReturn(zcu)) {
        return .{ .scalar = 0 };
    }

    // A child with no runtime bits leaves only the null flag: one byte.
    if (!(child_ty.hasRuntimeBitsInner(false, strat, zcu, tid) catch |err| switch (err) {
        error.NeedLazy => if (strat == .lazy) {
            const pt = strat.pt(zcu, tid);
            return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{
                .ty = .comptime_int_type,
                .storage = .{ .lazy_size = ty.toIntern() },
            } })) };
        } else unreachable,
        else => |e| return e,
    })) return .{ .scalar = 1 };

    // When the optional is represented as the payload itself, its size is
    // exactly the child's size.
    if (ty.optionalReprIsPayload(zcu)) {
        return child_ty.abiSizeInner(strat, zcu, tid);
    }

    const payload_size = switch (try child_ty.abiSizeInner(strat, zcu, tid)) {
        .scalar => |elem_size| elem_size,
        // Child size not resolved: only the lazy strategy can defer; the
        // others are expected to have produced a scalar.
        .val => switch (strat) {
            .sema => unreachable,
            .eager => unreachable,
            .lazy => return .{ .val = Value.fromInterned(try strat.pt(zcu, tid).intern(.{ .int = .{
                .ty = .comptime_int_type,
                .storage = .{ .lazy_size = ty.toIntern() },
            } })) },
        },
    };

    // Optional types are represented as a struct with the child type as the first
    // field and a boolean as the second. Since the child type's abi alignment is
    // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
    // to the child type's ABI alignment.
    return .{
        .scalar = (child_ty.abiAlignment(zcu).toByteUnits() orelse 0) + payload_size,
    };
}
1612
/// ABI alignment of a pointer for `target`: its byte width.
/// Asserts the target pointer width is a whole number of bytes.
pub fn ptrAbiAlignment(target: *const Target) Alignment {
    const ptr_bytes = @divExact(target.ptrBitWidth(), 8);
    return Alignment.fromNonzeroByteUnits(ptr_bytes);
}
1616
/// Returns the size of `ty` in bits. Errors are asserted unreachable, so the
/// type's layout information must already be available.
pub fn bitSize(ty: Type, zcu: *const Zcu) u64 {
    const result = bitSizeInner(ty, .normal, zcu, {});
    return result catch unreachable;
}
1620
/// Like `bitSize`, but uses the `.sema` strategy, so it may perform type
/// resolution and can fail with a `SemaError`.
pub fn bitSizeSema(ty: Type, pt: Zcu.PerThread) SemaError!u64 {
    const zcu = pt.zcu;
    return bitSizeInner(ty, .sema, zcu, pt.tid);
}
1624
/// Returns the size of `ty` in bits.
/// `strat` selects how unresolved types are handled: `.sema` may perform type
/// resolution through the per-thread context and can therefore fail with a
/// `SemaError`. ABI-size queries performed along the way use `strat.toLazy()`.
pub fn bitSizeInner(
    ty: Type,
    comptime strat: ResolveStrat,
    zcu: strat.ZcuPtr(),
    tid: strat.Tid(),
) SemaError!u64 {
    const target = zcu.getTarget();
    const ip = &zcu.intern_pool;

    const strat_lazy: ResolveStratLazy = strat.toLazy();

    switch (ip.indexToKey(ty.toIntern())) {
        .int_type => |int_type| return int_type.bits,
        .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
            // Slices are a pointer plus a length, each pointer-sized.
            .slice => return target.ptrBitWidth() * 2,
            else => return target.ptrBitWidth(),
        },
        .anyframe_type => return target.ptrBitWidth(),

        .array_type => |array_type| {
            const len = array_type.lenIncludingSentinel();
            if (len == 0) return 0;
            const elem_ty: Type = .fromInterned(array_type.child);
            switch (zcu.comp.getZigBackend()) {
                else => {
                    // All elements except the last occupy a full ABI-sized
                    // slot; only the last contributes its raw bit size.
                    const elem_size = (try elem_ty.abiSizeInner(strat_lazy, zcu, tid)).scalar;
                    if (elem_size == 0) return 0;
                    const elem_bit_size = try elem_ty.bitSizeInner(strat, zcu, tid);
                    return (len - 1) * 8 * elem_size + elem_bit_size;
                },
                .stage2_x86_64 => {
                    // This backend counts no padding between elements.
                    const elem_bit_size = try elem_ty.bitSizeInner(strat, zcu, tid);
                    return elem_bit_size * len;
                },
            }
        },
        .vector_type => |vector_type| {
            const child_ty: Type = .fromInterned(vector_type.child);
            const elem_bit_size = try child_ty.bitSizeInner(strat, zcu, tid);
            return elem_bit_size * vector_type.len;
        },
        .opt_type => {
            // Optionals and error unions are not packed so their bitsize
            // includes padding bits.
            return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8;
        },

        .error_set_type, .inferred_error_set_type => return zcu.errorSetBits(),

        .error_union_type => {
            // Optionals and error unions are not packed so their bitsize
            // includes padding bits.
            return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8;
        },
        .func_type => unreachable, // represents machine code; not a pointer
        .simple_type => |t| switch (t) {
            .f16 => return 16,
            .f32 => return 32,
            .f64 => return 64,
            .f80 => return 80,
            .f128 => return 128,

            .usize,
            .isize,
            => return target.ptrBitWidth(),

            // C integer type widths depend on the target ABI.
            .c_char => return target.cTypeBitSize(.char),
            .c_short => return target.cTypeBitSize(.short),
            .c_ushort => return target.cTypeBitSize(.ushort),
            .c_int => return target.cTypeBitSize(.int),
            .c_uint => return target.cTypeBitSize(.uint),
            .c_long => return target.cTypeBitSize(.long),
            .c_ulong => return target.cTypeBitSize(.ulong),
            .c_longlong => return target.cTypeBitSize(.longlong),
            .c_ulonglong => return target.cTypeBitSize(.ulonglong),
            .c_longdouble => return target.cTypeBitSize(.longdouble),

            .bool => return 1,
            .void => return 0,

            .anyerror,
            .adhoc_inferred_error_set,
            => return zcu.errorSetBits(),

            // Comptime-only and opaque types have no runtime bit size.
            .anyopaque => unreachable,
            .type => unreachable,
            .comptime_int => unreachable,
            .comptime_float => unreachable,
            .noreturn => unreachable,
            .null => unreachable,
            .undefined => unreachable,
            .enum_literal => unreachable,
            .generic_poison => unreachable,
        },
        .struct_type => {
            const struct_type = ip.loadStructType(ty.toIntern());
            const is_packed = struct_type.layout == .@"packed";
            if (strat == .sema) {
                const pt = strat.pt(zcu, tid);
                try ty.resolveFields(pt);
                if (is_packed) try ty.resolveLayout(pt);
            }
            // Packed structs have the exact bit size of their backing integer;
            // other layouts round up to the ABI size.
            if (is_packed) {
                return try Type.fromInterned(struct_type.backingIntTypeUnordered(ip))
                    .bitSizeInner(strat, zcu, tid);
            }
            return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8;
        },

        .tuple_type => {
            return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8;
        },

        .union_type => {
            const union_type = ip.loadUnionType(ty.toIntern());
            const is_packed = ty.containerLayout(zcu) == .@"packed";
            if (strat == .sema) {
                const pt = strat.pt(zcu, tid);
                try ty.resolveFields(pt);
                if (is_packed) try ty.resolveLayout(pt);
            }
            if (!is_packed) {
                return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8;
            }
            assert(union_type.flagsUnordered(ip).status.haveFieldTypes());

            // A packed union is as wide as its widest field, with no tag.
            var size: u64 = 0;
            for (0..union_type.field_types.len) |field_index| {
                const field_ty = union_type.field_types.get(ip)[field_index];
                size = @max(size, try Type.fromInterned(field_ty).bitSizeInner(strat, zcu, tid));
            }

            return size;
        },
        .opaque_type => unreachable,
        // Enums are as wide as their tag type.
        .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty)
            .bitSizeInner(strat, zcu, tid),

        // values, not types
        .undef,
        .simple_value,
        .variable,
        .@"extern",
        .func,
        .int,
        .err,
        .error_union,
        .enum_literal,
        .enum_tag,
        .empty_enum_value,
        .float,
        .ptr,
        .slice,
        .opt,
        .aggregate,
        .un,
        // memoization, not types
        .memoized_call,
        => unreachable,
    }
}
1786
/// Returns true if the type's layout is already resolved and it is safe
/// to use `abiSize`, `abiAlignment` and `bitSize` on it.
pub fn layoutIsResolved(ty: Type, zcu: *const Zcu) bool {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .struct_type => return ip.loadStructType(ty.toIntern()).haveLayout(ip),
        .union_type => return ip.loadUnionType(ty.toIntern()).haveLayout(ip),
        .array_type => |array_type| {
            // A zero-length array needs no element layout.
            if (array_type.lenIncludingSentinel() == 0) return true;
            return Type.fromInterned(array_type.child).layoutIsResolved(zcu);
        },
        .opt_type => |child| return Type.fromInterned(child).layoutIsResolved(zcu),
        .error_union_type => |k| return Type.fromInterned(k.payload_type).layoutIsResolved(zcu),
        else => return true,
    }
}
1803
/// Whether `ty` is a single-item pointer (`*T`).
pub fn isSinglePointer(ty: Type, zcu: *const Zcu) bool {
    switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
        .ptr_type => |info| return info.flags.size == .one,
        else => return false,
    }
}
1810
/// Asserts `ty` is a pointer; returns its pointer size classification.
pub fn ptrSize(ty: Type, zcu: *const Zcu) std.builtin.Type.Pointer.Size {
    const size = ty.ptrSizeOrNull(zcu);
    return size.?;
}
1815
/// Returns the pointer size classification, or `null` if `ty` is not a pointer.
pub fn ptrSizeOrNull(ty: Type, zcu: *const Zcu) ?std.builtin.Type.Pointer.Size {
    switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
        .ptr_type => |info| return info.flags.size,
        else => return null,
    }
}
1823
/// Whether `ty` is a slice (`[]T`).
pub fn isSlice(ty: Type, zcu: *const Zcu) bool {
    switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
        .ptr_type => |info| return info.flags.size == .slice,
        else => return false,
    }
}
1830
/// Whether `ty` lowers to a slice at runtime: a slice type itself, or an
/// optional wrapping a non-allowzero slice.
pub fn isSliceAtRuntime(ty: Type, zcu: *const Zcu) bool {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .ptr_type => |info| return info.flags.size == .slice,
        .opt_type => |child| switch (ip.indexToKey(child)) {
            .ptr_type => |info| return info.flags.size == .slice and !info.flags.is_allowzero,
            else => return false,
        },
        else => return false,
    }
}
1841
/// Returns the pointer type backing a slice type's pointer field.
pub fn slicePtrFieldType(ty: Type, zcu: *const Zcu) Type {
    const ptr_index = zcu.intern_pool.slicePtrType(ty.toIntern());
    return Type.fromInterned(ptr_index);
}
1845
/// Whether `ty` is a pointer with the `const` qualifier.
pub fn isConstPtr(ty: Type, zcu: *const Zcu) bool {
    switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
        .ptr_type => |info| return info.flags.is_const,
        else => return false,
    }
}
1852
/// Whether `ty` is a pointer with the `volatile` qualifier.
pub fn isVolatilePtr(ty: Type, zcu: *const Zcu) bool {
    const ip = &zcu.intern_pool;
    return isVolatilePtrIp(ty, ip);
}
1856
/// Like `isVolatilePtr`, but operates directly on an `InternPool`.
pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool {
    switch (ip.indexToKey(ty.toIntern())) {
        .ptr_type => |info| return info.flags.is_volatile,
        else => return false,
    }
}
1863
/// Whether `ty` permits address zero: allowzero pointers and all optionals.
pub fn isAllowzeroPtr(ty: Type, zcu: *const Zcu) bool {
    switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
        .ptr_type => |info| return info.flags.is_allowzero,
        .opt_type => return true,
        else => return false,
    }
}
1871
/// Whether `ty` is a C pointer (`[*c]T`).
pub fn isCPtr(ty: Type, zcu: *const Zcu) bool {
    switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
        .ptr_type => |info| return info.flags.size == .c,
        else => return false,
    }
}
1878
/// Whether `ty` is represented as a single address at runtime: non-slice
/// pointers, and optionals of non-allowzero one/many pointers.
pub fn isPtrAtRuntime(ty: Type, zcu: *const Zcu) bool {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .ptr_type => |info| switch (info.flags.size) {
            .slice => return false,
            .one, .many, .c => return true,
        },
        .opt_type => |child| switch (ip.indexToKey(child)) {
            .ptr_type => |info| switch (info.flags.size) {
                .slice, .c => return false,
                .many, .one => return !info.flags.is_allowzero,
            },
            else => return false,
        },
        else => return false,
    }
}
1895
/// For pointer-like optionals, returns true, otherwise returns the allowzero
/// property of pointers.
pub fn ptrAllowsZero(ty: Type, zcu: *const Zcu) bool {
    if (ty.isPtrLikeOptional(zcu)) return true;
    const info = ty.ptrInfo(zcu);
    return info.flags.is_allowzero;
}
1904
/// Whether an optional is stored as its payload alone, with a payload value
/// reserved for null. See also `isPtrLikeOptional`.
pub fn optionalReprIsPayload(ty: Type, zcu: *const Zcu) bool {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .opt_type => |child_type| {
            if (child_type == .anyerror_type) return true;
            return switch (ip.indexToKey(child_type)) {
                .ptr_type => |ptr_type| ptr_type.flags.size != .c and !ptr_type.flags.is_allowzero,
                .error_set_type, .inferred_error_set_type => true,
                else => false,
            };
        },
        // C pointers are inherently optional.
        .ptr_type => |ptr_type| return ptr_type.flags.size == .c,
        else => return false,
    }
}
1917
/// Returns true if the type is optional and would be lowered to a single pointer
/// address value, using 0 for null. Note that this returns true for C pointers.
/// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`.
pub fn isPtrLikeOptional(ty: Type, zcu: *const Zcu) bool {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .ptr_type => |info| return info.flags.size == .c,
        .opt_type => |child| switch (ip.indexToKey(child)) {
            .ptr_type => |info| switch (info.flags.size) {
                .slice, .c => return false,
                .many, .one => return !info.flags.is_allowzero,
            },
            else => return false,
        },
        else => return false,
    }
}
1934
/// For *[N]T,         returns [N]T.
/// For *T,            returns T.
/// For [*]T,          returns T.
/// For @Vector(N, T), returns T.
/// For [N]T,          returns T.
/// For ?T,            returns T.
pub fn childType(ty: Type, zcu: *const Zcu) Type {
    const ip = &zcu.intern_pool;
    return childTypeIp(ty, ip);
}
1944
/// Like `childType`, but operates directly on an `InternPool`.
pub fn childTypeIp(ty: Type, ip: *const InternPool) Type {
    const child = ip.childType(ty.toIntern());
    return Type.fromInterned(child);
}
1948
/// For *[N]T,       returns T.
/// For ?*T,         returns T.
/// For ?*[N]T,      returns T.
/// For ?[*]T,       returns T.
/// For *T,          returns T.
/// For [*]T,        returns T.
/// For [N]T,        returns T.
/// For []T,         returns T.
/// For anyframe->T, returns T.
pub fn elemType2(ty: Type, zcu: *const Zcu) Type {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
            // A single-item pointer to an array/vector peels one more level.
            .one => return Type.fromInterned(ptr_type.child).shallowElemType(zcu),
            .many, .c, .slice => return Type.fromInterned(ptr_type.child),
        },
        .anyframe_type => |child| {
            assert(child != .none);
            return Type.fromInterned(child);
        },
        .vector_type => |vector_type| return Type.fromInterned(vector_type.child),
        .array_type => |array_type| return Type.fromInterned(array_type.child),
        .opt_type => |child| return Type.fromInterned(ip.childType(child)),
        else => unreachable,
    }
}
1974
/// Given that `ty` is an indexable pointer, returns its element type. Specifically:
/// * for `*[n]T`, returns `T`
/// * for `[]T`, returns `T`
/// * for `[*]T`, returns `T`
/// * for `[*c]T`, returns `T`
pub fn indexablePtrElem(ty: Type, zcu: *const Zcu) Type {
    const ip = &zcu.intern_pool;
    const ptr_type = ip.indexToKey(ty.toIntern()).ptr_type;
    if (ptr_type.flags.size != .one) return .fromInterned(ptr_type.child);
    // `*[n]T`: the element type is behind the array pointee.
    const array_type = ip.indexToKey(ptr_type.child).array_type;
    return .fromInterned(array_type.child);
}
1990
/// If `child_ty` is an array or vector, returns its element type; otherwise
/// returns `child_ty` unchanged.
fn shallowElemType(child_ty: Type, zcu: *const Zcu) Type {
    switch (child_ty.zigTypeTag(zcu)) {
        .array, .vector => return child_ty.childType(zcu),
        else => return child_ty,
    }
}
1997
/// For vectors, returns the element type. Otherwise returns self.
pub fn scalarType(ty: Type, zcu: *const Zcu) Type {
    if (ty.zigTypeTag(zcu) == .vector) return ty.childType(zcu);
    return ty;
}
2005
/// Asserts that the type is an optional.
/// Note that for C pointers this returns the type unmodified.
pub fn optionalChild(ty: Type, zcu: *const Zcu) Type {
    switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
        .opt_type => |child| return Type.fromInterned(child),
        .ptr_type => |ptr_type| {
            // C pointers are optional-like; they are their own "child".
            assert(ptr_type.flags.size == .c);
            return ty;
        },
        else => unreachable,
    }
}
2018
/// Returns the tag type of a union, if the type is a union and it has a tag type.
/// Otherwise, returns `null`.
pub fn unionTagType(ty: Type, zcu: *const Zcu) ?Type {
    const ip = &zcu.intern_pool;
    if (ip.indexToKey(ty.toIntern()) != .union_type) return null;
    const union_type = ip.loadUnionType(ty.toIntern());
    const flags = union_type.flagsUnordered(ip);
    if (flags.runtime_tag != .tagged) return null;
    assert(flags.status.haveFieldTypes());
    return Type.fromInterned(union_type.enum_tag_ty);
}
2037
/// Same as `unionTagType` but includes safety tag.
/// Codegen should use this version.
pub fn unionTagTypeSafety(ty: Type, zcu: *const Zcu) ?Type {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .union_type => {},
        else => return null,
    }
    const union_type = ip.loadUnionType(ty.toIntern());
    if (!union_type.hasTag(ip)) return null;
    assert(union_type.haveFieldTypes(ip));
    return Type.fromInterned(union_type.enum_tag_ty);
}
2052
/// Asserts the type is a union; returns the tag type, even if the tag will
/// not be stored at runtime.
pub fn unionTagTypeHypothetical(ty: Type, zcu: *const Zcu) Type {
    const tag_ty_index = zcu.typeToUnion(ty).?.enum_tag_ty;
    return Type.fromInterned(tag_ty_index);
}
2059
/// Returns the field type of union `ty` selected by `enum_tag`, or `null` when
/// the tag matches no field. Asserts `ty` is a union.
pub fn unionFieldType(ty: Type, enum_tag: Value, zcu: *const Zcu) ?Type {
    const ip = &zcu.intern_pool;
    const union_obj = zcu.typeToUnion(ty).?;
    const index = zcu.unionTagFieldIndex(union_obj, enum_tag) orelse return null;
    return Type.fromInterned(union_obj.field_types.get(ip)[index]);
}
2067
/// Returns the type of the union field at `index`. Asserts `ty` is a union.
pub fn unionFieldTypeByIndex(ty: Type, index: usize, zcu: *const Zcu) Type {
    const ip = &zcu.intern_pool;
    const field_types = zcu.typeToUnion(ty).?.field_types;
    return Type.fromInterned(field_types.get(ip)[index]);
}
2073
/// Returns the field index of union `ty` selected by `enum_tag`, if any.
/// Asserts `ty` is a union.
pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, zcu: *const Zcu) ?u32 {
    return zcu.unionTagFieldIndex(zcu.typeToUnion(ty).?, enum_tag);
}
2078
/// Returns true when no field of union `ty` has runtime bits.
/// Asserts `ty` is a union.
pub fn unionHasAllZeroBitFieldTypes(ty: Type, zcu: *Zcu) bool {
    const ip = &zcu.intern_pool;
    const field_types = zcu.typeToUnion(ty).?.field_types.get(ip);
    for (field_types) |field_ty| {
        const has_bits = Type.fromInterned(field_ty).hasRuntimeBits(zcu);
        if (has_bits) return false;
    }
    return true;
}
2087
/// Returns the type used for backing storage of this union during comptime operations.
/// Asserts the type is either an extern or packed union.
pub fn unionBackingType(ty: Type, pt: Zcu.PerThread) !Type {
    const zcu = pt.zcu;
    switch (ty.containerLayout(zcu)) {
        // Extern unions: a byte array spanning the full ABI size.
        .@"extern" => return try pt.arrayType(.{ .len = ty.abiSize(zcu), .child = .u8_type }),
        // Packed unions: an unsigned integer of the union's bit size.
        .@"packed" => return try pt.intType(.unsigned, @intCast(ty.bitSize(zcu))),
        .auto => unreachable,
    }
}
2098
/// Returns the `Zcu.UnionLayout` for union `ty`.
pub fn unionGetLayout(ty: Type, zcu: *const Zcu) Zcu.UnionLayout {
    const union_obj = zcu.intern_pool.loadUnionType(ty.toIntern());
    return getUnionLayout(union_obj, zcu);
}
2103
/// Asserts `ty` is a struct, tuple, or union; returns its container layout.
pub fn containerLayout(ty: Type, zcu: *const Zcu) std.builtin.Type.ContainerLayout {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .struct_type => return ip.loadStructType(ty.toIntern()).layout,
        // Tuples never carry an explicit layout.
        .tuple_type => return .auto,
        .union_type => return ip.loadUnionType(ty.toIntern()).flagsUnordered(ip).layout,
        else => unreachable,
    }
}
2113
/// Asserts that the type is an error union; returns its payload type.
pub fn errorUnionPayload(ty: Type, zcu: *const Zcu) Type {
    const key = zcu.intern_pool.indexToKey(ty.toIntern());
    return Type.fromInterned(key.error_union_type.payload_type);
}
2118
/// Asserts that the type is an error union; returns its error set type.
pub fn errorUnionSet(ty: Type, zcu: *const Zcu) Type {
    const set_index = zcu.intern_pool.errorUnionSet(ty.toIntern());
    return Type.fromInterned(set_index);
}
2123
/// Returns false for unresolved inferred error sets.
pub fn errorSetIsEmpty(ty: Type, zcu: *const Zcu) bool {
    const ip = &zcu.intern_pool;
    switch (ty.toIntern()) {
        // `anyerror` and ad-hoc inferred sets are never reported empty.
        .anyerror_type, .adhoc_inferred_error_set_type => return false,
        else => switch (ip.indexToKey(ty.toIntern())) {
            .error_set_type => |error_set_type| return error_set_type.names.len == 0,
            .inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) {
                .none, .anyerror_type => return false,
                else => |t| return ip.indexToKey(t).error_set_type.names.len == 0,
            },
            else => unreachable,
        },
    }
}
2139
/// Returns true if it is an error set that includes anyerror, false otherwise.
/// Note that the result may be a false negative if the type did not get error set
/// resolution prior to this call.
pub fn isAnyError(ty: Type, zcu: *const Zcu) bool {
    const ip = &zcu.intern_pool;
    return switch (ty.toIntern()) {
        .anyerror_type => true,
        .adhoc_inferred_error_set_type => false,
        // Inferred error sets may resolve to `anyerror`; other error set
        // kinds never equal it.
        else => switch (ip.indexToKey(ty.toIntern())) {
            .inferred_error_set_type => |i| ip.funcIesResolvedUnordered(i) == .anyerror_type,
            else => false,
        },
    };
}
2154
/// Whether `ty` is an error union or an error set.
pub fn isError(ty: Type, zcu: *const Zcu) bool {
    switch (ty.zigTypeTag(zcu)) {
        .error_union, .error_set => return true,
        else => return false,
    }
}
2161
/// Returns whether ty, which must be an error set, includes an error `name`.
/// Might return a false negative if `ty` is an inferred error set and not fully
/// resolved yet.
pub fn errorSetHasFieldIp(
    ip: *const InternPool,
    ty: InternPool.Index,
    name: InternPool.NullTerminatedString,
) bool {
    if (ty == .anyerror_type) return true;
    switch (ip.indexToKey(ty)) {
        .error_set_type => |error_set_type| return error_set_type.nameIndex(ip, name) != null,
        .inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) {
            .anyerror_type => return true,
            // Unresolved inferred set: conservatively report absent.
            .none => return false,
            else => |t| return ip.indexToKey(t).error_set_type.nameIndex(ip, name) != null,
        },
        else => unreachable,
    }
}
2183
/// Returns whether ty, which must be an error set, includes an error `name`.
/// Might return a false negative if `ty` is an inferred error set and not fully
/// resolved yet.
pub fn errorSetHasField(ty: Type, name: []const u8, zcu: *const Zcu) bool {
    const ip = &zcu.intern_pool;
    if (ty.toIntern() == .anyerror_type) return true;
    switch (ip.indexToKey(ty.toIntern())) {
        .error_set_type => |error_set_type| {
            // If the string is not interned, then the field certainly is not present.
            const interned_name = ip.getString(name).unwrap() orelse return false;
            return error_set_type.nameIndex(ip, interned_name) != null;
        },
        .inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) {
            .anyerror_type => return true,
            .none => return false,
            else => |t| {
                // If the string is not interned, then the field certainly is not present.
                const interned_name = ip.getString(name).unwrap() orelse return false;
                return ip.indexToKey(t).error_set_type.nameIndex(ip, interned_name) != null;
            },
        },
        else => unreachable,
    }
}
2210
/// Asserts the type is an array or vector or struct.
pub fn arrayLen(ty: Type, zcu: *const Zcu) u64 {
    const ip = &zcu.intern_pool;
    return ty.arrayLenIp(ip);
}
2215
/// Like `arrayLen`, but operates directly on an `InternPool`.
pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 {
    const len = ip.aggregateTypeLen(ty.toIntern());
    return len;
}
2219
/// Like `arrayLen`, but the result counts the sentinel, if any.
pub fn arrayLenIncludingSentinel(ty: Type, zcu: *const Zcu) u64 {
    const ip = &zcu.intern_pool;
    return ip.aggregateTypeLenIncludingSentinel(ty.toIntern());
}
2223
/// Asserts `ty` is a vector or tuple; returns its element/field count.
pub fn vectorLen(ty: Type, zcu: *const Zcu) u32 {
    switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
        .vector_type => |vector_type| return vector_type.len,
        // Tuples are accepted here; the length is the field count.
        .tuple_type => |tuple| return @intCast(tuple.types.len),
        else => unreachable,
    }
}
2231
/// Asserts the type is an array, pointer or vector.
/// Returns the sentinel value, or `null` when there is none.
pub fn sentinel(ty: Type, zcu: *const Zcu) ?Value {
    switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
        .vector_type, .struct_type, .tuple_type => return null,

        .array_type => |t| return if (t.sentinel == .none) null else Value.fromInterned(t.sentinel),
        .ptr_type => |t| return if (t.sentinel == .none) null else Value.fromInterned(t.sentinel),

        else => unreachable,
    }
}
2246
/// Returns true if and only if the type is a fixed-width integer.
pub fn isInt(self: Type, zcu: *const Zcu) bool {
    if (self.toIntern() == .comptime_int_type) return false;
    return zcu.intern_pool.isIntegerType(self.toIntern());
}
2252
/// Returns true if and only if the type is a fixed-width, signed integer.
pub fn isSignedInt(ty: Type, zcu: *const Zcu) bool {
    switch (ty.toIntern()) {
        // `c_char` signedness is determined by the target C ABI.
        .c_char_type => return zcu.getTarget().cCharSignedness() == .signed,
        .isize_type, .c_short_type, .c_int_type, .c_long_type, .c_longlong_type => return true,
        else => switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
            .int_type => |int_type| return int_type.signedness == .signed,
            else => return false,
        },
    }
}
2264
/// Returns true if and only if the type is a fixed-width, unsigned integer.
pub fn isUnsignedInt(ty: Type, zcu: *const Zcu) bool {
    switch (ty.toIntern()) {
        // `c_char` signedness is determined by the target C ABI.
        .c_char_type => return zcu.getTarget().cCharSignedness() == .unsigned,
        .usize_type, .c_ushort_type, .c_uint_type, .c_ulong_type, .c_ulonglong_type => return true,
        else => switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
            .int_type => |int_type| return int_type.signedness == .unsigned,
            else => return false,
        },
    }
}
2276
/// Returns true for integers, enums, error sets, and packed structs.
/// If this function returns true, then intInfo() can be called on the type.
pub fn isAbiInt(ty: Type, zcu: *const Zcu) bool {
    switch (ty.zigTypeTag(zcu)) {
        .int, .@"enum", .error_set => return true,
        // Only packed structs have an integer ABI representation.
        .@"struct" => return ty.containerLayout(zcu) == .@"packed",
        else => return false,
    }
}
2286
/// Asserts the type is an integer, enum, error set, or vector of one of them.
/// Returns the signedness and bit width of the type's integer representation:
/// enums report their tag type, error sets report the global error-set width,
/// packed structs report their backing integer, and vectors report their
/// element type.
pub fn intInfo(starting_ty: Type, zcu: *const Zcu) InternPool.Key.IntType {
    const ip = &zcu.intern_pool;
    const target = zcu.getTarget();
    var ty = starting_ty;

    // Loop: several prongs below re-assign `ty` to an underlying type
    // (enum tag, packed-struct backing int, vector element) and iterate
    // until a concrete integer representation is reached.
    while (true) switch (ty.toIntern()) {
        .anyerror_type, .adhoc_inferred_error_set_type => {
            return .{ .signedness = .unsigned, .bits = zcu.errorSetBits() };
        },
        // Pointer-sized and C types take their width from the target.
        .usize_type => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() },
        .isize_type => return .{ .signedness = .signed, .bits = target.ptrBitWidth() },
        // `c_char` signedness depends on the target C ABI.
        .c_char_type => return .{ .signedness = zcu.getTarget().cCharSignedness(), .bits = target.cTypeBitSize(.char) },
        .c_short_type => return .{ .signedness = .signed, .bits = target.cTypeBitSize(.short) },
        .c_ushort_type => return .{ .signedness = .unsigned, .bits = target.cTypeBitSize(.ushort) },
        .c_int_type => return .{ .signedness = .signed, .bits = target.cTypeBitSize(.int) },
        .c_uint_type => return .{ .signedness = .unsigned, .bits = target.cTypeBitSize(.uint) },
        .c_long_type => return .{ .signedness = .signed, .bits = target.cTypeBitSize(.long) },
        .c_ulong_type => return .{ .signedness = .unsigned, .bits = target.cTypeBitSize(.ulong) },
        .c_longlong_type => return .{ .signedness = .signed, .bits = target.cTypeBitSize(.longlong) },
        .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.cTypeBitSize(.ulonglong) },
        else => switch (ip.indexToKey(ty.toIntern())) {
            .int_type => |int_type| return int_type,
            // Packed struct: recurse into the backing integer type.
            .struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntTypeUnordered(ip)),
            // Enum: recurse into the tag type.
            .enum_type => ty = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty),
            // Vector: recurse into the element type.
            .vector_type => |vector_type| ty = Type.fromInterned(vector_type.child),

            .error_set_type, .inferred_error_set_type => {
                return .{ .signedness = .unsigned, .bits = zcu.errorSetBits() };
            },

            .tuple_type => unreachable,

            .ptr_type => unreachable,
            .anyframe_type => unreachable,
            .array_type => unreachable,

            .opt_type => unreachable,
            .error_union_type => unreachable,
            .func_type => unreachable,
            .simple_type => unreachable, // handled via Index enum tag above

            .union_type => unreachable,
            .opaque_type => unreachable,

            // values, not types
            .undef,
            .simple_value,
            .variable,
            .@"extern",
            .func,
            .int,
            .err,
            .error_union,
            .enum_literal,
            .enum_tag,
            .empty_enum_value,
            .float,
            .ptr,
            .slice,
            .opt,
            .aggregate,
            .un,
            // memoization, not types
            .memoized_call,
            => unreachable,
        },
    };
}
2356
/// Returns true for the named pointer-sized and C integer types
/// (`usize`, `isize`, and the `c_*` integers); false for everything else,
/// including arbitrary-width `iN`/`uN` types.
pub fn isNamedInt(ty: Type) bool {
    switch (ty.toIntern()) {
        .usize_type,
        .isize_type,
        .c_char_type,
        .c_short_type,
        .c_ushort_type,
        .c_int_type,
        .c_uint_type,
        .c_long_type,
        .c_ulong_type,
        .c_longlong_type,
        .c_ulonglong_type,
        => return true,
        else => return false,
    }
}
2375
/// Returns `false` for `comptime_float`.
pub fn isRuntimeFloat(ty: Type) bool {
    switch (ty.toIntern()) {
        .f16_type,
        .f32_type,
        .f64_type,
        .f80_type,
        .f128_type,
        .c_longdouble_type,
        => return true,
        else => return false,
    }
}
2390
/// Returns `true` for `comptime_float`.
pub fn isAnyFloat(ty: Type) bool {
    // Same set as `isRuntimeFloat`, plus `comptime_float`.
    return ty.isRuntimeFloat() or ty.toIntern() == .comptime_float_type;
}
2406
/// Asserts the type is a fixed-size float or comptime_float.
/// Returns 128 for comptime_float types.
pub fn floatBits(ty: Type, target: *const Target) u16 {
    switch (ty.toIntern()) {
        .f16_type => return 16,
        .f32_type => return 32,
        .f64_type => return 64,
        .f80_type => return 80,
        .f128_type, .comptime_float_type => return 128,
        // `c_longdouble` width depends on the target C ABI.
        .c_longdouble_type => return target.cTypeBitSize(.longdouble),
        else => unreachable,
    }
}
2421
/// Asserts the type is a function or a function pointer.
pub fn fnReturnType(ty: Type, zcu: *const Zcu) Type {
    const ret_ty_index = zcu.intern_pool.funcTypeReturnType(ty.toIntern());
    return Type.fromInterned(ret_ty_index);
}
2426
/// Asserts the type is a function.
pub fn fnCallingConvention(ty: Type, zcu: *const Zcu) std.builtin.CallingConvention {
    const func_type = zcu.intern_pool.indexToKey(ty.toIntern()).func_type;
    return func_type.cc;
}
2431
/// Whether the type may appear as a function parameter type.
/// `generic_poison` is always considered valid.
pub fn isValidParamType(self: Type, zcu: *const Zcu) bool {
    if (self.toIntern() == .generic_poison_type) return true;
    // Opaque and noreturn types cannot be passed as parameters.
    switch (self.zigTypeTag(zcu)) {
        .@"opaque", .noreturn => return false,
        else => return true,
    }
}
2439
/// Whether the type may appear as a function return type.
/// `generic_poison` is always considered valid; only opaque types are not.
pub fn isValidReturnType(self: Type, zcu: *const Zcu) bool {
    if (self.toIntern() == .generic_poison_type) return true;
    switch (self.zigTypeTag(zcu)) {
        .@"opaque" => return false,
        else => return true,
    }
}
2447
/// Asserts the type is a function.
pub fn fnIsVarArgs(ty: Type, zcu: *const Zcu) bool {
    const func_type = zcu.intern_pool.indexToKey(ty.toIntern()).func_type;
    return func_type.is_var_args;
}
2452
/// Returns the target's function pointer mask for function types,
/// or null for any other type.
pub fn fnPtrMaskOrNull(ty: Type, zcu: *const Zcu) ?u64 {
    if (ty.zigTypeTag(zcu) != .@"fn") return null;
    return target_util.functionPointerMask(zcu.getTarget());
}
2459
/// Returns true for all float and integer types, including `comptime_int`,
/// `comptime_float`, the named C types, and arbitrary-width `iN`/`uN`.
pub fn isNumeric(ty: Type, zcu: *const Zcu) bool {
    // All well-known numeric indices first.
    switch (ty.toIntern()) {
        .f16_type,
        .f32_type,
        .f64_type,
        .f80_type,
        .f128_type,
        .c_longdouble_type,
        .comptime_int_type,
        .comptime_float_type,
        .usize_type,
        .isize_type,
        .c_char_type,
        .c_short_type,
        .c_ushort_type,
        .c_int_type,
        .c_uint_type,
        .c_long_type,
        .c_ulong_type,
        .c_longlong_type,
        .c_ulonglong_type,
        => return true,
        else => {},
    }
    // Otherwise, only arbitrary-width integer types qualify.
    return zcu.intern_pool.indexToKey(ty.toIntern()) == .int_type;
}
2489
/// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which
/// resolves field types rather than asserting they are already resolved.
/// Returns the unique value of the type if exactly one value is possible
/// (e.g. `void`, `u0`, empty tuples/arrays, structs whose every field has one
/// possible value), or null if the type has more than one possible value.
pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
    const zcu = pt.zcu;
    var ty = starting_type;
    const ip = &zcu.intern_pool;
    while (true) switch (ty.toIntern()) {
        .empty_tuple_type => return Value.empty_tuple,

        else => switch (ip.indexToKey(ty.toIntern())) {
            .int_type => |int_type| {
                // Only a zero-bit integer has exactly one value: 0.
                if (int_type.bits == 0) {
                    return try pt.intValue(ty, 0);
                } else {
                    return null;
                }
            },

            // These always have more than one possible value.
            .ptr_type,
            .error_union_type,
            .func_type,
            .anyframe_type,
            .error_set_type,
            .inferred_error_set_type,
            => return null,

            inline .array_type, .vector_type => |seq_type, seq_tag| {
                // Only arrays can have a sentinel; it counts toward the length.
                const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none;
                if (seq_type.len + @intFromBool(has_sentinel) == 0) {
                    return try pt.aggregateValue(ty, &.{});
                }
                // Non-empty: OPV exists iff the element type has an OPV,
                // in which case the value is a splat of it.
                if (try Type.fromInterned(seq_type.child).onePossibleValue(pt)) |opv| {
                    return try pt.aggregateSplatValue(ty, opv);
                }
                return null;
            },
            .opt_type => |child| {
                // `?noreturn` can only ever be null.
                if (child == .noreturn_type) {
                    return try pt.nullValue(ty);
                } else {
                    return null;
                }
            },

            .simple_type => |t| switch (t) {
                .f16,
                .f32,
                .f64,
                .f80,
                .f128,
                .usize,
                .isize,
                .c_char,
                .c_short,
                .c_ushort,
                .c_int,
                .c_uint,
                .c_long,
                .c_ulong,
                .c_longlong,
                .c_ulonglong,
                .c_longdouble,
                .anyopaque,
                .bool,
                .type,
                .anyerror,
                .comptime_int,
                .comptime_float,
                .enum_literal,
                .adhoc_inferred_error_set,
                => return null,

                // Single-valued simple types.
                .void => return Value.void,
                .noreturn => return Value.@"unreachable",
                .null => return Value.null,
                .undefined => return Value.undef,

                .generic_poison => unreachable,
            },
            .struct_type => {
                const struct_type = ip.loadStructType(ty.toIntern());
                // Caller contract: field types must already be resolved.
                assert(struct_type.haveFieldTypes(ip));
                // Fast path: already known to have more than one value.
                if (struct_type.knownNonOpv(ip))
                    return null;
                const field_vals = try zcu.gpa.alloc(InternPool.Index, struct_type.field_types.len);
                defer zcu.gpa.free(field_vals);
                for (field_vals, 0..) |*field_val, i_usize| {
                    const i: u32 = @intCast(i_usize);
                    if (struct_type.fieldIsComptime(ip, i)) {
                        // Comptime fields contribute their fixed init value.
                        assert(struct_type.haveFieldInits(ip));
                        field_val.* = struct_type.field_inits.get(ip)[i];
                        continue;
                    }
                    const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
                    if (try field_ty.onePossibleValue(pt)) |field_opv| {
                        field_val.* = field_opv.toIntern();
                    } else return null;
                }

                // In this case the struct has no runtime-known fields and
                // therefore has one possible value.
                return try pt.aggregateValue(ty, field_vals);
            },

            .tuple_type => |tuple| {
                if (tuple.types.len == 0) {
                    return try pt.aggregateValue(ty, &.{});
                }

                const field_vals = try zcu.gpa.alloc(
                    InternPool.Index,
                    tuple.types.len,
                );
                defer zcu.gpa.free(field_vals);
                for (
                    field_vals,
                    tuple.types.get(ip),
                    tuple.values.get(ip),
                ) |*field_val, field_ty, field_comptime_val| {
                    // A comptime-known field value is fixed; otherwise the
                    // field type itself must have a single possible value.
                    if (field_comptime_val != .none) {
                        field_val.* = field_comptime_val;
                        continue;
                    }
                    if (try Type.fromInterned(field_ty).onePossibleValue(pt)) |opv| {
                        field_val.* = opv.toIntern();
                    } else return null;
                }

                return try pt.aggregateValue(ty, field_vals);
            },

            .union_type => {
                const union_obj = ip.loadUnionType(ty.toIntern());
                // A union has an OPV only if its tag type does...
                const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(pt)) orelse
                    return null;
                if (union_obj.field_types.len == 0) {
                    const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() });
                    return Value.fromInterned(only);
                }
                // ...and the (single) active field type does too.
                const only_field_ty = union_obj.field_types.get(ip)[0];
                const val_val = (try Type.fromInterned(only_field_ty).onePossibleValue(pt)) orelse
                    return null;
                const only = try pt.internUnion(.{
                    .ty = ty.toIntern(),
                    .tag = tag_val.toIntern(),
                    .val = val_val.toIntern(),
                });
                return Value.fromInterned(only);
            },
            .opaque_type => return null,
            .enum_type => {
                const enum_type = ip.loadEnumType(ty.toIntern());
                switch (enum_type.tag_mode) {
                    .nonexhaustive => {
                        if (enum_type.tag_ty == .comptime_int_type) return null;

                        // Non-exhaustive: OPV exists iff the tag type has one.
                        if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(pt)) |int_opv| {
                            const only = try pt.intern(.{ .enum_tag = .{
                                .ty = ty.toIntern(),
                                .int = int_opv.toIntern(),
                            } });
                            return Value.fromInterned(only);
                        }

                        return null;
                    },
                    .auto, .explicit => {
                        if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(zcu)) return null;

                        return Value.fromInterned(switch (enum_type.names.len) {
                            0 => try pt.intern(.{ .empty_enum_value = ty.toIntern() }),
                            // Exactly one field: that field's tag value is the OPV.
                            1 => try pt.intern(.{ .enum_tag = .{
                                .ty = ty.toIntern(),
                                .int = if (enum_type.values.len == 0)
                                    (try pt.intValue(.fromInterned(enum_type.tag_ty), 0)).toIntern()
                                else
                                    try ip.getCoercedInts(
                                        zcu.gpa,
                                        pt.tid,
                                        ip.indexToKey(enum_type.values.get(ip)[0]).int,
                                        enum_type.tag_ty,
                                    ),
                            } }),
                            else => return null,
                        });
                    },
                }
            },

            // values, not types
            .undef,
            .simple_value,
            .variable,
            .@"extern",
            .func,
            .int,
            .err,
            .error_union,
            .enum_literal,
            .enum_tag,
            .empty_enum_value,
            .float,
            .ptr,
            .slice,
            .opt,
            .aggregate,
            .un,
            // memoization, not types
            .memoized_call,
            => unreachable,
        },
    };
}
2703
/// During semantic analysis, instead call `ty.comptimeOnlySema` which
/// resolves field types rather than asserting they are already resolved.
pub fn comptimeOnly(ty: Type, zcu: *const Zcu) bool {
    // With the `.normal` strategy no resolution is performed, so the inner
    // call cannot actually fail.
    return ty.comptimeOnlyInner(.normal, zcu, {}) catch unreachable;
}
2709
/// Variant of `comptimeOnly` for use during semantic analysis; resolves
/// field types as needed rather than asserting they are resolved.
pub fn comptimeOnlySema(ty: Type, pt: Zcu.PerThread) SemaError!bool {
    return ty.comptimeOnlyInner(.sema, pt.zcu, pt.tid);
}
2713
/// `generic_poison` will return false.
/// May return false negatives when structs and unions are having their field types resolved.
/// Determines whether values of `ty` can only exist at compile time (e.g.
/// `type`, `comptime_int`, or aggregates containing such types). `strat`
/// selects whether field resolution may be performed (`.sema`) or is assumed
/// done (`.normal`).
pub fn comptimeOnlyInner(
    ty: Type,
    comptime strat: ResolveStrat,
    zcu: strat.ZcuPtr(),
    tid: strat.Tid(),
) SemaError!bool {
    const ip = &zcu.intern_pool;
    return switch (ty.toIntern()) {
        .empty_tuple_type => false,

        else => switch (ip.indexToKey(ty.toIntern())) {
            .int_type => false,
            .ptr_type => |ptr_type| {
                const child_ty = Type.fromInterned(ptr_type.child);
                switch (child_ty.zigTypeTag(zcu)) {
                    // A pointer to a zero-bit function body is comptime-only;
                    // a pointer to a runtime function is a runtime value.
                    .@"fn" => return !try child_ty.fnHasRuntimeBitsInner(strat, zcu, tid),
                    .@"opaque" => return false,
                    else => return child_ty.comptimeOnlyInner(strat, zcu, tid),
                }
            },
            .anyframe_type => |child| {
                if (child == .none) return false;
                return Type.fromInterned(child).comptimeOnlyInner(strat, zcu, tid);
            },
            // Containers inherit comptime-only-ness from their child type.
            .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyInner(strat, zcu, tid),
            .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyInner(strat, zcu, tid),
            .opt_type => |child| return Type.fromInterned(child).comptimeOnlyInner(strat, zcu, tid),
            .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyInner(strat, zcu, tid),

            .error_set_type,
            .inferred_error_set_type,
            => false,

            // These are function bodies, not function pointers.
            .func_type => true,

            .simple_type => |t| switch (t) {
                .f16,
                .f32,
                .f64,
                .f80,
                .f128,
                .usize,
                .isize,
                .c_char,
                .c_short,
                .c_ushort,
                .c_int,
                .c_uint,
                .c_long,
                .c_ulong,
                .c_longlong,
                .c_ulonglong,
                .c_longdouble,
                .anyopaque,
                .bool,
                .void,
                .anyerror,
                .adhoc_inferred_error_set,
                .noreturn,
                .generic_poison,
                => false,

                // Types with no runtime representation.
                .type,
                .comptime_int,
                .comptime_float,
                .null,
                .undefined,
                .enum_literal,
                => true,
            },
            .struct_type => {
                const struct_type = ip.loadStructType(ty.toIntern());
                // packed structs cannot be comptime-only because they have a well-defined
                // memory layout and every field has a well-defined bit pattern.
                if (struct_type.layout == .@"packed")
                    return false;

                return switch (strat) {
                    // `.normal`: the answer must already be cached.
                    .normal => switch (struct_type.requiresComptime(ip)) {
                        .wip => unreachable,
                        .no => false,
                        .yes => true,
                        .unknown => unreachable,
                    },
                    // `.sema`: compute and cache the answer, using the `wip`
                    // marker to break cycles through recursive field types.
                    .sema => switch (struct_type.setRequiresComptimeWip(ip)) {
                        .no, .wip => false,
                        .yes => true,
                        .unknown => {
                            if (struct_type.flagsUnordered(ip).field_types_wip) {
                                struct_type.setRequiresComptime(ip, .unknown);
                                return false;
                            }

                            errdefer struct_type.setRequiresComptime(ip, .unknown);

                            const pt = strat.pt(zcu, tid);
                            try ty.resolveFields(pt);

                            for (0..struct_type.field_types.len) |i_usize| {
                                const i: u32 = @intCast(i_usize);
                                if (struct_type.fieldIsComptime(ip, i)) continue;
                                const field_ty = struct_type.field_types.get(ip)[i];
                                if (try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) {
                                    // Note that this does not cause the layout to
                                    // be considered resolved. Comptime-only types
                                    // still maintain a layout of their
                                    // runtime-known fields.
                                    struct_type.setRequiresComptime(ip, .yes);
                                    return true;
                                }
                            }

                            struct_type.setRequiresComptime(ip, .no);
                            return false;
                        },
                    },
                };
            },

            .tuple_type => |tuple| {
                for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
                    // Fields with a fixed comptime value do not force the
                    // whole tuple to be comptime-only.
                    const have_comptime_val = val != .none;
                    if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) return true;
                }
                return false;
            },

            .union_type => {
                const union_type = ip.loadUnionType(ty.toIntern());
                return switch (strat) {
                    // `.normal`: the answer must already be cached.
                    .normal => switch (union_type.requiresComptime(ip)) {
                        .wip => unreachable,
                        .no => false,
                        .yes => true,
                        .unknown => unreachable,
                    },
                    // `.sema`: compute and cache, with cycle protection,
                    // mirroring the struct case above.
                    .sema => switch (union_type.setRequiresComptimeWip(ip)) {
                        .no, .wip => return false,
                        .yes => return true,
                        .unknown => {
                            if (union_type.flagsUnordered(ip).status == .field_types_wip) {
                                union_type.setRequiresComptime(ip, .unknown);
                                return false;
                            }

                            errdefer union_type.setRequiresComptime(ip, .unknown);

                            const pt = strat.pt(zcu, tid);
                            try ty.resolveFields(pt);

                            for (0..union_type.field_types.len) |field_idx| {
                                const field_ty = union_type.field_types.get(ip)[field_idx];
                                if (try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) {
                                    union_type.setRequiresComptime(ip, .yes);
                                    return true;
                                }
                            }

                            union_type.setRequiresComptime(ip, .no);
                            return false;
                        },
                    },
                };
            },

            .opaque_type => false,

            // An enum is comptime-only iff its tag type is.
            .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyInner(strat, zcu, tid),

            // values, not types
            .undef,
            .simple_value,
            .variable,
            .@"extern",
            .func,
            .int,
            .err,
            .error_union,
            .enum_literal,
            .enum_tag,
            .empty_enum_value,
            .float,
            .ptr,
            .slice,
            .opt,
            .aggregate,
            .un,
            // memoization, not types
            .memoized_call,
            => unreachable,
        },
    };
}
2910
/// Whether the type is a vector.
pub fn isVector(ty: Type, zcu: *const Zcu) bool {
    return switch (ty.zigTypeTag(zcu)) {
        .vector => true,
        else => false,
    };
}
2914
/// Returns 0 if not a vector, otherwise returns @bitSizeOf(Element) * vector_len.
pub fn totalVectorBits(ty: Type, zcu: *Zcu) u64 {
    switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
        .vector_type => |v| return v.len * Type.fromInterned(v.child).bitSize(zcu),
        else => return 0,
    }
}
2921
/// Whether the type is an array or a vector.
pub fn isArrayOrVector(ty: Type, zcu: *const Zcu) bool {
    const tag = ty.zigTypeTag(zcu);
    return tag == .array or tag == .vector;
}
2928
/// Whether values of this type support indexing syntax: arrays, vectors,
/// tuples, slices, many-pointers, C pointers, and single-item pointers to
/// any of the former.
pub fn isIndexable(ty: Type, zcu: *const Zcu) bool {
    switch (ty.zigTypeTag(zcu)) {
        .array, .vector => return true,
        .@"struct" => return ty.isTuple(zcu),
        .pointer => {},
        else => return false,
    }
    switch (ty.ptrSize(zcu)) {
        .slice, .many, .c => return true,
        .one => {
            // A single-item pointer is indexable only if its pointee is.
            const pointee = ty.childType(zcu);
            return switch (pointee.zigTypeTag(zcu)) {
                .array, .vector => true,
                .@"struct" => pointee.isTuple(zcu),
                else => false,
            };
        },
    }
}
2944
/// Whether an indexable value of this type has a known `len`. Like
/// `isIndexable`, except many-pointers and C pointers are excluded since
/// their length is unknown.
pub fn indexableHasLen(ty: Type, zcu: *const Zcu) bool {
    switch (ty.zigTypeTag(zcu)) {
        .array, .vector => return true,
        .@"struct" => return ty.isTuple(zcu),
        .pointer => {},
        else => return false,
    }
    switch (ty.ptrSize(zcu)) {
        // Many-pointers and C pointers have no length.
        .many, .c => return false,
        .slice => return true,
        .one => {
            const pointee = ty.childType(zcu);
            return switch (pointee.zigTypeTag(zcu)) {
                .array, .vector => true,
                .@"struct" => pointee.isTuple(zcu),
                else => false,
            };
        },
    }
}
2961
/// Asserts that the type can have a namespace.
pub fn getNamespaceIndex(ty: Type, zcu: *Zcu) InternPool.NamespaceIndex {
    const opt_namespace = ty.getNamespace(zcu);
    return opt_namespace.unwrap().?;
}
2966
/// Returns null if the type has no namespace.
pub fn getNamespace(ty: Type, zcu: *Zcu) InternPool.OptionalNamespaceIndex {
    const ip = &zcu.intern_pool;
    // Only container types (opaque, struct, union, enum) carry a namespace.
    switch (ip.indexToKey(ty.toIntern())) {
        .opaque_type => return ip.loadOpaqueType(ty.toIntern()).namespace.toOptional(),
        .struct_type => return ip.loadStructType(ty.toIntern()).namespace.toOptional(),
        .union_type => return ip.loadUnionType(ty.toIntern()).namespace.toOptional(),
        .enum_type => return ip.loadEnumType(ty.toIntern()).namespace.toOptional(),
        else => return .none,
    }
}
2978
// TODO: new dwarf structure will also need the enclosing code block for types created in imperative scopes
/// Returns the namespace enclosing this type's own namespace, or `.none` if
/// the type has no namespace.
pub fn getParentNamespace(ty: Type, zcu: *Zcu) InternPool.OptionalNamespaceIndex {
    const ns = ty.getNamespace(zcu).unwrap() orelse return .none;
    return zcu.namespacePtr(ns).parent;
}
2983
// Works for vectors and vectors of integers.
/// The returned Value will have type dest_ty.
pub fn minInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
    const zcu = pt.zcu;
    const scalar_min = try ty.scalarType(zcu).minIntScalar(pt, dest_ty.scalarType(zcu));
    if (ty.zigTypeTag(zcu) != .vector) return scalar_min;
    // Vector result: splat the scalar minimum across all lanes.
    return pt.aggregateSplatValue(dest_ty, scalar_min);
}
2990
/// Asserts that the type is an integer.
/// Returns the smallest representable value of `ty`, as a value of `dest_ty`.
pub fn minIntScalar(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
    const zcu = pt.zcu;
    const info = ty.intInfo(zcu);
    // Unsigned and zero-bit integers have a minimum of 0.
    if (info.signedness == .unsigned or info.bits == 0) return pt.intValue(dest_ty, 0);

    if (std.math.cast(u6, info.bits - 1)) |shift| {
        // Fits in i64: an arithmetic right shift of minInt(i64) by
        // (63 - (bits - 1)) yields -2^(bits-1) without overflow.
        const n = @as(i64, std.math.minInt(i64)) >> (63 - shift);
        return pt.intValue(dest_ty, n);
    }

    // Wider than 64 bits: fall back to big-integer arithmetic.
    var res = try std.math.big.int.Managed.init(zcu.gpa);
    defer res.deinit();

    try res.setTwosCompIntLimit(.min, info.signedness, info.bits);

    return pt.intValue_big(dest_ty, res.toConst());
}
3009
// Works for vectors and vectors of integers.
/// The returned Value will have type dest_ty.
pub fn maxInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
    const zcu = pt.zcu;
    const scalar_max = try ty.scalarType(zcu).maxIntScalar(pt, dest_ty.scalarType(zcu));
    if (ty.zigTypeTag(zcu) != .vector) return scalar_max;
    // Vector result: splat the scalar maximum across all lanes.
    return pt.aggregateSplatValue(dest_ty, scalar_max);
}
3017
/// The returned Value will have type dest_ty.
/// Returns the largest representable value of integer type `ty`.
pub fn maxIntScalar(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
    const info = ty.intInfo(pt.zcu);

    // Special-case widths where the shift trick below would misbehave:
    // u0/i0 have max 0; i1 has max 0; u1 has max 1.
    switch (info.bits) {
        0 => return pt.intValue(dest_ty, 0),
        1 => return switch (info.signedness) {
            .signed => try pt.intValue(dest_ty, 0),
            .unsigned => try pt.intValue(dest_ty, 1),
        },
        else => {},
    }

    if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) {
        .signed => {
            // Fits in i64: shift maxInt(i64) down to 2^(bits-1) - 1.
            const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift);
            return pt.intValue(dest_ty, n);
        },
        .unsigned => {
            // Fits in u64: shift maxInt(u64) down to 2^bits - 1.
            const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift);
            return pt.intValue(dest_ty, n);
        },
    };

    // Wider than 64 bits: fall back to big-integer arithmetic.
    var res = try std.math.big.int.Managed.init(pt.zcu.gpa);
    defer res.deinit();

    try res.setTwosCompIntLimit(.max, info.signedness, info.bits);

    return pt.intValue_big(dest_ty, res.toConst());
}
3049
/// Asserts the type is an enum or a union.
/// Returns the integer type backing the enum tag; for unions, the integer
/// type backing the union's tag enum.
pub fn intTagType(ty: Type, zcu: *const Zcu) Type {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty),
        .union_type => {
            // Recurse through the union's tag enum to its integer tag type.
            const tag_enum = Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty);
            return tag_enum.intTagType(zcu);
        },
        else => unreachable,
    }
}
3059
/// Whether `ty` is a non-exhaustive enum. Any non-enum type reports false.
pub fn isNonexhaustiveEnum(ty: Type, zcu: *const Zcu) bool {
    const ip = &zcu.intern_pool;
    if (ip.indexToKey(ty.toIntern()) != .enum_type) return false;
    return switch (ip.loadEnumType(ty.toIntern()).tag_mode) {
        .nonexhaustive => true,
        .auto, .explicit => false,
    };
}
3070
/// Asserts that `ty` is an error set and not `anyerror`.
/// Asserts that `ty` is resolved if it is an inferred error set.
pub fn errorSetNames(ty: Type, zcu: *const Zcu) InternPool.NullTerminatedString.Slice {
    const ip = &zcu.intern_pool;
    return switch (ip.indexToKey(ty.toIntern())) {
        .error_set_type => |x| x.names,
        .inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) {
            .none => unreachable, // unresolved inferred error set
            .anyerror_type => unreachable,
            else => |t| ip.indexToKey(t).error_set_type.names,
        },
        else => unreachable,
    };
}
3085
/// Returns the declared field names of an enum type.
pub fn enumFields(ty: Type, zcu: *const Zcu) InternPool.NullTerminatedString.Slice {
    const ip = &zcu.intern_pool;
    return ip.loadEnumType(ty.toIntern()).names;
}
3089
/// Returns the number of declared fields of an enum type.
pub fn enumFieldCount(ty: Type, zcu: *const Zcu) usize {
    const ip = &zcu.intern_pool;
    return ip.loadEnumType(ty.toIntern()).names.len;
}
3093
/// Returns the name of the enum field at `field_index` (declaration order).
pub fn enumFieldName(ty: Type, field_index: usize, zcu: *const Zcu) InternPool.NullTerminatedString {
    const ip = &zcu.intern_pool;
    const names = ip.loadEnumType(ty.toIntern()).names;
    return names.get(ip)[field_index];
}
3098
/// Returns the declaration-order index of the enum field named `field_name`,
/// or `null` if the enum has no such field.
pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, zcu: *const Zcu) ?u32 {
    const ip = &zcu.intern_pool;
    return ip.loadEnumType(ty.toIntern()).nameIndex(ip, field_name);
}
3104
/// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or
/// an integer which represents the enum value. Returns the field index in
/// declaration order, or `null` if `enum_tag` does not match any field.
pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, zcu: *const Zcu) ?u32 {
    const ip = &zcu.intern_pool;
    const enum_type = ip.loadEnumType(ty.toIntern());
    // Normalize the tag to its underlying integer value before the lookup.
    const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) {
        .enum_tag => |info| info.int,
        .int => enum_tag.toIntern(),
        else => unreachable,
    };
    assert(ip.typeOf(int_tag) == enum_type.tag_ty);
    return enum_type.tagValueIndex(ip, int_tag);
}
3119
/// Returns none in the case of a tuple which uses the integer index as the field name.
pub fn structFieldName(ty: Type, index: usize, zcu: *const Zcu) InternPool.OptionalNullTerminatedString {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .tuple_type => return .none,
        .struct_type => {
            const name = ip.loadStructType(ty.toIntern()).fieldName(ip, index);
            return name.toOptional();
        },
        else => unreachable,
    }
}
3129
/// Returns the number of fields of a struct or tuple type.
pub fn structFieldCount(ty: Type, zcu: *const Zcu) u32 {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .struct_type => return ip.loadStructType(ty.toIntern()).field_types.len,
        .tuple_type => |tuple| return tuple.types.len,
        else => unreachable,
    }
}
3138
/// Returns the field type at `index`. Supports structs, tuples, and unions.
pub fn fieldType(ty: Type, index: usize, zcu: *const Zcu) Type {
    const ip = &zcu.intern_pool;
    const field_ty = switch (ip.indexToKey(ty.toIntern())) {
        .struct_type => ip.loadStructType(ty.toIntern()).field_types.get(ip)[index],
        .union_type => ip.loadUnionType(ty.toIntern()).field_types.get(ip)[index],
        .tuple_type => |tuple| tuple.types.get(ip)[index],
        else => unreachable,
    };
    return Type.fromInterned(field_ty);
}
3152
/// Returns the alignment of the field at `index`, asserting that all
/// required type resolution has already been done.
pub fn fieldAlignment(ty: Type, index: usize, zcu: *Zcu) Alignment {
    return fieldAlignmentInner(ty, index, .normal, zcu, {}) catch unreachable;
}
3156
/// Like `fieldAlignment`, but may perform type resolution.
pub fn fieldAlignmentSema(ty: Type, index: usize, pt: Zcu.PerThread) SemaError!Alignment {
    return fieldAlignmentInner(ty, index, .sema, pt.zcu, pt.tid);
}
3160
/// Returns the alignment of the field at `index`. Supports structs, tuples,
/// and unions; `ty` is the aggregate type itself, and `index` selects the field.
/// If `strat` is `.sema`, may perform type resolution.
/// Asserts the layout is not packed.
pub fn fieldAlignmentInner(
    ty: Type,
    index: usize,
    comptime strat: ResolveStrat,
    zcu: strat.ZcuPtr(),
    tid: strat.Tid(),
) SemaError!Alignment {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .struct_type => {
            const struct_type = ip.loadStructType(ty.toIntern());
            assert(struct_type.layout != .@"packed");
            const explicit_align = struct_type.fieldAlign(ip, index);
            const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
            return field_ty.structFieldAlignmentInner(explicit_align, struct_type.layout, strat, zcu, tid);
        },
        .tuple_type => |tuple| {
            // Tuple fields carry no explicit alignment; use the ABI alignment.
            return (try Type.fromInterned(tuple.types.get(ip)[index]).abiAlignmentInner(
                strat.toLazy(),
                zcu,
                tid,
            )).scalar;
        },
        .union_type => {
            const union_obj = ip.loadUnionType(ty.toIntern());
            const layout = union_obj.flagsUnordered(ip).layout;
            assert(layout != .@"packed");
            const explicit_align = union_obj.fieldAlign(ip, index);
            const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[index]);
            return field_ty.unionFieldAlignmentInner(explicit_align, layout, strat, zcu, tid);
        },
        else => unreachable,
    }
}
3200
/// Returns the alignment of a non-packed struct field. Asserts the layout
/// is not packed and that all needed type resolution has been done.
pub fn structFieldAlignment(
    field_ty: Type,
    explicit_alignment: InternPool.Alignment,
    layout: std.builtin.Type.ContainerLayout,
    zcu: *Zcu,
) Alignment {
    const result = field_ty.structFieldAlignmentInner(explicit_alignment, layout, .normal, zcu, {});
    return result catch unreachable;
}
3218
/// Returns the alignment of a non-packed struct field. Asserts the layout
/// is not packed. May perform type resolution.
pub fn structFieldAlignmentSema(
    field_ty: Type,
    explicit_alignment: InternPool.Alignment,
    layout: std.builtin.Type.ContainerLayout,
    pt: Zcu.PerThread,
) SemaError!Alignment {
    return field_ty.structFieldAlignmentInner(explicit_alignment, layout, .sema, pt.zcu, pt.tid);
}
3236
/// Returns the alignment of a non-packed struct field. Asserts the layout is not packed.
/// If `strat` is `.sema`, may perform type resolution.
pub fn structFieldAlignmentInner(
    field_ty: Type,
    explicit_alignment: Alignment,
    layout: std.builtin.Type.ContainerLayout,
    comptime strat: Type.ResolveStrat,
    zcu: strat.ZcuPtr(),
    tid: strat.Tid(),
) SemaError!Alignment {
    assert(layout != .@"packed");
    // An explicit `align(N)` on the field always wins.
    if (explicit_alignment != .none) return explicit_alignment;
    const abi_align = (try field_ty.abiAlignmentInner(
        strat.toLazy(),
        zcu,
        tid,
    )).scalar;
    // Auto layout uses the natural ABI alignment, except when emitting via
    // the C object format, which follows the extern rules below.
    if (layout == .auto and zcu.getTarget().ofmt != .c) return abi_align;
    // Extern layout: integers of 128 or more bits are at least 16-byte aligned.
    if (field_ty.isAbiInt(zcu) and field_ty.intInfo(zcu).bits >= 128) {
        return abi_align.maxStrict(.@"16");
    }
    return abi_align;
}
3265
/// Returns the alignment of a non-packed union field, performing type
/// resolution when needed.
pub fn unionFieldAlignmentSema(
    field_ty: Type,
    explicit_alignment: Alignment,
    layout: std.builtin.Type.ContainerLayout,
    pt: Zcu.PerThread,
) SemaError!Alignment {
    return field_ty.unionFieldAlignmentInner(explicit_alignment, layout, .sema, pt.zcu, pt.tid);
}
3280
/// Returns the alignment of a non-packed union field. Asserts the layout is
/// not packed. If `strat` is `.sema`, may perform type resolution.
pub fn unionFieldAlignmentInner(
    field_ty: Type,
    explicit_alignment: Alignment,
    layout: std.builtin.Type.ContainerLayout,
    comptime strat: Type.ResolveStrat,
    zcu: strat.ZcuPtr(),
    tid: strat.Tid(),
) SemaError!Alignment {
    assert(layout != .@"packed");
    // Explicit alignment overrides; `noreturn` fields occupy no storage.
    if (explicit_alignment != .none) return explicit_alignment;
    if (field_ty.isNoReturn(zcu)) return .none;
    const abi_align = try field_ty.abiAlignmentInner(strat.toLazy(), zcu, tid);
    return abi_align.scalar;
}
3294
/// Returns the default initializer of the field at `index`, or
/// `Value.@"unreachable"` if the field has no initializer.
pub fn structFieldDefaultValue(ty: Type, index: usize, zcu: *const Zcu) Value {
    const ip = &zcu.intern_pool;
    const init_val = switch (ip.indexToKey(ty.toIntern())) {
        .struct_type => ip.loadStructType(ty.toIntern()).fieldInit(ip, index),
        .tuple_type => |tuple| tuple.values.get(ip)[index],
        else => unreachable,
    };
    // TODO: avoid using `unreachable` to indicate a missing init.
    if (init_val == .none) return Value.@"unreachable";
    return Value.fromInterned(init_val);
}
3314
/// Returns the comptime-known value of the field at `index`, or `null` if
/// the field's value is only known at runtime.
pub fn structFieldValueComptime(ty: Type, pt: Zcu.PerThread, index: usize) !?Value {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .struct_type => {
            const struct_type = ip.loadStructType(ty.toIntern());
            if (!struct_type.fieldIsComptime(ip, index)) {
                // A runtime field is comptime-known only when its type has
                // exactly one possible value.
                return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(pt);
            }
            assert(struct_type.haveFieldInits(ip));
            return Value.fromInterned(struct_type.field_inits.get(ip)[index]);
        },
        .tuple_type => |tuple| {
            const val = tuple.values.get(ip)[index];
            if (val != .none) return Value.fromInterned(val);
            return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(pt);
        },
        else => unreachable,
    }
}
3339
/// Whether the field at `index` is a comptime field.
pub fn structFieldIsComptime(ty: Type, index: usize, zcu: *const Zcu) bool {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .struct_type => return ip.loadStructType(ty.toIntern()).fieldIsComptime(ip, index),
        // A tuple field is comptime exactly when it carries a value.
        .tuple_type => |tuple| return tuple.values.get(ip)[index] != .none,
        else => unreachable,
    }
}
3348
/// A field index paired with its byte offset within the containing type.
pub const FieldOffset = struct {
    /// Field index in declaration order.
    field: usize,
    /// Byte offset of the field.
    offset: u64,
};
3353
/// Returns the byte offset of the field at `index`.
/// Supports structs, tuples, and unions.
pub fn structFieldOffset(ty: Type, index: usize, zcu: *const Zcu) u64 {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .struct_type => {
            // Struct offsets are precomputed during layout resolution.
            const struct_type = ip.loadStructType(ty.toIntern());
            assert(struct_type.haveLayout(ip));
            assert(struct_type.layout != .@"packed");
            return struct_type.offsets.get(ip)[index];
        },

        .tuple_type => |tuple| {
            // Tuples have no precomputed offsets; walk the fields in order,
            // accumulating size and tracking the largest alignment seen.
            var offset: u64 = 0;
            var big_align: Alignment = .none;

            for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
                if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) {
                    // comptime field: occupies no storage.
                    if (i == index) return offset;
                    continue;
                }

                const field_align = Type.fromInterned(field_ty).abiAlignment(zcu);
                big_align = big_align.max(field_align);
                offset = field_align.forward(offset);
                if (i == index) return offset;
                offset += Type.fromInterned(field_ty).abiSize(zcu);
            }
            // Past the last field: round up to the tuple's overall alignment.
            offset = big_align.max(.@"1").forward(offset);
            return offset;
        },

        .union_type => {
            // All union payload fields share one offset; only a tag (if any)
            // placed before the payload can displace it.
            const union_type = ip.loadUnionType(ty.toIntern());
            if (!union_type.hasTag(ip))
                return 0;
            const layout = Type.getUnionLayout(union_type, zcu);
            if (layout.tag_align.compare(.gte, layout.payload_align)) {
                // {Tag, Payload}
                return layout.payload_align.forward(layout.tag_size);
            } else {
                // {Payload, Tag}
                return 0;
            }
        },

        else => unreachable,
    }
}
3403
/// Returns the source location of the type declaration, or `null` for types
/// with no corresponding ZIR declaration.
pub fn srcLocOrNull(ty: Type, zcu: *Zcu) ?Zcu.LazySrcLoc {
    const ip = &zcu.intern_pool;
    const base_node_inst = switch (ip.indexToKey(ty.toIntern())) {
        .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) {
            .declared => |d| d.zir_index,
            .reified => |r| r.zir_index,
            // A generated tag type points back at its owning union's ZIR.
            .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index,
        },
        else => return null,
    };
    return .{
        .base_node_inst = base_node_inst,
        .offset = Zcu.LazySrcLoc.Offset.nodeOffset(.zero),
    };
}
3418
/// Asserts the type has a source location; see `srcLocOrNull`.
pub fn srcLoc(ty: Type, zcu: *Zcu) Zcu.LazySrcLoc {
    const loc = ty.srcLocOrNull(zcu);
    return loc.?;
}
3422
/// Whether `ty` is the generic poison type.
pub fn isGenericPoison(ty: Type) bool {
    return switch (ty.toIntern()) {
        .generic_poison_type => true,
        else => false,
    };
}
3426
/// Whether `ty` is a tuple type.
pub fn isTuple(ty: Type, zcu: *const Zcu) bool {
    const ip = &zcu.intern_pool;
    return ip.indexToKey(ty.toIntern()) == .tuple_type;
}
3434
/// Traverses optional child types and error union payloads until the type
/// is neither an optional nor an error union.
/// For `E!?u32`, returns `u32`; for `*u8`, returns `*u8`.
pub fn optEuBaseType(ty: Type, zcu: *const Zcu) Type {
    var cur = ty;
    while (true) switch (cur.zigTypeTag(zcu)) {
        .optional => cur = cur.optionalChild(zcu),
        .error_union => cur = cur.errorUnionPayload(zcu),
        else => return cur,
    };
}
3445
/// Returns the unsigned integer type with the same bit width, mapping C
/// integer types to their unsigned counterparts. Supports integers and
/// vectors of integers.
pub fn toUnsigned(ty: Type, pt: Zcu.PerThread) !Type {
    const zcu = pt.zcu;
    switch (ty.toIntern()) {
        // zig fmt: off
        .usize_type,       .isize_type      => return .usize,
        .c_ushort_type,    .c_short_type    => return .c_ushort,
        .c_uint_type,      .c_int_type      => return .c_uint,
        .c_ulong_type,     .c_long_type     => return .c_ulong,
        .c_ulonglong_type, .c_longlong_type => return .c_ulonglong,
        // zig fmt: on
        else => {},
    }
    switch (ty.zigTypeTag(zcu)) {
        .int => return pt.intType(.unsigned, ty.intInfo(zcu).bits),
        .vector => {
            const unsigned_child = try ty.childType(zcu).toUnsigned(pt);
            return pt.vectorType(.{
                .len = ty.vectorLen(zcu),
                .child = unsigned_child.toIntern(),
            });
        },
        else => unreachable,
    }
}
3466
/// Returns the tracked ZIR instruction declaring this type, or `null` if
/// the type has none (e.g. a generated enum tag type or a primitive).
pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .struct_type => return ip.loadStructType(ty.toIntern()).zir_index,
        .union_type => return ip.loadUnionType(ty.toIntern()).zir_index,
        .enum_type => return ip.loadEnumType(ty.toIntern()).zir_index.unwrap(),
        .opaque_type => return ip.loadOpaqueType(ty.toIntern()).zir_index,
        else => return null,
    }
}
3477
/// Like `typeDeclInst`, but a generated enum tag type resolves to its owning
/// union's ZIR declaration instead of having none.
pub fn typeDeclInstAllowGeneratedTag(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .struct_type => return ip.loadStructType(ty.toIntern()).zir_index,
        .union_type => return ip.loadUnionType(ty.toIntern()).zir_index,
        .enum_type => |e| switch (e) {
            .declared, .reified => return ip.loadEnumType(ty.toIntern()).zir_index.unwrap().?,
            .generated_tag => |gt| return ip.loadUnionType(gt.union_type).zir_index,
        },
        .opaque_type => return ip.loadOpaqueType(ty.toIntern()).zir_index,
        else => return null,
    }
}
3491
/// Returns the source line on which `ty` was declared, or `null` if it has
/// no ZIR declaration. ZON files report line 0.
pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 {
    // Note that changes to ZIR instruction tracking only need to update this code
    // if a newly-tracked instruction can be a type's owner `zir_index`.
    comptime assert(Zir.inst_tracking_version == 0);

    const ip = &zcu.intern_pool;
    const tracked = switch (ip.indexToKey(ty.toIntern())) {
        .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) {
            .declared => |d| d.zir_index,
            .reified => |r| r.zir_index,
            // A generated tag type reports its owning union's declaration.
            .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index,
        },
        else => return null,
    };
    const info = tracked.resolveFull(&zcu.intern_pool) orelse return null;
    const file = zcu.fileByIndex(info.file);
    const zir = switch (file.getMode()) {
        .zig => file.zir.?,
        .zon => return 0,
    };
    // Extract the line number from whichever declaring instruction produced
    // this type; each instruction kind stores it in its own payload.
    const inst = zir.instructions.get(@intFromEnum(info.inst));
    return switch (inst.tag) {
        .struct_init, .struct_init_ref => zir.extraData(Zir.Inst.StructInit, inst.data.pl_node.payload_index).data.abs_line,
        .struct_init_anon => zir.extraData(Zir.Inst.StructInitAnon, inst.data.pl_node.payload_index).data.abs_line,
        .extended => switch (inst.data.extended.opcode) {
            .struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_line,
            .union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_line,
            .enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_line,
            .opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_line,
            .reify_enum => zir.extraData(Zir.Inst.ReifyEnum, inst.data.extended.operand).data.src_line,
            .reify_struct => zir.extraData(Zir.Inst.ReifyStruct, inst.data.extended.operand).data.src_line,
            .reify_union => zir.extraData(Zir.Inst.ReifyUnion, inst.data.extended.operand).data.src_line,
            else => unreachable,
        },
        else => unreachable,
    };
}
3529
/// Given a namespace type, returns its list of captured values.
pub fn getCaptures(ty: Type, zcu: *const Zcu) InternPool.CaptureValue.Slice {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .struct_type => return ip.loadStructType(ty.toIntern()).captures,
        .union_type => return ip.loadUnionType(ty.toIntern()).captures,
        .enum_type => return ip.loadEnumType(ty.toIntern()).captures,
        .opaque_type => return ip.loadOpaqueType(ty.toIntern()).captures,
        else => unreachable,
    }
}
3541
/// Strips away all array dimensions, returning the innermost non-array
/// element type together with the total element count (sentinels included).
pub fn arrayBase(ty: Type, zcu: *const Zcu) struct { Type, u64 } {
    var elem_ty = ty;
    var total_len: u64 = 1;
    while (elem_ty.zigTypeTag(zcu) == .array) {
        total_len *= elem_ty.arrayLenIncludingSentinel(zcu);
        elem_ty = elem_ty.childType(zcu);
    }
    return .{ elem_ty, total_len };
}
3551
/// Computes the packed offset (`host_size` and `bit_offset`) for a pointer to
/// field `field_idx` of the packed struct `struct_ty`, given the type of the
/// pointer to the parent struct.
pub fn packedStructFieldPtrInfo(
    struct_ty: Type,
    parent_ptr_ty: Type,
    field_idx: u32,
    pt: Zcu.PerThread,
) InternPool.Key.PtrType.PackedOffset {
    comptime assert(Type.packed_struct_layout_version == 2);

    const zcu = pt.zcu;
    const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu);

    // Sum field bit sizes to find the target field's bit offset and the
    // struct's total bit size.
    var bit_offset: u16 = 0;
    var running_bits: u16 = 0;
    for (0..struct_ty.structFieldCount(zcu)) |i| {
        const f_ty = struct_ty.fieldType(i, zcu);
        if (i == field_idx) {
            bit_offset = running_bits;
        }
        running_bits += @intCast(f_ty.bitSize(zcu));
    }

    // If the parent pointer is already a bit-pointer, stay within its host
    // integer and add to its bit offset; otherwise the struct itself becomes
    // the host.
    const res_host_size: u16, const res_bit_offset: u16 = if (parent_ptr_info.packed_offset.host_size != 0) .{
        parent_ptr_info.packed_offset.host_size,
        parent_ptr_info.packed_offset.bit_offset + bit_offset,
    } else .{
        switch (zcu.comp.getZigBackend()) {
            else => (running_bits + 7) / 8,
            // These backends use the struct's full ABI size as the host size.
            .stage2_x86_64, .stage2_c => @intCast(struct_ty.abiSize(zcu)),
        },
        bit_offset,
    };

    return .{
        .host_size = res_host_size,
        .bit_offset = res_bit_offset,
    };
}
3590
/// Ensures the memory layout of `ty`, and of everything its layout depends
/// on, is resolved, performing semantic analysis when necessary.
pub fn resolveLayout(ty: Type, pt: Zcu.PerThread) SemaError!void {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    switch (ty.zigTypeTag(zcu)) {
        .@"struct" => switch (ip.indexToKey(ty.toIntern())) {
            // Tuples have no resolution state of their own; resolve each field type.
            .tuple_type => |tuple_type| for (0..tuple_type.types.len) |i| {
                const field_ty = Type.fromInterned(tuple_type.types.get(ip)[i]);
                try field_ty.resolveLayout(pt);
            },
            .struct_type => return ty.resolveStructInner(pt, .layout),
            else => unreachable,
        },
        .@"union" => return ty.resolveUnionInner(pt, .layout),
        .array => {
            // A zero-length array has trivial layout regardless of its element type.
            if (ty.arrayLenIncludingSentinel(zcu) == 0) return;
            const elem_ty = ty.childType(zcu);
            return elem_ty.resolveLayout(pt);
        },
        .optional => {
            const payload_ty = ty.optionalChild(zcu);
            return payload_ty.resolveLayout(pt);
        },
        .error_union => {
            const payload_ty = ty.errorUnionPayload(zcu);
            return payload_ty.resolveLayout(pt);
        },
        .@"fn" => {
            const info = zcu.typeToFunc(ty).?;
            if (info.is_generic) {
                // Resolving of generic function types is deferred to when
                // the function is instantiated.
                return;
            }
            for (0..info.param_types.len) |i| {
                const param_ty = info.param_types.get(ip)[i];
                try Type.fromInterned(param_ty).resolveLayout(pt);
            }
            try Type.fromInterned(info.return_type).resolveLayout(pt);
        },
        else => {},
    }
}
3633
/// Ensures the field types of `ty` are resolved, performing semantic
/// analysis for struct and union declarations when necessary.
/// No-op for all other types.
pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void {
    const ip = &pt.zcu.intern_pool;
    const ty_ip = ty.toIntern();

    switch (ty_ip) {
        .none => unreachable,

        // Well-known primitive and simple types: nothing to resolve.
        .u0_type,
        .i0_type,
        .u1_type,
        .u8_type,
        .i8_type,
        .u16_type,
        .i16_type,
        .u29_type,
        .u32_type,
        .i32_type,
        .u64_type,
        .i64_type,
        .u80_type,
        .u128_type,
        .i128_type,
        .usize_type,
        .isize_type,
        .c_char_type,
        .c_short_type,
        .c_ushort_type,
        .c_int_type,
        .c_uint_type,
        .c_long_type,
        .c_ulong_type,
        .c_longlong_type,
        .c_ulonglong_type,
        .c_longdouble_type,
        .f16_type,
        .f32_type,
        .f64_type,
        .f80_type,
        .f128_type,
        .anyopaque_type,
        .bool_type,
        .void_type,
        .type_type,
        .anyerror_type,
        .adhoc_inferred_error_set_type,
        .comptime_int_type,
        .comptime_float_type,
        .noreturn_type,
        .anyframe_type,
        .null_type,
        .undefined_type,
        .enum_literal_type,
        .ptr_usize_type,
        .ptr_const_comptime_int_type,
        .manyptr_u8_type,
        .manyptr_const_u8_type,
        .manyptr_const_u8_sentinel_0_type,
        .slice_const_u8_type,
        .slice_const_u8_sentinel_0_type,
        .optional_noreturn_type,
        .anyerror_void_error_union_type,
        .generic_poison_type,
        .empty_tuple_type,
        => {},

        // These well-known indices are values, not types; callers must pass a type.
        .undef => unreachable,
        .zero => unreachable,
        .zero_usize => unreachable,
        .zero_u1 => unreachable,
        .zero_u8 => unreachable,
        .one => unreachable,
        .one_usize => unreachable,
        .one_u1 => unreachable,
        .one_u8 => unreachable,
        .four_u8 => unreachable,
        .negative_one => unreachable,
        .void_value => unreachable,
        .unreachable_value => unreachable,
        .null_value => unreachable,
        .bool_true => unreachable,
        .bool_false => unreachable,
        .empty_tuple => unreachable,

        else => switch (ty_ip.unwrap(ip).getTag(ip)) {
            .type_struct,
            .type_struct_packed,
            .type_struct_packed_inits,
            => return ty.resolveStructInner(pt, .fields),

            .type_union => return ty.resolveUnionInner(pt, .fields),

            // All other types (pointers, arrays, tuples, ...) need no field resolution.
            else => {},
        },
    }
}
3729
/// Fully resolves `ty`: fields and layout, and everything transitively
/// reachable through pointers, arrays, optionals, error unions, aggregates,
/// and (non-generic) function signatures.
pub fn resolveFully(ty: Type, pt: Zcu.PerThread) SemaError!void {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;

    switch (ty.zigTypeTag(zcu)) {
        // Leaf types: nothing further to resolve.
        .type,
        .void,
        .bool,
        .noreturn,
        .int,
        .float,
        .comptime_float,
        .comptime_int,
        .undefined,
        .null,
        .error_set,
        .@"enum",
        .@"opaque",
        .frame,
        .@"anyframe",
        .vector,
        .enum_literal,
        => {},

        .pointer => return ty.childType(zcu).resolveFully(pt),
        .array => return ty.childType(zcu).resolveFully(pt),
        .optional => return ty.optionalChild(zcu).resolveFully(pt),
        .error_union => return ty.errorUnionPayload(zcu).resolveFully(pt),
        .@"fn" => {
            const info = zcu.typeToFunc(ty).?;
            // Generic function types are resolved at instantiation time.
            if (info.is_generic) return;
            for (0..info.param_types.len) |i| {
                const param_ty = info.param_types.get(ip)[i];
                try Type.fromInterned(param_ty).resolveFully(pt);
            }
            try Type.fromInterned(info.return_type).resolveFully(pt);
        },

        .@"struct" => switch (ip.indexToKey(ty.toIntern())) {
            // Tuples have no resolution state; resolve each field type.
            .tuple_type => |tuple_type| for (0..tuple_type.types.len) |i| {
                const field_ty = Type.fromInterned(tuple_type.types.get(ip)[i]);
                try field_ty.resolveFully(pt);
            },
            .struct_type => return ty.resolveStructInner(pt, .full),
            else => unreachable,
        },
        .@"union" => return ty.resolveUnionInner(pt, .full),
    }
}
3779
/// Ensures the default field initializers of a struct type are resolved.
pub fn resolveStructFieldInits(ty: Type, pt: Zcu.PerThread) SemaError!void {
    // TODO: stop calling this for tuples!
    if (pt.zcu.typeToStruct(ty) == null) return;
    return ty.resolveStructInner(pt, .inits);
}
3785
/// Ensures the alignment of a struct type is resolved.
pub fn resolveStructAlignment(ty: Type, pt: Zcu.PerThread) SemaError!void {
    return resolveStructInner(ty, pt, .alignment);
}
3789
/// Ensures the alignment of a union type is resolved.
pub fn resolveUnionAlignment(ty: Type, pt: Zcu.PerThread) SemaError!void {
    return resolveUnionInner(ty, pt, .alignment);
}
3793
/// `ty` must be a struct.
/// Runs semantic analysis on the struct declaration to bring it to the
/// requested `resolution` stage, recording transitive analysis failures.
fn resolveStructInner(
    ty: Type,
    pt: Zcu.PerThread,
    resolution: enum { fields, inits, alignment, layout, full },
) SemaError!void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;

    const struct_obj = zcu.typeToStruct(ty).?;
    const owner: InternPool.AnalUnit = .wrap(.{ .type = ty.toIntern() });

    // If this unit already failed, directly or transitively, don't re-analyze.
    if (zcu.failed_analysis.contains(owner) or zcu.transitive_failed_analysis.contains(owner)) {
        return error.AnalysisFail;
    }

    // Incremental-compilation debugging bookkeeping.
    if (zcu.comp.debugIncremental()) {
        const info = try zcu.incremental_debug_state.getUnitInfo(gpa, owner);
        info.last_update_gen = zcu.generation;
    }

    var analysis_arena = std.heap.ArenaAllocator.init(gpa);
    defer analysis_arena.deinit();

    var comptime_err_ret_trace = std.array_list.Managed(Zcu.LazySrcLoc).init(gpa);
    defer comptime_err_ret_trace.deinit();

    // Set up a Sema instance dedicated to resolving this one declaration.
    const zir = zcu.namespacePtr(struct_obj.namespace).fileScope(zcu).zir.?;
    var sema: Sema = .{
        .pt = pt,
        .gpa = gpa,
        .arena = analysis_arena.allocator(),
        .code = zir,
        .owner = owner,
        .func_index = .none,
        .func_is_naked = false,
        .fn_ret_ty = Type.void,
        .fn_ret_ty_ies = null,
        .comptime_err_ret_trace = &comptime_err_ret_trace,
    };
    defer sema.deinit();

    (switch (resolution) {
        .fields => sema.resolveStructFieldTypes(ty.toIntern(), struct_obj),
        .inits => sema.resolveStructFieldInits(ty),
        .alignment => sema.resolveStructAlignment(ty.toIntern(), struct_obj),
        .layout => sema.resolveStructLayout(ty),
        .full => sema.resolveStructFully(ty),
    }) catch |err| switch (err) {
        error.AnalysisFail => {
            // No direct error was recorded for this unit, so the failure came
            // from a dependency: mark this unit transitively failed.
            if (!zcu.failed_analysis.contains(owner)) {
                try zcu.transitive_failed_analysis.put(gpa, owner, {});
            }
            return error.AnalysisFail;
        },
        error.OutOfMemory, error.Canceled => |e| return e,
    };
}
3852
/// `ty` must be a union.
/// Runs semantic analysis on the union declaration to bring it to the
/// requested `resolution` stage, recording transitive analysis failures.
fn resolveUnionInner(
    ty: Type,
    pt: Zcu.PerThread,
    resolution: enum { fields, alignment, layout, full },
) SemaError!void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;

    const union_obj = zcu.typeToUnion(ty).?;
    const owner: InternPool.AnalUnit = .wrap(.{ .type = ty.toIntern() });

    // If this unit already failed, directly or transitively, don't re-analyze.
    if (zcu.failed_analysis.contains(owner) or zcu.transitive_failed_analysis.contains(owner)) {
        return error.AnalysisFail;
    }

    // Incremental-compilation debugging bookkeeping.
    if (zcu.comp.debugIncremental()) {
        const info = try zcu.incremental_debug_state.getUnitInfo(gpa, owner);
        info.last_update_gen = zcu.generation;
    }

    var analysis_arena = std.heap.ArenaAllocator.init(gpa);
    defer analysis_arena.deinit();

    var comptime_err_ret_trace = std.array_list.Managed(Zcu.LazySrcLoc).init(gpa);
    defer comptime_err_ret_trace.deinit();

    // Set up a Sema instance dedicated to resolving this one declaration.
    const zir = zcu.namespacePtr(union_obj.namespace).fileScope(zcu).zir.?;
    var sema: Sema = .{
        .pt = pt,
        .gpa = gpa,
        .arena = analysis_arena.allocator(),
        .code = zir,
        .owner = owner,
        .func_index = .none,
        .func_is_naked = false,
        .fn_ret_ty = Type.void,
        .fn_ret_ty_ies = null,
        .comptime_err_ret_trace = &comptime_err_ret_trace,
    };
    defer sema.deinit();

    (switch (resolution) {
        .fields => sema.resolveUnionFieldTypes(ty, union_obj),
        .alignment => sema.resolveUnionAlignment(ty, union_obj),
        .layout => sema.resolveUnionLayout(ty),
        .full => sema.resolveUnionFully(ty),
    }) catch |err| switch (err) {
        error.AnalysisFail => {
            // No direct error was recorded for this unit, so the failure came
            // from a dependency: mark this unit transitively failed.
            if (!zcu.failed_analysis.contains(owner)) {
                try zcu.transitive_failed_analysis.put(gpa, owner, {});
            }
            return error.AnalysisFail;
        },
        error.OutOfMemory => |e| return e,
        error.Canceled => |e| return e,
    };
}
3911
/// Computes the memory layout of a union whose layout has already been resolved.
pub fn getUnionLayout(loaded_union: InternPool.LoadedUnionType, zcu: *const Zcu) Zcu.UnionLayout {
    const ip = &zcu.intern_pool;
    assert(loaded_union.haveLayout(ip));

    var largest_idx: u32 = 0; // index of the field with the biggest ABI size
    var aligned_idx: u32 = 0; // index of the most aligned runtime field
    var aligned_idx_align: InternPool.Alignment = .@"1";
    var aligned_idx_size: u64 = 0;
    var payload_size: u64 = 0;
    var payload_align: InternPool.Alignment = .@"1";

    for (loaded_union.field_types.get(ip), 0..) |ty_index, i| {
        const field_ty: Type = .fromInterned(ty_index);
        if (field_ty.isNoReturn(zcu)) continue;

        // An explicit field alignment overrides the type's natural ABI alignment.
        const explicit = loaded_union.fieldAlign(ip, i);
        const field_align = if (explicit == .none) field_ty.abiAlignment(zcu) else explicit;

        if (field_ty.hasRuntimeBits(zcu)) {
            const field_size = field_ty.abiSize(zcu);
            if (field_size > payload_size) {
                payload_size = field_size;
                largest_idx = @intCast(i);
            }
            // `gte`: a later field with equal alignment replaces an earlier winner.
            if (field_size > 0 and field_align.compare(.gte, aligned_idx_align)) {
                aligned_idx = @intCast(i);
                aligned_idx_align = field_align;
                aligned_idx_size = field_size;
            }
        }
        // Zero-bit fields still contribute their (possibly explicit) alignment.
        payload_align = payload_align.max(field_align);
    }

    const tag_ty: Type = .fromInterned(loaded_union.enum_tag_ty);
    const tagged = loaded_union.flagsUnordered(ip).runtime_tag.hasTag() and tag_ty.hasRuntimeBits(zcu);
    if (!tagged) {
        // No runtime tag (or a zero-bit tag): the union is just its payload,
        // padded up to the payload alignment.
        return .{
            .abi_size = payload_align.forward(payload_size),
            .abi_align = payload_align,
            .most_aligned_field = aligned_idx,
            .most_aligned_field_size = aligned_idx_size,
            .biggest_field = largest_idx,
            .payload_size = payload_size,
            .payload_align = payload_align,
            .tag_align = .none,
            .tag_size = 0,
            .padding = 0,
        };
    }

    const tag_size = tag_ty.abiSize(zcu);
    const tag_align = tag_ty.abiAlignment(zcu).max(.@"1");
    return .{
        .abi_size = loaded_union.sizeUnordered(ip),
        .abi_align = tag_align.max(payload_align),
        .most_aligned_field = aligned_idx,
        .most_aligned_field_size = aligned_idx_size,
        .biggest_field = largest_idx,
        .payload_size = payload_size,
        .payload_align = payload_align,
        .tag_align = tag_align,
        .tag_size = tag_size,
        .padding = loaded_union.paddingUnordered(ip),
    };
}
3975
/// Returns the type of a pointer to an element.
/// Asserts that the type is a pointer, and that the element type is indexable.
/// If the element index is comptime-known, it must be passed in `offset`.
/// For *@Vector(n, T), return *align(a:b:h:v) T
/// For *[N]T, return *T
/// For [*]T, returns *T
/// For []T, returns *T
/// Handles const-ness and address spaces in particular.
/// This code is duplicated in `Sema.analyzePtrArithmetic`.
/// May perform type resolution and return a transitive `error.AnalysisFail`.
pub fn elemPtrType(ptr_ty: Type, offset: ?usize, pt: Zcu.PerThread) !Type {
    const zcu = pt.zcu;
    const ptr_info = ptr_ty.ptrInfo(zcu);
    const elem_ty = ptr_ty.elemType2(zcu);
    // `allowzero` is only preserved when the result provably still points at offset 0.
    const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0;
    const parent_ty = ptr_ty.childType(zcu);

    const VI = InternPool.Key.PtrType.VectorIndex;

    // For a single-item pointer to a vector whose elements are packed
    // (sub-byte or non-power-of-two bit size), the element pointer carries
    // host-size / vector-index information for bit-level addressing.
    const vector_info: struct {
        host_size: u16 = 0,
        alignment: Alignment = .none,
        vector_index: VI = .none,
    } = if (parent_ty.isVector(zcu) and ptr_info.flags.size == .one) blk: {
        const elem_bits = elem_ty.bitSize(zcu);
        if (elem_bits == 0) break :blk .{};
        const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
        // Byte-sized power-of-two elements are addressed like ordinary memory.
        if (!is_packed) break :blk .{};

        break :blk .{
            .host_size = @intCast(parent_ty.arrayLen(zcu)),
            .alignment = parent_ty.abiAlignment(zcu),
            // Packed vector elements require a comptime-known index.
            .vector_index = @enumFromInt(offset.?),
        };
    } else .{};

    const alignment: Alignment = a: {
        // Calculate the new pointer alignment.
        if (ptr_info.flags.alignment == .none) {
            // In case of an ABI-aligned pointer, any pointer arithmetic
            // maintains the same ABI-alignedness.
            break :a vector_info.alignment;
        }
        // If the addend is not a comptime-known value we can still count on
        // it being a multiple of the type size.
        const elem_size = (try elem_ty.abiSizeInner(.sema, zcu, pt.tid)).scalar;
        const addend = if (offset) |off| elem_size * off else elem_size;

        // The resulting pointer is aligned to the gcd between the offset (an
        // arbitrary number) and the alignment factor (always a power of two,
        // non zero). Taking the minimum of the two log2 values achieves this.
        const new_align: Alignment = @enumFromInt(@min(
            @ctz(addend),
            ptr_info.flags.alignment.toLog2Units(),
        ));
        // The source alignment was non-`.none`, so the min cannot be `.none`.
        assert(new_align != .none);
        break :a new_align;
    };
    // Construct the element pointer type, preserving const/volatile/address
    // space from the original pointer.
    return pt.ptrTypeSema(.{
        .child = elem_ty.toIntern(),
        .flags = .{
            .alignment = alignment,
            .is_const = ptr_info.flags.is_const,
            .is_volatile = ptr_info.flags.is_volatile,
            .is_allowzero = is_allowzero,
            .address_space = ptr_info.flags.address_space,
            .vector_index = vector_info.vector_index,
        },
        .packed_offset = .{
            .host_size = vector_info.host_size,
            .bit_offset = 0,
        },
    });
}
4050
/// Returns the name of a container type (struct, union, enum, or opaque).
/// Asserts that `ty` is one of those.
pub fn containerTypeName(ty: Type, ip: *const InternPool) InternPool.NullTerminatedString {
    const index = ty.toIntern();
    switch (ip.indexToKey(index)) {
        .struct_type => return ip.loadStructType(index).name,
        .union_type => return ip.loadUnionType(index).name,
        .enum_type => return ip.loadEnumType(index).name,
        .opaque_type => return ip.loadOpaqueType(index).name,
        else => unreachable,
    }
}
4060
/// Returns `true` if a value of this type is always `null`.
/// Returns `false` if a value of this type is never `null`.
/// Returns `null` otherwise (nullability depends on the value).
pub fn isNullFromType(ty: Type, zcu: *const Zcu) ?bool {
    // Only optionals and C pointers can hold `null` at all.
    if (ty.zigTypeTag(zcu) != .optional and !ty.isCPtr(zcu)) return false;
    const child = ty.optionalChild(zcu);
    if (child.zigTypeTag(zcu) == .noreturn) return true; // `?noreturn` is always null
    return null;
}
4070
/// Recursively walks the type and marks for each subtype how many times it has been seen.
/// Note that a subtype's children are re-counted on every occurrence of the parent.
fn collectSubtypes(ty: Type, pt: Zcu.PerThread, visited: *std.AutoArrayHashMapUnmanaged(Type, u16)) error{OutOfMemory}!void {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;

    // Count this occurrence of `ty` itself.
    const gop = try visited.getOrPut(zcu.gpa, ty);
    if (gop.found_existing) {
        gop.value_ptr.* += 1;
    } else {
        gop.value_ptr.* = 1;
    }

    // Recurse into component types; container types are treated as leaves.
    switch (ip.indexToKey(ty.toIntern())) {
        .ptr_type => try collectSubtypes(Type.fromInterned(ty.ptrInfo(zcu).child), pt, visited),
        .array_type => |array_type| try collectSubtypes(Type.fromInterned(array_type.child), pt, visited),
        .vector_type => |vector_type| try collectSubtypes(Type.fromInterned(vector_type.child), pt, visited),
        .opt_type => |child| try collectSubtypes(Type.fromInterned(child), pt, visited),
        .error_union_type => |error_union_type| {
            try collectSubtypes(Type.fromInterned(error_union_type.error_set_type), pt, visited);
            // Generic-poison payloads are not real types; skip them.
            if (error_union_type.payload_type != .generic_poison_type) {
                try collectSubtypes(Type.fromInterned(error_union_type.payload_type), pt, visited);
            }
        },
        .tuple_type => |tuple| {
            for (tuple.types.get(ip)) |field_ty| {
                try collectSubtypes(Type.fromInterned(field_ty), pt, visited);
            }
        },
        .func_type => |fn_info| {
            const param_types = fn_info.param_types.get(&zcu.intern_pool);
            for (param_types) |param_ty| {
                if (param_ty != .generic_poison_type) {
                    try collectSubtypes(Type.fromInterned(param_ty), pt, visited);
                }
            }

            if (fn_info.return_type != .generic_poison_type) {
                try collectSubtypes(Type.fromInterned(fn_info.return_type), pt, visited);
            }
        },
        .anyframe_type => |child| try collectSubtypes(Type.fromInterned(child), pt, visited),

        // leaf types
        .undef,
        .inferred_error_set_type,
        .error_set_type,
        .struct_type,
        .union_type,
        .opaque_type,
        .enum_type,
        .simple_type,
        .int_type,
        => {},

        // values, not types
        .simple_value,
        .variable,
        .@"extern",
        .func,
        .int,
        .err,
        .error_union,
        .enum_literal,
        .enum_tag,
        .empty_enum_value,
        .float,
        .ptr,
        .slice,
        .opt,
        .aggregate,
        .un,
        // memoization, not types
        .memoized_call,
        => unreachable,
    }
}
4147
/// Decides whether `ty` should be replaced by a short placeholder (`<T>`..`<Z>`)
/// when printing, based on how many bytes the substitution would save.
/// Decisions are cached in `ctx.type_dedupe_cache`.
fn shouldDedupeType(ty: Type, ctx: *Comparison, pt: Zcu.PerThread) error{OutOfMemory}!Comparison.DedupeEntry {
    if (ctx.type_occurrences.get(ty)) |occ| {
        if (ctx.type_dedupe_cache.get(ty)) |cached| {
            return cached;
        }

        // Measure how long the type prints by writing it to a discarding writer.
        var discarding: std.Io.Writer.Discarding = .init(&.{});

        print(ty, &discarding.writer, pt, null) catch
            unreachable; // we are writing into a discarding writer, it should never fail

        const type_len: i32 = @intCast(discarding.count);

        // A placeholder prints as "<X>", i.e. 3 bytes.
        const placeholder_len: i32 = 3;
        // Don't bother deduplicating unless it saves at least this many bytes.
        const min_saved_bytes: i32 = 10;

        // Each occurrence after the first saves (type_len - placeholder_len) bytes.
        const saved_bytes = (type_len - placeholder_len) * (occ - 1);
        const max_placeholders = 7; // T to Z
        const should_dedupe = saved_bytes >= min_saved_bytes and ctx.placeholder_index < max_placeholders;

        const entry: Comparison.DedupeEntry = if (should_dedupe) b: {
            ctx.placeholder_index += 1;
            break :b .{ .dedupe = .{ .index = ctx.placeholder_index - 1 } };
        } else .dont_dedupe;

        try ctx.type_dedupe_cache.put(pt.zcu.gpa, ty, entry);

        return entry;
    } else {
        // Type was never collected, so it cannot be deduplicated.
        return .{ .dont_dedupe = {} };
    }
}
4180
/// The comparison recursively walks all types given and notes how many times
/// each subtype occurs. Then, while recursively printing, it decides for each
/// subtype whether to print the type inline or create a placeholder, based on
/// the subtype length and number of occurrences. Placeholders are then found by
/// iterating `type_dedupe_cache`, which caches the inline/placeholder decisions.
pub const Comparison = struct {
    /// For each subtype collected by `collectSubtypes`, how many times it occurred.
    type_occurrences: std.AutoArrayHashMapUnmanaged(Type, u16),
    /// Caches the inline-vs-placeholder decision made by `shouldDedupeType`.
    type_dedupe_cache: std.AutoArrayHashMapUnmanaged(Type, DedupeEntry),
    /// Number of placeholders handed out so far; also the next index to assign.
    placeholder_index: u8,

    pub const Placeholder = struct {
        index: u8,

        /// Prints as "<T>", "<U>", ... based on `index` offset from 'T'.
        pub fn format(p: Placeholder, writer: *std.Io.Writer) error{WriteFailed}!void {
            return writer.print("<{c}>", .{p.index + 'T'});
        }
    };

    pub const DedupeEntry = union(enum) {
        dont_dedupe: void,
        dedupe: Placeholder,
    };

    /// Walks all `types` to populate the occurrence counts.
    /// Caller must call `deinit` on the result.
    pub fn init(types: []const Type, pt: Zcu.PerThread) error{OutOfMemory}!Comparison {
        var cmp: Comparison = .{
            .type_occurrences = .empty,
            .type_dedupe_cache = .empty,
            .placeholder_index = 0,
        };

        errdefer cmp.deinit(pt);

        for (types) |ty| {
            try collectSubtypes(ty, pt, &cmp.type_occurrences);
        }

        return cmp;
    }

    pub fn deinit(cmp: *Comparison, pt: Zcu.PerThread) void {
        const gpa = pt.zcu.gpa;
        cmp.type_occurrences.deinit(gpa);
        cmp.type_dedupe_cache.deinit(gpa);
    }

    /// Returns a formatter that prints `ty` with this comparison's dedupe decisions.
    pub fn fmtType(ctx: *Comparison, ty: Type, pt: Zcu.PerThread) Comparison.Formatter {
        return .{ .ty = ty, .ctx = ctx, .pt = pt };
    }
    pub const Formatter = struct {
        ty: Type,
        ctx: *Comparison,
        pt: Zcu.PerThread,

        pub fn format(self: Comparison.Formatter, writer: anytype) error{WriteFailed}!void {
            print(self.ty, writer, self.pt, self.ctx) catch return error.WriteFailed;
        }
    };
};
4239
// Predeclared `Type` values for types that have fixed `InternPool` indices.

// Fixed-width unsigned integer types.
pub const @"u1": Type = .{ .ip_index = .u1_type };
pub const @"u8": Type = .{ .ip_index = .u8_type };
pub const @"u16": Type = .{ .ip_index = .u16_type };
pub const @"u29": Type = .{ .ip_index = .u29_type };
pub const @"u32": Type = .{ .ip_index = .u32_type };
pub const @"u64": Type = .{ .ip_index = .u64_type };
pub const @"u80": Type = .{ .ip_index = .u80_type };
pub const @"u128": Type = .{ .ip_index = .u128_type };
pub const @"u256": Type = .{ .ip_index = .u256_type };

// Fixed-width signed integer types.
pub const @"i8": Type = .{ .ip_index = .i8_type };
pub const @"i16": Type = .{ .ip_index = .i16_type };
pub const @"i32": Type = .{ .ip_index = .i32_type };
pub const @"i64": Type = .{ .ip_index = .i64_type };
pub const @"i128": Type = .{ .ip_index = .i128_type };

// Floating point types.
pub const @"f16": Type = .{ .ip_index = .f16_type };
pub const @"f32": Type = .{ .ip_index = .f32_type };
pub const @"f64": Type = .{ .ip_index = .f64_type };
pub const @"f80": Type = .{ .ip_index = .f80_type };
pub const @"f128": Type = .{ .ip_index = .f128_type };

// Other primitive and special types.
pub const @"bool": Type = .{ .ip_index = .bool_type };
pub const @"usize": Type = .{ .ip_index = .usize_type };
pub const @"isize": Type = .{ .ip_index = .isize_type };
pub const @"comptime_int": Type = .{ .ip_index = .comptime_int_type };
pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type };
pub const @"void": Type = .{ .ip_index = .void_type };
pub const @"type": Type = .{ .ip_index = .type_type };
pub const @"anyerror": Type = .{ .ip_index = .anyerror_type };
pub const @"anyopaque": Type = .{ .ip_index = .anyopaque_type };
pub const @"anyframe": Type = .{ .ip_index = .anyframe_type };
pub const @"null": Type = .{ .ip_index = .null_type };
pub const @"undefined": Type = .{ .ip_index = .undefined_type };
pub const @"noreturn": Type = .{ .ip_index = .noreturn_type };
pub const enum_literal: Type = .{ .ip_index = .enum_literal_type };

// C ABI types.
pub const @"c_char": Type = .{ .ip_index = .c_char_type };
pub const @"c_short": Type = .{ .ip_index = .c_short_type };
pub const @"c_ushort": Type = .{ .ip_index = .c_ushort_type };
pub const @"c_int": Type = .{ .ip_index = .c_int_type };
pub const @"c_uint": Type = .{ .ip_index = .c_uint_type };
pub const @"c_long": Type = .{ .ip_index = .c_long_type };
pub const @"c_ulong": Type = .{ .ip_index = .c_ulong_type };
pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type };
pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type };
pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type };

// Commonly used pointer, slice, and optional types.
pub const ptr_usize: Type = .{ .ip_index = .ptr_usize_type };
pub const ptr_const_comptime_int: Type = .{ .ip_index = .ptr_const_comptime_int_type };
pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type };
pub const manyptr_const_u8: Type = .{ .ip_index = .manyptr_const_u8_type };
pub const manyptr_const_u8_sentinel_0: Type = .{ .ip_index = .manyptr_const_u8_sentinel_0_type };
pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type };
pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type };
pub const slice_const_slice_const_u8: Type = .{ .ip_index = .slice_const_slice_const_u8_type };
pub const slice_const_type: Type = .{ .ip_index = .slice_const_type_type };
pub const optional_type: Type = .{ .ip_index = .optional_type_type };
pub const optional_noreturn: Type = .{ .ip_index = .optional_noreturn_type };

// Commonly used SIMD vector types.
pub const vector_8_i8: Type = .{ .ip_index = .vector_8_i8_type };
pub const vector_16_i8: Type = .{ .ip_index = .vector_16_i8_type };
pub const vector_32_i8: Type = .{ .ip_index = .vector_32_i8_type };
pub const vector_64_i8: Type = .{ .ip_index = .vector_64_i8_type };
pub const vector_1_u8: Type = .{ .ip_index = .vector_1_u8_type };
pub const vector_2_u8: Type = .{ .ip_index = .vector_2_u8_type };
pub const vector_4_u8: Type = .{ .ip_index = .vector_4_u8_type };
pub const vector_8_u8: Type = .{ .ip_index = .vector_8_u8_type };
pub const vector_16_u8: Type = .{ .ip_index = .vector_16_u8_type };
pub const vector_32_u8: Type = .{ .ip_index = .vector_32_u8_type };
pub const vector_64_u8: Type = .{ .ip_index = .vector_64_u8_type };
pub const vector_2_i16: Type = .{ .ip_index = .vector_2_i16_type };
pub const vector_4_i16: Type = .{ .ip_index = .vector_4_i16_type };
pub const vector_8_i16: Type = .{ .ip_index = .vector_8_i16_type };
pub const vector_16_i16: Type = .{ .ip_index = .vector_16_i16_type };
pub const vector_32_i16: Type = .{ .ip_index = .vector_32_i16_type };
pub const vector_4_u16: Type = .{ .ip_index = .vector_4_u16_type };
pub const vector_8_u16: Type = .{ .ip_index = .vector_8_u16_type };
pub const vector_16_u16: Type = .{ .ip_index = .vector_16_u16_type };
pub const vector_32_u16: Type = .{ .ip_index = .vector_32_u16_type };
pub const vector_2_i32: Type = .{ .ip_index = .vector_2_i32_type };
pub const vector_4_i32: Type = .{ .ip_index = .vector_4_i32_type };
pub const vector_8_i32: Type = .{ .ip_index = .vector_8_i32_type };
pub const vector_16_i32: Type = .{ .ip_index = .vector_16_i32_type };
pub const vector_4_u32: Type = .{ .ip_index = .vector_4_u32_type };
pub const vector_8_u32: Type = .{ .ip_index = .vector_8_u32_type };
pub const vector_16_u32: Type = .{ .ip_index = .vector_16_u32_type };
pub const vector_2_i64: Type = .{ .ip_index = .vector_2_i64_type };
pub const vector_4_i64: Type = .{ .ip_index = .vector_4_i64_type };
pub const vector_8_i64: Type = .{ .ip_index = .vector_8_i64_type };
pub const vector_2_u64: Type = .{ .ip_index = .vector_2_u64_type };
pub const vector_4_u64: Type = .{ .ip_index = .vector_4_u64_type };
pub const vector_8_u64: Type = .{ .ip_index = .vector_8_u64_type };
pub const vector_1_u128: Type = .{ .ip_index = .vector_1_u128_type };
pub const vector_2_u128: Type = .{ .ip_index = .vector_2_u128_type };
pub const vector_1_u256: Type = .{ .ip_index = .vector_1_u256_type };
pub const vector_4_f16: Type = .{ .ip_index = .vector_4_f16_type };
pub const vector_8_f16: Type = .{ .ip_index = .vector_8_f16_type };
pub const vector_16_f16: Type = .{ .ip_index = .vector_16_f16_type };
pub const vector_32_f16: Type = .{ .ip_index = .vector_32_f16_type };
pub const vector_2_f32: Type = .{ .ip_index = .vector_2_f32_type };
pub const vector_4_f32: Type = .{ .ip_index = .vector_4_f32_type };
pub const vector_8_f32: Type = .{ .ip_index = .vector_8_f32_type };
pub const vector_16_f32: Type = .{ .ip_index = .vector_16_f32_type };
pub const vector_2_f64: Type = .{ .ip_index = .vector_2_f64_type };
pub const vector_4_f64: Type = .{ .ip_index = .vector_4_f64_type };
pub const vector_8_f64: Type = .{ .ip_index = .vector_8_f64_type };

pub const empty_tuple: Type = .{ .ip_index = .empty_tuple_type };

pub const generic_poison: Type = .{ .ip_index = .generic_poison_type };
4351
/// Returns the minimum number of bits needed to represent `max` as an
/// unsigned integer. Zero requires zero bits.
pub fn smallestUnsignedBits(max: u64) u16 {
    if (max == 0) return 0;
    // The bit width of a nonzero u64 is 64 minus its leading-zero count.
    return @intCast(64 - @clz(max));
}
4358
/// This is only used for comptime asserts. Bump this number when you make a change
/// to packed struct layout to find out all the places in the codebase you need to edit!
pub const packed_struct_layout_version = 2;

/// Converts a C type's target-reported alignment (in byte units) to an `Alignment`.
fn cTypeAlign(target: *const Target, c_type: Target.CType) Alignment {
    return Alignment.fromByteUnits(target.cTypeAlignment(c_type));
}