//! Analyzed Intermediate Representation.
//!
//! This data is produced by Sema and consumed by codegen.
//! Unlike ZIR where there is one instance for an entire source file, each function
//! gets its own `Air` instance.

const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;

const Air = @This();
const InternPool = @import("InternPool.zig");
const Type = @import("Type.zig");
const Value = @import("Value.zig");
const Zcu = @import("Zcu.zig");
const print = @import("Air/print.zig");
const types_resolved = @import("Air/types_resolved.zig");

pub const Legalize = @import("Air/Legalize.zig");
pub const Liveness = @import("Air/Liveness.zig");

instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
/// The first few indexes are reserved. See `ExtraIndex` for the values.
extra: std.ArrayList(u32),

pub const ExtraIndex = enum(u32) {
    /// Payload index of the main `Block` in the `extra` array.
    main_block,

    _,
};
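// As an informal illustration of how the reserved slot is used (a sketch, not necessarily the
// exact helper this file provides), a consumer can locate the function's main body through
// `ExtraIndex.main_block`:
//
//     const payload_index = air.extra.items[@intFromEnum(ExtraIndex.main_block)];
//     // `payload_index` is then the index of a `Block` payload describing the main body.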

pub const Inst = struct {
    tag: Tag,
    data: Data,

    pub const Tag = enum(u8) {
        /// The first N instructions in the main block must be one arg instruction per
        /// function parameter. This makes function parameters participate in
        /// liveness analysis without any special handling.
        /// Uses the `arg` field.
        arg,
        /// Float or integer addition. For integers, wrapping is illegal behavior.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        add,
        /// Integer addition. Wrapping is a safety panic.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// The panic handler function must be populated before lowering AIR
        /// that contains this instruction.
        /// Uses the `bin_op` field.
        add_safe,
        /// Float addition. The instruction is allowed to have equal or more
        /// mathematical accuracy than strict IEEE-754 float addition.
        /// If either operand is NaN, the result value is undefined.
        /// Uses the `bin_op` field.
        add_optimized,
        /// Twos complement wrapping integer addition.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        add_wrap,
        /// Saturating integer addition.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        add_sat,
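        // Source-level correspondence, stated informally: `add` comes from `+` (with float
        // operands, or with integer overflow checks disabled), `add_safe` from `+` with
        // safety checks enabled, `add_wrap` from `+%`, and `add_sat` from `+|`. For example:
        //
        //     const wrapped: u8 = a +% b; // lowers to `add_wrap`
        //     const clamped: u8 = a +| b; // lowers to `add_sat`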
        /// Float or integer subtraction. For integers, wrapping is illegal behavior.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        sub,
        /// Integer subtraction. Wrapping is a safety panic.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// The panic handler function must be populated before lowering AIR
        /// that contains this instruction.
        /// Uses the `bin_op` field.
        sub_safe,
        /// Float subtraction. The instruction is allowed to have equal or more
        /// mathematical accuracy than strict IEEE-754 float subtraction.
        /// If either operand is NaN, the result value is undefined.
        /// Uses the `bin_op` field.
        sub_optimized,
        /// Twos complement wrapping integer subtraction.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        sub_wrap,
        /// Saturating integer subtraction.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        sub_sat,
        /// Float or integer multiplication. For integers, wrapping is illegal behavior.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        mul,
        /// Integer multiplication. Wrapping is a safety panic.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// The panic handler function must be populated before lowering AIR
        /// that contains this instruction.
        /// Uses the `bin_op` field.
        mul_safe,
        /// Float multiplication. The instruction is allowed to have equal or more
        /// mathematical accuracy than strict IEEE-754 float multiplication.
        /// If either operand is NaN, the result value is undefined.
        /// Uses the `bin_op` field.
        mul_optimized,
        /// Twos complement wrapping integer multiplication.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        mul_wrap,
        /// Saturating integer multiplication.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        mul_sat,
        /// Float division.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        div_float,
        /// Same as `div_float` with optimized float mode.
        div_float_optimized,
        /// Truncating integer or float division. For integers, wrapping is illegal behavior.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        div_trunc,
        /// Same as `div_trunc` with optimized float mode.
        div_trunc_optimized,
        /// Flooring integer or float division. For integers, wrapping is illegal behavior.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        div_floor,
        /// Same as `div_floor` with optimized float mode.
        div_floor_optimized,
        /// Integer or float division.
        /// If a remainder would be produced, illegal behavior occurs.
        /// For integers, overflow is illegal behavior.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        div_exact,
        /// Same as `div_exact` with optimized float mode.
        div_exact_optimized,
        /// Integer or float remainder division.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        rem,
        /// Same as `rem` with optimized float mode.
        rem_optimized,
        /// Integer or float modulus division.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        mod,
        /// Same as `mod` with optimized float mode.
        mod_optimized,
        /// Add an offset, in element type units, to a pointer, returning a new
        /// pointer. Element type may not be zero bits.
        ///
        /// Wrapping is illegal behavior. If the newly computed address is
        /// outside the provenance of the operand, the result is undefined.
        ///
        /// Uses the `ty_pl` field. Payload is `Bin`. The lhs is the pointer,
        /// rhs is the offset. Result type is the same as lhs. The operand may
        /// be a slice.
        ptr_add,
        /// Subtract an offset, in element type units, from a pointer,
        /// returning a new pointer. Element type may not be zero bits.
        ///
        /// Wrapping is illegal behavior. If the newly computed address is
        /// outside the provenance of the operand, the result is undefined.
        ///
        /// Uses the `ty_pl` field. Payload is `Bin`. The lhs is the pointer,
        /// rhs is the offset. Result type is the same as lhs. The operand may
        /// be a slice.
        ptr_sub,
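        // Informal source-level note: pointer arithmetic here is in element units, e.g.
        // `p + 3` on a `[*]u32` advances by three u32 elements (12 bytes), not 3 bytes,
        // and typically lowers to a `ptr_add` whose rhs is the element count.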
        /// Given two operands which can be floats, integers, or vectors, returns the
        /// greater of the operands. For vectors it operates element-wise.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        max,
        /// Given two operands which can be floats, integers, or vectors, returns the
        /// lesser of the operands. For vectors it operates element-wise.
        /// Both operands are guaranteed to be the same type, and the result type
        /// is the same as both operands.
        /// Uses the `bin_op` field.
        min,
        /// Integer addition with overflow. Both operands are guaranteed to be the same type,
        /// and the result is a tuple with .{res, ov}. The wrapped value is written to res
        /// and if an overflow happens, ov is 1. Otherwise ov is 0.
        /// Uses the `ty_pl` field. Payload is `Bin`.
        add_with_overflow,
        /// Integer subtraction with overflow. Both operands are guaranteed to be the same type,
        /// and the result is a tuple with .{res, ov}. The wrapped value is written to res
        /// and if an overflow happens, ov is 1. Otherwise ov is 0.
        /// Uses the `ty_pl` field. Payload is `Bin`.
        sub_with_overflow,
        /// Integer multiplication with overflow. Both operands are guaranteed to be the same type,
        /// and the result is a tuple with .{res, ov}. The wrapped value is written to res
        /// and if an overflow happens, ov is 1. Otherwise ov is 0.
        /// Uses the `ty_pl` field. Payload is `Bin`.
        mul_with_overflow,
        /// Integer left-shift with overflow. Both operands are guaranteed to be the same type,
        /// and the result is a tuple with .{res, ov}. The wrapped value is written to res
        /// and if an overflow happens, ov is 1. Otherwise ov is 0.
        /// Uses the `ty_pl` field. Payload is `Bin`.
        shl_with_overflow,
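        // The result tuple mirrors the language-level overflow builtins (an informal sketch):
        //
        //     const res, const ov = @addWithOverflow(a, b); // ov is a u1: 1 on overflow
        //
        // `add_with_overflow` yields the same `.{ res, ov }` pair as a single AIR value.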
        /// Allocates stack local memory.
        /// Uses the `ty` field.
        alloc,
        /// This special instruction only exists temporarily during semantic
        /// analysis and is guaranteed to be unreachable in machine code
        /// backends. It tracks a set of types that have been stored to an
        /// inferred allocation.
        /// Uses the `inferred_alloc` field.
        inferred_alloc,
        /// This special instruction only exists temporarily during semantic
        /// analysis and is guaranteed to be unreachable in machine code
        /// backends. Used to coordinate alloc_inferred, store_to_inferred_ptr,
        /// and resolve_inferred_alloc instructions for comptime code.
        /// Uses the `inferred_alloc_comptime` field.
        inferred_alloc_comptime,
        /// If the function will pass the result by-ref, this instruction returns the
        /// result pointer. Otherwise it is equivalent to `alloc`.
        /// Uses the `ty` field.
        ret_ptr,
        /// Inline assembly. Uses the `ty_pl` field. Payload is `Asm`.
        assembly,
        /// Bitwise AND. `&`.
        /// Result type is the same as both operands.
        /// Uses the `bin_op` field.
        bit_and,
        /// Bitwise OR. `|`.
        /// Result type is the same as both operands.
        /// Uses the `bin_op` field.
        bit_or,
        /// Shift right. `>>`
        /// The rhs type may be a scalar version of the lhs type.
        /// Uses the `bin_op` field.
        shr,
        /// Shift right. The shift produces a poison value if it shifts out any non-zero bits.
        /// The rhs type may be a scalar version of the lhs type.
        /// Uses the `bin_op` field.
        shr_exact,
        /// Shift left. `<<`
        /// The rhs type may be a scalar version of the lhs type.
        /// Uses the `bin_op` field.
        shl,
        /// Shift left; for unsigned integers, the shift produces a poison value if it shifts
        /// out any non-zero bits. For signed integers, the shift produces a poison value if
        /// it shifts out any bits that disagree with the resultant sign bit.
        /// The rhs type may be a scalar version of the lhs type.
        /// Uses the `bin_op` field.
        shl_exact,
        /// Saturating integer shift left. `<<|`. The result is the same type as the `lhs`.
        /// The `rhs` must have the same vector shape as the `lhs`, but with any unsigned
        /// integer as the scalar type.
        /// The rhs type may be a scalar version of the lhs type.
        /// Uses the `bin_op` field.
        shl_sat,
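        // Source-level correspondence for the shift family (informal): `shl` and `shr` come
        // from `<<` and `>>`, `shl_sat` from `<<|`, and the `_exact` variants from the
        // `@shlExact` / `@shrExact` builtins, which assert that no bits are shifted out.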
        /// Bitwise XOR. `^`
        /// Uses the `bin_op` field.
        xor,
        /// Boolean or binary NOT.
        /// Uses the `ty_op` field.
        not,
        /// Reinterpret the bits of a value as a different type.  This is like `@bitCast` but
        /// also supports enums and pointers.
        /// Uses the `ty_op` field.
        bitcast,
        /// Uses the `ty_pl` field with payload `Block`.  A block runs its body which always ends
        /// with a `noreturn` instruction, so the only way to proceed to the code after the `block`
        /// is to encounter a `br` that targets this `block`.  If the `block` type is `noreturn`,
        /// then there do not exist any `br` instructions targeting this `block`.
        block,
        /// A labeled block of code that loops forever. The body must be `noreturn`: loops
        /// occur through an explicit `repeat` instruction pointing back to this one.
        /// Result type is always `noreturn`; no instructions in a block follow this one.
        /// There is always at least one `repeat` instruction referencing the loop.
        /// Uses the `ty_pl` field. Payload is `Block`.
        loop,
        /// Sends control flow back to the beginning of a parent `loop` body.
        /// Uses the `repeat` field.
        repeat,
        /// Return from a block with a result.
        /// Result type is always noreturn; no instructions in a block follow this one.
        /// Uses the `br` field.
        br,
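        // A rough sketch of how these compose (illustrative only, not exact AIR output): a
        // source-level `while (cond) body` becomes a `block` whose body contains a `loop`;
        // the loop body evaluates `cond` and either runs `body` followed by a `repeat` back
        // to the `loop`, or exits through a `br` that targets the enclosing `block`.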
        /// Lowers to a trap/jam instruction causing program abortion.
        /// This may lower to an instruction known to be invalid.
        /// Sometimes, for the lack of a better instruction, `trap` and `breakpoint` may compile down to the same code.
        /// Result type is always noreturn; no instructions in a block follow this one.
        trap,
        /// Lowers to a trap instruction causing debuggers to break here, or the next best thing.
        /// The debugger or something else may allow the program to resume after this point.
        /// Sometimes, for the lack of a better instruction, `trap` and `breakpoint` may compile down to the same code.
        /// Result type is always void.
        breakpoint,
        /// Yields the return address of the current function.
        /// Uses the `no_op` field.
        ret_addr,
        /// Implements @frameAddress builtin.
        /// Uses the `no_op` field.
        frame_addr,
        /// Function call.
        /// Result type is the return type of the function being called.
        /// Uses the `pl_op` field with the `Call` payload. operand is the callee.
        /// Triggers `resolveTypeLayout` on the return type of the callee.
        call,
        /// Same as `call` except with the `always_tail` attribute.
        call_always_tail,
        /// Same as `call` except with the `never_tail` attribute.
        call_never_tail,
        /// Same as `call` except with the `never_inline` attribute.
        call_never_inline,
        /// Count leading zeroes of an integer according to its representation in twos complement.
        /// Result type will always be an unsigned integer big enough to fit the answer.
        /// Uses the `ty_op` field.
        clz,
        /// Count trailing zeroes of an integer according to its representation in twos complement.
        /// Result type will always be an unsigned integer big enough to fit the answer.
        /// Uses the `ty_op` field.
        ctz,
        /// Count number of 1 bits in an integer according to its representation in twos complement.
        /// Result type will always be an unsigned integer big enough to fit the answer.
        /// Uses the `ty_op` field.
        popcount,
        /// Reverse the bytes in an integer according to its representation in twos complement.
        /// Uses the `ty_op` field.
        byte_swap,
        /// Reverse the bits in an integer according to its representation in twos complement.
        /// Uses the `ty_op` field.
        bit_reverse,

        /// Square root of a floating point number.
        /// Uses the `un_op` field.
        sqrt,
        /// Sine function on a floating point number.
        /// Uses the `un_op` field.
        sin,
        /// Cosine function on a floating point number.
        /// Uses the `un_op` field.
        cos,
        /// Tangent function on a floating point number.
        /// Uses the `un_op` field.
        tan,
        /// Base e exponential of a floating point number.
        /// Uses the `un_op` field.
        exp,
        /// Base 2 exponential of a floating point number.
        /// Uses the `un_op` field.
        exp2,
        /// Natural (base e) logarithm of a floating point number.
        /// Uses the `un_op` field.
        log,
        /// Base 2 logarithm of a floating point number.
        /// Uses the `un_op` field.
        log2,
        /// Base 10 logarithm of a floating point number.
        /// Uses the `un_op` field.
        log10,
        /// Absolute value of an integer, floating point number or vector.
        /// Result type is always unsigned if the operand is an integer.
        /// Uses the `ty_op` field.
        abs,
        /// Floor: rounds a floating point number down to the nearest integer.
        /// Uses the `un_op` field.
        floor,
        /// Ceiling: rounds a floating point number up to the nearest integer.
        /// Uses the `un_op` field.
        ceil,
        /// Rounds a floating point number to the nearest integer.
        /// Uses the `un_op` field.
        round,
        /// Rounds a floating point number to the nearest integer towards zero.
        /// Uses the `un_op` field.
        trunc_float,
        /// Float negation. This affects the sign of zero, inf, and NaN, which is impossible
        /// to do with sub. Integers are not allowed and must be represented with sub with
        /// LHS of zero.
        /// Uses the `un_op` field.
        neg,
        /// Same as `neg` with optimized float mode.
        neg_optimized,

        /// `<`. Result type is always bool.
        /// Uses the `bin_op` field.
        cmp_lt,
        /// Same as `cmp_lt` with optimized float mode.
        cmp_lt_optimized,
        /// `<=`. Result type is always bool.
        /// Uses the `bin_op` field.
        cmp_lte,
        /// Same as `cmp_lte` with optimized float mode.
        cmp_lte_optimized,
        /// `==`. Result type is always bool.
        /// Uses the `bin_op` field.
        cmp_eq,
        /// Same as `cmp_eq` with optimized float mode.
        cmp_eq_optimized,
        /// `>=`. Result type is always bool.
        /// Uses the `bin_op` field.
        cmp_gte,
        /// Same as `cmp_gte` with optimized float mode.
        cmp_gte_optimized,
        /// `>`. Result type is always bool.
        /// Uses the `bin_op` field.
        cmp_gt,
        /// Same as `cmp_gt` with optimized float mode.
        cmp_gt_optimized,
        /// `!=`. Result type is always bool.
        /// Uses the `bin_op` field.
        cmp_neq,
        /// Same as `cmp_neq` with optimized float mode.
        cmp_neq_optimized,
        /// Element-wise comparison of two vectors.
        /// Result type is always a vector of bools.
        /// Uses the `ty_pl` field, payload is `VectorCmp`.
        cmp_vector,
        /// Same as `cmp_vector` with optimized float mode.
        cmp_vector_optimized,

        /// Conditional branch.
        /// Result type is always noreturn; no instructions in a block follow this one.
        /// Uses the `pl_op` field. Operand is the condition. Payload is `CondBr`.
        cond_br,
        /// Switch branch.
        /// Result type is always noreturn; no instructions in a block follow this one.
        /// Uses the `pl_op` field. Operand is the condition. Payload is `SwitchBr`.
        switch_br,
        /// Switch branch which can dispatch back to itself with a different operand.
        /// Result type is always noreturn; no instructions in a block follow this one.
        /// Uses the `pl_op` field. Operand is the condition. Payload is `SwitchBr`.
        loop_switch_br,
        /// Dispatches back to a branch of a parent `loop_switch_br`.
        /// Result type is always noreturn; no instructions in a block follow this one.
        /// Uses the `br` field. `block_inst` is a `loop_switch_br` instruction.
        switch_dispatch,
        /// Given an operand which is an error union, splits control flow. In
        /// case of error, control flow goes into the block that is part of this
        /// instruction, which is guaranteed to end with a return instruction
        /// and never breaks out of the block.
        /// In the case of non-error, control flow proceeds to the next instruction
        /// after the `try`, with the result of this instruction being the unwrapped
        /// payload value, as if `unwrap_errunion_payload` was executed on the operand.
        /// The error branch is considered to have a branch hint of `.unlikely`.
        /// Uses the `pl_op` field. Payload is `Try`.
        @"try",
        /// Same as `try` except the error branch hint is `.cold`.
        try_cold,
        /// Same as `try` except the operand is a pointer to an error union, and the
        /// result is a pointer to the payload. Result is as if `unwrap_errunion_payload_ptr`
        /// was executed on the operand.
        /// Uses the `ty_pl` field. Payload is `TryPtr`.
        try_ptr,
        /// Same as `try_ptr` except the error branch hint is `.cold`.
        try_ptr_cold,
        /// Notes the beginning of a source code statement and marks the line and column.
        /// Result type is always void.
        /// Uses the `dbg_stmt` field.
        dbg_stmt,
        /// Marks a statement that can be stepped to but produces no code.
        dbg_empty_stmt,
        /// A block that represents an inlined function call.
        /// Uses the `ty_pl` field. Payload is `DbgInlineBlock`.
        dbg_inline_block,
        /// Marks the beginning of a local variable. The operand is a pointer pointing
        /// to the storage for the variable. The local may be a const or a var.
        /// Result type is always void.
        /// Uses `pl_op`. The payload index is the variable name. It points to the extra
        /// array, reinterpreting the bytes there as a null-terminated string.
        dbg_var_ptr,
        /// Same as `dbg_var_ptr` except the local is a const, not a var, and the
        /// operand is the local's value.
        dbg_var_val,
        /// Same as `dbg_var_val` except the local is an inline function argument.
        dbg_arg_inline,
        /// ?T => bool
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_null,
        /// ?T => bool (inverted logic)
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_non_null,
        /// *?T => bool
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_null_ptr,
        /// *?T => bool (inverted logic)
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_non_null_ptr,
        /// E!T => bool
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_err,
        /// E!T => bool (inverted logic)
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_non_err,
        /// *E!T => bool
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_err_ptr,
        /// *E!T => bool (inverted logic)
        /// Result type is always bool.
        /// Uses the `un_op` field.
        is_non_err_ptr,
        /// Result type is always bool.
        /// Uses the `bin_op` field.
        bool_and,
        /// Result type is always bool.
        /// Uses the `bin_op` field.
        bool_or,
        /// Read a value from a pointer.
        /// Uses the `ty_op` field.
        load,
        /// Return a value from a function.
        /// Result type is always noreturn; no instructions in a block follow this one.
        /// Uses the `un_op` field.
        /// Triggers `resolveTypeLayout` on the return type.
        ret,
        /// Same as `ret`, except if the operand is undefined, the
        /// returned value is 0xaa bytes, and any other safety metadata
        /// such as Valgrind integrations should be notified of
        /// this value being undefined.
        ret_safe,
        /// This instruction communicates that the function's result value is pointed to by
        /// the operand. If the function will pass the result by-ref, the operand is a
        /// `ret_ptr` instruction. Otherwise, this instruction is equivalent to a `load`
        /// on the operand, followed by a `ret` on the loaded value.
        /// Result type is always noreturn; no instructions in a block follow this one.
        /// Uses the `un_op` field.
        /// Triggers `resolveTypeLayout` on the return type.
        ret_load,
        /// Write a value to a pointer. LHS is pointer, RHS is value.
        /// Result type is always void.
        /// Uses the `bin_op` field.
        /// The value to store may be undefined, in which case the destination
        /// memory region has undefined bytes after this instruction is
        /// evaluated. In such case ignoring this instruction is legal
        /// lowering.
        store,
        /// Same as `store`, except if the value to store is undefined, the
        /// memory region should be filled with 0xaa bytes, and any other
        /// safety metadata such as Valgrind integrations should be notified of
        /// this memory region being undefined.
        store_safe,
        /// Indicates the program counter will never get to this instruction.
        /// Result type is always noreturn; no instructions in a block follow this one.
        unreach,
        /// Convert from a float type to a smaller one.
        /// Uses the `ty_op` field.
        fptrunc,
        /// Convert from a float type to a wider one.
        /// Uses the `ty_op` field.
        fpext,
        /// Returns an integer with a different type than the operand. The new type may have
        /// fewer, the same, or more bits than the operand type. The new type may also
        /// differ in signedness from the operand type. However, the instruction
        /// guarantees that the same integer value fits in both types.
        /// The new type may also be an enum type, in which case the integer cast operates on
        /// the integer tag type of the enum.
        /// See `trunc` for integer truncation.
        /// Uses the `ty_op` field.
        intcast,
        /// Like `intcast`, but includes two safety checks:
        /// * triggers a safety panic if the cast truncates bits
        /// * triggers a safety panic if the destination type is an exhaustive enum
        ///   and the operand is not a valid value of this type; i.e. equivalent to
        ///   a safety check based on `.is_named_enum_value`
        intcast_safe,
        /// Truncate higher bits from an integer, resulting in an integer type with the same
        /// sign but an equal or smaller number of bits.
        /// Uses the `ty_op` field.
        trunc,
        /// ?T => T. If the value is null, illegal behavior.
        /// Uses the `ty_op` field.
        optional_payload,
        /// *?T => *T. If the value is null, illegal behavior.
        /// Uses the `ty_op` field.
        optional_payload_ptr,
        /// *?T => *T. Sets the value to non-null with an undefined payload value.
        /// Uses the `ty_op` field.
        optional_payload_ptr_set,
        /// Given a payload value, wraps it in an optional type.
        /// Uses the `ty_op` field.
        wrap_optional,
        /// E!T -> T. If the value is an error, illegal behavior.
        /// Uses the `ty_op` field.
        unwrap_errunion_payload,
        /// E!T -> E. If the value is not an error, illegal behavior.
        /// Uses the `ty_op` field.
        unwrap_errunion_err,
        /// *(E!T) -> *T. If the value is an error, illegal behavior.
        /// Uses the `ty_op` field.
        unwrap_errunion_payload_ptr,
        /// *(E!T) -> E. If the value is not an error, illegal behavior.
        /// Uses the `ty_op` field.
        unwrap_errunion_err_ptr,
        /// *(E!T) => *T. Sets the value to non-error with an undefined payload value.
        /// Uses the `ty_op` field.
        errunion_payload_ptr_set,
        /// wrap from T to E!T
        /// Uses the `ty_op` field.
        wrap_errunion_payload,
        /// wrap from E to E!T
        /// Uses the `ty_op` field.
        wrap_errunion_err,
        /// Given a pointer to a struct or union and a field index, returns a pointer to the field.
        /// Uses the `ty_pl` field, payload is `StructField`.
        /// TODO rename to `agg_field_ptr`.
        struct_field_ptr,
        /// Given a pointer to a struct or union, returns a pointer to the field.
        /// The field index is the number at the end of the name.
        /// Uses `ty_op` field.
        /// TODO rename to `agg_field_ptr_index_X`
        struct_field_ptr_index_0,
        struct_field_ptr_index_1,
        struct_field_ptr_index_2,
        struct_field_ptr_index_3,
        /// Given a byval struct or union and a field index, returns the field byval.
        /// Uses the `ty_pl` field, payload is `StructField`.
        /// TODO rename to `agg_field_val`
        struct_field_val,
        /// Given a pointer to a tagged union, set its tag to the provided value.
        /// Result type is always void.
        /// Uses the `bin_op` field. LHS is union pointer, RHS is new tag value.
        set_union_tag,
        /// Given a tagged union value, get its tag value.
        /// Uses the `ty_op` field.
        get_union_tag,
        /// Constructs a slice from a pointer and a length.
        /// Uses the `ty_pl` field, payload is `Bin`. lhs is ptr, rhs is len.
        slice,
        /// Given a slice value, return the length.
        /// Result type is always usize.
        /// Uses the `ty_op` field.
        slice_len,
        /// Given a slice value, return the pointer.
        /// Uses the `ty_op` field.
        slice_ptr,
        /// Given a pointer to a slice, return a pointer to the length of the slice.
        /// Uses the `ty_op` field.
        ptr_slice_len_ptr,
        /// Given a pointer to a slice, return a pointer to the pointer of the slice.
        /// Uses the `ty_op` field.
        ptr_slice_ptr_ptr,
        /// Given an (array value or vector value) and element index, return the element value at
        /// that index. If the lhs is a vector value, the index is guaranteed to be comptime-known.
        /// Result type is the element type of the array operand.
        /// Uses the `bin_op` field.
        array_elem_val,
        /// Given a slice value, and element index, return the element value at that index.
        /// Result type is the element type of the slice operand.
        /// Uses the `bin_op` field.
        slice_elem_val,
        /// Given a slice value and element index, return a pointer to the element value at that index.
        /// Result type is a pointer to the element type of the slice operand.
        /// Uses the `ty_pl` field with payload `Bin`.
        slice_elem_ptr,
        /// Given a pointer value, and element index, return the element value at that index.
        /// Result type is the element type of the pointer operand.
        /// Uses the `bin_op` field.
        ptr_elem_val,
        /// Given a pointer value, and element index, return the element pointer at that index.
        /// Result type is pointer to the element type of the pointer operand.
        /// Uses the `ty_pl` field with payload `Bin`.
        ptr_elem_ptr,
        /// Given a pointer to an array, return a slice.
        /// Uses the `ty_op` field.
        array_to_slice,
        /// Given a float operand, return the integer with the closest mathematical meaning.
        /// Uses the `ty_op` field.
        int_from_float,
        /// Same as `int_from_float` with optimized float mode.
        int_from_float_optimized,
        /// Same as `int_from_float`, but with a safety check that the operand is in bounds.
        int_from_float_safe,
        /// Same as `int_from_float_optimized`, but with a safety check that the operand is in bounds.
        int_from_float_optimized_safe,
        /// Given an integer operand, return the float with the closest mathematical meaning.
        /// Uses the `ty_op` field.
        float_from_int,

        /// Transforms a vector into a scalar value by performing a sequential
        /// horizontal reduction of its elements using the specified operator.
        /// The vector element type (and hence result type) will be:
        ///  * and, or, xor       => integer or boolean
        ///  * min, max, add, mul => integer or float
        /// Uses the `reduce` field.
        reduce,
        /// Same as `reduce` with optimized float mode.
        reduce_optimized,
        /// Given an integer, bool, float, or pointer operand, return a vector with all elements
        /// equal to the scalar value.
        /// Uses the `ty_op` field.
        splat,
        /// Constructs a vector by selecting elements from a single vector based on a mask. Each
        /// mask element is either an index into the vector, or a comptime-known value, or "undef".
        /// Uses the `ty_pl` field, where the payload index points to:
        /// 1. mask_elem: ShuffleOneMask  // repeated `mask_len` times, where `mask_len` comes from `ty_pl.ty`
        /// 2. operand: Ref               // guaranteed not to be an interned value
        /// See `unwrapShuffleOne`.
        shuffle_one,
        /// Constructs a vector by selecting elements from two vectors based on a mask. Each mask
        /// element is either an index into one of the vectors, or "undef".
        /// Uses the `ty_pl` field, where the payload index points to:
        /// 1. mask_elem: ShuffleTwoMask  // repeated `mask_len` times, where `mask_len` comes from `ty_pl.ty`
        /// 2. operand_a: Ref             // guaranteed not to be an interned value
        /// 3. operand_b: Ref             // guaranteed not to be an interned value
        /// See `unwrapShuffleTwo`.
        shuffle_two,
        /// Constructs a vector element-wise from `a` or `b` based on `pred`.
        /// Uses the `pl_op` field with `pred` as operand, and payload `Bin`.
        select,

        /// Given dest pointer and value, set all elements at dest to value.
        /// Dest pointer is either a slice or a pointer to array.
        /// The element type may be any type, and the slice may have any alignment.
        /// Result type is always void.
        /// Uses the `bin_op` field. LHS is the dest slice. RHS is the element value.
        /// The element value may be undefined, in which case the destination
        /// memory region has undefined bytes after this instruction is
        /// evaluated. In such case ignoring this instruction is legal
        /// lowering.
        /// If the length is compile-time known (due to the destination being a
        /// pointer-to-array), then it is guaranteed to be greater than zero.
        memset,
        /// Same as `memset`, except if the element value is undefined, the memory region
        /// should be filled with 0xaa bytes, and any other safety metadata such as Valgrind
        /// integrations should be notified of this memory region being undefined.
        memset_safe,
        /// Given dest pointer and source pointer, copy elements from source to dest.
        /// Dest pointer is either a slice or a pointer to array.
        /// The dest element type may be any type.
        /// Source pointer must have same element type as dest element type.
        /// Dest slice may have any alignment; source pointer may have any alignment.
        /// The two memory regions must not overlap.
        /// Result type is always void.
        ///
        /// Uses the `bin_op` field. LHS is the dest slice. RHS is the source pointer.
        ///
        /// If the length is compile-time known (due to the destination or
        /// source being a pointer-to-array), then it is guaranteed to be
        /// greater than zero.
        memcpy,
        /// Given dest pointer and source pointer, copy elements from source to dest.
        /// Dest pointer is either a slice or a pointer to array.
        /// The dest element type may be any type.
        /// Source pointer must have same element type as dest element type.
        /// Dest slice may have any alignment; source pointer may have any alignment.
        /// The two memory regions may overlap.
        /// Result type is always void.
        ///
        /// Uses the `bin_op` field. LHS is the dest slice. RHS is the source pointer.
        ///
        /// If the length is compile-time known (due to the destination or
        /// source being a pointer-to-array), then it is guaranteed to be
        /// greater than zero.
        memmove,

        /// Uses the `ty_pl` field with payload `Cmpxchg`.
        cmpxchg_weak,
        /// Uses the `ty_pl` field with payload `Cmpxchg`.
        cmpxchg_strong,
        /// Atomically load from a pointer.
        /// Result type is the element type of the pointer.
        /// Uses the `atomic_load` field.
        atomic_load,
        /// Atomically store through a pointer.
        /// Result type is always `void`.
        /// Uses the `bin_op` field. LHS is pointer, RHS is element.
        atomic_store_unordered,
        /// Same as `atomic_store_unordered` but with `AtomicOrder.monotonic`.
        atomic_store_monotonic,
        /// Same as `atomic_store_unordered` but with `AtomicOrder.release`.
        atomic_store_release,
        /// Same as `atomic_store_unordered` but with `AtomicOrder.seq_cst`.
        atomic_store_seq_cst,
        /// Atomically read-modify-write via a pointer.
        /// Result type is the element type of the pointer.
        /// Uses the `pl_op` field with payload `AtomicRmw`. Operand is `ptr`.
        atomic_rmw,

        /// Returns true if enum tag value has a name.
        /// Uses the `un_op` field.
        is_named_enum_value,

        /// Given an enum tag value, returns the tag name. The enum type may be non-exhaustive.
        /// Result type is always `[:0]const u8`.
        /// Uses the `un_op` field.
        tag_name,

        /// Given an error value, return the error name. Result type is always `[:0]const u8`.
        /// Uses the `un_op` field.
        error_name,

        /// Returns true if error set has error with value.
        /// Uses the `ty_op` field.
        error_set_has_value,

        /// Constructs a vector, tuple, struct, or array value out of runtime-known elements.
        /// Some of the elements may be comptime-known.
        /// Uses the `ty_pl` field, payload is index of an array of elements, each of which
        /// is a `Ref`. Length of the array is given by the vector type.
        /// If the type is an array with a sentinel, the AIR elements do not include it
        /// explicitly.
        aggregate_init,

        /// Constructs a union from a field index and a runtime-known init value.
        /// Uses the `ty_pl` field with payload `UnionInit`.
        union_init,

        /// Communicates an intent to load memory.
        /// Result is always unused.
        /// Uses the `prefetch` field.
        prefetch,

        /// Computes `(a * b) + c`, but only rounds once.
        /// Uses the `pl_op` field with payload `Bin`.
        /// The operand is the addend. The mulends are lhs and rhs.
        mul_add,
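        // Operand mapping, restated with a concrete builtin call (informal): for source-level
        // `@mulAdd(f64, a, b, c)`, `lhs` and `rhs` of the `Bin` payload are `a` and `b`, and
        // the `pl_op` operand is the addend `c`.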

        /// Implements @fieldParentPtr builtin.
        /// Uses the `ty_pl` field.
        field_parent_ptr,

        /// Implements @wasmMemorySize builtin.
        /// Result type is always `usize`.
        /// Uses the `pl_op` field, payload represents the index of the target memory.
        /// The operand is unused and always set to `Ref.none`.
        wasm_memory_size,

        /// Implements @wasmMemoryGrow builtin.
        /// Result type is always `isize`.
        /// Uses the `pl_op` field, payload represents the index of the target memory.
        wasm_memory_grow,

        /// Returns `true` if and only if the operand, an integer with
        /// the same size as the error integer type, is less than the
        /// total number of errors in the Module.
        /// Result type is always `bool`.
        /// Uses the `un_op` field.
        /// Note that the number of errors in the Module cannot be considered stable until
        /// flush().
        cmp_lt_errors_len,

        /// Returns pointer to current error return trace.
        err_return_trace,

        /// Sets the operand as the current error return trace.
        set_err_return_trace,

        /// Convert the address space of a pointer.
        /// Uses the `ty_op` field.
        addrspace_cast,

        /// Saves the error return trace index, if any. Otherwise, returns 0.
        /// Uses the `ty_pl` field.
        save_err_return_trace_index,

        /// Compute a pointer to a `Nav` at runtime, always one of:
        ///
        /// * `threadlocal var`
        /// * `extern threadlocal var` (or corresponding `@extern`)
        /// * `@extern` with `.is_dll_import = true`
        /// * `@extern` with `.relocation = .pcrel`
        ///
        /// Such pointers are runtime values, so cannot be represented with an InternPool index.
        ///
        /// Uses the `ty_nav` field.
        runtime_nav_ptr,

        /// Implements @cVaArg builtin.
        /// Uses the `ty_op` field.
        c_va_arg,
        /// Implements @cVaCopy builtin.
        /// Uses the `ty_op` field.
        c_va_copy,
        /// Implements @cVaEnd builtin.
        /// Uses the `un_op` field.
        c_va_end,
        /// Implements @cVaStart builtin.
        /// Uses the `ty` field.
        c_va_start,

        /// Implements @workItemId builtin.
        /// Result type is always `u32`
        /// Uses the `pl_op` field, payload is the dimension to get the work item id for.
        /// Operand is unused and set to Ref.none
        work_item_id,
        /// Implements @workGroupSize builtin.
        /// Result type is always `u32`
        /// Uses the `pl_op` field, payload is the dimension to get the work group size for.
        /// Operand is unused and set to Ref.none
        work_group_size,
        /// Implements @workGroupId builtin.
        /// Result type is always `u32`
        /// Uses the `pl_op` field, payload is the dimension to get the work group id for.
        /// Operand is unused and set to Ref.none
        work_group_id,

        // The remaining instructions are not emitted by Sema. They are only emitted by `Legalize`,
        // depending on the enabled features. As such, backends can consider them `unreachable` if
        // they do not enable the relevant legalizations.

        /// Given a pointer to a vector, a runtime-known index, and a scalar value, store the value
        /// into the vector at the given index. Zig does not support this operation, but `Legalize`
        /// may emit it when scalarizing vector operations.
        ///
        /// Uses the `pl_op` field with payload `Bin`. `operand` is the vector pointer. `lhs` is the
        /// element index of type `usize`. `rhs` is the element value. Result is always void.
        legalize_vec_store_elem,
        /// Given a vector value and a runtime-known index, return the element value at that index.
        /// This instruction is similar to `array_elem_val`; the only difference is that the index
        /// here is runtime-known, which is usually not allowed for vectors. `Legalize` may emit
        /// this instruction when scalarizing vector operations.
        ///
        /// Uses the `bin_op` field. `lhs` is the vector, `rhs` is the element index. Result
        /// type is the vector element type.
        legalize_vec_elem_val,

        /// A call to a compiler_rt routine. `Legalize` may emit this instruction if any soft-float
        /// legalizations are enabled.
        ///
        /// Uses the `legalize_compiler_rt_call` union field.
        ///
        /// The name of the function symbol is given by `func.name(target)`.
        /// The calling convention is given by `func.@"callconv"(target)`.
        /// The return type (and hence the result type of this instruction) is `func.returnType()`.
        /// The parameter types are the types of the arguments given in `Air.Call`.
        legalize_compiler_rt_call,
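        // For example (illustrative; the routine chosen depends on the target and on which
        // legalizations are enabled), soft-float legalization of an `f64` `add` could become
        // a `legalize_compiler_rt_call` to a routine such as `__adddf3`.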

        pub fn fromCmpOp(op: std.math.CompareOperator, optimized: bool) Tag {
            switch (op) {
                .lt => return if (optimized) .cmp_lt_optimized else .cmp_lt,
                .lte => return if (optimized) .cmp_lte_optimized else .cmp_lte,
                .eq => return if (optimized) .cmp_eq_optimized else .cmp_eq,
                .gte => return if (optimized) .cmp_gte_optimized else .cmp_gte,
                .gt => return if (optimized) .cmp_gt_optimized else .cmp_gt,
                .neq => return if (optimized) .cmp_neq_optimized else .cmp_neq,
            }
        }

        pub fn toCmpOp(tag: Tag) ?std.math.CompareOperator {
            return switch (tag) {
                .cmp_lt, .cmp_lt_optimized => .lt,
                .cmp_lte, .cmp_lte_optimized => .lte,
                .cmp_eq, .cmp_eq_optimized => .eq,
                .cmp_gte, .cmp_gte_optimized => .gte,
                .cmp_gt, .cmp_gt_optimized => .gt,
                .cmp_neq, .cmp_neq_optimized => .neq,
                else => null,
            };
        }
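
        // Round-trip behavior of the helpers above (informal examples):
        //
        //     assert(Tag.fromCmpOp(.lt, false) == .cmp_lt);
        //     assert(Tag.cmp_lt_optimized.toCmpOp().? == .lt);
        //     assert(Tag.add.toCmpOp() == null);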
    };

    /// The position of an AIR instruction within the `Air` instructions array.
    pub const Index = enum(u32) {
        _,

        pub fn unwrap(index: Index) union(enum) { ref: Inst.Ref, target: u31 } {
            const low_index: u31 = @truncate(@intFromEnum(index));
            return switch (@as(u1, @intCast(@intFromEnum(index) >> 31))) {
                0 => .{ .ref = @enumFromInt(@as(u32, 1 << 31) | low_index) },
                1 => .{ .target = low_index },
            };
        }

        pub fn toRef(index: Index) Inst.Ref {
            return index.unwrap().ref;
        }

        pub fn fromTargetIndex(index: u31) Index {
            return @enumFromInt((1 << 31) | @as(u32, index));
        }

        pub fn toTargetIndex(index: Index) u31 {
            return index.unwrap().target;
        }

        pub fn format(index: Index, w: *std.Io.Writer) std.Io.Writer.Error!void {
            try w.writeByte('%');
            switch (index.unwrap()) {
                .ref => {},
                .target => try w.writeByte('t'),
            }
            try w.print("{d}", .{@as(u31, @truncate(@intFromEnum(index)))});
        }
    };
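    // A minimal sketch of the `Index`/`Ref` packing implemented above (illustrative):
    //
    //     const plain: Index = @enumFromInt(5); // msb clear: an ordinary instruction index
    //     assert(@intFromEnum(plain.toRef()) == (1 << 31) | 5);
    //     const tgt = Index.fromTargetIndex(7); // msb set: a "target" index
    //     assert(tgt.unwrap().target == 7);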

    /// Either a reference to a value stored in the InternPool, or a reference to an AIR instruction.
    /// The most-significant bit of the value is a tag bit. This bit is 1 if the value represents an
    /// instruction index and 0 if it represents an InternPool index.
    ///
    /// The ref `none` is an exception: it has the tag bit set but refers to the InternPool.
    pub const Ref = enum(u32) {
        u0_type = @intFromEnum(InternPool.Index.u0_type),
        i0_type = @intFromEnum(InternPool.Index.i0_type),
        u1_type = @intFromEnum(InternPool.Index.u1_type),
        u8_type = @intFromEnum(InternPool.Index.u8_type),
        i8_type = @intFromEnum(InternPool.Index.i8_type),
        u16_type = @intFromEnum(InternPool.Index.u16_type),
        i16_type = @intFromEnum(InternPool.Index.i16_type),
        u29_type = @intFromEnum(InternPool.Index.u29_type),
        u32_type = @intFromEnum(InternPool.Index.u32_type),
        i32_type = @intFromEnum(InternPool.Index.i32_type),
        u64_type = @intFromEnum(InternPool.Index.u64_type),
        i64_type = @intFromEnum(InternPool.Index.i64_type),
        u80_type = @intFromEnum(InternPool.Index.u80_type),
        u128_type = @intFromEnum(InternPool.Index.u128_type),
        i128_type = @intFromEnum(InternPool.Index.i128_type),
        u256_type = @intFromEnum(InternPool.Index.u256_type),
        usize_type = @intFromEnum(InternPool.Index.usize_type),
        isize_type = @intFromEnum(InternPool.Index.isize_type),
        c_char_type = @intFromEnum(InternPool.Index.c_char_type),
        c_short_type = @intFromEnum(InternPool.Index.c_short_type),
        c_ushort_type = @intFromEnum(InternPool.Index.c_ushort_type),
        c_int_type = @intFromEnum(InternPool.Index.c_int_type),
        c_uint_type = @intFromEnum(InternPool.Index.c_uint_type),
        c_long_type = @intFromEnum(InternPool.Index.c_long_type),
        c_ulong_type = @intFromEnum(InternPool.Index.c_ulong_type),
        c_longlong_type = @intFromEnum(InternPool.Index.c_longlong_type),
        c_ulonglong_type = @intFromEnum(InternPool.Index.c_ulonglong_type),
        c_longdouble_type = @intFromEnum(InternPool.Index.c_longdouble_type),
        f16_type = @intFromEnum(InternPool.Index.f16_type),
        f32_type = @intFromEnum(InternPool.Index.f32_type),
        f64_type = @intFromEnum(InternPool.Index.f64_type),
        f80_type = @intFromEnum(InternPool.Index.f80_type),
        f128_type = @intFromEnum(InternPool.Index.f128_type),
        anyopaque_type = @intFromEnum(InternPool.Index.anyopaque_type),
        bool_type = @intFromEnum(InternPool.Index.bool_type),
        void_type = @intFromEnum(InternPool.Index.void_type),
        type_type = @intFromEnum(InternPool.Index.type_type),
        anyerror_type = @intFromEnum(InternPool.Index.anyerror_type),
        comptime_int_type = @intFromEnum(InternPool.Index.comptime_int_type),
        comptime_float_type = @intFromEnum(InternPool.Index.comptime_float_type),
        noreturn_type = @intFromEnum(InternPool.Index.noreturn_type),
        anyframe_type = @intFromEnum(InternPool.Index.anyframe_type),
        null_type = @intFromEnum(InternPool.Index.null_type),
        undefined_type = @intFromEnum(InternPool.Index.undefined_type),
        enum_literal_type = @intFromEnum(InternPool.Index.enum_literal_type),
        ptr_usize_type = @intFromEnum(InternPool.Index.ptr_usize_type),
        ptr_const_comptime_int_type = @intFromEnum(InternPool.Index.ptr_const_comptime_int_type),
        manyptr_u8_type = @intFromEnum(InternPool.Index.manyptr_u8_type),
        manyptr_const_u8_type = @intFromEnum(InternPool.Index.manyptr_const_u8_type),
        manyptr_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.manyptr_const_u8_sentinel_0_type),
        slice_const_u8_type = @intFromEnum(InternPool.Index.slice_const_u8_type),
        slice_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.slice_const_u8_sentinel_0_type),
        manyptr_const_slice_const_u8_type = @intFromEnum(InternPool.Index.manyptr_const_slice_const_u8_type),
        slice_const_slice_const_u8_type = @intFromEnum(InternPool.Index.slice_const_slice_const_u8_type),
        optional_type_type = @intFromEnum(InternPool.Index.optional_type_type),
        manyptr_const_type_type = @intFromEnum(InternPool.Index.manyptr_const_type_type),
        slice_const_type_type = @intFromEnum(InternPool.Index.slice_const_type_type),
        vector_8_i8_type = @intFromEnum(InternPool.Index.vector_8_i8_type),
        vector_16_i8_type = @intFromEnum(InternPool.Index.vector_16_i8_type),
        vector_32_i8_type = @intFromEnum(InternPool.Index.vector_32_i8_type),
        vector_64_i8_type = @intFromEnum(InternPool.Index.vector_64_i8_type),
        vector_1_u8_type = @intFromEnum(InternPool.Index.vector_1_u8_type),
        vector_2_u8_type = @intFromEnum(InternPool.Index.vector_2_u8_type),
        vector_4_u8_type = @intFromEnum(InternPool.Index.vector_4_u8_type),
        vector_8_u8_type = @intFromEnum(InternPool.Index.vector_8_u8_type),
        vector_16_u8_type = @intFromEnum(InternPool.Index.vector_16_u8_type),
        vector_32_u8_type = @intFromEnum(InternPool.Index.vector_32_u8_type),
        vector_64_u8_type = @intFromEnum(InternPool.Index.vector_64_u8_type),
        vector_2_i16_type = @intFromEnum(InternPool.Index.vector_2_i16_type),
        vector_4_i16_type = @intFromEnum(InternPool.Index.vector_4_i16_type),
        vector_8_i16_type = @intFromEnum(InternPool.Index.vector_8_i16_type),
        vector_16_i16_type = @intFromEnum(InternPool.Index.vector_16_i16_type),
        vector_32_i16_type = @intFromEnum(InternPool.Index.vector_32_i16_type),
        vector_4_u16_type = @intFromEnum(InternPool.Index.vector_4_u16_type),
        vector_8_u16_type = @intFromEnum(InternPool.Index.vector_8_u16_type),
        vector_16_u16_type = @intFromEnum(InternPool.Index.vector_16_u16_type),
        vector_32_u16_type = @intFromEnum(InternPool.Index.vector_32_u16_type),
        vector_2_i32_type = @intFromEnum(InternPool.Index.vector_2_i32_type),
        vector_4_i32_type = @intFromEnum(InternPool.Index.vector_4_i32_type),
        vector_8_i32_type = @intFromEnum(InternPool.Index.vector_8_i32_type),
        vector_16_i32_type = @intFromEnum(InternPool.Index.vector_16_i32_type),
        vector_4_u32_type = @intFromEnum(InternPool.Index.vector_4_u32_type),
        vector_8_u32_type = @intFromEnum(InternPool.Index.vector_8_u32_type),
        vector_16_u32_type = @intFromEnum(InternPool.Index.vector_16_u32_type),
        vector_2_i64_type = @intFromEnum(InternPool.Index.vector_2_i64_type),
        vector_4_i64_type = @intFromEnum(InternPool.Index.vector_4_i64_type),
        vector_8_i64_type = @intFromEnum(InternPool.Index.vector_8_i64_type),
        vector_2_u64_type = @intFromEnum(InternPool.Index.vector_2_u64_type),
        vector_4_u64_type = @intFromEnum(InternPool.Index.vector_4_u64_type),
        vector_8_u64_type = @intFromEnum(InternPool.Index.vector_8_u64_type),
        vector_1_u128_type = @intFromEnum(InternPool.Index.vector_1_u128_type),
        vector_2_u128_type = @intFromEnum(InternPool.Index.vector_2_u128_type),
        vector_1_u256_type = @intFromEnum(InternPool.Index.vector_1_u256_type),
        vector_4_f16_type = @intFromEnum(InternPool.Index.vector_4_f16_type),
        vector_8_f16_type = @intFromEnum(InternPool.Index.vector_8_f16_type),
        vector_16_f16_type = @intFromEnum(InternPool.Index.vector_16_f16_type),
        vector_32_f16_type = @intFromEnum(InternPool.Index.vector_32_f16_type),
        vector_2_f32_type = @intFromEnum(InternPool.Index.vector_2_f32_type),
        vector_4_f32_type = @intFromEnum(InternPool.Index.vector_4_f32_type),
        vector_8_f32_type = @intFromEnum(InternPool.Index.vector_8_f32_type),
        vector_16_f32_type = @intFromEnum(InternPool.Index.vector_16_f32_type),
        vector_2_f64_type = @intFromEnum(InternPool.Index.vector_2_f64_type),
        vector_4_f64_type = @intFromEnum(InternPool.Index.vector_4_f64_type),
1116        vector_8_f64_type = @intFromEnum(InternPool.Index.vector_8_f64_type),
1117        optional_noreturn_type = @intFromEnum(InternPool.Index.optional_noreturn_type),
1118        anyerror_void_error_union_type = @intFromEnum(InternPool.Index.anyerror_void_error_union_type),
1119        adhoc_inferred_error_set_type = @intFromEnum(InternPool.Index.adhoc_inferred_error_set_type),
1120        generic_poison_type = @intFromEnum(InternPool.Index.generic_poison_type),
1121        empty_tuple_type = @intFromEnum(InternPool.Index.empty_tuple_type),
1122        undef = @intFromEnum(InternPool.Index.undef),
1123        undef_bool = @intFromEnum(InternPool.Index.undef_bool),
1124        undef_usize = @intFromEnum(InternPool.Index.undef_usize),
1125        undef_u1 = @intFromEnum(InternPool.Index.undef_u1),
1126        zero = @intFromEnum(InternPool.Index.zero),
1127        zero_usize = @intFromEnum(InternPool.Index.zero_usize),
1128        zero_u1 = @intFromEnum(InternPool.Index.zero_u1),
1129        zero_u8 = @intFromEnum(InternPool.Index.zero_u8),
1130        one = @intFromEnum(InternPool.Index.one),
1131        one_usize = @intFromEnum(InternPool.Index.one_usize),
1132        one_u1 = @intFromEnum(InternPool.Index.one_u1),
1133        one_u8 = @intFromEnum(InternPool.Index.one_u8),
1134        four_u8 = @intFromEnum(InternPool.Index.four_u8),
1135        negative_one = @intFromEnum(InternPool.Index.negative_one),
1136        void_value = @intFromEnum(InternPool.Index.void_value),
1137        unreachable_value = @intFromEnum(InternPool.Index.unreachable_value),
1138        null_value = @intFromEnum(InternPool.Index.null_value),
1139        bool_true = @intFromEnum(InternPool.Index.bool_true),
1140        bool_false = @intFromEnum(InternPool.Index.bool_false),
1141        empty_tuple = @intFromEnum(InternPool.Index.empty_tuple),
1142
1143        /// This Ref does not correspond to any AIR instruction or constant
1144        /// value and may instead be used as a sentinel to indicate null.
1145        none = @intFromEnum(InternPool.Index.none),
1146        _,
1147
1148        pub fn toInterned(ref: Ref) ?InternPool.Index {
1149            assert(ref != .none);
1150            return ref.toInternedAllowNone();
1151        }
1152
1153        pub fn toInternedAllowNone(ref: Ref) ?InternPool.Index {
1154            return switch (ref) {
1155                .none => .none,
1156                else => if (@intFromEnum(ref) >> 31 == 0)
1157                    @enumFromInt(@as(u31, @truncate(@intFromEnum(ref))))
1158                else
1159                    null,
1160            };
1161        }
1162
1163        pub fn toIndex(ref: Ref) ?Index {
1164            assert(ref != .none);
1165            return ref.toIndexAllowNone();
1166        }
1167
1168        pub fn toIndexAllowNone(ref: Ref) ?Index {
1169            return switch (ref) {
1170                .none => null,
1171                else => if (@intFromEnum(ref) >> 31 != 0)
1172                    @enumFromInt(@as(u31, @truncate(@intFromEnum(ref))))
1173                else
1174                    null,
1175            };
1176        }
1177
1178        pub fn toType(ref: Ref) Type {
1179            return .fromInterned(ref.toInterned().?);
1180        }
1181
1182        pub fn fromIntern(ip_index: InternPool.Index) Ref {
1183            return switch (ip_index) {
1184                .none => .none,
1185                else => {
1186                    assert(@intFromEnum(ip_index) >> 31 == 0);
1187                    return @enumFromInt(@as(u31, @intCast(@intFromEnum(ip_index))));
1188                },
1189            };
1190        }
1191
1192        pub fn fromValue(v: Value) Ref {
1193            return .fromIntern(v.toIntern());
1194        }
1195
1196        pub fn fromType(t: Type) Ref {
1197            return .fromIntern(t.toIntern());
1198        }
1199    };
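        // A minimal sketch of the encoding implemented by the `Ref` helpers above: bit 31
        // distinguishes interned constants (bit clear; the low 31 bits are an
        // `InternPool.Index`) from AIR instructions (bit set; the low 31 bits are an
        // `Inst.Index`). For example:
        //
        //   const ref = internedToRef(.bool_true);
        //   assert(ref.toInterned().? == .bool_true);
        //   assert(ref.toIndex() == null); // a constant, not an instruction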
1200
1201    /// All instructions have an 8-byte payload, which is contained within
1202    /// this union. `Tag` determines which union field is active, as well as
1203    /// how to interpret the data within.
1204    pub const Data = union {
1205        no_op: void,
1206        un_op: Ref,
1207
1208        bin_op: struct {
1209            lhs: Ref,
1210            rhs: Ref,
1211        },
1212        ty: Type,
1213        arg: struct {
1214            ty: Ref,
1215            zir_param_index: u32,
1216        },
1217        ty_op: struct {
1218            ty: Ref,
1219            operand: Ref,
1220        },
1221        ty_pl: struct {
1222            ty: Ref,
1223        // Index into `extra`.
1224            payload: u32,
1225        },
1226        br: struct {
1227            block_inst: Index,
1228            operand: Ref,
1229        },
1230        repeat: struct {
1231            loop_inst: Index,
1232        },
1233        pl_op: struct {
1234            operand: Ref,
1235            payload: u32,
1236        },
1237        dbg_stmt: struct {
1238            line: u32,
1239            column: u32,
1240        },
1241        atomic_load: struct {
1242            ptr: Ref,
1243            order: std.builtin.AtomicOrder,
1244        },
1245        prefetch: struct {
1246            ptr: Ref,
1247            rw: std.builtin.PrefetchOptions.Rw,
1248            locality: u2,
1249            cache: std.builtin.PrefetchOptions.Cache,
1250        },
1251        reduce: struct {
1252            operand: Ref,
1253            operation: std.builtin.ReduceOp,
1254        },
1255        ty_nav: struct {
1256            ty: InternPool.Index,
1257            nav: InternPool.Nav.Index,
1258        },
1259        legalize_compiler_rt_call: struct {
1260            func: CompilerRtFunc,
1261            /// Index into `extra` to a payload of type `Call`.
1262            payload: u32,
1263        },
1264        inferred_alloc_comptime: InferredAllocComptime,
1265        inferred_alloc: InferredAlloc,
1266
1267        pub const InferredAllocComptime = struct {
1268            alignment: InternPool.Alignment,
1269            is_const: bool,
1270            /// This is `undefined` until we encounter a `store_to_inferred_alloc`,
1271            /// at which point the pointer is created and stored here.
1272            ptr: InternPool.Index,
1273        };
1274
1275        pub const InferredAlloc = struct {
1276            alignment: InternPool.Alignment,
1277            is_const: bool,
1278        };
1279
1280        // Make sure we don't accidentally add a field to make this union
1281        // bigger than expected. Note that in safety builds, Zig is allowed
1282        // to insert a secret field for safety checks.
1283        comptime {
1284            if (!std.debug.runtime_safety) {
1285                assert(@sizeOf(Data) == 8);
1286            }
1287        }
1288    };
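        // A minimal sketch of how consumers read this union: the instruction's `Tag`
        // names the active field, so e.g. an `add` instruction, which uses `bin_op`,
        // is decoded as:
        //
        //   const bin_op = air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
        //   const lhs_ty = air.typeOf(bin_op.lhs, ip);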
1289};
1290
1291/// Trailing is a list of instruction indexes for every `body_len`.
1292pub const Block = struct {
1293    body_len: u32,
1294};
1295
1296/// Trailing is a list of instruction indexes for every `body_len`.
1297pub const DbgInlineBlock = struct {
1298    func: InternPool.Index,
1299    body_len: u32,
1300};
1301
1302/// Trailing is a list of `Inst.Ref` for every `args_len`.
1303pub const Call = struct {
1304    args_len: u32,
1305};
1306
1307/// This data is stored inside extra, with two sets of trailing `Inst.Index`:
1308/// * 0. the then body, according to `then_body_len`.
1309/// * 1. the else body, according to `else_body_len`.
1310pub const CondBr = struct {
1311    then_body_len: u32,
1312    else_body_len: u32,
1313    branch_hints: BranchHints,
1314    pub const BranchHints = packed struct(u32) {
1315        true: std.builtin.BranchHint = .none,
1316        false: std.builtin.BranchHint = .none,
1317        then_cov: CoveragePoint = .none,
1318        else_cov: CoveragePoint = .none,
1319        _: u24 = 0,
1320    };
1321};
1322
1323/// Trailing:
1324/// * 0. `BranchHint` for each `cases_len + 1`. bit-packed into `u32`
1325///      elems such that each `u32` contains up to 10x `BranchHint`.
1326///      LSBs are first case. Final hint is `else`.
1327/// * 1. `Case` for each `cases_len`
1328/// * 2. the else body, according to `else_body_len`.
1329pub const SwitchBr = struct {
1330    cases_len: u32,
1331    else_body_len: u32,
1332
1333    /// Trailing:
1334    /// * item: Inst.Ref // for each `items_len`
1335    /// * { range_start: Inst.Ref, range_end: Inst.Ref } // for each `ranges_len`
1336    /// * body_inst: Inst.Index // for each `body_len`
1337    pub const Case = struct {
1338        items_len: u32,
1339        ranges_len: u32,
1340        body_len: u32,
1341    };
1342};
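    // A worked example of the hint packing described above (sketch): with
    // `cases_len == 11` there are 12 hints (one per case plus the final `else`), so
    // two `u32` bags trail the struct. Hint `i` occupies the 3 bits starting at bit
    // `3 * (i % 10)` of bag `i / 10`; `UnwrappedSwitch.getHintInner` below reads it as
    //
    //   @as(u3, @truncate(bag >> @intCast(3 * (i % 10))))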
1343
1344/// This data is stored inside extra. Trailing:
1345/// 0. body: Inst.Index // for each body_len
1346pub const Try = struct {
1347    body_len: u32,
1348};
1349
1350/// This data is stored inside extra. Trailing:
1351/// 0. body: Inst.Index // for each body_len
1352pub const TryPtr = struct {
1353    ptr: Inst.Ref,
1354    body_len: u32,
1355};
1356
1357pub const StructField = struct {
1358    /// Whether this is a pointer or byval is determined by the AIR tag.
1359    struct_operand: Inst.Ref,
1360    field_index: u32,
1361};
1362
1363pub const Bin = struct {
1364    lhs: Inst.Ref,
1365    rhs: Inst.Ref,
1366};
1367
1368pub const FieldParentPtr = struct {
1369    field_ptr: Inst.Ref,
1370    field_index: u32,
1371};
1372
1373pub const VectorCmp = struct {
1374    lhs: Inst.Ref,
1375    rhs: Inst.Ref,
1376    op: u32,
1377
1378    pub fn compareOperator(self: VectorCmp) std.math.CompareOperator {
1379        return @enumFromInt(@as(u3, @intCast(self.op)));
1380    }
1381
1382    pub fn encodeOp(compare_operator: std.math.CompareOperator) u32 {
1383        return @intFromEnum(compare_operator);
1384    }
1385};
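    // A minimal usage sketch (with `lhs`/`rhs` standing in for whatever operand refs
    // are at hand): `op` stores a `std.math.CompareOperator`, so the two helpers
    // above round-trip:
    //
    //   const vec_cmp: VectorCmp = .{ .lhs = lhs, .rhs = rhs, .op = VectorCmp.encodeOp(.lt) };
    //   assert(vec_cmp.compareOperator() == .lt);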
1386
1387/// Used by `Inst.Tag.shuffle_one`. Represents a mask element which either indexes into a
1388/// runtime-known vector, or is a comptime-known value.
1389pub const ShuffleOneMask = packed struct(u32) {
1390    index: u31,
1391    kind: enum(u1) { elem, value },
1392    pub fn elem(idx: u32) ShuffleOneMask {
1393        return .{ .index = @intCast(idx), .kind = .elem };
1394    }
1395    pub fn value(val: Value) ShuffleOneMask {
1396        return .{ .index = @intCast(@intFromEnum(val.toIntern())), .kind = .value };
1397    }
1398    pub const Unwrapped = union(enum) {
1399        /// The resulting element is this index into the runtime vector.
1400        elem: u32,
1401        /// The resulting element is this comptime-known value.
1402        /// It is correctly typed. It might be `undefined`.
1403        value: InternPool.Index,
1404    };
1405    pub fn unwrap(raw: ShuffleOneMask) Unwrapped {
1406        return switch (raw.kind) {
1407            .elem => .{ .elem = raw.index },
1408            .value => .{ .value = @enumFromInt(raw.index) },
1409        };
1410    }
1411};
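    // A minimal sketch of the two element kinds (`some_val` is a hypothetical
    // comptime-known `Value`):
    //
    //   const a = ShuffleOneMask.elem(2);         // a.unwrap() == .{ .elem = 2 }
    //   const b = ShuffleOneMask.value(some_val); // b.unwrap() == .{ .value = some_val.toIntern() }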
1412
1413/// Used by `Inst.Tag.shuffle_two`. Represents a mask element which either indexes into one
1414/// of two runtime-known vectors, or is undefined.
1415pub const ShuffleTwoMask = enum(u32) {
1416    undef = std.math.maxInt(u32),
1417    _,
1418    pub fn aElem(idx: u32) ShuffleTwoMask {
1419        return @enumFromInt(idx << 1);
1420    }
1421    pub fn bElem(idx: u32) ShuffleTwoMask {
1422        return @enumFromInt(idx << 1 | 1);
1423    }
1424    pub const Unwrapped = union(enum) {
1425        /// The resulting element is this index into the first runtime vector.
1426        a_elem: u32,
1427        /// The resulting element is this index into the second runtime vector.
1428        b_elem: u32,
1429        /// The resulting element is `undefined`.
1430        undef,
1431    };
1432    pub fn unwrap(raw: ShuffleTwoMask) Unwrapped {
1433        switch (raw) {
1434            .undef => return .undef,
1435            _ => {},
1436        }
1437        const x = @intFromEnum(raw);
1438        return switch (@as(u1, @truncate(x))) {
1439            0 => .{ .a_elem = x >> 1 },
1440            1 => .{ .b_elem = x >> 1 },
1441        };
1442    }
1443};
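    // A minimal sketch of the encoding above: the low bit selects the source vector,
    // the remaining bits are the lane index, and `maxInt(u32)` is reserved for
    // undefined elements.
    //
    //   ShuffleTwoMask.aElem(3) // raw 6; unwraps to .{ .a_elem = 3 }
    //   ShuffleTwoMask.bElem(3) // raw 7; unwraps to .{ .b_elem = 3 }
    //   ShuffleTwoMask.undef    // unwraps to .undef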
1444
1445/// Trailing:
1446/// 0. `Inst.Ref` for every outputs_len
1447/// 1. `Inst.Ref` for every inputs_len
1448/// 2. for every outputs_len
1449///    - constraint: memory at this position is reinterpreted as a null
1450///      terminated string.
1451///    - name: memory at this position is reinterpreted as a null
1452///      terminated string. pad to the next u32 after the null byte.
1453/// 3. for every inputs_len
1454///    - constraint: memory at this position is reinterpreted as a null
1455///      terminated string.
1456///    - name: memory at this position is reinterpreted as a null
1457///      terminated string. pad to the next u32 after the null byte.
1458/// 4. A number of u32 elements follow according to the equation `(source_len + 3) / 4`.
1459///    Memory starting at this position is reinterpreted as the source bytes.
1460pub const Asm = struct {
1461    /// Length of the assembly source in bytes.
1462    source_len: u32,
1463    inputs_len: u32,
1464    /// A comptime `std.builtin.assembly.Clobbers` value for the target architecture.
1465    clobbers: InternPool.Index,
1466    flags: Flags,
1467
1468    pub const Flags = packed struct(u32) {
1469        outputs_len: u31,
1470        is_volatile: bool,
1471    };
1472};
1473
1474pub const Cmpxchg = struct {
1475    ptr: Inst.Ref,
1476    expected_value: Inst.Ref,
1477    new_value: Inst.Ref,
1478    /// 0b00000000000000000000000000000XXX - success_order
1479    /// 0b00000000000000000000000000XXX000 - failure_order
1480    flags: u32,
1481
1482    pub fn successOrder(self: Cmpxchg) std.builtin.AtomicOrder {
1483        return @enumFromInt(@as(u3, @truncate(self.flags)));
1484    }
1485
1486    pub fn failureOrder(self: Cmpxchg) std.builtin.AtomicOrder {
1487        return @enumFromInt(@as(u3, @intCast(self.flags >> 3)));
1488    }
1489};
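    // A minimal sketch of how `flags` is packed, mirroring the accessors above:
    //
    //   const flags: u32 = @intFromEnum(std.builtin.AtomicOrder.seq_cst) |
    //       (@as(u32, @intFromEnum(std.builtin.AtomicOrder.monotonic)) << 3);
    //   // successOrder() == .seq_cst, failureOrder() == .monotonic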
1490
1491pub const AtomicRmw = struct {
1492    operand: Inst.Ref,
1493    /// 0b00000000000000000000000000000XXX - ordering
1494    /// 0b0000000000000000000000000XXXX000 - op
1495    flags: u32,
1496
1497    pub fn ordering(self: AtomicRmw) std.builtin.AtomicOrder {
1498        return @enumFromInt(@as(u3, @truncate(self.flags)));
1499    }
1500
1501    pub fn op(self: AtomicRmw) std.builtin.AtomicRmwOp {
1502        return @enumFromInt(@as(u4, @intCast(self.flags >> 3)));
1503    }
1504};
1505
1506pub const UnionInit = struct {
1507    field_index: u32,
1508    init: Inst.Ref,
1509};
1510
1511pub fn getMainBody(air: Air) []const Air.Inst.Index {
1512    const body_index = air.extra.items[@intFromEnum(ExtraIndex.main_block)];
1513    const extra = air.extraData(Block, body_index);
1514    return @ptrCast(air.extra.items[extra.end..][0..extra.data.body_len]);
1515}
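    // A typical traversal sketch: a backend walks the main body (and, recursively, any
    // nested bodies stored in `extra`) and dispatches on each instruction's tag:
    //
    //   for (air.getMainBody()) |inst| {
    //       switch (air.instructions.items(.tag)[@intFromEnum(inst)]) {
    //           // ...
    //       }
    //   }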
1516
1517pub fn typeOf(air: *const Air, inst: Air.Inst.Ref, ip: *const InternPool) Type {
1518    if (inst.toInterned()) |ip_index| {
1519        return .fromInterned(ip.typeOf(ip_index));
1520    } else {
1521        return air.typeOfIndex(inst.toIndex().?, ip);
1522    }
1523}
1524
1525pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) Type {
1526    const datas = air.instructions.items(.data);
1527    switch (air.instructions.items(.tag)[@intFromEnum(inst)]) {
1528        .add,
1529        .add_safe,
1530        .add_wrap,
1531        .add_sat,
1532        .sub,
1533        .sub_safe,
1534        .sub_wrap,
1535        .sub_sat,
1536        .mul,
1537        .mul_safe,
1538        .mul_wrap,
1539        .mul_sat,
1540        .div_float,
1541        .div_trunc,
1542        .div_floor,
1543        .div_exact,
1544        .rem,
1545        .mod,
1546        .bit_and,
1547        .bit_or,
1548        .xor,
1549        .shr,
1550        .shr_exact,
1551        .shl,
1552        .shl_exact,
1553        .shl_sat,
1554        .min,
1555        .max,
1556        .bool_and,
1557        .bool_or,
1558        .add_optimized,
1559        .sub_optimized,
1560        .mul_optimized,
1561        .div_float_optimized,
1562        .div_trunc_optimized,
1563        .div_floor_optimized,
1564        .div_exact_optimized,
1565        .rem_optimized,
1566        .mod_optimized,
1567        => return air.typeOf(datas[@intFromEnum(inst)].bin_op.lhs, ip),
1568
1569        .sqrt,
1570        .sin,
1571        .cos,
1572        .tan,
1573        .exp,
1574        .exp2,
1575        .log,
1576        .log2,
1577        .log10,
1578        .floor,
1579        .ceil,
1580        .round,
1581        .trunc_float,
1582        .neg,
1583        .neg_optimized,
1584        => return air.typeOf(datas[@intFromEnum(inst)].un_op, ip),
1585
1586        .cmp_lt,
1587        .cmp_lte,
1588        .cmp_eq,
1589        .cmp_gte,
1590        .cmp_gt,
1591        .cmp_neq,
1592        .cmp_lt_optimized,
1593        .cmp_lte_optimized,
1594        .cmp_eq_optimized,
1595        .cmp_gte_optimized,
1596        .cmp_gt_optimized,
1597        .cmp_neq_optimized,
1598        .cmp_lt_errors_len,
1599        .is_null,
1600        .is_non_null,
1601        .is_null_ptr,
1602        .is_non_null_ptr,
1603        .is_err,
1604        .is_non_err,
1605        .is_err_ptr,
1606        .is_non_err_ptr,
1607        .is_named_enum_value,
1608        .error_set_has_value,
1609        => return .bool,
1610
1611        .alloc,
1612        .ret_ptr,
1613        .err_return_trace,
1614        .c_va_start,
1615        => return datas[@intFromEnum(inst)].ty,
1616
1617        .arg => return datas[@intFromEnum(inst)].arg.ty.toType(),
1618
1619        .assembly,
1620        .block,
1621        .dbg_inline_block,
1622        .struct_field_ptr,
1623        .struct_field_val,
1624        .slice_elem_ptr,
1625        .ptr_elem_ptr,
1626        .cmpxchg_weak,
1627        .cmpxchg_strong,
1628        .slice,
1629        .aggregate_init,
1630        .union_init,
1631        .field_parent_ptr,
1632        .cmp_vector,
1633        .cmp_vector_optimized,
1634        .add_with_overflow,
1635        .sub_with_overflow,
1636        .mul_with_overflow,
1637        .shl_with_overflow,
1638        .ptr_add,
1639        .ptr_sub,
1640        .try_ptr,
1641        .try_ptr_cold,
1642        .shuffle_one,
1643        .shuffle_two,
1644        => return datas[@intFromEnum(inst)].ty_pl.ty.toType(),
1645
1646        .not,
1647        .bitcast,
1648        .load,
1649        .fpext,
1650        .fptrunc,
1651        .intcast,
1652        .intcast_safe,
1653        .trunc,
1654        .optional_payload,
1655        .optional_payload_ptr,
1656        .optional_payload_ptr_set,
1657        .errunion_payload_ptr_set,
1658        .wrap_optional,
1659        .unwrap_errunion_payload,
1660        .unwrap_errunion_err,
1661        .unwrap_errunion_payload_ptr,
1662        .unwrap_errunion_err_ptr,
1663        .wrap_errunion_payload,
1664        .wrap_errunion_err,
1665        .slice_ptr,
1666        .ptr_slice_len_ptr,
1667        .ptr_slice_ptr_ptr,
1668        .struct_field_ptr_index_0,
1669        .struct_field_ptr_index_1,
1670        .struct_field_ptr_index_2,
1671        .struct_field_ptr_index_3,
1672        .array_to_slice,
1673        .int_from_float,
1674        .int_from_float_optimized,
1675        .int_from_float_safe,
1676        .int_from_float_optimized_safe,
1677        .float_from_int,
1678        .splat,
1679        .get_union_tag,
1680        .clz,
1681        .ctz,
1682        .popcount,
1683        .byte_swap,
1684        .bit_reverse,
1685        .addrspace_cast,
1686        .c_va_arg,
1687        .c_va_copy,
1688        .abs,
1689        => return datas[@intFromEnum(inst)].ty_op.ty.toType(),
1690
1691        .loop,
1692        .repeat,
1693        .br,
1694        .cond_br,
1695        .switch_br,
1696        .loop_switch_br,
1697        .switch_dispatch,
1698        .ret,
1699        .ret_safe,
1700        .ret_load,
1701        .unreach,
1702        .trap,
1703        => return .noreturn,
1704
1705        .breakpoint,
1706        .dbg_stmt,
1707        .dbg_empty_stmt,
1708        .dbg_var_ptr,
1709        .dbg_var_val,
1710        .dbg_arg_inline,
1711        .store,
1712        .store_safe,
1713        .atomic_store_unordered,
1714        .atomic_store_monotonic,
1715        .atomic_store_release,
1716        .atomic_store_seq_cst,
1717        .memset,
1718        .memset_safe,
1719        .memcpy,
1720        .memmove,
1721        .set_union_tag,
1722        .prefetch,
1723        .set_err_return_trace,
1724        .c_va_end,
1725        .legalize_vec_store_elem,
1726        => return .void,
1727
1728        .slice_len,
1729        .ret_addr,
1730        .frame_addr,
1731        .save_err_return_trace_index,
1732        => return .usize,
1733
1734        .wasm_memory_grow => return .isize,
1735        .wasm_memory_size => return .usize,
1736
1737        .tag_name, .error_name => return .slice_const_u8_sentinel_0,
1738
1739        .call, .call_always_tail, .call_never_tail, .call_never_inline => {
1740            const callee_ty = air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip);
1741            return .fromInterned(ip.funcTypeReturnType(callee_ty.toIntern()));
1742        },
1743
1744        .slice_elem_val, .ptr_elem_val, .array_elem_val, .legalize_vec_elem_val => {
1745            const ptr_ty = air.typeOf(datas[@intFromEnum(inst)].bin_op.lhs, ip);
1746            return ptr_ty.childTypeIp(ip);
1747        },
1748        .atomic_load => {
1749            const ptr_ty = air.typeOf(datas[@intFromEnum(inst)].atomic_load.ptr, ip);
1750            return ptr_ty.childTypeIp(ip);
1751        },
1752        .atomic_rmw => {
1753            const ptr_ty = air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip);
1754            return ptr_ty.childTypeIp(ip);
1755        },
1756
1757        .reduce, .reduce_optimized => {
1758            const operand_ty = air.typeOf(datas[@intFromEnum(inst)].reduce.operand, ip);
1759            return .fromInterned(ip.indexToKey(operand_ty.ip_index).vector_type.child);
1760        },
1761
1762        .mul_add => return air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip),
1763        .select => {
1764            const extra = air.extraData(Air.Bin, datas[@intFromEnum(inst)].pl_op.payload).data;
1765            return air.typeOf(extra.lhs, ip);
1766        },
1767
1768        .@"try", .try_cold => {
1769            const err_union_ty = air.typeOf(datas[@intFromEnum(inst)].pl_op.operand, ip);
1770            return .fromInterned(ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type);
1771        },
1772
1773        .runtime_nav_ptr => return .fromInterned(datas[@intFromEnum(inst)].ty_nav.ty),
1774
1775        .work_item_id,
1776        .work_group_size,
1777        .work_group_id,
1778        => return .u32,
1779
1780        .legalize_compiler_rt_call => return datas[@intFromEnum(inst)].legalize_compiler_rt_call.func.returnType(),
1781
1782        .inferred_alloc => unreachable,
1783        .inferred_alloc_comptime => unreachable,
1784    }
1785}
1786
1787/// Returns the requested data, as well as the new index, which points at the start of
1788/// the trailing data for the object.
1789pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end: usize } {
1790    const fields = std.meta.fields(T);
1791    var i: usize = index;
1792    var result: T = undefined;
1793    inline for (fields) |field| {
1794        @field(result, field.name) = switch (field.type) {
1795            u32 => air.extra.items[i],
1796            InternPool.Index, Inst.Ref => @enumFromInt(air.extra.items[i]),
1797            i32, CondBr.BranchHints, Asm.Flags => @bitCast(air.extra.items[i]),
1798            else => @compileError("bad field type: " ++ @typeName(field.type)),
1799        };
1800        i += 1;
1801    }
1802    return .{
1803        .data = result,
1804        .end = i,
1805    };
1806}
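    // A decoding sketch following the trailing layout documented on `CondBr` (assuming,
    // as for `switch_br` below, that the instruction's `pl_op.payload` holds the index
    // of its `CondBr` payload):
    //
    //   const extra = air.extraData(CondBr, pl_op.payload);
    //   const then_body: []const Inst.Index =
    //       @ptrCast(air.extra.items[extra.end..][0..extra.data.then_body_len]);
    //   const else_body: []const Inst.Index =
    //       @ptrCast(air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);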
1807
1808pub fn deinit(air: *Air, gpa: std.mem.Allocator) void {
1809    air.instructions.deinit(gpa);
1810    air.extra.deinit(gpa);
1811    air.* = undefined;
1812}
1813
1814pub fn internedToRef(ip_index: InternPool.Index) Inst.Ref {
1815    return .fromIntern(ip_index);
1816}
1817
1818/// Returns `null` if runtime-known.
1819pub fn value(air: Air, inst: Inst.Ref, pt: Zcu.PerThread) !?Value {
1820    if (inst.toInterned()) |ip_index| {
1821        return .fromInterned(ip_index);
1822    }
1823    const index = inst.toIndex().?;
1824    return air.typeOfIndex(index, &pt.zcu.intern_pool).onePossibleValue(pt);
1825}
1826
1827pub const NullTerminatedString = enum(u32) {
1828    none = std.math.maxInt(u32),
1829    _,
1830
1831    pub fn toSlice(nts: NullTerminatedString, air: Air) [:0]const u8 {
1832        if (nts == .none) return "";
1833        const bytes = std.mem.sliceAsBytes(air.extra.items[@intFromEnum(nts)..]);
1834        return bytes[0..std.mem.indexOfScalar(u8, bytes, 0).? :0];
1835    }
1836};
1837
1838/// Returns whether the given instruction must always be lowered, for instance
1839/// because it can cause side effects. If an instruction does not need to be
1840/// lowered, and Liveness determines its result is unused, backends should
1841/// avoid lowering it.
1842pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
1843    const data = air.instructions.items(.data)[@intFromEnum(inst)];
1844    return switch (air.instructions.items(.tag)[@intFromEnum(inst)]) {
1845        .arg,
1846        .assembly,
1847        .block,
1848        .loop,
1849        .repeat,
1850        .br,
1851        .trap,
1852        .breakpoint,
1853        .call,
1854        .call_always_tail,
1855        .call_never_tail,
1856        .call_never_inline,
1857        .cond_br,
1858        .switch_br,
1859        .loop_switch_br,
1860        .switch_dispatch,
1861        .@"try",
1862        .try_cold,
1863        .try_ptr,
1864        .try_ptr_cold,
1865        .dbg_stmt,
1866        .dbg_empty_stmt,
1867        .dbg_inline_block,
1868        .dbg_var_ptr,
1869        .dbg_var_val,
1870        .dbg_arg_inline,
1871        .ret,
1872        .ret_safe,
1873        .ret_load,
1874        .store,
1875        .store_safe,
1876        .unreach,
1877        .optional_payload_ptr_set,
1878        .errunion_payload_ptr_set,
1879        .set_union_tag,
1880        .memset,
1881        .memset_safe,
1882        .memcpy,
1883        .memmove,
1884        .cmpxchg_weak,
1885        .cmpxchg_strong,
1886        .atomic_store_unordered,
1887        .atomic_store_monotonic,
1888        .atomic_store_release,
1889        .atomic_store_seq_cst,
1890        .atomic_rmw,
1891        .prefetch,
1892        .wasm_memory_grow,
1893        .set_err_return_trace,
1894        .c_va_arg,
1895        .c_va_copy,
1896        .c_va_end,
1897        .c_va_start,
1898        .add_safe,
1899        .sub_safe,
1900        .mul_safe,
1901        .intcast_safe,
1902        .int_from_float_safe,
1903        .int_from_float_optimized_safe,
1904        .legalize_vec_store_elem,
1905        .legalize_compiler_rt_call,
1906        => true,
1907
1908        .add,
1909        .add_optimized,
1910        .add_wrap,
1911        .add_sat,
1912        .sub,
1913        .sub_optimized,
1914        .sub_wrap,
1915        .sub_sat,
1916        .mul,
1917        .mul_optimized,
1918        .mul_wrap,
1919        .mul_sat,
1920        .div_float,
1921        .div_float_optimized,
1922        .div_trunc,
1923        .div_trunc_optimized,
1924        .div_floor,
1925        .div_floor_optimized,
1926        .div_exact,
1927        .div_exact_optimized,
1928        .rem,
1929        .rem_optimized,
1930        .mod,
1931        .mod_optimized,
1932        .ptr_add,
1933        .ptr_sub,
1934        .max,
1935        .min,
1936        .add_with_overflow,
1937        .sub_with_overflow,
1938        .mul_with_overflow,
1939        .shl_with_overflow,
1940        .alloc,
1941        .inferred_alloc,
1942        .inferred_alloc_comptime,
1943        .ret_ptr,
1944        .bit_and,
1945        .bit_or,
1946        .shr,
1947        .shr_exact,
1948        .shl,
1949        .shl_exact,
1950        .shl_sat,
1951        .xor,
1952        .not,
1953        .bitcast,
1954        .ret_addr,
1955        .frame_addr,
1956        .clz,
1957        .ctz,
1958        .popcount,
1959        .byte_swap,
1960        .bit_reverse,
1961        .sqrt,
1962        .sin,
1963        .cos,
1964        .tan,
1965        .exp,
1966        .exp2,
1967        .log,
1968        .log2,
1969        .log10,
1970        .abs,
1971        .floor,
1972        .ceil,
1973        .round,
1974        .trunc_float,
1975        .neg,
1976        .neg_optimized,
1977        .cmp_lt,
1978        .cmp_lt_optimized,
1979        .cmp_lte,
1980        .cmp_lte_optimized,
1981        .cmp_eq,
1982        .cmp_eq_optimized,
1983        .cmp_gte,
1984        .cmp_gte_optimized,
1985        .cmp_gt,
1986        .cmp_gt_optimized,
1987        .cmp_neq,
1988        .cmp_neq_optimized,
1989        .cmp_vector,
1990        .cmp_vector_optimized,
1991        .is_null,
1992        .is_non_null,
1993        .is_err,
1994        .is_non_err,
1995        .bool_and,
1996        .bool_or,
1997        .fptrunc,
1998        .fpext,
1999        .intcast,
2000        .trunc,
2001        .optional_payload,
2002        .optional_payload_ptr,
2003        .wrap_optional,
2004        .unwrap_errunion_payload,
2005        .unwrap_errunion_err,
2006        .unwrap_errunion_payload_ptr,
2007        .wrap_errunion_payload,
2008        .wrap_errunion_err,
2009        .struct_field_ptr,
2010        .struct_field_ptr_index_0,
2011        .struct_field_ptr_index_1,
2012        .struct_field_ptr_index_2,
2013        .struct_field_ptr_index_3,
2014        .struct_field_val,
2015        .get_union_tag,
2016        .slice,
2017        .slice_len,
2018        .slice_ptr,
2019        .ptr_slice_len_ptr,
2020        .ptr_slice_ptr_ptr,
2021        .array_elem_val,
2022        .slice_elem_ptr,
2023        .ptr_elem_ptr,
2024        .array_to_slice,
2025        .int_from_float,
2026        .int_from_float_optimized,
2027        .float_from_int,
2028        .reduce,
2029        .reduce_optimized,
2030        .splat,
2031        .shuffle_one,
2032        .shuffle_two,
2033        .select,
2034        .is_named_enum_value,
2035        .tag_name,
2036        .error_name,
2037        .error_set_has_value,
2038        .aggregate_init,
2039        .union_init,
2040        .mul_add,
2041        .field_parent_ptr,
2042        .wasm_memory_size,
2043        .cmp_lt_errors_len,
2044        .err_return_trace,
2045        .addrspace_cast,
2046        .save_err_return_trace_index,
2047        .runtime_nav_ptr,
2048        .work_item_id,
2049        .work_group_size,
2050        .work_group_id,
2051        .legalize_vec_elem_val,
2052        => false,
2053
2054        .is_non_null_ptr, .is_null_ptr, .is_non_err_ptr, .is_err_ptr => air.typeOf(data.un_op, ip).isVolatilePtrIp(ip),
2055        .load, .unwrap_errunion_err_ptr => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip),
2056        .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtrIp(ip),
2057        .atomic_load => switch (data.atomic_load.order) {
2058            .unordered, .monotonic => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip),
2059            else => true, // Stronger memory orderings have inter-thread side effects.
2060        },
2061    };
2062}
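    // A typical backend check (sketch, assuming a `Liveness.isUnused` query): skip an
    // instruction only when its result is unused and it does not have to be lowered
    // for its side effects.
    //
    //   if (liveness.isUnused(inst) and !air.mustLower(inst, ip)) continue;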
2063
2064pub const UnwrappedSwitch = struct {
2065    air: *const Air,
2066    operand: Inst.Ref,
2067    cases_len: u32,
2068    else_body_len: u32,
2069    branch_hints_start: u32,
2070    cases_start: u32,
2071
2072    /// Asserts that `case_idx < us.cases_len`.
2073    pub fn getHint(us: UnwrappedSwitch, case_idx: u32) std.builtin.BranchHint {
2074        assert(case_idx < us.cases_len);
2075        return us.getHintInner(case_idx);
2076    }
2077    pub fn getElseHint(us: UnwrappedSwitch) std.builtin.BranchHint {
2078        return us.getHintInner(us.cases_len);
2079    }
2080    fn getHintInner(us: UnwrappedSwitch, idx: u32) std.builtin.BranchHint {
2081        const bag = us.air.extra.items[us.branch_hints_start..][idx / 10];
2082        const bits: u3 = @truncate(bag >> @intCast(3 * (idx % 10)));
2083        return @enumFromInt(bits);
2084    }
2085
2086    pub fn iterateCases(us: UnwrappedSwitch) CaseIterator {
2087        return .{
2088            .air = us.air,
2089            .cases_len = us.cases_len,
2090            .else_body_len = us.else_body_len,
2091            .next_case = 0,
2092            .extra_index = us.cases_start,
2093        };
2094    }
2095    pub const CaseIterator = struct {
2096        air: *const Air,
2097        cases_len: u32,
2098        else_body_len: u32,
2099        next_case: u32,
2100        extra_index: u32,
2101
2102        pub fn next(it: *CaseIterator) ?Case {
2103            if (it.next_case == it.cases_len) return null;
2104            const idx = it.next_case;
2105            it.next_case += 1;
2106
2107            const extra = it.air.extraData(SwitchBr.Case, it.extra_index);
2108            var extra_index = extra.end;
2109            const items: []const Inst.Ref = @ptrCast(it.air.extra.items[extra_index..][0..extra.data.items_len]);
2110            extra_index += items.len;
2111            // TODO: ptrcast from []const Inst.Ref to []const [2]Inst.Ref when supported
2112            const ranges_ptr: [*]const [2]Inst.Ref = @ptrCast(it.air.extra.items[extra_index..]);
2113            const ranges: []const [2]Inst.Ref = ranges_ptr[0..extra.data.ranges_len];
2114            extra_index += ranges.len * 2;
2115            const body: []const Inst.Index = @ptrCast(it.air.extra.items[extra_index..][0..extra.data.body_len]);
2116            extra_index += body.len;
2117            it.extra_index = @intCast(extra_index);
2118
2119            return .{
2120                .idx = idx,
2121                .items = items,
2122                .ranges = ranges,
2123                .body = body,
2124            };
2125        }
2126        /// Only valid to call once all cases have been iterated, i.e. once `next` has returned `null`.
2127        /// Returns the body of the "default" (`else`) case.
2128        pub fn elseBody(it: *CaseIterator) []const Inst.Index {
2129            assert(it.next_case == it.cases_len);
2130            return @ptrCast(it.air.extra.items[it.extra_index..][0..it.else_body_len]);
2131        }
2132        pub const Case = struct {
2133            idx: u32,
2134            items: []const Inst.Ref,
2135            ranges: []const [2]Inst.Ref,
2136            body: []const Inst.Index,
2137        };
2138    };
2139};
2140
2141pub fn unwrapSwitch(air: *const Air, switch_inst: Inst.Index) UnwrappedSwitch {
2142    const inst = air.instructions.get(@intFromEnum(switch_inst));
2143    switch (inst.tag) {
2144        .switch_br, .loop_switch_br => {},
2145        else => unreachable, // assertion failure
2146    }
2147    const pl_op = inst.data.pl_op;
2148    const extra = air.extraData(SwitchBr, pl_op.payload);
2149    const hint_bag_count = std.math.divCeil(usize, extra.data.cases_len + 1, 10) catch unreachable;
2150    return .{
2151        .air = air,
2152        .operand = pl_op.operand,
2153        .cases_len = extra.data.cases_len,
2154        .else_body_len = extra.data.else_body_len,
2155        .branch_hints_start = @intCast(extra.end),
2156        .cases_start = @intCast(extra.end + hint_bag_count),
2157    };
2158}
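    // A usage sketch tying the pieces above together:
    //
    //   const sw = air.unwrapSwitch(switch_inst);
    //   var it = sw.iterateCases();
    //   while (it.next()) |case| {
    //       // case.items, case.ranges, case.body, sw.getHint(case.idx), ...
    //   }
    //   const else_body = it.elseBody();
    //   const else_hint = sw.getElseHint();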
2159
2160pub fn unwrapShuffleOne(air: *const Air, zcu: *const Zcu, inst_index: Inst.Index) struct {
2161    result_ty: Type,
2162    operand: Inst.Ref,
2163    mask: []const ShuffleOneMask,
2164} {
2165    const inst = air.instructions.get(@intFromEnum(inst_index));
2166    switch (inst.tag) {
2167        .shuffle_one => {},
2168        else => unreachable, // assertion failure
2169    }
2170    const result_ty: Type = .fromInterned(inst.data.ty_pl.ty.toInterned().?);
2171    const mask_len: u32 = result_ty.vectorLen(zcu);
2172    const extra_idx = inst.data.ty_pl.payload;
2173    return .{
2174        .result_ty = result_ty,
2175        .operand = @enumFromInt(air.extra.items[extra_idx + mask_len]),
2176        .mask = @ptrCast(air.extra.items[extra_idx..][0..mask_len]),
2177    };
2178}
2179
2180pub fn unwrapShuffleTwo(air: *const Air, zcu: *const Zcu, inst_index: Inst.Index) struct {
2181    result_ty: Type,
2182    operand_a: Inst.Ref,
2183    operand_b: Inst.Ref,
2184    mask: []const ShuffleTwoMask,
2185} {
2186    const inst = air.instructions.get(@intFromEnum(inst_index));
2187    switch (inst.tag) {
2188        .shuffle_two => {},
2189        else => unreachable, // assertion failure
2190    }
2191    const result_ty: Type = .fromInterned(inst.data.ty_pl.ty.toInterned().?);
2192    const mask_len: u32 = result_ty.vectorLen(zcu);
2193    const extra_idx = inst.data.ty_pl.payload;
2194    return .{
2195        .result_ty = result_ty,
2196        .operand_a = @enumFromInt(air.extra.items[extra_idx + mask_len + 0]),
2197        .operand_b = @enumFromInt(air.extra.items[extra_idx + mask_len + 1]),
2198        .mask = @ptrCast(air.extra.items[extra_idx..][0..mask_len]),
2199    };
2200}
2201
2202pub const typesFullyResolved = types_resolved.typesFullyResolved;
2203pub const typeFullyResolved = types_resolved.checkType;
2204pub const valFullyResolved = types_resolved.checkVal;
2205pub const legalize = Legalize.legalize;
2206pub const write = print.write;
2207pub const writeInst = print.writeInst;
2208pub const dump = print.dump;
2209pub const dumpInst = print.dumpInst;
2210
2211pub const CoveragePoint = enum(u1) {
2212    /// Indicates the block is not a place of interest corresponding to
2213    /// a source location for coverage purposes.
2214    none,
2215    /// Point of interest. The next instruction emitted corresponds to
2216    /// a source location used for coverage instrumentation.
2217    poi,
2218};
2219
2220pub const CompilerRtFunc = enum(u32) {
2221    // zig fmt: off
2222
2223    // float simple arithmetic
2224    __addhf3, __addsf3, __adddf3, __addxf3, __addtf3,
2225    __subhf3, __subsf3, __subdf3, __subxf3, __subtf3,
2226    __mulhf3, __mulsf3, __muldf3, __mulxf3, __multf3,
2227    __divhf3, __divsf3, __divdf3, __divxf3, __divtf3,
2228
2229    // float minmax
2230    __fminh, fminf, fmin, __fminx, fminq,
2231    __fmaxh, fmaxf, fmax, __fmaxx, fmaxq,
2232
2233    // float round
2234    __ceilh,  ceilf,  ceil,  __ceilx,  ceilq,
2235    __floorh, floorf, floor, __floorx, floorq,
2236    __trunch, truncf, trunc, __truncx, truncq,
2237    __roundh, roundf, round, __roundx, roundq,
2238
2239    // float log
2240    __logh,   logf,   log,   __logx,   logq,
2241    __log2h,  log2f,  log2,  __log2x,  log2q,
2242    __log10h, log10f, log10, __log10x, log10q,
2243
2244    // float exp
2245    __exph,  expf,  exp,  __expx,  expq,
2246    __exp2h, exp2f, exp2, __exp2x, exp2q,
2247
2248    // float trigonometry
2249    __sinh, sinf, sin, __sinx, sinq,
2250    __cosh, cosf, cos, __cosx, cosq,
2251    __tanh, tanf, tan, __tanx, tanq,
2252
2253    // float misc ops
2254    __fabsh, fabsf, fabs, __fabsx, fabsq,
2255    __sqrth, sqrtf, sqrt, __sqrtx, sqrtq,
2256    __fmodh, fmodf, fmod, __fmodx, fmodq,
2257    __fmah,  fmaf,  fma,  __fmax,  fmaq,
2258
2259    // float comparison
2260    __eqhf2, __eqsf2, __eqdf2, __eqxf2, __eqtf2, // == iff return == 0
2261    __nehf2, __nesf2, __nedf2, __nexf2, __netf2, // != iff return != 0
2262    __lthf2, __ltsf2, __ltdf2, __ltxf2, __lttf2, // <  iff return < 0
2263    __lehf2, __lesf2, __ledf2, __lexf2, __letf2, // <= iff return <= 0
2264    __gthf2, __gtsf2, __gtdf2, __gtxf2, __gttf2, // >  iff return > 0
2265    __gehf2, __gesf2, __gedf2, __gexf2, __getf2, // >= iff return >= 0
2266
2267    // AEABI float comparison. On ARM, the `sf`/`df` functions above are not available,
2268    // and these must be used instead. They are not just aliases for the above functions
2269    // because they have a different (better) ABI.
2270    __aeabi_fcmpeq, __aeabi_dcmpeq, // ==, returns bool
2271    __aeabi_fcmplt, __aeabi_dcmplt, // <, returns bool
2272    __aeabi_fcmple, __aeabi_dcmple, // <=, returns bool
2273    __aeabi_fcmpgt, __aeabi_dcmpgt, // >, returns bool
2274    __aeabi_fcmpge, __aeabi_dcmpge, // >=, returns bool
2275
2276    // float shortening
2277    // to f16     // to f32     // to f64     // to f80
2278    __trunctfhf2, __trunctfsf2, __trunctfdf2, __trunctfxf2, // from f128
2279    __truncxfhf2, __truncxfsf2, __truncxfdf2,               // from f80
2280    __truncdfhf2, __truncdfsf2,                             // from f64
2281    __truncsfhf2,                                           // from f32
2282
2283    // float widening
2284    // to f128     // to f80      // to f64      // to f32
2285    __extendhftf2, __extendhfxf2, __extendhfdf2, __extendhfsf2, // from f16
2286    __extendsftf2, __extendsfxf2, __extendsfdf2,                // from f32
2287    __extenddftf2, __extenddfxf2,                               // from f64
2288    __extendxftf2,                                              // from f80
2289
2290    // int to float
2291    __floatsihf, __floatsisf, __floatsidf, __floatsixf, __floatsitf, // i32 to float
2292    __floatdihf, __floatdisf, __floatdidf, __floatdixf, __floatditf, // i64 to float
2293    __floattihf, __floattisf, __floattidf, __floattixf, __floattitf, // i128 to float
2294    __floateihf, __floateisf, __floateidf, __floateixf, __floateitf, // arbitrary iN to float
2295    __floatunsihf, __floatunsisf, __floatunsidf, __floatunsixf, __floatunsitf, // u32 to float
2296    __floatundihf, __floatundisf, __floatundidf, __floatundixf, __floatunditf, // u64 to float
2297    __floatuntihf, __floatuntisf, __floatuntidf, __floatuntixf, __floatuntitf, // u128 to float
2298    __floatuneihf, __floatuneisf, __floatuneidf, __floatuneixf, __floatuneitf, // arbitrary uN to float
2299
2300    // float to int
2301    __fixhfsi, __fixsfsi, __fixdfsi, __fixxfsi, __fixtfsi, // float to i32
2302    __fixhfdi, __fixsfdi, __fixdfdi, __fixxfdi, __fixtfdi, // float to i64
2303    __fixhfti, __fixsfti, __fixdfti, __fixxfti, __fixtfti, // float to i128
2304    __fixhfei, __fixsfei, __fixdfei, __fixxfei, __fixtfei, // float to arbitrary iN
2305    __fixunshfsi, __fixunssfsi, __fixunsdfsi, __fixunsxfsi, __fixunstfsi, // float to u32
2306    __fixunshfdi, __fixunssfdi, __fixunsdfdi, __fixunsxfdi, __fixunstfdi, // float to u64
2307    __fixunshfti, __fixunssfti, __fixunsdfti, __fixunsxfti, __fixunstfti, // float to u128
2308    __fixunshfei, __fixunssfei, __fixunsdfei, __fixunsxfei, __fixunstfei, // float to arbitrary uN
2309
2310    // zig fmt: on
2311
2312    /// Usually, the tag names of `CompilerRtFunc` match the corresponding symbol name, but not
2313    /// always; some target triples have slightly different compiler-rt ABIs for one reason or
2314    /// another.
2315    pub fn name(f: CompilerRtFunc, target: *const std.Target) []const u8 {
2316        const use_gnu_f16_abi = switch (target.cpu.arch) {
2317            .wasm32,
2318            .wasm64,
2319            .riscv64,
2320            .riscv64be,
2321            .riscv32,
2322            .riscv32be,
2323            => false,
2324            .x86, .x86_64 => true,
2325            .arm, .armeb, .thumb, .thumbeb => switch (target.abi) {
2326                .eabi, .eabihf => false,
2327                else => true,
2328            },
2329            else => !target.os.tag.isDarwin(),
2330        };
2331        const use_aeabi = target.cpu.arch.isArm() and switch (target.abi) {
2332            .eabi,
2333            .eabihf,
2334            .musleabi,
2335            .musleabihf,
2336            .gnueabi,
2337            .gnueabihf,
2338            .android,
2339            .androideabi,
2340            => true,
2341            else => false,
2342        };
2343
2344        // GNU didn't like the standard names specifically for conversions between f16
2345        // and f32, so they invented their own naming convention (but they only use it
2346        // on a few targets, of course). This overrides the ARM EABI names in some
2347        // cases. I don't like GNU.
2348        if (use_gnu_f16_abi) switch (f) {
2349            .__truncsfhf2 => return "__gnu_f2h_ieee",
2350            .__extendhfsf2 => return "__gnu_h2f_ieee",
2351            else => {},
2352        };
2353
2354        if (use_aeabi) return switch (f) {
2355            .__addsf3 => "__aeabi_fadd",
2356            .__adddf3 => "__aeabi_dadd",
2357            .__subsf3 => "__aeabi_fsub",
2358            .__subdf3 => "__aeabi_dsub",
2359            .__mulsf3 => "__aeabi_fmul",
2360            .__muldf3 => "__aeabi_dmul",
2361            .__divsf3 => "__aeabi_fdiv",
2362            .__divdf3 => "__aeabi_ddiv",
2363            .__truncdfhf2 => "__aeabi_d2h",
2364            .__truncdfsf2 => "__aeabi_d2f",
2365            .__truncsfhf2 => "__aeabi_f2h",
2366            .__extendsfdf2 => "__aeabi_f2d",
2367            .__extendhfsf2 => "__aeabi_h2f",
2368            .__floatsisf => "__aeabi_i2f",
2369            .__floatsidf => "__aeabi_i2d",
2370            .__floatdisf => "__aeabi_l2f",
2371            .__floatdidf => "__aeabi_l2d",
2372            .__floatunsisf => "__aeabi_ui2f",
2373            .__floatunsidf => "__aeabi_ui2d",
2374            .__floatundisf => "__aeabi_ul2f",
2375            .__floatundidf => "__aeabi_ul2d",
2376            .__fixsfsi => "__aeabi_f2iz",
2377            .__fixdfsi => "__aeabi_d2iz",
2378            .__fixsfdi => "__aeabi_f2lz",
2379            .__fixdfdi => "__aeabi_d2lz",
2380            .__fixunssfsi => "__aeabi_f2uiz",
2381            .__fixunsdfsi => "__aeabi_d2uiz",
2382            .__fixunssfdi => "__aeabi_f2ulz",
2383            .__fixunsdfdi => "__aeabi_d2ulz",
2384
2385            // These functions are not available on AEABI. The AEABI equivalents are
2386            // separate fields rather than aliases because they have a different ABI.
2387            .__eqsf2, .__eqdf2 => unreachable,
2388            .__nesf2, .__nedf2 => unreachable,
2389            .__ltsf2, .__ltdf2 => unreachable,
2390            .__lesf2, .__ledf2 => unreachable,
2391            .__gtsf2, .__gtdf2 => unreachable,
2392            .__gesf2, .__gedf2 => unreachable,
2393
2394            else => @tagName(f),
2395        };
2396
2397        return switch (f) {
2398            // These functions are only available on AEABI.
2399            .__aeabi_fcmpeq, .__aeabi_dcmpeq => unreachable,
2400            .__aeabi_fcmplt, .__aeabi_dcmplt => unreachable,
2401            .__aeabi_fcmple, .__aeabi_dcmple => unreachable,
2402            .__aeabi_fcmpgt, .__aeabi_dcmpgt => unreachable,
2403            .__aeabi_fcmpge, .__aeabi_dcmpge => unreachable,
2404
2405            else => @tagName(f),
2406        };
2407    }
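        // For example (per the logic above): on arm-linux-gnueabihf, `.__addsf3`
        // resolves to "__aeabi_fadd", but `.__truncsfhf2` resolves to "__gnu_f2h_ieee"
        // because the GNU f16 names take precedence there; on most other targets the
        // symbol name is simply the tag name.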
2408
2409    pub fn @"callconv"(f: CompilerRtFunc, target: *const std.Target) std.builtin.CallingConvention {
2410        const use_gnu_f16_abi = switch (target.cpu.arch) {
2411            .wasm32,
2412            .wasm64,
2413            .riscv64,
2414            .riscv64be,
2415            .riscv32,
2416            .riscv32be,
2417            => false,
2418            .x86, .x86_64 => true,
2419            .arm, .armeb, .thumb, .thumbeb => switch (target.abi) {
2420                .eabi, .eabihf => false,
2421                else => true,
2422            },
2423            else => !target.os.tag.isDarwin(),
2424        };
2425        const use_aeabi = target.cpu.arch.isArm() and switch (target.abi) {
2426            .eabi,
2427            .eabihf,
2428            .musleabi,
2429            .musleabihf,
2430            .gnueabi,
2431            .gnueabihf,
2432            .android,
2433            .androideabi,
2434            => true,
2435            else => false,
2436        };
2437
2438        if (use_gnu_f16_abi) switch (f) {
2439            .__truncsfhf2,
2440            .__extendhfsf2,
2441            => return target.cCallingConvention().?,
2442            else => {},
2443        };
2444
2445        if (use_aeabi) switch (f) {
2446            // zig fmt: off
2447            .__addsf3, .__adddf3, .__subsf3, .__subdf3,
2448            .__mulsf3, .__muldf3, .__divsf3, .__divdf3,
2449            .__truncdfhf2,  .__truncdfsf2, .__truncsfhf2,
2450            .__extendsfdf2, .__extendhfsf2,
2451            .__floatsisf,   .__floatsidf,   .__floatdisf,   .__floatdidf,
2452            .__floatunsisf, .__floatunsidf, .__floatundisf, .__floatundidf,
2453            .__fixsfsi,    .__fixdfsi,    .__fixsfdi,    .__fixdfdi,
2454            .__fixunssfsi, .__fixunsdfsi, .__fixunssfdi, .__fixunsdfdi,
2455            => return .{ .arm_aapcs = .{} },
2456            // zig fmt: on
2457            else => {},
2458        };
2459
2460        return target.cCallingConvention().?;
2461    }
2462
2463    pub fn returnType(f: CompilerRtFunc) Type {
2464        return switch (f) {
2465            .__addhf3, .__subhf3, .__mulhf3, .__divhf3 => .f16,
2466            .__addsf3, .__subsf3, .__mulsf3, .__divsf3 => .f32,
2467            .__adddf3, .__subdf3, .__muldf3, .__divdf3 => .f64,
2468            .__addxf3, .__subxf3, .__mulxf3, .__divxf3 => .f80,
2469            .__addtf3, .__subtf3, .__multf3, .__divtf3 => .f128,
2470
2471            // zig fmt: off
2472            .__fminh, .__fmaxh,
2473            .__ceilh, .__floorh, .__trunch, .__roundh,
2474            .__logh, .__log2h, .__log10h,
2475            .__exph, .__exp2h,
2476            .__sinh, .__cosh, .__tanh,
2477            .__fabsh, .__sqrth, .__fmodh, .__fmah,
2478            => .f16,
2479            .fminf, .fmaxf,
2480            .ceilf, .floorf, .truncf, .roundf,
2481            .logf, .log2f, .log10f,
2482            .expf, .exp2f,
2483            .sinf, .cosf, .tanf,
2484            .fabsf, .sqrtf, .fmodf, .fmaf,
2485            => .f32,
2486            .fmin, .fmax,
2487            .ceil, .floor, .trunc, .round,
2488            .log, .log2, .log10,
2489            .exp, .exp2,
2490            .sin, .cos, .tan,
2491            .fabs, .sqrt, .fmod, .fma,
2492            => .f64,
2493            .__fminx, .__fmaxx,
2494            .__ceilx, .__floorx, .__truncx, .__roundx,
2495            .__logx, .__log2x, .__log10x,
2496            .__expx, .__exp2x,
2497            .__sinx, .__cosx, .__tanx,
2498            .__fabsx, .__sqrtx, .__fmodx, .__fmax,
2499            => .f80,
2500            .fminq, .fmaxq,
2501            .ceilq, .floorq, .truncq, .roundq,
2502            .logq, .log2q, .log10q,
2503            .expq, .exp2q,
2504            .sinq, .cosq, .tanq,
2505            .fabsq, .sqrtq, .fmodq, .fmaq,
2506            => .f128,
2507            // zig fmt: on
2508
2509            .__eqhf2, .__eqsf2, .__eqdf2, .__eqxf2, .__eqtf2 => .i32,
2510            .__nehf2, .__nesf2, .__nedf2, .__nexf2, .__netf2 => .i32,
2511            .__lthf2, .__ltsf2, .__ltdf2, .__ltxf2, .__lttf2 => .i32,
2512            .__lehf2, .__lesf2, .__ledf2, .__lexf2, .__letf2 => .i32,
2513            .__gthf2, .__gtsf2, .__gtdf2, .__gtxf2, .__gttf2 => .i32,
2514            .__gehf2, .__gesf2, .__gedf2, .__gexf2, .__getf2 => .i32,
2515
2516            .__aeabi_fcmpeq, .__aeabi_dcmpeq => .i32,
2517            .__aeabi_fcmplt, .__aeabi_dcmplt => .i32,
2518            .__aeabi_fcmple, .__aeabi_dcmple => .i32,
2519            .__aeabi_fcmpgt, .__aeabi_dcmpgt => .i32,
2520            .__aeabi_fcmpge, .__aeabi_dcmpge => .i32,
2521
2522            .__trunctfhf2, .__truncxfhf2, .__truncdfhf2, .__truncsfhf2 => .f16,
2523            .__trunctfsf2, .__truncxfsf2, .__truncdfsf2 => .f32,
2524            .__trunctfdf2, .__truncxfdf2 => .f64,
2525            .__trunctfxf2 => .f80,
2526
2527            .__extendhftf2, .__extendsftf2, .__extenddftf2, .__extendxftf2 => .f128,
2528            .__extendhfxf2, .__extendsfxf2, .__extenddfxf2 => .f80,
2529            .__extendhfdf2, .__extendsfdf2 => .f64,
2530            .__extendhfsf2 => .f32,
2531
2532            .__floatsihf, .__floatdihf, .__floattihf, .__floateihf => .f16,
2533            .__floatsisf, .__floatdisf, .__floattisf, .__floateisf => .f32,
2534            .__floatsidf, .__floatdidf, .__floattidf, .__floateidf => .f64,
2535            .__floatsixf, .__floatdixf, .__floattixf, .__floateixf => .f80,
2536            .__floatsitf, .__floatditf, .__floattitf, .__floateitf => .f128,
2537            .__floatunsihf, .__floatundihf, .__floatuntihf, .__floatuneihf => .f16,
2538            .__floatunsisf, .__floatundisf, .__floatuntisf, .__floatuneisf => .f32,
2539            .__floatunsidf, .__floatundidf, .__floatuntidf, .__floatuneidf => .f64,
2540            .__floatunsixf, .__floatundixf, .__floatuntixf, .__floatuneixf => .f80,
2541            .__floatunsitf, .__floatunditf, .__floatuntitf, .__floatuneitf => .f128,
2542
2543            .__fixhfsi, .__fixsfsi, .__fixdfsi, .__fixxfsi, .__fixtfsi => .i32,
2544            .__fixhfdi, .__fixsfdi, .__fixdfdi, .__fixxfdi, .__fixtfdi => .i64,
2545            .__fixhfti, .__fixsfti, .__fixdfti, .__fixxfti, .__fixtfti => .i128,
2546            .__fixhfei, .__fixsfei, .__fixdfei, .__fixxfei, .__fixtfei => .void,
2547            .__fixunshfsi, .__fixunssfsi, .__fixunsdfsi, .__fixunsxfsi, .__fixunstfsi => .u32,
2548            .__fixunshfdi, .__fixunssfdi, .__fixunsdfdi, .__fixunsxfdi, .__fixunstfdi => .u64,
2549            .__fixunshfti, .__fixunssfti, .__fixunsdfti, .__fixunsxfti, .__fixunstfti => .u128,
2550            .__fixunshfei, .__fixunssfei, .__fixunsdfei, .__fixunsxfei, .__fixunstfei => .void,
2551        };
2552    }
2553};