//! Temporary, dynamically allocated structures used only during flush.
//! Could be constructed fresh each time, or kept around between updates to reduce heap allocations.

const Flush = @This();
const Wasm = @import("../Wasm.zig");
const Object = @import("Object.zig");
const Zcu = @import("../../Zcu.zig");
const Alignment = Wasm.Alignment;
const String = Wasm.String;
const Relocation = Wasm.Relocation;
const InternPool = @import("../../InternPool.zig");
const Mir = @import("../../codegen/wasm/Mir.zig");

const build_options = @import("build_options");

const std = @import("std");
const Allocator = std.mem.Allocator;
const mem = std.mem;
const leb = std.leb;
const log = std.log.scoped(.link);
const assert = std.debug.assert;
const ArrayList = std.ArrayList;

/// Ordered list of data segments that will appear in the final binary.
/// When sorted, to-be-merged segments will be made adjacent.
/// Values are virtual addresses.
data_segments: std.AutoArrayHashMapUnmanaged(Wasm.DataSegmentId, u32) = .empty,
/// Each element describes one group of to-be-merged segments from `data_segments`:
/// the first segment of the group, and the virtual memory address at which the
/// group ends.
data_segment_groups: ArrayList(DataSegmentGroup) = .empty,

binary_bytes: ArrayList(u8) = .empty,
missing_exports: std.AutoArrayHashMapUnmanaged(String, void) = .empty,
function_imports: std.AutoArrayHashMapUnmanaged(String, Wasm.FunctionImportId) = .empty,
global_imports: std.AutoArrayHashMapUnmanaged(String, Wasm.GlobalImportId) = .empty,
data_imports: std.AutoArrayHashMapUnmanaged(String, Wasm.DataImportId) = .empty,

indirect_function_table: std.AutoArrayHashMapUnmanaged(Wasm.OutputFunctionIndex, void) = .empty,

/// A subset of the full interned function type list created only during flush.
func_types: std.AutoArrayHashMapUnmanaged(Wasm.FunctionType.Index, void) = .empty,

/// For debug purposes only.
memory_layout_finished: bool = false,

/// Index into `func_types`.
pub const FuncTypeIndex = enum(u32) {
    _,

    pub fn fromTypeIndex(i: Wasm.FunctionType.Index, f: *const Flush) FuncTypeIndex {
        return @enumFromInt(f.func_types.getIndex(i).?);
    }
};

/// Index into `indirect_function_table`.
const IndirectFunctionTableIndex = enum(u32) {
    _,

    fn fromObjectFunctionHandlingWeak(wasm: *const Wasm, index: Wasm.ObjectFunctionIndex) IndirectFunctionTableIndex {
        return fromOutputFunctionIndex(&wasm.flush_buffer, .fromObjectFunctionHandlingWeak(wasm, index));
    }

    fn fromSymbolName(wasm: *const Wasm, name: String) IndirectFunctionTableIndex {
        return fromOutputFunctionIndex(&wasm.flush_buffer, .fromSymbolName(wasm, name));
    }

    fn fromOutputFunctionIndex(f: *const Flush, i: Wasm.OutputFunctionIndex) IndirectFunctionTableIndex {
        return @enumFromInt(f.indirect_function_table.getIndex(i).?);
    }

    fn fromZcuIndirectFunctionSetIndex(i: Wasm.ZcuIndirectFunctionSetIndex) IndirectFunctionTableIndex {
        // These are the same since those are added to the table first.
        return @enumFromInt(@intFromEnum(i));
    }

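    /// The value written into code and data is the table position plus one: the
    /// element segment is emitted starting at table index 1, so a zero function
    /// pointer never refers to a valid entry.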
    fn toAbi(i: IndirectFunctionTableIndex) u32 {
        return @intFromEnum(i) + 1;
    }
};

const DataSegmentGroup = struct {
    first_segment: Wasm.DataSegmentId,
    end_addr: u32,
};
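// Illustrative example (not taken from any particular input): three segments that
// get merged into one group and are laid out at virtual addresses 0x400, 0x410,
// and 0x420, with the last one ending at 0x430, are recorded as a single
// `DataSegmentGroup` whose `first_segment` is the first of the three and whose
// `end_addr` is 0x430.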

pub fn clear(f: *Flush) void {
    f.data_segments.clearRetainingCapacity();
    f.data_segment_groups.clearRetainingCapacity();
    f.binary_bytes.clearRetainingCapacity();
    f.indirect_function_table.clearRetainingCapacity();
    f.func_types.clearRetainingCapacity();
    f.memory_layout_finished = false;
}

pub fn deinit(f: *Flush, gpa: Allocator) void {
    f.data_segments.deinit(gpa);
    f.data_segment_groups.deinit(gpa);
    f.binary_bytes.deinit(gpa);
    f.missing_exports.deinit(gpa);
    f.function_imports.deinit(gpa);
    f.global_imports.deinit(gpa);
    f.data_imports.deinit(gpa);
    f.indirect_function_table.deinit(gpa);
    f.func_types.deinit(gpa);
    f.* = undefined;
}

pub fn finish(f: *Flush, wasm: *Wasm) !void {
    const comp = wasm.base.comp;
    const shared_memory = comp.config.shared_memory;
    const diags = &comp.link_diags;
    const gpa = comp.gpa;
    const import_memory = comp.config.import_memory;
    const export_memory = comp.config.export_memory;
    const target = &comp.root_mod.resolved_target.result;
    const is64 = switch (target.cpu.arch) {
        .wasm32 => false,
        .wasm64 => true,
        else => unreachable,
    };
    const is_obj = comp.config.output_mode == .Obj;
    const allow_undefined = is_obj or wasm.import_symbols;

    const entry_name = if (wasm.entry_resolution.isNavOrUnresolved(wasm)) wasm.entry_name else .none;

    if (comp.zcu) |zcu| {
        const ip: *const InternPool = &zcu.intern_pool; // No mutations allowed!

        // Detect any intrinsics that were called so the corresponding runtime symbols can be marked as dependencies.
        // Likewise detect `@tagName` calls so those functions can be synthesized and included in the output.
        for (wasm.mir_instructions.items(.tag), wasm.mir_instructions.items(.data)) |tag, *data| switch (tag) {
            .call_intrinsic => {
                const symbol_name = try wasm.internString(@tagName(data.intrinsic));
                const i: Wasm.FunctionImport.Index = @enumFromInt(wasm.object_function_imports.getIndex(symbol_name) orelse {
                    return diags.fail("missing compiler runtime intrinsic '{s}' (undefined linker symbol)", .{
                        @tagName(data.intrinsic),
                    });
                });
                try wasm.markFunctionImport(symbol_name, i.value(wasm), i);
            },
            .call_tag_name => {
                assert(ip.indexToKey(data.ip_index) == .enum_type);
                const gop = try wasm.zcu_funcs.getOrPut(gpa, data.ip_index);
                if (!gop.found_existing) {
                    wasm.tag_name_table_ref_count += 1;
                    const int_tag_ty = Zcu.Type.fromInterned(data.ip_index).intTagType(zcu);
                    gop.value_ptr.* = .{ .tag_name = .{
                        .symbol_name = try wasm.internStringFmt("__zig_tag_name_{d}", .{@intFromEnum(data.ip_index)}),
                        .type_index = try wasm.internFunctionType(.auto, &.{int_tag_ty.ip_index}, .slice_const_u8_sentinel_0, target),
                        .table_index = @intCast(wasm.tag_name_offs.items.len),
                    } };
                    try wasm.functions.put(gpa, .fromZcuFunc(wasm, @enumFromInt(gop.index)), {});
                    const tag_names = ip.loadEnumType(data.ip_index).names;
                    for (tag_names.get(ip)) |tag_name| {
                        const slice = tag_name.toSlice(ip);
                        try wasm.tag_name_offs.append(gpa, @intCast(wasm.tag_name_bytes.items.len));
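                        // The name slice is 0-terminated; copying `slice.len + 1` bytes keeps the sentinel.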
                        try wasm.tag_name_bytes.appendSlice(gpa, slice[0 .. slice.len + 1]);
                    }
                }
            },
            else => continue,
        };

        {
            var i = wasm.function_imports_len_prelink;
            while (i < f.function_imports.entries.len) {
                const symbol_name = f.function_imports.keys()[i];
                if (wasm.object_function_imports.getIndex(symbol_name)) |import_index_usize| {
                    const import_index: Wasm.FunctionImport.Index = @enumFromInt(import_index_usize);
                    try wasm.markFunctionImport(symbol_name, import_index.value(wasm), import_index);
                    f.function_imports.swapRemoveAt(i);
                    continue;
                }
                i += 1;
            }
        }

        {
            var i = wasm.data_imports_len_prelink;
            while (i < f.data_imports.entries.len) {
                const symbol_name = f.data_imports.keys()[i];
                if (wasm.object_data_imports.getIndex(symbol_name)) |import_index_usize| {
                    const import_index: Wasm.ObjectDataImport.Index = @enumFromInt(import_index_usize);
                    try wasm.markDataImport(symbol_name, import_index.value(wasm), import_index);
                    f.data_imports.swapRemoveAt(i);
                    continue;
                }
                i += 1;
            }
        }

        if (wasm.error_name_table_ref_count > 0) {
            // Ensure Zcu error name structures are populated.
            const full_error_names = ip.global_error_set.getNamesFromMainThread();
            try wasm.error_name_offs.ensureTotalCapacity(gpa, full_error_names.len + 1);
            if (wasm.error_name_offs.items.len == 0) {
                // Dummy entry at index 0 to avoid a sub instruction at `@errorName` sites.
                wasm.error_name_offs.appendAssumeCapacity(0);
            }
            const new_error_names = full_error_names[wasm.error_name_offs.items.len - 1 ..];
            for (new_error_names) |error_name| {
                wasm.error_name_offs.appendAssumeCapacity(@intCast(wasm.error_name_bytes.items.len));
                const s: [:0]const u8 = error_name.toSlice(ip);
                try wasm.error_name_bytes.appendSlice(gpa, s[0 .. s.len + 1]);
            }
        }

        for (wasm.nav_exports.keys(), wasm.nav_exports.values()) |*nav_export, export_index| {
            if (ip.isFunctionType(ip.getNav(nav_export.nav_index).typeOf(ip))) {
                log.debug("flush export '{s}' nav={d}", .{ nav_export.name.slice(wasm), nav_export.nav_index });
                const function_index = Wasm.FunctionIndex.fromIpNav(wasm, nav_export.nav_index).?;
                const explicit = f.missing_exports.swapRemove(nav_export.name);
                const is_hidden = !explicit and switch (export_index.ptr(zcu).opts.visibility) {
                    .hidden => true,
                    .default, .protected => false,
                };
                if (is_hidden) {
                    try wasm.hidden_function_exports.put(gpa, nav_export.name, function_index);
                } else {
                    try wasm.function_exports.put(gpa, nav_export.name, function_index);
                }
                _ = f.function_imports.swapRemove(nav_export.name);

                if (nav_export.name.toOptional() == entry_name)
                    wasm.entry_resolution = .fromIpNav(wasm, nav_export.nav_index);
            } else {
                // This is a data export because Zcu currently has no way to
                // export wasm globals.
                _ = f.missing_exports.swapRemove(nav_export.name);
                _ = f.data_imports.swapRemove(nav_export.name);
                if (!is_obj) {
                    diags.addError("unable to export data symbol '{s}'; not emitting a relocatable", .{
                        nav_export.name.slice(wasm),
                    });
                }
            }
        }

        for (f.missing_exports.keys()) |exp_name| {
            diags.addError("manually specified export name '{s}' undefined", .{exp_name.slice(wasm)});
        }
    }

    if (entry_name.unwrap()) |name| {
        if (wasm.entry_resolution == .unresolved) {
            var err = try diags.addErrorWithNotes(1);
            try err.addMsg("entry symbol '{s}' missing", .{name.slice(wasm)});
            err.addNote("'-fno-entry' suppresses this error", .{});
        }
    }

    if (!allow_undefined) {
        for (f.function_imports.keys(), f.function_imports.values()) |name, function_import_id| {
            if (function_import_id.undefinedAllowed(wasm)) continue;
            const src_loc = function_import_id.sourceLocation(wasm);
            src_loc.addError(wasm, "undefined function: {s}", .{name.slice(wasm)});
        }
        for (f.global_imports.keys(), f.global_imports.values()) |name, global_import_id| {
            const src_loc = global_import_id.sourceLocation(wasm);
            src_loc.addError(wasm, "undefined global: {s}", .{name.slice(wasm)});
        }
        for (wasm.table_imports.keys(), wasm.table_imports.values()) |name, table_import_id| {
            const src_loc = table_import_id.value(wasm).source_location;
            src_loc.addError(wasm, "undefined table: {s}", .{name.slice(wasm)});
        }
        for (f.data_imports.keys(), f.data_imports.values()) |name, data_import_id| {
            const src_loc = data_import_id.sourceLocation(wasm);
            src_loc.addError(wasm, "undefined data: {s}", .{name.slice(wasm)});
        }
    }

    if (diags.hasErrors()) return error.LinkFailure;

    // Merge indirect function tables.
    try f.indirect_function_table.ensureUnusedCapacity(gpa, wasm.zcu_indirect_function_set.entries.len +
        wasm.object_indirect_function_import_set.entries.len + wasm.object_indirect_function_set.entries.len);
    // This one goes first so the indexes can be stable for MIR lowering.
    for (wasm.zcu_indirect_function_set.keys()) |nav_index|
        f.indirect_function_table.putAssumeCapacity(.fromIpNav(wasm, nav_index), {});
    for (wasm.object_indirect_function_import_set.keys()) |symbol_name|
        f.indirect_function_table.putAssumeCapacity(.fromSymbolName(wasm, symbol_name), {});
    for (wasm.object_indirect_function_set.keys()) |object_function_index|
        f.indirect_function_table.putAssumeCapacity(.fromObjectFunction(wasm, object_function_index), {});

    if (wasm.object_init_funcs.items.len > 0) {
        // Zig has no constructors so these are only for object file inputs.
        mem.sortUnstable(Wasm.InitFunc, wasm.object_init_funcs.items, {}, Wasm.InitFunc.lessThan);
        try wasm.functions.put(gpa, .__wasm_call_ctors, {});
    }

    // Merge and order the data segments. Depends on garbage collection so that
    // unused segments can be omitted.
    try f.data_segments.ensureUnusedCapacity(gpa, wasm.data_segments.entries.len +
        wasm.uavs_obj.entries.len + wasm.navs_obj.entries.len +
        wasm.uavs_exe.entries.len + wasm.navs_exe.entries.len + 4);
    if (is_obj) assert(wasm.uavs_exe.entries.len == 0);
    if (is_obj) assert(wasm.navs_exe.entries.len == 0);
    if (!is_obj) assert(wasm.uavs_obj.entries.len == 0);
    if (!is_obj) assert(wasm.navs_obj.entries.len == 0);
    for (0..wasm.uavs_obj.entries.len) |uavs_index| f.data_segments.putAssumeCapacityNoClobber(.pack(wasm, .{
        .uav_obj = @enumFromInt(uavs_index),
    }), @as(u32, undefined));
    for (0..wasm.navs_obj.entries.len) |navs_index| f.data_segments.putAssumeCapacityNoClobber(.pack(wasm, .{
        .nav_obj = @enumFromInt(navs_index),
    }), @as(u32, undefined));
    for (0..wasm.uavs_exe.entries.len) |uavs_index| f.data_segments.putAssumeCapacityNoClobber(.pack(wasm, .{
        .uav_exe = @enumFromInt(uavs_index),
    }), @as(u32, undefined));
    for (0..wasm.navs_exe.entries.len) |navs_index| f.data_segments.putAssumeCapacityNoClobber(.pack(wasm, .{
        .nav_exe = @enumFromInt(navs_index),
    }), @as(u32, undefined));
    if (wasm.error_name_table_ref_count > 0) {
        f.data_segments.putAssumeCapacity(.__zig_error_names, @as(u32, undefined));
        f.data_segments.putAssumeCapacity(.__zig_error_name_table, @as(u32, undefined));
    }
    if (wasm.tag_name_table_ref_count > 0) {
        f.data_segments.putAssumeCapacity(.__zig_tag_names, @as(u32, undefined));
        f.data_segments.putAssumeCapacity(.__zig_tag_name_table, @as(u32, undefined));
    }
    for (wasm.data_segments.keys()) |data_id| f.data_segments.putAssumeCapacity(data_id, @as(u32, undefined));

    try wasm.functions.ensureUnusedCapacity(gpa, 3);

    // Passive segments are used to avoid memory being reinitialized on each
    // thread's instantiation. These passive segments are initialized and
    // dropped in __wasm_init_memory, which is registered as the start function.
    // We also initialize bss segments (using memory.fill) as part of this
    // function.
    if (wasm.any_passive_inits) {
        try wasm.addFunction(.__wasm_init_memory, &.{}, &.{});
    }

    try wasm.tables.ensureUnusedCapacity(gpa, 1);

    if (f.indirect_function_table.entries.len > 0) {
        wasm.tables.putAssumeCapacity(.__indirect_function_table, {});
    }

    // Sort order:
    // 0. Segment category (tls, data, zero)
    // 1. Segment name prefix
    // 2. Segment alignment
    // 3. Reference count, descending (optimize for LEB encoding)
    // 4. Segment name suffix
    // 5. Segment ID interpreted as an integer (for determinism)
    //
    // TLS segments are intended to be merged with each other, and segments
    // with a common prefix name are intended to be merged with each other.
    // Sorting ensures the segments intended to be merged will be adjacent.
    //
    // Each Zcu Nav and Cau has an independent data segment ID in this logic.
    // For the purposes of sorting, they are implicitly all named ".data".
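    //
    // For example (illustrative only): segments named ".rodata.str" and
    // ".rodata.cst" share a common name prefix, so they sort next to each other
    // and may end up merged into a single output segment, while any TLS segment
    // sorts into its own category ahead of both.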
    const Sort = struct {
        wasm: *const Wasm,
        segments: []const Wasm.DataSegmentId,
        pub fn lessThan(ctx: @This(), lhs: usize, rhs: usize) bool {
            const lhs_segment = ctx.segments[lhs];
            const rhs_segment = ctx.segments[rhs];
            const lhs_category = @intFromEnum(lhs_segment.category(ctx.wasm));
            const rhs_category = @intFromEnum(rhs_segment.category(ctx.wasm));
            switch (std.math.order(lhs_category, rhs_category)) {
                .lt => return true,
                .gt => return false,
                .eq => {},
            }
            const lhs_segment_name = lhs_segment.name(ctx.wasm);
            const rhs_segment_name = rhs_segment.name(ctx.wasm);
            const lhs_prefix, const lhs_suffix = splitSegmentName(lhs_segment_name);
            const rhs_prefix, const rhs_suffix = splitSegmentName(rhs_segment_name);
            switch (mem.order(u8, lhs_prefix, rhs_prefix)) {
                .lt => return true,
                .gt => return false,
                .eq => {},
            }
            const lhs_alignment = lhs_segment.alignment(ctx.wasm);
            const rhs_alignment = rhs_segment.alignment(ctx.wasm);
            switch (lhs_alignment.order(rhs_alignment)) {
                .lt => return false,
                .gt => return true,
                .eq => {},
            }
            switch (std.math.order(lhs_segment.refCount(ctx.wasm), rhs_segment.refCount(ctx.wasm))) {
                .lt => return false,
                .gt => return true,
                .eq => {},
            }
            switch (mem.order(u8, lhs_suffix, rhs_suffix)) {
                .lt => return true,
                .gt => return false,
                .eq => {},
            }
            return @intFromEnum(lhs_segment) < @intFromEnum(rhs_segment);
        }
    };
    f.data_segments.sortUnstable(@as(Sort, .{
        .wasm = wasm,
        .segments = f.data_segments.keys(),
    }));

    const page_size = std.wasm.page_size; // 64kb
    const stack_alignment: Alignment = .@"16"; // wasm's stack alignment as specified by tool-convention
    const heap_alignment: Alignment = .@"16"; // wasm's heap alignment as specified by tool-convention
    const pointer_alignment: Alignment = .@"4";
    // Always place the stack at the start by default unless the user specified the global-base flag.
    const place_stack_first, var memory_ptr: u64 = if (wasm.global_base) |base| .{ false, base } else .{ true, 0 };

    var virtual_addrs: VirtualAddrs = .{
        .stack_pointer = undefined,
        .heap_base = undefined,
        .heap_end = undefined,
        .tls_base = null,
        .tls_align = .none,
        .tls_size = null,
        .init_memory_flag = null,
    };
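    // With the defaults (no explicit global-base), memory is laid out roughly as:
    //   [stack] [data segment groups] [init-memory flag, if shared memory] [heap...]
    // When a global-base is specified, the stack is instead placed after the data
    // segments, just before the heap.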

    if (place_stack_first and !is_obj) {
        memory_ptr = stack_alignment.forward(memory_ptr);
        memory_ptr += wasm.base.stack_size;
        virtual_addrs.stack_pointer = @intCast(memory_ptr);
    }

    const segment_ids = f.data_segments.keys();
    const segment_vaddrs = f.data_segments.values();
    assert(f.data_segment_groups.items.len == 0);
    const data_vaddr: u32 = @intCast(memory_ptr);
    if (segment_ids.len > 0) {
        var seen_tls: enum { before, during, after } = .before;
        var category: Wasm.DataSegmentId.Category = undefined;
        var first_segment: Wasm.DataSegmentId = segment_ids[0];
        for (segment_ids, segment_vaddrs, 0..) |segment_id, *segment_vaddr, i| {
            const alignment = segment_id.alignment(wasm);
            category = segment_id.category(wasm);
            const start_addr = alignment.forward(memory_ptr);

            const want_new_segment = b: {
                if (is_obj) break :b false;
                switch (seen_tls) {
                    .before => switch (category) {
                        .tls => {
                            virtual_addrs.tls_base = if (shared_memory) 0 else @intCast(start_addr);
                            virtual_addrs.tls_align = alignment;
                            seen_tls = .during;
                            break :b f.data_segment_groups.items.len > 0;
                        },
                        else => {},
                    },
                    .during => switch (category) {
                        .tls => {
                            virtual_addrs.tls_align = virtual_addrs.tls_align.maxStrict(alignment);
                            virtual_addrs.tls_size = @intCast(memory_ptr - virtual_addrs.tls_base.?);
                            break :b false;
                        },
                        else => {
                            seen_tls = .after;
                            break :b true;
                        },
                    },
                    .after => {},
                }
                break :b i >= 1 and !wantSegmentMerge(wasm, segment_ids[i - 1], segment_id, category);
            };
            if (want_new_segment) {
                log.debug("new segment group at 0x{x} {} {s} {}", .{ start_addr, segment_id, segment_id.name(wasm), category });
                try f.data_segment_groups.append(gpa, .{
                    .end_addr = @intCast(memory_ptr),
                    .first_segment = first_segment,
                });
                first_segment = segment_id;
            }

            const size = segment_id.size(wasm);
            segment_vaddr.* = @intCast(start_addr);
            log.debug("0x{x} {d} {s}", .{ start_addr, @intFromEnum(segment_id), segment_id.name(wasm) });
            memory_ptr = start_addr + size;
        }
        if (category != .zero) try f.data_segment_groups.append(gpa, .{
            .first_segment = first_segment,
            .end_addr = @intCast(memory_ptr),
        });
        if (category == .tls and seen_tls == .during) {
            virtual_addrs.tls_size = @intCast(memory_ptr - virtual_addrs.tls_base.?);
        }
    }

    if (shared_memory and wasm.any_passive_inits) {
        memory_ptr = pointer_alignment.forward(memory_ptr);
        virtual_addrs.init_memory_flag = @intCast(memory_ptr);
        memory_ptr += 4;
    }

    if (!place_stack_first and !is_obj) {
        memory_ptr = stack_alignment.forward(memory_ptr);
        memory_ptr += wasm.base.stack_size;
        virtual_addrs.stack_pointer = @intCast(memory_ptr);
    }

    memory_ptr = heap_alignment.forward(memory_ptr);
    virtual_addrs.heap_base = @intCast(memory_ptr);

    if (wasm.initial_memory) |initial_memory| {
        if (!mem.isAlignedGeneric(u64, initial_memory, page_size)) {
            diags.addError("initial memory value {d} is not {d}-byte aligned", .{ initial_memory, page_size });
        }
        if (memory_ptr > initial_memory) {
            diags.addError("initial memory value {d} insufficient; minimum {d}", .{ initial_memory, memory_ptr });
        }
        if (initial_memory > std.math.maxInt(u32)) {
            diags.addError("initial memory value {d} exceeds 32-bit address space", .{initial_memory});
        }
        if (diags.hasErrors()) return error.LinkFailure;
        memory_ptr = initial_memory;
    } else {
        memory_ptr = mem.alignForward(u64, memory_ptr, std.wasm.page_size);
    }
    virtual_addrs.heap_end = @intCast(memory_ptr);

    // In case we do not import memory but define it ourselves, set the
    // minimum number of pages in the memory section.
    wasm.memories.limits.min = @intCast(memory_ptr / page_size);
    log.debug("total memory pages: {d}", .{wasm.memories.limits.min});

    if (wasm.max_memory) |max_memory| {
        if (!mem.isAlignedGeneric(u64, max_memory, page_size)) {
            diags.addError("maximum memory value {d} is not {d}-byte aligned", .{ max_memory, page_size });
        }
        if (memory_ptr > max_memory) {
            diags.addError("maximum memory value {d} insufficient; minimum {d}", .{ max_memory, memory_ptr });
        }
        if (max_memory > std.math.maxInt(u32)) {
            diags.addError("maximum memory value {d} exceeds 32-bit address space", .{max_memory});
        }
        if (diags.hasErrors()) return error.LinkFailure;
        wasm.memories.limits.max = @intCast(max_memory / page_size);
        wasm.memories.limits.flags.has_max = true;
        if (shared_memory) wasm.memories.limits.flags.is_shared = true;
        log.debug("maximum memory pages: {d}", .{wasm.memories.limits.max});
    }
    f.memory_layout_finished = true;

    // When there are TLS GOT entries and shared memory is enabled, relocations
    // into the TLS segment must be applied at runtime; otherwise these helper
    // functions are not created.
    if (shared_memory and virtual_addrs.tls_base != null) {
        // This logic that checks `any_tls_relocs` is missing the part where it
        // also notices threadlocal globals from Zcu code.
        if (wasm.any_tls_relocs) try wasm.addFunction(.__wasm_apply_global_tls_relocs, &.{}, &.{});
        try wasm.addFunction(.__wasm_init_tls, &.{.i32}, &.{});
        try wasm.globals.ensureUnusedCapacity(gpa, 3);
        wasm.globals.putAssumeCapacity(.__tls_base, {});
        wasm.globals.putAssumeCapacity(.__tls_size, {});
        wasm.globals.putAssumeCapacity(.__tls_align, {});
    }

    var section_index: u32 = 0;
    // Index of the code section. Used to tell relocation table where the section lives.
    var code_section_index: ?u32 = null;
    // Index of the data section. Used to tell relocation table where the section lives.
    var data_section_index: ?u32 = null;

    const binary_bytes = &f.binary_bytes;
    assert(binary_bytes.items.len == 0);

    try binary_bytes.appendSlice(gpa, &std.wasm.magic ++ &std.wasm.version);
    assert(binary_bytes.items.len == 8);

    // Type section.
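    // Each entry below is encoded as the function type form byte (0x60), a LEB128
    // parameter count followed by the parameter value types, then a LEB128 result
    // count followed by the result value types.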
    for (f.function_imports.values()) |id| {
        try f.func_types.put(gpa, id.functionType(wasm), {});
    }
    for (wasm.functions.keys()) |function| {
        try f.func_types.put(gpa, function.typeIndex(wasm), {});
    }
    if (f.func_types.entries.len != 0) {
        const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
        for (f.func_types.keys()) |func_type_index| {
            const func_type = func_type_index.ptr(wasm);
            try appendLeb128(gpa, binary_bytes, std.wasm.function_type);
            const params = func_type.params.slice(wasm);
            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(params.len)));
            for (params) |param_ty| {
                try appendLeb128(gpa, binary_bytes, @intFromEnum(param_ty));
            }
            const returns = func_type.returns.slice(wasm);
            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(returns.len)));
            for (returns) |ret_ty| {
                try appendLeb128(gpa, binary_bytes, @intFromEnum(ret_ty));
            }
        }
        replaceVecSectionHeader(binary_bytes, header_offset, .type, @intCast(f.func_types.entries.len));
        section_index += 1;
    }

    if (!is_obj) {
        // TODO: sort function_imports by ref count descending for optimal LEB encodings
        // TODO: sort   global_imports by ref count descending for optimal LEB encodings
        // TODO: sort output functions by ref count descending for optimal LEB encodings
    }

    // Import section
    {
        var total_imports: usize = 0;
        const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);

        for (f.function_imports.values()) |id| {
            const module_name = id.moduleName(wasm).slice(wasm).?;
            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(module_name.len)));
            try binary_bytes.appendSlice(gpa, module_name);

            const name = id.importName(wasm).slice(wasm);
            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
            try binary_bytes.appendSlice(gpa, name);

            try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.function));
            const type_index: FuncTypeIndex = .fromTypeIndex(id.functionType(wasm), f);
            try appendLeb128(gpa, binary_bytes, @intFromEnum(type_index));
        }
        total_imports += f.function_imports.entries.len;

        for (wasm.table_imports.values()) |id| {
            const table_import = id.value(wasm);
            const module_name = table_import.module_name.slice(wasm);
            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(module_name.len)));
            try binary_bytes.appendSlice(gpa, module_name);

            const name = table_import.name.slice(wasm);
            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
            try binary_bytes.appendSlice(gpa, name);

            try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.table));
            try appendLeb128(gpa, binary_bytes, @intFromEnum(@as(std.wasm.RefType, table_import.flags.ref_type.to())));
            try emitLimits(gpa, binary_bytes, table_import.limits());
        }
        total_imports += wasm.table_imports.entries.len;

        if (import_memory) {
            const name = if (is_obj) wasm.preloaded_strings.__linear_memory else wasm.preloaded_strings.memory;
            try emitMemoryImport(wasm, binary_bytes, name, &.{
                // TODO the import_memory option needs to specify from which module
                .module_name = wasm.object_host_name.unwrap().?,
                .limits_min = wasm.memories.limits.min,
                .limits_max = wasm.memories.limits.max,
                .limits_has_max = wasm.memories.limits.flags.has_max,
                .limits_is_shared = wasm.memories.limits.flags.is_shared,
                .source_location = .none,
            });
            total_imports += 1;
        }

        for (f.global_imports.values()) |id| {
            const module_name = id.moduleName(wasm).slice(wasm).?;
            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(module_name.len)));
            try binary_bytes.appendSlice(gpa, module_name);

            const name = id.importName(wasm).slice(wasm);
            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
            try binary_bytes.appendSlice(gpa, name);

            try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.global));
            const global_type = id.globalType(wasm);
            try appendLeb128(gpa, binary_bytes, @intFromEnum(@as(std.wasm.Valtype, global_type.valtype)));
            try binary_bytes.append(gpa, @intFromBool(global_type.mutable));
        }
        total_imports += f.global_imports.entries.len;

        if (total_imports > 0) {
            replaceVecSectionHeader(binary_bytes, header_offset, .import, @intCast(total_imports));
            section_index += 1;
        } else {
            binary_bytes.shrinkRetainingCapacity(header_offset);
        }
    }

    // Function section
    if (wasm.functions.count() != 0) {
        const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
        for (wasm.functions.keys()) |function| {
            const index: FuncTypeIndex = .fromTypeIndex(function.typeIndex(wasm), f);
            try appendLeb128(gpa, binary_bytes, @intFromEnum(index));
        }

        replaceVecSectionHeader(binary_bytes, header_offset, .function, @intCast(wasm.functions.count()));
        section_index += 1;
    }

    // Table section
    if (wasm.tables.entries.len > 0) {
        const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);

        for (wasm.tables.keys()) |table| {
            try appendLeb128(gpa, binary_bytes, @intFromEnum(@as(std.wasm.RefType, table.refType(wasm))));
            try emitLimits(gpa, binary_bytes, table.limits(wasm));
        }

        replaceVecSectionHeader(binary_bytes, header_offset, .table, @intCast(wasm.tables.entries.len));
        section_index += 1;
    }

    // Memory section. wasm currently only supports 1 linear memory segment.
    if (!import_memory) {
        const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
        try emitLimits(gpa, binary_bytes, wasm.memories.limits);
        replaceVecSectionHeader(binary_bytes, header_offset, .memory, 1);
        section_index += 1;
    }

    // Global section.
    const globals_len: u32 = @intCast(wasm.globals.entries.len);
    if (globals_len > 0) {
        const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);

        for (wasm.globals.keys()) |global_resolution| {
            switch (global_resolution.unpack(wasm)) {
                .unresolved => unreachable,
                .__heap_base => try appendGlobal(gpa, binary_bytes, 0, virtual_addrs.heap_base),
                .__heap_end => try appendGlobal(gpa, binary_bytes, 0, virtual_addrs.heap_end),
                .__stack_pointer => try appendGlobal(gpa, binary_bytes, 1, virtual_addrs.stack_pointer),
                .__tls_align => try appendGlobal(gpa, binary_bytes, 0, @intCast(virtual_addrs.tls_align.toByteUnits().?)),
                .__tls_base => try appendGlobal(gpa, binary_bytes, 1, virtual_addrs.tls_base.?),
                .__tls_size => try appendGlobal(gpa, binary_bytes, 0, virtual_addrs.tls_size.?),
                .object_global => |i| {
                    const global = i.ptr(wasm);
                    try binary_bytes.appendSlice(gpa, &.{
                        @intFromEnum(@as(std.wasm.Valtype, global.flags.global_type.valtype.to())),
                        @intFromBool(global.flags.global_type.mutable),
                    });
                    try emitExpr(wasm, binary_bytes, global.expr);
                },
                .nav_exe => unreachable, // Zig source code currently cannot represent this.
                .nav_obj => unreachable, // Zig source code currently cannot represent this.
            }
        }

        replaceVecSectionHeader(binary_bytes, header_offset, .global, globals_len);
        section_index += 1;
    }

    // Export section
    {
        const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
        var exports_len: usize = 0;

        for (wasm.function_exports.keys(), wasm.function_exports.values()) |exp_name, function_index| {
            const name = exp_name.slice(wasm);
            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
            try binary_bytes.appendSlice(gpa, name);
            try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.function));
            const func_index = Wasm.OutputFunctionIndex.fromFunctionIndex(wasm, function_index);
            try appendLeb128(gpa, binary_bytes, @intFromEnum(func_index));
        }
        exports_len += wasm.function_exports.entries.len;

        if (wasm.export_table and f.indirect_function_table.entries.len > 0) {
            const name = "__indirect_function_table";
            const index: u32 = @intCast(wasm.tables.getIndex(.__indirect_function_table).?);
            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
            try binary_bytes.appendSlice(gpa, name);
            try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.table));
            try appendLeb128(gpa, binary_bytes, index);
            exports_len += 1;
        }

        if (export_memory) {
            const name = "memory";
            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
            try binary_bytes.appendSlice(gpa, name);
            try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.memory));
            try appendLeb128(gpa, binary_bytes, @as(u32, 0));
            exports_len += 1;
        }

        for (wasm.global_exports.items) |exp| {
            const name = exp.name.slice(wasm);
            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
            try binary_bytes.appendSlice(gpa, name);
            try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.global));
            try appendLeb128(gpa, binary_bytes, @intFromEnum(exp.global_index));
        }
        exports_len += wasm.global_exports.items.len;

        if (exports_len > 0) {
            replaceVecSectionHeader(binary_bytes, header_offset, .@"export", @intCast(exports_len));
            section_index += 1;
        } else {
            binary_bytes.shrinkRetainingCapacity(header_offset);
        }
    }

    // start section
    if (wasm.functions.getIndex(.__wasm_init_memory)) |func_index| {
        try emitStartSection(gpa, binary_bytes, .fromFunctionIndex(wasm, @enumFromInt(func_index)));
    } else if (Wasm.OutputFunctionIndex.fromResolution(wasm, wasm.entry_resolution)) |func_index| {
        try emitStartSection(gpa, binary_bytes, func_index);
    }

    // element section
    if (f.indirect_function_table.entries.len > 0) {
        const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);

        // indirect function table elements
        const table_index: u32 = @intCast(wasm.tables.getIndex(.__indirect_function_table).?);
        // Active element segment: flags 0x00 uses the implicit table index 0, while flags 0x02 encodes the table index explicitly.
        const flags: u32 = if (table_index == 0) 0x0 else 0x02;
        try appendLeb128(gpa, binary_bytes, flags);
        if (flags == 0x02) {
            try appendLeb128(gpa, binary_bytes, table_index);
        }
        // We start at index 1, so unresolved function pointers are invalid
        {
            var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, binary_bytes);
            defer binary_bytes.* = aw.toArrayList();
            try emitInit(&aw.writer, .{ .i32_const = 1 });
        }
        if (flags == 0x02) {
            try appendLeb128(gpa, binary_bytes, @as(u8, 0)); // represents funcref
        }
        try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(f.indirect_function_table.entries.len)));
        for (f.indirect_function_table.keys()) |func_index| {
            try appendLeb128(gpa, binary_bytes, @intFromEnum(func_index));
        }

        replaceVecSectionHeader(binary_bytes, header_offset, .element, 1);
        section_index += 1;
    }

    // When the shared-memory option is enabled, we *must* emit the 'data count' section.
    if (f.data_segment_groups.items.len > 0) {
        const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
        replaceVecSectionHeader(binary_bytes, header_offset, .data_count, @intCast(f.data_segment_groups.items.len));
    }

    // Code section.
    if (wasm.functions.count() != 0) {
        const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);

        for (wasm.functions.keys()) |resolution| switch (resolution.unpack(wasm)) {
            .unresolved => unreachable,
            .__wasm_apply_global_tls_relocs => @panic("TODO lower __wasm_apply_global_tls_relocs"),
            .__wasm_call_ctors => {
                const code_start = try reserveSize(gpa, binary_bytes);
                defer replaceSize(binary_bytes, code_start);
                try emitCallCtorsFunction(wasm, binary_bytes);
            },
            .__wasm_init_memory => {
                const code_start = try reserveSize(gpa, binary_bytes);
                defer replaceSize(binary_bytes, code_start);
                try emitInitMemoryFunction(wasm, binary_bytes, &virtual_addrs);
            },
            .__wasm_init_tls => {
                const code_start = try reserveSize(gpa, binary_bytes);
                defer replaceSize(binary_bytes, code_start);
                try emitInitTlsFunction(wasm, binary_bytes);
            },
            .object_function => |i| {
                const ptr = i.ptr(wasm);
                const code = ptr.code.slice(wasm);
                try appendLeb128(gpa, binary_bytes, code.len);
                const code_start = binary_bytes.items.len;
                try binary_bytes.appendSlice(gpa, code);
                if (!is_obj) applyRelocs(binary_bytes.items[code_start..], ptr.offset, ptr.relocations(wasm), wasm);
            },
            .zcu_func => |i| {
                const code_start = try reserveSize(gpa, binary_bytes);
                defer replaceSize(binary_bytes, code_start);

                log.debug("lowering function code for '{s}'", .{resolution.name(wasm).?});

                const zcu = comp.zcu.?;
                const ip = &zcu.intern_pool;
                const ip_index = i.key(wasm).*;
                switch (ip.indexToKey(ip_index)) {
                    .enum_type => {
                        try emitTagNameFunction(wasm, binary_bytes, f.data_segments.get(.__zig_tag_name_table).?, i.value(wasm).tag_name.table_index, ip_index);
                    },
                    else => {
                        const func = i.value(wasm).function;
                        const mir: Mir = .{
                            .instructions = wasm.mir_instructions.slice().subslice(func.instructions_off, func.instructions_len),
                            .extra = wasm.mir_extra.items[func.extra_off..][0..func.extra_len],
                            .locals = wasm.mir_locals.items[func.locals_off..][0..func.locals_len],
                            .prologue = func.prologue,
                            // These fields are unused by `lower`.
                            .uavs = undefined,
                            .indirect_function_set = undefined,
                            .func_tys = undefined,
                            .error_name_table_ref_count = undefined,
                        };
                        try mir.lower(wasm, binary_bytes);
                    },
                }
            },
        };

        replaceVecSectionHeader(binary_bytes, header_offset, .code, @intCast(wasm.functions.entries.len));
        code_section_index = section_index;
        section_index += 1;
    }

    if (!is_obj) {
        for (wasm.uav_fixups.items) |uav_fixup| {
            const ds_id: Wasm.DataSegmentId = .pack(wasm, .{ .uav_exe = uav_fixup.uavs_exe_index });
            const vaddr = f.data_segments.get(ds_id).? + uav_fixup.addend;
            if (!is64) {
                mem.writeInt(u32, wasm.string_bytes.items[uav_fixup.offset..][0..4], vaddr, .little);
            } else {
                mem.writeInt(u64, wasm.string_bytes.items[uav_fixup.offset..][0..8], vaddr, .little);
            }
        }
        for (wasm.nav_fixups.items) |nav_fixup| {
            const ds_id: Wasm.DataSegmentId = .pack(wasm, .{ .nav_exe = nav_fixup.navs_exe_index });
            const vaddr = f.data_segments.get(ds_id).? + nav_fixup.addend;
            if (!is64) {
                mem.writeInt(u32, wasm.string_bytes.items[nav_fixup.offset..][0..4], vaddr, .little);
            } else {
                mem.writeInt(u64, wasm.string_bytes.items[nav_fixup.offset..][0..8], vaddr, .little);
            }
        }
        for (wasm.func_table_fixups.items) |fixup| {
            const table_index: IndirectFunctionTableIndex = .fromZcuIndirectFunctionSetIndex(fixup.table_index);
            if (!is64) {
                mem.writeInt(u32, wasm.string_bytes.items[fixup.offset..][0..4], table_index.toAbi(), .little);
            } else {
                mem.writeInt(u64, wasm.string_bytes.items[fixup.offset..][0..8], table_index.toAbi(), .little);
            }
        }
    }

    // Data section.
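    // Each group below becomes one wasm data segment: its flags, an i32.const
    // init expression with the group's start address (active segments only), the
    // group size, and then the member segments' bytes with zero padding inserted
    // for alignment.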
 929    if (f.data_segment_groups.items.len != 0) {
 930        const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
 931
 932        var group_index: u32 = 0;
 933        var segment_offset: u32 = 0;
 934        var group_start_addr: u32 = data_vaddr;
 935        var group_end_addr = f.data_segment_groups.items[group_index].end_addr;
 936        for (segment_ids, segment_vaddrs) |segment_id, segment_vaddr| {
 937            if (segment_vaddr >= group_end_addr) {
 938                try binary_bytes.appendNTimes(gpa, 0, group_end_addr - group_start_addr - segment_offset);
 939                group_index += 1;
 940                if (group_index >= f.data_segment_groups.items.len) {
 941                    // All remaining segments are zero.
 942                    break;
 943                }
 944                group_start_addr = group_end_addr;
 945                group_end_addr = f.data_segment_groups.items[group_index].end_addr;
 946                segment_offset = 0;
 947            }
 948            if (segment_offset == 0) {
 949                const group_size = group_end_addr - group_start_addr;
 950                log.debug("emit data section group, {d} bytes", .{group_size});
 951                const flags: Object.DataSegmentFlags = if (segment_id.isPassive(wasm)) .passive else .active;
 952                try appendLeb128(gpa, binary_bytes, @intFromEnum(flags));
 953                // Passive segments are initialized at runtime.
 954                if (flags != .passive) {
 955                    var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, binary_bytes);
 956                    defer binary_bytes.* = aw.toArrayList();
 957                    try emitInit(&aw.writer, .{ .i32_const = @as(i32, @bitCast(group_start_addr)) });
 958                }
 959                try appendLeb128(gpa, binary_bytes, group_size);
 960            }
 961            if (segment_id.isEmpty(wasm)) {
 962                // It counted for virtual memory but it does not go into the binary.
 963                continue;
 964            }
 965
 966            // Padding for alignment.
 967            const needed_offset = segment_vaddr - group_start_addr;
 968            try binary_bytes.appendNTimes(gpa, 0, needed_offset - segment_offset);
 969            segment_offset = needed_offset;
 970
 971            const code_start = binary_bytes.items.len;
 972            append: {
 973                const code = switch (segment_id.unpack(wasm)) {
 974                    .__heap_base => {
 975                        mem.writeInt(u32, try binary_bytes.addManyAsArray(gpa, 4), virtual_addrs.heap_base, .little);
 976                        break :append;
 977                    },
 978                    .__heap_end => {
 979                        mem.writeInt(u32, try binary_bytes.addManyAsArray(gpa, 4), virtual_addrs.heap_end, .little);
 980                        break :append;
 981                    },
 982                    .__zig_error_names => {
 983                        try binary_bytes.appendSlice(gpa, wasm.error_name_bytes.items);
 984                        break :append;
 985                    },
 986                    .__zig_error_name_table => {
 987                        if (is_obj) @panic("TODO error name table reloc");
 988                        const base = f.data_segments.get(.__zig_error_names).?;
 989                        if (!is64) {
 990                            try emitTagNameTable(gpa, binary_bytes, wasm.error_name_offs.items, wasm.error_name_bytes.items, base, u32);
 991                        } else {
 992                            try emitTagNameTable(gpa, binary_bytes, wasm.error_name_offs.items, wasm.error_name_bytes.items, base, u64);
 993                        }
 994                        break :append;
 995                    },
 996                    .__zig_tag_names => {
 997                        try binary_bytes.appendSlice(gpa, wasm.tag_name_bytes.items);
 998                        break :append;
 999                    },
1000                    .__zig_tag_name_table => {
1001                        if (is_obj) @panic("TODO tag name table reloc");
1002                        const base = f.data_segments.get(.__zig_tag_names).?;
1003                        if (!is64) {
1004                            try emitTagNameTable(gpa, binary_bytes, wasm.tag_name_offs.items, wasm.tag_name_bytes.items, base, u32);
1005                        } else {
1006                            try emitTagNameTable(gpa, binary_bytes, wasm.tag_name_offs.items, wasm.tag_name_bytes.items, base, u64);
1007                        }
1008                        break :append;
1009                    },
1010                    .object => |i| {
1011                        const ptr = i.ptr(wasm);
1012                        try binary_bytes.appendSlice(gpa, ptr.payload.slice(wasm));
1013                        if (!is_obj) applyRelocs(binary_bytes.items[code_start..], ptr.offset, ptr.relocations(wasm), wasm);
1014                        break :append;
1015                    },
1016                    inline .uav_exe, .uav_obj, .nav_exe, .nav_obj => |i| i.value(wasm).code,
1017                };
1018                try binary_bytes.appendSlice(gpa, code.slice(wasm));
1019            }
1020            segment_offset += @intCast(binary_bytes.items.len - code_start);
1021        }
1022
1023        replaceVecSectionHeader(binary_bytes, header_offset, .data, @intCast(f.data_segment_groups.items.len));
1024        data_section_index = section_index;
1025        section_index += 1;
1026    }
1027
1028    if (is_obj) {
1029        @panic("TODO emit link section for object file and emit modified relocations");
1030    } else if (comp.config.debug_format != .strip) {
1031        try emitNameSection(wasm, f.data_segment_groups.items, binary_bytes);
1032    }
1033
1034    if (comp.config.debug_format != .strip) {
1035        // The build id must be computed on the main sections only,
1036        // so we have to do it now, before the debug sections.
1037        switch (wasm.base.build_id) {
1038            .none => {},
1039            .fast => {
1040                var id: [16]u8 = undefined;
1041                std.crypto.hash.sha3.TurboShake128(null).hash(binary_bytes.items, &id, .{});
1042                var uuid: [36]u8 = undefined;
1043                _ = try std.fmt.bufPrint(&uuid, "{x}-{x}-{x}-{x}-{x}", .{
1044                    id[0..4], id[4..6], id[6..8], id[8..10], id[10..],
1045                });
1046                try emitBuildIdSection(gpa, binary_bytes, &uuid);
1047            },
1048            .hexstring => |hs| {
1049                var buffer: [32 * 2]u8 = undefined;
1050                const str = std.fmt.bufPrint(&buffer, "{x}", .{hs.toSlice()}) catch unreachable;
1051                try emitBuildIdSection(gpa, binary_bytes, str);
1052            },
1053            else => |mode| {
1054                var err = try diags.addErrorWithNotes(0);
1055                try err.addMsg("build-id '{s}' is not supported for WebAssembly", .{@tagName(mode)});
1056            },
1057        }
1058
1059        var debug_bytes = std.array_list.Managed(u8).init(gpa);
1060        defer debug_bytes.deinit();
1061
1062        try emitProducerSection(gpa, binary_bytes);
1063        try emitFeaturesSection(gpa, binary_bytes, target);
1064    }
1065
1066    // Finally, write the entire binary into the file.
1067    var file_writer = wasm.base.file.?.writer(&.{});
1068    file_writer.interface.writeAll(binary_bytes.items) catch |err| switch (err) {
1069        error.WriteFailed => return file_writer.err.?,
1070    };
1071    file_writer.end() catch |err| switch (err) {
1072        error.WriteFailed => return file_writer.err.?,
1073        else => |e| return e,
1074    };
1075}
1076
1077const VirtualAddrs = struct {
1078    stack_pointer: u32,
1079    heap_base: u32,
1080    heap_end: u32,
1081    tls_base: ?u32,
1082    tls_align: Alignment,
1083    tls_size: ?u32,
1084    init_memory_flag: ?u32,
1085};
1086
1087fn emitNameSection(
1088    wasm: *Wasm,
1089    data_segment_groups: []const DataSegmentGroup,
1090    binary_bytes: *ArrayList(u8),
1091) !void {
1092    const f = &wasm.flush_buffer;
1093    const comp = wasm.base.comp;
1094    const gpa = comp.gpa;
1095
1096    const header_offset = try reserveCustomSectionHeader(gpa, binary_bytes);
1097    defer writeCustomSectionHeader(binary_bytes, header_offset);
1098
1099    const name_name = "name";
1100    try appendLeb128(gpa, binary_bytes, @as(u32, name_name.len));
1101    try binary_bytes.appendSlice(gpa, name_name);
1102
1103    {
1104        const sub_offset = try reserveCustomSectionHeader(gpa, binary_bytes);
1105        defer replaceHeader(binary_bytes, sub_offset, @intFromEnum(std.wasm.NameSubsection.function));
1106
1107        const total_functions: u32 = @intCast(f.function_imports.entries.len + wasm.functions.entries.len);
1108        try appendLeb128(gpa, binary_bytes, total_functions);
1109
1110        for (f.function_imports.keys(), 0..) |name_index, function_index| {
1111            const name = name_index.slice(wasm);
1112            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(function_index)));
1113            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
1114            try binary_bytes.appendSlice(gpa, name);
1115        }
1116        for (wasm.functions.keys(), f.function_imports.entries.len..) |resolution, function_index| {
1117            const name = resolution.name(wasm).?;
1118            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(function_index)));
1119            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
1120            try binary_bytes.appendSlice(gpa, name);
1121        }
1122    }
1123
1124    {
1125        const sub_offset = try reserveCustomSectionHeader(gpa, binary_bytes);
1126        defer replaceHeader(binary_bytes, sub_offset, @intFromEnum(std.wasm.NameSubsection.global));
1127
1128        const total_globals: u32 = @intCast(f.global_imports.entries.len + wasm.globals.entries.len);
1129        try appendLeb128(gpa, binary_bytes, total_globals);
1130
1131        for (f.global_imports.keys(), 0..) |name_index, global_index| {
1132            const name = name_index.slice(wasm);
1133            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(global_index)));
1134            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
1135            try binary_bytes.appendSlice(gpa, name);
1136        }
1137        for (wasm.globals.keys(), f.global_imports.entries.len..) |resolution, global_index| {
1138            const name = resolution.name(wasm).?;
1139            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(global_index)));
1140            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
1141            try binary_bytes.appendSlice(gpa, name);
1142        }
1143    }
1144
1145    {
1146        const sub_offset = try reserveCustomSectionHeader(gpa, binary_bytes);
1147        defer replaceHeader(binary_bytes, sub_offset, @intFromEnum(std.wasm.NameSubsection.data_segment));
1148
1149        const total_data_segments: u32 = @intCast(data_segment_groups.len);
1150        try appendLeb128(gpa, binary_bytes, total_data_segments);
1151
1152        for (data_segment_groups, 0..) |group, i| {
1153            const name, _ = splitSegmentName(group.first_segment.name(wasm));
1154            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(i)));
1155            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
1156            try binary_bytes.appendSlice(gpa, name);
1157        }
1158    }
1159}
1160
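/// Emits the custom "target_features" section, listing every enabled wasm
/// CPU feature prefixed with '+'.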
1161fn emitFeaturesSection(
1162    gpa: Allocator,
1163    binary_bytes: *ArrayList(u8),
1164    target: *const std.Target,
1165) Allocator.Error!void {
1166    const feature_count = target.cpu.features.count();
1167    if (feature_count == 0) return;
1168
1169    const header_offset = try reserveCustomSectionHeader(gpa, binary_bytes);
1170    defer writeCustomSectionHeader(binary_bytes, header_offset);
1171
1172    const target_features = "target_features";
1173    try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(target_features.len)));
1174    try binary_bytes.appendSlice(gpa, target_features);
1175
1176    try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(feature_count)));
1177
1178    var safety_count = feature_count;
1179    for (target.cpu.arch.allFeaturesList(), 0..) |*feature, i| {
1180        if (!target.cpu.has(.wasm, @as(std.Target.wasm.Feature, @enumFromInt(i)))) continue;
1181        safety_count -= 1;
1182
1183        try appendLeb128(gpa, binary_bytes, @as(u32, '+'));
1184        // Depends on llvm_name for the hyphenated version that matches wasm tooling conventions.
1185        const name = feature.llvm_name.?;
1186        try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
1187        try binary_bytes.appendSlice(gpa, name);
1188    }
1189    assert(safety_count == 0);
1190}
1191
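/// Emits the custom "build_id" section containing a single entry with the
/// given bytes.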
1192fn emitBuildIdSection(gpa: Allocator, binary_bytes: *ArrayList(u8), build_id: []const u8) !void {
1193    const header_offset = try reserveCustomSectionHeader(gpa, binary_bytes);
1194    defer writeCustomSectionHeader(binary_bytes, header_offset);
1195
1196    const hdr_build_id = "build_id";
1197    try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(hdr_build_id.len)));
1198    try binary_bytes.appendSlice(gpa, hdr_build_id);
1199
1200    try appendLeb128(gpa, binary_bytes, @as(u32, 1));
1201    try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(build_id.len)));
1202    try binary_bytes.appendSlice(gpa, build_id);
1203}
1204
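/// Emits the custom "producers" section, reporting Zig as both the source
/// language and the processing tool.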
1205fn emitProducerSection(gpa: Allocator, binary_bytes: *ArrayList(u8)) !void {
1206    const header_offset = try reserveCustomSectionHeader(gpa, binary_bytes);
1207    defer writeCustomSectionHeader(binary_bytes, header_offset);
1208
1209    const producers = "producers";
1210    try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(producers.len)));
1211    try binary_bytes.appendSlice(gpa, producers);
1212
1213    try appendLeb128(gpa, binary_bytes, @as(u32, 2)); // 2 fields: Language + processed-by
1214
1215    // language field
1216    {
1217        const language = "language";
1218        try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(language.len)));
1219        try binary_bytes.appendSlice(gpa, language);
1220
1221        // field_value_count (TODO: Parse object files for producer sections to detect their language)
1222        try appendLeb128(gpa, binary_bytes, @as(u32, 1));
1223
1224        // versioned name
1225        {
1226            try appendLeb128(gpa, binary_bytes, @as(u32, 3)); // len of "Zig"
1227            try binary_bytes.appendSlice(gpa, "Zig");
1228
1229            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(build_options.version.len)));
1230            try binary_bytes.appendSlice(gpa, build_options.version);
1231        }
1232    }
1233
1234    // processed-by field
1235    {
1236        const processed_by = "processed-by";
1237        try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(processed_by.len)));
1238        try binary_bytes.appendSlice(gpa, processed_by);
1239
1240        // field_value_count (TODO: Parse object files for producer sections to detect other used tools)
1241        try appendLeb128(gpa, binary_bytes, @as(u32, 1));
1242
1243        // versioned name
1244        {
1245            try appendLeb128(gpa, binary_bytes, @as(u32, 3)); // len of "Zig"
1246            try binary_bytes.appendSlice(gpa, "Zig");
1247
1248            try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(build_options.version.len)));
1249            try binary_bytes.appendSlice(gpa, build_options.version);
1250        }
1251    }
1252}
1253
1254fn splitSegmentName(name: []const u8) struct { []const u8, []const u8 } {
1255    const start = @intFromBool(name.len >= 1 and name[0] == '.');
1256    const pivot = mem.indexOfScalarPos(u8, name, start, '.') orelse name.len;
1257    return .{ name[0..pivot], name[pivot..] };
1258}
1259
1260test splitSegmentName {
1261    {
1262        const a, const b = splitSegmentName(".data");
1263        try std.testing.expectEqualStrings(".data", a);
1264        try std.testing.expectEqualStrings("", b);
1265    }
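    // A prefixed two-component name splits at the second dot, keeping the
    // dot on the suffix.
    {
        const a, const b = splitSegmentName(".data.foo");
        try std.testing.expectEqualStrings(".data", a);
        try std.testing.expectEqualStrings(".foo", b);
    }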
1266}
1267
1268fn wantSegmentMerge(
1269    wasm: *const Wasm,
1270    a_id: Wasm.DataSegmentId,
1271    b_id: Wasm.DataSegmentId,
1272    b_category: Wasm.DataSegmentId.Category,
1273) bool {
1274    const a_category = a_id.category(wasm);
1275    if (a_category != b_category) return false;
1276    if (a_category == .tls or b_category == .tls) return false;
1277    if (a_id.isPassive(wasm) != b_id.isPassive(wasm)) return false;
1278    if (b_category == .zero) return true;
1279    const a_name = a_id.name(wasm);
1280    const b_name = b_id.name(wasm);
1281    const a_prefix, _ = splitSegmentName(a_name);
1282    const b_prefix, _ = splitSegmentName(b_name);
1283    return mem.eql(u8, a_prefix, b_prefix);
1284}
1285
1286/// section id + fixed leb contents size + fixed leb vector length
1287const section_header_reserve_size = 1 + 5 + 5;
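/// section id + fixed leb contents size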
1288const section_header_size = 5 + 1;
1289
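/// Appends `section_header_reserve_size` placeholder bytes for a vector
/// section header and returns their offset; the real header is patched in
/// later by `replaceVecSectionHeader` once the contents are known.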
1290fn reserveVecSectionHeader(gpa: Allocator, bytes: *ArrayList(u8)) Allocator.Error!u32 {
1291    try bytes.appendNTimes(gpa, 0, section_header_reserve_size);
1292    return @intCast(bytes.items.len - section_header_reserve_size);
1293}
1294
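/// Patches a header reserved by `reserveVecSectionHeader` with the section
/// id, contents size, and element count, shrinking the placeholder down to
/// the minimal LEB128 encoding.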
1295fn replaceVecSectionHeader(
1296    bytes: *ArrayList(u8),
1297    offset: u32,
1298    section: std.wasm.Section,
1299    n_items: u32,
1300) void {
1301    const size: u32 = @intCast(bytes.items.len - offset - section_header_reserve_size + uleb128size(n_items));
1302    var buf: [section_header_reserve_size]u8 = undefined;
1303    var w: std.Io.Writer = .fixed(&buf);
1304    w.writeByte(@intFromEnum(section)) catch unreachable;
1305    w.writeUleb128(size) catch unreachable;
1306    w.writeUleb128(n_items) catch unreachable;
1307    bytes.replaceRangeAssumeCapacity(offset, section_header_reserve_size, w.buffered());
1308}
1309
1310fn reserveCustomSectionHeader(gpa: Allocator, bytes: *ArrayList(u8)) Allocator.Error!u32 {
1311    try bytes.appendNTimes(gpa, 0, section_header_size);
1312    return @intCast(bytes.items.len - section_header_size);
1313}
1314
1315fn writeCustomSectionHeader(bytes: *ArrayList(u8), offset: u32) void {
1316    return replaceHeader(bytes, offset, 0); // 0 = 'custom' section
1317}
1318
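/// Patches a header reserved by `reserveCustomSectionHeader` with the given
/// tag byte and the final contents size, shrinking the placeholder down to
/// the minimal LEB128 encoding.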
1319fn replaceHeader(bytes: *ArrayList(u8), offset: u32, tag: u8) void {
1320    const size: u32 = @intCast(bytes.items.len - offset - section_header_size);
1321    var buf: [section_header_size]u8 = undefined;
1322    var w: std.Io.Writer = .fixed(&buf);
1323    w.writeByte(tag) catch unreachable;
1324    w.writeUleb128(size) catch unreachable;
1325    bytes.replaceRangeAssumeCapacity(offset, section_header_size, w.buffered());
1326}
1327
1328const max_size_encoding = 5;
1329
1330fn reserveSize(gpa: Allocator, bytes: *ArrayList(u8)) Allocator.Error!u32 {
1331    try bytes.appendNTimes(gpa, 0, max_size_encoding);
1332    return @intCast(bytes.items.len - max_size_encoding);
1333}
1334
1335fn replaceSize(bytes: *ArrayList(u8), offset: u32) void {
1336    const size: u32 = @intCast(bytes.items.len - offset - max_size_encoding);
1337    var buf: [max_size_encoding]u8 = undefined;
1338    var w: std.Io.Writer = .fixed(&buf);
1339    w.writeUleb128(size) catch unreachable;
1340    bytes.replaceRangeAssumeCapacity(offset, max_size_encoding, w.buffered());
1341}
1342
1343fn emitLimits(
1344    gpa: Allocator,
1345    binary_bytes: *ArrayList(u8),
1346    limits: std.wasm.Limits,
1347) Allocator.Error!void {
1348    try binary_bytes.append(gpa, @bitCast(limits.flags));
1349    try appendLeb128(gpa, binary_bytes, limits.min);
1350    if (limits.flags.has_max) try appendLeb128(gpa, binary_bytes, limits.max);
1351}
1352
1353fn emitMemoryImport(
1354    wasm: *Wasm,
1355    binary_bytes: *ArrayList(u8),
1356    name_index: String,
1357    memory_import: *const Wasm.MemoryImport,
1358) Allocator.Error!void {
1359    const gpa = wasm.base.comp.gpa;
1360    const module_name = memory_import.module_name.slice(wasm);
1361    try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(module_name.len)));
1362    try binary_bytes.appendSlice(gpa, module_name);
1363
1364    const name = name_index.slice(wasm);
1365    try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(name.len)));
1366    try binary_bytes.appendSlice(gpa, name);
1367
1368    try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.memory));
1369    try emitLimits(gpa, binary_bytes, memory_import.limits());
1370}
1371
1372fn emitInit(writer: *std.Io.Writer, init_expr: std.wasm.InitExpression) !void {
1373    switch (init_expr) {
1374        .i32_const => |val| {
1375            try writer.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
1376            try writer.writeSleb128(val);
1377        },
1378        .i64_const => |val| {
1379            try writer.writeByte(@intFromEnum(std.wasm.Opcode.i64_const));
1380            try writer.writeSleb128(val);
1381        },
1382        .f32_const => |val| {
1383            try writer.writeByte(@intFromEnum(std.wasm.Opcode.f32_const));
1384            try writer.writeInt(u32, @bitCast(val), .little);
1385        },
1386        .f64_const => |val| {
1387            try writer.writeByte(@intFromEnum(std.wasm.Opcode.f64_const));
1388            try writer.writeInt(u64, @bitCast(val), .little);
1389        },
1390        .global_get => |val| {
1391            try writer.writeByte(@intFromEnum(std.wasm.Opcode.global_get));
1392            try writer.writeUleb128(val);
1393        },
1394    }
1395    try writer.writeByte(@intFromEnum(std.wasm.Opcode.end));
1396}
1397
1398pub fn emitExpr(wasm: *const Wasm, binary_bytes: *ArrayList(u8), expr: Wasm.Expr) Allocator.Error!void {
1399    const gpa = wasm.base.comp.gpa;
1400    const slice = expr.slice(wasm);
1401    try binary_bytes.appendSlice(gpa, slice[0 .. slice.len + 1]); // +1 to include end opcode
1402}
1403
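/// Emits the "segment_info" subsection of the "linking" custom section,
/// describing each data segment's name, alignment, and flags.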
fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *ArrayList(u8)) !void {
    const gpa = wasm.base.comp.gpa;
    try appendLeb128(gpa, binary_bytes, @intFromEnum(Wasm.SubsectionType.segment_info));
    const segment_offset = binary_bytes.items.len;

    try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(wasm.segment_info.count())));
    for (wasm.segment_info.values()) |segment_info| {
        log.debug("Emit segment: {s} align({d}) flags({b})", .{
            segment_info.name,
            segment_info.alignment,
            segment_info.flags,
        });
        try appendLeb128(gpa, binary_bytes, @as(u32, @intCast(segment_info.name.len)));
        try binary_bytes.appendSlice(gpa, segment_info.name);
        try appendLeb128(gpa, binary_bytes, segment_info.alignment.toLog2Units());
        try appendLeb128(gpa, binary_bytes, segment_info.flags);
    }

    var buf: [5]u8 = undefined;
    leb.writeUnsignedFixed(5, &buf, @as(u32, @intCast(binary_bytes.items.len - segment_offset)));
    try binary_bytes.insertSlice(gpa, segment_offset, &buf);
}
1426
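/// Number of bytes in the minimal ULEB128 encoding of `x`. Yields 0 for
/// `x == 0`, so callers are assumed to only pass nonzero values.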
1427fn uleb128size(x: u32) u32 {
1428    var value = x;
1429    var size: u32 = 0;
1430    while (value != 0) : (size += 1) value >>= 7;
1431    return size;
1432}
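
// Byte counts of the minimal ULEB128 encodings used when patching section headers.
test uleb128size {
    try std.testing.expectEqual(@as(u32, 1), uleb128size(1));
    try std.testing.expectEqual(@as(u32, 1), uleb128size(127));
    try std.testing.expectEqual(@as(u32, 2), uleb128size(128));
    try std.testing.expectEqual(@as(u32, 5), uleb128size(std.math.maxInt(u32)));
}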
1433
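/// Writes the `@tagName` lookup table into `code`: one (pointer, length) pair
/// per tag name, where each pointer is `base` plus the name's offset into the
/// null-terminated `tag_name_bytes`.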
1434fn emitTagNameTable(
1435    gpa: Allocator,
1436    code: *ArrayList(u8),
1437    tag_name_offs: []const u32,
1438    tag_name_bytes: []const u8,
1439    base: u32,
1440    comptime Int: type,
1441) error{OutOfMemory}!void {
1442    const ptr_size_bytes = @divExact(@bitSizeOf(Int), 8);
1443    try code.ensureUnusedCapacity(gpa, ptr_size_bytes * 2 * tag_name_offs.len);
1444    for (tag_name_offs) |off| {
1445        const name_len: u32 = @intCast(mem.indexOfScalar(u8, tag_name_bytes[off..], 0).?);
1446        mem.writeInt(Int, code.addManyAsArrayAssumeCapacity(ptr_size_bytes), base + off, .little);
1447        mem.writeInt(Int, code.addManyAsArrayAssumeCapacity(ptr_size_bytes), name_len, .little);
1448    }
1449}
1450
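/// Applies the relocations in `relocs` to `code`. Relocation offsets are
/// relative to the containing section; `code_offset` is the offset of `code`
/// within that section. Entries at or beyond `relocs.end` are ignored.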
1451fn applyRelocs(code: []u8, code_offset: u32, relocs: Wasm.ObjectRelocation.IterableSlice, wasm: *const Wasm) void {
1452    for (
1453        relocs.slice.tags(wasm),
1454        relocs.slice.pointees(wasm),
1455        relocs.slice.offsets(wasm),
1456        relocs.slice.addends(wasm),
1457    ) |tag, pointee, offset, *addend| {
1458        if (offset >= relocs.end) break;
1459        const sliced_code = code[offset - code_offset ..];
1460        switch (tag) {
1461            .function_index_i32 => reloc_u32_function(sliced_code, .fromObjectFunctionHandlingWeak(wasm, pointee.function)),
1462            .function_index_leb => reloc_leb_function(sliced_code, .fromObjectFunctionHandlingWeak(wasm, pointee.function)),
1463            .function_offset_i32 => @panic("TODO this value is not known yet"),
1464            .function_offset_i64 => @panic("TODO this value is not known yet"),
1465            .table_index_i32 => reloc_u32_table_index(sliced_code, .fromObjectFunctionHandlingWeak(wasm, pointee.function)),
1466            .table_index_i64 => reloc_u64_table_index(sliced_code, .fromObjectFunctionHandlingWeak(wasm, pointee.function)),
1467            .table_index_rel_sleb => @panic("TODO what does this reloc tag mean?"),
1468            .table_index_rel_sleb64 => @panic("TODO what does this reloc tag mean?"),
1469            .table_index_sleb => reloc_sleb_table_index(sliced_code, .fromObjectFunctionHandlingWeak(wasm, pointee.function)),
1470            .table_index_sleb64 => reloc_sleb64_table_index(sliced_code, .fromObjectFunctionHandlingWeak(wasm, pointee.function)),
1471
1472            .function_import_index_i32 => reloc_u32_function(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)),
1473            .function_import_index_leb => reloc_leb_function(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)),
1474            .function_import_offset_i32 => @panic("TODO this value is not known yet"),
1475            .function_import_offset_i64 => @panic("TODO this value is not known yet"),
1476            .table_import_index_i32 => reloc_u32_table_index(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)),
1477            .table_import_index_i64 => reloc_u64_table_index(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)),
1478            .table_import_index_rel_sleb => @panic("TODO what does this reloc tag mean?"),
1479            .table_import_index_rel_sleb64 => @panic("TODO what does this reloc tag mean?"),
1480            .table_import_index_sleb => reloc_sleb_table_index(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)),
1481            .table_import_index_sleb64 => reloc_sleb64_table_index(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)),
1482
1483            .global_index_i32 => reloc_u32_global(sliced_code, .fromObjectGlobalHandlingWeak(wasm, pointee.global)),
1484            .global_index_leb => reloc_leb_global(sliced_code, .fromObjectGlobalHandlingWeak(wasm, pointee.global)),
1485
1486            .global_import_index_i32 => reloc_u32_global(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)),
1487            .global_import_index_leb => reloc_leb_global(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)),
1488
1489            .memory_addr_i32 => reloc_u32_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)),
1490            .memory_addr_i64 => reloc_u64_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)),
1491            .memory_addr_leb => reloc_leb_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)),
1492            .memory_addr_leb64 => reloc_leb64_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)),
1493            .memory_addr_locrel_i32 => @panic("TODO implement relocation memory_addr_locrel_i32"),
1494            .memory_addr_rel_sleb => @panic("TODO implement relocation memory_addr_rel_sleb"),
1495            .memory_addr_rel_sleb64 => @panic("TODO implement relocation memory_addr_rel_sleb64"),
1496            .memory_addr_sleb => reloc_sleb_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)),
1497            .memory_addr_sleb64 => reloc_sleb64_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)),
1498            .memory_addr_tls_sleb => reloc_sleb_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)),
1499            .memory_addr_tls_sleb64 => reloc_sleb64_addr(sliced_code, .fromObjectData(wasm, pointee.data, addend.*)),
1500
1501            .memory_addr_import_i32 => reloc_u32_addr(sliced_code, .fromSymbolName(wasm, pointee.symbol_name, addend.*)),
1502            .memory_addr_import_i64 => reloc_u64_addr(sliced_code, .fromSymbolName(wasm, pointee.symbol_name, addend.*)),
1503            .memory_addr_import_leb => reloc_leb_addr(sliced_code, .fromSymbolName(wasm, pointee.symbol_name, addend.*)),
1504            .memory_addr_import_leb64 => reloc_leb64_addr(sliced_code, .fromSymbolName(wasm, pointee.symbol_name, addend.*)),
1505            .memory_addr_import_locrel_i32 => @panic("TODO implement relocation memory_addr_import_locrel_i32"),
1506            .memory_addr_import_rel_sleb => @panic("TODO implement relocation memory_addr_import_rel_sleb"),
1507            .memory_addr_import_rel_sleb64 => @panic("TODO implement memory_addr_import_rel_sleb64"),
1508            .memory_addr_import_sleb => reloc_sleb_addr(sliced_code, .fromSymbolName(wasm, pointee.symbol_name, addend.*)),
1509            .memory_addr_import_sleb64 => reloc_sleb64_addr(sliced_code, .fromSymbolName(wasm, pointee.symbol_name, addend.*)),
1510            .memory_addr_import_tls_sleb => @panic("TODO"),
1511            .memory_addr_import_tls_sleb64 => @panic("TODO"),
1512
1513            .section_offset_i32 => @panic("TODO this value is not known yet"),
1514
1515            .table_number_leb => reloc_leb_table(sliced_code, .fromObjectTable(wasm, pointee.table)),
1516            .table_import_number_leb => reloc_leb_table(sliced_code, .fromSymbolName(wasm, pointee.symbol_name)),
1517
1518            .type_index_leb => reloc_leb_type(sliced_code, .fromTypeIndex(pointee.type_index, &wasm.flush_buffer)),
1519        }
1520    }
1521}
1522
1523fn reloc_u32_table_index(code: []u8, i: IndirectFunctionTableIndex) void {
1524    mem.writeInt(u32, code[0..4], i.toAbi(), .little);
1525}
1526
1527fn reloc_u64_table_index(code: []u8, i: IndirectFunctionTableIndex) void {
1528    mem.writeInt(u64, code[0..8], i.toAbi(), .little);
1529}
1530
1531fn reloc_sleb_table_index(code: []u8, i: IndirectFunctionTableIndex) void {
1532    leb.writeSignedFixed(5, code[0..5], i.toAbi());
1533}
1534
1535fn reloc_sleb64_table_index(code: []u8, i: IndirectFunctionTableIndex) void {
1536    leb.writeSignedFixed(11, code[0..11], i.toAbi());
1537}
1538
1539fn reloc_u32_function(code: []u8, function: Wasm.OutputFunctionIndex) void {
1540    mem.writeInt(u32, code[0..4], @intFromEnum(function), .little);
1541}
1542
1543fn reloc_leb_function(code: []u8, function: Wasm.OutputFunctionIndex) void {
1544    leb.writeUnsignedFixed(5, code[0..5], @intFromEnum(function));
1545}
1546
1547fn reloc_u32_global(code: []u8, global: Wasm.GlobalIndex) void {
1548    mem.writeInt(u32, code[0..4], @intFromEnum(global), .little);
1549}
1550
1551fn reloc_leb_global(code: []u8, global: Wasm.GlobalIndex) void {
1552    leb.writeUnsignedFixed(5, code[0..5], @intFromEnum(global));
1553}
1554
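/// Resolved virtual address for a memory-addressing relocation: the target
/// segment's address plus the target's offset plus the relocation addend.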
1555const RelocAddr = struct {
1556    addr: u32,
1557
1558    fn fromObjectData(wasm: *const Wasm, i: Wasm.ObjectData.Index, addend: i32) RelocAddr {
1559        return fromDataLoc(&wasm.flush_buffer, .fromObjectDataIndex(wasm, i), addend);
1560    }
1561
1562    fn fromSymbolName(wasm: *const Wasm, name: String, addend: i32) RelocAddr {
1563        const flush = &wasm.flush_buffer;
1564        if (wasm.object_data_imports.getPtr(name)) |import| {
1565            return fromDataLoc(flush, import.resolution.dataLoc(wasm), addend);
1566        } else if (wasm.data_imports.get(name)) |id| {
1567            return fromDataLoc(flush, .fromDataImportId(wasm, id), addend);
1568        } else {
1569            unreachable;
1570        }
1571    }
1572
1573    fn fromDataLoc(flush: *const Flush, data_loc: Wasm.DataLoc, addend: i32) RelocAddr {
1574        const base_addr: i64 = flush.data_segments.get(data_loc.segment).?;
1575        return .{ .addr = @intCast(base_addr + data_loc.offset + addend) };
1576    }
1577};
1578
1579fn reloc_u32_addr(code: []u8, ra: RelocAddr) void {
1580    mem.writeInt(u32, code[0..4], ra.addr, .little);
1581}
1582
1583fn reloc_u64_addr(code: []u8, ra: RelocAddr) void {
1584    mem.writeInt(u64, code[0..8], ra.addr, .little);
1585}
1586
1587fn reloc_leb_addr(code: []u8, ra: RelocAddr) void {
1588    leb.writeUnsignedFixed(5, code[0..5], ra.addr);
1589}
1590
1591fn reloc_leb64_addr(code: []u8, ra: RelocAddr) void {
1592    leb.writeUnsignedFixed(11, code[0..11], ra.addr);
1593}
1594
1595fn reloc_sleb_addr(code: []u8, ra: RelocAddr) void {
1596    leb.writeSignedFixed(5, code[0..5], ra.addr);
1597}
1598
1599fn reloc_sleb64_addr(code: []u8, ra: RelocAddr) void {
1600    leb.writeSignedFixed(11, code[0..11], ra.addr);
1601}
1602
1603fn reloc_leb_table(code: []u8, table: Wasm.TableIndex) void {
1604    leb.writeUnsignedFixed(5, code[0..5], @intFromEnum(table));
1605}
1606
1607fn reloc_leb_type(code: []u8, index: FuncTypeIndex) void {
1608    leb.writeUnsignedFixed(5, code[0..5], @intFromEnum(index));
1609}
1610
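/// Emits the body of `__wasm_call_ctors`: calls every init function from
/// included objects, dropping any values they return.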
1611fn emitCallCtorsFunction(wasm: *const Wasm, binary_bytes: *ArrayList(u8)) Allocator.Error!void {
1612    const gpa = wasm.base.comp.gpa;
1613
1614    try binary_bytes.ensureUnusedCapacity(gpa, 5 + 1);
1615    appendReservedUleb32(binary_bytes, 0); // no locals
1616
1617    for (wasm.object_init_funcs.items) |init_func| {
1618        const func = init_func.function_index.ptr(wasm);
1619        if (!func.object_index.ptr(wasm).is_included) continue;
1620        const ty = func.type_index.ptr(wasm);
1621        const n_returns = ty.returns.slice(wasm).len;
1622
1623        // Call function by its function index
1624        try binary_bytes.ensureUnusedCapacity(gpa, 1 + 5 + n_returns + 1);
1625        const call_index: Wasm.OutputFunctionIndex = .fromObjectFunction(wasm, init_func.function_index);
1626        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call));
1627        appendReservedUleb32(binary_bytes, @intFromEnum(call_index));
1628
1629        // drop all returned values from the stack as __wasm_call_ctors has no return value
1630        binary_bytes.appendNTimesAssumeCapacity(@intFromEnum(std.wasm.Opcode.drop), n_returns);
1631    }
1632
1633    binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); // end function body
1634}
1635
1636fn emitInitMemoryFunction(
1637    wasm: *const Wasm,
1638    binary_bytes: *ArrayList(u8),
1639    virtual_addrs: *const VirtualAddrs,
1640) Allocator.Error!void {
1641    const comp = wasm.base.comp;
1642    const gpa = comp.gpa;
1643    const shared_memory = comp.config.shared_memory;
1644
1645    // Passive segments are used to avoid memory being reinitialized on each
1646    // thread's instantiation. These passive segments are initialized and
    // dropped in __wasm_init_memory, which is registered as the start function.
1648    // We also initialize bss segments (using memory.fill) as part of this
1649    // function.
1650    assert(wasm.any_passive_inits);
1651
1652    try binary_bytes.ensureUnusedCapacity(gpa, 5 + 1);
1653    appendReservedUleb32(binary_bytes, 0); // no locals
1654
1655    if (virtual_addrs.init_memory_flag) |flag_address| {
1656        assert(shared_memory);
1657        try binary_bytes.ensureUnusedCapacity(gpa, 2 * 3 + 6 * 3 + 1 + 6 * 3 + 1 + 5 * 4 + 1 + 1);
1658        // destination blocks
        // based on the flag value we jump to the corresponding label
1660        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block)); // $drop
1661        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.empty));
1662
1663        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block)); // $wait
1664        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.empty));
1665
1666        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block)); // $init
1667        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.empty));
1668
1669        // atomically check
1670        appendReservedI32Const(binary_bytes, flag_address);
1671        appendReservedI32Const(binary_bytes, 0);
1672        appendReservedI32Const(binary_bytes, 1);
1673        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
1674        appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.i32_atomic_rmw_cmpxchg));
1675        appendReservedUleb32(binary_bytes, 2); // alignment
1676        appendReservedUleb32(binary_bytes, 0); // offset
1677
1678        // based on the value from the atomic check, jump to the label.
1679        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_table));
1680        appendReservedUleb32(binary_bytes, 2); // length of the table (we have 3 blocks but because of the mandatory default the length is 2).
1681        appendReservedUleb32(binary_bytes, 0); // $init
1682        appendReservedUleb32(binary_bytes, 1); // $wait
1683        appendReservedUleb32(binary_bytes, 2); // $drop
1684        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
1685    }
1686
1687    const segment_groups = wasm.flush_buffer.data_segment_groups.items;
1688    var prev_end: u32 = 0;
1689    for (segment_groups, 0..) |group, segment_index| {
1690        defer prev_end = group.end_addr;
1691        const segment = group.first_segment;
1692        if (!segment.isPassive(wasm)) continue;
1693
1694        const start_addr: u32 = @intCast(segment.alignment(wasm).forward(prev_end));
1695        const segment_size: u32 = group.end_addr - start_addr;
1696
1697        try binary_bytes.ensureUnusedCapacity(gpa, 6 + 6 + 1 + 5 + 6 + 6 + 1 + 6 * 2 + 1 + 1);
1698
1699        // For passive BSS segments we can simply issue a memory.fill(0). For
1700        // non-BSS segments we do a memory.init. Both instructions take as
1701        // their first argument the destination address.
1702        appendReservedI32Const(binary_bytes, start_addr);
1703
1704        if (shared_memory and segment.isTls(wasm)) {
1705            // When we initialize the TLS segment we also set the `__tls_base`
1706            // global.  This allows the runtime to use this static copy of the
1707            // TLS data for the first/main thread.
1708            appendReservedI32Const(binary_bytes, start_addr);
1709            binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
1710            appendReservedUleb32(binary_bytes, virtual_addrs.tls_base.?);
1711        }
1712
1713        appendReservedI32Const(binary_bytes, 0);
1714        appendReservedI32Const(binary_bytes, segment_size);
1715        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.misc_prefix));
1716        if (segment.isBss(wasm)) {
1717            // fill bss segment with zeroes
1718            appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.MiscOpcode.memory_fill));
1719        } else {
1720            // initialize the segment
1721            appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.MiscOpcode.memory_init));
1722            appendReservedUleb32(binary_bytes, @intCast(segment_index));
1723        }
1724        binary_bytes.appendAssumeCapacity(0); // memory index immediate
1725    }
1726
1727    if (virtual_addrs.init_memory_flag) |flag_address| {
1728        assert(shared_memory);
1729        try binary_bytes.ensureUnusedCapacity(gpa, 6 + 6 + 1 + 3 * 5 + 6 + 1 + 5 + 1 + 3 * 5 + 1 + 1 + 5 + 1 + 6 * 2 + 1 + 5 + 1 + 3 * 5 + 1 + 1 + 1);
1730        // we set the init memory flag to value '2'
1731        appendReservedI32Const(binary_bytes, flag_address);
1732        appendReservedI32Const(binary_bytes, 2);
1733        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
1734        appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.i32_atomic_store));
1735        appendReservedUleb32(binary_bytes, @as(u32, 2)); // alignment
1736        appendReservedUleb32(binary_bytes, @as(u32, 0)); // offset
1737
1738        // notify any waiters for segment initialization completion
1739        appendReservedI32Const(binary_bytes, flag_address);
1740        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
1741        appendReservedLeb128(binary_bytes, @as(i32, -1)); // number of waiters
1742        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
1743        appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.memory_atomic_notify));
1744        appendReservedUleb32(binary_bytes, @as(u32, 2)); // alignment
1745        appendReservedUleb32(binary_bytes, @as(u32, 0)); // offset
1746        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.drop));
1747
1748        // branch and drop segments
1749        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br));
1750        appendReservedUleb32(binary_bytes, @as(u32, 1));
1751
        // wait for another thread to finish initializing the memory segments
1753        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); // end $wait
1754        appendReservedI32Const(binary_bytes, flag_address);
1755        appendReservedI32Const(binary_bytes, 1); // expected flag value
1756        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const));
1757        appendReservedLeb128(binary_bytes, @as(i64, -1)); // timeout
1758        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
1759        appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.memory_atomic_wait32));
1760        appendReservedUleb32(binary_bytes, @as(u32, 2)); // alignment
1761        appendReservedUleb32(binary_bytes, @as(u32, 0)); // offset
1762        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.drop));
1763
1764        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); // end $drop
1765    }
1766
1767    for (segment_groups, 0..) |group, segment_index| {
1768        const segment = group.first_segment;
1769        if (!segment.isPassive(wasm)) continue;
1770        if (segment.isBss(wasm)) continue;
        // The TLS region should not be dropped since it is needed
1772        // during the initialization of each thread (__wasm_init_tls).
1773        if (shared_memory and segment.isTls(wasm)) continue;
1774
1775        try binary_bytes.ensureUnusedCapacity(gpa, 1 + 5 + 5 + 1);
1776
1777        binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.misc_prefix));
1778        appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.MiscOpcode.data_drop));
1779        appendReservedUleb32(binary_bytes, @intCast(segment_index));
1780    }
1781
1782    // End of the function body
1783    binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
1784}
1785
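/// Emits the body of the TLS initialization function. Parameter 0 holds the
/// address of this thread's TLS block: it is stored to `__tls_base`, the TLS
/// template segment is copied there via memory.init, and runtime TLS
/// relocations are applied when such a function exists.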
1786fn emitInitTlsFunction(wasm: *const Wasm, bytes: *ArrayList(u8)) Allocator.Error!void {
1787    const comp = wasm.base.comp;
1788    const gpa = comp.gpa;
1789
1790    assert(comp.config.shared_memory);
1791
1792    try bytes.ensureUnusedCapacity(gpa, 5 * 10 + 8);
1793
1794    appendReservedUleb32(bytes, 0); // no locals
1795
    // If there's a TLS segment, initialize it during runtime using the bulk-memory feature.
    // The TLS segment is always the first one due to how we sort the data segments.
1798    const data_segments = wasm.flush_buffer.data_segments.keys();
1799    if (data_segments.len > 0 and data_segments[0].isTls(wasm)) {
1800        const start_addr = wasm.flush_buffer.data_segments.values()[0];
1801        const end_addr = wasm.flush_buffer.data_segment_groups.items[0].end_addr;
1802        const group_size = end_addr - start_addr;
1803        const data_segment_index = 0;
1804
1805        const param_local: u32 = 0;
1806
1807        bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
1808        appendReservedUleb32(bytes, param_local);
1809
1810        const tls_base_global_index: Wasm.GlobalIndex = @enumFromInt(wasm.globals.getIndex(.__tls_base).?);
1811        bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
1812        appendReservedUleb32(bytes, @intFromEnum(tls_base_global_index));
1813
1814        // load stack values for the bulk-memory operation
1815        {
1816            bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
1817            appendReservedUleb32(bytes, param_local);
1818
            bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
            appendReservedUleb32(bytes, 0); // offset within the segment

            bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
            appendReservedUleb32(bytes, group_size); // number of bytes to copy
1824        }
1825
1826        // perform the bulk-memory operation to initialize the data segment
1827        bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.misc_prefix));
1828        appendReservedUleb32(bytes, @intFromEnum(std.wasm.MiscOpcode.memory_init));
1829        // segment immediate
1830        appendReservedUleb32(bytes, data_segment_index);
1831        // memory index immediate (always 0)
1832        appendReservedUleb32(bytes, 0);
1833    }
1834
1835    // If we have to perform any TLS relocations, call the corresponding function
1836    // which performs all runtime TLS relocations. This is a synthetic function,
1837    // generated by the linker.
1838    if (wasm.functions.getIndex(.__wasm_apply_global_tls_relocs)) |function_index| {
1839        const output_function_index: Wasm.OutputFunctionIndex = .fromFunctionIndex(wasm, @enumFromInt(function_index));
1840        bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call));
1841        appendReservedUleb32(bytes, @intFromEnum(output_function_index));
1842    }
1843
1844    bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
1845}
1846
1847fn emitStartSection(gpa: Allocator, bytes: *ArrayList(u8), i: Wasm.OutputFunctionIndex) !void {
1848    const header_offset = try reserveVecSectionHeader(gpa, bytes);
1849    replaceVecSectionHeader(bytes, header_offset, .start, @intFromEnum(i));
1850}
1851
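/// Emits a function implementing `@tagName` for `enum_type_ip`: local 0 is a
/// pointer to the result slice, local 1 is the tag value, and the matching
/// (pointer, length) entry is copied out of the name table at
/// `table_base_addr`.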
1852fn emitTagNameFunction(
1853    wasm: *Wasm,
1854    code: *ArrayList(u8),
1855    table_base_addr: u32,
1856    table_index: u32,
1857    enum_type_ip: InternPool.Index,
1858) !void {
1859    const comp = wasm.base.comp;
1860    const gpa = comp.gpa;
1861    const diags = &comp.link_diags;
1862    const zcu = comp.zcu.?;
1863    const ip = &zcu.intern_pool;
1864    const enum_type = ip.loadEnumType(enum_type_ip);
1865    const tag_values = enum_type.values.get(ip);
1866
1867    try code.ensureUnusedCapacity(gpa, 7 * 5 + 6 + 1 * 6);
1868    appendReservedUleb32(code, 0); // no locals
1869
1870    const slice_abi_size = 8;
1871    const encoded_alignment = @ctz(@as(u32, 4));
1872    if (tag_values.len == 0) {
1873        // Then it's auto-numbered and therefore a direct table lookup.
1874        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
1875        appendReservedUleb32(code, 0);
1876
1877        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
1878        appendReservedUleb32(code, 1);
1879
1880        appendReservedI32Const(code, slice_abi_size);
1881        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_mul));
1882
1883        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_load));
1884        appendReservedUleb32(code, encoded_alignment);
1885        appendReservedUleb32(code, table_base_addr + table_index * 8);
1886
1887        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_store));
1888        appendReservedUleb32(code, encoded_alignment);
1889        appendReservedUleb32(code, 0);
1890    } else {
1891        const int_info = Zcu.Type.intInfo(.fromInterned(enum_type.tag_ty), zcu);
1892        const outer_block_type: std.wasm.BlockType = switch (int_info.bits) {
1893            0...32 => .i32,
1894            33...64 => .i64,
1895            else => return diags.fail("wasm linker does not yet implement @tagName for sparse enums with more than 64 bit integer tag types", .{}),
1896        };
1897
1898        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
1899        appendReservedUleb32(code, 0);
1900
1901        // Outer block that computes table offset.
1902        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block));
1903        code.appendAssumeCapacity(@intFromEnum(outer_block_type));
1904
1905        for (tag_values, 0..) |tag_value, tag_index| {
1906            // block for this if case
1907            code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block));
1908            code.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.empty));
1909
1910            // Tag value whose name should be returned.
1911            code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
1912            appendReservedUleb32(code, 1);
1913
1914            const val: Zcu.Value = .fromInterned(tag_value);
1915            switch (outer_block_type) {
1916                .i32 => {
1917                    const x: u32 = switch (int_info.signedness) {
1918                        .signed => @bitCast(@as(i32, @intCast(val.toSignedInt(zcu)))),
1919                        .unsigned => @intCast(val.toUnsignedInt(zcu)),
1920                    };
1921                    appendReservedI32Const(code, x);
1922                    code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_ne));
1923                },
1924                .i64 => {
1925                    const x: u64 = switch (int_info.signedness) {
1926                        .signed => @bitCast(val.toSignedInt(zcu)),
1927                        .unsigned => val.toUnsignedInt(zcu),
1928                    };
1929                    appendReservedI64Const(code, x);
1930                    code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_ne));
1931                },
1932                else => unreachable,
1933            }
1934
1935            // if they're not equal, break out of current branch
1936            code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_if));
1937            appendReservedUleb32(code, 0);
1938
1939            // Put the table offset of the result on the stack.
1940            appendReservedI32Const(code, @intCast(tag_index * slice_abi_size));
1941
1942            // break outside blocks
1943            code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br));
1944            appendReservedUleb32(code, 1);
1945
1946            // end the block for this case
1947            code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
1948        }
1949        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.@"unreachable"));
1950        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
1951
1952        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_load));
1953        appendReservedUleb32(code, encoded_alignment);
1954        appendReservedUleb32(code, table_base_addr + table_index * 8);
1955
1956        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_store));
1957        appendReservedUleb32(code, encoded_alignment);
1958        appendReservedUleb32(code, 0);
1959    }
1960
1961    // End of the function body
1962    code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
1963}
1964
/// Writes an unsigned 32-bit integer as a signed LEB128-encoded 'i32.const'
/// value, reinterpreting the bits as an i32.
fn appendReservedI32Const(bytes: *ArrayList(u8), val: u32) void {
    bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
    var w: std.Io.Writer = .fromArrayList(bytes);
    defer bytes.* = w.toArrayList();
    return w.writeSleb128(@as(i32, @bitCast(val))) catch |err| switch (err) {
        error.WriteFailed => unreachable,
    };
}

/// Writes an unsigned 64-bit integer as a signed LEB128-encoded 'i64.const'
/// value, reinterpreting the bits as an i64.
fn appendReservedI64Const(bytes: *ArrayList(u8), val: u64) void {
    bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const));
    var w: std.Io.Writer = .fromArrayList(bytes);
    defer bytes.* = w.toArrayList();
    return w.writeSleb128(@as(i64, @bitCast(val))) catch |err| switch (err) {
        error.WriteFailed => unreachable,
    };
}
1984
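/// Appends a minimal ULEB128 encoding of `val` into capacity the caller has
/// already reserved; running out of space is unreachable.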
1985fn appendReservedUleb32(bytes: *ArrayList(u8), val: u32) void {
1986    var w: std.Io.Writer = .fromArrayList(bytes);
1987    defer bytes.* = w.toArrayList();
1988    return w.writeUleb128(val) catch |err| switch (err) {
1989        error.WriteFailed => unreachable,
1990    };
1991}
1992
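/// Appends an i32 global entry: value type, mutability byte, and an
/// `i32.const val` init expression terminated by `end`.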
1993fn appendGlobal(gpa: Allocator, bytes: *ArrayList(u8), mutable: u8, val: u32) Allocator.Error!void {
1994    try bytes.ensureUnusedCapacity(gpa, 9);
1995    bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Valtype.i32));
1996    bytes.appendAssumeCapacity(mutable);
1997    bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
1998    appendReservedUleb32(bytes, val);
1999    bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
2000}
2001
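/// Appends a minimal LEB128 encoding of `value`, growing `bytes` with `gpa`
/// as needed.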
2002fn appendLeb128(gpa: Allocator, bytes: *ArrayList(u8), value: anytype) Allocator.Error!void {
2003    var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, bytes);
2004    defer bytes.* = aw.toArrayList();
2005    return aw.writer.writeLeb128(value) catch |err| switch (err) {
2006        error.WriteFailed => return error.OutOfMemory,
2007    };
2008}
2009
2010fn appendReservedLeb128(bytes: *ArrayList(u8), value: anytype) void {
2011    var w: std.Io.Writer = .fromArrayList(bytes);
2012    defer bytes.* = w.toArrayList();
2013    return w.writeLeb128(value) catch |err| switch (err) {
2014        error.WriteFailed => unreachable,
2015    };
2016}