   1pub const Atom = @import("Elf/Atom.zig");
   2
   3base: link.File,
   4zig_object: ?*ZigObject,
   5rpath_table: std.StringArrayHashMapUnmanaged(void),
   6image_base: u64,
   7z_nodelete: bool,
   8z_notext: bool,
   9z_defs: bool,
  10z_origin: bool,
  11z_nocopyreloc: bool,
  12z_now: bool,
  13z_relro: bool,
/// TODO make this non-optional and resolve the default in open()
z_common_page_size: ?u64,
/// TODO make this non-optional and resolve the default in open()
z_max_page_size: ?u64,
  18soname: ?[]const u8,
  19entry_name: ?[]const u8,
  20
  21ptr_width: PtrWidth,
  22
  23/// A list of all input files.
  24/// First index is a special "null file". Order is otherwise not observed.
  25files: std.MultiArrayList(File.Entry) = .{},
/// Long-lived list of all file descriptors.
/// We store them globally rather than per `File` so that a single file handle
/// can be reused for every object file within an archive.
  29file_handles: std.ArrayList(File.Handle) = .empty,
  30zig_object_index: ?File.Index = null,
  31linker_defined_index: ?File.Index = null,
  32objects: std.ArrayList(File.Index) = .empty,
  33shared_objects: std.StringArrayHashMapUnmanaged(File.Index) = .empty,
  34
  35/// List of all output sections and their associated metadata.
  36sections: std.MultiArrayList(Section) = .{},
  37/// File offset into the shdr table.
  38shdr_table_offset: ?u64 = null,
  39
/// Stored in native (host) endianness; entries must be byte-swapped on read/write
/// when the target endianness differs from the host.
/// Same order as in the file.
  42phdrs: ProgramHeaderList = .empty,
  43
  44/// Special program headers.
  45phdr_indexes: ProgramHeaderIndexes = .{},
  46section_indexes: SectionIndexes = .{},
  47
  48page_size: u32,
  49default_sym_version: elf.Versym,
  50
  51/// .shstrtab buffer
  52shstrtab: std.ArrayList(u8) = .empty,
  53/// .symtab buffer
  54symtab: std.ArrayList(elf.Elf64_Sym) = .empty,
  55/// .strtab buffer
  56strtab: std.ArrayList(u8) = .empty,
  57/// Dynamic symbol table. Only populated and emitted when linking dynamically.
  58dynsym: DynsymSection = .{},
  59/// .dynstrtab buffer
  60dynstrtab: std.ArrayList(u8) = .empty,
  61/// Version symbol table. Only populated and emitted when linking dynamically.
  62versym: std.ArrayList(elf.Versym) = .empty,
  63/// .verneed section
  64verneed: VerneedSection = .{},
  65/// .got section
  66got: GotSection = .{},
  67/// .rela.dyn section
  68rela_dyn: std.ArrayList(elf.Elf64_Rela) = .empty,
  69/// .dynamic section
  70dynamic: DynamicSection = .{},
  71/// .hash section
  72hash: HashSection = .{},
  73/// .gnu.hash section
  74gnu_hash: GnuHashSection = .{},
  75/// .plt section
  76plt: PltSection = .{},
  77/// .got.plt section
  78got_plt: GotPltSection = .{},
  79/// .plt.got section
  80plt_got: PltGotSection = .{},
  81/// .copyrel section
  82copy_rel: CopyRelSection = .{},
  83/// .rela.plt section
  84rela_plt: std.ArrayList(elf.Elf64_Rela) = .empty,
  85/// SHT_GROUP sections
  86/// Applies only to a relocatable.
  87group_sections: std.ArrayList(GroupSection) = .empty,
  88
  89resolver: SymbolResolver = .{},
  90
  91has_text_reloc: bool = false,
  92num_ifunc_dynrelocs: usize = 0,
  93
  94/// List of range extension thunks.
  95thunks: std.ArrayList(Thunk) = .empty,
  96
  97/// List of output merge sections with deduped contents.
  98merge_sections: std.ArrayList(Merge.Section) = .empty,
  99comment_merge_section_index: ?Merge.Section.Index = null,
 100
 101/// `--verbose-link` output.
 102/// Initialized on creation, appended to as inputs are added, printed during `flush`.
 103dump_argv_list: std.ArrayList([]const u8),
 104
 105const SectionIndexes = struct {
 106    copy_rel: ?u32 = null,
 107    dynamic: ?u32 = null,
 108    dynstrtab: ?u32 = null,
 109    dynsymtab: ?u32 = null,
 110    eh_frame: ?u32 = null,
 111    eh_frame_rela: ?u32 = null,
 112    eh_frame_hdr: ?u32 = null,
 113    hash: ?u32 = null,
 114    gnu_hash: ?u32 = null,
 115    got: ?u32 = null,
 116    got_plt: ?u32 = null,
 117    interp: ?u32 = null,
 118    plt: ?u32 = null,
 119    plt_got: ?u32 = null,
 120    rela_dyn: ?u32 = null,
 121    rela_plt: ?u32 = null,
 122    versym: ?u32 = null,
 123    verneed: ?u32 = null,
 124
 125    shstrtab: ?u32 = null,
 126    strtab: ?u32 = null,
 127    symtab: ?u32 = null,
 128};
 129
 130const ProgramHeaderList = std.ArrayList(elf.Elf64_Phdr);
 131
 132const OptionalProgramHeaderIndex = enum(u16) {
 133    none = std.math.maxInt(u16),
 134    _,
 135
 136    fn unwrap(i: OptionalProgramHeaderIndex) ?ProgramHeaderIndex {
 137        if (i == .none) return null;
 138        return @enumFromInt(@intFromEnum(i));
 139    }
 140
 141    fn int(i: OptionalProgramHeaderIndex) ?u16 {
 142        if (i == .none) return null;
 143        return @intFromEnum(i);
 144    }
 145};
 146
 147const ProgramHeaderIndex = enum(u16) {
 148    _,
 149
 150    fn toOptional(i: ProgramHeaderIndex) OptionalProgramHeaderIndex {
 151        const result: OptionalProgramHeaderIndex = @enumFromInt(@intFromEnum(i));
 152        assert(result != .none);
 153        return result;
 154    }
 155
 156    fn int(i: ProgramHeaderIndex) u16 {
 157        return @intFromEnum(i);
 158    }
 159};
 160
 161const ProgramHeaderIndexes = struct {
 162    /// PT_PHDR
 163    table: OptionalProgramHeaderIndex = .none,
 164    /// PT_LOAD for PHDR table
 165    /// We add this special load segment to ensure the EHDR and PHDR table are always
 166    /// loaded into memory.
 167    table_load: OptionalProgramHeaderIndex = .none,
 168    /// PT_INTERP
 169    interp: OptionalProgramHeaderIndex = .none,
 170    /// PT_DYNAMIC
 171    dynamic: OptionalProgramHeaderIndex = .none,
 172    /// PT_GNU_EH_FRAME
 173    gnu_eh_frame: OptionalProgramHeaderIndex = .none,
 174    /// PT_GNU_STACK
 175    gnu_stack: OptionalProgramHeaderIndex = .none,
 176    /// PT_TLS
 177    /// TODO I think ELF permits multiple TLS segments but for now, assume one per file.
 178    tls: OptionalProgramHeaderIndex = .none,
 179};
 180
/// When allocating, the ideal_capacity is calculated as
/// actual_capacity + (actual_capacity / ideal_factor).
 183const ideal_factor = 3;
 184
/// For a slice of bytes to remain eligible to keep metadata pointing at it as a
/// possible place to put new symbols, it must have room for at least this many
/// bytes (plus extra for reserved capacity).
 188const minimum_atom_size = 64;
 189pub const min_text_capacity = padToIdeal(minimum_atom_size);
 190
 191pub const PtrWidth = enum { p32, p64 };
 192
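/// Allocates a new `Elf` instance in `arena`, creates the output file, and seeds the
/// mandatory null entries (input file, section header, string tables, and symbol).
/// When producing a linked image it also reserves the PT_PHDR and PHDR-load segments.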
 193pub fn createEmpty(
 194    arena: Allocator,
 195    comp: *Compilation,
 196    emit: Path,
 197    options: link.File.OpenOptions,
 198) !*Elf {
 199    const target = &comp.root_mod.resolved_target.result;
 200    assert(target.ofmt == .elf);
 201
 202    const use_llvm = comp.config.use_llvm;
 203    const opt_zcu = comp.zcu;
 204    const output_mode = comp.config.output_mode;
 205    const link_mode = comp.config.link_mode;
 206    const optimize_mode = comp.root_mod.optimize_mode;
 207    const is_native_os = comp.root_mod.resolved_target.is_native_os;
 208    const ptr_width: PtrWidth = switch (target.ptrBitWidth()) {
 209        0...32 => .p32,
 210        33...64 => .p64,
 211        else => return error.UnsupportedELFArchitecture,
 212    };
 213
 214    // This is the max page size that the target system can run with, aka the ABI page size. Not to
 215    // be confused with the common page size, which is the page size that's used in practice on most
 216    // systems.
 217    const page_size: u32 = switch (target.cpu.arch) {
 218        .bpfel,
 219        .bpfeb,
 220        .sparc64,
 221        => 0x100000,
 222        .aarch64,
 223        .aarch64_be,
 224        .amdgcn,
 225        .hexagon,
 226        .mips,
 227        .mipsel,
 228        .mips64,
 229        .mips64el,
 230        .powerpc,
 231        .powerpcle,
 232        .powerpc64,
 233        .powerpc64le,
 234        .sparc,
 235        => 0x10000,
 236        .loongarch32,
 237        .loongarch64,
 238        => 0x4000,
 239        .arc,
 240        .m68k,
 241        => 0x2000,
 242        .msp430,
 243        => 0x4,
 244        .avr,
 245        => 0x1,
 246        else => 0x1000,
 247    };
 248
 249    const is_dyn_lib = output_mode == .Lib and link_mode == .dynamic;
 250    const default_sym_version: elf.Versym = if (is_dyn_lib or comp.config.rdynamic) .GLOBAL else .LOCAL;
 251
 252    var rpath_table: std.StringArrayHashMapUnmanaged(void) = .empty;
 253    try rpath_table.entries.resize(arena, options.rpath_list.len);
 254    @memcpy(rpath_table.entries.items(.key), options.rpath_list);
 255    try rpath_table.reIndex(arena);
 256
 257    const self = try arena.create(Elf);
 258    self.* = .{
 259        .base = .{
 260            .tag = .elf,
 261            .comp = comp,
 262            .emit = emit,
 263            .zcu_object_basename = if (use_llvm)
 264                try std.fmt.allocPrint(arena, "{s}_zcu.o", .{fs.path.stem(emit.sub_path)})
 265            else
 266                null,
 267            .gc_sections = options.gc_sections orelse (optimize_mode != .Debug and output_mode != .Obj),
 268            .print_gc_sections = options.print_gc_sections,
 269            .stack_size = options.stack_size orelse 16777216,
 270            .allow_shlib_undefined = options.allow_shlib_undefined orelse !is_native_os,
 271            .file = null,
 272            .build_id = options.build_id,
 273        },
 274        .zig_object = null,
 275        .rpath_table = rpath_table,
 276        .ptr_width = ptr_width,
 277        .page_size = page_size,
 278        .default_sym_version = default_sym_version,
 279
 280        .entry_name = switch (options.entry) {
 281            .disabled => null,
 282            .default => if (output_mode != .Exe) null else defaultEntrySymbolName(target.cpu.arch),
 283            .enabled => defaultEntrySymbolName(target.cpu.arch),
 284            .named => |name| name,
 285        },
 286
 287        .image_base = b: {
 288            if (is_dyn_lib) break :b 0;
 289            if (output_mode == .Exe and comp.config.pie) break :b 0;
 290            break :b options.image_base orelse switch (ptr_width) {
 291                .p32 => 0x10000,
 292                .p64 => 0x1000000,
 293            };
 294        },
 295
 296        .z_nodelete = options.z_nodelete,
 297        .z_notext = options.z_notext,
 298        .z_defs = options.z_defs,
 299        .z_origin = options.z_origin,
 300        .z_nocopyreloc = options.z_nocopyreloc,
 301        .z_now = options.z_now,
 302        .z_relro = options.z_relro,
 303        .z_common_page_size = options.z_common_page_size,
 304        .z_max_page_size = options.z_max_page_size,
 305        .soname = options.soname,
 306        .dump_argv_list = .empty,
 307    };
 308    errdefer self.base.destroy();
 309
 310    // --verbose-link
 311    if (comp.verbose_link) try dumpArgvInit(self, arena);
 312
 313    const is_obj = output_mode == .Obj;
 314    const is_obj_or_ar = is_obj or (output_mode == .Lib and link_mode == .static);
 315
 316    // What path should this ELF linker code output to?
 317    const sub_path = emit.sub_path;
 318    self.base.file = try emit.root_dir.handle.createFile(sub_path, .{
 319        .truncate = true,
 320        .read = true,
 321        .mode = link.File.determineMode(output_mode, link_mode),
 322    });
 323
 324    const gpa = comp.gpa;
 325
 326    // Append null file at index 0
 327    try self.files.append(gpa, .null);
 328    // Append null byte to string tables
 329    try self.shstrtab.append(gpa, 0);
 330    try self.strtab.append(gpa, 0);
 331    // There must always be a null shdr in index 0
 332    _ = try self.addSection(.{});
 333    // Append null symbol in output symtab
 334    try self.symtab.append(gpa, null_sym);
 335
 336    if (!is_obj_or_ar) {
 337        try self.dynstrtab.append(gpa, 0);
 338
 339        // Initialize PT_PHDR program header
 340        const p_align: u16 = switch (self.ptr_width) {
 341            .p32 => @alignOf(elf.Elf32_Phdr),
 342            .p64 => @alignOf(elf.Elf64_Phdr),
 343        };
 344        const ehsize: u64 = switch (self.ptr_width) {
 345            .p32 => @sizeOf(elf.Elf32_Ehdr),
 346            .p64 => @sizeOf(elf.Elf64_Ehdr),
 347        };
 348        const phsize: u64 = switch (self.ptr_width) {
 349            .p32 => @sizeOf(elf.Elf32_Phdr),
 350            .p64 => @sizeOf(elf.Elf64_Phdr),
 351        };
 352        const max_nphdrs = comptime getMaxNumberOfPhdrs();
 353        const reserved: u64 = mem.alignForward(u64, padToIdeal(max_nphdrs * phsize), self.page_size);
 354        self.phdr_indexes.table = (try self.addPhdr(.{
 355            .type = elf.PT_PHDR,
 356            .flags = elf.PF_R,
 357            .@"align" = p_align,
 358            .addr = self.image_base + ehsize,
 359            .offset = ehsize,
 360            .filesz = reserved,
 361            .memsz = reserved,
 362        })).toOptional();
 363        self.phdr_indexes.table_load = (try self.addPhdr(.{
 364            .type = elf.PT_LOAD,
 365            .flags = elf.PF_R,
 366            .@"align" = self.page_size,
 367            .addr = self.image_base,
 368            .offset = 0,
 369            .filesz = reserved + ehsize,
 370            .memsz = reserved + ehsize,
 371        })).toOptional();
 372    }
 373
 374    if (opt_zcu) |zcu| {
 375        if (!use_llvm) {
 376            const index: File.Index = @intCast(try self.files.addOne(gpa));
 377            self.files.set(index, .zig_object);
 378            self.zig_object_index = index;
 379            const zig_object = try arena.create(ZigObject);
 380            self.zig_object = zig_object;
 381            zig_object.* = .{
 382                .index = index,
 383                .basename = try std.fmt.allocPrint(arena, "{s}.o", .{
 384                    fs.path.stem(zcu.main_mod.root_src_path),
 385                }),
 386            };
 387            try zig_object.init(self, .{
 388                .symbol_count_hint = options.symbol_count_hint,
 389                .program_code_size_hint = options.program_code_size_hint,
 390            });
 391        }
 392    }
 393
 394    return self;
 395}
 396
 397pub fn open(
 398    arena: Allocator,
 399    comp: *Compilation,
 400    emit: Path,
 401    options: link.File.OpenOptions,
 402) !*Elf {
 403    // TODO: restore saved linker state, don't truncate the file, and
 404    // participate in incremental compilation.
 405    return createEmpty(arena, comp, emit, options);
 406}
 407
 408pub fn deinit(self: *Elf) void {
 409    const gpa = self.base.comp.gpa;
 410
 411    for (self.file_handles.items) |fh| {
 412        fh.close();
 413    }
 414    self.file_handles.deinit(gpa);
 415
 416    for (self.files.items(.tags), self.files.items(.data)) |tag, *data| switch (tag) {
 417        .null, .zig_object => {},
 418        .linker_defined => data.linker_defined.deinit(gpa),
 419        .object => data.object.deinit(gpa),
 420        .shared_object => data.shared_object.deinit(gpa),
 421    };
 422    if (self.zig_object) |zig_object| {
 423        zig_object.deinit(gpa);
 424    }
 425    self.files.deinit(gpa);
 426    self.objects.deinit(gpa);
 427    self.shared_objects.deinit(gpa);
 428
 429    for (self.sections.items(.atom_list_2), self.sections.items(.atom_list), self.sections.items(.free_list)) |*atom_list, *atoms, *free_list| {
 430        atom_list.deinit(gpa);
 431        atoms.deinit(gpa);
 432        free_list.deinit(gpa);
 433    }
 434    self.sections.deinit(gpa);
 435    self.phdrs.deinit(gpa);
 436    self.shstrtab.deinit(gpa);
 437    self.symtab.deinit(gpa);
 438    self.strtab.deinit(gpa);
 439    self.resolver.deinit(gpa);
 440
 441    for (self.thunks.items) |*th| {
 442        th.deinit(gpa);
 443    }
 444    self.thunks.deinit(gpa);
 445    for (self.merge_sections.items) |*sect| {
 446        sect.deinit(gpa);
 447    }
 448    self.merge_sections.deinit(gpa);
 449
 450    self.got.deinit(gpa);
 451    self.plt.deinit(gpa);
 452    self.plt_got.deinit(gpa);
 453    self.dynsym.deinit(gpa);
 454    self.dynstrtab.deinit(gpa);
 455    self.dynamic.deinit(gpa);
 456    self.hash.deinit(gpa);
 457    self.versym.deinit(gpa);
 458    self.verneed.deinit(gpa);
 459    self.copy_rel.deinit(gpa);
 460    self.rela_dyn.deinit(gpa);
 461    self.rela_plt.deinit(gpa);
 462    self.group_sections.deinit(gpa);
 463    self.dump_argv_list.deinit(gpa);
 464}
 465
 466pub fn getNavVAddr(self: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index, reloc_info: link.File.RelocInfo) !u64 {
 467    return self.zigObjectPtr().?.getNavVAddr(self, pt, nav_index, reloc_info);
 468}
 469
 470pub fn lowerUav(
 471    self: *Elf,
 472    pt: Zcu.PerThread,
 473    uav: InternPool.Index,
 474    explicit_alignment: InternPool.Alignment,
 475    src_loc: Zcu.LazySrcLoc,
 476) !codegen.SymbolResult {
 477    return self.zigObjectPtr().?.lowerUav(self, pt, uav, explicit_alignment, src_loc);
 478}
 479
 480pub fn getUavVAddr(self: *Elf, uav: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 {
 481    return self.zigObjectPtr().?.getUavVAddr(self, uav, reloc_info);
 482}
 483
 484/// Returns end pos of collision, if any.
 485fn detectAllocCollision(self: *Elf, start: u64, size: u64) !?u64 {
 486    const small_ptr = self.ptr_width == .p32;
 487    const ehdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Ehdr) else @sizeOf(elf.Elf64_Ehdr);
 488    if (start < ehdr_size)
 489        return ehdr_size;
 490
 491    var at_end = true;
 492    const end = start + padToIdeal(size);
 493
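    // Check the reserved shdr table area, every section's file contents, and every
    // PT_LOAD segment for overlap with the requested range.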
 494    if (self.shdr_table_offset) |off| {
 495        const shdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Shdr) else @sizeOf(elf.Elf64_Shdr);
 496        const tight_size = self.sections.items(.shdr).len * shdr_size;
 497        const increased_size = padToIdeal(tight_size);
 498        const test_end = off +| increased_size;
 499        if (start < test_end) {
 500            if (end > off) return test_end;
 501            if (test_end < std.math.maxInt(u64)) at_end = false;
 502        }
 503    }
 504
 505    for (self.sections.items(.shdr)) |shdr| {
 506        if (shdr.sh_type == elf.SHT_NOBITS) continue;
 507        const increased_size = padToIdeal(shdr.sh_size);
 508        const test_end = shdr.sh_offset +| increased_size;
 509        if (start < test_end) {
 510            if (end > shdr.sh_offset) return test_end;
 511            if (test_end < std.math.maxInt(u64)) at_end = false;
 512        }
 513    }
 514
 515    for (self.phdrs.items) |phdr| {
 516        if (phdr.p_type != elf.PT_LOAD) continue;
 517        const increased_size = padToIdeal(phdr.p_filesz);
 518        const test_end = phdr.p_offset +| increased_size;
 519        if (start < test_end) {
 520            if (end > phdr.p_offset) return test_end;
 521            if (test_end < std.math.maxInt(u64)) at_end = false;
 522        }
 523    }
 524
 525    if (at_end) try self.base.file.?.setEndPos(end);
 526    return null;
 527}
 528
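/// Returns the number of bytes available at `start` before the next allocated entity
/// (section header table, section contents, or program header) begins.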
 529pub fn allocatedSize(self: *Elf, start: u64) u64 {
 530    if (start == 0) return 0;
 531    var min_pos: u64 = std.math.maxInt(u64);
 532    if (self.shdr_table_offset) |off| {
 533        if (off > start and off < min_pos) min_pos = off;
 534    }
 535    for (self.sections.items(.shdr)) |section| {
 536        if (section.sh_offset <= start) continue;
 537        if (section.sh_offset < min_pos) min_pos = section.sh_offset;
 538    }
 539    for (self.phdrs.items) |phdr| {
 540        if (phdr.p_offset <= start) continue;
 541        if (phdr.p_offset < min_pos) min_pos = phdr.p_offset;
 542    }
 543    return min_pos - start;
 544}
 545
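/// Finds a file offset aligned to `min_alignment` with at least `object_size` bytes free,
/// advancing past existing allocations until no collision remains.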
 546pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) !u64 {
 547    var start: u64 = 0;
 548    while (try self.detectAllocCollision(start, object_size)) |item_end| {
 549        start = mem.alignForward(u64, item_end, min_alignment);
 550    }
 551    return start;
 552}
 553
 554pub fn growSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment: u64) !void {
 555    const shdr = &self.sections.items(.shdr)[shdr_index];
 556
 557    if (shdr.sh_type != elf.SHT_NOBITS) {
 558        const allocated_size = self.allocatedSize(shdr.sh_offset);
 559        log.debug("allocated size {x} of '{s}', needed size {x}", .{
 560            allocated_size,
 561            self.getShString(shdr.sh_name),
 562            needed_size,
 563        });
 564
 565        if (needed_size > allocated_size) {
 566            const existing_size = shdr.sh_size;
 567            shdr.sh_size = 0;
 568            // Must move the entire section.
 569            const new_offset = try self.findFreeSpace(needed_size, min_alignment);
 570
 571            log.debug("moving '{s}' from 0x{x} to 0x{x}", .{
 572                self.getShString(shdr.sh_name),
 573                shdr.sh_offset,
 574                new_offset,
 575            });
 576
 577            const amt = try self.base.file.?.copyRangeAll(
 578                shdr.sh_offset,
 579                self.base.file.?,
 580                new_offset,
 581                existing_size,
 582            );
            // TODO figure out what to do about this error condition - how to communicate it up.
 584            if (amt != existing_size) return error.InputOutput;
 585
 586            shdr.sh_offset = new_offset;
 587        } else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
 588            try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
 589        }
 590    }
 591
 592    shdr.sh_size = needed_size;
 593    self.markDirty(shdr_index);
 594}
 595
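/// After a section is grown or moved, flag any Zig-object DWARF section that lives in
/// `shdr_index` as dirty so its contents are rewritten.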
 596fn markDirty(self: *Elf, shdr_index: u32) void {
 597    if (self.zigObjectPtr()) |zo| {
 598        for ([_]?Symbol.Index{
 599            zo.debug_info_index,
 600            zo.debug_abbrev_index,
 601            zo.debug_aranges_index,
 602            zo.debug_str_index,
 603            zo.debug_line_index,
 604            zo.debug_line_str_index,
 605            zo.debug_loclists_index,
 606            zo.debug_rnglists_index,
 607        }, [_]*bool{
 608            &zo.debug_info_section_dirty,
 609            &zo.debug_abbrev_section_dirty,
 610            &zo.debug_aranges_section_dirty,
 611            &zo.debug_str_section_dirty,
 612            &zo.debug_line_section_dirty,
 613            &zo.debug_line_str_section_dirty,
 614            &zo.debug_loclists_section_dirty,
 615            &zo.debug_rnglists_section_dirty,
 616        }) |maybe_sym_index, dirty| {
 617            const sym_index = maybe_sym_index orelse continue;
 618            if (zo.symbol(sym_index).atom(self).?.output_section_index == shdr_index) {
 619                dirty.* = true;
 620                break;
 621            }
 622        }
 623    }
 624}
 625
 626const AllocateChunkResult = struct {
 627    value: u64,
 628    placement: Ref,
 629};
 630
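/// Reserves `size` bytes in output section `shndx`: reuses a sufficiently large free-list
/// hole when one exists, otherwise places the chunk after the last atom, growing the
/// section on disk if necessary.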
 631pub fn allocateChunk(self: *Elf, args: struct {
 632    size: u64,
 633    shndx: u32,
 634    alignment: Atom.Alignment,
 635    requires_padding: bool = true,
 636}) !AllocateChunkResult {
 637    const slice = self.sections.slice();
 638    const shdr = &slice.items(.shdr)[args.shndx];
 639    const free_list = &slice.items(.free_list)[args.shndx];
 640    const last_atom_ref = &slice.items(.last_atom)[args.shndx];
 641    const new_atom_ideal_capacity = if (args.requires_padding) padToIdeal(args.size) else args.size;
 642
 643    // First we look for an appropriately sized free list node.
 644    // The list is unordered. We'll just take the first thing that works.
 645    const res: AllocateChunkResult = blk: {
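        // If a child process is currently running this binary (e.g. during hot code
        // swapping), skip free-list reuse and always place the chunk after the last atom.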
 646        var i: usize = if (self.base.child_pid == null) 0 else free_list.items.len;
 647        while (i < free_list.items.len) {
 648            const big_atom_ref = free_list.items[i];
 649            const big_atom = self.atom(big_atom_ref).?;
 650            // We now have a pointer to a live atom that has too much capacity.
 651            // Is it enough that we could fit this new atom?
 652            const cap = big_atom.capacity(self);
 653            const ideal_capacity = if (args.requires_padding) padToIdeal(cap) else cap;
 654            const ideal_capacity_end_vaddr = std.math.add(u64, @intCast(big_atom.value), ideal_capacity) catch ideal_capacity;
 655            const capacity_end_vaddr = @as(u64, @intCast(big_atom.value)) + cap;
 656            const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
 657            const new_start_vaddr = args.alignment.backward(new_start_vaddr_unaligned);
 658            if (new_start_vaddr < ideal_capacity_end_vaddr) {
 659                // Additional bookkeeping here to notice if this free list node
 660                // should be deleted because the block that it points to has grown to take up
 661                // more of the extra capacity.
 662                if (!big_atom.freeListEligible(self)) {
 663                    _ = free_list.swapRemove(i);
 664                } else {
 665                    i += 1;
 666                }
 667                continue;
 668            }
 669            // At this point we know that we will place the new block here. But the
 670            // remaining question is whether there is still yet enough capacity left
 671            // over for there to still be a free list node.
 672            const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr;
 673            const keep_free_list_node = remaining_capacity >= min_text_capacity;
 674
 675            if (!keep_free_list_node) {
 676                _ = free_list.swapRemove(i);
 677            }
 678            break :blk .{ .value = new_start_vaddr, .placement = big_atom_ref };
 679        } else if (self.atom(last_atom_ref.*)) |last_atom| {
 680            const ideal_capacity = if (args.requires_padding) padToIdeal(last_atom.size) else last_atom.size;
 681            const ideal_capacity_end_vaddr = @as(u64, @intCast(last_atom.value)) + ideal_capacity;
 682            const new_start_vaddr = args.alignment.forward(ideal_capacity_end_vaddr);
 683            break :blk .{ .value = new_start_vaddr, .placement = last_atom.ref() };
 684        } else {
 685            break :blk .{ .value = 0, .placement = .{} };
 686        }
 687    };
 688
 689    const expand_section = if (self.atom(res.placement)) |placement_atom|
 690        placement_atom.nextAtom(self) == null
 691    else
 692        true;
 693    if (expand_section) {
 694        const needed_size = res.value + args.size;
 695        try self.growSection(args.shndx, needed_size, args.alignment.toByteUnits().?);
 696    }
 697
 698    log.debug("allocated chunk (size({x}),align({x})) in {s} at 0x{x} (file(0x{x}))", .{
 699        args.size,
 700        args.alignment.toByteUnits().?,
 701        self.getShString(shdr.sh_name),
 702        shdr.sh_addr + res.value,
 703        shdr.sh_offset + res.value,
 704    });
 705    log.debug("  placement {f}, {s}", .{
 706        res.placement,
 707        if (self.atom(res.placement)) |atom_ptr| atom_ptr.name(self) else "",
 708    });
 709
 710    return res;
 711}
 712
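/// Records the input on the `--verbose-link` argv list and dispatches to the appropriate
/// parser for an object, archive, or shared object.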
 713pub fn loadInput(self: *Elf, input: link.Input) !void {
 714    const comp = self.base.comp;
 715    const gpa = comp.gpa;
 716    const diags = &comp.link_diags;
 717    const target = self.getTarget();
 718    const debug_fmt_strip = comp.config.debug_format == .strip;
 719    const default_sym_version = self.default_sym_version;
 720    const is_static_lib = self.base.isStaticLib();
 721
 722    if (comp.verbose_link) {
 723        comp.mutex.lock(); // protect comp.arena
 724        defer comp.mutex.unlock();
 725
 726        const argv = &self.dump_argv_list;
 727        switch (input) {
 728            .res => unreachable,
 729            .dso_exact => |dso_exact| try argv.appendSlice(gpa, &.{ "-l", dso_exact.name }),
 730            .object, .archive => |obj| try argv.append(gpa, try obj.path.toString(comp.arena)),
 731            .dso => |dso| try argv.append(gpa, try dso.path.toString(comp.arena)),
 732        }
 733    }
 734
 735    switch (input) {
 736        .res => unreachable,
 737        .dso_exact => @panic("TODO"),
 738        .object => |obj| try parseObject(self, obj),
 739        .archive => |obj| try parseArchive(gpa, diags, &self.file_handles, &self.files, target, debug_fmt_strip, default_sym_version, &self.objects, obj, is_static_lib),
 740        .dso => |dso| try parseDso(gpa, diags, dso, &self.shared_objects, &self.files, target),
 741    }
 742}
 743
 744pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
 745    const tracy = trace(@src());
 746    defer tracy.end();
 747
 748    const comp = self.base.comp;
 749    const diags = &comp.link_diags;
 750
 751    if (comp.verbose_link) Compilation.dump_argv(self.dump_argv_list.items);
 752
 753    const sub_prog_node = prog_node.start("ELF Flush", 0);
 754    defer sub_prog_node.end();
 755
 756    return flushInner(self, arena, tid) catch |err| switch (err) {
 757        error.OutOfMemory => return error.OutOfMemory,
 758        error.LinkFailure => return error.LinkFailure,
 759        else => |e| return diags.fail("ELF flush failed: {s}", .{@errorName(e)}),
 760    };
 761}
 762
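/// Runs the main link pipeline: flush the ZCU object, resolve symbols, initialize and
/// size output sections, allocate program headers and addresses, then write atoms,
/// synthetic sections, and the ELF headers to the output file.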
 763fn flushInner(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id) !void {
 764    const comp = self.base.comp;
 765    const gpa = comp.gpa;
 766    const diags = &comp.link_diags;
 767
 768    const zcu_obj_path: ?Path = if (self.base.zcu_object_basename) |raw| p: {
 769        break :p try comp.resolveEmitPathFlush(arena, .temp, raw);
 770    } else null;
 771
 772    if (self.zigObjectPtr()) |zig_object| try zig_object.flush(self, tid);
 773
 774    if (zcu_obj_path) |path| openParseObjectReportingFailure(self, path);
 775
 776    switch (comp.config.output_mode) {
 777        .Obj => return relocatable.flushObject(self, comp),
 778        .Lib => switch (comp.config.link_mode) {
 779            .dynamic => {},
 780            .static => return relocatable.flushStaticLib(self, comp),
 781        },
 782        .Exe => {},
 783    }
 784
 785    if (diags.hasErrors()) return error.LinkFailure;
 786
    // If we haven't already, create a linker-generated input file comprising only
    // linker-defined synthetic symbols such as `_DYNAMIC`, etc.
 789    if (self.linker_defined_index == null) {
 790        const index: File.Index = @intCast(try self.files.addOne(gpa));
 791        self.files.set(index, .{ .linker_defined = .{ .index = index } });
 792        self.linker_defined_index = index;
 793        const object = self.linkerDefinedPtr().?;
 794        try object.init(gpa);
 795        try object.initSymbols(self);
 796    }
 797
    // Now we are ready to resolve the symbols across all input files.
    // We first resolve symbols in the ZigObject, then in the parsed input object files.
    // Any qualifying unresolved symbol will be upgraded to an absolute, weak
    // symbol for potential resolution at load time.
 803    try self.resolveSymbols();
 804    self.markEhFrameAtomsDead();
 805    try self.resolveMergeSections();
 806
 807    for (self.objects.items) |index| {
 808        try self.file(index).?.object.convertCommonSymbols(self);
 809    }
 810    self.markImportsExports();
 811
 812    if (self.base.gc_sections) {
 813        try gc.gcAtoms(self);
 814    }
 815
 816    self.checkDuplicates() catch |err| switch (err) {
 817        error.HasDuplicates => return error.LinkFailure,
 818        else => |e| return e,
 819    };
 820
 821    try self.addCommentString();
 822    try self.finalizeMergeSections();
 823    try self.initOutputSections();
 824    if (self.linkerDefinedPtr()) |obj| {
 825        try obj.initStartStopSymbols(self);
 826    }
 827    self.claimUnresolved();
 828
 829    // Scan and create missing synthetic entries such as GOT indirection.
 830    try self.scanRelocs();
 831
 832    // Generate and emit synthetic sections.
 833    try self.initSyntheticSections();
 834    try self.initSpecialPhdrs();
 835    try sortShdrs(
 836        gpa,
 837        &self.section_indexes,
 838        &self.sections,
 839        self.shstrtab.items,
 840        self.merge_sections.items,
 841        self.group_sections.items,
 842        self.zigObjectPtr(),
 843        self.files,
 844    );
 845
 846    try self.setDynamicSection(self.rpath_table.keys());
 847    self.sortDynamicSymtab();
 848    try self.setHashSections();
 849    try self.setVersionSymtab();
 850
 851    try self.sortInitFini();
 852    try self.updateMergeSectionSizes();
 853    try self.updateSectionSizes();
 854
 855    try self.addLoadPhdrs();
 856    try self.allocatePhdrTable();
 857    try self.allocateAllocSections();
 858    try sortPhdrs(gpa, &self.phdrs, &self.phdr_indexes, self.sections.items(.phndx));
 859    try self.allocateNonAllocSections();
 860    self.allocateSpecialPhdrs();
 861    if (self.linkerDefinedPtr()) |obj| {
 862        obj.allocateSymbols(self);
 863    }
 864
 865    // Dump the state for easy debugging.
 866    // State can be dumped via `--debug-log link_state`.
 867    if (build_options.enable_logging) {
 868        state_log.debug("{f}", .{self.dumpState()});
 869    }
 870
 871    // Beyond this point, everything has been allocated a virtual address and we can resolve
 872    // the relocations, and commit objects to file.
 873    for (self.objects.items) |index| {
 874        self.file(index).?.object.dirty = false;
 875    }
    // TODO: would state tracking be more appropriate here? Perhaps even a custom relocation type?
 877    self.rela_dyn.clearRetainingCapacity();
 878    self.rela_plt.clearRetainingCapacity();
 879
 880    if (self.zigObjectPtr()) |zo| {
 881        var undefs: std.AutoArrayHashMap(SymbolResolver.Index, std.array_list.Managed(Ref)) = .init(gpa);
 882        defer {
 883            for (undefs.values()) |*refs| refs.deinit();
 884            undefs.deinit();
 885        }
 886
 887        var has_reloc_errors = false;
 888        for (zo.atoms_indexes.items) |atom_index| {
 889            const atom_ptr = zo.atom(atom_index) orelse continue;
 890            if (!atom_ptr.alive) continue;
 891            const out_shndx = atom_ptr.output_section_index;
 892            const shdr = &self.sections.items(.shdr)[out_shndx];
 893            if (shdr.sh_type == elf.SHT_NOBITS) continue;
 894            const code = try zo.codeAlloc(self, atom_index);
 895            defer gpa.free(code);
 896            const file_offset = atom_ptr.offset(self);
 897            (if (shdr.sh_flags & elf.SHF_ALLOC == 0)
 898                atom_ptr.resolveRelocsNonAlloc(self, code, &undefs)
 899            else
 900                atom_ptr.resolveRelocsAlloc(self, code)) catch |err| switch (err) {
 901                error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
 902                error.UnsupportedCpuArch => {
 903                    try self.reportUnsupportedCpuArch();
 904                    return error.LinkFailure;
 905                },
 906                else => |e| return e,
 907            };
 908            try self.pwriteAll(code, file_offset);
 909        }
 910
 911        try self.reportUndefinedSymbols(&undefs);
 912
 913        if (has_reloc_errors) return error.LinkFailure;
 914    }
 915
 916    try self.writePhdrTable();
 917    try self.writeShdrTable();
 918    try self.writeAtoms();
 919    try self.writeMergeSections();
 920
 921    self.writeSyntheticSections() catch |err| switch (err) {
 922        error.RelocFailure => return error.LinkFailure,
 923        error.UnsupportedCpuArch => {
 924            try self.reportUnsupportedCpuArch();
 925            return error.LinkFailure;
 926        },
 927        else => |e| return e,
 928    };
 929
 930    if (self.base.isExe() and self.linkerDefinedPtr().?.entry_index == null) {
 931        log.debug("flushing. no_entry_point_found = true", .{});
 932        diags.flags.no_entry_point_found = true;
 933    } else {
 934        log.debug("flushing. no_entry_point_found = false", .{});
 935        diags.flags.no_entry_point_found = false;
 936        try self.writeElfHeader();
 937    }
 938
 939    if (diags.hasErrors()) return error.LinkFailure;
 940}
 941
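/// Seeds `dump_argv_list` with an `ld`- or `ar`-style rendering of this link invocation
/// for `--verbose-link` output.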
 942fn dumpArgvInit(self: *Elf, arena: Allocator) !void {
 943    const comp = self.base.comp;
 944    const gpa = comp.gpa;
 945    const target = self.getTarget();
 946    const full_out_path = try self.base.emit.root_dir.join(arena, &[_][]const u8{self.base.emit.sub_path});
 947
 948    const argv = &self.dump_argv_list;
 949
 950    try argv.append(gpa, "zig");
 951
 952    if (self.base.isStaticLib()) {
 953        try argv.append(gpa, "ar");
 954    } else {
 955        try argv.append(gpa, "ld");
 956    }
 957
 958    if (self.base.isObject()) {
 959        try argv.append(gpa, "-r");
 960    }
 961
 962    try argv.append(gpa, "-o");
 963    try argv.append(gpa, full_out_path);
 964
 965    if (!self.base.isRelocatable()) {
 966        if (!self.base.isStatic()) {
 967            if (target.dynamic_linker.get()) |path| {
 968                try argv.appendSlice(gpa, &.{ "-dynamic-linker", try arena.dupe(u8, path) });
 969            }
 970        }
 971
 972        if (self.base.isDynLib()) {
 973            if (self.soname) |name| {
 974                try argv.append(gpa, "-soname");
 975                try argv.append(gpa, name);
 976            }
 977        }
 978
 979        if (self.entry_name) |name| {
 980            try argv.appendSlice(gpa, &.{ "--entry", name });
 981        }
 982
 983        for (self.rpath_table.keys()) |rpath| {
 984            try argv.appendSlice(gpa, &.{ "-rpath", rpath });
 985        }
 986
 987        try argv.appendSlice(gpa, &.{
 988            "-z",
 989            try std.fmt.allocPrint(arena, "stack-size={d}", .{self.base.stack_size}),
 990        });
 991
 992        try argv.append(gpa, try std.fmt.allocPrint(arena, "--image-base={d}", .{self.image_base}));
 993
 994        if (self.base.gc_sections) {
 995            try argv.append(gpa, "--gc-sections");
 996        }
 997
 998        if (self.base.print_gc_sections) {
 999            try argv.append(gpa, "--print-gc-sections");
1000        }
1001
1002        if (comp.link_eh_frame_hdr) {
1003            try argv.append(gpa, "--eh-frame-hdr");
1004        }
1005
1006        if (comp.config.rdynamic) {
1007            try argv.append(gpa, "--export-dynamic");
1008        }
1009
1010        if (self.z_notext) {
1011            try argv.append(gpa, "-z");
1012            try argv.append(gpa, "notext");
1013        }
1014
1015        if (self.z_nocopyreloc) {
1016            try argv.append(gpa, "-z");
1017            try argv.append(gpa, "nocopyreloc");
1018        }
1019
1020        if (self.z_now) {
1021            try argv.append(gpa, "-z");
1022            try argv.append(gpa, "now");
1023        }
1024
1025        if (self.base.isStatic()) {
1026            try argv.append(gpa, "-static");
1027        } else if (self.isEffectivelyDynLib()) {
1028            try argv.append(gpa, "-shared");
1029        }
1030
1031        if (comp.config.pie and self.base.isExe()) {
1032            try argv.append(gpa, "-pie");
1033        }
1034
1035        if (comp.config.debug_format == .strip) {
1036            try argv.append(gpa, "-s");
1037        }
1038
1039        if (comp.config.link_libc) {
1040            if (self.base.comp.libc_installation) |lci| {
1041                try argv.append(gpa, "-L");
1042                try argv.append(gpa, lci.crt_dir.?);
1043            }
1044        }
1045    }
1046}
1047
1048pub fn openParseObjectReportingFailure(self: *Elf, path: Path) void {
1049    const diags = &self.base.comp.link_diags;
1050    const obj = link.openObject(path, false, false) catch |err| {
1051        switch (diags.failParse(path, "failed to open object: {s}", .{@errorName(err)})) {
1052            error.LinkFailure => return,
1053        }
1054    };
1055    self.parseObjectReportingFailure(obj);
1056}
1057
1058fn parseObjectReportingFailure(self: *Elf, obj: link.Input.Object) void {
1059    const diags = &self.base.comp.link_diags;
1060    self.parseObject(obj) catch |err| switch (err) {
1061        error.LinkFailure => return, // already reported
1062        else => |e| diags.addParseError(obj.path, "failed to parse object: {s}", .{@errorName(e)}),
1063    };
1064}
1065
1066fn parseObject(self: *Elf, obj: link.Input.Object) !void {
1067    const tracy = trace(@src());
1068    defer tracy.end();
1069
1070    const gpa = self.base.comp.gpa;
1071    const diags = &self.base.comp.link_diags;
1072    const target = &self.base.comp.root_mod.resolved_target.result;
1073    const debug_fmt_strip = self.base.comp.config.debug_format == .strip;
1074    const default_sym_version = self.default_sym_version;
1075    const file_handles = &self.file_handles;
1076
1077    const handle = obj.file;
1078    const fh = try addFileHandle(gpa, file_handles, handle);
1079
1080    const index: File.Index = @intCast(try self.files.addOne(gpa));
1081    self.files.set(index, .{ .object = .{
1082        .path = .{
1083            .root_dir = obj.path.root_dir,
1084            .sub_path = try gpa.dupe(u8, obj.path.sub_path),
1085        },
1086        .file_handle = fh,
1087        .index = index,
1088    } });
1089    try self.objects.append(gpa, index);
1090
1091    const object = self.file(index).?.object;
1092    try object.parseCommon(gpa, diags, obj.path, handle, target);
1093    if (!self.base.isStaticLib()) {
1094        try object.parse(gpa, diags, obj.path, handle, target, debug_fmt_strip, default_sym_version);
1095    }
1096}
1097
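/// Extracts and parses every member object of an archive; members start out alive only
/// when building a static library or when the archive is marked `must_link`.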
1098fn parseArchive(
1099    gpa: Allocator,
1100    diags: *Diags,
1101    file_handles: *std.ArrayList(File.Handle),
1102    files: *std.MultiArrayList(File.Entry),
1103    target: *const std.Target,
1104    debug_fmt_strip: bool,
1105    default_sym_version: elf.Versym,
1106    objects: *std.ArrayList(File.Index),
1107    obj: link.Input.Object,
1108    is_static_lib: bool,
1109) !void {
1110    const tracy = trace(@src());
1111    defer tracy.end();
1112
1113    const fh = try addFileHandle(gpa, file_handles, obj.file);
1114    var archive = try Archive.parse(gpa, diags, file_handles, obj.path, fh);
1115    defer archive.deinit(gpa);
1116
1117    const init_alive = if (is_static_lib) true else obj.must_link;
1118
1119    for (archive.objects) |extracted| {
1120        const index: File.Index = @intCast(try files.addOne(gpa));
1121        files.set(index, .{ .object = extracted });
1122        const object = &files.items(.data)[index].object;
1123        object.index = index;
1124        object.alive = init_alive;
1125        try object.parseCommon(gpa, diags, obj.path, obj.file, target);
1126        if (!is_static_lib)
1127            try object.parse(gpa, diags, obj.path, obj.file, target, debug_fmt_strip, default_sym_version);
1128        try objects.append(gpa, index);
1129    }
1130}
1131
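/// Parses a shared object, deduplicating by soname, and pre-populates its symbol list
/// from the parsed dynamic symbol table.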
1132fn parseDso(
1133    gpa: Allocator,
1134    diags: *Diags,
1135    dso: link.Input.Dso,
1136    shared_objects: *std.StringArrayHashMapUnmanaged(File.Index),
1137    files: *std.MultiArrayList(File.Entry),
1138    target: *const std.Target,
1139) !void {
1140    const tracy = trace(@src());
1141    defer tracy.end();
1142
1143    const handle = dso.file;
1144
1145    const stat = Stat.fromFs(try handle.stat());
1146    var header = try SharedObject.parseHeader(gpa, diags, dso.path, handle, stat, target);
1147    defer header.deinit(gpa);
1148
1149    const soname = header.soname() orelse dso.path.basename();
1150
1151    const gop = try shared_objects.getOrPut(gpa, soname);
1152    if (gop.found_existing) return;
1153    errdefer _ = shared_objects.pop();
1154
1155    const index: File.Index = @intCast(try files.addOne(gpa));
1156    errdefer _ = files.pop();
1157
1158    gop.value_ptr.* = index;
1159
1160    var parsed = try SharedObject.parse(gpa, &header, handle);
1161    errdefer parsed.deinit(gpa);
1162
1163    const duped_path: Path = .{
1164        .root_dir = dso.path.root_dir,
1165        .sub_path = try gpa.dupe(u8, dso.path.sub_path),
1166    };
1167    errdefer gpa.free(duped_path.sub_path);
1168
1169    files.set(index, .{
1170        .shared_object = .{
1171            .parsed = parsed,
1172            .path = duped_path,
1173            .index = index,
1174            .needed = dso.needed,
1175            .alive = dso.needed,
1176            .aliases = null,
1177            .symbols = .empty,
1178            .symbols_extra = .empty,
1179            .symbols_resolver = .empty,
1180            .output_symtab_ctx = .{},
1181        },
1182    });
1183    const so = fileLookup(files.*, index, null).?.shared_object;
1184
1185    // TODO: save this work for later
1186    const nsyms = parsed.symbols.len;
1187    try so.symbols.ensureTotalCapacityPrecise(gpa, nsyms);
1188    try so.symbols_extra.ensureTotalCapacityPrecise(gpa, nsyms * @typeInfo(Symbol.Extra).@"struct".fields.len);
1189    try so.symbols_resolver.ensureTotalCapacityPrecise(gpa, nsyms);
1190    so.symbols_resolver.appendNTimesAssumeCapacity(0, nsyms);
1191
1192    for (parsed.symtab, parsed.symbols, parsed.versyms, 0..) |esym, sym, versym, i| {
1193        const out_sym_index = so.addSymbolAssumeCapacity();
1194        const out_sym = &so.symbols.items[out_sym_index];
1195        out_sym.value = @intCast(esym.st_value);
1196        out_sym.name_offset = sym.mangled_name;
1197        out_sym.ref = .{ .index = 0, .file = 0 };
1198        out_sym.esym_index = @intCast(i);
1199        out_sym.version_index = versym;
1200        out_sym.extra_index = so.addSymbolExtraAssumeCapacity(.{});
1201    }
1202}
1203
1204/// When resolving symbols, we approach the problem similarly to `mold`.
/// 1. Resolve symbols across all objects (including objects preemptively extracted from archives).
1206/// 2. Resolve symbols across all shared objects.
1207/// 3. Mark live objects (see `Elf.markLive`)
1208/// 4. Reset state of all resolved globals since we will redo this bit on the pruned set.
1209/// 5. Remove references to dead objects/shared objects
1210/// 6. Re-run symbol resolution on pruned objects and shared objects sets.
1211pub fn resolveSymbols(self: *Elf) !void {
1212    // This function mutates `shared_objects`.
1213    const shared_objects = &self.shared_objects;
1214
1215    // Resolve symbols in the ZigObject. For now, we assume that it's always live.
1216    if (self.zigObjectPtr()) |zo| try zo.asFile().resolveSymbols(self);
1217    // Resolve symbols on the set of all objects and shared objects (even if some are unneeded).
1218    for (self.objects.items) |index| try self.file(index).?.resolveSymbols(self);
1219    for (shared_objects.values()) |index| try self.file(index).?.resolveSymbols(self);
1220    if (self.linkerDefinedPtr()) |obj| try obj.asFile().resolveSymbols(self);
1221
1222    // Mark live objects.
1223    self.markLive();
1224
1225    // Reset state of all globals after marking live objects.
1226    self.resolver.reset();
1227
1228    // Prune dead objects and shared objects.
1229    var i: usize = 0;
1230    while (i < self.objects.items.len) {
1231        const index = self.objects.items[i];
1232        if (!self.file(index).?.isAlive()) {
1233            _ = self.objects.orderedRemove(i);
1234        } else i += 1;
1235    }
1236    // TODO This loop has 2 major flaws:
1237    // 1. It is O(N^2) which is never allowed in the codebase.
1238    // 2. It mutates shared_objects, which is a non-starter for incremental compilation.
1239    i = 0;
1240    while (i < shared_objects.values().len) {
1241        const index = shared_objects.values()[i];
1242        if (!self.file(index).?.isAlive()) {
1243            _ = shared_objects.orderedRemoveAt(i);
1244        } else i += 1;
1245    }
1246
1247    {
1248        // Dedup groups.
1249        var table = std.StringHashMap(Ref).init(self.base.comp.gpa);
1250        defer table.deinit();
1251
1252        for (self.objects.items) |index| {
1253            try self.file(index).?.object.resolveGroups(self, &table);
1254        }
1255
1256        for (self.objects.items) |index| {
1257            self.file(index).?.object.markGroupsDead(self);
1258        }
1259    }
1260
1261    // Re-resolve the symbols.
1262    if (self.zigObjectPtr()) |zo| try zo.asFile().resolveSymbols(self);
1263    for (self.objects.items) |index| try self.file(index).?.resolveSymbols(self);
1264    for (shared_objects.values()) |index| try self.file(index).?.resolveSymbols(self);
1265    if (self.linkerDefinedPtr()) |obj| try obj.asFile().resolveSymbols(self);
1266}
1267
/// Traverses all objects and shared objects, marking any object referenced by
/// a live object/shared object as alive itself.
/// This marking lets the caller prune unneeded objects extracted from archives
/// and unneeded shared objects.
1272fn markLive(self: *Elf) void {
1273    const shared_objects = self.shared_objects.values();
1274    if (self.zigObjectPtr()) |zig_object| zig_object.asFile().markLive(self);
1275    for (self.objects.items) |index| {
1276        const file_ptr = self.file(index).?;
1277        if (file_ptr.isAlive()) file_ptr.markLive(self);
1278    }
1279    for (shared_objects) |index| {
1280        const file_ptr = self.file(index).?;
1281        if (file_ptr.isAlive()) file_ptr.markLive(self);
1282    }
1283}
1284
1285pub fn markEhFrameAtomsDead(self: *Elf) void {
1286    for (self.objects.items) |index| {
1287        const file_ptr = self.file(index).?;
1288        if (!file_ptr.isAlive()) continue;
1289        file_ptr.object.markEhFrameAtomsDead(self);
1290    }
1291}
1292
1293fn markImportsExports(self: *Elf) void {
1294    const shared_objects = self.shared_objects.values();
1295    if (self.zigObjectPtr()) |zo| {
1296        zo.markImportsExports(self);
1297    }
1298    for (self.objects.items) |index| {
1299        self.file(index).?.object.markImportsExports(self);
1300    }
1301    if (!self.isEffectivelyDynLib()) {
1302        for (shared_objects) |index| {
1303            self.file(index).?.shared_object.markImportExports(self);
1304        }
1305    }
1306}
1307
1308fn claimUnresolved(self: *Elf) void {
1309    if (self.zigObjectPtr()) |zig_object| {
1310        zig_object.claimUnresolved(self);
1311    }
1312    for (self.objects.items) |index| {
1313        self.file(index).?.object.claimUnresolved(self);
1314    }
1315}
1316
/// In scanRelocs we go over all live atoms and scan their relocations.
/// This helps us work out what synthetic entries (GOT indirection, etc.) to emit.
/// This is also the point where we report undefined symbols for any
/// alloc sections.
1321fn scanRelocs(self: *Elf) !void {
1322    const gpa = self.base.comp.gpa;
1323    const shared_objects = self.shared_objects.values();
1324
1325    var undefs: std.AutoArrayHashMap(SymbolResolver.Index, std.array_list.Managed(Ref)) = .init(gpa);
1326    defer {
1327        for (undefs.values()) |*refs| refs.deinit();
1328        undefs.deinit();
1329    }
1330
1331    var has_reloc_errors = false;
1332    if (self.zigObjectPtr()) |zo| {
1333        zo.asFile().scanRelocs(self, &undefs) catch |err| switch (err) {
1334            error.RelaxFailure => unreachable,
1335            error.UnsupportedCpuArch => {
1336                try self.reportUnsupportedCpuArch();
1337                return error.LinkFailure;
1338            },
1339            error.RelocFailure => has_reloc_errors = true,
1340            else => |e| return e,
1341        };
1342    }
1343    for (self.objects.items) |index| {
1344        self.file(index).?.scanRelocs(self, &undefs) catch |err| switch (err) {
1345            error.RelaxFailure => unreachable,
1346            error.UnsupportedCpuArch => {
1347                try self.reportUnsupportedCpuArch();
1348                return error.LinkFailure;
1349            },
1350            error.RelocFailure => has_reloc_errors = true,
1351            else => |e| return e,
1352        };
1353    }
1354
1355    try self.reportUndefinedSymbols(&undefs);
1356
1357    if (has_reloc_errors) return error.LinkFailure;
1358
1359    if (self.zigObjectPtr()) |zo| {
1360        try zo.asFile().createSymbolIndirection(self);
1361    }
1362    for (self.objects.items) |index| {
1363        try self.file(index).?.createSymbolIndirection(self);
1364    }
1365    for (shared_objects) |index| {
1366        try self.file(index).?.createSymbolIndirection(self);
1367    }
1368    if (self.linkerDefinedPtr()) |obj| {
1369        try obj.asFile().createSymbolIndirection(self);
1370    }
1371    if (self.got.flags.needs_tlsld) {
1372        log.debug("program needs TLSLD", .{});
1373        try self.got.addTlsLdSymbol(self);
1374    }
1375}
1376
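/// Returns the output section index for an input section, creating the section if needed
/// and coalescing well-known name prefixes (e.g. `.text.foo` into `.text`) unless emitting
/// a relocatable.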
1377pub fn initOutputSection(self: *Elf, args: struct {
1378    name: [:0]const u8,
1379    flags: u64,
1380    type: u32,
1381}) error{OutOfMemory}!u32 {
1382    const name = blk: {
1383        if (self.base.isRelocatable()) break :blk args.name;
1384        if (args.flags & elf.SHF_MERGE != 0) break :blk args.name;
1385        const name_prefixes: []const [:0]const u8 = &.{
1386            ".text",       ".data.rel.ro", ".data", ".rodata", ".bss.rel.ro",       ".bss",
1387            ".init_array", ".fini_array",  ".tbss", ".tdata",  ".gcc_except_table", ".ctors",
1388            ".dtors",      ".gnu.warning",
1389        };
1390        inline for (name_prefixes) |prefix| {
1391            if (mem.eql(u8, args.name, prefix) or mem.startsWith(u8, args.name, prefix ++ ".")) {
1392                break :blk prefix;
1393            }
1394        }
1395        break :blk args.name;
1396    };
1397    const @"type" = tt: {
1398        if (self.getTarget().cpu.arch == .x86_64 and args.type == elf.SHT_X86_64_UNWIND)
1399            break :tt elf.SHT_PROGBITS;
1400        switch (args.type) {
1401            elf.SHT_NULL => unreachable,
1402            elf.SHT_PROGBITS => {
1403                if (mem.eql(u8, args.name, ".init_array") or mem.startsWith(u8, args.name, ".init_array."))
1404                    break :tt elf.SHT_INIT_ARRAY;
1405                if (mem.eql(u8, args.name, ".fini_array") or mem.startsWith(u8, args.name, ".fini_array."))
1406                    break :tt elf.SHT_FINI_ARRAY;
1407                break :tt args.type;
1408            },
1409            else => break :tt args.type,
1410        }
1411    };
1412    const flags = blk: {
1413        var flags = args.flags;
1414        if (!self.base.isRelocatable()) {
1415            flags &= ~@as(u64, elf.SHF_COMPRESSED | elf.SHF_GROUP | elf.SHF_GNU_RETAIN);
1416        }
1417        break :blk switch (@"type") {
1418            elf.SHT_INIT_ARRAY, elf.SHT_FINI_ARRAY => flags | elf.SHF_WRITE,
1419            else => flags,
1420        };
1421    };
1422    const out_shndx = self.sectionByName(name) orelse try self.addSection(.{
1423        .type = @"type",
1424        .flags = flags,
1425        .name = try self.insertShString(name),
1426    });
1427    return out_shndx;
1428}
1429
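/// Writes the section header table to the output file, relocating it first if the current
/// offset no longer has room, and byte-swapping entries for foreign-endian targets.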
1430pub fn writeShdrTable(self: *Elf) !void {
1431    const gpa = self.base.comp.gpa;
1432    const target_endian = self.getTarget().cpu.arch.endian();
1433    const foreign_endian = target_endian != builtin.cpu.arch.endian();
1434    const shsize: u64 = switch (self.ptr_width) {
1435        .p32 => @sizeOf(elf.Elf32_Shdr),
1436        .p64 => @sizeOf(elf.Elf64_Shdr),
1437    };
1438    const shalign: u16 = switch (self.ptr_width) {
1439        .p32 => @alignOf(elf.Elf32_Shdr),
1440        .p64 => @alignOf(elf.Elf64_Shdr),
1441    };
1442
1443    const shoff = self.shdr_table_offset orelse 0;
1444    const needed_size = self.sections.items(.shdr).len * shsize;
1445
1446    if (needed_size > self.allocatedSize(shoff)) {
1447        self.shdr_table_offset = null;
1448        self.shdr_table_offset = try self.findFreeSpace(needed_size, shalign);
1449    }
1450
1451    log.debug("writing section headers from 0x{x} to 0x{x}", .{
1452        self.shdr_table_offset.?,
1453        self.shdr_table_offset.? + needed_size,
1454    });
1455
1456    switch (self.ptr_width) {
1457        .p32 => {
1458            const buf = try gpa.alloc(elf.Elf32_Shdr, self.sections.items(.shdr).len);
1459            defer gpa.free(buf);
1460
1461            for (buf, 0..) |*shdr, i| {
1462                assert(self.sections.items(.shdr)[i].sh_offset != math.maxInt(u64));
1463                shdr.* = shdrTo32(self.sections.items(.shdr)[i]);
1464                if (foreign_endian) {
1465                    mem.byteSwapAllFields(elf.Elf32_Shdr, shdr);
1466                }
1467            }
1468            try self.pwriteAll(@ptrCast(buf), self.shdr_table_offset.?);
1469        },
1470        .p64 => {
1471            const buf = try gpa.alloc(elf.Elf64_Shdr, self.sections.items(.shdr).len);
1472            defer gpa.free(buf);
1473
1474            for (buf, 0..) |*shdr, i| {
1475                assert(self.sections.items(.shdr)[i].sh_offset != math.maxInt(u64));
1476                shdr.* = self.sections.items(.shdr)[i];
1477                if (foreign_endian) {
1478                    mem.byteSwapAllFields(elf.Elf64_Shdr, shdr);
1479                }
1480            }
1481            try self.pwriteAll(@ptrCast(buf), self.shdr_table_offset.?);
1482        },
1483    }
1484}
1485
1486fn writePhdrTable(self: *Elf) !void {
1487    const gpa = self.base.comp.gpa;
1488    const target_endian = self.getTarget().cpu.arch.endian();
1489    const foreign_endian = target_endian != builtin.cpu.arch.endian();
1490    const phdr_table = &self.phdrs.items[self.phdr_indexes.table.int().?];
1491
1492    log.debug("writing program headers from 0x{x} to 0x{x}", .{
1493        phdr_table.p_offset,
1494        phdr_table.p_offset + phdr_table.p_filesz,
1495    });
1496
1497    switch (self.ptr_width) {
1498        .p32 => {
1499            const buf = try gpa.alloc(elf.Elf32_Phdr, self.phdrs.items.len);
1500            defer gpa.free(buf);
1501
1502            for (buf, 0..) |*phdr, i| {
1503                phdr.* = phdrTo32(self.phdrs.items[i]);
1504                if (foreign_endian) {
1505                    mem.byteSwapAllFields(elf.Elf32_Phdr, phdr);
1506                }
1507            }
1508            try self.pwriteAll(@ptrCast(buf), phdr_table.p_offset);
1509        },
1510        .p64 => {
1511            const buf = try gpa.alloc(elf.Elf64_Phdr, self.phdrs.items.len);
1512            defer gpa.free(buf);
1513
1514            for (buf, 0..) |*phdr, i| {
1515                phdr.* = self.phdrs.items[i];
1516                if (foreign_endian) {
1517                    mem.byteSwapAllFields(elf.Elf64_Phdr, phdr);
1518                }
1519            }
1520            try self.pwriteAll(@ptrCast(buf), phdr_table.p_offset);
1521        },
1522    }
1523}
1524
1525pub fn writeElfHeader(self: *Elf) !void {
1526    const diags = &self.base.comp.link_diags;
1527    if (diags.hasErrors()) return; // We had errors, so skip flushing; this intentionally leaves the output unusable.
1528
1529    const comp = self.base.comp;
1530    var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 = undefined;
1531
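    // The first 16 bytes written below form e_ident: magic, class, data encoding, ELF version, OS/ABI, and padding.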
1532    var index: usize = 0;
1533    hdr_buf[0..4].* = elf.MAGIC.*;
1534    index += 4;
1535
1536    hdr_buf[index] = switch (self.ptr_width) {
1537        .p32 => elf.ELFCLASS32,
1538        .p64 => elf.ELFCLASS64,
1539    };
1540    index += 1;
1541
1542    const target = self.getTarget();
1543    const endian = target.cpu.arch.endian();
1544    hdr_buf[index] = switch (endian) {
1545        .little => elf.ELFDATA2LSB,
1546        .big => elf.ELFDATA2MSB,
1547    };
1548    index += 1;
1549
1550    hdr_buf[index] = 1; // ELF version
1551    index += 1;
1552
1553    hdr_buf[index] = @intFromEnum(@as(elf.OSABI, switch (target.cpu.arch) {
1554        .amdgcn => switch (target.os.tag) {
1555            .amdhsa => .AMDGPU_HSA,
1556            .amdpal => .AMDGPU_PAL,
1557            .mesa3d => .AMDGPU_MESA3D,
1558            else => .NONE,
1559        },
1560        .msp430 => .STANDALONE,
1561        else => switch (target.os.tag) {
1562            .freebsd, .ps4 => .FREEBSD,
1563            .hermit => .STANDALONE,
1564            .illumos => .SOLARIS,
1565            .openbsd => .OPENBSD,
1566            else => .NONE,
1567        },
1568    }));
1569    index += 1;
1570
1571    // ABI Version, possibly used by glibc but not by static executables
1572    // padding
1573    @memset(hdr_buf[index..][0..8], 0);
1574    index += 8;
1575
1576    assert(index == 16);
1577
1578    const output_mode = comp.config.output_mode;
1579    const link_mode = comp.config.link_mode;
1580    const elf_type: elf.ET = switch (output_mode) {
1581        .Exe => if (comp.config.pie or target.os.tag == .haiku) .DYN else .EXEC,
1582        .Obj => .REL,
1583        .Lib => switch (link_mode) {
1584            .static => @as(elf.ET, .REL),
1585            .dynamic => .DYN,
1586        },
1587    };
1588    mem.writeInt(u16, hdr_buf[index..][0..2], @intFromEnum(elf_type), endian);
1589    index += 2;
1590
1591    const machine = target.toElfMachine();
1592    mem.writeInt(u16, hdr_buf[index..][0..2], @intFromEnum(machine), endian);
1593    index += 2;
1594
1595    // ELF Version, again
1596    mem.writeInt(u32, hdr_buf[index..][0..4], 1, endian);
1597    index += 4;
1598
1599    const e_entry: u64 = if (self.linkerDefinedPtr()) |obj| blk: {
1600        const entry_sym = obj.entrySymbol(self) orelse break :blk 0;
1601        break :blk @intCast(entry_sym.address(.{}, self));
1602    } else 0;
1603    const phdr_table_offset = if (self.phdr_indexes.table.int()) |phndx| self.phdrs.items[phndx].p_offset else 0;
1604    switch (self.ptr_width) {
1605        .p32 => {
1606            mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(e_entry), endian);
1607            index += 4;
1608
1609            // e_phoff
1610            mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(phdr_table_offset), endian);
1611            index += 4;
1612
1613            // e_shoff
1614            mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(self.shdr_table_offset.?), endian);
1615            index += 4;
1616        },
1617        .p64 => {
1618            // e_entry
1619            mem.writeInt(u64, hdr_buf[index..][0..8], e_entry, endian);
1620            index += 8;
1621
1622            // e_phoff
1623            mem.writeInt(u64, hdr_buf[index..][0..8], phdr_table_offset, endian);
1624            index += 8;
1625
1626            // e_shoff
1627            mem.writeInt(u64, hdr_buf[index..][0..8], self.shdr_table_offset.?, endian);
1628            index += 8;
1629        },
1630    }
1631
1632    const e_flags = 0;
1633    mem.writeInt(u32, hdr_buf[index..][0..4], e_flags, endian);
1634    index += 4;
1635
1636    const e_ehsize: u16 = switch (self.ptr_width) {
1637        .p32 => @sizeOf(elf.Elf32_Ehdr),
1638        .p64 => @sizeOf(elf.Elf64_Ehdr),
1639    };
1640    mem.writeInt(u16, hdr_buf[index..][0..2], e_ehsize, endian);
1641    index += 2;
1642
1643    const e_phentsize: u16 = switch (self.ptr_width) {
1644        .p32 => @sizeOf(elf.Elf32_Phdr),
1645        .p64 => @sizeOf(elf.Elf64_Phdr),
1646    };
1647    mem.writeInt(u16, hdr_buf[index..][0..2], e_phentsize, endian);
1648    index += 2;
1649
1650    const e_phnum = @as(u16, @intCast(self.phdrs.items.len));
1651    mem.writeInt(u16, hdr_buf[index..][0..2], e_phnum, endian);
1652    index += 2;
1653
1654    const e_shentsize: u16 = switch (self.ptr_width) {
1655        .p32 => @sizeOf(elf.Elf32_Shdr),
1656        .p64 => @sizeOf(elf.Elf64_Shdr),
1657    };
1658    mem.writeInt(u16, hdr_buf[index..][0..2], e_shentsize, endian);
1659    index += 2;
1660
1661    const e_shnum: u16 = @intCast(self.sections.items(.shdr).len);
1662    mem.writeInt(u16, hdr_buf[index..][0..2], e_shnum, endian);
1663    index += 2;
1664
1665    mem.writeInt(u16, hdr_buf[index..][0..2], @intCast(self.section_indexes.shstrtab.?), endian);
1666    index += 2;
1667
1668    assert(index == e_ehsize);
1669
1670    try self.pwriteAll(hdr_buf[0..index], 0);
1671}
1672
1673pub fn freeNav(self: *Elf, nav: InternPool.Nav.Index) void {
1674    return self.zigObjectPtr().?.freeNav(self, nav);
1675}
1676
1677pub fn updateFunc(
1678    self: *Elf,
1679    pt: Zcu.PerThread,
1680    func_index: InternPool.Index,
1681    mir: *const codegen.AnyMir,
1682) link.File.UpdateNavError!void {
1683    if (build_options.skip_non_native and builtin.object_format != .elf) {
1684        @panic("Attempted to compile for object format that was disabled by build configuration");
1685    }
1686    return self.zigObjectPtr().?.updateFunc(self, pt, func_index, mir);
1687}
1688
1689pub fn updateNav(
1690    self: *Elf,
1691    pt: Zcu.PerThread,
1692    nav: InternPool.Nav.Index,
1693) link.File.UpdateNavError!void {
1694    if (build_options.skip_non_native and builtin.object_format != .elf) {
1695        @panic("Attempted to compile for object format that was disabled by build configuration");
1696    }
1697    return self.zigObjectPtr().?.updateNav(self, pt, nav);
1698}
1699
1700pub fn updateContainerType(
1701    self: *Elf,
1702    pt: Zcu.PerThread,
1703    ty: InternPool.Index,
1704) link.File.UpdateContainerTypeError!void {
1705    if (build_options.skip_non_native and builtin.object_format != .elf) {
1706        @panic("Attempted to compile for object format that was disabled by build configuration");
1707    }
1708    const zcu = pt.zcu;
1709    const gpa = zcu.gpa;
1710    return self.zigObjectPtr().?.updateContainerType(pt, ty) catch |err| switch (err) {
1711        error.OutOfMemory => return error.OutOfMemory,
1712        else => |e| {
1713            try zcu.failed_types.putNoClobber(gpa, ty, try Zcu.ErrorMsg.create(
1714                gpa,
1715                zcu.typeSrcLoc(ty),
1716                "failed to update container type: {s}",
1717                .{@errorName(e)},
1718            ));
1719            return error.TypeFailureReported;
1720        },
1721    };
1722}
1723
1724pub fn updateExports(
1725    self: *Elf,
1726    pt: Zcu.PerThread,
1727    exported: Zcu.Exported,
1728    export_indices: []const Zcu.Export.Index,
1729) link.File.UpdateExportsError!void {
1730    if (build_options.skip_non_native and builtin.object_format != .elf) {
1731        @panic("Attempted to compile for object format that was disabled by build configuration");
1732    }
1733    return self.zigObjectPtr().?.updateExports(self, pt, exported, export_indices);
1734}
1735
1736pub fn updateLineNumber(self: *Elf, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void {
1737    return self.zigObjectPtr().?.updateLineNumber(pt, ti_id);
1738}
1739
1740pub fn deleteExport(
1741    self: *Elf,
1742    exported: Zcu.Exported,
1743    name: InternPool.NullTerminatedString,
1744) void {
1745    return self.zigObjectPtr().?.deleteExport(self, exported, name);
1746}
1747
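/// Collects symbols defined in more than one input file (the ZigObject and all input objects)
/// and reports them as duplicate symbol errors.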
1748fn checkDuplicates(self: *Elf) !void {
1749    const gpa = self.base.comp.gpa;
1750
1751    var dupes = std.AutoArrayHashMap(SymbolResolver.Index, std.ArrayList(File.Index)).init(gpa);
1752    defer {
1753        for (dupes.values()) |*list| {
1754            list.deinit(gpa);
1755        }
1756        dupes.deinit();
1757    }
1758
1759    if (self.zigObjectPtr()) |zig_object| {
1760        try zig_object.checkDuplicates(&dupes, self);
1761    }
1762    for (self.objects.items) |index| {
1763        try self.file(index).?.object.checkDuplicates(&dupes, self);
1764    }
1765
1766    try self.reportDuplicates(dupes);
1767}
1768
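/// Creates (or reuses) the `.comment` merge section and inserts the "zig <version>"
/// identification string into it as a deduplicated merge subsection.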
1769pub fn addCommentString(self: *Elf) !void {
1770    const gpa = self.base.comp.gpa;
1771    if (self.comment_merge_section_index != null) return;
1772    const msec_index = try self.getOrCreateMergeSection(".comment", elf.SHF_MERGE | elf.SHF_STRINGS, elf.SHT_PROGBITS);
1773    const msec = self.mergeSection(msec_index);
1774    const res = try msec.insertZ(gpa, "zig " ++ builtin.zig_version_string);
1775    if (res.found_existing) return;
1776    const msub_index = try msec.addMergeSubsection(gpa);
1777    const msub = msec.mergeSubsection(msub_index);
1778    msub.merge_section_index = msec_index;
1779    msub.string_index = res.key.pos;
1780    msub.alignment = .@"1";
1781    msub.size = res.key.len;
1782    msub.entsize = 1;
1783    msub.alive = true;
1784    res.sub.* = msub_index;
1785    self.comment_merge_section_index = msec_index;
1786}
1787
1788pub fn resolveMergeSections(self: *Elf) !void {
1789    const tracy = trace(@src());
1790    defer tracy.end();
1791
1792    var has_errors = false;
1793    for (self.objects.items) |index| {
1794        const object = self.file(index).?.object;
1795        if (!object.alive) continue;
1796        if (!object.dirty) continue;
1797        object.initInputMergeSections(self) catch |err| switch (err) {
1798            error.LinkFailure => has_errors = true,
1799            else => |e| return e,
1800        };
1801    }
1802
1803    if (has_errors) return error.LinkFailure;
1804
1805    for (self.objects.items) |index| {
1806        const object = self.file(index).?.object;
1807        if (!object.alive) continue;
1808        if (!object.dirty) continue;
1809        try object.initOutputMergeSections(self);
1810    }
1811
1812    for (self.objects.items) |index| {
1813        const object = self.file(index).?.object;
1814        if (!object.alive) continue;
1815        if (!object.dirty) continue;
1816        object.resolveMergeSubsections(self) catch |err| switch (err) {
1817            error.LinkFailure => has_errors = true,
1818            else => |e| return e,
1819        };
1820    }
1821
1822    if (has_errors) return error.LinkFailure;
1823}
1824
1825pub fn finalizeMergeSections(self: *Elf) !void {
1826    for (self.merge_sections.items) |*msec| {
1827        try msec.finalize(self.base.comp.gpa);
1828    }
1829}
1830
1831pub fn updateMergeSectionSizes(self: *Elf) !void {
1832    for (self.merge_sections.items) |*msec| {
1833        msec.updateSize();
1834    }
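    // Append each merge section to its output section at the next suitably aligned offset,
    // growing sh_size and updating sh_addralign/sh_entsize accordingly.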
1835    for (self.merge_sections.items) |*msec| {
1836        const shdr = &self.sections.items(.shdr)[msec.output_section_index];
1837        const offset = msec.alignment.forward(shdr.sh_size);
1838        const padding = offset - shdr.sh_size;
1839        msec.value = @intCast(offset);
1840        shdr.sh_size += padding + msec.size;
1841        shdr.sh_addralign = @max(shdr.sh_addralign, msec.alignment.toByteUnits() orelse 1);
1842        shdr.sh_entsize = if (shdr.sh_entsize == 0) msec.entsize else @min(shdr.sh_entsize, msec.entsize);
1843    }
1844}
1845
1846pub fn writeMergeSections(self: *Elf) !void {
1847    const gpa = self.base.comp.gpa;
1848    var buffer = std.array_list.Managed(u8).init(gpa);
1849    defer buffer.deinit();
1850
1851    for (self.merge_sections.items) |*msec| {
1852        const shdr = self.sections.items(.shdr)[msec.output_section_index];
1853        const fileoff = try self.cast(usize, msec.value + shdr.sh_offset);
1854        const size = try self.cast(usize, msec.size);
1855        try buffer.ensureTotalCapacity(size);
1856        buffer.appendNTimesAssumeCapacity(0, size);
1857
1858        for (msec.finalized_subsections.items) |msub_index| {
1859            const msub = msec.mergeSubsection(msub_index);
1860            assert(msub.alive);
1861            const string = msub.getString(self);
1862            const off = try self.cast(usize, msub.value);
1863            @memcpy(buffer.items[off..][0..string.len], string);
1864        }
1865
1866        try self.pwriteAll(buffer.items, fileoff);
1867        buffer.clearRetainingCapacity();
1868    }
1869}
1870
1871fn initOutputSections(self: *Elf) !void {
1872    for (self.objects.items) |index| {
1873        try self.file(index).?.object.initOutputSections(self);
1874    }
1875    for (self.merge_sections.items) |*msec| {
1876        if (msec.finalized_subsections.items.len == 0) continue;
1877        try msec.initOutputSection(self);
1878    }
1879}
1880
1881fn initSyntheticSections(self: *Elf) !void {
1882    const comp = self.base.comp;
1883    const target = self.getTarget();
1884    const ptr_size = self.ptrWidthBytes();
1885
1886    const is_exe_or_dyn_lib = switch (comp.config.output_mode) {
1887        .Exe => true,
1888        .Lib => comp.config.link_mode == .dynamic,
1889        .Obj => false,
1890    };
1891    const have_dynamic_linker = comp.config.link_mode == .dynamic and is_exe_or_dyn_lib;
1892
1893    const needs_eh_frame = blk: {
1894        if (self.zigObjectPtr()) |zo|
1895            if (zo.eh_frame_index != null) break :blk true;
1896        break :blk for (self.objects.items) |index| {
1897            if (self.file(index).?.object.cies.items.len > 0) break true;
1898        } else false;
1899    };
1900
1901    if (needs_eh_frame) {
1902        if (self.section_indexes.eh_frame == null) {
1903            self.section_indexes.eh_frame = self.sectionByName(".eh_frame") orelse try self.addSection(.{
1904                .name = try self.insertShString(".eh_frame"),
1905                .type = if (target.cpu.arch == .x86_64)
1906                    elf.SHT_X86_64_UNWIND
1907                else
1908                    elf.SHT_PROGBITS,
1909                .flags = elf.SHF_ALLOC,
1910                .addralign = ptr_size,
1911            });
1912        }
1913        if (comp.link_eh_frame_hdr and self.section_indexes.eh_frame_hdr == null) {
1914            self.section_indexes.eh_frame_hdr = try self.addSection(.{
1915                .name = try self.insertShString(".eh_frame_hdr"),
1916                .type = elf.SHT_PROGBITS,
1917                .flags = elf.SHF_ALLOC,
1918                .addralign = 4,
1919            });
1920        }
1921    }
1922
1923    if (self.got.entries.items.len > 0 and self.section_indexes.got == null) {
1924        self.section_indexes.got = try self.addSection(.{
1925            .name = try self.insertShString(".got"),
1926            .type = elf.SHT_PROGBITS,
1927            .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
1928            .addralign = ptr_size,
1929        });
1930    }
1931
1932    if (have_dynamic_linker) {
1933        if (self.section_indexes.got_plt == null) {
1934            self.section_indexes.got_plt = try self.addSection(.{
1935                .name = try self.insertShString(".got.plt"),
1936                .type = elf.SHT_PROGBITS,
1937                .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
1938                .addralign = @alignOf(u64),
1939            });
1940        }
1941    } else {
1942        assert(self.plt.symbols.items.len == 0);
1943    }
1944
1945    const needs_rela_dyn = blk: {
1946        if (self.got.flags.needs_rela or self.got.flags.needs_tlsld or self.copy_rel.symbols.items.len > 0)
1947            break :blk true;
1948        if (self.zigObjectPtr()) |zig_object| {
1949            if (zig_object.num_dynrelocs > 0) break :blk true;
1950        }
1951        for (self.objects.items) |index| {
1952            if (self.file(index).?.object.num_dynrelocs > 0) break :blk true;
1953        }
1954        break :blk false;
1955    };
1956    if (needs_rela_dyn and self.section_indexes.rela_dyn == null) {
1957        self.section_indexes.rela_dyn = try self.addSection(.{
1958            .name = try self.insertShString(".rela.dyn"),
1959            .type = elf.SHT_RELA,
1960            .flags = elf.SHF_ALLOC,
1961            .addralign = @alignOf(elf.Elf64_Rela),
1962            .entsize = @sizeOf(elf.Elf64_Rela),
1963        });
1964    }
1965
1966    if (self.plt.symbols.items.len > 0) {
1967        if (self.section_indexes.plt == null) {
1968            self.section_indexes.plt = try self.addSection(.{
1969                .name = try self.insertShString(".plt"),
1970                .type = elf.SHT_PROGBITS,
1971                .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
1972                .addralign = 16,
1973            });
1974        }
1975        if (self.section_indexes.rela_plt == null) {
1976            self.section_indexes.rela_plt = try self.addSection(.{
1977                .name = try self.insertShString(".rela.plt"),
1978                .type = elf.SHT_RELA,
1979                .flags = elf.SHF_ALLOC,
1980                .addralign = @alignOf(elf.Elf64_Rela),
1981                .entsize = @sizeOf(elf.Elf64_Rela),
1982            });
1983        }
1984    }
1985
1986    if (self.plt_got.symbols.items.len > 0 and self.section_indexes.plt_got == null) {
1987        self.section_indexes.plt_got = try self.addSection(.{
1988            .name = try self.insertShString(".plt.got"),
1989            .type = elf.SHT_PROGBITS,
1990            .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
1991            .addralign = 16,
1992        });
1993    }
1994
1995    if (self.copy_rel.symbols.items.len > 0 and self.section_indexes.copy_rel == null) {
1996        self.section_indexes.copy_rel = try self.addSection(.{
1997            .name = try self.insertShString(".copyrel"),
1998            .type = elf.SHT_NOBITS,
1999            .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
2000        });
2001    }
2002
2003    if (needs_interp: {
2004        if (comp.config.link_mode == .static) break :needs_interp false;
2005        if (target.dynamic_linker.get() == null) break :needs_interp false;
2006        break :needs_interp switch (comp.config.output_mode) {
2007            .Exe => true,
2008            .Lib => comp.root_mod.resolved_target.is_explicit_dynamic_linker,
2009            .Obj => false,
2010        };
2011    } and self.section_indexes.interp == null) {
2012        self.section_indexes.interp = try self.addSection(.{
2013            .name = try self.insertShString(".interp"),
2014            .type = elf.SHT_PROGBITS,
2015            .flags = elf.SHF_ALLOC,
2016            .addralign = 1,
2017        });
2018    }
2019
2020    if (have_dynamic_linker or comp.config.pie or self.isEffectivelyDynLib()) {
2021        if (self.section_indexes.dynstrtab == null) {
2022            self.section_indexes.dynstrtab = try self.addSection(.{
2023                .name = try self.insertShString(".dynstr"),
2024                .flags = elf.SHF_ALLOC,
2025                .type = elf.SHT_STRTAB,
2026                .entsize = 1,
2027                .addralign = 1,
2028            });
2029        }
2030        if (self.section_indexes.dynamic == null) {
2031            self.section_indexes.dynamic = try self.addSection(.{
2032                .name = try self.insertShString(".dynamic"),
2033                .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
2034                .type = elf.SHT_DYNAMIC,
2035                .entsize = @sizeOf(elf.Elf64_Dyn),
2036                .addralign = @alignOf(elf.Elf64_Dyn),
2037            });
2038        }
2039        if (self.section_indexes.dynsymtab == null) {
2040            self.section_indexes.dynsymtab = try self.addSection(.{
2041                .name = try self.insertShString(".dynsym"),
2042                .flags = elf.SHF_ALLOC,
2043                .type = elf.SHT_DYNSYM,
2044                .addralign = @alignOf(elf.Elf64_Sym),
2045                .entsize = @sizeOf(elf.Elf64_Sym),
2046                .info = 1,
2047            });
2048        }
2049        if (self.section_indexes.hash == null) {
2050            self.section_indexes.hash = try self.addSection(.{
2051                .name = try self.insertShString(".hash"),
2052                .flags = elf.SHF_ALLOC,
2053                .type = elf.SHT_HASH,
2054                .addralign = 4,
2055                .entsize = 4,
2056            });
2057        }
2058        if (self.section_indexes.gnu_hash == null) {
2059            self.section_indexes.gnu_hash = try self.addSection(.{
2060                .name = try self.insertShString(".gnu.hash"),
2061                .flags = elf.SHF_ALLOC,
2062                .type = elf.SHT_GNU_HASH,
2063                .addralign = 8,
2064            });
2065        }
2066
2067        const needs_versions = for (self.dynsym.entries.items) |entry| {
2068            const sym = self.symbol(entry.ref).?;
2069            if (sym.flags.import and sym.version_index.VERSION > elf.Versym.GLOBAL.VERSION) break true;
2070        } else false;
2071        if (needs_versions) {
2072            if (self.section_indexes.versym == null) {
2073                self.section_indexes.versym = try self.addSection(.{
2074                    .name = try self.insertShString(".gnu.version"),
2075                    .flags = elf.SHF_ALLOC,
2076                    .type = elf.SHT_GNU_VERSYM,
2077                    .addralign = @alignOf(elf.Versym),
2078                    .entsize = @sizeOf(elf.Versym),
2079                });
2080            }
2081            if (self.section_indexes.verneed == null) {
2082                self.section_indexes.verneed = try self.addSection(.{
2083                    .name = try self.insertShString(".gnu.version_r"),
2084                    .flags = elf.SHF_ALLOC,
2085                    .type = elf.SHT_GNU_VERNEED,
2086                    .addralign = @alignOf(elf.Elf64_Verneed),
2087                });
2088            }
2089        }
2090    }
2091
2092    try self.initSymtab();
2093    try self.initShStrtab();
2094}
2095
2096pub fn initSymtab(self: *Elf) !void {
2097    const small_ptr = switch (self.ptr_width) {
2098        .p32 => true,
2099        .p64 => false,
2100    };
2101    if (self.section_indexes.symtab == null) {
2102        self.section_indexes.symtab = try self.addSection(.{
2103            .name = try self.insertShString(".symtab"),
2104            .type = elf.SHT_SYMTAB,
2105            .addralign = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym),
2106            .entsize = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym),
2107        });
2108    }
2109    if (self.section_indexes.strtab == null) {
2110        self.section_indexes.strtab = try self.addSection(.{
2111            .name = try self.insertShString(".strtab"),
2112            .type = elf.SHT_STRTAB,
2113            .entsize = 1,
2114            .addralign = 1,
2115        });
2116    }
2117}
2118
2119pub fn initShStrtab(self: *Elf) !void {
2120    if (self.section_indexes.shstrtab == null) {
2121        self.section_indexes.shstrtab = try self.addSection(.{
2122            .name = try self.insertShString(".shstrtab"),
2123            .type = elf.SHT_STRTAB,
2124            .entsize = 1,
2125            .addralign = 1,
2126        });
2127    }
2128}
2129
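/// Adds the special, non-LOAD program headers as needed: PT_INTERP, PT_DYNAMIC,
/// PT_GNU_EH_FRAME, PT_GNU_STACK, and PT_TLS (when any TLS section is present).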
2130fn initSpecialPhdrs(self: *Elf) !void {
2131    comptime assert(max_number_of_special_phdrs == 5);
2132
2133    if (self.section_indexes.interp != null and self.phdr_indexes.interp == .none) {
2134        self.phdr_indexes.interp = (try self.addPhdr(.{
2135            .type = elf.PT_INTERP,
2136            .flags = elf.PF_R,
2137            .@"align" = 1,
2138        })).toOptional();
2139    }
2140    if (self.section_indexes.dynamic != null and self.phdr_indexes.dynamic == .none) {
2141        self.phdr_indexes.dynamic = (try self.addPhdr(.{
2142            .type = elf.PT_DYNAMIC,
2143            .flags = elf.PF_R | elf.PF_W,
2144        })).toOptional();
2145    }
2146    if (self.section_indexes.eh_frame_hdr != null and self.phdr_indexes.gnu_eh_frame == .none) {
2147        self.phdr_indexes.gnu_eh_frame = (try self.addPhdr(.{
2148            .type = elf.PT_GNU_EH_FRAME,
2149            .flags = elf.PF_R,
2150        })).toOptional();
2151    }
2152    if (self.phdr_indexes.gnu_stack == .none) {
2153        self.phdr_indexes.gnu_stack = (try self.addPhdr(.{
2154            .type = elf.PT_GNU_STACK,
2155            .flags = elf.PF_W | elf.PF_R,
2156            .memsz = self.base.stack_size,
2157            .@"align" = 1,
2158        })).toOptional();
2159    }
2160
2161    const has_tls = for (self.sections.items(.shdr)) |shdr| {
2162        if (shdr.sh_flags & elf.SHF_TLS != 0) break true;
2163    } else false;
2164    if (has_tls and self.phdr_indexes.tls == .none) {
2165        self.phdr_indexes.tls = (try self.addPhdr(.{
2166            .type = elf.PT_TLS,
2167            .flags = elf.PF_R,
2168            .@"align" = 1,
2169        })).toOptional();
2170    }
2171}
2172
2173/// We need to sort constructors/destructors in the following sections:
2174/// * .init_array
2175/// * .fini_array
2176/// * .preinit_array
2177/// * .ctors
2178/// * .dtors
2179/// The priority of inclusion is encoded as a suffix of the input section's name, for example, .init_array.10000.
2180/// If no priority value has been specified,
2181/// * for .init_array, .fini_array and .preinit_array, we automatically assign that section the maximum value of maxInt(i32)
2182///   and push it to the back of the queue,
2183/// * for .ctors and .dtors, we automatically assign that section the minimum value of -1
2184///   and push it to the front of the queue.
2185/// crtbegin and crtend are assigned minInt(i32) and maxInt(i32) respectively.
2186/// Ties are broken by the file priority, which corresponds to the order in which input sections were added
2187/// to the output section we are about to sort.
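/// For example (an illustrative sketch): given input sections .init_array.101, .init_array.65535, and a plain
/// .init_array with no priority suffix, the resulting order is .init_array.101, .init_array.65535, .init_array,
/// since the suffix-less section receives the default priority of maxInt(i32) and entries are sorted ascending.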
2188fn sortInitFini(self: *Elf) !void {
2189    const gpa = self.base.comp.gpa;
2190    const slice = self.sections.slice();
2191
2192    const Entry = struct {
2193        priority: i32,
2194        atom_ref: Ref,
2195
2196        pub fn lessThan(ctx: *Elf, lhs: @This(), rhs: @This()) bool {
2197            if (lhs.priority == rhs.priority) {
2198                return ctx.atom(lhs.atom_ref).?.priority(ctx) < ctx.atom(rhs.atom_ref).?.priority(ctx);
2199            }
2200            return lhs.priority < rhs.priority;
2201        }
2202    };
2203
2204    for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| {
2205        if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
2206        if (atom_list.atoms.keys().len == 0) continue;
2207
2208        var is_init_fini = false;
2209        var is_ctor_dtor = false;
2210        switch (shdr.sh_type) {
2211            elf.SHT_PREINIT_ARRAY,
2212            elf.SHT_INIT_ARRAY,
2213            elf.SHT_FINI_ARRAY,
2214            => is_init_fini = true,
2215            else => {
2216                const name = self.getShString(shdr.sh_name);
2217                is_ctor_dtor = mem.indexOf(u8, name, ".ctors") != null or mem.indexOf(u8, name, ".dtors") != null;
2218            },
2219        }
2220        if (!is_init_fini and !is_ctor_dtor) continue;
2221
2222        var entries = std.array_list.Managed(Entry).init(gpa);
2223        try entries.ensureTotalCapacityPrecise(atom_list.atoms.keys().len);
2224        defer entries.deinit();
2225
2226        for (atom_list.atoms.keys()) |ref| {
2227            const atom_ptr = self.atom(ref).?;
2228            const object = atom_ptr.file(self).?.object;
2229            const priority = blk: {
2230                if (is_ctor_dtor) {
2231                    const basename = object.path.basename();
2232                    if (mem.eql(u8, basename, "crtbegin.o")) break :blk std.math.minInt(i32);
2233                    if (mem.eql(u8, basename, "crtend.o")) break :blk std.math.maxInt(i32);
2234                }
2235                const default: i32 = if (is_ctor_dtor) -1 else std.math.maxInt(i32);
2236                const name = atom_ptr.name(self);
2237                var it = mem.splitBackwardsScalar(u8, name, '.');
2238                const priority = std.fmt.parseUnsigned(u16, it.first(), 10) catch default;
2239                break :blk priority;
2240            };
2241            entries.appendAssumeCapacity(.{ .priority = priority, .atom_ref = ref });
2242        }
2243
2244        mem.sort(Entry, entries.items, self, Entry.lessThan);
2245
2246        atom_list.atoms.clearRetainingCapacity();
2247        for (entries.items) |entry| {
2248            _ = atom_list.atoms.getOrPutAssumeCapacity(entry.atom_ref);
2249        }
2250    }
2251}
2252
2253fn setDynamicSection(self: *Elf, rpaths: []const []const u8) !void {
2254    if (self.section_indexes.dynamic == null) return;
2255
2256    const shared_objects = self.shared_objects.values();
2257
2258    for (shared_objects) |index| {
2259        const shared_object = self.file(index).?.shared_object;
2260        if (!shared_object.alive) continue;
2261        try self.dynamic.addNeeded(shared_object, self);
2262    }
2263
2264    if (self.isEffectivelyDynLib()) {
2265        if (self.soname) |soname| {
2266            try self.dynamic.setSoname(soname, self);
2267        }
2268    }
2269
2270    try self.dynamic.setRpath(rpaths, self);
2271}
2272
2273fn sortDynamicSymtab(self: *Elf) void {
2274    if (self.section_indexes.gnu_hash == null) return;
2275    self.dynsym.sort(self);
2276}
2277
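/// Fills the .gnu.version table with one entry per dynamic symbol (index 0 is reserved as LOCAL)
/// and regenerates the .gnu.version_r (verneed) section.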
2278fn setVersionSymtab(self: *Elf) !void {
2279    const gpa = self.base.comp.gpa;
2280    if (self.section_indexes.versym == null) return;
2281    try self.versym.resize(gpa, self.dynsym.count());
2282    self.versym.items[0] = .LOCAL;
2283    for (self.dynsym.entries.items, 1..) |entry, i| {
2284        const sym = self.symbol(entry.ref).?;
2285        self.versym.items[i] = sym.version_index;
2286    }
2287
2288    if (self.section_indexes.verneed) |shndx| {
2289        try self.verneed.generate(self);
2290        const shdr = &self.sections.items(.shdr)[shndx];
2291        shdr.sh_info = @as(u32, @intCast(self.verneed.verneed.items.len));
2292    }
2293}
2294
2295fn setHashSections(self: *Elf) !void {
2296    if (self.section_indexes.hash != null) {
2297        try self.hash.generate(self);
2298    }
2299    if (self.section_indexes.gnu_hash != null) {
2300        try self.gnu_hash.calcSize(self);
2301    }
2302}
2303
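/// Assigns each program header a rank used for sorting, so that PT_PHDR and PT_INTERP precede the
/// PT_LOAD segments, which in turn precede PT_DYNAMIC/PT_TLS, PT_GNU_EH_FRAME, and PT_GNU_STACK.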
2304fn phdrRank(phdr: elf.Elf64_Phdr) u8 {
2305    return switch (phdr.p_type) {
2306        elf.PT_NULL => 0,
2307        elf.PT_PHDR => 1,
2308        elf.PT_INTERP => 2,
2309        elf.PT_LOAD => 3,
2310        elf.PT_DYNAMIC, elf.PT_TLS => 4,
2311        elf.PT_GNU_EH_FRAME => 5,
2312        elf.PT_GNU_STACK => 6,
2313        else => 7,
2314    };
2315}
2316
2317fn sortPhdrs(
2318    gpa: Allocator,
2319    phdrs: *ProgramHeaderList,
2320    special_indexes: *ProgramHeaderIndexes,
2321    section_indexes: []OptionalProgramHeaderIndex,
2322) error{OutOfMemory}!void {
2323    const Entry = struct {
2324        phndx: u16,
2325
2326        pub fn lessThan(program_headers: []const elf.Elf64_Phdr, lhs: @This(), rhs: @This()) bool {
2327            const lhs_phdr = program_headers[lhs.phndx];
2328            const rhs_phdr = program_headers[rhs.phndx];
2329            const lhs_rank = phdrRank(lhs_phdr);
2330            const rhs_rank = phdrRank(rhs_phdr);
2331            if (lhs_rank == rhs_rank) return lhs_phdr.p_vaddr < rhs_phdr.p_vaddr;
2332            return lhs_rank < rhs_rank;
2333        }
2334    };
2335
2336    const entries = try gpa.alloc(Entry, phdrs.items.len);
2337    defer gpa.free(entries);
2338    for (entries, 0..) |*entry, phndx| {
2339        entry.* = .{ .phndx = @intCast(phndx) };
2340    }
2341
2342    // The `@as` here works around a bug in the C backend.
2343    mem.sort(Entry, entries, @as([]const elf.Elf64_Phdr, phdrs.items), Entry.lessThan);
2344
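    // backlinks maps each pre-sort phdr index to its post-sort position so that the special
    // and per-section phdr indexes can be remapped below.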
2345    const backlinks = try gpa.alloc(u16, entries.len);
2346    defer gpa.free(backlinks);
2347    const slice = try phdrs.toOwnedSlice(gpa);
2348    defer gpa.free(slice);
2349    try phdrs.resize(gpa, slice.len);
2350
2351    for (entries, phdrs.items, 0..) |entry, *phdr, i| {
2352        backlinks[entry.phndx] = @intCast(i);
2353        phdr.* = slice[entry.phndx];
2354    }
2355
2356    inline for (@typeInfo(ProgramHeaderIndexes).@"struct".fields) |field| {
2357        if (@field(special_indexes, field.name).int()) |special_index| {
2358            @field(special_indexes, field.name) = @enumFromInt(backlinks[special_index]);
2359        }
2360    }
2361
2362    for (section_indexes) |*opt_phndx| {
2363        if (opt_phndx.int()) |index| {
2364            opt_phndx.* = @enumFromInt(backlinks[index]);
2365        }
2366    }
2367}
2368
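/// Assigns each section header a rank used for sorting the section header table: .interp and
/// dynamic-linking metadata come first, then allocated sections grouped by read-only, executable,
/// writable, and TLS, with non-alloc sections (debug info, .symtab, .strtab) last.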
2369fn shdrRank(shdr: elf.Elf64_Shdr, shstrtab: []const u8) u8 {
2370    const name = shString(shstrtab, shdr.sh_name);
2371    const flags = shdr.sh_flags;
2372
2373    switch (shdr.sh_type) {
2374        elf.SHT_NULL => return 0,
2375        elf.SHT_DYNSYM => return 2,
2376        elf.SHT_HASH => return 3,
2377        elf.SHT_GNU_HASH => return 3,
2378        elf.SHT_GNU_VERSYM => return 4,
2379        elf.SHT_GNU_VERDEF => return 4,
2380        elf.SHT_GNU_VERNEED => return 4,
2381
2382        elf.SHT_PREINIT_ARRAY,
2383        elf.SHT_INIT_ARRAY,
2384        elf.SHT_FINI_ARRAY,
2385        => return 0xf1,
2386
2387        elf.SHT_DYNAMIC => return 0xf2,
2388
2389        elf.SHT_RELA, elf.SHT_GROUP => return 0xf,
2390
2391        elf.SHT_PROGBITS => if (flags & elf.SHF_ALLOC != 0) {
2392            if (flags & elf.SHF_EXECINSTR != 0) {
2393                return 0xf0;
2394            } else if (flags & elf.SHF_WRITE != 0) {
2395                return if (flags & elf.SHF_TLS != 0) 0xf3 else 0xf5;
2396            } else if (mem.eql(u8, name, ".interp")) {
2397                return 1;
2398            } else if (mem.startsWith(u8, name, ".eh_frame")) {
2399                return 0xe1;
2400            } else {
2401                return 0xe0;
2402            }
2403        } else {
2404            if (mem.startsWith(u8, name, ".debug")) {
2405                return 0xf7;
2406            } else {
2407                return 0xf8;
2408            }
2409        },
2410        elf.SHT_X86_64_UNWIND => return 0xe1,
2411
2412        elf.SHT_NOBITS => return if (flags & elf.SHF_TLS != 0) 0xf4 else 0xf6,
2413        elf.SHT_SYMTAB => return 0xf9,
2414        elf.SHT_STRTAB => return if (mem.eql(u8, name, ".dynstr")) 0x4 else 0xfa,
2415        else => return 0xff,
2416    }
2417}
2418
2419pub fn sortShdrs(
2420    gpa: Allocator,
2421    section_indexes: *SectionIndexes,
2422    sections: *std.MultiArrayList(Section),
2423    shstrtab: []const u8,
2424    merge_sections: []Merge.Section,
2425    comdat_group_sections: []GroupSection,
2426    zig_object_ptr: ?*ZigObject,
2427    files: std.MultiArrayList(File.Entry),
2428) !void {
2429    const Entry = struct {
2430        shndx: u32,
2431
2432        const Context = struct {
2433            shdrs: []const elf.Elf64_Shdr,
2434            shstrtab: []const u8,
2435        };
2436
2437        pub fn lessThan(ctx: Context, lhs: @This(), rhs: @This()) bool {
2438            const lhs_rank = shdrRank(ctx.shdrs[lhs.shndx], ctx.shstrtab);
2439            const rhs_rank = shdrRank(ctx.shdrs[rhs.shndx], ctx.shstrtab);
2440            if (lhs_rank == rhs_rank) {
2441                const lhs_name = shString(ctx.shstrtab, ctx.shdrs[lhs.shndx].sh_name);
2442                const rhs_name = shString(ctx.shstrtab, ctx.shdrs[rhs.shndx].sh_name);
2443                return std.mem.lessThan(u8, lhs_name, rhs_name);
2444            }
2445            return lhs_rank < rhs_rank;
2446        }
2447    };
2448
2449    const shdrs = sections.items(.shdr);
2450
2451    const entries = try gpa.alloc(Entry, shdrs.len);
2452    defer gpa.free(entries);
2453    for (entries, 0..shdrs.len) |*entry, shndx| {
2454        entry.* = .{ .shndx = @intCast(shndx) };
2455    }
2456
2457    const sort_context: Entry.Context = .{
2458        .shdrs = shdrs,
2459        .shstrtab = shstrtab,
2460    };
2461    mem.sortUnstable(Entry, entries, sort_context, Entry.lessThan);
2462
2463    const backlinks = try gpa.alloc(u32, entries.len);
2464    defer gpa.free(backlinks);
2465    {
2466        var slice = sections.toOwnedSlice();
2467        defer slice.deinit(gpa);
2468        try sections.resize(gpa, slice.len);
2469
2470        for (entries, 0..) |entry, i| {
2471            backlinks[entry.shndx] = @intCast(i);
2472            sections.set(i, slice.get(entry.shndx));
2473        }
2474    }
2475
2476    inline for (@typeInfo(SectionIndexes).@"struct".fields) |field| {
2477        if (@field(section_indexes, field.name)) |special_index| {
2478            @field(section_indexes, field.name) = backlinks[special_index];
2479        }
2480    }
2481
2482    for (merge_sections) |*msec| {
2483        msec.output_section_index = backlinks[msec.output_section_index];
2484    }
2485
2486    const slice = sections.slice();
2487    for (slice.items(.shdr), slice.items(.atom_list_2)) |*shdr, *atom_list| {
2488        atom_list.output_section_index = backlinks[atom_list.output_section_index];
2489        for (atom_list.atoms.keys()) |ref| {
2490            fileLookup(files, ref.file, zig_object_ptr).?.atom(ref.index).?.output_section_index = atom_list.output_section_index;
2491        }
2492        if (shdr.sh_type == elf.SHT_RELA) {
2493            shdr.sh_link = section_indexes.symtab.?;
2494            shdr.sh_info = backlinks[shdr.sh_info];
2495        }
2496    }
2497
2498    if (zig_object_ptr) |zo| zo.resetShdrIndexes(backlinks);
2499
2500    for (comdat_group_sections) |*cg| {
2501        cg.shndx = backlinks[cg.shndx];
2502    }
2503
2504    if (section_indexes.symtab) |index| {
2505        const shdr = &slice.items(.shdr)[index];
2506        shdr.sh_link = section_indexes.strtab.?;
2507    }
2508
2509    if (section_indexes.dynamic) |index| {
2510        const shdr = &slice.items(.shdr)[index];
2511        shdr.sh_link = section_indexes.dynstrtab.?;
2512    }
2513
2514    if (section_indexes.dynsymtab) |index| {
2515        const shdr = &slice.items(.shdr)[index];
2516        shdr.sh_link = section_indexes.dynstrtab.?;
2517    }
2518
2519    if (section_indexes.hash) |index| {
2520        const shdr = &slice.items(.shdr)[index];
2521        shdr.sh_link = section_indexes.dynsymtab.?;
2522    }
2523
2524    if (section_indexes.gnu_hash) |index| {
2525        const shdr = &slice.items(.shdr)[index];
2526        shdr.sh_link = section_indexes.dynsymtab.?;
2527    }
2528
2529    if (section_indexes.versym) |index| {
2530        const shdr = &slice.items(.shdr)[index];
2531        shdr.sh_link = section_indexes.dynsymtab.?;
2532    }
2533
2534    if (section_indexes.verneed) |index| {
2535        const shdr = &slice.items(.shdr)[index];
2536        shdr.sh_link = section_indexes.dynstrtab.?;
2537    }
2538
2539    if (section_indexes.rela_dyn) |index| {
2540        const shdr = &slice.items(.shdr)[index];
2541        shdr.sh_link = section_indexes.dynsymtab orelse 0;
2542    }
2543
2544    if (section_indexes.rela_plt) |index| {
2545        const shdr = &slice.items(.shdr)[index];
2546        shdr.sh_link = section_indexes.dynsymtab.?;
2547        shdr.sh_info = section_indexes.plt.?;
2548    }
2549
2550    if (section_indexes.eh_frame_rela) |index| {
2551        const shdr = &slice.items(.shdr)[index];
2552        shdr.sh_link = section_indexes.symtab.?;
2553        shdr.sh_info = section_indexes.eh_frame.?;
2554    }
2555}
2556
2557fn updateSectionSizes(self: *Elf) !void {
2558    const slice = self.sections.slice();
2559    for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| {
2560        if (atom_list.atoms.keys().len == 0) continue;
2561        if (!atom_list.dirty) continue;
2562        if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue;
2563        atom_list.updateSize(self);
2564        try atom_list.allocate(self);
2565        atom_list.dirty = false;
2566    }
2567
2568    if (self.requiresThunks()) {
2569        for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| {
2570            if (shdr.sh_flags & elf.SHF_EXECINSTR == 0) continue;
2571            if (atom_list.atoms.keys().len == 0) continue;
2572            if (!atom_list.dirty) continue;
2573
2574            // Create jump/branch range extenders if needed.
2575            try self.createThunks(atom_list);
2576            try atom_list.allocate(self);
2577            atom_list.dirty = false;
2578        }
2579
2580        // This might not be needed if there were a link from Atom/Thunk to AtomList.
2581        for (self.thunks.items) |*th| {
2582            th.value += slice.items(.atom_list_2)[th.output_section_index].value;
2583        }
2584    }
2585
2586    const shdrs = slice.items(.shdr);
2587    if (self.section_indexes.eh_frame) |index| {
2588        shdrs[index].sh_size = try eh_frame.calcEhFrameSize(self);
2589    }
2590
2591    if (self.section_indexes.eh_frame_hdr) |index| {
2592        shdrs[index].sh_size = eh_frame.calcEhFrameHdrSize(self);
2593    }
2594
2595    if (self.section_indexes.got) |index| {
2596        shdrs[index].sh_size = self.got.size(self);
2597    }
2598
2599    if (self.section_indexes.plt) |index| {
2600        shdrs[index].sh_size = self.plt.size(self);
2601    }
2602
2603    if (self.section_indexes.got_plt) |index| {
2604        shdrs[index].sh_size = self.got_plt.size(self);
2605    }
2606
2607    if (self.section_indexes.plt_got) |index| {
2608        shdrs[index].sh_size = self.plt_got.size(self);
2609    }
2610
2611    if (self.section_indexes.rela_dyn) |shndx| {
2612        var num = self.got.numRela(self) + self.copy_rel.numRela();
2613        if (self.zigObjectPtr()) |zig_object| {
2614            num += zig_object.num_dynrelocs;
2615        }
2616        for (self.objects.items) |index| {
2617            num += self.file(index).?.object.num_dynrelocs;
2618        }
2619        shdrs[shndx].sh_size = num * @sizeOf(elf.Elf64_Rela);
2620    }
2621
2622    if (self.section_indexes.rela_plt) |index| {
2623        shdrs[index].sh_size = self.plt.numRela() * @sizeOf(elf.Elf64_Rela);
2624    }
2625
2626    if (self.section_indexes.copy_rel) |index| {
2627        try self.copy_rel.updateSectionSize(index, self);
2628    }
2629
2630    if (self.section_indexes.interp) |index| {
2631        shdrs[index].sh_size = self.getTarget().dynamic_linker.get().?.len + 1;
2632    }
2633
2634    if (self.section_indexes.hash) |index| {
2635        shdrs[index].sh_size = self.hash.size();
2636    }
2637
2638    if (self.section_indexes.gnu_hash) |index| {
2639        shdrs[index].sh_size = self.gnu_hash.size();
2640    }
2641
2642    if (self.section_indexes.dynamic) |index| {
2643        shdrs[index].sh_size = self.dynamic.size(self);
2644    }
2645
2646    if (self.section_indexes.dynsymtab) |index| {
2647        shdrs[index].sh_size = self.dynsym.size();
2648    }
2649
2650    if (self.section_indexes.dynstrtab) |index| {
2651        shdrs[index].sh_size = self.dynstrtab.items.len;
2652    }
2653
2654    if (self.section_indexes.versym) |index| {
2655        shdrs[index].sh_size = self.versym.items.len * @sizeOf(elf.Versym);
2656    }
2657
2658    if (self.section_indexes.verneed) |index| {
2659        shdrs[index].sh_size = self.verneed.size();
2660    }
2661
2662    try self.updateSymtabSize();
2663    self.updateShStrtabSize();
2664}
2665
2666pub fn updateShStrtabSize(self: *Elf) void {
2667    if (self.section_indexes.shstrtab) |index| {
2668        self.sections.items(.shdr)[index].sh_size = self.shstrtab.items.len;
2669    }
2670}
2671
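/// Maps section flags to segment flags: every loadable segment is readable (PF_R), plus PF_W for
/// SHF_WRITE and PF_X for SHF_EXECINSTR. For example, SHF_ALLOC | SHF_EXECINSTR yields PF_R | PF_X.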
2672fn shdrToPhdrFlags(sh_flags: u64) u32 {
2673    const write = sh_flags & elf.SHF_WRITE != 0;
2674    const exec = sh_flags & elf.SHF_EXECINSTR != 0;
2675    var out_flags: u32 = elf.PF_R;
2676    if (write) out_flags |= elf.PF_W;
2677    if (exec) out_flags |= elf.PF_X;
2678    return out_flags;
2679}
2680
2681/// Returns maximum number of program headers that may be emitted by the linker.
2682/// (This is an upper bound so that we can reserve enough space for the header and program header
2683/// table without running out of space and being forced to move things around.)
2684fn getMaxNumberOfPhdrs() u64 {
2685    // The estimated maximum number of segments the linker can emit for input sections are:
2686    var num: u64 = max_number_of_object_segments;
2687    // Any other non-loadable program headers, including TLS, DYNAMIC, GNU_STACK, GNU_EH_FRAME, INTERP:
2688    num += max_number_of_special_phdrs;
2689    // PHDR program header and corresponding read-only load segment:
2690    num += 2;
2691    return num;
2692}
2693
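/// Ensures there is one PT_LOAD program header per unique segment-flag combination used by the alloc sections.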
2694fn addLoadPhdrs(self: *Elf) error{OutOfMemory}!void {
2695    for (self.sections.items(.shdr)) |shdr| {
2696        if (shdr.sh_type == elf.SHT_NULL) continue;
2697        if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
2698        const flags = shdrToPhdrFlags(shdr.sh_flags);
2699        if (self.getPhdr(.{ .flags = flags, .type = elf.PT_LOAD }) == .none) {
2700            _ = try self.addPhdr(.{ .flags = flags, .type = elf.PT_LOAD });
2701        }
2702    }
2703}
2704
2705/// Allocates PHDR table in virtual memory and in file.
2706fn allocatePhdrTable(self: *Elf) error{OutOfMemory}!void {
2707    const diags = &self.base.comp.link_diags;
2708    const phdr_table = &self.phdrs.items[self.phdr_indexes.table.int().?];
2709    const phdr_table_load = &self.phdrs.items[self.phdr_indexes.table_load.int().?];
2710
2711    const ehsize: u64 = switch (self.ptr_width) {
2712        .p32 => @sizeOf(elf.Elf32_Ehdr),
2713        .p64 => @sizeOf(elf.Elf64_Ehdr),
2714    };
2715    const phsize: u64 = switch (self.ptr_width) {
2716        .p32 => @sizeOf(elf.Elf32_Phdr),
2717        .p64 => @sizeOf(elf.Elf64_Phdr),
2718    };
2719    const needed_size = self.phdrs.items.len * phsize;
2720    const available_space = self.allocatedSize(phdr_table.p_offset);
2721
2722    if (needed_size > available_space) {
2723        // In this case, we have two options:
2724        // 1. increase the available padding for EHDR + PHDR table so that we don't overflow it
2725        //    (revisit getMaxNumberOfPhdrs())
2726        // 2. shift everything in file to free more space for EHDR + PHDR table
2727        // TODO verify `getMaxNumberOfPhdrs()` is accurate and convert this into no-op
2728        var err = try diags.addErrorWithNotes(1);
2729        try err.addMsg("fatal linker error: not enough space reserved for EHDR and PHDR table", .{});
2730        err.addNote("required 0x{x}, available 0x{x}", .{ needed_size, available_space });
2731    }
2732
2733    phdr_table_load.p_filesz = needed_size + ehsize;
2734    phdr_table_load.p_memsz = needed_size + ehsize;
2735    phdr_table.p_filesz = needed_size;
2736    phdr_table.p_memsz = needed_size;
2737}
2738
2739/// Allocates alloc sections and creates load segments for sections
2740/// extracted from input object files.
2741pub fn allocateAllocSections(self: *Elf) !void {
2742    // We use this struct to track maximum alignment of all TLS sections.
2743    // According to https://github.com/rui314/mold/commit/bd46edf3f0fe9e1a787ea453c4657d535622e61f in mold,
2744    // in-file offsets have to be aligned against the start of the TLS program header.
2745    // If that's not ensured, then in a multi-threaded context, TLS variables across a shared object
2746    // boundary may not get correctly loaded at an aligned address.
2747    const Align = struct {
2748        tls_start_align: u64 = 1,
2749        first_tls_index: ?usize = null,
2750
2751        fn isFirstTlsShdr(this: @This(), other: usize) bool {
2752            if (this.first_tls_index) |index| return index == other;
2753            return false;
2754        }
2755
2756        fn @"align"(this: @This(), index: usize, sh_addralign: u64, addr: u64) u64 {
2757            const alignment = if (this.isFirstTlsShdr(index)) this.tls_start_align else sh_addralign;
2758            return mem.alignForward(u64, addr, alignment);
2759        }
2760    };
2761
2762    const slice = self.sections.slice();
2763    var alignment = Align{};
2764    for (slice.items(.shdr), 0..) |shdr, i| {
2765        if (shdr.sh_type == elf.SHT_NULL) continue;
2766        if (shdr.sh_flags & elf.SHF_TLS == 0) continue;
2767        if (alignment.first_tls_index == null) alignment.first_tls_index = i;
2768        alignment.tls_start_align = @max(alignment.tls_start_align, shdr.sh_addralign);
2769    }
2770
2771    // Next, calculate segment covers by scanning all alloc sections.
2772    // If a section matches segment flags with the preceding section,
2773    // we put it in the same segment. Otherwise, we create a new cover.
2774    // This algorithm is simple but suboptimal in terms of space re-use:
2775    // normally we would also take into account any gaps in allocated
2776    // virtual and file offsets. However, the simple approach will do for now,
2777    // as we are more interested in quick turnaround and compatibility
2778    // with `findFreeSpace` mechanics than anything else.
2779    const Cover = std.array_list.Managed(u32);
2780    const gpa = self.base.comp.gpa;
2781    var covers: [max_number_of_object_segments]Cover = undefined;
2782    for (&covers) |*cover| {
2783        cover.* = Cover.init(gpa);
2784    }
2785    defer for (&covers) |*cover| {
2786        cover.deinit();
2787    };
2788
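    // Group alloc sections into covers keyed by their would-be segment flags; since every segment
    // is at least readable, `flags` is never zero and `flags - 1` is used as the index into `covers`.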
2789    for (slice.items(.shdr), 0..) |shdr, shndx| {
2790        if (shdr.sh_type == elf.SHT_NULL) continue;
2791        if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
2792        const flags = shdrToPhdrFlags(shdr.sh_flags);
2793        try covers[flags - 1].append(@intCast(shndx));
2794    }
2795
2796    // Now we can proceed with allocating the sections in virtual memory.
2797    // As the base address we take the end address of the PHDR table.
2798    // When allocating we first find the largest required alignment
2799    // of any section that is contained in a cover and use it to align
2800    // the start address of the segment (and first section).
2801    const phdr_table = &self.phdrs.items[self.phdr_indexes.table_load.int().?];
2802    var addr = phdr_table.p_vaddr + phdr_table.p_memsz;
2803
2804    for (covers) |cover| {
2805        if (cover.items.len == 0) continue;
2806
2807        var @"align": u64 = self.page_size;
2808        for (cover.items) |shndx| {
2809            const shdr = slice.items(.shdr)[shndx];
2810            if (shdr.sh_type == elf.SHT_NOBITS and shdr.sh_flags & elf.SHF_TLS != 0) continue;
2811            @"align" = @max(@"align", shdr.sh_addralign);
2812        }
2813
2814        addr = mem.alignForward(u64, addr, @"align");
2815
2816        var memsz: u64 = 0;
2817        var filesz: u64 = 0;
2818        var i: usize = 0;
2819        while (i < cover.items.len) : (i += 1) {
2820            const shndx = cover.items[i];
2821            const shdr = &slice.items(.shdr)[shndx];
2822            if (shdr.sh_type == elf.SHT_NOBITS and shdr.sh_flags & elf.SHF_TLS != 0) {
2823                // .tbss is a little special as it's used only by the loader, meaning it doesn't
2824                // actually need to be mmap'ed at runtime. We still need to correctly increment
2825                // the addresses of every TLS zerofill section though. Thus, we hack it so that
2826                // we increment the start address as normal; however, after we are done,
2827                // the next ALLOC section will get its start address allocated within the same
2828                // range as the .tbss sections. We will get something like this:
2829                //
2830                // ...
2831                // .tbss 0x10
2832                // .tcommon 0x20
2833                // .data 0x10
2834                // ...
2835                var tbss_addr = addr;
2836                while (i < cover.items.len and
2837                    slice.items(.shdr)[cover.items[i]].sh_type == elf.SHT_NOBITS and
2838                    slice.items(.shdr)[cover.items[i]].sh_flags & elf.SHF_TLS != 0) : (i += 1)
2839                {
2840                    const tbss_shndx = cover.items[i];
2841                    const tbss_shdr = &slice.items(.shdr)[tbss_shndx];
2842                    tbss_addr = alignment.@"align"(tbss_shndx, tbss_shdr.sh_addralign, tbss_addr);
2843                    tbss_shdr.sh_addr = tbss_addr;
2844                    tbss_addr += tbss_shdr.sh_size;
2845                }
2846                i -= 1;
2847                continue;
2848            }
2849            const next = alignment.@"align"(shndx, shdr.sh_addralign, addr);
2850            const padding = next - addr;
2851            addr = next;
2852            shdr.sh_addr = addr;
2853            if (shdr.sh_type != elf.SHT_NOBITS) {
2854                filesz += padding + shdr.sh_size;
2855            }
2856            memsz += padding + shdr.sh_size;
2857            addr += shdr.sh_size;
2858        }
2859
2860        const first = slice.items(.shdr)[cover.items[0]];
2861        const phndx = self.getPhdr(.{ .type = elf.PT_LOAD, .flags = shdrToPhdrFlags(first.sh_flags) }).unwrap().?;
2862        const phdr = &self.phdrs.items[phndx.int()];
2863        const allocated_size = self.allocatedSize(phdr.p_offset);
2864        if (filesz > allocated_size) {
2865            const old_offset = phdr.p_offset;
2866            phdr.p_offset = 0;
2867            var new_offset = try self.findFreeSpace(filesz, @"align");
2868            phdr.p_offset = new_offset;
2869
2870            log.debug("moving phdr({d}) from 0x{x} to 0x{x}", .{ phndx, old_offset, new_offset });
2871
2872            for (cover.items) |shndx| {
2873                const shdr = &slice.items(.shdr)[shndx];
2874                slice.items(.phndx)[shndx] = phndx.toOptional();
2875                if (shdr.sh_type == elf.SHT_NOBITS) {
2876                    shdr.sh_offset = 0;
2877                    continue;
2878                }
2879                new_offset = alignment.@"align"(shndx, shdr.sh_addralign, new_offset);
2880
2881                log.debug("moving {s} from 0x{x} to 0x{x}", .{
2882                    self.getShString(shdr.sh_name),
2883                    shdr.sh_offset,
2884                    new_offset,
2885                });
2886
2887                if (shdr.sh_offset > 0) {
2888                    // Get the size actually committed to the output file.
2889                    const existing_size = self.sectionSize(shndx);
2890                    const amt = try self.base.file.?.copyRangeAll(
2891                        shdr.sh_offset,
2892                        self.base.file.?,
2893                        new_offset,
2894                        existing_size,
2895                    );
2896                    if (amt != existing_size) return error.InputOutput;
2897                }
2898
2899                shdr.sh_offset = new_offset;
2900                new_offset += shdr.sh_size;
2901            }
2902        }
2903
2904        phdr.p_vaddr = first.sh_addr;
2905        phdr.p_paddr = first.sh_addr;
2906        phdr.p_memsz = memsz;
2907        phdr.p_filesz = filesz;
2908        phdr.p_align = @"align";
2909
2910        addr = mem.alignForward(u64, addr, self.page_size);
2911    }
2912}
2913
2914/// Allocates non-alloc sections (debug info, symtabs, etc.).
2915pub fn allocateNonAllocSections(self: *Elf) !void {
2916    for (self.sections.items(.shdr), 0..) |*shdr, shndx| {
2917        if (shdr.sh_type == elf.SHT_NULL) continue;
2918        if (shdr.sh_flags & elf.SHF_ALLOC != 0) continue;
2919        const needed_size = shdr.sh_size;
2920        if (needed_size > self.allocatedSize(shdr.sh_offset)) {
2921            shdr.sh_size = 0;
2922            const new_offset = try self.findFreeSpace(needed_size, shdr.sh_addralign);
2923
2924            log.debug("moving {s} from 0x{x} to 0x{x}", .{
2925                self.getShString(shdr.sh_name),
2926                shdr.sh_offset,
2927                new_offset,
2928            });
2929
2930            if (shdr.sh_offset > 0) {
2931                const existing_size = self.sectionSize(@intCast(shndx));
2932                const amt = try self.base.file.?.copyRangeAll(
2933                    shdr.sh_offset,
2934                    self.base.file.?,
2935                    new_offset,
2936                    existing_size,
2937                );
2938                if (amt != existing_size) return error.InputOutput;
2939            }
2940
2941            shdr.sh_offset = new_offset;
2942            shdr.sh_size = needed_size;
2943        }
2944    }
2945}
2946
2947fn allocateSpecialPhdrs(self: *Elf) void {
2948    const slice = self.sections.slice();
2949
2950    for (&[_]struct { OptionalProgramHeaderIndex, ?u32 }{
2951        .{ self.phdr_indexes.interp, self.section_indexes.interp },
2952        .{ self.phdr_indexes.dynamic, self.section_indexes.dynamic },
2953        .{ self.phdr_indexes.gnu_eh_frame, self.section_indexes.eh_frame_hdr },
2954    }) |pair| {
2955        if (pair[0].int()) |index| {
2956            const shdr = slice.items(.shdr)[pair[1].?];
2957            const phdr = &self.phdrs.items[index];
2958            phdr.p_align = shdr.sh_addralign;
2959            phdr.p_offset = shdr.sh_offset;
2960            phdr.p_vaddr = shdr.sh_addr;
2961            phdr.p_paddr = shdr.sh_addr;
2962            phdr.p_filesz = shdr.sh_size;
2963            phdr.p_memsz = shdr.sh_size;
2964        }
2965    }
2966
2967    // Set the TLS segment boundaries.
2968    // We assume TLS sections are laid out contiguously and that there is
2969    // a single TLS segment.
2970    if (self.phdr_indexes.tls.int()) |index| {
2971        const shdrs = slice.items(.shdr);
2972        const phdr = &self.phdrs.items[index];
2973        var shndx: u32 = 0;
2974        while (shndx < shdrs.len) {
2975            const shdr = shdrs[shndx];
2976            if (shdr.sh_flags & elf.SHF_TLS == 0) {
2977                shndx += 1;
2978                continue;
2979            }
2980            phdr.p_offset = shdr.sh_offset;
2981            phdr.p_vaddr = shdr.sh_addr;
2982            phdr.p_paddr = shdr.sh_addr;
2983            phdr.p_align = shdr.sh_addralign;
2984            shndx += 1;
2985            phdr.p_align = @max(phdr.p_align, shdr.sh_addralign);
2986            if (shdr.sh_type != elf.SHT_NOBITS) {
2987                phdr.p_filesz = shdr.sh_offset + shdr.sh_size - phdr.p_offset;
2988            }
2989            phdr.p_memsz = shdr.sh_addr + shdr.sh_size - phdr.p_vaddr;
2990
2991            while (shndx < shdrs.len) : (shndx += 1) {
2992                const next = shdrs[shndx];
2993                if (next.sh_flags & elf.SHF_TLS == 0) break;
2994                phdr.p_align = @max(phdr.p_align, next.sh_addralign);
2995                if (next.sh_type != elf.SHT_NOBITS) {
2996                    phdr.p_filesz = next.sh_offset + next.sh_size - phdr.p_offset;
2997                }
2998                phdr.p_memsz = next.sh_addr + next.sh_size - phdr.p_vaddr;
2999            }
3000        }
3001    }
3002}
3003
3004fn writeAtoms(self: *Elf) !void {
3005    const gpa = self.base.comp.gpa;
3006
3007    var undefs: std.AutoArrayHashMap(SymbolResolver.Index, std.array_list.Managed(Ref)) = .init(gpa);
3008    defer {
3009        for (undefs.values()) |*refs| refs.deinit();
3010        undefs.deinit();
3011    }
3012
3013    var buffer: std.Io.Writer.Allocating = .init(gpa);
3014    defer buffer.deinit();
3015
3016    const slice = self.sections.slice();
3017    var has_reloc_errors = false;
3018    for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, atom_list| {
3019        if (shdr.sh_type == elf.SHT_NOBITS) continue;
3020        if (atom_list.atoms.keys().len == 0) continue;
3021        atom_list.write(&buffer, &undefs, self) catch |err| switch (err) {
3022            error.UnsupportedCpuArch => {
3023                try self.reportUnsupportedCpuArch();
3024                return error.LinkFailure;
3025            },
3026            error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
3027            else => |e| return e,
3028        };
3029    }
3030
3031    try self.reportUndefinedSymbols(&undefs);
3032    if (has_reloc_errors) return error.LinkFailure;
3033
3034    if (self.requiresThunks()) {
3035        for (self.thunks.items) |th| {
3036            const thunk_size = th.size(self);
3037            try buffer.ensureUnusedCapacity(thunk_size);
3038            const shdr = slice.items(.shdr)[th.output_section_index];
3039            const offset = @as(u64, @intCast(th.value)) + shdr.sh_offset;
3040            try th.write(self, &buffer.writer);
3041            assert(buffer.written().len == thunk_size);
3042            try self.pwriteAll(buffer.written(), offset);
3043            buffer.clearRetainingCapacity();
3044        }
3045    }
3046}
3047
3048pub fn updateSymtabSize(self: *Elf) !void {
3049    var nlocals: u32 = 0;
3050    var nglobals: u32 = 0;
3051    var strsize: u32 = 0;
3052
3053    const gpa = self.base.comp.gpa;
3054    const shared_objects = self.shared_objects.values();
3055
3056    var files = std.array_list.Managed(File.Index).init(gpa);
3057    defer files.deinit();
3058    try files.ensureTotalCapacityPrecise(self.objects.items.len + shared_objects.len + 2);
3059
3060    if (self.zig_object_index) |index| files.appendAssumeCapacity(index);
3061    for (self.objects.items) |index| files.appendAssumeCapacity(index);
3062    for (shared_objects) |index| files.appendAssumeCapacity(index);
3063    if (self.linker_defined_index) |index| files.appendAssumeCapacity(index);
3064
3065    // Section symbols
3066    nlocals += @intCast(self.sections.slice().len);
3067
3068    if (self.requiresThunks()) for (self.thunks.items) |*th| {
3069        th.output_symtab_ctx.reset();
3070        th.output_symtab_ctx.ilocal = nlocals;
3071        th.calcSymtabSize(self);
3072        nlocals += th.output_symtab_ctx.nlocals;
3073        strsize += th.output_symtab_ctx.strsize;
3074    };
3075
3076    for (files.items) |index| {
3077        const file_ptr = self.file(index).?;
3078        const ctx = switch (file_ptr) {
3079            inline else => |x| &x.output_symtab_ctx,
3080        };
3081        ctx.reset();
3082        ctx.ilocal = nlocals;
3083        ctx.iglobal = nglobals;
3084        try file_ptr.updateSymtabSize(self);
3085        nlocals += ctx.nlocals;
3086        nglobals += ctx.nglobals;
3087        strsize += ctx.strsize;
3088    }
3089
3090    if (self.section_indexes.got) |_| {
3091        self.got.output_symtab_ctx.reset();
3092        self.got.output_symtab_ctx.ilocal = nlocals;
3093        self.got.updateSymtabSize(self);
3094        nlocals += self.got.output_symtab_ctx.nlocals;
3095        strsize += self.got.output_symtab_ctx.strsize;
3096    }
3097
3098    if (self.section_indexes.plt) |_| {
3099        self.plt.output_symtab_ctx.reset();
3100        self.plt.output_symtab_ctx.ilocal = nlocals;
3101        self.plt.updateSymtabSize(self);
3102        nlocals += self.plt.output_symtab_ctx.nlocals;
3103        strsize += self.plt.output_symtab_ctx.strsize;
3104    }
3105
3106    if (self.section_indexes.plt_got) |_| {
3107        self.plt_got.output_symtab_ctx.reset();
3108        self.plt_got.output_symtab_ctx.ilocal = nlocals;
3109        self.plt_got.updateSymtabSize(self);
3110        nlocals += self.plt_got.output_symtab_ctx.nlocals;
3111        strsize += self.plt_got.output_symtab_ctx.strsize;
3112    }
3113
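    // Globals are emitted after all locals, so shift every file's global start index by the
    // total number of local symbols.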
3114    for (files.items) |index| {
3115        const file_ptr = self.file(index).?;
3116        const ctx = switch (file_ptr) {
3117            inline else => |x| &x.output_symtab_ctx,
3118        };
3119        ctx.iglobal += nlocals;
3120    }
3121
3122    const slice = self.sections.slice();
3123    const symtab_shdr = &slice.items(.shdr)[self.section_indexes.symtab.?];
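    // Per the ELF spec, sh_info of a SHT_SYMTAB section holds the index of the first
    // non-local symbol, which equals the number of local symbols.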
3124    symtab_shdr.sh_info = nlocals;
3125    symtab_shdr.sh_link = self.section_indexes.strtab.?;
3126
3127    const sym_size: u64 = switch (self.ptr_width) {
3128        .p32 => @sizeOf(elf.Elf32_Sym),
3129        .p64 => @sizeOf(elf.Elf64_Sym),
3130    };
3131    const needed_size = (nlocals + nglobals) * sym_size;
3132    symtab_shdr.sh_size = needed_size;
3133
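    // The +1 accounts for the mandatory null byte at index 0 of .strtab.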
3134    const strtab = &slice.items(.shdr)[self.section_indexes.strtab.?];
3135    strtab.sh_size = strsize + 1;
3136}
3137
3138fn writeSyntheticSections(self: *Elf) !void {
3139    const gpa = self.base.comp.gpa;
3140    const slice = self.sections.slice();
3141
3142    if (self.section_indexes.interp) |shndx| {
3143        var buffer: [256]u8 = undefined;
3144        const interp = self.getTarget().dynamic_linker.get().?;
3145        @memcpy(buffer[0..interp.len], interp);
3146        buffer[interp.len] = 0;
3147        const contents = buffer[0 .. interp.len + 1];
3148        const shdr = slice.items(.shdr)[shndx];
3149        assert(shdr.sh_size == contents.len);
3150        try self.pwriteAll(contents, shdr.sh_offset);
3151    }
3152
3153    if (self.section_indexes.hash) |shndx| {
3154        const shdr = slice.items(.shdr)[shndx];
3155        try self.pwriteAll(self.hash.buffer.items, shdr.sh_offset);
3156    }
3157
3158    if (self.section_indexes.gnu_hash) |shndx| {
3159        const shdr = slice.items(.shdr)[shndx];
3160        var aw: std.Io.Writer.Allocating = .init(gpa);
3161        try aw.ensureUnusedCapacity(self.gnu_hash.size());
3162        defer aw.deinit();
3163        try self.gnu_hash.write(self, &aw.writer);
3164        try self.pwriteAll(aw.written(), shdr.sh_offset);
3165    }
3166
3167    if (self.section_indexes.versym) |shndx| {
3168        const shdr = slice.items(.shdr)[shndx];
3169        try self.pwriteAll(@ptrCast(self.versym.items), shdr.sh_offset);
3170    }
3171
3172    if (self.section_indexes.verneed) |shndx| {
3173        const shdr = slice.items(.shdr)[shndx];
3174        var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.verneed.size());
3175        defer buffer.deinit();
3176        try self.verneed.write(&buffer.writer);
3177        try self.pwriteAll(buffer.written(), shdr.sh_offset);
3178    }
3179
3180    if (self.section_indexes.dynamic) |shndx| {
3181        const shdr = slice.items(.shdr)[shndx];
3182        var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.dynamic.size(self));
3183        defer buffer.deinit();
3184        try self.dynamic.write(self, &buffer.writer);
3185        try self.pwriteAll(buffer.written(), shdr.sh_offset);
3186    }
3187
3188    if (self.section_indexes.dynsymtab) |shndx| {
3189        const shdr = slice.items(.shdr)[shndx];
3190        var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.dynsym.size());
3191        defer buffer.deinit();
3192        try self.dynsym.write(self, &buffer.writer);
3193        try self.pwriteAll(buffer.written(), shdr.sh_offset);
3194    }
3195
3196    if (self.section_indexes.dynstrtab) |shndx| {
3197        const shdr = slice.items(.shdr)[shndx];
3198        try self.pwriteAll(self.dynstrtab.items, shdr.sh_offset);
3199    }
3200
3201    if (self.section_indexes.eh_frame) |shndx| {
3202        const existing_size = existing_size: {
3203            const zo = self.zigObjectPtr() orelse break :existing_size 0;
3204            const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0);
3205            break :existing_size sym.atom(self).?.size;
3206        };
3207        const shdr = slice.items(.shdr)[shndx];
3208        const sh_size = try self.cast(usize, shdr.sh_size);
3209        var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, @intCast(sh_size - existing_size));
3210        defer buffer.deinit();
3211        try eh_frame.writeEhFrame(self, &buffer.writer);
3212        assert(buffer.written().len == sh_size - existing_size);
3213        try self.pwriteAll(buffer.written(), shdr.sh_offset + existing_size);
3214    }
3215
3216    if (self.section_indexes.eh_frame_hdr) |shndx| {
3217        const shdr = slice.items(.shdr)[shndx];
3218        const sh_size = try self.cast(usize, shdr.sh_size);
3219        var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, sh_size);
3220        defer buffer.deinit();
3221        try eh_frame.writeEhFrameHdr(self, &buffer.writer);
3222        try self.pwriteAll(buffer.written(), shdr.sh_offset);
3223    }
3224
3225    if (self.section_indexes.got) |index| {
3226        const shdr = slice.items(.shdr)[index];
3227        var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.got.size(self));
3228        defer buffer.deinit();
3229        try self.got.write(self, &buffer.writer);
3230        try self.pwriteAll(buffer.written(), shdr.sh_offset);
3231    }
3232
3233    if (self.section_indexes.rela_dyn) |shndx| {
3234        const shdr = slice.items(.shdr)[shndx];
3235        try self.got.addRela(self);
3236        try self.copy_rel.addRela(self);
3237        self.sortRelaDyn();
3238        try self.pwriteAll(@ptrCast(self.rela_dyn.items), shdr.sh_offset);
3239    }
3240
3241    if (self.section_indexes.plt) |shndx| {
3242        const shdr = slice.items(.shdr)[shndx];
3243        var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.plt.size(self));
3244        defer buffer.deinit();
3245        try self.plt.write(self, &buffer.writer);
3246        try self.pwriteAll(buffer.written(), shdr.sh_offset);
3247    }
3248
3249    if (self.section_indexes.got_plt) |shndx| {
3250        const shdr = slice.items(.shdr)[shndx];
3251        var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.got_plt.size(self));
3252        defer buffer.deinit();
3253        try self.got_plt.write(self, &buffer.writer);
3254        try self.pwriteAll(buffer.written(), shdr.sh_offset);
3255    }
3256
3257    if (self.section_indexes.plt_got) |shndx| {
3258        const shdr = slice.items(.shdr)[shndx];
3259        var buffer = try std.Io.Writer.Allocating.initCapacity(gpa, self.plt_got.size(self));
3260        defer buffer.deinit();
3261        try self.plt_got.write(self, &buffer.writer);
3262        try self.pwriteAll(buffer.written(), shdr.sh_offset);
3263    }
3264
3265    if (self.section_indexes.rela_plt) |shndx| {
3266        const shdr = slice.items(.shdr)[shndx];
3267        try self.plt.addRela(self);
3268        try self.pwriteAll(@ptrCast(self.rela_plt.items), shdr.sh_offset);
3269    }
3270
3271    try self.writeSymtab();
3272    try self.writeShStrtab();
3273}
3274
3275pub fn writeShStrtab(self: *Elf) !void {
3276    if (self.section_indexes.shstrtab) |index| {
3277        const shdr = self.sections.items(.shdr)[index];
3278        log.debug("writing .shstrtab from 0x{x} to 0x{x}", .{ shdr.sh_offset, shdr.sh_offset + shdr.sh_size });
3279        try self.pwriteAll(self.shstrtab.items, shdr.sh_offset);
3280    }
3281}
3282
3283pub fn writeSymtab(self: *Elf) !void {
3284    const gpa = self.base.comp.gpa;
3285    const shared_objects = self.shared_objects.values();
3286
3287    const slice = self.sections.slice();
3288    const symtab_shdr = slice.items(.shdr)[self.section_indexes.symtab.?];
3289    const strtab_shdr = slice.items(.shdr)[self.section_indexes.strtab.?];
3290    const sym_size: u64 = switch (self.ptr_width) {
3291        .p32 => @sizeOf(elf.Elf32_Sym),
3292        .p64 => @sizeOf(elf.Elf64_Sym),
3293    };
3294    const nsyms = try self.cast(usize, @divExact(symtab_shdr.sh_size, sym_size));
3295
3296    log.debug("writing {d} symbols in .symtab from 0x{x} to 0x{x}", .{
3297        nsyms,
3298        symtab_shdr.sh_offset,
3299        symtab_shdr.sh_offset + symtab_shdr.sh_size,
3300    });
3301    log.debug("writing .strtab from 0x{x} to 0x{x}", .{
3302        strtab_shdr.sh_offset,
3303        strtab_shdr.sh_offset + strtab_shdr.sh_size,
3304    });
3305
3306    try self.symtab.resize(gpa, nsyms);
3307    const needed_strtab_size = try self.cast(usize, strtab_shdr.sh_size - 1);
3308    // TODO we could resize instead and in ZigObject/Object always access as slice
3309    self.strtab.clearRetainingCapacity();
3310    self.strtab.appendAssumeCapacity(0);
3311    try self.strtab.ensureUnusedCapacity(gpa, needed_strtab_size);
3312
3313    for (slice.items(.shdr), 0..) |shdr, shndx| {
3314        const out_sym = &self.symtab.items[shndx];
3315        out_sym.* = .{
3316            .st_name = 0,
3317            .st_value = shdr.sh_addr,
3318            .st_info = if (shdr.sh_type == elf.SHT_NULL) elf.STT_NOTYPE else elf.STT_SECTION,
3319            .st_shndx = @intCast(shndx),
3320            .st_size = 0,
3321            .st_other = 0,
3322        };
3323    }
3324
3325    if (self.requiresThunks()) for (self.thunks.items) |th| {
3326        th.writeSymtab(self);
3327    };
3328
3329    if (self.zigObjectPtr()) |zig_object| {
3330        zig_object.asFile().writeSymtab(self);
3331    }
3332
3333    for (self.objects.items) |index| {
3334        const file_ptr = self.file(index).?;
3335        file_ptr.writeSymtab(self);
3336    }
3337
3338    for (shared_objects) |index| {
3339        const file_ptr = self.file(index).?;
3340        file_ptr.writeSymtab(self);
3341    }
3342
3343    if (self.linkerDefinedPtr()) |obj| {
3344        obj.asFile().writeSymtab(self);
3345    }
3346
3347    if (self.section_indexes.got) |_| {
3348        self.got.writeSymtab(self);
3349    }
3350
3351    if (self.section_indexes.plt) |_| {
3352        self.plt.writeSymtab(self);
3353    }
3354
3355    if (self.section_indexes.plt_got) |_| {
3356        self.plt_got.writeSymtab(self);
3357    }
3358
3359    const foreign_endian = self.getTarget().cpu.arch.endian() != builtin.cpu.arch.endian();
3360    switch (self.ptr_width) {
3361        .p32 => {
3362            const buf = try gpa.alloc(elf.Elf32_Sym, self.symtab.items.len);
3363            defer gpa.free(buf);
3364
3365            for (buf, self.symtab.items) |*out, sym| {
3366                out.* = .{
3367                    .st_name = sym.st_name,
3368                    .st_info = sym.st_info,
3369                    .st_other = sym.st_other,
3370                    .st_shndx = sym.st_shndx,
3371                    .st_value = @intCast(sym.st_value),
3372                    .st_size = @intCast(sym.st_size),
3373                };
3374                if (foreign_endian) mem.byteSwapAllFields(elf.Elf32_Sym, out);
3375            }
3376            try self.pwriteAll(@ptrCast(buf), symtab_shdr.sh_offset);
3377        },
3378        .p64 => {
3379            if (foreign_endian) {
3380                for (self.symtab.items) |*sym| mem.byteSwapAllFields(elf.Elf64_Sym, sym);
3381            }
3382            try self.pwriteAll(@ptrCast(self.symtab.items), symtab_shdr.sh_offset);
3383        },
3384    }
3385
3386    try self.pwriteAll(self.strtab.items, strtab_shdr.sh_offset);
3387}
3388
3389/// Always 4 or 8 depending on whether this is 32-bit ELF or 64-bit ELF.
3390pub fn ptrWidthBytes(self: Elf) u8 {
3391    return switch (self.ptr_width) {
3392        .p32 => 4,
3393        .p64 => 8,
3394    };
3395}
3396
3397/// Does not necessarily match `ptrWidthBytes`; for example, it can be 2 bytes
3398/// in a 32-bit ELF file.
3399pub fn archPtrWidthBytes(self: Elf) u8 {
3400    return @intCast(@divExact(self.getTarget().ptrBitWidth(), 8));
3401}
3402
3403fn phdrTo32(phdr: elf.Elf64_Phdr) elf.Elf32_Phdr {
3404    return .{
3405        .p_type = phdr.p_type,
3406        .p_flags = phdr.p_flags,
3407        .p_offset = @as(u32, @intCast(phdr.p_offset)),
3408        .p_vaddr = @as(u32, @intCast(phdr.p_vaddr)),
3409        .p_paddr = @as(u32, @intCast(phdr.p_paddr)),
3410        .p_filesz = @as(u32, @intCast(phdr.p_filesz)),
3411        .p_memsz = @as(u32, @intCast(phdr.p_memsz)),
3412        .p_align = @as(u32, @intCast(phdr.p_align)),
3413    };
3414}
3415
3416fn shdrTo32(shdr: elf.Elf64_Shdr) elf.Elf32_Shdr {
3417    return .{
3418        .sh_name = shdr.sh_name,
3419        .sh_type = shdr.sh_type,
3420        .sh_flags = @as(u32, @intCast(shdr.sh_flags)),
3421        .sh_addr = @as(u32, @intCast(shdr.sh_addr)),
3422        .sh_offset = @as(u32, @intCast(shdr.sh_offset)),
3423        .sh_size = @as(u32, @intCast(shdr.sh_size)),
3424        .sh_link = shdr.sh_link,
3425        .sh_info = shdr.sh_info,
3426        .sh_addralign = @as(u32, @intCast(shdr.sh_addralign)),
3427        .sh_entsize = @as(u32, @intCast(shdr.sh_entsize)),
3428    };
3429}
3430
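/// Pads `actual_size` with `actual_size / ideal_factor` extra bytes of growth slack,
/// saturating on overflow.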
3431pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
3432    return actual_size +| (actual_size / ideal_factor);
3433}
3434
3435/// If a target compiles other output modes as dynamic libraries,
3436/// this function returns true for those too.
3437pub fn isEffectivelyDynLib(self: Elf) bool {
3438    if (self.base.isDynLib()) return true;
3439    return switch (self.getTarget().os.tag) {
3440        .haiku => self.base.isExe(),
3441        else => false,
3442    };
3443}
3444
3445fn getPhdr(self: *Elf, opts: struct {
3446    type: u32 = 0,
3447    flags: u32 = 0,
3448}) OptionalProgramHeaderIndex {
3449    for (self.phdrs.items, 0..) |phdr, phndx| {
3450        if (self.phdr_indexes.table_load.int()) |index| {
3451            if (phndx == index) continue;
3452        }
3453        if (phdr.p_type == opts.type and phdr.p_flags == opts.flags)
3454            return @enumFromInt(phndx);
3455    }
3456    return .none;
3457}
3458
3459fn addPhdr(self: *Elf, opts: struct {
3460    type: u32 = 0,
3461    flags: u32 = 0,
3462    @"align": u64 = 0,
3463    offset: u64 = 0,
3464    addr: u64 = 0,
3465    filesz: u64 = 0,
3466    memsz: u64 = 0,
3467}) error{OutOfMemory}!ProgramHeaderIndex {
3468    const gpa = self.base.comp.gpa;
3469    const index: ProgramHeaderIndex = @enumFromInt(self.phdrs.items.len);
3470    try self.phdrs.append(gpa, .{
3471        .p_type = opts.type,
3472        .p_flags = opts.flags,
3473        .p_offset = opts.offset,
3474        .p_vaddr = opts.addr,
3475        .p_paddr = opts.addr,
3476        .p_filesz = opts.filesz,
3477        .p_memsz = opts.memsz,
3478        .p_align = opts.@"align",
3479    });
3480    return index;
3481}
3482
3483pub fn addRelaShdr(self: *Elf, name: u32, shndx: u32) !u32 {
3484    const entsize: u64 = switch (self.ptr_width) {
3485        .p32 => @sizeOf(elf.Elf32_Rela),
3486        .p64 => @sizeOf(elf.Elf64_Rela),
3487    };
3488    const addralign: u64 = switch (self.ptr_width) {
3489        .p32 => @alignOf(elf.Elf32_Rela),
3490        .p64 => @alignOf(elf.Elf64_Rela),
3491    };
3492    return self.addSection(.{
3493        .name = name,
3494        .type = elf.SHT_RELA,
3495        .flags = elf.SHF_INFO_LINK,
3496        .entsize = entsize,
3497        .info = shndx,
3498        .addralign = addralign,
3499    });
3500}
3501
3502pub const AddSectionOpts = struct {
3503    name: u32 = 0,
3504    type: u32 = elf.SHT_NULL,
3505    flags: u64 = 0,
3506    link: u32 = 0,
3507    info: u32 = 0,
3508    addralign: u64 = 0,
3509    entsize: u64 = 0,
3510};
3511
3512pub fn addSection(self: *Elf, opts: AddSectionOpts) !u32 {
3513    const gpa = self.base.comp.gpa;
3514    const index: u32 = @intCast(try self.sections.addOne(gpa));
3515    self.sections.set(index, .{
3516        .shdr = .{
3517            .sh_name = opts.name,
3518            .sh_type = opts.type,
3519            .sh_flags = opts.flags,
3520            .sh_addr = 0,
3521            .sh_offset = 0,
3522            .sh_size = 0,
3523            .sh_link = opts.link,
3524            .sh_info = opts.info,
3525            .sh_addralign = opts.addralign,
3526            .sh_entsize = opts.entsize,
3527        },
3528    });
3529    return index;
3530}
3531
3532pub fn sectionByName(self: *Elf, name: [:0]const u8) ?u32 {
3533    for (self.sections.items(.shdr), 0..) |*shdr, i| {
3534        const this_name = self.getShString(shdr.sh_name);
3535        if (mem.eql(u8, this_name, name)) return @intCast(i);
3536    } else return null;
3537}
3538
3539const RelaDyn = struct {
3540    offset: u64,
3541    sym: u64 = 0,
3542    type: u32,
3543    addend: i64 = 0,
3544    target: ?*const Symbol = null,
3545};
3546
3547pub fn addRelaDyn(self: *Elf, opts: RelaDyn) !void {
3548    try self.rela_dyn.ensureUnusedCapacity(self.base.comp.gpa, 1);
3549    self.addRelaDynAssumeCapacity(opts);
3550}
3551
3552pub fn addRelaDynAssumeCapacity(self: *Elf, opts: RelaDyn) void {
3553    relocs_log.debug("  {f}: [{x} => {d}({s})] + {x}", .{
3554        relocation.fmtRelocType(opts.type, self.getTarget().cpu.arch),
3555        opts.offset,
3556        opts.sym,
3557        if (opts.target) |sym| sym.name(self) else "",
3558        opts.addend,
3559    });
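    // ELF64 r_info packs the symbol index into the upper 32 bits and the relocation type
    // into the lower 32 bits.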
3560    self.rela_dyn.appendAssumeCapacity(.{
3561        .r_offset = opts.offset,
3562        .r_info = (opts.sym << 32) | opts.type,
3563        .r_addend = opts.addend,
3564    });
3565}
3566
3567fn sortRelaDyn(self: *Elf) void {
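    // Sort order: relative relocations first (the loader can batch-process them), then
    // everything else grouped by symbol and offset, with IRELATIVE relocations last so that
    // ifunc resolvers run only after all other dynamic relocations have been applied.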
3568    const Sort = struct {
3569        fn rank(rel: elf.Elf64_Rela, ctx: *Elf) u2 {
3570            const cpu_arch = ctx.getTarget().cpu.arch;
3571            const r_type = rel.r_type();
3572            const r_kind = relocation.decode(r_type, cpu_arch).?;
3573            return switch (r_kind) {
3574                .rel => 0,
3575                .irel => 2,
3576                else => 1,
3577            };
3578        }
3579
3580        pub fn lessThan(ctx: *Elf, lhs: elf.Elf64_Rela, rhs: elf.Elf64_Rela) bool {
3581            if (rank(lhs, ctx) == rank(rhs, ctx)) {
3582                if (lhs.r_sym() == rhs.r_sym()) return lhs.r_offset < rhs.r_offset;
3583                return lhs.r_sym() < rhs.r_sym();
3584            }
3585            return rank(lhs, ctx) < rank(rhs, ctx);
3586        }
3587    };
3588    mem.sort(elf.Elf64_Rela, self.rela_dyn.items, self, Sort.lessThan);
3589}
3590
3591pub fn calcNumIRelativeRelocs(self: *Elf) usize {
3592    var count: usize = self.num_ifunc_dynrelocs;
3593
3594    for (self.got.entries.items) |entry| {
3595        if (entry.tag != .got) continue;
3596        const sym = self.symbol(entry.ref).?;
3597        if (sym.isIFunc(self)) count += 1;
3598    }
3599
3600    return count;
3601}
3602
3603pub fn getStartStopBasename(self: Elf, shdr: elf.Elf64_Shdr) ?[]const u8 {
3604    const name = self.getShString(shdr.sh_name);
3605    if (shdr.sh_flags & elf.SHF_ALLOC != 0 and name.len > 0) {
3606        if (Elf.isCIdentifier(name)) return name;
3607    }
3608    return null;
3609}
3610
3611pub fn isCIdentifier(name: []const u8) bool {
3612    if (name.len == 0) return false;
3613    const first_c = name[0];
3614    if (!std.ascii.isAlphabetic(first_c) and first_c != '_') return false;
3615    for (name[1..]) |c| {
3616        if (!std.ascii.isAlphanumeric(c) and c != '_') return false;
3617    }
3618    return true;
3619}
3620
3621pub fn addThunk(self: *Elf) !Thunk.Index {
3622    const index = @as(Thunk.Index, @intCast(self.thunks.items.len));
3623    const th = try self.thunks.addOne(self.base.comp.gpa);
3624    th.* = .{};
3625    return index;
3626}
3627
3628pub fn thunk(self: *Elf, index: Thunk.Index) *Thunk {
3629    assert(index < self.thunks.items.len);
3630    return &self.thunks.items[index];
3631}
3632
3633pub fn file(self: *Elf, index: File.Index) ?File {
3634    return fileLookup(self.files, index, self.zig_object);
3635}
3636
3637fn fileLookup(files: std.MultiArrayList(File.Entry), index: File.Index, zig_object: ?*ZigObject) ?File {
3638    const tag = files.items(.tags)[index];
3639    return switch (tag) {
3640        .null => null,
3641        .linker_defined => .{ .linker_defined = &files.items(.data)[index].linker_defined },
3642        .zig_object => .{ .zig_object = zig_object.? },
3643        .object => .{ .object = &files.items(.data)[index].object },
3644        .shared_object => .{ .shared_object = &files.items(.data)[index].shared_object },
3645    };
3646}
3647
3648pub fn addFileHandle(
3649    gpa: Allocator,
3650    file_handles: *std.ArrayList(File.Handle),
3651    handle: fs.File,
3652) Allocator.Error!File.HandleIndex {
3653    try file_handles.append(gpa, handle);
3654    return @intCast(file_handles.items.len - 1);
3655}
3656
3657pub fn fileHandle(self: Elf, index: File.HandleIndex) File.Handle {
3658    return self.file_handles.items[index];
3659}
3660
3661pub fn atom(self: *Elf, ref: Ref) ?*Atom {
3662    const file_ptr = self.file(ref.file) orelse return null;
3663    return file_ptr.atom(ref.index);
3664}
3665
3666pub fn group(self: *Elf, ref: Ref) *Group {
3667    return self.file(ref.file).?.group(ref.index);
3668}
3669
3670pub fn symbol(self: *Elf, ref: Ref) ?*Symbol {
3671    const file_ptr = self.file(ref.file) orelse return null;
3672    return file_ptr.symbol(ref.index);
3673}
3674
3675pub fn getGlobalSymbol(self: *Elf, name: []const u8, lib_name: ?[]const u8) !u32 {
3676    return self.zigObjectPtr().?.getGlobalSymbol(self, name, lib_name);
3677}
3678
3679pub fn zigObjectPtr(self: *Elf) ?*ZigObject {
3680    return self.zig_object;
3681}
3682
3683pub fn linkerDefinedPtr(self: *Elf) ?*LinkerDefined {
3684    const index = self.linker_defined_index orelse return null;
3685    return self.file(index).?.linker_defined;
3686}
3687
3688pub fn getOrCreateMergeSection(self: *Elf, name: [:0]const u8, flags: u64, @"type": u32) !Merge.Section.Index {
3689    const gpa = self.base.comp.gpa;
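    // For a final link, mergeable .rodata input sections are funneled into either .rodata.str
    // (string merge) or .rodata.cst (fixed-size constant merge); relocatable output keeps the
    // original section name.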
3690    const out_name = name: {
3691        if (self.base.isRelocatable()) break :name name;
3692        if (mem.eql(u8, name, ".rodata") or mem.startsWith(u8, name, ".rodata."))
3693            break :name if (flags & elf.SHF_STRINGS != 0) ".rodata.str" else ".rodata.cst";
3694        break :name name;
3695    };
3696    for (self.merge_sections.items, 0..) |msec, index| {
3697        if (mem.eql(u8, msec.name(self), out_name)) return @intCast(index);
3698    }
3699    const out_off = try self.insertShString(out_name);
3700    const out_flags = flags & ~@as(u64, elf.SHF_COMPRESSED | elf.SHF_GROUP);
3701    const index: Merge.Section.Index = @intCast(self.merge_sections.items.len);
3702    const msec = try self.merge_sections.addOne(gpa);
3703    msec.* = .{
3704        .name_offset = out_off,
3705        .flags = out_flags,
3706        .type = @"type",
3707    };
3708    return index;
3709}
3710
3711pub fn mergeSection(self: *Elf, index: Merge.Section.Index) *Merge.Section {
3712    assert(index < self.merge_sections.items.len);
3713    return &self.merge_sections.items[index];
3714}
3715
3716pub fn gotAddress(self: *Elf) i64 {
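    // On x86_64 the GOT base symbol (_GLOBAL_OFFSET_TABLE_) conventionally points at .got.plt
    // rather than .got.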
3717    const shndx = blk: {
3718        if (self.getTarget().cpu.arch == .x86_64 and self.section_indexes.got_plt != null)
3719            break :blk self.section_indexes.got_plt.?;
3720        break :blk if (self.section_indexes.got) |shndx| shndx else null;
3721    };
3722    return if (shndx) |index| @intCast(self.sections.items(.shdr)[index].sh_addr) else 0;
3723}
3724
3725pub fn tpAddress(self: *Elf) i64 {
3726    const index = self.phdr_indexes.tls.int() orelse return 0;
3727    const phdr = self.phdrs.items[index];
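    // Thread-pointer conventions differ per ABI: x86_64 (TLS variant 2) places TP past the end
    // of the TLS block, aarch64 (variant 1) reserves a 16-byte TCB between TP and the block,
    // and riscv64 points TP directly at the start of the block.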
3728    const addr = switch (self.getTarget().cpu.arch) {
3729        .x86_64 => mem.alignForward(u64, phdr.p_vaddr + phdr.p_memsz, phdr.p_align),
3730        .aarch64, .aarch64_be => mem.alignBackward(u64, phdr.p_vaddr - 16, phdr.p_align),
3731        .riscv64, .riscv64be => phdr.p_vaddr,
3732        else => |arch| std.debug.panic("TODO implement getTpAddress for {s}", .{@tagName(arch)}),
3733    };
3734    return @intCast(addr);
3735}
3736
3737pub fn dtpAddress(self: *Elf) i64 {
3738    const index = self.phdr_indexes.tls.int() orelse return 0;
3739    const phdr = self.phdrs.items[index];
3740    return @intCast(phdr.p_vaddr);
3741}
3742
3743pub fn tlsAddress(self: *Elf) i64 {
3744    const index = self.phdr_indexes.tls.int() orelse return 0;
3745    const phdr = self.phdrs.items[index];
3746    return @intCast(phdr.p_vaddr);
3747}
3748
3749pub fn getShString(self: Elf, off: u32) [:0]const u8 {
3750    return shString(self.shstrtab.items, off);
3751}
3752
3753fn shString(
3754    shstrtab: []const u8,
3755    off: u32,
3756) [:0]const u8 {
3757    const slice = shstrtab[off..];
3758    return slice[0..mem.indexOfScalar(u8, slice, 0).? :0];
3759}
3760
3761pub fn insertShString(self: *Elf, name: [:0]const u8) error{OutOfMemory}!u32 {
3762    const gpa = self.base.comp.gpa;
3763    const off = @as(u32, @intCast(self.shstrtab.items.len));
3764    try self.shstrtab.ensureUnusedCapacity(gpa, name.len + 1);
3765    self.shstrtab.print(gpa, "{s}\x00", .{name}) catch unreachable;
3766    return off;
3767}
3768
3769pub fn getDynString(self: Elf, off: u32) [:0]const u8 {
3770    assert(off < self.dynstrtab.items.len);
3771    return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.dynstrtab.items.ptr + off)), 0);
3772}
3773
3774pub fn insertDynString(self: *Elf, name: []const u8) error{OutOfMemory}!u32 {
3775    const gpa = self.base.comp.gpa;
3776    const off = @as(u32, @intCast(self.dynstrtab.items.len));
3777    try self.dynstrtab.ensureUnusedCapacity(gpa, name.len + 1);
3778    self.dynstrtab.print(gpa, "{s}\x00", .{name}) catch unreachable;
3779    return off;
3780}
3781
3782fn reportUndefinedSymbols(self: *Elf, undefs: anytype) !void {
3783    const gpa = self.base.comp.gpa;
3784    const diags = &self.base.comp.link_diags;
3785    const max_notes = 4;
3786
3787    try diags.msgs.ensureUnusedCapacity(gpa, undefs.count());
3788
3789    for (undefs.keys(), undefs.values()) |key, refs| {
3790        const undef_sym = self.resolver.keys.items[key - 1];
3791        const nrefs = @min(refs.items.len, max_notes);
3792        const nnotes = nrefs + @intFromBool(refs.items.len > max_notes);
3793
3794        var err = try diags.addErrorWithNotesAssumeCapacity(nnotes);
3795        try err.addMsg("undefined symbol: {s}", .{undef_sym.name(self)});
3796
3797        for (refs.items[0..nrefs]) |ref| {
3798            const atom_ptr = self.atom(ref).?;
3799            const file_ptr = atom_ptr.file(self).?;
3800            err.addNote("referenced by {f}:{s}", .{ file_ptr.fmtPath(), atom_ptr.name(self) });
3801        }
3802
3803        if (refs.items.len > max_notes) {
3804            const remaining = refs.items.len - max_notes;
3805            err.addNote("referenced {d} more times", .{remaining});
3806        }
3807    }
3808}
3809
3810fn reportDuplicates(self: *Elf, dupes: anytype) error{ HasDuplicates, OutOfMemory }!void {
3811    if (dupes.keys().len == 0) return; // Nothing to do
3812    const diags = &self.base.comp.link_diags;
3813
3814    const max_notes = 3;
3815
3816    for (dupes.keys(), dupes.values()) |key, notes| {
3817        const sym = self.resolver.keys.items[key - 1];
3818        const nnotes = @min(notes.items.len, max_notes) + @intFromBool(notes.items.len > max_notes);
3819
3820        var err = try diags.addErrorWithNotes(nnotes + 1);
3821        try err.addMsg("duplicate symbol definition: {s}", .{sym.name(self)});
3822        err.addNote("defined by {f}", .{sym.file(self).?.fmtPath()});
3823
3824        var inote: usize = 0;
3825        while (inote < @min(notes.items.len, max_notes)) : (inote += 1) {
3826            const file_ptr = self.file(notes.items[inote]).?;
3827            err.addNote("defined by {f}", .{file_ptr.fmtPath()});
3828        }
3829
3830        if (notes.items.len > max_notes) {
3831            const remaining = notes.items.len - max_notes;
3832            err.addNote("defined {d} more times", .{remaining});
3833        }
3834    }
3835
3836    return error.HasDuplicates;
3837}
3838
3839fn reportUnsupportedCpuArch(self: *Elf) error{OutOfMemory}!void {
3840    const diags = &self.base.comp.link_diags;
3841    var err = try diags.addErrorWithNotes(0);
3842    try err.addMsg("fatal linker error: unsupported CPU architecture {s}", .{
3843        @tagName(self.getTarget().cpu.arch),
3844    });
3845}
3846
3847pub fn addFileError(
3848    self: *Elf,
3849    file_index: File.Index,
3850    comptime format: []const u8,
3851    args: anytype,
3852) error{OutOfMemory}!void {
3853    const diags = &self.base.comp.link_diags;
3854    var err = try diags.addErrorWithNotes(1);
3855    try err.addMsg(format, args);
3856    err.addNote("while parsing {f}", .{self.file(file_index).?.fmtPath()});
3857}
3858
3859pub fn failFile(
3860    self: *Elf,
3861    file_index: File.Index,
3862    comptime format: []const u8,
3863    args: anytype,
3864) error{ OutOfMemory, LinkFailure } {
3865    try addFileError(self, file_index, format, args);
3866    return error.LinkFailure;
3867}
3868
3869const FormatShdr = struct {
3870    elf_file: *Elf,
3871    shdr: elf.Elf64_Shdr,
3872};
3873
3874fn fmtShdr(self: *Elf, shdr: elf.Elf64_Shdr) std.fmt.Alt(FormatShdr, formatShdr) {
3875    return .{ .data = .{
3876        .shdr = shdr,
3877        .elf_file = self,
3878    } };
3879}
3880
3881fn formatShdr(ctx: FormatShdr, writer: *std.Io.Writer) std.Io.Writer.Error!void {
3882    const shdr = ctx.shdr;
3883    try writer.print("{s} : @{x} ({x}) : align({x}) : size({x}) : entsize({x}) : flags({f})", .{
3884        ctx.elf_file.getShString(shdr.sh_name), shdr.sh_offset,
3885        shdr.sh_addr,                           shdr.sh_addralign,
3886        shdr.sh_size,                           shdr.sh_entsize,
3887        fmtShdrFlags(shdr.sh_flags),
3888    });
3889}
3890
3891pub fn fmtShdrFlags(sh_flags: u64) std.fmt.Alt(u64, formatShdrFlags) {
3892    return .{ .data = sh_flags };
3893}
3894
3895fn formatShdrFlags(sh_flags: u64, writer: *std.Io.Writer) std.Io.Writer.Error!void {
3896    if (elf.SHF_WRITE & sh_flags != 0) {
3897        try writer.writeAll("W");
3898    }
3899    if (elf.SHF_ALLOC & sh_flags != 0) {
3900        try writer.writeAll("A");
3901    }
3902    if (elf.SHF_EXECINSTR & sh_flags != 0) {
3903        try writer.writeAll("X");
3904    }
3905    if (elf.SHF_MERGE & sh_flags != 0) {
3906        try writer.writeAll("M");
3907    }
3908    if (elf.SHF_STRINGS & sh_flags != 0) {
3909        try writer.writeAll("S");
3910    }
3911    if (elf.SHF_INFO_LINK & sh_flags != 0) {
3912        try writer.writeAll("I");
3913    }
3914    if (elf.SHF_LINK_ORDER & sh_flags != 0) {
3915        try writer.writeAll("L");
3916    }
3917    if (elf.SHF_EXCLUDE & sh_flags != 0) {
3918        try writer.writeAll("E");
3919    }
3920    if (elf.SHF_COMPRESSED & sh_flags != 0) {
3921        try writer.writeAll("C");
3922    }
3923    if (elf.SHF_GROUP & sh_flags != 0) {
3924        try writer.writeAll("G");
3925    }
3926    if (elf.SHF_OS_NONCONFORMING & sh_flags != 0) {
3927        try writer.writeAll("O");
3928    }
3929    if (elf.SHF_TLS & sh_flags != 0) {
3930        try writer.writeAll("T");
3931    }
3932    if (elf.SHF_X86_64_LARGE & sh_flags != 0) {
3933        try writer.writeAll("l");
3934    }
3935    if (elf.SHF_MIPS_ADDR & sh_flags != 0 or elf.SHF_ARM_PURECODE & sh_flags != 0) {
3936        try writer.writeAll("p");
3937    }
3938}
3939
3940const FormatPhdr = struct {
3941    elf_file: *Elf,
3942    phdr: elf.Elf64_Phdr,
3943};
3944
3945fn fmtPhdr(self: *Elf, phdr: elf.Elf64_Phdr) std.fmt.Alt(FormatPhdr, formatPhdr) {
3946    return .{ .data = .{
3947        .phdr = phdr,
3948        .elf_file = self,
3949    } };
3950}
3951
3952fn formatPhdr(ctx: FormatPhdr, writer: *std.Io.Writer) std.Io.Writer.Error!void {
3953    const phdr = ctx.phdr;
3954    const write = phdr.p_flags & elf.PF_W != 0;
3955    const read = phdr.p_flags & elf.PF_R != 0;
3956    const exec = phdr.p_flags & elf.PF_X != 0;
3957    var flags: [3]u8 = [_]u8{'_'} ** 3;
3958    if (exec) flags[0] = 'X';
3959    if (write) flags[1] = 'W';
3960    if (read) flags[2] = 'R';
3961    const p_type = switch (phdr.p_type) {
3962        elf.PT_LOAD => "LOAD",
3963        elf.PT_TLS => "TLS",
3964        elf.PT_GNU_EH_FRAME => "GNU_EH_FRAME",
3965        elf.PT_GNU_STACK => "GNU_STACK",
3966        elf.PT_DYNAMIC => "DYNAMIC",
3967        elf.PT_INTERP => "INTERP",
3968        elf.PT_NULL => "NULL",
3969        elf.PT_PHDR => "PHDR",
3970        elf.PT_NOTE => "NOTE",
3971        else => "UNKNOWN",
3972    };
3973    try writer.print("{s} : {s} : @{x} ({x}) : align({x}) : filesz({x}) : memsz({x})", .{
3974        p_type,       flags,         phdr.p_offset, phdr.p_vaddr,
3975        phdr.p_align, phdr.p_filesz, phdr.p_memsz,
3976    });
3977}
3978
3979pub fn dumpState(self: *Elf) std.fmt.Alt(*Elf, fmtDumpState) {
3980    return .{ .data = self };
3981}
3982
3983fn fmtDumpState(self: *Elf, writer: *std.Io.Writer) std.Io.Writer.Error!void {
3984    const shared_objects = self.shared_objects.values();
3985
3986    if (self.zigObjectPtr()) |zig_object| {
3987        try writer.print("zig_object({d}) : {s}\n", .{ zig_object.index, zig_object.basename });
3988        try writer.print("{f}{f}", .{
3989            zig_object.fmtAtoms(self),
3990            zig_object.fmtSymtab(self),
3991        });
3992        try writer.writeByte('\n');
3993    }
3994
3995    for (self.objects.items) |index| {
3996        const object = self.file(index).?.object;
3997        try writer.print("object({d}) : {f}", .{ index, object.fmtPath() });
3998        if (!object.alive) try writer.writeAll(" : [*]");
3999        try writer.writeByte('\n');
4000        try writer.print("{f}{f}{f}{f}{f}\n", .{
4001            object.fmtAtoms(self),
4002            object.fmtCies(self),
4003            object.fmtFdes(self),
4004            object.fmtSymtab(self),
4005            object.fmtGroups(self),
4006        });
4007    }
4008
4009    for (shared_objects) |index| {
4010        const shared_object = self.file(index).?.shared_object;
4011        try writer.print("shared_object({d}) : {f} : needed({})", .{
4012            index, shared_object.path, shared_object.needed,
4013        });
4014        if (!shared_object.alive) try writer.writeAll(" : [*]");
4015        try writer.writeByte('\n');
4016        try writer.print("{f}\n", .{shared_object.fmtSymtab(self)});
4017    }
4018
4019    if (self.linker_defined_index) |index| {
4020        const linker_defined = self.file(index).?.linker_defined;
4021        try writer.print("linker_defined({d}) : (linker defined)\n", .{index});
4022        try writer.print("{f}\n", .{linker_defined.fmtSymtab(self)});
4023    }
4024
4025    const slice = self.sections.slice();
4026    {
4027        try writer.writeAll("atom lists\n");
4028        for (slice.items(.shdr), slice.items(.atom_list_2), 0..) |shdr, atom_list, shndx| {
4029            try writer.print("shdr({d}) : {s} : {f}\n", .{ shndx, self.getShString(shdr.sh_name), atom_list.fmt(self) });
4030        }
4031    }
4032
4033    if (self.requiresThunks()) {
4034        try writer.writeAll("thunks\n");
4035        for (self.thunks.items, 0..) |th, index| {
4036            try writer.print("thunk({d}) : {f}\n", .{ index, th.fmt(self) });
4037        }
4038    }
4039
4040    try writer.print("{f}\n", .{self.got.fmt(self)});
4041    try writer.print("{f}\n", .{self.plt.fmt(self)});
4042
4043    try writer.writeAll("Output groups\n");
4044    for (self.group_sections.items) |cg| {
4045        try writer.print("  shdr({d}) : GROUP({f})\n", .{ cg.shndx, cg.cg_ref });
4046    }
4047
4048    try writer.writeAll("\nOutput merge sections\n");
4049    for (self.merge_sections.items) |msec| {
4050        try writer.print("  shdr({d}) : {f}\n", .{ msec.output_section_index, msec.fmt(self) });
4051    }
4052
4053    try writer.writeAll("\nOutput shdrs\n");
4054    for (slice.items(.shdr), slice.items(.phndx), 0..) |shdr, phndx, shndx| {
4055        try writer.print("  shdr({d}) : phdr({d}) : {f}\n", .{
4056            shndx,
4057            phndx,
4058            self.fmtShdr(shdr),
4059        });
4060    }
4061    try writer.writeAll("\nOutput phdrs\n");
4062    for (self.phdrs.items, 0..) |phdr, phndx| {
4063        try writer.print("  phdr({d}) : {f}\n", .{ phndx, self.fmtPhdr(phdr) });
4064    }
4065}
4066
4067/// Caller owns the memory.
4068pub fn preadAllAlloc(allocator: Allocator, handle: fs.File, offset: u64, size: u64) ![]u8 {
4069    const buffer = try allocator.alloc(u8, math.cast(usize, size) orelse return error.Overflow);
4070    errdefer allocator.free(buffer);
4071    const amt = try handle.preadAll(buffer, offset);
4072    if (amt != size) return error.InputOutput;
4073    return buffer;
4074}
4075
4076/// Binary search: returns the index of the first element for which the predicate is false (lower bound).
4077pub fn bsearch(comptime T: type, haystack: []const T, predicate: anytype) usize {
4078    var min: usize = 0;
4079    var max: usize = haystack.len;
4080    while (min < max) {
4081        const index = (min + max) / 2;
4082        const curr = haystack[index];
4083        if (predicate.predicate(curr)) {
4084            min = index + 1;
4085        } else {
4086            max = index;
4087        }
4088    }
4089    return min;
4090}
4091
4092/// Linear search: returns the index of the first element matching the predicate, or `haystack.len` if none match.
4093pub fn lsearch(comptime T: type, haystack: []const T, predicate: anytype) usize {
4094    var i: usize = 0;
4095    while (i < haystack.len) : (i += 1) {
4096        if (predicate.predicate(haystack[i])) break;
4097    }
4098    return i;
4099}
4100
4101pub fn getTarget(self: *const Elf) *const std.Target {
4102    return &self.base.comp.root_mod.resolved_target.result;
4103}
4104
4105fn requiresThunks(self: Elf) bool {
4106    return switch (self.getTarget().cpu.arch) {
4107        .aarch64, .aarch64_be => true,
4108        .x86_64, .riscv64, .riscv64be => false,
4109        else => @panic("TODO unimplemented architecture"),
4110    };
4111}
4112
4113/// The following two values are only observed at compile-time and used to emit a compile error
4114/// to remind the programmer to update expected maximum numbers of different program header types
4115/// so that we reserve enough space for the program header table up-front.
4116/// Bump these numbers when adding or deleting a Zig specific pre-allocated segment, or adding
4117/// more special-purpose program headers.
4118const max_number_of_object_segments = 9;
4119const max_number_of_special_phdrs = 5;
4120
4121const default_entry_addr = 0x8000000;
4122
4123pub const base_tag: link.File.Tag = .elf;
4124
4125pub const Group = struct {
4126    signature_off: u32,
4127    file_index: File.Index,
4128    shndx: u32,
4129    members_start: u32,
4130    members_len: u32,
4131    is_comdat: bool,
4132    alive: bool = true,
4133
4134    pub fn file(cg: Group, elf_file: *Elf) File {
4135        return elf_file.file(cg.file_index).?;
4136    }
4137
4138    pub fn signature(cg: Group, elf_file: *Elf) [:0]const u8 {
4139        return cg.file(elf_file).object.getString(cg.signature_off);
4140    }
4141
4142    pub fn members(cg: Group, elf_file: *Elf) []const u32 {
4143        const object = cg.file(elf_file).object;
4144        return object.group_data.items[cg.members_start..][0..cg.members_len];
4145    }
4146
4147    pub const Index = u32;
4148};
4149
4150pub const SymtabCtx = struct {
4151    ilocal: u32 = 0,
4152    iglobal: u32 = 0,
4153    nlocals: u32 = 0,
4154    nglobals: u32 = 0,
4155    strsize: u32 = 0,
4156
4157    pub fn reset(ctx: *SymtabCtx) void {
4158        ctx.ilocal = 0;
4159        ctx.iglobal = 0;
4160        ctx.nlocals = 0;
4161        ctx.nglobals = 0;
4162        ctx.strsize = 0;
4163    }
4164};
4165
4166pub const null_sym = elf.Elf64_Sym{
4167    .st_name = 0,
4168    .st_info = 0,
4169    .st_other = 0,
4170    .st_shndx = 0,
4171    .st_value = 0,
4172    .st_size = 0,
4173};
4174
4175pub const null_shdr = elf.Elf64_Shdr{
4176    .sh_name = 0,
4177    .sh_type = 0,
4178    .sh_flags = 0,
4179    .sh_addr = 0,
4180    .sh_offset = 0,
4181    .sh_size = 0,
4182    .sh_link = 0,
4183    .sh_info = 0,
4184    .sh_addralign = 0,
4185    .sh_entsize = 0,
4186};
4187
4188pub const SystemLib = struct {
4189    needed: bool = false,
4190    path: Path,
4191};
4192
4193pub const Ref = struct {
4194    index: u32 = 0,
4195    file: u32 = 0,
4196
4197    pub fn eql(ref: Ref, other: Ref) bool {
4198        return ref.index == other.index and ref.file == other.file;
4199    }
4200
4201    pub fn format(ref: Ref, writer: *std.Io.Writer) std.Io.Writer.Error!void {
4202        try writer.print("ref({d},{d})", .{ ref.index, ref.file });
4203    }
4204};
4205
4206pub const SymbolResolver = struct {
4207    keys: std.ArrayList(Key) = .empty,
4208    values: std.ArrayList(Ref) = .empty,
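    // The table stores no data of its own; lookups go through `Adapter`, which hashes and
    // compares symbol names, so `keys`/`values` serve as parallel arrays indexed by the map's
    // entry index. Public indices are offset by 1 so that 0 can mean "unresolved".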
4209    table: std.AutoArrayHashMapUnmanaged(void, void) = .empty,
4210
4211    const Result = struct {
4212        found_existing: bool,
4213        index: Index,
4214        ref: *Ref,
4215    };
4216
4217    pub fn deinit(resolver: *SymbolResolver, allocator: Allocator) void {
4218        resolver.keys.deinit(allocator);
4219        resolver.values.deinit(allocator);
4220        resolver.table.deinit(allocator);
4221    }
4222
4223    pub fn getOrPut(
4224        resolver: *SymbolResolver,
4225        allocator: Allocator,
4226        ref: Ref,
4227        elf_file: *Elf,
4228    ) !Result {
4229        const adapter = Adapter{ .keys = resolver.keys.items, .elf_file = elf_file };
4230        const key = Key{ .index = ref.index, .file_index = ref.file };
4231        const gop = try resolver.table.getOrPutAdapted(allocator, key, adapter);
4232        if (!gop.found_existing) {
4233            try resolver.keys.append(allocator, key);
4234            _ = try resolver.values.addOne(allocator);
4235        }
4236        return .{
4237            .found_existing = gop.found_existing,
4238            .index = @intCast(gop.index + 1),
4239            .ref = &resolver.values.items[gop.index],
4240        };
4241    }
4242
4243    pub fn get(resolver: SymbolResolver, index: Index) ?Ref {
4244        if (index == 0) return null;
4245        return resolver.values.items[index - 1];
4246    }
4247
4248    pub fn reset(resolver: *SymbolResolver) void {
4249        resolver.keys.clearRetainingCapacity();
4250        resolver.values.clearRetainingCapacity();
4251        resolver.table.clearRetainingCapacity();
4252    }
4253
4254    const Key = struct {
4255        index: Symbol.Index,
4256        file_index: File.Index,
4257
4258        fn name(key: Key, elf_file: *Elf) [:0]const u8 {
4259            const ref = Ref{ .index = key.index, .file = key.file_index };
4260            return elf_file.symbol(ref).?.name(elf_file);
4261        }
4262
4263        fn file(key: Key, elf_file: *Elf) ?File {
4264            return elf_file.file(key.file_index);
4265        }
4266
4267        fn eql(key: Key, other: Key, elf_file: *Elf) bool {
4268            const key_name = key.name(elf_file);
4269            const other_name = other.name(elf_file);
4270            return mem.eql(u8, key_name, other_name);
4271        }
4272
4273        fn hash(key: Key, elf_file: *Elf) u32 {
4274            return @truncate(Hash.hash(0, key.name(elf_file)));
4275        }
4276    };
4277
4278    const Adapter = struct {
4279        keys: []const Key,
4280        elf_file: *Elf,
4281
4282        pub fn eql(ctx: @This(), key: Key, b_void: void, b_map_index: usize) bool {
4283            _ = b_void;
4284            const other = ctx.keys[b_map_index];
4285            return key.eql(other, ctx.elf_file);
4286        }
4287
4288        pub fn hash(ctx: @This(), key: Key) u32 {
4289            return key.hash(ctx.elf_file);
4290        }
4291    };
4292
4293    pub const Index = u32;
4294};
4295
4296const Section = struct {
4297    /// Section header.
4298    shdr: elf.Elf64_Shdr,
4299
4300    /// Assigned program header index if any.
4301    phndx: OptionalProgramHeaderIndex = .none,
4302
4303    /// List of atoms contributing to this section.
4304    /// TODO currently this is only used for relocations tracking in relocatable mode
4305    /// but will be merged with atom_list_2.
4306    atom_list: std.ArrayList(Ref) = .empty,
4307
4308    /// List of atoms contributing to this section.
4309    /// This can be used by sections that require special handling such as init/fini array, etc.
4310    atom_list_2: AtomList = .{},
4311
4312    /// Index of the last allocated atom in this section.
4313    last_atom: Ref = .{ .index = 0, .file = 0 },
4314
4315    /// A list of atoms that have surplus capacity. This list can have false
4316    /// positives, as functions grow and shrink over time, only sometimes being added
4317    /// or removed from the freelist.
4318    ///
4319    /// An atom has surplus capacity when its overcapacity value is greater than
4320    /// padToIdeal(minimum_atom_size). That is, when it has so
4321    /// much extra capacity, that we could fit a small new symbol in it, itself with
4322    /// ideal_capacity or more.
4323    ///
4324    /// Ideal capacity is defined by size + (size / ideal_factor)
4325    ///
4326    /// Overcapacity is measured by actual_capacity - ideal_capacity. Note that
4327    /// overcapacity can be negative. A simple way to have negative overcapacity is to
4328    /// allocate a fresh text block, which will have ideal capacity, and then grow it
4329    /// by 1 byte. It will then have -1 overcapacity.
4330    free_list: std.ArrayList(Ref) = .empty,
4331};
4332
4333pub fn sectionSize(self: *Elf, shndx: u32) u64 {
4334    const last_atom_ref = self.sections.items(.last_atom)[shndx];
4335    const atom_ptr = self.atom(last_atom_ref) orelse return 0;
4336    return @as(u64, @intCast(atom_ptr.value)) + atom_ptr.size;
4337}
4338
4339fn defaultEntrySymbolName(cpu_arch: std.Target.Cpu.Arch) []const u8 {
4340    return switch (cpu_arch) {
4341        .mips, .mipsel, .mips64, .mips64el => "__start",
4342        else => "_start",
4343    };
4344}
4345
4346fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void {
4347    const gpa = elf_file.base.comp.gpa;
4348    const cpu_arch = elf_file.getTarget().cpu.arch;
4349
4350    // A branch will need an extender (thunk) if the distance to its target is larger than
4351    // `2^(jump_bits - 1) - margin`, where margin is some arbitrary slack value.
4352    const max_distance = switch (cpu_arch) {
4353        .aarch64, .aarch64_be => 0x500_000,
4354        .x86_64, .riscv64, .riscv64be => unreachable,
4355        else => @panic("unhandled arch"),
4356    };
4357
4358    const advance = struct {
4359        fn advance(list: *AtomList, size: u64, alignment: Atom.Alignment) !i64 {
4360            const offset = alignment.forward(list.size);
4361            const padding = offset - list.size;
4362            list.size += padding + size;
4363            list.alignment = list.alignment.max(alignment);
4364            return @intCast(offset);
4365        }
4366    }.advance;
4367
4368    for (atom_list.atoms.keys()) |ref| {
4369        elf_file.atom(ref).?.value = -1;
4370    }
4371
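    // Atoms are grouped into ranges no wider than max_distance; each group gets one
    // range-extension thunk placed at its end so every branch in the group can reach it.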
4372    var i: usize = 0;
4373    while (i < atom_list.atoms.keys().len) {
4374        const start = i;
4375        const start_atom = elf_file.atom(atom_list.atoms.keys()[start]).?;
4376        assert(start_atom.alive);
4377        start_atom.value = try advance(atom_list, start_atom.size, start_atom.alignment);
4378        i += 1;
4379
4380        while (i < atom_list.atoms.keys().len) : (i += 1) {
4381            const atom_ptr = elf_file.atom(atom_list.atoms.keys()[i]).?;
4382            assert(atom_ptr.alive);
4383            if (@as(i64, @intCast(atom_ptr.alignment.forward(atom_list.size))) - start_atom.value >= max_distance)
4384                break;
4385            atom_ptr.value = try advance(atom_list, atom_ptr.size, atom_ptr.alignment);
4386        }
4387
4388        // Insert a thunk at the group end
4389        const thunk_index = try elf_file.addThunk();
4390        const thunk_ptr = elf_file.thunk(thunk_index);
4391        thunk_ptr.output_section_index = atom_list.output_section_index;
4392
4393        // Scan relocs in the group and create trampolines for any unreachable callsite
4394        for (atom_list.atoms.keys()[start..i]) |ref| {
4395            const atom_ptr = elf_file.atom(ref).?;
4396            const file_ptr = atom_ptr.file(elf_file).?;
4397            log.debug("atom({f}) {s}", .{ ref, atom_ptr.name(elf_file) });
4398            for (atom_ptr.relocs(elf_file)) |rel| {
4399                const is_reachable = switch (cpu_arch) {
4400                    .aarch64, .aarch64_be => r: {
4401                        const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
4402                        if (r_type != .CALL26 and r_type != .JUMP26) break :r true;
4403                        const target_ref = file_ptr.resolveSymbol(rel.r_sym(), elf_file);
4404                        const target = elf_file.symbol(target_ref).?;
4405                        if (target.flags.has_plt) break :r false;
4406                        if (atom_ptr.output_section_index != target.output_section_index) break :r false;
4407                        const target_atom = target.atom(elf_file).?;
4408                        if (target_atom.value == -1) break :r false;
4409                        const saddr = atom_ptr.address(elf_file) + @as(i64, @intCast(rel.r_offset));
4410                        const taddr = target.address(.{}, elf_file);
4411                        _ = math.cast(i28, taddr + rel.r_addend - saddr) orelse break :r false;
4412                        break :r true;
4413                    },
4414                    .x86_64, .riscv64, .riscv64be => unreachable,
4415                    else => @panic("unsupported arch"),
4416                };
4417                if (is_reachable) continue;
4418                const target = file_ptr.resolveSymbol(rel.r_sym(), elf_file);
4419                try thunk_ptr.symbols.put(gpa, target, {});
4420            }
4421            atom_ptr.addExtra(.{ .thunk = thunk_index }, elf_file);
4422        }
4423
4424        thunk_ptr.value = try advance(atom_list, thunk_ptr.size(elf_file), Atom.Alignment.fromNonzeroByteUnits(2));
4425
4426        log.debug("thunk({d}) : {f}", .{ thunk_index, thunk_ptr.fmt(elf_file) });
4427    }
4428}
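
// Reachability sketch (illustrative only): an AArch64 B/BL instruction encodes a
// 26-bit signed word offset, so the byte displacement between call site and
// target must fit in a signed 28-bit value (roughly +/-128 MiB). This mirrors
// the `math.cast(i28, ...)` check in createThunks above; the addresses below are
// made up for the example.
test "aarch64 branch displacement fits in i28" {
    const saddr: i64 = 0x10_0000; // hypothetical call site address
    const near_taddr: i64 = 0x20_0000; // hypothetical in-range target
    const far_taddr: i64 = saddr + 0x800_0000; // just past the +/-128 MiB reach
    try std.testing.expect(math.cast(i28, near_taddr - saddr) != null);
    try std.testing.expect(math.cast(i28, far_taddr - saddr) == null);
}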

pub fn stringTableLookup(strtab: []const u8, off: u32) [:0]const u8 {
    const slice = strtab[off..];
    return slice[0..mem.indexOfScalar(u8, slice, 0).? :0];
}
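
// Usage sketch (illustrative only): string table entries are NUL-terminated and
// addressed by byte offset; offset 0 conventionally holds the empty string. The
// table contents below are made up for the example.
test "stringTableLookup example" {
    const strtab = "\x00.text\x00.data\x00";
    try std.testing.expectEqualStrings("", stringTableLookup(strtab, 0));
    try std.testing.expectEqualStrings(".text", stringTableLookup(strtab, 1));
    try std.testing.expectEqualStrings(".data", stringTableLookup(strtab, 7));
}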

pub fn pwriteAll(elf_file: *Elf, bytes: []const u8, offset: u64) error{LinkFailure}!void {
    const comp = elf_file.base.comp;
    const diags = &comp.link_diags;
    elf_file.base.file.?.pwriteAll(bytes, offset) catch |err| {
        return diags.fail("failed to write: {s}", .{@errorName(err)});
    };
}

pub fn setEndPos(elf_file: *Elf, length: u64) error{LinkFailure}!void {
    const comp = elf_file.base.comp;
    const diags = &comp.link_diags;
    elf_file.base.file.?.setEndPos(length) catch |err| {
        return diags.fail("failed to set file end pos: {s}", .{@errorName(err)});
    };
}

pub fn cast(elf_file: *Elf, comptime T: type, x: anytype) error{LinkFailure}!T {
    return std.math.cast(T, x) orelse {
        const comp = elf_file.base.comp;
        const diags = &comp.link_diags;
        return diags.fail("encountered {d}, overflowing {d}-bit value", .{ x, @bitSizeOf(T) });
    };
}
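
// Usage sketch (hypothetical call site): narrowing a 64-bit file offset into a
// 32-bit ELF structure field, turning overflow into a link diagnostic instead
// of a crash. `some_u64_offset` is a placeholder name, not a field of this file.
//
//     const sh_name: u32 = try elf_file.cast(u32, some_u64_offset);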

const std = @import("std");
const build_options = @import("build_options");
const builtin = @import("builtin");
const assert = std.debug.assert;
const elf = std.elf;
const fs = std.fs;
const log = std.log.scoped(.link);
const relocs_log = std.log.scoped(.link_relocs);
const state_log = std.log.scoped(.link_state);
const math = std.math;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const Hash = std.hash.Wyhash;
const Path = std.Build.Cache.Path;
const Stat = std.Build.Cache.File.Stat;

const codegen = @import("../codegen.zig");
const dev = @import("../dev.zig");
const eh_frame = @import("Elf/eh_frame.zig");
const gc = @import("Elf/gc.zig");
const musl = @import("../libs/musl.zig");
const link = @import("../link.zig");
const relocatable = @import("Elf/relocatable.zig");
const relocation = @import("Elf/relocation.zig");
const target_util = @import("../target.zig");
const trace = @import("../tracy.zig").trace;
const synthetic_sections = @import("Elf/synthetic_sections.zig");

const Merge = @import("Elf/Merge.zig");
const Archive = @import("Elf/Archive.zig");
const AtomList = @import("Elf/AtomList.zig");
const Compilation = @import("../Compilation.zig");
const GroupSection = synthetic_sections.GroupSection;
const CopyRelSection = synthetic_sections.CopyRelSection;
const Diags = @import("../link.zig").Diags;
const DynamicSection = synthetic_sections.DynamicSection;
const DynsymSection = synthetic_sections.DynsymSection;
const Dwarf = @import("Dwarf.zig");
const Elf = @This();
const File = @import("Elf/file.zig").File;
const GnuHashSection = synthetic_sections.GnuHashSection;
const GotSection = synthetic_sections.GotSection;
const GotPltSection = synthetic_sections.GotPltSection;
const HashSection = synthetic_sections.HashSection;
const LinkerDefined = @import("Elf/LinkerDefined.zig");
const Zcu = @import("../Zcu.zig");
const Object = @import("Elf/Object.zig");
const InternPool = @import("../InternPool.zig");
const PltSection = synthetic_sections.PltSection;
const PltGotSection = synthetic_sections.PltGotSection;
const SharedObject = @import("Elf/SharedObject.zig");
const Symbol = @import("Elf/Symbol.zig");
const StringTable = @import("StringTable.zig");
const Thunk = @import("Elf/Thunk.zig");
const Value = @import("../Value.zig");
const VerneedSection = synthetic_sections.VerneedSection;
const ZigObject = @import("Elf/ZigObject.zig");