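/// Flushes the compilation into a static archive (ar). The ZigObject, if any,
/// is first written out as a relocatable ELF object; then all objects are
/// packed into the archive together with a symbol table and a string table
/// for long file names.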
pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
    const gpa = comp.gpa;
    const diags = &comp.link_diags;

    if (diags.hasErrors()) return error.LinkFailure;

    // First, flush the relocatable object file generated by our backends.
    if (elf_file.zigObjectPtr()) |zig_object| {
        try zig_object.resolveSymbols(elf_file);
        elf_file.markEhFrameAtomsDead();
        try elf_file.addCommentString();
        try elf_file.finalizeMergeSections();
        zig_object.claimUnresolvedRelocatable(elf_file);

        try initSections(elf_file);
        try Elf.sortShdrs(
            gpa,
            &elf_file.section_indexes,
            &elf_file.sections,
            elf_file.shstrtab.items,
            elf_file.merge_sections.items,
            elf_file.group_sections.items,
            elf_file.zigObjectPtr(),
            elf_file.files,
        );
        try zig_object.addAtomsToRelaSections(elf_file);
        try elf_file.updateMergeSectionSizes();
        try updateSectionSizes(elf_file);

        try allocateAllocSections(elf_file);
        try elf_file.allocateNonAllocSections();

        if (build_options.enable_logging) {
            state_log.debug("{f}", .{elf_file.dumpState()});
        }

        try elf_file.writeMergeSections();
        try writeSyntheticSections(elf_file);
        try elf_file.writeShdrTable();
        try elf_file.writeElfHeader();

        // TODO: we can avoid reading back the file contents we just wrote if we give
        // the linker the ability to write directly to a buffer.
        try zig_object.readFileContents(elf_file);
    }

    var files = std.array_list.Managed(File.Index).init(gpa);
    defer files.deinit();
    try files.ensureTotalCapacityPrecise(elf_file.objects.items.len + 1);
    if (elf_file.zigObjectPtr()) |zig_object| files.appendAssumeCapacity(zig_object.index);
    for (elf_file.objects.items) |index| files.appendAssumeCapacity(index);

    // Update the ar symtab from parsed objects.
    var ar_symtab: Archive.ArSymtab = .{};
    defer ar_symtab.deinit(gpa);

    for (files.items) |index| {
        try elf_file.file(index).?.updateArSymtab(&ar_symtab, elf_file);
    }

    ar_symtab.sort();

    // Save the object paths in the filenames strtab.
    var ar_strtab: Archive.ArStrtab = .{};
    defer ar_strtab.deinit(gpa);

    for (files.items) |index| {
        const file_ptr = elf_file.file(index).?;
        try file_ptr.updateArStrtab(gpa, &ar_strtab);
        try file_ptr.updateArSize(elf_file);
    }

    // Compute the total archive size and update the file offsets of contributing objects.
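    // The ar format requires each member to start at an even offset, hence the
    // 2-byte alignment below and in the write loop further down.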
    const total_size: usize = blk: {
        var pos: usize = elf.ARMAG.len;
        pos += @sizeOf(elf.ar_hdr) + ar_symtab.size(.p64);

        if (ar_strtab.size() > 0) {
            pos = mem.alignForward(usize, pos, 2);
            pos += @sizeOf(elf.ar_hdr) + ar_strtab.size();
        }

        for (files.items) |index| {
            const file_ptr = elf_file.file(index).?;
            const state = switch (file_ptr) {
                .zig_object => |x| &x.output_ar_state,
                .object => |x| &x.output_ar_state,
                else => unreachable,
            };
            pos = mem.alignForward(usize, pos, 2);
            state.file_off = pos;
            pos += @sizeOf(elf.ar_hdr) + (math.cast(usize, state.size) orelse return error.Overflow);
        }

        break :blk pos;
    };

    if (build_options.enable_logging) {
        state_log.debug("ar_symtab\n{f}\n", .{ar_symtab.fmt(elf_file)});
        state_log.debug("ar_strtab\n{f}\n", .{ar_strtab});
    }

    const buffer = try gpa.alloc(u8, total_size);
    defer gpa.free(buffer);

    var writer: std.Io.Writer = .fixed(buffer);

    // Write magic.
    try writer.writeAll(elf.ARMAG);

    // Write symtab.
    try ar_symtab.write(.p64, elf_file, &writer);

    // Write strtab.
    if (ar_strtab.size() > 0) {
        if (!mem.isAligned(writer.end, 2)) try writer.writeByte(0);
        try ar_strtab.write(&writer);
    }

    // Write object files.
    for (files.items) |index| {
        if (!mem.isAligned(writer.end, 2)) try writer.writeByte(0);
        try elf_file.file(index).?.writeAr(elf_file, &writer);
    }

    assert(writer.buffered().len == total_size);

    try elf_file.base.file.?.setEndPos(total_size);
    try elf_file.base.file.?.pwriteAll(writer.buffered(), 0);

    if (diags.hasErrors()) return error.LinkFailure;
}

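/// Produces a single relocatable object file by combining the ZigObject with
/// all parsed input object files: resolves symbols, lays out sections, and
/// writes the ELF file.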
pub fn flushObject(elf_file: *Elf, comp: *Compilation) !void {
    const diags = &comp.link_diags;

    if (diags.hasErrors()) return error.LinkFailure;

    // Now we are ready to resolve the symbols across all input files.
    // We resolve the symbols in the ZigObject first, then in the parsed
    // input Object files.
    try elf_file.resolveSymbols();
    elf_file.markEhFrameAtomsDead();
    try elf_file.resolveMergeSections();
    try elf_file.addCommentString();
    try elf_file.finalizeMergeSections();
    claimUnresolved(elf_file);

    try initSections(elf_file);
    try Elf.sortShdrs(
        comp.gpa,
        &elf_file.section_indexes,
        &elf_file.sections,
        elf_file.shstrtab.items,
        elf_file.merge_sections.items,
        elf_file.group_sections.items,
        elf_file.zigObjectPtr(),
        elf_file.files,
    );
    if (elf_file.zigObjectPtr()) |zig_object| {
        try zig_object.addAtomsToRelaSections(elf_file);
    }
    for (elf_file.objects.items) |index| {
        const object = elf_file.file(index).?.object;
        try object.addAtomsToRelaSections(elf_file);
    }
    try elf_file.updateMergeSectionSizes();
    try updateSectionSizes(elf_file);

    try allocateAllocSections(elf_file);
    try elf_file.allocateNonAllocSections();

    if (build_options.enable_logging) {
        state_log.debug("{f}", .{elf_file.dumpState()});
    }

    try writeAtoms(elf_file);
    try elf_file.writeMergeSections();
    try writeSyntheticSections(elf_file);
    try elf_file.writeShdrTable();
    try elf_file.writeElfHeader();

    if (diags.hasErrors()) return error.LinkFailure;
}

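/// Claims unresolved symbols in the ZigObject and in all parsed input objects
/// so they are carried into the relocatable output.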
fn claimUnresolved(elf_file: *Elf) void {
    if (elf_file.zigObjectPtr()) |zig_object| {
        zig_object.claimUnresolvedRelocatable(elf_file);
    }
    for (elf_file.objects.items) |index| {
        elf_file.file(index).?.object.claimUnresolvedRelocatable(elf_file);
    }
}

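/// Initializes output sections and their SHT_RELA counterparts for all inputs,
/// creates .eh_frame/.rela.eh_frame if needed, and sets up the group, symtab,
/// and shstrtab sections.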
fn initSections(elf_file: *Elf) !void {
    if (elf_file.zigObjectPtr()) |zo| {
        try zo.initRelaSections(elf_file);
    }
    for (elf_file.objects.items) |index| {
        const object = elf_file.file(index).?.object;
        try object.initOutputSections(elf_file);
        try object.initRelaSections(elf_file);
    }

    for (elf_file.merge_sections.items) |*msec| {
        if (msec.finalized_subsections.items.len == 0) continue;
        try msec.initOutputSection(elf_file);
    }

    const needs_eh_frame = blk: {
        if (elf_file.zigObjectPtr()) |zo|
            if (zo.eh_frame_index != null) break :blk true;
        break :blk for (elf_file.objects.items) |index| {
            if (elf_file.file(index).?.object.cies.items.len > 0) break true;
        } else false;
    };
    if (needs_eh_frame) {
        if (elf_file.section_indexes.eh_frame == null) {
            elf_file.section_indexes.eh_frame = elf_file.sectionByName(".eh_frame") orelse
                try elf_file.addSection(.{
                    .name = try elf_file.insertShString(".eh_frame"),
                    .type = if (elf_file.getTarget().cpu.arch == .x86_64)
                        elf.SHT_X86_64_UNWIND
                    else
                        elf.SHT_PROGBITS,
                    .flags = elf.SHF_ALLOC,
                    .addralign = elf_file.ptrWidthBytes(),
                });
        }
        elf_file.section_indexes.eh_frame_rela = elf_file.sectionByName(".rela.eh_frame") orelse
            try elf_file.addRelaShdr(
                try elf_file.insertShString(".rela.eh_frame"),
                elf_file.section_indexes.eh_frame.?,
            );
    }

    try initGroups(elf_file);
    try elf_file.initSymtab();
    try elf_file.initShStrtab();
}

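/// Creates one SHT_GROUP output section for every live (COMDAT) group in the
/// parsed input objects.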
fn initGroups(elf_file: *Elf) !void {
    const gpa = elf_file.base.comp.gpa;

    for (elf_file.objects.items) |index| {
        const object = elf_file.file(index).?.object;
        for (object.groups.items, 0..) |cg, cg_index| {
            if (!cg.alive) continue;
            const cg_sec = try elf_file.group_sections.addOne(gpa);
            cg_sec.* = .{
                .shndx = try elf_file.addSection(.{
                    .name = try elf_file.insertShString(".group"),
                    .type = elf.SHT_GROUP,
                    .entsize = @sizeOf(u32),
                    .addralign = @alignOf(u32),
                }),
                .cg_ref = .{ .index = @intCast(cg_index), .file = index },
            };
        }
    }
}

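/// Computes the final size of every output section: atom lists, SHT_RELA
/// sections, .eh_frame, groups, the symbol table, and the section header strtab.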
fn updateSectionSizes(elf_file: *Elf) !void {
    const slice = elf_file.sections.slice();
    for (slice.items(.atom_list_2)) |*atom_list| {
        if (atom_list.atoms.keys().len == 0) continue;
        if (!atom_list.dirty) continue;
        atom_list.updateSize(elf_file);
        try atom_list.allocate(elf_file);
        atom_list.dirty = false;
    }

    for (slice.items(.shdr), 0..) |*shdr, shndx| {
        const atom_list = slice.items(.atom_list)[shndx];
        if (shdr.sh_type != elf.SHT_RELA) continue;
        if (@as(u32, @intCast(shndx)) == elf_file.section_indexes.eh_frame) continue;
        for (atom_list.items) |ref| {
            const atom_ptr = elf_file.atom(ref) orelse continue;
            if (!atom_ptr.alive) continue;
            const relocs = atom_ptr.relocs(elf_file);
            shdr.sh_size += shdr.sh_entsize * relocs.len;
        }

        if (shdr.sh_size == 0) shdr.sh_offset = 0;
    }

    if (elf_file.section_indexes.eh_frame) |index| {
        slice.items(.shdr)[index].sh_size = try eh_frame.calcEhFrameSize(elf_file);
    }
    if (elf_file.section_indexes.eh_frame_rela) |index| {
        const shdr = &slice.items(.shdr)[index];
        shdr.sh_size = eh_frame.calcEhFrameRelocs(elf_file) * shdr.sh_entsize;
    }

    try elf_file.updateSymtabSize();
    updateGroupsSizes(elf_file);
    elf_file.updateShStrtabSize();
}

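/// Sets each group section's size, links it to the symbol table via sh_link,
/// and records the group's signature symbol in sh_info.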
fn updateGroupsSizes(elf_file: *Elf) void {
    for (elf_file.group_sections.items) |cg| {
        const shdr = &elf_file.sections.items(.shdr)[cg.shndx];
        shdr.sh_size = cg.size(elf_file);
        shdr.sh_link = elf_file.section_indexes.symtab.?;

        const sym = cg.symbol(elf_file);
        shdr.sh_info = sym.outputSymtabIndex(elf_file) orelse sym.outputShndx(elf_file).?;
    }
}

/// Allocates alloc sections when merging relocatable object files together.
fn allocateAllocSections(elf_file: *Elf) !void {
    for (elf_file.sections.items(.shdr), 0..) |*shdr, shndx| {
        if (shdr.sh_type == elf.SHT_NULL) continue;
        if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
        if (shdr.sh_type == elf.SHT_NOBITS) {
            shdr.sh_offset = 0;
            continue;
        }
        const needed_size = shdr.sh_size;
        if (needed_size > elf_file.allocatedSize(shdr.sh_offset)) {
            shdr.sh_size = 0;
            const new_offset = try elf_file.findFreeSpace(needed_size, shdr.sh_addralign);

            log.debug("moving {s} from 0x{x} to 0x{x}", .{
                elf_file.getShString(shdr.sh_name),
                shdr.sh_offset,
                new_offset,
            });

            if (shdr.sh_offset > 0) {
                const existing_size = elf_file.sectionSize(@intCast(shndx));
                const amt = try elf_file.base.file.?.copyRangeAll(
                    shdr.sh_offset,
                    elf_file.base.file.?,
                    new_offset,
                    existing_size,
                );
                if (amt != existing_size) return error.InputOutput;
            }

            shdr.sh_offset = new_offset;
            shdr.sh_size = needed_size;
        }
    }
}

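/// Writes the atoms of every non-NOBITS output section to the file.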
fn writeAtoms(elf_file: *Elf) !void {
    const gpa = elf_file.base.comp.gpa;

    var buffer = std.array_list.Managed(u8).init(gpa);
    defer buffer.deinit();

    const slice = elf_file.sections.slice();
    for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, atom_list| {
        if (shdr.sh_type == elf.SHT_NOBITS) continue;
        if (atom_list.atoms.keys().len == 0) continue;
        try atom_list.writeRelocatable(&buffer, elf_file);
    }
}

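/// Writes the linker-generated sections: all SHT_RELA sections, .eh_frame and
/// .rela.eh_frame, groups, the symbol table, and the section header strtab.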
fn writeSyntheticSections(elf_file: *Elf) !void {
    const gpa = elf_file.base.comp.gpa;
    const slice = elf_file.sections.slice();

    const SortRelocs = struct {
        pub fn lessThan(ctx: void, lhs: elf.Elf64_Rela, rhs: elf.Elf64_Rela) bool {
            _ = ctx;
            assert(lhs.r_offset != rhs.r_offset);
            return lhs.r_offset < rhs.r_offset;
        }
    };

    for (slice.items(.shdr), slice.items(.atom_list), 0..) |shdr, atom_list, shndx| {
        if (shdr.sh_type != elf.SHT_RELA) continue;
        if (atom_list.items.len == 0) continue;
        if (@as(u32, @intCast(shndx)) == elf_file.section_indexes.eh_frame) continue;

        const num_relocs = math.cast(usize, @divExact(shdr.sh_size, shdr.sh_entsize)) orelse
            return error.Overflow;
        var relocs = try std.array_list.Managed(elf.Elf64_Rela).initCapacity(gpa, num_relocs);
        defer relocs.deinit();

        for (atom_list.items) |ref| {
            const atom_ptr = elf_file.atom(ref) orelse continue;
            if (!atom_ptr.alive) continue;
            try atom_ptr.writeRelocs(elf_file, &relocs);
        }
        assert(relocs.items.len == num_relocs);
        // Sort output relocations by r_offset, as linkers usually expect (and
        // prefer) them in that order.
        mem.sortUnstable(elf.Elf64_Rela, relocs.items, {}, SortRelocs.lessThan);

        log.debug("writing {s} from 0x{x} to 0x{x}", .{
            elf_file.getShString(shdr.sh_name),
            shdr.sh_offset,
            shdr.sh_offset + shdr.sh_size,
        });

        try elf_file.base.file.?.pwriteAll(@ptrCast(relocs.items), shdr.sh_offset);
    }

    if (elf_file.section_indexes.eh_frame) |shndx| {
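        // The ZigObject's .eh_frame contribution (if any) already occupies the
        // start of the section, so only the remainder is regenerated here.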
        const existing_size = existing_size: {
            const zo = elf_file.zigObjectPtr() orelse break :existing_size 0;
            const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0);
            break :existing_size sym.atom(elf_file).?.size;
        };
        const shdr = slice.items(.shdr)[shndx];
        const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
        const buffer = try gpa.alloc(u8, @intCast(sh_size - existing_size));
        defer gpa.free(buffer);
        var writer: std.Io.Writer = .fixed(buffer);
        try eh_frame.writeEhFrameRelocatable(elf_file, &writer);
        log.debug("writing .eh_frame from 0x{x} to 0x{x}", .{
            shdr.sh_offset + existing_size,
            shdr.sh_offset + sh_size,
        });
        assert(writer.buffered().len == sh_size - existing_size);
        try elf_file.base.file.?.pwriteAll(writer.buffered(), shdr.sh_offset + existing_size);
    }
    if (elf_file.section_indexes.eh_frame_rela) |shndx| {
        const shdr = slice.items(.shdr)[shndx];
        const num_relocs = math.cast(usize, @divExact(shdr.sh_size, shdr.sh_entsize)) orelse
            return error.Overflow;
        var relocs = try std.array_list.Managed(elf.Elf64_Rela).initCapacity(gpa, num_relocs);
        defer relocs.deinit();
        try eh_frame.writeEhFrameRelocs(elf_file, &relocs);
        assert(relocs.items.len == num_relocs);
        // Sort output relocations by r_offset, as linkers usually expect (and
        // prefer) them in that order.
        mem.sortUnstable(elf.Elf64_Rela, relocs.items, {}, SortRelocs.lessThan);

        log.debug("writing .rela.eh_frame from 0x{x} to 0x{x}", .{
            shdr.sh_offset,
            shdr.sh_offset + shdr.sh_size,
        });
        try elf_file.base.file.?.pwriteAll(@ptrCast(relocs.items), shdr.sh_offset);
    }

    try writeGroups(elf_file);
    try elf_file.writeSymtab();
    try elf_file.writeShStrtab();
}

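/// Serializes each SHT_GROUP section and writes it to the output file.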
fn writeGroups(elf_file: *Elf) !void {
    const gpa = elf_file.base.comp.gpa;
    for (elf_file.group_sections.items) |cgs| {
        const shdr = elf_file.sections.items(.shdr)[cgs.shndx];
        const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
        const buffer = try gpa.alloc(u8, sh_size);
        defer gpa.free(buffer);
        var writer: std.Io.Writer = .fixed(buffer);
        try cgs.write(elf_file, &writer);
        assert(writer.buffered().len == sh_size);
        log.debug("writing group from 0x{x} to 0x{x}", .{
            shdr.sh_offset,
            shdr.sh_offset + shdr.sh_size,
        });
        try elf_file.base.file.?.pwriteAll(writer.buffered(), shdr.sh_offset);
    }
}

const assert = std.debug.assert;
const build_options = @import("build_options");
const eh_frame = @import("eh_frame.zig");
const elf = std.elf;
const link = @import("../../link.zig");
const log = std.log.scoped(.link);
const math = std.math;
const mem = std.mem;
const state_log = std.log.scoped(.link_state);
const Path = std.Build.Cache.Path;
const std = @import("std");

const Archive = @import("Archive.zig");
const Compilation = @import("../../Compilation.zig");
const Elf = @import("../Elf.zig");
const File = @import("file.zig").File;
const Object = @import("Object.zig");
const Symbol = @import("Symbol.zig");