Commit c86a334d43

Andrew Kelley <andrew@ziglang.org>
2024-10-09 08:41:39
link.Elf.Object.initAtoms: reduce state access and indirection
The initAtoms function now uses the `elf_file` parameter only for reporting linker error messages. This makes it clear that the function has no data dependencies other than the Object struct itself, which in turn makes it easier to parallelize or otherwise move that logic around. Also removed an indirect call via `addExtra`, since we already know the atom's file is the current Object instance; all calls to `Atom.addExtra` should be audited for the same reason. Also removed an unjustified use of `inline fn`.
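For reference, a minimal, self-contained sketch of the optional-fields update pattern that `Extra.AsOptionals` and the new `Object.setAtomFields` rely on: a mirror struct where every field is optional, and a comptime loop that applies only the fields the caller actually set. The `Extra`/`setFields` names below are illustrative stand-ins, not the linker's real declarations; the real version reads and writes the extras through the owning Object's `atomExtra`/`setAtomExtra` storage.

const std = @import("std");

// Hypothetical, trimmed-down stand-in for Atom.Extra.
const Extra = struct {
    rel_index: u32 = 0,
    rel_count: u32 = 0,

    // Same fields as Extra, but optional: null means "leave the stored value alone".
    pub const AsOptionals = struct {
        rel_index: ?u32 = null,
        rel_count: ?u32 = null,
    };
};

// Apply only the fields that were explicitly provided, mirroring the shape of setAtomFields.
fn setFields(extras: *Extra, opts: Extra.AsOptionals) void {
    inline for (@typeInfo(Extra.AsOptionals).@"struct".fields) |field| {
        if (@field(opts, field.name)) |x| @field(extras, field.name) = x;
    }
}

test "only provided fields are overwritten" {
    var extras: Extra = .{ .rel_index = 1, .rel_count = 2 };
    setFields(&extras, .{ .rel_count = 5 });
    try std.testing.expectEqual(@as(u32, 1), extras.rel_index);
    try std.testing.expectEqual(@as(u32, 5), extras.rel_count);
}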
1 parent 10cb578
Changed files (2)
src/link/Elf/Atom.zig
@@ -868,15 +868,7 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
     if (has_reloc_errors) return error.RelocFailure;
 }
 
-const AddExtraOpts = struct {
-    thunk: ?u32 = null,
-    fde_start: ?u32 = null,
-    fde_count: ?u32 = null,
-    rel_index: ?u32 = null,
-    rel_count: ?u32 = null,
-};
-
-pub fn addExtra(atom: *Atom, opts: AddExtraOpts, elf_file: *Elf) void {
+pub fn addExtra(atom: *Atom, opts: Extra.AsOptionals, elf_file: *Elf) void {
     const file_ptr = atom.file(elf_file).?;
     var extras = file_ptr.atomExtra(atom.extra_index);
     inline for (@typeInfo(@TypeOf(opts)).@"struct".fields) |field| {
@@ -887,11 +879,11 @@ pub fn addExtra(atom: *Atom, opts: AddExtraOpts, elf_file: *Elf) void {
     file_ptr.setAtomExtra(atom.extra_index, extras);
 }
 
-pub inline fn extra(atom: Atom, elf_file: *Elf) Extra {
+pub fn extra(atom: Atom, elf_file: *Elf) Extra {
     return atom.file(elf_file).?.atomExtra(atom.extra_index);
 }
 
-pub inline fn setExtra(atom: Atom, extras: Extra, elf_file: *Elf) void {
+pub fn setExtra(atom: Atom, extras: Extra, elf_file: *Elf) void {
     atom.file(elf_file).?.setAtomExtra(atom.extra_index, extras);
 }
 
@@ -2103,6 +2095,14 @@ pub const Extra = struct {
 
     /// Count of relocations belonging to this atom.
     rel_count: u32 = 0,
+
+    pub const AsOptionals = struct {
+        thunk: ?u32 = null,
+        fde_start: ?u32 = null,
+        fde_count: ?u32 = null,
+        rel_index: ?u32 = null,
+        rel_count: ?u32 = null,
+    };
 };
 
 const std = @import("std");
src/link/Elf/Object.zig
@@ -166,6 +166,9 @@ fn parseCommon(self: *Object, allocator: Allocator, handle: std.fs.File, elf_fil
 }
 
 fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file: *Elf) !void {
+    const comp = elf_file.base.comp;
+    const debug_fmt_strip = comp.config.debug_format == .strip;
+    const target = comp.root_mod.resolved_target.result;
     const shdrs = self.shdrs.items;
     try self.atoms.ensureTotalCapacityPrecise(allocator, shdrs.len);
     try self.atoms_extra.ensureTotalCapacityPrecise(allocator, shdrs.len * @sizeOf(Atom.Extra));
@@ -194,7 +197,7 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
                     break :blk group_info_sym.st_name;
                 };
 
-                const shndx = @as(u32, @intCast(i));
+                const shndx: u32 = @intCast(i);
                 const group_raw_data = try self.preadShdrContentsAlloc(allocator, handle, shndx);
                 defer allocator.free(group_raw_data);
                 const group_nmembers = math.divExact(usize, group_raw_data.len, @sizeOf(u32)) catch {
@@ -209,7 +212,7 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
                     return elf_file.failFile(self.index, "corrupt section group: unknown SHT_GROUP format", .{});
                 }
 
-                const group_start = @as(u32, @intCast(self.comdat_group_data.items.len));
+                const group_start: u32 = @intCast(self.comdat_group_data.items.len);
                 try self.comdat_group_data.appendUnalignedSlice(allocator, group_members[1..]);
 
                 const comdat_group_index = try self.addComdatGroup(allocator);
@@ -233,8 +236,8 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
             => {},
 
             else => {
-                const shndx = @as(u32, @intCast(i));
-                if (self.skipShdr(shndx, elf_file)) continue;
+                const shndx: u32 = @intCast(i);
+                if (self.skipShdr(shndx, debug_fmt_strip)) continue;
                 const size, const alignment = if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) blk: {
                     const data = try self.preadShdrContentsAlloc(allocator, handle, shndx);
                     defer allocator.free(data);
@@ -262,9 +265,9 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
                 atom_ptr.relocs_section_index = @intCast(i);
                 const rel_index: u32 = @intCast(self.relocs.items.len);
                 const rel_count: u32 = @intCast(relocs.len);
-                atom_ptr.addExtra(.{ .rel_index = rel_index, .rel_count = rel_count }, elf_file);
+                self.setAtomFields(atom_ptr, .{ .rel_index = rel_index, .rel_count = rel_count });
                 try self.relocs.appendUnalignedSlice(allocator, relocs);
-                if (elf_file.getTarget().cpu.arch == .riscv64) {
+                if (target.cpu.arch == .riscv64) {
                     sortRelocs(self.relocs.items[rel_index..][0..rel_count]);
                 }
             }
@@ -273,15 +276,14 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
     };
 }
 
-fn skipShdr(self: *Object, index: u32, elf_file: *Elf) bool {
-    const comp = elf_file.base.comp;
+fn skipShdr(self: *Object, index: u32, debug_fmt_strip: bool) bool {
     const shdr = self.shdrs.items[index];
     const name = self.getString(shdr.sh_name);
     const ignore = blk: {
         if (mem.startsWith(u8, name, ".note")) break :blk true;
         if (mem.startsWith(u8, name, ".llvm_addrsig")) break :blk true;
         if (mem.startsWith(u8, name, ".riscv.attributes")) break :blk true; // TODO: riscv attributes
-        if (comp.config.debug_format == .strip and shdr.sh_flags & elf.SHF_ALLOC == 0 and
+        if (debug_fmt_strip and shdr.sh_flags & elf.SHF_ALLOC == 0 and
             mem.startsWith(u8, name, ".debug")) break :blk true;
         break :blk false;
     };
@@ -1257,7 +1259,7 @@ pub fn addAtomExtra(self: *Object, allocator: Allocator, extra: Atom.Extra) !u32
 }
 
 pub fn addAtomExtraAssumeCapacity(self: *Object, extra: Atom.Extra) u32 {
-    const index = @as(u32, @intCast(self.atoms_extra.items.len));
+    const index: u32 = @intCast(self.atoms_extra.items.len);
     const fields = @typeInfo(Atom.Extra).@"struct".fields;
     inline for (fields) |field| {
         self.atoms_extra.appendAssumeCapacity(switch (field.type) {
@@ -1292,6 +1294,15 @@ pub fn setAtomExtra(self: *Object, index: u32, extra: Atom.Extra) void {
     }
 }
 
+fn setAtomFields(o: *Object, atom_ptr: *Atom, opts: Atom.Extra.AsOptionals) void {
+    assert(o.index == atom_ptr.file_index);
+    var extras = o.atomExtra(atom_ptr.extra_index);
+    inline for (@typeInfo(@TypeOf(opts)).@"struct".fields) |field| {
+        if (@field(opts, field.name)) |x| @field(extras, field.name) = x;
+    }
+    o.setAtomExtra(atom_ptr.extra_index, extras);
+}
+
 fn addInputMergeSection(self: *Object, allocator: Allocator) !InputMergeSection.Index {
     const index: InputMergeSection.Index = @intCast(self.input_merge_sections.items.len);
     const msec = try self.input_merge_sections.addOne(allocator);