Commit 669f285943
Changed files (15)
src/link/Elf/Atom.zig
@@ -68,7 +68,7 @@ pub fn file(self: Atom, elf_file: *Elf) ?File {
pub fn thunk(self: Atom, elf_file: *Elf) *Thunk {
assert(self.flags.thunk);
- const extras = self.extra(elf_file).?;
+ const extras = self.extra(elf_file);
return elf_file.thunk(extras.thunk);
}
@@ -99,7 +99,8 @@ pub fn priority(self: Atom, elf_file: *Elf) u64 {
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
pub fn capacity(self: Atom, elf_file: *Elf) u64 {
- const next_addr = if (elf_file.atom(self.next_index)) |next|
+ const zo = elf_file.zigObjectPtr().?;
+ const next_addr = if (zo.atom(self.next_index)) |next|
next.address(elf_file)
else
std.math.maxInt(u32);
@@ -107,8 +108,9 @@ pub fn capacity(self: Atom, elf_file: *Elf) u64 {
}
pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
+ const zo = elf_file.zigObjectPtr().?;
// No need to keep a free list node for the last block.
- const next = elf_file.atom(self.next_index) orelse return false;
+ const next = zo.atom(self.next_index) orelse return false;
const cap: u64 = @intCast(next.address(elf_file) - self.address(elf_file));
const ideal_cap = Elf.padToIdeal(self.size);
if (cap <= ideal_cap) return false;
@@ -117,6 +119,7 @@ pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
}
pub fn allocate(self: *Atom, elf_file: *Elf) !void {
+ const zo = elf_file.zigObjectPtr().?;
const shdr = &elf_file.shdrs.items[self.outputShndx().?];
const meta = elf_file.last_atom_and_free_list_table.getPtr(self.outputShndx().?).?;
const free_list = &meta.free_list;
@@ -137,7 +140,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
var i: usize = if (elf_file.base.child_pid == null) 0 else free_list.items.len;
while (i < free_list.items.len) {
const big_atom_index = free_list.items[i];
- const big_atom = elf_file.atom(big_atom_index).?;
+ const big_atom = zo.atom(big_atom_index).?;
// We now have a pointer to a live atom that has too much capacity.
// Is it enough that we could fit this new atom?
const cap = big_atom.capacity(elf_file);
@@ -169,7 +172,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
free_list_removal = i;
}
break :blk @intCast(new_start_vaddr);
- } else if (elf_file.atom(last_atom_index.*)) |last| {
+ } else if (zo.atom(last_atom_index.*)) |last| {
const ideal_capacity = Elf.padToIdeal(last.size);
const ideal_capacity_end_vaddr = @as(u64, @intCast(last.value)) + ideal_capacity;
const new_start_vaddr = self.alignment.forward(ideal_capacity_end_vaddr);
@@ -189,7 +192,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
});
const expand_section = if (atom_placement) |placement_index|
- elf_file.atom(placement_index).?.next_index == 0
+ zo.atom(placement_index).?.next_index == 0
else
true;
if (expand_section) {
@@ -214,15 +217,15 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
// This function can also reallocate an atom.
// In this case we need to "unplug" it from its previous location before
// plugging it in to its new location.
- if (elf_file.atom(self.prev_index)) |prev| {
+ if (zo.atom(self.prev_index)) |prev| {
prev.next_index = self.next_index;
}
- if (elf_file.atom(self.next_index)) |next| {
+ if (zo.atom(self.next_index)) |next| {
next.prev_index = self.prev_index;
}
if (atom_placement) |big_atom_index| {
- const big_atom = elf_file.atom(big_atom_index).?;
+ const big_atom = zo.atom(big_atom_index).?;
self.prev_index = big_atom_index;
self.next_index = big_atom.next_index;
big_atom.next_index = self.atom_index;
@@ -250,6 +253,7 @@ pub fn grow(self: *Atom, elf_file: *Elf) !void {
pub fn free(self: *Atom, elf_file: *Elf) void {
log.debug("freeAtom {d} ({s})", .{ self.atom_index, self.name(elf_file) });
+ const zo = elf_file.zigObjectPtr().?;
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const shndx = self.outputShndx().?;
@@ -272,9 +276,9 @@ pub fn free(self: *Atom, elf_file: *Elf) void {
}
}
- if (elf_file.atom(last_atom_index.*)) |last_atom| {
+ if (zo.atom(last_atom_index.*)) |last_atom| {
if (last_atom.atom_index == self.atom_index) {
- if (elf_file.atom(self.prev_index)) |_| {
+ if (zo.atom(self.prev_index)) |_| {
// TODO shrink the section size here
last_atom_index.* = self.prev_index;
} else {
@@ -283,7 +287,7 @@ pub fn free(self: *Atom, elf_file: *Elf) void {
}
}
- if (elf_file.atom(self.prev_index)) |prev| {
+ if (zo.atom(self.prev_index)) |prev| {
prev.next_index = self.next_index;
if (!already_have_free_list_node and prev.*.freeListEligible(elf_file)) {
// The free list is heuristics, it doesn't have to be perfect, so we can
@@ -294,7 +298,7 @@ pub fn free(self: *Atom, elf_file: *Elf) void {
self.prev_index = 0;
}
- if (elf_file.atom(self.next_index)) |next| {
+ if (zo.atom(self.next_index)) |next| {
next.prev_index = self.prev_index;
} else {
self.next_index = 0;
@@ -313,7 +317,7 @@ pub fn relocs(self: Atom, elf_file: *Elf) []const elf.Elf64_Rela {
switch (self.file(elf_file).?) {
.zig_object => |x| return x.relocs.items[shndx].items,
.object => |x| {
- const extras = self.extra(elf_file).?;
+ const extras = self.extra(elf_file);
return x.relocs.items[extras.rel_index..][0..extras.rel_count];
},
else => unreachable,
@@ -367,7 +371,7 @@ pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.El
pub fn fdes(self: Atom, elf_file: *Elf) []Fde {
if (!self.flags.fde) return &[0]Fde{};
- const extras = self.extra(elf_file).?;
+ const extras = self.extra(elf_file);
const object = self.file(elf_file).?.object;
return object.fdes.items[extras.fde_start..][0..extras.fde_count];
}
@@ -712,9 +716,9 @@ fn reportUndefined(
{
const gop = try undefs.getOrPut(sym_index);
if (!gop.found_existing) {
- gop.value_ptr.* = std.ArrayList(Atom.Index).init(gpa);
+ gop.value_ptr.* = std.ArrayList(Elf.Ref).init(gpa);
}
- try gop.value_ptr.append(self.atom_index);
+ try gop.value_ptr.append(.{ .index = self.atom_index, .file = self.file_index });
return true;
}
@@ -1001,25 +1005,23 @@ const AddExtraOpts = struct {
rel_count: ?u32 = null,
};
-pub fn addExtra(atom: *Atom, opts: AddExtraOpts, elf_file: *Elf) !void {
- if (atom.extra(elf_file) == null) {
- atom.extra_index = try elf_file.addAtomExtra(.{});
- }
- var extras = atom.extra(elf_file).?;
+pub fn addExtra(atom: *Atom, opts: AddExtraOpts, elf_file: *Elf) void {
+ const file_ptr = atom.file(elf_file).?;
+ var extras = file_ptr.atomExtra(atom.extra_index);
inline for (@typeInfo(@TypeOf(opts)).Struct.fields) |field| {
if (@field(opts, field.name)) |x| {
@field(extras, field.name) = x;
}
}
- atom.setExtra(extras, elf_file);
+ file_ptr.setAtomExtra(atom.extra_index, extras);
}
-pub inline fn extra(atom: Atom, elf_file: *Elf) ?Extra {
- return elf_file.atomExtra(atom.extra_index);
+pub inline fn extra(atom: Atom, elf_file: *Elf) Extra {
+ return atom.file(elf_file).?.atomExtra(atom.extra_index);
}
pub inline fn setExtra(atom: Atom, extras: Extra, elf_file: *Elf) void {
- elf_file.setAtomExtra(atom.extra_index, extras);
+ atom.file(elf_file).?.setAtomExtra(atom.extra_index, extras);
}
pub fn format(
@@ -1063,7 +1065,7 @@ fn format2(
});
if (atom.flags.fde) {
try writer.writeAll(" : fdes{ ");
- const extras = atom.extra(elf_file).?;
+ const extras = atom.extra(elf_file);
for (atom.fdes(elf_file), extras.fde_start..) |fde, i| {
try writer.print("{d}", .{i});
if (!fde.alive) try writer.writeAll("([*])");
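A note on the Atom.zig hunks above: extra() now returns Extra directly instead of ?Extra, and addExtra() no longer allocates and so cannot fail, because every atom receives its extras slot at creation time in its owning file (see the addAtomAssumeCapacity helpers added to Object.zig and ZigObject.zig further down). A minimal sketch of the resulting call pattern, assuming atom_ptr: *Atom, elf_file: *Elf, rel_index and rel_count are already in scope:

    // Sketch only, not part of the commit. The extras slot was pre-allocated
    // when the atom was created, so neither call below can fail or return null.
    atom_ptr.addExtra(.{ .rel_index = rel_index, .rel_count = rel_count }, elf_file);
    const extras = atom_ptr.extra(elf_file); // Extra, no longer ?Extra
    std.debug.assert(extras.rel_count == rel_count);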
src/link/Elf/eh_frame.zig
@@ -42,8 +42,8 @@ pub const Fde = struct {
const object = elf_file.file(fde.file_index).?.object;
const rel = fde.relocs(elf_file)[0];
const sym = object.symtab.items[rel.r_sym()];
- const atom_index = object.atoms.items[sym.st_shndx];
- return elf_file.atom(atom_index).?;
+ const atom_index = object.atoms_indexes.items[sym.st_shndx];
+ return object.atom(atom_index).?;
}
pub fn relocs(fde: Fde, elf_file: *Elf) []align(1) const elf.Elf64_Rela {
src/link/Elf/file.zig
@@ -98,11 +98,34 @@ pub const File = union(enum) {
}
}
+ pub fn atom(file: File, atom_index: Atom.Index) ?*Atom {
+ return switch (file) {
+ .shared_object => unreachable,
+ .linker_defined => null,
+ inline else => |x| x.atom(atom_index),
+ };
+ }
+
pub fn atoms(file: File) []const Atom.Index {
return switch (file) {
- .linker_defined, .shared_object => &[0]Atom.Index{},
- .zig_object => |x| x.atoms.items,
- .object => |x| x.atoms.items,
+ .shared_object => unreachable,
+ .linker_defined => &[0]Atom.Index{},
+ .zig_object => |x| x.atoms_indexes.items,
+ .object => |x| x.atoms_indexes.items,
+ };
+ }
+
+ pub fn atomExtra(file: File, extra_index: u32) Atom.Extra {
+ return switch (file) {
+ .shared_object, .linker_defined => unreachable,
+ inline else => |x| x.atomExtra(extra_index),
+ };
+ }
+
+ pub fn setAtomExtra(file: File, extra_index: u32, extra: Atom.Extra) void {
+ return switch (file) {
+ .shared_object, .linker_defined => unreachable,
+ inline else => |x| x.setAtomExtra(extra_index, extra),
};
}
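The new File.atom, File.atomExtra, and File.setAtomExtra dispatchers mean callers resolve atoms through the owning file rather than through a global table on Elf. A short usage sketch, assuming elf_file: *Elf and a valid file_index: File.Index are in scope (this mirrors the gc.zig loops below):

    // Sketch only: iterate the atoms owned by one input file.
    const file_ptr = elf_file.file(file_index).?;
    for (file_ptr.atoms()) |atom_index| {
        const atom_ptr = file_ptr.atom(atom_index) orelse continue; // index 0 is the null atom
        if (!atom_ptr.flags.alive) continue;
        // ... work with atom_ptr ...
    }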
src/link/Elf/gc.zig
@@ -35,7 +35,7 @@ fn collectRoots(roots: *std.ArrayList(*Atom), files: []const File.Index, elf_fil
const file = elf_file.file(index).?;
for (file.atoms()) |atom_index| {
- const atom = elf_file.atom(atom_index) orelse continue;
+ const atom = file.atom(atom_index) orelse continue;
if (!atom.flags.alive) continue;
const shdr = atom.inputShdr(elf_file);
@@ -120,8 +120,9 @@ fn mark(roots: std.ArrayList(*Atom), elf_file: *Elf) void {
fn prune(files: []const File.Index, elf_file: *Elf) void {
for (files) |index| {
- for (elf_file.file(index).?.atoms()) |atom_index| {
- const atom = elf_file.atom(atom_index) orelse continue;
+ const file = elf_file.file(index).?;
+ for (file.atoms()) |atom_index| {
+ const atom = file.atom(atom_index) orelse continue;
if (atom.flags.alive and !atom.flags.visited) {
atom.flags.alive = false;
atom.markFdesDead(elf_file);
@@ -133,8 +134,9 @@ fn prune(files: []const File.Index, elf_file: *Elf) void {
pub fn dumpPrunedAtoms(elf_file: *Elf) !void {
const stderr = std.io.getStdErr().writer();
for (elf_file.objects.items) |index| {
- for (elf_file.file(index).?.object.atoms.items) |atom_index| {
- const atom = elf_file.atom(atom_index) orelse continue;
+ const file = elf_file.file(index).?;
+ for (file.atoms()) |atom_index| {
+ const atom = file.atom(atom_index) orelse continue;
if (!atom.flags.alive)
// TODO should we simply print to stderr?
try stderr.print("link: removing unused section '{s}' in file '{}'\n", .{
src/link/Elf/LinkerDefined.zig
@@ -1,4 +1,5 @@
index: File.Index,
+
symtab: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{},
strtab: std.ArrayListUnmanaged(u8) = .{},
symbols: std.ArrayListUnmanaged(Symbol.Index) = .{},
@@ -11,6 +12,11 @@ pub fn deinit(self: *LinkerDefined, allocator: Allocator) void {
self.symbols.deinit(allocator);
}
+pub fn init(self: *LinkerDefined, allocator: Allocator) !void {
+ // Null byte in strtab
+ try self.strtab.append(allocator, 0);
+}
+
pub fn addGlobal(self: *LinkerDefined, name: [:0]const u8, elf_file: *Elf) !u32 {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
@@ -41,7 +47,7 @@ pub fn resolveSymbols(self: *LinkerDefined, elf_file: *Elf) void {
const global = elf_file.symbol(index);
if (self.asFile().symbolRank(this_sym, false) < global.symbolRank(elf_file)) {
global.value = 0;
- global.atom_index = 0;
+ global.atom_ref = .{ .index = 0, .file = 0 };
global.file_index = self.index;
global.esym_index = sym_idx;
global.version_index = elf_file.default_sym_version;
@@ -127,6 +133,7 @@ const mem = std.mem;
const std = @import("std");
const Allocator = mem.Allocator;
+const Atom = @import("Atom.zig");
const Elf = @import("../Elf.zig");
const File = @import("file.zig").File;
const LinkerDefined = @This();
src/link/Elf/Object.zig
@@ -10,9 +10,12 @@ symtab: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{},
strtab: std.ArrayListUnmanaged(u8) = .{},
first_global: ?Symbol.Index = null,
symbols: std.ArrayListUnmanaged(Symbol.Index) = .{},
-atoms: std.ArrayListUnmanaged(Atom.Index) = .{},
relocs: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{},
+atoms: std.ArrayListUnmanaged(Atom) = .{},
+atoms_indexes: std.ArrayListUnmanaged(Atom.Index) = .{},
+atoms_extra: std.ArrayListUnmanaged(u32) = .{},
+
comdat_groups: std.ArrayListUnmanaged(Elf.ComdatGroup) = .{},
comdat_group_data: std.ArrayListUnmanaged(u32) = .{},
@@ -49,6 +52,8 @@ pub fn deinit(self: *Object, allocator: Allocator) void {
self.strtab.deinit(allocator);
self.symbols.deinit(allocator);
self.atoms.deinit(allocator);
+ self.atoms_indexes.deinit(allocator);
+ self.atoms_extra.deinit(allocator);
self.comdat_groups.deinit(allocator);
self.comdat_group_data.deinit(allocator);
self.relocs.deinit(allocator);
@@ -71,15 +76,17 @@ pub fn parse(self: *Object, elf_file: *Elf) !void {
// Append null input merge section
try self.input_merge_sections.append(gpa, .{});
+ // Allocate atom index 0 to null atom
+ try self.atoms.append(gpa, .{ .extra_index = try self.addAtomExtra(gpa, .{}) });
try self.initAtoms(gpa, handle, elf_file);
try self.initSymtab(gpa, elf_file);
for (self.shdrs.items, 0..) |shdr, i| {
- const atom = elf_file.atom(self.atoms.items[i]) orelse continue;
- if (!atom.flags.alive) continue;
+ const atom_ptr = self.atom(self.atoms_indexes.items[i]) orelse continue;
+ if (!atom_ptr.flags.alive) continue;
if ((cpu_arch == .x86_64 and shdr.sh_type == elf.SHT_X86_64_UNWIND) or
- mem.eql(u8, atom.name(elf_file), ".eh_frame"))
+ mem.eql(u8, atom_ptr.name(elf_file), ".eh_frame"))
{
try self.parseEhFrame(gpa, handle, @as(u32, @intCast(i)), elf_file);
}
@@ -179,8 +186,11 @@ fn parseCommon(self: *Object, allocator: Allocator, handle: std.fs.File, elf_fil
fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file: *Elf) !void {
const shdrs = self.shdrs.items;
- try self.atoms.resize(allocator, shdrs.len);
- @memset(self.atoms.items, 0);
+ try self.atoms.ensureTotalCapacityPrecise(allocator, shdrs.len);
+ try self.atoms_extra.ensureTotalCapacityPrecise(allocator, shdrs.len * @sizeOf(Atom.Extra));
+ try self.atoms_indexes.ensureTotalCapacityPrecise(allocator, shdrs.len);
+ try self.atoms_indexes.resize(allocator, shdrs.len);
+ @memset(self.atoms_indexes.items, 0);
for (shdrs, 0..) |shdr, i| {
if (shdr.sh_flags & elf.SHF_EXCLUDE != 0 and
@@ -242,7 +252,19 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
else => {
const shndx = @as(u32, @intCast(i));
if (self.skipShdr(shndx, elf_file)) continue;
- try self.addAtom(allocator, handle, shdr, shndx, elf_file);
+ const size, const alignment = if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) blk: {
+ const data = try self.preadShdrContentsAlloc(allocator, handle, shndx);
+ defer allocator.free(data);
+ const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*;
+ break :blk .{ chdr.ch_size, Alignment.fromNonzeroByteUnits(chdr.ch_addralign) };
+ } else .{ shdr.sh_size, Alignment.fromNonzeroByteUnits(shdr.sh_addralign) };
+ const atom_index = self.addAtomAssumeCapacity(.{
+ .name = shdr.sh_name,
+ .shndx = shndx,
+ .size = size,
+ .alignment = alignment,
+ });
+ self.atoms_indexes.items[shndx] = atom_index;
},
}
}
@@ -250,14 +272,14 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
// Parse relocs sections if any.
for (shdrs, 0..) |shdr, i| switch (shdr.sh_type) {
elf.SHT_REL, elf.SHT_RELA => {
- const atom_index = self.atoms.items[shdr.sh_info];
- if (elf_file.atom(atom_index)) |atom| {
+ const atom_index = self.atoms_indexes.items[shdr.sh_info];
+ if (self.atom(atom_index)) |atom_ptr| {
const relocs = try self.preadRelocsAlloc(allocator, handle, @intCast(i));
defer allocator.free(relocs);
- atom.relocs_section_index = @intCast(i);
+ atom_ptr.relocs_section_index = @intCast(i);
const rel_index: u32 = @intCast(self.relocs.items.len);
const rel_count: u32 = @intCast(relocs.len);
- try atom.addExtra(.{ .rel_index = rel_index, .rel_count = rel_count }, elf_file);
+ atom_ptr.addExtra(.{ .rel_index = rel_index, .rel_count = rel_count }, elf_file);
try self.relocs.appendUnalignedSlice(allocator, relocs);
if (elf_file.getTarget().cpu.arch == .riscv64) {
sortRelocs(self.relocs.items[rel_index..][0..rel_count]);
@@ -268,27 +290,6 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
};
}
-fn addAtom(self: *Object, allocator: Allocator, handle: std.fs.File, shdr: elf.Elf64_Shdr, shndx: u32, elf_file: *Elf) !void {
- const atom_index = try elf_file.addAtom();
- const atom = elf_file.atom(atom_index).?;
- atom.atom_index = atom_index;
- atom.name_offset = shdr.sh_name;
- atom.file_index = self.index;
- atom.input_section_index = shndx;
- self.atoms.items[shndx] = atom_index;
-
- if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) {
- const data = try self.preadShdrContentsAlloc(allocator, handle, shndx);
- defer allocator.free(data);
- const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*;
- atom.size = chdr.ch_size;
- atom.alignment = Alignment.fromNonzeroByteUnits(chdr.ch_addralign);
- } else {
- atom.size = shdr.sh_size;
- atom.alignment = Alignment.fromNonzeroByteUnits(shdr.sh_addralign);
- }
-}
-
fn initOutputSection(self: Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) error{OutOfMemory}!u32 {
const name = blk: {
const name = self.getString(shdr.sh_name);
@@ -368,8 +369,10 @@ fn initSymtab(self: *Object, allocator: Allocator, elf_file: *Elf) !void {
sym_ptr.value = @intCast(sym.st_value);
sym_ptr.name_offset = sym.st_name;
sym_ptr.esym_index = @as(u32, @intCast(i));
- sym_ptr.atom_index = if (sym.st_shndx == elf.SHN_ABS) 0 else self.atoms.items[sym.st_shndx];
sym_ptr.file_index = self.index;
+ if (sym.st_shndx != elf.SHN_ABS) {
+ sym_ptr.atom_ref = .{ .index = self.atoms_indexes.items[sym.st_shndx], .file = self.index };
+ }
}
for (self.symtab.items[first_global..]) |sym| {
@@ -456,15 +459,15 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
var i: u32 = @as(u32, @intCast(fdes_start));
while (i < self.fdes.items.len) {
const fde = self.fdes.items[i];
- const atom = fde.atom(elf_file);
+ const atom_ptr = fde.atom(elf_file);
const start = i;
i += 1;
while (i < self.fdes.items.len) : (i += 1) {
const next_fde = self.fdes.items[i];
- if (atom.atom_index != next_fde.atom(elf_file).atom_index) break;
+ if (atom_ptr.atom_index != next_fde.atom(elf_file).atom_index) break;
}
- try atom.addExtra(.{ .fde_start = start, .fde_count = i - start }, elf_file);
- atom.flags.fde = true;
+ atom_ptr.addExtra(.{ .fde_start = start, .fde_count = i - start }, elf_file);
+ atom_ptr.flags.fde = true;
}
}
@@ -507,19 +510,19 @@ fn filterRelocs(
pub fn scanRelocs(self: *Object, elf_file: *Elf, undefs: anytype) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
- for (self.atoms.items) |atom_index| {
- const atom = elf_file.atom(atom_index) orelse continue;
- if (!atom.flags.alive) continue;
- const shdr = atom.inputShdr(elf_file);
+ for (self.atoms_indexes.items) |atom_index| {
+ const atom_ptr = self.atom(atom_index) orelse continue;
+ if (!atom_ptr.flags.alive) continue;
+ const shdr = atom_ptr.inputShdr(elf_file);
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
if (shdr.sh_type == elf.SHT_NOBITS) continue;
- if (atom.scanRelocsRequiresCode(elf_file)) {
+ if (atom_ptr.scanRelocsRequiresCode(elf_file)) {
// TODO ideally, we don't have to decompress at this stage (should already be done)
// and we just fetch the code slice.
const code = try self.codeDecompressAlloc(elf_file, atom_index);
defer gpa.free(code);
- try atom.scanRelocs(elf_file, code, undefs);
- } else try atom.scanRelocs(elf_file, null, undefs);
+ try atom_ptr.scanRelocs(elf_file, code, undefs);
+ } else try atom_ptr.scanRelocs(elf_file, null, undefs);
}
for (self.cies.items) |cie| {
@@ -547,19 +550,21 @@ pub fn resolveSymbols(self: *Object, elf_file: *Elf) void {
if (esym.st_shndx == elf.SHN_UNDEF) continue;
if (esym.st_shndx != elf.SHN_ABS and esym.st_shndx != elf.SHN_COMMON) {
- const atom_index = self.atoms.items[esym.st_shndx];
- const atom = elf_file.atom(atom_index) orelse continue;
- if (!atom.flags.alive) continue;
+ const atom_index = self.atoms_indexes.items[esym.st_shndx];
+ const atom_ptr = self.atom(atom_index) orelse continue;
+ if (!atom_ptr.flags.alive) continue;
}
const global = elf_file.symbol(index);
if (self.asFile().symbolRank(esym, !self.alive) < global.symbolRank(elf_file)) {
- const atom_index = switch (esym.st_shndx) {
- elf.SHN_ABS, elf.SHN_COMMON => 0,
- else => self.atoms.items[esym.st_shndx],
- };
+ switch (esym.st_shndx) {
+ elf.SHN_ABS, elf.SHN_COMMON => {},
+ else => global.atom_ref = .{
+ .index = self.atoms_indexes.items[esym.st_shndx],
+ .file = self.index,
+ },
+ }
global.value = @intCast(esym.st_value);
- global.atom_index = atom_index;
global.esym_index = esym_index;
global.file_index = self.index;
global.version_index = elf_file.default_sym_version;
@@ -588,7 +593,7 @@ pub fn claimUnresolved(self: *Object, elf_file: *Elf) void {
};
global.value = 0;
- global.atom_index = 0;
+ global.atom_ref = .{ .index = 0, .file = 0 };
global.esym_index = esym_index;
global.file_index = self.index;
global.version_index = if (is_import) elf.VER_NDX_LOCAL else elf_file.default_sym_version;
@@ -609,7 +614,7 @@ pub fn claimUnresolvedObject(self: *Object, elf_file: *Elf) void {
}
global.value = 0;
- global.atom_index = 0;
+ global.atom_ref = .{ .index = 0, .file = 0 };
global.esym_index = esym_index;
global.file_index = self.index;
}
@@ -633,13 +638,13 @@ pub fn markLive(self: *Object, elf_file: *Elf) void {
}
}
-pub fn markEhFrameAtomsDead(self: Object, elf_file: *Elf) void {
+pub fn markEhFrameAtomsDead(self: *Object, elf_file: *Elf) void {
const cpu_arch = elf_file.getTarget().cpu.arch;
- for (self.atoms.items) |atom_index| {
- const atom = elf_file.atom(atom_index) orelse continue;
- const is_eh_frame = (cpu_arch == .x86_64 and atom.inputShdr(elf_file).sh_type == elf.SHT_X86_64_UNWIND) or
- mem.eql(u8, atom.name(elf_file), ".eh_frame");
- if (atom.flags.alive and is_eh_frame) atom.flags.alive = false;
+ for (self.atoms_indexes.items) |atom_index| {
+ const atom_ptr = self.atom(atom_index) orelse continue;
+ const is_eh_frame = (cpu_arch == .x86_64 and atom_ptr.inputShdr(elf_file).sh_type == elf.SHT_X86_64_UNWIND) or
+ mem.eql(u8, atom_ptr.name(elf_file), ".eh_frame");
+ if (atom_ptr.flags.alive and is_eh_frame) atom_ptr.flags.alive = false;
}
}
@@ -657,9 +662,9 @@ pub fn checkDuplicates(self: *Object, dupes: anytype, elf_file: *Elf) error{OutO
sym.st_shndx == elf.SHN_COMMON) continue;
if (sym.st_shndx != elf.SHN_ABS) {
- const atom_index = self.atoms.items[sym.st_shndx];
- const atom = elf_file.atom(atom_index) orelse continue;
- if (!atom.flags.alive) continue;
+ const atom_index = self.atoms_indexes.items[sym.st_shndx];
+ const atom_ptr = self.atom(atom_index) orelse continue;
+ if (!atom_ptr.flags.alive) continue;
}
const gop = try dupes.getOrPut(index);
@@ -680,8 +685,8 @@ pub fn initMergeSections(self: *Object, elf_file: *Elf) !void {
for (self.shdrs.items, 0..) |shdr, shndx| {
if (shdr.sh_flags & elf.SHF_MERGE == 0) continue;
- const atom_index = self.atoms.items[shndx];
- const atom_ptr = elf_file.atom(atom_index) orelse continue;
+ const atom_index = self.atoms_indexes.items[shndx];
+ const atom_ptr = self.atom(atom_index) orelse continue;
if (!atom_ptr.flags.alive) continue;
if (atom_ptr.relocs(elf_file).len > 0) continue;
@@ -755,7 +760,7 @@ pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) !void {
const imsec = self.inputMergeSection(index) orelse continue;
if (imsec.offsets.items.len == 0) continue;
const msec = elf_file.mergeSection(imsec.merge_section_index);
- const atom_ptr = elf_file.atom(imsec.atom_index).?;
+ const atom_ptr = self.atom(imsec.atom_index).?;
const isec = atom_ptr.inputShdr(elf_file);
try imsec.subsections.resize(gpa, imsec.strings.items.len);
@@ -802,10 +807,10 @@ pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) !void {
sym.value = offset;
}
- for (self.atoms.items) |atom_index| {
- const atom_ptr = elf_file.atom(atom_index) orelse continue;
+ for (self.atoms_indexes.items) |atom_index| {
+ const atom_ptr = self.atom(atom_index) orelse continue;
if (!atom_ptr.flags.alive) continue;
- const extras = atom_ptr.extra(elf_file) orelse continue;
+ const extras = atom_ptr.extra(elf_file);
const relocs = self.relocs.items[extras.rel_index..][0..extras.rel_count];
for (relocs) |*rel| {
const esym = self.symtab.items[rel.r_sym()];
@@ -867,21 +872,10 @@ pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
- const atom_index = try elf_file.addAtom();
- try self.atoms.append(gpa, atom_index);
-
const is_tls = global.type(elf_file) == elf.STT_TLS;
const name = if (is_tls) ".tls_common" else ".common";
-
- const atom = elf_file.atom(atom_index).?;
const name_offset = @as(u32, @intCast(self.strtab.items.len));
try self.strtab.writer(gpa).print("{s}\x00", .{name});
- atom.atom_index = atom_index;
- atom.name_offset = name_offset;
- atom.file_index = self.index;
- atom.size = this_sym.st_size;
- const alignment = this_sym.st_value;
- atom.alignment = Alignment.fromNonzeroByteUnits(alignment);
var sh_flags: u32 = elf.SHF_ALLOC | elf.SHF_WRITE;
if (is_tls) sh_flags |= elf.SHF_TLS;
@@ -897,38 +891,45 @@ pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void {
.sh_size = sh_size,
.sh_link = 0,
.sh_info = 0,
- .sh_addralign = alignment,
+ .sh_addralign = this_sym.st_value,
.sh_entsize = 0,
};
- atom.input_section_index = shndx;
+
+ const atom_index = try self.addAtom(gpa, .{
+ .name = name_offset,
+ .shndx = shndx,
+ .size = this_sym.st_size,
+ .alignment = Alignment.fromNonzeroByteUnits(this_sym.st_value),
+ });
+ try self.atoms_indexes.append(gpa, atom_index);
global.value = 0;
- global.atom_index = atom_index;
+ global.atom_ref = .{ .index = atom_index, .file = self.index };
global.flags.weak = false;
}
}
-pub fn initOutputSections(self: Object, elf_file: *Elf) !void {
- for (self.atoms.items) |atom_index| {
- const atom = elf_file.atom(atom_index) orelse continue;
- if (!atom.flags.alive) continue;
- const shdr = atom.inputShdr(elf_file);
+pub fn initOutputSections(self: *Object, elf_file: *Elf) !void {
+ for (self.atoms_indexes.items) |atom_index| {
+ const atom_ptr = self.atom(atom_index) orelse continue;
+ if (!atom_ptr.flags.alive) continue;
+ const shdr = atom_ptr.inputShdr(elf_file);
_ = try self.initOutputSection(elf_file, shdr);
}
}
pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void {
- for (self.atoms.items) |atom_index| {
- const atom = elf_file.atom(atom_index) orelse continue;
- if (!atom.flags.alive) continue;
- const shdr = atom.inputShdr(elf_file);
- atom.output_section_index = self.initOutputSection(elf_file, shdr) catch unreachable;
+ for (self.atoms_indexes.items) |atom_index| {
+ const atom_ptr = self.atom(atom_index) orelse continue;
+ if (!atom_ptr.flags.alive) continue;
+ const shdr = atom_ptr.inputShdr(elf_file);
+ atom_ptr.output_section_index = self.initOutputSection(elf_file, shdr) catch unreachable;
const comp = elf_file.base.comp;
const gpa = comp.gpa;
- const gop = try elf_file.output_sections.getOrPut(gpa, atom.output_section_index);
+ const gop = try elf_file.output_sections.getOrPut(gpa, atom_ptr.output_section_index);
if (!gop.found_existing) gop.value_ptr.* = .{};
- try gop.value_ptr.append(gpa, atom_index);
+ try gop.value_ptr.append(gpa, .{ .index = atom_index, .file = self.index });
}
for (self.locals()) |local_index| {
@@ -938,9 +939,9 @@ pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void {
local.output_section_index = msub.mergeSection(elf_file).output_section_index;
continue;
}
- const atom = local.atom(elf_file) orelse continue;
- if (!atom.flags.alive) continue;
- local.output_section_index = atom.output_section_index;
+ const atom_ptr = local.atom(elf_file) orelse continue;
+ if (!atom_ptr.flags.alive) continue;
+ local.output_section_index = atom_ptr.output_section_index;
}
for (self.globals()) |global_index| {
@@ -951,9 +952,9 @@ pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void {
global.output_section_index = msub.mergeSection(elf_file).output_section_index;
continue;
}
- const atom = global.atom(elf_file) orelse continue;
- if (!atom.flags.alive) continue;
- global.output_section_index = atom.output_section_index;
+ const atom_ptr = global.atom(elf_file) orelse continue;
+ if (!atom_ptr.flags.alive) continue;
+ global.output_section_index = atom_ptr.output_section_index;
}
for (self.symbols.items[self.symtab.items.len..]) |local_index| {
@@ -964,11 +965,11 @@ pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void {
}
}
-pub fn initRelaSections(self: Object, elf_file: *Elf) !void {
- for (self.atoms.items) |atom_index| {
- const atom = elf_file.atom(atom_index) orelse continue;
- if (!atom.flags.alive) continue;
- const shndx = atom.relocsShndx() orelse continue;
+pub fn initRelaSections(self: *Object, elf_file: *Elf) !void {
+ for (self.atoms_indexes.items) |atom_index| {
+ const atom_ptr = self.atom(atom_index) orelse continue;
+ if (!atom_ptr.flags.alive) continue;
+ const shndx = atom_ptr.relocsShndx() orelse continue;
const shdr = self.shdrs.items[shndx];
const out_shndx = try self.initOutputSection(elf_file, shdr);
const out_shdr = &elf_file.shdrs.items[out_shndx];
@@ -978,24 +979,24 @@ pub fn initRelaSections(self: Object, elf_file: *Elf) !void {
}
}
-pub fn addAtomsToRelaSections(self: Object, elf_file: *Elf) !void {
- for (self.atoms.items) |atom_index| {
- const atom = elf_file.atom(atom_index) orelse continue;
- if (!atom.flags.alive) continue;
+pub fn addAtomsToRelaSections(self: *Object, elf_file: *Elf) !void {
+ for (self.atoms_indexes.items) |atom_index| {
+ const atom_ptr = self.atom(atom_index) orelse continue;
+ if (!atom_ptr.flags.alive) continue;
const shndx = blk: {
- const shndx = atom.relocsShndx() orelse continue;
+ const shndx = atom_ptr.relocsShndx() orelse continue;
const shdr = self.shdrs.items[shndx];
break :blk self.initOutputSection(elf_file, shdr) catch unreachable;
};
const shdr = &elf_file.shdrs.items[shndx];
- shdr.sh_info = atom.outputShndx().?;
+ shdr.sh_info = atom_ptr.outputShndx().?;
shdr.sh_link = elf_file.symtab_section_index.?;
const comp = elf_file.base.comp;
const gpa = comp.gpa;
- const gop = try elf_file.output_rela_sections.getOrPut(gpa, atom.outputShndx().?);
+ const gop = try elf_file.output_rela_sections.getOrPut(gpa, atom_ptr.outputShndx().?);
if (!gop.found_existing) gop.value_ptr.* = .{ .shndx = shndx };
- try gop.value_ptr.atom_list.append(gpa, atom_index);
+ try gop.value_ptr.atom_list.append(gpa, .{ .index = atom_index, .file = self.index });
}
}
@@ -1129,11 +1130,10 @@ pub fn globals(self: Object) []const Symbol.Index {
/// Returns atom's code and optionally uncompresses data if required (for compressed sections).
/// Caller owns the memory.
-pub fn codeDecompressAlloc(self: Object, elf_file: *Elf, atom_index: Atom.Index) ![]u8 {
+pub fn codeDecompressAlloc(self: *Object, elf_file: *Elf, atom_index: Atom.Index) ![]u8 {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
- const atom_ptr = elf_file.atom(atom_index).?;
- assert(atom_ptr.file_index == self.index);
+ const atom_ptr = self.atom(atom_index).?;
const shdr = atom_ptr.inputShdr(elf_file);
const handle = elf_file.fileHandle(self.file_handle);
const data = try self.preadShdrContentsAlloc(gpa, handle, atom_ptr.input_section_index);
@@ -1194,6 +1194,82 @@ fn preadRelocsAlloc(self: Object, allocator: Allocator, handle: std.fs.File, shn
return @as([*]align(1) const elf.Elf64_Rela, @ptrCast(raw.ptr))[0..num];
}
+const AddAtomArgs = struct {
+ name: u32,
+ shndx: u32,
+ size: u64,
+ alignment: Alignment,
+};
+
+fn addAtom(self: *Object, allocator: Allocator, args: AddAtomArgs) !Atom.Index {
+ try self.atoms.ensureUnusedCapacity(allocator, 1);
+ try self.atoms_extra.ensureUnusedCapacity(allocator, @sizeOf(Atom.Extra));
+ return self.addAtomAssumeCapacity(args);
+}
+
+fn addAtomAssumeCapacity(self: *Object, args: AddAtomArgs) Atom.Index {
+ const atom_index: Atom.Index = @intCast(self.atoms.items.len);
+ const atom_ptr = self.atoms.addOneAssumeCapacity();
+ atom_ptr.* = .{
+ .atom_index = atom_index,
+ .name_offset = args.name,
+ .file_index = self.index,
+ .input_section_index = args.shndx,
+ .extra_index = self.addAtomExtraAssumeCapacity(.{}),
+ .size = args.size,
+ .alignment = args.alignment,
+ };
+ return atom_index;
+}
+
+pub fn atom(self: *Object, atom_index: Atom.Index) ?*Atom {
+ if (atom_index == 0) return null;
+ assert(atom_index < self.atoms.items.len);
+ return &self.atoms.items[atom_index];
+}
+
+pub fn addAtomExtra(self: *Object, allocator: Allocator, extra: Atom.Extra) !u32 {
+ const fields = @typeInfo(Atom.Extra).Struct.fields;
+ try self.atoms_extra.ensureUnusedCapacity(allocator, fields.len);
+ return self.addAtomExtraAssumeCapacity(extra);
+}
+
+pub fn addAtomExtraAssumeCapacity(self: *Object, extra: Atom.Extra) u32 {
+ const index = @as(u32, @intCast(self.atoms_extra.items.len));
+ const fields = @typeInfo(Atom.Extra).Struct.fields;
+ inline for (fields) |field| {
+ self.atoms_extra.appendAssumeCapacity(switch (field.type) {
+ u32 => @field(extra, field.name),
+ else => @compileError("bad field type"),
+ });
+ }
+ return index;
+}
+
+pub fn atomExtra(self: *Object, index: u32) Atom.Extra {
+ const fields = @typeInfo(Atom.Extra).Struct.fields;
+ var i: usize = index;
+ var result: Atom.Extra = undefined;
+ inline for (fields) |field| {
+ @field(result, field.name) = switch (field.type) {
+ u32 => self.atoms_extra.items[i],
+ else => @compileError("bad field type"),
+ };
+ i += 1;
+ }
+ return result;
+}
+
+pub fn setAtomExtra(self: *Object, index: u32, extra: Atom.Extra) void {
+ const fields = @typeInfo(Atom.Extra).Struct.fields;
+ inline for (fields, 0..) |field, i| {
+ self.atoms_extra.items[index + i] = switch (field.type) {
+ u32 => @field(extra, field.name),
+ else => @compileError("bad field type"),
+ };
+ }
+}
+
fn addInputMergeSection(self: *Object, allocator: Allocator) !InputMergeSection.Index {
const index: InputMergeSection.Index = @intCast(self.input_merge_sections.items.len);
const msec = try self.input_merge_sections.addOne(allocator);
@@ -1280,9 +1356,9 @@ fn formatAtoms(
_ = options;
const object = ctx.object;
try writer.writeAll(" atoms\n");
- for (object.atoms.items) |atom_index| {
- const atom = ctx.elf_file.atom(atom_index) orelse continue;
- try writer.print(" {}\n", .{atom.fmt(ctx.elf_file)});
+ for (object.atoms_indexes.items) |atom_index| {
+ const atom_ptr = object.atom(atom_index) orelse continue;
+ try writer.print(" {}\n", .{atom_ptr.fmt(ctx.elf_file)});
}
}
@@ -1354,9 +1430,9 @@ fn formatComdatGroups(
try writer.print(" COMDAT({d})\n", .{cg_index});
const cg_members = cg.comdatGroupMembers(elf_file);
for (cg_members) |shndx| {
- const atom_index = object.atoms.items[shndx];
- const atom = elf_file.atom(atom_index) orelse continue;
- try writer.print(" atom({d}) : {s}\n", .{ atom_index, atom.name(elf_file) });
+ const atom_index = object.atoms_indexes.items[shndx];
+ const atom_ptr = object.atom(atom_index) orelse continue;
+ try writer.print(" atom({d}) : {s}\n", .{ atom_index, atom_ptr.name(elf_file) });
}
}
}
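With the Object.zig changes above, each Object now owns its atoms in three parallel arrays: atoms holds the Atom structs, atoms_indexes maps an input section index to an atom index (0 meaning "no atom"), and atoms_extra is a flat u32 pool addressed by Atom.extra_index. A rough within-file sketch of how a new atom flows through them, assuming self: *Object, gpa, shdr, shndx and elf_file are in scope (addAtom and addAtomAssumeCapacity are file-private helpers):

    // Sketch only: create an object-owned atom and wire up the lookups.
    const atom_index = try self.addAtom(gpa, .{
        .name = shdr.sh_name,
        .shndx = shndx,
        .size = shdr.sh_size,
        .alignment = Alignment.fromNonzeroByteUnits(shdr.sh_addralign),
    });
    self.atoms_indexes.items[shndx] = atom_index; // input section -> atom (assumes the list was resized)
    const atom_ptr = self.atom(atom_index).?;     // points into self.atoms.items
    atom_ptr.addExtra(.{ .rel_index = 0, .rel_count = 0 }, elf_file);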
src/link/Elf/relocatable.zig
@@ -341,8 +341,8 @@ fn initComdatGroups(elf_file: *Elf) !void {
fn updateSectionSizes(elf_file: *Elf) !void {
for (elf_file.output_sections.keys(), elf_file.output_sections.values()) |shndx, atom_list| {
const shdr = &elf_file.shdrs.items[shndx];
- for (atom_list.items) |atom_index| {
- const atom_ptr = elf_file.atom(atom_index) orelse continue;
+ for (atom_list.items) |ref| {
+ const atom_ptr = elf_file.atom(ref) orelse continue;
if (!atom_ptr.flags.alive) continue;
const offset = atom_ptr.alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
@@ -354,8 +354,8 @@ fn updateSectionSizes(elf_file: *Elf) !void {
for (elf_file.output_rela_sections.values()) |sec| {
const shdr = &elf_file.shdrs.items[sec.shndx];
- for (sec.atom_list.items) |atom_index| {
- const atom_ptr = elf_file.atom(atom_index) orelse continue;
+ for (sec.atom_list.items) |ref| {
+ const atom_ptr = elf_file.atom(ref) orelse continue;
if (!atom_ptr.flags.alive) continue;
const relocs = atom_ptr.relocs(elf_file);
shdr.sh_size += shdr.sh_entsize * relocs.len;
@@ -448,16 +448,16 @@ fn writeAtoms(elf_file: *Elf) !void {
0;
@memset(buffer, padding_byte);
- for (atom_list.items) |atom_index| {
- const atom_ptr = elf_file.atom(atom_index).?;
+ for (atom_list.items) |ref| {
+ const atom_ptr = elf_file.atom(ref).?;
assert(atom_ptr.flags.alive);
const offset = math.cast(usize, atom_ptr.value - @as(i64, @intCast(shdr.sh_addr - base_offset))) orelse
return error.Overflow;
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
- log.debug("writing atom({d}) from 0x{x} to 0x{x}", .{
- atom_index,
+ log.debug("writing atom({}) from 0x{x} to 0x{x}", .{
+ ref,
sh_offset + offset,
sh_offset + offset + size,
});
@@ -465,8 +465,8 @@ fn writeAtoms(elf_file: *Elf) !void {
// TODO decompress directly into provided buffer
const out_code = buffer[offset..][0..size];
const in_code = switch (atom_ptr.file(elf_file).?) {
- .object => |x| try x.codeDecompressAlloc(elf_file, atom_index),
- .zig_object => |x| try x.codeAlloc(elf_file, atom_index),
+ .object => |x| try x.codeDecompressAlloc(elf_file, ref.index),
+ .zig_object => |x| try x.codeAlloc(elf_file, ref.index),
else => unreachable,
};
defer gpa.free(in_code);
@@ -490,8 +490,8 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
var relocs = try std.ArrayList(elf.Elf64_Rela).initCapacity(gpa, num_relocs);
defer relocs.deinit();
- for (sec.atom_list.items) |atom_index| {
- const atom_ptr = elf_file.atom(atom_index) orelse continue;
+ for (sec.atom_list.items) |ref| {
+ const atom_ptr = elf_file.atom(ref) orelse continue;
if (!atom_ptr.flags.alive) continue;
try atom_ptr.writeRelocs(elf_file, &relocs);
}
src/link/Elf/Symbol.zig
@@ -9,10 +9,9 @@ name_offset: u32 = 0,
/// Index of file where this symbol is defined.
file_index: File.Index = 0,
-/// Index of atom containing this symbol.
-/// Index of 0 means there is no associated atom with this symbol.
+/// Reference to Atom containing this symbol if any.
/// Use `atom` to get the pointer to the atom.
-atom_index: Atom.Index = 0,
+atom_ref: Elf.Ref = .{ .index = 0, .file = 0 },
/// Assigned output section index for this symbol.
output_section_index: u32 = 0,
@@ -68,7 +67,8 @@ pub fn name(symbol: Symbol, elf_file: *Elf) [:0]const u8 {
}
pub fn atom(symbol: Symbol, elf_file: *Elf) ?*Atom {
- return elf_file.atom(symbol.atom_index);
+ const file_ptr = elf_file.file(symbol.atom_ref.file) orelse return null;
+ return file_ptr.atom(symbol.atom_ref.index);
}
pub fn mergeSubsection(symbol: Symbol, elf_file: *Elf) ?*MergeSubsection {
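Symbol.atom_ref replaces the old global atom_index with an Elf.Ref, a (file, index) pair, and Symbol.atom resolves it in two hops as shown above: look up the owning file via elf_file.file(atom_ref.file), then ask that file for atom(atom_ref.index). The Ref type itself is defined in src/link/Elf.zig and is not included in this excerpt; based on how the diff constructs it, a plausible shape (an assumption, for illustration only) is:

    // Assumed shape of Elf.Ref; the real definition lives in src/link/Elf.zig.
    pub const Ref = struct {
        index: u32 = 0, // atom index within the owning file; 0 means "none"
        file: u32 = 0,  // index of the owning file (File.Index, assumed u32); 0 means "none"
    };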
src/link/Elf/synthetic_sections.zig
@@ -1705,14 +1705,14 @@ pub const ComdatGroupSection = struct {
const shdr = object.shdrs.items[shndx];
switch (shdr.sh_type) {
elf.SHT_RELA => {
- const atom_index = object.atoms.items[shdr.sh_info];
- const atom = elf_file.atom(atom_index).?;
+ const atom_index = object.atoms_indexes.items[shdr.sh_info];
+ const atom = object.atom(atom_index).?;
const rela = elf_file.output_rela_sections.get(atom.outputShndx().?).?;
try writer.writeInt(u32, rela.shndx, .little);
},
else => {
- const atom_index = object.atoms.items[shndx];
- const atom = elf_file.atom(atom_index).?;
+ const atom_index = object.atoms_indexes.items[shndx];
+ const atom = object.atom(atom_index).?;
try writer.writeInt(u32, atom.outputShndx().?, .little);
},
}
src/link/Elf/thunks.zig
@@ -6,8 +6,8 @@ pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
const atoms = elf_file.output_sections.get(shndx).?.items;
assert(atoms.len > 0);
- for (atoms) |atom_index| {
- elf_file.atom(atom_index).?.value = -1;
+ for (atoms) |ref| {
+ elf_file.atom(ref).?.value = -1;
}
var i: usize = 0;
@@ -19,8 +19,7 @@ pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
i += 1;
while (i < atoms.len) : (i += 1) {
- const atom_index = atoms[i];
- const atom = elf_file.atom(atom_index).?;
+ const atom = elf_file.atom(atoms[i]).?;
assert(atom.flags.alive);
if (@as(i64, @intCast(atom.alignment.forward(shdr.sh_size))) - start_atom.value >= max_distance)
break;
@@ -33,10 +32,10 @@ pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
thunk.output_section_index = shndx;
// Scan relocs in the group and create trampolines for any unreachable callsite
- for (atoms[start..i]) |atom_index| {
- const atom = elf_file.atom(atom_index).?;
+ for (atoms[start..i]) |ref| {
+ const atom = elf_file.atom(ref).?;
const file = atom.file(elf_file).?;
- log.debug("atom({d}) {s}", .{ atom_index, atom.name(elf_file) });
+ log.debug("atom({}) {s}", .{ ref, atom.name(elf_file) });
for (atom.relocs(elf_file)) |rel| {
const is_reachable = switch (cpu_arch) {
.aarch64 => aarch64.isReachable(atom, rel, elf_file),
@@ -51,7 +50,7 @@ pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
};
try thunk.symbols.put(gpa, target, {});
}
- try atom.addExtra(.{ .thunk = thunk_index }, elf_file);
+ atom.addExtra(.{ .thunk = thunk_index }, elf_file);
atom.flags.thunk = true;
}
src/link/Elf/ZigObject.zig
@@ -15,7 +15,9 @@ local_symbols: std.ArrayListUnmanaged(Symbol.Index) = .{},
global_symbols: std.ArrayListUnmanaged(Symbol.Index) = .{},
globals_lookup: std.AutoHashMapUnmanaged(u32, Symbol.Index) = .{},
-atoms: std.ArrayListUnmanaged(Atom.Index) = .{},
+atoms: std.ArrayListUnmanaged(Atom) = .{},
+atoms_indexes: std.ArrayListUnmanaged(Atom.Index) = .{},
+atoms_extra: std.ArrayListUnmanaged(u32) = .{},
relocs: std.ArrayListUnmanaged(std.ArrayListUnmanaged(elf.Elf64_Rela)) = .{},
num_dynrelocs: u32 = 0,
@@ -80,7 +82,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
- try self.atoms.append(gpa, 0); // null input section
+ try self.atoms.append(gpa, .{ .extra_index = try self.addAtomExtra(gpa, .{}) }); // null input section
try self.relocs.append(gpa, .{}); // null relocs section
try self.strtab.buffer.append(gpa, 0);
@@ -117,6 +119,8 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void {
self.global_symbols.deinit(allocator);
self.globals_lookup.deinit(allocator);
self.atoms.deinit(allocator);
+ self.atoms_indexes.deinit(allocator);
+ self.atoms_extra.deinit(allocator);
for (self.relocs.items) |*list| {
list.deinit(allocator);
}
@@ -276,24 +280,20 @@ pub fn addGlobalEsym(self: *ZigObject, allocator: Allocator) !Symbol.Index {
return index | global_symbol_bit;
}
-pub fn addAtom(self: *ZigObject, elf_file: *Elf) !Symbol.Index {
+pub fn newAtom(self: *ZigObject, elf_file: *Elf) !Symbol.Index {
const gpa = elf_file.base.comp.gpa;
- const atom_index = try elf_file.addAtom();
+ const atom_index = try self.addAtom(gpa);
const symbol_index = try elf_file.addSymbol();
const esym_index = try self.addLocalEsym(gpa);
- const shndx = @as(u32, @intCast(self.atoms.items.len));
- try self.atoms.append(gpa, atom_index);
+ try self.atoms_indexes.append(gpa, atom_index);
try self.local_symbols.append(gpa, symbol_index);
- const atom_ptr = elf_file.atom(atom_index).?;
- atom_ptr.file_index = self.index;
-
const symbol_ptr = elf_file.symbol(symbol_index);
symbol_ptr.file_index = self.index;
- symbol_ptr.atom_index = atom_index;
+ symbol_ptr.atom_ref = .{ .index = atom_index, .file = self.index };
- self.local_esyms.items(.shndx)[esym_index] = shndx;
+ self.local_esyms.items(.shndx)[esym_index] = atom_index;
self.local_esyms.items(.elf_sym)[esym_index].st_shndx = SHN_ATOM;
symbol_ptr.esym_index = esym_index;
@@ -301,21 +301,22 @@ pub fn addAtom(self: *ZigObject, elf_file: *Elf) !Symbol.Index {
const relocs_index = @as(u32, @intCast(self.relocs.items.len));
const relocs = try self.relocs.addOne(gpa);
relocs.* = .{};
+
+ const atom_ptr = self.atom(atom_index).?;
atom_ptr.relocs_section_index = relocs_index;
return symbol_index;
}
/// TODO actually create fake input shdrs and return that instead.
-pub fn inputShdr(self: ZigObject, atom_index: Atom.Index, elf_file: *Elf) elf.Elf64_Shdr {
- _ = self;
- const atom = elf_file.atom(atom_index) orelse return Elf.null_shdr;
- const shndx = atom.outputShndx() orelse return Elf.null_shdr;
+pub fn inputShdr(self: *ZigObject, atom_index: Atom.Index, elf_file: *Elf) elf.Elf64_Shdr {
+ const atom_ptr = self.atom(atom_index) orelse return Elf.null_shdr;
+ const shndx = atom_ptr.outputShndx() orelse return Elf.null_shdr;
var shdr = elf_file.shdrs.items[shndx];
shdr.sh_addr = 0;
shdr.sh_offset = 0;
- shdr.sh_size = atom.size;
- shdr.sh_addralign = atom.alignment.toByteUnits() orelse 1;
+ shdr.sh_size = atom_ptr.size;
+ shdr.sh_addralign = atom_ptr.alignment.toByteUnits() orelse 1;
return shdr;
}
@@ -329,24 +330,23 @@ pub fn resolveSymbols(self: *ZigObject, elf_file: *Elf) void {
if (esym.st_shndx != elf.SHN_ABS and esym.st_shndx != elf.SHN_COMMON) {
assert(esym.st_shndx == SHN_ATOM);
- const atom_index = self.atoms.items[shndx];
- const atom = elf_file.atom(atom_index) orelse continue;
- if (!atom.flags.alive) continue;
+ const atom_ptr = self.atom(shndx) orelse continue;
+ if (!atom_ptr.flags.alive) continue;
}
const global = elf_file.symbol(index);
if (self.asFile().symbolRank(esym, false) < global.symbolRank(elf_file)) {
const atom_index = switch (esym.st_shndx) {
elf.SHN_ABS, elf.SHN_COMMON => 0,
- SHN_ATOM => self.atoms.items[shndx],
+ SHN_ATOM => shndx,
else => unreachable,
};
- const output_section_index = if (elf_file.atom(atom_index)) |atom|
- atom.outputShndx().?
+ const output_section_index = if (self.atom(atom_index)) |atom_ptr|
+ atom_ptr.outputShndx().?
else
elf.SHN_UNDEF;
global.value = @intCast(esym.st_value);
- global.atom_index = atom_index;
+ global.atom_ref = .{ .index = atom_index, .file = self.index };
global.esym_index = esym_index;
global.file_index = self.index;
global.output_section_index = output_section_index;
@@ -376,7 +376,7 @@ pub fn claimUnresolved(self: ZigObject, elf_file: *Elf) void {
};
global.value = 0;
- global.atom_index = 0;
+ global.atom_ref = .{ .index = 0, .file = 0 };
global.esym_index = esym_index;
global.file_index = self.index;
global.version_index = if (is_import) elf.VER_NDX_LOCAL else elf_file.default_sym_version;
@@ -397,7 +397,7 @@ pub fn claimUnresolvedObject(self: ZigObject, elf_file: *Elf) void {
}
global.value = 0;
- global.atom_index = 0;
+ global.atom_ref = .{ .index = 0, .file = 0 };
global.esym_index = esym_index;
global.file_index = self.index;
}
@@ -405,19 +405,19 @@ pub fn claimUnresolvedObject(self: ZigObject, elf_file: *Elf) void {
pub fn scanRelocs(self: *ZigObject, elf_file: *Elf, undefs: anytype) !void {
const gpa = elf_file.base.comp.gpa;
- for (self.atoms.items) |atom_index| {
- const atom = elf_file.atom(atom_index) orelse continue;
- if (!atom.flags.alive) continue;
- const shdr = atom.inputShdr(elf_file);
+ for (self.atoms_indexes.items) |atom_index| {
+ const atom_ptr = self.atom(atom_index) orelse continue;
+ if (!atom_ptr.flags.alive) continue;
+ const shdr = atom_ptr.inputShdr(elf_file);
if (shdr.sh_type == elf.SHT_NOBITS) continue;
- if (atom.scanRelocsRequiresCode(elf_file)) {
+ if (atom_ptr.scanRelocsRequiresCode(elf_file)) {
// TODO ideally we don't have to fetch the code here.
// Perhaps it would make sense to save the code until flushModule where we
// would free all of generated code?
const code = try self.codeAlloc(elf_file, atom_index);
defer gpa.free(code);
- try atom.scanRelocs(elf_file, code, undefs);
- } else try atom.scanRelocs(elf_file, null, undefs);
+ try atom_ptr.scanRelocs(elf_file, code, undefs);
+ } else try atom_ptr.scanRelocs(elf_file, null, undefs);
}
}
@@ -450,9 +450,8 @@ pub fn checkDuplicates(self: *ZigObject, dupes: anytype, elf_file: *Elf) error{O
esym.st_shndx == elf.SHN_COMMON) continue;
if (esym.st_shndx == SHN_ATOM) {
- const atom_index = self.atoms.items[shndx];
- const atom = elf_file.atom(atom_index) orelse continue;
- if (!atom.flags.alive) continue;
+ const atom_ptr = self.atom(shndx) orelse continue;
+ if (!atom_ptr.flags.alive) continue;
}
const gop = try dupes.getOrPut(index);
@@ -517,20 +516,20 @@ pub fn writeAr(self: ZigObject, writer: anytype) !void {
try writer.writeAll(self.data.items);
}
-pub fn addAtomsToRelaSections(self: ZigObject, elf_file: *Elf) !void {
- for (self.atoms.items) |atom_index| {
- const atom = elf_file.atom(atom_index) orelse continue;
- if (!atom.flags.alive) continue;
- const rela_shndx = atom.relocsShndx() orelse continue;
+pub fn addAtomsToRelaSections(self: *ZigObject, elf_file: *Elf) !void {
+ for (self.atoms_indexes.items) |atom_index| {
+ const atom_ptr = self.atom(atom_index) orelse continue;
+ if (!atom_ptr.flags.alive) continue;
+ const rela_shndx = atom_ptr.relocsShndx() orelse continue;
// TODO this check will become obsolete when we rework our relocs mechanism at the ZigObject level
if (self.relocs.items[rela_shndx].items.len == 0) continue;
- const out_shndx = atom.outputShndx().?;
+ const out_shndx = atom_ptr.outputShndx().?;
const out_shdr = elf_file.shdrs.items[out_shndx];
if (out_shdr.sh_type == elf.SHT_NOBITS) continue;
const gpa = elf_file.base.comp.gpa;
const sec = elf_file.output_rela_sections.getPtr(out_shndx).?;
- try sec.atom_list.append(gpa, atom_index);
+ try sec.atom_list.append(gpa, .{ .index = atom_index, .file = self.index });
}
}
@@ -561,7 +560,7 @@ pub fn globals(self: ZigObject) []const Symbol.Index {
pub fn updateSymtabSize(self: *ZigObject, elf_file: *Elf) !void {
for (self.locals()) |local_index| {
const local = elf_file.symbol(local_index);
- if (local.atom(elf_file)) |atom| if (!atom.flags.alive) continue;
+ if (local.atom(elf_file)) |atom_ptr| if (!atom_ptr.flags.alive) continue;
const esym = local.elfSym(elf_file);
switch (esym.st_type()) {
elf.STT_SECTION, elf.STT_NOTYPE => continue,
@@ -577,7 +576,7 @@ pub fn updateSymtabSize(self: *ZigObject, elf_file: *Elf) !void {
const global = elf_file.symbol(global_index);
const file_ptr = global.file(elf_file) orelse continue;
if (file_ptr.index() != self.index) continue;
- if (global.atom(elf_file)) |atom| if (!atom.flags.alive) continue;
+ if (global.atom(elf_file)) |atom_ptr| if (!atom_ptr.flags.alive) continue;
global.flags.output_symtab = true;
if (global.isLocal(elf_file)) {
try global.addExtra(.{ .symtab = self.output_symtab_ctx.nlocals }, elf_file);
@@ -621,11 +620,10 @@ pub fn asFile(self: *ZigObject) File {
/// Returns atom's code.
/// Caller owns the memory.
-pub fn codeAlloc(self: ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8 {
+pub fn codeAlloc(self: *ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8 {
const gpa = elf_file.base.comp.gpa;
- const atom = elf_file.atom(atom_index).?;
- assert(atom.file_index == self.index);
- const shdr = &elf_file.shdrs.items[atom.outputShndx().?];
+ const atom_ptr = self.atom(atom_index).?;
+ const shdr = &elf_file.shdrs.items[atom_ptr.outputShndx().?];
if (shdr.sh_flags & elf.SHF_TLS != 0) {
const tlv = self.tls_variables.get(atom_index).?;
@@ -633,13 +631,13 @@ pub fn codeAlloc(self: ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8
return code;
}
- const file_offset = shdr.sh_offset + @as(u64, @intCast(atom.value));
- const size = std.math.cast(usize, atom.size) orelse return error.Overflow;
+ const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
+ const size = std.math.cast(usize, atom_ptr.size) orelse return error.Overflow;
const code = try gpa.alloc(u8, size);
errdefer gpa.free(code);
const amt = try elf_file.base.file.?.preadAll(code, file_offset);
if (amt != code.len) {
- log.err("fetching code for {s} failed", .{atom.name(elf_file)});
+ log.err("fetching code for {s} failed", .{atom_ptr.name(elf_file)});
return error.InputOutput;
}
return code;
@@ -760,7 +758,7 @@ pub fn getOrCreateMetadataForLazySymbol(
};
switch (metadata.state.*) {
.unused => {
- const symbol_index = try self.addAtom(elf_file);
+ const symbol_index = try self.newAtom(elf_file);
const sym = elf_file.symbol(symbol_index);
sym.flags.needs_zig_got = true;
metadata.symbol_index.* = symbol_index;
@@ -824,7 +822,7 @@ pub fn getOrCreateMetadataForDecl(
const gop = try self.decls.getOrPut(gpa, decl_index);
if (!gop.found_existing) {
const any_non_single_threaded = elf_file.base.comp.config.any_non_single_threaded;
- const symbol_index = try self.addAtom(elf_file);
+ const symbol_index = try self.newAtom(elf_file);
const mod = elf_file.base.comp.module.?;
const decl = mod.declPtr(decl_index);
const sym = elf_file.symbol(symbol_index);
@@ -1048,7 +1046,7 @@ fn updateTlv(
{
const gop = try elf_file.output_sections.getOrPut(gpa, atom_ptr.output_section_index);
if (!gop.found_existing) gop.value_ptr.* = .{};
- try gop.value_ptr.append(gpa, atom_ptr.atom_index);
+ try gop.value_ptr.append(gpa, .{ .index = atom_ptr.atom_index, .file = self.index });
}
}
@@ -1307,8 +1305,7 @@ pub fn lowerUnnamedConst(
return error.CodegenFail;
},
};
- const sym = elf_file.symbol(sym_index);
- try unnamed_consts.append(gpa, sym.atom_index);
+ try unnamed_consts.append(gpa, sym_index);
return sym_index;
}
@@ -1332,7 +1329,7 @@ fn lowerConst(
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
- const sym_index = try self.addAtom(elf_file);
+ const sym_index = try self.newAtom(elf_file);
const res = try codegen.generateSymbol(
&elf_file.base,
@@ -1530,6 +1527,72 @@ pub fn getString(self: ZigObject, off: u32) [:0]const u8 {
return self.strtab.getAssumeExists(off);
}
+fn addAtom(self: *ZigObject, allocator: Allocator) !Atom.Index {
+ try self.atoms.ensureUnusedCapacity(allocator, 1);
+ try self.atoms_extra.ensureUnusedCapacity(allocator, @sizeOf(Atom.Extra));
+ return self.addAtomAssumeCapacity();
+}
+
+fn addAtomAssumeCapacity(self: *ZigObject) Atom.Index {
+ const atom_index: Atom.Index = @intCast(self.atoms.items.len);
+ const atom_ptr = self.atoms.addOneAssumeCapacity();
+ atom_ptr.* = .{
+ .file_index = self.index,
+ .atom_index = atom_index,
+ .extra_index = self.addAtomExtraAssumeCapacity(.{}),
+ };
+ return atom_index;
+}
+
+pub fn atom(self: *ZigObject, atom_index: Atom.Index) ?*Atom {
+ if (atom_index == 0) return null;
+ assert(atom_index < self.atoms.items.len);
+ return &self.atoms.items[atom_index];
+}
+
+fn addAtomExtra(self: *ZigObject, allocator: Allocator, extra: Atom.Extra) !u32 {
+ const fields = @typeInfo(Atom.Extra).Struct.fields;
+ try self.atoms_extra.ensureUnusedCapacity(allocator, fields.len);
+ return self.addAtomExtraAssumeCapacity(extra);
+}
+
+fn addAtomExtraAssumeCapacity(self: *ZigObject, extra: Atom.Extra) u32 {
+ const index = @as(u32, @intCast(self.atoms_extra.items.len));
+ const fields = @typeInfo(Atom.Extra).Struct.fields;
+ inline for (fields) |field| {
+ self.atoms_extra.appendAssumeCapacity(switch (field.type) {
+ u32 => @field(extra, field.name),
+ else => @compileError("bad field type"),
+ });
+ }
+ return index;
+}
+
+pub fn atomExtra(self: ZigObject, index: u32) Atom.Extra {
+ const fields = @typeInfo(Atom.Extra).Struct.fields;
+ var i: usize = index;
+ var result: Atom.Extra = undefined;
+ inline for (fields) |field| {
+ @field(result, field.name) = switch (field.type) {
+ u32 => self.atoms_extra.items[i],
+ else => @compileError("bad field type"),
+ };
+ i += 1;
+ }
+ return result;
+}
+
+pub fn setAtomExtra(self: *ZigObject, index: u32, extra: Atom.Extra) void {
+ assert(index > 0);
+ const fields = @typeInfo(Atom.Extra).Struct.fields;
+ inline for (fields, 0..) |field, i| {
+ self.atoms_extra.items[index + i] = switch (field.type) {
+ u32 => @field(extra, field.name),
+ else => @compileError("bad field type"),
+ };
+ }
+}
+
pub fn fmtSymtab(self: *ZigObject, elf_file: *Elf) std.fmt.Formatter(formatSymtab) {
return .{ .data = .{
.self = self,
@@ -1578,9 +1641,9 @@ fn formatAtoms(
_ = unused_fmt_string;
_ = options;
try writer.writeAll(" atoms\n");
- for (ctx.self.atoms.items) |atom_index| {
- const atom = ctx.elf_file.atom(atom_index) orelse continue;
- try writer.print(" {}\n", .{atom.fmt(ctx.elf_file)});
+ for (ctx.self.atoms_indexes.items) |atom_index| {
+ const atom_ptr = ctx.self.atom(atom_index) orelse continue;
+ try writer.print(" {}\n", .{atom_ptr.fmt(ctx.elf_file)});
}
}
src/link/MachO/InternalObject.zig
@@ -45,8 +45,7 @@ pub fn deinit(self: *InternalObject, allocator: Allocator) void {
pub fn init(self: *InternalObject, allocator: Allocator) !void {
// Atom at index 0 is reserved as null atom.
- try self.atoms.append(allocator, .{});
- try self.atoms_extra.append(allocator, 0);
+ try self.atoms.append(allocator, .{ .extra = try self.addAtomExtra(allocator, .{}) });
// Null byte in strtab
try self.strtab.append(allocator, 0);
}
src/link/MachO/ZigObject.zig
@@ -1634,7 +1634,7 @@ fn isThreadlocal(macho_file: *MachO, decl_index: InternPool.DeclIndex) bool {
fn addAtom(self: *ZigObject, allocator: Allocator) !Atom.Index {
try self.atoms.ensureUnusedCapacity(allocator, 1);
- try self.atoms_extra.ensureUnusedCapacity(allocator, 1);
+ try self.atoms_extra.ensureUnusedCapacity(allocator, @sizeOf(Atom.Extra));
return self.addAtomAssumeCapacity();
}
src/link/Elf.zig
@@ -54,7 +54,7 @@ phdr_to_shdr_table: std.AutoHashMapUnmanaged(u32, u32) = .{},
shdr_table_offset: ?u64 = null,
/// Table of lists of atoms per output section.
/// This table is not used to track incrementally generated atoms.
-output_sections: std.AutoArrayHashMapUnmanaged(u32, std.ArrayListUnmanaged(Atom.Index)) = .{},
+output_sections: std.AutoArrayHashMapUnmanaged(u32, std.ArrayListUnmanaged(Ref)) = .{},
output_rela_sections: std.AutoArrayHashMapUnmanaged(u32, RelaSection) = .{},
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
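
Editor's note: output_sections (and, further down, the undefined-symbol lists, the init/fini entries, and RelaSection.atom_list) switch from bare Atom.Index values to Ref. Since atoms are now owned by their input files rather than by one linker-global table, a plain Atom.Index no longer identifies an atom across the whole link; it needs the owning file as well. The Ref type itself is not shown in this commit; inferred from its uses (ref.file, ref.index, self.file(ref.file), file_ptr.atom(ref.index)), a plausible shape is:

// Assumed shape of Ref, inferred from its uses in this diff; the real
// definition may differ.
const Ref = struct {
    /// Index of the owning input file; 0 is the null file.
    file: u32 = 0,
    /// Atom index local to that file; 0 is the null atom.
    index: u32 = 0,
};
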
@@ -203,10 +203,6 @@ resolver: std.AutoArrayHashMapUnmanaged(u32, Symbol.Index) = .{},
has_text_reloc: bool = false,
num_ifunc_dynrelocs: usize = 0,
-/// List of atoms that are owned directly by the linker.
-atoms: std.ArrayListUnmanaged(Atom) = .{},
-atoms_extra: std.ArrayListUnmanaged(u32) = .{},
-
/// List of range extension thunks.
thunks: std.ArrayListUnmanaged(Thunk) = .{},
@@ -375,9 +371,6 @@ pub fn createEmpty(
try self.symbols.append(gpa, .{});
// Index 0 is always a null symbol.
try self.symbols_extra.append(gpa, 0);
- // Allocate atom index 0 to null atom
- try self.atoms.append(gpa, .{});
- try self.atoms_extra.append(gpa, 0);
// Append null file at index 0
try self.files.append(gpa, .null);
// Append null byte to string tables
@@ -499,8 +492,6 @@ pub fn deinit(self: *Elf) void {
self.resolver.deinit(gpa);
self.start_stop_indexes.deinit(gpa);
- self.atoms.deinit(gpa);
- self.atoms_extra.deinit(gpa);
for (self.thunks.items) |*th| {
th.deinit(gpa);
}
@@ -1305,6 +1296,8 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
const index = @as(File.Index, @intCast(try self.files.addOne(gpa)));
self.files.set(index, .{ .linker_defined = .{ .index = index } });
self.linker_defined_index = index;
+ const object = self.file(index).?.linker_defined;
+ try object.init(gpa);
}
// Now, we are ready to resolve the symbols across all input files.
@@ -1379,15 +1372,15 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
// Beyond this point, everything has been allocated a virtual address and we can resolve
// the relocations, and commit objects to file.
- if (self.zigObjectPtr()) |zig_object| {
+ if (self.zigObjectPtr()) |zo| {
var has_reloc_errors = false;
- for (zig_object.atoms.items) |atom_index| {
- const atom_ptr = self.atom(atom_index) orelse continue;
+ for (zo.atoms_indexes.items) |atom_index| {
+ const atom_ptr = zo.atom(atom_index) orelse continue;
if (!atom_ptr.flags.alive) continue;
const out_shndx = atom_ptr.outputShndx() orelse continue;
const shdr = &self.shdrs.items[out_shndx];
if (shdr.sh_type == elf.SHT_NOBITS) continue;
- const code = try zig_object.codeAlloc(self, atom_index);
+ const code = try zo.codeAlloc(self, atom_index);
defer gpa.free(code);
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
atom_ptr.resolveRelocsAlloc(self, code) catch |err| switch (err) {
@@ -2012,8 +2005,8 @@ pub fn resolveSymbols(self: *Elf) void {
const cg_owner = self.comdatGroupOwner(cg.owner);
if (cg_owner.file != index) {
for (cg.comdatGroupMembers(self)) |shndx| {
- const atom_index = object.atoms.items[shndx];
- if (self.atom(atom_index)) |atom_ptr| {
+ const atom_index = object.atoms_indexes.items[shndx];
+ if (object.atom(atom_index)) |atom_ptr| {
atom_ptr.flags.alive = false;
atom_ptr.markFdesDead(self);
}
@@ -2117,7 +2110,7 @@ fn claimUnresolved(self: *Elf) void {
fn scanRelocs(self: *Elf) !void {
const gpa = self.base.comp.gpa;
- var undefs = std.AutoHashMap(Symbol.Index, std.ArrayList(Atom.Index)).init(gpa);
+ var undefs = std.AutoHashMap(Symbol.Index, std.ArrayList(Ref)).init(gpa);
defer {
var it = undefs.iterator();
while (it.next()) |entry| {
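
Editor's note: the undefs accumulator now maps a symbol to the Refs of the atoms that reference it, so each reported location carries its owning file. The hunk is cut off here, but the defer presumably frees each per-symbol list and then the map itself. A small self-contained sketch of that populate/cleanup pattern, with simplified stand-ins for Ref and Symbol.Index:

const std = @import("std");

const Ref = struct { file: u32 = 0, index: u32 = 0 };
const SymbolIndex = u32;

// Record that the atom `ref` references the undefined symbol `sym`.
fn noteUndef(
    gpa: std.mem.Allocator,
    undefs: *std.AutoHashMap(SymbolIndex, std.ArrayList(Ref)),
    sym: SymbolIndex,
    ref: Ref,
) !void {
    const gop = try undefs.getOrPut(sym);
    if (!gop.found_existing) gop.value_ptr.* = std.ArrayList(Ref).init(gpa);
    try gop.value_ptr.append(ref);
}

test "undefined references are grouped per symbol" {
    const gpa = std.testing.allocator;
    var undefs = std.AutoHashMap(SymbolIndex, std.ArrayList(Ref)).init(gpa);
    defer {
        var it = undefs.valueIterator();
        while (it.next()) |list| list.deinit();
        undefs.deinit();
    }

    try noteUndef(gpa, &undefs, 42, .{ .file = 1, .index = 3 });
    try noteUndef(gpa, &undefs, 42, .{ .file = 2, .index = 9 });
    try std.testing.expectEqual(@as(usize, 2), undefs.get(42).?.items.len);
}
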
@@ -3721,11 +3714,11 @@ fn sortInitFini(self: *Elf) !void {
const Entry = struct {
priority: i32,
- atom_index: Atom.Index,
+ atom_ref: Ref,
pub fn lessThan(ctx: *Elf, lhs: @This(), rhs: @This()) bool {
if (lhs.priority == rhs.priority) {
- return ctx.atom(lhs.atom_index).?.priority(ctx) < ctx.atom(rhs.atom_index).?.priority(ctx);
+ return ctx.atom(lhs.atom_ref).?.priority(ctx) < ctx.atom(rhs.atom_ref).?.priority(ctx);
}
return lhs.priority < rhs.priority;
}
@@ -3756,8 +3749,8 @@ fn sortInitFini(self: *Elf) !void {
try entries.ensureTotalCapacityPrecise(atom_list.items.len);
defer entries.deinit();
- for (atom_list.items) |atom_index| {
- const atom_ptr = self.atom(atom_index).?;
+ for (atom_list.items) |ref| {
+ const atom_ptr = self.atom(ref).?;
const object = atom_ptr.file(self).?.object;
const priority = blk: {
if (is_ctor_dtor) {
@@ -3770,14 +3763,14 @@ fn sortInitFini(self: *Elf) !void {
const priority = std.fmt.parseUnsigned(u16, it.first(), 10) catch default;
break :blk priority;
};
- entries.appendAssumeCapacity(.{ .priority = priority, .atom_index = atom_index });
+ entries.appendAssumeCapacity(.{ .priority = priority, .atom_ref = ref });
}
mem.sort(Entry, entries.items, self, Entry.lessThan);
atom_list.clearRetainingCapacity();
for (entries.items) |entry| {
- atom_list.appendAssumeCapacity(entry.atom_index);
+ atom_list.appendAssumeCapacity(entry.atom_ref);
}
}
}
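
Editor's note: sortInitFini keeps its two-level ordering; only the payload carried through the sort changes from an Atom.Index to a Ref. A standalone sketch of the same ordering rule under assumed names: ctorPriority stands in for the parseUnsigned call above (numeric suffix of names like ".ctors.00100"), and tie_break stands in for Atom.priority(), which encodes file and input-section order.

const std = @import("std");

const Entry = struct {
    priority: i32,
    // Stand-in for Atom.priority(): breaks ties between entries with the
    // same section priority.
    tie_break: u64,

    fn lessThan(_: void, lhs: Entry, rhs: Entry) bool {
        if (lhs.priority == rhs.priority) return lhs.tie_break < rhs.tie_break;
        return lhs.priority < rhs.priority;
    }
};

// Extract the numeric suffix from names such as ".ctors.00100"; fall back to
// `default` when there is none.
fn ctorPriority(name: []const u8, default: i32) i32 {
    const dot = std.mem.lastIndexOfScalar(u8, name, '.') orelse return default;
    return std.fmt.parseUnsigned(u16, name[dot + 1 ..], 10) catch default;
}

test "init/fini entries sort by priority, then by atom order" {
    var entries = [_]Entry{
        .{ .priority = ctorPriority(".ctors.00100", 65535), .tie_break = 1 },
        .{ .priority = ctorPriority(".ctors", 65535), .tie_break = 0 },
        .{ .priority = ctorPriority(".ctors.00100", 65535), .tie_break = 0 },
    };
    std.mem.sort(Entry, &entries, {}, Entry.lessThan);
    try std.testing.expectEqual(@as(i32, 100), entries[0].priority);
    try std.testing.expectEqual(@as(u64, 0), entries[0].tie_break);
    try std.testing.expectEqual(@as(i32, 65535), entries[2].priority);
}
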
@@ -4143,23 +4136,23 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) !void {
}
}
- if (self.zigObjectPtr()) |zig_object| {
- for (zig_object.atoms.items) |atom_index| {
- const atom_ptr = self.atom(atom_index) orelse continue;
+ if (self.zigObjectPtr()) |zo| {
+ for (zo.atoms_indexes.items) |atom_index| {
+ const atom_ptr = zo.atom(atom_index) orelse continue;
atom_ptr.output_section_index = backlinks[atom_ptr.output_section_index];
}
- for (zig_object.locals()) |local_index| {
+ for (zo.locals()) |local_index| {
const local = self.symbol(local_index);
local.output_section_index = backlinks[local.output_section_index];
}
- for (zig_object.globals()) |global_index| {
+ for (zo.globals()) |global_index| {
const global = self.symbol(global_index);
const atom_ptr = global.atom(self) orelse continue;
if (!atom_ptr.flags.alive) continue;
// TODO claim unresolved for objects
- if (global.file(self).?.index() != zig_object.index) continue;
+ if (global.file(self).?.index() != zo.index) continue;
const out_shndx = global.outputShndx() orelse continue;
global.output_section_index = backlinks[out_shndx];
}
@@ -4182,8 +4175,8 @@ fn updateSectionSizes(self: *Elf) !void {
const shdr = &self.shdrs.items[shndx];
if (atom_list.items.len == 0) continue;
if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue;
- for (atom_list.items) |atom_index| {
- const atom_ptr = self.atom(atom_index) orelse continue;
+ for (atom_list.items) |ref| {
+ const atom_ptr = self.atom(ref) orelse continue;
if (!atom_ptr.flags.alive) continue;
const offset = atom_ptr.alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
@@ -4618,7 +4611,7 @@ fn allocateSpecialPhdrs(self: *Elf) void {
fn writeAtoms(self: *Elf) !void {
const gpa = self.base.comp.gpa;
- var undefs = std.AutoHashMap(Symbol.Index, std.ArrayList(Atom.Index)).init(gpa);
+ var undefs = std.AutoHashMap(Symbol.Index, std.ArrayList(Ref)).init(gpa);
defer {
var it = undefs.iterator();
while (it.next()) |entry| {
@@ -4666,21 +4659,21 @@ fn writeAtoms(self: *Elf) !void {
0;
@memset(buffer, padding_byte);
- for (atom_list.items) |atom_index| {
- const atom_ptr = self.atom(atom_index).?;
+ for (atom_list.items) |ref| {
+ const atom_ptr = self.atom(ref).?;
assert(atom_ptr.flags.alive);
const offset = math.cast(usize, atom_ptr.value - @as(i64, @intCast(base_offset))) orelse
return error.Overflow;
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
- log.debug("writing atom({d}) at 0x{x}", .{ atom_index, sh_offset + offset });
+ log.debug("writing atom({}) at 0x{x}", .{ ref, sh_offset + offset });
// TODO decompress directly into provided buffer
const out_code = buffer[offset..][0..size];
const in_code = switch (atom_ptr.file(self).?) {
- .object => |x| try x.codeDecompressAlloc(self, atom_index),
- .zig_object => |x| try x.codeAlloc(self, atom_index),
+ .object => |x| try x.codeDecompressAlloc(self, ref.index),
+ .zig_object => |x| try x.codeAlloc(self, ref.index),
else => unreachable,
};
defer gpa.free(in_code);
@@ -5598,64 +5591,6 @@ fn getStartStopBasename(self: *Elf, shdr: elf.Elf64_Shdr) ?[]const u8 {
return null;
}
-pub fn atom(self: *Elf, atom_index: Atom.Index) ?*Atom {
- if (atom_index == 0) return null;
- assert(atom_index < self.atoms.items.len);
- return &self.atoms.items[atom_index];
-}
-
-pub fn addAtom(self: *Elf) !Atom.Index {
- const gpa = self.base.comp.gpa;
- const index = @as(Atom.Index, @intCast(self.atoms.items.len));
- const atom_ptr = try self.atoms.addOne(gpa);
- atom_ptr.* = .{ .atom_index = index };
- return index;
-}
-
-pub fn addAtomExtra(self: *Elf, extra: Atom.Extra) !u32 {
- const fields = @typeInfo(Atom.Extra).Struct.fields;
- try self.atoms_extra.ensureUnusedCapacity(self.base.comp.gpa, fields.len);
- return self.addAtomExtraAssumeCapacity(extra);
-}
-
-pub fn addAtomExtraAssumeCapacity(self: *Elf, extra: Atom.Extra) u32 {
- const index = @as(u32, @intCast(self.atoms_extra.items.len));
- const fields = @typeInfo(Atom.Extra).Struct.fields;
- inline for (fields) |field| {
- self.atoms_extra.appendAssumeCapacity(switch (field.type) {
- u32 => @field(extra, field.name),
- else => @compileError("bad field type"),
- });
- }
- return index;
-}
-
-pub fn atomExtra(self: *Elf, index: u32) ?Atom.Extra {
- if (index == 0) return null;
- const fields = @typeInfo(Atom.Extra).Struct.fields;
- var i: usize = index;
- var result: Atom.Extra = undefined;
- inline for (fields) |field| {
- @field(result, field.name) = switch (field.type) {
- u32 => self.atoms_extra.items[i],
- else => @compileError("bad field type"),
- };
- i += 1;
- }
- return result;
-}
-
-pub fn setAtomExtra(self: *Elf, index: u32, extra: Atom.Extra) void {
- assert(index > 0);
- const fields = @typeInfo(Atom.Extra).Struct.fields;
- inline for (fields, 0..) |field, i| {
- self.atoms_extra.items[index + i] = switch (field.type) {
- u32 => @field(extra, field.name),
- else => @compileError("bad field type"),
- };
- }
-}
-
pub fn addThunk(self: *Elf) !Thunk.Index {
const index = @as(Thunk.Index, @intCast(self.thunks.items.len));
const th = try self.thunks.addOne(self.base.comp.gpa);
@@ -5692,6 +5627,11 @@ pub fn fileHandle(self: Elf, index: File.HandleIndex) File.Handle {
return self.file_handles.items[index];
}
+pub fn atom(self: *Elf, ref: Ref) ?*Atom {
+ const file_ptr = self.file(ref.file) orelse return null;
+ return file_ptr.atom(ref.index);
+}
+
/// Returns pointer-to-symbol described at sym_index.
pub fn symbol(self: *Elf, sym_index: Symbol.Index) *Symbol {
return &self.symbols.items[sym_index];
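
Editor's note: with the linker-global atoms and atoms_extra tables removed, Elf.atom becomes a thin dispatcher: resolve the owning file from ref.file, then ask that file for its local atom. A self-contained toy model of that two-level lookup follows; Ref, InputFile, and Linker here are simplified stand-ins, not the real types.

const std = @import("std");

const Ref = struct { file: u32 = 0, index: u32 = 0 };
const Atom = struct { size: u64 = 0 };

const InputFile = struct {
    atoms: std.ArrayListUnmanaged(Atom) = .{},

    // Index 0 is reserved for the null atom, as in the ZigObject accessor.
    fn atom(self: *InputFile, index: u32) ?*Atom {
        if (index == 0) return null;
        return &self.atoms.items[index];
    }
};

const Linker = struct {
    files: std.ArrayListUnmanaged(InputFile) = .{},

    fn file(self: *Linker, index: u32) ?*InputFile {
        if (index == 0) return null; // index 0 is the null file
        return &self.files.items[index];
    }

    // Mirrors the new Elf.atom: resolve the file first, then its local atom.
    fn atom(self: *Linker, ref: Ref) ?*Atom {
        const file_ptr = self.file(ref.file) orelse return null;
        return file_ptr.atom(ref.index);
    }
};

test "a Ref resolves through its owning file" {
    const gpa = std.testing.allocator;
    var linker: Linker = .{};
    defer {
        for (linker.files.items) |*f| f.atoms.deinit(gpa);
        linker.files.deinit(gpa);
    }

    try linker.files.appendSlice(gpa, &.{ .{}, .{} }); // null file + one real file
    try linker.files.items[1].atoms.appendSlice(gpa, &.{ .{}, .{ .size = 16 } });

    try std.testing.expect(linker.atom(.{}) == null); // null ref resolves to nothing
    try std.testing.expectEqual(@as(u64, 16), linker.atom(.{ .file = 1, .index = 1 }).?.size);
}
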
@@ -5938,9 +5878,9 @@ fn reportUndefinedSymbols(self: *Elf, undefs: anytype) !void {
var err = try self.base.addErrorWithNotesAssumeCapacity(nnotes);
try err.addMsg("undefined symbol: {s}", .{self.symbol(undef_index).name(self)});
- for (atoms[0..natoms]) |atom_index| {
- const atom_ptr = self.atom(atom_index).?;
- const file_ptr = self.file(atom_ptr.file_index).?;
+ for (atoms[0..natoms]) |ref| {
+ const atom_ptr = self.atom(ref).?;
+ const file_ptr = self.file(ref.file).?;
try err.addNote("referenced by {s}:{s}", .{ file_ptr.fmtPath(), atom_ptr.name(self) });
}
@@ -6401,7 +6341,7 @@ const LastAtomAndFreeListTable = std.AutoArrayHashMapUnmanaged(u32, LastAtomAndF
const RelaSection = struct {
shndx: u32,
- atom_list: std.ArrayListUnmanaged(Atom.Index) = .{},
+ atom_list: std.ArrayListUnmanaged(Ref) = .{},
};
const RelaSectionTable = std.AutoArrayHashMapUnmanaged(u32, RelaSection);