Commit fc7dd3e285
Changed files (9)
src/link/Elf/Atom.zig
@@ -312,6 +312,7 @@ pub fn relocs(self: Atom, elf_file: *Elf) []align(1) const elf.Elf64_Rela {
pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.Elf64_Rela)) !void {
relocs_log.debug("0x{x}: {s}", .{ self.address(elf_file), self.name(elf_file) });
+ const cpu_arch = elf_file.getTarget().cpu.arch;
const file_ptr = self.file(elf_file).?;
for (self.relocs(elf_file)) |rel| {
const target_index = switch (file_ptr) {
@@ -320,12 +321,7 @@ pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.El
else => unreachable,
};
const target = elf_file.symbol(target_index);
- const r_type = switch (rel.r_type()) {
- Elf.R_X86_64_ZIG_GOT32,
- Elf.R_X86_64_ZIG_GOTPCREL,
- => unreachable, // Sanity check if we accidentally emitted those.
- else => |r_type| r_type,
- };
+ const r_type = rel.r_type();
const r_offset = self.value + rel.r_offset;
var r_addend = rel.r_addend;
var r_sym: u32 = 0;
@@ -340,7 +336,7 @@ pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.El
}
relocs_log.debug(" {s}: [{x} => {d}({s})] + {x}", .{
- fmtRelocType(r_type),
+ relocation.fmtRelocType(r_type, cpu_arch),
r_offset,
r_sym,
target.name(elf_file),
@@ -385,155 +381,20 @@ pub fn freeRelocs(self: Atom, elf_file: *Elf) void {
}
pub fn scanRelocsRequiresCode(self: Atom, elf_file: *Elf) bool {
+ const cpu_arch = elf_file.getTarget().cpu.arch;
for (self.relocs(elf_file)) |rel| {
- if (rel.r_type() == elf.R_X86_64_GOTTPOFF) return true;
+ switch (cpu_arch) {
+ .x86_64 => if (rel.r_type() == elf.R_X86_64_GOTTPOFF) return true,
+ else => {},
+ }
}
return false;
}
pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype) !void {
- const is_static = elf_file.base.isStatic();
- const is_dyn_lib = elf_file.base.isDynLib();
- const file_ptr = self.file(elf_file).?;
- const rels = self.relocs(elf_file);
- var i: usize = 0;
- while (i < rels.len) : (i += 1) {
- const rel = rels[i];
-
- if (rel.r_type() == elf.R_X86_64_NONE) continue;
-
- const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
-
- const symbol_index = switch (file_ptr) {
- .zig_object => |x| x.symbol(rel.r_sym()),
- .object => |x| x.symbols.items[rel.r_sym()],
- else => unreachable,
- };
- const symbol = elf_file.symbol(symbol_index);
-
- // Check for violation of One Definition Rule for COMDATs.
- if (symbol.file(elf_file) == null) {
- // TODO convert into an error
- log.debug("{}: {s}: {s} refers to a discarded COMDAT section", .{
- file_ptr.fmtPath(),
- self.name(elf_file),
- symbol.name(elf_file),
- });
- continue;
- }
-
- // Report an undefined symbol.
- try self.reportUndefined(elf_file, symbol, symbol_index, rel, undefs);
-
- if (symbol.isIFunc(elf_file)) {
- symbol.flags.needs_got = true;
- symbol.flags.needs_plt = true;
- }
-
- // While traversing relocations, mark symbols that require special handling such as
- // pointer indirection via GOT, or a stub trampoline via PLT.
- switch (rel.r_type()) {
- elf.R_X86_64_64 => {
- try self.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
- },
-
- elf.R_X86_64_32,
- elf.R_X86_64_32S,
- => {
- try self.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
- },
-
- elf.R_X86_64_GOT32,
- elf.R_X86_64_GOTPC32,
- elf.R_X86_64_GOTPC64,
- elf.R_X86_64_GOTPCREL,
- elf.R_X86_64_GOTPCREL64,
- elf.R_X86_64_GOTPCRELX,
- elf.R_X86_64_REX_GOTPCRELX,
- => {
- symbol.flags.needs_got = true;
- },
-
- elf.R_X86_64_PLT32,
- elf.R_X86_64_PLTOFF64,
- => {
- if (symbol.flags.import) {
- symbol.flags.needs_plt = true;
- }
- },
-
- elf.R_X86_64_PC32 => {
- try self.scanReloc(symbol, rel, pcRelocAction(symbol, elf_file), elf_file);
- },
-
- elf.R_X86_64_TLSGD => {
- // TODO verify followed by appropriate relocation such as PLT32 __tls_get_addr
-
- if (is_static or (!symbol.flags.import and !is_dyn_lib)) {
- // Relax if building with -static flag as __tls_get_addr() will not be present in libc.a
- // We skip the next relocation.
- i += 1;
- } else if (!symbol.flags.import and is_dyn_lib) {
- symbol.flags.needs_gottp = true;
- i += 1;
- } else {
- symbol.flags.needs_tlsgd = true;
- }
- },
-
- elf.R_X86_64_TLSLD => {
- // TODO verify followed by appropriate relocation such as PLT32 __tls_get_addr
-
- if (is_static or !is_dyn_lib) {
- // Relax if building with -static flag as __tls_get_addr() will not be present in libc.a
- // We skip the next relocation.
- i += 1;
- } else {
- elf_file.got.flags.needs_tlsld = true;
- }
- },
-
- elf.R_X86_64_GOTTPOFF => {
- const should_relax = blk: {
- if (is_dyn_lib or symbol.flags.import) break :blk false;
- if (!x86_64.canRelaxGotTpOff(code.?[r_offset - 3 ..])) break :blk false;
- break :blk true;
- };
- if (!should_relax) {
- symbol.flags.needs_gottp = true;
- }
- },
-
- elf.R_X86_64_GOTPC32_TLSDESC => {
- const should_relax = is_static or (!is_dyn_lib and !symbol.flags.import);
- if (!should_relax) {
- symbol.flags.needs_tlsdesc = true;
- }
- },
-
- elf.R_X86_64_TPOFF32,
- elf.R_X86_64_TPOFF64,
- => {
- if (is_dyn_lib) try self.reportPicError(symbol, rel, elf_file);
- },
-
- elf.R_X86_64_GOTOFF64,
- elf.R_X86_64_DTPOFF32,
- elf.R_X86_64_DTPOFF64,
- elf.R_X86_64_SIZE32,
- elf.R_X86_64_SIZE64,
- elf.R_X86_64_TLSDESC_CALL,
- => {},
-
- // Zig custom relocations
- Elf.R_X86_64_ZIG_GOT32,
- Elf.R_X86_64_ZIG_GOTPCREL,
- => {
- assert(symbol.flags.has_zig_got);
- },
-
- else => try self.reportUnhandledRelocError(rel, elf_file),
- }
+ switch (elf_file.getTarget().cpu.arch) {
+ .x86_64 => try x86_64.scanRelocs(self, elf_file, code, undefs),
+ else => return error.UnsupportedCpuArch,
}
}
@@ -693,7 +554,7 @@ fn dataType(symbol: *const Symbol, elf_file: *Elf) u2 {
fn reportUnhandledRelocError(self: Atom, rel: elf.Elf64_Rela, elf_file: *Elf) error{OutOfMemory}!void {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: unhandled relocation type {} at offset 0x{x}", .{
- fmtRelocType(rel.r_type()),
+ relocation.fmtRelocType(rel.r_type(), elf_file.getTarget().cpu.arch),
rel.r_offset,
});
try err.addNote(elf_file, "in {}:{s}", .{
@@ -789,182 +650,9 @@ fn reportUndefined(
pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) !void {
relocs_log.debug("0x{x}: {s}", .{ self.address(elf_file), self.name(elf_file) });
- const file_ptr = self.file(elf_file).?;
- var stream = std.io.fixedBufferStream(code);
- const cwriter = stream.writer();
-
- const rels = self.relocs(elf_file);
- var i: usize = 0;
- while (i < rels.len) : (i += 1) {
- const rel = rels[i];
- const r_type = rel.r_type();
- if (r_type == elf.R_X86_64_NONE) continue;
-
- const target = switch (file_ptr) {
- .zig_object => |x| elf_file.symbol(x.symbol(rel.r_sym())),
- .object => |x| elf_file.symbol(x.symbols.items[rel.r_sym()]),
- else => unreachable,
- };
- const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
-
- // We will use equation format to resolve relocations:
- // https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
- //
- // Address of the source atom.
- const P = @as(i64, @intCast(self.address(elf_file) + rel.r_offset));
- // Addend from the relocation.
- const A = rel.r_addend;
- // Address of the target symbol - can be address of the symbol within an atom or address of PLT stub.
- const S = @as(i64, @intCast(target.address(.{}, elf_file)));
- // Address of the global offset table.
- const GOT = blk: {
- const shndx = if (elf_file.got_plt_section_index) |shndx|
- shndx
- else if (elf_file.got_section_index) |shndx|
- shndx
- else
- null;
- break :blk if (shndx) |index| @as(i64, @intCast(elf_file.shdrs.items[index].sh_addr)) else 0;
- };
- // Address of the .zig.got table entry if any.
- const ZIG_GOT = @as(i64, @intCast(target.zigGotAddress(elf_file)));
- // Relative offset to the start of the global offset table.
- const G = @as(i64, @intCast(target.gotAddress(elf_file))) - GOT;
- // // Address of the thread pointer.
- const TP = @as(i64, @intCast(elf_file.tpAddress()));
- // Address of the dynamic thread pointer.
- const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
-
- relocs_log.debug(" {s}: {x}: [{x} => {x}] G({x}) ZG({x}) ({s})", .{
- fmtRelocType(r_type),
- r_offset,
- P,
- S + A,
- G + GOT + A,
- ZIG_GOT + A,
- target.name(elf_file),
- });
-
- try stream.seekTo(r_offset);
-
- switch (rel.r_type()) {
- elf.R_X86_64_NONE => unreachable,
-
- elf.R_X86_64_64 => {
- try self.resolveDynAbsReloc(
- target,
- rel,
- dynAbsRelocAction(target, elf_file),
- elf_file,
- cwriter,
- );
- },
-
- elf.R_X86_64_PLT32,
- elf.R_X86_64_PC32,
- => try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little),
-
- elf.R_X86_64_GOTPCREL => try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little),
- elf.R_X86_64_GOTPC32 => try cwriter.writeInt(i32, @as(i32, @intCast(GOT + A - P)), .little),
- elf.R_X86_64_GOTPC64 => try cwriter.writeInt(i64, GOT + A - P, .little),
-
- elf.R_X86_64_GOTPCRELX => {
- if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
- x86_64.relaxGotpcrelx(code[r_offset - 2 ..]) catch break :blk;
- try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
- continue;
- }
- try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
- },
-
- elf.R_X86_64_REX_GOTPCRELX => {
- if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
- x86_64.relaxRexGotpcrelx(code[r_offset - 3 ..]) catch break :blk;
- try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
- continue;
- }
- try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
- },
-
- elf.R_X86_64_32 => try cwriter.writeInt(u32, @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
- elf.R_X86_64_32S => try cwriter.writeInt(i32, @as(i32, @truncate(S + A)), .little),
-
- elf.R_X86_64_TPOFF32 => try cwriter.writeInt(i32, @as(i32, @truncate(S + A - TP)), .little),
- elf.R_X86_64_TPOFF64 => try cwriter.writeInt(i64, S + A - TP, .little),
-
- elf.R_X86_64_DTPOFF32 => try cwriter.writeInt(i32, @as(i32, @truncate(S + A - DTP)), .little),
- elf.R_X86_64_DTPOFF64 => try cwriter.writeInt(i64, S + A - DTP, .little),
-
- elf.R_X86_64_TLSGD => {
- if (target.flags.has_tlsgd) {
- const S_ = @as(i64, @intCast(target.tlsGdAddress(elf_file)));
- try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
- } else if (target.flags.has_gottp) {
- const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
- try x86_64.relaxTlsGdToIe(self, rels[i .. i + 2], @intCast(S_ - P), elf_file, &stream);
- i += 1;
- } else {
- try x86_64.relaxTlsGdToLe(
- self,
- rels[i .. i + 2],
- @as(i32, @intCast(S - TP)),
- elf_file,
- &stream,
- );
- i += 1;
- }
- },
-
- elf.R_X86_64_TLSLD => {
- if (elf_file.got.tlsld_index) |entry_index| {
- const tlsld_entry = elf_file.got.entries.items[entry_index];
- const S_ = @as(i64, @intCast(tlsld_entry.address(elf_file)));
- try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
- } else {
- try x86_64.relaxTlsLdToLe(
- self,
- rels[i .. i + 2],
- @as(i32, @intCast(TP - @as(i64, @intCast(elf_file.tlsAddress())))),
- elf_file,
- &stream,
- );
- i += 1;
- }
- },
-
- elf.R_X86_64_GOTPC32_TLSDESC => {
- if (target.flags.has_tlsdesc) {
- const S_ = @as(i64, @intCast(target.tlsDescAddress(elf_file)));
- try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
- } else {
- try x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..]);
- try cwriter.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
- }
- },
-
- elf.R_X86_64_TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
- // call -> nop
- try cwriter.writeAll(&.{ 0x66, 0x90 });
- },
-
- elf.R_X86_64_GOTTPOFF => {
- if (target.flags.has_gottp) {
- const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
- try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
- } else {
- x86_64.relaxGotTpOff(code[r_offset - 3 ..]) catch unreachable;
- try cwriter.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
- }
- },
-
- elf.R_X86_64_GOT32 => try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A)), .little),
-
- // Zig custom relocations
- Elf.R_X86_64_ZIG_GOT32 => try cwriter.writeInt(u32, @as(u32, @intCast(ZIG_GOT + A)), .little),
- Elf.R_X86_64_ZIG_GOTPCREL => try cwriter.writeInt(i32, @as(i32, @intCast(ZIG_GOT + A - P)), .little),
-
- else => {},
- }
+ switch (elf_file.getTarget().cpu.arch) {
+ .x86_64 => try x86_64.resolveRelocsAlloc(self, elf_file, code),
+ else => return error.UnsupportedCpuArch,
}
}
@@ -978,6 +666,7 @@ fn resolveDynAbsReloc(
) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
+ const cpu_arch = elf_file.getTarget().cpu.arch;
const P = self.address(elf_file) + rel.r_offset;
const A = rel.r_addend;
const S = @as(i64, @intCast(target.address(.{}, elf_file)));
@@ -1005,7 +694,7 @@ fn resolveDynAbsReloc(
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
.sym = target.extra(elf_file).?.dynamic,
- .type = elf.R_X86_64_64,
+ .type = relocation.encode(.abs, cpu_arch),
.addend = A,
});
try applyDynamicReloc(A, elf_file, writer);
@@ -1019,7 +708,7 @@ fn resolveDynAbsReloc(
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
.sym = target.extra(elf_file).?.dynamic,
- .type = elf.R_X86_64_64,
+ .type = relocation.encode(.abs, cpu_arch),
.addend = A,
});
try applyDynamicReloc(A, elf_file, writer);
@@ -1032,7 +721,7 @@ fn resolveDynAbsReloc(
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
.sym = target.extra(elf_file).?.dynamic,
- .type = elf.R_X86_64_64,
+ .type = relocation.encode(.abs, cpu_arch),
.addend = A,
});
try applyDynamicReloc(A, elf_file, writer);
@@ -1041,7 +730,7 @@ fn resolveDynAbsReloc(
.baserel => {
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
- .type = elf.R_X86_64_RELATIVE,
+ .type = relocation.encode(.rel, cpu_arch),
.addend = S + A,
});
try applyDynamicReloc(S + A, elf_file, writer);
@@ -1051,7 +740,7 @@ fn resolveDynAbsReloc(
const S_ = @as(i64, @intCast(target.address(.{ .plt = false }, elf_file)));
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
- .type = elf.R_X86_64_IRELATIVE,
+ .type = relocation.encode(.irel, cpu_arch),
.addend = S_ + A,
});
try applyDynamicReloc(S_ + A, elf_file, writer);
@@ -1069,158 +758,12 @@ fn applyDynamicReloc(value: i64, elf_file: *Elf, writer: anytype) !void {
pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: anytype) !void {
relocs_log.debug("0x{x}: {s}", .{ self.address(elf_file), self.name(elf_file) });
- const file_ptr = self.file(elf_file).?;
- var stream = std.io.fixedBufferStream(code);
- const cwriter = stream.writer();
-
- const rels = self.relocs(elf_file);
- var i: usize = 0;
- while (i < rels.len) : (i += 1) {
- const rel = rels[i];
- const r_type = rel.r_type();
- if (r_type == elf.R_X86_64_NONE) continue;
-
- const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
-
- const target_index = switch (file_ptr) {
- .zig_object => |x| x.symbol(rel.r_sym()),
- .object => |x| x.symbols.items[rel.r_sym()],
- else => unreachable,
- };
- const target = elf_file.symbol(target_index);
-
- // Check for violation of One Definition Rule for COMDATs.
- if (target.file(elf_file) == null) {
- // TODO convert into an error
- log.debug("{}: {s}: {s} refers to a discarded COMDAT section", .{
- file_ptr.fmtPath(),
- self.name(elf_file),
- target.name(elf_file),
- });
- continue;
- }
-
- // Report an undefined symbol.
- try self.reportUndefined(elf_file, target, target_index, rel, undefs);
-
- // We will use equation format to resolve relocations:
- // https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
- //
- const P = @as(i64, @intCast(self.address(elf_file) + rel.r_offset));
- // Addend from the relocation.
- const A = rel.r_addend;
- // Address of the target symbol - can be address of the symbol within an atom or address of PLT stub.
- const S = @as(i64, @intCast(target.address(.{}, elf_file)));
- // Address of the global offset table.
- const GOT = blk: {
- const shndx = if (elf_file.got_plt_section_index) |shndx|
- shndx
- else if (elf_file.got_section_index) |shndx|
- shndx
- else
- null;
- break :blk if (shndx) |index| @as(i64, @intCast(elf_file.shdrs.items[index].sh_addr)) else 0;
- };
- // Address of the dynamic thread pointer.
- const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
-
- relocs_log.debug(" {s}: {x}: [{x} => {x}] ({s})", .{
- fmtRelocType(r_type),
- rel.r_offset,
- P,
- S + A,
- target.name(elf_file),
- });
-
- try stream.seekTo(r_offset);
-
- switch (r_type) {
- elf.R_X86_64_NONE => unreachable,
- elf.R_X86_64_8 => try cwriter.writeInt(u8, @as(u8, @bitCast(@as(i8, @intCast(S + A)))), .little),
- elf.R_X86_64_16 => try cwriter.writeInt(u16, @as(u16, @bitCast(@as(i16, @intCast(S + A)))), .little),
- elf.R_X86_64_32 => try cwriter.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(S + A)))), .little),
- elf.R_X86_64_32S => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
- elf.R_X86_64_64 => try cwriter.writeInt(i64, S + A, .little),
- elf.R_X86_64_DTPOFF32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
- elf.R_X86_64_DTPOFF64 => try cwriter.writeInt(i64, S + A - DTP, .little),
- elf.R_X86_64_GOTOFF64 => try cwriter.writeInt(i64, S + A - GOT, .little),
- elf.R_X86_64_GOTPC64 => try cwriter.writeInt(i64, GOT + A, .little),
- elf.R_X86_64_SIZE32 => {
- const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
- try cwriter.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(size + A)))), .little);
- },
- elf.R_X86_64_SIZE64 => {
- const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
- try cwriter.writeInt(i64, @as(i64, @intCast(size + A)), .little);
- },
- else => try self.reportUnhandledRelocError(rel, elf_file),
- }
+ switch (elf_file.getTarget().cpu.arch) {
+ .x86_64 => try x86_64.resolveRelocsNonAlloc(self, elf_file, code, undefs),
+ else => return error.UnsupportedCpuArch,
}
}
-pub fn fmtRelocType(r_type: u32) std.fmt.Formatter(formatRelocType) {
- return .{ .data = r_type };
-}
-
-fn formatRelocType(
- r_type: u32,
- comptime unused_fmt_string: []const u8,
- options: std.fmt.FormatOptions,
- writer: anytype,
-) !void {
- _ = unused_fmt_string;
- _ = options;
- const str = switch (r_type) {
- elf.R_X86_64_NONE => "R_X86_64_NONE",
- elf.R_X86_64_64 => "R_X86_64_64",
- elf.R_X86_64_PC32 => "R_X86_64_PC32",
- elf.R_X86_64_GOT32 => "R_X86_64_GOT32",
- elf.R_X86_64_PLT32 => "R_X86_64_PLT32",
- elf.R_X86_64_COPY => "R_X86_64_COPY",
- elf.R_X86_64_GLOB_DAT => "R_X86_64_GLOB_DAT",
- elf.R_X86_64_JUMP_SLOT => "R_X86_64_JUMP_SLOT",
- elf.R_X86_64_RELATIVE => "R_X86_64_RELATIVE",
- elf.R_X86_64_GOTPCREL => "R_X86_64_GOTPCREL",
- elf.R_X86_64_32 => "R_X86_64_32",
- elf.R_X86_64_32S => "R_X86_64_32S",
- elf.R_X86_64_16 => "R_X86_64_16",
- elf.R_X86_64_PC16 => "R_X86_64_PC16",
- elf.R_X86_64_8 => "R_X86_64_8",
- elf.R_X86_64_PC8 => "R_X86_64_PC8",
- elf.R_X86_64_DTPMOD64 => "R_X86_64_DTPMOD64",
- elf.R_X86_64_DTPOFF64 => "R_X86_64_DTPOFF64",
- elf.R_X86_64_TPOFF64 => "R_X86_64_TPOFF64",
- elf.R_X86_64_TLSGD => "R_X86_64_TLSGD",
- elf.R_X86_64_TLSLD => "R_X86_64_TLSLD",
- elf.R_X86_64_DTPOFF32 => "R_X86_64_DTPOFF32",
- elf.R_X86_64_GOTTPOFF => "R_X86_64_GOTTPOFF",
- elf.R_X86_64_TPOFF32 => "R_X86_64_TPOFF32",
- elf.R_X86_64_PC64 => "R_X86_64_PC64",
- elf.R_X86_64_GOTOFF64 => "R_X86_64_GOTOFF64",
- elf.R_X86_64_GOTPC32 => "R_X86_64_GOTPC32",
- elf.R_X86_64_GOT64 => "R_X86_64_GOT64",
- elf.R_X86_64_GOTPCREL64 => "R_X86_64_GOTPCREL64",
- elf.R_X86_64_GOTPC64 => "R_X86_64_GOTPC64",
- elf.R_X86_64_GOTPLT64 => "R_X86_64_GOTPLT64",
- elf.R_X86_64_PLTOFF64 => "R_X86_64_PLTOFF64",
- elf.R_X86_64_SIZE32 => "R_X86_64_SIZE32",
- elf.R_X86_64_SIZE64 => "R_X86_64_SIZE64",
- elf.R_X86_64_GOTPC32_TLSDESC => "R_X86_64_GOTPC32_TLSDESC",
- elf.R_X86_64_TLSDESC_CALL => "R_X86_64_TLSDESC_CALL",
- elf.R_X86_64_TLSDESC => "R_X86_64_TLSDESC",
- elf.R_X86_64_IRELATIVE => "R_X86_64_IRELATIVE",
- elf.R_X86_64_RELATIVE64 => "R_X86_64_RELATIVE64",
- elf.R_X86_64_GOTPCRELX => "R_X86_64_GOTPCRELX",
- elf.R_X86_64_REX_GOTPCRELX => "R_X86_64_REX_GOTPCRELX",
- elf.R_X86_64_NUM => "R_X86_64_NUM",
- // Zig custom relocations
- Elf.R_X86_64_ZIG_GOT32 => "R_X86_64_ZIG_GOT32",
- Elf.R_X86_64_ZIG_GOTPCREL => "R_X86_64_ZIG_GOTPCREL",
- else => "R_X86_64_UNKNOWN",
- };
- try writer.print("{s}", .{str});
-}
-
pub fn format(
atom: Atom,
comptime unused_fmt_string: []const u8,
@@ -1285,7 +828,423 @@ pub const Flags = packed struct {
};
const x86_64 = struct {
- pub fn relaxGotpcrelx(code: []u8) !void {
+ fn scanRelocs(atom: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype) !void {
+ const is_static = elf_file.base.isStatic();
+ const is_dyn_lib = elf_file.base.isDynLib();
+ const file_ptr = atom.file(elf_file).?;
+ const rels = atom.relocs(elf_file);
+ var i: usize = 0;
+ while (i < rels.len) : (i += 1) {
+ const rel = rels[i];
+
+ if (rel.r_type() == elf.R_X86_64_NONE) continue;
+
+ const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
+
+ const symbol_index = switch (file_ptr) {
+ .zig_object => |x| x.symbol(rel.r_sym()),
+ .object => |x| x.symbols.items[rel.r_sym()],
+ else => unreachable,
+ };
+ const symbol = elf_file.symbol(symbol_index);
+
+ // Check for violation of One Definition Rule for COMDATs.
+ if (symbol.file(elf_file) == null) {
+ // TODO convert into an error
+ log.debug("{}: {s}: {s} refers to a discarded COMDAT section", .{
+ file_ptr.fmtPath(),
+ atom.name(elf_file),
+ symbol.name(elf_file),
+ });
+ continue;
+ }
+
+ // Report an undefined symbol.
+ try atom.reportUndefined(elf_file, symbol, symbol_index, rel, undefs);
+
+ if (symbol.isIFunc(elf_file)) {
+ symbol.flags.needs_got = true;
+ symbol.flags.needs_plt = true;
+ }
+
+ // While traversing relocations, mark symbols that require special handling such as
+ // pointer indirection via GOT, or a stub trampoline via PLT.
+ switch (rel.r_type()) {
+ elf.R_X86_64_64 => {
+ try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
+ },
+
+ elf.R_X86_64_32,
+ elf.R_X86_64_32S,
+ => {
+ try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
+ },
+
+ elf.R_X86_64_GOT32,
+ elf.R_X86_64_GOTPC32,
+ elf.R_X86_64_GOTPC64,
+ elf.R_X86_64_GOTPCREL,
+ elf.R_X86_64_GOTPCREL64,
+ elf.R_X86_64_GOTPCRELX,
+ elf.R_X86_64_REX_GOTPCRELX,
+ => {
+ symbol.flags.needs_got = true;
+ },
+
+ elf.R_X86_64_PLT32,
+ elf.R_X86_64_PLTOFF64,
+ => {
+ if (symbol.flags.import) {
+ symbol.flags.needs_plt = true;
+ }
+ },
+
+ elf.R_X86_64_PC32 => {
+ try atom.scanReloc(symbol, rel, pcRelocAction(symbol, elf_file), elf_file);
+ },
+
+ elf.R_X86_64_TLSGD => {
+ // TODO verify this is followed by an appropriate relocation, such as PLT32 against __tls_get_addr
+
+ if (is_static or (!symbol.flags.import and !is_dyn_lib)) {
+ // Relax if building with the -static flag, as __tls_get_addr() will not be present in libc.a.
+ // We skip the next relocation.
+ i += 1;
+ } else if (!symbol.flags.import and is_dyn_lib) {
+ symbol.flags.needs_gottp = true;
+ i += 1;
+ } else {
+ symbol.flags.needs_tlsgd = true;
+ }
+ },
+
+ elf.R_X86_64_TLSLD => {
+ // TODO verify this is followed by an appropriate relocation, such as PLT32 against __tls_get_addr
+
+ if (is_static or !is_dyn_lib) {
+ // Relax if building with the -static flag, as __tls_get_addr() will not be present in libc.a.
+ // We skip the next relocation.
+ i += 1;
+ } else {
+ elf_file.got.flags.needs_tlsld = true;
+ }
+ },
+
+ elf.R_X86_64_GOTTPOFF => {
+ const should_relax = blk: {
+ if (is_dyn_lib or symbol.flags.import) break :blk false;
+ if (!x86_64.canRelaxGotTpOff(code.?[r_offset - 3 ..])) break :blk false;
+ break :blk true;
+ };
+ if (!should_relax) {
+ symbol.flags.needs_gottp = true;
+ }
+ },
+
+ elf.R_X86_64_GOTPC32_TLSDESC => {
+ const should_relax = is_static or (!is_dyn_lib and !symbol.flags.import);
+ if (!should_relax) {
+ symbol.flags.needs_tlsdesc = true;
+ }
+ },
+
+ elf.R_X86_64_TPOFF32,
+ elf.R_X86_64_TPOFF64,
+ => {
+ if (is_dyn_lib) try atom.reportPicError(symbol, rel, elf_file);
+ },
+
+ elf.R_X86_64_GOTOFF64,
+ elf.R_X86_64_DTPOFF32,
+ elf.R_X86_64_DTPOFF64,
+ elf.R_X86_64_SIZE32,
+ elf.R_X86_64_SIZE64,
+ elf.R_X86_64_TLSDESC_CALL,
+ => {},
+
+ // Zig custom relocations
+ Elf.R_X86_64_ZIG_GOT32,
+ Elf.R_X86_64_ZIG_GOTPCREL,
+ => {
+ assert(symbol.flags.has_zig_got);
+ },
+
+ else => try atom.reportUnhandledRelocError(rel, elf_file),
+ }
+ }
+ }
+
+ fn resolveRelocsAlloc(atom: Atom, elf_file: *Elf, code: []u8) !void {
+ const file_ptr = atom.file(elf_file).?;
+ var stream = std.io.fixedBufferStream(code);
+ const cwriter = stream.writer();
+
+ const rels = atom.relocs(elf_file);
+ var i: usize = 0;
+ while (i < rels.len) : (i += 1) {
+ const rel = rels[i];
+ const r_type = rel.r_type();
+ if (r_type == elf.R_X86_64_NONE) continue;
+
+ const target = switch (file_ptr) {
+ .zig_object => |x| elf_file.symbol(x.symbol(rel.r_sym())),
+ .object => |x| elf_file.symbol(x.symbols.items[rel.r_sym()]),
+ else => unreachable,
+ };
+ const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
+
+ // We will use equation format to resolve relocations:
+ // https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
+ //
+ // Address of the source atom.
+ const P = @as(i64, @intCast(atom.address(elf_file) + rel.r_offset));
+ // Addend from the relocation.
+ const A = rel.r_addend;
+ // Address of the target symbol - can be address of the symbol within an atom or address of PLT stub.
+ const S = @as(i64, @intCast(target.address(.{}, elf_file)));
+ // Address of the global offset table.
+ const GOT = blk: {
+ const shndx = if (elf_file.got_plt_section_index) |shndx|
+ shndx
+ else if (elf_file.got_section_index) |shndx|
+ shndx
+ else
+ null;
+ break :blk if (shndx) |index| @as(i64, @intCast(elf_file.shdrs.items[index].sh_addr)) else 0;
+ };
+ // Address of the .zig.got table entry if any.
+ const ZIG_GOT = @as(i64, @intCast(target.zigGotAddress(elf_file)));
+ // Relative offset to the start of the global offset table.
+ const G = @as(i64, @intCast(target.gotAddress(elf_file))) - GOT;
+ // Address of the thread pointer.
+ const TP = @as(i64, @intCast(elf_file.tpAddress()));
+ // Address of the dynamic thread pointer.
+ const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
+
+ relocs_log.debug(" {s}: {x}: [{x} => {x}] G({x}) ZG({x}) ({s})", .{
+ relocation.fmtRelocType(r_type, .x86_64),
+ r_offset,
+ P,
+ S + A,
+ G + GOT + A,
+ ZIG_GOT + A,
+ target.name(elf_file),
+ });
+
+ try stream.seekTo(r_offset);
+
+ switch (rel.r_type()) {
+ elf.R_X86_64_NONE => unreachable,
+
+ elf.R_X86_64_64 => {
+ try atom.resolveDynAbsReloc(
+ target,
+ rel,
+ dynAbsRelocAction(target, elf_file),
+ elf_file,
+ cwriter,
+ );
+ },
+
+ elf.R_X86_64_PLT32,
+ elf.R_X86_64_PC32,
+ => try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little),
+
+ elf.R_X86_64_GOTPCREL => try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little),
+ elf.R_X86_64_GOTPC32 => try cwriter.writeInt(i32, @as(i32, @intCast(GOT + A - P)), .little),
+ elf.R_X86_64_GOTPC64 => try cwriter.writeInt(i64, GOT + A - P, .little),
+
+ elf.R_X86_64_GOTPCRELX => {
+ if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
+ x86_64.relaxGotpcrelx(code[r_offset - 2 ..]) catch break :blk;
+ try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
+ continue;
+ }
+ try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
+ },
+
+ elf.R_X86_64_REX_GOTPCRELX => {
+ if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
+ x86_64.relaxRexGotpcrelx(code[r_offset - 3 ..]) catch break :blk;
+ try cwriter.writeInt(i32, @as(i32, @intCast(S + A - P)), .little);
+ continue;
+ }
+ try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A - P)), .little);
+ },
+
+ elf.R_X86_64_32 => try cwriter.writeInt(u32, @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
+ elf.R_X86_64_32S => try cwriter.writeInt(i32, @as(i32, @truncate(S + A)), .little),
+
+ elf.R_X86_64_TPOFF32 => try cwriter.writeInt(i32, @as(i32, @truncate(S + A - TP)), .little),
+ elf.R_X86_64_TPOFF64 => try cwriter.writeInt(i64, S + A - TP, .little),
+
+ elf.R_X86_64_DTPOFF32 => try cwriter.writeInt(i32, @as(i32, @truncate(S + A - DTP)), .little),
+ elf.R_X86_64_DTPOFF64 => try cwriter.writeInt(i64, S + A - DTP, .little),
+
+ elf.R_X86_64_TLSGD => {
+ if (target.flags.has_tlsgd) {
+ const S_ = @as(i64, @intCast(target.tlsGdAddress(elf_file)));
+ try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
+ } else if (target.flags.has_gottp) {
+ const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
+ try x86_64.relaxTlsGdToIe(atom, rels[i .. i + 2], @intCast(S_ - P), elf_file, &stream);
+ i += 1;
+ } else {
+ try x86_64.relaxTlsGdToLe(
+ atom,
+ rels[i .. i + 2],
+ @as(i32, @intCast(S - TP)),
+ elf_file,
+ &stream,
+ );
+ i += 1;
+ }
+ },
+
+ elf.R_X86_64_TLSLD => {
+ if (elf_file.got.tlsld_index) |entry_index| {
+ const tlsld_entry = elf_file.got.entries.items[entry_index];
+ const S_ = @as(i64, @intCast(tlsld_entry.address(elf_file)));
+ try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
+ } else {
+ try x86_64.relaxTlsLdToLe(
+ atom,
+ rels[i .. i + 2],
+ @as(i32, @intCast(TP - @as(i64, @intCast(elf_file.tlsAddress())))),
+ elf_file,
+ &stream,
+ );
+ i += 1;
+ }
+ },
+
+ elf.R_X86_64_GOTPC32_TLSDESC => {
+ if (target.flags.has_tlsdesc) {
+ const S_ = @as(i64, @intCast(target.tlsDescAddress(elf_file)));
+ try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
+ } else {
+ try x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..]);
+ try cwriter.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
+ }
+ },
+
+ elf.R_X86_64_TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
+ // call -> nop
+ try cwriter.writeAll(&.{ 0x66, 0x90 });
+ },
+
+ elf.R_X86_64_GOTTPOFF => {
+ if (target.flags.has_gottp) {
+ const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
+ try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
+ } else {
+ x86_64.relaxGotTpOff(code[r_offset - 3 ..]) catch unreachable;
+ try cwriter.writeInt(i32, @as(i32, @intCast(S - TP)), .little);
+ }
+ },
+
+ elf.R_X86_64_GOT32 => try cwriter.writeInt(i32, @as(i32, @intCast(G + GOT + A)), .little),
+
+ // Zig custom relocations
+ Elf.R_X86_64_ZIG_GOT32 => try cwriter.writeInt(u32, @as(u32, @intCast(ZIG_GOT + A)), .little),
+ Elf.R_X86_64_ZIG_GOTPCREL => try cwriter.writeInt(i32, @as(i32, @intCast(ZIG_GOT + A - P)), .little),
+
+ else => {},
+ }
+ }
+ }
+
+ fn resolveRelocsNonAlloc(atom: Atom, elf_file: *Elf, code: []u8, undefs: anytype) !void {
+ const file_ptr = atom.file(elf_file).?;
+ var stream = std.io.fixedBufferStream(code);
+ const cwriter = stream.writer();
+
+ const rels = atom.relocs(elf_file);
+ var i: usize = 0;
+ while (i < rels.len) : (i += 1) {
+ const rel = rels[i];
+ const r_type = rel.r_type();
+ if (r_type == elf.R_X86_64_NONE) continue;
+
+ const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
+
+ const target_index = switch (file_ptr) {
+ .zig_object => |x| x.symbol(rel.r_sym()),
+ .object => |x| x.symbols.items[rel.r_sym()],
+ else => unreachable,
+ };
+ const target = elf_file.symbol(target_index);
+
+ // Check for violation of One Definition Rule for COMDATs.
+ if (target.file(elf_file) == null) {
+ // TODO convert into an error
+ log.debug("{}: {s}: {s} refers to a discarded COMDAT section", .{
+ file_ptr.fmtPath(),
+ atom.name(elf_file),
+ target.name(elf_file),
+ });
+ continue;
+ }
+
+ // Report an undefined symbol.
+ try atom.reportUndefined(elf_file, target, target_index, rel, undefs);
+
+ // We will use equation format to resolve relocations:
+ // https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
+ //
+ const P = @as(i64, @intCast(atom.address(elf_file) + rel.r_offset));
+ // Addend from the relocation.
+ const A = rel.r_addend;
+ // Address of the target symbol - can be address of the symbol within an atom or address of PLT stub.
+ const S = @as(i64, @intCast(target.address(.{}, elf_file)));
+ // Address of the global offset table.
+ const GOT = blk: {
+ const shndx = if (elf_file.got_plt_section_index) |shndx|
+ shndx
+ else if (elf_file.got_section_index) |shndx|
+ shndx
+ else
+ null;
+ break :blk if (shndx) |index| @as(i64, @intCast(elf_file.shdrs.items[index].sh_addr)) else 0;
+ };
+ // Address of the dynamic thread pointer.
+ const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
+
+ relocs_log.debug(" {s}: {x}: [{x} => {x}] ({s})", .{
+ relocation.fmtRelocType(r_type, .x86_64),
+ rel.r_offset,
+ P,
+ S + A,
+ target.name(elf_file),
+ });
+
+ try stream.seekTo(r_offset);
+
+ switch (r_type) {
+ elf.R_X86_64_NONE => unreachable,
+ elf.R_X86_64_8 => try cwriter.writeInt(u8, @as(u8, @bitCast(@as(i8, @intCast(S + A)))), .little),
+ elf.R_X86_64_16 => try cwriter.writeInt(u16, @as(u16, @bitCast(@as(i16, @intCast(S + A)))), .little),
+ elf.R_X86_64_32 => try cwriter.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(S + A)))), .little),
+ elf.R_X86_64_32S => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
+ elf.R_X86_64_64 => try cwriter.writeInt(i64, S + A, .little),
+ elf.R_X86_64_DTPOFF32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
+ elf.R_X86_64_DTPOFF64 => try cwriter.writeInt(i64, S + A - DTP, .little),
+ elf.R_X86_64_GOTOFF64 => try cwriter.writeInt(i64, S + A - GOT, .little),
+ elf.R_X86_64_GOTPC64 => try cwriter.writeInt(i64, GOT + A, .little),
+ elf.R_X86_64_SIZE32 => {
+ const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
+ try cwriter.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(size + A)))), .little);
+ },
+ elf.R_X86_64_SIZE64 => {
+ const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
+ try cwriter.writeInt(i64, @as(i64, @intCast(size + A)), .little);
+ },
+ else => try atom.reportUnhandledRelocError(rel, elf_file),
+ }
+ }
+ }
+
+ fn relaxGotpcrelx(code: []u8) !void {
const old_inst = disassemble(code) orelse return error.RelaxFail;
const inst = switch (old_inst.encoding.mnemonic) {
.call => try Instruction.new(old_inst.prefix, .call, &.{
@@ -1303,7 +1262,7 @@ const x86_64 = struct {
encode(&.{ nop, inst }, code) catch return error.RelaxFail;
}
- pub fn relaxRexGotpcrelx(code: []u8) !void {
+ fn relaxRexGotpcrelx(code: []u8) !void {
const old_inst = disassemble(code) orelse return error.RelaxFail;
switch (old_inst.encoding.mnemonic) {
.mov => {
@@ -1315,7 +1274,7 @@ const x86_64 = struct {
}
}
- pub fn relaxTlsGdToIe(
+ fn relaxTlsGdToIe(
self: Atom,
rels: []align(1) const elf.Elf64_Rela,
value: i32,
@@ -1340,8 +1299,8 @@ const x86_64 = struct {
else => {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: rewrite {} when followed by {}", .{
- fmtRelocType(rels[0].r_type()),
- fmtRelocType(rels[1].r_type()),
+ relocation.fmtRelocType(rels[0].r_type(), .x86_64),
+ relocation.fmtRelocType(rels[1].r_type(), .x86_64),
});
try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
self.file(elf_file).?.fmtPath(),
@@ -1352,7 +1311,7 @@ const x86_64 = struct {
}
}
- pub fn relaxTlsLdToLe(
+ fn relaxTlsLdToLe(
self: Atom,
rels: []align(1) const elf.Elf64_Rela,
value: i32,
@@ -1392,8 +1351,8 @@ const x86_64 = struct {
else => {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: rewrite {} when followed by {}", .{
- fmtRelocType(rels[0].r_type()),
- fmtRelocType(rels[1].r_type()),
+ relocation.fmtRelocType(rels[0].r_type(), .x86_64),
+ relocation.fmtRelocType(rels[1].r_type(), .x86_64),
});
try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
self.file(elf_file).?.fmtPath(),
@@ -1404,7 +1363,7 @@ const x86_64 = struct {
}
}
- pub fn canRelaxGotTpOff(code: []const u8) bool {
+ fn canRelaxGotTpOff(code: []const u8) bool {
const old_inst = disassemble(code) orelse return false;
switch (old_inst.encoding.mnemonic) {
.mov => if (Instruction.new(old_inst.prefix, .mov, &.{
@@ -1419,7 +1378,7 @@ const x86_64 = struct {
}
}
- pub fn relaxGotTpOff(code: []u8) !void {
+ fn relaxGotTpOff(code: []u8) !void {
const old_inst = disassemble(code) orelse return error.RelaxFail;
switch (old_inst.encoding.mnemonic) {
.mov => {
@@ -1435,7 +1394,7 @@ const x86_64 = struct {
}
}
- pub fn relaxGotPcTlsDesc(code: []u8) !void {
+ fn relaxGotPcTlsDesc(code: []u8) !void {
const old_inst = disassemble(code) orelse return error.RelaxFail;
switch (old_inst.encoding.mnemonic) {
.lea => {
@@ -1451,7 +1410,7 @@ const x86_64 = struct {
}
}
- pub fn relaxTlsGdToLe(
+ fn relaxTlsGdToLe(
self: Atom,
rels: []align(1) const elf.Elf64_Rela,
value: i32,
@@ -1474,16 +1433,16 @@ const x86_64 = struct {
try stream.seekBy(-4);
try writer.writeAll(&insts);
relocs_log.debug(" relaxing {} and {}", .{
- fmtRelocType(rels[0].r_type()),
- fmtRelocType(rels[1].r_type()),
+ relocation.fmtRelocType(rels[0].r_type(), .x86_64),
+ relocation.fmtRelocType(rels[1].r_type(), .x86_64),
});
},
else => {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "fatal linker error: rewrite {} when followed by {}", .{
- fmtRelocType(rels[0].r_type()),
- fmtRelocType(rels[1].r_type()),
+ relocation.fmtRelocType(rels[0].r_type(), .x86_64),
+ relocation.fmtRelocType(rels[1].r_type(), .x86_64),
});
try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
self.file(elf_file).?.fmtPath(),
@@ -1521,6 +1480,7 @@ const elf = std.elf;
const eh_frame = @import("eh_frame.zig");
const log = std.log.scoped(.link);
const relocs_log = std.log.scoped(.link_relocs);
+const relocation = @import("relocation.zig");
const Allocator = std.mem.Allocator;
const Atom = @This();
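The recurring shape of this commit: the Atom-level entry points (scanRelocs, resolveRelocsAlloc, resolveRelocsNonAlloc) now switch on elf_file.getTarget().cpu.arch and delegate to an arch-scoped namespace, failing cleanly elsewhere. A minimal standalone sketch of that pattern — the function body and test harness here are illustrative, not linker code:

const std = @import("std");

// Stand-in for the linker's arch-dispatched entry point; the real version
// forwards to x86_64.resolveRelocsAlloc(self, elf_file, code).
fn resolveRelocsAlloc(arch: std.Target.Cpu.Arch) !void {
    switch (arch) {
        .x86_64 => {},
        else => return error.UnsupportedCpuArch,
    }
}

test "unsupported architectures surface as an error" {
    try resolveRelocsAlloc(.x86_64);
    try std.testing.expectError(error.UnsupportedCpuArch, resolveRelocsAlloc(.riscv64));
}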
src/link/Elf/eh_frame.zig
@@ -302,26 +302,23 @@ pub fn calcEhFrameRelocs(elf_file: *Elf) usize {
}
fn resolveReloc(rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela, elf_file: *Elf, contents: []u8) !void {
+ const cpu_arch = elf_file.getTarget().cpu.arch;
const offset = std.math.cast(usize, rel.r_offset - rec.offset) orelse return error.Overflow;
- const P = @as(i64, @intCast(rec.address(elf_file) + offset));
- const S = @as(i64, @intCast(sym.address(.{}, elf_file)));
+ const P = math.cast(i64, rec.address(elf_file) + offset) orelse return error.Overflow;
+ const S = math.cast(i64, sym.address(.{}, elf_file)) orelse return error.Overflow;
const A = rel.r_addend;
relocs_log.debug(" {s}: {x}: [{x} => {x}] ({s})", .{
- Atom.fmtRelocType(rel.r_type()),
+ relocation.fmtRelocType(rel.r_type(), cpu_arch),
offset,
P,
S + A,
sym.name(elf_file),
});
- var where = contents[offset..];
- switch (rel.r_type()) {
- elf.R_X86_64_32 => std.mem.writeInt(i32, where[0..4], @as(i32, @truncate(S + A)), .little),
- elf.R_X86_64_64 => std.mem.writeInt(i64, where[0..8], S + A, .little),
- elf.R_X86_64_PC32 => std.mem.writeInt(i32, where[0..4], @as(i32, @intCast(S - P + A)), .little),
- elf.R_X86_64_PC64 => std.mem.writeInt(i64, where[0..8], S - P + A, .little),
- else => unreachable,
+ switch (cpu_arch) {
+ .x86_64 => x86_64.resolveReloc(rel, P, S + A, contents[offset..]),
+ else => return error.UnsupportedCpuArch,
}
}
@@ -403,6 +400,7 @@ pub fn writeEhFrameObject(elf_file: *Elf, writer: anytype) !void {
}
fn emitReloc(elf_file: *Elf, rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela) elf.Elf64_Rela {
+ const cpu_arch = elf_file.getTarget().cpu.arch;
const r_offset = rec.address(elf_file) + rel.r_offset - rec.offset;
const r_type = rel.r_type();
var r_addend = rel.r_addend;
@@ -418,7 +416,7 @@ fn emitReloc(elf_file: *Elf, rec: anytype, sym: *const Symbol, rel: elf.Elf64_Re
}
relocs_log.debug(" {s}: [{x} => {d}({s})] + {x}", .{
- Atom.fmtRelocType(r_type),
+ relocation.fmtRelocType(r_type, cpu_arch),
r_offset,
r_sym,
sym.name(elf_file),
@@ -541,10 +539,24 @@ const EH_PE = struct {
pub const omit = 0xFF;
};
+const x86_64 = struct {
+ fn resolveReloc(rel: elf.Elf64_Rela, source: i64, target: i64, data: []u8) void {
+ switch (rel.r_type()) {
+ elf.R_X86_64_32 => std.mem.writeInt(i32, data[0..4], @as(i32, @truncate(target)), .little),
+ elf.R_X86_64_64 => std.mem.writeInt(i64, data[0..8], target, .little),
+ elf.R_X86_64_PC32 => std.mem.writeInt(i32, data[0..4], @as(i32, @intCast(target - source)), .little),
+ elf.R_X86_64_PC64 => std.mem.writeInt(i64, data[0..8], target - source, .little),
+ else => unreachable,
+ }
+ }
+};
+
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
+const math = std.math;
const relocs_log = std.log.scoped(.link_relocs);
+const relocation = @import("relocation.zig");
const Allocator = std.mem.Allocator;
const Atom = @import("Atom.zig");
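Both Atom.zig and eh_frame.zig resolve relocations in the equation form cited above (P = address of the place being patched, S = resolved symbol address, A = addend). A hedged worked example of the PC-relative case, with made-up addresses rather than real linker state:

const std = @import("std");

test "PC-relative relocation arithmetic: value = S + A - P" {
    const P: i64 = 0x201020; // address of the 4-byte field being patched
    const S: i64 = 0x201800; // resolved address of the target symbol
    const A: i64 = -4; // addend; compensates for rip pointing past the field
    var buf: [4]u8 = undefined;
    std.mem.writeInt(i32, &buf, @intCast(S + A - P), .little);
    try std.testing.expectEqual(@as(i32, 0x7dc), std.mem.readInt(i32, &buf, .little));
}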
src/link/Elf/file.zig
@@ -91,6 +91,13 @@ pub const File = union(enum) {
}
}
+ pub fn scanRelocs(file: File, elf_file: *Elf, undefs: anytype) !void {
+ switch (file) {
+ .linker_defined, .shared_object => unreachable,
+ inline else => |x| try x.scanRelocs(elf_file, undefs),
+ }
+ }
+
pub fn atoms(file: File) []const Atom.Index {
return switch (file) {
.linker_defined, .shared_object => &[0]Atom.Index{},
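The new File.scanRelocs above leans on Zig's `inline else` so a single prong covers every remaining union variant while still unwrapping a correctly-typed payload. A self-contained sketch of the idiom — the union and method here are hypothetical:

const std = @import("std");

const Payload = union(enum) {
    zig_object: u32,
    object: u64,

    fn payloadBits(p: Payload) usize {
        return switch (p) {
            // One prong, monomorphized per variant at compile time.
            inline else => |value| @bitSizeOf(@TypeOf(value)),
        };
    }
};

test "inline else dispatch" {
    try std.testing.expectEqual(@as(usize, 32), (Payload{ .zig_object = 1 }).payloadBits());
    try std.testing.expectEqual(@as(usize, 64), (Payload{ .object = 1 }).payloadBits());
}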
src/link/Elf/Object.zig
@@ -55,6 +55,7 @@ pub fn deinit(self: *Object, allocator: Allocator) void {
pub fn parse(self: *Object, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
+ const cpu_arch = elf_file.getTarget().cpu.arch;
const handle = elf_file.fileHandle(self.file_handle);
try self.parseCommon(gpa, handle, elf_file);
@@ -64,8 +65,11 @@ pub fn parse(self: *Object, elf_file: *Elf) !void {
for (self.shdrs.items, 0..) |shdr, i| {
const atom = elf_file.atom(self.atoms.items[i]) orelse continue;
if (!atom.flags.alive) continue;
- if (shdr.sh_type == elf.SHT_X86_64_UNWIND or mem.eql(u8, atom.name(elf_file), ".eh_frame"))
+ if ((cpu_arch == .x86_64 and shdr.sh_type == elf.SHT_X86_64_UNWIND) or
+ mem.eql(u8, atom.name(elf_file), ".eh_frame"))
+ {
try self.parseEhFrame(gpa, handle, @as(u32, @intCast(i)), elf_file);
+ }
}
}
@@ -286,17 +290,22 @@ fn initOutputSection(self: Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) error{O
}
break :blk name;
};
- const @"type" = switch (shdr.sh_type) {
- elf.SHT_NULL => unreachable,
- elf.SHT_PROGBITS => blk: {
- if (std.mem.eql(u8, name, ".init_array") or std.mem.startsWith(u8, name, ".init_array."))
- break :blk elf.SHT_INIT_ARRAY;
- if (std.mem.eql(u8, name, ".fini_array") or std.mem.startsWith(u8, name, ".fini_array."))
- break :blk elf.SHT_FINI_ARRAY;
- break :blk shdr.sh_type;
- },
- elf.SHT_X86_64_UNWIND => elf.SHT_PROGBITS,
- else => shdr.sh_type,
+ const @"type" = tt: {
+ if (elf_file.getTarget().cpu.arch == .x86_64 and
+ shdr.sh_type == elf.SHT_X86_64_UNWIND) break :tt elf.SHT_PROGBITS;
+
+ const @"type" = switch (shdr.sh_type) {
+ elf.SHT_NULL => unreachable,
+ elf.SHT_PROGBITS => blk: {
+ if (std.mem.eql(u8, name, ".init_array") or std.mem.startsWith(u8, name, ".init_array."))
+ break :blk elf.SHT_INIT_ARRAY;
+ if (std.mem.eql(u8, name, ".fini_array") or std.mem.startsWith(u8, name, ".fini_array."))
+ break :blk elf.SHT_FINI_ARRAY;
+ break :blk shdr.sh_type;
+ },
+ else => shdr.sh_type,
+ };
+ break :tt @"type";
};
const flags = blk: {
var flags = shdr.sh_flags;
@@ -596,9 +605,10 @@ pub fn markLive(self: *Object, elf_file: *Elf) void {
}
pub fn markEhFrameAtomsDead(self: Object, elf_file: *Elf) void {
+ const cpu_arch = elf_file.getTarget().cpu.arch;
for (self.atoms.items) |atom_index| {
const atom = elf_file.atom(atom_index) orelse continue;
- const is_eh_frame = atom.inputShdr(elf_file).sh_type == elf.SHT_X86_64_UNWIND or
+ const is_eh_frame = (cpu_arch == .x86_64 and atom.inputShdr(elf_file).sh_type == elf.SHT_X86_64_UNWIND) or
mem.eql(u8, atom.name(elf_file), ".eh_frame");
if (atom.flags.alive and is_eh_frame) atom.flags.alive = false;
}
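The new `cpu_arch == .x86_64` guards matter because SHT_X86_64_UNWIND (0x70000001) sits in the processor-specific sh_type range, where the same numeric value means something different on each architecture (on ARM, for example, it is SHT_ARM_EXIDX). A hedged sanity check, assuming std.elf exposes the usual range constants:

const std = @import("std");
const elf = std.elf;

test "SHT_X86_64_UNWIND is processor-specific, not generic" {
    try std.testing.expect(elf.SHT_X86_64_UNWIND >= elf.SHT_LOPROC);
    try std.testing.expect(elf.SHT_X86_64_UNWIND <= elf.SHT_HIPROC);
}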
src/link/Elf/relocation.zig
@@ -0,0 +1,264 @@
+pub const Kind = enum {
+ abs,
+ copy,
+ rel,
+ irel,
+ glob_dat,
+ jump_slot,
+ dtpmod,
+ dtpoff,
+ tpoff,
+ tlsdesc,
+};
+
+const x86_64_relocs = [_]struct { Kind, u32 }{
+ .{ .abs, elf.R_X86_64_64 },
+ .{ .copy, elf.R_X86_64_COPY },
+ .{ .rel, elf.R_X86_64_RELATIVE },
+ .{ .irel, elf.R_X86_64_IRELATIVE },
+ .{ .glob_dat, elf.R_X86_64_GLOB_DAT },
+ .{ .jump_slot, elf.R_X86_64_JUMP_SLOT },
+ .{ .dtpmod, elf.R_X86_64_DTPMOD64 },
+ .{ .dtpoff, elf.R_X86_64_DTPOFF64 },
+ .{ .tpoff, elf.R_X86_64_TPOFF64 },
+ .{ .tlsdesc, elf.R_X86_64_TLSDESC },
+};
+
+const aarch64_relocs = [_]struct { Kind, u32 }{
+ .{ .abs, elf.R_AARCH64_ABS64 },
+ .{ .copy, elf.R_AARCH64_COPY },
+ .{ .rel, elf.R_AARCH64_RELATIVE },
+ .{ .irel, elf.R_AARCH64_IRELATIVE },
+ .{ .glob_dat, elf.R_AARCH64_GLOB_DAT },
+ .{ .jump_slot, elf.R_AARCH64_JUMP_SLOT },
+ .{ .dtpmod, elf.R_AARCH64_TLS_DTPMOD },
+ .{ .dtpoff, elf.R_AARCH64_TLS_DTPREL },
+ .{ .tpoff, elf.R_AARCH64_TLS_TPREL },
+ .{ .tlsdesc, elf.R_AARCH64_TLSDESC },
+};
+
+pub fn decode(r_type: u32, cpu_arch: std.Target.Cpu.Arch) ?Kind {
+ const relocs = switch (cpu_arch) {
+ .x86_64 => &x86_64_relocs,
+ .aarch64 => &aarch64_relocs,
+ else => @panic("TODO unhandled cpu arch"),
+ };
+ inline for (relocs) |entry| {
+ if (entry[1] == r_type) return entry[0];
+ }
+ return null;
+}
+
+pub fn encode(comptime kind: Kind, cpu_arch: std.Target.Cpu.Arch) u32 {
+ const relocs = switch (cpu_arch) {
+ .x86_64 => &x86_64_relocs,
+ .aarch64 => &aarch64_relocs,
+ else => @panic("TODO unhandled cpu arch"),
+ };
+ inline for (relocs) |entry| {
+ if (entry[0] == kind) return entry[1];
+ }
+ unreachable;
+}
+
+const FormatRelocTypeCtx = struct {
+ r_type: u32,
+ cpu_arch: std.Target.Cpu.Arch,
+};
+
+pub fn fmtRelocType(r_type: u32, cpu_arch: std.Target.Cpu.Arch) std.fmt.Formatter(formatRelocType) {
+ return .{ .data = .{
+ .r_type = r_type,
+ .cpu_arch = cpu_arch,
+ } };
+}
+
+fn formatRelocType(
+ ctx: FormatRelocTypeCtx,
+ comptime unused_fmt_string: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+) !void {
+ _ = unused_fmt_string;
+ _ = options;
+ const r_type = ctx.r_type;
+ const str = switch (ctx.cpu_arch) {
+ .x86_64 => switch (r_type) {
+ elf.R_X86_64_NONE => "R_X86_64_NONE",
+ elf.R_X86_64_64 => "R_X86_64_64",
+ elf.R_X86_64_PC32 => "R_X86_64_PC32",
+ elf.R_X86_64_GOT32 => "R_X86_64_GOT32",
+ elf.R_X86_64_PLT32 => "R_X86_64_PLT32",
+ elf.R_X86_64_COPY => "R_X86_64_COPY",
+ elf.R_X86_64_GLOB_DAT => "R_X86_64_GLOB_DAT",
+ elf.R_X86_64_JUMP_SLOT => "R_X86_64_JUMP_SLOT",
+ elf.R_X86_64_RELATIVE => "R_X86_64_RELATIVE",
+ elf.R_X86_64_GOTPCREL => "R_X86_64_GOTPCREL",
+ elf.R_X86_64_32 => "R_X86_64_32",
+ elf.R_X86_64_32S => "R_X86_64_32S",
+ elf.R_X86_64_16 => "R_X86_64_16",
+ elf.R_X86_64_PC16 => "R_X86_64_PC16",
+ elf.R_X86_64_8 => "R_X86_64_8",
+ elf.R_X86_64_PC8 => "R_X86_64_PC8",
+ elf.R_X86_64_DTPMOD64 => "R_X86_64_DTPMOD64",
+ elf.R_X86_64_DTPOFF64 => "R_X86_64_DTPOFF64",
+ elf.R_X86_64_TPOFF64 => "R_X86_64_TPOFF64",
+ elf.R_X86_64_TLSGD => "R_X86_64_TLSGD",
+ elf.R_X86_64_TLSLD => "R_X86_64_TLSLD",
+ elf.R_X86_64_DTPOFF32 => "R_X86_64_DTPOFF32",
+ elf.R_X86_64_GOTTPOFF => "R_X86_64_GOTTPOFF",
+ elf.R_X86_64_TPOFF32 => "R_X86_64_TPOFF32",
+ elf.R_X86_64_PC64 => "R_X86_64_PC64",
+ elf.R_X86_64_GOTOFF64 => "R_X86_64_GOTOFF64",
+ elf.R_X86_64_GOTPC32 => "R_X86_64_GOTPC32",
+ elf.R_X86_64_GOT64 => "R_X86_64_GOT64",
+ elf.R_X86_64_GOTPCREL64 => "R_X86_64_GOTPCREL64",
+ elf.R_X86_64_GOTPC64 => "R_X86_64_GOTPC64",
+ elf.R_X86_64_GOTPLT64 => "R_X86_64_GOTPLT64",
+ elf.R_X86_64_PLTOFF64 => "R_X86_64_PLTOFF64",
+ elf.R_X86_64_SIZE32 => "R_X86_64_SIZE32",
+ elf.R_X86_64_SIZE64 => "R_X86_64_SIZE64",
+ elf.R_X86_64_GOTPC32_TLSDESC => "R_X86_64_GOTPC32_TLSDESC",
+ elf.R_X86_64_TLSDESC_CALL => "R_X86_64_TLSDESC_CALL",
+ elf.R_X86_64_TLSDESC => "R_X86_64_TLSDESC",
+ elf.R_X86_64_IRELATIVE => "R_X86_64_IRELATIVE",
+ elf.R_X86_64_RELATIVE64 => "R_X86_64_RELATIVE64",
+ elf.R_X86_64_GOTPCRELX => "R_X86_64_GOTPCRELX",
+ elf.R_X86_64_REX_GOTPCRELX => "R_X86_64_REX_GOTPCRELX",
+ elf.R_X86_64_NUM => "R_X86_64_NUM",
+ else => "R_X86_64_UNKNOWN",
+ },
+ .aarch64 => switch (r_type) {
+ elf.R_AARCH64_NONE => "R_AARCH64_NONE",
+ elf.R_AARCH64_ABS64 => "R_AARCH64_ABS64",
+ elf.R_AARCH64_ABS32 => "R_AARCH64_ABS32",
+ elf.R_AARCH64_ABS16 => "R_AARCH64_ABS16",
+ elf.R_AARCH64_PREL64 => "R_AARCH64_PREL64",
+ elf.R_AARCH64_PREL32 => "R_AARCH64_PREL32",
+ elf.R_AARCH64_PREL16 => "R_AARCH64_PREL16",
+ elf.R_AARCH64_MOVW_UABS_G0 => "R_AARCH64_MOVW_UABS_G0",
+ elf.R_AARCH64_MOVW_UABS_G0_NC => "R_AARCH64_MOVW_UABS_G0_NC",
+ elf.R_AARCH64_MOVW_UABS_G1 => "R_AARCH64_MOVW_UABS_G1",
+ elf.R_AARCH64_MOVW_UABS_G1_NC => "R_AARCH64_MOVW_UABS_G1_NC",
+ elf.R_AARCH64_MOVW_UABS_G2 => "R_AARCH64_MOVW_UABS_G2",
+ elf.R_AARCH64_MOVW_UABS_G2_NC => "R_AARCH64_MOVW_UABS_G2_NC",
+ elf.R_AARCH64_MOVW_UABS_G3 => "R_AARCH64_MOVW_UABS_G3",
+ elf.R_AARCH64_MOVW_SABS_G0 => "R_AARCH64_MOVW_SABS_G0",
+ elf.R_AARCH64_MOVW_SABS_G1 => "R_AARCH64_MOVW_SABS_G1",
+ elf.R_AARCH64_MOVW_SABS_G2 => "R_AARCH64_MOVW_SABS_G2",
+ elf.R_AARCH64_LD_PREL_LO19 => "R_AARCH64_LD_PREL_LO19",
+ elf.R_AARCH64_ADR_PREL_LO21 => "R_AARCH64_ADR_PREL_LO21",
+ elf.R_AARCH64_ADR_PREL_PG_HI21 => "R_AARCH64_ADR_PREL_PG_HI21",
+ elf.R_AARCH64_ADR_PREL_PG_HI21_NC => "R_AARCH64_ADR_PREL_PG_HI21_NC",
+ elf.R_AARCH64_ADD_ABS_LO12_NC => "R_AARCH64_ADD_ABS_LO12_NC",
+ elf.R_AARCH64_LDST8_ABS_LO12_NC => "R_AARCH64_LDST8_ABS_LO12_NC",
+ elf.R_AARCH64_TSTBR14 => "R_AARCH64_TSTBR14",
+ elf.R_AARCH64_CONDBR19 => "R_AARCH64_CONDBR19",
+ elf.R_AARCH64_JUMP26 => "R_AARCH64_JUMP26",
+ elf.R_AARCH64_CALL26 => "R_AARCH64_CALL26",
+ elf.R_AARCH64_LDST16_ABS_LO12_NC => "R_AARCH64_LDST16_ABS_LO12_NC",
+ elf.R_AARCH64_LDST32_ABS_LO12_NC => "R_AARCH64_LDST32_ABS_LO12_NC",
+ elf.R_AARCH64_LDST64_ABS_LO12_NC => "R_AARCH64_LDST64_ABS_LO12_NC",
+ elf.R_AARCH64_MOVW_PREL_G0 => "R_AARCH64_MOVW_PREL_G0",
+ elf.R_AARCH64_MOVW_PREL_G0_NC => "R_AARCH64_MOVW_PREL_G0_NC",
+ elf.R_AARCH64_MOVW_PREL_G1 => "R_AARCH64_MOVW_PREL_G1",
+ elf.R_AARCH64_MOVW_PREL_G1_NC => "R_AARCH64_MOVW_PREL_G1_NC",
+ elf.R_AARCH64_MOVW_PREL_G2 => "R_AARCH64_MOVW_PREL_G2",
+ elf.R_AARCH64_MOVW_PREL_G2_NC => "R_AARCH64_MOVW_PREL_G2_NC",
+ elf.R_AARCH64_MOVW_PREL_G3 => "R_AARCH64_MOVW_PREL_G3",
+ elf.R_AARCH64_LDST128_ABS_LO12_NC => "R_AARCH64_LDST128_ABS_LO12_NC",
+ elf.R_AARCH64_MOVW_GOTOFF_G0 => "R_AARCH64_MOVW_GOTOFF_G0",
+ elf.R_AARCH64_MOVW_GOTOFF_G0_NC => "R_AARCH64_MOVW_GOTOFF_G0_NC",
+ elf.R_AARCH64_MOVW_GOTOFF_G1 => "R_AARCH64_MOVW_GOTOFF_G1",
+ elf.R_AARCH64_MOVW_GOTOFF_G1_NC => "R_AARCH64_MOVW_GOTOFF_G1_NC",
+ elf.R_AARCH64_MOVW_GOTOFF_G2 => "R_AARCH64_MOVW_GOTOFF_G2",
+ elf.R_AARCH64_MOVW_GOTOFF_G2_NC => "R_AARCH64_MOVW_GOTOFF_G2_NC",
+ elf.R_AARCH64_MOVW_GOTOFF_G3 => "R_AARCH64_MOVW_GOTOFF_G3",
+ elf.R_AARCH64_GOTREL64 => "R_AARCH64_GOTREL64",
+ elf.R_AARCH64_GOTREL32 => "R_AARCH64_GOTREL32",
+ elf.R_AARCH64_GOT_LD_PREL19 => "R_AARCH64_GOT_LD_PREL19",
+ elf.R_AARCH64_LD64_GOTOFF_LO15 => "R_AARCH64_LD64_GOTOFF_LO15",
+ elf.R_AARCH64_ADR_GOT_PAGE => "R_AARCH64_ADR_GOT_PAGE",
+ elf.R_AARCH64_LD64_GOT_LO12_NC => "R_AARCH64_LD64_GOT_LO12_NC",
+ elf.R_AARCH64_LD64_GOTPAGE_LO15 => "R_AARCH64_LD64_GOTPAGE_LO15",
+ elf.R_AARCH64_TLSGD_ADR_PREL21 => "R_AARCH64_TLSGD_ADR_PREL21",
+ elf.R_AARCH64_TLSGD_ADR_PAGE21 => "R_AARCH64_TLSGD_ADR_PAGE21",
+ elf.R_AARCH64_TLSGD_ADD_LO12_NC => "R_AARCH64_TLSGD_ADD_LO12_NC",
+ elf.R_AARCH64_TLSGD_MOVW_G1 => "R_AARCH64_TLSGD_MOVW_G1",
+ elf.R_AARCH64_TLSGD_MOVW_G0_NC => "R_AARCH64_TLSGD_MOVW_G0_NC",
+ elf.R_AARCH64_TLSLD_ADR_PREL21 => "R_AARCH64_TLSLD_ADR_PREL21",
+ elf.R_AARCH64_TLSLD_ADR_PAGE21 => "R_AARCH64_TLSLD_ADR_PAGE21",
+ elf.R_AARCH64_TLSLD_ADD_LO12_NC => "R_AARCH64_TLSLD_ADD_LO12_NC",
+ elf.R_AARCH64_TLSLD_MOVW_G1 => "R_AARCH64_TLSLD_MOVW_G1",
+ elf.R_AARCH64_TLSLD_MOVW_G0_NC => "R_AARCH64_TLSLD_MOVW_G0_NC",
+ elf.R_AARCH64_TLSLD_LD_PREL19 => "R_AARCH64_TLSLD_LD_PREL19",
+ elf.R_AARCH64_TLSLD_MOVW_DTPREL_G2 => "R_AARCH64_TLSLD_MOVW_DTPREL_G2",
+ elf.R_AARCH64_TLSLD_MOVW_DTPREL_G1 => "R_AARCH64_TLSLD_MOVW_DTPREL_G1",
+ elf.R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC => "R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC",
+ elf.R_AARCH64_TLSLD_MOVW_DTPREL_G0 => "R_AARCH64_TLSLD_MOVW_DTPREL_G0",
+ elf.R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC => "R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC",
+ elf.R_AARCH64_TLSLD_ADD_DTPREL_HI12 => "R_AARCH64_TLSLD_ADD_DTPREL_HI12",
+ elf.R_AARCH64_TLSLD_ADD_DTPREL_LO12 => "R_AARCH64_TLSLD_ADD_DTPREL_LO12",
+ elf.R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC => "R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC",
+ elf.R_AARCH64_TLSLD_LDST8_DTPREL_LO12 => "R_AARCH64_TLSLD_LDST8_DTPREL_LO12",
+ elf.R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC => "R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC",
+ elf.R_AARCH64_TLSLD_LDST16_DTPREL_LO12 => "R_AARCH64_TLSLD_LDST16_DTPREL_LO12",
+ elf.R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC => "R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC",
+ elf.R_AARCH64_TLSLD_LDST32_DTPREL_LO12 => "R_AARCH64_TLSLD_LDST32_DTPREL_LO12",
+ elf.R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC => "R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC",
+ elf.R_AARCH64_TLSLD_LDST64_DTPREL_LO12 => "R_AARCH64_TLSLD_LDST64_DTPREL_LO12",
+ elf.R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC => "R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC",
+ elf.R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 => "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1",
+ elf.R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC => "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC",
+ elf.R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 => "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21",
+ elf.R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC => "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC",
+ elf.R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 => "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19",
+ elf.R_AARCH64_TLSLE_MOVW_TPREL_G2 => "R_AARCH64_TLSLE_MOVW_TPREL_G2",
+ elf.R_AARCH64_TLSLE_MOVW_TPREL_G1 => "R_AARCH64_TLSLE_MOVW_TPREL_G1",
+ elf.R_AARCH64_TLSLE_MOVW_TPREL_G1_NC => "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC",
+ elf.R_AARCH64_TLSLE_MOVW_TPREL_G0 => "R_AARCH64_TLSLE_MOVW_TPREL_G0",
+ elf.R_AARCH64_TLSLE_MOVW_TPREL_G0_NC => "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC",
+ elf.R_AARCH64_TLSLE_ADD_TPREL_HI12 => "R_AARCH64_TLSLE_ADD_TPREL_HI12",
+ elf.R_AARCH64_TLSLE_ADD_TPREL_LO12 => "R_AARCH64_TLSLE_ADD_TPREL_LO12",
+ elf.R_AARCH64_TLSLE_ADD_TPREL_LO12_NC => "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC",
+ elf.R_AARCH64_TLSLE_LDST8_TPREL_LO12 => "R_AARCH64_TLSLE_LDST8_TPREL_LO12",
+ elf.R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC => "R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC",
+ elf.R_AARCH64_TLSLE_LDST16_TPREL_LO12 => "R_AARCH64_TLSLE_LDST16_TPREL_LO12",
+ elf.R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC => "R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC",
+ elf.R_AARCH64_TLSLE_LDST32_TPREL_LO12 => "R_AARCH64_TLSLE_LDST32_TPREL_LO12",
+ elf.R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC => "R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC",
+ elf.R_AARCH64_TLSLE_LDST64_TPREL_LO12 => "R_AARCH64_TLSLE_LDST64_TPREL_LO12",
+ elf.R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC => "R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC",
+ elf.R_AARCH64_TLSDESC_LD_PREL19 => "R_AARCH64_TLSDESC_LD_PREL19",
+ elf.R_AARCH64_TLSDESC_ADR_PREL21 => "R_AARCH64_TLSDESC_ADR_PREL21",
+ elf.R_AARCH64_TLSDESC_ADR_PAGE21 => "R_AARCH64_TLSDESC_ADR_PAGE21",
+ elf.R_AARCH64_TLSDESC_LD64_LO12 => "R_AARCH64_TLSDESC_LD64_LO12",
+ elf.R_AARCH64_TLSDESC_ADD_LO12 => "R_AARCH64_TLSDESC_ADD_LO12",
+ elf.R_AARCH64_TLSDESC_OFF_G1 => "R_AARCH64_TLSDESC_OFF_G1",
+ elf.R_AARCH64_TLSDESC_OFF_G0_NC => "R_AARCH64_TLSDESC_OFF_G0_NC",
+ elf.R_AARCH64_TLSDESC_LDR => "R_AARCH64_TLSDESC_LDR",
+ elf.R_AARCH64_TLSDESC_ADD => "R_AARCH64_TLSDESC_ADD",
+ elf.R_AARCH64_TLSDESC_CALL => "R_AARCH64_TLSDESC_CALL",
+ elf.R_AARCH64_TLSLE_LDST128_TPREL_LO12 => "R_AARCH64_TLSLE_LDST128_TPREL_LO12",
+ elf.R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC => "R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC",
+ elf.R_AARCH64_TLSLD_LDST128_DTPREL_LO12 => "R_AARCH64_TLSLD_LDST128_DTPREL_LO12",
+ elf.R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC => "R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC",
+ elf.R_AARCH64_COPY => "R_AARCH64_COPY",
+ elf.R_AARCH64_GLOB_DAT => "R_AARCH64_GLOB_DAT",
+ elf.R_AARCH64_JUMP_SLOT => "R_AARCH64_JUMP_SLOT",
+ elf.R_AARCH64_RELATIVE => "R_AARCH64_RELATIVE",
+ elf.R_AARCH64_TLS_DTPMOD => "R_AARCH64_TLS_DTPMOD",
+ elf.R_AARCH64_TLS_DTPREL => "R_AARCH64_TLS_DTPREL",
+ elf.R_AARCH64_TLS_TPREL => "R_AARCH64_TLS_TPREL",
+ elf.R_AARCH64_TLSDESC => "R_AARCH64_TLSDESC",
+ elf.R_AARCH64_IRELATIVE => "R_AARCH64_IRELATIVE",
+ else => "R_AARCH64_UNKNOWN",
+ },
+ else => unreachable,
+ };
+ try writer.print("{s}", .{str});
+}
+
+const assert = std.debug.assert;
+const elf = std.elf;
+const std = @import("std");
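A minimal usage sketch for the new module, with constants taken from the tables above; the test harness itself is illustrative, not part of the commit:

const std = @import("std");
const elf = std.elf;
const relocation = @import("relocation.zig");

test "Kind <-> r_type round-trip per architecture" {
    // encode picks the arch-appropriate dynamic relocation for a generic kind...
    try std.testing.expectEqual(@as(u32, elf.R_X86_64_RELATIVE), relocation.encode(.rel, .x86_64));
    try std.testing.expectEqual(@as(u32, elf.R_AARCH64_RELATIVE), relocation.encode(.rel, .aarch64));
    // ...and decode maps a raw r_type back to its generic kind, or null if unknown.
    try std.testing.expectEqual(relocation.Kind.rel, relocation.decode(elf.R_X86_64_RELATIVE, .x86_64).?);
    // fmtRelocType now needs the arch to pick the right name table:
    std.log.debug("{}", .{relocation.fmtRelocType(elf.R_X86_64_RELATIVE, .x86_64)}); // "R_X86_64_RELATIVE"
}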
src/link/Elf/synthetic_sections.zig
@@ -292,7 +292,7 @@ pub const ZigGotSection = struct {
zig_got.flags.dirty = false;
}
const entry_size: u16 = elf_file.archPtrWidthBytes();
- const target = elf_file.base.comp.root_mod.resolved_target.result;
+ const target = elf_file.getTarget();
const endian = target.cpu.arch.endian();
const off = zig_got.entryOffset(index, elf_file);
const vaddr = zig_got.entryAddress(index, elf_file);
@@ -354,13 +354,14 @@ pub const ZigGotSection = struct {
pub fn addRela(zig_got: ZigGotSection, elf_file: *Elf) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
+ const cpu_arch = elf_file.getTarget().cpu.arch;
try elf_file.rela_dyn.ensureUnusedCapacity(gpa, zig_got.numRela());
for (zig_got.entries.items) |entry| {
const symbol = elf_file.symbol(entry);
const offset = symbol.zigGotAddress(elf_file);
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
- .type = elf.R_X86_64_RELATIVE,
+ .type = relocation.encode(.rel, cpu_arch),
.addend = @intCast(symbol.address(.{ .plt = false }, elf_file)),
});
}
@@ -644,6 +645,7 @@ pub const GotSection = struct {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const is_dyn_lib = elf_file.base.isDynLib();
+ const cpu_arch = elf_file.getTarget().cpu.arch;
try elf_file.rela_dyn.ensureUnusedCapacity(gpa, got.numRela(elf_file));
for (got.entries.items) |entry| {
@@ -660,14 +662,14 @@ pub const GotSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.sym = extra.?.dynamic,
- .type = elf.R_X86_64_GLOB_DAT,
+ .type = relocation.encode(.glob_dat, cpu_arch),
});
continue;
}
if (symbol.?.isIFunc(elf_file)) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
- .type = elf.R_X86_64_IRELATIVE,
+ .type = relocation.encode(.irel, cpu_arch),
.addend = @intCast(symbol.?.address(.{ .plt = false }, elf_file)),
});
continue;
@@ -677,7 +679,7 @@ pub const GotSection = struct {
{
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
- .type = elf.R_X86_64_RELATIVE,
+ .type = relocation.encode(.rel, cpu_arch),
.addend = @intCast(symbol.?.address(.{ .plt = false }, elf_file)),
});
}
@@ -688,7 +690,7 @@ pub const GotSection = struct {
const offset = entry.address(elf_file);
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
- .type = elf.R_X86_64_DTPMOD64,
+ .type = relocation.encode(.dtpmod, cpu_arch),
});
}
},
@@ -699,18 +701,18 @@ pub const GotSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.sym = extra.?.dynamic,
- .type = elf.R_X86_64_DTPMOD64,
+ .type = relocation.encode(.dtpmod, cpu_arch),
});
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset + 8,
.sym = extra.?.dynamic,
- .type = elf.R_X86_64_DTPOFF64,
+ .type = relocation.encode(.dtpoff, cpu_arch),
});
} else if (is_dyn_lib) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.sym = extra.?.dynamic,
- .type = elf.R_X86_64_DTPMOD64,
+ .type = relocation.encode(.dtpmod, cpu_arch),
});
}
},
@@ -721,12 +723,12 @@ pub const GotSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.sym = extra.?.dynamic,
- .type = elf.R_X86_64_TPOFF64,
+ .type = relocation.encode(.tpoff, cpu_arch),
});
} else if (is_dyn_lib) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
- .type = elf.R_X86_64_TPOFF64,
+ .type = relocation.encode(.tpoff, cpu_arch),
.addend = @intCast(symbol.?.address(.{}, elf_file) - elf_file.tlsAddress()),
});
}
@@ -737,7 +739,7 @@ pub const GotSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.sym = extra.?.dynamic,
- .type = elf.R_X86_64_TLSDESC,
+ .type = relocation.encode(.tlsdesc, cpu_arch),
});
},
}
@@ -914,6 +916,7 @@ pub const PltSection = struct {
pub fn addRela(plt: PltSection, elf_file: *Elf) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
+ const cpu_arch = elf_file.getTarget().cpu.arch;
try elf_file.rela_plt.ensureUnusedCapacity(gpa, plt.numRela());
for (plt.symbols.items) |sym_index| {
const sym = elf_file.symbol(sym_index);
@@ -921,7 +924,7 @@ pub const PltSection = struct {
const extra = sym.extra(elf_file).?;
const r_offset = sym.gotPltAddress(elf_file);
const r_sym: u64 = extra.dynamic;
- const r_type: u32 = elf.R_X86_64_JUMP_SLOT;
+ const r_type = relocation.encode(.jump_slot, cpu_arch);
elf_file.rela_plt.appendAssumeCapacity(.{
.r_offset = r_offset,
.r_info = (r_sym << 32) | r_type,
@@ -1154,6 +1157,7 @@ pub const CopyRelSection = struct {
pub fn addRela(copy_rel: CopyRelSection, elf_file: *Elf) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
+ const cpu_arch = elf_file.getTarget().cpu.arch;
try elf_file.rela_dyn.ensureUnusedCapacity(gpa, copy_rel.numRela());
for (copy_rel.symbols.items) |sym_index| {
const sym = elf_file.symbol(sym_index);
@@ -1162,7 +1166,7 @@ pub const CopyRelSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = sym.address(.{}, elf_file),
.sym = extra.dynamic,
- .type = elf.R_X86_64_COPY,
+ .type = relocation.encode(.copy, cpu_arch),
});
}
}
@@ -1612,7 +1616,7 @@ pub const ComdatGroupSection = struct {
fn writeInt(value: anytype, elf_file: *Elf, writer: anytype) !void {
const entry_size = elf_file.archPtrWidthBytes();
- const target = elf_file.base.comp.root_mod.resolved_target.result;
+ const target = elf_file.getTarget();
const endian = target.cpu.arch.endian();
switch (entry_size) {
2 => try writer.writeInt(u16, @intCast(value), endian),
@@ -1627,6 +1631,7 @@ const builtin = @import("builtin");
const elf = std.elf;
const mem = std.mem;
const log = std.log.scoped(.link);
+const relocation = @import("relocation.zig");
const std = @import("std");
const Allocator = std.mem.Allocator;
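
The PltSection.addRela hunk above packs the relocation word by hand: r_info = (r_sym << 32) | r_type, which is the standard ELF64 encoding that Elf64_Rela.r_sym() and Elf64_Rela.r_type() invert. A minimal self-contained check of that round trip (the offset and symbol index are made-up example values):

const std = @import("std");
const elf = std.elf;

test "ELF64 r_info packing round-trips" {
    const r_sym: u64 = 7; // example dynamic symbol index
    const r_type: u32 = elf.R_AARCH64_JUMP_SLOT;
    const rela = elf.Elf64_Rela{
        .r_offset = 0x1000, // example .got.plt slot address
        .r_info = (r_sym << 32) | r_type,
        .r_addend = 0,
    };
    try std.testing.expectEqual(@as(u32, 7), rela.r_sym());
    try std.testing.expectEqual(@as(u32, elf.R_AARCH64_JUMP_SLOT), rela.r_type());
}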
src/link/Elf/ZigObject.zig
@@ -653,9 +653,10 @@ pub fn getDeclVAddr(
const this_sym = elf_file.symbol(this_sym_index);
const vaddr = this_sym.address(.{}, elf_file);
const parent_atom = elf_file.symbol(reloc_info.parent_atom_index).atom(elf_file).?;
+ const r_type = relocation.encode(.abs, elf_file.getTarget().cpu.arch);
try parent_atom.addReloc(elf_file, .{
.r_offset = reloc_info.offset,
- .r_info = (@as(u64, @intCast(this_sym.esym_index)) << 32) | elf.R_X86_64_64,
+ .r_info = (@as(u64, @intCast(this_sym.esym_index)) << 32) | r_type,
.r_addend = reloc_info.addend,
});
return vaddr;
@@ -671,9 +672,10 @@ pub fn getAnonDeclVAddr(
const sym = elf_file.symbol(sym_index);
const vaddr = sym.address(.{}, elf_file);
const parent_atom = elf_file.symbol(reloc_info.parent_atom_index).atom(elf_file).?;
+ const r_type = relocation.encode(.abs, elf_file.getTarget().cpu.arch);
try parent_atom.addReloc(elf_file, .{
.r_offset = reloc_info.offset,
- .r_info = (@as(u64, @intCast(sym.esym_index)) << 32) | elf.R_X86_64_64,
+ .r_info = (@as(u64, @intCast(sym.esym_index)) << 32) | r_type,
.r_addend = reloc_info.addend,
});
return vaddr;
@@ -1636,6 +1638,7 @@ const elf = std.elf;
const link = @import("../../link.zig");
const log = std.log.scoped(.link);
const mem = std.mem;
+const relocation = @import("relocation.zig");
const trace = @import("../../tracy.zig").trace;
const std = @import("std");
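
Both getDeclVAddr and getAnonDeclVAddr previously hardcoded R_X86_64_64; routing them through the .abs kind asks the encoder for the target's pointer-sized absolute relocation instead. Under the mapping sketched earlier (only the x86_64 side is confirmed by the removed lines; the aarch64 constant is an assumption):

const abs_x86 = relocation.encode(.abs, .x86_64); // elf.R_X86_64_64, as before
const abs_a64 = relocation.encode(.abs, .aarch64); // assumed: elf.R_AARCH64_ABS64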
src/link/Elf.zig
@@ -1357,6 +1357,10 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
error.RelaxFail, error.InvalidInstruction, error.CannotEncode => {
log.err("relaxing intructions failed; TODO this should be a fatal linker error", .{});
},
+ error.UnsupportedCpuArch => {
+ try self.reportUnsupportedCpuArch();
+ return error.FlushFailure;
+ },
else => |e| return e,
};
try self.base.file.?.pwriteAll(code, file_offset);
@@ -1366,7 +1370,14 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
try self.writePhdrTable();
try self.writeShdrTable();
try self.writeAtoms();
- try self.writeSyntheticSections();
+
+ self.writeSyntheticSections() catch |err| switch (err) {
+ error.UnsupportedCpuArch => {
+ try self.reportUnsupportedCpuArch();
+ return error.FlushFailure;
+ },
+ else => |e| return e,
+ };
if (self.entry_index == null and self.base.isExe()) {
log.debug("flushing. no_entry_point_found = true", .{});
@@ -2032,12 +2043,19 @@ fn scanRelocs(self: *Elf) !void {
undefs.deinit();
}
- if (self.zigObjectPtr()) |zig_object| {
- try zig_object.scanRelocs(self, &undefs);
- }
- for (self.objects.items) |index| {
- const object = self.file(index).?.object;
- try object.scanRelocs(self, &undefs);
+ var objects = try std.ArrayList(File.Index).initCapacity(gpa, self.objects.items.len + 1);
+ defer objects.deinit();
+ if (self.zigObjectPtr()) |zo| objects.appendAssumeCapacity(zo.index);
+ objects.appendSliceAssumeCapacity(self.objects.items);
+
+ for (objects.items) |index| {
+ self.file(index).?.scanRelocs(self, &undefs) catch |err| switch (err) {
+ error.UnsupportedCpuArch => {
+ try self.reportUnsupportedCpuArch();
+ return error.FlushFailure;
+ },
+ else => |e| return e,
+ };
}
try self.reportUndefinedSymbols(&undefs);
@@ -4470,17 +4488,21 @@ fn writeAtoms(self: *Elf) !void {
defer gpa.free(in_code);
@memcpy(out_code, in_code);
- if (shdr.sh_flags & elf.SHF_ALLOC == 0) {
- try atom_ptr.resolveRelocsNonAlloc(self, out_code, &undefs);
- } else {
- atom_ptr.resolveRelocsAlloc(self, out_code) catch |err| switch (err) {
- // TODO
- error.RelaxFail, error.InvalidInstruction, error.CannotEncode => {
- log.err("relaxing intructions failed; TODO this should be a fatal linker error", .{});
- },
- else => |e| return e,
- };
- }
+ const res = if (shdr.sh_flags & elf.SHF_ALLOC == 0)
+ atom_ptr.resolveRelocsNonAlloc(self, out_code, &undefs)
+ else
+ atom_ptr.resolveRelocsAlloc(self, out_code);
+ _ = res catch |err| switch (err) {
+ // TODO
+ error.RelaxFail, error.InvalidInstruction, error.CannotEncode => {
+ log.err("relaxing intructions failed; TODO this should be a fatal linker error", .{});
+ },
+ error.UnsupportedCpuArch => {
+ try self.reportUnsupportedCpuArch();
+ return error.FlushFailure;
+ },
+ else => |e| return e,
+ };
}
try self.base.file.?.pwriteAll(buffer, sh_offset);
@@ -5271,24 +5293,26 @@ pub fn addRelaDynAssumeCapacity(self: *Elf, opts: RelaDyn) void {
fn sortRelaDyn(self: *Elf) void {
const Sort = struct {
- fn rank(rel: elf.Elf64_Rela) u2 {
- return switch (rel.r_type()) {
- elf.R_X86_64_RELATIVE => 0,
- elf.R_X86_64_IRELATIVE => 2,
+ fn rank(rel: elf.Elf64_Rela, ctx: *Elf) u2 {
+ const cpu_arch = ctx.getTarget().cpu.arch;
+ const r_type = rel.r_type();
+ const r_kind = relocation.decode(r_type, cpu_arch).?;
+ return switch (r_kind) {
+ .rel => 0,
+ .irel => 2,
else => 1,
};
}
- pub fn lessThan(ctx: void, lhs: elf.Elf64_Rela, rhs: elf.Elf64_Rela) bool {
- _ = ctx;
- if (rank(lhs) == rank(rhs)) {
+ pub fn lessThan(ctx: *Elf, lhs: elf.Elf64_Rela, rhs: elf.Elf64_Rela) bool {
+ if (rank(lhs, ctx) == rank(rhs, ctx)) {
if (lhs.r_sym() == rhs.r_sym()) return lhs.r_offset < rhs.r_offset;
return lhs.r_sym() < rhs.r_sym();
}
- return rank(lhs) < rank(rhs);
+ return rank(lhs, ctx) < rank(rhs, ctx);
}
};
- mem.sort(elf.Elf64_Rela, self.rela_dyn.items, {}, Sort.lessThan);
+ mem.sort(elf.Elf64_Rela, self.rela_dyn.items, self, Sort.lessThan);
}
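
The reworked rank keeps the familiar ordering of .rela.dyn, now arch-neutrally via decode: R_*_RELATIVE entries sort first so the loader can process them as one contiguous batch (as advertised via DT_RELACOUNT), R_*_IRELATIVE entries sort last because ifunc resolvers may run code that depends on every other relocation already being applied, and everything else sits in between, tie-broken by symbol then offset. A standalone sketch of that ordering, decoupled from *Elf and assuming aarch64 constants:

const std = @import("std");
const elf = std.elf;

fn rank(r_type: u32) u2 {
    return switch (r_type) {
        elf.R_AARCH64_RELATIVE => 0, // batch-processed first
        elf.R_AARCH64_IRELATIVE => 2, // ifunc resolvers run last
        else => 1,
    };
}

test "RELATIVE sorts first, IRELATIVE last" {
    var types = [_]u32{ elf.R_AARCH64_IRELATIVE, elf.R_AARCH64_GLOB_DAT, elf.R_AARCH64_RELATIVE };
    std.mem.sort(u32, &types, {}, struct {
        fn lessThan(_: void, lhs: u32, rhs: u32) bool {
            return rank(lhs) < rank(rhs);
        }
    }.lessThan);
    try std.testing.expectEqual(@as(u32, elf.R_AARCH64_RELATIVE), types[0]);
    try std.testing.expectEqual(@as(u32, elf.R_AARCH64_IRELATIVE), types[2]);
}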
fn calcNumIRelativeRelocs(self: *Elf) usize {
@@ -5667,6 +5691,13 @@ fn reportMissingLibraryError(
}
}
+pub fn reportUnsupportedCpuArch(self: *Elf) error{OutOfMemory}!void {
+ var err = try self.addErrorWithNotes(0);
+ try err.addMsg(self, "fatal linker error: unsupported CPU architecture {s}", .{
+ @tagName(self.getTarget().cpu.arch),
+ });
+}
+
pub fn reportParseError(
self: *Elf,
path: []const u8,
@@ -5932,6 +5963,10 @@ pub fn lsearch(comptime T: type, haystack: []align(1) const T, predicate: anytyp
return i;
}
+pub fn getTarget(self: Elf) std.Target {
+ return self.base.comp.root_mod.resolved_target.result;
+}
+
/// The following three values are only observed at compile-time and used to emit a compile error
/// to remind the programmer to update expected maximum numbers of different program header types
/// so that we reserve enough space for the program header table up-front.
@@ -6059,6 +6094,7 @@ const link = @import("../link.zig");
const lldMain = @import("../main.zig").lldMain;
const musl = @import("../musl.zig");
const relocatable = @import("Elf/relocatable.zig");
+const relocation = @import("Elf/relocation.zig");
const target_util = @import("../target.zig");
const trace = @import("../tracy.zig").trace;
const synthetic_sections = @import("Elf/synthetic_sections.zig");
CMakeLists.txt
@@ -594,6 +594,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/link/Elf/file.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/gc.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/relocatable.zig"
+ "${CMAKE_SOURCE_DIR}/src/link/Elf/relocation.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/synthetic_sections.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Archive.zig"