/// Address allocated for this Atom.
value: i64 = 0,

/// Name of this Atom.
name_offset: u32 = 0,

/// Index into linker's input file table.
file_index: File.Index = 0,

/// Size of this atom.
size: u64 = 0,

/// Alignment of this atom as a power of two.
alignment: Alignment = .@"1",

/// Index of the input section.
input_section_index: u32 = 0,

/// Index of the output section.
output_section_index: u32 = 0,

/// Index of the input section containing this atom's relocs.
relocs_section_index: u32 = 0,

/// Index of this atom in the linker's atoms table.
atom_index: Index = 0,

/// Points to the previous and next neighbors.
prev_atom_ref: Elf.Ref = .{},
next_atom_ref: Elf.Ref = .{},

/// Specifies whether this atom is alive or has been garbage collected.
alive: bool = true,

/// Specifies if the atom has been visited during garbage collection.
visited: bool = false,

extra_index: u32 = 0,

pub const Alignment = @import("../../InternPool.zig").Alignment;

pub fn name(self: Atom, elf_file: *Elf) [:0]const u8 {
    const file_ptr = self.file(elf_file).?;
    return switch (file_ptr) {
        inline else => |x| x.getString(self.name_offset),
    };
}

pub fn address(self: Atom, elf_file: *Elf) i64 {
    const shdr = elf_file.sections.items(.shdr)[self.output_section_index];
    return @as(i64, @intCast(shdr.sh_addr)) + self.value;
}

pub fn offset(self: Atom, elf_file: *Elf) u64 {
    const shdr = elf_file.sections.items(.shdr)[self.output_section_index];
    return shdr.sh_offset + @as(u64, @intCast(self.value));
}

pub fn ref(self: Atom) Elf.Ref {
    return .{ .index = self.atom_index, .file = self.file_index };
}

pub fn prevAtom(self: Atom, elf_file: *Elf) ?*Atom {
    return elf_file.atom(self.prev_atom_ref);
}

pub fn nextAtom(self: Atom, elf_file: *Elf) ?*Atom {
    return elf_file.atom(self.next_atom_ref);
}

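/// Returns the value to write for a relocation in a DWARF section whose target
/// has been garbage collected, or null if the target is still alive or this is
/// not a .debug section. In .debug_loc and .debug_ranges a 0 would terminate
/// the enclosing list early, so those sections use 1 as the tombstone value;
/// every other .debug section uses 0.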
pub fn debugTombstoneValue(self: Atom, target: Symbol, elf_file: *Elf) ?u64 {
    if (target.mergeSubsection(elf_file)) |msub| {
        if (msub.alive) return null;
    }
    if (target.atom(elf_file)) |atom_ptr| {
        if (atom_ptr.alive) return null;
    }
    const atom_name = self.name(elf_file);
    if (!mem.startsWith(u8, atom_name, ".debug")) return null;
    return if (mem.eql(u8, atom_name, ".debug_loc") or mem.eql(u8, atom_name, ".debug_ranges")) 1 else 0;
}

pub fn file(self: Atom, elf_file: *Elf) ?File {
    return elf_file.file(self.file_index);
}

pub fn thunk(self: Atom, elf_file: *Elf) *Thunk {
    const extras = self.extra(elf_file);
    return elf_file.thunk(extras.thunk);
}

pub fn inputShdr(self: Atom, elf_file: *Elf) elf.Elf64_Shdr {
    return switch (self.file(elf_file).?) {
        .object => |x| x.shdrs.items[self.input_section_index],
        .zig_object => |x| x.inputShdr(self.atom_index, elf_file),
        else => unreachable,
    };
}

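/// Returns the index of the input section holding this atom's relocations, or
/// null if there is none; index 0 (the null section) doubles as the "no
/// relocations" sentinel here.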
pub fn relocsShndx(self: Atom) ?u32 {
    if (self.relocs_section_index == 0) return null;
    return self.relocs_section_index;
}

pub fn priority(atom: Atom, elf_file: *Elf) u64 {
    const index = atom.file(elf_file).?.index();
    return priorityLookup(index, atom.input_section_index);
}

pub fn priorityLookup(file_index: File.Index, input_section_index: u32) u64 {
    return (@as(u64, @intCast(file_index)) << 32) | @as(u64, @intCast(input_section_index));
}
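
// A minimal sanity check of the packing above; illustrative only, and assumes
// the test runner references this file (`std` is imported at the end of the
// file).
test priorityLookup {
    // A lower file index always sorts first, regardless of the section index.
    try std.testing.expect(priorityLookup(0, 0xffff_ffff) < priorityLookup(1, 0));
    // The file index occupies the upper 32 bits, the section index the lower 32.
    try std.testing.expectEqual(@as(u64, 0x0000_0002_0000_0005), priorityLookup(2, 5));
}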

/// Returns how much room there is to grow in virtual address space.
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
pub fn capacity(self: Atom, elf_file: *Elf) u64 {
    const next_addr = if (self.nextAtom(elf_file)) |next_atom|
        next_atom.address(elf_file)
    else
        std.math.maxInt(u32);
    return @intCast(next_addr - self.address(elf_file));
}

pub fn fileCapacity(self: Atom, elf_file: *Elf) u64 {
    const self_off = self.offset(elf_file);
    const next_off = if (self.nextAtom(elf_file)) |next_atom|
        next_atom.offset(elf_file)
    else
        self_off + elf_file.allocatedSize(self_off);
    return @intCast(next_off - self_off);
}

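/// An atom is worth a free list node only when the gap between it and its
/// successor is meaningfully larger than its ideal padded size. Hypothetical
/// numbers: if `Elf.padToIdeal(64)` were 85 and the next atom started 200 bytes
/// after this one, the surplus would be 200 - 85 = 115 bytes, which is eligible
/// only if it is at least `Elf.min_text_capacity`.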
pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
    // No need to keep a free list node for the last block.
    const next = self.nextAtom(elf_file) orelse return false;
    const cap: u64 = @intCast(next.value - self.value);
    const ideal_cap = Elf.padToIdeal(self.size);
    if (cap <= ideal_cap) return false;
    const surplus = cap - ideal_cap;
    return surplus >= Elf.min_text_capacity;
}

pub fn free(self: *Atom, elf_file: *Elf) void {
    log.debug("freeAtom atom({f}) ({s})", .{ self.ref(), self.name(elf_file) });

    const comp = elf_file.base.comp;
    const gpa = comp.gpa;
    const shndx = self.output_section_index;
    const slice = elf_file.sections.slice();
    const free_list = &slice.items(.free_list)[shndx];
    const last_atom_ref = &slice.items(.last_atom)[shndx];
    var already_have_free_list_node = false;
    {
        var i: usize = 0;
        // TODO turn free_list into a hash map
        while (i < free_list.items.len) {
            if (free_list.items[i].eql(self.ref())) {
                _ = free_list.swapRemove(i);
                continue;
            }
            if (self.prevAtom(elf_file)) |prev_atom| {
                if (free_list.items[i].eql(prev_atom.ref())) {
                    already_have_free_list_node = true;
                }
            }
            i += 1;
        }
    }

    if (elf_file.atom(last_atom_ref.*)) |last_atom| {
        if (last_atom.ref().eql(self.ref())) {
            if (self.prevAtom(elf_file)) |prev_atom| {
                // TODO shrink the section size here
                last_atom_ref.* = prev_atom.ref();
            } else {
                last_atom_ref.* = .{};
            }
        }
    }

    if (self.prevAtom(elf_file)) |prev_atom| {
        prev_atom.next_atom_ref = self.next_atom_ref;
        if (!already_have_free_list_node and prev_atom.freeListEligible(elf_file)) {
            // The free list is a heuristic; it doesn't have to be perfect, so we
            // can ignore the OOM here.
            free_list.append(gpa, prev_atom.ref()) catch {};
        }
    } else {
        self.prev_atom_ref = .{};
    }

    if (self.nextAtom(elf_file)) |next_atom| {
        next_atom.prev_atom_ref = self.prev_atom_ref;
    } else {
        self.next_atom_ref = .{};
    }

    switch (self.file(elf_file).?) {
        .zig_object => |zo| {
            // TODO create relocs free list
            self.freeRelocs(zo);
            // TODO figure out how to free input section mapping in ZigModule
            // const zig_object = elf_file.zigObjectPtr().?
            // assert(zig_object.atoms.swapRemove(self.atom_index));
        },
        else => {},
    }
    self.* = .{};
}

pub fn relocs(self: Atom, elf_file: *Elf) []const elf.Elf64_Rela {
    const shndx = self.relocsShndx() orelse return &[0]elf.Elf64_Rela{};
    switch (self.file(elf_file).?) {
        .zig_object => |x| return x.relocs.items[shndx].items,
        .object => |x| {
            const extras = self.extra(elf_file);
            return x.relocs.items[extras.rel_index..][0..extras.rel_count];
        },
        else => unreachable,
    }
}

pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.array_list.Managed(elf.Elf64_Rela)) !void {
    relocs_log.debug("0x{x}: {s}", .{ self.address(elf_file), self.name(elf_file) });

    const cpu_arch = elf_file.getTarget().cpu.arch;
    const file_ptr = self.file(elf_file).?;
    for (self.relocs(elf_file)) |rel| {
        const target_ref = file_ptr.resolveSymbol(rel.r_sym(), elf_file);
        const target = elf_file.symbol(target_ref).?;
        const r_type = rel.r_type();
        const r_offset: u64 = @intCast(self.value + @as(i64, @intCast(rel.r_offset)));
        var r_addend = rel.r_addend;
        var r_sym: u32 = 0;
        switch (target.type(elf_file)) {
            elf.STT_SECTION => {
                r_addend += @intCast(target.address(.{}, elf_file));
                r_sym = target.outputShndx(elf_file) orelse 0;
            },
            else => {
                r_sym = target.outputSymtabIndex(elf_file) orelse 0;
            },
        }

        relocs_log.debug("  {f}: [{x} => {d}({s})] + {x}", .{
            relocation.fmtRelocType(rel.r_type(), cpu_arch),
            r_offset,
            r_sym,
            target.name(elf_file),
            r_addend,
        });

        out_relocs.appendAssumeCapacity(.{
            .r_offset = r_offset,
            .r_addend = r_addend,
            .r_info = (@as(u64, @intCast(r_sym)) << 32) | r_type,
        });
    }
}

pub fn fdes(atom: Atom, object: *Object) []Fde {
    const extras = object.atomExtra(atom.extra_index);
    return object.fdes.items[extras.fde_start..][0..extras.fde_count];
}

pub fn markFdesDead(self: Atom, object: *Object) void {
    for (self.fdes(object)) |*fde| fde.alive = false;
}

pub fn addReloc(self: Atom, alloc: Allocator, reloc: elf.Elf64_Rela, zo: *ZigObject) !void {
    const rels = &zo.relocs.items[self.relocs_section_index];
    try rels.ensureUnusedCapacity(alloc, 1);
    self.addRelocAssumeCapacity(reloc, zo);
}

pub fn addRelocAssumeCapacity(self: Atom, reloc: elf.Elf64_Rela, zo: *ZigObject) void {
    const rels = &zo.relocs.items[self.relocs_section_index];
    rels.appendAssumeCapacity(reloc);
}

pub fn freeRelocs(self: Atom, zo: *ZigObject) void {
    zo.relocs.items[self.relocs_section_index].clearRetainingCapacity();
}

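/// Returns true if `scanRelocs` will need this atom's code bytes in order to
/// make its decisions. Currently that is only the case for x86_64 GOTTPOFF,
/// where checking whether the IE -> LE relaxation applies requires
/// disassembling the instruction being patched (see `canRelaxGotTpOff`).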
pub fn scanRelocsRequiresCode(self: Atom, elf_file: *Elf) bool {
    const cpu_arch = elf_file.getTarget().cpu.arch;
    for (self.relocs(elf_file)) |rel| {
        switch (cpu_arch) {
            .x86_64 => {
                const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
                if (r_type == .GOTTPOFF) return true;
            },
            else => {},
        }
    }
    return false;
}

pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype) RelocError!void {
    const cpu_arch = elf_file.getTarget().cpu.arch;
    const file_ptr = self.file(elf_file).?;
    const rels = self.relocs(elf_file);

    var has_reloc_errors = false;
    var it = RelocsIterator{ .relocs = rels };
    while (it.next()) |rel| {
        const r_kind = relocation.decode(rel.r_type(), cpu_arch);
        if (r_kind == .none) continue;

        const symbol_ref = file_ptr.resolveSymbol(rel.r_sym(), elf_file);
        const symbol = elf_file.symbol(symbol_ref) orelse {
            const sym_name = switch (file_ptr) {
                .zig_object => |x| x.symbol(rel.r_sym()).name(elf_file),
                inline else => |x| x.symbols.items[rel.r_sym()].name(elf_file),
            };
            // Violation of One Definition Rule for COMDATs.
            // TODO convert into an error
            log.debug("{f}: {s}: {s} refers to a discarded COMDAT section", .{
                file_ptr.fmtPath(),
                self.name(elf_file),
                sym_name,
            });
            continue;
        };

        const is_synthetic_symbol = switch (file_ptr) {
            .zig_object => false, // TODO: implement this once we support merge sections in ZigObject
            .object => |x| rel.r_sym() >= x.symtab.items.len,
            else => unreachable,
        };

        // Report an undefined symbol.
        if (!is_synthetic_symbol and (try self.reportUndefined(elf_file, symbol, rel, undefs)))
            continue;

        if (symbol.isIFunc(elf_file)) {
            symbol.flags.needs_got = true;
            symbol.flags.needs_plt = true;
        }

        // While traversing relocations, mark symbols that require special handling such as
        // pointer indirection via GOT, or a stub trampoline via PLT.
        switch (cpu_arch) {
            .x86_64 => x86_64.scanReloc(self, elf_file, rel, symbol, code, &it) catch |err| switch (err) {
                error.RelocFailure => has_reloc_errors = true,
                else => |e| return e,
            },
            .aarch64, .aarch64_be => aarch64.scanReloc(self, elf_file, rel, symbol, code, &it) catch |err| switch (err) {
                error.RelocFailure => has_reloc_errors = true,
                else => |e| return e,
            },
            .riscv64, .riscv64be => riscv.scanReloc(self, elf_file, rel, symbol, code, &it) catch |err| switch (err) {
                error.RelocFailure => has_reloc_errors = true,
                else => |e| return e,
            },
            else => return error.UnsupportedCpuArch,
        }
    }
    if (has_reloc_errors) return error.RelocFailure;
}

fn scanReloc(
    self: Atom,
    symbol: *Symbol,
    rel: elf.Elf64_Rela,
    action: RelocAction,
    elf_file: *Elf,
) RelocError!void {
    const is_writeable = self.inputShdr(elf_file).sh_flags & elf.SHF_WRITE != 0;
    const num_dynrelocs = switch (self.file(elf_file).?) {
        .linker_defined => unreachable,
        .shared_object => unreachable,
        inline else => |x| &x.num_dynrelocs,
    };

    switch (action) {
        .none => {},

        .@"error" => if (symbol.isAbs(elf_file))
            try self.reportNoPicError(symbol, rel, elf_file)
        else
            try self.reportPicError(symbol, rel, elf_file),

        .copyrel => {
            if (elf_file.z_nocopyreloc) {
                if (symbol.isAbs(elf_file))
                    try self.reportNoPicError(symbol, rel, elf_file)
                else
                    try self.reportPicError(symbol, rel, elf_file);
            }
            symbol.flags.needs_copy_rel = true;
        },

        .dyn_copyrel => {
            if (is_writeable or elf_file.z_nocopyreloc) {
                if (!is_writeable) {
                    if (elf_file.z_notext) {
                        elf_file.has_text_reloc = true;
                    } else {
                        try self.reportTextRelocError(symbol, rel, elf_file);
                    }
                }
                num_dynrelocs.* += 1;
            } else {
                symbol.flags.needs_copy_rel = true;
            }
        },

        .plt => {
            symbol.flags.needs_plt = true;
        },

        .cplt => {
            symbol.flags.needs_plt = true;
            symbol.flags.is_canonical = true;
        },

        .dyn_cplt => {
            if (is_writeable) {
                num_dynrelocs.* += 1;
            } else {
                symbol.flags.needs_plt = true;
                symbol.flags.is_canonical = true;
            }
        },

        .dynrel, .baserel, .ifunc => {
            if (!is_writeable) {
                if (elf_file.z_notext) {
                    elf_file.has_text_reloc = true;
                } else {
                    try self.reportTextRelocError(symbol, rel, elf_file);
                }
            }
            num_dynrelocs.* += 1;

            if (action == .ifunc) elf_file.num_ifunc_dynrelocs += 1;
        },
    }
}

const RelocAction = enum {
    none,
    @"error",
    copyrel,
    dyn_copyrel,
    plt,
    dyn_cplt,
    cplt,
    dynrel,
    baserel,
    ifunc,
};

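// The three tables below are indexed as table[outputType(elf_file)][dataType(symbol, elf_file)]:
// rows select the output kind (shared object, PIE, non-PIE executable) and
// columns classify the referenced symbol (absolute, local, imported data,
// imported function). For example, a PC-relative reference to an imported
// function resolves to `.plt` in a shared object or PIE, but to a canonical
// PLT entry (`.cplt`) in a non-PIE executable.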
fn pcRelocAction(symbol: *const Symbol, elf_file: *Elf) RelocAction {
    // zig fmt: off
    const table: [3][4]RelocAction = .{
        //  Abs       Local  Import data  Import func
        .{ .@"error", .none, .@"error",   .plt  }, // Shared object
        .{ .@"error", .none, .copyrel,    .plt  }, // PIE
        .{ .none,     .none, .copyrel,    .cplt }, // Non-PIE
    };
    // zig fmt: on
    const output = outputType(elf_file);
    const data = dataType(symbol, elf_file);
    return table[output][data];
}

fn absRelocAction(symbol: *const Symbol, elf_file: *Elf) RelocAction {
    // zig fmt: off
    const table: [3][4]RelocAction = .{
        //  Abs    Local      Import data  Import func
        .{ .none, .@"error", .@"error",    .@"error" }, // Shared object
        .{ .none, .@"error", .@"error",    .@"error" }, // PIE
        .{ .none, .none,     .copyrel,     .cplt     }, // Non-PIE
    };
    // zig fmt: on
    const output = outputType(elf_file);
    const data = dataType(symbol, elf_file);
    return table[output][data];
}

fn dynAbsRelocAction(symbol: *const Symbol, elf_file: *Elf) RelocAction {
    if (symbol.isIFunc(elf_file)) return .ifunc;
    // zig fmt: off
    const table: [3][4]RelocAction = .{
        //  Abs    Local     Import data   Import func
        .{ .none, .baserel, .dynrel,      .dynrel   }, // Shared object
        .{ .none, .baserel, .dynrel,      .dynrel   }, // PIE
        .{ .none, .none,    .dyn_copyrel, .dyn_cplt }, // Non-PIE
    };
    // zig fmt: on
    const output = outputType(elf_file);
    const data = dataType(symbol, elf_file);
    return table[output][data];
}

fn outputType(elf_file: *Elf) u2 {
    assert(!elf_file.base.isRelocatable());
    const config = &elf_file.base.comp.config;
    return switch (config.output_mode) {
        .Obj => unreachable,
        .Lib => 0,
        .Exe => switch (elf_file.getTarget().os.tag) {
            .haiku => 0,
            else => if (config.pie) 1 else 2,
        },
    };
}

fn dataType(symbol: *const Symbol, elf_file: *Elf) u2 {
    if (symbol.isAbs(elf_file)) return 0;
    if (!symbol.flags.import) return 1;
    if (symbol.type(elf_file) != elf.STT_FUNC) return 2;
    return 3;
}

fn reportUnhandledRelocError(self: Atom, rel: elf.Elf64_Rela, elf_file: *Elf) RelocError!void {
    const diags = &elf_file.base.comp.link_diags;
    var err = try diags.addErrorWithNotes(1);
    try err.addMsg("fatal linker error: unhandled relocation type {f} at offset 0x{x}", .{
        relocation.fmtRelocType(rel.r_type(), elf_file.getTarget().cpu.arch),
        rel.r_offset,
    });
    err.addNote("in {f}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
    return error.RelocFailure;
}

fn reportTextRelocError(
    self: Atom,
    symbol: *const Symbol,
    rel: elf.Elf64_Rela,
    elf_file: *Elf,
) RelocError!void {
    const diags = &elf_file.base.comp.link_diags;
    var err = try diags.addErrorWithNotes(1);
    try err.addMsg("relocation at offset 0x{x} against symbol '{s}' cannot be used", .{
        rel.r_offset,
        symbol.name(elf_file),
    });
    err.addNote("in {f}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
    return error.RelocFailure;
}

fn reportPicError(
    self: Atom,
    symbol: *const Symbol,
    rel: elf.Elf64_Rela,
    elf_file: *Elf,
) RelocError!void {
    const diags = &elf_file.base.comp.link_diags;
    var err = try diags.addErrorWithNotes(2);
    try err.addMsg("relocation at offset 0x{x} against symbol '{s}' cannot be used", .{
        rel.r_offset,
        symbol.name(elf_file),
    });
    err.addNote("in {f}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
    err.addNote("recompile with -fPIC", .{});
    return error.RelocFailure;
}

fn reportNoPicError(
    self: Atom,
    symbol: *const Symbol,
    rel: elf.Elf64_Rela,
    elf_file: *Elf,
) RelocError!void {
    const diags = &elf_file.base.comp.link_diags;
    var err = try diags.addErrorWithNotes(2);
    try err.addMsg("relocation at offset 0x{x} against symbol '{s}' cannot be used", .{
        rel.r_offset,
        symbol.name(elf_file),
    });
    err.addNote("in {f}:{s}", .{ self.file(elf_file).?.fmtPath(), self.name(elf_file) });
    err.addNote("recompile with -fno-PIC", .{});
    return error.RelocFailure;
}

// Reports the target symbol of `rel` if it is undefined, non-weak, and not an
// import. Returns true if an undefined symbol was recorded.
fn reportUndefined(
    self: Atom,
    elf_file: *Elf,
    sym: *const Symbol,
    rel: elf.Elf64_Rela,
    undefs: anytype,
) !bool {
    const comp = elf_file.base.comp;
    const gpa = comp.gpa;
    const file_ptr = self.file(elf_file).?;
    const rel_esym = switch (file_ptr) {
        .zig_object => |x| x.symbol(rel.r_sym()).elfSym(elf_file),
        .shared_object => |so| so.parsed.symtab[rel.r_sym()],
        inline else => |x| x.symtab.items[rel.r_sym()],
    };
    const esym = sym.elfSym(elf_file);
    if (rel_esym.st_shndx == elf.SHN_UNDEF and
        rel_esym.st_bind() == elf.STB_GLOBAL and
        sym.esym_index > 0 and
        !sym.flags.import and
        esym.st_shndx == elf.SHN_UNDEF)
    {
        const idx = switch (file_ptr) {
            .zig_object => |x| x.symbols_resolver.items[rel.r_sym() & ZigObject.symbol_mask],
            .object => |x| x.symbols_resolver.items[rel.r_sym() - x.first_global.?],
            inline else => |x| x.symbols_resolver.items[rel.r_sym()],
        };
        const gop = try undefs.getOrPut(idx);
        if (!gop.found_existing) {
            gop.value_ptr.* = std.array_list.Managed(Elf.Ref).init(gpa);
        }
        try gop.value_ptr.append(.{ .index = self.atom_index, .file = self.file_index });
        return true;
    }

    return false;
}

pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!void {
    relocs_log.debug("0x{x}: {s}", .{ self.address(elf_file), self.name(elf_file) });

    const cpu_arch = elf_file.getTarget().cpu.arch;
    const file_ptr = self.file(elf_file).?;

    const rels = self.relocs(elf_file);
    var it = RelocsIterator{ .relocs = rels };
    var has_reloc_errors = false;
    while (it.next()) |rel| {
        const r_kind = relocation.decode(rel.r_type(), cpu_arch);
        if (r_kind == .none) continue;

        const target_ref = file_ptr.resolveSymbol(rel.r_sym(), elf_file);
        const target = elf_file.symbol(target_ref).?;
        const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;

        // We resolve relocations using the standard equations, as described in:
        // https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
        //
        // Address of the place being relocated: the source atom's address plus the relocation offset.
        const P = self.address(elf_file) + @as(i64, @intCast(rel.r_offset));
        // Addend from the relocation.
        const A = rel.r_addend;
        // Address of the target symbol - can be the address of the symbol within an atom,
        // the address of a PLT stub, or the address of a Zig trampoline.
        const S = target.address(.{}, elf_file);
        // Address of the global offset table.
        const GOT = elf_file.gotAddress();
        // Relative offset to the start of the global offset table.
        const G = target.gotAddress(elf_file) - GOT;
        // Address of the thread pointer.
        const TP = elf_file.tpAddress();
        // Address of the dynamic thread pointer.
        const DTP = elf_file.dtpAddress();
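
        // Worked example (x86_64): a PC32 relocation stores `S + A - P` as an
        // i32, i.e. the target plus addend relative to the place being patched,
        // while a GOTPCREL stores `G + GOT + A - P`, the displacement to the
        // target's GOT slot rather than to the target itself.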

        relocs_log.debug("  {f}: {x}: [{x} => {x}] GOT({x}) ({s})", .{
            relocation.fmtRelocType(rel.r_type(), cpu_arch),
            r_offset,
            P,
            S + A,
            G + GOT + A,
            target.name(elf_file),
        });

        const args = ResolveArgs{ P, A, S, GOT, G, TP, DTP };

        switch (cpu_arch) {
            .x86_64 => x86_64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code) catch |err| switch (err) {
                error.RelocFailure,
                error.RelaxFailure,
                => has_reloc_errors = true,
                else => |e| return e,
            },
            .aarch64, .aarch64_be => aarch64.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code) catch |err| switch (err) {
                error.RelocFailure,
                error.RelaxFailure,
                error.UnexpectedRemainder,
                error.DivisionByZero,
                => has_reloc_errors = true,
                else => |e| return e,
            },
            .riscv64, .riscv64be => riscv.resolveRelocAlloc(self, elf_file, rel, target, args, &it, code) catch |err| switch (err) {
                error.RelocFailure,
                error.RelaxFailure,
                => has_reloc_errors = true,
                else => |e| return e,
            },
            else => return error.UnsupportedCpuArch,
        }
    }

    if (has_reloc_errors) return error.RelaxFailure;
}

fn resolveDynAbsReloc(
    self: Atom,
    target: *const Symbol,
    rel: elf.Elf64_Rela,
    action: RelocAction,
    elf_file: *Elf,
    code: []u8,
    r_offset: usize,
) !void {
    const comp = elf_file.base.comp;
    const gpa = comp.gpa;
    const cpu_arch = elf_file.getTarget().cpu.arch;
    const P: u64 = @intCast(self.address(elf_file) + @as(i64, @intCast(rel.r_offset)));
    const A = rel.r_addend;
    const S = target.address(.{}, elf_file);
    const is_writeable = self.inputShdr(elf_file).sh_flags & elf.SHF_WRITE != 0;

    const num_dynrelocs = switch (self.file(elf_file).?) {
        .linker_defined => unreachable,
        .shared_object => unreachable,
        inline else => |x| x.num_dynrelocs,
    };
    try elf_file.rela_dyn.ensureUnusedCapacity(gpa, num_dynrelocs);

    switch (action) {
        .@"error",
        .plt,
        => unreachable,

        .copyrel,
        .cplt,
        .none,
        => mem.writeInt(i64, code[r_offset..][0..8], S + A, .little),

        .dyn_copyrel => {
            if (is_writeable or elf_file.z_nocopyreloc) {
                elf_file.addRelaDynAssumeCapacity(.{
                    .offset = P,
                    .sym = target.extra(elf_file).dynamic,
                    .type = relocation.encode(.abs, cpu_arch),
                    .addend = A,
                    .target = target,
                });
                applyDynamicReloc(A, code, r_offset);
            } else {
                mem.writeInt(i64, code[r_offset..][0..8], S + A, .little);
            }
        },

        .dyn_cplt => {
            if (is_writeable) {
                elf_file.addRelaDynAssumeCapacity(.{
                    .offset = P,
                    .sym = target.extra(elf_file).dynamic,
                    .type = relocation.encode(.abs, cpu_arch),
                    .addend = A,
                    .target = target,
                });
                applyDynamicReloc(A, code, r_offset);
            } else {
                mem.writeInt(i64, code[r_offset..][0..8], S + A, .little);
            }
        },

        .dynrel => {
            elf_file.addRelaDynAssumeCapacity(.{
                .offset = P,
                .sym = target.extra(elf_file).dynamic,
                .type = relocation.encode(.abs, cpu_arch),
                .addend = A,
                .target = target,
            });
            applyDynamicReloc(A, code, r_offset);
        },

        .baserel => {
            elf_file.addRelaDynAssumeCapacity(.{
                .offset = P,
                .type = relocation.encode(.rel, cpu_arch),
                .addend = S + A,
                .target = target,
            });
            applyDynamicReloc(S + A, code, r_offset);
        },

        .ifunc => {
            const S_ = target.address(.{ .plt = false }, elf_file);
            elf_file.addRelaDynAssumeCapacity(.{
                .offset = P,
                .type = relocation.encode(.irel, cpu_arch),
                .addend = S_ + A,
                .target = target,
            });
            applyDynamicReloc(S_ + A, code, r_offset);
        },
    }
}

fn applyDynamicReloc(value: i64, code: []u8, r_offset: usize) void {
    mem.writeInt(i64, code[r_offset..][0..8], value, .little);
}

pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: anytype) !void {
    relocs_log.debug("0x{x}: {s}", .{ self.address(elf_file), self.name(elf_file) });

    const cpu_arch = elf_file.getTarget().cpu.arch;
    const file_ptr = self.file(elf_file).?;

    const rels = self.relocs(elf_file);
    var has_reloc_errors = false;
    var it = RelocsIterator{ .relocs = rels };
    while (it.next()) |rel| {
        const r_kind = relocation.decode(rel.r_type(), cpu_arch);
        if (r_kind == .none) continue;

        const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;

        const target_ref = file_ptr.resolveSymbol(rel.r_sym(), elf_file);
        const target = elf_file.symbol(target_ref) orelse {
            const sym_name = switch (file_ptr) {
                .zig_object => |x| x.symbol(rel.r_sym()).name(elf_file),
                inline else => |x| x.symbols.items[rel.r_sym()].name(elf_file),
            };
            // Violation of One Definition Rule for COMDATs.
            // TODO convert into an error
            log.debug("{f}: {s}: {s} refers to a discarded COMDAT section", .{
                file_ptr.fmtPath(),
                self.name(elf_file),
                sym_name,
            });
            continue;
        };
        const is_synthetic_symbol = switch (file_ptr) {
            .zig_object => false, // TODO: implement this once we support merge sections in ZigObject
            .object => |x| rel.r_sym() >= x.symtab.items.len,
            else => unreachable,
        };

        // Report an undefined symbol.
        if (!is_synthetic_symbol and (try self.reportUndefined(elf_file, target, rel, undefs)))
            continue;

        // We resolve relocations using the standard equations, as described in:
        // https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
        //
        // Address of the place being relocated: the source atom's address plus the relocation offset.
        const P = self.address(elf_file) + @as(i64, @intCast(rel.r_offset));
        // Addend from the relocation.
        const A = rel.r_addend;
        // Address of the target symbol - can be the address of the symbol within an atom
        // or the address of a PLT stub.
        const S = target.address(.{}, elf_file);
        // Address of the global offset table.
        const GOT = elf_file.gotAddress();
        // Address of the dynamic thread pointer.
        const DTP = elf_file.dtpAddress();

        const args = ResolveArgs{ P, A, S, GOT, 0, 0, DTP };

        relocs_log.debug("  {f}: {x}: [{x} => {x}] ({s})", .{
            relocation.fmtRelocType(rel.r_type(), cpu_arch),
            rel.r_offset,
            P,
            S + A,
            target.name(elf_file),
        });

        switch (cpu_arch) {
            .x86_64 => x86_64.resolveRelocNonAlloc(self, elf_file, rel, target, args, code[r_offset..]) catch |err| switch (err) {
                error.RelocFailure => has_reloc_errors = true,
                else => |e| return e,
            },
            .aarch64, .aarch64_be => aarch64.resolveRelocNonAlloc(self, elf_file, rel, target, args, code[r_offset..]) catch |err| switch (err) {
                error.RelocFailure => has_reloc_errors = true,
                else => |e| return e,
            },
            .riscv64, .riscv64be => riscv.resolveRelocNonAlloc(self, elf_file, rel, target, args, code[r_offset..]) catch |err| switch (err) {
                error.RelocFailure => has_reloc_errors = true,
                else => |e| return e,
            },
            else => return error.UnsupportedCpuArch,
        }
    }

    if (has_reloc_errors) return error.RelocFailure;
}

pub fn addExtra(atom: *Atom, opts: Extra.AsOptionals, elf_file: *Elf) void {
    const file_ptr = atom.file(elf_file).?;
    var extras = file_ptr.atomExtra(atom.extra_index);
    inline for (@typeInfo(@TypeOf(opts)).@"struct".fields) |field| {
        if (@field(opts, field.name)) |x| {
            @field(extras, field.name) = x;
        }
    }
    file_ptr.setAtomExtra(atom.extra_index, extras);
}

pub fn extra(atom: Atom, elf_file: *Elf) Extra {
    return atom.file(elf_file).?.atomExtra(atom.extra_index);
}

pub fn setExtra(atom: Atom, extras: Extra, elf_file: *Elf) void {
    atom.file(elf_file).?.setAtomExtra(atom.extra_index, extras);
}

pub fn fmt(atom: Atom, elf_file: *Elf) std.fmt.Alt(Format, Format.default) {
    return .{ .data = .{
        .atom = atom,
        .elf_file = elf_file,
    } };
}

const Format = struct {
    atom: Atom,
    elf_file: *Elf,

    fn default(f: Format, w: *Writer) Writer.Error!void {
        const atom = f.atom;
        const elf_file = f.elf_file;
        try w.print("atom({d}) : {s} : @{x} : shdr({d}) : align({x}) : size({x}) : prev({f}) : next({f})", .{
            atom.atom_index,           atom.name(elf_file),                   atom.address(elf_file),
            atom.output_section_index, atom.alignment.toByteUnits() orelse 0, atom.size,
            atom.prev_atom_ref,        atom.next_atom_ref,
        });
        if (atom.file(elf_file)) |atom_file| switch (atom_file) {
            .object => |object| {
                if (atom.fdes(object).len > 0) {
                    try w.writeAll(" : fdes{ ");
                    const extras = atom.extra(elf_file);
                    for (atom.fdes(object), extras.fde_start..) |fde, i| {
                        try w.print("{d}", .{i});
                        if (!fde.alive) try w.writeAll("([*])");
                        if (i - extras.fde_start < extras.fde_count - 1) try w.writeAll(", ");
                    }
                    try w.writeAll(" }");
                }
            },
            else => {},
        };
        if (!atom.alive) {
            try w.writeAll(" : [*]");
        }
    }
};

pub const Index = u32;

938
939const x86_64 = struct {
940 fn scanReloc(
941 atom: Atom,
942 elf_file: *Elf,
943 rel: elf.Elf64_Rela,
944 symbol: *Symbol,
945 code: ?[]const u8,
946 it: *RelocsIterator,
947 ) !void {
948 dev.check(.x86_64_backend);
949 const t = &elf_file.base.comp.root_mod.resolved_target.result;
950 const is_static = elf_file.base.isStatic();
951 const is_dyn_lib = elf_file.isEffectivelyDynLib();
952
953 const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
954 const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
955
956 switch (r_type) {
957 .@"64" => {
958 try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
959 },
960
961 .@"32",
962 .@"32S",
963 => {
964 try atom.scanReloc(symbol, rel, absRelocAction(symbol, elf_file), elf_file);
965 },
966
967 .GOT32,
968 .GOTPC32,
969 .GOTPC64,
970 .GOTPCREL,
971 .GOTPCREL64,
972 .GOTPCRELX,
973 .REX_GOTPCRELX,
974 => {
975 symbol.flags.needs_got = true;
976 },
977
978 .PLT32,
979 .PLTOFF64,
980 => {
981 if (symbol.flags.import) {
982 symbol.flags.needs_plt = true;
983 }
984 },
985
986 .PC32 => {
987 try atom.scanReloc(symbol, rel, pcRelocAction(symbol, elf_file), elf_file);
988 },
989
990 .TLSGD => {
991 // TODO verify followed by appropriate relocation such as PLT32 __tls_get_addr
992
993 if (is_static or (!symbol.flags.import and !is_dyn_lib)) {
994 // Relax if building with -static flag as __tls_get_addr() will not be present in libc.a
995 // We skip the next relocation.
996 it.skip(1);
997 } else if (!symbol.flags.import and is_dyn_lib) {
998 symbol.flags.needs_gottp = true;
999 it.skip(1);
1000 } else {
1001 symbol.flags.needs_tlsgd = true;
1002 }
1003 },
1004
1005 .TLSLD => {
1006 // TODO verify followed by appropriate relocation such as PLT32 __tls_get_addr
1007
1008 if (is_static or !is_dyn_lib) {
1009 // Relax if building with -static flag as __tls_get_addr() will not be present in libc.a
1010 // We skip the next relocation.
1011 it.skip(1);
1012 } else {
1013 elf_file.got.flags.needs_tlsld = true;
1014 }
1015 },
1016
1017 .GOTTPOFF => {
1018 const should_relax = blk: {
1019 if (is_dyn_lib or symbol.flags.import) break :blk false;
1020 if (!x86_64.canRelaxGotTpOff(code.?[r_offset - 3 ..], t)) break :blk false;
1021 break :blk true;
1022 };
1023 if (!should_relax) {
1024 symbol.flags.needs_gottp = true;
1025 }
1026 },
1027
1028 .GOTPC32_TLSDESC => {
1029 const should_relax = is_static or (!is_dyn_lib and !symbol.flags.import);
1030 if (!should_relax) {
1031 symbol.flags.needs_tlsdesc = true;
1032 }
1033 },
1034
1035 .TPOFF32,
1036 .TPOFF64,
1037 => {
1038 if (is_dyn_lib) try atom.reportPicError(symbol, rel, elf_file);
1039 },
1040
1041 .GOTOFF64,
1042 .DTPOFF32,
1043 .DTPOFF64,
1044 .SIZE32,
1045 .SIZE64,
1046 .TLSDESC_CALL,
1047 => {},
1048
1049 else => try atom.reportUnhandledRelocError(rel, elf_file),
1050 }
1051 }
1052
1053 fn resolveRelocAlloc(
1054 atom: Atom,
1055 elf_file: *Elf,
1056 rel: elf.Elf64_Rela,
1057 target: *const Symbol,
1058 args: ResolveArgs,
1059 it: *RelocsIterator,
1060 code: []u8,
1061 ) !void {
1062 dev.check(.x86_64_backend);
1063 const t = &elf_file.base.comp.root_mod.resolved_target.result;
1064 const diags = &elf_file.base.comp.link_diags;
1065 const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
1066 const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
1067
1068 const P, const A, const S, const GOT, const G, const TP, const DTP = args;
1069
1070 switch (r_type) {
1071 .NONE => unreachable,
1072
1073 .@"64" => {
1074 try atom.resolveDynAbsReloc(
1075 target,
1076 rel,
1077 dynAbsRelocAction(target, elf_file),
1078 elf_file,
1079 code,
1080 r_offset,
1081 );
1082 },
1083
1084 .PLT32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S + A - P)), .little),
1085 .PC32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S + A - P)), .little),
1086
1087 .GOTPCREL => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(G + GOT + A - P)), .little),
1088 .GOTPC32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(GOT + A - P)), .little),
1089 .GOTPC64 => mem.writeInt(i64, code[r_offset..][0..8], GOT + A - P, .little),
1090
1091 .GOTPCRELX => {
1092 if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
1093 x86_64.relaxGotpcrelx(code[r_offset - 2 ..], t) catch break :blk;
1094 mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S + A - P)), .little);
1095 return;
1096 }
1097 mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(G + GOT + A - P)), .little);
1098 },
1099
1100 .REX_GOTPCRELX => {
1101 if (!target.flags.import and !target.isIFunc(elf_file) and !target.isAbs(elf_file)) blk: {
1102 x86_64.relaxRexGotpcrelx(code[r_offset - 3 ..], t) catch break :blk;
1103 mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S + A - P)), .little);
1104 return;
1105 }
1106 mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(G + GOT + A - P)), .little);
1107 },
1108
1109 .@"32" => mem.writeInt(u32, code[r_offset..][0..4], @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),
1110 .@"32S" => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @truncate(S + A)), .little),
1111
1112 .TPOFF32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @truncate(S + A - TP)), .little),
1113 .TPOFF64 => mem.writeInt(i64, code[r_offset..][0..8], S + A - TP, .little),
1114
1115 .DTPOFF32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @truncate(S + A - DTP)), .little),
1116 .DTPOFF64 => mem.writeInt(i64, code[r_offset..][0..8], S + A - DTP, .little),
1117
1118 .TLSGD => {
1119 if (target.flags.has_tlsgd) {
1120 const S_ = target.tlsGdAddress(elf_file);
1121 mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S_ + A - P)), .little);
1122 } else if (target.flags.has_gottp) {
1123 const S_ = target.gotTpAddress(elf_file);
1124 try x86_64.relaxTlsGdToIe(atom, &.{ rel, it.next().? }, @intCast(S_ - P), elf_file, code, r_offset);
1125 } else {
1126 try x86_64.relaxTlsGdToLe(
1127 atom,
1128 &.{ rel, it.next().? },
1129 @as(i32, @intCast(S - TP)),
1130 elf_file,
1131 code,
1132 r_offset,
1133 );
1134 }
1135 },
1136
1137 .TLSLD => {
1138 if (elf_file.got.tlsld_index) |entry_index| {
1139 const tlsld_entry = elf_file.got.entries.items[entry_index];
1140 const S_ = tlsld_entry.address(elf_file);
1141 mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S_ + A - P)), .little);
1142 } else {
1143 try x86_64.relaxTlsLdToLe(
1144 atom,
1145 &.{ rel, it.next().? },
1146 @as(i32, @intCast(TP - elf_file.tlsAddress())),
1147 elf_file,
1148 code,
1149 r_offset,
1150 );
1151 }
1152 },
1153
1154 .GOTPC32_TLSDESC => {
1155 if (target.flags.has_tlsdesc) {
1156 const S_ = target.tlsDescAddress(elf_file);
1157 mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S_ + A - P)), .little);
1158 } else {
1159 x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..], t) catch {
1160 var err = try diags.addErrorWithNotes(1);
1161 try err.addMsg("could not relax {s}", .{@tagName(r_type)});
1162 err.addNote("in {f}:{s} at offset 0x{x}", .{
1163 atom.file(elf_file).?.fmtPath(),
1164 atom.name(elf_file),
1165 rel.r_offset,
1166 });
1167 return error.RelaxFailure;
1168 };
1169 mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S - TP)), .little);
1170 }
1171 },
1172
1173 .TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
1174 // call -> nop
1175 code[r_offset..][0..2].* = .{ 0x66, 0x90 };
1176 },
1177
1178 .GOTTPOFF => {
1179 if (target.flags.has_gottp) {
1180 const S_ = target.gotTpAddress(elf_file);
1181 mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S_ + A - P)), .little);
1182 } else {
1183 x86_64.relaxGotTpOff(code[r_offset - 3 ..], t);
1184 mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(S - TP)), .little);
1185 }
1186 },
1187
1188 .GOT32 => mem.writeInt(i32, code[r_offset..][0..4], @as(i32, @intCast(G + A)), .little),
1189
1190 else => try atom.reportUnhandledRelocError(rel, elf_file),
1191 }
1192 }
1193
    fn resolveRelocNonAlloc(
        atom: Atom,
        elf_file: *Elf,
        rel: elf.Elf64_Rela,
        target: *const Symbol,
        args: ResolveArgs,
        code: []u8,
    ) !void {
        dev.check(.x86_64_backend);
        const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());

        _, const A, const S, const GOT, _, _, const DTP = args;

        var writer: Writer = .fixed(code);

        switch (r_type) {
            .NONE => unreachable,
            .@"8" => try writer.writeInt(u8, @as(u8, @bitCast(@as(i8, @intCast(S + A)))), .little),
            .@"16" => try writer.writeInt(u16, @as(u16, @bitCast(@as(i16, @intCast(S + A)))), .little),
            .@"32" => try writer.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(S + A)))), .little),
            .@"32S" => try writer.writeInt(i32, @as(i32, @intCast(S + A)), .little),
            .@"64" => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
                try writer.writeInt(u64, value, .little)
            else
                try writer.writeInt(i64, S + A, .little),
            .DTPOFF32 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
                try writer.writeInt(u64, value, .little)
            else
                try writer.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
            .DTPOFF64 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
                try writer.writeInt(u64, value, .little)
            else
                try writer.writeInt(i64, S + A - DTP, .little),
            .GOTOFF64 => try writer.writeInt(i64, S + A - GOT, .little),
            .GOTPC64 => try writer.writeInt(i64, GOT + A, .little),
            .SIZE32 => {
                const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
                try writer.writeInt(u32, @bitCast(@as(i32, @intCast(size + A))), .little);
            },
            .SIZE64 => {
                const size = @as(i64, @intCast(target.elfSym(elf_file).st_size));
                try writer.writeInt(i64, @intCast(size + A), .little);
            },
            else => try atom.reportUnhandledRelocError(rel, elf_file),
        }
    }

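    /// Relaxes an indirect GOTPCRELX call/jump through the GOT into a direct
    /// one. Sketch of the rewrite: the 6-byte `call *foo@GOTPCREL(%rip)`
    /// (ff 15 <rel32>) becomes a 1-byte nop followed by the 5-byte direct
    /// `call foo` (90 e8 <rel32>), so the instruction stream keeps its length.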
    fn relaxGotpcrelx(code: []u8, t: *const std.Target) !void {
        dev.check(.x86_64_backend);
        const old_inst = disassemble(code) orelse return error.RelaxFailure;
        const inst: Instruction = switch (old_inst.encoding.mnemonic) {
            .call => try .new(old_inst.prefix, .call, &.{
                // TODO: hack to force imm32s in the assembler
                .{ .imm = .s(-129) },
            }, t),
            .jmp => try .new(old_inst.prefix, .jmp, &.{
                // TODO: hack to force imm32s in the assembler
                .{ .imm = .s(-129) },
            }, t),
            else => return error.RelaxFailure,
        };
        relocs_log.debug("  relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
        const nop: Instruction = try .new(.none, .nop, &.{}, t);
        try encode(&.{ nop, inst }, code);
    }

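    /// Relaxes a REX-prefixed GOT load into a direct address computation:
    /// `mov foo@GOTPCREL(%rip), %reg` becomes `lea foo(%rip), %reg` (same
    /// length), so no GOT entry is needed for a locally-defined, non-ifunc,
    /// non-absolute target.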
    fn relaxRexGotpcrelx(code: []u8, t: *const std.Target) !void {
        dev.check(.x86_64_backend);
        const old_inst = disassemble(code) orelse return error.RelaxFailure;
        switch (old_inst.encoding.mnemonic) {
            .mov => {
                const inst: Instruction = try .new(old_inst.prefix, .lea, &old_inst.ops, t);
                relocs_log.debug("  relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
                try encode(&.{inst}, code);
            },
            else => return error.RelaxFailure,
        }
    }

    fn relaxTlsGdToIe(
        self: Atom,
        rels: []const elf.Elf64_Rela,
        value: i32,
        elf_file: *Elf,
        code: []u8,
        r_offset: usize,
    ) !void {
        dev.check(.x86_64_backend);
        assert(rels.len == 2);
        const diags = &elf_file.base.comp.link_diags;
        const rel: elf.R_X86_64 = @enumFromInt(rels[1].r_type());
        switch (rel) {
            .PC32,
            .PLT32,
            => {
                var insts = [_]u8{
                    0x64, 0x48, 0x8b, 0x04, 0x25, 0, 0, 0, 0, // movq %fs:0,%rax
                    0x48, 0x03, 0x05, 0, 0, 0, 0, // add foo@gottpoff(%rip), %rax
                };
                std.mem.writeInt(i32, insts[12..][0..4], value - 12, .little);
                @memcpy(code[r_offset - 4 ..][0..insts.len], &insts);
            },

            else => {
                var err = try diags.addErrorWithNotes(1);
                try err.addMsg("TODO: rewrite {f} when followed by {f}", .{
                    relocation.fmtRelocType(rels[0].r_type(), .x86_64),
                    relocation.fmtRelocType(rels[1].r_type(), .x86_64),
                });
                err.addNote("in {f}:{s} at offset 0x{x}", .{
                    self.file(elf_file).?.fmtPath(),
                    self.name(elf_file),
                    rels[0].r_offset,
                });
                return error.RelaxFailure;
            },
        }
    }

    fn relaxTlsLdToLe(
        self: Atom,
        rels: []const elf.Elf64_Rela,
        value: i32,
        elf_file: *Elf,
        code: []u8,
        r_offset: usize,
    ) !void {
        dev.check(.x86_64_backend);
        assert(rels.len == 2);
        const diags = &elf_file.base.comp.link_diags;
        const rel: elf.R_X86_64 = @enumFromInt(rels[1].r_type());
        switch (rel) {
            .PC32,
            .PLT32,
            => {
                var insts = [_]u8{
                    0x31, 0xc0, // xor %eax, %eax
                    0x64, 0x48, 0x8b, 0, // mov %fs:(%rax), %rax
                    0x48, 0x2d, 0, 0, 0, 0, // sub $tls_size, %rax
                };
                std.mem.writeInt(i32, insts[8..][0..4], value, .little);
                @memcpy(code[r_offset - 3 ..][0..insts.len], &insts);
            },

            .GOTPCREL,
            .GOTPCRELX,
            => {
                var insts = [_]u8{
                    0x31, 0xc0, // xor %eax, %eax
                    0x64, 0x48, 0x8b, 0, // mov %fs:(%rax), %rax
                    0x48, 0x2d, 0, 0, 0, 0, // sub $tls_size, %rax
                    0x90, // nop
                };
                std.mem.writeInt(i32, insts[8..][0..4], value, .little);
                @memcpy(code[r_offset - 3 ..][0..insts.len], &insts);
            },

            else => {
                var err = try diags.addErrorWithNotes(1);
                try err.addMsg("TODO: rewrite {f} when followed by {f}", .{
                    relocation.fmtRelocType(rels[0].r_type(), .x86_64),
                    relocation.fmtRelocType(rels[1].r_type(), .x86_64),
                });
                err.addNote("in {f}:{s} at offset 0x{x}", .{
                    self.file(elf_file).?.fmtPath(),
                    self.name(elf_file),
                    rels[0].r_offset,
                });
                return error.RelaxFailure;
            },
        }
    }

    fn canRelaxGotTpOff(code: []const u8, t: *const std.Target) bool {
        dev.check(.x86_64_backend);
        const old_inst = disassemble(code) orelse return false;
        switch (old_inst.encoding.mnemonic) {
            .mov => {
                const inst = Instruction.new(old_inst.prefix, .mov, &.{
                    old_inst.ops[0],
                    // TODO: hack to force imm32s in the assembler
                    .{ .imm = .s(-129) },
                }, t) catch return false;
                var trash: Writer.Discarding = .init(&.{});
                inst.encode(&trash.writer, .{}) catch return false;
                return true;
            },
            else => return false,
        }
    }

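    /// Rewrites an initial-exec TLS access to local-exec: the GOT load
    /// `mov foo@gottpoff(%rip), %reg` becomes `mov $imm32, %reg`; the caller
    /// then patches the immediate with the thread-pointer-relative offset
    /// (S - TP).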
    fn relaxGotTpOff(code: []u8, t: *const std.Target) void {
        dev.check(.x86_64_backend);
        const old_inst = disassemble(code) orelse unreachable;
        switch (old_inst.encoding.mnemonic) {
            .mov => {
                const inst = Instruction.new(old_inst.prefix, .mov, &.{
                    old_inst.ops[0],
                    // TODO: hack to force imm32s in the assembler
                    .{ .imm = .s(-129) },
                }, t) catch unreachable;
                relocs_log.debug("  relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
                encode(&.{inst}, code) catch unreachable;
            },
            else => unreachable,
        }
    }

    fn relaxGotPcTlsDesc(code: []u8, target: *const std.Target) !void {
        dev.check(.x86_64_backend);
        const old_inst = disassemble(code) orelse return error.RelaxFailure;
        switch (old_inst.encoding.mnemonic) {
            .lea => {
                const inst: Instruction = try .new(old_inst.prefix, .mov, &.{
                    old_inst.ops[0],
                    // TODO: hack to force imm32s in the assembler
                    .{ .imm = .s(-129) },
                }, target);
                relocs_log.debug("  relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
                try encode(&.{inst}, code);
            },
            else => return error.RelaxFailure,
        }
    }

    fn relaxTlsGdToLe(
        self: Atom,
        rels: []const elf.Elf64_Rela,
        value: i32,
        elf_file: *Elf,
        code: []u8,
        r_offset: usize,
    ) !void {
        dev.check(.x86_64_backend);
        assert(rels.len == 2);
        const diags = &elf_file.base.comp.link_diags;
        const rel: elf.R_X86_64 = @enumFromInt(rels[1].r_type());
        switch (rel) {
            .PC32,
            .PLT32,
            .GOTPCREL,
            .GOTPCRELX,
            => {
                var insts = [_]u8{
                    0x64, 0x48, 0x8b, 0x04, 0x25, 0, 0, 0, 0, // movq %fs:0,%rax
                    0x48, 0x81, 0xc0, 0, 0, 0, 0, // add $tp_offset, %rax
                };
                std.mem.writeInt(i32, insts[12..][0..4], value, .little);
                @memcpy(code[r_offset - 4 ..][0..insts.len], &insts);
                relocs_log.debug("  relaxing {f} and {f}", .{
                    relocation.fmtRelocType(rels[0].r_type(), .x86_64),
                    relocation.fmtRelocType(rels[1].r_type(), .x86_64),
                });
            },

            else => {
                var err = try diags.addErrorWithNotes(1);
                try err.addMsg("fatal linker error: rewrite {f} when followed by {f}", .{
                    relocation.fmtRelocType(rels[0].r_type(), .x86_64),
                    relocation.fmtRelocType(rels[1].r_type(), .x86_64),
                });
                err.addNote("in {f}:{s} at offset 0x{x}", .{
                    self.file(elf_file).?.fmtPath(),
                    self.name(elf_file),
                    rels[0].r_offset,
                });
                return error.RelaxFailure;
            },
        }
    }

    fn disassemble(code: []const u8) ?Instruction {
        var disas = Disassembler.init(code);
        const inst = disas.next() catch return null;
        return inst;
    }

    fn encode(insts: []const Instruction, code: []u8) !void {
        var writer: Writer = .fixed(code);
        for (insts) |inst| try inst.encode(&writer, .{});
    }

    const bits = @import("../../codegen/x86_64/bits.zig");
    const encoder = @import("../../codegen/x86_64/encoder.zig");
    const Disassembler = @import("../../codegen/x86_64/Disassembler.zig");
    const Immediate = Instruction.Immediate;
    const Instruction = encoder.Instruction;
};

const aarch64 = struct {
    fn scanReloc(
        atom: Atom,
        elf_file: *Elf,
        rel: elf.Elf64_Rela,
        symbol: *Symbol,
        code: ?[]const u8,
        it: *RelocsIterator,
    ) !void {
        _ = code;
        _ = it;

        const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
        const is_dyn_lib = elf_file.isEffectivelyDynLib();

        switch (r_type) {
            .ABS64 => {
                try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
            },

            .ADR_PREL_PG_HI21 => {
                try atom.scanReloc(symbol, rel, pcRelocAction(symbol, elf_file), elf_file);
            },

            .ADR_GOT_PAGE => {
                // TODO: relax if possible
                symbol.flags.needs_got = true;
            },

            .LD64_GOT_LO12_NC,
            .LD64_GOTPAGE_LO15,
            => {
                symbol.flags.needs_got = true;
            },

            .CALL26,
            .JUMP26,
            => {
                if (symbol.flags.import) {
                    symbol.flags.needs_plt = true;
                }
            },

            .TLSLE_ADD_TPREL_HI12,
            .TLSLE_ADD_TPREL_LO12_NC,
            => {
                if (is_dyn_lib) try atom.reportPicError(symbol, rel, elf_file);
            },

            .TLSIE_ADR_GOTTPREL_PAGE21,
            .TLSIE_LD64_GOTTPREL_LO12_NC,
            => {
                symbol.flags.needs_gottp = true;
            },

            .TLSGD_ADR_PAGE21,
            .TLSGD_ADD_LO12_NC,
            => {
                symbol.flags.needs_tlsgd = true;
            },

            .TLSDESC_ADR_PAGE21,
            .TLSDESC_LD64_LO12,
            .TLSDESC_ADD_LO12,
            .TLSDESC_CALL,
            => {
                const should_relax = elf_file.base.isStatic() or (!is_dyn_lib and !symbol.flags.import);
                if (!should_relax) {
                    symbol.flags.needs_tlsdesc = true;
                }
            },

            .ADD_ABS_LO12_NC,
            .ADR_PREL_LO21,
            .LDST8_ABS_LO12_NC,
            .LDST16_ABS_LO12_NC,
            .LDST32_ABS_LO12_NC,
            .LDST64_ABS_LO12_NC,
            .LDST128_ABS_LO12_NC,
            .PREL32,
            .PREL64,
            => {},

            else => try atom.reportUnhandledRelocError(rel, elf_file),
        }
    }

    fn resolveRelocAlloc(
        atom: Atom,
        elf_file: *Elf,
        rel: elf.Elf64_Rela,
        target: *const Symbol,
        args: ResolveArgs,
        it: *RelocsIterator,
        code_buffer: []u8,
    ) (error{ UnexpectedRemainder, DivisionByZero } || RelocError)!void {
        _ = it;

        const diags = &elf_file.base.comp.link_diags;
        const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
        const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
        const code = code_buffer[r_offset..][0..4];
        const file_ptr = atom.file(elf_file).?;

        const P, const A, const S, const GOT, const G, const TP, const DTP = args;
        _ = DTP;

        switch (r_type) {
            .NONE => unreachable,
            .ABS64 => {
                try atom.resolveDynAbsReloc(
                    target,
                    rel,
                    dynAbsRelocAction(target, elf_file),
                    elf_file,
                    code_buffer,
                    r_offset,
                );
            },

            .CALL26,
            .JUMP26,
            => {
                const disp: i28 = math.cast(i28, S + A - P) orelse blk: {
                    const th = atom.thunk(elf_file);
                    const target_index = file_ptr.resolveSymbol(rel.r_sym(), elf_file);
                    const S_ = th.targetAddress(target_index, elf_file);
                    break :blk math.cast(i28, S_ + A - P) orelse return error.Overflow;
                };
                util.writeBranchImm(disp, code);
            },

            .PREL32 => {
                const value = math.cast(i32, S + A - P) orelse return error.Overflow;
                mem.writeInt(u32, code, @bitCast(value), .little);
            },

            .PREL64 => {
                const value = S + A - P;
                mem.writeInt(u64, code_buffer[r_offset..][0..8], @bitCast(value), .little);
            },

            .ADR_PREL_LO21 => {
                const value = math.cast(i21, S + A - P) orelse return error.Overflow;
                util.writeAdrInst(value, code);
            },

            .ADR_PREL_PG_HI21 => {
                // TODO: check for relaxation of ADRP+ADD
                util.writeAdrInst(try util.calcNumberOfPages(P, S + A), code);
            },

            .ADR_GOT_PAGE => if (target.flags.has_got) {
                util.writeAdrInst(try util.calcNumberOfPages(P, G + GOT + A), code);
            } else {
                // TODO: relax
                var err = try diags.addErrorWithNotes(1);
                try err.addMsg("TODO: relax ADR_GOT_PAGE", .{});
                err.addNote("in {f}:{s} at offset 0x{x}", .{
                    atom.file(elf_file).?.fmtPath(),
                    atom.name(elf_file),
                    r_offset,
                });
            },

            .LD64_GOT_LO12_NC => {
                assert(target.flags.has_got);
                const taddr = @as(u64, @intCast(G + GOT + A));
                util.writeLoadStoreRegInst(@divExact(@as(u12, @truncate(taddr)), 8), code);
            },

            .ADD_ABS_LO12_NC => {
                const taddr = @as(u64, @intCast(S + A));
                util.writeAddImmInst(@truncate(taddr), code);
            },

            .LDST8_ABS_LO12_NC,
            .LDST16_ABS_LO12_NC,
            .LDST32_ABS_LO12_NC,
            .LDST64_ABS_LO12_NC,
            .LDST128_ABS_LO12_NC,
            => {
                // TODO: NC means no overflow check
                const taddr = @as(u64, @intCast(S + A));
                const off: u12 = switch (r_type) {
                    .LDST8_ABS_LO12_NC => @truncate(taddr),
                    .LDST16_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 2),
                    .LDST32_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 4),
                    .LDST64_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 8),
                    .LDST128_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 16),
                    else => unreachable,
                };
                util.writeLoadStoreRegInst(off, code);
            },

            .TLSLE_ADD_TPREL_HI12 => {
                const value = math.cast(i12, (S + A - TP) >> 12) orelse
                    return error.Overflow;
                util.writeAddImmInst(@bitCast(value), code);
            },

            .TLSLE_ADD_TPREL_LO12_NC => {
                const value: i12 = @truncate(S + A - TP);
                util.writeAddImmInst(@bitCast(value), code);
            },

            .TLSIE_ADR_GOTTPREL_PAGE21 => {
                const S_ = target.gotTpAddress(elf_file);
                relocs_log.debug("  [{x} => {x}]", .{ P, S_ + A });
                util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
            },

            .TLSIE_LD64_GOTTPREL_LO12_NC => {
                const S_ = target.gotTpAddress(elf_file);
                relocs_log.debug("  [{x} => {x}]", .{ P, S_ + A });
                const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
                util.writeLoadStoreRegInst(off, code);
            },

            .TLSGD_ADR_PAGE21 => {
                const S_ = target.tlsGdAddress(elf_file);
                relocs_log.debug("  [{x} => {x}]", .{ P, S_ + A });
                util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
            },

            .TLSGD_ADD_LO12_NC => {
                const S_ = target.tlsGdAddress(elf_file);
                relocs_log.debug("  [{x} => {x}]", .{ P, S_ + A });
                const off: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
                util.writeAddImmInst(off, code);
            },

            .TLSDESC_ADR_PAGE21 => {
                if (target.flags.has_tlsdesc) {
                    const S_ = target.tlsDescAddress(elf_file);
                    relocs_log.debug("  [{x} => {x}]", .{ P, S_ + A });
                    util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
                } else {
                    relocs_log.debug("  relaxing adrp => nop", .{});
                    util.encoding.Instruction.nop().write(code);
                }
            },

            .TLSDESC_LD64_LO12 => {
                if (target.flags.has_tlsdesc) {
                    const S_ = target.tlsDescAddress(elf_file);
                    relocs_log.debug("  [{x} => {x}]", .{ P, S_ + A });
                    const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
                    util.writeLoadStoreRegInst(off, code);
                } else {
                    relocs_log.debug("  relaxing ldr => nop", .{});
                    util.encoding.Instruction.nop().write(code);
                }
            },

            .TLSDESC_ADD_LO12 => {
                if (target.flags.has_tlsdesc) {
                    const S_ = target.tlsDescAddress(elf_file);
                    relocs_log.debug("  [{x} => {x}]", .{ P, S_ + A });
                    const off: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
                    util.writeAddImmInst(off, code);
                } else {
                    relocs_log.debug("  relaxing add => movz(x0, {x})", .{S + A - TP});
                    const value: u16 = @bitCast(math.cast(i16, (S + A - TP) >> 16) orelse return error.Overflow);
                    util.encoding.Instruction.movz(.x0, value, .{ .lsl = .@"16" }).write(code);
                }
            },

            .TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
                relocs_log.debug("  relaxing br => movk(x0, {x})", .{S + A - TP});
                const value: u16 = @bitCast(@as(i16, @truncate(S + A - TP)));
                util.encoding.Instruction.movk(.x0, value, .{}).write(code);
            },

            else => try atom.reportUnhandledRelocError(rel, elf_file),
        }
    }

    fn resolveRelocNonAlloc(
        atom: Atom,
        elf_file: *Elf,
        rel: elf.Elf64_Rela,
        target: *const Symbol,
        args: ResolveArgs,
        code: []u8,
    ) !void {
        const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());

        _, const A, const S, _, _, _, _ = args;

        var writer: Writer = .fixed(code);
        switch (r_type) {
            .NONE => unreachable,
            .ABS32 => try writer.writeInt(i32, @as(i32, @intCast(S + A)), .little),
            .ABS64 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
                try writer.writeInt(u64, value, .little)
            else
                try writer.writeInt(i64, S + A, .little),
            else => try atom.reportUnhandledRelocError(rel, elf_file),
        }
    }

    const util = @import("../aarch64.zig");
};
1787
1788const riscv = struct {
1789 fn scanReloc(
1790 atom: Atom,
1791 elf_file: *Elf,
1792 rel: elf.Elf64_Rela,
1793 symbol: *Symbol,
1794 code: ?[]const u8,
1795 it: *RelocsIterator,
1796 ) !void {
1797 _ = code;
1798 _ = it;
1799
1800 const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
1801
1802 switch (r_type) {
1803 .@"32" => try atom.scanReloc(symbol, rel, absRelocAction(symbol, elf_file), elf_file),
1804 .@"64" => try atom.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file),
1805 .HI20 => try atom.scanReloc(symbol, rel, absRelocAction(symbol, elf_file), elf_file),
1806
1807 .CALL_PLT => if (symbol.flags.import) {
1808 symbol.flags.needs_plt = true;
1809 },
1810 .GOT_HI20 => symbol.flags.needs_got = true,
1811
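            // Everything below is resolved entirely at write time (PC-relative
            // pairs, absolute low parts, TLS local-exec, and pairwise add/sub
            // forms), so scanning records nothing for these.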
            .TPREL_HI20,
            .TPREL_LO12_I,
            .TPREL_LO12_S,
            .TPREL_ADD,

            .PCREL_HI20,
            .PCREL_LO12_I,
            .PCREL_LO12_S,
            .LO12_I,
            .LO12_S,
            .ADD32,
            .SUB32,

            .SUB_ULEB128,
            .SET_ULEB128,
            => {},

            else => try atom.reportUnhandledRelocError(rel, elf_file),
        }
    }

    fn resolveRelocAlloc(
        atom: Atom,
        elf_file: *Elf,
        rel: elf.Elf64_Rela,
        target: *const Symbol,
        args: ResolveArgs,
        it: *RelocsIterator,
        code: []u8,
    ) !void {
        const diags = &elf_file.base.comp.link_diags;
        const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
        const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;

        const P, const A, const S, const GOT, const G, const TP, const DTP = args;
        _ = TP;
        _ = DTP;

        switch (r_type) {
            .NONE => unreachable,

            .@"32" => mem.writeInt(u32, code[r_offset..][0..4], @as(u32, @truncate(@as(u64, @intCast(S + A)))), .little),

            .@"64" => {
                try atom.resolveDynAbsReloc(
                    target,
                    rel,
                    dynAbsRelocAction(target, elf_file),
                    elf_file,
                    code,
                    r_offset,
                );
            },

            .ADD32 => riscv_util.writeAddend(i32, .add, code[r_offset..][0..4], S + A),
            .SUB32 => riscv_util.writeAddend(i32, .sub, code[r_offset..][0..4], S + A),

            .HI20 => {
                const value: u32 = @bitCast(math.cast(i32, S + A) orelse return error.Overflow);
                riscv_util.writeInstU(code[r_offset..][0..4], value);
            },

            .GOT_HI20 => {
                assert(target.flags.has_got);
                const disp: u32 = @bitCast(math.cast(i32, G + GOT + A - P) orelse return error.Overflow);
                riscv_util.writeInstU(code[r_offset..][0..4], disp);
            },

            .CALL_PLT => {
                // TODO: relax
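                // Both instructions are patched from the same 32-bit
                // displacement; the U/I-type helpers extract the upper-20 and
                // lower-12 encodings respectively.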
                const disp: u32 = @bitCast(math.cast(i32, S + A - P) orelse return error.Overflow);
                riscv_util.writeInstU(code[r_offset..][0..4], disp); // auipc
                riscv_util.writeInstI(code[r_offset + 4 ..][0..4], disp); // jalr
            },

            .PCREL_HI20 => {
                const disp: u32 = @bitCast(math.cast(i32, S + A - P) orelse return error.Overflow);
                riscv_util.writeInstU(code[r_offset..][0..4], disp);
            },

            .PCREL_LO12_I,
            .PCREL_LO12_S,
            => {
                assert(A == 0); // according to the spec
                // We need to find the paired reloc for this relocation.
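                // The LO12 reloc's own symbol (S) points at the instruction
                // carrying the matching HI20/GOT_HI20, so walk backwards until
                // a reloc's address (atom base + r_offset) equals S, then redo
                // that HI20's computation; its low 12 bits belong here.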
                const file_ptr = atom.file(elf_file).?;
                const atom_addr = atom.address(elf_file);
                const pos = it.pos;
                const pair = while (it.prev()) |pair| {
                    if (S == atom_addr + @as(i64, @intCast(pair.r_offset))) break pair;
                } else {
                    // TODO: implement searching forward
                    var err = try diags.addErrorWithNotes(1);
                    try err.addMsg("TODO: find HI20 paired reloc scanning forward", .{});
                    err.addNote("in {f}:{s} at offset 0x{x}", .{
                        atom.file(elf_file).?.fmtPath(),
                        atom.name(elf_file),
                        rel.r_offset,
                    });
                    return error.RelocFailure;
                };
                it.pos = pos;
                const target_ref_ = file_ptr.resolveSymbol(pair.r_sym(), elf_file);
                const target_ = elf_file.symbol(target_ref_).?;
                const S_ = target_.address(.{}, elf_file);
                const A_ = pair.r_addend;
                const P_ = atom_addr + @as(i64, @intCast(pair.r_offset));
                const G_ = target_.gotAddress(elf_file) - GOT;
                const disp = switch (@as(elf.R_RISCV, @enumFromInt(pair.r_type()))) {
                    .PCREL_HI20 => math.cast(i32, S_ + A_ - P_) orelse return error.Overflow,
                    .GOT_HI20 => math.cast(i32, G_ + GOT + A_ - P_) orelse return error.Overflow,
                    else => unreachable,
                };
                relocs_log.debug(" [{x} => {x}]", .{ P_, disp + P_ });
                switch (r_type) {
                    .PCREL_LO12_I => riscv_util.writeInstI(code[r_offset..][0..4], @bitCast(disp)),
                    .PCREL_LO12_S => riscv_util.writeInstS(code[r_offset..][0..4], @bitCast(disp)),
                    else => unreachable,
                }
            },

            .LO12_I,
            .LO12_S,
            => {
                const disp: u32 = @bitCast(math.cast(i32, S + A) orelse return error.Overflow);
                switch (r_type) {
                    .LO12_I => riscv_util.writeInstI(code[r_offset..][0..4], disp),
                    .LO12_S => riscv_util.writeInstS(code[r_offset..][0..4], disp),
                    else => unreachable,
                }
            },

            .TPREL_HI20 => {
                const target_addr: u32 = @intCast(target.address(.{}, elf_file));
                const val: i32 = @intCast(S + A - target_addr);
                riscv_util.writeInstU(code[r_offset..][0..4], @bitCast(val));
            },

            .TPREL_LO12_I,
            .TPREL_LO12_S,
            => {
                const target_addr: u32 = @intCast(target.address(.{}, elf_file));
                const val: i32 = @intCast(S + A - target_addr);
                switch (r_type) {
                    .TPREL_LO12_I => riscv_util.writeInstI(code[r_offset..][0..4], @bitCast(val)),
                    .TPREL_LO12_S => riscv_util.writeInstS(code[r_offset..][0..4], @bitCast(val)),
                    else => unreachable,
                }
            },

            .TPREL_ADD => {
                // TODO: this reloc annotates an ADD instruction that can be
                // removed once TPREL relaxation is implemented; until then
                // there is nothing to patch.
            },

            else => try atom.reportUnhandledRelocError(rel, elf_file),
        }
    }

    fn resolveRelocNonAlloc(
        atom: Atom,
        elf_file: *Elf,
        rel: elf.Elf64_Rela,
        target: *const Symbol,
        args: ResolveArgs,
        code: []u8,
    ) !void {
        const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());

        _, const A, const S, const GOT, _, _, const DTP = args;
        _ = GOT;
        _ = DTP;

        switch (r_type) {
            .NONE => unreachable,

            .@"32" => mem.writeInt(i32, code[0..4], @intCast(S + A), .little),
            .@"64" => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
                mem.writeInt(u64, code[0..8], value, .little)
            else
                mem.writeInt(i64, code[0..8], S + A, .little),
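            // Pairwise ADD/SUB relocations accumulate label differences into
            // the existing value in place (common in DWARF and other metadata
            // that encodes a size as the difference of two symbols).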
            .ADD8 => riscv_util.writeAddend(i8, .add, code[0..1], S + A),
            .SUB8 => riscv_util.writeAddend(i8, .sub, code[0..1], S + A),
            .ADD16 => riscv_util.writeAddend(i16, .add, code[0..2], S + A),
            .SUB16 => riscv_util.writeAddend(i16, .sub, code[0..2], S + A),
            .ADD32 => riscv_util.writeAddend(i32, .add, code[0..4], S + A),
            .SUB32 => riscv_util.writeAddend(i32, .sub, code[0..4], S + A),
            .ADD64 => riscv_util.writeAddend(i64, .add, code[0..8], S + A),
            .SUB64 => riscv_util.writeAddend(i64, .sub, code[0..8], S + A),

            .SET8 => mem.writeInt(i8, code[0..1], @as(i8, @truncate(S + A)), .little),
            .SET16 => mem.writeInt(i16, code[0..2], @as(i16, @truncate(S + A)), .little),
            .SET32 => mem.writeInt(i32, code[0..4], @as(i32, @truncate(S + A)), .little),

            .SET6 => riscv_util.writeSetSub6(.set, code[0..1], S + A),
            .SUB6 => riscv_util.writeSetSub6(.sub, code[0..1], S + A),

            .SET_ULEB128 => riscv_util.writeSetUleb(code, S + A),
            .SUB_ULEB128 => riscv_util.writeSubUleb(code, S + A), // psABI: V - (S + A)

            else => try atom.reportUnhandledRelocError(rel, elf_file),
        }
    }

    const riscv_util = @import("../riscv.zig");
};

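/// Relocation inputs in the usual psABI notation, always destructured in this
/// order: P (address of the place being relocated), A (addend), S (symbol
/// address), GOT (GOT base address), G (offset of the symbol's GOT entry),
/// TP (thread pointer), DTP (dynamic thread pointer).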
const ResolveArgs = struct { i64, i64, i64, i64, i64, i64, i64 };

const RelocError = error{
    Overflow,
    OutOfMemory,
    NoSpaceLeft,
    RelocFailure,
    RelaxFailure,
    UnsupportedCpuArch,
};

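/// Cursor over an atom's relocation records. `pos` starts at -1 ("before the
/// first element") so the first `next` yields index 0; `prev` walks backwards,
/// which the RISC-V resolver uses to locate a LO12 reloc's paired HI20.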
const RelocsIterator = struct {
    relocs: []const elf.Elf64_Rela,
    pos: i64 = -1,

    fn next(it: *RelocsIterator) ?elf.Elf64_Rela {
        it.pos += 1;
        if (it.pos >= it.relocs.len) return null;
        return it.relocs[@intCast(it.pos)];
    }

    fn prev(it: *RelocsIterator) ?elf.Elf64_Rela {
        if (it.pos == -1) return null;
        const rel = it.relocs[@intCast(it.pos)];
        it.pos -= 1;
        return rel;
    }

    fn skip(it: *RelocsIterator, num: usize) void {
        assert(num > 0);
        it.pos += @intCast(num);
    }
};

pub const Extra = struct {
    /// Index of the range extension thunk of this atom.
    thunk: u32 = 0,

    /// Start index of FDEs referencing this atom.
    fde_start: u32 = 0,

    /// Count of FDEs referencing this atom.
    fde_count: u32 = 0,

    /// Start index of relocations belonging to this atom.
    rel_index: u32 = 0,

    /// Count of relocations belonging to this atom.
    rel_count: u32 = 0,

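    /// Mirrors `Extra` with every field optional, for call sites that update
    /// only a subset of fields and leave `null` entries unchanged.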
    pub const AsOptionals = struct {
        thunk: ?u32 = null,
        fde_start: ?u32 = null,
        fde_count: ?u32 = null,
        rel_index: ?u32 = null,
        rel_count: ?u32 = null,
    };
};

const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
const log = std.log.scoped(.link);
const math = std.math;
const mem = std.mem;
const relocs_log = std.log.scoped(.link_relocs);
const Allocator = mem.Allocator;
const Writer = std.Io.Writer;

const eh_frame = @import("eh_frame.zig");
const relocation = @import("relocation.zig");

const Atom = @This();
const Elf = @import("../Elf.zig");
const Fde = eh_frame.Fde;
const File = @import("file.zig").File;
const Object = @import("Object.zig");
const Symbol = @import("Symbol.zig");
const Thunk = @import("Thunk.zig");
const ZigObject = @import("ZigObject.zig");
const dev = @import("../../dev.zig");