master
1//! ZigObject encapsulates the state of the incrementally compiled Zig module.
2//! It stores the associated input local and global symbols, allocated atoms,
3//! and any relocations that may have been emitted.
//! Think of this as a fake in-memory object file for the Zig module.
5
/// Raw bytes of this object's emitted file contents; populated by
/// `readFileContents` and consumed when writing an archive.
data: std.ArrayList(u8) = .empty,
/// Externally owned memory.
basename: []const u8,
/// Index of this file within the linker's file list.
index: File.Index,

/// Input ELF symbol table; entries are linked to `symbols` via `esym_index`.
symtab: std.MultiArrayList(ElfSym) = .{},
/// Input string table backing symbol names.
strtab: StringTable = .{},
/// Linker-level symbols owned by this object.
symbols: std.ArrayList(Symbol) = .empty,
/// Flat extra data referenced by `symbols` via `extra_index`.
symbols_extra: std.ArrayList(u32) = .empty,
/// Per-global slot into the linker-wide resolver table (1-based slots).
symbols_resolver: std.ArrayList(Elf.SymbolResolver.Index) = .empty,
/// Indexes into `symbols` for locals; positions here are the "fake" indexes
/// handed out by `newLocalSymbol`.
local_symbols: std.ArrayList(Symbol.Index) = .empty,
/// Indexes into `symbols` for globals; fake indexes carry `global_symbol_bit`.
global_symbols: std.ArrayList(Symbol.Index) = .empty,
globals_lookup: std.AutoHashMapUnmanaged(u32, Symbol.Index) = .empty,

/// Atoms owned by this object; index 0 is the null atom (see `init`).
atoms: std.ArrayList(Atom) = .empty,
/// Indexes of live atoms, in creation order.
atoms_indexes: std.ArrayList(Atom.Index) = .empty,
/// Flat extra data referenced by `atoms` via `extra_index`.
atoms_extra: std.ArrayList(u32) = .empty,
/// One relocation list per atom, addressed by `Atom.relocs_section_index`;
/// index 0 is the null relocs section (see `init`).
relocs: std.ArrayList(std.ArrayList(elf.Elf64_Rela)) = .empty,

num_dynrelocs: u32 = 0,

output_symtab_ctx: Elf.SymtabCtx = .{},
output_ar_state: Archive.ArState = .{},

/// DWARF state; only non-null when debug info is being emitted.
dwarf: ?Dwarf = null,

/// Table of tracked LazySymbols.
lazy_syms: LazySymbolTable = .{},

/// Table of tracked `Nav`s.
navs: NavTable = .{},

/// TLS variables indexed by Atom.Index.
tls_variables: TlsTable = .{},

/// Table of tracked `Uav`s.
uavs: UavTable = .{},

// Dirty flags: set when a section needs rewriting; cleared by `flush`.
debug_info_section_dirty: bool = false,
debug_abbrev_section_dirty: bool = false,
debug_aranges_section_dirty: bool = false,
debug_str_section_dirty: bool = false,
debug_line_section_dirty: bool = false,
debug_line_str_section_dirty: bool = false,
debug_loclists_section_dirty: bool = false,
debug_rnglists_section_dirty: bool = false,
eh_frame_section_dirty: bool = false,

// Cached section-symbol indexes (fake local indexes), created lazily.
text_index: ?Symbol.Index = null,
rodata_index: ?Symbol.Index = null,
data_relro_index: ?Symbol.Index = null,
data_index: ?Symbol.Index = null,
bss_index: ?Symbol.Index = null,
tdata_index: ?Symbol.Index = null,
tbss_index: ?Symbol.Index = null,
eh_frame_index: ?Symbol.Index = null,
debug_info_index: ?Symbol.Index = null,
debug_abbrev_index: ?Symbol.Index = null,
debug_aranges_index: ?Symbol.Index = null,
debug_str_index: ?Symbol.Index = null,
debug_line_index: ?Symbol.Index = null,
debug_line_str_index: ?Symbol.Index = null,
debug_loclists_index: ?Symbol.Index = null,
debug_rnglists_index: ?Symbol.Index = null,

/// Set in a fake symbol index to mark it as referring to `global_symbols`.
pub const global_symbol_bit: u32 = 0x80000000;
/// Mask that strips `global_symbol_bit` from a fake symbol index.
pub const symbol_mask: u32 = 0x7fffffff;
/// Sentinel `st_shndx` meaning "this esym's shndx field holds an atom index".
pub const SHN_ATOM: u16 = 0x100;
74
/// Options accepted by `init`.
/// NOTE(review): both hints are currently discarded by `init` (`_ = options;`)
/// — confirm before relying on them for preallocation.
const InitOptions = struct {
    /// Expected number of symbols; currently unused.
    symbol_count_hint: u64,
    /// Expected size of the program code; currently unused.
    program_code_size_hint: u64,
};
79
/// Initializes this ZigObject: reserves the null atom and null relocs section,
/// registers an STT_FILE symbol for `basename`, and — when DWARF debug info is
/// enabled — creates all debug output sections with their backing atoms and
/// section symbols. Must be called exactly once before any other use.
pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
    _ = options; // hints currently unused
    const comp = elf_file.base.comp;
    const gpa = comp.gpa;
    const ptr_size = elf_file.ptrWidthBytes();

    try self.atoms.append(gpa, .{ .extra_index = try self.addAtomExtra(gpa, .{}) }); // null input section
    try self.relocs.append(gpa, .{}); // null relocs section
    try self.strtab.buffer.append(gpa, 0);

    {
        // Emit an STT_FILE symbol naming this object, per ELF convention.
        const name_off = try self.strtab.insert(gpa, self.basename);
        const symbol_index = try self.newLocalSymbol(gpa, name_off);
        const sym = self.symbol(symbol_index);
        const esym = &self.symtab.items(.elf_sym)[sym.esym_index];
        esym.st_info = elf.STT_FILE;
        esym.st_shndx = elf.SHN_ABS;
    }

    switch (comp.config.debug_format) {
        .strip => {},
        .dwarf => |v| {
            var dwarf = Dwarf.init(&elf_file.base, v);

            // Local helper: create a section symbol plus its backing atom and
            // wire them together (see also `newSymbolWithAtom`).
            const addSectionSymbolWithAtom = struct {
                fn addSectionSymbolWithAtom(
                    zo: *ZigObject,
                    allocator: Allocator,
                    name: [:0]const u8,
                    alignment: Atom.Alignment,
                    shndx: u32,
                ) !Symbol.Index {
                    const name_off = try zo.addString(allocator, name);
                    const sym_index = try zo.addSectionSymbol(allocator, name_off, shndx);
                    const sym = zo.symbol(sym_index);
                    const atom_index = try zo.newAtom(allocator, name_off);
                    const atom_ptr = zo.atom(atom_index).?;
                    atom_ptr.alignment = alignment;
                    atom_ptr.output_section_index = shndx;
                    sym.ref = .{ .index = atom_index, .file = zo.index };
                    zo.symtab.items(.shndx)[sym.esym_index] = atom_index;
                    zo.symtab.items(.elf_sym)[sym.esym_index].st_shndx = SHN_ATOM;
                    return sym_index;
                }
            }.addSectionSymbolWithAtom;

            // Each debug section is created only if absent, so re-entry after
            // an earlier partial init is safe.
            if (self.debug_str_index == null) {
                const osec = try elf_file.addSection(.{
                    .name = try elf_file.insertShString(".debug_str"),
                    .flags = elf.SHF_MERGE | elf.SHF_STRINGS,
                    .entsize = 1,
                    .type = elf.SHT_PROGBITS,
                    .addralign = 1,
                });
                self.debug_str_section_dirty = true;
                self.debug_str_index = try addSectionSymbolWithAtom(self, gpa, ".debug_str", .@"1", osec);
            }

            if (self.debug_info_index == null) {
                const osec = try elf_file.addSection(.{
                    .name = try elf_file.insertShString(".debug_info"),
                    .type = elf.SHT_PROGBITS,
                    .addralign = 1,
                });
                self.debug_info_section_dirty = true;
                self.debug_info_index = try addSectionSymbolWithAtom(self, gpa, ".debug_info", .@"1", osec);
            }

            if (self.debug_abbrev_index == null) {
                const osec = try elf_file.addSection(.{
                    .name = try elf_file.insertShString(".debug_abbrev"),
                    .type = elf.SHT_PROGBITS,
                    .addralign = 1,
                });
                self.debug_abbrev_section_dirty = true;
                self.debug_abbrev_index = try addSectionSymbolWithAtom(self, gpa, ".debug_abbrev", .@"1", osec);
            }

            if (self.debug_aranges_index == null) {
                const osec = try elf_file.addSection(.{
                    .name = try elf_file.insertShString(".debug_aranges"),
                    .type = elf.SHT_PROGBITS,
                    .addralign = 16,
                });
                self.debug_aranges_section_dirty = true;
                self.debug_aranges_index = try addSectionSymbolWithAtom(self, gpa, ".debug_aranges", .@"16", osec);
            }

            if (self.debug_line_index == null) {
                const osec = try elf_file.addSection(.{
                    .name = try elf_file.insertShString(".debug_line"),
                    .type = elf.SHT_PROGBITS,
                    .addralign = 1,
                });
                self.debug_line_section_dirty = true;
                self.debug_line_index = try addSectionSymbolWithAtom(self, gpa, ".debug_line", .@"1", osec);
            }

            if (self.debug_line_str_index == null) {
                const osec = try elf_file.addSection(.{
                    .name = try elf_file.insertShString(".debug_line_str"),
                    .flags = elf.SHF_MERGE | elf.SHF_STRINGS,
                    .entsize = 1,
                    .type = elf.SHT_PROGBITS,
                    .addralign = 1,
                });
                self.debug_line_str_section_dirty = true;
                self.debug_line_str_index = try addSectionSymbolWithAtom(self, gpa, ".debug_line_str", .@"1", osec);
            }

            if (self.debug_loclists_index == null) {
                const osec = try elf_file.addSection(.{
                    .name = try elf_file.insertShString(".debug_loclists"),
                    .type = elf.SHT_PROGBITS,
                    .addralign = 1,
                });
                self.debug_loclists_section_dirty = true;
                self.debug_loclists_index = try addSectionSymbolWithAtom(self, gpa, ".debug_loclists", .@"1", osec);
            }

            if (self.debug_rnglists_index == null) {
                const osec = try elf_file.addSection(.{
                    .name = try elf_file.insertShString(".debug_rnglists"),
                    .type = elf.SHT_PROGBITS,
                    .addralign = 1,
                });
                self.debug_rnglists_section_dirty = true;
                self.debug_rnglists_index = try addSectionSymbolWithAtom(self, gpa, ".debug_rnglists", .@"1", osec);
            }

            if (self.eh_frame_index == null) {
                // x86_64 uses the dedicated SHT_X86_64_UNWIND section type.
                const osec = try elf_file.addSection(.{
                    .name = try elf_file.insertShString(".eh_frame"),
                    .type = if (elf_file.getTarget().cpu.arch == .x86_64)
                        elf.SHT_X86_64_UNWIND
                    else
                        elf.SHT_PROGBITS,
                    .flags = elf.SHF_ALLOC,
                    .addralign = ptr_size,
                });
                self.eh_frame_section_dirty = true;
                self.eh_frame_index = try addSectionSymbolWithAtom(self, gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), osec);
            }

            try dwarf.initMetadata();
            self.dwarf = dwarf;
        },
        .code_view => unreachable,
    }
}
230
/// Frees all memory owned by this ZigObject. `basename` is externally owned
/// (see field docs) and therefore not freed here.
pub fn deinit(self: *ZigObject, allocator: Allocator) void {
    self.data.deinit(allocator);
    self.symtab.deinit(allocator);
    self.strtab.deinit(allocator);
    self.symbols.deinit(allocator);
    self.symbols_extra.deinit(allocator);
    self.symbols_resolver.deinit(allocator);
    self.local_symbols.deinit(allocator);
    self.global_symbols.deinit(allocator);
    self.globals_lookup.deinit(allocator);
    self.atoms.deinit(allocator);
    self.atoms_indexes.deinit(allocator);
    self.atoms_extra.deinit(allocator);
    // Each atom owns a nested reloc list; free those before the outer list.
    for (self.relocs.items) |*list| {
        list.deinit(allocator);
    }
    self.relocs.deinit(allocator);

    // Nav/Uav metadata own their export lists.
    for (self.navs.values()) |*meta| {
        meta.exports.deinit(allocator);
    }
    self.navs.deinit(allocator);

    self.lazy_syms.deinit(allocator);

    for (self.uavs.values()) |*meta| {
        meta.exports.deinit(allocator);
    }
    self.uavs.deinit(allocator);
    self.tls_variables.deinit(allocator);

    if (self.dwarf) |*dwarf| {
        dwarf.deinit();
    }
}
266
/// Commits pending incremental-compilation state: finalizes lazy symbols,
/// flushes DWARF, and converts the DWARF writer's abstract relocations into
/// ELF relocations attached to the debug-section atoms.
pub fn flush(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void {
    // Handle any lazy symbols that were emitted by incremental compilation.
    if (self.lazy_syms.getPtr(.anyerror_type)) |metadata| {
        const pt: Zcu.PerThread = .activate(elf_file.base.comp.zcu.?, tid);
        defer pt.deactivate();

        // Most lazy symbols can be updated on first use, but
        // anyerror needs to wait for everything to be flushed.
        if (metadata.text_state != .unused) self.updateLazySymbol(
            elf_file,
            pt,
            .{ .kind = .code, .ty = .anyerror_type },
            metadata.text_symbol_index,
        ) catch |err| switch (err) {
            error.CodegenFail => return error.LinkFailure,
            else => |e| return e,
        };
        if (metadata.rodata_state != .unused) self.updateLazySymbol(
            elf_file,
            pt,
            .{ .kind = .const_data, .ty = .anyerror_type },
            metadata.rodata_symbol_index,
        ) catch |err| switch (err) {
            error.CodegenFail => return error.LinkFailure,
            else => |e| return e,
        };
    }
    // Mark every used lazy symbol as flushed.
    for (self.lazy_syms.values()) |*metadata| {
        if (metadata.text_state != .unused) metadata.text_state = .flushed;
        if (metadata.rodata_state != .unused) metadata.rodata_state = .flushed;
    }

    if (build_options.enable_logging) {
        const pt: Zcu.PerThread = .activate(elf_file.base.comp.zcu.?, tid);
        defer pt.deactivate();
        for (self.navs.keys(), self.navs.values()) |nav_index, meta| {
            checkNavAllocated(pt, nav_index, meta);
        }
        for (self.uavs.keys(), self.uavs.values()) |uav_index, meta| {
            checkUavAllocated(pt, uav_index, meta);
        }
    }

    if (self.dwarf) |*dwarf| {
        const pt: Zcu.PerThread = .activate(elf_file.base.comp.zcu.?, tid);
        defer pt.deactivate();
        try dwarf.flush(pt);

        const gpa = elf_file.base.comp.gpa;
        const cpu_arch = elf_file.getTarget().cpu.arch;

        // TODO invert this logic so that we manage the output section with the atom, not the
        // other way around
        // Walk each debug section (symbol, DWARF section, section id in
        // lockstep) and materialize its relocations.
        for ([_]u32{
            self.debug_info_index.?,
            self.debug_abbrev_index.?,
            self.debug_str_index.?,
            self.debug_aranges_index.?,
            self.debug_line_index.?,
            self.debug_line_str_index.?,
            self.debug_loclists_index.?,
            self.debug_rnglists_index.?,
            self.eh_frame_index.?,
        }, [_]*Dwarf.Section{
            &dwarf.debug_info.section,
            &dwarf.debug_abbrev.section,
            &dwarf.debug_str.section,
            &dwarf.debug_aranges.section,
            &dwarf.debug_line.section,
            &dwarf.debug_line_str.section,
            &dwarf.debug_loclists.section,
            &dwarf.debug_rnglists.section,
            &dwarf.debug_frame.section,
        }, [_]Dwarf.Section.Index{
            .debug_info,
            .debug_abbrev,
            .debug_str,
            .debug_aranges,
            .debug_line,
            .debug_line_str,
            .debug_loclists,
            .debug_rnglists,
            .debug_frame,
        }) |sym_index, sect, sect_index| {
            const sym = self.symbol(sym_index);
            const atom_ptr = self.atom(sym.ref.index).?;
            if (!atom_ptr.alive) continue;

            const relocs = &self.relocs.items[atom_ptr.relocsShndx().?];
            for (sect.units.items) |*unit| {
                // Unit-level relocations first; capacity is reserved up front
                // so the add calls below cannot fail.
                try relocs.ensureUnusedCapacity(gpa, unit.cross_unit_relocs.items.len +
                    unit.cross_section_relocs.items.len);
                for (unit.cross_unit_relocs.items) |reloc| {
                    const target_unit = sect.getUnit(reloc.target_unit);
                    const r_offset = unit.off + reloc.source_off;
                    // Addend points at the target unit, plus the entry offset
                    // past its header when a specific entry is targeted.
                    const r_addend: i64 = @intCast(target_unit.off + reloc.target_off + (if (reloc.target_entry.unwrap()) |target_entry|
                        target_unit.header_len + target_unit.getEntry(target_entry).assertNonEmpty(target_unit, sect, dwarf).off
                    else
                        0));
                    const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch);
                    atom_ptr.addRelocAssumeCapacity(.{
                        .r_offset = r_offset,
                        .r_addend = r_addend,
                        .r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
                    }, self);
                }
                for (unit.cross_section_relocs.items) |reloc| {
                    const target_sym_index = switch (reloc.target_sec) {
                        .debug_abbrev => self.debug_abbrev_index.?,
                        .debug_aranges => self.debug_aranges_index.?,
                        .debug_frame => self.eh_frame_index.?,
                        .debug_info => self.debug_info_index.?,
                        .debug_line => self.debug_line_index.?,
                        .debug_line_str => self.debug_line_str_index.?,
                        .debug_loclists => self.debug_loclists_index.?,
                        .debug_rnglists => self.debug_rnglists_index.?,
                        .debug_str => self.debug_str_index.?,
                    };
                    const target_sec = switch (reloc.target_sec) {
                        inline else => |target_sec| &@field(dwarf, @tagName(target_sec)).section,
                    };
                    const target_unit = target_sec.getUnit(reloc.target_unit);
                    const r_offset = unit.off + reloc.source_off;
                    const r_addend: i64 = @intCast(target_unit.off + reloc.target_off + (if (reloc.target_entry.unwrap()) |target_entry|
                        target_unit.header_len + target_unit.getEntry(target_entry).assertNonEmpty(target_unit, sect, dwarf).off
                    else
                        0));
                    const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch);
                    atom_ptr.addRelocAssumeCapacity(.{
                        .r_offset = r_offset,
                        .r_addend = r_addend,
                        .r_info = (@as(u64, @intCast(target_sym_index)) << 32) | r_type,
                    }, self);
                }

                // Then entry-level relocations within this unit.
                for (unit.entries.items) |*entry| {
                    const entry_off = unit.off + unit.header_len + entry.off;

                    try relocs.ensureUnusedCapacity(gpa, entry.cross_entry_relocs.items.len +
                        entry.cross_unit_relocs.items.len + entry.cross_section_relocs.items.len +
                        entry.external_relocs.items.len);
                    for (entry.cross_entry_relocs.items) |reloc| {
                        const r_offset = entry_off + reloc.source_off;
                        const r_addend: i64 = @intCast(unit.off + reloc.target_off + (if (reloc.target_entry.unwrap()) |target_entry|
                            unit.header_len + unit.getEntry(target_entry).assertNonEmpty(unit, sect, dwarf).off
                        else
                            0));
                        const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch);
                        atom_ptr.addRelocAssumeCapacity(.{
                            .r_offset = r_offset,
                            .r_addend = r_addend,
                            .r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
                        }, self);
                    }
                    for (entry.cross_unit_relocs.items) |reloc| {
                        const target_unit = sect.getUnit(reloc.target_unit);
                        const r_offset = entry_off + reloc.source_off;
                        const r_addend: i64 = @intCast(target_unit.off + reloc.target_off + (if (reloc.target_entry.unwrap()) |target_entry|
                            target_unit.header_len + target_unit.getEntry(target_entry).assertNonEmpty(target_unit, sect, dwarf).off
                        else
                            0));
                        const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch);
                        atom_ptr.addRelocAssumeCapacity(.{
                            .r_offset = r_offset,
                            .r_addend = r_addend,
                            .r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
                        }, self);
                    }
                    for (entry.cross_section_relocs.items) |reloc| {
                        const target_sym_index = switch (reloc.target_sec) {
                            .debug_abbrev => self.debug_abbrev_index.?,
                            .debug_aranges => self.debug_aranges_index.?,
                            .debug_frame => self.eh_frame_index.?,
                            .debug_info => self.debug_info_index.?,
                            .debug_line => self.debug_line_index.?,
                            .debug_line_str => self.debug_line_str_index.?,
                            .debug_loclists => self.debug_loclists_index.?,
                            .debug_rnglists => self.debug_rnglists_index.?,
                            .debug_str => self.debug_str_index.?,
                        };
                        const target_sec = switch (reloc.target_sec) {
                            inline else => |target_sec| &@field(dwarf, @tagName(target_sec)).section,
                        };
                        const target_unit = target_sec.getUnit(reloc.target_unit);
                        const r_offset = entry_off + reloc.source_off;
                        const r_addend: i64 = @intCast(target_unit.off + reloc.target_off + (if (reloc.target_entry.unwrap()) |target_entry|
                            target_unit.header_len + target_unit.getEntry(target_entry).assertNonEmpty(target_unit, sect, dwarf).off
                        else
                            0));
                        const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch);
                        atom_ptr.addRelocAssumeCapacity(.{
                            .r_offset = r_offset,
                            .r_addend = r_addend,
                            .r_info = (@as(u64, @intCast(target_sym_index)) << 32) | r_type,
                        }, self);
                    }
                    for (entry.external_relocs.items) |reloc| {
                        const target_sym = self.symbol(reloc.target_sym);
                        const r_offset = entry_off + reloc.source_off;
                        const r_addend: i64 = @intCast(reloc.target_off);
                        const r_type = relocation.dwarf.externalRelocType(target_sym.*, sect_index, dwarf.address_size, cpu_arch);
                        atom_ptr.addRelocAssumeCapacity(.{
                            .r_offset = r_offset,
                            .r_addend = r_addend,
                            .r_info = (@as(u64, @intCast(reloc.target_sym)) << 32) | r_type,
                        }, self);
                    }
                }
            }
        }

        self.debug_abbrev_section_dirty = false;
        self.debug_aranges_section_dirty = false;
        self.debug_rnglists_section_dirty = false;
        self.debug_str_section_dirty = false;
    }

    // The point of flush() is to commit changes, so in theory, nothing should
    // be dirty after this. However, it is possible for some things to remain
    // dirty because they fail to be written in the event of compile errors,
    // such as debug_line_header_dirty and debug_info_header_dirty.
    assert(!self.debug_abbrev_section_dirty);
    assert(!self.debug_aranges_section_dirty);
    assert(!self.debug_rnglists_section_dirty);
    assert(!self.debug_str_section_dirty);
}
493
/// Allocates a new input symbol with the given name and ELF binding.
/// Adds matching entries to `symbols`, `symbols_extra`, and `symtab`, links
/// them via `esym_index`, and returns the index into `symbols`.
fn newSymbol(self: *ZigObject, allocator: Allocator, name_off: u32, st_bind: u4) !Symbol.Index {
    try self.symtab.ensureUnusedCapacity(allocator, 1);
    try self.symbols.ensureUnusedCapacity(allocator, 1);
    try self.symbols_extra.ensureUnusedCapacity(allocator, @sizeOf(Symbol.Extra));

    const index = self.addSymbolAssumeCapacity();
    const sym = &self.symbols.items[index];
    sym.name_offset = name_off;
    sym.extra_index = self.addSymbolExtraAssumeCapacity(.{});

    const esym_idx: u32 = @intCast(self.symtab.addOneAssumeCapacity());
    const esym = ElfSym{ .elf_sym = .{
        .st_value = 0,
        .st_name = name_off,
        .st_info = @as(u8, @intCast(st_bind)) << 4, // binding in high nibble, STT_NOTYPE in low
        .st_other = 0,
        .st_size = 0,
        .st_shndx = 0,
    } };
    // Fix: write the new ELF symbol into its own freshly reserved `symtab`
    // slot (`esym_idx`), not at the `symbols` index. `sym.esym_index` is set
    // to `esym_idx` below, so writing anywhere else would leave the slot the
    // symbol actually points at uninitialized whenever the tables diverge.
    self.symtab.set(esym_idx, esym);
    sym.esym_index = esym_idx;

    return index;
}
518
/// Creates a new STB_LOCAL symbol and registers it in `local_symbols`.
/// Returns the "fake" index, i.e. the position within `local_symbols`.
fn newLocalSymbol(self: *ZigObject, allocator: Allocator, name_off: u32) !Symbol.Index {
    try self.local_symbols.ensureUnusedCapacity(allocator, 1);
    const real_index = try self.newSymbol(allocator, name_off, elf.STB_LOCAL);
    const fake_index: Symbol.Index = @intCast(self.local_symbols.items.len);
    self.local_symbols.appendAssumeCapacity(real_index);
    return fake_index;
}
526
/// Creates a new STB_GLOBAL symbol, registers it in `global_symbols`, and
/// reserves a (zeroed) resolver slot for it. Returns the fake index with
/// `global_symbol_bit` set so callers can tell it apart from locals.
fn newGlobalSymbol(self: *ZigObject, allocator: Allocator, name_off: u32) !Symbol.Index {
    try self.global_symbols.ensureUnusedCapacity(allocator, 1);
    try self.symbols_resolver.ensureUnusedCapacity(allocator, 1);
    const real_index = try self.newSymbol(allocator, name_off, elf.STB_GLOBAL);
    const fake_index: Symbol.Index = @intCast(self.global_symbols.items.len);
    self.global_symbols.appendAssumeCapacity(real_index);
    self.symbols_resolver.appendAssumeCapacity(0);
    return fake_index | global_symbol_bit;
}
536
/// Creates a new atom together with its own (initially empty) relocation
/// list, and records it in `atoms_indexes`.
fn newAtom(self: *ZigObject, allocator: Allocator, name_off: u32) !Atom.Index {
    try self.atoms.ensureUnusedCapacity(allocator, 1);
    try self.atoms_extra.ensureUnusedCapacity(allocator, @sizeOf(Atom.Extra));
    try self.atoms_indexes.ensureUnusedCapacity(allocator, 1);
    try self.relocs.ensureUnusedCapacity(allocator, 1);

    const atom_index = self.addAtomAssumeCapacity();
    self.atoms_indexes.appendAssumeCapacity(atom_index);
    const atom_ptr = self.atom(atom_index).?;
    atom_ptr.name_offset = name_off;

    // Reserve a fresh relocation list and remember where it lives.
    atom_ptr.relocs_section_index = @intCast(self.relocs.items.len);
    self.relocs.appendAssumeCapacity(.{});

    return atom_index;
}
554
/// Creates a local symbol backed by a fresh atom and wires the two together:
/// the symbol's ref points at the atom, and the input esym stores the atom
/// index via the special `SHN_ATOM` section index.
fn newSymbolWithAtom(self: *ZigObject, allocator: Allocator, name_off: u32) !Symbol.Index {
    const atom_index = try self.newAtom(allocator, name_off);
    const sym_index = try self.newLocalSymbol(allocator, name_off);
    const sym = self.symbol(sym_index);
    sym.ref = .{ .index = atom_index, .file = self.index };
    const esym_index = sym.esym_index;
    self.symtab.items(.shndx)[esym_index] = atom_index;
    self.symtab.items(.elf_sym)[esym_index].st_shndx = SHN_ATOM;
    return sym_index;
}
564
/// Synthesizes an input section header for the given atom by copying the
/// output section's header and overriding the fields that are atom-specific.
/// TODO actually create fake input shdrs and return that instead.
pub fn inputShdr(self: *ZigObject, atom_index: Atom.Index, elf_file: *Elf) elf.Elf64_Shdr {
    const atom_ptr = self.atom(atom_index) orelse return Elf.null_shdr;
    var result = elf_file.sections.items(.shdr)[atom_ptr.output_section_index];
    result.sh_addr = 0;
    result.sh_offset = 0;
    result.sh_size = atom_ptr.size;
    result.sh_addralign = atom_ptr.alignment.toByteUnits() orelse 1;
    return result;
}
576
/// Resolves this object's globals against the linker-wide resolver table,
/// claiming ownership of a name whenever our definition outranks the one
/// currently recorded.
pub fn resolveSymbols(self: *ZigObject, elf_file: *Elf) !void {
    const gpa = elf_file.base.comp.gpa;

    for (self.global_symbols.items, 0..) |index, i| {
        const global = &self.symbols.items[index];
        const esym = global.elfSym(elf_file);
        const shndx = self.symtab.items(.shndx)[global.esym_index];
        const resolv = &self.symbols_resolver.items[i];
        const gop = try elf_file.resolver.getOrPut(gpa, .{
            .index = @intCast(i | global_symbol_bit),
            .file = self.index,
        }, elf_file);
        if (!gop.found_existing) {
            // First sighting of this name anywhere: start with a null ref.
            gop.ref.* = .{ .index = 0, .file = 0 };
        }
        resolv.* = gop.index;

        // An undefined symbol can never claim ownership.
        if (esym.st_shndx == elf.SHN_UNDEF) continue;
        if (esym.st_shndx != elf.SHN_ABS and esym.st_shndx != elf.SHN_COMMON) {
            assert(esym.st_shndx == SHN_ATOM);
            // Skip definitions whose backing atom is missing or dead.
            const atom_ptr = self.atom(shndx) orelse continue;
            if (!atom_ptr.alive) continue;
        }
        if (elf_file.symbol(gop.ref.*) == null) {
            // No prior definition: claim the name outright.
            gop.ref.* = .{ .index = @intCast(i | global_symbol_bit), .file = self.index };
            continue;
        }

        // Lower rank means a stronger definition; replace on strict win.
        if (self.asFile().symbolRank(esym, false) < elf_file.symbol(gop.ref.*).?.symbolRank(elf_file)) {
            gop.ref.* = .{ .index = @intCast(i | global_symbol_bit), .file = self.index };
        }
    }
}
610
/// Claims any globals that remained undefined after symbol resolution,
/// recording this object as their owner and marking them as dynamic imports
/// where appropriate.
pub fn claimUnresolved(self: *ZigObject, elf_file: *Elf) void {
    for (self.global_symbols.items, 0..) |index, i| {
        const global = &self.symbols.items[index];
        // NOTE(review): this indexes `symtab` with the `symbols` index,
        // relying on the two tables staying in lockstep — confirm.
        const esym = self.symtab.items(.elf_sym)[index];
        if (esym.st_shndx != elf.SHN_UNDEF) continue;
        // Resolved by some other file? Nothing to claim then.
        if (elf_file.symbol(self.resolveSymbol(@intCast(i | global_symbol_bit), elf_file)) != null) continue;

        // When producing an (effective) dynamic library, non-hidden undefined
        // symbols become imports rather than hard errors.
        const is_import = blk: {
            if (!elf_file.isEffectivelyDynLib()) break :blk false;
            const vis: elf.STV = @enumFromInt(@as(u3, @truncate(esym.st_other)));
            if (vis == .HIDDEN) break :blk false;
            break :blk true;
        };

        global.value = 0;
        global.ref = .{ .index = 0, .file = 0 };
        global.esym_index = @intCast(index);
        global.file_index = self.index;
        global.version_index = if (is_import) .LOCAL else elf_file.default_sym_version;
        global.flags.import = is_import;

        // Resolver slots are 1-based; record ourselves as the resolution.
        const idx = self.symbols_resolver.items[i];
        elf_file.resolver.values.items[idx - 1] = .{ .index = @intCast(i | global_symbol_bit), .file = self.index };
    }
}
636
/// Like `claimUnresolved`, but for relocatable (`-r`) output: undefined
/// globals are claimed without assigning any version or import information.
pub fn claimUnresolvedRelocatable(self: ZigObject, elf_file: *Elf) void {
    for (self.global_symbols.items, 0..) |sym_index, i| {
        const esym = self.symtab.items(.elf_sym)[sym_index];
        if (esym.st_shndx != elf.SHN_UNDEF) continue;
        if (elf_file.symbol(self.resolveSymbol(@intCast(i | global_symbol_bit), elf_file)) != null) continue;

        const global = &self.symbols.items[sym_index];
        global.value = 0;
        global.ref = .{ .index = 0, .file = 0 };
        global.esym_index = @intCast(sym_index);
        global.file_index = self.index;

        // Resolver slots are 1-based; claim this one for ourselves.
        const resolv = self.symbols_resolver.items[i];
        elf_file.resolver.values.items[resolv - 1] = .{ .index = @intCast(i | global_symbol_bit), .file = self.index };
    }
}
653
/// Scans relocations of every live, allocated atom, fetching the generated
/// code only for atoms whose relocation scan requires it.
pub fn scanRelocs(self: *ZigObject, elf_file: *Elf, undefs: anytype) !void {
    const gpa = elf_file.base.comp.gpa;
    for (self.atoms_indexes.items) |atom_index| {
        const atom_ptr = self.atom(atom_index) orelse continue;
        if (!atom_ptr.alive) continue;
        const shdr = atom_ptr.inputShdr(elf_file);
        if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
        if (shdr.sh_type == elf.SHT_NOBITS) continue;
        if (!atom_ptr.scanRelocsRequiresCode(elf_file)) {
            try atom_ptr.scanRelocs(elf_file, null, undefs);
            continue;
        }
        // TODO ideally we don't have to fetch the code here.
        // Perhaps it would make sense to save the code until flush where we
        // would free all of generated code?
        const code = try self.codeAlloc(elf_file, atom_index);
        defer gpa.free(code);
        try atom_ptr.scanRelocs(elf_file, code, undefs);
    }
}
672
/// Garbage-collection root marking: any file that provides a definition we
/// depend on (an undefined reference, or a resolved common) is kept alive.
pub fn markLive(self: *ZigObject, elf_file: *Elf) void {
    for (self.global_symbols.items, 0..) |sym_index, i| {
        const esym = self.symtab.items(.elf_sym)[sym_index];
        // Weak references alone do not keep a file alive.
        if (esym.st_bind() == elf.STB_WEAK) continue;

        const ref = self.resolveSymbol(@intCast(i | global_symbol_bit), elf_file);
        const sym = elf_file.symbol(ref) orelse continue;
        const file = sym.file(elf_file).?;
        if (file.isAlive()) continue;

        const global = self.symbols.items[sym_index];
        const undefined_here = esym.st_shndx == elf.SHN_UNDEF;
        const common_resolved_elsewhere = esym.st_shndx == elf.SHN_COMMON and
            global.elfSym(elf_file).st_shndx != elf.SHN_COMMON;
        if (undefined_here or common_resolved_elsewhere) {
            file.setAlive();
            file.markLive(elf_file);
        }
    }
}
690
/// Decides, for every resolved global, whether it is a dynamic import and/or
/// an export from this object, based on provenance and ELF visibility.
pub fn markImportsExports(self: *ZigObject, elf_file: *Elf) void {
    for (0..self.global_symbols.items.len) |i| {
        const ref = self.resolveSymbol(@intCast(i | global_symbol_bit), elf_file);
        const sym = elf_file.symbol(ref) orelse continue;
        const file = sym.file(elf_file).?;
        // https://github.com/ziglang/zig/issues/21678
        if (@as(u16, @bitCast(sym.version_index)) == @as(u16, @bitCast(elf.Versym.LOCAL))) continue;
        const vis: elf.STV = @enumFromInt(@as(u3, @truncate(sym.elfSym(elf_file).st_other)));
        if (vis == .HIDDEN) continue;
        if (file == .shared_object and !sym.isAbs(elf_file)) {
            // Defined in a shared object: must be imported at runtime.
            sym.flags.import = true;
        } else if (file.index() == self.index) {
            // Defined here: export it, and when building a dynamic library,
            // non-protected symbols remain interposable (import too).
            sym.flags.@"export" = true;
            if (elf_file.isEffectivelyDynLib() and vis != .PROTECTED) {
                sym.flags.import = true;
            }
        }
    }
}
712
/// Records duplicate-definition candidates: any live, defined, non-weak,
/// non-common global of ours whose name resolved to a different file.
pub fn checkDuplicates(self: *ZigObject, dupes: anytype, elf_file: *Elf) error{OutOfMemory}!void {
    for (self.global_symbols.items, 0..) |sym_index, i| {
        const esym = self.symtab.items(.elf_sym)[sym_index];
        const shndx = self.symtab.items(.shndx)[sym_index];
        const ref = self.resolveSymbol(@intCast(i | global_symbol_bit), elf_file);
        const ref_sym = elf_file.symbol(ref) orelse continue;
        const ref_file = ref_sym.file(elf_file).?;

        // Only a defined, non-weak, non-common symbol that lost resolution to
        // another file can be a duplicate.
        if (self.index == ref_file.index()) continue;
        if (esym.st_shndx == elf.SHN_UNDEF) continue;
        if (esym.st_bind() == elf.STB_WEAK) continue;
        if (esym.st_shndx == elf.SHN_COMMON) continue;

        if (esym.st_shndx == SHN_ATOM) {
            const atom_ptr = self.atom(shndx) orelse continue;
            if (!atom_ptr.alive) continue;
        }

        const gop = try dupes.getOrPut(self.symbols_resolver.items[i]);
        if (!gop.found_existing) gop.value_ptr.* = .{};
        try gop.value_ptr.append(elf_file.base.comp.gpa, self.index);
    }
}
738
/// This is just a temporary helper function that allows us to re-read what we wrote to file into a buffer.
/// We need this so that we can write to an archive.
/// TODO implement writing ZigObject data directly to a buffer instead.
pub fn readFileContents(self: *ZigObject, elf_file: *Elf) !void {
    const gpa = elf_file.base.comp.gpa;
    const shsize: u64 = switch (elf_file.ptr_width) {
        .p32 => @sizeOf(elf.Elf32_Shdr),
        .p64 => @sizeOf(elf.Elf64_Shdr),
    };
    // The object ends at either the section header table or the end of the
    // furthest section's data, whichever is greater.
    const shdrs = elf_file.sections.items(.shdr);
    var end_pos: u64 = elf_file.shdr_table_offset.? + shdrs.len * shsize;
    for (shdrs) |shdr| {
        if (shdr.sh_type == elf.SHT_NOBITS) continue;
        end_pos = @max(end_pos, shdr.sh_offset + shdr.sh_size);
    }
    const size = std.math.cast(usize, end_pos) orelse return error.Overflow;
    try self.data.resize(gpa, size);

    const amt = try elf_file.base.file.?.preadAll(self.data.items, 0);
    if (amt != size) return error.InputOutput;
}
759
/// Populates the archive symbol table with every global that this object both
/// owns (won resolution for) and places in an output section.
pub fn updateArSymtab(self: ZigObject, ar_symtab: *Archive.ArSymtab, elf_file: *Elf) error{OutOfMemory}!void {
    const gpa = elf_file.base.comp.gpa;

    try ar_symtab.symtab.ensureUnusedCapacity(gpa, self.global_symbols.items.len);

    for (self.global_symbols.items, 0..) |sym_index, i| {
        const ref = self.resolveSymbol(@intCast(i | global_symbol_bit), elf_file);
        const resolved = elf_file.symbol(ref).?;
        assert(resolved.file(elf_file).?.index() == self.index);

        const global = self.symbols.items[sym_index];
        if (global.outputShndx(elf_file) == null) continue;

        const name_off = try ar_symtab.strtab.insert(gpa, global.name(elf_file));
        ar_symtab.symtab.appendAssumeCapacity(.{ .off = name_off, .file_index = self.index });
    }
}
776
/// Records the current object size for the archive member header.
pub fn updateArSize(self: *ZigObject) void {
    const object_size = self.data.items.len;
    self.output_ar_state.size = object_size;
}
780
/// Writes this object as an archive member: header followed by the raw file
/// contents captured earlier by `readFileContents`.
pub fn writeAr(self: ZigObject, writer: anytype) !void {
    const name = self.basename;
    const fits_inline = name.len <= Archive.max_member_name_len;
    // Short names are stored inline in the header; long names go through the
    // long-name table via the precomputed offset.
    const hdr = Archive.setArHdr(.{
        .name = if (fits_inline)
            .{ .name = name }
        else
            .{ .name_off = self.output_ar_state.name_off },
        .size = self.data.items.len,
    });
    try writer.writeAll(mem.asBytes(&hdr));
    try writer.writeAll(self.data.items);
}
793
/// Ensures an output `.rela<name>` section exists for every live atom that
/// carries relocations (except `.eh_frame`, which is handled separately).
pub fn initRelaSections(self: *ZigObject, elf_file: *Elf) !void {
    const gpa = elf_file.base.comp.gpa;
    for (self.atoms_indexes.items) |atom_index| {
        const atom_ptr = self.atom(atom_index) orelse continue;
        if (!atom_ptr.alive) continue;
        if (atom_ptr.output_section_index == elf_file.section_indexes.eh_frame) continue;
        const rela_shndx = atom_ptr.relocsShndx() orelse continue;
        // TODO this check will become obsolete when we rework our relocs mechanism at the ZigObject level
        if (self.relocs.items[rela_shndx].items.len == 0) continue;
        const out_shndx = atom_ptr.output_section_index;
        const out_shdr = elf_file.sections.items(.shdr)[out_shndx];
        if (out_shdr.sh_type == elf.SHT_NOBITS) continue;

        const rela_sect_name = try std.fmt.allocPrintSentinel(gpa, ".rela{s}", .{
            elf_file.getShString(out_shdr.sh_name),
        }, 0);
        defer gpa.free(rela_sect_name);
        // Create the rela section only when it does not exist yet.
        if (elf_file.sectionByName(rela_sect_name) == null) {
            _ = try elf_file.addRelaShdr(try elf_file.insertShString(rela_sect_name), out_shndx);
        }
    }
}
814
/// Registers every live atom that carries relocations with its matching
/// `.rela<name>` output section (created earlier by `initRelaSections`), and
/// fixes up that section's `sh_info`/`sh_link` fields.
pub fn addAtomsToRelaSections(self: *ZigObject, elf_file: *Elf) !void {
    const gpa = elf_file.base.comp.gpa;
    for (self.atoms_indexes.items) |atom_index| {
        const atom_ptr = self.atom(atom_index) orelse continue;
        if (!atom_ptr.alive) continue;
        if (atom_ptr.output_section_index == elf_file.section_indexes.eh_frame) continue;
        const rela_shndx = atom_ptr.relocsShndx() orelse continue;
        // TODO this check will become obsolete when we rework our relocs mechanism at the ZigObject level
        if (self.relocs.items[rela_shndx].items.len == 0) continue;
        const out_shndx = atom_ptr.output_section_index;
        const out_shdr = elf_file.sections.items(.shdr)[out_shndx];
        if (out_shdr.sh_type == elf.SHT_NOBITS) continue;

        const rela_sect_name = try std.fmt.allocPrintSentinel(gpa, ".rela{s}", .{
            elf_file.getShString(out_shdr.sh_name),
        }, 0);
        defer gpa.free(rela_sect_name);
        const out_rela_shndx = elf_file.sectionByName(rela_sect_name).?;

        // Point the rela section at its target section and the symtab.
        const out_rela_shdr = &elf_file.sections.items(.shdr)[out_rela_shndx];
        out_rela_shdr.sh_info = out_shndx;
        out_rela_shdr.sh_link = elf_file.section_indexes.symtab.?;

        const atom_list = &elf_file.sections.items(.atom_list)[out_rela_shndx];
        try atom_list.append(gpa, .{ .index = atom_index, .file = self.index });
    }
}
839
/// Counts the symbols this object contributes to the output symbol table,
/// reserves string table space for their names, and records each symbol's
/// final symtab slot in its extra data.
pub fn updateSymtabSize(self: *ZigObject, elf_file: *Elf) !void {
    for (self.local_symbols.items) |sym_index| {
        const local = &self.symbols.items[sym_index];
        // Symbols backed by dead atoms are not emitted.
        if (local.atom(elf_file)) |atom_ptr| if (!atom_ptr.alive) continue;
        const name = local.name(elf_file);
        assert(name.len > 0);
        const esym = local.elfSym(elf_file);
        // Section and untyped symbols are synthesized elsewhere; skip them.
        switch (esym.st_type()) {
            elf.STT_SECTION, elf.STT_NOTYPE => continue,
            else => {},
        }
        local.flags.output_symtab = true;
        local.addExtra(.{ .symtab = self.output_symtab_ctx.nlocals }, elf_file);
        self.output_symtab_ctx.nlocals += 1;
        // +1 accounts for the NUL terminator in the string table.
        self.output_symtab_ctx.strsize += @as(u32, @intCast(name.len)) + 1;
    }

    for (self.global_symbols.items, self.symbols_resolver.items) |sym_index, resolv| {
        const global = &self.symbols.items[sym_index];
        const ref = elf_file.resolver.values.items[resolv - 1];
        const ref_sym = elf_file.symbol(ref) orelse continue;
        // Only count globals whose definition resolved to this object.
        if (ref_sym.file(elf_file).?.index() != self.index) continue;
        if (global.atom(elf_file)) |atom_ptr| if (!atom_ptr.alive) continue;
        global.flags.output_symtab = true;
        // Globals with local visibility land in the locals partition.
        if (global.isLocal(elf_file)) {
            global.addExtra(.{ .symtab = self.output_symtab_ctx.nlocals }, elf_file);
            self.output_symtab_ctx.nlocals += 1;
        } else {
            global.addExtra(.{ .symtab = self.output_symtab_ctx.nglobals }, elf_file);
            self.output_symtab_ctx.nglobals += 1;
        }
        self.output_symtab_ctx.strsize += @as(u32, @intCast(global.name(elf_file).len)) + 1;
    }
}
874
/// Copies this object's symbols into the output symtab/strtab at the slots
/// reserved earlier by `updateSymtabSize`. Capacity must already be ensured.
pub fn writeSymtab(self: ZigObject, elf_file: *Elf) void {
    for (self.local_symbols.items) |sym_index| {
        const local = &self.symbols.items[sym_index];
        const out_index = local.outputSymtabIndex(elf_file) orelse continue;
        const out_esym = &elf_file.symtab.items[out_index];
        // Record the name's starting offset, then append it NUL-terminated.
        out_esym.st_name = @intCast(elf_file.strtab.items.len);
        elf_file.strtab.appendSliceAssumeCapacity(local.name(elf_file));
        elf_file.strtab.appendAssumeCapacity(0);
        local.setOutputSym(elf_file, out_esym);
    }

    for (self.global_symbols.items, self.symbols_resolver.items) |sym_index, resolv| {
        const global = self.symbols.items[sym_index];
        const ref = elf_file.resolver.values.items[resolv - 1];
        const ref_sym = elf_file.symbol(ref) orelse continue;
        // Skip globals whose definition resolved to a different file.
        if (ref_sym.file(elf_file).?.index() != self.index) continue;
        const out_index = global.outputSymtabIndex(elf_file) orelse continue;
        const name_off: u32 = @intCast(elf_file.strtab.items.len);
        elf_file.strtab.appendSliceAssumeCapacity(global.name(elf_file));
        elf_file.strtab.appendAssumeCapacity(0);
        const out_esym = &elf_file.symtab.items[out_index];
        out_esym.st_name = name_off;
        global.setOutputSym(elf_file, out_esym);
    }
}
900
/// Reads the atom's code back from the output file.
/// Caller owns the returned memory.
pub fn codeAlloc(self: *ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8 {
    const gpa = elf_file.base.comp.gpa;
    const atom_ptr = self.atom(atom_index).?;
    const size = std.math.cast(usize, atom_ptr.size) orelse return error.Overflow;
    const file_offset = atom_ptr.offset(elf_file);
    const buffer = try gpa.alloc(u8, size);
    errdefer gpa.free(buffer);
    const bytes_read = try elf_file.base.file.?.preadAll(buffer, file_offset);
    // A short read means the file does not contain the atom's full code.
    if (bytes_read != buffer.len) {
        log.err("fetching code for {s} failed", .{atom_ptr.name(elf_file)});
        return error.InputOutput;
    }
    return buffer;
}
917
/// Emits the relocation tying `reloc_info.parent` to `target_sym_index`:
/// either an absolute relocation on the parent atom, or an external DWARF
/// relocation when lowering debug info.
/// Shared between `getNavVAddr` and `getUavVAddr`, which previously
/// duplicated this switch verbatim.
fn addVAddrReloc(
    self: *ZigObject,
    elf_file: *Elf,
    target_sym_index: Symbol.Index,
    reloc_info: link.File.RelocInfo,
) !void {
    switch (reloc_info.parent) {
        .none => unreachable,
        .atom_index => |atom_index| {
            const parent_atom = self.symbol(atom_index).atom(elf_file).?;
            const r_type = relocation.encode(.abs, elf_file.getTarget().cpu.arch);
            try parent_atom.addReloc(elf_file.base.comp.gpa, .{
                .r_offset = reloc_info.offset,
                // Upper 32 bits carry the target symbol index, lower bits the type.
                .r_info = (@as(u64, @intCast(target_sym_index)) << 32) | r_type,
                .r_addend = reloc_info.addend,
            }, self);
        },
        .debug_output => |debug_output| switch (debug_output) {
            .dwarf => |wip_nav| try wip_nav.infoExternalReloc(.{
                .source_off = @intCast(reloc_info.offset),
                .target_sym = target_sym_index,
                .target_off = reloc_info.addend,
            }),
            .none => unreachable,
        },
    }
}

/// Returns the virtual address of the `Nav` identified by `nav_index`,
/// creating tracking metadata on first use (externs resolve through the
/// global symbol table instead), and records a relocation from the
/// requesting parent so the address is patched on future moves.
pub fn getNavVAddr(
    self: *ZigObject,
    elf_file: *Elf,
    pt: Zcu.PerThread,
    nav_index: InternPool.Nav.Index,
    reloc_info: link.File.RelocInfo,
) !u64 {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const nav = ip.getNav(nav_index);
    log.debug("getNavVAddr {f}({d})", .{ nav.fqn.fmt(ip), nav_index });
    const this_sym_index = if (nav.getExtern(ip)) |@"extern"| try self.getGlobalSymbol(
        elf_file,
        nav.name.toSlice(ip),
        @"extern".lib_name.toSlice(ip),
    ) else try self.getOrCreateMetadataForNav(zcu, nav_index);
    const vaddr = self.symbol(this_sym_index).address(.{}, elf_file);
    try self.addVAddrReloc(elf_file, this_sym_index, reloc_info);
    return @intCast(vaddr);
}

/// Returns the virtual address of an unnamed addressable value (uav) and
/// records a relocation from the requesting parent.
/// Asserts the uav was already lowered (see `lowerUav`).
pub fn getUavVAddr(
    self: *ZigObject,
    elf_file: *Elf,
    uav: InternPool.Index,
    reloc_info: link.File.RelocInfo,
) !u64 {
    const sym_index = self.uavs.get(uav).?.symbol_index;
    const vaddr = self.symbol(sym_index).address(.{}, elf_file);
    try self.addVAddrReloc(elf_file, sym_index, reloc_info);
    return @intCast(vaddr);
}
990
/// Lowers an unnamed addressable value (uav) into the `.data.rel.ro` output
/// section, reusing a previously lowered copy when its alignment suffices.
/// Returns the tracking symbol index on success, or a `fail` with an error
/// message when lowering the constant fails.
pub fn lowerUav(
    self: *ZigObject,
    elf_file: *Elf,
    pt: Zcu.PerThread,
    uav: InternPool.Index,
    explicit_alignment: InternPool.Alignment,
    src_loc: Zcu.LazySrcLoc,
) !codegen.SymbolResult {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const val = Value.fromInterned(uav);
    // Fall back to the type's ABI alignment when none was requested.
    const uav_alignment = switch (explicit_alignment) {
        .none => val.typeOf(zcu).abiAlignment(zcu),
        else => explicit_alignment,
    };
    // Reuse an existing lowering unless a stricter alignment is now required;
    // in that case fall through and lower the value again.
    if (self.uavs.get(uav)) |metadata| {
        assert(metadata.allocated);
        const sym = self.symbol(metadata.symbol_index);
        const existing_alignment = sym.atom(elf_file).?.alignment;
        if (uav_alignment.order(existing_alignment).compare(.lte))
            return .{ .sym_index = metadata.symbol_index };
    }

    // Lazily create the .data.rel.ro output section (and its section symbol)
    // on first use.
    const osec = if (self.data_relro_index) |sym_index|
        self.symbol(sym_index).outputShndx(elf_file).?
    else osec: {
        const osec = try elf_file.addSection(.{
            .name = try elf_file.insertShString(".data.rel.ro"),
            .type = elf.SHT_PROGBITS,
            .addralign = 1,
            .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
        });
        self.data_relro_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data.rel.ro"), osec);
        break :osec osec;
    };

    var name_buf: [32]u8 = undefined;
    const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{
        @intFromEnum(uav),
    }) catch unreachable;
    const res = self.lowerConst(
        elf_file,
        pt,
        name,
        val,
        uav_alignment,
        osec,
        src_loc,
    ) catch |err| switch (err) {
        // OOM propagates; any other lowering failure is turned into a
        // compile error attached to the source location.
        error.OutOfMemory => return error.OutOfMemory,
        else => |e| return .{ .fail = try Zcu.ErrorMsg.create(
            gpa,
            src_loc,
            "unable to lower constant value: {s}",
            .{@errorName(e)},
        ) },
    };
    // Record successful lowerings so subsequent requests can reuse them.
    switch (res) {
        .sym_index => |sym_index| try self.uavs.put(gpa, uav, .{ .symbol_index = sym_index, .allocated = true }),
        .fail => {},
    }
    return res;
}
1054
/// Returns the symbol tracking `lazy_sym`, creating it on first request and
/// scheduling its contents for (re)generation unless already pending.
pub fn getOrCreateMetadataForLazySymbol(
    self: *ZigObject,
    elf_file: *Elf,
    pt: Zcu.PerThread,
    lazy_sym: link.File.LazySymbol,
) !Symbol.Index {
    const gop = try self.lazy_syms.getOrPut(pt.zcu.gpa, lazy_sym.ty);
    // Keep the table consistent if symbol creation below fails.
    errdefer _ = if (!gop.found_existing) self.lazy_syms.pop();
    if (!gop.found_existing) gop.value_ptr.* = .{};
    // Each tracked type has independent code and const-data symbols/states.
    const symbol_index_ptr, const state_ptr = switch (lazy_sym.kind) {
        .code => .{ &gop.value_ptr.text_symbol_index, &gop.value_ptr.text_state },
        .const_data => .{ &gop.value_ptr.rodata_symbol_index, &gop.value_ptr.rodata_state },
    };
    switch (state_ptr.*) {
        // First request: allocate the backing symbol and atom.
        .unused => symbol_index_ptr.* = try self.newSymbolWithAtom(pt.zcu.gpa, 0),
        // Already scheduled for regeneration; nothing more to do.
        .pending_flush => return symbol_index_ptr.*,
        .flushed => {},
    }
    state_ptr.* = .pending_flush;
    const symbol_index = symbol_index_ptr.*;
    // anyerror needs to be deferred until flush
    if (lazy_sym.ty != .anyerror_type) try self.updateLazySymbol(elf_file, pt, lazy_sym, symbol_index);
    return symbol_index;
}
1079
/// Frees the atom backing `sym_index` and resets that symbol slot to its
/// default (empty) state.
fn freeNavMetadata(self: *ZigObject, elf_file: *Elf, sym_index: Symbol.Index) void {
    const freed_sym = self.symbol(sym_index);
    freed_sym.atom(elf_file).?.free(elf_file);
    log.debug("adding %{d} to local symbols free list", .{sym_index});
    // Clear the slot; the index may be recycled later.
    self.symbols.items[sym_index] = .{};
    // TODO free GOT entry here
}
1087
/// Removes all linker state tracked for `nav_index`: its atom, its export
/// list, and any associated debug info.
pub fn freeNav(self: *ZigObject, elf_file: *Elf, nav_index: InternPool.Nav.Index) void {
    const gpa = elf_file.base.comp.gpa;

    log.debug("freeNav ({d})", .{nav_index});

    if (self.navs.fetchRemove(nav_index)) |const_kv| {
        // Copy to a mutable binding so the export list can be deinitialized.
        var kv = const_kv;
        self.freeNavMetadata(elf_file, kv.value.symbol_index);
        kv.value.exports.deinit(gpa);
    }

    if (self.dwarf) |*dwarf| dwarf.freeNav(nav_index);
}
1104
/// Returns the symbol index tracking `nav_index`, allocating a fresh
/// symbol+atom pair on first request. Thread-local navs are flagged as TLS
/// when the compilation is not single-threaded.
pub fn getOrCreateMetadataForNav(self: *ZigObject, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Symbol.Index {
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;
    const gop = try self.navs.getOrPut(gpa, nav_index);
    if (gop.found_existing) return gop.value_ptr.symbol_index;
    const symbol_index = try self.newSymbolWithAtom(gpa, 0);
    const is_tls = ip.getNav(nav_index).isThreadlocal(ip) and zcu.comp.config.any_non_single_threaded;
    if (is_tls) self.symbol(symbol_index).flags.is_tls = true;
    gop.value_ptr.* = .{ .symbol_index = symbol_index };
    return symbol_index;
}
1119
/// Creates a local STT_SECTION symbol representing output section `shndx`.
fn addSectionSymbol(self: *ZigObject, allocator: Allocator, name_off: u32, shndx: u32) !Symbol.Index {
    const sym_index = try self.newLocalSymbol(allocator, name_off);
    const sym = self.symbol(sym_index);
    const esym = &self.symtab.items(.elf_sym)[sym.esym_index];
    esym.st_info |= elf.STT_SECTION;
    // TODO create fake shdrs?
    // esym.st_shndx = shndx;
    sym.output_section_index = shndx;
    return sym_index;
}
1130
/// Chooses (creating on first use) the output section for `nav_index`'s
/// code or data.
/// Precedence: an explicit `linksection` wins; otherwise functions go to
/// .text, TLS variables to .tdata/.tbss, constants to .data.rel.ro, and
/// mutable data to .data or .bss depending on whether the bytes are all
/// zero and relocation-free.
fn getNavShdrIndex(
    self: *ZigObject,
    elf_file: *Elf,
    zcu: *Zcu,
    nav_index: InternPool.Nav.Index,
    sym_index: Symbol.Index,
    code: []const u8,
) error{OutOfMemory}!u32 {
    const gpa = elf_file.base.comp.gpa;
    const ptr_size = elf_file.ptrWidthBytes();
    const ip = &zcu.intern_pool;
    const nav_val = zcu.navValue(nav_index);
    const is_func = ip.isFunctionType(nav_val.typeOf(zcu).toIntern());
    if (ip.getNav(nav_index).getLinkSection().unwrap()) |@"linksection"| {
        const section_name = @"linksection".toSlice(ip);
        // Reuse an existing section of that name, widening its flags to
        // accommodate the new content kind.
        if (elf_file.sectionByName(section_name)) |osec| {
            if (is_func) {
                elf_file.sections.items(.shdr)[osec].sh_flags |= elf.SHF_EXECINSTR;
            } else {
                elf_file.sections.items(.shdr)[osec].sh_flags |= elf.SHF_WRITE;
            }
            return osec;
        }
        const osec = try elf_file.addSection(.{
            .type = elf.SHT_PROGBITS,
            .flags = elf.SHF_ALLOC | @as(u64, if (is_func) elf.SHF_EXECINSTR else elf.SHF_WRITE),
            .name = try elf_file.insertShString(section_name),
            .addralign = 1,
        });
        const section_index = try self.addSectionSymbol(gpa, try self.addString(gpa, section_name), osec);
        // Well-known section names get canonical type/flags; the top-level
        // ones are also cached on `self` so later navs without an explicit
        // linksection reuse them.
        if (std.mem.eql(u8, section_name, ".text")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR;
            self.text_index = section_index;
        } else if (std.mem.startsWith(u8, section_name, ".text.")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR;
        } else if (std.mem.eql(u8, section_name, ".rodata")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = elf.SHF_ALLOC;
            self.rodata_index = section_index;
        } else if (std.mem.startsWith(u8, section_name, ".rodata.")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = elf.SHF_ALLOC;
        } else if (std.mem.eql(u8, section_name, ".data.rel.ro")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = elf.SHF_ALLOC | elf.SHF_WRITE;
            self.data_relro_index = section_index;
        } else if (std.mem.eql(u8, section_name, ".data")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = elf.SHF_ALLOC | elf.SHF_WRITE;
            self.data_index = section_index;
        } else if (std.mem.startsWith(u8, section_name, ".data.")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = elf.SHF_ALLOC | elf.SHF_WRITE;
        } else if (std.mem.eql(u8, section_name, ".bss")) {
            const shdr = &elf_file.sections.items(.shdr)[osec];
            shdr.sh_type = elf.SHT_NOBITS;
            shdr.sh_flags = elf.SHF_ALLOC | elf.SHF_WRITE;
            self.bss_index = section_index;
        } else if (std.mem.startsWith(u8, section_name, ".bss.")) {
            const shdr = &elf_file.sections.items(.shdr)[osec];
            shdr.sh_type = elf.SHT_NOBITS;
            shdr.sh_flags = elf.SHF_ALLOC | elf.SHF_WRITE;
        } else if (std.mem.eql(u8, section_name, ".tdata")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS;
            self.tdata_index = section_index;
        } else if (std.mem.startsWith(u8, section_name, ".tdata.")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS;
        } else if (std.mem.eql(u8, section_name, ".tbss")) {
            const shdr = &elf_file.sections.items(.shdr)[osec];
            shdr.sh_type = elf.SHT_NOBITS;
            shdr.sh_flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS;
            self.tbss_index = section_index;
        } else if (std.mem.startsWith(u8, section_name, ".tbss.")) {
            const shdr = &elf_file.sections.items(.shdr)[osec];
            shdr.sh_type = elf.SHT_NOBITS;
            shdr.sh_flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS;
        } else if (std.mem.eql(u8, section_name, ".eh_frame")) {
            // x86-64 uses a dedicated unwind section type per its psABI.
            const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
            const shdr = &elf_file.sections.items(.shdr)[osec];
            if (target.cpu.arch == .x86_64) shdr.sh_type = elf.SHT_X86_64_UNWIND;
            shdr.sh_flags = elf.SHF_ALLOC;
            self.eh_frame_index = section_index;
        } else if (std.mem.eql(u8, section_name, ".debug_info")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = 0;
            self.debug_info_index = section_index;
        } else if (std.mem.eql(u8, section_name, ".debug_abbrev")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = 0;
            self.debug_abbrev_index = section_index;
        } else if (std.mem.eql(u8, section_name, ".debug_aranges")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = 0;
            self.debug_aranges_index = section_index;
        } else if (std.mem.eql(u8, section_name, ".debug_str")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = 0;
            self.debug_str_index = section_index;
        } else if (std.mem.eql(u8, section_name, ".debug_line")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = 0;
            self.debug_line_index = section_index;
        } else if (std.mem.eql(u8, section_name, ".debug_line_str")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = 0;
            self.debug_line_str_index = section_index;
        } else if (std.mem.eql(u8, section_name, ".debug_loclists")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = 0;
            self.debug_loclists_index = section_index;
        } else if (std.mem.eql(u8, section_name, ".debug_rnglists")) {
            elf_file.sections.items(.shdr)[osec].sh_flags = 0;
            self.debug_rnglists_index = section_index;
        } else if (std.mem.startsWith(u8, section_name, ".debug")) {
            // Any other debug section: not allocated at runtime.
            elf_file.sections.items(.shdr)[osec].sh_flags = 0;
        } else if (std.mem.eql(u8, section_name, ".init_array") or std.mem.startsWith(u8, section_name, ".init_array.")) {
            const shdr = &elf_file.sections.items(.shdr)[osec];
            shdr.sh_type = elf.SHT_INIT_ARRAY;
            shdr.sh_flags = elf.SHF_ALLOC | elf.SHF_WRITE;
        } else if (std.mem.eql(u8, section_name, ".fini_array") or std.mem.startsWith(u8, section_name, ".fini_array.")) {
            const shdr = &elf_file.sections.items(.shdr)[osec];
            shdr.sh_type = elf.SHT_FINI_ARRAY;
            shdr.sh_flags = elf.SHF_ALLOC | elf.SHF_WRITE;
        }
        return osec;
    }
    // No explicit linksection: functions always go to .text.
    if (is_func) {
        if (self.text_index) |symbol_index|
            return self.symbol(symbol_index).outputShndx(elf_file).?;
        const osec = try elf_file.addSection(.{
            .type = elf.SHT_PROGBITS,
            .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
            .name = try elf_file.insertShString(".text"),
            .addralign = 1,
        });
        self.text_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".text"), osec);
        return osec;
    }
    // Classify the value: mutability, thread-locality, and (when available)
    // the initializer used for the all-zeroes check below.
    const is_const, const is_threadlocal, const nav_init = switch (ip.indexToKey(nav_val.toIntern())) {
        .variable => |variable| .{ false, variable.is_threadlocal, variable.init },
        .@"extern" => |@"extern"| .{ @"extern".is_const, @"extern".is_threadlocal, .none },
        else => .{ true, false, nav_val.toIntern() },
    };
    const has_relocs = self.symbol(sym_index).atom(elf_file).?.relocs(elf_file).len > 0;
    if (is_threadlocal and elf_file.base.comp.config.any_non_single_threaded) {
        // All-zero, relocation-free TLS data can live in .tbss.
        const is_bss = !has_relocs and for (code) |byte| {
            if (byte != 0) break false;
        } else true;
        if (is_bss) {
            if (self.tbss_index) |symbol_index|
                return self.symbol(symbol_index).outputShndx(elf_file).?;
            const osec = try elf_file.addSection(.{
                .name = try elf_file.insertShString(".tbss"),
                .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS,
                .type = elf.SHT_NOBITS,
                .addralign = 1,
            });
            self.tbss_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".tbss"), osec);
            return osec;
        }
        if (self.tdata_index) |symbol_index|
            return self.symbol(symbol_index).outputShndx(elf_file).?;
        const osec = try elf_file.addSection(.{
            .type = elf.SHT_PROGBITS,
            .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS,
            .name = try elf_file.insertShString(".tdata"),
            .addralign = 1,
        });
        self.tdata_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".tdata"), osec);
        return osec;
    }
    if (is_const) {
        if (self.data_relro_index) |symbol_index|
            return self.symbol(symbol_index).outputShndx(elf_file).?;
        const osec = try elf_file.addSection(.{
            .name = try elf_file.insertShString(".data.rel.ro"),
            .type = elf.SHT_PROGBITS,
            .addralign = 1,
            .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
        });
        self.data_relro_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data.rel.ro"), osec);
        return osec;
    }
    // Undef-initialized variables: safe builds keep them in .data (0xAA
    // pattern visible), fast builds put them in .bss.
    if (nav_init != .none and Value.fromInterned(nav_init).isUndef(zcu))
        return switch (zcu.navFileScope(nav_index).mod.?.optimize_mode) {
            .Debug, .ReleaseSafe => {
                if (self.data_index) |symbol_index|
                    return self.symbol(symbol_index).outputShndx(elf_file).?;
                const osec = try elf_file.addSection(.{
                    .name = try elf_file.insertShString(".data"),
                    .type = elf.SHT_PROGBITS,
                    .addralign = ptr_size,
                    .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
                });
                self.data_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data"), osec);
                return osec;
            },
            .ReleaseFast, .ReleaseSmall => {
                if (self.bss_index) |symbol_index|
                    return self.symbol(symbol_index).outputShndx(elf_file).?;
                const osec = try elf_file.addSection(.{
                    .type = elf.SHT_NOBITS,
                    .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
                    .name = try elf_file.insertShString(".bss"),
                    .addralign = 1,
                });
                self.bss_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".bss"), osec);
                return osec;
            },
        };
    // All-zero, relocation-free data can live in .bss.
    const is_bss = !has_relocs and for (code) |byte| {
        if (byte != 0) break false;
    } else true;
    if (is_bss) {
        if (self.bss_index) |symbol_index|
            return self.symbol(symbol_index).outputShndx(elf_file).?;
        const osec = try elf_file.addSection(.{
            .type = elf.SHT_NOBITS,
            .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
            .name = try elf_file.insertShString(".bss"),
            .addralign = 1,
        });
        self.bss_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".bss"), osec);
        return osec;
    }
    // Everything else: ordinary mutable data.
    if (self.data_index) |symbol_index|
        return self.symbol(symbol_index).outputShndx(elf_file).?;
    const osec = try elf_file.addSection(.{
        .name = try elf_file.insertShString(".data"),
        .type = elf.SHT_PROGBITS,
        .addralign = ptr_size,
        .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
    });
    self.data_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data"), osec);
    return osec;
}
1355
/// Writes freshly generated machine code for `nav_index` into the output
/// file, (re)allocating its atom when needed, and mirrors the write into a
/// hot-swapped child process when one is attached.
fn updateNavCode(
    self: *ZigObject,
    elf_file: *Elf,
    pt: Zcu.PerThread,
    nav_index: InternPool.Nav.Index,
    sym_index: Symbol.Index,
    shdr_index: u32,
    code: []const u8,
    stt_bits: u8,
) link.File.UpdateNavError!void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;
    const nav = ip.getNav(nav_index);

    log.debug("updateNavCode {f}({d})", .{ nav.fqn.fmt(ip), nav_index });

    const mod = zcu.navFileScope(nav_index).mod.?;
    const target = &mod.resolved_target.result;
    // An explicit alignment is still clamped up to the target's minimum
    // function alignment.
    const required_alignment = switch (nav.status.fully_resolved.alignment) {
        .none => switch (mod.optimize_mode) {
            .Debug, .ReleaseSafe, .ReleaseFast => target_util.defaultFunctionAlignment(target),
            .ReleaseSmall => target_util.minFunctionAlignment(target),
        },
        else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
    };

    const sym = self.symbol(sym_index);
    const esym = &self.symtab.items(.elf_sym)[sym.esym_index];
    const atom_ptr = sym.atom(elf_file).?;
    const name_offset = try self.strtab.insert(gpa, nav.fqn.toSlice(ip));

    atom_ptr.alive = true;
    atom_ptr.name_offset = name_offset;
    atom_ptr.output_section_index = shdr_index;

    sym.name_offset = name_offset;
    esym.st_name = name_offset;
    esym.st_info |= stt_bits;
    esym.st_size = code.len;

    // Record the old placement before resizing so growth can be detected.
    const old_size = atom_ptr.size;
    const old_vaddr = atom_ptr.value;
    atom_ptr.alignment = required_alignment;
    atom_ptr.size = code.len;

    // An existing allocation may be kept in place when it still fits; in
    // hot-swap mode (child_pid set) the else branch always reallocates.
    if (old_size > 0 and elf_file.base.child_pid == null) {
        const capacity = atom_ptr.capacity(elf_file);
        const need_realloc = code.len > capacity or !required_alignment.check(@intCast(atom_ptr.value));
        if (need_realloc) {
            self.allocateAtom(atom_ptr, true, elf_file) catch |err|
                return elf_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)});

            log.debug("growing {f} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom_ptr.value });
            // The atom moved; reset symbol values (relative to the atom).
            if (old_vaddr != atom_ptr.value) {
                sym.value = 0;
                esym.st_value = 0;
            }
        } else if (code.len < old_size) {
            // TODO shrink section size
        }
    } else {
        self.allocateAtom(atom_ptr, true, elf_file) catch |err|
            return elf_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)});

        errdefer self.freeNavMetadata(elf_file, sym_index);
        sym.value = 0;
        esym.st_value = 0;
    }

    self.navs.getPtr(nav_index).?.allocated = true;

    // Hot code swapping: patch the running child process's memory directly.
    if (elf_file.base.child_pid) |pid| {
        switch (builtin.os.tag) {
            .linux => {
                var code_vec: [1]std.posix.iovec_const = .{.{
                    .base = code.ptr,
                    .len = code.len,
                }};
                var remote_vec: [1]std.posix.iovec_const = .{.{
                    .base = @as([*]u8, @ptrFromInt(@as(usize, @intCast(sym.address(.{}, elf_file))))),
                    .len = code.len,
                }};
                const rc = std.os.linux.process_vm_writev(pid, &code_vec, &remote_vec, 0);
                switch (std.os.linux.errno(rc)) {
                    .SUCCESS => assert(rc == code.len),
                    else => |errno| log.warn("process_vm_writev failure: {s}", .{@tagName(errno)}),
                }
            },
            else => return elf_file.base.cgFail(nav_index, "ELF hot swap unavailable on host operating system '{s}'", .{@tagName(builtin.os.tag)}),
        }
    }

    // NOBITS sections (.bss and friends) have no file contents to write.
    const shdr = elf_file.sections.items(.shdr)[shdr_index];
    if (shdr.sh_type != elf.SHT_NOBITS) {
        const file_offset = atom_ptr.offset(elf_file);
        elf_file.base.file.?.pwriteAll(code, file_offset) catch |err|
            return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)});
        log.debug("writing {f} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), file_offset, file_offset + code.len });
    }
}
1457
/// Lowers a thread-local variable's initializer into its TLS output section
/// and registers the atom in the TLS tracking table.
fn updateTlv(
    self: *ZigObject,
    elf_file: *Elf,
    pt: Zcu.PerThread,
    nav_index: InternPool.Nav.Index,
    sym_index: Symbol.Index,
    shndx: u32,
    code: []const u8,
) link.File.UpdateNavError!void {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const gpa = zcu.gpa;
    const nav = ip.getNav(nav_index);

    log.debug("updateTlv {f}({d})", .{ nav.fqn.fmt(ip), nav_index });

    const required_alignment = pt.navAlignment(nav_index);

    const sym = self.symbol(sym_index);
    const esym = &self.symtab.items(.elf_sym)[sym.esym_index];
    const atom_ptr = sym.atom(elf_file).?;
    const name_offset = try self.strtab.insert(gpa, nav.fqn.toSlice(ip));

    atom_ptr.alive = true;
    atom_ptr.name_offset = name_offset;
    atom_ptr.output_section_index = shndx;

    sym.name_offset = name_offset;
    esym.st_name = name_offset;
    esym.st_info = elf.STT_TLS;
    esym.st_size = code.len;

    atom_ptr.alignment = required_alignment;
    atom_ptr.size = code.len;

    // Each TLS atom may only be registered once for now.
    const gop = try self.tls_variables.getOrPut(gpa, atom_ptr.atom_index);
    assert(!gop.found_existing); // TODO incremental updates

    self.allocateAtom(atom_ptr, true, elf_file) catch |err|
        return elf_file.base.cgFail(nav_index, "failed to allocate atom: {s}", .{@errorName(err)});
    // Symbol values are kept at 0 relative to the freshly allocated atom.
    sym.value = 0;
    esym.st_value = 0;

    self.navs.getPtr(nav_index).?.allocated = true;

    // NOBITS sections (.tbss) have no file contents to write out.
    const shdr = elf_file.sections.items(.shdr)[shndx];
    if (shdr.sh_type != elf.SHT_NOBITS) {
        const file_offset = atom_ptr.offset(elf_file);
        elf_file.base.file.?.pwriteAll(code, file_offset) catch |err|
            return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)});
        log.debug("writing TLV {s} from 0x{x} to 0x{x}", .{
            atom_ptr.name(elf_file),
            file_offset,
            file_offset + code.len,
        });
    }
}
1515
/// Regenerates machine code for `func_index`, emits its DWARF debug info,
/// places it in the appropriate output section, and — when the function had
/// to move — writes a trampoline at its old address so existing references
/// keep working.
pub fn updateFunc(
    self: *ZigObject,
    elf_file: *Elf,
    pt: Zcu.PerThread,
    func_index: InternPool.Index,
    mir: *const codegen.AnyMir,
) link.File.UpdateNavError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const gpa = elf_file.base.comp.gpa;
    const func = zcu.funcInfo(func_index);

    log.debug("updateFunc {f}({d})", .{ ip.getNav(func.owner_nav).fqn.fmt(ip), func.owner_nav });

    const sym_index = try self.getOrCreateMetadataForNav(zcu, func.owner_nav);
    // Drop relocations from the previous version of this function.
    self.atom(self.symbol(sym_index).ref.index).?.freeRelocs(self);

    var aw: std.Io.Writer.Allocating = .init(gpa);
    defer aw.deinit();

    var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, func.owner_nav, sym_index) else null;
    defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();

    codegen.emitFunction(
        &elf_file.base,
        pt,
        zcu.navSrcLoc(func.owner_nav),
        func_index,
        sym_index,
        mir,
        &aw.writer,
        if (debug_wip_nav) |*dn| .{ .dwarf = dn } else .none,
    ) catch |err| switch (err) {
        // The allocating writer only fails on OOM.
        error.WriteFailed => return error.OutOfMemory,
        else => |e| return e,
    };
    const code = aw.written();

    const shndx = try self.getNavShdrIndex(elf_file, zcu, func.owner_nav, sym_index, code);
    log.debug("setting shdr({x},{s}) for {f}", .{
        shndx,
        elf_file.getShString(elf_file.sections.items(.shdr)[shndx].sh_name),
        ip.getNav(func.owner_nav).fqn.fmt(ip),
    });
    // Snapshot placement before/after the update to detect reallocation.
    const old_rva, const old_alignment = blk: {
        const atom_ptr = self.atom(self.symbol(sym_index).ref.index).?;
        break :blk .{ atom_ptr.value, atom_ptr.alignment };
    };
    try self.updateNavCode(elf_file, pt, func.owner_nav, sym_index, shndx, code, elf.STT_FUNC);
    const new_rva, const new_alignment = blk: {
        const atom_ptr = self.atom(self.symbol(sym_index).ref.index).?;
        break :blk .{ atom_ptr.value, atom_ptr.alignment };
    };

    if (debug_wip_nav) |*wip_nav| self.dwarf.?.finishWipNavFunc(pt, func.owner_nav, code.len, wip_nav) catch |err|
        return elf_file.base.cgFail(func.owner_nav, "failed to finish dwarf function: {s}", .{@errorName(err)});

    // Exports will be updated by `Zcu.processExports` after the update.

    if (old_rva != new_rva and old_rva > 0) {
        // If we had to reallocate the function, we re-use the existing slot for a trampoline.
        // In the rare case that the function has been further overaligned we skip creating a
        // trampoline and update all symbols referring this function.
        if (old_alignment.order(new_alignment) == .lt) {
            @panic("TODO update all symbols referring this function");
        }

        // Create a trampoline to the new location at `old_rva`.
        if (!self.symbol(sym_index).flags.has_trampoline) {
            const name = try std.fmt.allocPrint(gpa, "{s}$trampoline", .{
                self.symbol(sym_index).name(elf_file),
            });
            defer gpa.free(name);
            // Trampolines live in .text; create it on first use.
            const osec = if (self.text_index) |sect_sym_index|
                self.symbol(sect_sym_index).outputShndx(elf_file).?
            else osec: {
                const osec = try elf_file.addSection(.{
                    .name = try elf_file.insertShString(".text"),
                    .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
                    .type = elf.SHT_PROGBITS,
                    .addralign = 1,
                });
                self.text_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".text"), osec);
                break :osec osec;
            };
            const name_off = try self.addString(gpa, name);
            const tr_size = trampolineSize(elf_file.getTarget().cpu.arch);
            const tr_sym_index = try self.newSymbolWithAtom(gpa, name_off);
            const tr_sym = self.symbol(tr_sym_index);
            const tr_esym = &self.symtab.items(.elf_sym)[tr_sym.esym_index];
            tr_esym.st_info |= elf.STT_OBJECT;
            tr_esym.st_size = tr_size;
            // The trampoline atom reuses the function's previous slot.
            const tr_atom_ptr = tr_sym.atom(elf_file).?;
            tr_atom_ptr.value = old_rva;
            tr_atom_ptr.alive = true;
            tr_atom_ptr.alignment = old_alignment;
            tr_atom_ptr.output_section_index = osec;
            tr_atom_ptr.size = tr_size;
            const target_sym = self.symbol(sym_index);
            target_sym.addExtra(.{ .trampoline = tr_sym_index }, elf_file);
            target_sym.flags.has_trampoline = true;
        }
        // (Re)write the jump so the old address forwards to the new one.
        const target_sym = self.symbol(sym_index);
        writeTrampoline(self.symbol(target_sym.extra(elf_file).trampoline).*, target_sym.*, elf_file) catch |err|
            return elf_file.base.cgFail(func.owner_nav, "failed to write trampoline: {s}", .{@errorName(err)});
    }
}
1626
/// Lowers the fully-resolved value of a `Nav` (named addressable value) into
/// this object file: allocates/updates its atom, writes the generated bytes,
/// and emits DWARF info when enabled. Functions are handled by `updateFunc`;
/// externs only get a global symbol (plus debug info) and return early.
/// Exports are updated later by `Zcu.processExports`, not here.
pub fn updateNav(
    self: *ZigObject,
    elf_file: *Elf,
    pt: Zcu.PerThread,
    nav_index: InternPool.Nav.Index,
) link.File.UpdateNavError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const nav = ip.getNav(nav_index);

    log.debug("updateNav {f}({d})", .{ nav.fqn.fmt(ip), nav_index });

    // Pick the initializer to lower. `.none` means there is nothing to
    // generate (e.g. a function body, which is lowered elsewhere).
    const nav_init = switch (ip.indexToKey(nav.status.fully_resolved.val)) {
        .func => .none,
        .variable => |variable| variable.init,
        .@"extern" => |@"extern"| {
            // Externs have no body to lower; ensure a global symbol exists,
            // mark TLS if applicable, optionally emit debug info, then bail.
            const sym_index = try self.getGlobalSymbol(
                elf_file,
                nav.name.toSlice(ip),
                @"extern".lib_name.toSlice(ip),
            );
            if (@"extern".is_threadlocal and elf_file.base.comp.config.any_non_single_threaded) self.symbol(sym_index).flags.is_tls = true;
            if (self.dwarf) |*dwarf| {
                var debug_wip_nav = try dwarf.initWipNav(pt, nav_index, sym_index);
                defer debug_wip_nav.deinit();
                dwarf.finishWipNav(pt, nav_index, &debug_wip_nav) catch |err| switch (err) {
                    error.OutOfMemory => return error.OutOfMemory,
                    error.Overflow => return error.Overflow,
                    else => |e| return elf_file.base.cgFail(nav_index, "failed to finish dwarf nav: {s}", .{@errorName(e)}),
                };
            }
            return;
        },
        else => nav.status.fully_resolved.val,
    };

    if (nav_init != .none and Value.fromInterned(nav_init).typeOf(zcu).hasRuntimeBits(zcu)) {
        const sym_index = try self.getOrCreateMetadataForNav(zcu, nav_index);
        // Any relocations recorded for a previous version of this value are
        // stale now that we are re-lowering it.
        self.symbol(sym_index).atom(elf_file).?.freeRelocs(self);

        var aw: std.Io.Writer.Allocating = .init(zcu.gpa);
        defer aw.deinit();

        var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, nav_index, sym_index) else null;
        defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();

        codegen.generateSymbol(
            &elf_file.base,
            pt,
            zcu.navSrcLoc(nav_index),
            Value.fromInterned(nav_init),
            &aw.writer,
            .{ .atom_index = sym_index },
        ) catch |err| switch (err) {
            // The allocating writer can only fail on allocation failure.
            error.WriteFailed => return error.OutOfMemory,
            else => |e| return e,
        };
        const code = aw.written();

        const shndx = try self.getNavShdrIndex(elf_file, zcu, nav_index, sym_index, code);
        log.debug("setting shdr({x},{s}) for {f}", .{
            shndx,
            elf_file.getShString(elf_file.sections.items(.shdr)[shndx].sh_name),
            nav.fqn.fmt(ip),
        });
        // Thread-local variables are placed via the TLS path; everything else
        // goes through the regular code/data path.
        if (elf_file.sections.items(.shdr)[shndx].sh_flags & elf.SHF_TLS != 0)
            try self.updateTlv(elf_file, pt, nav_index, sym_index, shndx, code)
        else
            try self.updateNavCode(elf_file, pt, nav_index, sym_index, shndx, code, elf.STT_OBJECT);

        if (debug_wip_nav) |*wip_nav| self.dwarf.?.finishWipNav(pt, nav_index, wip_nav) catch |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
            error.Overflow => return error.Overflow,
            else => |e| return elf_file.base.cgFail(nav_index, "failed to finish dwarf nav: {s}", .{@errorName(e)}),
        };
    } else if (self.dwarf) |*dwarf| try dwarf.updateComptimeNav(pt, nav_index);

    // Exports will be updated by `Zcu.processExports` after the update.
}
1709
/// Forwards a container type update to the DWARF writer.
/// No-op when debug info emission is disabled.
pub fn updateContainerType(
    self: *ZigObject,
    pt: Zcu.PerThread,
    ty: InternPool.Index,
) !void {
    const tracy = trace(@src());
    defer tracy.end();

    if (self.dwarf == null) return;
    try self.dwarf.?.updateContainerType(pt, ty);
}
1720
/// Generates the code/data for a lazy symbol (e.g. a compiler-synthesized
/// helper for a type), names it `__lazy_<kind>_<type>`, places it in the
/// `.text` or `.rodata` output section (creating the section on first use),
/// allocates its atom, and writes the bytes to the output file.
fn updateLazySymbol(
    self: *ZigObject,
    elf_file: *Elf,
    pt: Zcu.PerThread,
    sym: link.File.LazySymbol,
    symbol_index: Symbol.Index,
) !void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;

    // Filled in by `generateLazySymbol` below.
    var required_alignment: InternPool.Alignment = .none;
    var aw: std.Io.Writer.Allocating = .init(gpa);
    defer aw.deinit();

    // Synthesize a unique name for the lazy symbol and intern it.
    const name_str_index = blk: {
        const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{f}", .{
            @tagName(sym.kind),
            Type.fromInterned(sym.ty).fmt(pt),
        });
        defer gpa.free(name);
        break :blk try self.strtab.insert(gpa, name);
    };

    codegen.generateLazySymbol(
        &elf_file.base,
        pt,
        Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse .unneeded,
        sym,
        &required_alignment,
        &aw.writer,
        .none,
        .{ .atom_index = symbol_index },
    ) catch |err| switch (err) {
        // The allocating writer can only fail on allocation failure.
        error.WriteFailed => return error.OutOfMemory,
        else => |e| return e,
    };
    const code = aw.written();

    // Choose (or lazily create) the output section for this symbol kind.
    const output_section_index = switch (sym.kind) {
        .code => if (self.text_index) |sym_index|
            self.symbol(sym_index).outputShndx(elf_file).?
        else osec: {
            const osec = try elf_file.addSection(.{
                .name = try elf_file.insertShString(".text"),
                .type = elf.SHT_PROGBITS,
                .addralign = 1,
                .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
            });
            self.text_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".text"), osec);
            break :osec osec;
        },
        .const_data => if (self.rodata_index) |sym_index|
            self.symbol(sym_index).outputShndx(elf_file).?
        else osec: {
            const osec = try elf_file.addSection(.{
                .name = try elf_file.insertShString(".rodata"),
                .type = elf.SHT_PROGBITS,
                .addralign = 1,
                .flags = elf.SHF_ALLOC,
            });
            self.rodata_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".rodata"), osec);
            break :osec osec;
        },
    };
    // Wire up the symbol, its ELF symtab entry, and its atom.
    const local_sym = self.symbol(symbol_index);
    local_sym.name_offset = name_str_index;
    const local_esym = &self.symtab.items(.elf_sym)[local_sym.esym_index];
    local_esym.st_name = name_str_index;
    local_esym.st_info |= elf.STT_OBJECT;
    local_esym.st_size = code.len;
    const atom_ptr = local_sym.atom(elf_file).?;
    atom_ptr.alive = true;
    atom_ptr.name_offset = name_str_index;
    atom_ptr.alignment = required_alignment;
    atom_ptr.size = code.len;
    atom_ptr.output_section_index = output_section_index;

    try self.allocateAtom(atom_ptr, true, elf_file);
    errdefer self.freeNavMetadata(elf_file, symbol_index);

    // Value is section-relative; the atom's placement carries the address.
    local_sym.value = 0;
    local_esym.st_value = 0;

    try elf_file.pwriteAll(code, atom_ptr.offset(elf_file));
}
1806
/// Lowers a constant `val` into a fresh local symbol + atom placed in
/// `output_section_index`, allocates space for it and writes the generated
/// bytes to the output file. Returns the new symbol index on success.
fn lowerConst(
    self: *ZigObject,
    elf_file: *Elf,
    pt: Zcu.PerThread,
    name: []const u8,
    val: Value,
    required_alignment: InternPool.Alignment,
    output_section_index: u32,
    src_loc: Zcu.LazySrcLoc,
) !codegen.SymbolResult {
    const gpa = pt.zcu.gpa;

    var aw: std.Io.Writer.Allocating = .init(gpa);
    defer aw.deinit();

    const name_off = try self.addString(gpa, name);
    const sym_index = try self.newSymbolWithAtom(gpa, name_off);

    codegen.generateSymbol(
        &elf_file.base,
        pt,
        src_loc,
        val,
        &aw.writer,
        .{ .atom_index = sym_index },
    ) catch |err| switch (err) {
        // The allocating writer can only fail on allocation failure.
        error.WriteFailed => return error.OutOfMemory,
        else => |e| return e,
    };
    const code = aw.written();

    // Mark the ELF symbol as a data object and size the atom to the code.
    const local_sym = self.symbol(sym_index);
    const local_esym = &self.symtab.items(.elf_sym)[local_sym.esym_index];
    local_esym.st_info |= elf.STT_OBJECT;
    local_esym.st_size = code.len;
    const atom_ptr = local_sym.atom(elf_file).?;
    atom_ptr.alive = true;
    atom_ptr.alignment = required_alignment;
    atom_ptr.size = code.len;
    atom_ptr.output_section_index = output_section_index;

    try self.allocateAtom(atom_ptr, true, elf_file);
    errdefer self.freeNavMetadata(elf_file, sym_index);

    try elf_file.pwriteAll(code, atom_ptr.offset(elf_file));

    return .{ .sym_index = sym_index };
}
1855
/// Creates or updates global symbols for each export of `exported`
/// (a Nav or Uav). Each export alias gets a global ELF symbol mirroring the
/// exported value's symtab entry, with binding derived from the export's
/// linkage. Unsupported options (custom sections, link-once) are reported
/// through `zcu.failed_exports` rather than as hard errors.
pub fn updateExports(
    self: *ZigObject,
    elf_file: *Elf,
    pt: Zcu.PerThread,
    exported: Zcu.Exported,
    export_indices: []const Zcu.Export.Index,
) link.File.UpdateExportsError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const zcu = pt.zcu;
    const gpa = elf_file.base.comp.gpa;
    // Find (or lazily create) the metadata for the exported value; for Uavs
    // this may require lowering the value first.
    const metadata = switch (exported) {
        .nav => |nav| blk: {
            _ = try self.getOrCreateMetadataForNav(zcu, nav);
            break :blk self.navs.getPtr(nav).?;
        },
        .uav => |uav| self.uavs.getPtr(uav) orelse blk: {
            const first_exp = export_indices[0].ptr(zcu);
            const res = try self.lowerUav(elf_file, pt, uav, .none, first_exp.src);
            switch (res) {
                .sym_index => {},
                .fail => |em| {
                    // TODO maybe it's enough to return an error here and let Zcu.processExportsInner
                    // handle the error?
                    try zcu.failed_exports.ensureUnusedCapacity(zcu.gpa, 1);
                    zcu.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em);
                    return;
                },
            }
            break :blk self.uavs.getPtr(uav).?;
        },
    };
    // Snapshot the exported value's symtab entry; each export alias copies it.
    const sym_index = metadata.symbol_index;
    const esym_index = self.symbol(sym_index).esym_index;
    const esym = self.symtab.items(.elf_sym)[esym_index];
    const esym_shndx = self.symtab.items(.shndx)[esym_index];

    for (export_indices) |export_idx| {
        const exp = export_idx.ptr(zcu);
        // Custom sections other than .text are not implemented yet.
        if (exp.opts.section.unwrap()) |section_name| {
            if (!section_name.eqlSlice(".text", &zcu.intern_pool)) {
                try zcu.failed_exports.ensureUnusedCapacity(zcu.gpa, 1);
                zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, try Zcu.ErrorMsg.create(
                    gpa,
                    exp.src,
                    "Unimplemented: ExportOptions.section",
                    .{},
                ));
                continue;
            }
        }
        // Map Zig linkage to an ELF symbol binding.
        const stb_bits: u8 = switch (exp.opts.linkage) {
            .internal => elf.STB_LOCAL,
            .strong => elf.STB_GLOBAL,
            .weak => elf.STB_WEAK,
            .link_once => {
                try zcu.failed_exports.ensureUnusedCapacity(zcu.gpa, 1);
                zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, try Zcu.ErrorMsg.create(
                    gpa,
                    exp.src,
                    "Unimplemented: GlobalLinkage.LinkOnce",
                    .{},
                ));
                continue;
            },
        };
        // Keep the original symbol type (low 4 bits of st_info).
        const stt_bits: u8 = @as(u4, @truncate(esym.st_info));
        const exp_name = exp.opts.name.toSlice(&zcu.intern_pool);
        const name_off = try self.strtab.insert(gpa, exp_name);
        // Reuse an existing export alias of the same name, if any.
        const global_sym_index = if (metadata.@"export"(self, exp_name)) |exp_index|
            exp_index.*
        else blk: {
            const global_sym_index = try self.getGlobalSymbol(elf_file, exp_name, null);
            try metadata.exports.append(gpa, global_sym_index);
            break :blk global_sym_index;
        };

        // Mirror the exported value into the global symbol / symtab entry.
        const value = self.symbol(sym_index).value;
        const global_sym = self.symbol(global_sym_index);
        global_sym.value = value;
        global_sym.flags.weak = exp.opts.linkage == .weak;
        global_sym.version_index = elf_file.default_sym_version;
        global_sym.ref = .{ .index = esym_shndx, .file = self.index };
        const global_esym = &self.symtab.items(.elf_sym)[global_sym.esym_index];
        global_esym.st_value = @intCast(value);
        global_esym.st_shndx = esym.st_shndx;
        global_esym.st_info = (stb_bits << 4) | stt_bits;
        global_esym.st_name = name_off;
        global_esym.st_size = esym.st_size;
        self.symtab.items(.shndx)[global_sym.esym_index] = esym_shndx;
    }
}
1949
/// Updates DWARF line number information for the given tracked instruction.
/// No-op when debug info emission is disabled.
pub fn updateLineNumber(self: *ZigObject, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void {
    if (self.dwarf == null) return;
    const dwarf = &self.dwarf.?;
    dwarf.updateLineNumber(pt.zcu, ti_id) catch |err| switch (err) {
        error.Overflow, error.OutOfMemory => |e| return e,
        else => |e| {
            const diags = &dwarf.bin_file.comp.link_diags;
            return diags.fail("failed to update dwarf line numbers: {s}", .{@errorName(e)});
        },
    };
}
1961
/// Deletes the export alias called `name` that was previously created for
/// `exported`. Silently does nothing if the value or the alias is untracked.
pub fn deleteExport(
    self: *ZigObject,
    elf_file: *Elf,
    exported: Zcu.Exported,
    name: InternPool.NullTerminatedString,
) void {
    const metadata = switch (exported) {
        .nav => |nav_index| self.navs.getPtr(nav_index),
        .uav => |uav_index| self.uavs.getPtr(uav_index),
    } orelse return;
    const zcu = elf_file.base.comp.zcu.?;
    const export_name = name.toSlice(&zcu.intern_pool);
    const exp_sym_index = metadata.@"export"(self, export_name) orelse return;
    log.debug("deleting export '{s}'", .{export_name});
    const esym_index = self.symbol(exp_sym_index.*).esym_index;
    const esym_ptr = &self.symtab.items(.elf_sym)[esym_index];
    // Drop the global lookup entry before zeroing the symbol, since the
    // lookup is keyed by the symbol's name offset.
    _ = self.globals_lookup.remove(esym_ptr.st_name);
    esym_ptr.* = Elf.null_sym;
    self.symtab.items(.shndx)[esym_index] = elf.SHN_UNDEF;
}
1982
/// Returns the index of the global symbol called `name`, creating it on
/// first use. `lib_name` is currently unused.
pub fn getGlobalSymbol(self: *ZigObject, elf_file: *Elf, name: []const u8, lib_name: ?[]const u8) !u32 {
    _ = lib_name;
    const gpa = elf_file.base.comp.gpa;
    const name_off = try self.strtab.insert(gpa, name);
    const gop = try self.globals_lookup.getOrPut(gpa, name_off);
    if (!gop.found_existing) gop.value_ptr.* = try self.newGlobalSymbol(gpa, name_off);
    return gop.value_ptr.*;
}
1993
1994const max_trampoline_len = 12;
1995
/// Returns the size in bytes of a jump trampoline for `cpu_arch`.
/// Because every non-x86_64 branch is `noreturn`, `len` is comptime-known,
/// which is what allows the `comptime assert` below.
fn trampolineSize(cpu_arch: std.Target.Cpu.Arch) u64 {
    const len = switch (cpu_arch) {
        .x86_64 => 5, // jmp rel32
        else => @panic("TODO implement trampoline size for this CPU arch"),
    };
    // Guarantee the stack buffer used by `writeTrampoline` is large enough.
    comptime assert(len <= max_trampoline_len);
    return len;
}
2004
/// Encodes a jump from `tr_sym`'s address to `target` and writes it into the
/// output file at the trampoline atom's file offset. When hot-code-swapping
/// into a live child process (Linux only), also patches the trampoline bytes
/// directly into the child's memory via `process_vm_writev`.
fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void {
    const atom_ptr = tr_sym.atom(elf_file).?;
    const fileoff = atom_ptr.offset(elf_file);
    const source_addr = tr_sym.address(.{}, elf_file);
    // `.trampoline = false` so we resolve the real target, not the trampoline.
    const target_addr = target.address(.{ .trampoline = false }, elf_file);
    var buf: [max_trampoline_len]u8 = undefined;
    const out = switch (elf_file.getTarget().cpu.arch) {
        .x86_64 => try x86_64.writeTrampolineCode(source_addr, target_addr, &buf),
        else => @panic("TODO implement write trampoline for this CPU arch"),
    };
    try elf_file.base.file.?.pwriteAll(out, fileoff);

    if (elf_file.base.child_pid) |pid| {
        switch (builtin.os.tag) {
            .linux => {
                var local_vec: [1]std.posix.iovec_const = .{.{
                    .base = out.ptr,
                    .len = out.len,
                }};
                var remote_vec: [1]std.posix.iovec_const = .{.{
                    .base = @as([*]u8, @ptrFromInt(@as(usize, @intCast(source_addr)))),
                    .len = out.len,
                }};
                const rc = std.os.linux.process_vm_writev(pid, &local_vec, &remote_vec, 0);
                switch (std.os.linux.errno(rc)) {
                    // A partial write would be a bug; the trampoline is tiny.
                    .SUCCESS => assert(rc == out.len),
                    else => |errno| log.warn("process_vm_writev failure: {s}", .{@tagName(errno)}),
                }
            },
            else => return error.HotSwapUnavailableOnHostOperatingSystem,
        }
    }
}
2038
/// Allocates (or reallocates) space for `atom_ptr` inside its output section
/// and splices the atom into the section's doubly-linked atom list at its new
/// placement. Marks DWARF sections dirty when the section grows, since the
/// compilation unit's address range changes.
pub fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, requires_padding: bool, elf_file: *Elf) !void {
    const slice = elf_file.sections.slice();
    const shdr = &slice.items(.shdr)[atom_ptr.output_section_index];
    const last_atom_ref = &slice.items(.last_atom)[atom_ptr.output_section_index];

    // If this atom is currently the section's last atom, retire it from that
    // role before reallocating, so the section tail stays consistent.
    if (last_atom_ref.eql(atom_ptr.ref())) {
        if (atom_ptr.prevAtom(elf_file)) |prev_atom| {
            prev_atom.next_atom_ref = .{};
            last_atom_ref.* = prev_atom.ref();
        } else {
            last_atom_ref.* = .{};
        }
    }

    const alloc_res = try elf_file.allocateChunk(.{
        .shndx = atom_ptr.output_section_index,
        .size = atom_ptr.size,
        .alignment = atom_ptr.alignment,
        .requires_padding = requires_padding,
    });
    atom_ptr.value = @intCast(alloc_res.value);
    log.debug("allocated {s} at {x}\n placement {f}", .{
        atom_ptr.name(elf_file),
        atom_ptr.offset(elf_file),
        alloc_res.placement,
    });

    // The section grew if we were placed after the (previous) last atom.
    const expand_section = if (elf_file.atom(alloc_res.placement)) |placement_atom|
        placement_atom.nextAtom(elf_file) == null
    else
        true;
    if (expand_section) {
        last_atom_ref.* = atom_ptr.ref();
        if (self.dwarf) |_| {
            // The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
            // range of the compilation unit. When we expand the text section, this range changes,
            // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
            self.debug_info_section_dirty = true;
            // This becomes dirty for the same reason. We could potentially make this more
            // fine-grained with the addition of support for more compilation units. It is planned to
            // model each package as a different compilation unit.
            self.debug_aranges_section_dirty = true;
            self.debug_rnglists_section_dirty = true;
        }
    }
    shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits().?);

    // This function can also reallocate an atom.
    // In this case we need to "unplug" it from its previous location before
    // plugging it in to its new location.
    if (atom_ptr.prevAtom(elf_file)) |prev| {
        prev.next_atom_ref = atom_ptr.next_atom_ref;
    }
    if (atom_ptr.nextAtom(elf_file)) |next| {
        next.prev_atom_ref = atom_ptr.prev_atom_ref;
    }

    // Link the atom in after its placement atom, or reset its links when it
    // was placed at the start of an empty section.
    if (elf_file.atom(alloc_res.placement)) |big_atom| {
        atom_ptr.prev_atom_ref = alloc_res.placement;
        atom_ptr.next_atom_ref = big_atom.next_atom_ref;
        big_atom.next_atom_ref = atom_ptr.ref();
    } else {
        atom_ptr.prev_atom_ref = .{ .index = 0, .file = 0 };
        atom_ptr.next_atom_ref = .{ .index = 0, .file = 0 };
    }

    log.debug(" prev {f}, next {f}", .{ atom_ptr.prev_atom_ref, atom_ptr.next_atom_ref });
}
2107
/// Remaps every output section index stored in this object (on atoms and on
/// the per-section symbols) through `backlinks`, after the linker has
/// reordered/renumbered output sections.
pub fn resetShdrIndexes(self: *ZigObject, backlinks: []const u32) void {
    for (self.atoms_indexes.items) |atom_index| {
        const atom_ptr = self.atom(atom_index) orelse continue;
        atom_ptr.output_section_index = backlinks[atom_ptr.output_section_index];
    }
    // All section symbols this object may own; entries are null until the
    // corresponding section is first created.
    inline for ([_]?Symbol.Index{
        self.text_index,
        self.rodata_index,
        self.data_relro_index,
        self.data_index,
        self.bss_index,
        self.tdata_index,
        self.tbss_index,
        self.eh_frame_index,
        self.debug_info_index,
        self.debug_abbrev_index,
        self.debug_aranges_index,
        self.debug_str_index,
        self.debug_line_index,
        self.debug_line_str_index,
        self.debug_loclists_index,
        self.debug_rnglists_index,
    }) |maybe_sym_index| {
        if (maybe_sym_index) |sym_index| {
            const sym = self.symbol(sym_index);
            sym.output_section_index = backlinks[sym.output_section_index];
        }
    }
}
2137
/// Wraps this ZigObject in the polymorphic `File` union.
pub fn asFile(self: *ZigObject) File {
    return File{ .zig_object = self };
}
2141
/// Returns this object's section symbol whose output section index matches
/// `shndx`, or null if this object owns no symbol for that section.
pub fn sectionSymbol(self: *ZigObject, shndx: u32, elf_file: *Elf) ?*Symbol {
    // All section symbols this object may own; entries are null until the
    // corresponding section is first created.
    inline for ([_]?Symbol.Index{
        self.text_index,
        self.rodata_index,
        self.data_relro_index,
        self.data_index,
        self.bss_index,
        self.tdata_index,
        self.tbss_index,
        self.eh_frame_index,
        self.debug_info_index,
        self.debug_abbrev_index,
        self.debug_aranges_index,
        self.debug_str_index,
        self.debug_line_index,
        self.debug_line_str_index,
        self.debug_loclists_index,
        self.debug_rnglists_index,
    }) |maybe_sym_index| {
        if (maybe_sym_index) |sym_index| {
            const sym = self.symbol(sym_index);
            if (sym.outputShndx(elf_file) == shndx) return sym;
        }
    }
    return null;
}
2168
/// Interns `string` in this object's string table and returns its offset.
pub fn addString(self: *ZigObject, allocator: Allocator, string: []const u8) !u32 {
    const offset = try self.strtab.insert(allocator, string);
    return offset;
}
2172
/// Returns the interned string at offset `off`. Asserts the offset is valid.
pub fn getString(self: ZigObject, off: u32) [:0]const u8 {
    const string = self.strtab.getAssumeExists(off);
    return string;
}
2176
/// Appends a new atom, reserving capacity in both the atom list and the
/// extra-data array, and returns its index.
fn addAtom(self: *ZigObject, allocator: Allocator) !Atom.Index {
    try self.atoms.ensureUnusedCapacity(allocator, 1);
    // `atoms_extra` stores one u32 element per field of `Atom.Extra`, so
    // reserve exactly `fields.len` elements, mirroring `addAtomExtra`.
    // (Previously this reserved `@sizeOf(Atom.Extra)` elements, over-reserving
    // by a factor of @sizeOf(u32).)
    try self.atoms_extra.ensureUnusedCapacity(allocator, @typeInfo(Atom.Extra).@"struct".fields.len);
    return self.addAtomAssumeCapacity();
}
2182
/// Appends a new atom without allocating; capacity must have been reserved
/// (see `addAtom`). Returns the new atom's index.
fn addAtomAssumeCapacity(self: *ZigObject) Atom.Index {
    const atom_index: Atom.Index = @intCast(self.atoms.items.len);
    self.atoms.appendAssumeCapacity(.{
        .file_index = self.index,
        .atom_index = atom_index,
        .extra_index = self.addAtomExtraAssumeCapacity(.{}),
    });
    return atom_index;
}
2193
/// Returns a pointer to the atom at `atom_index`, or null for the reserved
/// index 0 (which means "no atom"). Asserts the index is in range.
pub fn atom(self: *ZigObject, atom_index: Atom.Index) ?*Atom {
    if (atom_index == 0) return null;
    assert(atom_index < self.atoms.items.len);
    return &self.atoms.items[atom_index];
}
2199
/// Appends `extra` to the atom extra-data array, allocating as needed.
/// Returns the starting index of the stored data.
fn addAtomExtra(self: *ZigObject, allocator: Allocator, extra: Atom.Extra) !u32 {
    const fields = @typeInfo(Atom.Extra).@"struct".fields;
    // One u32 element is stored per field.
    try self.atoms_extra.ensureUnusedCapacity(allocator, fields.len);
    return self.addAtomExtraAssumeCapacity(extra);
}
2205
/// Appends the fields of `extra` (one u32 each) to `atoms_extra` and returns
/// the start index. Capacity must have been reserved beforehand.
fn addAtomExtraAssumeCapacity(self: *ZigObject, extra: Atom.Extra) u32 {
    const index: u32 = @intCast(self.atoms_extra.items.len);
    inline for (@typeInfo(Atom.Extra).@"struct".fields) |field| {
        self.atoms_extra.appendAssumeCapacity(switch (field.type) {
            u32 => @field(extra, field.name),
            else => @compileError("bad field type"),
        });
    }
    return index;
}
2217
/// Reconstructs an `Atom.Extra` from the u32 elements stored starting at
/// `index` in `atoms_extra`.
pub fn atomExtra(self: ZigObject, index: u32) Atom.Extra {
    var result: Atom.Extra = undefined;
    inline for (@typeInfo(Atom.Extra).@"struct".fields, 0..) |field, i| {
        @field(result, field.name) = switch (field.type) {
            u32 => self.atoms_extra.items[index + i],
            else => @compileError("bad field type"),
        };
    }
    return result;
}
2231
/// Overwrites the `Atom.Extra` record stored at `index` in `atoms_extra`.
/// Index 0 is reserved, hence the assertion.
pub fn setAtomExtra(self: *ZigObject, index: u32, extra: Atom.Extra) void {
    assert(index > 0);
    const fields = @typeInfo(Atom.Extra).@"struct".fields;
    inline for (fields, 0..) |field, i| {
        self.atoms_extra.items[index + i] = switch (field.type) {
            u32 => @field(extra, field.name),
            else => @compileError("bad field type"),
        };
    }
}
2242
/// Returns true when `index` encodes a global symbol (global bit set);
/// otherwise the index refers to a local symbol.
inline fn isGlobal(index: Symbol.Index) bool {
    return index & global_symbol_bit != 0;
}
2246
/// Resolves an encoded symbol index (local or global, see `isGlobal`) to a
/// pointer into `symbols`.
pub fn symbol(self: *ZigObject, index: Symbol.Index) *Symbol {
    const actual_index = index & symbol_mask;
    const table = if (isGlobal(index)) self.global_symbols.items else self.local_symbols.items;
    return &self.symbols.items[table[actual_index]];
}
2252
/// Resolves an encoded symbol index to a file-qualified reference. Globals go
/// through the linker-wide resolver; locals are always owned by this object.
pub fn resolveSymbol(self: ZigObject, index: Symbol.Index, elf_file: *Elf) Elf.Ref {
    if (!isGlobal(index)) return .{ .index = index, .file = self.index };
    const resolv = self.symbols_resolver.items[index & symbol_mask];
    return elf_file.resolver.get(resolv).?;
}
2260
/// Appends a new symbol, allocating as needed, and returns its index.
fn addSymbol(self: *ZigObject, allocator: Allocator) !Symbol.Index {
    try self.symbols.ensureUnusedCapacity(allocator, 1);
    return self.addSymbolAssumeCapacity();
}
2265
/// Appends a new symbol without allocating; capacity must have been reserved
/// (see `addSymbol`). Returns the new symbol's index.
fn addSymbolAssumeCapacity(self: *ZigObject) Symbol.Index {
    const index: Symbol.Index = @intCast(self.symbols.items.len);
    self.symbols.appendAssumeCapacity(.{ .file_index = self.index });
    return index;
}
2271
/// Appends `extra` to the symbol extra-data array, allocating as needed.
/// Returns the starting index of the stored data.
pub fn addSymbolExtra(self: *ZigObject, allocator: Allocator, extra: Symbol.Extra) !u32 {
    const fields = @typeInfo(Symbol.Extra).@"struct".fields;
    // One u32 element is stored per field.
    try self.symbols_extra.ensureUnusedCapacity(allocator, fields.len);
    return self.addSymbolExtraAssumeCapacity(extra);
}
2277
/// Appends the fields of `extra` (one u32 each) to `symbols_extra` and
/// returns the start index. Capacity must have been reserved beforehand.
pub fn addSymbolExtraAssumeCapacity(self: *ZigObject, extra: Symbol.Extra) u32 {
    const index: u32 = @intCast(self.symbols_extra.items.len);
    inline for (@typeInfo(Symbol.Extra).@"struct".fields) |field| {
        self.symbols_extra.appendAssumeCapacity(switch (field.type) {
            u32 => @field(extra, field.name),
            else => @compileError("bad field type"),
        });
    }
    return index;
}
2289
/// Reconstructs a `Symbol.Extra` from the u32 elements stored starting at
/// `index` in `symbols_extra`.
pub fn symbolExtra(self: *ZigObject, index: u32) Symbol.Extra {
    var result: Symbol.Extra = undefined;
    inline for (@typeInfo(Symbol.Extra).@"struct".fields, 0..) |field, i| {
        @field(result, field.name) = switch (field.type) {
            u32 => self.symbols_extra.items[index + i],
            else => @compileError("bad field type"),
        };
    }
    return result;
}
2303
/// Overwrites the `Symbol.Extra` record stored at `index` in `symbols_extra`.
pub fn setSymbolExtra(self: *ZigObject, index: u32, extra: Symbol.Extra) void {
    const fields = @typeInfo(Symbol.Extra).@"struct".fields;
    inline for (fields, 0..) |field, i| {
        self.symbols_extra.items[index + i] = switch (field.type) {
            u32 => @field(extra, field.name),
            else => @compileError("bad field type"),
        };
    }
}
2313
/// Formatting context used by `fmtSymtab`/`fmtAtoms` to render debug dumps.
const Format = struct {
    self: *ZigObject,
    elf_file: *Elf,

    /// Writes a human-readable dump of this object's local and global symbols.
    fn symtab(f: Format, writer: *std.Io.Writer) std.Io.Writer.Error!void {
        const self = f.self;
        const elf_file = f.elf_file;
        try writer.writeAll(" locals\n");
        for (self.local_symbols.items) |index| {
            const local = self.symbols.items[index];
            try writer.print(" {f}\n", .{local.fmt(elf_file)});
        }
        try writer.writeAll(" globals\n");
        // Use the captured `self`, consistent with the locals loop above.
        for (self.global_symbols.items) |index| {
            const global = self.symbols.items[index];
            try writer.print(" {f}\n", .{global.fmt(elf_file)});
        }
    }

    /// Writes a human-readable dump of all atoms owned by this object.
    fn atoms(f: Format, writer: *std.Io.Writer) std.Io.Writer.Error!void {
        try writer.writeAll(" atoms\n");
        for (f.self.atoms_indexes.items) |atom_index| {
            const atom_ptr = f.self.atom(atom_index) orelse continue;
            try writer.print(" {f}\n", .{atom_ptr.fmt(f.elf_file)});
        }
    }
};
2341
/// Returns a formatter that dumps this object's symbol table for debugging.
pub fn fmtSymtab(self: *ZigObject, elf_file: *Elf) std.fmt.Alt(Format, Format.symtab) {
    const data: Format = .{ .self = self, .elf_file = elf_file };
    return .{ .data = data };
}
2348
/// Returns a formatter that dumps this object's atoms for debugging.
pub fn fmtAtoms(self: *ZigObject, elf_file: *Elf) std.fmt.Alt(Format, Format.atoms) {
    const data: Format = .{ .self = self, .elf_file = elf_file };
    return .{ .data = data };
}
2355
/// An input ELF symbol together with its section index, stored out-of-band
/// because `st_shndx` in `Elf64_Sym` is only 16 bits.
const ElfSym = struct {
    elf_sym: elf.Elf64_Sym,
    // Section index of the symbol; SHN_UNDEF when not yet assigned.
    shndx: u32 = elf.SHN_UNDEF,
};
2360
/// Tracking state for a lazy symbol's code and const-data variants.
/// The symbol index fields are only valid when the corresponding state is
/// not `.unused`.
const LazySymbolMetadata = struct {
    const State = enum { unused, pending_flush, flushed };
    text_symbol_index: Symbol.Index = undefined,
    rodata_symbol_index: Symbol.Index = undefined,
    text_state: State = .unused,
    rodata_state: State = .unused,
};
2368
/// Per-value (Nav/Uav) bookkeeping: the backing symbol plus any export
/// aliases created for it.
const AvMetadata = struct {
    symbol_index: Symbol.Index,
    /// A list of all exports aliases of this Av.
    exports: std.ArrayList(Symbol.Index) = .empty,
    /// Set to true if the AV has been initialized and allocated.
    allocated: bool = false,

    /// Returns a pointer to the export alias whose symbol name equals `name`,
    /// or null if no such alias exists.
    fn @"export"(m: AvMetadata, zig_object: *ZigObject, name: []const u8) ?*u32 {
        for (m.exports.items) |*exp| {
            const exp_name = zig_object.getString(zig_object.symbol(exp.*).name_offset);
            if (mem.eql(u8, name, exp_name)) return exp;
        }
        return null;
    }
};
2384
/// Debugging aid: logs an error if a Nav has a symbol assigned but its atom
/// was never allocated.
fn checkNavAllocated(pt: Zcu.PerThread, index: InternPool.Nav.Index, meta: AvMetadata) void {
    if (meta.allocated) return;
    const ip = &pt.zcu.intern_pool;
    const nav = ip.getNav(index);
    log.err("NAV {f}({d}) assigned symbol {d} but not allocated!", .{
        nav.fqn.fmt(ip),
        index,
        meta.symbol_index,
    });
}
2397
/// Debugging aid: logs an error if a Uav has a symbol assigned but its atom
/// was never allocated.
fn checkUavAllocated(pt: Zcu.PerThread, index: InternPool.Index, meta: AvMetadata) void {
    if (meta.allocated) return;
    const ty = Value.fromInterned(index).typeOf(pt.zcu);
    log.err("UAV {f}({d}) assigned symbol {d} but not allocated!", .{
        ty.fmt(pt),
        index,
        meta.symbol_index,
    });
}
2410
/// A thread-local variable's symbol and its generated initializer bytes.
const TlsVariable = struct {
    symbol_index: Symbol.Index,
    // Owned initializer bytes; freed in `deinit`.
    code: []const u8 = &[0]u8{},

    fn deinit(tlv: *TlsVariable, allocator: Allocator) void {
        allocator.free(tlv.code);
    }
};
2419
2420const AtomList = std.ArrayList(Atom.Index);
2421const NavTable = std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvMetadata);
2422const UavTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, AvMetadata);
2423const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, LazySymbolMetadata);
2424const TlsTable = std.AutoArrayHashMapUnmanaged(Atom.Index, void);
2425
const x86_64 = struct {
    /// Encodes a 5-byte `jmp rel32` at `source_addr` that jumps to
    /// `target_addr`, writing into `buf`. Returns the slice of `buf`
    /// containing the instruction, or `error.Overflow` if the displacement
    /// does not fit in a signed 32-bit immediate. (Previously an oversized
    /// displacement tripped an `@intCast` safety panic instead of returning
    /// a recoverable error.)
    fn writeTrampolineCode(source_addr: i64, target_addr: i64, buf: *[max_trampoline_len]u8) ![]u8 {
        var bytes = [_]u8{
            0xe9, 0x00, 0x00, 0x00, 0x00, // jmp rel32
        };
        assert(bytes.len == trampolineSize(.x86_64));
        // rel32 is relative to the address of the *next* instruction,
        // i.e. source + 5.
        const disp = target_addr - source_addr - 5;
        const disp32 = std.math.cast(i32, disp) orelse return error.Overflow;
        mem.writeInt(i32, bytes[1..][0..4], disp32, .little);
        @memcpy(buf[0..bytes.len], &bytes);
        return buf[0..bytes.len];
    }
};
2438
2439const assert = std.debug.assert;
2440const build_options = @import("build_options");
2441const builtin = @import("builtin");
2442const codegen = @import("../../codegen.zig");
2443const elf = std.elf;
2444const link = @import("../../link.zig");
2445const log = std.log.scoped(.link);
2446const mem = std.mem;
2447const relocation = @import("relocation.zig");
2448const target_util = @import("../../target.zig");
2449const trace = @import("../../tracy.zig").trace;
2450const std = @import("std");
2451const Allocator = std.mem.Allocator;
2452
2453const Archive = @import("Archive.zig");
2454const Atom = @import("Atom.zig");
2455const Dwarf = @import("../Dwarf.zig");
2456const Elf = @import("../Elf.zig");
2457const File = @import("file.zig").File;
2458const InternPool = @import("../../InternPool.zig");
2459const Zcu = @import("../../Zcu.zig");
2460const Object = @import("Object.zig");
2461const Symbol = @import("Symbol.zig");
2462const StringTable = @import("../StringTable.zig");
2463const Type = @import("../../Type.zig");
2464const Value = @import("../../Value.zig");
2465const AnalUnit = InternPool.AnalUnit;
2466const ZigObject = @This();