//! Linker backend for the COFF object format and PE (Portable Executable) images.
base: link.File,
/// Memory-mapped view of the output file; all header/section bytes live here.
mf: MappedFile,
/// Parallel to `mf`'s node list: records what each mapped-file node represents.
/// Indexed by `@intFromEnum(MappedFile.Node.Index)` (see `getNode`).
nodes: std.MultiArrayList(Node),
/// Per-DLL import tables (lookup/address/hint-name).
import_table: ImportTable,
/// Interned-string set; keys are byte offsets into `string_bytes`.
strings: std.HashMapUnmanaged(
    u32,
    void,
    std.hash_map.StringIndexContext,
    std.hash_map.default_max_load_percentage,
),
/// Backing storage for interned, null-terminated strings.
string_bytes: std.ArrayList(u8),
/// Section symbols indexed by COFF section number minus one
/// (see `Symbol.SectionNumber.toIndex`).
image_section_table: std.ArrayList(Symbol.Index),
pseudo_section_table: std.AutoArrayHashMapUnmanaged(String, Symbol.Index),
object_section_table: std.AutoArrayHashMapUnmanaged(String, Symbol.Index),
symbol_table: std.ArrayList(Symbol),
/// Global symbols keyed by name plus optional library name.
globals: std.AutoArrayHashMapUnmanaged(GlobalName, Symbol.Index),
/// Index into `globals` of the first entry not yet processed
/// (used to size the "Synthetics" progress node).
global_pending_index: u32,
navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, Symbol.Index),
uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Symbol.Index),
/// Lazily-emitted symbols, one map per lazy symbol kind.
lazy: std.EnumArray(link.File.LazySymbol.Kind, struct {
    map: std.AutoArrayHashMapUnmanaged(InternPool.Index, Symbol.Index),
    /// Index into `map` of the first entry not yet processed.
    pending_index: u32,
}),
/// Unnamed values awaiting placement in the output file.
pending_uavs: std.AutoArrayHashMapUnmanaged(Node.UavMapIndex, struct {
    alignment: InternPool.Alignment,
    src_loc: Zcu.LazySrcLoc,
}),
/// All relocations. Entries sharing a location symbol are contiguous
/// (see `Symbol.Index.applyLocationRelocs`).
relocs: std.ArrayList(Reloc),
const_prog_node: std.Progress.Node,
synth_prog_node: std.Progress.Node,
31
/// Default PE file alignment (0x200 = 512 bytes).
pub const default_file_alignment: u16 = 0x200;
/// Defaults written into the optional header's stack/heap size fields.
pub const default_size_of_stack_reserve: u32 = 0x1000000;
pub const default_size_of_stack_commit: u32 = 0x1000;
pub const default_size_of_heap_reserve: u32 = 0x100000;
pub const default_size_of_heap_commit: u32 = 0x1000;
37
/// This is the start of a Portable Executable (PE) file.
/// It starts with a MS-DOS header followed by a MS-DOS stub program.
/// This data does not change so we include it as follows in all binaries.
///
/// In this context,
/// A "paragraph" is 16 bytes.
/// A "page" is 512 bytes.
/// A "long" is 4 bytes.
/// A "word" is 2 bytes.
pub const msdos_stub: [120]u8 = .{
    'M', 'Z', // Magic number. Stands for Mark Zbikowski (designer of the MS-DOS executable format).
    0x78, 0x00, // Number of bytes in the last page. This matches the size of this entire MS-DOS stub.
    0x01, 0x00, // Number of pages.
    0x00, 0x00, // Number of entries in the relocation table.
    0x04, 0x00, // The number of paragraphs taken up by the header. 4 * 16 = 64, which matches the header size (all bytes before the MS-DOS stub program).
    0x00, 0x00, // The number of paragraphs required by the program.
    0x00, 0x00, // The number of paragraphs requested by the program.
    0x00, 0x00, // Initial value for SS (relocatable segment address).
    0x00, 0x00, // Initial value for SP.
    0x00, 0x00, // Checksum.
    0x00, 0x00, // Initial value for IP.
    0x00, 0x00, // Initial value for CS (relocatable segment address).
    0x40, 0x00, // Absolute offset to relocation table. 64 matches the header size (all bytes before the MS-DOS stub program).
    0x00, 0x00, // Overlay number. Zero means this is the main executable.
}
    // Reserved words.
    ++ .{ 0x00, 0x00 } ** 4
    // OEM-related fields.
    ++ .{
        0x00, 0x00, // OEM identifier.
        0x00, 0x00, // OEM information.
    }
    // Reserved words.
    ++ .{ 0x00, 0x00 } ** 10
    // Address of the PE header (a long). This matches the size of this entire MS-DOS stub, so that's the address of what's after this MS-DOS stub.
    ++ .{ 0x78, 0x00, 0x00, 0x00 }
    // What follows is a 16-bit x86 MS-DOS program of 7 instructions that prints the bytes after these instructions and then exits.
    ++ .{
        // Set the value of the data segment to the same value as the code segment.
        0x0e, // push cs
        0x1f, // pop ds
        // Set the DX register to the address of the message.
        // If you count all bytes of these 7 instructions you get 14, so that's the address of what's after these instructions.
        0xba, 14, 0x00, // mov dx, 14
        // Set AH to the system call code for printing a message.
        0xb4, 0x09, // mov ah, 0x09
        // Perform the system call to print the message.
        0xcd, 0x21, // int 0x21
        // Set AH to 0x4c which is the system call code for exiting, and set AL to 0x01 which is the exit code.
        0xb8, 0x01, 0x4c, // mov ax, 0x4c01
        // Perform the system call to exit the program with exit code 1.
        0xcd, 0x21, // int 0x21
    }
    // Message to print.
    ++ "This program cannot be run in DOS mode.".*
    // Message terminators.
    ++ .{
        '$', // We do not pass a length to the print system call; the string is terminated by this character.
        0x00, 0x00, // Terminating zero bytes.
    };
98
/// Describes what a `MappedFile` node represents. One entry per mapped-file
/// node is kept in `nodes` (see `getNode`).
pub const Node = union(enum) {
    file,
    header,
    signature,
    coff_header,
    optional_header,
    data_directories,
    section_table,
    /// A loaded image section; payload is the section's symbol.
    image_section: Symbol.Index,

    import_directory_table,
    import_lookup_table: ImportTable.Index,
    import_address_table: ImportTable.Index,
    import_hint_name_table: ImportTable.Index,

    pseudo_section: PseudoSectionMapIndex,
    object_section: ObjectSectionMapIndex,
    global: GlobalMapIndex,
    nav: NavMapIndex,
    uav: UavMapIndex,
    lazy_code: LazyMapRef.Index(.code),
    lazy_const_data: LazyMapRef.Index(.const_data),

    /// Index into `pseudo_section_table`.
    pub const PseudoSectionMapIndex = enum(u32) {
        _,

        /// Returns this entry's section-name key.
        pub fn name(psmi: PseudoSectionMapIndex, coff: *const Coff) String {
            return coff.pseudo_section_table.keys()[@intFromEnum(psmi)];
        }

        /// Returns this entry's section symbol.
        pub fn symbol(psmi: PseudoSectionMapIndex, coff: *const Coff) Symbol.Index {
            return coff.pseudo_section_table.values()[@intFromEnum(psmi)];
        }
    };

    /// Index into `object_section_table`.
    pub const ObjectSectionMapIndex = enum(u32) {
        _,

        /// Returns this entry's section-name key.
        pub fn name(osmi: ObjectSectionMapIndex, coff: *const Coff) String {
            return coff.object_section_table.keys()[@intFromEnum(osmi)];
        }

        /// Returns this entry's section symbol.
        pub fn symbol(osmi: ObjectSectionMapIndex, coff: *const Coff) Symbol.Index {
            return coff.object_section_table.values()[@intFromEnum(osmi)];
        }
    };

    /// Index into `globals`.
    pub const GlobalMapIndex = enum(u32) {
        _,

        /// Returns this entry's `GlobalName` key.
        pub fn globalName(gmi: GlobalMapIndex, coff: *const Coff) GlobalName {
            return coff.globals.keys()[@intFromEnum(gmi)];
        }

        /// Returns this entry's symbol.
        pub fn symbol(gmi: GlobalMapIndex, coff: *const Coff) Symbol.Index {
            return coff.globals.values()[@intFromEnum(gmi)];
        }
    };

    /// Index into `navs`.
    pub const NavMapIndex = enum(u32) {
        _,

        /// Returns this entry's `InternPool` nav key.
        pub fn navIndex(nmi: NavMapIndex, coff: *const Coff) InternPool.Nav.Index {
            return coff.navs.keys()[@intFromEnum(nmi)];
        }

        /// Returns this entry's symbol.
        pub fn symbol(nmi: NavMapIndex, coff: *const Coff) Symbol.Index {
            return coff.navs.values()[@intFromEnum(nmi)];
        }
    };

    /// Index into `uavs`.
    pub const UavMapIndex = enum(u32) {
        _,

        /// Returns this entry's `InternPool` value key.
        pub fn uavValue(umi: UavMapIndex, coff: *const Coff) InternPool.Index {
            return coff.uavs.keys()[@intFromEnum(umi)];
        }

        /// Returns this entry's symbol.
        pub fn symbol(umi: UavMapIndex, coff: *const Coff) Symbol.Index {
            return coff.uavs.values()[@intFromEnum(umi)];
        }
    };

    /// Reference into the `lazy` maps: a lazy-symbol kind plus an index into
    /// that kind's map.
    pub const LazyMapRef = struct {
        kind: link.File.LazySymbol.Kind,
        index: u32,

        /// Returns a distinct index type with `kind` bound at comptime, so
        /// `lazy_code` and `lazy_const_data` payloads cannot be mixed up.
        pub fn Index(comptime kind: link.File.LazySymbol.Kind) type {
            return enum(u32) {
                _,

                pub fn ref(lmi: @This()) LazyMapRef {
                    return .{ .kind = kind, .index = @intFromEnum(lmi) };
                }

                pub fn lazySymbol(lmi: @This(), coff: *const Coff) link.File.LazySymbol {
                    return lmi.ref().lazySymbol(coff);
                }

                pub fn symbol(lmi: @This(), coff: *const Coff) Symbol.Index {
                    return lmi.ref().symbol(coff);
                }
            };
        }

        /// Reconstructs the `link.File.LazySymbol` from the map key.
        pub fn lazySymbol(lmr: LazyMapRef, coff: *const Coff) link.File.LazySymbol {
            return .{ .kind = lmr.kind, .ty = coff.lazy.getPtrConst(lmr.kind).map.keys()[lmr.index] };
        }

        /// Returns this entry's symbol.
        pub fn symbol(lmr: LazyMapRef, coff: *const Coff) Symbol.Index {
            return coff.lazy.getPtrConst(lmr.kind).map.values()[lmr.index];
        }
    };

    pub const Tag = @typeInfo(Node).@"union".tag_type.?;

    /// Number of nodes that `initHeaders` creates unconditionally. The
    /// `known` struct maps each of those nodes to its fixed
    /// `MappedFile.Node.Index`, in creation order.
    const known_count = @typeInfo(@TypeOf(known)).@"struct".fields.len;
    const known = known: {
        const Known = enum {
            file,
            header,
            signature,
            coff_header,
            optional_header,
            data_directories,
            section_table,
        };
        var mut_known: std.enums.EnumFieldStruct(Known, MappedFile.Node.Index, null) = undefined;
        for (@typeInfo(Known).@"enum".fields) |field|
            @field(mut_known, field.name) = @enumFromInt(field.value);
        break :known mut_known;
    };

    comptime {
        // Size check is only asserted without runtime safety, where the
        // union's layout presumably carries no extra safety state — confirm.
        if (!std.debug.runtime_safety) std.debug.assert(@sizeOf(Node) == 8);
    }
};
236
/// Tracks imported DLLs and the PE import tables generated for them.
pub const ImportTable = struct {
    /// Node of the import directory table.
    ni: MappedFile.Node.Index,
    /// One entry per imported DLL. Keys are `void`: a DLL's identity lives in
    /// its hint/name table bytes and lookups go through `Adapter`.
    entries: std.AutoArrayHashMapUnmanaged(void, Entry),

    pub const Entry = struct {
        import_lookup_table_ni: MappedFile.Node.Index,
        import_address_table_si: Symbol.Index,
        import_hint_name_table_ni: MappedFile.Node.Index,
        len: u32,
        hint_name_len: u32,
    };

    /// Hash-map adapter for looking up an entry by DLL base name
    /// (the name without the ".dll" extension).
    const Adapter = struct {
        coff: *Coff,

        pub fn eql(adapter: Adapter, lhs_key: []const u8, _: void, rhs_index: usize) bool {
            const coff = adapter.coff;
            const dll_name = coff.import_table.entries.values()[rhs_index]
                .import_hint_name_table_ni.sliceConst(&coff.mf);
            // Matches when the stored name is exactly `lhs_key` ++ ".dll\x00".
            return std.mem.startsWith(u8, dll_name, lhs_key) and
                std.mem.startsWith(u8, dll_name[lhs_key.len..], ".dll\x00");
        }

        pub fn hash(_: Adapter, key: []const u8) u32 {
            // Keys must not contain the null terminator used in stored names.
            assert(std.mem.indexOfScalar(u8, key, 0) == null);
            return std.array_hash_map.hashString(key);
        }
    };

    /// Index into `entries`.
    pub const Index = enum(u32) {
        _,

        pub fn get(import_index: ImportTable.Index, coff: *Coff) *Entry {
            return &coff.import_table.entries.values()[@intFromEnum(import_index)];
        }
    };
};
274
/// An interned string: a byte offset into `string_bytes`. The named values
/// are pre-interned by `create` in declaration order; each fixed offset is
/// the total length (including null terminators) of the preceding names,
/// which `create` asserts.
pub const String = enum(u32) {
    @".data" = 0,
    @".idata" = 6,
    @".rdata" = 13,
    @".text" = 20,
    @".tls$" = 26,
    _,

    /// Like `?String`, with `null` encoded as `maxInt(u32)`.
    pub const Optional = enum(u32) {
        @".data" = @intFromEnum(String.@".data"),
        @".rdata" = @intFromEnum(String.@".rdata"),
        @".text" = @intFromEnum(String.@".text"),
        @".tls$" = @intFromEnum(String.@".tls$"),
        none = std.math.maxInt(u32),
        _,

        pub fn unwrap(os: String.Optional) ?String {
            return switch (os) {
                else => |s| @enumFromInt(@intFromEnum(s)),
                .none => null,
            };
        }

        pub fn toSlice(os: String.Optional, coff: *Coff) ?[:0]const u8 {
            return (os.unwrap() orelse return null).toSlice(coff);
        }
    };

    /// Returns the interned bytes as a null-terminated slice.
    pub fn toSlice(s: String, coff: *Coff) [:0]const u8 {
        const slice = coff.string_bytes.items[@intFromEnum(s)..];
        return slice[0..std.mem.indexOfScalar(u8, slice, 0).? :0];
    }

    pub fn toOptional(s: String) String.Optional {
        return @enumFromInt(@intFromEnum(s));
    }
};
312
/// Key type for `globals`: a symbol name plus an optional library name.
pub const GlobalName = struct { name: String, lib_name: String.Optional };
314
pub const Symbol = struct {
    /// Node holding this symbol's bytes, or `.none` when unallocated.
    ni: MappedFile.Node.Index,
    /// Relative virtual address of this symbol's bytes.
    rva: u32,
    size: u32,
    /// Relocations contained within this symbol
    loc_relocs: Reloc.Index,
    /// Relocations targeting this symbol
    target_relocs: Reloc.Index,
    section_number: SectionNumber,
    // Explicit padding; keeps @sizeOf(Symbol) == 32 (see comptime check below).
    unused0: u32 = 0,
    unused1: u32 = 0,
    unused2: u16 = 0,

    /// COFF section number: positive values are 1-based section indices,
    /// zero/negative values are the special markers below.
    pub const SectionNumber = enum(i16) {
        UNDEFINED = 0,
        ABSOLUTE = -1,
        DEBUG = -2,
        _,

        /// Converts a 1-based section number to a 0-based table index.
        /// Only valid for positive section numbers (the cast rejects others).
        fn toIndex(sn: SectionNumber) u15 {
            return @intCast(@intFromEnum(sn) - 1);
        }

        /// Returns the section's symbol from `image_section_table`.
        pub fn symbol(sn: SectionNumber, coff: *const Coff) Symbol.Index {
            return coff.image_section_table.items[sn.toIndex()];
        }

        /// Returns a pointer to the section's header in the section table.
        pub fn header(sn: SectionNumber, coff: *Coff) *std.coff.SectionHeader {
            return &coff.sectionTableSlice()[sn.toIndex()];
        }
    };

    /// Index into `symbol_table`. The named values exist because
    /// `initHeaders` creates the null symbol and the .data/.rdata/.text
    /// sections first, in this order.
    pub const Index = enum(u32) {
        null,
        data,
        rdata,
        text,
        _,

        const known_count = @typeInfo(Index).@"enum".fields.len;

        pub fn get(si: Symbol.Index, coff: *Coff) *Symbol {
            return &coff.symbol_table.items[@intFromEnum(si)];
        }

        /// Returns the symbol's node; asserts the symbol is allocated.
        pub fn node(si: Symbol.Index, coff: *Coff) MappedFile.Node.Index {
            const ni = si.get(coff).ni;
            assert(ni != .none);
            return ni;
        }

        /// After the symbol's node has moved: recompute the RVA, then
        /// re-apply relocations in both directions.
        pub fn flushMoved(si: Symbol.Index, coff: *Coff) void {
            const sym = si.get(coff);
            sym.rva = coff.computeNodeRva(sym.ni);
            si.applyLocationRelocs(coff);
            si.applyTargetRelocs(coff);
        }

        /// Applies the contiguous run in `relocs` of relocations located
        /// inside this symbol, starting at `loc_relocs`.
        pub fn applyLocationRelocs(si: Symbol.Index, coff: *Coff) void {
            for (coff.relocs.items[@intFromEnum(si.get(coff).loc_relocs)..]) |*reloc| {
                if (reloc.loc != si) break;
                reloc.apply(coff);
            }
        }

        /// Applies the linked list (via `next`) of relocations that point
        /// at this symbol, starting at `target_relocs`.
        pub fn applyTargetRelocs(si: Symbol.Index, coff: *Coff) void {
            var ri = si.get(coff).target_relocs;
            while (ri != .none) {
                const reloc = ri.get(coff);
                assert(reloc.target == si);
                reloc.apply(coff);
                ri = reloc.next;
            }
        }

        /// Deletes every relocation located inside this symbol and resets
        /// `loc_relocs` to `.none`.
        pub fn deleteLocationRelocs(si: Symbol.Index, coff: *Coff) void {
            const sym = si.get(coff);
            for (coff.relocs.items[@intFromEnum(sym.loc_relocs)..]) |*reloc| {
                if (reloc.loc != si) break;
                reloc.delete(coff);
            }
            sym.loc_relocs = .none;
        }
    };

    comptime {
        // Size check only asserted without runtime safety (layout may differ
        // in safe builds — see matching check on `Node`).
        if (!std.debug.runtime_safety) std.debug.assert(@sizeOf(Symbol) == 32);
    }
};
404
pub const Reloc = extern struct {
    type: Reloc.Type,
    /// `prev`/`next` form an intrusive doubly-linked list of all relocations
    /// sharing the same `target` (rooted at the target's `target_relocs`).
    prev: Reloc.Index,
    next: Reloc.Index,
    /// Symbol whose bytes get patched.
    loc: Symbol.Index,
    /// Symbol whose address gets written.
    target: Symbol.Index,
    unused: u32,
    /// Byte offset within `loc`'s node where the patch is applied.
    offset: u64,
    addend: i64,

    /// Machine-specific relocation kind. Which field is active is determined
    /// by the COFF header's machine value (see `apply`).
    pub const Type = extern union {
        AMD64: std.coff.IMAGE.REL.AMD64,
        ARM: std.coff.IMAGE.REL.ARM,
        ARM64: std.coff.IMAGE.REL.ARM64,
        SH: std.coff.IMAGE.REL.SH,
        PPC: std.coff.IMAGE.REL.PPC,
        I386: std.coff.IMAGE.REL.I386,
        IA64: std.coff.IMAGE.REL.IA64,
        MIPS: std.coff.IMAGE.REL.MIPS,
        M32R: std.coff.IMAGE.REL.M32R,
    };

    /// Index into `relocs`.
    pub const Index = enum(u32) {
        none = std.math.maxInt(u32),
        _,

        pub fn get(si: Reloc.Index, coff: *Coff) *Reloc {
            return &coff.relocs.items[@intFromEnum(si)];
        }
    };

    /// Patches `loc`'s bytes with `target`'s address according to `type`.
    /// Skipped (without error) when either symbol's node is absent or has a
    /// pending move — presumably it is re-applied once nodes settle (see
    /// `Symbol.Index.flushMoved`).
    pub fn apply(reloc: *const Reloc, coff: *Coff) void {
        const loc_sym = reloc.loc.get(coff);
        switch (loc_sym.ni) {
            .none => return,
            else => |ni| if (ni.hasMoved(&coff.mf)) return,
        }
        const target_sym = reloc.target.get(coff);
        switch (target_sym.ni) {
            .none => return,
            else => |ni| if (ni.hasMoved(&coff.mf)) return,
        }
        const loc_slice = loc_sym.ni.slice(&coff.mf)[@intCast(reloc.offset)..];
        // Wrapping add: the addend is signed and may drop the RVA below zero
        // temporarily.
        const target_rva = target_sym.rva +% @as(u64, @bitCast(reloc.addend));
        const target_endian = coff.targetEndian();
        switch (coff.targetLoad(&coff.headerPtr().machine)) {
            else => |machine| @panic(@tagName(machine)),
            .AMD64 => switch (reloc.type.AMD64) {
                else => |kind| @panic(@tagName(kind)),
                .ABSOLUTE => {},
                // ADDR64/ADDR32: absolute virtual address (image base + RVA).
                .ADDR64 => std.mem.writeInt(
                    u64,
                    loc_slice[0..8],
                    coff.optionalHeaderField(.image_base) + target_rva,
                    target_endian,
                ),
                .ADDR32 => std.mem.writeInt(
                    u32,
                    loc_slice[0..4],
                    @intCast(coff.optionalHeaderField(.image_base) + target_rva),
                    target_endian,
                ),
                // ADDR32NB: RVA only ("no base").
                .ADDR32NB => std.mem.writeInt(
                    u32,
                    loc_slice[0..4],
                    @intCast(target_rva),
                    target_endian,
                ),
                // REL32_N: PC-relative displacement measured from the end of
                // the instruction, which extends N bytes past the 4-byte field.
                .REL32 => std.mem.writeInt(
                    i32,
                    loc_slice[0..4],
                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 4)))),
                    target_endian,
                ),
                .REL32_1 => std.mem.writeInt(
                    i32,
                    loc_slice[0..4],
                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 5)))),
                    target_endian,
                ),
                .REL32_2 => std.mem.writeInt(
                    i32,
                    loc_slice[0..4],
                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 6)))),
                    target_endian,
                ),
                .REL32_3 => std.mem.writeInt(
                    i32,
                    loc_slice[0..4],
                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 7)))),
                    target_endian,
                ),
                .REL32_4 => std.mem.writeInt(
                    i32,
                    loc_slice[0..4],
                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 8)))),
                    target_endian,
                ),
                .REL32_5 => std.mem.writeInt(
                    i32,
                    loc_slice[0..4],
                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 9)))),
                    target_endian,
                ),
                // SECREL: offset of the target within its section.
                .SECREL => std.mem.writeInt(
                    u32,
                    loc_slice[0..4],
                    coff.computeNodeSectionOffset(target_sym.ni),
                    target_endian,
                ),
            },
            .I386 => switch (reloc.type.I386) {
                else => |kind| @panic(@tagName(kind)),
                .ABSOLUTE => {},
                .DIR16 => std.mem.writeInt(
                    u16,
                    loc_slice[0..2],
                    @intCast(coff.optionalHeaderField(.image_base) + target_rva),
                    target_endian,
                ),
                .REL16 => std.mem.writeInt(
                    i16,
                    loc_slice[0..2],
                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 2)))),
                    target_endian,
                ),
                .DIR32 => std.mem.writeInt(
                    u32,
                    loc_slice[0..4],
                    @intCast(coff.optionalHeaderField(.image_base) + target_rva),
                    target_endian,
                ),
                .DIR32NB => std.mem.writeInt(
                    u32,
                    loc_slice[0..4],
                    @intCast(target_rva),
                    target_endian,
                ),
                .REL32 => std.mem.writeInt(
                    i32,
                    loc_slice[0..4],
                    @intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 4)))),
                    target_endian,
                ),
                .SECREL => std.mem.writeInt(
                    u32,
                    loc_slice[0..4],
                    coff.computeNodeSectionOffset(target_sym.ni),
                    target_endian,
                ),
            },
        }
    }

    /// Unlinks this relocation from its target's list and invalidates it.
    /// Does not shrink `relocs`; the slot becomes undefined.
    pub fn delete(reloc: *Reloc, coff: *Coff) void {
        switch (reloc.prev) {
            .none => {
                // Head of the target's list: re-root the list at `next`.
                const target = reloc.target.get(coff);
                assert(target.target_relocs.get(coff) == reloc);
                target.target_relocs = reloc.next;
            },
            else => |prev| prev.get(coff).next = reloc.next,
        }
        switch (reloc.next) {
            .none => {},
            else => |next| next.get(coff).prev = reloc.prev,
        }
        reloc.* = undefined;
    }

    comptime {
        // Size check only asserted without runtime safety.
        if (!std.debug.runtime_safety) std.debug.assert(@sizeOf(Reloc) == 40);
    }
};
579
/// `link.File` entry point for opening an output file; delegates to `create`.
pub fn open(
    arena: std.mem.Allocator,
    comp: *Compilation,
    path: std.Build.Cache.Path,
    options: link.File.OpenOptions,
) !*Coff {
    return create(arena, comp, path, options);
}
/// `link.File` entry point for creating a fresh output file; identical to
/// `open` — both delegate to `create`.
pub fn createEmpty(
    arena: std.mem.Allocator,
    comp: *Compilation,
    path: std.Build.Cache.Path,
    options: link.File.OpenOptions,
) !*Coff {
    return create(arena, comp, path, options);
}
/// Allocates the `Coff` in `arena`, creates and maps the output file, interns
/// the well-known strings, and writes the initial headers.
fn create(
    arena: std.mem.Allocator,
    comp: *Compilation,
    path: std.Build.Cache.Path,
    options: link.File.OpenOptions,
) !*Coff {
    const target = &comp.root_mod.resolved_target.result;
    assert(target.ofmt == .coff);
    // `targetEndian` ignores its receiver, so `undefined` is fine at comptime.
    if (target.cpu.arch.endian() != comptime targetEndian(undefined))
        return error.UnsupportedCOFFArchitecture;
    // Only executables and DLLs are PE images; objects and static libraries
    // get no image-specific headers.
    const is_image = switch (comp.config.output_mode) {
        .Exe => true,
        .Lib => switch (comp.config.link_mode) {
            .static => false,
            .dynamic => true,
        },
        .Obj => false,
    };
    const machine = target.toCoffMachine();
    // Zero timestamp keeps output deterministic.
    const timestamp: u32 = 0;
    const major_subsystem_version = options.major_subsystem_version orelse 6;
    const minor_subsystem_version = options.minor_subsystem_version orelse 0;
    const magic: std.coff.OptionalHeader.Magic = switch (target.ptrBitWidth()) {
        0...32 => .PE32,
        33...64 => .@"PE32+",
        else => return error.UnsupportedCOFFArchitecture,
    };
    // log2 section alignment: 12 => 4096 bytes, 13 => 8192 bytes.
    const section_align: std.mem.Alignment = switch (machine) {
        .AMD64, .I386 => @enumFromInt(12),
        .SH3, .SH3DSP, .SH4, .SH5 => @enumFromInt(12),
        .MIPS16, .MIPSFPU, .MIPSFPU16, .WCEMIPSV2 => @enumFromInt(12),
        .POWERPC, .POWERPCFP => @enumFromInt(12),
        .ALPHA, .ALPHA64 => @enumFromInt(13),
        .IA64 => @enumFromInt(13),
        .ARM => @enumFromInt(12),
        else => return error.UnsupportedCOFFArchitecture,
    };

    const coff = try arena.create(Coff);
    const file = try path.root_dir.handle.adaptToNewApi().createFile(comp.io, path.sub_path, .{
        .read = true,
        .mode = link.File.determineMode(comp.config.output_mode, comp.config.link_mode),
    });
    errdefer file.close(comp.io);
    coff.* = .{
        .base = .{
            .tag = .coff2,

            .comp = comp,
            .emit = path,

            .file = .adaptFromNewApi(file),
            .gc_sections = false,
            .print_gc_sections = false,
            .build_id = .none,
            .allow_shlib_undefined = false,
            .stack_size = 0,
        },
        .mf = try .init(file, comp.gpa),
        .nodes = .empty,
        .import_table = .{
            .ni = .none,
            .entries = .empty,
        },
        .strings = .empty,
        .string_bytes = .empty,
        .image_section_table = .empty,
        .pseudo_section_table = .empty,
        .object_section_table = .empty,
        .symbol_table = .empty,
        .globals = .empty,
        .global_pending_index = 0,
        .navs = .empty,
        .uavs = .empty,
        .lazy = .initFill(.{
            .map = .empty,
            .pending_index = 0,
        }),
        .pending_uavs = .empty,
        .relocs = .empty,
        .const_prog_node = .none,
        .synth_prog_node = .none,
    };
    errdefer coff.deinit();

    {
        // Pre-intern the well-known strings; `String`'s fixed enum values
        // depend on this exact interning order, which the assert verifies.
        const strings = std.enums.values(String);
        try coff.strings.ensureTotalCapacityContext(comp.gpa, @intCast(strings.len), .{
            .bytes = &coff.string_bytes,
        });
        for (strings) |string| assert(try coff.getOrPutString(@tagName(string)) == string);
    }

    try coff.initHeaders(
        is_image,
        machine,
        timestamp,
        major_subsystem_version,
        minor_subsystem_version,
        magic,
        section_align,
    );
    return coff;
}
700
/// Frees all heap state owned by the linker and poisons the instance.
/// (The `Coff` struct itself is arena-allocated by `create`.)
pub fn deinit(coff: *Coff) void {
    const gpa = coff.base.comp.gpa;
    coff.mf.deinit(gpa);
    coff.nodes.deinit(gpa);
    coff.import_table.entries.deinit(gpa);
    coff.strings.deinit(gpa);
    coff.string_bytes.deinit(gpa);
    coff.image_section_table.deinit(gpa);
    coff.pseudo_section_table.deinit(gpa);
    coff.object_section_table.deinit(gpa);
    coff.symbol_table.deinit(gpa);
    coff.globals.deinit(gpa);
    coff.navs.deinit(gpa);
    coff.uavs.deinit(gpa);
    for (&coff.lazy.values) |*lazy| lazy.map.deinit(gpa);
    coff.pending_uavs.deinit(gpa);
    coff.relocs.deinit(gpa);
    coff.* = undefined;
}
720
/// Lays out and writes the initial file structure: MS-DOS stub + PE
/// signature, COFF header, optional header, data directories, section table,
/// the initial sections, and the import directory table.
///
/// Node creation order is significant: the first `Node.known_count` nodes
/// must land at the indices recorded in `Node.known`, and the first symbols
/// must match the named values of `Symbol.Index`.
fn initHeaders(
    coff: *Coff,
    is_image: bool,
    machine: std.coff.IMAGE.FILE.MACHINE,
    timestamp: u32,
    major_subsystem_version: u16,
    minor_subsystem_version: u16,
    magic: std.coff.OptionalHeader.Magic,
    section_align: std.mem.Alignment,
) !void {
    const comp = coff.base.comp;
    const gpa = comp.gpa;
    const target_endian = coff.targetEndian();
    const file_align: std.mem.Alignment = comptime .fromByteUnits(default_file_alignment);

    // Non-images carry no optional header or data directories.
    const optional_header_size: u16 = if (is_image) switch (magic) {
        _ => unreachable,
        inline else => |ct_magic| @sizeOf(@field(std.coff.OptionalHeader, @tagName(ct_magic))),
    } else 0;
    const data_directories_size: u16 = if (is_image)
        @sizeOf(std.coff.ImageDataDirectory) * std.coff.IMAGE.DIRECTORY_ENTRY.len
    else
        0;

    // Known nodes + (.idata section, import directory, 3 sections created by
    // addSection with their symbols' nodes, import table) + optional .tls$;
    // verified by the assert at the end of this function.
    const expected_nodes_len = Node.known_count + 6 +
        @as(usize, @intFromBool(comp.config.any_non_single_threaded)) * 2;
    try coff.nodes.ensureTotalCapacity(gpa, expected_nodes_len);
    coff.nodes.appendAssumeCapacity(.file);

    const header_ni = Node.known.header;
    assert(header_ni == try coff.mf.addOnlyChildNode(gpa, .root, .{
        .alignment = coff.mf.flags.block_size,
        .fixed = true,
    }));
    coff.nodes.appendAssumeCapacity(.header);

    const signature_ni = Node.known.signature;
    assert(signature_ni == try coff.mf.addOnlyChildNode(gpa, header_ni, .{
        .size = (if (is_image) msdos_stub.len else 0) + "PE\x00\x00".len,
        .alignment = .@"4",
        .fixed = true,
    }));
    coff.nodes.appendAssumeCapacity(.signature);
    {
        const signature_slice = signature_ni.slice(&coff.mf);
        if (is_image) @memcpy(signature_slice[0..msdos_stub.len], &msdos_stub);
        @memcpy(signature_slice[signature_slice.len - 4 ..], "PE\x00\x00");
    }

    const coff_header_ni = Node.known.coff_header;
    assert(coff_header_ni == try coff.mf.addLastChildNode(gpa, header_ni, .{
        .size = @sizeOf(std.coff.Header),
        .alignment = .@"4",
        .fixed = true,
    }));
    coff.nodes.appendAssumeCapacity(.coff_header);
    {
        const coff_header = coff.headerPtr();
        coff_header.* = .{
            .machine = machine,
            .number_of_sections = 0,
            .time_date_stamp = timestamp,
            .pointer_to_symbol_table = 0,
            .number_of_symbols = 0,
            .size_of_optional_header = optional_header_size + data_directories_size,
            .flags = .{
                .RELOCS_STRIPPED = is_image,
                .EXECUTABLE_IMAGE = is_image,
                .DEBUG_STRIPPED = true,
                .@"32BIT_MACHINE" = magic == .PE32,
                .LARGE_ADDRESS_AWARE = magic == .@"PE32+",
                .DLL = comp.config.output_mode == .Lib and comp.config.link_mode == .dynamic,
            },
        };
        if (target_endian != native_endian) std.mem.byteSwapAllFields(std.coff.Header, coff_header);
    }

    const optional_header_ni = Node.known.optional_header;
    assert(optional_header_ni == try coff.mf.addLastChildNode(gpa, header_ni, .{
        .size = optional_header_size,
        .alignment = .@"4",
        .fixed = true,
    }));
    coff.nodes.appendAssumeCapacity(.optional_header);
    if (is_image) {
        // Store the magic first so `optionalHeaderPtr` can dispatch on it.
        coff.targetStore(&coff.optionalHeaderStandardPtr().magic, magic);
        switch (coff.optionalHeaderPtr()) {
            .PE32 => |optional_header| {
                optional_header.* = .{
                    .standard = .{
                        .magic = .PE32,
                        .major_linker_version = 0,
                        .minor_linker_version = 0,
                        .size_of_code = 0,
                        .size_of_initialized_data = 0,
                        .size_of_uninitialized_data = 0,
                        .address_of_entry_point = 0,
                        .base_of_code = 0,
                    },
                    .base_of_data = 0,
                    // Conventional default image bases for 32-bit EXEs/DLLs.
                    .image_base = switch (coff.base.comp.config.output_mode) {
                        .Exe => 0x400000,
                        .Lib => switch (coff.base.comp.config.link_mode) {
                            .static => 0,
                            .dynamic => 0x10000000,
                        },
                        .Obj => 0,
                    },
                    .section_alignment = @intCast(section_align.toByteUnits()),
                    .file_alignment = @intCast(file_align.toByteUnits()),
                    .major_operating_system_version = 6,
                    .minor_operating_system_version = 0,
                    .major_image_version = 0,
                    .minor_image_version = 0,
                    .major_subsystem_version = major_subsystem_version,
                    .minor_subsystem_version = minor_subsystem_version,
                    .win32_version_value = 0,
                    .size_of_image = 0,
                    .size_of_headers = 0,
                    .checksum = 0,
                    .subsystem = .WINDOWS_CUI,
                    .dll_flags = .{
                        .HIGH_ENTROPY_VA = true,
                        .DYNAMIC_BASE = true,
                        .TERMINAL_SERVER_AWARE = true,
                        .NX_COMPAT = true,
                    },
                    .size_of_stack_reserve = default_size_of_stack_reserve,
                    .size_of_stack_commit = default_size_of_stack_commit,
                    .size_of_heap_reserve = default_size_of_heap_reserve,
                    .size_of_heap_commit = default_size_of_heap_commit,
                    .loader_flags = 0,
                    .number_of_rva_and_sizes = std.coff.IMAGE.DIRECTORY_ENTRY.len,
                };
                if (target_endian != native_endian)
                    std.mem.byteSwapAllFields(std.coff.OptionalHeader.PE32, optional_header);
            },
            .@"PE32+" => |optional_header| {
                optional_header.* = .{
                    .standard = .{
                        .magic = .@"PE32+",
                        .major_linker_version = 0,
                        .minor_linker_version = 0,
                        .size_of_code = 0,
                        .size_of_initialized_data = 0,
                        .size_of_uninitialized_data = 0,
                        .address_of_entry_point = 0,
                        .base_of_code = 0,
                    },
                    // Conventional default image bases for 64-bit EXEs/DLLs.
                    .image_base = switch (coff.base.comp.config.output_mode) {
                        .Exe => 0x140000000,
                        .Lib => switch (coff.base.comp.config.link_mode) {
                            .static => 0,
                            .dynamic => 0x180000000,
                        },
                        .Obj => 0,
                    },
                    .section_alignment = @intCast(section_align.toByteUnits()),
                    .file_alignment = @intCast(file_align.toByteUnits()),
                    .major_operating_system_version = 6,
                    .minor_operating_system_version = 0,
                    .major_image_version = 0,
                    .minor_image_version = 0,
                    .major_subsystem_version = major_subsystem_version,
                    .minor_subsystem_version = minor_subsystem_version,
                    .win32_version_value = 0,
                    .size_of_image = 0,
                    .size_of_headers = 0,
                    .checksum = 0,
                    .subsystem = .WINDOWS_CUI,
                    .dll_flags = .{
                        .HIGH_ENTROPY_VA = true,
                        .DYNAMIC_BASE = true,
                        .TERMINAL_SERVER_AWARE = true,
                        .NX_COMPAT = true,
                    },
                    .size_of_stack_reserve = default_size_of_stack_reserve,
                    .size_of_stack_commit = default_size_of_stack_commit,
                    .size_of_heap_reserve = default_size_of_heap_reserve,
                    .size_of_heap_commit = default_size_of_heap_commit,
                    .loader_flags = 0,
                    .number_of_rva_and_sizes = std.coff.IMAGE.DIRECTORY_ENTRY.len,
                };
                if (target_endian != native_endian)
                    std.mem.byteSwapAllFields(std.coff.OptionalHeader.@"PE32+", optional_header);
            },
        }
    }

    const data_directories_ni = Node.known.data_directories;
    assert(data_directories_ni == try coff.mf.addLastChildNode(gpa, header_ni, .{
        .size = data_directories_size,
        .alignment = .@"4",
        .fixed = true,
    }));
    coff.nodes.appendAssumeCapacity(.data_directories);
    {
        const data_directories = coff.dataDirectorySlice();
        @memset(data_directories, .{ .virtual_address = 0, .size = 0 });
        if (target_endian != native_endian) std.mem.byteSwapAllFields(
            [std.coff.IMAGE.DIRECTORY_ENTRY.len]std.coff.ImageDataDirectory,
            data_directories,
        );
    }

    const section_table_ni = Node.known.section_table;
    assert(section_table_ni == try coff.mf.addLastChildNode(gpa, header_ni, .{
        .alignment = .@"4",
        .fixed = true,
    }));
    coff.nodes.appendAssumeCapacity(.section_table);

    assert(coff.nodes.len == Node.known_count);

    // The null symbol plus the known sections, in `Symbol.Index` order.
    try coff.symbol_table.ensureTotalCapacity(gpa, Symbol.Index.known_count);
    coff.symbol_table.addOneAssumeCapacity().* = .{
        .ni = .none,
        .rva = 0,
        .size = 0,
        .loc_relocs = .none,
        .target_relocs = .none,
        .section_number = .UNDEFINED,
    };
    assert(try coff.addSection(".data", .{
        .CNT_INITIALIZED_DATA = true,
        .MEM_READ = true,
        .MEM_WRITE = true,
    }) == .data);
    assert(try coff.addSection(".rdata", .{
        .CNT_INITIALIZED_DATA = true,
        .MEM_READ = true,
    }) == .rdata);
    assert(try coff.addSection(".text", .{
        .CNT_CODE = true,
        .MEM_EXECUTE = true,
        .MEM_READ = true,
    }) == .text);

    coff.import_table.ni = try coff.mf.addLastChildNode(
        gpa,
        (try coff.objectSectionMapIndex(
            .@".idata",
            coff.mf.flags.block_size,
            .{ .read = true },
        )).symbol(coff).node(coff),
        .{ .alignment = .@"4", .moved = true },
    );
    coff.nodes.appendAssumeCapacity(.import_directory_table);

    // While tls variables allocated at runtime are writable, the template itself is not
    if (comp.config.any_non_single_threaded) _ = try coff.objectSectionMapIndex(
        .@".tls$",
        coff.mf.flags.block_size,
        .{ .read = true },
    );

    assert(coff.nodes.len == expected_nodes_len);
}
979
/// Begins progress reporting: creates three child nodes for pending
/// constants, pending synthetic symbols, and pending mapped-file updates.
/// Paired with `endProgress`.
pub fn startProgress(coff: *Coff, prog_node: std.Progress.Node) void {
    prog_node.increaseEstimatedTotalItems(3);
    const pending_consts = coff.pending_uavs.count();
    var pending_synth = coff.globals.count() - coff.global_pending_index;
    for (&coff.lazy.values) |*lazy| pending_synth += lazy.map.count() - lazy.pending_index;
    coff.const_prog_node = prog_node.start("Constants", pending_consts);
    coff.synth_prog_node = prog_node.start("Synthetics", pending_synth);
    coff.mf.update_prog_node = prog_node.start("Relocations", coff.mf.updates.items.len);
}
990
/// Ends the progress nodes created by `startProgress`, in reverse creation
/// order, and resets each field to `.none`.
pub fn endProgress(coff: *Coff) void {
    const prog_nodes = [_]*std.Progress.Node{
        &coff.mf.update_prog_node,
        &coff.synth_prog_node,
        &coff.const_prog_node,
    };
    for (prog_nodes) |prog_node| {
        prog_node.end();
        prog_node.* = .none;
    }
}
999
/// Returns what the given mapped-file node represents, from the parallel
/// `nodes` list.
fn getNode(coff: *const Coff, ni: MappedFile.Node.Index) Node {
    return coff.nodes.get(@intFromEnum(ni));
}
/// Computes a node's RVA as its offset within its parent plus the parent's
/// base RVA. The parent's RVA comes from its symbol, except for import-table
/// nodes, whose base RVAs are read back out of the emitted headers.
fn computeNodeRva(coff: *Coff, ni: MappedFile.Node.Index) u32 {
    const parent_rva = parent_rva: {
        const parent_si = switch (coff.getNode(ni.parent(&coff.mf))) {
            // Header nodes have no RVA-bearing parent; a node queried here
            // must live inside a section or import table.
            .file,
            .header,
            .signature,
            .coff_header,
            .optional_header,
            .data_directories,
            .section_table,
            => unreachable,
            .image_section => |si| si,
            .import_directory_table => break :parent_rva coff.targetLoad(
                &coff.dataDirectoryPtr(.IMPORT).virtual_address,
            ),
            .import_lookup_table => |import_index| break :parent_rva coff.targetLoad(
                &coff.importDirectoryEntryPtr(import_index).import_lookup_table_rva,
            ),
            .import_address_table => |import_index| break :parent_rva coff.targetLoad(
                &coff.importDirectoryEntryPtr(import_index).import_address_table_rva,
            ),
            .import_hint_name_table => |import_index| break :parent_rva coff.targetLoad(
                &coff.importDirectoryEntryPtr(import_index).name_rva,
            ),
            inline .pseudo_section,
            .object_section,
            .global,
            .nav,
            .uav,
            .lazy_code,
            .lazy_const_data,
            => |mi| mi.symbol(coff),
        };
        break :parent_rva parent_si.get(coff).rva;
    };
    const offset, _ = ni.location(&coff.mf).resolve(&coff.mf);
    return @intCast(parent_rva + offset);
}
/// Returns the byte offset of node `ni` within its enclosing section by
/// walking parent links, summing each node's offset, and stopping at the
/// first ancestor that is an image or pseudo section. Any other ancestor
/// kind would violate the node-tree invariants.
fn computeNodeSectionOffset(coff: *Coff, ni: MappedFile.Node.Index) u32 {
    var section_offset: u32 = 0;
    var parent_ni = ni;
    while (true) {
        const offset, _ = parent_ni.location(&coff.mf).resolve(&coff.mf);
        section_offset += @intCast(offset);
        parent_ni = parent_ni.parent(&coff.mf);
        switch (coff.getNode(parent_ni)) {
            else => unreachable,
            .image_section, .pseudo_section => return section_offset,
            // Object sections nest below pseudo sections; keep accumulating.
            .object_section => {},
        }
    }
}
1055
/// PE/COFF images are always stored little-endian, regardless of host.
pub inline fn targetEndian(_: *const Coff) std.builtin.Endian {
    const endian: std.builtin.Endian = .little;
    return endian;
}
/// Loads a scalar stored in target (little-endian) byte order from the mapped
/// file, converting to native byte order. Integers load directly; enums and
/// packed structs load through their backing integer. Any other pointee type
/// is a compile error.
fn targetLoad(coff: *const Coff, ptr: anytype) @typeInfo(@TypeOf(ptr)).pointer.child {
    const Child = @typeInfo(@TypeOf(ptr)).pointer.child;
    return switch (@typeInfo(Child)) {
        else => @compileError(@typeName(Child)),
        .int => std.mem.toNative(Child, ptr.*, coff.targetEndian()),
        .@"enum" => |@"enum"| @enumFromInt(coff.targetLoad(@as(*@"enum".tag_type, @ptrCast(ptr)))),
        .@"struct" => |@"struct"| @bitCast(
            coff.targetLoad(@as(*@"struct".backing_integer.?, @ptrCast(ptr))),
        ),
    };
}
/// Stores `val` through `ptr` in target (little-endian) byte order; the
/// mirror of `targetLoad`. Integers store directly; enums and packed structs
/// store through their backing integer. Any other pointee type is a compile
/// error.
fn targetStore(coff: *const Coff, ptr: anytype, val: @typeInfo(@TypeOf(ptr)).pointer.child) void {
    const Child = @typeInfo(@TypeOf(ptr)).pointer.child;
    return switch (@typeInfo(Child)) {
        else => @compileError(@typeName(Child)),
        .int => ptr.* = std.mem.nativeTo(Child, val, coff.targetEndian()),
        .@"enum" => |@"enum"| coff.targetStore(
            @as(*@"enum".tag_type, @ptrCast(ptr)),
            @intFromEnum(val),
        ),
        .@"struct" => |@"struct"| coff.targetStore(
            @as(*@"struct".backing_integer.?, @ptrCast(ptr)),
            @bitCast(val),
        ),
    };
}
1085
/// Returns a live pointer into the mapped file at the COFF file header.
pub fn headerPtr(coff: *Coff) *std.coff.Header {
    const bytes = Node.known.coff_header.slice(&coff.mf);
    return @ptrCast(@alignCast(bytes));
}
1089
/// Returns a pointer to the magic-independent standard prefix of the
/// optional header within the mapped file.
pub fn optionalHeaderStandardPtr(coff: *Coff) *std.coff.OptionalHeader {
    const bytes = Node.known.optional_header.slice(&coff.mf);
    const standard = bytes[0..@sizeOf(std.coff.OptionalHeader)];
    return @ptrCast(@alignCast(standard));
}
1095
/// Pointer to the full optional header, discriminated by the PE magic so
/// callers can access the PE32 or PE32+ layout type-safely.
pub const OptionalHeaderPtr = union(std.coff.OptionalHeader.Magic) {
    PE32: *std.coff.OptionalHeader.PE32,
    @"PE32+": *std.coff.OptionalHeader.@"PE32+",
};
/// Reads the PE magic and returns a correspondingly-typed pointer to the full
/// optional header. A magic outside the named enum values is asserted
/// unreachable.
pub fn optionalHeaderPtr(coff: *Coff) OptionalHeaderPtr {
    const slice = Node.known.optional_header.slice(&coff.mf);
    return switch (coff.targetLoad(&coff.optionalHeaderStandardPtr().magic)) {
        _ => unreachable,
        inline else => |magic| @unionInit(
            OptionalHeaderPtr,
            @tagName(magic),
            @ptrCast(@alignCast(slice)),
        ),
    };
}
/// Loads an optional-header field by name. The field is named via the PE32+
/// layout; the same-named field of whichever layout is active is read, so
/// this only works for fields present in both layouts.
pub fn optionalHeaderField(
    coff: *Coff,
    comptime field: std.meta.FieldEnum(std.coff.OptionalHeader.@"PE32+"),
) @FieldType(std.coff.OptionalHeader.@"PE32+", @tagName(field)) {
    return switch (coff.optionalHeaderPtr()) {
        inline else => |optional_header| coff.targetLoad(&@field(optional_header, @tagName(field))),
    };
}
1119
/// Returns the full fixed-length data directory array within the mapped file.
pub fn dataDirectorySlice(
    coff: *Coff,
) *[std.coff.IMAGE.DIRECTORY_ENTRY.len]std.coff.ImageDataDirectory {
    const bytes = Node.known.data_directories.slice(&coff.mf);
    return @ptrCast(@alignCast(bytes));
}
/// Returns a pointer to one data directory entry, selected by kind.
pub fn dataDirectoryPtr(
    coff: *Coff,
    entry: std.coff.IMAGE.DIRECTORY_ENTRY,
) *std.coff.ImageDataDirectory {
    const directories = coff.dataDirectorySlice();
    return &directories[@intFromEnum(entry)];
}
1131
/// Returns the section header table as a slice into the mapped file.
pub fn sectionTableSlice(coff: *Coff) []std.coff.SectionHeader {
    const bytes = Node.known.section_table.slice(&coff.mf);
    return @ptrCast(@alignCast(bytes));
}
1135
/// Returns the import directory table as a slice into the mapped file.
pub fn importDirectoryTableSlice(coff: *Coff) []std.coff.ImportDirectoryEntry {
    const bytes = coff.import_table.ni.slice(&coff.mf);
    return @ptrCast(@alignCast(bytes));
}
/// Returns a pointer to one import directory entry, selected by import index.
pub fn importDirectoryEntryPtr(
    coff: *Coff,
    import_index: ImportTable.Index,
) *std.coff.ImportDirectoryEntry {
    const entries = coff.importDirectoryTableSlice();
    return &entries[@intFromEnum(import_index)];
}
1145
/// Appends a blank (unallocated, undefined-section) symbol to the symbol
/// table and returns its index. Capacity must have been reserved beforehand.
///
/// Note the `defer`: the return expression reads `items.len` *before* the
/// deferred append runs, so the returned value is derived from the table
/// length at call time while the new slot is only filled in on exit.
fn addSymbolAssumeCapacity(coff: *Coff) Symbol.Index {
    defer coff.symbol_table.addOneAssumeCapacity().* = .{
        .ni = .none,
        .rva = 0,
        .size = 0,
        .loc_relocs = .none,
        .target_relocs = .none,
        .section_number = .UNDEFINED,
    };
    return @enumFromInt(coff.symbol_table.items.len);
}
1157
/// Error-union-typed wrapper around `addSymbolAssumeCapacity`; it cannot
/// actually fail, but presents the `!Symbol.Index` signature some callers use.
fn initSymbolAssumeCapacity(coff: *Coff) !Symbol.Index {
    return coff.addSymbolAssumeCapacity();
}
1162
/// Interns `string`, reserving capacity first so the actual insert cannot fail.
fn getOrPutString(coff: *Coff, string: []const u8) !String {
    try coff.ensureUnusedStringCapacity(string.len);
    const interned = coff.getOrPutStringAssumeCapacity(string);
    return interned;
}
/// Interns an optional string; `null` maps to `String.Optional.none`.
fn getOrPutOptionalString(coff: *Coff, string: ?[]const u8) !String.Optional {
    const some = string orelse return .none;
    const interned = try coff.getOrPutString(some);
    return interned.toOptional();
}
1170
/// Reserves room for interning one more string of length `len`: one entry in
/// the dedup map, plus `len + 1` bytes in `string_bytes` (the extra byte is
/// the NUL terminator appended on insert).
fn ensureUnusedStringCapacity(coff: *Coff, len: usize) !void {
    const gpa = coff.base.comp.gpa;
    try coff.strings.ensureUnusedCapacityContext(gpa, 1, .{ .bytes = &coff.string_bytes });
    try coff.string_bytes.ensureUnusedCapacity(gpa, len + 1);
}
/// Interns `string` into the deduplicated string table, assuming capacity was
/// already reserved by `ensureUnusedStringCapacity`. Returns the byte offset
/// of the string (as a `String`); new strings are appended NUL-terminated.
fn getOrPutStringAssumeCapacity(coff: *Coff, string: []const u8) String {
    const adapter: std.hash_map.StringIndexAdapter = .{ .bytes = &coff.string_bytes };
    const gop = coff.strings.getOrPutAssumeCapacityAdapted(string, adapter);
    if (gop.found_existing) return @enumFromInt(gop.key_ptr.*);
    // New string: its key is the offset where its bytes begin.
    const offset: u32 = @intCast(coff.string_bytes.items.len);
    gop.key_ptr.* = offset;
    gop.value_ptr.* = {};
    coff.string_bytes.appendSliceAssumeCapacity(string);
    coff.string_bytes.appendAssumeCapacity(0);
    return @enumFromInt(offset);
}
1189
/// Returns the symbol for the global `name` (optionally qualified by the
/// library `lib_name` it is imported from), creating it on first use. The
/// symbol-table slot is reserved before the map `getOrPut` so the insert path
/// cannot fail partway. A newly created global counts as one more pending
/// synthetic item for `idle` to flush.
pub fn globalSymbol(coff: *Coff, name: []const u8, lib_name: ?[]const u8) !Symbol.Index {
    const gpa = coff.base.comp.gpa;
    try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
    const sym_gop = try coff.globals.getOrPut(gpa, .{
        .name = try coff.getOrPutString(name),
        .lib_name = try coff.getOrPutOptionalString(lib_name),
    });
    if (!sym_gop.found_existing) {
        sym_gop.value_ptr.* = coff.addSymbolAssumeCapacity();
        coff.synth_prog_node.increaseEstimatedTotalItems(1);
    }
    return sym_gop.value_ptr.*;
}
1203
/// Selects (creating on first use) the object section that should hold a
/// fully-resolved nav, returning the section's symbol. Defaults: functions go
/// to ".text" (read+execute); threadlocal variables/externs to ".tls$"
/// (read+write) when any module is multi-threaded; mutable data to ".data";
/// everything else (including const externs) to ".rdata" (read-only). An
/// explicit `linksection` overrides the default section name but not the
/// attributes derived from the value.
fn navSection(
    coff: *Coff,
    zcu: *Zcu,
    nav_fr: @FieldType(@FieldType(InternPool.Nav, "status"), "fully_resolved"),
) !Symbol.Index {
    const ip = &zcu.intern_pool;
    const default: String, const attributes: ObjectSectionAttributes =
        switch (ip.indexToKey(nav_fr.val)) {
            else => .{ .@".rdata", .{ .read = true } },
            .variable => |variable| if (variable.is_threadlocal and
                coff.base.comp.config.any_non_single_threaded)
                .{ .@".tls$", .{ .read = true, .write = true } }
            else
                .{ .@".data", .{ .read = true, .write = true } },
            .@"extern" => |@"extern"| if (@"extern".is_threadlocal and
                coff.base.comp.config.any_non_single_threaded)
                .{ .@".tls$", .{ .read = true, .write = true } }
            else if (ip.isFunctionType(@"extern".ty))
                .{ .@".text", .{ .read = true, .execute = true } }
            else if (@"extern".is_const)
                .{ .@".rdata", .{ .read = true } }
            else
                .{ .@".data", .{ .read = true, .write = true } },
            .func => .{ .@".text", .{ .read = true, .execute = true } },
        };
    return (try coff.objectSectionMapIndex(
        (try coff.getOrPutOptionalString(nav_fr.@"linksection".toSlice(ip))).unwrap() orelse default,
        // Without an explicit linksection the section gets the default
        // block-size alignment; with one, it gets the nav's alignment (or the
        // value type's ABI alignment when the nav alignment is unset).
        switch (nav_fr.@"linksection") {
            .none => coff.mf.flags.block_size,
            else => switch (nav_fr.alignment) {
                .none => Type.fromInterned(ip.typeOf(nav_fr.val)).abiAlignment(zcu),
                else => |alignment| alignment,
            }.toStdMem(),
        },
        attributes,
    )).symbol(coff);
}
/// Returns the nav-map index for `nav_index`, creating its symbol on first
/// use. The symbol slot is reserved before the map insert so the
/// assume-capacity append cannot fail midway.
fn navMapIndex(coff: *Coff, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Node.NavMapIndex {
    const gpa = zcu.gpa;
    try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
    const gop = try coff.navs.getOrPut(gpa, nav_index);
    if (!gop.found_existing) {
        gop.value_ptr.* = coff.addSymbolAssumeCapacity();
    }
    return @enumFromInt(gop.index);
}
/// Returns the symbol for a nav. Extern declarations resolve through the
/// global table (keyed by name and library) instead of getting a nav-backed
/// atom of their own.
pub fn navSymbol(coff: *Coff, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Symbol.Index {
    const ip = &zcu.intern_pool;
    const nav = ip.getNav(nav_index);
    if (nav.getExtern(ip)) |ext| {
        return coff.globalSymbol(ext.name.toSlice(ip), ext.lib_name.toSlice(ip));
    }
    const nmi = try coff.navMapIndex(zcu, nav_index);
    return nmi.symbol(coff);
}
1258
/// Returns the uav-map index for `uav_val`, creating its symbol on first use.
/// The symbol slot is reserved before the map insert so the assume-capacity
/// append cannot fail midway.
fn uavMapIndex(coff: *Coff, uav_val: InternPool.Index) !Node.UavMapIndex {
    const gpa = coff.base.comp.gpa;
    try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
    const gop = try coff.uavs.getOrPut(gpa, uav_val);
    if (!gop.found_existing) {
        gop.value_ptr.* = coff.addSymbolAssumeCapacity();
    }
    return @enumFromInt(gop.index);
}
/// Returns the symbol for an unnamed (anonymous) constant value.
pub fn uavSymbol(coff: *Coff, uav_val: InternPool.Index) !Symbol.Index {
    return (try coff.uavMapIndex(uav_val)).symbol(coff);
}
1270
/// Returns the symbol for a lazy symbol (code or const data keyed by type),
/// creating it on first use. A new entry counts as one more pending synthetic
/// item for `idle` to flush.
pub fn lazySymbol(coff: *Coff, lazy: link.File.LazySymbol) !Symbol.Index {
    const gpa = coff.base.comp.gpa;
    try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
    const sym_gop = try coff.lazy.getPtr(lazy.kind).map.getOrPut(gpa, lazy.ty);
    if (!sym_gop.found_existing) {
        // NOTE(review): uses the try-wrapper `initSymbolAssumeCapacity` where
        // sibling paths call `addSymbolAssumeCapacity` directly; behavior is
        // the same.
        sym_gop.value_ptr.* = try coff.initSymbolAssumeCapacity();
        coff.synth_prog_node.increaseEstimatedTotalItems(1);
    }
    return sym_gop.value_ptr.*;
}
1281
/// Records a relocation to a nav's symbol and returns its virtual address.
pub fn getNavVAddr(
    coff: *Coff,
    pt: Zcu.PerThread,
    nav: InternPool.Nav.Index,
    reloc_info: link.File.RelocInfo,
) !u64 {
    const target_si = try coff.navSymbol(pt.zcu, nav);
    return coff.getVAddr(reloc_info, target_si);
}
1290
/// Records a relocation to an unnamed constant's symbol and returns its
/// virtual address.
pub fn getUavVAddr(
    coff: *Coff,
    uav: InternPool.Index,
    reloc_info: link.File.RelocInfo,
) !u64 {
    const target_si = try coff.uavSymbol(uav);
    return coff.getVAddr(reloc_info, target_si);
}
1298
/// Records an absolute-address relocation from the atom named by
/// `reloc_info.parent` to `target_si` (ADDR64 on AMD64, DIR32 on I386; other
/// machines are unreachable) and returns the target's current virtual address
/// (image base + RVA).
pub fn getVAddr(coff: *Coff, reloc_info: link.File.RelocInfo, target_si: Symbol.Index) !u64 {
    try coff.addReloc(
        @enumFromInt(reloc_info.parent.atom_index),
        reloc_info.offset,
        target_si,
        reloc_info.addend,
        switch (coff.targetLoad(&coff.headerPtr().machine)) {
            else => unreachable,
            .AMD64 => .{ .AMD64 = .ADDR64 },
            .I386 => .{ .I386 = .DIR32 },
        },
    );
    return coff.optionalHeaderField(.image_base) + target_si.get(coff).rva;
}
1313
/// Appends a new image section: bumps `number_of_sections`, grows the section
/// header table, adds a block-aligned node at the end of the file, creates a
/// symbol for it, fills in the new section header, and extends
/// `size_of_image`. The section's RVA is placed immediately after the
/// previous section's virtual extent (or after the headers for the first
/// section).
fn addSection(coff: *Coff, name: []const u8, flags: std.coff.SectionHeader.Flags) !Symbol.Index {
    const gpa = coff.base.comp.gpa;
    // Reserve everything up front so the AssumeCapacity calls below cannot fail.
    try coff.nodes.ensureUnusedCapacity(gpa, 1);
    try coff.image_section_table.ensureUnusedCapacity(gpa, 1);
    try coff.symbol_table.ensureUnusedCapacity(gpa, 1);

    const coff_header = coff.headerPtr();
    const section_index = coff.targetLoad(&coff_header.number_of_sections);
    const section_table_len = section_index + 1;
    coff.targetStore(&coff_header.number_of_sections, section_table_len);
    try Node.known.section_table.resize(
        &coff.mf,
        gpa,
        @sizeOf(std.coff.SectionHeader) * section_table_len,
    );
    const ni = try coff.mf.addLastChildNode(gpa, .root, .{
        .alignment = coff.mf.flags.block_size,
        .moved = true,
        .bubbles_moved = false,
    });
    const si = coff.addSymbolAssumeCapacity();
    coff.image_section_table.appendAssumeCapacity(si);
    coff.nodes.appendAssumeCapacity(.{ .image_section = si });
    const section_table = coff.sectionTableSlice();
    const virtual_size = coff.optionalHeaderField(.section_alignment);
    const rva: u32 = switch (section_index) {
        // First section: starts right after the headers.
        0 => @intCast(Node.known.header.location(&coff.mf).resolve(&coff.mf)[1]),
        else => coff.image_section_table.items[section_index - 1].get(coff).rva +
            coff.targetLoad(&section_table[section_index - 1].virtual_size),
    };
    {
        const sym = si.get(coff);
        sym.ni = ni;
        sym.rva = rva;
        // COFF section numbers are 1-based, hence the new table length.
        sym.section_number = @enumFromInt(section_table_len);
    }
    const section = &section_table[section_index];
    section.* = .{
        .name = undefined,
        .virtual_size = virtual_size,
        .virtual_address = rva,
        .size_of_raw_data = 0,
        .pointer_to_raw_data = 0,
        .pointer_to_relocations = 0,
        .pointer_to_linenumbers = 0,
        .number_of_relocations = 0,
        .number_of_linenumbers = 0,
        .flags = flags,
    };
    @memcpy(section.name[0..name.len], name);
    @memset(section.name[name.len..], 0);
    // Headers live in the mapped file in target byte order.
    if (coff.targetEndian() != native_endian)
        std.mem.byteSwapAllFields(std.coff.SectionHeader, section);
    switch (coff.optionalHeaderPtr()) {
        inline else => |optional_header| coff.targetStore(
            &optional_header.size_of_image,
            @intCast(rva + virtual_size),
        ),
    }
    return si;
}
1375
/// Requested attributes for an object/pseudo section. `execute` and `write`
/// determine which top-level image section hosts it (.text / .data / .rdata);
/// the remaining flags presumably map to COFF section characteristics — they
/// are not consulted in this part of the file.
const ObjectSectionAttributes = packed struct {
    read: bool = false,
    write: bool = false,
    execute: bool = false,
    shared: bool = false,
    nopage: bool = false,
    nocache: bool = false,
    discard: bool = false,
    remove: bool = false,
};
/// Returns the pseudo-section for `name`, creating it on first use as the
/// last child of the image section chosen from `attributes` (.text if
/// executable, .data if writable, otherwise .rdata). A new pseudo-section
/// inherits its parent's section number and starts with an empty relocation
/// list.
fn pseudoSectionMapIndex(
    coff: *Coff,
    name: String,
    alignment: std.mem.Alignment,
    attributes: ObjectSectionAttributes,
) !Node.PseudoSectionMapIndex {
    const gpa = coff.base.comp.gpa;
    const pseudo_section_gop = try coff.pseudo_section_table.getOrPut(gpa, name);
    const psmi: Node.PseudoSectionMapIndex = @enumFromInt(pseudo_section_gop.index);
    if (!pseudo_section_gop.found_existing) {
        const parent: Symbol.Index = if (attributes.execute)
            .text
        else if (attributes.write)
            .data
        else
            .rdata;
        try coff.nodes.ensureUnusedCapacity(gpa, 1);
        try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
        const ni = try coff.mf.addLastChildNode(gpa, parent.node(coff), .{ .alignment = alignment });
        const si = coff.addSymbolAssumeCapacity();
        pseudo_section_gop.value_ptr.* = si;
        const sym = si.get(coff);
        sym.ni = ni;
        sym.rva = coff.computeNodeRva(ni);
        sym.section_number = parent.get(coff).section_number;
        assert(sym.loc_relocs == .none);
        sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
        coff.nodes.appendAssumeCapacity(.{ .pseudo_section = psmi });
    }
    return psmi;
}
/// Returns the object section for `name`, creating it on first use. The
/// parent pseudo-section is named by the prefix of `name` before the first
/// '$' (e.g. ".tls$" groups under ".tls"), and siblings are kept sorted by
/// full name — matching the MSVC convention of ordering grouped sections by
/// their '$' suffix. New nodes are inserted `fixed` at the sorted position.
fn objectSectionMapIndex(
    coff: *Coff,
    name: String,
    alignment: std.mem.Alignment,
    attributes: ObjectSectionAttributes,
) !Node.ObjectSectionMapIndex {
    const gpa = coff.base.comp.gpa;
    const object_section_gop = try coff.object_section_table.getOrPut(gpa, name);
    const osmi: Node.ObjectSectionMapIndex = @enumFromInt(object_section_gop.index);
    if (!object_section_gop.found_existing) {
        // Reserve string capacity before re-slicing: interning the prefix may
        // otherwise invalidate `name_slice`.
        try coff.ensureUnusedStringCapacity(name.toSlice(coff).len);
        const name_slice = name.toSlice(coff);
        const parent = (try coff.pseudoSectionMapIndex(coff.getOrPutStringAssumeCapacity(
            name_slice[0 .. std.mem.indexOfScalar(u8, name_slice, '$') orelse name_slice.len],
        ), alignment, attributes)).symbol(coff);
        try coff.nodes.ensureUnusedCapacity(gpa, 1);
        try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
        const parent_ni = parent.node(coff);
        // Find the insertion point that keeps siblings sorted by name.
        var prev_ni: MappedFile.Node.Index = .none;
        var next_it = parent_ni.children(&coff.mf);
        while (next_it.next()) |next_ni| switch (std.mem.order(
            u8,
            name_slice,
            coff.getNode(next_ni).object_section.name(coff).toSlice(coff),
        )) {
            .lt => break,
            // Duplicates are impossible: the map lookup above would have hit.
            .eq => unreachable,
            .gt => prev_ni = next_ni,
        };
        const ni = switch (prev_ni) {
            .none => try coff.mf.addFirstChildNode(gpa, parent_ni, .{
                .alignment = alignment,
                .fixed = true,
            }),
            else => try coff.mf.addNodeAfter(gpa, prev_ni, .{
                .alignment = alignment,
                .fixed = true,
            }),
        };
        const si = coff.addSymbolAssumeCapacity();
        object_section_gop.value_ptr.* = si;
        const sym = si.get(coff);
        sym.ni = ni;
        sym.rva = coff.computeNodeRva(ni);
        sym.section_number = parent.get(coff).section_number;
        assert(sym.loc_relocs == .none);
        sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
        coff.nodes.appendAssumeCapacity(.{ .object_section = osmi });
    }
    return osmi;
}
1468
/// Appends a relocation from location symbol `loc_si` (at `offset`, with
/// `addend`) to `target_si`, and pushes it onto the head of the target's
/// doubly-linked list of incoming relocations so they can be re-applied when
/// the target moves.
pub fn addReloc(
    coff: *Coff,
    loc_si: Symbol.Index,
    offset: u64,
    target_si: Symbol.Index,
    addend: i64,
    @"type": Reloc.Type,
) !void {
    const gpa = coff.base.comp.gpa;
    const target = target_si.get(coff);
    const ri: Reloc.Index = @enumFromInt(coff.relocs.items.len);
    (try coff.relocs.addOne(gpa)).* = .{
        .type = @"type",
        .prev = .none,
        .next = target.target_relocs,
        .loc = loc_si,
        .target = target_si,
        .unused = 0,
        .offset = offset,
        .addend = addend,
    };
    // Link the old head (if any) back to the new node, then replace the head.
    switch (target.target_relocs) {
        .none => {},
        else => |target_ri| target_ri.get(coff).prev = ri,
    }
    target.target_relocs = ri;
}
1496
/// No prelink step is needed for this linker; work is performed incrementally
/// in `idle` and finalized in `flush`.
pub fn prelink(coff: *Coff, prog_node: std.Progress.Node) void {
    _ = coff;
    _ = prog_node;
}
1501
/// Regenerates the data for a nav (variable/constant), converting unexpected
/// errors into a codegen failure attached to the nav; OOM and relocation
/// errors propagate unchanged.
pub fn updateNav(coff: *Coff, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
    coff.updateNavInner(pt, nav_index) catch |err| switch (err) {
        error.OutOfMemory,
        error.Overflow,
        error.RelocationNotByteAligned,
        => |e| return e,
        else => |e| return coff.base.cgFail(nav_index, "linker failed to update variable: {t}", .{e}),
    };
}
/// Lowers a nav's initializer bytes into its atom. Skips navs with no runtime
/// representation (externs, functions, zero-bit values). On first use the
/// atom node is created inside the appropriate section; on updates the old
/// location relocations are discarded first and re-recorded by codegen.
fn updateNavInner(coff: *Coff, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;

    const nav = ip.getNav(nav_index);
    const nav_val = nav.status.fully_resolved.val;
    // For variables, the bytes come from the initializer; externs and
    // functions carry no data here.
    const nav_init = switch (ip.indexToKey(nav_val)) {
        else => nav_val,
        .variable => |variable| variable.init,
        .@"extern", .func => .none,
    };
    if (nav_init == .none or !Type.fromInterned(ip.typeOf(nav_init)).hasRuntimeBits(zcu)) return;

    const nmi = try coff.navMapIndex(zcu, nav_index);
    const si = nmi.symbol(coff);
    const ni = ni: {
        switch (si.get(coff).ni) {
            .none => {
                // First lowering: create the atom node in its section.
                const sec_si = try coff.navSection(zcu, nav.status.fully_resolved);
                try coff.nodes.ensureUnusedCapacity(gpa, 1);
                const ni = try coff.mf.addLastChildNode(gpa, sec_si.node(coff), .{
                    .alignment = pt.navAlignment(nav_index).toStdMem(),
                    .moved = true,
                });
                coff.nodes.appendAssumeCapacity(.{ .nav = nmi });
                const sym = si.get(coff);
                sym.ni = ni;
                sym.section_number = sec_si.get(coff).section_number;
            },
            // Re-lowering: drop stale relocations originating from this atom.
            else => si.deleteLocationRelocs(coff),
        }
        const sym = si.get(coff);
        assert(sym.loc_relocs == .none);
        // Relocations appended during codegen below belong to this atom.
        sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
        break :ni sym.ni;
    };

    {
        var nw: MappedFile.Node.Writer = undefined;
        ni.writer(&coff.mf, gpa, &nw);
        defer nw.deinit();
        codegen.generateSymbol(
            &coff.base,
            pt,
            zcu.navSrcLoc(nav_index),
            .fromInterned(nav_init),
            &nw.interface,
            .{ .atom_index = @intFromEnum(si) },
        ) catch |err| switch (err) {
            // NOTE(review): maps WriteFailed to OOM, whereas updateFuncInner
            // surfaces `nw.err.?` instead — confirm which is intended.
            error.WriteFailed => return error.OutOfMemory,
            else => |e| return e,
        };
        si.get(coff).size = @intCast(nw.interface.end);
        si.applyLocationRelocs(coff);
    }

    // Explicitly-placed navs are trimmed to their exact size, and every
    // enclosing object section is shrunk/grown to end at its last child.
    if (nav.status.fully_resolved.@"linksection".unwrap()) |_| {
        try ni.resize(&coff.mf, gpa, si.get(coff).size);
        var parent_ni = ni;
        while (true) {
            parent_ni = parent_ni.parent(&coff.mf);
            switch (coff.getNode(parent_ni)) {
                else => unreachable,
                .image_section, .pseudo_section => break,
                .object_section => {
                    var child_it = parent_ni.reverseChildren(&coff.mf);
                    const last_offset, const last_size =
                        child_it.next().?.location(&coff.mf).resolve(&coff.mf);
                    try parent_ni.resize(&coff.mf, gpa, last_offset + last_size);
                },
            }
        }
    }
}
1586
/// Returns the symbol for an unnamed constant, scheduling its bytes to be
/// generated later by `idle` (via `pending_uavs`) when the symbol is new or
/// when a stricter alignment than its current node's is requested. Repeated
/// requests merge by taking the maximum alignment.
pub fn lowerUav(
    coff: *Coff,
    pt: Zcu.PerThread,
    uav_val: InternPool.Index,
    uav_align: InternPool.Alignment,
    src_loc: Zcu.LazySrcLoc,
) !codegen.SymbolResult {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;

    try coff.pending_uavs.ensureUnusedCapacity(gpa, 1);
    const umi = try coff.uavMapIndex(uav_val);
    const si = umi.symbol(coff);
    if (switch (si.get(coff).ni) {
        .none => true,
        else => |ni| uav_align.toStdMem().order(ni.alignment(&coff.mf)).compare(.gt),
    }) {
        const gop = coff.pending_uavs.getOrPutAssumeCapacity(umi);
        if (gop.found_existing) {
            gop.value_ptr.alignment = gop.value_ptr.alignment.max(uav_align);
        } else {
            gop.value_ptr.* = .{
                .alignment = uav_align,
                .src_loc = src_loc,
            };
            coff.const_prog_node.increaseEstimatedTotalItems(1);
        }
    }
    return .{ .sym_index = @intFromEnum(si) };
}
1617
/// Emits machine code for a function into its atom, converting unexpected
/// errors into a codegen failure attached to the owner nav; OOM, relocation,
/// and codegen errors propagate unchanged.
pub fn updateFunc(
    coff: *Coff,
    pt: Zcu.PerThread,
    func_index: InternPool.Index,
    mir: *const codegen.AnyMir,
) !void {
    coff.updateFuncInner(pt, func_index, mir) catch |err| switch (err) {
        error.OutOfMemory,
        error.Overflow,
        error.RelocationNotByteAligned,
        error.CodegenFail,
        => |e| return e,
        // `{t}` prints the error name directly; same output as the previous
        // `{s}` + `@errorName`, and consistent with `updateNav`.
        else => |e| return coff.base.cgFail(
            pt.zcu.funcInfo(func_index).owner_nav,
            "linker failed to update function: {t}",
            .{e},
        ),
    };
}
/// Emits a function's machine code into its atom. On first use the atom node
/// is created in the function's section with an alignment chosen from the
/// nav's explicit alignment (clamped up to the target's minimum function
/// alignment) or the optimize-mode default; on updates the old location
/// relocations are discarded first and re-recorded during emission.
fn updateFuncInner(
    coff: *Coff,
    pt: Zcu.PerThread,
    func_index: InternPool.Index,
    mir: *const codegen.AnyMir,
) !void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;
    const func = zcu.funcInfo(func_index);
    const nav = ip.getNav(func.owner_nav);

    const nmi = try coff.navMapIndex(zcu, func.owner_nav);
    const si = nmi.symbol(coff);
    log.debug("updateFunc({f}) = {d}", .{ nav.fqn.fmt(ip), si });
    const ni = ni: {
        switch (si.get(coff).ni) {
            .none => {
                // First emission: create the atom node in the code section.
                const sec_si = try coff.navSection(zcu, nav.status.fully_resolved);
                try coff.nodes.ensureUnusedCapacity(gpa, 1);
                const mod = zcu.navFileScope(func.owner_nav).mod.?;
                const target = &mod.resolved_target.result;
                const ni = try coff.mf.addLastChildNode(gpa, sec_si.node(coff), .{
                    .alignment = switch (nav.status.fully_resolved.alignment) {
                        // No explicit alignment: pick per optimize mode.
                        .none => switch (mod.optimize_mode) {
                            .Debug,
                            .ReleaseSafe,
                            .ReleaseFast,
                            => target_util.defaultFunctionAlignment(target),
                            .ReleaseSmall => target_util.minFunctionAlignment(target),
                        },
                        // Explicit alignment may not go below the target minimum.
                        else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
                    }.toStdMem(),
                    .moved = true,
                });
                coff.nodes.appendAssumeCapacity(.{ .nav = nmi });
                const sym = si.get(coff);
                sym.ni = ni;
                sym.section_number = sec_si.get(coff).section_number;
            },
            // Re-emission: drop stale relocations originating from this atom.
            else => si.deleteLocationRelocs(coff),
        }
        const sym = si.get(coff);
        assert(sym.loc_relocs == .none);
        // Relocations appended during emission below belong to this atom.
        sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
        break :ni sym.ni;
    };

    var nw: MappedFile.Node.Writer = undefined;
    ni.writer(&coff.mf, gpa, &nw);
    defer nw.deinit();
    codegen.emitFunction(
        &coff.base,
        pt,
        zcu.navSrcLoc(func.owner_nav),
        func_index,
        @intFromEnum(si),
        mir,
        &nw.interface,
        .none,
    ) catch |err| switch (err) {
        // Surface the writer's underlying error instead of the generic wrapper.
        error.WriteFailed => return nw.err.?,
        else => |e| return e,
    };
    si.get(coff).size = @intCast(nw.interface.end);
    si.applyLocationRelocs(coff);
}
1704
/// Re-flushes the lazy const data keyed by `anyerror_type`, if it was ever
/// requested, so it reflects the final state of the error set. No-op when
/// that lazy symbol does not exist.
pub fn updateErrorData(coff: *Coff, pt: Zcu.PerThread) !void {
    coff.flushLazy(pt, .{
        .kind = .const_data,
        .index = @intCast(coff.lazy.getPtr(.const_data).map.getIndex(.anyerror_type) orelse return),
    }) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.CodegenFail => return error.LinkFailure,
        else => |e| return coff.base.comp.link_diags.fail("updateErrorData failed {t}", .{e}),
    };
}
1715
/// Finishes the link by pumping `idle` until no pending work remains, then
/// (as a self-described hack for stage2 x86_64 + COFF) copies the dynamic
/// compiler-rt library next to the emitted binary when one was built.
pub fn flush(
    coff: *Coff,
    arena: std.mem.Allocator,
    tid: Zcu.PerThread.Id,
    prog_node: std.Progress.Node,
) !void {
    _ = arena;
    _ = prog_node;
    while (try coff.idle(tid)) {}

    // hack for stage2_x86_64 + coff
    const comp = coff.base.comp;
    if (comp.compiler_rt_dyn_lib) |crt_file| {
        const gpa = comp.gpa;
        const compiler_rt_sub_path = try std.fs.path.join(gpa, &.{
            std.fs.path.dirname(coff.base.emit.sub_path) orelse "",
            std.fs.path.basename(crt_file.full_object_path.sub_path),
        });
        defer gpa.free(compiler_rt_sub_path);
        crt_file.full_object_path.root_dir.handle.copyFile(
            crt_file.full_object_path.sub_path,
            coff.base.emit.root_dir.handle,
            compiler_rt_sub_path,
            .{},
        ) catch |err| switch (err) {
            else => |e| return comp.link_diags.fail("Copy '{s}' failed: {s}", .{
                compiler_rt_sub_path,
                @errorName(e),
            }),
        };
    }
}
1748
/// Performs at most one unit of pending linker work, in priority order:
/// lower one pending unnamed constant, flush one pending global, flush one
/// pending lazy symbol, then process one mapped-file move/resize update.
/// Returns true while any work remains so callers keep pumping.
pub fn idle(coff: *Coff, tid: Zcu.PerThread.Id) !bool {
    const comp = coff.base.comp;
    task: {
        while (coff.pending_uavs.pop()) |pending_uav| {
            const sub_prog_node = coff.idleProgNode(tid, coff.const_prog_node, .{ .uav = pending_uav.key });
            defer sub_prog_node.end();
            coff.flushUav(
                .{ .zcu = comp.zcu.?, .tid = tid },
                pending_uav.key,
                pending_uav.value.alignment,
                pending_uav.value.src_loc,
            ) catch |err| switch (err) {
                error.OutOfMemory => return error.OutOfMemory,
                else => |e| return comp.link_diags.fail(
                    "linker failed to lower constant: {t}",
                    .{e},
                ),
            };
            break :task;
        }
        if (coff.global_pending_index < coff.globals.count()) {
            const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = tid };
            const gmi: Node.GlobalMapIndex = @enumFromInt(coff.global_pending_index);
            coff.global_pending_index += 1;
            const sub_prog_node = coff.synth_prog_node.start(
                gmi.globalName(coff).name.toSlice(coff),
                0,
            );
            defer sub_prog_node.end();
            coff.flushGlobal(pt, gmi) catch |err| switch (err) {
                error.OutOfMemory => return error.OutOfMemory,
                // Fixed diagnostic: this path flushes a global, not a
                // constant (the old message was copy-pasted from above).
                else => |e| return comp.link_diags.fail(
                    "linker failed to flush global: {t}",
                    .{e},
                ),
            };
            break :task;
        }
        var lazy_it = coff.lazy.iterator();
        while (lazy_it.next()) |lazy| if (lazy.value.pending_index < lazy.value.map.count()) {
            const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = tid };
            const lmr: Node.LazyMapRef = .{ .kind = lazy.key, .index = lazy.value.pending_index };
            lazy.value.pending_index += 1;
            const kind = switch (lmr.kind) {
                .code => "code",
                .const_data => "data",
            };
            var name: [std.Progress.Node.max_name_len]u8 = undefined;
            const sub_prog_node = coff.synth_prog_node.start(
                std.fmt.bufPrint(&name, "lazy {s} for {f}", .{
                    kind,
                    Type.fromInterned(lmr.lazySymbol(coff).ty).fmt(pt),
                }) catch &name,
                0,
            );
            defer sub_prog_node.end();
            coff.flushLazy(pt, lmr) catch |err| switch (err) {
                error.OutOfMemory => return error.OutOfMemory,
                else => |e| return comp.link_diags.fail(
                    "linker failed to lower lazy {s}: {t}",
                    .{ kind, e },
                ),
            };
            break :task;
        };
        while (coff.mf.updates.pop()) |ni| {
            const clean_moved = ni.cleanMoved(&coff.mf);
            const clean_resized = ni.cleanResized(&coff.mf);
            if (clean_moved or clean_resized) {
                const sub_prog_node =
                    coff.idleProgNode(tid, coff.mf.update_prog_node, coff.getNode(ni));
                defer sub_prog_node.end();
                if (clean_moved) try coff.flushMoved(ni);
                if (clean_resized) try coff.flushResized(ni);
                break :task;
            } else coff.mf.update_prog_node.completeOne();
        }
    }
    // Report whether any category still has pending work.
    if (coff.pending_uavs.count() > 0) return true;
    if (coff.globals.count() > coff.global_pending_index) return true;
    for (&coff.lazy.values) |lazy| if (lazy.map.count() > lazy.pending_index) return true;
    if (coff.mf.updates.items.len > 0) return true;
    return false;
}
1833
/// Starts a progress sub-node named after the thing being worked on: section
/// name for image/pseudo/object sections, global or nav name, or the
/// formatted value for unnamed constants. The `name` buffer backs formatted
/// names; on format overflow the full (truncated) buffer is used as-is.
fn idleProgNode(
    coff: *Coff,
    tid: Zcu.PerThread.Id,
    prog_node: std.Progress.Node,
    node: Node,
) std.Progress.Node {
    var name: [std.Progress.Node.max_name_len]u8 = undefined;
    return prog_node.start(name: switch (node) {
        else => |tag| @tagName(tag),
        .image_section => |si| std.mem.sliceTo(&si.get(coff).section_number.header(coff).name, 0),
        inline .pseudo_section, .object_section => |smi| smi.name(coff).toSlice(coff),
        .global => |gmi| gmi.globalName(coff).name.toSlice(coff),
        .nav => |nmi| {
            const ip = &coff.base.comp.zcu.?.intern_pool;
            break :name ip.getNav(nmi.navIndex(coff)).fqn.toSlice(ip);
        },
        .uav => |umi| std.fmt.bufPrint(&name, "{f}", .{
            Value.fromInterned(umi.uavValue(coff)).fmtValue(.{
                .zcu = coff.base.comp.zcu.?,
                .tid = tid,
            }),
        }) catch &name,
    }, 0);
}
1858
/// Generates the bytes of an unnamed constant into its atom inside ".rdata".
/// Creates the atom node on first flush; on later flushes it bails out early
/// if the existing node already satisfies the requested alignment, otherwise
/// it discards the old location relocations and regenerates.
fn flushUav(
    coff: *Coff,
    pt: Zcu.PerThread,
    umi: Node.UavMapIndex,
    uav_align: InternPool.Alignment,
    src_loc: Zcu.LazySrcLoc,
) !void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;

    const uav_val = umi.uavValue(coff);
    const si = umi.symbol(coff);
    const ni = ni: {
        switch (si.get(coff).ni) {
            .none => {
                // First flush: place the atom in the read-only data section.
                const sec_si = (try coff.objectSectionMapIndex(
                    .@".rdata",
                    coff.mf.flags.block_size,
                    .{ .read = true },
                )).symbol(coff);
                try coff.nodes.ensureUnusedCapacity(gpa, 1);
                const sym = si.get(coff);
                const ni = try coff.mf.addLastChildNode(gpa, sec_si.node(coff), .{
                    .alignment = uav_align.toStdMem(),
                    .moved = true,
                });
                coff.nodes.appendAssumeCapacity(.{ .uav = umi });
                sym.ni = ni;
                sym.section_number = sec_si.get(coff).section_number;
            },
            else => {
                // Already allocated: only regenerate when a stricter
                // alignment is required.
                if (si.get(coff).ni.alignment(&coff.mf).order(uav_align.toStdMem()).compare(.gte))
                    return;
                si.deleteLocationRelocs(coff);
            },
        }
        const sym = si.get(coff);
        assert(sym.loc_relocs == .none);
        // Relocations appended during codegen below belong to this atom.
        sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
        break :ni sym.ni;
    };

    var nw: MappedFile.Node.Writer = undefined;
    ni.writer(&coff.mf, gpa, &nw);
    defer nw.deinit();
    codegen.generateSymbol(
        &coff.base,
        pt,
        src_loc,
        .fromInterned(uav_val),
        &nw.interface,
        .{ .atom_index = @intFromEnum(si) },
    ) catch |err| switch (err) {
        error.WriteFailed => return error.OutOfMemory,
        else => |e| return e,
    };
    si.get(coff).size = @intCast(nw.interface.end);
    si.applyLocationRelocs(coff);
}
1918
/// Materializes the symbol for a named global, `gmi`. Only globals that carry
/// a library name (DLL imports) are handled; a global without a lib name is
/// left untouched (the entire body is guarded by the `lib_name` unwrap).
///
/// For a DLL import this:
/// 1. on the first import from a given DLL, creates that library's import
///    directory entry plus its import lookup table, import address table
///    (IAT), and hint/name table nodes inside the `.idata` section,
/// 2. appends the imported `name` to the lookup/address tables and the
///    hint/name table (each entry list stays null-terminated), and
/// 3. emits a small thunk in `.text` that jumps indirectly through the IAT
///    slot, then records a relocation pointing the thunk at that slot.
fn flushGlobal(coff: *Coff, pt: Zcu.PerThread, gmi: Node.GlobalMapIndex) !void {
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = zcu.gpa;
    const gn = gmi.globalName(coff);
    if (gn.lib_name.toSlice(coff)) |lib_name| {
        const name = gn.name.toSlice(coff);
        // Reserve up front so the `appendAssumeCapacity` calls below cannot
        // fail: up to 3 table nodes (first import from this DLL) + 1 thunk.
        try coff.nodes.ensureUnusedCapacity(gpa, 4);
        try coff.symbol_table.ensureUnusedCapacity(gpa, 1);

        const target_endian = coff.targetEndian();
        const magic = coff.targetLoad(&coff.optionalHeaderStandardPtr().magic);
        // PE32 import table entries are 32-bit; PE32+ entries are 64-bit.
        const addr_size: u64, const addr_align: std.mem.Alignment = switch (magic) {
            _ => unreachable,
            .PE32 => .{ 4, .@"4" },
            .@"PE32+" => .{ 8, .@"8" },
        };

        // One import-table entry per DLL, keyed (adapted) by library name.
        const gop = try coff.import_table.entries.getOrPutAdapted(
            gpa,
            lib_name,
            ImportTable.Adapter{ .coff = coff },
        );
        // Hint/name table entries are 2-byte aligned per the PE format.
        const import_hint_name_align: std.mem.Alignment = .@"2";
        if (!gop.found_existing) {
            errdefer _ = coff.import_table.entries.pop();
            // +2: room for this DLL's directory entry plus the all-zero
            // terminator entry.
            try coff.import_table.ni.resize(
                &coff.mf,
                gpa,
                @sizeOf(std.coff.ImportDirectoryEntry) * (gop.index + 2),
            );
            // Initial hint/name table holds just "<lib_name>.dll\0", padded
            // to the 2-byte alignment.
            const import_hint_name_table_len =
                import_hint_name_align.forward(lib_name.len + ".dll".len + 1);
            const idata_section_ni = coff.import_table.ni.parent(&coff.mf);
            // Lookup and address tables start with one entry + null terminator.
            const import_lookup_table_ni = try coff.mf.addLastChildNode(gpa, idata_section_ni, .{
                .size = addr_size * 2,
                .alignment = addr_align,
                .moved = true,
            });
            const import_address_table_ni = try coff.mf.addLastChildNode(gpa, idata_section_ni, .{
                .size = addr_size * 2,
                .alignment = addr_align,
                .moved = true,
            });
            // The IAT gets a symbol so thunks can relocate against its slots.
            const import_address_table_si = coff.addSymbolAssumeCapacity();
            {
                const import_address_table_sym = import_address_table_si.get(coff);
                import_address_table_sym.ni = import_address_table_ni;
                assert(import_address_table_sym.loc_relocs == .none);
                import_address_table_sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
                import_address_table_sym.section_number =
                    coff.getNode(idata_section_ni).object_section.symbol(coff).get(coff).section_number;
            }
            const import_hint_name_table_ni = try coff.mf.addLastChildNode(gpa, idata_section_ni, .{
                .size = import_hint_name_table_len,
                .alignment = import_hint_name_align,
                .moved = true,
            });
            gop.value_ptr.* = .{
                .import_lookup_table_ni = import_lookup_table_ni,
                .import_address_table_si = import_address_table_si,
                .import_hint_name_table_ni = import_hint_name_table_ni,
                .len = 0,
                .hint_name_len = @intCast(import_hint_name_table_len),
            };
            // Write "<lib_name>.dll" with NUL/alignment padding.
            const import_hint_name_slice = import_hint_name_table_ni.slice(&coff.mf);
            @memcpy(import_hint_name_slice[0..lib_name.len], lib_name);
            @memcpy(import_hint_name_slice[lib_name.len..][0..".dll".len], ".dll");
            @memset(import_hint_name_slice[lib_name.len + ".dll".len ..], 0);
            coff.nodes.appendAssumeCapacity(.{ .import_lookup_table = @enumFromInt(gop.index) });
            coff.nodes.appendAssumeCapacity(.{ .import_address_table = @enumFromInt(gop.index) });
            coff.nodes.appendAssumeCapacity(.{ .import_hint_name_table = @enumFromInt(gop.index) });

            // Fill in this DLL's directory entry followed by the terminator,
            // byte-swapping in place when cross-endian.
            const import_directory_entries = coff.importDirectoryTableSlice()[gop.index..][0..2];
            import_directory_entries.* = .{ .{
                .import_lookup_table_rva = coff.computeNodeRva(import_lookup_table_ni),
                .time_date_stamp = 0,
                .forwarder_chain = 0,
                .name_rva = coff.computeNodeRva(import_hint_name_table_ni),
                .import_address_table_rva = coff.computeNodeRva(import_address_table_ni),
            }, .{
                .import_lookup_table_rva = 0,
                .time_date_stamp = 0,
                .forwarder_chain = 0,
                .name_rva = 0,
                .import_address_table_rva = 0,
            } };
            if (target_endian != native_endian)
                std.mem.byteSwapAllFields([2]std.coff.ImportDirectoryEntry, import_directory_entries);
        }
        // Append this symbol's slot to the DLL's tables; `+ 2` keeps space
        // for the null terminator entry after the new slot.
        const import_symbol_index = gop.value_ptr.len;
        gop.value_ptr.len = import_symbol_index + 1;
        const new_symbol_table_size = addr_size * (import_symbol_index + 2);
        const import_hint_name_index = gop.value_ptr.hint_name_len;
        // Hint/name entry layout: 2-byte hint, name, NUL, alignment padding.
        gop.value_ptr.hint_name_len = @intCast(
            import_hint_name_align.forward(import_hint_name_index + 2 + name.len + 1),
        );
        try gop.value_ptr.import_lookup_table_ni.resize(&coff.mf, gpa, new_symbol_table_size);
        const import_address_table_ni = gop.value_ptr.import_address_table_si.node(coff);
        try import_address_table_ni.resize(&coff.mf, gpa, new_symbol_table_size);
        try gop.value_ptr.import_hint_name_table_ni.resize(&coff.mf, gpa, gop.value_ptr.hint_name_len);
        // Slices are taken only after all resizes above are done.
        const import_lookup_slice = gop.value_ptr.import_lookup_table_ni.slice(&coff.mf);
        const import_address_slice = import_address_table_ni.slice(&coff.mf);
        const import_hint_name_slice = gop.value_ptr.import_hint_name_table_ni.slice(&coff.mf);
        // Hint is left as 0; the loader falls back to binary search by name.
        @memset(import_hint_name_slice[import_hint_name_index..][0..2], 0);
        @memcpy(import_hint_name_slice[import_hint_name_index + 2 ..][0..name.len], name);
        @memset(import_hint_name_slice[import_hint_name_index + 2 + name.len ..], 0);
        const import_hint_name_rva =
            coff.computeNodeRva(gop.value_ptr.import_hint_name_table_ni) + import_hint_name_index;
        switch (magic) {
            _ => unreachable,
            inline .PE32, .@"PE32+" => |ct_magic| {
                const Addr = switch (ct_magic) {
                    _ => comptime unreachable,
                    .PE32 => u32,
                    .@"PE32+" => u64,
                };
                const import_lookup_table: []Addr = @ptrCast(@alignCast(import_lookup_slice));
                const import_address_table: []Addr = @ptrCast(@alignCast(import_address_slice));
                // New slot's hint/name RVA followed by the null terminator.
                const import_hint_name_rvas: [2]Addr = .{
                    std.mem.nativeTo(Addr, @intCast(import_hint_name_rva), target_endian),
                    std.mem.nativeTo(Addr, 0, target_endian),
                };
                import_lookup_table[import_symbol_index..][0..2].* = import_hint_name_rvas;
                import_address_table[import_symbol_index..][0..2].* = import_hint_name_rvas;
            },
        }
        // The global's own symbol becomes a thunk in `.text`.
        const si = gmi.symbol(coff);
        const sym = si.get(coff);
        sym.section_number = Symbol.Index.text.get(coff).section_number;
        assert(sym.loc_relocs == .none);
        sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
        switch (coff.targetLoad(&coff.headerPtr().machine)) {
            else => |tag| @panic(@tagName(tag)),
            .AMD64 => {
                // ff 25 xx xx xx xx: rip-relative indirect jmp; the REL32
                // relocation below fills the 4-byte displacement with the
                // offset to this symbol's IAT slot.
                const init = [_]u8{ 0xff, 0x25, 0x00, 0x00, 0x00, 0x00 };
                const target = &comp.root_mod.resolved_target.result;
                const ni = try coff.mf.addLastChildNode(gpa, Symbol.Index.text.node(coff), .{
                    .alignment = switch (comp.root_mod.optimize_mode) {
                        .Debug,
                        .ReleaseSafe,
                        .ReleaseFast,
                        => target_util.defaultFunctionAlignment(target),
                        .ReleaseSmall => target_util.minFunctionAlignment(target),
                    }.toStdMem(),
                    .size = init.len,
                });
                @memcpy(ni.slice(&coff.mf)[0..init.len], &init);
                sym.ni = ni;
                sym.size = init.len;
                try coff.addReloc(
                    si,
                    init.len - 4,
                    gop.value_ptr.import_address_table_si,
                    @intCast(addr_size * import_symbol_index),
                    .{ .AMD64 = .REL32 },
                );
            },
        }
        coff.nodes.appendAssumeCapacity(.{ .global = gmi });
        sym.rva = coff.computeNodeRva(sym.ni);
        si.applyLocationRelocs(coff);
    }
}
2082}
2083
/// Generates the contents of one lazy symbol (`lmr`), creating its node in
/// the appropriate section on first flush (`.text` for code, `.rdata` for
/// const data) and regenerating it in place on subsequent flushes.
fn flushLazy(coff: *Coff, pt: Zcu.PerThread, lmr: Node.LazyMapRef) !void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;

    const lazy = lmr.lazySymbol(coff);
    const si = lmr.symbol(coff);
    const ni = ni: {
        const sym = si.get(coff);
        switch (sym.ni) {
            // First flush: allocate a node for this symbol.
            .none => {
                // Reserve so appendAssumeCapacity below cannot fail.
                try coff.nodes.ensureUnusedCapacity(gpa, 1);
                const sec_si: Symbol.Index = switch (lazy.kind) {
                    .code => .text,
                    .const_data => .rdata,
                };
                const ni = try coff.mf.addLastChildNode(gpa, sec_si.node(coff), .{ .moved = true });
                coff.nodes.appendAssumeCapacity(switch (lazy.kind) {
                    .code => .{ .lazy_code = @enumFromInt(lmr.index) },
                    .const_data => .{ .lazy_const_data = @enumFromInt(lmr.index) },
                });
                sym.ni = ni;
                sym.section_number = sec_si.get(coff).section_number;
            },
            // Re-flush: drop stale location relocations before regenerating.
            else => si.deleteLocationRelocs(coff),
        }
        assert(sym.loc_relocs == .none);
        // New location relocs start at the current end of the reloc list.
        sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
        break :ni sym.ni;
    };

    var required_alignment: InternPool.Alignment = .none;
    var nw: MappedFile.Node.Writer = undefined;
    ni.writer(&coff.mf, gpa, &nw);
    defer nw.deinit();
    // Emit the lazy symbol's bytes directly into the node.
    try codegen.generateLazySymbol(
        &coff.base,
        pt,
        Type.fromInterned(lazy.ty).srcLocOrNull(pt.zcu) orelse .unneeded,
        lazy,
        &required_alignment,
        &nw.interface,
        .none,
        .{ .atom_index = @intFromEnum(si) },
    );
    si.get(coff).size = @intCast(nw.interface.end);
    si.applyLocationRelocs(coff);
}
2131
/// Called after node `ni` moved within the mapped file; patches whatever
/// header fields or table entries record this node's location, then notifies
/// the node's children that they moved too.
fn flushMoved(coff: *Coff, ni: MappedFile.Node.Index) !void {
    switch (coff.getNode(ni)) {
        // Fixed-layout header nodes are never expected to move.
        .file,
        .header,
        .signature,
        .coff_header,
        .optional_header,
        .data_directories,
        .section_table,
        => unreachable,
        // A section moving in the file only changes its raw-data offset.
        // NOTE(review): this early return also skips the childrenMoved call
        // at the bottom — presumably because RVAs are unaffected by a file
        // move; confirm intent.
        .image_section => |si| return coff.targetStore(
            &si.get(coff).section_number.header(coff).pointer_to_raw_data,
            @intCast(ni.fileLocation(&coff.mf, false).offset),
        ),
        .import_directory_table => coff.targetStore(
            &coff.dataDirectoryPtr(.IMPORT).virtual_address,
            coff.computeNodeRva(ni),
        ),
        .import_lookup_table => |import_index| coff.targetStore(
            &coff.importDirectoryEntryPtr(import_index).import_lookup_table_rva,
            coff.computeNodeRva(ni),
        ),
        .import_address_table => |import_index| {
            // The IAT has a symbol of its own; update it first, then mirror
            // its RVA into the directory entry.
            const import_address_table_si = import_index.get(coff).import_address_table_si;
            import_address_table_si.flushMoved(coff);
            coff.targetStore(
                &coff.importDirectoryEntryPtr(import_index).import_address_table_rva,
                import_address_table_si.get(coff).rva,
            );
        },
        .import_hint_name_table => |import_index| {
            const target_endian = coff.targetEndian();
            const magic = coff.targetLoad(&coff.optionalHeaderStandardPtr().magic);
            const import_hint_name_rva = coff.computeNodeRva(ni);
            coff.targetStore(
                &coff.importDirectoryEntryPtr(import_index).name_rva,
                import_hint_name_rva,
            );
            // Every lookup/address table slot points into this table, so all
            // of them must be rewritten with the table's new base RVA.
            const import_entry = import_index.get(coff);
            const import_lookup_slice = import_entry.import_lookup_table_ni.slice(&coff.mf);
            const import_address_slice =
                import_entry.import_address_table_si.node(coff).slice(&coff.mf);
            const import_hint_name_slice = ni.slice(&coff.mf);
            const import_hint_name_align = ni.alignment(&coff.mf);
            var import_hint_name_index: u32 = 0;
            for (0..import_entry.len) |import_symbol_index| {
                // Skip past the previous entry's NUL terminator to the next
                // aligned hint/name entry. (The first iteration skips the
                // "<lib>.dll" string that heads the table.)
                import_hint_name_index = @intCast(import_hint_name_align.forward(
                    std.mem.indexOfScalarPos(
                        u8,
                        import_hint_name_slice,
                        import_hint_name_index,
                        0,
                    ).? + 1,
                ));
                switch (magic) {
                    _ => unreachable,
                    inline .PE32, .@"PE32+" => |ct_magic| {
                        const Addr = switch (ct_magic) {
                            _ => comptime unreachable,
                            .PE32 => u32,
                            .@"PE32+" => u64,
                        };
                        const import_lookup_table: []Addr = @ptrCast(@alignCast(import_lookup_slice));
                        const import_address_table: []Addr = @ptrCast(@alignCast(import_address_slice));
                        const rva = std.mem.nativeTo(
                            Addr,
                            import_hint_name_rva + import_hint_name_index,
                            target_endian,
                        );
                        import_lookup_table[import_symbol_index] = rva;
                        import_address_table[import_symbol_index] = rva;
                    },
                }
                // Advance past the 2-byte hint into the name so the next
                // indexOfScalarPos finds this entry's terminating NUL.
                import_hint_name_index += 2;
            }
        },
        // Symbol-backed nodes delegate to the symbol's own move handler.
        inline .pseudo_section,
        .object_section,
        .global,
        .nav,
        .uav,
        .lazy_code,
        .lazy_const_data,
        => |mi| mi.symbol(coff).flushMoved(coff),
    }
    try ni.childrenMoved(coff.base.comp.gpa, &coff.mf);
}
2219
/// Called after node `ni` was resized; updates the size fields recorded in
/// headers/tables and, when a section's virtual size is outgrown, slides all
/// subsequent sections to new RVAs.
fn flushResized(coff: *Coff, ni: MappedFile.Node.Index) !void {
    _, const size = ni.location(&coff.mf).resolve(&coff.mf);
    switch (coff.getNode(ni)) {
        .file => {},
        .header => {
            switch (coff.optionalHeaderPtr()) {
                inline else => |optional_header| coff.targetStore(
                    &optional_header.size_of_headers,
                    @intCast(size),
                ),
            }
            // If the headers now overlap the first section, push every
            // section out. The 4x factor leaves growth headroom —
            // NOTE(review): presumably to reduce how often future resizes
            // force another slide; confirm intent.
            if (size > coff.image_section_table.items[0].get(coff).rva) try coff.virtualSlide(
                0,
                std.mem.alignForward(
                    u32,
                    @intCast(size * 4),
                    coff.optionalHeaderField(.section_alignment),
                ),
            );
        },
        // These live inside the fixed-size header and never resize.
        .signature, .coff_header, .optional_header, .data_directories => unreachable,
        .section_table => {},
        .image_section => |si| {
            const sym = si.get(coff);
            const section_index = sym.section_number.toIndex();
            const section = &coff.sectionTableSlice()[section_index];
            coff.targetStore(&section.size_of_raw_data, @intCast(size));
            // Only grow virtual size; slide everything after this section
            // when its reserved virtual range is exceeded. Same 4x headroom
            // as above.
            if (size > coff.targetLoad(&section.virtual_size)) {
                const virtual_size = std.mem.alignForward(
                    u32,
                    @intCast(size * 4),
                    coff.optionalHeaderField(.section_alignment),
                );
                coff.targetStore(&section.virtual_size, virtual_size);
                try coff.virtualSlide(section_index + 1, sym.rva + virtual_size);
            }
        },
        .import_directory_table => coff.targetStore(
            &coff.dataDirectoryPtr(.IMPORT).size,
            @intCast(size),
        ),
        // Sizes of these are tracked via the import table entry, not here.
        .import_lookup_table, .import_address_table, .import_hint_name_table => {},
        inline .pseudo_section,
        .object_section,
        => |smi| smi.symbol(coff).get(coff).size = @intCast(size),
        // Symbol sizes for these are set when their contents are generated.
        .global, .nav, .uav, .lazy_code, .lazy_const_data => {},
    }
}
/// Re-bases every image section from `start_section_index` onward: the first
/// one is placed at `start_rva` and each following section immediately after
/// the previous section's virtual size. Updates both the in-memory section
/// symbols and the on-disk section headers, notifies each moved section's
/// children, and finally records the new total image size in the optional
/// header.
fn virtualSlide(coff: *Coff, start_section_index: usize, start_rva: u32) !void {
    var next_rva = start_rva;
    for (
        coff.image_section_table.items[start_section_index..],
        coff.sectionTableSlice()[start_section_index..],
    ) |sec_si, *sec_header| {
        const sec_sym = sec_si.get(coff);
        sec_sym.rva = next_rva;
        coff.targetStore(&sec_header.virtual_address, next_rva);
        try sec_sym.ni.childrenMoved(coff.base.comp.gpa, &coff.mf);
        next_rva += coff.targetLoad(&sec_header.virtual_size);
    }
    switch (coff.optionalHeaderPtr()) {
        inline else => |optional_header| coff.targetStore(
            &optional_header.size_of_image,
            @intCast(next_rva),
        ),
    }
}
2287
/// Public `link.File` entry point for export updates. Delegates to
/// `updateExportsInner`, translating its error set into the one expected by
/// callers (`LinkFailure` becomes `AnalysisFail`).
pub fn updateExports(
    coff: *Coff,
    pt: Zcu.PerThread,
    exported: Zcu.Exported,
    export_indices: []const Zcu.Export.Index,
) !void {
    coff.updateExportsInner(pt, exported, export_indices) catch |err| return switch (err) {
        error.OutOfMemory => error.OutOfMemory,
        error.LinkFailure => error.AnalysisFail,
    };
}
/// Resolves the exported value (`nav` or `uav`) to a symbol, then for every
/// export name creates/updates a global symbol aliasing that symbol and
/// re-applies relocations that target it. Special-cases two well-known
/// names: "wWinMainCRTStartup" becomes the image entry point, and
/// "_tls_used" populates the TLS data directory.
fn updateExportsInner(
    coff: *Coff,
    pt: Zcu.PerThread,
    exported: Zcu.Exported,
    export_indices: []const Zcu.Export.Index,
) !void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;

    switch (exported) {
        .nav => |nav| log.debug("updateExports({f})", .{ip.getNav(nav).fqn.fmt(ip)}),
        .uav => |uav| log.debug("updateExports(@as({f}, {f}))", .{
            Type.fromInterned(ip.typeOf(uav)).fmt(pt),
            Value.fromInterned(uav).fmtValue(pt),
        }),
    }
    // One global symbol may be created per export below.
    try coff.symbol_table.ensureUnusedCapacity(gpa, export_indices.len);
    const exported_si: Symbol.Index = switch (exported) {
        .nav => |nav| try coff.navSymbol(zcu, nav),
        .uav => |uav| @enumFromInt(switch (try coff.lowerUav(
            pt,
            uav,
            Type.fromInterned(ip.typeOf(uav)).abiAlignment(zcu),
            export_indices[0].ptr(zcu).src,
        )) {
            .sym_index => |si| si,
            .fail => |em| {
                defer em.destroy(gpa);
                return coff.base.comp.link_diags.fail("{s}", .{em.msg});
            },
        }),
    };
    // Drain pending linker work so the exported symbol's location is final.
    while (try coff.idle(pt.tid)) {}
    const exported_ni = exported_si.node(coff);
    const exported_sym = exported_si.get(coff);
    for (export_indices) |export_index| {
        const @"export" = export_index.ptr(zcu);
        const export_si = try coff.globalSymbol(@"export".opts.name.toSlice(ip), null);
        // The export symbol becomes an alias of the exported symbol.
        const export_sym = export_si.get(coff);
        export_sym.ni = exported_ni;
        export_sym.rva = exported_sym.rva;
        export_sym.size = exported_sym.size;
        export_sym.section_number = exported_sym.section_number;
        export_si.applyTargetRelocs(coff);
        if (@"export".opts.name.eqlSlice("wWinMainCRTStartup", ip)) {
            // Use targetStore so the field is written in target endianness,
            // consistent with every other header-field write in this file
            // (previously a direct native-endian assignment).
            coff.targetStore(
                &coff.optionalHeaderStandardPtr().address_of_entry_point,
                exported_sym.rva,
            );
        } else if (@"export".opts.name.eqlSlice("_tls_used", ip)) {
            const tls_directory = coff.dataDirectoryPtr(.TLS);
            tls_directory.* = .{ .virtual_address = exported_sym.rva, .size = exported_sym.size };
            if (coff.targetEndian() != native_endian)
                std.mem.byteSwapAllFields(std.coff.ImageDataDirectory, tls_directory);
        }
    }
}
2354
/// Export deletion is currently not implemented for this backend; all
/// parameters are intentionally discarded.
pub fn deleteExport(coff: *Coff, exported: Zcu.Exported, name: InternPool.NullTerminatedString) void {
    _ = .{ coff, exported, name };
}
2360
/// Debug helper: pretty-prints the whole node tree (starting at `.root`)
/// to stderr while holding the stderr lock. Print errors are ignored.
pub fn dump(coff: *Coff, tid: Zcu.PerThread.Id) void {
    const stderr, _ = std.debug.lockStderrWriter(&.{});
    defer std.debug.unlockStderrWriter();
    coff.printNode(tid, stderr, .root, 0) catch {};
}
2366
/// Recursively prints one node of the mapped-file tree for debugging: the
/// node's tag plus a tag-specific detail in parentheses, its location/flags,
/// then its children (indented one level deeper). Leaf nodes with file
/// content additionally get a 16-bytes-per-line hex + ASCII dump.
pub fn printNode(
    coff: *Coff,
    tid: Zcu.PerThread.Id,
    w: *std.Io.Writer,
    ni: MappedFile.Node.Index,
    indent: usize,
) !void {
    const node = coff.getNode(ni);
    try w.splatByteAll(' ', indent);
    try w.writeAll(@tagName(node));
    // Tag-specific detail, e.g. section name, DLL name, or symbol identity.
    switch (node) {
        else => {},
        .image_section => |si| try w.print("({s})", .{
            std.mem.sliceTo(&si.get(coff).section_number.header(coff).name, 0),
        }),
        // All three import tables are labeled with the DLL name, which heads
        // the hint/name table.
        .import_lookup_table,
        .import_address_table,
        .import_hint_name_table,
        => |import_index| try w.print("({s})", .{
            std.mem.sliceTo(import_index.get(coff).import_hint_name_table_ni.sliceConst(&coff.mf), 0),
        }),
        inline .pseudo_section, .object_section => |smi| try w.print("({s})", .{
            smi.name(coff).toSlice(coff),
        }),
        .global => |gmi| {
            const gn = gmi.globalName(coff);
            try w.writeByte('(');
            if (gn.lib_name.toSlice(coff)) |lib_name| try w.print("{s}.dll, ", .{lib_name});
            try w.print("{s})", .{gn.name.toSlice(coff)});
        },
        .nav => |nmi| {
            const zcu = coff.base.comp.zcu.?;
            const ip = &zcu.intern_pool;
            const nav = ip.getNav(nmi.navIndex(coff));
            try w.print("({f}, {f})", .{
                Type.fromInterned(nav.typeOf(ip)).fmt(.{ .zcu = zcu, .tid = tid }),
                nav.fqn.fmt(ip),
            });
        },
        .uav => |umi| {
            const zcu = coff.base.comp.zcu.?;
            const val: Value = .fromInterned(umi.uavValue(coff));
            try w.print("({f}, {f})", .{
                val.typeOf(zcu).fmt(.{ .zcu = zcu, .tid = tid }),
                val.fmtValue(.{ .zcu = zcu, .tid = tid }),
            });
        },
        inline .lazy_code, .lazy_const_data => |lmi| try w.print("({f})", .{
            Type.fromInterned(lmi.lazySymbol(coff).ty).fmt(.{
                .zcu = coff.base.comp.zcu.?,
                .tid = tid,
            }),
        }),
    }
    // Location and flag summary for this node.
    {
        const mf_node = &coff.mf.nodes.items[@intFromEnum(ni)];
        const off, const size = mf_node.location().resolve(&coff.mf);
        try w.print(" index={d} offset=0x{x} size=0x{x} align=0x{x}{s}{s}{s}{s}\n", .{
            @intFromEnum(ni),
            off,
            size,
            mf_node.flags.alignment.toByteUnits(),
            if (mf_node.flags.fixed) " fixed" else "",
            if (mf_node.flags.moved) " moved" else "",
            if (mf_node.flags.resized) " resized" else "",
            if (mf_node.flags.has_content) " has_content" else "",
        });
    }
    var leaf = true;
    var child_it = ni.children(&coff.mf);
    while (child_it.next()) |child_ni| {
        leaf = false;
        try coff.printNode(tid, w, child_ni, indent + 1);
    }
    // Hex-dump leaf contents: address, 16 hex bytes, then printable ASCII.
    if (leaf) {
        const file_loc = ni.fileLocation(&coff.mf, false);
        if (file_loc.size == 0) return;
        var address = file_loc.offset;
        const line_len = 0x10;
        var line_it = std.mem.window(
            u8,
            coff.mf.contents[@intCast(file_loc.offset)..][0..@intCast(file_loc.size)],
            line_len,
            line_len,
        );
        while (line_it.next()) |line_bytes| : (address += line_len) {
            try w.splatByteAll(' ', indent + 1);
            try w.print("{x:0>8} ", .{address});
            for (line_bytes) |byte| try w.print("{x:0>2} ", .{byte});
            // Pad short (final) lines so the ASCII column stays aligned.
            try w.splatByteAll(' ', 3 * (line_len - line_bytes.len) + 1);
            for (line_bytes) |byte| try w.writeByte(if (std.ascii.isPrint(byte)) byte else '.');
            try w.writeByte('\n');
        }
    }
}
2462
2463const assert = std.debug.assert;
2464const builtin = @import("builtin");
2465const codegen = @import("../codegen.zig");
2466const Compilation = @import("../Compilation.zig");
2467const Coff = @This();
2468const InternPool = @import("../InternPool.zig");
2469const link = @import("../link.zig");
2470const log = std.log.scoped(.link);
2471const MappedFile = @import("MappedFile.zig");
2472const native_endian = builtin.cpu.arch.endian();
2473const std = @import("std");
2474const target_util = @import("../target.zig");
2475const Type = @import("../Type.zig");
2476const Value = @import("../Value.zig");
2477const Zcu = @import("../Zcu.zig");