Commit 90b3599c68
Changed files (14)
lib/std/coff.zig
@@ -303,6 +303,15 @@ pub const SectionHeader = extern struct {
return std.math.powi(u16, 2, self.flags.ALIGN - 1) catch unreachable;
}
+ pub fn setAlignment(self: *SectionHeader, new_alignment: u16) void {
+ assert(new_alignment > 0 and new_alignment <= 8192);
+ self.flags.ALIGN = std.math.log2(new_alignment) + 1;
+ }
+
+ pub fn isCode(self: SectionHeader) bool {
+ return self.flags.CNT_CODE == 0b1;
+ }
+
pub fn isComdat(self: SectionHeader) bool {
return self.flags.LNK_COMDAT == 0b1;
}
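
The new setAlignment is the inverse of the getAlignment shown above: the section header's ALIGN flag encodes an alignment of 2^(ALIGN - 1), so storing an alignment means writing log2(alignment) + 1 (the 8192 upper bound in the assert encodes to 14). A minimal sketch of the encoding, assuming only the formula visible in this hunk; the free-function names are illustrative:

    const std = @import("std");

    // Hypothetical helpers mirroring SectionHeader.setAlignment/getAlignment.
    fn encodeAlign(alignment: u16) u16 {
        // e.g. 1 -> 1, 512 -> 10, 8192 -> 14
        return std.math.log2(alignment) + 1;
    }

    fn decodeAlign(align_flag: u16) u16 {
        return std.math.powi(u16, 2, align_flag - 1) catch unreachable;
    }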
src/arch/aarch64/CodeGen.zig
@@ -3475,10 +3475,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
- coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes
- else
- unreachable;
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: {
+ const got_atom = coff_file.getGotAtomForSymbol(.{ .sym_index = fn_owner_decl.link.coff.sym_index, .file = null }).?;
+ const got_sym = coff_file.getSymbol(got_atom.getSymbolWithLoc());
+ break :blk got_sym.value;
+ } else unreachable;
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });
@@ -5110,8 +5111,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
assert(decl.link.macho.sym_index != 0);
return MCValue{ .got_load = decl.link.macho.sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
- return MCValue{ .memory = got_addr };
+ const got_atom = coff_file.getGotAtomForSymbol(.{ .sym_index = decl.link.coff.sym_index, .file = null }).?;
+ const got_sym = coff_file.getSymbol(got_atom.getSymbolWithLoc());
+ return MCValue{ .memory = got_sym.value };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
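
The same replacement appears in the arm, riscv64, and x86_64 backends below: rather than computing a slot address from offset_table_virtual_address, each backend asks the COFF linker for the GOT atom belonging to the decl's symbol and reads the address off that atom's symbol. A sketch of the shared pattern as a hypothetical helper; getGotAtomForSymbol, getSymbol, and getSymbolWithLoc are the calls introduced by this commit, while the helper itself is illustrative and relies on compiler-internal types:

    fn gotAddrForDecl(coff_file: *link.File.Coff, decl: *Module.Decl) u64 {
        const got_atom = coff_file.getGotAtomForSymbol(.{
            .sym_index = decl.link.coff.sym_index,
            .file = null, // null: the symbol is defined by Zig source
        }).?;
        const got_sym = coff_file.getSymbol(got_atom.getSymbolWithLoc());
        return got_sym.value;
    }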
src/arch/arm/CodeGen.zig
@@ -3709,10 +3709,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
- coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes
- else
- unreachable;
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: {
+ const got_atom = coff_file.getGotAtomForSymbol(.{ .sym_index = fn_owner_decl.link.coff.sym_index, .file = null }).?;
+ const got_sym = coff_file.getSymbol(got_atom.getSymbolWithLoc());
+ break :blk @intCast(u32, got_sym.value);
+ } else unreachable;
try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
} else if (func_value.castTag(.extern_fn)) |_| {
@@ -5549,8 +5550,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
- return MCValue{ .memory = got_addr };
+ const got_atom = coff_file.getGotAtomForSymbol(.{ .sym_index = decl.link.coff.sym_index, .file = null }).?;
+ const got_sym = coff_file.getSymbol(got_atom.getSymbolWithLoc());
+ return MCValue{ .memory = got_sym.value };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
src/arch/riscv64/CodeGen.zig
@@ -1755,10 +1755,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
- coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes
- else
- unreachable;
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: {
+ const got_atom = coff_file.getGotAtomForSymbol(.{ .sym_index = fn_owner_decl.link.coff.sym_index, .file = null }).?;
+ const got_sym = coff_file.getSymbol(got_atom.getSymbolWithLoc());
+ break :blk got_sym.value;
+ } else unreachable;
try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
_ = try self.addInst(.{
@@ -2592,8 +2593,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
- return MCValue{ .memory = got_addr };
+ const got_atom = coff_file.getGotAtomForSymbol(.{ .sym_index = decl.link.coff.sym_index, .file = null }).?;
+ const got_sym = coff_file.getSymbol(got_atom.getSymbolWithLoc());
+ return MCValue{ .memory = got_sym.value };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
src/arch/x86_64/CodeGen.zig
@@ -3971,10 +3971,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
- } else if (self.bin_file.cast(link.File.Coff)) |coff_file|
- @intCast(u32, coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes)
- else
- unreachable;
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: {
+ const got_atom = coff_file.getGotAtomForSymbol(.{ .sym_index = fn_owner_decl.link.coff.sym_index, .file = null }).?;
+ const got_sym = coff_file.getSymbol(got_atom.getSymbolWithLoc());
+ break :blk got_sym.value;
+ } else unreachable;
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
@@ -6847,8 +6848,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
assert(decl.link.macho.sym_index != 0);
return MCValue{ .got_load = decl.link.macho.sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
- return MCValue{ .memory = got_addr };
+ const got_atom = coff_file.getGotAtomForSymbol(.{ .sym_index = decl.link.coff.sym_index, .file = null }).?;
+ const got_sym = coff_file.getSymbol(got_atom.getSymbolWithLoc());
+ return MCValue{ .memory = got_sym.value };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
src/link/Coff/Atom.zig
@@ -0,0 +1,85 @@
+const Atom = @This();
+
+const std = @import("std");
+const coff = std.coff;
+
+const Allocator = std.mem.Allocator;
+
+const Coff = @import("../Coff.zig");
+const SymbolWithLoc = Coff.SymbolWithLoc;
+
+/// Each decl always gets a local symbol with the fully qualified name.
+/// The vaddr and size are found here directly.
+/// The file offset is found by computing the vaddr offset from the section vaddr
+/// the symbol references, and adding that to the file offset of the section.
+/// If this field is 0, it means the codegen size = 0 and there is no symbol or
+/// offset table entry.
+sym_index: u32,
+
+/// null means symbol defined by Zig source.
+file: ?u32,
+
+/// Used size of the atom
+size: u64,
+
+/// Alignment of the atom
+alignment: u32,
+
+/// Points to the previous and next neighbors, based on the `text_offset`.
+/// This can be used to find, for example, the capacity of this `Atom`.
+prev: ?*Atom,
+next: ?*Atom,
+
+pub const empty = Atom{
+ .sym_index = 0,
+ .file = null,
+ .size = 0,
+ .alignment = 0,
+ .prev = null,
+ .next = null,
+};
+
+pub fn deinit(self: *Atom, gpa: Allocator) void {
+ _ = self;
+ _ = gpa;
+}
+
+pub fn getSymbol(self: Atom, coff_file: *Coff) coff.Symbol {
+ return self.getSymbolPtr(coff_file).*;
+}
+
+pub fn getSymbolPtr(self: Atom, coff_file: *Coff) *coff.Symbol {
+ return coff_file.getSymbolPtr(.{
+ .sym_index = self.sym_index,
+ .file = self.file,
+ });
+}
+
+pub fn getSymbolWithLoc(self: Atom) SymbolWithLoc {
+ return .{ .sym_index = self.sym_index, .file = self.file };
+}
+
+/// Returns how much room there is to grow in virtual address space.
+pub fn capacity(self: Atom, coff_file: *Coff) u64 {
+ const self_sym = self.getSymbol(coff_file);
+ if (self.next) |next| {
+ const next_sym = next.getSymbol(coff_file);
+ return next_sym.value - self_sym.value;
+ } else {
+ // We are the last atom.
+ // The capacity is limited only by virtual address space.
+ return std.math.maxInt(u64) - self_sym.value;
+ }
+}
+
+pub fn freeListEligible(self: Atom, coff_file: *Coff) bool {
+ // No need to keep a free list node for the last atom.
+ const next = self.next orelse return false;
+ const self_sym = self.getSymbol(coff_file);
+ const next_sym = next.getSymbol(coff_file);
+ const cap = next_sym.value - self_sym.value;
+ const ideal_cap = Coff.padToIdeal(self.size);
+ if (cap <= ideal_cap) return false;
+ const surplus = cap - ideal_cap;
+ return surplus >= Coff.min_text_capacity;
+}
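
capacity and freeListEligible implement the surplus heuristic shared with the other self-hosted linkers: an atom keeps a free-list node only when the gap to its next neighbor exceeds its ideal capacity by at least Coff.min_text_capacity. Coff.padToIdeal is not shown in this diff; the sketch below assumes the definition implied by the comments in Coff.zig ("size + (size / ideal_factor)", with ideal_factor = 3), and the numbers are purely illustrative:

    const std = @import("std");

    const ideal_factor = 3; // mirrors the constant added in Coff.zig below
    const minimum_text_block_size = 64;

    // Assumed shape of Coff.padToIdeal, per the doc comment on `free_list`.
    fn padToIdeal(actual_size: u64) u64 {
        return actual_size + actual_size / ideal_factor;
    }

    test "free-list eligibility heuristic (illustrative)" {
        const min_text_capacity = padToIdeal(minimum_text_block_size); // 64 + 21 = 85
        // An atom of size 100 whose next neighbor starts 300 bytes later:
        const capacity: u64 = 300;
        const ideal_cap = padToIdeal(100); // 133
        const surplus = capacity - ideal_cap; // 167
        try std.testing.expect(surplus >= min_text_capacity); // eligible for the free list
    }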
src/link/Coff/lld.zig
@@ -0,0 +1,602 @@
+const std = @import("std");
+const build_options = @import("build_options");
+const allocPrint = std.fmt.allocPrint;
+const assert = std.debug.assert;
+const fs = std.fs;
+const log = std.log.scoped(.link);
+const mem = std.mem;
+
+const mingw = @import("../../mingw.zig");
+const link = @import("../../link.zig");
+const lldMain = @import("../../main.zig").lldMain;
+const trace = @import("../../tracy.zig").trace;
+
+const Allocator = mem.Allocator;
+
+const Cache = @import("../../Cache.zig");
+const Coff = @import("../Coff.zig");
+const Compilation = @import("../../Compilation.zig");
+
+pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Node) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
+ defer arena_allocator.deinit();
+ const arena = arena_allocator.allocator();
+
+ const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
+ const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
+
+ // If there is no Zig code to compile, then we should skip flushing the output file because it
+ // will not be part of the linker line anyway.
+ const module_obj_path: ?[]const u8 = if (self.base.options.module) |module| blk: {
+ const use_stage1 = build_options.have_stage1 and self.base.options.use_stage1;
+ if (use_stage1) {
+ const obj_basename = try std.zig.binNameAlloc(arena, .{
+ .root_name = self.base.options.root_name,
+ .target = self.base.options.target,
+ .output_mode = .Obj,
+ });
+ switch (self.base.options.cache_mode) {
+ .incremental => break :blk try module.zig_cache_artifact_directory.join(
+ arena,
+ &[_][]const u8{obj_basename},
+ ),
+ .whole => break :blk try fs.path.join(arena, &.{
+ fs.path.dirname(full_out_path).?, obj_basename,
+ }),
+ }
+ }
+
+ try self.flushModule(comp, prog_node);
+
+ if (fs.path.dirname(full_out_path)) |dirname| {
+ break :blk try fs.path.join(arena, &.{ dirname, self.base.intermediary_basename.? });
+ } else {
+ break :blk self.base.intermediary_basename.?;
+ }
+ } else null;
+
+ var sub_prog_node = prog_node.start("LLD Link", 0);
+ sub_prog_node.activate();
+ sub_prog_node.context.refresh();
+ defer sub_prog_node.end();
+
+ const is_lib = self.base.options.output_mode == .Lib;
+ const is_dyn_lib = self.base.options.link_mode == .Dynamic and is_lib;
+ const is_exe_or_dyn_lib = is_dyn_lib or self.base.options.output_mode == .Exe;
+ const link_in_crt = self.base.options.link_libc and is_exe_or_dyn_lib;
+ const target = self.base.options.target;
+
+ // See link/Elf.zig for comments on how this mechanism works.
+ const id_symlink_basename = "lld.id";
+
+ var man: Cache.Manifest = undefined;
+ defer if (!self.base.options.disable_lld_caching) man.deinit();
+
+ var digest: [Cache.hex_digest_len]u8 = undefined;
+
+ if (!self.base.options.disable_lld_caching) {
+ man = comp.cache_parent.obtain();
+ self.base.releaseLock();
+
+ comptime assert(Compilation.link_hash_implementation_version == 7);
+
+ for (self.base.options.objects) |obj| {
+ _ = try man.addFile(obj.path, null);
+ man.hash.add(obj.must_link);
+ }
+ for (comp.c_object_table.keys()) |key| {
+ _ = try man.addFile(key.status.success.object_path, null);
+ }
+ try man.addOptionalFile(module_obj_path);
+ man.hash.addOptionalBytes(self.base.options.entry);
+ man.hash.addOptional(self.base.options.stack_size_override);
+ man.hash.addOptional(self.base.options.image_base_override);
+ man.hash.addListOfBytes(self.base.options.lib_dirs);
+ man.hash.add(self.base.options.skip_linker_dependencies);
+ if (self.base.options.link_libc) {
+ man.hash.add(self.base.options.libc_installation != null);
+ if (self.base.options.libc_installation) |libc_installation| {
+ man.hash.addBytes(libc_installation.crt_dir.?);
+ if (target.abi == .msvc) {
+ man.hash.addBytes(libc_installation.msvc_lib_dir.?);
+ man.hash.addBytes(libc_installation.kernel32_lib_dir.?);
+ }
+ }
+ }
+ link.hashAddSystemLibs(&man.hash, self.base.options.system_libs);
+ man.hash.addListOfBytes(self.base.options.force_undefined_symbols.keys());
+ man.hash.addOptional(self.base.options.subsystem);
+ man.hash.add(self.base.options.is_test);
+ man.hash.add(self.base.options.tsaware);
+ man.hash.add(self.base.options.nxcompat);
+ man.hash.add(self.base.options.dynamicbase);
+ // strip does not need to go into the linker hash because it is part of the hash namespace
+ man.hash.addOptional(self.base.options.major_subsystem_version);
+ man.hash.addOptional(self.base.options.minor_subsystem_version);
+
+ // We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
+ _ = try man.hit();
+ digest = man.final();
+ var prev_digest_buf: [digest.len]u8 = undefined;
+ const prev_digest: []u8 = Cache.readSmallFile(
+ directory.handle,
+ id_symlink_basename,
+ &prev_digest_buf,
+ ) catch |err| blk: {
+ log.debug("COFF LLD new_digest={s} error: {s}", .{ std.fmt.fmtSliceHexLower(&digest), @errorName(err) });
+ // Handle this as a cache miss.
+ break :blk prev_digest_buf[0..0];
+ };
+ if (mem.eql(u8, prev_digest, &digest)) {
+ log.debug("COFF LLD digest={s} match - skipping invocation", .{std.fmt.fmtSliceHexLower(&digest)});
+ // Hot diggity dog! The output binary is already there.
+ self.base.lock = man.toOwnedLock();
+ return;
+ }
+ log.debug("COFF LLD prev_digest={s} new_digest={s}", .{ std.fmt.fmtSliceHexLower(prev_digest), std.fmt.fmtSliceHexLower(&digest) });
+
+ // We are about to change the output file to be different, so we invalidate the build hash now.
+ directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
+ error.FileNotFound => {},
+ else => |e| return e,
+ };
+ }
+
+ if (self.base.options.output_mode == .Obj) {
+ // LLD's COFF driver does not support the equivalent of `-r` so we do a simple file copy
+ // here. TODO: think carefully about how we can avoid this redundant operation when doing
+ // build-obj. See also the corresponding TODO in linkAsArchive.
+ const the_object_path = blk: {
+ if (self.base.options.objects.len != 0)
+ break :blk self.base.options.objects[0].path;
+
+ if (comp.c_object_table.count() != 0)
+ break :blk comp.c_object_table.keys()[0].status.success.object_path;
+
+ if (module_obj_path) |p|
+ break :blk p;
+
+ // TODO I think this is unreachable. Audit this situation when solving the above TODO
+ // regarding eliding redundant object -> object transformations.
+ return error.NoObjectsToLink;
+ };
+ // This can happen when using --enable-cache and using the stage1 backend. In this case
+ // we can skip the file copy.
+ if (!mem.eql(u8, the_object_path, full_out_path)) {
+ try fs.cwd().copyFile(the_object_path, fs.cwd(), full_out_path, .{});
+ }
+ } else {
+ // Create an LLD command line and invoke it.
+ var argv = std.ArrayList([]const u8).init(self.base.allocator);
+ defer argv.deinit();
+ // We will invoke ourselves as a child process to gain access to LLD.
+ // This is necessary because LLD does not behave properly as a library -
+ // it calls exit() and does not reset all global data between invocations.
+ try argv.appendSlice(&[_][]const u8{ comp.self_exe_path.?, "lld-link" });
+
+ try argv.append("-ERRORLIMIT:0");
+ try argv.append("-NOLOGO");
+ if (!self.base.options.strip) {
+ try argv.append("-DEBUG");
+ }
+ if (self.base.options.lto) {
+ switch (self.base.options.optimize_mode) {
+ .Debug => {},
+ .ReleaseSmall => try argv.append("-OPT:lldlto=2"),
+ .ReleaseFast, .ReleaseSafe => try argv.append("-OPT:lldlto=3"),
+ }
+ }
+ if (self.base.options.output_mode == .Exe) {
+ const stack_size = self.base.options.stack_size_override orelse 16777216;
+ try argv.append(try allocPrint(arena, "-STACK:{d}", .{stack_size}));
+ }
+ if (self.base.options.image_base_override) |image_base| {
+ try argv.append(try std.fmt.allocPrint(arena, "-BASE:{d}", .{image_base}));
+ }
+
+ if (target.cpu.arch == .i386) {
+ try argv.append("-MACHINE:X86");
+ } else if (target.cpu.arch == .x86_64) {
+ try argv.append("-MACHINE:X64");
+ } else if (target.cpu.arch.isARM()) {
+ if (target.cpu.arch.ptrBitWidth() == 32) {
+ try argv.append("-MACHINE:ARM");
+ } else {
+ try argv.append("-MACHINE:ARM64");
+ }
+ }
+
+ for (self.base.options.force_undefined_symbols.keys()) |symbol| {
+ try argv.append(try allocPrint(arena, "-INCLUDE:{s}", .{symbol}));
+ }
+
+ if (is_dyn_lib) {
+ try argv.append("-DLL");
+ }
+
+ if (self.base.options.entry) |entry| {
+ try argv.append(try allocPrint(arena, "-ENTRY:{s}", .{entry}));
+ }
+
+ if (self.base.options.tsaware) {
+ try argv.append("-tsaware");
+ }
+ if (self.base.options.nxcompat) {
+ try argv.append("-nxcompat");
+ }
+ if (self.base.options.dynamicbase) {
+ try argv.append("-dynamicbase");
+ }
+
+ try argv.append(try allocPrint(arena, "-OUT:{s}", .{full_out_path}));
+
+ if (self.base.options.implib_emit) |emit| {
+ const implib_out_path = try emit.directory.join(arena, &[_][]const u8{emit.sub_path});
+ try argv.append(try allocPrint(arena, "-IMPLIB:{s}", .{implib_out_path}));
+ }
+
+ if (self.base.options.link_libc) {
+ if (self.base.options.libc_installation) |libc_installation| {
+ try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{libc_installation.crt_dir.?}));
+
+ if (target.abi == .msvc) {
+ try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{libc_installation.msvc_lib_dir.?}));
+ try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{libc_installation.kernel32_lib_dir.?}));
+ }
+ }
+ }
+
+ for (self.base.options.lib_dirs) |lib_dir| {
+ try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{lib_dir}));
+ }
+
+ try argv.ensureUnusedCapacity(self.base.options.objects.len);
+ for (self.base.options.objects) |obj| {
+ if (obj.must_link) {
+ argv.appendAssumeCapacity(try allocPrint(arena, "-WHOLEARCHIVE:{s}", .{obj.path}));
+ } else {
+ argv.appendAssumeCapacity(obj.path);
+ }
+ }
+
+ for (comp.c_object_table.keys()) |key| {
+ try argv.append(key.status.success.object_path);
+ }
+
+ if (module_obj_path) |p| {
+ try argv.append(p);
+ }
+
+ const resolved_subsystem: ?std.Target.SubSystem = blk: {
+ if (self.base.options.subsystem) |explicit| break :blk explicit;
+ switch (target.os.tag) {
+ .windows => {
+ if (self.base.options.module) |module| {
+ if (module.stage1_flags.have_dllmain_crt_startup or is_dyn_lib)
+ break :blk null;
+ if (module.stage1_flags.have_c_main or self.base.options.is_test or
+ module.stage1_flags.have_winmain_crt_startup or
+ module.stage1_flags.have_wwinmain_crt_startup)
+ {
+ break :blk .Console;
+ }
+ if (module.stage1_flags.have_winmain or module.stage1_flags.have_wwinmain)
+ break :blk .Windows;
+ }
+ },
+ .uefi => break :blk .EfiApplication,
+ else => {},
+ }
+ break :blk null;
+ };
+
+ const Mode = enum { uefi, win32 };
+ const mode: Mode = mode: {
+ if (resolved_subsystem) |subsystem| {
+ const subsystem_suffix = ss: {
+ if (self.base.options.major_subsystem_version) |major| {
+ if (self.base.options.minor_subsystem_version) |minor| {
+ break :ss try allocPrint(arena, ",{d}.{d}", .{ major, minor });
+ } else {
+ break :ss try allocPrint(arena, ",{d}", .{major});
+ }
+ }
+ break :ss "";
+ };
+
+ switch (subsystem) {
+ .Console => {
+ try argv.append(try allocPrint(arena, "-SUBSYSTEM:console{s}", .{
+ subsystem_suffix,
+ }));
+ break :mode .win32;
+ },
+ .EfiApplication => {
+ try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_application{s}", .{
+ subsystem_suffix,
+ }));
+ break :mode .uefi;
+ },
+ .EfiBootServiceDriver => {
+ try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_boot_service_driver{s}", .{
+ subsystem_suffix,
+ }));
+ break :mode .uefi;
+ },
+ .EfiRom => {
+ try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_rom{s}", .{
+ subsystem_suffix,
+ }));
+ break :mode .uefi;
+ },
+ .EfiRuntimeDriver => {
+ try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_runtime_driver{s}", .{
+ subsystem_suffix,
+ }));
+ break :mode .uefi;
+ },
+ .Native => {
+ try argv.append(try allocPrint(arena, "-SUBSYSTEM:native{s}", .{
+ subsystem_suffix,
+ }));
+ break :mode .win32;
+ },
+ .Posix => {
+ try argv.append(try allocPrint(arena, "-SUBSYSTEM:posix{s}", .{
+ subsystem_suffix,
+ }));
+ break :mode .win32;
+ },
+ .Windows => {
+ try argv.append(try allocPrint(arena, "-SUBSYSTEM:windows{s}", .{
+ subsystem_suffix,
+ }));
+ break :mode .win32;
+ },
+ }
+ } else if (target.os.tag == .uefi) {
+ break :mode .uefi;
+ } else {
+ break :mode .win32;
+ }
+ };
+
+ switch (mode) {
+ .uefi => try argv.appendSlice(&[_][]const u8{
+ "-BASE:0",
+ "-ENTRY:EfiMain",
+ "-OPT:REF",
+ "-SAFESEH:NO",
+ "-MERGE:.rdata=.data",
+ "-ALIGN:32",
+ "-NODEFAULTLIB",
+ "-SECTION:.xdata,D",
+ }),
+ .win32 => {
+ if (link_in_crt) {
+ if (target.abi.isGnu()) {
+ try argv.append("-lldmingw");
+
+ if (target.cpu.arch == .i386) {
+ try argv.append("-ALTERNATENAME:__image_base__=___ImageBase");
+ } else {
+ try argv.append("-ALTERNATENAME:__image_base__=__ImageBase");
+ }
+
+ if (is_dyn_lib) {
+ try argv.append(try comp.get_libc_crt_file(arena, "dllcrt2.obj"));
+ if (target.cpu.arch == .i386) {
+ try argv.append("-ALTERNATENAME:__DllMainCRTStartup@12=_DllMainCRTStartup@12");
+ } else {
+ try argv.append("-ALTERNATENAME:_DllMainCRTStartup=DllMainCRTStartup");
+ }
+ } else {
+ try argv.append(try comp.get_libc_crt_file(arena, "crt2.obj"));
+ }
+
+ try argv.append(try comp.get_libc_crt_file(arena, "mingw32.lib"));
+ try argv.append(try comp.get_libc_crt_file(arena, "mingwex.lib"));
+ try argv.append(try comp.get_libc_crt_file(arena, "msvcrt-os.lib"));
+
+ for (mingw.always_link_libs) |name| {
+ if (!self.base.options.system_libs.contains(name)) {
+ const lib_basename = try allocPrint(arena, "{s}.lib", .{name});
+ try argv.append(try comp.get_libc_crt_file(arena, lib_basename));
+ }
+ }
+ } else {
+ const lib_str = switch (self.base.options.link_mode) {
+ .Dynamic => "",
+ .Static => "lib",
+ };
+ const d_str = switch (self.base.options.optimize_mode) {
+ .Debug => "d",
+ else => "",
+ };
+ switch (self.base.options.link_mode) {
+ .Static => try argv.append(try allocPrint(arena, "libcmt{s}.lib", .{d_str})),
+ .Dynamic => try argv.append(try allocPrint(arena, "msvcrt{s}.lib", .{d_str})),
+ }
+
+ try argv.append(try allocPrint(arena, "{s}vcruntime{s}.lib", .{ lib_str, d_str }));
+ try argv.append(try allocPrint(arena, "{s}ucrt{s}.lib", .{ lib_str, d_str }));
+
+ //Visual C++ 2015 Conformance Changes
+ //https://msdn.microsoft.com/en-us/library/bb531344.aspx
+ try argv.append("legacy_stdio_definitions.lib");
+
+ // msvcrt depends on kernel32 and ntdll
+ try argv.append("kernel32.lib");
+ try argv.append("ntdll.lib");
+ }
+ } else {
+ try argv.append("-NODEFAULTLIB");
+ if (!is_lib) {
+ if (self.base.options.module) |module| {
+ if (module.stage1_flags.have_winmain_crt_startup) {
+ try argv.append("-ENTRY:WinMainCRTStartup");
+ } else {
+ try argv.append("-ENTRY:wWinMainCRTStartup");
+ }
+ } else {
+ try argv.append("-ENTRY:wWinMainCRTStartup");
+ }
+ }
+ }
+ },
+ }
+
+ // libc++ dep
+ if (self.base.options.link_libcpp) {
+ try argv.append(comp.libcxxabi_static_lib.?.full_object_path);
+ try argv.append(comp.libcxx_static_lib.?.full_object_path);
+ }
+
+ // libunwind dep
+ if (self.base.options.link_libunwind) {
+ try argv.append(comp.libunwind_static_lib.?.full_object_path);
+ }
+
+ if (is_exe_or_dyn_lib and !self.base.options.skip_linker_dependencies) {
+ if (!self.base.options.link_libc) {
+ if (comp.libc_static_lib) |lib| {
+ try argv.append(lib.full_object_path);
+ }
+ }
+ // MinGW doesn't provide libssp symbols
+ if (target.abi.isGnu()) {
+ if (comp.libssp_static_lib) |lib| {
+ try argv.append(lib.full_object_path);
+ }
+ }
+ // MSVC compiler_rt is missing some stuff, so we build it unconditionally
+ // and rely on weak linkage to allow MSVC compiler_rt functions to override ours.
+ if (comp.compiler_rt_lib) |lib| {
+ try argv.append(lib.full_object_path);
+ }
+ }
+
+ try argv.ensureUnusedCapacity(self.base.options.system_libs.count());
+ for (self.base.options.system_libs.keys()) |key| {
+ const lib_basename = try allocPrint(arena, "{s}.lib", .{key});
+ if (comp.crt_files.get(lib_basename)) |crt_file| {
+ argv.appendAssumeCapacity(crt_file.full_object_path);
+ continue;
+ }
+ if (try findLib(arena, lib_basename, self.base.options.lib_dirs)) |full_path| {
+ argv.appendAssumeCapacity(full_path);
+ continue;
+ }
+ if (target.abi.isGnu()) {
+ const fallback_name = try allocPrint(arena, "lib{s}.dll.a", .{key});
+ if (try findLib(arena, fallback_name, self.base.options.lib_dirs)) |full_path| {
+ argv.appendAssumeCapacity(full_path);
+ continue;
+ }
+ }
+ log.err("DLL import library for -l{s} not found", .{key});
+ return error.DllImportLibraryNotFound;
+ }
+
+ if (self.base.options.verbose_link) {
+ // Skip over our own name so that the LLD linker name is the first argv item.
+ Compilation.dump_argv(argv.items[1..]);
+ }
+
+ if (std.process.can_spawn) {
+ // If possible, we run LLD as a child process because it does not always
+ // behave properly as a library, unfortunately.
+ // https://github.com/ziglang/zig/issues/3825
+ var child = std.ChildProcess.init(argv.items, arena);
+ if (comp.clang_passthrough_mode) {
+ child.stdin_behavior = .Inherit;
+ child.stdout_behavior = .Inherit;
+ child.stderr_behavior = .Inherit;
+
+ const term = child.spawnAndWait() catch |err| {
+ log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
+ return error.UnableToSpawnSelf;
+ };
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ std.process.exit(code);
+ }
+ },
+ else => std.process.abort(),
+ }
+ } else {
+ child.stdin_behavior = .Ignore;
+ child.stdout_behavior = .Ignore;
+ child.stderr_behavior = .Pipe;
+
+ try child.spawn();
+
+ const stderr = try child.stderr.?.reader().readAllAlloc(arena, 10 * 1024 * 1024);
+
+ const term = child.wait() catch |err| {
+ log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
+ return error.UnableToSpawnSelf;
+ };
+
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ // TODO parse this output and surface with the Compilation API rather than
+ // directly outputting to stderr here.
+ std.debug.print("{s}", .{stderr});
+ return error.LLDReportedFailure;
+ }
+ },
+ else => {
+ log.err("{s} terminated with stderr:\n{s}", .{ argv.items[0], stderr });
+ return error.LLDCrashed;
+ },
+ }
+
+ if (stderr.len != 0) {
+ log.warn("unexpected LLD stderr:\n{s}", .{stderr});
+ }
+ }
+ } else {
+ const exit_code = try lldMain(arena, argv.items, false);
+ if (exit_code != 0) {
+ if (comp.clang_passthrough_mode) {
+ std.process.exit(exit_code);
+ } else {
+ return error.LLDReportedFailure;
+ }
+ }
+ }
+ }
+
+ if (!self.base.options.disable_lld_caching) {
+ // Update the file with the digest. If it fails we can continue; it only
+ // means that the next invocation will have an unnecessary cache miss.
+ Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
+ log.warn("failed to save linking hash digest file: {s}", .{@errorName(err)});
+ };
+ // Again failure here only means an unnecessary cache miss.
+ man.writeManifest() catch |err| {
+ log.warn("failed to write cache manifest when linking: {s}", .{@errorName(err)});
+ };
+ // We hang on to this lock so that the output file path can be used without
+ // other processes clobbering it.
+ self.base.lock = man.toOwnedLock();
+ }
+}
+
+fn findLib(arena: Allocator, name: []const u8, lib_dirs: []const []const u8) !?[]const u8 {
+ for (lib_dirs) |lib_dir| {
+ const full_path = try fs.path.join(arena, &.{ lib_dir, name });
+ fs.cwd().access(full_path, .{}) catch |err| switch (err) {
+ error.FileNotFound => continue,
+ else => |e| return e,
+ };
+ return full_path;
+ }
+ return null;
+}
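
The caching logic near the top of linkWithLLD follows the scheme described in link/Elf.zig: hash every input that affects the link, compare the digest with the one stored in the small lld.id file next to the output, skip the LLD invocation on a match, and otherwise delete the digest before relinking and rewrite it only after success. A minimal self-contained sketch of that shape; plain std file I/O stands in for the Cache.readSmallFile/writeSmallFile helpers used above, and the function is illustrative rather than part of the commit:

    const std = @import("std");

    /// Returns true when the stored digest matches and the existing output can be reused.
    fn outputIsUpToDate(dir: std.fs.Dir, id_basename: []const u8, new_digest: []const u8) !bool {
        var buf: [64]u8 = undefined;
        const prev = dir.readFile(id_basename, &buf) catch |err| switch (err) {
            error.FileNotFound => return false, // no previous link: treat as a cache miss
            else => |e| return e,
        };
        if (std.mem.eql(u8, prev, new_digest)) return true;
        // About to produce a different output, so invalidate the stored digest first;
        // a crash mid-link then cannot leave it pointing at a stale binary.
        dir.deleteFile(id_basename) catch |err| switch (err) {
            error.FileNotFound => {},
            else => |e| return e,
        };
        return false;
    }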
src/link/MachO/DebugSymbols.zig
@@ -18,7 +18,6 @@ const Dwarf = @import("../Dwarf.zig");
const MachO = @import("../MachO.zig");
const Module = @import("../../Module.zig");
const StringTable = @import("../strtab.zig").StringTable;
-const TextBlock = MachO.TextBlock;
const Type = @import("../../type.zig").Type;
base: *MachO,
src/link/Coff.zig
@@ -1,39 +1,30 @@
const Coff = @This();
const std = @import("std");
+const build_options = @import("build_options");
const builtin = @import("builtin");
-const log = std.log.scoped(.link);
-const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
-const fs = std.fs;
-const allocPrint = std.fmt.allocPrint;
+const coff = std.coff;
+const log = std.log.scoped(.link);
+const math = std.math;
const mem = std.mem;
-const lldMain = @import("../main.zig").lldMain;
-const trace = @import("../tracy.zig").trace;
-const Module = @import("../Module.zig");
-const Compilation = @import("../Compilation.zig");
+const Allocator = std.mem.Allocator;
+
const codegen = @import("../codegen.zig");
const link = @import("../link.zig");
-const build_options = @import("build_options");
-const Cache = @import("../Cache.zig");
-const mingw = @import("../mingw.zig");
+const lld = @import("Coff/lld.zig");
+const trace = @import("../tracy.zig").trace;
+
const Air = @import("../Air.zig");
+pub const Atom = @import("Coff/Atom.zig");
+const Compilation = @import("../Compilation.zig");
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
+const Module = @import("../Module.zig");
+const StringTable = @import("strtab.zig").StringTable;
const TypedValue = @import("../TypedValue.zig");
-const allocation_padding = 4 / 3;
-const minimum_text_block_size = 64 * allocation_padding;
-
-const section_alignment = 4096;
-const file_alignment = 512;
-const default_image_base = 0x400_000;
-const section_table_size = 2 * 40;
-comptime {
- assert(mem.isAligned(default_image_base, section_alignment));
-}
-
pub const base_tag: link.File.Tag = .coff;
const msdos_stub = @embedFile("msdos-stub.bin");
@@ -42,91 +33,94 @@ const msdos_stub = @embedFile("msdos-stub.bin");
llvm_object: ?*LlvmObject = null,
base: link.File,
-ptr_width: PtrWidth,
error_flags: link.File.ErrorFlags = .{},
-text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = .{},
-last_text_block: ?*TextBlock = null,
-
-/// Section table file pointer.
-section_table_offset: u32 = 0,
-/// Section data file pointer.
-section_data_offset: u32 = 0,
-/// Optional header file pointer.
-optional_header_offset: u32 = 0,
-
-/// Absolute virtual address of the offset table when the executable is loaded in memory.
-offset_table_virtual_address: u32 = 0,
-/// Current size of the offset table on disk, must be a multiple of `file_alignment`
-offset_table_size: u32 = 0,
-/// Contains absolute virtual addresses
-offset_table: std.ArrayListUnmanaged(u64) = .{},
-/// Free list of offset table indices
-offset_table_free_list: std.ArrayListUnmanaged(u32) = .{},
-
-/// Virtual address of the entry point procedure relative to image base.
-entry_addr: ?u32 = null,
+ptr_width: PtrWidth,
-/// Absolute virtual address of the text section when the executable is loaded in memory.
-text_section_virtual_address: u32 = 0,
-/// Current size of the `.text` section on disk, must be a multiple of `file_alignment`
-text_section_size: u32 = 0,
+sections: std.MultiArrayList(Section) = .{},
-offset_table_size_dirty: bool = false,
-text_section_size_dirty: bool = false,
-/// This flag is set when the virtual size of the whole image file when loaded in memory has changed
-/// and needs to be updated in the optional header.
-size_of_image_dirty: bool = false,
+text_section_index: ?u16 = null,
+got_section_index: ?u16 = null,
-pub const PtrWidth = enum { p32, p64 };
+locals: std.ArrayListUnmanaged(coff.Symbol) = .{},
+globals: std.StringArrayHashMapUnmanaged(SymbolWithLoc) = .{},
-pub const TextBlock = struct {
- /// Offset of the code relative to the start of the text section
- text_offset: u32,
- /// Used size of the text block
- size: u32,
- /// This field is undefined for symbols with size = 0.
- offset_table_index: u32,
- /// Points to the previous and next neighbors, based on the `text_offset`.
- /// This can be used to find, for example, the capacity of this `TextBlock`.
- prev: ?*TextBlock,
- next: ?*TextBlock,
-
- pub const empty = TextBlock{
- .text_offset = 0,
- .size = 0,
- .offset_table_index = undefined,
- .prev = null,
- .next = null,
- };
+locals_free_list: std.ArrayListUnmanaged(u32) = .{},
- /// Returns how much room there is to grow in virtual address space.
- fn capacity(self: TextBlock) u64 {
- if (self.next) |next| {
- return next.text_offset - self.text_offset;
- }
- // This is the last block, the capacity is only limited by the address space.
- return std.math.maxInt(u32) - self.text_offset;
- }
+strtab: StringTable(.strtab) = .{},
- fn freeListEligible(self: TextBlock) bool {
- // No need to keep a free list node for the last block.
- const next = self.next orelse return false;
- const cap = next.text_offset - self.text_offset;
- const ideal_cap = self.size * allocation_padding;
- if (cap <= ideal_cap) return false;
- const surplus = cap - ideal_cap;
- return surplus >= minimum_text_block_size;
- }
+got_entries: std.AutoArrayHashMapUnmanaged(SymbolWithLoc, u32) = .{},
+got_entries_free_list: std.ArrayListUnmanaged(u32) = .{},
- /// Absolute virtual address of the text block when the file is loaded in memory.
- fn getVAddr(self: TextBlock, coff: Coff) u32 {
- return coff.text_section_virtual_address + self.text_offset;
- }
+/// Virtual address of the entry point procedure relative to image base.
+entry_addr: ?u64 = null,
+
+/// Table of Decls that are currently alive.
+/// We store them here so that we can properly dispose of any allocated
+/// memory within the atom in the incremental linker.
+/// TODO consolidate this.
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, ?u16) = .{},
+
+/// List of atoms that are either synthetic or map directly to the Zig source program.
+managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+
+/// Table of atoms indexed by the symbol index.
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
+
+const page_size: u16 = 0x1000;
+
+const Section = struct {
+ header: coff.SectionHeader,
+
+ last_atom: ?*Atom = null,
+
+ /// A list of atoms that have surplus capacity. This list can have false
+ /// positives, as functions grow and shrink over time, only sometimes being added
+ /// or removed from the freelist.
+ ///
+ /// An atom has surplus capacity when its overcapacity value is greater than
+ /// padToIdeal(minimum_atom_size). That is, when it has so
+ /// much extra capacity, that we could fit a small new symbol in it, itself with
+ /// ideal_capacity or more.
+ ///
+ /// Ideal capacity is defined by size + (size / ideal_factor).
+ ///
+ /// Overcapacity is measured by actual_capacity - ideal_capacity. Note that
+ /// overcapacity can be negative. A simple way to have negative overcapacity is to
+ /// allocate a fresh atom, which will have ideal capacity, and then grow it
+ /// by 1 byte. It will then have -1 overcapacity.
+ free_list: std.ArrayListUnmanaged(*Atom) = .{},
};
+pub const PtrWidth = enum { p32, p64 };
pub const SrcFn = void;
+pub const Export = struct {
+ sym_index: ?u32 = null,
+};
+
+pub const SymbolWithLoc = struct {
+ // Index into the respective symbol table.
+ sym_index: u32,
+
+ // null means it's a synthetic global or Zig source.
+ file: ?u32 = null,
+};
+
+/// When allocating, the ideal_capacity is calculated by
+/// actual_capacity + (actual_capacity / ideal_factor)
+const ideal_factor = 3;
+
+/// In order for a slice of bytes to be considered eligible to keep metadata pointing at
+/// it as a possible place to put new symbols, it must have enough room for this many bytes
+/// (plus extra for reserved capacity).
+const minimum_text_block_size = 64;
+pub const min_text_capacity = padToIdeal(minimum_text_block_size);
+
+/// We commit 0x1000 = 4096 bytes of space to the headers.
+/// This should be plenty for any potential future extensions.
+const default_headerpad_size: u32 = 0x1000;
+
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Coff {
assert(options.target.ofmt == .coff);
@@ -144,25 +138,18 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
});
self.base.file = file;
- const coff_file_header_offset: u32 = if (options.output_mode == .Exe) msdos_stub.len + 4 else 0;
- const default_offset_table_size = file_alignment;
- const data_directory_count = 0;
- const default_size_of_code = 0;
- const optional_header_size = switch (options.output_mode) {
- .Exe => data_directory_count * 8 + switch (self.ptr_width) {
- .p32 => @as(u16, 96),
- .p64 => 112,
- },
- else => 0,
- };
- const section_table_offset = coff_file_header_offset + 20 + optional_header_size;
- self.section_data_offset = mem.alignForwardGeneric(u32, section_table_offset + section_table_size, file_alignment);
- const section_data_relative_virtual_address = mem.alignForwardGeneric(u32, section_table_offset + section_table_size, section_alignment);
- self.offset_table_virtual_address = default_image_base + section_data_relative_virtual_address;
- self.offset_table_size = default_offset_table_size;
- self.section_table_offset = section_table_offset;
- self.text_section_virtual_address = default_image_base + section_data_relative_virtual_address + section_alignment;
- self.text_section_size = default_size_of_code;
+ // Index 0 is always a null symbol.
+ try self.locals.append(allocator, .{
+ .name = [_]u8{0} ** 8,
+ .value = 0,
+ .section_number = @intToEnum(coff.SectionNumber, 0),
+ .@"type" = .{ .base_type = .NULL, .complex_type = .NULL },
+ .storage_class = .NULL,
+ .number_of_aux_symbols = 0,
+ });
+ try self.strtab.buffer.append(allocator, 0);
+
+ try self.populateMissingMetadata();
return self;
}
@@ -193,245 +180,259 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Coff {
return self;
}
-pub fn allocateDeclIndexes(self: *Coff, decl_index: Module.Decl.Index) !void {
- if (self.llvm_object) |_| return;
+pub fn deinit(self: *Coff) void {
+ const gpa = self.base.allocator;
- try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);
+ if (build_options.have_llvm) {
+ if (self.llvm_object) |llvm_object| llvm_object.destroy(gpa);
+ }
- const decl = self.base.options.module.?.declPtr(decl_index);
- if (self.offset_table_free_list.popOrNull()) |i| {
- decl.link.coff.offset_table_index = i;
- } else {
- decl.link.coff.offset_table_index = @intCast(u32, self.offset_table.items.len);
- _ = self.offset_table.addOneAssumeCapacity();
+ for (self.sections.items(.free_list)) |*free_list| {
+ free_list.deinit(gpa);
+ }
+ self.sections.deinit(gpa);
- const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8;
- if (self.offset_table.items.len > self.offset_table_size / entry_size) {
- self.offset_table_size_dirty = true;
- }
+ for (self.managed_atoms.items) |atom| {
+ gpa.destroy(atom);
}
+ self.managed_atoms.deinit(gpa);
- self.offset_table.items[decl.link.coff.offset_table_index] = 0;
+ self.locals.deinit(gpa);
+ self.globals.deinit(gpa);
+ self.locals_free_list.deinit(gpa);
+ self.strtab.deinit(gpa);
+ self.got_entries.deinit(gpa);
+ self.got_entries_free_list.deinit(gpa);
+ self.decls.deinit(gpa);
+ self.atom_by_index_table.deinit(gpa);
}
-fn allocateTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
- const new_block_min_capacity = new_block_size * allocation_padding;
+fn populateMissingMetadata(self: *Coff) !void {
+ _ = self;
+ @panic("TODO populateMissingMetadata");
+}
+
+pub fn allocateDeclIndexes(self: *Coff, decl_index: Module.Decl.Index) !void {
+ if (self.llvm_object) |_| return;
+ const decl = self.base.options.module.?.declPtr(decl_index);
+ if (decl.link.coff.sym_index != 0) return;
+ decl.link.coff.sym_index = try self.allocateSymbol();
+ const gpa = self.base.allocator;
+ try self.atom_by_index_table.putNoClobber(gpa, decl.link.coff.sym_index, &decl.link.coff);
+ try self.decls.putNoClobber(gpa, decl_index, null);
+}
+
+fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u64, alignment: u64, sect_id: u16) !u64 {
+ const tracy = trace(@src());
+ defer tracy.end();
- // We use these to indicate our intention to update metadata, placing the new block,
+ const header = &self.sections.items(.header)[sect_id];
+ const free_list = &self.sections.items(.free_list)[sect_id];
+ const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
+ const new_atom_ideal_capacity = if (header.isCode()) padToIdeal(new_atom_size) else new_atom_size;
+
+ // We use these to indicate our intention to update metadata, placing the new atom,
// and possibly removing a free list node.
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
- var block_placement: ?*TextBlock = null;
+ var atom_placement: ?*Atom = null;
var free_list_removal: ?usize = null;
- const vaddr = blk: {
+ // First we look for an appropriately sized free list node.
+ // The list is unordered. We'll just take the first thing that works.
+ var vaddr = blk: {
var i: usize = 0;
- while (i < self.text_block_free_list.items.len) {
- const free_block = self.text_block_free_list.items[i];
-
- const next_block_text_offset = free_block.text_offset + free_block.capacity();
- const new_block_text_offset = mem.alignForwardGeneric(u64, free_block.getVAddr(self.*) + free_block.size, alignment) - self.text_section_virtual_address;
- if (new_block_text_offset < next_block_text_offset and next_block_text_offset - new_block_text_offset >= new_block_min_capacity) {
- block_placement = free_block;
-
- const remaining_capacity = next_block_text_offset - new_block_text_offset - new_block_min_capacity;
- if (remaining_capacity < minimum_text_block_size) {
- free_list_removal = i;
- }
-
- break :blk new_block_text_offset + self.text_section_virtual_address;
- } else {
- if (!free_block.freeListEligible()) {
- _ = self.text_block_free_list.swapRemove(i);
+ while (i < free_list.items.len) {
+ const big_atom = free_list.items[i];
+ // We now have a pointer to a live atom that has too much capacity.
+ // Is it enough that we could fit this new atom?
+ const sym = big_atom.getSymbol(self);
+ const capacity = big_atom.capacity(self);
+ const ideal_capacity = if (header.isCode()) padToIdeal(capacity) else capacity;
+ const ideal_capacity_end_vaddr = math.add(u64, sym.value, ideal_capacity) catch ideal_capacity;
+ const capacity_end_vaddr = sym.value + capacity;
+ const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
+ const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment);
+ if (new_start_vaddr < ideal_capacity_end_vaddr) {
+ // Additional bookkeeping here to notice if this free list node
+ // should be deleted because the atom that it points to has grown to take up
+ // more of the extra capacity.
+ if (!big_atom.freeListEligible(self)) {
+ _ = free_list.swapRemove(i);
} else {
i += 1;
}
continue;
}
- } else if (self.last_text_block) |last| {
- const new_block_vaddr = mem.alignForwardGeneric(u64, last.getVAddr(self.*) + last.size, alignment);
- block_placement = last;
- break :blk new_block_vaddr;
+ // At this point we know that we will place the new atom here. But the
+ // remaining question is whether there is still yet enough capacity left
+ // over for there to still be a free list node.
+ const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr;
+ const keep_free_list_node = remaining_capacity >= min_text_capacity;
+
+ // Set up the metadata to be updated, after errors are no longer possible.
+ atom_placement = big_atom;
+ if (!keep_free_list_node) {
+ free_list_removal = i;
+ }
+ break :blk new_start_vaddr;
+ } else if (maybe_last_atom.*) |last| {
+ const last_symbol = last.getSymbol(self);
+ const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size;
+ const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity;
+ const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
+ atom_placement = last;
+ break :blk new_start_vaddr;
} else {
- break :blk self.text_section_virtual_address;
+ break :blk mem.alignForwardGeneric(u64, header.addr, alignment);
}
};
- const expand_text_section = block_placement == null or block_placement.?.next == null;
- if (expand_text_section) {
- const needed_size = @intCast(u32, mem.alignForwardGeneric(u64, vaddr + new_block_size - self.text_section_virtual_address, file_alignment));
- if (needed_size > self.text_section_size) {
- const current_text_section_virtual_size = mem.alignForwardGeneric(u32, self.text_section_size, section_alignment);
- const new_text_section_virtual_size = mem.alignForwardGeneric(u32, needed_size, section_alignment);
- if (current_text_section_virtual_size != new_text_section_virtual_size) {
- self.size_of_image_dirty = true;
- // Write new virtual size
- var buf: [4]u8 = undefined;
- mem.writeIntLittle(u32, &buf, new_text_section_virtual_size);
- try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 8);
- }
+ const expand_section = atom_placement == null or atom_placement.?.next == null;
+ if (expand_section) {
+ @panic("TODO expand section in allocateAtom");
+ }
- self.text_section_size = needed_size;
- self.text_section_size_dirty = true;
- }
- self.last_text_block = text_block;
+ if (header.getAlignment() < alignment) {
+ header.setAlignment(@intCast(u16, alignment));
}
- text_block.text_offset = @intCast(u32, vaddr - self.text_section_virtual_address);
- text_block.size = @intCast(u32, new_block_size);
-
- // This function can also reallocate a text block.
- // In this case we need to "unplug" it from its previous location before
- // plugging it in to its new location.
- if (text_block.prev) |prev| {
- prev.next = text_block.next;
+ atom.size = new_atom_size;
+ atom.alignment = @intCast(u32, alignment);
+
+ if (atom.prev) |prev| {
+ prev.next = atom.next;
}
- if (text_block.next) |next| {
- next.prev = text_block.prev;
+ if (atom.next) |next| {
+ next.prev = atom.prev;
}
- if (block_placement) |big_block| {
- text_block.prev = big_block;
- text_block.next = big_block.next;
- big_block.next = text_block;
+ if (atom_placement) |big_atom| {
+ atom.prev = big_atom;
+ atom.next = big_atom.next;
+ big_atom.next = atom;
} else {
- text_block.prev = null;
- text_block.next = null;
+ atom.prev = null;
+ atom.next = null;
}
if (free_list_removal) |i| {
- _ = self.text_block_free_list.swapRemove(i);
+ _ = free_list.swapRemove(i);
}
+
return vaddr;
}
-fn growTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
- const block_vaddr = text_block.getVAddr(self.*);
- const align_ok = mem.alignBackwardGeneric(u64, block_vaddr, alignment) == block_vaddr;
- const need_realloc = !align_ok or new_block_size > text_block.capacity();
- if (!need_realloc) return @as(u64, block_vaddr);
- return self.allocateTextBlock(text_block, new_block_size, alignment);
+fn allocateSymbol(self: *Coff) !u32 {
+ const gpa = self.base.allocator;
+ try self.locals.ensureUnusedCapacity(gpa, 1);
+
+ const index = blk: {
+ if (self.locals_free_list.popOrNull()) |index| {
+ log.debug(" (reusing symbol index {d})", .{index});
+ break :blk index;
+ } else {
+ log.debug(" (allocating symbol index {d})", .{self.locals.items.len});
+ const index = @intCast(u32, self.locals.items.len);
+ _ = self.locals.addOneAssumeCapacity();
+ break :blk index;
+ }
+ };
+
+ self.locals.items[index] = .{
+ .name = [_]u8{0} ** 8,
+ .value = 0,
+ .section_number = @intToEnum(coff.SectionNumber, 0),
+ .@"type" = .{ .base_type = .NULL, .complex_type = .NULL },
+ .storage_class = .NULL,
+ .number_of_aux_symbols = 0,
+ };
+
+ return index;
}
-fn shrinkTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64) void {
- text_block.size = @intCast(u32, new_block_size);
- if (text_block.capacity() - text_block.size >= minimum_text_block_size) {
- self.text_block_free_list.append(self.base.allocator, text_block) catch {};
+pub fn allocateGotEntry(self: *Coff, target: SymbolWithLoc) !u32 {
+ const gpa = self.base.allocator;
+ try self.got_entries.ensureUnusedCapacity(gpa, 1);
+ if (self.got_entries_free_list.popOrNull()) |index| {
+ log.debug(" (reusing GOT entry index {d})", .{index});
+ if (self.got_entries.getIndex(target)) |existing| {
+ assert(existing == index);
+ }
+ self.got_entries.keys()[index] = target;
+ return index;
+ } else {
+ log.debug(" (allocating GOT entry at index {d})", .{self.got_entries.keys().len});
+ const index = @intCast(u32, self.got_entries.keys().len);
+ try self.got_entries.putAssumeCapacityNoClobber(target, 0);
+ return index;
}
}
-fn freeTextBlock(self: *Coff, text_block: *TextBlock) void {
+fn growAtom(self: *Coff, atom: *Atom, new_atom_size: u64, alignment: u64, sect_id: u16) !u64 {
+ const sym = atom.getSymbol(self);
+ const align_ok = mem.alignBackwardGeneric(u64, sym.value, alignment) == sym.value;
+ const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
+ if (!need_realloc) return sym.value;
+ return self.allocateAtom(atom, new_atom_size, alignment, sect_id);
+}
+
+fn shrinkAtom(self: *Coff, atom: *Atom, new_block_size: u64, sect_id: u16) void {
+ _ = self;
+ _ = atom;
+ _ = new_block_size;
+ _ = sect_id;
+ // TODO check the new capacity, and if it crosses the size threshold into a big enough
+ // capacity, insert a free list node for it.
+}
+
+fn freeAtom(self: *Coff, atom: *Atom, sect_id: u16) void {
+ log.debug("freeAtom {*}", .{atom});
+
+ const free_list = &self.sections.items(.free_list)[sect_id];
var already_have_free_list_node = false;
{
var i: usize = 0;
- // TODO turn text_block_free_list into a hash map
- while (i < self.text_block_free_list.items.len) {
- if (self.text_block_free_list.items[i] == text_block) {
- _ = self.text_block_free_list.swapRemove(i);
+ // TODO turn free_list into a hash map
+ while (i < free_list.items.len) {
+ if (free_list.items[i] == atom) {
+ _ = free_list.swapRemove(i);
continue;
}
- if (self.text_block_free_list.items[i] == text_block.prev) {
+ if (free_list.items[i] == atom.prev) {
already_have_free_list_node = true;
}
i += 1;
}
}
- if (self.last_text_block == text_block) {
- self.last_text_block = text_block.prev;
- }
- if (text_block.prev) |prev| {
- prev.next = text_block.next;
- if (!already_have_free_list_node and prev.freeListEligible()) {
- // The free list is heuristics, it doesn't have to be perfect, so we can
- // ignore the OOM here.
- self.text_block_free_list.append(self.base.allocator, prev) catch {};
+ const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
+ if (maybe_last_atom.*) |last_atom| {
+ if (last_atom == atom) {
+ if (atom.prev) |prev| {
+ // TODO shrink the section size here
+ maybe_last_atom.* = prev;
+ } else {
+ maybe_last_atom.* = null;
+ }
}
}
- if (text_block.next) |next| {
- next.prev = text_block.prev;
- }
-}
+ if (atom.prev) |prev| {
+ prev.next = atom.next;
-fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
- const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8;
- const endian = self.base.options.target.cpu.arch.endian();
-
- const offset_table_start = self.section_data_offset;
- if (self.offset_table_size_dirty) {
- const current_raw_size = self.offset_table_size;
- const new_raw_size = self.offset_table_size * 2;
- log.debug("growing offset table from raw size {} to {}\n", .{ current_raw_size, new_raw_size });
-
- // Move the text section to a new place in the executable
- const current_text_section_start = self.section_data_offset + current_raw_size;
- const new_text_section_start = self.section_data_offset + new_raw_size;
-
- const amt = try self.base.file.?.copyRangeAll(current_text_section_start, self.base.file.?, new_text_section_start, self.text_section_size);
- if (amt != self.text_section_size) return error.InputOutput;
-
- // Write the new raw size in the .got header
- var buf: [8]u8 = undefined;
- mem.writeIntLittle(u32, buf[0..4], new_raw_size);
- try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 16);
- // Write the new .text section file offset in the .text section header
- mem.writeIntLittle(u32, buf[0..4], new_text_section_start);
- try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 20);
-
- const current_virtual_size = mem.alignForwardGeneric(u32, self.offset_table_size, section_alignment);
- const new_virtual_size = mem.alignForwardGeneric(u32, new_raw_size, section_alignment);
- // If we had to move in the virtual address space, we need to fix the VAs in the offset table, as well as the virtual address of the `.text` section
- // and the virtual size of the `.got` section
-
- if (new_virtual_size != current_virtual_size) {
- log.debug("growing offset table from virtual size {} to {}\n", .{ current_virtual_size, new_virtual_size });
- self.size_of_image_dirty = true;
- const va_offset = new_virtual_size - current_virtual_size;
-
- // Write .got virtual size
- mem.writeIntLittle(u32, buf[0..4], new_virtual_size);
- try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 8);
-
- // Write .text new virtual address
- self.text_section_virtual_address = self.text_section_virtual_address + va_offset;
- mem.writeIntLittle(u32, buf[0..4], self.text_section_virtual_address - default_image_base);
- try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 12);
-
- // Fix the VAs in the offset table
- for (self.offset_table.items) |*va, idx| {
- if (va.* != 0) {
- va.* += va_offset;
-
- switch (entry_size) {
- 4 => {
- mem.writeInt(u32, buf[0..4], @intCast(u32, va.*), endian);
- try self.base.file.?.pwriteAll(buf[0..4], offset_table_start + idx * entry_size);
- },
- 8 => {
- mem.writeInt(u64, &buf, va.*, endian);
- try self.base.file.?.pwriteAll(&buf, offset_table_start + idx * entry_size);
- },
- else => unreachable,
- }
- }
- }
+ if (!already_have_free_list_node and prev.freeListEligible(self)) {
+ // The free list is only a heuristic; it doesn't have to be perfect, so we can
+ // ignore the OOM here.
+ free_list.append(self.base.allocator, prev) catch {};
}
- self.offset_table_size = new_raw_size;
- self.offset_table_size_dirty = false;
+ } else {
+ atom.prev = null;
}
- // Write the new entry
- switch (entry_size) {
- 4 => {
- var buf: [4]u8 = undefined;
- mem.writeInt(u32, &buf, @intCast(u32, self.offset_table.items[index]), endian);
- try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size);
- },
- 8 => {
- var buf: [8]u8 = undefined;
- mem.writeInt(u64, &buf, self.offset_table.items[index], endian);
- try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size);
- },
- else => unreachable,
+
+ if (atom.next) |next| {
+ next.prev = atom.prev;
+ } else {
+ atom.next = null;
}
}
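For orientation (a sketch, not part of the commit): the `freeListEligible` check that gates the append above typically asks whether the atom's slack beyond its ideal padded size is large enough to ever satisfy another allocation. Assuming an Atom `capacity` helper (the old TextBlock exposes one in the removed code further down) and an assumed `min_text_capacity` threshold, it could look roughly like this:

    // Sketch only; `capacity` and `min_text_capacity` are assumptions, while
    // `padToIdeal` is the helper added later in this diff.
    fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
        const cap = self.capacity(coff_file);
        const ideal_cap = padToIdeal(self.size);
        if (cap <= ideal_cap) return false;
        const surplus = cap - ideal_cap;
        return surplus >= min_text_capacity;
    }
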
@@ -470,15 +471,19 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
},
};
- return self.finishUpdateDecl(module, func.owner_decl, code);
+ const sym = try self.updateDeclCode(decl_index, code);
+ log.debug("updated decl code has sym {}", .{sym});
+
+ // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
+ const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
+ return self.updateDeclExports(module, decl_index, decl_exports);
}
pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.Index) !u32 {
_ = self;
_ = tv;
_ = decl_index;
- log.debug("TODO lowerUnnamedConst for Coff", .{});
- return error.AnalysisFail;
+ @panic("TODO lowerUnnamedConst");
}
pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !void {
@@ -503,9 +508,6 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
}
}
- // TODO COFF/PE debug information
- // TODO Implement exports
-
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -526,49 +528,21 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
},
};
- return self.finishUpdateDecl(module, decl_index, code);
-}
-
-fn finishUpdateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index, code: []const u8) !void {
- const decl = module.declPtr(decl_index);
- const required_alignment = decl.ty.abiAlignment(self.base.options.target);
- const curr_size = decl.link.coff.size;
- if (curr_size != 0) {
- const capacity = decl.link.coff.capacity();
- const need_realloc = code.len > capacity or
- !mem.isAlignedGeneric(u32, decl.link.coff.text_offset, required_alignment);
- if (need_realloc) {
- const curr_vaddr = self.text_section_virtual_address + decl.link.coff.text_offset;
- const vaddr = try self.growTextBlock(&decl.link.coff, code.len, required_alignment);
- log.debug("growing {s} from 0x{x} to 0x{x}\n", .{ decl.name, curr_vaddr, vaddr });
- if (vaddr != curr_vaddr) {
- log.debug(" (writing new offset table entry)\n", .{});
- self.offset_table.items[decl.link.coff.offset_table_index] = vaddr;
- try self.writeOffsetTableEntry(decl.link.coff.offset_table_index);
- }
- } else if (code.len < curr_size) {
- self.shrinkTextBlock(&decl.link.coff, code.len);
- }
- } else {
- const vaddr = try self.allocateTextBlock(&decl.link.coff, code.len, required_alignment);
- log.debug("allocated text block for {s} at 0x{x} (size: {Bi})\n", .{
- mem.sliceTo(decl.name, 0),
- vaddr,
- std.fmt.fmtIntSizeDec(code.len),
- });
- errdefer self.freeTextBlock(&decl.link.coff);
- self.offset_table.items[decl.link.coff.offset_table_index] = vaddr;
- try self.writeOffsetTableEntry(decl.link.coff.offset_table_index);
- }
-
- // Write the code into the file
- try self.base.file.?.pwriteAll(code, self.section_data_offset + self.offset_table_size + decl.link.coff.text_offset);
+ const sym = try self.updateDeclCode(decl_index, code);
+ log.debug("updated decl code for {}", .{sym});
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl_index, decl_exports);
}
+fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8) !*coff.Symbol {
+ _ = self;
+ _ = decl_index;
+ _ = code;
+ @panic("TODO updateDeclCode");
+}
+
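The stub above panics for now; judging from the removed `finishUpdateDecl` flow and the symbol accessors added near the end of this file's diff, its eventual shape is roughly the following. This is a sketch only: `growAtom`, `allocateAtom` and `fileOffset` are assumed helper names, not names from this commit.

    fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8) !*coff.Symbol {
        const mod = self.base.options.module.?;
        const decl = mod.declPtr(decl_index);
        const required_alignment = decl.ty.abiAlignment(self.base.options.target);
        const atom = &decl.link.coff;
        if (atom.size != 0) {
            // Grow in place (relocating if needed) or shrink the existing atom.
            _ = try self.growAtom(atom, code.len, required_alignment);
        } else {
            _ = try self.allocateAtom(atom, code.len, required_alignment);
        }
        atom.size = @intCast(u32, code.len);
        // Write the machine code at the atom's file offset, then return its symbol.
        try self.base.file.?.pwriteAll(code, self.fileOffset(atom));
        return self.getSymbolPtr(.{ .sym_index = atom.sym_index, .file = null });
    }
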
pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void {
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl_index);
@@ -577,9 +551,31 @@ pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void {
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
+ log.debug("freeDecl {*}", .{decl});
+
+ const kv = self.decls.fetchRemove(decl_index);
+ if (kv.?.value) |index| {
+ self.freeAtom(&decl.link.coff, index);
+ }
+
// Appending to free lists is allowed to fail because the free lists are heuristics-based anyway.
- self.freeTextBlock(&decl.link.coff);
- self.offset_table_free_list.append(self.base.allocator, decl.link.coff.offset_table_index) catch {};
+ const gpa = self.base.allocator;
+ const sym_index = decl.link.coff.sym_index;
+ if (sym_index != 0) {
+ self.locals_free_list.append(gpa, sym_index) catch {};
+
+ // Try freeing GOT atom if this decl had one
+ const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ if (self.got_entries.getIndex(got_target)) |got_index| {
+ self.got_entries_free_list.append(gpa, @intCast(u32, got_index)) catch {};
+ self.got_entries.values()[got_index] = 0;
+ log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index });
+ }
+
+ self.locals.items[sym_index].section_number = @intToEnum(coff.SectionNumber, 0);
+ _ = self.atom_by_index_table.remove(sym_index);
+ decl.link.coff.sym_index = 0;
+ }
}
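Freed indices are recycled rather than compacted: the local symbol index and the GOT slot both land on free lists, and the `sym_index != 0` guard suggests index 0 is reserved for a null symbol. A hedged sketch of the matching allocation side (the helper name is an assumption; the `locals` array and `locals_free_list` are the ones used above):

    fn allocateSymbol(self: *Coff) !u32 {
        const gpa = self.base.allocator;
        try self.locals.ensureUnusedCapacity(gpa, 1);
        if (self.locals_free_list.popOrNull()) |index| {
            log.debug("  (reusing symbol index {d})", .{index});
            return index;
        }
        // Otherwise grow `locals`; the caller fills in name, value and section.
        const index = @intCast(u32, self.locals.items.len);
        _ = self.locals.addOneAssumeCapacity();
        return index;
    }
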
pub fn updateDeclExports(
@@ -625,28 +621,7 @@ pub fn updateDeclExports(
if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl_index, exports);
}
- const decl = module.declPtr(decl_index);
- for (exports) |exp| {
- if (exp.options.section) |section_name| {
- if (!mem.eql(u8, section_name, ".text")) {
- try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
- module.failed_exports.putAssumeCapacityNoClobber(
- exp,
- try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}),
- );
- continue;
- }
- }
- if (mem.eql(u8, exp.options.name, "wWinMainCRTStartup")) {
- self.entry_addr = decl.link.coff.getVAddr(self.*) - default_image_base;
- } else {
- try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
- module.failed_exports.putAssumeCapacityNoClobber(
- exp,
- try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: Exports other than 'wWinMainCRTStartup'", .{}),
- );
- }
- }
+ @panic("TODO updateDeclExports");
}
pub fn flush(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Node) !void {
@@ -660,7 +635,7 @@ pub fn flush(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Node) !vo
}
const use_lld = build_options.have_llvm and self.base.options.use_lld;
if (use_lld) {
- return self.linkWithLLD(comp, prog_node);
+ return lld.linkWithLLD(self, comp, prog_node);
}
switch (self.base.options.output_mode) {
.Exe, .Obj => return self.flushModule(comp, prog_node),
@@ -682,888 +657,68 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
sub_prog_node.activate();
defer sub_prog_node.end();
- const output_mode = self.base.options.output_mode;
- log.debug("in flushModule with {}", .{output_mode});
-
- var coff_file_header_offset: u32 = 0;
- if (output_mode == .Exe) {
- // Write the MS-DOS stub and the PE signature
- try self.base.file.?.pwriteAll(msdos_stub ++ "PE\x00\x00", 0);
- coff_file_header_offset = msdos_stub.len + 4;
- }
-
- // COFF file header
- const data_directory_count = 0;
- var hdr_data: [112 + data_directory_count * 8 + section_table_size]u8 = undefined;
- var index: usize = 0;
-
- const machine = self.base.options.target.cpu.arch.toCoffMachine();
- if (machine == .Unknown) {
- return error.UnsupportedCOFFArchitecture;
- }
- mem.writeIntLittle(u16, hdr_data[0..2], @enumToInt(machine));
- index += 2;
-
- // Number of sections (we only use .got, .text)
- mem.writeIntLittle(u16, hdr_data[index..][0..2], 2);
- index += 2;
- // TimeDateStamp (u32), PointerToSymbolTable (u32), NumberOfSymbols (u32)
- mem.set(u8, hdr_data[index..][0..12], 0);
- index += 12;
-
- const optional_header_size = switch (output_mode) {
- .Exe => data_directory_count * 8 + switch (self.ptr_width) {
- .p32 => @as(u16, 96),
- .p64 => 112,
- },
- else => 0,
- };
-
- const default_offset_table_size = file_alignment;
- const default_size_of_code = 0;
-
- // Size of file when loaded in memory
- const size_of_image = mem.alignForwardGeneric(u32, self.text_section_virtual_address - default_image_base + default_size_of_code, section_alignment);
-
- mem.writeIntLittle(u16, hdr_data[index..][0..2], optional_header_size);
- index += 2;
-
- // Characteristics
- var flags: std.coff.CoffHeaderFlags = .{
- // TODO Remove debug info stripped flag when necessary
- .DEBUG_STRIPPED = 1,
- .RELOCS_STRIPPED = 1,
- };
- if (output_mode == .Exe) {
- flags.EXECUTABLE_IMAGE = 1;
- }
- switch (self.ptr_width) {
- .p32 => flags.@"32BIT_MACHINE" = 1,
- .p64 => flags.LARGE_ADDRESS_AWARE = 1,
- }
- mem.writeIntLittle(u16, hdr_data[index..][0..2], @bitCast(u16, flags));
- index += 2;
-
- assert(index == 20);
- try self.base.file.?.pwriteAll(hdr_data[0..index], coff_file_header_offset);
-
- if (output_mode == .Exe) {
- self.optional_header_offset = coff_file_header_offset + 20;
- // Optional header
- index = 0;
- mem.writeIntLittle(u16, hdr_data[0..2], switch (self.ptr_width) {
- .p32 => @as(u16, 0x10b),
- .p64 => 0x20b,
- });
- index += 2;
-
- // Linker version (u8 + u8)
- mem.set(u8, hdr_data[index..][0..2], 0);
- index += 2;
-
- // SizeOfCode (UNUSED, u32), SizeOfInitializedData (u32), SizeOfUninitializedData (u32), AddressOfEntryPoint (u32), BaseOfCode (UNUSED, u32)
- mem.set(u8, hdr_data[index..][0..20], 0);
- index += 20;
-
- if (self.ptr_width == .p32) {
- // Base of data relative to the image base (UNUSED)
- mem.set(u8, hdr_data[index..][0..4], 0);
- index += 4;
-
- // Image base address
- mem.writeIntLittle(u32, hdr_data[index..][0..4], default_image_base);
- index += 4;
- } else {
- // Image base address
- mem.writeIntLittle(u64, hdr_data[index..][0..8], default_image_base);
- index += 8;
- }
-
- // Section alignment
- mem.writeIntLittle(u32, hdr_data[index..][0..4], section_alignment);
- index += 4;
- // File alignment
- mem.writeIntLittle(u32, hdr_data[index..][0..4], file_alignment);
- index += 4;
- // Required OS version, 6.0 is vista
- mem.writeIntLittle(u16, hdr_data[index..][0..2], 6);
- index += 2;
- mem.writeIntLittle(u16, hdr_data[index..][0..2], 0);
- index += 2;
- // Image version
- mem.set(u8, hdr_data[index..][0..4], 0);
- index += 4;
- // Required subsystem version, same as OS version
- mem.writeIntLittle(u16, hdr_data[index..][0..2], 6);
- index += 2;
- mem.writeIntLittle(u16, hdr_data[index..][0..2], 0);
- index += 2;
- // Reserved zeroes (u32)
- mem.set(u8, hdr_data[index..][0..4], 0);
- index += 4;
- mem.writeIntLittle(u32, hdr_data[index..][0..4], size_of_image);
- index += 4;
- mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset);
- index += 4;
- // CheckSum (u32)
- mem.set(u8, hdr_data[index..][0..4], 0);
- index += 4;
- // Subsystem, TODO: Let users specify the subsystem, always CUI for now
- mem.writeIntLittle(u16, hdr_data[index..][0..2], 3);
- index += 2;
- // DLL characteristics
- mem.writeIntLittle(u16, hdr_data[index..][0..2], 0x0);
- index += 2;
-
- switch (self.ptr_width) {
- .p32 => {
- // Size of stack reserve + commit
- mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000_000);
- index += 4;
- mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000);
- index += 4;
- // Size of heap reserve + commit
- mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x100_000);
- index += 4;
- mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000);
- index += 4;
- },
- .p64 => {
- // Size of stack reserve + commit
- mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000_000);
- index += 8;
- mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000);
- index += 8;
- // Size of heap reserve + commit
- mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x100_000);
- index += 8;
- mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000);
- index += 8;
- },
- }
-
- // Reserved zeroes
- mem.set(u8, hdr_data[index..][0..4], 0);
- index += 4;
-
- // Number of data directories
- mem.writeIntLittle(u32, hdr_data[index..][0..4], data_directory_count);
- index += 4;
- // Initialize data directories to zero
- mem.set(u8, hdr_data[index..][0 .. data_directory_count * 8], 0);
- index += data_directory_count * 8;
-
- assert(index == optional_header_size);
- }
-
- // Write section table.
- // First, the .got section
- hdr_data[index..][0..8].* = ".got\x00\x00\x00\x00".*;
- index += 8;
- if (output_mode == .Exe) {
- // Virtual size (u32)
- mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size);
- index += 4;
- // Virtual address (u32)
- mem.writeIntLittle(u32, hdr_data[index..][0..4], self.offset_table_virtual_address - default_image_base);
- index += 4;
- } else {
- mem.set(u8, hdr_data[index..][0..8], 0);
- index += 8;
- }
- // Size of raw data (u32)
- mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size);
- index += 4;
- // File pointer to the start of the section
- mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset);
- index += 4;
- // Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16)
- mem.set(u8, hdr_data[index..][0..12], 0);
- index += 12;
- // Section flags
- mem.writeIntLittle(u32, hdr_data[index..][0..4], @bitCast(u32, std.coff.SectionHeaderFlags{
- .CNT_INITIALIZED_DATA = 1,
- .MEM_READ = 1,
- }));
- index += 4;
- // Then, the .text section
- hdr_data[index..][0..8].* = ".text\x00\x00\x00".*;
- index += 8;
- if (output_mode == .Exe) {
- // Virtual size (u32)
- mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code);
- index += 4;
- // Virtual address (u32)
- mem.writeIntLittle(u32, hdr_data[index..][0..4], self.text_section_virtual_address - default_image_base);
- index += 4;
- } else {
- mem.set(u8, hdr_data[index..][0..8], 0);
- index += 8;
- }
- // Size of raw data (u32)
- mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code);
- index += 4;
- // File pointer to the start of the section
- mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset + default_offset_table_size);
- index += 4;
- // Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16)
- mem.set(u8, hdr_data[index..][0..12], 0);
- index += 12;
- // Section flags
- mem.writeIntLittle(u32, hdr_data[index..][0..4], @bitCast(u32, std.coff.SectionHeaderFlags{
- .CNT_CODE = 1,
- .MEM_EXECUTE = 1,
- .MEM_READ = 1,
- .MEM_WRITE = 1,
- }));
- index += 4;
-
- assert(index == optional_header_size + section_table_size);
- try self.base.file.?.pwriteAll(hdr_data[0..index], self.optional_header_offset);
- try self.base.file.?.setEndPos(self.section_data_offset + default_offset_table_size + default_size_of_code);
-
- if (self.text_section_size_dirty) {
- // Write the new raw size in the .text header
- var buf: [4]u8 = undefined;
- mem.writeIntLittle(u32, &buf, self.text_section_size);
- try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 16);
- try self.base.file.?.setEndPos(self.section_data_offset + self.offset_table_size + self.text_section_size);
- self.text_section_size_dirty = false;
- }
-
- if (self.base.options.output_mode == .Exe and self.size_of_image_dirty) {
- const new_size_of_image = mem.alignForwardGeneric(u32, self.text_section_virtual_address - default_image_base + self.text_section_size, section_alignment);
- var buf: [4]u8 = undefined;
- mem.writeIntLittle(u32, &buf, new_size_of_image);
- try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 56);
- self.size_of_image_dirty = false;
- }
-
if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
log.debug("flushing. no_entry_point_found = true\n", .{});
self.error_flags.no_entry_point_found = true;
} else {
log.debug("flushing. no_entry_point_found = false\n", .{});
self.error_flags.no_entry_point_found = false;
-
- if (self.base.options.output_mode == .Exe) {
- // Write AddressOfEntryPoint
- var buf: [4]u8 = undefined;
- mem.writeIntLittle(u32, &buf, self.entry_addr.?);
- try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 16);
- }
}
}
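Several of the removed computations above (SizeOfImage, the .got virtual size) round a raw byte count up to `section_alignment`. A tiny standalone illustration of that rounding; the value 0x1000 is the conventional PE section alignment and is assumed here, since the constant's definition is outside this excerpt:

    const std = @import("std");
    const mem = std.mem;

    // Assumed value; the real constant is defined elsewhere in Coff.zig.
    const section_alignment: u32 = 0x1000;

    test "virtual sizes round up to the section alignment" {
        try std.testing.expectEqual(
            @as(u32, 0x2000),
            mem.alignForwardGeneric(u32, @as(u32, 0x1234), section_alignment),
        );
        try std.testing.expectEqual(
            @as(u32, 0x1000),
            mem.alignForwardGeneric(u32, @as(u32, 0x0001), section_alignment),
        );
    }
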
-fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Node) !void {
- const tracy = trace(@src());
- defer tracy.end();
-
- var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
- defer arena_allocator.deinit();
- const arena = arena_allocator.allocator();
-
- const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
- const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
-
- // If there is no Zig code to compile, then we should skip flushing the output file because it
- // will not be part of the linker line anyway.
- const module_obj_path: ?[]const u8 = if (self.base.options.module) |module| blk: {
- const use_stage1 = build_options.have_stage1 and self.base.options.use_stage1;
- if (use_stage1) {
- const obj_basename = try std.zig.binNameAlloc(arena, .{
- .root_name = self.base.options.root_name,
- .target = self.base.options.target,
- .output_mode = .Obj,
- });
- switch (self.base.options.cache_mode) {
- .incremental => break :blk try module.zig_cache_artifact_directory.join(
- arena,
- &[_][]const u8{obj_basename},
- ),
- .whole => break :blk try fs.path.join(arena, &.{
- fs.path.dirname(full_out_path).?, obj_basename,
- }),
- }
- }
-
- try self.flushModule(comp, prog_node);
-
- if (fs.path.dirname(full_out_path)) |dirname| {
- break :blk try fs.path.join(arena, &.{ dirname, self.base.intermediary_basename.? });
- } else {
- break :blk self.base.intermediary_basename.?;
- }
- } else null;
-
- var sub_prog_node = prog_node.start("LLD Link", 0);
- sub_prog_node.activate();
- sub_prog_node.context.refresh();
- defer sub_prog_node.end();
-
- const is_lib = self.base.options.output_mode == .Lib;
- const is_dyn_lib = self.base.options.link_mode == .Dynamic and is_lib;
- const is_exe_or_dyn_lib = is_dyn_lib or self.base.options.output_mode == .Exe;
- const link_in_crt = self.base.options.link_libc and is_exe_or_dyn_lib;
- const target = self.base.options.target;
-
- // See link/Elf.zig for comments on how this mechanism works.
- const id_symlink_basename = "lld.id";
-
- var man: Cache.Manifest = undefined;
- defer if (!self.base.options.disable_lld_caching) man.deinit();
-
- var digest: [Cache.hex_digest_len]u8 = undefined;
-
- if (!self.base.options.disable_lld_caching) {
- man = comp.cache_parent.obtain();
- self.base.releaseLock();
-
- comptime assert(Compilation.link_hash_implementation_version == 7);
-
- for (self.base.options.objects) |obj| {
- _ = try man.addFile(obj.path, null);
- man.hash.add(obj.must_link);
- }
- for (comp.c_object_table.keys()) |key| {
- _ = try man.addFile(key.status.success.object_path, null);
- }
- try man.addOptionalFile(module_obj_path);
- man.hash.addOptionalBytes(self.base.options.entry);
- man.hash.addOptional(self.base.options.stack_size_override);
- man.hash.addOptional(self.base.options.image_base_override);
- man.hash.addListOfBytes(self.base.options.lib_dirs);
- man.hash.add(self.base.options.skip_linker_dependencies);
- if (self.base.options.link_libc) {
- man.hash.add(self.base.options.libc_installation != null);
- if (self.base.options.libc_installation) |libc_installation| {
- man.hash.addBytes(libc_installation.crt_dir.?);
- if (target.abi == .msvc) {
- man.hash.addBytes(libc_installation.msvc_lib_dir.?);
- man.hash.addBytes(libc_installation.kernel32_lib_dir.?);
- }
- }
- }
- link.hashAddSystemLibs(&man.hash, self.base.options.system_libs);
- man.hash.addListOfBytes(self.base.options.force_undefined_symbols.keys());
- man.hash.addOptional(self.base.options.subsystem);
- man.hash.add(self.base.options.is_test);
- man.hash.add(self.base.options.tsaware);
- man.hash.add(self.base.options.nxcompat);
- man.hash.add(self.base.options.dynamicbase);
- // strip does not need to go into the linker hash because it is part of the hash namespace
- man.hash.addOptional(self.base.options.major_subsystem_version);
- man.hash.addOptional(self.base.options.minor_subsystem_version);
-
- // We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
- _ = try man.hit();
- digest = man.final();
- var prev_digest_buf: [digest.len]u8 = undefined;
- const prev_digest: []u8 = Cache.readSmallFile(
- directory.handle,
- id_symlink_basename,
- &prev_digest_buf,
- ) catch |err| blk: {
- log.debug("COFF LLD new_digest={s} error: {s}", .{ std.fmt.fmtSliceHexLower(&digest), @errorName(err) });
- // Handle this as a cache miss.
- break :blk prev_digest_buf[0..0];
- };
- if (mem.eql(u8, prev_digest, &digest)) {
- log.debug("COFF LLD digest={s} match - skipping invocation", .{std.fmt.fmtSliceHexLower(&digest)});
- // Hot diggity dog! The output binary is already there.
- self.base.lock = man.toOwnedLock();
- return;
- }
- log.debug("COFF LLD prev_digest={s} new_digest={s}", .{ std.fmt.fmtSliceHexLower(prev_digest), std.fmt.fmtSliceHexLower(&digest) });
-
- // We are about to change the output file to be different, so we invalidate the build hash now.
- directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
- error.FileNotFound => {},
- else => |e| return e,
- };
- }
-
- if (self.base.options.output_mode == .Obj) {
- // LLD's COFF driver does not support the equivalent of `-r` so we do a simple file copy
- // here. TODO: think carefully about how we can avoid this redundant operation when doing
- // build-obj. See also the corresponding TODO in linkAsArchive.
- const the_object_path = blk: {
- if (self.base.options.objects.len != 0)
- break :blk self.base.options.objects[0].path;
-
- if (comp.c_object_table.count() != 0)
- break :blk comp.c_object_table.keys()[0].status.success.object_path;
-
- if (module_obj_path) |p|
- break :blk p;
-
- // TODO I think this is unreachable. Audit this situation when solving the above TODO
- // regarding eliding redundant object -> object transformations.
- return error.NoObjectsToLink;
- };
- // This can happen when using --enable-cache and using the stage1 backend. In this case
- // we can skip the file copy.
- if (!mem.eql(u8, the_object_path, full_out_path)) {
- try fs.cwd().copyFile(the_object_path, fs.cwd(), full_out_path, .{});
- }
- } else {
- // Create an LLD command line and invoke it.
- var argv = std.ArrayList([]const u8).init(self.base.allocator);
- defer argv.deinit();
- // We will invoke ourselves as a child process to gain access to LLD.
- // This is necessary because LLD does not behave properly as a library -
- // it calls exit() and does not reset all global data between invocations.
- try argv.appendSlice(&[_][]const u8{ comp.self_exe_path.?, "lld-link" });
-
- try argv.append("-ERRORLIMIT:0");
- try argv.append("-NOLOGO");
- if (!self.base.options.strip) {
- try argv.append("-DEBUG");
- }
- if (self.base.options.lto) {
- switch (self.base.options.optimize_mode) {
- .Debug => {},
- .ReleaseSmall => try argv.append("-OPT:lldlto=2"),
- .ReleaseFast, .ReleaseSafe => try argv.append("-OPT:lldlto=3"),
- }
- }
- if (self.base.options.output_mode == .Exe) {
- const stack_size = self.base.options.stack_size_override orelse 16777216;
- try argv.append(try allocPrint(arena, "-STACK:{d}", .{stack_size}));
- }
- if (self.base.options.image_base_override) |image_base| {
- try argv.append(try std.fmt.allocPrint(arena, "-BASE:{d}", .{image_base}));
- }
-
- if (target.cpu.arch == .i386) {
- try argv.append("-MACHINE:X86");
- } else if (target.cpu.arch == .x86_64) {
- try argv.append("-MACHINE:X64");
- } else if (target.cpu.arch.isARM()) {
- if (target.cpu.arch.ptrBitWidth() == 32) {
- try argv.append("-MACHINE:ARM");
- } else {
- try argv.append("-MACHINE:ARM64");
- }
- }
-
- for (self.base.options.force_undefined_symbols.keys()) |symbol| {
- try argv.append(try allocPrint(arena, "-INCLUDE:{s}", .{symbol}));
- }
-
- if (is_dyn_lib) {
- try argv.append("-DLL");
- }
-
- if (self.base.options.entry) |entry| {
- try argv.append(try allocPrint(arena, "-ENTRY:{s}", .{entry}));
- }
-
- if (self.base.options.tsaware) {
- try argv.append("-tsaware");
- }
- if (self.base.options.nxcompat) {
- try argv.append("-nxcompat");
- }
- if (self.base.options.dynamicbase) {
- try argv.append("-dynamicbase");
- }
-
- try argv.append(try allocPrint(arena, "-OUT:{s}", .{full_out_path}));
-
- if (self.base.options.implib_emit) |emit| {
- const implib_out_path = try emit.directory.join(arena, &[_][]const u8{emit.sub_path});
- try argv.append(try allocPrint(arena, "-IMPLIB:{s}", .{implib_out_path}));
- }
-
- if (self.base.options.link_libc) {
- if (self.base.options.libc_installation) |libc_installation| {
- try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{libc_installation.crt_dir.?}));
-
- if (target.abi == .msvc) {
- try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{libc_installation.msvc_lib_dir.?}));
- try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{libc_installation.kernel32_lib_dir.?}));
- }
- }
- }
-
- for (self.base.options.lib_dirs) |lib_dir| {
- try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{lib_dir}));
- }
-
- try argv.ensureUnusedCapacity(self.base.options.objects.len);
- for (self.base.options.objects) |obj| {
- if (obj.must_link) {
- argv.appendAssumeCapacity(try allocPrint(arena, "-WHOLEARCHIVE:{s}", .{obj.path}));
- } else {
- argv.appendAssumeCapacity(obj.path);
- }
- }
-
- for (comp.c_object_table.keys()) |key| {
- try argv.append(key.status.success.object_path);
- }
-
- if (module_obj_path) |p| {
- try argv.append(p);
- }
-
- const resolved_subsystem: ?std.Target.SubSystem = blk: {
- if (self.base.options.subsystem) |explicit| break :blk explicit;
- switch (target.os.tag) {
- .windows => {
- if (self.base.options.module) |module| {
- if (module.stage1_flags.have_dllmain_crt_startup or is_dyn_lib)
- break :blk null;
- if (module.stage1_flags.have_c_main or self.base.options.is_test or
- module.stage1_flags.have_winmain_crt_startup or
- module.stage1_flags.have_wwinmain_crt_startup)
- {
- break :blk .Console;
- }
- if (module.stage1_flags.have_winmain or module.stage1_flags.have_wwinmain)
- break :blk .Windows;
- }
- },
- .uefi => break :blk .EfiApplication,
- else => {},
- }
- break :blk null;
- };
-
- const Mode = enum { uefi, win32 };
- const mode: Mode = mode: {
- if (resolved_subsystem) |subsystem| {
- const subsystem_suffix = ss: {
- if (self.base.options.major_subsystem_version) |major| {
- if (self.base.options.minor_subsystem_version) |minor| {
- break :ss try allocPrint(arena, ",{d}.{d}", .{ major, minor });
- } else {
- break :ss try allocPrint(arena, ",{d}", .{major});
- }
- }
- break :ss "";
- };
-
- switch (subsystem) {
- .Console => {
- try argv.append(try allocPrint(arena, "-SUBSYSTEM:console{s}", .{
- subsystem_suffix,
- }));
- break :mode .win32;
- },
- .EfiApplication => {
- try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_application{s}", .{
- subsystem_suffix,
- }));
- break :mode .uefi;
- },
- .EfiBootServiceDriver => {
- try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_boot_service_driver{s}", .{
- subsystem_suffix,
- }));
- break :mode .uefi;
- },
- .EfiRom => {
- try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_rom{s}", .{
- subsystem_suffix,
- }));
- break :mode .uefi;
- },
- .EfiRuntimeDriver => {
- try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_runtime_driver{s}", .{
- subsystem_suffix,
- }));
- break :mode .uefi;
- },
- .Native => {
- try argv.append(try allocPrint(arena, "-SUBSYSTEM:native{s}", .{
- subsystem_suffix,
- }));
- break :mode .win32;
- },
- .Posix => {
- try argv.append(try allocPrint(arena, "-SUBSYSTEM:posix{s}", .{
- subsystem_suffix,
- }));
- break :mode .win32;
- },
- .Windows => {
- try argv.append(try allocPrint(arena, "-SUBSYSTEM:windows{s}", .{
- subsystem_suffix,
- }));
- break :mode .win32;
- },
- }
- } else if (target.os.tag == .uefi) {
- break :mode .uefi;
- } else {
- break :mode .win32;
- }
- };
-
- switch (mode) {
- .uefi => try argv.appendSlice(&[_][]const u8{
- "-BASE:0",
- "-ENTRY:EfiMain",
- "-OPT:REF",
- "-SAFESEH:NO",
- "-MERGE:.rdata=.data",
- "-ALIGN:32",
- "-NODEFAULTLIB",
- "-SECTION:.xdata,D",
- }),
- .win32 => {
- if (link_in_crt) {
- if (target.abi.isGnu()) {
- try argv.append("-lldmingw");
-
- if (target.cpu.arch == .i386) {
- try argv.append("-ALTERNATENAME:__image_base__=___ImageBase");
- } else {
- try argv.append("-ALTERNATENAME:__image_base__=__ImageBase");
- }
-
- if (is_dyn_lib) {
- try argv.append(try comp.get_libc_crt_file(arena, "dllcrt2.obj"));
- if (target.cpu.arch == .i386) {
- try argv.append("-ALTERNATENAME:__DllMainCRTStartup@12=_DllMainCRTStartup@12");
- } else {
- try argv.append("-ALTERNATENAME:_DllMainCRTStartup=DllMainCRTStartup");
- }
- } else {
- try argv.append(try comp.get_libc_crt_file(arena, "crt2.obj"));
- }
-
- try argv.append(try comp.get_libc_crt_file(arena, "mingw32.lib"));
- try argv.append(try comp.get_libc_crt_file(arena, "mingwex.lib"));
- try argv.append(try comp.get_libc_crt_file(arena, "msvcrt-os.lib"));
-
- for (mingw.always_link_libs) |name| {
- if (!self.base.options.system_libs.contains(name)) {
- const lib_basename = try allocPrint(arena, "{s}.lib", .{name});
- try argv.append(try comp.get_libc_crt_file(arena, lib_basename));
- }
- }
- } else {
- const lib_str = switch (self.base.options.link_mode) {
- .Dynamic => "",
- .Static => "lib",
- };
- const d_str = switch (self.base.options.optimize_mode) {
- .Debug => "d",
- else => "",
- };
- switch (self.base.options.link_mode) {
- .Static => try argv.append(try allocPrint(arena, "libcmt{s}.lib", .{d_str})),
- .Dynamic => try argv.append(try allocPrint(arena, "msvcrt{s}.lib", .{d_str})),
- }
-
- try argv.append(try allocPrint(arena, "{s}vcruntime{s}.lib", .{ lib_str, d_str }));
- try argv.append(try allocPrint(arena, "{s}ucrt{s}.lib", .{ lib_str, d_str }));
-
- // Visual C++ 2015 Conformance Changes
- // https://msdn.microsoft.com/en-us/library/bb531344.aspx
- try argv.append("legacy_stdio_definitions.lib");
-
- // msvcrt depends on kernel32 and ntdll
- try argv.append("kernel32.lib");
- try argv.append("ntdll.lib");
- }
- } else {
- try argv.append("-NODEFAULTLIB");
- if (!is_lib) {
- if (self.base.options.module) |module| {
- if (module.stage1_flags.have_winmain_crt_startup) {
- try argv.append("-ENTRY:WinMainCRTStartup");
- } else {
- try argv.append("-ENTRY:wWinMainCRTStartup");
- }
- } else {
- try argv.append("-ENTRY:wWinMainCRTStartup");
- }
- }
- }
- },
- }
-
- // libc++ dep
- if (self.base.options.link_libcpp) {
- try argv.append(comp.libcxxabi_static_lib.?.full_object_path);
- try argv.append(comp.libcxx_static_lib.?.full_object_path);
- }
-
- // libunwind dep
- if (self.base.options.link_libunwind) {
- try argv.append(comp.libunwind_static_lib.?.full_object_path);
- }
-
- if (is_exe_or_dyn_lib and !self.base.options.skip_linker_dependencies) {
- if (!self.base.options.link_libc) {
- if (comp.libc_static_lib) |lib| {
- try argv.append(lib.full_object_path);
- }
- }
- // MinGW doesn't provide libssp symbols
- if (target.abi.isGnu()) {
- if (comp.libssp_static_lib) |lib| {
- try argv.append(lib.full_object_path);
- }
- }
- // MSVC compiler_rt is missing some stuff, so we build it unconditionally,
- // relying on weak linkage to allow MSVC compiler_rt functions to override ours.
- if (comp.compiler_rt_lib) |lib| {
- try argv.append(lib.full_object_path);
- }
- }
-
- try argv.ensureUnusedCapacity(self.base.options.system_libs.count());
- for (self.base.options.system_libs.keys()) |key| {
- const lib_basename = try allocPrint(arena, "{s}.lib", .{key});
- if (comp.crt_files.get(lib_basename)) |crt_file| {
- argv.appendAssumeCapacity(crt_file.full_object_path);
- continue;
- }
- if (try self.findLib(arena, lib_basename)) |full_path| {
- argv.appendAssumeCapacity(full_path);
- continue;
- }
- if (target.abi.isGnu()) {
- const fallback_name = try allocPrint(arena, "lib{s}.dll.a", .{key});
- if (try self.findLib(arena, fallback_name)) |full_path| {
- argv.appendAssumeCapacity(full_path);
- continue;
- }
- }
- log.err("DLL import library for -l{s} not found", .{key});
- return error.DllImportLibraryNotFound;
- }
-
- if (self.base.options.verbose_link) {
- // Skip over our own name so that the LLD linker name is the first argv item.
- Compilation.dump_argv(argv.items[1..]);
- }
-
- if (std.process.can_spawn) {
- // If possible, we run LLD as a child process because it does not always
- // behave properly as a library, unfortunately.
- // https://github.com/ziglang/zig/issues/3825
- var child = std.ChildProcess.init(argv.items, arena);
- if (comp.clang_passthrough_mode) {
- child.stdin_behavior = .Inherit;
- child.stdout_behavior = .Inherit;
- child.stderr_behavior = .Inherit;
-
- const term = child.spawnAndWait() catch |err| {
- log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
- return error.UnableToSpawnSelf;
- };
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- std.process.exit(code);
- }
- },
- else => std.process.abort(),
- }
- } else {
- child.stdin_behavior = .Ignore;
- child.stdout_behavior = .Ignore;
- child.stderr_behavior = .Pipe;
-
- try child.spawn();
-
- const stderr = try child.stderr.?.reader().readAllAlloc(arena, 10 * 1024 * 1024);
-
- const term = child.wait() catch |err| {
- log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
- return error.UnableToSpawnSelf;
- };
-
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- // TODO parse this output and surface with the Compilation API rather than
- // directly outputting to stderr here.
- std.debug.print("{s}", .{stderr});
- return error.LLDReportedFailure;
- }
- },
- else => {
- log.err("{s} terminated with stderr:\n{s}", .{ argv.items[0], stderr });
- return error.LLDCrashed;
- },
- }
-
- if (stderr.len != 0) {
- log.warn("unexpected LLD stderr:\n{s}", .{stderr});
- }
- }
- } else {
- const exit_code = try lldMain(arena, argv.items, false);
- if (exit_code != 0) {
- if (comp.clang_passthrough_mode) {
- std.process.exit(exit_code);
- } else {
- return error.LLDReportedFailure;
- }
- }
- }
- }
-
- if (!self.base.options.disable_lld_caching) {
- // Update the file with the digest. If it fails we can continue; it only
- // means that the next invocation will have an unnecessary cache miss.
- Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
- log.warn("failed to save linking hash digest file: {s}", .{@errorName(err)});
- };
- // Again failure here only means an unnecessary cache miss.
- man.writeManifest() catch |err| {
- log.warn("failed to write cache manifest when linking: {s}", .{@errorName(err)});
- };
- // We hang on to this lock so that the output file path can be used without
- // other processes clobbering it.
- self.base.lock = man.toOwnedLock();
- }
-}
-
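The body deleted above does not disappear: the call site earlier in this diff becomes `lld.linkWithLLD(self, comp, prog_node)` and CMakeLists.txt gains src/link/Coff/lld.zig at the end. A sketch of the surface that new module has to expose; the import paths are assumptions based on the file's location:

    const std = @import("std");
    const Coff = @import("../Coff.zig");
    const Compilation = @import("../../Compilation.zig");

    pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Node) !void {
        // The removed body (cache manifest, LLD argv construction, spawning
        // `zig lld-link` as a child process) is what moves into this function.
        _ = self;
        _ = comp;
        _ = prog_node;
    }
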
-fn findLib(self: *Coff, arena: Allocator, name: []const u8) !?[]const u8 {
- for (self.base.options.lib_dirs) |lib_dir| {
- const full_path = try fs.path.join(arena, &.{ lib_dir, name });
- fs.cwd().access(full_path, .{}) catch |err| switch (err) {
- error.FileNotFound => continue,
- else => |e| return e,
- };
- return full_path;
- }
- return null;
-}
-
pub fn getDeclVAddr(
self: *Coff,
decl_index: Module.Decl.Index,
reloc_info: link.File.RelocInfo,
) !u64 {
+ _ = self;
+ _ = decl_index;
_ = reloc_info;
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
- assert(self.llvm_object == null);
- return self.text_section_virtual_address + decl.link.coff.text_offset;
+ @panic("TODO getDeclVAddr");
}
pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
_ = self;
_ = module;
_ = decl;
- // TODO Implement this
+ log.debug("TODO implement updateDeclLineNumber", .{});
}
-pub fn deinit(self: *Coff) void {
- if (build_options.have_llvm) {
- if (self.llvm_object) |llvm_object| llvm_object.destroy(self.base.allocator);
- }
+pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
+ // TODO https://github.com/ziglang/zig/issues/1284
+ return math.add(@TypeOf(actual_size), actual_size, actual_size / ideal_factor) catch
+ math.maxInt(@TypeOf(actual_size));
+}
+
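A quick check of the growth rule above. The value of `ideal_factor` is not visible in this hunk, so the concrete numbers below assume it is 3; the overflow case shows why the `catch` clamps to the maximum value:

    const std = @import("std");
    const math = std.math;

    const ideal_factor = 3; // assumed value, not shown in this diff

    fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
        return math.add(@TypeOf(actual_size), actual_size, actual_size / ideal_factor) catch
            math.maxInt(@TypeOf(actual_size));
    }

    test "padToIdeal grows by ~1/ideal_factor and saturates on overflow" {
        try std.testing.expectEqual(@as(u32, 16), padToIdeal(@as(u32, 12)));
        try std.testing.expectEqual(
            @as(u32, math.maxInt(u32)),
            padToIdeal(@as(u32, math.maxInt(u32) - 1)),
        );
    }
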
+/// Returns a pointer to the symbol described by the `sym_loc` descriptor.
+pub fn getSymbolPtr(self: *Coff, sym_loc: SymbolWithLoc) *coff.Symbol {
+ assert(sym_loc.file == null); // TODO linking object files
+ return &self.locals.items[sym_loc.sym_index];
+}
+
+/// Returns the symbol described by the `sym_loc` descriptor.
+pub fn getSymbol(self: *Coff, sym_loc: SymbolWithLoc) coff.Symbol {
+ return self.getSymbolPtr(sym_loc).*;
+}
+
+/// Returns the name of the symbol described by the `sym_loc` descriptor.
+pub fn getSymbolName(self: *Coff, sym_loc: SymbolWithLoc) []const u8 {
+ assert(sym_loc.file == null); // TODO linking object files
+ const sym = self.locals.items[sym_loc.sym_index];
+ const offset = sym.getNameOffset() orelse return sym.getName().?;
+ return self.strtab.get(offset).?;
+}
+
+/// Returns the atom referenced by the symbol described by the `sym_loc` descriptor, if any.
+/// Returns null otherwise.
+pub fn getAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
+ assert(sym_loc.file == null); // TODO linking with object files
+ return self.atom_by_index_table.get(sym_loc.sym_index);
+}
- self.text_block_free_list.deinit(self.base.allocator);
- self.offset_table.deinit(self.base.allocator);
- self.offset_table_free_list.deinit(self.base.allocator);
+/// Returns the GOT atom that references `sym_loc`, if one exists.
+/// Returns null otherwise.
+pub fn getGotAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
+ const got_index = self.got_entries.get(sym_loc) orelse return null;
+ return self.atom_by_index_table.get(got_index);
}
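Taken together, these accessors let code generation go from a decl's `sym_index` to a name and a virtual address without touching the symbol table directly. An illustrative helper, not part of the commit (only the accessors and the `sym_index`/`SymbolWithLoc` shapes come from this diff):

    fn logDeclSymbol(self: *Coff, atom: *const Atom) void {
        const target = SymbolWithLoc{ .sym_index = atom.sym_index, .file = null };
        const sym = self.getSymbol(target);
        const name = self.getSymbolName(target);
        log.debug("{s} resolves to vaddr 0x{x}", .{ name, sym.value });
        if (self.getGotAtomForSymbol(target)) |got_atom| {
            const got_sym = self.getSymbol(.{ .sym_index = got_atom.sym_index, .file = null });
            log.debug("  has a GOT slot at vaddr 0x{x}", .{got_sym.value});
        }
    }
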
src/link/MachO.zig
@@ -26,7 +26,7 @@ const trace = @import("../tracy.zig").trace;
const Air = @import("../Air.zig");
const Allocator = mem.Allocator;
const Archive = @import("MachO/Archive.zig");
-const Atom = @import("MachO/Atom.zig");
+pub const Atom = @import("MachO/Atom.zig");
const Cache = @import("../Cache.zig");
const CodeSignature = @import("MachO/CodeSignature.zig");
const Compilation = @import("../Compilation.zig");
@@ -44,7 +44,6 @@ const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");
const Value = @import("../value.zig").Value;
-pub const TextBlock = Atom;
pub const DebugSymbols = @import("MachO/DebugSymbols.zig");
pub const base_tag: File.Tag = File.Tag.macho;
src/link.zig
@@ -245,8 +245,8 @@ pub const File = struct {
pub const LinkBlock = union {
elf: Elf.TextBlock,
- coff: Coff.TextBlock,
- macho: MachO.TextBlock,
+ coff: Coff.Atom,
+ macho: MachO.Atom,
plan9: Plan9.DeclBlock,
c: void,
wasm: Wasm.DeclBlock,
@@ -267,7 +267,7 @@ pub const File = struct {
pub const Export = union {
elf: Elf.Export,
- coff: void,
+ coff: Coff.Export,
macho: MachO.Export,
plan9: Plan9.Export,
c: void,
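The COFF export payload is now a real type. Its definition is not part of this excerpt; since the src/Sema.zig hunk below initializes it with `.{}`, it is presumably a small struct whose fields all have defaults, along these lines (the field itself is a guess):

    // Hypothetical shape of Coff.Export, inferred from the `.{ .coff = .{} }`
    // initializer in Sema; the real definition lives in src/link/Coff.zig.
    pub const Export = struct {
        sym_index: ?u32 = null,
    };
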
src/Module.zig
@@ -5259,9 +5259,9 @@ pub fn clearDecl(
// TODO instead of a union, put this memory trailing Decl objects,
// and allow it to be variably sized.
decl.link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = link.File.Coff.TextBlock.empty },
+ .coff => .{ .coff = link.File.Coff.Atom.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
- .macho => .{ .macho = link.File.MachO.TextBlock.empty },
+ .macho => .{ .macho = link.File.MachO.Atom.empty },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
@@ -5680,9 +5680,9 @@ pub fn allocateNewDecl(
.zir_decl_index = 0,
.src_scope = src_scope,
.link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = link.File.Coff.TextBlock.empty },
+ .coff => .{ .coff = link.File.Coff.Atom.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
- .macho => .{ .macho = link.File.MachO.TextBlock.empty },
+ .macho => .{ .macho = link.File.MachO.Atom.empty },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
src/Sema.zig
@@ -5076,7 +5076,7 @@ pub fn analyzeExport(
},
.src = src,
.link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = {} },
+ .coff => .{ .coff = .{} },
.elf => .{ .elf = .{} },
.macho => .{ .macho = .{} },
.plan9 => .{ .plan9 = null },
CMakeLists.txt
@@ -753,6 +753,8 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/link.zig"
"${CMAKE_SOURCE_DIR}/src/link/C.zig"
"${CMAKE_SOURCE_DIR}/src/link/Coff.zig"
+ "${CMAKE_SOURCE_DIR}/src/link/Coff/Atom.zig"
+ "${CMAKE_SOURCE_DIR}/src/link/Coff/lld.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Archive.zig"