Commit d9fffd431a
Changed files (5)
src/link/Elf/file.zig
@@ -0,0 +1,110 @@
+/// Tagged handle to any input file known to the ELF linker. Only the
+/// in-memory Zig module and the synthetic linker-defined file exist so far;
+/// Object and SharedObject are stubbed out below and in `Entry`.
+pub const File = union(enum) {
+    zig_module: *ZigModule,
+    linker_defined: *LinkerDefined,
+    // object: *Object,
+    // shared_object: *SharedObject,
+
+    /// Index of this file in the linker's global file table.
+    pub fn index(file: File) Index {
+        return switch (file) {
+            inline else => |x| x.index,
+        };
+    }
+
+    pub fn fmtPath(file: File) std.fmt.Formatter(formatPath) {
+        return .{ .data = file };
+    }
+
+    fn formatPath(
+        file: File,
+        comptime unused_fmt_string: []const u8,
+        options: std.fmt.FormatOptions,
+        writer: anytype,
+    ) !void {
+        _ = unused_fmt_string;
+        _ = options;
+        switch (file) {
+            .zig_module => try writer.writeAll("(zig module)"),
+            .linker_defined => try writer.writeAll("(linker defined)"),
+            // Reinstate together with the commented-out variants above;
+            // referencing them while the tags do not exist fails to compile:
+            // .object => |x| try writer.print("{}", .{x.fmtPath()}),
+            // .shared_object => |x| try writer.writeAll(x.path),
+        }
+    }
+
+    pub fn resolveSymbols(file: File, elf_file: *Elf) void {
+        switch (file) {
+            .zig_module => unreachable, // handled separately
+            inline else => |x| x.resolveSymbols(elf_file),
+        }
+    }
+
+    // pub fn resetGlobals(file: File, elf_file: *Elf) void {
+    //     switch (file) {
+    //         inline else => |x| x.resetGlobals(elf_file),
+    //     }
+    // }
+
+    /// Whether the file takes part in symbol resolution. The Zig module and
+    /// the linker-defined file are always alive.
+    pub fn isAlive(file: File) bool {
+        return switch (file) {
+            .zig_module, .linker_defined => true,
+            // The switch above is already exhaustive; an extra prong would be
+            // an unreachable-else compile error. Reinstate with Object et al:
+            // inline else => |x| x.alive,
+        };
+    }
+
+    /// Encodes symbol rank so that the following ordering applies:
+    /// * strong defined
+    /// * weak defined
+    /// * strong in lib (dso/archive)
+    /// * weak in lib (dso/archive)
+    /// * common
+    /// * common in lib (archive)
+    /// * unclaimed
+    pub fn symbolRank(file: File, sym: elf.Elf64_Sym, in_archive: bool) u32 {
+        const base: u3 = blk: {
+            if (sym.st_shndx == elf.SHN_COMMON) break :blk if (in_archive) 6 else 5;
+            // TODO once shared objects are reinstated, DSO symbols rank like
+            // archive members: `if (file == .shared_object or in_archive)`.
+            if (in_archive) break :blk switch (sym.st_bind()) {
+                elf.STB_GLOBAL => 3,
+                else => 4,
+            };
+            break :blk switch (sym.st_bind()) {
+                elf.STB_GLOBAL => 1,
+                else => 2,
+            };
+        };
+        // Rank major in the top byte, file index as tie-breaker below it.
+        return (@as(u32, base) << 24) + file.index();
+    }
+
+    pub fn setAlive(file: File) void {
+        switch (file) {
+            .zig_module, .linker_defined => {},
+            // inline else => |x| x.alive = true,
+        }
+    }
+
+    pub fn markLive(file: File, elf_file: *Elf) void {
+        _ = elf_file; // used again once Object/SharedObject come back
+        switch (file) {
+            .zig_module, .linker_defined => {},
+            // inline else => |x| x.markLive(elf_file),
+        }
+    }
+
+    pub const Index = u32;
+
+    /// Storage for `Elf.files`; index 0 is always `.null`.
+    pub const Entry = union(enum) {
+        null: void,
+        zig_module: ZigModule,
+        linker_defined: LinkerDefined,
+        // object: Object,
+        // shared_object: SharedObject,
+    };
+};
+
+const std = @import("std");
+const elf = std.elf;
+
+const Allocator = std.mem.Allocator;
+const Elf = @import("../Elf.zig");
+const LinkerDefined = @import("LinkerDefined.zig");
+// const Object = @import("Object.zig");
+// const SharedObject = @import("SharedObject.zig");
+const ZigModule = @import("ZigModule.zig");
src/link/Elf/LinkerDefined.zig
@@ -0,0 +1,132 @@
+index: File.Index,
+symtab: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{},
+symbols: std.ArrayListUnmanaged(Symbol.Index) = .{},
+alive: bool = true,
+
+// output_symtab_size: Elf.SymtabSize = .{},
+
+/// Frees the backing symbol tables. Deallocation cannot fail; call exactly
+/// once with the same allocator that grew the lists.
+pub fn deinit(self: *LinkerDefined, allocator: Allocator) void {
+ self.symtab.deinit(allocator);
+ self.symbols.deinit(allocator);
+}
+
+/// Appends a new hidden, absolute global symbol named `name` to this
+/// synthetic file and registers it in the linker's global table.
+/// Returns the global's index in `elf_file`'s symbol namespace.
+pub fn addGlobal(self: *LinkerDefined, name: [:0]const u8, elf_file: *Elf) !u32 {
+ const gpa = elf_file.base.allocator;
+ // Reserve both slots up front so the appends below cannot fail midway.
+ try self.symtab.ensureUnusedCapacity(gpa, 1);
+ try self.symbols.ensureUnusedCapacity(gpa, 1);
+ self.symtab.appendAssumeCapacity(.{
+ .st_name = try elf_file.strtab.insert(gpa, name),
+ .st_info = elf.STB_GLOBAL << 4,
+ .st_other = @intFromEnum(elf.STV.HIDDEN),
+ .st_shndx = elf.SHN_ABS,
+ .st_value = 0,
+ .st_size = 0,
+ });
+ const off = try elf_file.internString("{s}", .{name});
+ const gop = try elf_file.getOrCreateGlobal(off);
+ self.symbols.addOneAssumeCapacity().* = gop.index;
+ return gop.index;
+}
+
+/// Standard symbol-resolution pass for the linker-defined file: for every
+/// defined symbol we own, claim the corresponding global if our rank beats
+/// the current owner's rank (lower rank wins; see File.symbolRank).
+pub fn resolveSymbols(self: *LinkerDefined, elf_file: *Elf) void {
+    for (self.symbols.items, 0..) |index, i| {
+        const sym_idx = @as(u32, @intCast(i));
+        const this_sym = self.symtab.items[sym_idx];
+
+        // An undefined symbol can never win resolution.
+        if (this_sym.st_shndx == elf.SHN_UNDEF) continue;
+
+        const global = elf_file.symbol(index);
+        if (self.asFile().symbolRank(this_sym, false) < global.symbolRank(elf_file)) {
+            // Field names must match Symbol.zig's declared fields
+            // (name_offset/atom_index/file_index/symbol_index/version_index).
+            global.* = .{
+                .value = 0,
+                .name_offset = global.name_offset,
+                .atom_index = 0,
+                .file_index = self.index,
+                .symbol_index = sym_idx,
+                .version_index = elf_file.default_sym_version,
+            };
+        }
+    }
+}
+
+// pub fn resetGlobals(self: *LinkerDefined, elf_file: *Elf) void {
+// for (self.symbols.items) |index| {
+// const global = elf_file.getSymbol(index);
+// const name = global.name;
+// global.* = .{};
+// global.name = name;
+// }
+// }
+
+// pub fn calcSymtabSize(self: *InternalObject, elf_file: *Elf) !void {
+// if (elf_file.options.strip_all) return;
+
+// for (self.getGlobals()) |global_index| {
+// const global = elf_file.getSymbol(global_index);
+// if (global.getFile(elf_file)) |file| if (file.getIndex() != self.index) continue;
+// global.flags.output_symtab = true;
+// self.output_symtab_size.nlocals += 1;
+// self.output_symtab_size.strsize += @as(u32, @intCast(global.getName(elf_file).len + 1));
+// }
+// }
+
+// pub fn writeSymtab(self: *LinkerDefined, elf_file: *Elf, ctx: Elf.WriteSymtabCtx) !void {
+// if (elf_file.options.strip_all) return;
+
+// const gpa = elf_file.base.allocator;
+
+// var ilocal = ctx.ilocal;
+// for (self.getGlobals()) |global_index| {
+// const global = elf_file.getSymbol(global_index);
+// if (global.getFile(elf_file)) |file| if (file.getIndex() != self.index) continue;
+// if (!global.flags.output_symtab) continue;
+// const st_name = try ctx.strtab.insert(gpa, global.getName(elf_file));
+// ctx.symtab[ilocal] = global.asElfSym(st_name, elf_file);
+// ilocal += 1;
+// }
+// }
+
+/// Wraps this file in the generic `File` handle.
+pub fn asFile(self: *LinkerDefined) File {
+ return .{ .linker_defined = self };
+}
+
+/// Indices (into the linker's symbol table) of the globals this file owns.
+pub inline fn getGlobals(self: *LinkerDefined) []const u32 {
+ return self.symbols.items;
+}
+
+/// Debug formatter for this file's symbol table.
+/// Note: the receiver is `*LinkerDefined`; `InternalObject` is not a type in
+/// this file (leftover from the upstream linker this was ported from).
+pub fn fmtSymtab(self: *LinkerDefined, elf_file: *Elf) std.fmt.Formatter(formatSymtab) {
+    return .{ .data = .{
+        .self = self,
+        .elf_file = elf_file,
+    } };
+}
+
+/// Bundles the file pointer with the linker state for the formatters below.
+/// `InternalObject` does not exist here; the receiver type is LinkerDefined.
+const FormatContext = struct {
+    self: *LinkerDefined,
+    elf_file: *Elf,
+};
+
+/// Writes a human-readable dump of this file's globals.
+fn formatSymtab(
+    ctx: FormatContext,
+    comptime unused_fmt_string: []const u8,
+    options: std.fmt.FormatOptions,
+    writer: anytype,
+) !void {
+    _ = unused_fmt_string;
+    _ = options;
+    try writer.writeAll(" globals\n");
+    for (ctx.self.getGlobals()) |index| {
+        // Elf's accessor is `symbol`, not `getSymbol` (see resolveSymbols).
+        const global = ctx.elf_file.symbol(index);
+        try writer.print(" {}\n", .{global.fmt(ctx.elf_file)});
+    }
+}
+
+const std = @import("std");
+const elf = std.elf;
+
+const Allocator = std.mem.Allocator;
+const Elf = @import("../Elf.zig");
+const File = @import("file.zig").File;
+const LinkerDefined = @This();
+// const Object = @import("Object.zig");
+const Symbol = @import("Symbol.zig");
src/link/Elf/Symbol.zig
@@ -0,0 +1,337 @@
+//! Represents a defined symbol.
+
+/// Allocated address value of this symbol.
+value: u64 = 0,
+
+/// Offset into the linker's string table.
+name_offset: u32 = 0,
+
+/// Index of file where this symbol is defined.
+file_index: File.Index = 0,
+
+/// Index of atom containing this symbol.
+/// Index of 0 means there is no associated atom with this symbol.
+/// Use `atom` to get the pointer to the atom.
+atom_index: Atom.Index = 0,
+
+/// Assigned output section index for this atom.
+output_section_index: u16 = 0,
+
+/// Index of the source symbol this symbol references.
+/// Use `getSourceSymbol` to pull the source symbol from the relevant file.
+symbol_index: Index = 0,
+
+/// Index of the source version symbol this symbol references if any.
+/// If the symbol is unversioned it will have either VER_NDX_LOCAL or VER_NDX_GLOBAL.
+version_index: elf.Elf64_Versym = elf.VER_NDX_LOCAL,
+
+/// Misc flags for the symbol packaged as packed struct for compression.
+flags: Flags = .{},
+
+extra_index: u32 = 0,
+
+/// Whether this symbol is absolute (SHN_ABS-like): not imported, not backed
+/// by an atom, and not placed in any output section.
+pub fn isAbs(symbol: Symbol, elf_file: *Elf) bool {
+    const file_ptr = symbol.file(elf_file).?;
+    // TODO reinstate once the shared_object file variant exists; `.shared`
+    // is not a tag of File and does not compile:
+    // if (file_ptr == .shared_object) return symbol.sourceSymbol(elf_file).st_shndx == elf.SHN_ABS;
+    return !symbol.flags.import and symbol.atom(elf_file) == null and
+        symbol.output_section_index == 0 and
+        file_ptr != .linker_defined and file_ptr != .zig_module;
+}
+
+/// A symbol is local when it is neither imported nor exported at runtime.
+pub fn isLocal(symbol: Symbol) bool {
+    const flags = symbol.flags;
+    return !flags.import and !flags.@"export";
+}
+
+/// Whether the symbol resolves to a GNU indirect function (STT_GNU_IFUNC).
+pub inline fn isIFunc(symbol: Symbol, elf_file: *Elf) bool {
+    const st_type = symbol.@"type"(elf_file);
+    return st_type == elf.STT_GNU_IFUNC;
+}
+
+/// ELF symbol type (STT_*) of the source symbol.
+pub fn @"type"(symbol: Symbol, elf_file: *Elf) u4 {
+    const s_sym = symbol.sourceSymbol(elf_file);
+    // TODO: an IFUNC coming from a shared object must degrade to STT_FUNC.
+    // Reinstate when the shared_object variant exists (`.shared` is not a
+    // tag of File and does not compile):
+    // if (s_sym.st_type() == elf.STT_GNU_IFUNC and symbol.file(elf_file).? == .shared_object) return elf.STT_FUNC;
+    return s_sym.st_type();
+}
+
+/// The symbol's name, looked up in the linker's string table.
+/// The stored field is `name_offset`; `symbol.name` would reference this
+/// function itself, not a field.
+pub fn name(symbol: Symbol, elf_file: *Elf) [:0]const u8 {
+    return elf_file.strtab.getAssumeExists(symbol.name_offset);
+}
+
+/// The atom backing this symbol, or null when `atom_index` is 0.
+pub fn atom(symbol: Symbol, elf_file: *Elf) ?*Atom {
+    return elf_file.atom(symbol.atom_index);
+}
+
+/// The file that defines this symbol, or null for index 0 (unresolved).
+pub fn file(symbol: Symbol, elf_file: *Elf) ?File {
+    return elf_file.file(symbol.file_index);
+}
+
+/// The source `Elf64_Sym` this symbol refers to inside its defining file.
+pub fn sourceSymbol(symbol: Symbol, elf_file: *Elf) elf.Elf64_Sym {
+    const file_ptr = symbol.file(elf_file).?;
+    return switch (file_ptr) {
+        // NOTE(review): ZigModule declares `elf_locals`/`elf_globals`, not a
+        // `symtab` field -- confirm which table `symbol_index` indexes into.
+        .linker_defined, .zig_module => |x| x.symtab.items[symbol.symbol_index],
+        // Object/SharedObject store a plain slice; reinstate with them:
+        // inline else => |x| x.symtab[symbol.symbol_index],
+    };
+}
+
+/// Rank of this symbol for resolution purposes; unresolved symbols get the
+/// worst possible rank so any definition beats them.
+pub fn symbolRank(symbol: Symbol, elf_file: *Elf) u32 {
+    const file_ptr = symbol.file(elf_file) orelse return std.math.maxInt(u32);
+    const sym = symbol.sourceSymbol(elf_file);
+    // Must switch on the resolved handle `file_ptr`; `file` here is the
+    // method declared above, not a value.
+    const in_archive = switch (file_ptr) {
+        // .object => |x| !x.alive,
+        else => false,
+    };
+    return file_ptr.symbolRank(sym, in_archive);
+}
+
+/// Runtime address of the symbol. PLT/copy-rel indirection is not wired up
+/// yet (see commented-out logic); for now this is always the raw value.
+pub fn address(symbol: Symbol, opts: struct {
+ plt: bool = true,
+}, elf_file: *Elf) u64 {
+ // if (symbol.flags.copy_rel) {
+ // return elf_file.sectionAddress(elf_file.copy_rel_sect_index.?) + symbol.value;
+ // }
+ // if (symbol.flags.plt and opts.plt) {
+ // const extra = symbol.getExtra(elf_file).?;
+ // if (!symbol.flags.is_canonical and symbol.flags.got) {
+ // // We have a non-lazy bound function pointer, use that!
+ // return elf_file.getPltGotEntryAddress(extra.plt_got);
+ // }
+ // // Lazy-bound function it is!
+ // return elf_file.getPltEntryAddress(extra.plt);
+ // }
+ return symbol.value;
+}
+
+/// Address of this symbol's GOT slot, or 0 when it has no GOT entry.
+pub fn gotAddress(symbol: Symbol, elf_file: *Elf) u64 {
+ if (!symbol.flags.got) return 0;
+ const extra = symbol.extra(elf_file).?;
+ return elf_file.gotEntryAddress(extra.got);
+}
+
+// pub fn tlsGdAddress(symbol: Symbol, elf_file: *Elf) u64 {
+// if (!symbol.flags.tlsgd) return 0;
+// const extra = symbol.getExtra(elf_file).?;
+// return elf_file.getGotEntryAddress(extra.tlsgd);
+// }
+
+// pub fn gotTpAddress(symbol: Symbol, elf_file: *Elf) u64 {
+// if (!symbol.flags.gottp) return 0;
+// const extra = symbol.getExtra(elf_file).?;
+// return elf_file.getGotEntryAddress(extra.gottp);
+// }
+
+// pub fn tlsDescAddress(symbol: Symbol, elf_file: *Elf) u64 {
+// if (!symbol.flags.tlsdesc) return 0;
+// const extra = symbol.getExtra(elf_file).?;
+// return elf_file.getGotEntryAddress(extra.tlsdesc);
+// }
+
+// pub fn alignment(symbol: Symbol, elf_file: *Elf) !u64 {
+// const file = symbol.getFile(elf_file) orelse return 0;
+// const shared = file.shared;
+// const s_sym = symbol.getSourceSymbol(elf_file);
+// const shdr = shared.getShdrs()[s_sym.st_shndx];
+// const alignment = @max(1, shdr.sh_addralign);
+// return if (s_sym.st_value == 0)
+// alignment
+// else
+// @min(alignment, try std.math.powi(u64, 2, @ctz(s_sym.st_value)));
+// }
+
+/// Allocates an extras record for this symbol and stores its handle.
+/// The stored field is `extra_index`; assigning to `symbol.extra` would
+/// collide with the accessor method below.
+pub fn addExtra(symbol: *Symbol, extra: Extra, elf_file: *Elf) !void {
+    symbol.extra_index = try elf_file.addSymbolExtra(extra);
+}
+
+/// The extras record for this symbol, if one was allocated via addExtra.
+pub fn extra(symbol: Symbol, elf_file: *Elf) ?Extra {
+    return elf_file.symbolExtra(symbol.extra_index);
+}
+
+/// Overwrites the extras record previously allocated for this symbol.
+pub fn setExtra(symbol: Symbol, extra: Extra, elf_file: *Elf) void {
+    elf_file.setSymbolExtra(symbol.extra_index, extra);
+}
+
+/// Lowers this symbol to an `Elf64_Sym` for the output symtab, with `st_name`
+/// supplied by the caller (offset into the output strtab).
+pub fn asElfSym(symbol: Symbol, st_name: u32, elf_file: *Elf) elf.Elf64_Sym {
+    const file_ptr = symbol.file(elf_file).?;
+    const s_sym = symbol.sourceSymbol(elf_file);
+    const st_type = symbol.@"type"(elf_file);
+    const st_bind: u8 = blk: {
+        if (symbol.isLocal()) break :blk 0;
+        if (symbol.flags.weak) break :blk elf.STB_WEAK;
+        // if (file_ptr == .shared_object) break :blk elf.STB_GLOBAL;
+        break :blk s_sym.st_bind();
+    };
+    const st_shndx = blk: {
+        // if (symbol.flags.copy_rel) break :blk elf_file.copy_rel_sect_index.?;
+        // if (file_ptr == .shared_object or s_sym.st_shndx == elf.SHN_UNDEF) break :blk elf.SHN_UNDEF;
+        if (symbol.atom(elf_file) == null and file_ptr != .linker_defined and file_ptr != .zig_module)
+            break :blk elf.SHN_ABS;
+        // The field is `output_section_index`; `shndx` does not exist.
+        break :blk symbol.output_section_index;
+    };
+    const st_value = blk: {
+        // if (symbol.flags.copy_rel) break :blk symbol.address(.{}, elf_file);
+        // if (file_ptr == .shared_object or s_sym.st_shndx == elf.SHN_UNDEF) {
+        //     if (symbol.flags.is_canonical) break :blk symbol.address(.{}, elf_file);
+        //     break :blk 0;
+        // }
+        // if (st_shndx == elf.SHN_ABS) break :blk symbol.value;
+        // const shdr = &elf_file.sections.items(.shdr)[st_shndx];
+        // if (Elf.shdrIsTls(shdr)) break :blk symbol.value - elf_file.getTlsAddress();
+        break :blk symbol.value;
+    };
+    return elf.Elf64_Sym{
+        .st_name = st_name,
+        .st_info = (st_bind << 4) | st_type,
+        .st_other = s_sym.st_other,
+        .st_shndx = st_shndx,
+        .st_value = st_value,
+        .st_size = s_sym.st_size,
+    };
+}
+
+/// Deliberately blocks `{}`/`{any}` formatting of a bare Symbol; use
+/// `fmt`/`fmtName` (which carry the Elf context) instead.
+pub fn format(
+ symbol: Symbol,
+ comptime unused_fmt_string: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+) !void {
+ _ = symbol;
+ _ = unused_fmt_string;
+ _ = options;
+ _ = writer;
+ @compileError("do not format symbols directly");
+}
+
+/// Pairs a symbol with the linker state for the formatters below.
+const FormatContext = struct {
+ symbol: Symbol,
+ elf_file: *Elf,
+};
+
+/// Formatter printing just the (possibly versioned) symbol name.
+pub fn fmtName(symbol: Symbol, elf_file: *Elf) std.fmt.Formatter(formatName) {
+ return .{ .data = .{
+ .symbol = symbol,
+ .elf_file = elf_file,
+ } };
+}
+
+/// Prints the symbol name; versioned names (from shared objects) are not
+/// supported yet and trip the `unreachable` below.
+fn formatName(
+    ctx: FormatContext,
+    comptime unused_fmt_string: []const u8,
+    options: std.fmt.FormatOptions,
+    writer: anytype,
+) !void {
+    _ = options;
+    _ = unused_fmt_string;
+    const elf_file = ctx.elf_file;
+    const symbol = ctx.symbol;
+    // Accessor is `name`; `getName` does not exist in this file.
+    try writer.writeAll(symbol.name(elf_file));
+    switch (symbol.version_index & elf.VERSYM_VERSION) {
+        elf.VER_NDX_LOCAL, elf.VER_NDX_GLOBAL => {},
+        else => {
+            unreachable;
+            // const shared = symbol.file(elf_file).?.shared_object;
+            // try writer.print("@{s}", .{shared.getVersionString(symbol.version_index)});
+        },
+    }
+}
+
+/// Formatter printing the full debug description of the symbol.
+pub fn fmt(symbol: Symbol, elf_file: *Elf) std.fmt.Formatter(format2) {
+ return .{ .data = .{
+ .symbol = symbol,
+ .elf_file = elf_file,
+ } };
+}
+
+/// Full debug description: index, name, value, placement, flags, owner file.
+fn format2(
+    ctx: FormatContext,
+    comptime unused_fmt_string: []const u8,
+    options: std.fmt.FormatOptions,
+    writer: anytype,
+) !void {
+    _ = options;
+    _ = unused_fmt_string;
+    const symbol = ctx.symbol;
+    try writer.print("%{d} : {s} : @{x}", .{ symbol.symbol_index, symbol.fmtName(ctx.elf_file), symbol.value });
+    // Captures renamed to *_ptr: `|file|` / `|atom|` would shadow the method
+    // declarations of the same names, which Zig rejects.
+    if (symbol.file(ctx.elf_file)) |file_ptr| {
+        if (symbol.isAbs(ctx.elf_file)) {
+            if (symbol.sourceSymbol(ctx.elf_file).st_shndx == elf.SHN_UNDEF) {
+                try writer.writeAll(" : undef");
+            } else {
+                try writer.writeAll(" : absolute");
+            }
+        } else if (symbol.output_section_index != 0) {
+            try writer.print(" : sect({d})", .{symbol.output_section_index});
+        }
+        if (symbol.atom(ctx.elf_file)) |atom_ptr| {
+            try writer.print(" : atom({d})", .{atom_ptr.atom_index});
+        }
+        var buf: [2]u8 = .{'_'} ** 2;
+        if (symbol.flags.@"export") buf[0] = 'E';
+        if (symbol.flags.import) buf[1] = 'I';
+        try writer.print(" : {s}", .{&buf});
+        if (symbol.flags.weak) try writer.writeAll(" : weak");
+        // Prongs must match File's actual tags.
+        switch (file_ptr) {
+            .zig_module => |x| try writer.print(" : zig_module({d})", .{x.index}),
+            .linker_defined => |x| try writer.print(" : linker_defined({d})", .{x.index}),
+            // .object => |x| try writer.print(" : object({d})", .{x.index}),
+            // .shared_object => |x| try writer.print(" : shared({d})", .{x.index}),
+        }
+    } else try writer.writeAll(" : unresolved");
+}
+
+pub const Flags = packed struct {
+ /// Whether the symbol is imported at runtime.
+ import: bool = false,
+
+ /// Whether the symbol is exported at runtime.
+ @"export": bool = false,
+
+ /// Whether this symbol is weak.
+ weak: bool = false,
+
+ /// Whether the symbol makes into the output symtab or not.
+ output_symtab: bool = false,
+
+ /// Whether the symbol contains GOT indirection.
+ got: bool = false,
+
+ /// Whether the symbol contains PLT indirection.
+ plt: bool = false,
+ /// Whether the PLT entry is canonical.
+ is_canonical: bool = false,
+
+ /// Whether the symbol contains COPYREL directive.
+ copy_rel: bool = false,
+ has_copy_rel: bool = false,
+ has_dynamic: bool = false,
+
+ /// Whether the symbol contains TLSGD indirection.
+ tlsgd: bool = false,
+
+ /// Whether the symbol contains GOTTP indirection.
+ gottp: bool = false,
+
+ /// Whether the symbol contains TLSDESC indirection.
+ tlsdesc: bool = false,
+};
+
+pub const Extra = struct {
+ got: u32 = 0,
+ plt: u32 = 0,
+ plt_got: u32 = 0,
+ dynamic: u32 = 0,
+ copy_rel: u32 = 0,
+ tlsgd: u32 = 0,
+ gottp: u32 = 0,
+ tlsdesc: u32 = 0,
+};
+
+pub const Index = u32;
+
+const std = @import("std");
+const assert = std.debug.assert;
+const elf = std.elf;
+
+const Atom = @import("Atom.zig");
+const Elf = @import("../Elf.zig");
+const File = @import("file.zig").File;
+const InternalObject = @import("InternalObject.zig");
+const Object = @import("Object.zig");
+const SharedObject = @import("SharedObject.zig");
+const Symbol = @This();
src/link/Elf/ZigModule.zig
@@ -0,0 +1,69 @@
+index: File.Index,
+elf_locals: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{},
+locals: std.ArrayListUnmanaged(Symbol.Index) = .{},
+elf_globals: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{},
+globals: std.ArrayListUnmanaged(Symbol.Index) = .{},
+alive: bool = true,
+
+// output_symtab_size: Elf.SymtabSize = .{},
+
+/// Frees all four symbol tables. Deallocation cannot fail.
+pub fn deinit(self: *ZigModule, allocator: Allocator) void {
+ self.elf_locals.deinit(allocator);
+ self.locals.deinit(allocator);
+ self.elf_globals.deinit(allocator);
+ self.globals.deinit(allocator);
+}
+
+/// Wraps this module in the generic `File` handle.
+pub fn asFile(self: *ZigModule) File {
+ return .{ .zig_module = self };
+}
+
+/// Linker-symbol indices of this module's local symbols.
+pub fn getLocals(self: *ZigModule) []const Symbol.Index {
+ return self.locals.items;
+}
+
+/// Linker-symbol indices of this module's global symbols.
+pub fn getGlobals(self: *ZigModule) []const Symbol.Index {
+ return self.globals.items;
+}
+
+/// Debug formatter dumping this module's locals and globals.
+pub fn fmtSymtab(self: *ZigModule, elf_file: *Elf) std.fmt.Formatter(formatSymtab) {
+ return .{ .data = .{
+ .self = self,
+ .elf_file = elf_file,
+ } };
+}
+
+/// Bundles the module pointer with the linker state for formatSymtab.
+const FormatContext = struct {
+ self: *ZigModule,
+ elf_file: *Elf,
+};
+
+/// Writes a human-readable dump of this module's locals and globals.
+fn formatSymtab(
+    ctx: FormatContext,
+    comptime unused_fmt_string: []const u8,
+    options: std.fmt.FormatOptions,
+    writer: anytype,
+) !void {
+    _ = unused_fmt_string;
+    _ = options;
+    try writer.writeAll(" locals\n");
+    for (ctx.self.getLocals()) |index| {
+        const local = ctx.elf_file.symbol(index);
+        try writer.print(" {}\n", .{local.fmt(ctx.elf_file)});
+    }
+    try writer.writeAll(" globals\n");
+    for (ctx.self.getGlobals()) |index| {
+        // Use the same `symbol` accessor as the locals loop above;
+        // `getSymbol` does not exist on Elf in this commit.
+        const global = ctx.elf_file.symbol(index);
+        try writer.print(" {}\n", .{global.fmt(ctx.elf_file)});
+    }
+}
+
+const std = @import("std");
+const elf = std.elf;
+
+const Allocator = std.mem.Allocator;
+const Elf = @import("../Elf.zig");
+const File = @import("file.zig").File;
+const ZigModule = @This();
+// const Object = @import("Object.zig");
+const Symbol = @import("Symbol.zig");
src/link/Elf.zig
@@ -6,6 +6,10 @@ ptr_width: PtrWidth,
/// If this is not null, an object file is created by LLVM and linked with LLD afterwards.
llvm_object: ?*LlvmObject = null,
+files: std.MultiArrayList(File.Entry) = .{},
+zig_module_index: ?File.Index = null,
+linker_defined_index: ?File.Index = null,
+
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
sections: std.MultiArrayList(Section) = .{},
@@ -51,16 +55,12 @@ debug_line_section_index: ?u16 = null,
shstrtab_section_index: ?u16 = null,
strtab_section_index: ?u16 = null,
-/// The same order as in the file. ELF requires global symbols to all be after the
-/// local symbols, they cannot be mixed. So we must buffer all the global symbols and
-/// write them at the end. These are only the local symbols. The length of this array
-/// is the value used for sh_info in the .symtab section.
-locals: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{},
-globals: std.ArrayListUnmanaged(u32) = .{},
+symbols: std.ArrayListUnmanaged(Symbol) = .{},
+globals: std.ArrayListUnmanaged(Symbol.Index) = .{},
resolver: std.StringHashMapUnmanaged(u32) = .{},
unresolved: std.AutoArrayHashMapUnmanaged(u32, void) = .{},
-locals_free_list: std.ArrayListUnmanaged(u32) = .{},
+symbols_free_list: std.ArrayListUnmanaged(u32) = .{},
globals_free_list: std.ArrayListUnmanaged(u32) = .{},
got_table: TableSection(u32) = .{},
@@ -77,7 +77,7 @@ debug_aranges_section_dirty: bool = false,
debug_info_header_dirty: bool = false,
debug_line_header_dirty: bool = false,
-error_flags: File.ErrorFlags = File.ErrorFlags{},
+error_flags: link.File.ErrorFlags = link.File.ErrorFlags{},
/// Table of tracked LazySymbols.
lazy_syms: LazySymbolTable = .{},
@@ -153,9 +153,11 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
self.shdr_table_dirty = true;
// Index 0 is always a null symbol.
- try self.locals.append(allocator, null_sym);
+ try self.symbols.append(allocator, .{});
// Allocate atom index 0 to null atom
try self.atoms.append(allocator, .{});
+ // Append null file at index 0
+ try self.files.append(allocator, .null);
// There must always be a null section in index 0
try self.sections.append(allocator, .{
.shdr = .{
@@ -222,6 +224,15 @@ pub fn deinit(self: *Elf) void {
if (self.llvm_object) |llvm_object| llvm_object.destroy(gpa);
+ for (self.files.items(.tags), self.files.items(.data)) |tag, *data| switch (tag) {
+ .null => {},
+ .zig_module => data.zig_module.deinit(gpa),
+ .linker_defined => data.linker_defined.deinit(gpa),
+ // .object => data.object.deinit(gpa),
+ // .shared_object => data.shared_object.deinit(gpa),
+ };
+ self.files.deinit(gpa);
+
for (self.sections.items(.free_list)) |*free_list| {
free_list.deinit(gpa);
}
@@ -230,10 +241,10 @@ pub fn deinit(self: *Elf) void {
self.program_headers.deinit(gpa);
self.shstrtab.deinit(gpa);
self.strtab.deinit(gpa);
- self.locals.deinit(gpa);
+ self.symbols.deinit(gpa);
+ self.symbols_free_list.deinit(gpa);
self.globals.deinit(gpa);
self.globals_free_list.deinit(gpa);
- self.locals_free_list.deinit(gpa);
self.got_table.deinit(gpa);
self.unresolved.deinit(gpa);
@@ -278,7 +289,7 @@ pub fn deinit(self: *Elf) void {
}
}
-pub fn getDeclVAddr(self: *Elf, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 {
+pub fn getDeclVAddr(self: *Elf, decl_index: Module.Decl.Index, reloc_info: link.File.RelocInfo) !u64 {
assert(self.llvm_object == null);
const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
@@ -653,7 +664,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.sh_size = file_size,
// The section header index of the associated string table.
.sh_link = self.strtab_section_index.?,
- .sh_info = @as(u32, @intCast(self.locals.items.len)),
+ .sh_info = @as(u32, @intCast(self.symbols.items.len)),
.sh_addralign = min_align,
.sh_entsize = each_size,
},
@@ -818,7 +829,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
{
// Iterate over symbols, populating free_list and last_text_block.
- if (self.locals.items.len != 1) {
+ if (self.symbols.items.len != 1) {
@panic("TODO implement setting up free_list and last_text_block from existing ELF file");
}
// We are starting with an empty file. The default values are correct, null and empty list.
@@ -975,280 +986,294 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
// corresponds to the Zig source code.
const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
- if (self.lazy_syms.getPtr(.none)) |metadata| {
- // Most lazy symbols can be updated on first use, but
- // anyerror needs to wait for everything to be flushed.
- if (metadata.text_state != .unused) self.updateLazySymbolAtom(
- File.LazySymbol.initDecl(.code, null, module),
- metadata.text_atom,
- self.text_section_index.?,
- ) catch |err| return switch (err) {
- error.CodegenFail => error.FlushFailure,
- else => |e| e,
- };
- if (metadata.rodata_state != .unused) self.updateLazySymbolAtom(
- File.LazySymbol.initDecl(.const_data, null, module),
- metadata.rodata_atom,
- self.rodata_section_index.?,
- ) catch |err| return switch (err) {
- error.CodegenFail => error.FlushFailure,
- else => |e| e,
- };
- }
- for (self.lazy_syms.values()) |*metadata| {
- if (metadata.text_state != .unused) metadata.text_state = .flushed;
- if (metadata.rodata_state != .unused) metadata.rodata_state = .flushed;
- }
-
- const target_endian = self.base.options.target.cpu.arch.endian();
- const foreign_endian = target_endian != builtin.cpu.arch.endian();
-
- if (self.dwarf) |*dw| {
- try dw.flushModule(module);
- }
-
- {
- var it = self.relocs.iterator();
- while (it.next()) |entry| {
- const atom_index = entry.key_ptr.*;
- const relocs = entry.value_ptr.*;
- const atom_ptr = self.atom(atom_index);
- const source_sym = atom_ptr.symbol(self);
- const source_shdr = self.sections.items(.shdr)[source_sym.st_shndx];
-
- log.debug("relocating '{?s}'", .{self.strtab.get(source_sym.st_name)});
-
- for (relocs.items) |*reloc| {
- const target_sym = self.locals.items[reloc.target];
- const target_vaddr = target_sym.st_value + reloc.addend;
-
- if (target_vaddr == reloc.prev_vaddr) continue;
-
- const section_offset = (source_sym.st_value + reloc.offset) - source_shdr.sh_addr;
- const file_offset = source_shdr.sh_offset + section_offset;
-
- log.debug(" ({x}: [() => 0x{x}] ({?s}))", .{
- reloc.offset,
- target_vaddr,
- self.strtab.get(target_sym.st_name),
- });
-
- switch (self.ptr_width) {
- .p32 => try self.base.file.?.pwriteAll(mem.asBytes(&@as(u32, @intCast(target_vaddr))), file_offset),
- .p64 => try self.base.file.?.pwriteAll(mem.asBytes(&target_vaddr), file_offset),
- }
-
- reloc.prev_vaddr = target_vaddr;
- }
- }
- }
-
- try self.writeSymbols();
-
- if (build_options.enable_logging) {
- self.logSymtab();
- }
-
- if (self.dwarf) |*dw| {
- if (self.debug_abbrev_section_dirty) {
- try dw.writeDbgAbbrev();
- if (!self.shdr_table_dirty) {
- // Then it won't get written with the others and we need to do it.
- try self.writeSectHeader(self.debug_abbrev_section_index.?);
- }
- self.debug_abbrev_section_dirty = false;
- }
-
- if (self.debug_info_header_dirty) {
- // Currently only one compilation unit is supported, so the address range is simply
- // identical to the main program header virtual address and memory size.
- const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- const low_pc = text_phdr.p_vaddr;
- const high_pc = text_phdr.p_vaddr + text_phdr.p_memsz;
- try dw.writeDbgInfoHeader(module, low_pc, high_pc);
- self.debug_info_header_dirty = false;
- }
-
- if (self.debug_aranges_section_dirty) {
- // Currently only one compilation unit is supported, so the address range is simply
- // identical to the main program header virtual address and memory size.
- const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- try dw.writeDbgAranges(text_phdr.p_vaddr, text_phdr.p_memsz);
- if (!self.shdr_table_dirty) {
- // Then it won't get written with the others and we need to do it.
- try self.writeSectHeader(self.debug_aranges_section_index.?);
- }
- self.debug_aranges_section_dirty = false;
- }
-
- if (self.debug_line_header_dirty) {
- try dw.writeDbgLineHeader();
- self.debug_line_header_dirty = false;
- }
- }
-
- if (self.phdr_table_dirty) {
- const phsize: u64 = switch (self.ptr_width) {
- .p32 => @sizeOf(elf.Elf32_Phdr),
- .p64 => @sizeOf(elf.Elf64_Phdr),
- };
-
- const phdr_table_index = self.phdr_table_index.?;
- const phdr_table = &self.program_headers.items[phdr_table_index];
- const phdr_table_load = &self.program_headers.items[self.phdr_table_load_index.?];
-
- const allocated_size = self.allocatedSize(phdr_table.p_offset);
- const needed_size = self.program_headers.items.len * phsize;
-
- if (needed_size > allocated_size) {
- phdr_table.p_offset = 0; // free the space
- phdr_table.p_offset = self.findFreeSpace(needed_size, @as(u32, @intCast(phdr_table.p_align)));
- }
-
- phdr_table_load.p_offset = mem.alignBackward(u64, phdr_table.p_offset, phdr_table_load.p_align);
- const load_align_offset = phdr_table.p_offset - phdr_table_load.p_offset;
- phdr_table_load.p_filesz = load_align_offset + needed_size;
- phdr_table_load.p_memsz = load_align_offset + needed_size;
-
- phdr_table.p_filesz = needed_size;
- phdr_table.p_vaddr = phdr_table_load.p_vaddr + load_align_offset;
- phdr_table.p_paddr = phdr_table_load.p_paddr + load_align_offset;
- phdr_table.p_memsz = needed_size;
-
- switch (self.ptr_width) {
- .p32 => {
- const buf = try gpa.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
- defer gpa.free(buf);
-
- for (buf, 0..) |*phdr, i| {
- phdr.* = progHeaderTo32(self.program_headers.items[i]);
- if (foreign_endian) {
- mem.byteSwapAllFields(elf.Elf32_Phdr, phdr);
- }
- }
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset);
- },
- .p64 => {
- const buf = try gpa.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
- defer gpa.free(buf);
-
- for (buf, 0..) |*phdr, i| {
- phdr.* = self.program_headers.items[i];
- if (foreign_endian) {
- mem.byteSwapAllFields(elf.Elf64_Phdr, phdr);
- }
- }
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset);
- },
- }
-
- // We don't actually care if the phdr load section overlaps, only the phdr section matters.
- phdr_table_load.p_offset = 0;
- phdr_table_load.p_filesz = 0;
-
- self.phdr_table_dirty = false;
- }
-
- {
- const shdr_index = self.shstrtab_section_index.?;
- if (self.shstrtab_dirty or self.shstrtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
- try self.growNonAllocSection(shdr_index, self.shstrtab.buffer.items.len, 1, false);
- const shstrtab_sect = self.sections.items(.shdr)[shdr_index];
- try self.base.file.?.pwriteAll(self.shstrtab.buffer.items, shstrtab_sect.sh_offset);
- self.shstrtab_dirty = false;
- }
- }
-
- {
- const shdr_index = self.strtab_section_index.?;
- if (self.strtab_dirty or self.strtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
- try self.growNonAllocSection(shdr_index, self.strtab.buffer.items.len, 1, false);
- const strtab_sect = self.sections.items(.shdr)[shdr_index];
- try self.base.file.?.pwriteAll(self.strtab.buffer.items, strtab_sect.sh_offset);
- self.strtab_dirty = false;
- }
- }
-
- if (self.dwarf) |dwarf| {
- const shdr_index = self.debug_str_section_index.?;
- if (self.debug_strtab_dirty or dwarf.strtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
- try self.growNonAllocSection(shdr_index, dwarf.strtab.buffer.items.len, 1, false);
- const debug_strtab_sect = self.sections.items(.shdr)[shdr_index];
- try self.base.file.?.pwriteAll(dwarf.strtab.buffer.items, debug_strtab_sect.sh_offset);
- self.debug_strtab_dirty = false;
- }
- }
-
- if (self.shdr_table_dirty) {
- const shsize: u64 = switch (self.ptr_width) {
- .p32 => @sizeOf(elf.Elf32_Shdr),
- .p64 => @sizeOf(elf.Elf64_Shdr),
- };
- const shalign: u16 = switch (self.ptr_width) {
- .p32 => @alignOf(elf.Elf32_Shdr),
- .p64 => @alignOf(elf.Elf64_Shdr),
- };
- const allocated_size = self.allocatedSize(self.shdr_table_offset.?);
- const needed_size = self.sections.slice().len * shsize;
-
- if (needed_size > allocated_size) {
- self.shdr_table_offset = null; // free the space
- self.shdr_table_offset = self.findFreeSpace(needed_size, shalign);
- }
+ self.zig_module_index = blk: {
+ const index = @as(File.Index, @intCast(try self.files.addOne(gpa)));
+ self.files.set(index, .{ .zig_module = .{ .index = index } });
+ break :blk index;
+ };
- switch (self.ptr_width) {
- .p32 => {
- const slice = self.sections.slice();
- const buf = try gpa.alloc(elf.Elf32_Shdr, slice.len);
- defer gpa.free(buf);
+ self.linker_defined_index = blk: {
+ const index = @as(File.Index, @intCast(try self.files.addOne(gpa)));
+ self.files.set(index, .{ .linker_defined = .{ .index = index } });
+ break :blk index;
+ };
- for (buf, 0..) |*shdr, i| {
- shdr.* = sectHeaderTo32(slice.items(.shdr)[i]);
- log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
- if (foreign_endian) {
- mem.byteSwapAllFields(elf.Elf32_Shdr, shdr);
- }
- }
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
- },
- .p64 => {
- const slice = self.sections.slice();
- const buf = try gpa.alloc(elf.Elf64_Shdr, slice.len);
- defer gpa.free(buf);
-
- for (buf, 0..) |*shdr, i| {
- shdr.* = slice.items(.shdr)[i];
- log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
- if (foreign_endian) {
- mem.byteSwapAllFields(elf.Elf64_Shdr, shdr);
- }
- }
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
- },
- }
- self.shdr_table_dirty = false;
- }
- if (self.entry_addr == null and self.base.options.effectiveOutputMode() == .Exe) {
- log.debug("flushing. no_entry_point_found = true", .{});
- self.error_flags.no_entry_point_found = true;
- } else {
- log.debug("flushing. no_entry_point_found = false", .{});
- self.error_flags.no_entry_point_found = false;
- try self.writeElfHeader();
- }
-
- // The point of flush() is to commit changes, so in theory, nothing should
- // be dirty after this. However, it is possible for some things to remain
- // dirty because they fail to be written in the event of compile errors,
- // such as debug_line_header_dirty and debug_info_header_dirty.
- assert(!self.debug_abbrev_section_dirty);
- assert(!self.debug_aranges_section_dirty);
- assert(!self.phdr_table_dirty);
- assert(!self.shdr_table_dirty);
- assert(!self.shstrtab_dirty);
- assert(!self.strtab_dirty);
- assert(!self.debug_strtab_dirty);
- assert(!self.got_table_count_dirty);
+ std.debug.print("{}\n", .{self.dumpState()});
+
+ // if (self.lazy_syms.getPtr(.none)) |metadata| {
+ // // Most lazy symbols can be updated on first use, but
+ // // anyerror needs to wait for everything to be flushed.
+ // if (metadata.text_state != .unused) self.updateLazySymbolAtom(
+ // link.File.LazySymbol.initDecl(.code, null, module),
+ // metadata.text_atom,
+ // self.text_section_index.?,
+ // ) catch |err| return switch (err) {
+ // error.CodegenFail => error.FlushFailure,
+ // else => |e| e,
+ // };
+ // if (metadata.rodata_state != .unused) self.updateLazySymbolAtom(
+ // link.File.LazySymbol.initDecl(.const_data, null, module),
+ // metadata.rodata_atom,
+ // self.rodata_section_index.?,
+ // ) catch |err| return switch (err) {
+ // error.CodegenFail => error.FlushFailure,
+ // else => |e| e,
+ // };
+ // }
+ // for (self.lazy_syms.values()) |*metadata| {
+ // if (metadata.text_state != .unused) metadata.text_state = .flushed;
+ // if (metadata.rodata_state != .unused) metadata.rodata_state = .flushed;
+ // }
+
+ // const target_endian = self.base.options.target.cpu.arch.endian();
+ // const foreign_endian = target_endian != builtin.cpu.arch.endian();
+
+ // if (self.dwarf) |*dw| {
+ // try dw.flushModule(module);
+ // }
+
+ // {
+ // var it = self.relocs.iterator();
+ // while (it.next()) |entry| {
+ // const atom_index = entry.key_ptr.*;
+ // const relocs = entry.value_ptr.*;
+ // const atom_ptr = self.atom(atom_index);
+ // const source_sym = atom_ptr.symbol(self);
+ // const source_shdr = self.sections.items(.shdr)[source_sym.st_shndx];
+
+ // log.debug("relocating '{?s}'", .{self.strtab.get(source_sym.st_name)});
+
+ // for (relocs.items) |*reloc| {
+ // const target_sym = self.locals.items[reloc.target];
+ // const target_vaddr = target_sym.st_value + reloc.addend;
+
+ // if (target_vaddr == reloc.prev_vaddr) continue;
+
+ // const section_offset = (source_sym.st_value + reloc.offset) - source_shdr.sh_addr;
+ // const file_offset = source_shdr.sh_offset + section_offset;
+
+ // log.debug(" ({x}: [() => 0x{x}] ({?s}))", .{
+ // reloc.offset,
+ // target_vaddr,
+ // self.strtab.get(target_sym.st_name),
+ // });
+
+ // switch (self.ptr_width) {
+ // .p32 => try self.base.file.?.pwriteAll(mem.asBytes(&@as(u32, @intCast(target_vaddr))), file_offset),
+ // .p64 => try self.base.file.?.pwriteAll(mem.asBytes(&target_vaddr), file_offset),
+ // }
+
+ // reloc.prev_vaddr = target_vaddr;
+ // }
+ // }
+ // }
+
+ // try self.writeSymbols();
+
+ // if (build_options.enable_logging) {
+ // self.logSymtab();
+ // }
+
+ // if (self.dwarf) |*dw| {
+ // if (self.debug_abbrev_section_dirty) {
+ // try dw.writeDbgAbbrev();
+ // if (!self.shdr_table_dirty) {
+ // // Then it won't get written with the others and we need to do it.
+ // try self.writeSectHeader(self.debug_abbrev_section_index.?);
+ // }
+ // self.debug_abbrev_section_dirty = false;
+ // }
+
+ // if (self.debug_info_header_dirty) {
+ // // Currently only one compilation unit is supported, so the address range is simply
+ // // identical to the main program header virtual address and memory size.
+ // const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
+ // const low_pc = text_phdr.p_vaddr;
+ // const high_pc = text_phdr.p_vaddr + text_phdr.p_memsz;
+ // try dw.writeDbgInfoHeader(module, low_pc, high_pc);
+ // self.debug_info_header_dirty = false;
+ // }
+
+ // if (self.debug_aranges_section_dirty) {
+ // // Currently only one compilation unit is supported, so the address range is simply
+ // // identical to the main program header virtual address and memory size.
+ // const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
+ // try dw.writeDbgAranges(text_phdr.p_vaddr, text_phdr.p_memsz);
+ // if (!self.shdr_table_dirty) {
+ // // Then it won't get written with the others and we need to do it.
+ // try self.writeSectHeader(self.debug_aranges_section_index.?);
+ // }
+ // self.debug_aranges_section_dirty = false;
+ // }
+
+ // if (self.debug_line_header_dirty) {
+ // try dw.writeDbgLineHeader();
+ // self.debug_line_header_dirty = false;
+ // }
+ // }
+
+ // if (self.phdr_table_dirty) {
+ // const phsize: u64 = switch (self.ptr_width) {
+ // .p32 => @sizeOf(elf.Elf32_Phdr),
+ // .p64 => @sizeOf(elf.Elf64_Phdr),
+ // };
+
+ // const phdr_table_index = self.phdr_table_index.?;
+ // const phdr_table = &self.program_headers.items[phdr_table_index];
+ // const phdr_table_load = &self.program_headers.items[self.phdr_table_load_index.?];
+
+ // const allocated_size = self.allocatedSize(phdr_table.p_offset);
+ // const needed_size = self.program_headers.items.len * phsize;
+
+ // if (needed_size > allocated_size) {
+ // phdr_table.p_offset = 0; // free the space
+ // phdr_table.p_offset = self.findFreeSpace(needed_size, @as(u32, @intCast(phdr_table.p_align)));
+ // }
+
+ // phdr_table_load.p_offset = mem.alignBackward(u64, phdr_table.p_offset, phdr_table_load.p_align);
+ // const load_align_offset = phdr_table.p_offset - phdr_table_load.p_offset;
+ // phdr_table_load.p_filesz = load_align_offset + needed_size;
+ // phdr_table_load.p_memsz = load_align_offset + needed_size;
+
+ // phdr_table.p_filesz = needed_size;
+ // phdr_table.p_vaddr = phdr_table_load.p_vaddr + load_align_offset;
+ // phdr_table.p_paddr = phdr_table_load.p_paddr + load_align_offset;
+ // phdr_table.p_memsz = needed_size;
+
+ // switch (self.ptr_width) {
+ // .p32 => {
+ // const buf = try gpa.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
+ // defer gpa.free(buf);
+
+ // for (buf, 0..) |*phdr, i| {
+ // phdr.* = progHeaderTo32(self.program_headers.items[i]);
+ // if (foreign_endian) {
+ // mem.byteSwapAllFields(elf.Elf32_Phdr, phdr);
+ // }
+ // }
+ // try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset);
+ // },
+ // .p64 => {
+ // const buf = try gpa.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
+ // defer gpa.free(buf);
+
+ // for (buf, 0..) |*phdr, i| {
+ // phdr.* = self.program_headers.items[i];
+ // if (foreign_endian) {
+ // mem.byteSwapAllFields(elf.Elf64_Phdr, phdr);
+ // }
+ // }
+ // try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset);
+ // },
+ // }
+
+ // // We don't actually care if the phdr load section overlaps, only the phdr section matters.
+ // phdr_table_load.p_offset = 0;
+ // phdr_table_load.p_filesz = 0;
+
+ // self.phdr_table_dirty = false;
+ // }
+
+ // {
+ // const shdr_index = self.shstrtab_section_index.?;
+ // if (self.shstrtab_dirty or self.shstrtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
+ // try self.growNonAllocSection(shdr_index, self.shstrtab.buffer.items.len, 1, false);
+ // const shstrtab_sect = self.sections.items(.shdr)[shdr_index];
+ // try self.base.file.?.pwriteAll(self.shstrtab.buffer.items, shstrtab_sect.sh_offset);
+ // self.shstrtab_dirty = false;
+ // }
+ // }
+
+ // {
+ // const shdr_index = self.strtab_section_index.?;
+ // if (self.strtab_dirty or self.strtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
+ // try self.growNonAllocSection(shdr_index, self.strtab.buffer.items.len, 1, false);
+ // const strtab_sect = self.sections.items(.shdr)[shdr_index];
+ // try self.base.file.?.pwriteAll(self.strtab.buffer.items, strtab_sect.sh_offset);
+ // self.strtab_dirty = false;
+ // }
+ // }
+
+ // if (self.dwarf) |dwarf| {
+ // const shdr_index = self.debug_str_section_index.?;
+ // if (self.debug_strtab_dirty or dwarf.strtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
+ // try self.growNonAllocSection(shdr_index, dwarf.strtab.buffer.items.len, 1, false);
+ // const debug_strtab_sect = self.sections.items(.shdr)[shdr_index];
+ // try self.base.file.?.pwriteAll(dwarf.strtab.buffer.items, debug_strtab_sect.sh_offset);
+ // self.debug_strtab_dirty = false;
+ // }
+ // }
+
+ // if (self.shdr_table_dirty) {
+ // const shsize: u64 = switch (self.ptr_width) {
+ // .p32 => @sizeOf(elf.Elf32_Shdr),
+ // .p64 => @sizeOf(elf.Elf64_Shdr),
+ // };
+ // const shalign: u16 = switch (self.ptr_width) {
+ // .p32 => @alignOf(elf.Elf32_Shdr),
+ // .p64 => @alignOf(elf.Elf64_Shdr),
+ // };
+ // const allocated_size = self.allocatedSize(self.shdr_table_offset.?);
+ // const needed_size = self.sections.slice().len * shsize;
+
+ // if (needed_size > allocated_size) {
+ // self.shdr_table_offset = null; // free the space
+ // self.shdr_table_offset = self.findFreeSpace(needed_size, shalign);
+ // }
+
+ // switch (self.ptr_width) {
+ // .p32 => {
+ // const slice = self.sections.slice();
+ // const buf = try gpa.alloc(elf.Elf32_Shdr, slice.len);
+ // defer gpa.free(buf);
+
+ // for (buf, 0..) |*shdr, i| {
+ // shdr.* = sectHeaderTo32(slice.items(.shdr)[i]);
+ // log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
+ // if (foreign_endian) {
+ // mem.byteSwapAllFields(elf.Elf32_Shdr, shdr);
+ // }
+ // }
+ // try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
+ // },
+ // .p64 => {
+ // const slice = self.sections.slice();
+ // const buf = try gpa.alloc(elf.Elf64_Shdr, slice.len);
+ // defer gpa.free(buf);
+
+ // for (buf, 0..) |*shdr, i| {
+ // shdr.* = slice.items(.shdr)[i];
+ // log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
+ // if (foreign_endian) {
+ // mem.byteSwapAllFields(elf.Elf64_Shdr, shdr);
+ // }
+ // }
+ // try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
+ // },
+ // }
+ // self.shdr_table_dirty = false;
+ // }
+ // if (self.entry_addr == null and self.base.options.effectiveOutputMode() == .Exe) {
+ // log.debug("flushing. no_entry_point_found = true", .{});
+ // self.error_flags.no_entry_point_found = true;
+ // } else {
+ // log.debug("flushing. no_entry_point_found = false", .{});
+ // self.error_flags.no_entry_point_found = false;
+ // try self.writeElfHeader();
+ // }
+
+ // // The point of flush() is to commit changes, so in theory, nothing should
+ // // be dirty after this. However, it is possible for some things to remain
+ // // dirty because they fail to be written in the event of compile errors,
+ // // such as debug_line_header_dirty and debug_info_header_dirty.
+ // assert(!self.debug_abbrev_section_dirty);
+ // assert(!self.debug_aranges_section_dirty);
+ // assert(!self.phdr_table_dirty);
+ // assert(!self.shdr_table_dirty);
+ // assert(!self.shstrtab_dirty);
+ // assert(!self.strtab_dirty);
+ // assert(!self.debug_strtab_dirty);
+ // assert(!self.got_table_count_dirty);
}
fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !void {
@@ -2347,7 +2372,7 @@ pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void {
}
}
-pub fn getOrCreateAtomForLazySymbol(self: *Elf, sym: File.LazySymbol) !Atom.Index {
+pub fn getOrCreateAtomForLazySymbol(self: *Elf, sym: link.File.LazySymbol) !Atom.Index {
const mod = self.base.options.module.?;
const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl(mod));
errdefer _ = if (!gop.found_existing) self.lazy_syms.pop();
@@ -2559,7 +2584,7 @@ pub fn updateDecl(
self: *Elf,
mod: *Module,
decl_index: Module.Decl.Index,
-) File.UpdateDeclError!void {
+) link.File.UpdateDeclError!void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@@ -2634,7 +2659,7 @@ pub fn updateDecl(
fn updateLazySymbolAtom(
self: *Elf,
- sym: File.LazySymbol,
+ sym: link.File.LazySymbol,
atom_index: Atom.Index,
shdr_index: u16,
) !void {
@@ -2776,7 +2801,7 @@ pub fn updateDeclExports(
mod: *Module,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
-) File.UpdateDeclExportsError!void {
+) link.File.UpdateDeclExportsError!void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@@ -3420,6 +3445,18 @@ pub fn atomIndexForSymbol(self: *Elf, sym_index: u32) ?Atom.Index {
return self.atom_by_index_table.get(sym_index);
}
+/// Returns a formatter that prints the linker's internal state via fmtDumpState.
+fn dumpState(self: *Elf) std.fmt.Formatter(fmtDumpState) {
+    return .{ .data = self };
+}
+
+/// Formatter callback for dumpState. Currently a stub: emits nothing.
+/// Parameters must still be discarded explicitly, since unused function
+/// parameters are compile errors in Zig.
+fn fmtDumpState(
+    self: *Elf,
+    comptime unused_fmt_string: []const u8,
+    options: std.fmt.FormatOptions,
+    writer: anytype,
+) !void {
+    _ = self;
+    _ = unused_fmt_string;
+    _ = options;
+    _ = writer;
+}
+
pub const null_sym = elf.Elf64_Sym{
.st_name = 0,
.st_info = 0,
@@ -3431,7 +3468,7 @@ pub const null_sym = elf.Elf64_Sym{
const default_entry_addr = 0x8000000;
-pub const base_tag: File.Tag = .elf;
+pub const base_tag: link.File.Tag = .elf;
const Section = struct {
shdr: elf.Elf64_Shdr,
@@ -3506,7 +3543,8 @@ pub const Atom = @import("Elf/Atom.zig");
const Cache = std.Build.Cache;
const Compilation = @import("../Compilation.zig");
const Dwarf = @import("Dwarf.zig");
-const File = link.File;
+const File = @import("Elf/File.zig");
+const LinkerDefined = @import("Elf/LinkerDefined.zig");
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Module = @import("../Module.zig");
@@ -3517,3 +3555,4 @@ const TableSection = @import("table_section.zig").TableSection;
const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");
const Value = @import("../value.zig").Value;
+const ZigModule = @import("Elf/ZigModule.zig");