Commit e10a2018a7
Changed files (3)
src/link/MachO/Atom.zig
@@ -1143,10 +1143,10 @@ fn format2(
_ = unused_fmt_string;
const atom = ctx.atom;
const macho_file = ctx.macho_file;
- try writer.print("atom({d}) : {s} : @{x} : sect({d}) : align({x}) : size({x}) : thunk({d})", .{
- atom.atom_index, atom.getName(macho_file), atom.value,
- atom.out_n_sect, atom.alignment, atom.size,
- atom.thunk_index,
+ try writer.print("atom({d}) : {s} : @{x} : sect({d}) : align({x}) : size({x}) : nreloc({d}) : thunk({d})", .{
+ atom.atom_index, atom.getName(macho_file), atom.value,
+ atom.out_n_sect, atom.alignment, atom.size,
+ atom.getRelocs(macho_file).len, atom.thunk_index,
});
if (!atom.flags.alive) try writer.writeAll(" : [*]");
if (atom.unwind_records.len > 0) {
src/link/MachO/relocatable.zig
@@ -63,14 +63,15 @@ pub fn flush(macho_file: *MachO, comp: *Compilation, module_obj_path: ?[]const u
allocateSegment(macho_file);
macho_file.allocateAtoms();

- state_log.debug("{}", .{macho_file.dumpState()});
-
var off = off: {
const seg = macho_file.segments.items[0];
const off = math.cast(u32, seg.fileoff + seg.filesize) orelse return error.Overflow;
break :off mem.alignForward(u32, off, @alignOf(macho.relocation_info));
};
off = allocateSectionsRelocs(macho_file, off);
+
+ state_log.debug("{}", .{macho_file.dumpState()});
+
try macho_file.calcSymtabSize();
try writeAtoms(macho_file);
try writeCompactUnwind(macho_file);
@@ -195,6 +196,16 @@ fn calcSectionSizes(macho_file: *MachO) !void {
sect.@"align" = 3;
sect.nreloc = eh_frame.calcNumRelocs(macho_file);
}
+
+ if (macho_file.getZigObject()) |zo| {
+ for (zo.atoms.items) |atom_index| {
+ const atom = macho_file.getAtom(atom_index) orelse continue;
+ if (!atom.flags.alive) continue;
+ const header = &macho_file.sections.items(.header)[atom.out_n_sect];
+ if (mem.indexOf(u8, header.segName(), "ZIG") == null) continue;
+ header.nreloc += atom.calcNumRelocs(macho_file);
+ }
+ }
}

fn calcCompactUnwindSize(macho_file: *MachO, sect_index: u8) void {
@@ -303,6 +314,7 @@ fn writeAtoms(macho_file: *MachO) !void {
for (slice.items(.header), slice.items(.atoms)) |header, atoms| {
if (atoms.items.len == 0) continue;
if (header.isZerofill()) continue;
+ if (mem.indexOf(u8, header.segName(), "ZIG") != null) continue;
const size = math.cast(usize, header.size) orelse return error.Overflow;
const code = try gpa.alloc(u8, size);
@@ -330,6 +342,63 @@ fn writeAtoms(macho_file: *MachO) !void {
try macho_file.base.file.?.pwriteAll(code, header.offset);
try macho_file.base.file.?.pwriteAll(mem.sliceAsBytes(relocs.items), header.reloff);
}
+
+ if (macho_file.getZigObject()) |zo| {
+ // TODO: this is ugly; perhaps we should aggregate before?
+ var relocs = std.AutoArrayHashMap(u8, std.ArrayList(macho.relocation_info)).init(gpa);
+ defer {
+ for (relocs.values()) |*list| {
+ list.deinit();
+ }
+ relocs.deinit();
+ }
+
+ for (macho_file.sections.items(.header), 0..) |header, n_sect| {
+ if (header.isZerofill()) continue;
+ if (mem.indexOf(u8, header.segName(), "ZIG") == null) continue;
+ const gop = try relocs.getOrPut(@intCast(n_sect));
+ if (gop.found_existing) continue;
+ gop.value_ptr.* = try std.ArrayList(macho.relocation_info).initCapacity(gpa, header.nreloc);
+ }
+
+ for (zo.atoms.items) |atom_index| {
+ const atom = macho_file.getAtom(atom_index) orelse continue;
+ if (!atom.flags.alive) continue;
+ const header = macho_file.sections.items(.header)[atom.out_n_sect];
+ if (header.isZerofill()) continue;
+ if (mem.indexOf(u8, header.segName(), "ZIG") == null) continue;
+ if (atom.getRelocs(macho_file).len == 0) continue;
+ const atom_size = math.cast(usize, atom.size) orelse return error.Overflow;
+ const code = try gpa.alloc(u8, atom_size);
+ defer gpa.free(code);
+ atom.getData(macho_file, code) catch |err| switch (err) {
+ error.InputOutput => {
+ try macho_file.reportUnexpectedError("fetching code for '{s}' failed", .{
+ atom.getName(macho_file),
+ });
+ return error.FlushFailure;
+ },
+ else => |e| {
+ try macho_file.reportUnexpectedError("unexpected error while fetching code for '{s}': {s}", .{
+ atom.getName(macho_file),
+ @errorName(e),
+ });
+ return error.FlushFailure;
+ },
+ };
+ const file_offset = header.offset + atom.value - header.addr;
+ const rels = relocs.getPtr(atom.out_n_sect).?;
+ try atom.writeRelocs(macho_file, code, rels);
+ try macho_file.base.file.?.pwriteAll(code, file_offset);
+ }
+
+ for (relocs.keys(), relocs.values()) |sect_id, rels| {
+ const header = macho_file.sections.items(.header)[sect_id];
+ assert(rels.items.len == header.nreloc);
+ mem.sort(macho.relocation_info, rels.items, {}, sortReloc);
+ try macho_file.base.file.?.pwriteAll(mem.sliceAsBytes(rels.items), header.reloff);
+ }
+ }
}

fn writeCompactUnwind(macho_file: *MachO) !void {
src/link/MachO.zig
@@ -615,6 +615,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, prog_node: *std.Progress.Node
const sect = &self.sections.items(.header)[atom.out_n_sect];
if (sect.isZerofill()) continue;
if (mem.indexOf(u8, sect.segName(), "ZIG") == null) continue; // Non-Zig sections are handled separately
+ if (atom.getRelocs(self).len == 0) continue;
// TODO: we will resolve and write ZigObject's TLS data twice:
// once here, and once in writeAtoms
const atom_size = math.cast(usize, atom.size) orelse return error.Overflow;
@@ -4107,10 +4108,13 @@ fn formatSections(
_ = unused_fmt_string;
const slice = self.sections.slice();
for (slice.items(.header), slice.items(.segment_id), 0..) |header, seg_id, i| {
- try writer.print("sect({d}) : seg({d}) : {s},{s} : @{x} ({x}) : align({x}) : size({x})\n", .{
- i, seg_id, header.segName(), header.sectName(), header.addr, header.offset,
- header.@"align", header.size,
- });
+ try writer.print(
+ "sect({d}) : seg({d}) : {s},{s} : @{x} ({x}) : align({x}) : size({x}) : relocs({x};{d})\n",
+ .{
+ i, seg_id, header.segName(), header.sectName(), header.addr, header.offset,
+ header.@"align", header.size, header.reloff, header.nreloc,
+ },
+ );
}
}