Commit 16180f525a
Changed files (16)
src/link/MachO/Atom.zig
@@ -971,7 +971,7 @@ pub fn calcNumRelocs(self: Atom, macho_file: *MachO) u32 {
}
}
-pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.relocation_info) !void {
+pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.relocation_info) error{ LinkFailure, OutOfMemory }!void {
const tracy = trace(@src());
defer tracy.end();
@@ -983,15 +983,15 @@ pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.r
var i: usize = 0;
for (relocs) |rel| {
defer i += 1;
- const rel_offset = math.cast(usize, rel.offset - self.off) orelse return error.Overflow;
- const r_address: i32 = math.cast(i32, self.value + rel_offset) orelse return error.Overflow;
+ const rel_offset = try macho_file.cast(usize, rel.offset - self.off);
+ const r_address: i32 = try macho_file.cast(i32, self.value + rel_offset);
assert(r_address >= 0);
const r_symbolnum = r_symbolnum: {
const r_symbolnum: u32 = switch (rel.tag) {
.local => rel.getTargetAtom(self, macho_file).out_n_sect + 1,
.@"extern" => rel.getTargetSymbol(self, macho_file).getOutputSymtabIndex(macho_file).?,
};
- break :r_symbolnum math.cast(u24, r_symbolnum) orelse return error.Overflow;
+ break :r_symbolnum try macho_file.cast(u24, r_symbolnum);
};
const r_extern = rel.tag == .@"extern";
var addend = rel.addend + rel.getRelocAddend(cpu_arch);
@@ -1027,7 +1027,7 @@ pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.r
} else if (addend > 0) {
buffer[i] = .{
.r_address = r_address,
- .r_symbolnum = @bitCast(math.cast(i24, addend) orelse return error.Overflow),
+ .r_symbolnum = @bitCast(try macho_file.cast(i24, addend)),
.r_pcrel = 0,
.r_length = 2,
.r_extern = 0,
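Note: these hunks call `macho_file.cast(...)`, whose definition is not part of this view. Judging by the analogous `Elf.cast` helper added in src/link/Elf.zig at the bottom of this commit, the MachO version presumably reports the failed cast through the link diagnostics and returns `error.LinkFailure`; a sketch under that assumption (receiver and message wording assumed, not confirmed by the diff):

// Assumed shape of MachO.cast, mirroring the Elf.cast helper shown below.
pub fn cast(self: *MachO, comptime T: type, x: anytype) error{LinkFailure}!T {
    return std.math.cast(T, x) orelse {
        const diags = &self.base.comp.link_diags;
        return diags.fail("encountered {d}, overflowing {d}-bit value", .{ x, @bitSizeOf(T) });
    };
}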
src/link/MachO/InternalObject.zig
@@ -414,10 +414,11 @@ pub fn resolveLiterals(self: *InternalObject, lp: *MachO.LiteralPool, macho_file
const rel = relocs[0];
assert(rel.tag == .@"extern");
const target = rel.getTargetSymbol(atom.*, macho_file).getAtom(macho_file).?;
- const target_size = std.math.cast(usize, target.size) orelse return error.Overflow;
+ const target_size = try macho_file.cast(usize, target.size);
try buffer.ensureUnusedCapacity(target_size);
buffer.resize(target_size) catch unreachable;
- @memcpy(buffer.items, try self.getSectionData(target.n_sect));
+ const section_data = try self.getSectionData(target.n_sect, macho_file);
+ @memcpy(buffer.items, section_data);
const res = try lp.insert(gpa, header.type(), buffer.items);
buffer.clearRetainingCapacity();
if (!res.found_existing) {
@@ -607,10 +608,11 @@ pub fn writeAtoms(self: *InternalObject, macho_file: *MachO) !void {
if (!atom.isAlive()) continue;
const sect = atom.getInputSection(macho_file);
if (sect.isZerofill()) continue;
- const off = std.math.cast(usize, atom.value) orelse return error.Overflow;
- const size = std.math.cast(usize, atom.size) orelse return error.Overflow;
+ const off = try macho_file.cast(usize, atom.value);
+ const size = try macho_file.cast(usize, atom.size);
const buffer = macho_file.sections.items(.out)[atom.out_n_sect].items[off..][0..size];
- @memcpy(buffer, try self.getSectionData(atom.n_sect));
+ const section_data = try self.getSectionData(atom.n_sect, macho_file);
+ @memcpy(buffer, section_data);
try atom.resolveRelocs(macho_file, buffer);
}
}
@@ -644,13 +646,13 @@ fn addSection(self: *InternalObject, allocator: Allocator, segname: []const u8,
return n_sect;
}
-fn getSectionData(self: *const InternalObject, index: u32) error{Overflow}![]const u8 {
+fn getSectionData(self: *const InternalObject, index: u32, macho_file: *MachO) error{LinkFailure}![]const u8 {
const slice = self.sections.slice();
assert(index < slice.items(.header).len);
const sect = slice.items(.header)[index];
const extra = slice.items(.extra)[index];
if (extra.is_objc_methname) {
- const size = std.math.cast(usize, sect.size) orelse return error.Overflow;
+ const size = try macho_file.cast(usize, sect.size);
return self.objc_methnames.items[sect.offset..][0..size];
} else if (extra.is_objc_selref)
return &self.objc_selrefs
src/link/MachO/Object.zig
@@ -582,7 +582,7 @@ fn initPointerLiterals(self: *Object, allocator: Allocator, macho_file: *MachO)
);
return error.MalformedObject;
}
- const num_ptrs = math.cast(usize, @divExact(sect.size, rec_size)) orelse return error.Overflow;
+ const num_ptrs = try macho_file.cast(usize, @divExact(sect.size, rec_size));
for (0..num_ptrs) |i| {
const pos: u32 = @as(u32, @intCast(i)) * rec_size;
@@ -650,8 +650,8 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
for (subs.items) |sub| {
const atom = self.getAtom(sub.atom).?;
- const atom_off = math.cast(usize, atom.off) orelse return error.Overflow;
- const atom_size = math.cast(usize, atom.size) orelse return error.Overflow;
+ const atom_off = try macho_file.cast(usize, atom.off);
+ const atom_size = try macho_file.cast(usize, atom.size);
const atom_data = data[atom_off..][0..atom_size];
const res = try lp.insert(gpa, header.type(), atom_data);
if (!res.found_existing) {
@@ -674,8 +674,8 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
.local => rel.getTargetAtom(atom.*, macho_file),
.@"extern" => rel.getTargetSymbol(atom.*, macho_file).getAtom(macho_file).?,
};
- const addend = math.cast(u32, rel.addend) orelse return error.Overflow;
- const target_size = math.cast(usize, target.size) orelse return error.Overflow;
+ const addend = try macho_file.cast(u32, rel.addend);
+ const target_size = try macho_file.cast(usize, target.size);
try buffer.ensureUnusedCapacity(target_size);
buffer.resize(target_size) catch unreachable;
const gop = try sections_data.getOrPut(target.n_sect);
@@ -683,7 +683,7 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
gop.value_ptr.* = try self.readSectionData(gpa, file, @intCast(target.n_sect));
}
const data = gop.value_ptr.*;
- const target_off = math.cast(usize, target.off) orelse return error.Overflow;
+ const target_off = try macho_file.cast(usize, target.off);
@memcpy(buffer.items, data[target_off..][0..target_size]);
const res = try lp.insert(gpa, header.type(), buffer.items[addend..]);
buffer.clearRetainingCapacity();
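The loop above reuses one scratch buffer across literals: `ensureUnusedCapacity` reserves space first, so the subsequent `resize` cannot fail (hence `catch unreachable`), and `clearRetainingCapacity` empties the buffer without giving back the allocation. A minimal sketch of the idiom (`internLiterals` and `atom_sizes` are hypothetical, for illustration only):

const std = @import("std");

fn internLiterals(gpa: std.mem.Allocator, atom_sizes: []const usize) !void {
    var buffer = std.ArrayList(u8).init(gpa);
    defer buffer.deinit();
    for (atom_sizes) |n| {
        try buffer.ensureUnusedCapacity(n);
        buffer.resize(n) catch unreachable; // cannot fail: capacity reserved above
        // ... fill buffer.items and insert it into the literal pool ...
        buffer.clearRetainingCapacity(); // keep the allocation for the next literal
    }
}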
@@ -1033,7 +1033,7 @@ fn initEhFrameRecords(self: *Object, allocator: Allocator, sect_id: u8, file: Fi
const sect = slice.items(.header)[sect_id];
const relocs = slice.items(.relocs)[sect_id];
- const size = math.cast(usize, sect.size) orelse return error.Overflow;
+ const size = try macho_file.cast(usize, sect.size);
try self.eh_frame_data.resize(allocator, size);
const amt = try file.preadAll(self.eh_frame_data.items, sect.offset + self.offset);
if (amt != self.eh_frame_data.items.len) return error.InputOutput;
@@ -1696,7 +1696,7 @@ pub fn updateArSize(self: *Object, macho_file: *MachO) !void {
pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writer: anytype) !void {
// Header
- const size = std.math.cast(usize, self.output_ar_state.size) orelse return error.Overflow;
+ const size = try macho_file.cast(usize, self.output_ar_state.size);
const basename = std.fs.path.basename(self.path.sub_path);
try Archive.writeHeader(basename, size, ar_format, writer);
// Data
@@ -1826,7 +1826,7 @@ pub fn writeAtoms(self: *Object, macho_file: *MachO) !void {
for (headers, 0..) |header, n_sect| {
if (header.isZerofill()) continue;
- const size = math.cast(usize, header.size) orelse return error.Overflow;
+ const size = try macho_file.cast(usize, header.size);
const data = try gpa.alloc(u8, size);
const amt = try file.preadAll(data, header.offset + self.offset);
if (amt != data.len) return error.InputOutput;
@@ -1837,9 +1837,9 @@ pub fn writeAtoms(self: *Object, macho_file: *MachO) !void {
if (!atom.isAlive()) continue;
const sect = atom.getInputSection(macho_file);
if (sect.isZerofill()) continue;
- const value = math.cast(usize, atom.value) orelse return error.Overflow;
- const off = math.cast(usize, atom.off) orelse return error.Overflow;
- const size = math.cast(usize, atom.size) orelse return error.Overflow;
+ const value = try macho_file.cast(usize, atom.value);
+ const off = try macho_file.cast(usize, atom.off);
+ const size = try macho_file.cast(usize, atom.size);
const buffer = macho_file.sections.items(.out)[atom.out_n_sect].items;
const data = sections_data[atom.n_sect];
@memcpy(buffer[value..][0..size], data[off..][0..size]);
@@ -1865,7 +1865,7 @@ pub fn writeAtomsRelocatable(self: *Object, macho_file: *MachO) !void {
for (headers, 0..) |header, n_sect| {
if (header.isZerofill()) continue;
- const size = math.cast(usize, header.size) orelse return error.Overflow;
+ const size = try macho_file.cast(usize, header.size);
const data = try gpa.alloc(u8, size);
const amt = try file.preadAll(data, header.offset + self.offset);
if (amt != data.len) return error.InputOutput;
@@ -1876,9 +1876,9 @@ pub fn writeAtomsRelocatable(self: *Object, macho_file: *MachO) !void {
if (!atom.isAlive()) continue;
const sect = atom.getInputSection(macho_file);
if (sect.isZerofill()) continue;
- const value = math.cast(usize, atom.value) orelse return error.Overflow;
- const off = math.cast(usize, atom.off) orelse return error.Overflow;
- const size = math.cast(usize, atom.size) orelse return error.Overflow;
+ const value = try macho_file.cast(usize, atom.value);
+ const off = try macho_file.cast(usize, atom.off);
+ const size = try macho_file.cast(usize, atom.size);
const buffer = macho_file.sections.items(.out)[atom.out_n_sect].items;
const data = sections_data[atom.n_sect];
@memcpy(buffer[value..][0..size], data[off..][0..size]);
@@ -1909,29 +1909,27 @@ pub fn calcCompactUnwindSizeRelocatable(self: *Object, macho_file: *MachO) void
}
}
+fn addReloc(offset: u32, arch: std.Target.Cpu.Arch) !macho.relocation_info {
+ return .{
+ .r_address = std.math.cast(i32, offset) orelse return error.Overflow,
+ .r_symbolnum = 0,
+ .r_pcrel = 0,
+ .r_length = 3,
+ .r_extern = 0,
+ .r_type = switch (arch) {
+ .aarch64 => @intFromEnum(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
+ .x86_64 => @intFromEnum(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
+ else => unreachable,
+ },
+ };
+}
+
pub fn writeCompactUnwindRelocatable(self: *Object, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
const cpu_arch = macho_file.getTarget().cpu.arch;
- const addReloc = struct {
- fn addReloc(offset: u32, arch: std.Target.Cpu.Arch) !macho.relocation_info {
- return .{
- .r_address = math.cast(i32, offset) orelse return error.Overflow,
- .r_symbolnum = 0,
- .r_pcrel = 0,
- .r_length = 3,
- .r_extern = 0,
- .r_type = switch (arch) {
- .aarch64 => @intFromEnum(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
- .x86_64 => @intFromEnum(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
- else => unreachable,
- },
- };
- }
- }.addReloc;
-
const nsect = macho_file.unwind_info_sect_index.?;
const buffer = macho_file.sections.items(.out)[nsect].items;
const relocs = macho_file.sections.items(.relocs)[nsect].items;
@@ -1967,7 +1965,7 @@ pub fn writeCompactUnwindRelocatable(self: *Object, macho_file: *MachO) !void {
// Personality function
if (rec.getPersonality(macho_file)) |sym| {
- const r_symbolnum = math.cast(u24, sym.getOutputSymtabIndex(macho_file).?) orelse return error.Overflow;
+ const r_symbolnum = try macho_file.cast(u24, sym.getOutputSymtabIndex(macho_file).?);
var reloc = try addReloc(offset + 16, cpu_arch);
reloc.r_symbolnum = r_symbolnum;
reloc.r_extern = 1;
src/link/MachO/relocatable.zig
@@ -18,13 +18,15 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
// Instead of invoking a full-blown `-r` mode on the input which sadly will strip all
// debug info segments/sections (this is apparently by design by Apple), we copy
// the *only* input file over.
- // TODO: in the future, when we implement `dsymutil` alternative directly in the Zig
- // compiler, investigate if we can get rid of this `if` prong here.
const path = positionals.items[0].path().?;
- const in_file = try path.root_dir.handle.openFile(path.sub_path, .{});
- const stat = try in_file.stat();
- const amt = try in_file.copyRangeAll(0, macho_file.base.file.?, 0, stat.size);
- if (amt != stat.size) return error.InputOutput; // TODO: report an actual user error
+ const in_file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err|
+ return diags.fail("failed to open {}: {s}", .{ path, @errorName(err) });
+ const stat = in_file.stat() catch |err|
+ return diags.fail("failed to stat {}: {s}", .{ path, @errorName(err) });
+ const amt = in_file.copyRangeAll(0, macho_file.base.file.?, 0, stat.size) catch |err|
+ return diags.fail("failed to copy range of file {}: {s}", .{ path, @errorName(err) });
+ if (amt != stat.size)
+ return diags.fail("unexpected short write in copy range of file {}", .{path});
return;
}
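This hunk introduces the error-handling pattern used throughout the commit: fallible calls are caught and routed through `diags.fail`, which records a formatted message and yields the narrow `error.LinkFailure`. A minimal sketch of the assumed helper (its definition lives in the link diagnostics code, not in this diff):

// Assumed shape of Diags.fail: record the error, then return LinkFailure
// so call sites can write `return diags.fail(...)`.
pub fn fail(diags: *Diags, comptime format: []const u8, args: anytype) error{LinkFailure} {
    diags.addError(format, args); // addError appears directly elsewhere in this commit
    return error.LinkFailure;
}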
@@ -40,7 +42,11 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
if (diags.hasErrors()) return error.LinkFailure;
try macho_file.resolveSymbols();
- try macho_file.dedupLiterals();
+ macho_file.dedupLiterals() catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.LinkFailure => return error.LinkFailure,
+        else => |e| return diags.fail("failed to deduplicate literals: {s}", .{@errorName(e)}),
+ };
markExports(macho_file);
claimUnresolved(macho_file);
try initOutputSections(macho_file);
@@ -108,7 +114,8 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
try macho_file.addAtomsToSections();
try calcSectionSizes(macho_file);
try createSegment(macho_file);
- try allocateSections(macho_file);
+ allocateSections(macho_file) catch |err|
+ return diags.fail("failed to allocate sections: {s}", .{@errorName(err)});
allocateSegment(macho_file);
if (build_options.enable_logging) {
@@ -126,8 +133,6 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
const ncmds, const sizeofcmds = try writeLoadCommands(macho_file);
try writeHeader(macho_file, ncmds, sizeofcmds);
- // TODO we can avoid reading in the file contents we just wrote if we give the linker
- // ability to write directly to a buffer.
try zo.readFileContents(macho_file);
}
@@ -152,7 +157,8 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
// Update sizes of contributing objects
for (files.items) |index| {
- try macho_file.getFile(index).?.updateArSize(macho_file);
+ macho_file.getFile(index).?.updateArSize(macho_file) catch |err|
+ return diags.fail("failed to update ar size: {s}", .{@errorName(err)});
}
// Update file offsets of contributing objects
@@ -171,7 +177,7 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
state.file_off = pos;
pos += @sizeOf(Archive.ar_hdr);
pos += mem.alignForward(usize, zo.basename.len + 1, ptr_width);
- pos += math.cast(usize, state.size) orelse return error.Overflow;
+ pos += try macho_file.cast(usize, state.size);
},
.object => |o| {
const state = &o.output_ar_state;
@@ -179,7 +185,7 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
state.file_off = pos;
pos += @sizeOf(Archive.ar_hdr);
pos += mem.alignForward(usize, o.path.basename().len + 1, ptr_width);
- pos += math.cast(usize, state.size) orelse return error.Overflow;
+ pos += try macho_file.cast(usize, state.size);
},
else => unreachable,
}
@@ -201,7 +207,10 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
try writer.writeAll(Archive.ARMAG);
// Write symtab
- try ar_symtab.write(format, macho_file, writer);
+ ar_symtab.write(format, macho_file, writer) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ else => |e| return diags.fail("failed to write archive symbol table: {s}", .{@errorName(e)}),
+ };
// Write object files
for (files.items) |index| {
@@ -210,13 +219,14 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
if (padding > 0) {
try writer.writeByteNTimes(0, padding);
}
- try macho_file.getFile(index).?.writeAr(format, macho_file, writer);
+ macho_file.getFile(index).?.writeAr(format, macho_file, writer) catch |err|
+ return diags.fail("failed to write archive: {s}", .{@errorName(err)});
}
assert(buffer.items.len == total_size);
- try macho_file.base.file.?.setEndPos(total_size);
- try macho_file.base.file.?.pwriteAll(buffer.items, 0);
+ try macho_file.setEndPos(total_size);
+ try macho_file.pwriteAll(buffer.items, 0);
if (diags.hasErrors()) return error.LinkFailure;
}
@@ -452,11 +462,10 @@ fn allocateSections(macho_file: *MachO) !void {
for (slice.items(.header)) |*header| {
const needed_size = header.size;
header.size = 0;
- const alignment = try math.powi(u32, 2, header.@"align");
+ const alignment = try macho_file.alignPow(header.@"align");
if (!header.isZerofill()) {
if (needed_size > macho_file.allocatedSize(header.offset)) {
- header.offset = math.cast(u32, try macho_file.findFreeSpace(needed_size, alignment)) orelse
- return error.Overflow;
+ header.offset = try macho_file.cast(u32, try macho_file.findFreeSpace(needed_size, alignment));
}
}
if (needed_size > macho_file.allocatedSizeVirtual(header.addr)) {
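`macho_file.alignPow` replaces the raw `math.powi(u32, 2, header.@"align")` call; its definition is not shown in this view. Presumably it performs the same power-of-two computation and reports overflow through the link diagnostics, along these lines (name and message assumed):

// Assumed shape of MachO.alignPow; not visible in this commit view.
pub fn alignPow(self: *MachO, x: u32) error{LinkFailure}!u32 {
    const diags = &self.base.comp.link_diags;
    return std.math.powi(u32, 2, x) catch {
        return diags.fail("section alignment 2^{d} overflows u32", .{x});
    };
}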
@@ -572,7 +581,7 @@ fn sortRelocs(macho_file: *MachO) void {
}
}
-fn writeSections(macho_file: *MachO) !void {
+fn writeSections(macho_file: *MachO) link.File.FlushError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -583,7 +592,7 @@ fn writeSections(macho_file: *MachO) !void {
for (slice.items(.header), slice.items(.out), slice.items(.relocs), 0..) |header, *out, *relocs, n_sect| {
if (header.isZerofill()) continue;
if (!macho_file.isZigSection(@intCast(n_sect))) { // TODO this is wrong; what about debug sections?
- const size = math.cast(usize, header.size) orelse return error.Overflow;
+ const size = try macho_file.cast(usize, header.size);
try out.resize(gpa, size);
const padding_byte: u8 = if (header.isCode() and cpu_arch == .x86_64) 0xcc else 0;
@memset(out.items, padding_byte);
@@ -662,16 +671,16 @@ fn writeSectionsToFile(macho_file: *MachO) !void {
const slice = macho_file.sections.slice();
for (slice.items(.header), slice.items(.out), slice.items(.relocs)) |header, out, relocs| {
- try macho_file.base.file.?.pwriteAll(out.items, header.offset);
- try macho_file.base.file.?.pwriteAll(mem.sliceAsBytes(relocs.items), header.reloff);
+ try macho_file.pwriteAll(out.items, header.offset);
+ try macho_file.pwriteAll(mem.sliceAsBytes(relocs.items), header.reloff);
}
try macho_file.writeDataInCode();
- try macho_file.base.file.?.pwriteAll(mem.sliceAsBytes(macho_file.symtab.items), macho_file.symtab_cmd.symoff);
- try macho_file.base.file.?.pwriteAll(macho_file.strtab.items, macho_file.symtab_cmd.stroff);
+ try macho_file.pwriteAll(mem.sliceAsBytes(macho_file.symtab.items), macho_file.symtab_cmd.symoff);
+ try macho_file.pwriteAll(macho_file.strtab.items, macho_file.symtab_cmd.stroff);
}
-fn writeLoadCommands(macho_file: *MachO) !struct { usize, usize } {
+fn writeLoadCommands(macho_file: *MachO) error{ LinkFailure, OutOfMemory }!struct { usize, usize } {
const gpa = macho_file.base.comp.gpa;
const needed_size = load_commands.calcLoadCommandsSizeObject(macho_file);
const buffer = try gpa.alloc(u8, needed_size);
@@ -686,31 +695,45 @@ fn writeLoadCommands(macho_file: *MachO) !struct { usize, usize } {
{
assert(macho_file.segments.items.len == 1);
const seg = macho_file.segments.items[0];
- try writer.writeStruct(seg);
+ writer.writeStruct(seg) catch |err| switch (err) {
+ error.NoSpaceLeft => unreachable,
+ };
for (macho_file.sections.items(.header)) |header| {
- try writer.writeStruct(header);
+ writer.writeStruct(header) catch |err| switch (err) {
+ error.NoSpaceLeft => unreachable,
+ };
}
ncmds += 1;
}
- try writer.writeStruct(macho_file.data_in_code_cmd);
+ writer.writeStruct(macho_file.data_in_code_cmd) catch |err| switch (err) {
+ error.NoSpaceLeft => unreachable,
+ };
ncmds += 1;
- try writer.writeStruct(macho_file.symtab_cmd);
+ writer.writeStruct(macho_file.symtab_cmd) catch |err| switch (err) {
+ error.NoSpaceLeft => unreachable,
+ };
ncmds += 1;
- try writer.writeStruct(macho_file.dysymtab_cmd);
+ writer.writeStruct(macho_file.dysymtab_cmd) catch |err| switch (err) {
+ error.NoSpaceLeft => unreachable,
+ };
ncmds += 1;
if (macho_file.platform.isBuildVersionCompatible()) {
- try load_commands.writeBuildVersionLC(macho_file.platform, macho_file.sdk_version, writer);
+ load_commands.writeBuildVersionLC(macho_file.platform, macho_file.sdk_version, writer) catch |err| switch (err) {
+ error.NoSpaceLeft => unreachable,
+ };
ncmds += 1;
} else {
- try load_commands.writeVersionMinLC(macho_file.platform, macho_file.sdk_version, writer);
+ load_commands.writeVersionMinLC(macho_file.platform, macho_file.sdk_version, writer) catch |err| switch (err) {
+ error.NoSpaceLeft => unreachable,
+ };
ncmds += 1;
}
assert(stream.pos == needed_size);
- try macho_file.base.file.?.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
+ try macho_file.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
return .{ ncmds, buffer.len };
}
@@ -742,7 +765,7 @@ fn writeHeader(macho_file: *MachO, ncmds: usize, sizeofcmds: usize) !void {
header.ncmds = @intCast(ncmds);
header.sizeofcmds = @intCast(sizeofcmds);
- try macho_file.base.file.?.pwriteAll(mem.asBytes(&header), 0);
+ try macho_file.pwriteAll(mem.asBytes(&header), 0);
}
const std = @import("std");
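Likewise, the `macho_file.setEndPos` and `macho_file.pwriteAll` calls introduced in this file presumably mirror the `Elf.setEndPos`/`Elf.pwriteAll` helpers added in src/link/Elf.zig below; a sketch of the assumed MachO counterparts:

// Assumed MachO counterparts of the Elf helpers shown at the end of this
// commit; their definitions are not part of this view.
pub fn pwriteAll(self: *MachO, bytes: []const u8, offset: u64) error{LinkFailure}!void {
    const diags = &self.base.comp.link_diags;
    self.base.file.?.pwriteAll(bytes, offset) catch |err| {
        return diags.fail("failed to write: {s}", .{@errorName(err)});
    };
}

pub fn setEndPos(self: *MachO, length: u64) error{LinkFailure}!void {
    const diags = &self.base.comp.link_diags;
    self.base.file.?.setEndPos(length) catch |err| {
        return diags.fail("failed to set file end pos: {s}", .{@errorName(err)});
    };
}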
src/link/MachO/ZigObject.zig
@@ -290,12 +290,15 @@ pub fn dedupLiterals(self: *ZigObject, lp: MachO.LiteralPool, macho_file: *MachO
/// We need this so that we can write to an archive.
/// TODO implement writing ZigObject data directly to a buffer instead.
pub fn readFileContents(self: *ZigObject, macho_file: *MachO) !void {
+ const diags = &macho_file.base.comp.link_diags;
// Size of the output object file is always the offset + size of the strtab
const size = macho_file.symtab_cmd.stroff + macho_file.symtab_cmd.strsize;
const gpa = macho_file.base.comp.gpa;
try self.data.resize(gpa, size);
- const amt = try macho_file.base.file.?.preadAll(self.data.items, 0);
- if (amt != size) return error.InputOutput;
+ const amt = macho_file.base.file.?.preadAll(self.data.items, 0) catch |err|
+ return diags.fail("failed to read output file: {s}", .{@errorName(err)});
+ if (amt != size)
+ return diags.fail("unexpected EOF reading from output file", .{});
}
pub fn updateArSymtab(self: ZigObject, ar_symtab: *Archive.ArSymtab, macho_file: *MachO) error{OutOfMemory}!void {
@@ -376,7 +379,7 @@ pub fn resolveRelocs(self: *ZigObject, macho_file: *MachO) !void {
if (atom.getRelocs(macho_file).len == 0) continue;
// TODO: we will resolve and write ZigObject's TLS data twice:
// once here, and once in writeAtoms
- const atom_size = std.math.cast(usize, atom.size) orelse return error.Overflow;
+ const atom_size = try macho_file.cast(usize, atom.size);
const code = try gpa.alloc(u8, atom_size);
defer gpa.free(code);
self.getAtomData(macho_file, atom.*, code) catch |err| {
@@ -400,7 +403,7 @@ pub fn resolveRelocs(self: *ZigObject, macho_file: *MachO) !void {
has_error = true;
continue;
};
- try macho_file.base.file.?.pwriteAll(code, file_offset);
+ try macho_file.pwriteAll(code, file_offset);
}
if (has_error) return error.ResolveFailed;
@@ -419,7 +422,7 @@ pub fn calcNumRelocs(self: *ZigObject, macho_file: *MachO) void {
}
}
-pub fn writeRelocs(self: *ZigObject, macho_file: *MachO) !void {
+pub fn writeRelocs(self: *ZigObject, macho_file: *MachO) error{ LinkFailure, OutOfMemory }!void {
const gpa = macho_file.base.comp.gpa;
const diags = &macho_file.base.comp.link_diags;
@@ -432,14 +435,14 @@ pub fn writeRelocs(self: *ZigObject, macho_file: *MachO) !void {
if (!macho_file.isZigSection(atom.out_n_sect) and !macho_file.isDebugSection(atom.out_n_sect)) continue;
if (atom.getRelocs(macho_file).len == 0) continue;
const extra = atom.getExtra(macho_file);
- const atom_size = std.math.cast(usize, atom.size) orelse return error.Overflow;
+ const atom_size = try macho_file.cast(usize, atom.size);
const code = try gpa.alloc(u8, atom_size);
defer gpa.free(code);
self.getAtomData(macho_file, atom.*, code) catch |err|
return diags.fail("failed to fetch code for '{s}': {s}", .{ atom.getName(macho_file), @errorName(err) });
const file_offset = header.offset + atom.value;
try atom.writeRelocs(macho_file, code, relocs[extra.rel_out_index..][0..extra.rel_out_count]);
- try macho_file.base.file.?.pwriteAll(code, file_offset);
+ try macho_file.pwriteAll(code, file_offset);
}
}
@@ -457,8 +460,8 @@ pub fn writeAtomsRelocatable(self: *ZigObject, macho_file: *MachO) !void {
if (sect.isZerofill()) continue;
if (macho_file.isZigSection(atom.out_n_sect)) continue;
if (atom.getRelocs(macho_file).len == 0) continue;
- const off = std.math.cast(usize, atom.value) orelse return error.Overflow;
- const size = std.math.cast(usize, atom.size) orelse return error.Overflow;
+ const off = try macho_file.cast(usize, atom.value);
+ const size = try macho_file.cast(usize, atom.size);
const buffer = macho_file.sections.items(.out)[atom.out_n_sect].items;
try self.getAtomData(macho_file, atom.*, buffer[off..][0..size]);
const relocs = macho_file.sections.items(.relocs)[atom.out_n_sect].items;
@@ -480,8 +483,8 @@ pub fn writeAtoms(self: *ZigObject, macho_file: *MachO) !void {
const sect = atom.getInputSection(macho_file);
if (sect.isZerofill()) continue;
if (macho_file.isZigSection(atom.out_n_sect)) continue;
- const off = std.math.cast(usize, atom.value) orelse return error.Overflow;
- const size = std.math.cast(usize, atom.size) orelse return error.Overflow;
+ const off = try macho_file.cast(usize, atom.value);
+ const size = try macho_file.cast(usize, atom.size);
const buffer = macho_file.sections.items(.out)[atom.out_n_sect].items;
try self.getAtomData(macho_file, atom.*, buffer[off..][0..size]);
try atom.resolveRelocs(macho_file, buffer[off..][0..size]);
@@ -546,7 +549,9 @@ pub fn getInputSection(self: ZigObject, atom: Atom, macho_file: *MachO) macho.se
return sect;
}
-pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) !void {
+pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) link.File.FlushError!void {
+ const diags = &macho_file.base.comp.link_diags;
+
// Handle any lazy symbols that were emitted by incremental compilation.
if (self.lazy_syms.getPtr(.anyerror_type)) |metadata| {
const pt: Zcu.PerThread = .activate(macho_file.base.comp.zcu.?, tid);
@@ -554,24 +559,18 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id)
// Most lazy symbols can be updated on first use, but
// anyerror needs to wait for everything to be flushed.
- if (metadata.text_state != .unused) self.updateLazySymbol(
+ if (metadata.text_state != .unused) try self.updateLazySymbol(
macho_file,
pt,
.{ .kind = .code, .ty = .anyerror_type },
metadata.text_symbol_index,
- ) catch |err| return switch (err) {
- error.CodegenFail => error.LinkFailure,
- else => |e| e,
- };
- if (metadata.const_state != .unused) self.updateLazySymbol(
+ );
+ if (metadata.const_state != .unused) try self.updateLazySymbol(
macho_file,
pt,
.{ .kind = .const_data, .ty = .anyerror_type },
metadata.const_symbol_index,
- ) catch |err| return switch (err) {
- error.CodegenFail => error.LinkFailure,
- else => |e| e,
- };
+ );
}
for (self.lazy_syms.values()) |*metadata| {
if (metadata.text_state != .unused) metadata.text_state = .flushed;
@@ -581,7 +580,11 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id)
if (self.dwarf) |*dwarf| {
const pt: Zcu.PerThread = .activate(macho_file.base.comp.zcu.?, tid);
defer pt.deactivate();
- try dwarf.flushModule(pt);
+ dwarf.flushModule(pt) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.CodegenFail => return error.LinkFailure,
+ else => |e| return diags.fail("failed to flush dwarf module: {s}", .{@errorName(e)}),
+ };
self.debug_abbrev_dirty = false;
self.debug_aranges_dirty = false;
@@ -616,6 +619,7 @@ pub fn getNavVAddr(
const sym = self.symbols.items[sym_index];
const vaddr = sym.getAddress(.{}, macho_file);
switch (reloc_info.parent) {
+ .none => unreachable,
.atom_index => |atom_index| {
const parent_atom = self.symbols.items[atom_index].getAtom(macho_file).?;
try parent_atom.addReloc(macho_file, .{
@@ -655,6 +659,7 @@ pub fn getUavVAddr(
const sym = self.symbols.items[sym_index];
const vaddr = sym.getAddress(.{}, macho_file);
switch (reloc_info.parent) {
+ .none => unreachable,
.atom_index => |atom_index| {
const parent_atom = self.symbols.items[atom_index].getAtom(macho_file).?;
try parent_atom.addReloc(macho_file, .{
@@ -766,7 +771,7 @@ pub fn updateFunc(
func_index: InternPool.Index,
air: Air,
liveness: Liveness,
-) !void {
+) link.File.UpdateNavError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -936,7 +941,7 @@ fn updateNavCode(
sym_index: Symbol.Index,
sect_index: u8,
code: []const u8,
-) !void {
+) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
@@ -950,6 +955,7 @@ fn updateNavCode(
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
};
+ const diags = &macho_file.base.comp.link_diags;
const sect = &macho_file.sections.items(.header)[sect_index];
const sym = &self.symbols.items[sym_index];
const nlist = &self.symtab.items(.nlist)[sym.nlist_idx];
@@ -978,7 +984,7 @@ fn updateNavCode(
const need_realloc = code.len > capacity or !required_alignment.check(atom.value);
if (need_realloc) {
- try atom.grow(macho_file);
+ atom.grow(macho_file) catch |err| return diags.fail("failed to grow atom: {s}", .{@errorName(err)});
log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom.value });
if (old_vaddr != atom.value) {
sym.value = 0;
@@ -1000,7 +1006,7 @@ fn updateNavCode(
if (!sect.isZerofill()) {
const file_offset = sect.offset + atom.value;
- try macho_file.base.file.?.pwriteAll(code, file_offset);
+ try macho_file.pwriteAll(code, file_offset);
}
}
@@ -1236,7 +1242,7 @@ fn lowerConst(
const sect = macho_file.sections.items(.header)[output_section_index];
const file_offset = sect.offset + atom.value;
- try macho_file.base.file.?.pwriteAll(code, file_offset);
+ try macho_file.pwriteAll(code, file_offset);
return .{ .ok = sym_index };
}
@@ -1347,9 +1353,10 @@ fn updateLazySymbol(
pt: Zcu.PerThread,
lazy_sym: link.File.LazySymbol,
symbol_index: Symbol.Index,
-) !void {
+) error{ OutOfMemory, LinkFailure }!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
+ const diags = &macho_file.base.comp.link_diags;
var required_alignment: Atom.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
@@ -1365,7 +1372,7 @@ fn updateLazySymbol(
};
const src = Type.fromInterned(lazy_sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded;
- const res = try codegen.generateLazySymbol(
+ const res = codegen.generateLazySymbol(
&macho_file.base,
pt,
src,
@@ -1374,13 +1381,14 @@ fn updateLazySymbol(
&code_buffer,
.none,
.{ .atom_index = symbol_index },
- );
+ ) catch |err| switch (err) {
+ error.CodegenFail => return error.LinkFailure,
+ error.OutOfMemory => return error.OutOfMemory,
+ else => |e| return diags.fail("failed to codegen symbol: {s}", .{@errorName(e)}),
+ };
const code = switch (res) {
.ok => code_buffer.items,
- .fail => |em| {
- log.err("{s}", .{em.msg});
- return error.CodegenFail;
- },
+ .fail => |em| return diags.fail("codegen failure: {s}", .{em.msg}),
};
const output_section_index = switch (lazy_sym.kind) {
@@ -1412,7 +1420,7 @@ fn updateLazySymbol(
const sect = macho_file.sections.items(.header)[output_section_index];
const file_offset = sect.offset + atom.value;
- try macho_file.base.file.?.pwriteAll(code, file_offset);
+ try macho_file.pwriteAll(code, file_offset);
}
pub fn updateLineNumber(self: *ZigObject, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void {
@@ -1486,7 +1494,7 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, macho_file: *MachO) !void {
.x86_64 => try x86_64.writeTrampolineCode(source_addr, target_addr, &buf),
else => @panic("TODO implement write trampoline for this CPU arch"),
};
- try macho_file.base.file.?.pwriteAll(out, fileoff);
+ try macho_file.pwriteAll(out, fileoff);
}
pub fn getOrCreateMetadataForNav(
src/link/Wasm/Flush.zig
@@ -39,27 +39,17 @@ const DataSegmentIndex = enum(u32) {
pub fn clear(f: *Flush) void {
f.binary_bytes.clearRetainingCapacity();
- f.function_imports.clearRetainingCapacity();
- f.global_imports.clearRetainingCapacity();
- f.functions.clearRetainingCapacity();
- f.globals.clearRetainingCapacity();
f.data_segments.clearRetainingCapacity();
f.data_segment_groups.clearRetainingCapacity();
f.indirect_function_table.clearRetainingCapacity();
- f.function_exports.clearRetainingCapacity();
f.global_exports.clearRetainingCapacity();
}
pub fn deinit(f: *Flush, gpa: Allocator) void {
f.binary_bytes.deinit(gpa);
- f.function_imports.deinit(gpa);
- f.global_imports.deinit(gpa);
- f.functions.deinit(gpa);
- f.globals.deinit(gpa);
f.data_segments.deinit(gpa);
f.data_segment_groups.deinit(gpa);
f.indirect_function_table.deinit(gpa);
- f.function_exports.deinit(gpa);
f.global_exports.deinit(gpa);
f.* = undefined;
}
@@ -79,28 +69,32 @@ pub fn finish(f: *Flush, wasm: *Wasm, arena: Allocator) anyerror!void {
if (wasm.any_exports_updated) {
wasm.any_exports_updated = false;
+
wasm.function_exports.shrinkRetainingCapacity(wasm.function_exports_len);
wasm.global_exports.shrinkRetainingCapacity(wasm.global_exports_len);
const entry_name = if (wasm.entry_resolution.isNavOrUnresolved(wasm)) wasm.entry_name else .none;
try f.missing_exports.reinit(gpa, wasm.missing_exports_init, &.{});
+ try wasm.function_imports.reinit(gpa, wasm.function_imports_init_keys, wasm.function_imports_init_vals);
+ try wasm.global_imports.reinit(gpa, wasm.global_imports_init_keys, wasm.global_imports_init_vals);
+
for (wasm.nav_exports.keys()) |*nav_export| {
if (ip.isFunctionType(ip.getNav(nav_export.nav_index).typeOf(ip))) {
- try wasm.function_exports.append(gpa, .fromNav(nav_export.nav_index, wasm));
- if (nav_export.name.toOptional() == entry_name) {
- wasm.entry_resolution = .pack(wasm, .{ .nav = nav_export.nav_index });
- } else {
- f.missing_exports.swapRemove(nav_export.name);
- }
+ try wasm.function_exports.append(gpa, Wasm.FunctionIndex.fromIpNav(wasm, nav_export.nav_index).?);
+ _ = f.missing_exports.swapRemove(nav_export.name);
+ _ = wasm.function_imports.swapRemove(nav_export.name);
+
+ if (nav_export.name.toOptional() == entry_name)
+ wasm.entry_resolution = .fromIpNav(wasm, nav_export.nav_index);
} else {
- try wasm.global_exports.append(gpa, .fromNav(nav_export.nav_index));
- f.missing_exports.swapRemove(nav_export.name);
+ try wasm.global_exports.append(gpa, Wasm.GlobalIndex.fromIpNav(wasm, nav_export.nav_index).?);
+ _ = f.missing_exports.swapRemove(nav_export.name);
+ _ = wasm.global_imports.swapRemove(nav_export.name);
}
}
for (f.missing_exports.keys()) |exp_name| {
- if (exp_name != .none) continue;
diags.addError("manually specified export name '{s}' undefined", .{exp_name.slice(wasm)});
}
@@ -112,28 +106,31 @@ pub fn finish(f: *Flush, wasm: *Wasm, arena: Allocator) anyerror!void {
}
if (!allow_undefined) {
- for (wasm.function_imports.keys()) |function_import_id| {
- const name, const src_loc = function_import_id.nameAndLoc(wasm);
- diags.addSrcError(src_loc, "undefined function: {s}", .{name.slice(wasm)});
+ for (wasm.function_imports.keys(), wasm.function_imports.values()) |name, function_import_id| {
+ const src_loc = function_import_id.sourceLocation(wasm);
+ src_loc.addError(wasm, "undefined function: {s}", .{name.slice(wasm)});
}
- for (wasm.global_imports.keys()) |global_import_id| {
- const name, const src_loc = global_import_id.nameAndLoc(wasm);
- diags.addSrcError(src_loc, "undefined global: {s}", .{name.slice(wasm)});
+ for (wasm.global_imports.keys(), wasm.global_imports.values()) |name, global_import_id| {
+ const src_loc = global_import_id.sourceLocation(wasm);
+ src_loc.addError(wasm, "undefined global: {s}", .{name.slice(wasm)});
}
- for (wasm.table_imports.keys()) |table_import_id| {
- const name, const src_loc = table_import_id.nameAndLoc(wasm);
- diags.addSrcError(src_loc, "undefined table: {s}", .{name.slice(wasm)});
+ for (wasm.table_imports.keys(), wasm.table_imports.values()) |name, table_import_id| {
+ const src_loc = table_import_id.ptr(wasm).source_location;
+ src_loc.addError(wasm, "undefined table: {s}", .{name.slice(wasm)});
}
}
if (diags.hasErrors()) return error.LinkFailure;
+ wasm.functions.shrinkRetainingCapacity(wasm.functions_len);
+ wasm.globals.shrinkRetainingCapacity(wasm.globals_len);
+
// TODO only include init functions for objects with must_link=true or
// which have any alive functions inside them.
if (wasm.object_init_funcs.items.len > 0) {
// Zig has no constructors so these are only for object file inputs.
mem.sortUnstable(Wasm.InitFunc, wasm.object_init_funcs.items, {}, Wasm.InitFunc.lessThan);
- try f.functions.put(gpa, .__wasm_call_ctors, {});
+ try wasm.functions.put(gpa, .__wasm_call_ctors, {});
}
var any_passive_inits = false;
@@ -149,7 +146,7 @@ pub fn finish(f: *Flush, wasm: *Wasm, arena: Allocator) anyerror!void {
});
}
- try f.functions.ensureUnusedCapacity(gpa, 3);
+ try wasm.functions.ensureUnusedCapacity(gpa, 3);
// Passive segments are used to avoid memory being reinitialized on each
// thread's instantiation. These passive segments are initialized and
@@ -157,14 +154,14 @@ pub fn finish(f: *Flush, wasm: *Wasm, arena: Allocator) anyerror!void {
// We also initialize bss segments (using memory.fill) as part of this
// function.
if (any_passive_inits) {
- f.functions.putAssumeCapacity(.__wasm_init_memory, {});
+ wasm.functions.putAssumeCapacity(.__wasm_init_memory, {});
}
// When we have TLS GOT entries and shared memory is enabled,
// we must perform runtime relocations or else we don't create the function.
if (shared_memory) {
- if (f.need_tls_relocs) f.functions.putAssumeCapacity(.__wasm_apply_global_tls_relocs, {});
- f.functions.putAssumeCapacity(gpa, .__wasm_init_tls, {});
+ if (f.need_tls_relocs) wasm.functions.putAssumeCapacity(.__wasm_apply_global_tls_relocs, {});
+        wasm.functions.putAssumeCapacity(.__wasm_init_tls, {});
}
// Sort order:
@@ -611,11 +608,11 @@ pub fn finish(f: *Flush, wasm: *Wasm, arena: Allocator) anyerror!void {
}
// Code section.
- if (f.functions.count() != 0) {
+ if (wasm.functions.count() != 0) {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
const start_offset = binary_bytes.items.len - 5; // minus 5 so start offset is 5 to include entry count
- for (f.functions.keys()) |resolution| switch (resolution.unpack()) {
+ for (wasm.functions.keys()) |resolution| switch (resolution.unpack()) {
.unresolved => unreachable,
.__wasm_apply_global_tls_relocs => @panic("TODO lower __wasm_apply_global_tls_relocs"),
.__wasm_call_ctors => @panic("TODO lower __wasm_call_ctors"),
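The "minus 5" comment above refers to the usual wasm encoding trick: the section's byte size is reserved as a fixed five-byte (non-canonical) LEB128 so it can be back-patched once the section body is written. A sketch of that technique (helper names are illustrative; the real `reserveVecSectionHeader` is not shown in this view):

const std = @import("std");

// Reserve a 5-byte LEB128 size slot; five padded bytes can hold any u32.
fn reserveSize(gpa: std.mem.Allocator, bytes: *std.ArrayListUnmanaged(u8)) !usize {
    const offset = bytes.items.len;
    try bytes.appendNTimes(gpa, 0, 5);
    return offset;
}

// Back-patch the slot once the final size is known.
fn patchSize(bytes: []u8, offset: usize, size: u32) void {
    var v = size;
    for (0..4) |i| {
        bytes[offset + i] = (@as(u8, @truncate(v)) & 0x7f) | 0x80; // continuation bit set
        v >>= 7;
    }
    bytes[offset + 4] = @truncate(v); // final byte, no continuation bit
}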
src/link/Wasm/Object.zig
@@ -26,12 +26,14 @@ start_function: Wasm.OptionalObjectFunctionIndex,
/// (or therefore missing) and must generate an error when another object uses
/// features that are not supported by the other.
features: Wasm.Feature.Set,
-/// Points into Wasm functions
+/// Points into Wasm object_functions
functions: RelativeSlice,
-/// Points into Wasm object_globals_imports
-globals_imports: RelativeSlice,
-/// Points into Wasm object_tables_imports
-tables_imports: RelativeSlice,
+/// Points into Wasm object_function_imports
+function_imports: RelativeSlice,
+/// Points into Wasm object_global_imports
+global_imports: RelativeSlice,
+/// Points into Wasm object_table_imports
+table_imports: RelativeSlice,
/// Points into Wasm object_custom_segments
custom_segments: RelativeSlice,
/// For calculating local section index from `Wasm.SectionIndex`.
@@ -180,13 +182,13 @@ fn parse(
const data_segment_start: u32 = @intCast(wasm.object_data_segments.items.len);
const custom_segment_start: u32 = @intCast(wasm.object_custom_segments.items.len);
- const imports_start: u32 = @intCast(wasm.object_imports.items.len);
const functions_start: u32 = @intCast(wasm.object_functions.items.len);
const tables_start: u32 = @intCast(wasm.object_tables.items.len);
const memories_start: u32 = @intCast(wasm.object_memories.items.len);
const globals_start: u32 = @intCast(wasm.object_globals.items.len);
const init_funcs_start: u32 = @intCast(wasm.object_init_funcs.items.len);
const comdats_start: u32 = @intCast(wasm.object_comdats.items.len);
+ const function_imports_start: u32 = @intCast(wasm.object_function_imports.items.len);
const global_imports_start: u32 = @intCast(wasm.object_global_imports.items.len);
const table_imports_start: u32 = @intCast(wasm.object_table_imports.items.len);
const local_section_index_base = wasm.object_total_sections;
@@ -504,7 +506,7 @@ fn parse(
switch (kind) {
.function => {
const function, pos = readLeb(u32, bytes, pos);
- try ss.function_imports.append(gpa, .{
+ try ss.func_imports.append(gpa, .{
.module_name = interned_module_name,
.name = interned_name,
.index = function,
@@ -854,13 +856,13 @@ fn parse(
.archive_member_name = archive_member_name,
.start_function = start_function,
.features = features,
- .imports = .{
- .off = imports_start,
- .len = @intCast(wasm.object_imports.items.len - imports_start),
- },
.functions = .{
.off = functions_start,
- .len = @intCast(wasm.functions.items.len - functions_start),
+ .len = @intCast(wasm.object_functions.items.len - functions_start),
+ },
+ .globals = .{
+ .off = globals_start,
+ .len = @intCast(wasm.object_globals.items.len - globals_start),
},
.tables = .{
.off = tables_start,
@@ -870,9 +872,17 @@ fn parse(
.off = memories_start,
.len = @intCast(wasm.object_memories.items.len - memories_start),
},
- .globals = .{
- .off = globals_start,
- .len = @intCast(wasm.object_globals.items.len - globals_start),
+ .function_imports = .{
+ .off = function_imports_start,
+ .len = @intCast(wasm.object_function_imports.items.len - function_imports_start),
+ },
+ .global_imports = .{
+ .off = global_imports_start,
+ .len = @intCast(wasm.object_global_imports.items.len - global_imports_start),
+ },
+ .table_imports = .{
+ .off = table_imports_start,
+ .len = @intCast(wasm.object_table_imports.items.len - table_imports_start),
},
.init_funcs = .{
.off = init_funcs_start,
src/link/Coff.zig
@@ -754,7 +754,7 @@ fn allocateGlobal(coff: *Coff) !u32 {
return index;
}
-fn addGotEntry(coff: *Coff, target: SymbolWithLoc) !void {
+fn addGotEntry(coff: *Coff, target: SymbolWithLoc) error{ OutOfMemory, LinkFailure }!void {
const gpa = coff.base.comp.gpa;
if (coff.got_table.lookup.contains(target)) return;
const got_index = try coff.got_table.allocateEntry(gpa, target);
@@ -780,7 +780,7 @@ pub fn createAtom(coff: *Coff) !Atom.Index {
return atom_index;
}
-fn growAtom(coff: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
+fn growAtom(coff: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) link.File.UpdateNavError!u32 {
const atom = coff.getAtom(atom_index);
const sym = atom.getSymbol(coff);
const align_ok = mem.alignBackward(u32, sym.value, alignment) == sym.value;
@@ -1313,10 +1313,7 @@ fn updateLazySymbolAtom(
};
const code = switch (res) {
.ok => code_buffer.items,
- .fail => |em| {
- log.err("{s}", .{em.msg});
- return error.CodegenFail;
- },
+ .fail => |em| return diags.fail("failed to generate code: {s}", .{em.msg}),
};
const code_len: u32 = @intCast(code.len);
src/link/Dwarf.zig
@@ -23,6 +23,8 @@ debug_str: StringSection,
pub const UpdateError = error{
/// Indicates the error is already reported on `failed_codegen` in the Zcu.
CodegenFail,
+ /// Indicates the error is already reported on `link_diags` in the Compilation.
+ LinkFailure,
OutOfMemory,
};
@@ -590,12 +592,14 @@ const Unit = struct {
fn move(unit: *Unit, sec: *Section, dwarf: *Dwarf, new_off: u32) UpdateError!void {
if (unit.off == new_off) return;
- if (try dwarf.getFile().?.copyRangeAll(
+ const diags = &dwarf.bin_file.base.comp.link_diags;
+ const n = dwarf.getFile().?.copyRangeAll(
sec.off(dwarf) + unit.off,
dwarf.getFile().?,
sec.off(dwarf) + new_off,
unit.len,
- ) != unit.len) return error.InputOutput;
+ ) catch |err| return diags.fail("failed to copy file range: {s}", .{@errorName(err)});
+ if (n != unit.len) return diags.fail("unexpected short write from copy file range", .{});
unit.off = new_off;
}
src/link/Elf.zig
@@ -575,7 +575,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) !?u64 {
}
}
- if (at_end) try self.base.file.?.setEndPos(end);
+ if (at_end) try self.setEndPos(end);
return null;
}
@@ -638,7 +638,7 @@ pub fn growSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment:
shdr.sh_offset = new_offset;
} else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
- try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
+ try self.setEndPos(shdr.sh_offset + needed_size);
}
}
@@ -960,7 +960,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
},
else => |e| return e,
};
- try self.base.file.?.pwriteAll(code, file_offset);
+ try self.pwriteAll(code, file_offset);
}
if (has_reloc_errors) return error.LinkFailure;
@@ -2117,7 +2117,7 @@ pub fn writeShdrTable(self: *Elf) !void {
mem.byteSwapAllFields(elf.Elf32_Shdr, shdr);
}
}
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
+ try self.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
},
.p64 => {
const buf = try gpa.alloc(elf.Elf64_Shdr, self.sections.items(.shdr).len);
@@ -2130,7 +2130,7 @@ pub fn writeShdrTable(self: *Elf) !void {
mem.byteSwapAllFields(elf.Elf64_Shdr, shdr);
}
}
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
+ try self.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
},
}
}
@@ -2157,7 +2157,7 @@ fn writePhdrTable(self: *Elf) !void {
mem.byteSwapAllFields(elf.Elf32_Phdr, phdr);
}
}
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset);
+ try self.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset);
},
.p64 => {
const buf = try gpa.alloc(elf.Elf64_Phdr, self.phdrs.items.len);
@@ -2169,7 +2169,7 @@ fn writePhdrTable(self: *Elf) !void {
mem.byteSwapAllFields(elf.Elf64_Phdr, phdr);
}
}
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset);
+ try self.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset);
},
}
}
@@ -2319,7 +2319,7 @@ pub fn writeElfHeader(self: *Elf) !void {
assert(index == e_ehsize);
- try self.base.file.?.pwriteAll(hdr_buf[0..index], 0);
+ try self.pwriteAll(hdr_buf[0..index], 0);
}
pub fn freeNav(self: *Elf, nav: InternPool.Nav.Index) void {
@@ -2497,8 +2497,8 @@ pub fn writeMergeSections(self: *Elf) !void {
for (self.merge_sections.items) |*msec| {
const shdr = self.sections.items(.shdr)[msec.output_section_index];
- const fileoff = math.cast(usize, msec.value + shdr.sh_offset) orelse return error.Overflow;
- const size = math.cast(usize, msec.size) orelse return error.Overflow;
+ const fileoff = try self.cast(usize, msec.value + shdr.sh_offset);
+ const size = try self.cast(usize, msec.size);
try buffer.ensureTotalCapacity(size);
buffer.appendNTimesAssumeCapacity(0, size);
@@ -2506,11 +2506,11 @@ pub fn writeMergeSections(self: *Elf) !void {
const msub = msec.mergeSubsection(msub_index);
assert(msub.alive);
const string = msub.getString(self);
- const off = math.cast(usize, msub.value) orelse return error.Overflow;
+ const off = try self.cast(usize, msub.value);
@memcpy(buffer.items[off..][0..string.len], string);
}
- try self.base.file.?.pwriteAll(buffer.items, fileoff);
+ try self.pwriteAll(buffer.items, fileoff);
buffer.clearRetainingCapacity();
}
}
@@ -3682,7 +3682,7 @@ fn writeAtoms(self: *Elf) !void {
const offset = @as(u64, @intCast(th.value)) + shdr.sh_offset;
try th.write(self, buffer.writer());
assert(buffer.items.len == thunk_size);
- try self.base.file.?.pwriteAll(buffer.items, offset);
+ try self.pwriteAll(buffer.items, offset);
buffer.clearRetainingCapacity();
}
}
@@ -3790,12 +3790,12 @@ fn writeSyntheticSections(self: *Elf) !void {
const contents = buffer[0 .. interp.len + 1];
const shdr = slice.items(.shdr)[shndx];
assert(shdr.sh_size == contents.len);
- try self.base.file.?.pwriteAll(contents, shdr.sh_offset);
+ try self.pwriteAll(contents, shdr.sh_offset);
}
if (self.section_indexes.hash) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- try self.base.file.?.pwriteAll(self.hash.buffer.items, shdr.sh_offset);
+ try self.pwriteAll(self.hash.buffer.items, shdr.sh_offset);
}
if (self.section_indexes.gnu_hash) |shndx| {
@@ -3803,12 +3803,12 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.gnu_hash.size());
defer buffer.deinit();
try self.gnu_hash.write(self, buffer.writer());
- try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.versym) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.versym.items), shdr.sh_offset);
+ try self.pwriteAll(mem.sliceAsBytes(self.versym.items), shdr.sh_offset);
}
if (self.section_indexes.verneed) |shndx| {
@@ -3816,7 +3816,7 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.verneed.size());
defer buffer.deinit();
try self.verneed.write(buffer.writer());
- try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.dynamic) |shndx| {
@@ -3824,7 +3824,7 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.dynamic.size(self));
defer buffer.deinit();
try self.dynamic.write(self, buffer.writer());
- try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.dynsymtab) |shndx| {
@@ -3832,12 +3832,12 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.dynsym.size());
defer buffer.deinit();
try self.dynsym.write(self, buffer.writer());
- try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.dynstrtab) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- try self.base.file.?.pwriteAll(self.dynstrtab.items, shdr.sh_offset);
+ try self.pwriteAll(self.dynstrtab.items, shdr.sh_offset);
}
if (self.section_indexes.eh_frame) |shndx| {
@@ -3847,21 +3847,21 @@ fn writeSyntheticSections(self: *Elf) !void {
break :existing_size sym.atom(self).?.size;
};
const shdr = slice.items(.shdr)[shndx];
- const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
+ const sh_size = try self.cast(usize, shdr.sh_size);
var buffer = try std.ArrayList(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
defer buffer.deinit();
try eh_frame.writeEhFrame(self, buffer.writer());
assert(buffer.items.len == sh_size - existing_size);
- try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset + existing_size);
+ try self.pwriteAll(buffer.items, shdr.sh_offset + existing_size);
}
if (self.section_indexes.eh_frame_hdr) |shndx| {
const shdr = slice.items(.shdr)[shndx];
- const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
+ const sh_size = try self.cast(usize, shdr.sh_size);
var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
defer buffer.deinit();
try eh_frame.writeEhFrameHdr(self, buffer.writer());
- try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.got) |index| {
@@ -3869,7 +3869,7 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.got.size(self));
defer buffer.deinit();
try self.got.write(self, buffer.writer());
- try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.rela_dyn) |shndx| {
@@ -3877,7 +3877,7 @@ fn writeSyntheticSections(self: *Elf) !void {
try self.got.addRela(self);
try self.copy_rel.addRela(self);
self.sortRelaDyn();
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.rela_dyn.items), shdr.sh_offset);
+ try self.pwriteAll(mem.sliceAsBytes(self.rela_dyn.items), shdr.sh_offset);
}
if (self.section_indexes.plt) |shndx| {
@@ -3885,7 +3885,7 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.plt.size(self));
defer buffer.deinit();
try self.plt.write(self, buffer.writer());
- try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.got_plt) |shndx| {
@@ -3893,7 +3893,7 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.got_plt.size(self));
defer buffer.deinit();
try self.got_plt.write(self, buffer.writer());
- try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.plt_got) |shndx| {
@@ -3901,13 +3901,13 @@ fn writeSyntheticSections(self: *Elf) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.plt_got.size(self));
defer buffer.deinit();
try self.plt_got.write(self, buffer.writer());
- try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
+ try self.pwriteAll(buffer.items, shdr.sh_offset);
}
if (self.section_indexes.rela_plt) |shndx| {
const shdr = slice.items(.shdr)[shndx];
try self.plt.addRela(self);
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.rela_plt.items), shdr.sh_offset);
+ try self.pwriteAll(mem.sliceAsBytes(self.rela_plt.items), shdr.sh_offset);
}
try self.writeSymtab();
@@ -3919,7 +3919,7 @@ pub fn writeShStrtab(self: *Elf) !void {
if (self.section_indexes.shstrtab) |index| {
const shdr = self.sections.items(.shdr)[index];
log.debug("writing .shstrtab from 0x{x} to 0x{x}", .{ shdr.sh_offset, shdr.sh_offset + shdr.sh_size });
- try self.base.file.?.pwriteAll(self.shstrtab.items, shdr.sh_offset);
+ try self.pwriteAll(self.shstrtab.items, shdr.sh_offset);
}
}
@@ -3934,7 +3934,7 @@ pub fn writeSymtab(self: *Elf) !void {
.p32 => @sizeOf(elf.Elf32_Sym),
.p64 => @sizeOf(elf.Elf64_Sym),
};
- const nsyms = math.cast(usize, @divExact(symtab_shdr.sh_size, sym_size)) orelse return error.Overflow;
+ const nsyms = try self.cast(usize, @divExact(symtab_shdr.sh_size, sym_size));
log.debug("writing {d} symbols in .symtab from 0x{x} to 0x{x}", .{
nsyms,
@@ -3947,7 +3947,7 @@ pub fn writeSymtab(self: *Elf) !void {
});
try self.symtab.resize(gpa, nsyms);
- const needed_strtab_size = math.cast(usize, strtab_shdr.sh_size - 1) orelse return error.Overflow;
+ const needed_strtab_size = try self.cast(usize, strtab_shdr.sh_size - 1);
// TODO we could resize instead and in ZigObject/Object always access as slice
self.strtab.clearRetainingCapacity();
self.strtab.appendAssumeCapacity(0);
@@ -4016,17 +4016,17 @@ pub fn writeSymtab(self: *Elf) !void {
};
if (foreign_endian) mem.byteSwapAllFields(elf.Elf32_Sym, out);
}
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), symtab_shdr.sh_offset);
+ try self.pwriteAll(mem.sliceAsBytes(buf), symtab_shdr.sh_offset);
},
.p64 => {
if (foreign_endian) {
for (self.symtab.items) |*sym| mem.byteSwapAllFields(elf.Elf64_Sym, sym);
}
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.symtab.items), symtab_shdr.sh_offset);
+ try self.pwriteAll(mem.sliceAsBytes(self.symtab.items), symtab_shdr.sh_offset);
},
}
- try self.base.file.?.pwriteAll(self.strtab.items, strtab_shdr.sh_offset);
+ try self.pwriteAll(self.strtab.items, strtab_shdr.sh_offset);
}
/// Always 4 or 8 depending on whether this is 32-bit ELF or 64-bit ELF.
@@ -5190,6 +5190,30 @@ pub fn stringTableLookup(strtab: []const u8, off: u32) [:0]const u8 {
return slice[0..mem.indexOfScalar(u8, slice, 0).? :0];
}
+pub fn pwriteAll(elf_file: *Elf, bytes: []const u8, offset: u64) error{LinkFailure}!void {
+ const comp = elf_file.base.comp;
+ const diags = &comp.link_diags;
+ elf_file.base.file.?.pwriteAll(bytes, offset) catch |err| {
+ return diags.fail("failed to write: {s}", .{@errorName(err)});
+ };
+}
+
+pub fn setEndPos(elf_file: *Elf, length: u64) error{LinkFailure}!void {
+ const comp = elf_file.base.comp;
+ const diags = &comp.link_diags;
+ elf_file.base.file.?.setEndPos(length) catch |err| {
+ return diags.fail("failed to set file end pos: {s}", .{@errorName(err)});
+ };
+}
+
+pub fn cast(elf_file: *Elf, comptime T: type, x: anytype) error{LinkFailure}!T {
+ return std.math.cast(T, x) orelse {
+ const comp = elf_file.base.comp;
+ const diags = &comp.link_diags;
+ return diags.fail("encountered {d}, overflowing {d}-bit value", .{ x, @bitSizeOf(T) });
+ };
+}
+
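With these helpers, Elf call sites reduce raw error sets to error{LinkFailure} plus a diagnostic recorded in link_diags. A minimal sketch of the resulting call-site pattern (writeExampleSection is hypothetical, not part of this commit):

fn writeExampleSection(self: *Elf, shndx: u32, bytes: []const u8) error{LinkFailure}!void {
    const shdr = self.sections.items(.shdr)[shndx];
    // A failed cast is recorded in link_diags rather than returned as error.Overflow.
    const size = try self.cast(usize, shdr.sh_size);
    // An I/O failure is likewise recorded and surfaces as error.LinkFailure.
    try self.pwriteAll(bytes[0..size], shdr.sh_offset);
}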
const std = @import("std");
const build_options = @import("build_options");
const builtin = @import("builtin");
src/link/MachO.zig
@@ -434,7 +434,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
// libc/libSystem dep
self.resolveLibSystem(arena, comp, &system_libs) catch |err| switch (err) {
error.MissingLibSystem => {}, // already reported
- else => |e| return e, // TODO: convert into an error
+ else => |e| return diags.fail("failed to resolve libSystem: {s}", .{@errorName(e)}),
};
for (comp.link_inputs) |link_input| switch (link_input) {
@@ -494,7 +494,10 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
try self.resolveSymbols();
try self.convertTentativeDefsAndResolveSpecialSymbols();
- try self.dedupLiterals();
+ self.dedupLiterals() catch |err| switch (err) {
+ error.LinkFailure => return error.LinkFailure,
+ else => |e| return diags.fail("failed to deduplicate literals: {s}", .{@errorName(e)}),
+ };
if (self.base.gc_sections) {
try dead_strip.gcAtoms(self);
@@ -551,7 +554,11 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
try self.writeSectionsToFile();
try self.allocateLinkeditSegment();
- try self.writeLinkeditSectionsToFile();
+ self.writeLinkeditSectionsToFile() catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.LinkFailure => return error.LinkFailure,
+ else => |e| return diags.fail("failed to write linkedit sections to file: {s}", .{@errorName(e)}),
+ };
var codesig: ?CodeSignature = if (self.requiresCodeSig()) blk: {
// Preallocate space for the code signature.
@@ -561,7 +568,8 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
// where the code signature goes into.
var codesig = CodeSignature.init(self.getPageSize());
codesig.code_directory.ident = fs.path.basename(self.base.emit.sub_path);
- if (self.entitlements) |path| try codesig.addEntitlements(gpa, path);
+ if (self.entitlements) |path| codesig.addEntitlements(gpa, path) catch |err|
+ return diags.fail("failed to add entitlements from {s}: {s}", .{ path, @errorName(err) });
try self.writeCodeSignaturePadding(&codesig);
break :blk codesig;
} else null;
@@ -573,13 +581,29 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
self.getPageSize(),
);
- const ncmds, const sizeofcmds, const uuid_cmd_offset = try self.writeLoadCommands();
+ const ncmds, const sizeofcmds, const uuid_cmd_offset = self.writeLoadCommands() catch |err| switch (err) {
+ error.NoSpaceLeft => unreachable,
+ error.OutOfMemory => return error.OutOfMemory,
+ error.LinkFailure => return error.LinkFailure,
+ };
try self.writeHeader(ncmds, sizeofcmds);
- try self.writeUuid(uuid_cmd_offset, self.requiresCodeSig());
- if (self.getDebugSymbols()) |dsym| try dsym.flushModule(self);
+ self.writeUuid(uuid_cmd_offset, self.requiresCodeSig()) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.LinkFailure => return error.LinkFailure,
+ else => |e| return diags.fail("failed to calculate and write uuid: {s}", .{@errorName(e)}),
+ };
+ if (self.getDebugSymbols()) |dsym| dsym.flushModule(self) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ else => |e| return diags.fail("failed to get debug symbols: {s}", .{@errorName(e)}),
+ };
+ // Code signing always comes last.
if (codesig) |*csig| {
- try self.writeCodeSignature(csig); // code signing always comes last
+ self.writeCodeSignature(csig) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.LinkFailure => return error.LinkFailure,
+ else => |e| return diags.fail("failed to write code signature: {s}", .{@errorName(e)}),
+ };
const emit = self.base.emit;
try invalidateKernelCache(emit.root_dir.handle, emit.sub_path);
}
@@ -2171,7 +2195,7 @@ fn allocateSections(self: *MachO) !void {
fileoff = mem.alignForward(u32, fileoff, page_size);
}
- const alignment = try math.powi(u32, 2, header.@"align");
+ const alignment = try self.alignPow(header.@"align");
vmaddr = mem.alignForward(u64, vmaddr, alignment);
header.addr = vmaddr;
@@ -2327,7 +2351,7 @@ fn allocateLinkeditSegment(self: *MachO) !void {
seg.vmaddr = mem.alignForward(u64, vmaddr, page_size);
seg.fileoff = mem.alignForward(u64, fileoff, page_size);
- var off = math.cast(u32, seg.fileoff) orelse return error.Overflow;
+ var off = try self.cast(u32, seg.fileoff);
// DYLD_INFO_ONLY
{
const cmd = &self.dyld_info_cmd;
@@ -2392,7 +2416,7 @@ fn resizeSections(self: *MachO) !void {
if (header.isZerofill()) continue;
if (self.isZigSection(@intCast(n_sect))) continue; // TODO this is horrible
const cpu_arch = self.getTarget().cpu.arch;
- const size = math.cast(usize, header.size) orelse return error.Overflow;
+ const size = try self.cast(usize, header.size);
try out.resize(self.base.comp.gpa, size);
const padding_byte: u8 = if (header.isCode() and cpu_arch == .x86_64) 0xcc else 0;
@memset(out.items, padding_byte);
@@ -2489,7 +2513,7 @@ fn writeThunkWorker(self: *MachO, thunk: Thunk) void {
const doWork = struct {
fn doWork(th: Thunk, buffer: []u8, macho_file: *MachO) !void {
- const off = math.cast(usize, th.value) orelse return error.Overflow;
+ const off = try macho_file.cast(usize, th.value);
const size = th.size();
var stream = std.io.fixedBufferStream(buffer[off..][0..size]);
try th.write(macho_file, stream.writer());
@@ -2601,7 +2625,7 @@ fn writeSectionsToFile(self: *MachO) !void {
const slice = self.sections.slice();
for (slice.items(.header), slice.items(.out)) |header, out| {
- try self.base.file.?.pwriteAll(out.items, header.offset);
+ try self.pwriteAll(out.items, header.offset);
}
}
@@ -2644,7 +2668,7 @@ fn writeDyldInfo(self: *MachO) !void {
try self.lazy_bind_section.write(writer);
try stream.seekTo(cmd.export_off - base_off);
try self.export_trie.write(writer);
- try self.base.file.?.pwriteAll(buffer, cmd.rebase_off);
+ try self.pwriteAll(buffer, cmd.rebase_off);
}
pub fn writeDataInCode(self: *MachO) !void {
@@ -2655,7 +2679,7 @@ pub fn writeDataInCode(self: *MachO) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.data_in_code.size());
defer buffer.deinit();
try self.data_in_code.write(self, buffer.writer());
- try self.base.file.?.pwriteAll(buffer.items, cmd.dataoff);
+ try self.pwriteAll(buffer.items, cmd.dataoff);
}
fn writeIndsymtab(self: *MachO) !void {
@@ -2667,15 +2691,15 @@ fn writeIndsymtab(self: *MachO) !void {
var buffer = try std.ArrayList(u8).initCapacity(gpa, needed_size);
defer buffer.deinit();
try self.indsymtab.write(self, buffer.writer());
- try self.base.file.?.pwriteAll(buffer.items, cmd.indirectsymoff);
+ try self.pwriteAll(buffer.items, cmd.indirectsymoff);
}
pub fn writeSymtabToFile(self: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
const cmd = self.symtab_cmd;
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.symtab.items), cmd.symoff);
- try self.base.file.?.pwriteAll(self.strtab.items, cmd.stroff);
+ try self.pwriteAll(mem.sliceAsBytes(self.symtab.items), cmd.symoff);
+ try self.pwriteAll(self.strtab.items, cmd.stroff);
}
fn writeUnwindInfo(self: *MachO) !void {
@@ -2686,20 +2710,20 @@ fn writeUnwindInfo(self: *MachO) !void {
if (self.eh_frame_sect_index) |index| {
const header = self.sections.items(.header)[index];
- const size = math.cast(usize, header.size) orelse return error.Overflow;
+ const size = try self.cast(usize, header.size);
const buffer = try gpa.alloc(u8, size);
defer gpa.free(buffer);
eh_frame.write(self, buffer);
- try self.base.file.?.pwriteAll(buffer, header.offset);
+ try self.pwriteAll(buffer, header.offset);
}
if (self.unwind_info_sect_index) |index| {
const header = self.sections.items(.header)[index];
- const size = math.cast(usize, header.size) orelse return error.Overflow;
+ const size = try self.cast(usize, header.size);
const buffer = try gpa.alloc(u8, size);
defer gpa.free(buffer);
try self.unwind_info.write(self, buffer);
- try self.base.file.?.pwriteAll(buffer, header.offset);
+ try self.pwriteAll(buffer, header.offset);
}
}
@@ -2890,7 +2914,7 @@ fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
assert(stream.pos == needed_size);
- try self.base.file.?.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
+ try self.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
return .{ ncmds, buffer.len, uuid_cmd_offset };
}
@@ -2944,7 +2968,7 @@ fn writeHeader(self: *MachO, ncmds: usize, sizeofcmds: usize) !void {
log.debug("writing Mach-O header {}", .{header});
- try self.base.file.?.pwriteAll(mem.asBytes(&header), 0);
+ try self.pwriteAll(mem.asBytes(&header), 0);
}
fn writeUuid(self: *MachO, uuid_cmd_offset: u64, has_codesig: bool) !void {
@@ -2954,7 +2978,7 @@ fn writeUuid(self: *MachO, uuid_cmd_offset: u64, has_codesig: bool) !void {
} else self.codesig_cmd.dataoff;
try calcUuid(self.base.comp, self.base.file.?, file_size, &self.uuid_cmd.uuid);
const offset = uuid_cmd_offset + @sizeOf(macho.load_command);
- try self.base.file.?.pwriteAll(&self.uuid_cmd.uuid, offset);
+ try self.pwriteAll(&self.uuid_cmd.uuid, offset);
}
pub fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void {
@@ -2968,7 +2992,7 @@ pub fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void {
log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
// Pad out the space. We need to do this to calculate valid hashes for everything in the file
// except for code signature data.
- try self.base.file.?.pwriteAll(&[_]u8{0}, offset + needed_size - 1);
+ try self.pwriteAll(&[_]u8{0}, offset + needed_size - 1);
self.codesig_cmd.dataoff = @as(u32, @intCast(offset));
self.codesig_cmd.datasize = @as(u32, @intCast(needed_size));
@@ -2995,7 +3019,7 @@ pub fn writeCodeSignature(self: *MachO, code_sig: *CodeSignature) !void {
offset + buffer.items.len,
});
- try self.base.file.?.pwriteAll(buffer.items, offset);
+ try self.pwriteAll(buffer.items, offset);
}
pub fn updateFunc(
@@ -3109,7 +3133,7 @@ fn detectAllocCollision(self: *MachO, start: u64, size: u64) !?u64 {
}
}
- if (at_end) try self.base.file.?.setEndPos(end);
+ if (at_end) try self.setEndPos(end);
return null;
}
@@ -3193,22 +3217,25 @@ pub fn findFreeSpaceVirtual(self: *MachO, object_size: u64, min_alignment: u32)
return start;
}
-pub fn copyRangeAll(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void {
+pub fn copyRangeAll(self: *MachO, old_offset: u64, new_offset: u64, size: u64) error{LinkFailure}!void {
+ const diags = &self.base.comp.link_diags;
const file = self.base.file.?;
- const amt = try file.copyRangeAll(old_offset, file, new_offset, size);
- if (amt != size) return error.InputOutput;
+ const amt = file.copyRangeAll(old_offset, file, new_offset, size) catch |err|
+ return diags.fail("failed to copy file range: {s}", .{@errorName(err)});
+ if (amt != size)
+ return diags.fail("unexpected short write in copy file range", .{});
}
/// Like File.copyRangeAll but also ensures the source region is zeroed out after copy.
/// This is so that we guarantee zeroed out regions for mapping of zerofill sections by the loader.
-fn copyRangeAllZeroOut(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void {
+fn copyRangeAllZeroOut(self: *MachO, old_offset: u64, new_offset: u64, size: u64) error{ LinkFailure, OutOfMemory }!void {
const gpa = self.base.comp.gpa;
try self.copyRangeAll(old_offset, new_offset, size);
- const size_u = math.cast(usize, size) orelse return error.Overflow;
- const zeroes = try gpa.alloc(u8, size_u);
+ const size_u = try self.cast(usize, size);
+ const zeroes = try gpa.alloc(u8, size_u); // TODO no need to allocate here.
defer gpa.free(zeroes);
@memset(zeroes, 0);
- try self.base.file.?.pwriteAll(zeroes, old_offset);
+ try self.pwriteAll(zeroes, old_offset);
}
const InitMetadataOptions = struct {
@@ -3312,10 +3339,9 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
const allocSect = struct {
fn allocSect(macho_file: *MachO, sect_id: u8, size: u64) !void {
const sect = &macho_file.sections.items(.header)[sect_id];
- const alignment = try math.powi(u32, 2, sect.@"align");
+ const alignment = try macho_file.alignPow(sect.@"align");
if (!sect.isZerofill()) {
- sect.offset = math.cast(u32, try macho_file.findFreeSpace(size, alignment)) orelse
- return error.Overflow;
+ sect.offset = try macho_file.cast(u32, try macho_file.findFreeSpace(size, alignment));
}
sect.addr = macho_file.findFreeSpaceVirtual(size, alignment);
sect.size = size;
@@ -3397,7 +3423,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
};
}
-pub fn growSection(self: *MachO, sect_index: u8, needed_size: u64) !void {
+pub fn growSection(self: *MachO, sect_index: u8, needed_size: u64) error{ OutOfMemory, LinkFailure }!void {
if (self.base.isRelocatable()) {
try self.growSectionRelocatable(sect_index, needed_size);
} else {
@@ -3405,7 +3431,7 @@ pub fn growSection(self: *MachO, sect_index: u8, needed_size: u64) !void {
}
}
-fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void {
+fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) error{ OutOfMemory, LinkFailure }!void {
const diags = &self.base.comp.link_diags;
const sect = &self.sections.items(.header)[sect_index];
@@ -3433,7 +3459,7 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo
sect.offset = @intCast(new_offset);
} else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
- try self.base.file.?.setEndPos(sect.offset + needed_size);
+ try self.setEndPos(sect.offset + needed_size);
}
seg.filesize = needed_size;
}
@@ -3454,7 +3480,7 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo
seg.vmsize = needed_size;
}
-fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void {
+fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) error{ OutOfMemory, LinkFailure }!void {
const sect = &self.sections.items(.header)[sect_index];
if (!sect.isZerofill()) {
@@ -3464,7 +3490,7 @@ fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void
sect.size = 0;
// Must move the entire section.
- const alignment = try math.powi(u32, 2, sect.@"align");
+ const alignment = try self.alignPow(sect.@"align");
const new_offset = try self.findFreeSpace(needed_size, alignment);
const new_addr = self.findFreeSpaceVirtual(needed_size, alignment);
@@ -3482,7 +3508,7 @@ fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void
sect.offset = @intCast(new_offset);
sect.addr = new_addr;
} else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
- try self.base.file.?.setEndPos(sect.offset + needed_size);
+ try self.setEndPos(sect.offset + needed_size);
}
}
sect.size = needed_size;
@@ -5316,6 +5342,40 @@ fn isReachable(atom: *const Atom, rel: Relocation, macho_file: *MachO) bool {
return true;
}
+pub fn pwriteAll(macho_file: *MachO, bytes: []const u8, offset: u64) error{LinkFailure}!void {
+ const comp = macho_file.base.comp;
+ const diags = &comp.link_diags;
+ macho_file.base.file.?.pwriteAll(bytes, offset) catch |err| {
+ return diags.fail("failed to write: {s}", .{@errorName(err)});
+ };
+}
+
+pub fn setEndPos(macho_file: *MachO, length: u64) error{LinkFailure}!void {
+ const comp = macho_file.base.comp;
+ const diags = &comp.link_diags;
+ macho_file.base.file.?.setEndPos(length) catch |err| {
+ return diags.fail("failed to set file end pos: {s}", .{@errorName(err)});
+ };
+}
+
+pub fn cast(macho_file: *MachO, comptime T: type, x: anytype) error{LinkFailure}!T {
+ return std.math.cast(T, x) orelse {
+ const comp = macho_file.base.comp;
+ const diags = &comp.link_diags;
+ return diags.fail("encountered {d}, overflowing {d}-bit value", .{ x, @bitSizeOf(T) });
+ };
+}
+
+pub fn alignPow(macho_file: *MachO, x: u32) error{LinkFailure}!u32 {
+ const result, const ov = @shlWithOverflow(@as(u32, 1), try cast(macho_file, u5, x));
+ if (ov != 0) {
+ const comp = macho_file.base.comp;
+ const diags = &comp.link_diags;
+ return diags.fail("alignment overflow", .{});
+ }
+ return result;
+}
+
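Mach-O stores section alignment as a log2 exponent, so alignPow(4) yields 16. A hedged sketch of the conversion semantics (sect_header and vmaddr are illustrative, not from this commit):

// @"align" is an exponent: 0 => 1-byte, 4 => 16-byte, 14 => 16384-byte alignment.
const alignment = try macho_file.alignPow(sect_header.@"align");
const next_addr = std.mem.alignForward(u64, vmaddr, alignment);
// An exponent that does not fit in u5 fails the cast inside alignPow and is
// recorded in link_diags as a LinkFailure instead of returning error.Overflow.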
/// Branch instruction has 26 bits immediate but is 4 byte aligned.
const jump_bits = @bitSizeOf(i28);
const max_distance = (1 << (jump_bits - 1));
src/link/Plan9.zig
@@ -535,16 +535,21 @@ fn allocateGotIndex(self: *Plan9) usize {
}
}
-pub fn flush(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
+pub fn flush(
+ self: *Plan9,
+ arena: Allocator,
+ tid: Zcu.PerThread.Id,
+ prog_node: std.Progress.Node,
+) link.File.FlushError!void {
const comp = self.base.comp;
+ const diags = &comp.link_diags;
const use_lld = build_options.have_llvm and comp.config.use_lld;
assert(!use_lld);
switch (link.File.effectiveOutputMode(use_lld, comp.config.output_mode)) {
.Exe => {},
- // plan9 object files are totally different
- .Obj => return error.TODOImplementPlan9Objs,
- .Lib => return error.TODOImplementWritingLibFiles,
+ .Obj => return diags.fail("writing plan9 object files unimplemented", .{}),
+ .Lib => return diags.fail("writing plan9 lib files unimplemented", .{}),
}
return self.flushModule(arena, tid, prog_node);
}
@@ -589,7 +594,13 @@ fn atomCount(self: *Plan9) usize {
return data_nav_count + fn_nav_count + lazy_atom_count + extern_atom_count + uav_atom_count;
}
-pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
+pub fn flushModule(
+ self: *Plan9,
+ arena: Allocator,
+ /// TODO: stop using this
+ tid: Zcu.PerThread.Id,
+ prog_node: std.Progress.Node,
+) link.File.FlushError!void {
if (build_options.skip_non_native and builtin.object_format != .plan9) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
@@ -600,6 +611,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
_ = arena; // Has the same lifetime as the call to Compilation.update.
const comp = self.base.comp;
+ const diags = &comp.link_diags;
const gpa = comp.gpa;
const target = comp.root_mod.resolved_target.result;
@@ -611,7 +623,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
defer assert(self.hdr.entry != 0x0);
const pt: Zcu.PerThread = .activate(
- self.base.comp.zcu orelse return error.LinkingWithoutZigSourceUnimplemented,
+ self.base.comp.zcu orelse return diags.fail("linking without zig source unimplemented", .{}),
tid,
);
defer pt.deactivate();
@@ -620,22 +632,16 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
if (self.lazy_syms.getPtr(.none)) |metadata| {
// Most lazy symbols can be updated on first use, but
// anyerror needs to wait for everything to be flushed.
- if (metadata.text_state != .unused) self.updateLazySymbolAtom(
+ if (metadata.text_state != .unused) try self.updateLazySymbolAtom(
pt,
.{ .kind = .code, .ty = .anyerror_type },
metadata.text_atom,
- ) catch |err| return switch (err) {
- error.CodegenFail => error.LinkFailure,
- else => |e| e,
- };
- if (metadata.rodata_state != .unused) self.updateLazySymbolAtom(
+ );
+ if (metadata.rodata_state != .unused) try self.updateLazySymbolAtom(
pt,
.{ .kind = .const_data, .ty = .anyerror_type },
metadata.rodata_atom,
- ) catch |err| return switch (err) {
- error.CodegenFail => error.LinkFailure,
- else => |e| e,
- };
+ );
}
for (self.lazy_syms.values()) |*metadata| {
if (metadata.text_state != .unused) metadata.text_state = .flushed;
@@ -908,8 +914,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
}
}
}
- // write it all!
- try file.pwritevAll(iovecs, 0);
+ file.pwritevAll(iovecs, 0) catch |err| return diags.fail("failed to write file: {s}", .{@errorName(err)});
}
fn addNavExports(
self: *Plan9,
@@ -1047,8 +1052,15 @@ pub fn getOrCreateAtomForLazySymbol(self: *Plan9, pt: Zcu.PerThread, lazy_sym: F
return atom;
}
-fn updateLazySymbolAtom(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol, atom_index: Atom.Index) !void {
+fn updateLazySymbolAtom(
+ self: *Plan9,
+ pt: Zcu.PerThread,
+ sym: File.LazySymbol,
+ atom_index: Atom.Index,
+) error{ LinkFailure, OutOfMemory }!void {
const gpa = pt.zcu.gpa;
+ const comp = self.base.comp;
+ const diags = &comp.link_diags;
var required_alignment: InternPool.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
@@ -1069,7 +1081,7 @@ fn updateLazySymbolAtom(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol, a
// generate the code
const src = Type.fromInterned(sym.ty).srcLocOrNull(pt.zcu) orelse Zcu.LazySrcLoc.unneeded;
- const res = try codegen.generateLazySymbol(
+ const res = codegen.generateLazySymbol(
&self.base,
pt,
src,
@@ -1078,13 +1090,14 @@ fn updateLazySymbolAtom(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol, a
&code_buffer,
.none,
.{ .atom_index = @intCast(atom_index) },
- );
+ ) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.CodegenFail => return error.LinkFailure,
+ error.Overflow => return diags.fail("codegen failure: encountered number too big for compiler", .{}),
+ };
const code = switch (res) {
.ok => code_buffer.items,
- .fail => |em| {
- log.err("{s}", .{em.msg});
- return error.CodegenFail;
- },
+ .fail => |em| return diags.fail("codegen failure: {s}", .{em.msg}),
};
// duped_code is freed when the atom is freed
const duped_code = try gpa.dupe(u8, code);
src/link/SpirV.zig
@@ -206,7 +206,17 @@ pub fn flush(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
return self.flushModule(arena, tid, prog_node);
}
-pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
+pub fn flushModule(
+ self: *SpirV,
+ arena: Allocator,
+ tid: Zcu.PerThread.Id,
+ prog_node: std.Progress.Node,
+) link.File.FlushError!void {
+ // The goal is to never use this: `tid` is only needed for writing to the
+ // InternPool, and flushModule is too late to be writing to the InternPool.
+ _ = tid;
+
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
@@ -217,12 +227,11 @@ pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
const sub_prog_node = prog_node.start("Flush Module", 0);
defer sub_prog_node.end();
- const spv = &self.object.spv;
-
const comp = self.base.comp;
+ const spv = &self.object.spv;
+ const diags = &comp.link_diags;
const gpa = comp.gpa;
const target = comp.getTarget();
- _ = tid;
try writeCapabilities(spv, target);
try writeMemoryModel(spv, target);
@@ -265,13 +274,11 @@ pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
const linked_module = self.linkModule(arena, module, sub_prog_node) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
- else => |other| {
- log.err("error while linking: {s}", .{@errorName(other)});
- return error.LinkFailure;
- },
+ else => |other| return diags.fail("error while linking: {s}", .{@errorName(other)}),
};
- try self.base.file.?.writeAll(std.mem.sliceAsBytes(linked_module));
+ self.base.file.?.writeAll(std.mem.sliceAsBytes(linked_module)) catch |err|
+ return diags.fail("failed to write: {s}", .{@errorName(err)});
}
fn linkModule(self: *SpirV, a: Allocator, module: []Word, progress: std.Progress.Node) ![]Word {
src/link/Wasm.zig
@@ -1,3 +1,14 @@
+//! The overall strategy here is to load all the object file data into memory
+//! as inputs are parsed. During `prelink`, as much linking as possible is
+//! performed without any knowledge of functions and globals provided by the
+//! Zcu. If there is no Zcu, effectively all linking is done in `prelink`.
+//!
+//! `updateFunc`, `updateNav`, `updateExports`, and `deleteExport` are handled
+//! by merely tracking references to the relevant functions and globals. All
+//! the linking logic between objects and Zcu happens in `flush`. Many
+//! components of the final output are computed on-the-fly at this time rather
+//! than being precomputed and stored separately.
+
const Wasm = @This();
const Archive = @import("Wasm/Archive.zig");
const Object = @import("Wasm/Object.zig");
@@ -164,10 +175,12 @@ functions: std.AutoArrayHashMapUnmanaged(FunctionImport.Resolution, void) = .emp
functions_len: u32 = 0,
/// Immutable after prelink. The undefined functions coming only from all object files.
/// The Zcu must satisfy these.
-function_imports_init: []FunctionImportId = &.{},
-/// Initialized as copy of `function_imports_init`; entries are deleted as
-/// they are satisfied by the Zcu.
-function_imports: std.AutoArrayHashMapUnmanaged(FunctionImportId, void) = .empty,
+function_imports_init_keys: []String = &.{},
+function_imports_init_vals: []FunctionImportId = &.{},
+/// Initialized as copy of `function_imports_init_keys` and
+/// `function_imports_init_vals`; entries are deleted as they are satisfied by
+/// the Zcu.
+function_imports: std.AutoArrayHashMapUnmanaged(String, FunctionImportId) = .empty,
/// Ordered list of non-import globals that will appear in the final binary.
/// Empty until prelink.
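Splitting the symbol-name keys from the import-id values makes flush-time resolution a direct name lookup. A hypothetical sketch of satisfying an object-file import from the Zcu (names illustrative, not from this commit):

// Object files import `symbol_name`, and the Zcu now provides a matching function.
if (wasm.function_imports.get(symbol_name)) |import_id| {
    _ = import_id; // the id is still available as the map's value
    _ = wasm.function_imports.swapRemove(symbol_name); // import satisfied
}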
@@ -175,38 +188,53 @@ globals: std.AutoArrayHashMapUnmanaged(GlobalImport.Resolution, void) = .empty,
/// Tracks the value at the end of prelink, at which point `globals`
/// contains only object file globals, and nothing from the Zcu yet.
globals_len: u32 = 0,
-global_imports_init: []GlobalImportId = &.{},
-global_imports: std.AutoArrayHashMapUnmanaged(GlobalImportId, void) = .empty,
+global_imports_init_keys: []String = &.{},
+global_imports_init_vals: []GlobalImportId = &.{},
+global_imports: std.AutoArrayHashMapUnmanaged(String, GlobalImportId) = .empty,
/// Ordered list of non-import tables that will appear in the final binary.
/// Empty until prelink.
tables: std.AutoArrayHashMapUnmanaged(TableImport.Resolution, void) = .empty,
-table_imports: std.AutoArrayHashMapUnmanaged(ObjectTableImportIndex, void) = .empty,
+table_imports: std.AutoArrayHashMapUnmanaged(String, ObjectTableImportIndex) = .empty,
any_exports_updated: bool = true,
+/// Index into `objects`.
+pub const ObjectIndex = enum(u32) {
+ _,
+};
+
/// Index into `functions`.
pub const FunctionIndex = enum(u32) {
_,
- pub fn fromNav(nav_index: InternPool.Nav.Index, wasm: *const Wasm) FunctionIndex {
- return @enumFromInt(wasm.functions.getIndex(.pack(wasm, .{ .nav = nav_index })).?);
+ pub fn fromIpNav(wasm: *const Wasm, nav_index: InternPool.Nav.Index) ?FunctionIndex {
+ const i = wasm.functions.getIndex(.fromIpNav(wasm, nav_index)) orelse return null;
+ return @enumFromInt(i);
}
};
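Unlike the old fromNav, which asserted the entry existed with `.?`, fromIpNav returns null for navs the linker never turned into functions. A hypothetical call site:

if (Wasm.FunctionIndex.fromIpNav(wasm, nav_index)) |func_index| {
    _ = func_index; // the nav resolved to an output function
} else {
    // the nav never became a function known to the linker (e.g. never referenced)
}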
/// 0. Index into `function_imports`
/// 1. Index into `functions`.
+///
+/// Note that function_imports indexes are subject to swap removals during
+/// `flush`.
pub const OutputFunctionIndex = enum(u32) {
_,
};
/// Index into `globals`.
-const GlobalIndex = enum(u32) {
+pub const GlobalIndex = enum(u32) {
_,
fn key(index: GlobalIndex, f: *const Flush) *Wasm.GlobalImport.Resolution {
return &f.globals.items[@intFromEnum(index)];
}
+
+ pub fn fromIpNav(wasm: *const Wasm, nav_index: InternPool.Nav.Index) ?GlobalIndex {
+ const i = wasm.globals.getIndex(.fromIpNav(wasm, nav_index)) orelse return null;
+ return @enumFromInt(i);
+ }
};
/// The first N indexes correspond to the input objects (`objects`) array.
@@ -218,6 +246,38 @@ pub const SourceLocation = enum(u32) {
zig_object_nofile = std.math.maxInt(u32) - 1,
none = std.math.maxInt(u32),
_,
+
+ /// Index into `source_locations`.
+ pub const Index = enum(u32) {
+ _,
+ };
+
+ pub const Unpacked = union(enum) {
+ none,
+ zig_object_nofile,
+ object_index: ObjectIndex,
+ source_location_index: Index,
+ };
+
+ pub fn pack(unpacked: Unpacked, wasm: *const Wasm) SourceLocation {
+ _ = wasm;
+ return switch (unpacked) {
+ .zig_object_nofile => .zig_object_nofile,
+ .none => .none,
+ .object_index => |object_index| @enumFromInt(@intFromEnum(object_index)),
+ .source_location_index => @panic("TODO"),
+ };
+ }
+
+ pub fn addError(sl: SourceLocation, wasm: *Wasm, comptime f: []const u8, args: anytype) void {
+ const diags = &wasm.base.comp.link_diags;
+ switch (sl.unpack(wasm)) {
+ .none => unreachable,
+ .zig_object_nofile => diags.addError("zig compilation unit: " ++ f, args),
+ .object_index => |i| diags.addError("{}: " ++ f, .{wasm.objects.items[@intFromEnum(i)].path} ++ args),
+ .source_location_index => @panic("TODO"),
+ }
+ }
};
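A hypothetical use of the new addError hook during flush, attributing a diagnostic to whichever object (or the Zcu) produced an unsatisfied import; a stringSlice accessor for interned names is assumed here:

const sl = import_id.sourceLocation(wasm);
sl.addError(wasm, "undefined function: {s}", .{wasm.stringSlice(name)});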
/// The lower bits of this ABI-match the flags here:
@@ -445,6 +505,10 @@ pub const FunctionImport = extern struct {
};
}
+ pub fn fromIpNav(wasm: *const Wasm, ip_nav: InternPool.Nav.Index) Resolution {
+ return pack(wasm, .{ .nav = @enumFromInt(wasm.navs.getIndex(ip_nav).?) });
+ }
+
pub fn isNavOrUnresolved(r: Resolution, wasm: *const Wasm) bool {
return switch (r.unpack(wasm)) {
.unresolved, .nav => true,
@@ -587,6 +651,10 @@ pub const ObjectGlobalImportIndex = enum(u32) {
/// Index into `object_table_imports`.
pub const ObjectTableImportIndex = enum(u32) {
_,
+
+ pub fn ptr(index: ObjectTableImportIndex, wasm: *const Wasm) *TableImport {
+ return &wasm.object_table_imports.items[@intFromEnum(index)];
+ }
};
/// Index into `object_tables`.
@@ -797,12 +865,48 @@ pub const ValtypeList = enum(u32) {
/// 1. Index into `imports`.
pub const FunctionImportId = enum(u32) {
_,
+
+ /// This function is allowed O(N) lookup because it is only called during
+ /// diagnostic generation.
+ pub fn sourceLocation(id: FunctionImportId, wasm: *const Wasm) SourceLocation {
+ switch (id.unpack(wasm)) {
+ .object_function_import => |obj_func_index| {
+ // TODO binary search
+ for (wasm.objects.items, 0..) |o, i| {
+ if (o.function_imports.off <= @intFromEnum(obj_func_index) and
+ o.function_imports.off + o.function_imports.len > @intFromEnum(obj_func_index))
+ {
+ return .pack(wasm, .{ .object_index = @enumFromInt(i) });
+ }
+ } else unreachable;
+ },
+ .zcu_import => return .zig_object_nofile, // TODO give a better source location
+ }
+ }
};
/// 0. Index into `object_global_imports`.
/// 1. Index into `imports`.
pub const GlobalImportId = enum(u32) {
_,
+
+ /// This function is allowed O(N) lookup because it is only called during
+ /// diagnostic generation.
+ pub fn sourceLocation(id: GlobalImportId, wasm: *const Wasm) SourceLocation {
+ switch (id.unpack(wasm)) {
+ .object_global_import => |obj_global_index| {
+ // TODO binary search
+ for (wasm.objects.items, 0..) |o, i| {
+ if (o.global_imports.off <= @intFromEnum(obj_global_index) and
+ o.global_imports.off + o.global_imports.len > @intFromEnum(obj_global_index))
+ {
+ return .pack(wasm, .{ .object_index = @enumFromInt(i) });
+ }
+ } else unreachable;
+ },
+ .zcu_import => return .zig_object_nofile, // TODO give a better source location
+ }
+ }
};
pub const Relocation = struct {
@@ -897,7 +1001,7 @@ pub const InitFunc = extern struct {
priority: u32,
function_index: ObjectFunctionIndex,
- fn lessThan(ctx: void, lhs: InitFunc, rhs: InitFunc) bool {
+ pub fn lessThan(ctx: void, lhs: InitFunc, rhs: InitFunc) bool {
_ = ctx;
if (lhs.priority == rhs.priority) {
return @intFromEnum(lhs.function_index) < @intFromEnum(rhs.function_index);
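Making lessThan public presumably lets flush code outside this struct order constructors; a hypothetical usage over a slice of init functions:

// Order ctors by (priority, then function index) before emitting
// __wasm_call_ctors; init_funcs is a hypothetical []Wasm.InitFunc slice.
std.mem.sort(Wasm.InitFunc, init_funcs, {}, Wasm.InitFunc.lessThan);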
@@ -1237,18 +1341,19 @@ pub fn deinit(wasm: *Wasm) void {
wasm.object_comdat_symbols.deinit(gpa);
wasm.objects.deinit(gpa);
- wasm.atoms.deinit(gpa);
-
wasm.synthetic_symbols.deinit(gpa);
- wasm.globals.deinit(gpa);
wasm.undefs.deinit(gpa);
wasm.discarded.deinit(gpa);
wasm.segments.deinit(gpa);
wasm.segment_info.deinit(gpa);
- wasm.global_imports.deinit(gpa);
wasm.func_types.deinit(gpa);
+ wasm.function_exports.deinit(gpa);
+ wasm.function_imports.deinit(gpa);
wasm.functions.deinit(gpa);
+ wasm.globals.deinit(gpa);
+ wasm.global_imports.deinit(gpa);
+ wasm.table_imports.deinit(gpa);
wasm.output_globals.deinit(gpa);
wasm.exports.deinit(gpa);
@@ -1340,13 +1445,19 @@ pub fn updateNav(wasm: *Wasm, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index
if (!nav_init.typeOf(zcu).hasRuntimeBits(zcu)) {
_ = wasm.imports.swapRemove(nav_index);
- _ = wasm.navs.swapRemove(nav_index); // TODO reclaim resources
+ if (wasm.navs.fetchSwapRemove(nav_index)) |old| {
+ _ = old;
+ @panic("TODO reclaim resources");
+ }
return;
}
if (is_extern) {
try wasm.imports.put(nav_index, {});
- _ = wasm.navs.swapRemove(nav_index); // TODO reclaim resources
+ if (wasm.navs.fetchSwapRemove(nav_index)) |old| {
+ _ = old;
+ @panic("TODO reclaim resources");
+ }
return;
}
@@ -1528,7 +1639,8 @@ pub fn prelink(wasm: *Wasm, prog_node: std.Progress.Node) link.File.FlushError!v
}
}
wasm.functions_len = @intCast(wasm.functions.items.len);
- wasm.function_imports_init = try gpa.dupe(FunctionImportId, wasm.functions.keys());
+ wasm.function_imports_init_keys = try gpa.dupe(String, wasm.function_imports.keys());
+ wasm.function_imports_init_vals = try gpa.dupe(FunctionImportId, wasm.function_imports.values());
wasm.function_exports_len = @intCast(wasm.function_exports.items.len);
for (wasm.object_global_imports.keys(), wasm.object_global_imports.values(), 0..) |name, *import, i| {
@@ -1538,12 +1650,13 @@ pub fn prelink(wasm: *Wasm, prog_node: std.Progress.Node) link.File.FlushError!v
}
}
wasm.globals_len = @intCast(wasm.globals.items.len);
- wasm.global_imports_init = try gpa.dupe(GlobalImportId, wasm.globals.keys());
+ wasm.global_imports_init_keys = try gpa.dupe(String, wasm.global_imports.keys());
+ wasm.global_imports_init_vals = try gpa.dupe(GlobalImportId, wasm.global_imports.values());
wasm.global_exports_len = @intCast(wasm.global_exports.items.len);
- for (wasm.object_table_imports.keys(), wasm.object_table_imports.values(), 0..) |name, *import, i| {
+ for (wasm.object_table_imports.items, 0..) |*import, i| {
if (import.flags.isIncluded(rdynamic)) {
- try markTable(wasm, name, import, @enumFromInt(i));
+ try markTable(wasm, import.name, import, @enumFromInt(i));
continue;
}
}
@@ -1581,7 +1694,7 @@ fn markFunction(
import.resolution = .__wasm_init_tls;
wasm.functions.putAssumeCapacity(.__wasm_init_tls, {});
} else {
- try wasm.function_imports.put(gpa, .fromObject(func_index), {});
+ try wasm.function_imports.put(gpa, name, .fromObject(func_index));
}
} else {
const gop = wasm.functions.getOrPutAssumeCapacity(import.resolution);
@@ -1631,7 +1744,7 @@ fn markGlobal(
import.resolution = .__tls_size;
wasm.globals.putAssumeCapacity(.__tls_size, {});
} else {
- try wasm.global_imports.put(gpa, .fromObject(global_index), {});
+ try wasm.global_imports.put(gpa, name, .fromObject(global_index));
}
} else {
const gop = wasm.globals.getOrPutAssumeCapacity(import.resolution);
@@ -1663,7 +1776,7 @@ fn markTable(
import.resolution = .__indirect_function_table;
wasm.tables.putAssumeCapacity(.__indirect_function_table, {});
} else {
- try wasm.table_imports.put(gpa, .fromObject(table_index), {});
+ try wasm.table_imports.put(gpa, name, .fromObject(table_index));
}
} else {
wasm.tables.putAssumeCapacity(import.resolution, {});
@@ -1722,7 +1835,6 @@ pub fn flushModule(
defer sub_prog_node.end();
wasm.flush_buffer.clear();
- defer wasm.flush_buffer.subsequent = true;
return wasm.flush_buffer.finish(wasm, arena);
}
src/Zcu/PerThread.zig
@@ -1728,7 +1728,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
error.CodegenFail => assert(zcu.failed_codegen.contains(nav_index)),
error.LinkFailure => assert(comp.link_diags.hasErrors()),
error.Overflow => {
- try zcu.failed_codegen.putNoClobber(nav_index, try Zcu.ErrorMsg.create(
+ try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create(
gpa,
zcu.navSrcLoc(nav_index),
"unable to codegen: {s}",
@@ -3114,7 +3114,7 @@ pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error
error.CodegenFail => assert(zcu.failed_codegen.contains(nav_index)),
error.LinkFailure => assert(comp.link_diags.hasErrors()),
error.Overflow => {
- try zcu.failed_codegen.putNoClobber(nav_index, try Zcu.ErrorMsg.create(
+ try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create(
gpa,
zcu.navSrcLoc(nav_index),
"unable to codegen: {s}",
src/link.zig
@@ -745,7 +745,7 @@ pub const File = struct {
}
pub const FlushError = error{
- /// Indicates an error will be present in `Compilation.link_errors`.
+ /// Indicates an error will be present in `Compilation.link_diags`.
LinkFailure,
OutOfMemory,
};