Commit 458f658b42
Changed files (8)
src/arch/wasm/CodeGen.zig
@@ -944,8 +944,11 @@ fn addExtraAssumeCapacity(cg: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
cg.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
i32 => @bitCast(@field(extra, field.name)),
- InternPool.Index => @intFromEnum(@field(extra, field.name)),
- InternPool.Nav.Index => @intFromEnum(@field(extra, field.name)),
+ InternPool.Index,
+ InternPool.Nav.Index,
+ Wasm.UavsObjIndex,
+ Wasm.UavsExeIndex,
+ => @intFromEnum(@field(extra, field.name)),
else => |field_type| @compileError("Unsupported field type " ++ @typeName(field_type)),
});
}
@@ -1034,14 +1037,26 @@ fn emitWValue(cg: *CodeGen, value: WValue) InnerError!void {
}
},
.uav_ref => |uav| {
+ const wasm = cg.wasm;
+ const is_obj = wasm.base.comp.config.output_mode == .Obj;
if (uav.offset == 0) {
- try cg.addInst(.{ .tag = .uav_ref, .data = .{ .ip_index = uav.ip_index } });
+ try cg.addInst(.{
+ .tag = .uav_ref,
+ .data = if (is_obj) .{
+ .uav_obj = try wasm.refUavObj(cg.pt, uav.ip_index),
+ } else .{
+ .uav_exe = try wasm.refUavExe(cg.pt, uav.ip_index),
+ },
+ });
} else {
try cg.addInst(.{
.tag = .uav_ref_off,
.data = .{
- .payload = try cg.addExtra(Mir.UavRefOff{
- .ip_index = uav.ip_index,
+ .payload = if (is_obj) try cg.addExtra(Mir.UavRefOffObj{
+ .uav_obj = try wasm.refUavObj(cg.pt, uav.ip_index),
+ .offset = uav.offset,
+ }) else try cg.addExtra(Mir.UavRefOffExe{
+ .uav_exe = try wasm.refUavExe(cg.pt, uav.ip_index),
.offset = uav.offset,
}),
},
@@ -1148,11 +1163,11 @@ pub const Function = extern struct {
}
};
- pub fn lower(f: *Function, wasm: *const Wasm, code: *std.ArrayList(u8)) Allocator.Error!void {
+ pub fn lower(f: *Function, wasm: *Wasm, code: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
const gpa = wasm.base.comp.gpa;
// Write the locals in the prologue of the function body.
- const locals = wasm.all_zcu_locals[f.locals_off..][0..f.locals_len];
+ const locals = wasm.all_zcu_locals.items[f.locals_off..][0..f.locals_len];
try code.ensureUnusedCapacity(gpa, 5 + locals.len * 6 + 38);
std.leb.writeUleb128(code.writer(gpa), @as(u32, @intCast(locals.len))) catch unreachable;
@@ -1164,7 +1179,7 @@ pub const Function = extern struct {
// Stack management section of function prologue.
const stack_alignment = f.prologue.flags.stack_alignment;
if (stack_alignment.toByteUnits()) |align_bytes| {
- const sp_global = try wasm.stackPointerGlobalIndex();
+ const sp_global: Wasm.GlobalIndex = .stack_pointer;
// load stack pointer
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_get));
std.leb.writeULEB128(code.writer(gpa), @intFromEnum(sp_global)) catch unreachable;
@@ -1172,7 +1187,7 @@ pub const Function = extern struct {
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
leb.writeUleb128(code.writer(gpa), f.prologue.sp_local) catch unreachable;
// get the total stack size
- const aligned_stack: i32 = @intCast(f.stack_alignment.forward(f.prologue.stack_size));
+ const aligned_stack: i32 = @intCast(stack_alignment.forward(f.prologue.stack_size));
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
leb.writeIleb128(code.writer(gpa), aligned_stack) catch unreachable;
// subtract it from the current stack pointer
@@ -1197,7 +1212,7 @@ pub const Function = extern struct {
.mir = .{
.instruction_tags = wasm.mir_instructions.items(.tag)[f.mir_off..][0..f.mir_len],
.instruction_datas = wasm.mir_instructions.items(.data)[f.mir_off..][0..f.mir_len],
- .extra = wasm.mir_extra[f.mir_extra_off..][0..f.mir_extra_len],
+ .extra = wasm.mir_extra.items[f.mir_extra_off..][0..f.mir_extra_len],
},
.wasm = wasm,
.code = code,
@@ -5846,6 +5861,8 @@ fn airErrorName(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const name_ty = Type.slice_const_u8_sentinel_0;
const abi_size = name_ty.abiSize(pt.zcu);
+ cg.wasm.error_name_table_ref_count += 1;
+
// Lowers to a i32.const or i64.const with the error table memory address.
try cg.addTag(.error_name_table_ref);
try cg.emitWValue(operand);
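Note: these CodeGen changes rely on `Mir.Inst.Data` being an untagged union (see Mir.zig below): the same `.uav_ref` tag now carries either a `uav_obj` or a `uav_exe` index, and the output mode, re-derived in Emit.zig, tells the reader which field is live. A minimal standalone sketch of that pattern, with hypothetical names:

```zig
const std = @import("std");

const UavsObjIndex = enum(u32) { _ };
const UavsExeIndex = enum(u32) { _ };

const Inst = struct {
    tag: Tag,
    data: Data,

    const Tag = enum { uav_ref };

    // Untagged: which field is active is implied by `tag` plus one bit of
    // external context (the compilation's output mode).
    const Data = union {
        uav_obj: UavsObjIndex,
        uav_exe: UavsExeIndex,
    };
};

test "payload field selected by output mode" {
    const is_obj = false; // stand-in for comp.config.output_mode == .Obj
    const inst: Inst = .{
        .tag = .uav_ref,
        .data = if (is_obj)
            .{ .uav_obj = @enumFromInt(7) }
        else
            .{ .uav_exe = @enumFromInt(7) },
    };
    // The reader (Emit.zig) re-derives is_obj and reads the matching field.
    try std.testing.expectEqual(@as(u32, 7), @intFromEnum(inst.data.uav_exe));
}
```

The payoff is a compact `Inst`; the cost is the invariant that writer and reader agree on the output mode.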
src/arch/wasm/Emit.zig
@@ -5,6 +5,7 @@ const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const leb = std.leb;
+const Wasm = link.File.Wasm;
const Mir = @import("Mir.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
@@ -12,7 +13,7 @@ const InternPool = @import("../../InternPool.zig");
const codegen = @import("../../codegen.zig");
mir: Mir,
-wasm: *link.File.Wasm,
+wasm: *Wasm,
/// The binary representation that will be emitted by this module.
code: *std.ArrayListUnmanaged(u8),
@@ -30,8 +31,8 @@ pub fn lowerToCode(emit: *Emit) Error!void {
const target = &comp.root_mod.resolved_target.result;
const is_wasm32 = target.cpu.arch == .wasm32;
- const tags = mir.instructions.items(.tag);
- const datas = mir.instructions.items(.data);
+ const tags = mir.instruction_tags;
+ const datas = mir.instruction_datas;
var inst: u32 = 0;
loop: switch (tags[inst]) {
@@ -48,17 +49,25 @@ pub fn lowerToCode(emit: *Emit) Error!void {
continue :loop tags[inst];
},
.uav_ref => {
- try uavRefOff(wasm, code, .{ .ip_index = datas[inst].ip_index, .offset = 0 });
+ if (is_obj) {
+ try uavRefOffObj(wasm, code, .{ .uav_obj = datas[inst].uav_obj, .offset = 0 }, is_wasm32);
+ } else {
+ try uavRefOffExe(wasm, code, .{ .uav_exe = datas[inst].uav_exe, .offset = 0 }, is_wasm32);
+ }
inst += 1;
continue :loop tags[inst];
},
.uav_ref_off => {
- try uavRefOff(wasm, code, mir.extraData(Mir.UavRefOff, datas[inst].payload).data);
+ if (is_obj) {
+ try uavRefOffObj(wasm, code, mir.extraData(Mir.UavRefOffObj, datas[inst].payload).data, is_wasm32);
+ } else {
+ try uavRefOffExe(wasm, code, mir.extraData(Mir.UavRefOffExe, datas[inst].payload).data, is_wasm32);
+ }
inst += 1;
continue :loop tags[inst];
},
.nav_ref => {
- try navRefOff(wasm, code, .{ .ip_index = datas[inst].ip_index, .offset = 0 }, is_wasm32);
+ try navRefOff(wasm, code, .{ .nav_index = datas[inst].nav_index, .offset = 0 }, is_wasm32);
inst += 1;
continue :loop tags[inst];
},
@@ -124,7 +133,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
},
.br_table => {
- const extra_index = mir.instructions.items(.data)[inst].payload;
+ const extra_index = datas[inst].payload;
const extra = mir.extraData(Mir.JumpTable, extra_index);
const labels = mir.extra[extra.end..][0..extra.data.length];
try code.ensureUnusedCapacity(gpa, 11 + 10 * labels.len);
@@ -223,7 +232,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
continue :loop tags[inst];
},
- .global_set => {
+ .global_set_sp => {
try code.ensureUnusedCapacity(gpa, 6);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
if (is_obj) {
@@ -235,7 +244,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
});
code.appendNTimesAssumeCapacity(0, 5);
} else {
- const sp_global = try wasm.stackPointerGlobalIndex();
+ const sp_global: Wasm.GlobalIndex = .stack_pointer;
std.leb.writeULEB128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
}
@@ -243,26 +252,6 @@ pub fn lowerToCode(emit: *Emit) Error!void {
continue :loop tags[inst];
},
- .function_index => {
- try code.ensureUnusedCapacity(gpa, 6);
- code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
- if (is_obj) {
- try wasm.out_relocs.append(gpa, .{
- .offset = @intCast(code.items.len),
- .pointee = .{ .symbol_index = try wasm.functionSymbolIndex(datas[inst].ip_index) },
- .tag = .TABLE_INDEX_SLEB,
- .addend = 0,
- });
- code.appendNTimesAssumeCapacity(0, 5);
- } else {
- const func_index = try wasm.functionIndex(datas[inst].ip_index);
- std.leb.writeULEB128(code.fixedWriter(), @intFromEnum(func_index)) catch unreachable;
- }
-
- inst += 1;
- continue :loop tags[inst];
- },
-
.f32_const => {
try code.ensureUnusedCapacity(gpa, 5);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.f32_const));
@@ -521,7 +510,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
},
.simd_prefix => {
try code.ensureUnusedCapacity(gpa, 6 + 20);
- const extra_index = mir.instructions.items(.data)[inst].payload;
+ const extra_index = datas[inst].payload;
const opcode = mir.extra[extra_index];
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.simd_prefix));
leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
@@ -578,7 +567,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
.atomics_prefix => {
try code.ensureUnusedCapacity(gpa, 6 + 20);
- const extra_index = mir.instructions.items(.data)[inst].payload;
+ const extra_index = datas[inst].payload;
const opcode = mir.extra[extra_index];
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
@@ -677,34 +666,36 @@ fn encodeMemArg(code: *std.ArrayListUnmanaged(u8), mem_arg: Mir.MemArg) void {
leb.writeUleb128(code.fixedWriter(), mem_arg.offset) catch unreachable;
}
-fn uavRefOff(wasm: *link.File.Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.UavRefOff, is_wasm32: bool) !void {
+fn uavRefOffObj(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.UavRefOffObj, is_wasm32: bool) !void {
const comp = wasm.base.comp;
const gpa = comp.gpa;
- const is_obj = comp.config.output_mode == .Obj;
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
try code.ensureUnusedCapacity(gpa, 11);
code.appendAssumeCapacity(@intFromEnum(opcode));
- // If outputting an object file, this needs to be a relocation, since global
- // constant data may be mixed with other object files in the final link.
- if (is_obj) {
- try wasm.out_relocs.append(gpa, .{
- .offset = @intCast(code.items.len),
- .pointee = .{ .symbol_index = try wasm.uavSymbolIndex(data.ip_index) },
- .tag = if (is_wasm32) .MEMORY_ADDR_LEB else .MEMORY_ADDR_LEB64,
- .addend = data.offset,
- });
- code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
- return;
- }
+ try wasm.out_relocs.append(gpa, .{
+ .offset = @intCast(code.items.len),
+ .pointee = .{ .symbol_index = try wasm.uavSymbolIndex(data.uav_obj.key(wasm).*) },
+ .tag = if (is_wasm32) .MEMORY_ADDR_LEB else .MEMORY_ADDR_LEB64,
+ .addend = data.offset,
+ });
+ code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
+}
+
+fn uavRefOffExe(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.UavRefOffExe, is_wasm32: bool) !void {
+ const comp = wasm.base.comp;
+ const gpa = comp.gpa;
+ const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
+
+ try code.ensureUnusedCapacity(gpa, 11);
+ code.appendAssumeCapacity(@intFromEnum(opcode));
- // When linking into the final binary, no relocation mechanism is necessary.
- const addr = try wasm.uavAddr(data.ip_index);
- leb.writeUleb128(code.fixedWriter(), addr + data.offset) catch unreachable;
+ const addr = try wasm.uavAddr(data.uav_exe);
+ leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + data.offset))) catch unreachable;
}
-fn navRefOff(wasm: *link.File.Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.NavRefOff, is_wasm32: bool) !void {
+fn navRefOff(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.NavRefOff, is_wasm32: bool) !void {
const comp = wasm.base.comp;
const zcu = comp.zcu.?;
const ip = &zcu.intern_pool;
@@ -715,7 +706,7 @@ fn navRefOff(wasm: *link.File.Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir
try code.ensureUnusedCapacity(gpa, 11);
if (ip.isFunctionType(nav_ty)) {
- code.appendAssumeCapacity(std.wasm.Opcode.i32_const);
+ code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
assert(data.offset == 0);
if (is_obj) {
try wasm.out_relocs.append(gpa, .{
@@ -727,7 +718,7 @@ fn navRefOff(wasm: *link.File.Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir
code.appendNTimesAssumeCapacity(0, 5);
} else {
const addr = try wasm.navAddr(data.nav_index);
- leb.writeUleb128(code.fixedWriter(), addr + data.offset) catch unreachable;
+ leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + data.offset))) catch unreachable;
}
} else {
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
@@ -742,7 +733,7 @@ fn navRefOff(wasm: *link.File.Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir
code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
} else {
const addr = try wasm.navAddr(data.nav_index);
- leb.writeUleb128(code.fixedWriter(), addr + data.offset) catch unreachable;
+ leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + data.offset))) catch unreachable;
}
}
}
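Aside: the `continue :loop tags[inst]` form used throughout `lowerToCode` is Zig's labeled switch continue, which jumps straight to the prong for the next instruction's tag instead of re-testing a loop condition. A self-contained toy interpreter in the same shape:

```zig
const std = @import("std");

const Tag = enum { add, mul, halt };

fn run(tags: []const Tag, vals: []const u32) u32 {
    var acc: u32 = 0;
    var inst: usize = 0;
    loop: switch (tags[inst]) {
        .add => {
            acc += vals[inst];
            inst += 1;
            continue :loop tags[inst];
        },
        .mul => {
            acc *= vals[inst];
            inst += 1;
            continue :loop tags[inst];
        },
        .halt => {},
    }
    return acc;
}

test run {
    const tags: [4]Tag = .{ .add, .add, .mul, .halt };
    const vals: [4]u32 = .{ 1, 2, 3, 0 };
    // (0 + 1 + 2) * 3 == 9
    try std.testing.expectEqual(@as(u32, 9), run(&tags, &vals));
}
```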
src/arch/wasm/Mir.zig
@@ -610,6 +610,8 @@ pub const Inst = struct {
nav_index: InternPool.Nav.Index,
func_ty: Wasm.FunctionType.Index,
intrinsic: Intrinsic,
+ uav_obj: Wasm.UavsObjIndex,
+ uav_exe: Wasm.UavsExeIndex,
comptime {
switch (builtin.mode) {
@@ -633,6 +635,11 @@ pub fn extraData(self: *const Mir, comptime T: type, index: usize) struct { data
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
u32 => self.extra[i],
+ i32 => @bitCast(self.extra[i]),
+ Wasm.UavsObjIndex,
+ Wasm.UavsExeIndex,
+ InternPool.Nav.Index,
+ => @enumFromInt(self.extra[i]),
else => |field_type| @compileError("Unsupported field type " ++ @typeName(field_type)),
};
i += 1;
@@ -684,8 +691,13 @@ pub const MemArg = struct {
alignment: u32,
};
-pub const UavRefOff = struct {
- ip_index: InternPool.Index,
+pub const UavRefOffObj = struct {
+ uav_obj: Wasm.UavsObjIndex,
+ offset: i32,
+};
+
+pub const UavRefOffExe = struct {
+ uav_exe: Wasm.UavsExeIndex,
offset: i32,
};
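The new `UavRefOffObj`/`UavRefOffExe` structs travel through Mir's flat `extra` array of `u32`s: each field takes one slot, enums widen via `@intFromEnum` and come back via `@enumFromInt`, and `i32` round-trips through `@bitCast`. A reduced round-trip sketch of that encoding, using a hypothetical `RefOff` struct:

```zig
const std = @import("std");

const Index = enum(u32) { _ };

const RefOff = struct {
    idx: Index,
    offset: i32,
};

// Serialize: one u32 slot per field, mirroring addExtraAssumeCapacity.
fn addExtra(gpa: std.mem.Allocator, extra: *std.ArrayListUnmanaged(u32), value: RefOff) !u32 {
    const start: u32 = @intCast(extra.items.len);
    inline for (@typeInfo(RefOff).@"struct".fields) |field| {
        try extra.append(gpa, switch (field.type) {
            i32 => @bitCast(@field(value, field.name)),
            Index => @intFromEnum(@field(value, field.name)),
            else => |T| @compileError("unsupported field type " ++ @typeName(T)),
        });
    }
    return start;
}

// Deserialize: the inverse, mirroring extraData.
fn extraData(extra: []const u32, index: u32) RefOff {
    var result: RefOff = undefined;
    var i = index;
    inline for (@typeInfo(RefOff).@"struct".fields) |field| {
        @field(result, field.name) = switch (field.type) {
            i32 => @bitCast(extra[i]),
            Index => @enumFromInt(extra[i]),
            else => |T| @compileError("unsupported field type " ++ @typeName(T)),
        };
        i += 1;
    }
    return result;
}

test "extra array round trip" {
    const gpa = std.testing.allocator;
    var extra: std.ArrayListUnmanaged(u32) = .empty;
    defer extra.deinit(gpa);

    const off = try addExtra(gpa, &extra, .{ .idx = @enumFromInt(3), .offset = -8 });
    const got = extraData(extra.items, off);
    try std.testing.expectEqual(@as(i32, -8), got.offset);
    try std.testing.expectEqual(@as(u32, 3), @intFromEnum(got.idx));
}
```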
src/link/Wasm/Flush.zig
@@ -19,10 +19,6 @@ const leb = std.leb;
const log = std.log.scoped(.link);
const assert = std.debug.assert;
-/// Ordered list of data segments that will appear in the final binary.
-/// When sorted, to-be-merged segments will be made adjacent.
-/// Values are offset relative to segment start.
-data_segments: std.AutoArrayHashMapUnmanaged(Wasm.DataSegment.Index, u32) = .empty,
/// Each time a `data_segment` offset equals zero it indicates a new group, and
/// the next element in this array will contain the total merged segment size.
data_segment_groups: std.ArrayListUnmanaged(u32) = .empty,
@@ -32,21 +28,14 @@ missing_exports: std.AutoArrayHashMapUnmanaged(String, void) = .empty,
indirect_function_table: std.AutoArrayHashMapUnmanaged(Wasm.OutputFunctionIndex, u32) = .empty,
-/// 0. Index into `data_segments`.
-const DataSegmentIndex = enum(u32) {
- _,
-};
-
pub fn clear(f: *Flush) void {
f.binary_bytes.clearRetainingCapacity();
- f.data_segments.clearRetainingCapacity();
f.data_segment_groups.clearRetainingCapacity();
f.indirect_function_table.clearRetainingCapacity();
}
pub fn deinit(f: *Flush, gpa: Allocator) void {
f.binary_bytes.deinit(gpa);
- f.data_segments.deinit(gpa);
f.data_segment_groups.deinit(gpa);
f.indirect_function_table.deinit(gpa);
f.* = undefined;
@@ -141,12 +130,14 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
// Merge and order the data segments. Depends on garbage collection so that
// unused segments can be omitted.
- try f.data_segments.ensureUnusedCapacity(gpa, wasm.object_data_segments.items.len);
+ try wasm.data_segments.ensureUnusedCapacity(gpa, wasm.object_data_segments.items.len);
for (wasm.object_data_segments.items, 0..) |*ds, i| {
if (!ds.flags.alive) continue;
- const data_segment_index: Wasm.DataSegment.Index = @enumFromInt(i);
- any_passive_inits = any_passive_inits or ds.flags.is_passive or (import_memory and !isBss(wasm, ds.name));
- f.data_segments.putAssumeCapacityNoClobber(data_segment_index, @as(u32, undefined));
+ const data_segment_index: Wasm.ObjectDataSegmentIndex = @enumFromInt(i);
+ any_passive_inits = any_passive_inits or ds.flags.is_passive or (import_memory and !wasm.isBss(ds.name));
+ wasm.data_segments.putAssumeCapacityNoClobber(.pack(wasm, .{
+ .object = data_segment_index,
+ }), @as(u32, undefined));
}
try wasm.functions.ensureUnusedCapacity(gpa, 3);
@@ -170,48 +161,64 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
}
// Sort order:
- // 0. Whether the segment is TLS
+ // 0. Segment category (tls, data, zero)
// 1. Segment name prefix
// 2. Segment alignment
- // 3. Segment name suffix
- // 4. Segment index (to break ties, keeping it deterministic)
+ // 3. Reference count, descending (optimize for LEB encoding)
+ // 4. Segment name suffix
+ // 5. Segment ID interpreted as an integer (for determinism)
+ //
// TLS segments are intended to be merged with each other, and segments
// with a common prefix name are intended to be merged with each other.
// Sorting ensures the segments intended to be merged will be adjacent.
+ //
+ // Each Zcu Nav and Cau has an independent data segment ID in this logic.
+ // For the purposes of sorting, they are implicitly all named ".data".
const Sort = struct {
wasm: *const Wasm,
- segments: []const Wasm.DataSegment.Index,
+ segments: []const Wasm.DataSegment.Id,
pub fn lessThan(ctx: @This(), lhs: usize, rhs: usize) bool {
- const lhs_segment_index = ctx.segments[lhs];
- const rhs_segment_index = ctx.segments[rhs];
- const lhs_segment = lhs_segment_index.ptr(ctx.wasm);
- const rhs_segment = rhs_segment_index.ptr(ctx.wasm);
- const lhs_tls = @intFromBool(lhs_segment.flags.tls);
- const rhs_tls = @intFromBool(rhs_segment.flags.tls);
- if (lhs_tls < rhs_tls) return true;
- if (lhs_tls > rhs_tls) return false;
- const lhs_prefix, const lhs_suffix = splitSegmentName(lhs_segment.name.unwrap().?.slice(ctx.wasm));
- const rhs_prefix, const rhs_suffix = splitSegmentName(rhs_segment.name.unwrap().?.slice(ctx.wasm));
+ const lhs_segment = ctx.segments[lhs];
+ const rhs_segment = ctx.segments[rhs];
+ const lhs_category = @intFromEnum(lhs_segment.category(ctx.wasm));
+ const rhs_category = @intFromEnum(rhs_segment.category(ctx.wasm));
+ switch (std.math.order(lhs_category, rhs_category)) {
+ .lt => return true,
+ .gt => return false,
+ .eq => {},
+ }
+ const lhs_segment_name = lhs_segment.name(ctx.wasm);
+ const rhs_segment_name = rhs_segment.name(ctx.wasm);
+ const lhs_prefix, const lhs_suffix = splitSegmentName(lhs_segment_name);
+ const rhs_prefix, const rhs_suffix = splitSegmentName(rhs_segment_name);
switch (mem.order(u8, lhs_prefix, rhs_prefix)) {
.lt => return true,
.gt => return false,
.eq => {},
}
- switch (lhs_segment.flags.alignment.order(rhs_segment.flags.alignment)) {
+ const lhs_alignment = lhs_segment.alignment(ctx.wasm);
+ const rhs_alignment = rhs_segment.alignment(ctx.wasm);
+ switch (lhs_alignment.order(rhs_alignment)) {
.lt => return false,
.gt => return true,
.eq => {},
}
- return switch (mem.order(u8, lhs_suffix, rhs_suffix)) {
- .lt => true,
- .gt => false,
- .eq => @intFromEnum(lhs_segment_index) < @intFromEnum(rhs_segment_index),
- };
+ switch (std.math.order(lhs_segment.refCount(ctx.wasm), rhs_segment.refCount(ctx.wasm))) {
+ .lt => return false,
+ .gt => return true,
+ .eq => {},
+ }
+ switch (mem.order(u8, lhs_suffix, rhs_suffix)) {
+ .lt => return true,
+ .gt => return false,
+ .eq => {},
+ }
+ return @intFromEnum(lhs_segment) < @intFromEnum(rhs_segment);
}
};
- f.data_segments.sortUnstable(@as(Sort, .{
+ wasm.data_segments.sortUnstable(@as(Sort, .{
.wasm = wasm,
- .segments = f.data_segments.keys(),
+ .segments = wasm.data_segments.keys(),
}));
const page_size = std.wasm.page_size; // 64kb
@@ -246,43 +253,44 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
virtual_addrs.stack_pointer = @intCast(memory_ptr);
}
- const segment_indexes = f.data_segments.keys();
- const segment_offsets = f.data_segments.values();
+ const segment_ids = wasm.data_segments.keys();
+ const segment_offsets = wasm.data_segments.values();
assert(f.data_segment_groups.items.len == 0);
{
var seen_tls: enum { before, during, after } = .before;
var offset: u32 = 0;
- for (segment_indexes, segment_offsets, 0..) |segment_index, *segment_offset, i| {
- const segment = segment_index.ptr(wasm);
- memory_ptr = segment.flags.alignment.forward(memory_ptr);
+ for (segment_ids, segment_offsets, 0..) |segment_id, *segment_offset, i| {
+ const alignment = segment_id.alignment(wasm);
+ memory_ptr = alignment.forward(memory_ptr);
const want_new_segment = b: {
if (is_obj) break :b false;
switch (seen_tls) {
- .before => if (segment.flags.tls) {
+ .before => if (segment_id.isTls(wasm)) {
virtual_addrs.tls_base = if (shared_memory) 0 else @intCast(memory_ptr);
- virtual_addrs.tls_align = segment.flags.alignment;
+ virtual_addrs.tls_align = alignment;
seen_tls = .during;
break :b true;
},
- .during => if (!segment.flags.tls) {
+ .during => if (!segment_id.isTls(wasm)) {
virtual_addrs.tls_size = @intCast(memory_ptr - virtual_addrs.tls_base.?);
- virtual_addrs.tls_align = virtual_addrs.tls_align.maxStrict(segment.flags.alignment);
+ virtual_addrs.tls_align = virtual_addrs.tls_align.maxStrict(alignment);
seen_tls = .after;
break :b true;
},
.after => {},
}
- break :b i >= 1 and !wantSegmentMerge(wasm, segment_indexes[i - 1], segment_index);
+ break :b i >= 1 and !wantSegmentMerge(wasm, segment_ids[i - 1], segment_id);
};
if (want_new_segment) {
if (offset > 0) try f.data_segment_groups.append(gpa, offset);
offset = 0;
}
+ const size = segment_id.size(wasm);
segment_offset.* = offset;
- offset += segment.payload.len;
- memory_ptr += segment.payload.len;
+ offset += size;
+ memory_ptr += size;
}
if (offset > 0) try f.data_segment_groups.append(gpa, offset);
}
@@ -599,7 +607,6 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
// Code section.
if (wasm.functions.count() != 0) {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
- const start_offset = binary_bytes.items.len - 5; // minus 5 so start offset is 5 to include entry count
for (wasm.functions.keys()) |resolution| switch (resolution.unpack(wasm)) {
.unresolved => unreachable,
@@ -610,21 +617,21 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
.__zig_error_names => @panic("TODO lower __zig_error_names "),
.object_function => |i| {
_ = i;
- _ = start_offset;
@panic("TODO lower object function code and apply relocations");
//try leb.writeUleb128(binary_writer, atom.code.len);
//try binary_bytes.appendSlice(gpa, atom.code.slice(wasm));
},
.zcu_func => |i| {
- _ = i;
- _ = start_offset;
- @panic("TODO lower zcu_func code and apply relocations");
- //try leb.writeUleb128(binary_writer, atom.code.len);
- //try binary_bytes.appendSlice(gpa, atom.code.slice(wasm));
+ const code_start = try reserveSize(gpa, binary_bytes);
+ defer replaceSize(binary_bytes, code_start);
+
+ const function = &i.value(wasm).function;
+ try function.lower(wasm, binary_bytes);
},
};
replaceVecSectionHeader(binary_bytes, header_offset, .code, @intCast(wasm.functions.entries.len));
+ if (is_obj) @panic("TODO apply offset to code relocs");
code_section_index = section_index;
section_index += 1;
}
@@ -635,11 +642,11 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
var group_index: u32 = 0;
var offset: u32 = undefined;
- for (segment_indexes, segment_offsets) |segment_index, segment_offset| {
- const segment = segment_index.ptr(wasm);
+ for (segment_ids, segment_offsets) |segment_id, segment_offset| {
+ const segment = segment_id.ptr(wasm);
const segment_payload = segment.payload.slice(wasm);
if (segment_payload.len == 0) continue;
- if (!import_memory and isBss(wasm, segment.name)) {
+ if (!import_memory and wasm.isBss(segment.name)) {
// It counted for virtual memory but it does not go into the binary.
continue;
}
@@ -682,7 +689,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
// try wasm.emitDataRelocations(binary_bytes, data_index, symbol_table);
//}
} else if (comp.config.debug_format != .strip) {
- try emitNameSection(wasm, &f.data_segments, binary_bytes);
+ try emitNameSection(wasm, &wasm.data_segments, binary_bytes);
}
if (comp.config.debug_format != .strip) {
@@ -997,27 +1004,23 @@ fn emitProducerSection(gpa: Allocator, binary_bytes: *std.ArrayListUnmanaged(u8)
// writeCustomSectionHeader(binary_bytes, header_offset);
//}
-fn isBss(wasm: *Wasm, optional_name: Wasm.OptionalString) bool {
- const s = optional_name.slice(wasm) orelse return false;
- return mem.eql(u8, s, ".bss") or mem.startsWith(u8, s, ".bss.");
-}
-
fn splitSegmentName(name: []const u8) struct { []const u8, []const u8 } {
const start = @intFromBool(name.len >= 1 and name[0] == '.');
const pivot = mem.indexOfScalarPos(u8, name, start, '.') orelse 0;
return .{ name[0..pivot], name[pivot..] };
}
-fn wantSegmentMerge(wasm: *const Wasm, a_index: Wasm.DataSegment.Index, b_index: Wasm.DataSegment.Index) bool {
- const a = a_index.ptr(wasm);
- const b = b_index.ptr(wasm);
- if (a.flags.tls and b.flags.tls) return true;
- if (a.flags.tls != b.flags.tls) return false;
- if (a.flags.is_passive != b.flags.is_passive) return false;
- if (a.name == b.name) return true;
- const a_prefix, _ = splitSegmentName(a.name.slice(wasm).?);
- const b_prefix, _ = splitSegmentName(b.name.slice(wasm).?);
- return a_prefix.len > 0 and mem.eql(u8, a_prefix, b_prefix);
+fn wantSegmentMerge(wasm: *const Wasm, a_id: Wasm.DataSegment.Id, b_id: Wasm.DataSegment.Id) bool {
+ const a_category = a_id.category(wasm);
+ const b_category = b_id.category(wasm);
+ if (a_category != b_category) return false;
+ if (a_category == .tls or b_category == .tls) return false;
+ if (a_id.isPassive(wasm) != b_id.isPassive(wasm)) return false;
+ const a_name = a_id.name(wasm);
+ const b_name = b_id.name(wasm);
+ const a_prefix, _ = splitSegmentName(a_name);
+ const b_prefix, _ = splitSegmentName(b_name);
+ return mem.eql(u8, a_prefix, b_prefix);
}
/// section id + fixed leb contents size + fixed leb vector length
@@ -1064,6 +1067,21 @@ fn replaceHeader(bytes: *std.ArrayListUnmanaged(u8), offset: u32, tag: u8) void
bytes.replaceRangeAssumeCapacity(offset, section_header_size, fbw.getWritten());
}
+const max_size_encoding = 5;
+
+fn reserveSize(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!u32 {
+ try bytes.appendNTimes(gpa, 0, max_size_encoding);
+ return @intCast(bytes.items.len - max_size_encoding);
+}
+
+fn replaceSize(bytes: *std.ArrayListUnmanaged(u8), offset: u32) void {
+ const size: u32 = @intCast(bytes.items.len - offset - max_size_encoding);
+ var buf: [max_size_encoding]u8 = undefined;
+ var fbw = std.io.fixedBufferStream(&buf);
+ leb.writeUleb128(fbw.writer(), size) catch unreachable;
+ bytes.replaceRangeAssumeCapacity(offset, max_size_encoding, fbw.getWritten());
+}
+
fn emitLimits(
gpa: Allocator,
binary_bytes: *std.ArrayListUnmanaged(u8),
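The `reserveSize`/`replaceSize` pair added above is the usual wasm size-prefix technique: reserve a maximal 5-byte hole, emit the body, then patch in the real ULEB128 length, letting `replaceRangeAssumeCapacity` shift the tail left when the encoding comes out shorter. A self-contained usage sketch that duplicates the two helpers:

```zig
const std = @import("std");
const leb = std.leb;
const Allocator = std.mem.Allocator;

const max_size_encoding = 5;

fn reserveSize(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!u32 {
    try bytes.appendNTimes(gpa, 0, max_size_encoding);
    return @intCast(bytes.items.len - max_size_encoding);
}

fn replaceSize(bytes: *std.ArrayListUnmanaged(u8), offset: u32) void {
    const size: u32 = @intCast(bytes.items.len - offset - max_size_encoding);
    var buf: [max_size_encoding]u8 = undefined;
    var fbw = std.io.fixedBufferStream(&buf);
    leb.writeUleb128(fbw.writer(), size) catch unreachable;
    bytes.replaceRangeAssumeCapacity(offset, max_size_encoding, fbw.getWritten());
}

test "reserve, emit, patch" {
    const gpa = std.testing.allocator;
    var bytes: std.ArrayListUnmanaged(u8) = .empty;
    defer bytes.deinit(gpa);

    const hole = try reserveSize(gpa, &bytes);
    try bytes.appendSlice(gpa, &[_]u8{ 0x01, 0x0b }); // a 2-byte function body
    replaceSize(&bytes, hole);

    // ULEB(2) is a single byte; the body shifted left over the unused padding.
    try std.testing.expectEqualSlices(u8, &[_]u8{ 0x02, 0x01, 0x0b }, bytes.items);
}
```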
src/link/Wasm/Object.zig
@@ -103,7 +103,7 @@ pub const Symbol = struct {
function: Wasm.ObjectFunctionIndex,
function_import: ScratchSpace.FuncImportIndex,
data: struct {
- segment_index: Wasm.DataSegment.Index,
+ segment_index: Wasm.ObjectDataSegmentIndex,
segment_offset: u32,
size: u32,
},
@@ -497,7 +497,7 @@ pub fn parse(
try wasm.object_custom_segments.put(gpa, section_index, .{
.payload = .{
- .off = data_off,
+ .off = @enumFromInt(data_off),
.len = @intCast(debug_content.len),
},
.flags = .{},
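The `@enumFromInt(data_off)` change follows from `DataSegment.Payload.off` becoming a typed offset (see Wasm.zig below): a non-exhaustive enum that cannot be confused with an arbitrary `u32` and that reserves `maxInt(u32)` as the "no bytes" (bss) sentinel. Roughly:

```zig
const std = @import("std");

const Off = enum(u32) {
    /// The payload is all zeroes (bss); no bytes are stored.
    none = std.math.maxInt(u32),
    /// Otherwise, an offset into string_bytes.
    _,

    fn unwrap(off: Off) ?u32 {
        return if (off == .none) null else @intFromEnum(off);
    }
};

test Off {
    const o: Off = @enumFromInt(42);
    try std.testing.expectEqual(@as(?u32, 42), o.unwrap());
    try std.testing.expectEqual(@as(?u32, null), Off.none.unwrap());
}
```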
src/link/Wasm.zig
@@ -119,21 +119,6 @@ object_relocations: std.MultiArrayList(ObjectRelocation) = .empty,
/// by the (synthetic) __wasm_call_ctors function.
object_init_funcs: std.ArrayListUnmanaged(InitFunc) = .empty,
-/// Relocations to be emitted into an object file. Remains empty when not
-/// emitting an object file.
-out_relocs: std.MultiArrayList(OutReloc) = .empty,
-/// List of locations within `string_bytes` that must be patched with the virtual
-/// memory address of a Uav during `flush`.
-/// When emitting an object file, `out_relocs` is used instead.
-uav_fixups: std.ArrayListUnmanaged(UavFixup) = .empty,
-/// List of locations within `string_bytes` that must be patched with the virtual
-/// memory address of a Nav during `flush`.
-/// When emitting an object file, `out_relocs` is used instead.
-nav_fixups: std.ArrayListUnmanaged(NavFixup) = .empty,
-/// Symbols to be emitted into an object file. Remains empty when not emitting
-/// an object file.
-symbol_table: std.AutoArrayHashMapUnmanaged(String, void) = .empty,
-
/// Non-synthetic section that can essentially be mem-cpy'd into place after performing relocations.
object_data_segments: std.ArrayListUnmanaged(DataSegment) = .empty,
/// Non-synthetic section that can essentially be mem-cpy'd into place after performing relocations.
@@ -149,6 +134,21 @@ object_total_sections: u32 = 0,
/// All comdat symbols from all objects concatenated.
object_comdat_symbols: std.MultiArrayList(Comdat.Symbol) = .empty,
+/// Relocations to be emitted into an object file. Remains empty when not
+/// emitting an object file.
+out_relocs: std.MultiArrayList(OutReloc) = .empty,
+/// List of locations within `string_bytes` that must be patched with the virtual
+/// memory address of a Uav during `flush`.
+/// When emitting an object file, `out_relocs` is used instead.
+uav_fixups: std.ArrayListUnmanaged(UavFixup) = .empty,
+/// List of locations within `string_bytes` that must be patched with the virtual
+/// memory address of a Nav during `flush`.
+/// When emitting an object file, `out_relocs` is used instead.
+nav_fixups: std.ArrayListUnmanaged(NavFixup) = .empty,
+/// Symbols to be emitted into an object file. Remains empty when not emitting
+/// an object file.
+symbol_table: std.AutoArrayHashMapUnmanaged(String, void) = .empty,
+
/// When importing objects from the host environment, a name must be supplied.
/// LLVM uses "env" by default when none is given. This would be a good default for Zig
/// to support existing code.
@@ -170,9 +170,15 @@ dump_argv_list: std.ArrayListUnmanaged([]const u8),
preloaded_strings: PreloadedStrings,
/// This field is used when emitting an object; `navs_exe` used otherwise.
-navs_obj: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, NavObj) = .empty,
+navs_obj: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, ZcuDataObj) = .empty,
/// This field is unused when emitting an object; `navs_exe` used otherwise.
-navs_exe: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, NavExe) = .empty,
+navs_exe: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, ZcuDataExe) = .empty,
+/// Tracks all InternPool values referenced by codegen. Needed for outputting
+/// the data segment. This one does not track ref count because object files
+/// require using max LEB encoding for these references anyway.
+uavs_obj: std.AutoArrayHashMapUnmanaged(InternPool.Index, ZcuDataObj) = .empty,
+/// Tracks ref count to optimize LEB encodings for UAV references.
+uavs_exe: std.AutoArrayHashMapUnmanaged(InternPool.Index, ZcuDataExe) = .empty,
zcu_funcs: std.AutoArrayHashMapUnmanaged(InternPool.Index, ZcuFunc) = .empty,
nav_exports: std.AutoArrayHashMapUnmanaged(NavExport, Zcu.Export.Index) = .empty,
uav_exports: std.AutoArrayHashMapUnmanaged(UavExport, Zcu.Export.Index) = .empty,
@@ -224,6 +230,13 @@ global_imports: std.AutoArrayHashMapUnmanaged(String, GlobalImportId) = .empty,
tables: std.AutoArrayHashMapUnmanaged(TableImport.Resolution, void) = .empty,
table_imports: std.AutoArrayHashMapUnmanaged(String, TableImport.Index) = .empty,
+/// Ordered list of data segments that will appear in the final binary.
+/// When sorted, to-be-merged segments will be made adjacent.
+/// Values are offset relative to segment start.
+data_segments: std.AutoArrayHashMapUnmanaged(Wasm.DataSegment.Id, u32) = .empty,
+
+error_name_table_ref_count: u32 = 0,
+
any_exports_updated: bool = true,
/// Set to true if any `GLOBAL_INDEX` relocation is encountered with
/// `SymbolFlags.tls` set to true. This is for objects only; final
@@ -307,6 +320,16 @@ pub const OutputFunctionIndex = enum(u32) {
pub const GlobalIndex = enum(u32) {
_,
+ /// This is only accurate when there is a Zcu.
+ pub const stack_pointer: GlobalIndex = @enumFromInt(0);
+
+ /// Same as `stack_pointer` but with a safety assertion.
+ pub fn stackPointer(wasm: *const Wasm) GlobalIndex {
+ const comp = wasm.base.comp;
+ assert(comp.zcu != null);
+ return .stack_pointer;
+ }
+
pub fn ptr(index: GlobalIndex, f: *const Flush) *Wasm.GlobalImport.Resolution {
return &f.globals.items[@intFromEnum(index)];
}
@@ -545,56 +568,83 @@ pub const Valtype3 = enum(u3) {
}
};
-pub const NavObj = extern struct {
- code: DataSegment.Payload,
- /// Empty if not emitting an object.
- relocs: OutReloc.Slice,
+/// Index into `Wasm.navs_obj`.
+pub const NavsObjIndex = enum(u32) {
+ _,
- /// Index into `Wasm.navs_obj`.
- /// Note that swapRemove is sometimes performed on `navs`.
- pub const Index = enum(u32) {
- _,
+ pub fn key(i: @This(), wasm: *const Wasm) *InternPool.Nav.Index {
+ return &wasm.navs_obj.keys()[@intFromEnum(i)];
+ }
- pub fn key(i: @This(), wasm: *const Wasm) *InternPool.Nav.Index {
- return &wasm.navs_obj.keys()[@intFromEnum(i)];
- }
+ pub fn value(i: @This(), wasm: *const Wasm) *ZcuDataObj {
+ return &wasm.navs_obj.values()[@intFromEnum(i)];
+ }
- pub fn value(i: @This(), wasm: *const Wasm) *NavObj {
- return &wasm.navs_obj.values()[@intFromEnum(i)];
- }
+ pub fn name(i: @This(), wasm: *const Wasm) [:0]const u8 {
+ const zcu = wasm.base.comp.zcu.?;
+ const ip = &zcu.intern_pool;
+ const nav = ip.getNav(i.key(wasm).*);
+ return nav.fqn.toSlice(ip);
+ }
+};
- pub fn name(i: @This(), wasm: *const Wasm) [:0]const u8 {
- const zcu = wasm.base.comp.zcu.?;
- const ip = &zcu.intern_pool;
- const nav = ip.getNav(i.key(wasm).*);
- return nav.fqn.toSlice(ip);
- }
- };
+/// Index into `Wasm.navs_exe`.
+pub const NavsExeIndex = enum(u32) {
+ _,
+
+ pub fn key(i: @This(), wasm: *const Wasm) *InternPool.Nav.Index {
+ return &wasm.navs_exe.keys()[@intFromEnum(i)];
+ }
+
+ pub fn value(i: @This(), wasm: *const Wasm) *ZcuDataExe {
+ return &wasm.navs_exe.values()[@intFromEnum(i)];
+ }
+
+ pub fn name(i: @This(), wasm: *const Wasm) [:0]const u8 {
+ const zcu = wasm.base.comp.zcu.?;
+ const ip = &zcu.intern_pool;
+ const nav = ip.getNav(i.key(wasm).*);
+ return nav.fqn.toSlice(ip);
+ }
};
-pub const NavExe = extern struct {
- code: DataSegment.Payload,
+/// Index into `Wasm.uavs_obj`.
+pub const UavsObjIndex = enum(u32) {
+ _,
- /// Index into `Wasm.navs_exe`.
- /// Note that swapRemove is sometimes performed on `navs`.
- pub const Index = enum(u32) {
- _,
+ pub fn key(i: @This(), wasm: *const Wasm) *InternPool.Index {
+ return &wasm.uavs_obj.keys()[@intFromEnum(i)];
+ }
- pub fn key(i: @This(), wasm: *const Wasm) *InternPool.Nav.Index {
- return &wasm.navs_exe.keys()[@intFromEnum(i)];
- }
+ pub fn value(i: @This(), wasm: *const Wasm) *ZcuDataObj {
+ return &wasm.uavs_obj.values()[@intFromEnum(i)];
+ }
+};
- pub fn value(i: @This(), wasm: *const Wasm) *NavExe {
- return &wasm.navs_exe.values()[@intFromEnum(i)];
- }
+/// Index into `Wasm.uavs_exe`.
+pub const UavsExeIndex = enum(u32) {
+ _,
- pub fn name(i: @This(), wasm: *const Wasm) [:0]const u8 {
- const zcu = wasm.base.comp.zcu.?;
- const ip = &zcu.intern_pool;
- const nav = ip.getNav(i.key(wasm).*);
- return nav.fqn.toSlice(ip);
- }
- };
+ pub fn key(i: @This(), wasm: *const Wasm) *InternPool.Index {
+ return &wasm.uavs_exe.keys()[@intFromEnum(i)];
+ }
+
+ pub fn value(i: @This(), wasm: *const Wasm) *ZcuDataExe {
+ return &wasm.uavs_exe.values()[@intFromEnum(i)];
+ }
+};
+
+/// Used when emitting a relocatable object.
+pub const ZcuDataObj = extern struct {
+ code: DataSegment.Payload,
+ relocs: OutReloc.Slice,
+};
+
+/// Used when not emitting a relocatable object.
+pub const ZcuDataExe = extern struct {
+ code: DataSegment.Payload,
+ /// Tracks how many references there are for the purposes of sorting data segments.
+ count: u32,
};
pub const ZcuFunc = extern struct {
@@ -841,8 +891,8 @@ pub const GlobalImport = extern struct {
__tls_size,
__zig_error_name_table,
object_global: ObjectGlobalIndex,
- nav_exe: NavExe.Index,
- nav_obj: NavObj.Index,
+ nav_exe: NavsExeIndex,
+ nav_obj: NavsObjIndex,
};
pub fn unpack(r: Resolution, wasm: *const Wasm) Unpacked {
@@ -1150,28 +1200,211 @@ pub const DataSegment = extern struct {
segment_offset: u32,
section_index: ObjectSectionIndex,
+ pub const Category = enum {
+ /// Thread-local variables.
+ tls,
+ /// Data that is not zero initialized and not threadlocal.
+ data,
+ /// Zero-initialized. Does not require corresponding bytes in the
+ /// output file.
+ zero,
+ };
+
pub const Payload = extern struct {
- /// Points into string_bytes. No corresponding string_table entry.
- off: u32,
+ off: Off,
/// The size in bytes of the data representing the segment within the section.
len: u32,
+ pub const Off = enum(u32) {
+ /// The payload is all zeroes (bss section).
+ none = std.math.maxInt(u32),
+ /// Points into string_bytes. No corresponding string_table entry.
+ _,
+
+ pub fn unwrap(off: Off) ?u32 {
+ return if (off == .none) null else @intFromEnum(off);
+ }
+ };
+
pub fn slice(p: DataSegment.Payload, wasm: *const Wasm) []const u8 {
- assert(p.off != p.len);
- return wasm.string_bytes.items[p.off..][0..p.len];
+ return wasm.string_bytes.items[p.off.unwrap().?..][0..p.len];
}
};
- /// Index into `Wasm.object_data_segments`.
- pub const Index = enum(u32) {
+ pub const Id = enum(u32) {
+ __zig_error_name_table,
+ /// First, an `ObjectDataSegmentIndex`.
+ /// Next, index into `uavs_obj` or `uavs_exe` depending on whether emitting an object.
+ /// Next, index into `navs_obj` or `navs_exe` depending on whether emitting an object.
_,
- pub fn ptr(i: Index, wasm: *const Wasm) *DataSegment {
- return &wasm.object_data_segments.items[@intFromEnum(i)];
+ const first_object = @intFromEnum(Id.__zig_error_name_table) + 1;
+
+ pub const Unpacked = union(enum) {
+ __zig_error_name_table,
+ object: ObjectDataSegmentIndex,
+ uav_exe: UavsExeIndex,
+ uav_obj: UavsObjIndex,
+ nav_exe: NavsExeIndex,
+ nav_obj: NavsObjIndex,
+ };
+
+ pub fn pack(wasm: *const Wasm, unpacked: Unpacked) Id {
+ return switch (unpacked) {
+ .__zig_error_name_table => .__zig_error_name_table,
+ .object => |i| @enumFromInt(first_object + @intFromEnum(i)),
+ inline .uav_exe, .uav_obj => |i| @enumFromInt(first_object + wasm.object_data_segments.items.len + @intFromEnum(i)),
+ .nav_exe => |i| @enumFromInt(first_object + wasm.object_data_segments.items.len + wasm.uavs_exe.entries.len + @intFromEnum(i)),
+ .nav_obj => |i| @enumFromInt(first_object + wasm.object_data_segments.items.len + wasm.uavs_obj.entries.len + @intFromEnum(i)),
+ };
+ }
+
+ pub fn unpack(id: Id, wasm: *const Wasm) Unpacked {
+ return switch (id) {
+ .__zig_error_name_table => .__zig_error_name_table,
+ _ => {
+ const object_index = @intFromEnum(id) - first_object;
+
+ const uav_index = if (object_index < wasm.object_data_segments.items.len)
+ return .{ .object = @enumFromInt(object_index) }
+ else
+ object_index - wasm.object_data_segments.items.len;
+
+ const comp = wasm.base.comp;
+ const is_obj = comp.config.output_mode == .Obj;
+ if (is_obj) {
+ const nav_index = if (uav_index < wasm.uavs_obj.entries.len)
+ return .{ .uav_obj = @enumFromInt(uav_index) }
+ else
+ uav_index - wasm.uavs_obj.entries.len;
+
+ return .{ .nav_obj = @enumFromInt(nav_index) };
+ } else {
+ const nav_index = if (uav_index < wasm.uavs_exe.entries.len)
+ return .{ .uav_exe = @enumFromInt(uav_index) }
+ else
+ uav_index - wasm.uavs_exe.entries.len;
+
+ return .{ .nav_exe = @enumFromInt(nav_index) };
+ }
+ },
+ };
+ }
+
+ pub fn category(id: Id, wasm: *const Wasm) Category {
+ return switch (unpack(id, wasm)) {
+ .__zig_error_name_table => .data,
+ .object => |i| {
+ const ptr = i.ptr(wasm);
+ if (ptr.flags.tls) return .tls;
+ if (isBss(wasm, ptr.name)) return .zero;
+ return .data;
+ },
+ inline .uav_exe, .uav_obj => |i| if (i.value(wasm).code.off == .none) .zero else .data,
+ inline .nav_exe, .nav_obj => |i| {
+ const zcu = wasm.base.comp.zcu.?;
+ const ip = &zcu.intern_pool;
+ const nav = ip.getNav(i.key(wasm).*);
+ if (nav.isThreadLocal(ip)) return .tls;
+ const code = i.value(wasm).code;
+ return if (code.off == .none) .zero else .data;
+ },
+ };
+ }
+
+ pub fn isTls(id: Id, wasm: *const Wasm) bool {
+ return switch (unpack(id, wasm)) {
+ .__zig_error_name_table => false,
+ .object => |i| i.ptr(wasm).flags.tls,
+ .uav_exe, .uav_obj => false,
+ inline .nav_exe, .nav_obj => |i| {
+ const zcu = wasm.base.comp.zcu.?;
+ const ip = &zcu.intern_pool;
+ const nav = ip.getNav(i.key(wasm).*);
+ return nav.isThreadLocal(ip);
+ },
+ };
+ }
+
+ pub fn name(id: Id, wasm: *const Wasm) []const u8 {
+ return switch (unpack(id, wasm)) {
+ .__zig_error_name_table, .uav_exe, .uav_obj => ".data",
+ .object => |i| i.ptr(wasm).name.unwrap().?.slice(wasm),
+ inline .nav_exe, .nav_obj => |i| {
+ const zcu = wasm.base.comp.zcu.?;
+ const ip = &zcu.intern_pool;
+ const nav = ip.getNav(i.key(wasm).*);
+ return nav.status.resolved.@"linksection".toSlice(ip) orelse ".data";
+ },
+ };
+ }
+
+ pub fn alignment(id: Id, wasm: *const Wasm) Alignment {
+ return switch (unpack(id, wasm)) {
+ .__zig_error_name_table => wasm.pointerAlignment(),
+ .object => |i| i.ptr(wasm).flags.alignment,
+ inline .uav_exe, .uav_obj => |i| {
+ const zcu = wasm.base.comp.zcu.?;
+ const ip = &zcu.intern_pool;
+ const ip_index = i.key(wasm).*;
+ const ty: ZcuType = .fromInterned(ip.typeOf(ip_index));
+ return ty.abiAlignment(zcu);
+ },
+ inline .nav_exe, .nav_obj => |i| {
+ const zcu = wasm.base.comp.zcu.?;
+ const ip = &zcu.intern_pool;
+ const nav = ip.getNav(i.key(wasm).*);
+ return nav.status.resolved.alignment;
+ },
+ };
+ }
+
+ pub fn refCount(id: Id, wasm: *const Wasm) u32 {
+ return switch (unpack(id, wasm)) {
+ .__zig_error_name_table => wasm.error_name_table_ref_count,
+ .object, .uav_obj, .nav_obj => 0,
+ inline .uav_exe, .nav_exe => |i| i.value(wasm).count,
+ };
+ }
+
+ pub fn isPassive(id: Id, wasm: *const Wasm) bool {
+ return switch (unpack(id, wasm)) {
+ .__zig_error_name_table => true,
+ .object => |i| i.ptr(wasm).flags.is_passive,
+ .uav_exe, .uav_obj, .nav_exe, .nav_obj => true,
+ };
+ }
+
+ pub fn size(id: Id, wasm: *const Wasm) u32 {
+ return switch (unpack(id, wasm)) {
+ .__zig_error_name_table => {
+ const comp = wasm.base.comp;
+ const zcu = comp.zcu.?;
+ const errors_len = 1 + zcu.intern_pool.global_error_set.getNamesFromMainThread().len;
+ const elem_size = ZcuType.slice_const_u8_sentinel_0.abiSize(zcu);
+ return @intCast(errors_len * elem_size);
+ },
+ .object => |i| i.ptr(wasm).payload.len,
+ inline .uav_exe, .uav_obj, .nav_exe, .nav_obj => |i| i.value(wasm).code.len,
+ };
}
};
};
+/// Index into `Wasm.object_data_segments`.
+pub const ObjectDataSegmentIndex = enum(u32) {
+ _,
+
+ pub fn ptr(i: ObjectDataSegmentIndex, wasm: *const Wasm) *DataSegment {
+ return &wasm.object_data_segments.items[@intFromEnum(i)];
+ }
+};
+
+/// Index into `Wasm.uavs_obj` or `Wasm.uavs_exe`, depending on output mode.
+pub const UavIndex = enum(u32) {
+ _,
+};
+
pub const CustomSegment = extern struct {
payload: Payload,
flags: SymbolFlags,
@@ -1940,6 +2173,8 @@ pub fn deinit(wasm: *Wasm) void {
wasm.navs_exe.deinit(gpa);
wasm.navs_obj.deinit(gpa);
+ wasm.uavs_exe.deinit(gpa);
+ wasm.uavs_obj.deinit(gpa);
wasm.zcu_funcs.deinit(gpa);
wasm.nav_exports.deinit(gpa);
wasm.uav_exports.deinit(gpa);
@@ -1978,6 +2213,7 @@ pub fn deinit(wasm: *Wasm) void {
wasm.global_exports.deinit(gpa);
wasm.global_imports.deinit(gpa);
wasm.table_imports.deinit(gpa);
+ wasm.data_segments.deinit(gpa);
wasm.symbol_table.deinit(gpa);
wasm.out_relocs.deinit(gpa);
wasm.uav_fixups.deinit(gpa);
@@ -2053,57 +2289,28 @@ pub fn updateNav(wasm: *Wasm, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index
return;
}
- const code_start: u32 = @intCast(wasm.string_bytes.items.len);
- const relocs_start: u32 = @intCast(wasm.out_relocs.len);
- wasm.string_bytes_lock.lock();
-
- try codegen.generateSymbol(
- &wasm.base,
- pt,
- zcu.navSrcLoc(nav_index),
- Value.fromInterned(nav_init),
- &wasm.string_bytes,
- .none,
- );
-
- const code_len: u32 = @intCast(wasm.string_bytes.items.len - code_start);
- const relocs_len: u32 = @intCast(wasm.out_relocs.len - relocs_start);
- wasm.string_bytes_lock.unlock();
+ const zcu_data = try lowerZcuData(wasm, pt, nav_init);
- const naive_code: DataSegment.Payload = .{
- .off = code_start,
- .len = code_len,
- };
-
- // Only nonzero init values need to take up space in the output.
- const all_zeroes = std.mem.allEqual(u8, naive_code.slice(wasm), 0);
- const code: DataSegment.Payload = if (!all_zeroes) naive_code else c: {
- wasm.string_bytes.shrinkRetainingCapacity(code_start);
- // Indicate empty by making off and len the same value, however, still
- // transmit the data size by using the size as that value.
- break :c .{
- .off = naive_code.len,
- .len = naive_code.len,
- };
- };
+ try wasm.data_segments.ensureUnusedCapacity(gpa, 1);
if (is_obj) {
const gop = try wasm.navs_obj.getOrPut(gpa, nav_index);
- gop.value_ptr.* = .{
- .code = code,
- .relocs = .{
- .off = relocs_start,
- .len = relocs_len,
- },
- };
+ gop.value_ptr.* = zcu_data;
+ wasm.data_segments.putAssumeCapacity(.pack(wasm, .{
+ .nav_obj = @enumFromInt(gop.index),
+ }), @as(u32, undefined));
}
} else {
- assert(relocs_len == 0);
+ assert(zcu_data.relocs.len == 0);
const gop = try wasm.navs_exe.getOrPut(gpa, nav_index);
gop.value_ptr.* = .{
- .code = code,
+ .code = zcu_data.code,
+ .count = 0,
};
+ wasm.data_segments.putAssumeCapacity(.pack(wasm, .{
+ .nav_exe = @enumFromInt(gop.index),
+ }), @as(u32, undefined));
}
pub fn updateLineNumber(wasm: *Wasm, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void {
@@ -2251,6 +2458,12 @@ pub fn prelink(wasm: *Wasm, prog_node: std.Progress.Node) link.File.FlushError!v
}
}
+ if (comp.zcu != null) {
+ // Zig always depends on a stack pointer global.
+ try wasm.globals.put(gpa, .__stack_pointer, {});
+ assert(wasm.globals.entries.len - 1 == @intFromEnum(GlobalIndex.stack_pointer));
+ }
+
// These loops do both recursive marking of alive symbols well as checking for undefined symbols.
// At the end, output functions and globals will be populated.
for (wasm.object_function_imports.keys(), wasm.object_function_imports.values(), 0..) |name, *import, i| {
@@ -2468,6 +2681,9 @@ pub fn flushModule(
const globals_end_zcu: u32 = @intCast(wasm.globals.entries.len);
defer wasm.globals.shrinkRetainingCapacity(globals_end_zcu);
+ const data_segments_end_zcu: u32 = @intCast(wasm.data_segments.entries.len);
+ defer wasm.data_segments.shrinkRetainingCapacity(data_segments_end_zcu);
+
wasm.flush_buffer.clear();
return wasm.flush_buffer.finish(wasm) catch |err| switch (err) {
@@ -2999,7 +3215,7 @@ pub fn addRelocatableDataPayload(wasm: *Wasm, bytes: []const u8) Allocator.Error
const gpa = wasm.base.comp.gpa;
try wasm.string_bytes.appendSlice(gpa, bytes);
return .{
- .off = @intCast(wasm.string_bytes.items.len - bytes.len),
+ .off = @enumFromInt(wasm.string_bytes.items.len - bytes.len),
.len = @intCast(bytes.len),
};
}
@@ -3025,6 +3241,65 @@ pub fn navSymbolIndex(wasm: *Wasm, nav_index: InternPool.Nav.Index) Allocator.Er
return @enumFromInt(gop.index);
}
+pub fn errorNameTableSymbolIndex(wasm: *Wasm) Allocator.Error!SymbolTableIndex {
+ const comp = wasm.base.comp;
+ assert(comp.config.output_mode == .Obj);
+ const gpa = comp.gpa;
+ const gop = try wasm.symbol_table.getOrPut(gpa, wasm.preloaded_strings.__zig_error_name_table);
+ gop.value_ptr.* = {};
+ return @enumFromInt(gop.index);
+}
+
+pub fn refUavObj(wasm: *Wasm, pt: Zcu.PerThread, ip_index: InternPool.Index) !UavsObjIndex {
+ const comp = wasm.base.comp;
+ const gpa = comp.gpa;
+ assert(comp.config.output_mode == .Obj);
+ const gop = try wasm.uavs_obj.getOrPut(gpa, ip_index);
+ if (!gop.found_existing) gop.value_ptr.* = try lowerZcuData(wasm, pt, ip_index);
+ const uav_index: UavsObjIndex = @enumFromInt(gop.index);
+ try wasm.data_segments.put(gpa, .pack(wasm, .{ .uav_obj = uav_index }), @as(u32, undefined));
+ return uav_index;
+}
+
+pub fn refUavExe(wasm: *Wasm, pt: Zcu.PerThread, ip_index: InternPool.Index) !UavsExeIndex {
+ const comp = wasm.base.comp;
+ const gpa = comp.gpa;
+ assert(comp.config.output_mode != .Obj);
+ const gop = try wasm.uavs_exe.getOrPut(gpa, ip_index);
+ if (gop.found_existing) {
+ gop.value_ptr.count += 1;
+ } else {
+ const zcu_data = try lowerZcuData(wasm, pt, ip_index);
+ gop.value_ptr.* = .{
+ .code = zcu_data.code,
+ .count = 1,
+ };
+ }
+ const uav_index: UavsExeIndex = @enumFromInt(gop.index);
+ try wasm.data_segments.put(gpa, .pack(wasm, .{ .uav_exe = uav_index }), @as(u32, undefined));
+ return uav_index;
+}
+
+pub fn uavAddr(wasm: *Wasm, uav_index: UavsExeIndex) Allocator.Error!u32 {
+ const comp = wasm.base.comp;
+ assert(comp.config.output_mode != .Obj);
+ const ds_id: DataSegment.Id = .pack(wasm, .{ .uav_exe = uav_index });
+ return wasm.data_segments.get(ds_id).?;
+}
+
+pub fn navAddr(wasm: *Wasm, nav_index: InternPool.Nav.Index) Allocator.Error!u32 {
+ const comp = wasm.base.comp;
+ assert(comp.config.output_mode != .Obj);
+ const ds_id: DataSegment.Id = .pack(wasm, .{ .nav_exe = @enumFromInt(wasm.navs_exe.getIndex(nav_index).?) });
+ return wasm.data_segments.get(ds_id).?;
+}
+
+pub fn errorNameTableAddr(wasm: *Wasm) Allocator.Error!u32 {
+ const comp = wasm.base.comp;
+ assert(comp.config.output_mode != .Obj);
+ return wasm.data_segments.get(.__zig_error_name_table).?;
+}
+
fn convertZcuFnType(
comp: *Compilation,
cc: std.builtin.CallingConvention,
@@ -3080,3 +3355,54 @@ fn convertZcuFnType(
}
}
}
+
+pub fn isBss(wasm: *const Wasm, optional_name: OptionalString) bool {
+ const s = optional_name.slice(wasm) orelse return false;
+ return mem.eql(u8, s, ".bss") or mem.startsWith(u8, s, ".bss.");
+}
+
+fn lowerZcuData(wasm: *Wasm, pt: Zcu.PerThread, ip_index: InternPool.Index) !ZcuDataObj {
+ const code_start: u32 = @intCast(wasm.string_bytes.items.len);
+ const relocs_start: u32 = @intCast(wasm.out_relocs.len);
+ wasm.string_bytes_lock.lock();
+
+ try codegen.generateSymbol(&wasm.base, pt, .unneeded, .fromInterned(ip_index), &wasm.string_bytes, .none);
+
+ const code_len: u32 = @intCast(wasm.string_bytes.items.len - code_start);
+ const relocs_len: u32 = @intCast(wasm.out_relocs.len - relocs_start);
+ wasm.string_bytes_lock.unlock();
+
+ const naive_code: DataSegment.Payload = .{
+ .off = @enumFromInt(code_start),
+ .len = code_len,
+ };
+
+ // Only nonzero init values need to take up space in the output.
+ const all_zeroes = std.mem.allEqual(u8, naive_code.slice(wasm), 0);
+ const code: DataSegment.Payload = if (!all_zeroes) naive_code else c: {
+ wasm.string_bytes.shrinkRetainingCapacity(code_start);
+ // Indicate all-zeroes by setting `off` to `none`; `len` still conveys
+ // the virtual size of the zero-initialized data.
+ break :c .{
+ .off = .none,
+ .len = naive_code.len,
+ };
+ };
+
+ return .{
+ .code = code,
+ .relocs = .{
+ .off = relocs_start,
+ .len = relocs_len,
+ },
+ };
+}
+
+fn pointerAlignment(wasm: *const Wasm) Alignment {
+ const target = &wasm.base.comp.root_mod.resolved_target.result;
+ return switch (target.cpu.arch) {
+ .wasm32 => .@"4",
+ .wasm64 => .@"8",
+ else => unreachable,
+ };
+}
src/codegen.zig
@@ -590,7 +590,7 @@ fn lowerPtr(
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
return switch (ptr.base_addr) {
- .nav => |nav| try lowerNavRef(bin_file, pt, src_loc, nav, code, reloc_parent, offset),
+ .nav => |nav| try lowerNavRef(bin_file, pt, nav, code, reloc_parent, offset),
.uav => |uav| try lowerUavRef(bin_file, pt, src_loc, uav, code, reloc_parent, offset),
.int => try generateSymbol(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), code, reloc_parent),
.eu_payload => |eu_ptr| try lowerPtr(
@@ -708,13 +708,11 @@ fn lowerUavRef(
fn lowerNavRef(
lf: *link.File,
pt: Zcu.PerThread,
- src_loc: Zcu.LazySrcLoc,
nav_index: InternPool.Nav.Index,
code: *std.ArrayListUnmanaged(u8),
reloc_parent: link.File.RelocInfo.Parent,
offset: u64,
) GenerateSymbolError!void {
- _ = src_loc;
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
src/InternPool.zig
@@ -620,6 +620,13 @@ pub const Nav = struct {
};
}
+ /// Asserts that `status == .resolved`.
+ pub fn isThreadLocal(nav: Nav, ip: *const InternPool) bool {
+ const val = nav.status.resolved.val;
+ if (!isVariable(ip, val)) return false;
+ return ip.indexToKey(val).variable.is_threadlocal;
+ }
+
/// Get the ZIR instruction corresponding to this `Nav`, used to resolve source locations.
/// This is a `declaration`.
pub fn srcInst(nav: Nav, ip: *const InternPool) TrackedInst.Index {
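A closing note on the new Flush.zig sort key, "Reference count, descending (optimize for LEB encoding)": data addresses are emitted as ULEB128 immediates at one byte per 7 bits, so placing heavily referenced segments at low virtual addresses shrinks every `i32.const` that points at them. A tiny illustration of the length function:

```zig
const std = @import("std");

// Bytes needed to ULEB128-encode `value`: one per 7-bit group.
fn ulebLen(value: u32) usize {
    var v = value;
    var n: usize = 1;
    while (v >= 0x80) : (v >>= 7) n += 1;
    return n;
}

test ulebLen {
    try std.testing.expectEqual(@as(usize, 1), ulebLen(127));
    try std.testing.expectEqual(@as(usize, 2), ulebLen(128));
    try std.testing.expectEqual(@as(usize, 3), ulebLen(70_000));
}
```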