Commit 6d84f22fa0

Luuk de Gram <luuk@degram.dev>
2022-03-05 15:19:54
stage2: Fix wasm linker for llvm backend
This fixes 2 entrypoints within the self-hosted wasm linker that would be called for the llvm backend, whereas we should simply call into the llvm backend to perform such action. i.e. not allocate a decl index when we have an llvm object, and when flushing a module, we should be calling it on llvm's object, rather than have the wasm linker perform the operation. Also, this fixes the wasm intrinsics for wasm.memory.size and wasm.memory.grow. Lastly, this commit ensures that when an extern function is being resolved, we tell LLVM how to import such function.
1 parent 716abe3
Changed files (3)
src
codegen
link
test
behavior
src/codegen/llvm.zig
@@ -708,6 +708,14 @@ pub const DeclGen = struct {
         if (!is_extern) {
             llvm_fn.setLinkage(.Internal);
             llvm_fn.setUnnamedAddr(.True);
+        } else if (dg.module.getTarget().isWasm()) {
+            dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0));
+            if (decl.getExternFn().?.lib_name) |lib_name| {
+                const module_name = std.mem.sliceTo(lib_name, 0);
+                if (!std.mem.eql(u8, module_name, "c")) {
+                    dg.addFnAttrString(llvm_fn, "wasm-import-module", module_name);
+                }
+            }
         }
 
         if (sret) {
@@ -3483,7 +3491,7 @@ pub const FuncGen = struct {
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const index = pl_op.payload;
         const llvm_u32 = self.context.intType(32);
-        const llvm_fn = self.getIntrinsic("llvm.wasm.memory.size.i32", &.{llvm_u32});
+        const llvm_fn = self.getIntrinsic("llvm.wasm.memory.size", &.{llvm_u32});
         const args: [1]*const llvm.Value = .{llvm_u32.constInt(index, .False)};
         return self.builder.buildCall(llvm_fn, &args, args.len, .Fast, .Auto, "");
     }
@@ -3493,7 +3501,7 @@ pub const FuncGen = struct {
         const index = pl_op.payload;
         const operand = try self.resolveInst(pl_op.operand);
         const llvm_u32 = self.context.intType(32);
-        const llvm_fn = self.getIntrinsic("llvm.wasm.memory.grow.i32", &.{ llvm_u32, llvm_u32 });
+        const llvm_fn = self.getIntrinsic("llvm.wasm.memory.grow", &.{llvm_u32});
         const args: [2]*const llvm.Value = .{
             llvm_u32.constInt(index, .False),
             operand,
src/link/Wasm.zig
@@ -466,6 +466,7 @@ pub fn deinit(self: *Wasm) void {
 }
 
 pub fn allocateDeclIndexes(self: *Wasm, decl: *Module.Decl) !void {
+    if (self.llvm_object) |_| return;
     if (decl.link.wasm.sym_index != 0) return;
 
     try self.symbols.ensureUnusedCapacity(self.base.allocator, 1);
@@ -1365,10 +1366,15 @@ pub fn flush(self: *Wasm, comp: *Compilation) !void {
 }
 
 pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
-    _ = comp;
     const tracy = trace(@src());
     defer tracy.end();
 
+    if (build_options.have_llvm) {
+        if (self.llvm_object) |llvm_object| {
+            return try llvm_object.flushModule(comp);
+        }
+    }
+
     // The amount of sections that will be written
     var section_count: u32 = 0;
     // Index of the code section. Used to tell relocation table where the section lives.
test/behavior/struct.zig
@@ -429,6 +429,7 @@ test "packed struct 24bits" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_llvm and builtin.stage2_arch == .wasm32) return error.SkipZigTest; // TODO
 
     comptime {
         try expect(@sizeOf(Foo24Bits) == 4);