Commit a9b68308b9

kcbanner <kcbanner@gmail.com>
2023-01-26 06:45:40
cbe: fixes for tls, support for not linking libc, and enabling tests
- cbe: Implement linksection support, to support TLS when not linking libc
- cbe: Support under-aligned variables / struct fields
- cbe: Support packed structs (in the C definition of packed)
- windows: Fix regression with x86 _tls_array
- compiler_rt: Add 128-bit atomics to compiler_rt
- tests: Re-enable threadlocal tests on cbe+windows, and llvm+x86
- tests: Re-enable f80 tests that now pass
- ci: change windows ci to run the CBE behaviour tests with -lc, to match how the compiler is bootstrapped
- update zig1.wasm
1 parent 9177e0d
ci/x86_64-windows-debug.ps1
@@ -76,7 +76,8 @@ Write-Output "Build x86_64-windows-msvc behavior tests using the C backend..."
   -ofmt=c `
   -femit-bin="test-x86_64-windows-msvc.c" `
   --test-no-exec `
-  -target x86_64-windows-msvc
+  -target x86_64-windows-msvc `
+  -lc
 CheckLastExitCode
 
 & "stage3-debug\bin\zig.exe" build-obj `
@@ -99,7 +100,7 @@ Enter-VsDevShell -VsInstallPath "C:\Program Files\Microsoft Visual Studio\2022\E
 CheckLastExitCode
 
 Write-Output "Build and run behavior tests with msvc..."
-& cl.exe -I..\lib test-x86_64-windows-msvc.c compiler_rt-x86_64-windows-msvc.c /W3 /Z7 -link -nologo -debug -subsystem:console -entry:wWinMainCRTStartup kernel32.lib ntdll.lib vcruntime.lib libucrt.lib
+& cl.exe -I..\lib test-x86_64-windows-msvc.c compiler_rt-x86_64-windows-msvc.c /W3 /Z7 -link -nologo -debug -subsystem:console kernel32.lib ntdll.lib libcmt.lib
 CheckLastExitCode
 
 & .\test-x86_64-windows-msvc.exe
ci/x86_64-windows-release.ps1
@@ -76,7 +76,8 @@ Write-Output "Build x86_64-windows-msvc behavior tests using the C backend..."
   -ofmt=c `
   -femit-bin="test-x86_64-windows-msvc.c" `
   --test-no-exec `
-  -target x86_64-windows-msvc
+  -target x86_64-windows-msvc `
+  -lc
 CheckLastExitCode
 
 & "stage3-release\bin\zig.exe" build-obj `
@@ -99,7 +100,7 @@ Enter-VsDevShell -VsInstallPath "C:\Program Files\Microsoft Visual Studio\2022\E
 CheckLastExitCode
 
 Write-Output "Build and run behavior tests with msvc..."
-& cl.exe -I..\lib test-x86_64-windows-msvc.c compiler_rt-x86_64-windows-msvc.c /W3 /Z7 -link -nologo -debug -subsystem:console -entry:wWinMainCRTStartup kernel32.lib ntdll.lib vcruntime.lib libucrt.lib
+& cl.exe -I..\lib test-x86_64-windows-msvc.c compiler_rt-x86_64-windows-msvc.c /W3 /Z7 -link -nologo -debug -subsystem:console kernel32.lib ntdll.lib libcmt.lib
 CheckLastExitCode
 
 & .\test-x86_64-windows-msvc.exe
lib/compiler_rt/atomics.zig
@@ -192,6 +192,10 @@ fn __atomic_load_8(src: *u64, model: i32) callconv(.C) u64 {
     return atomic_load_N(u64, src, model);
 }
 
+fn __atomic_load_16(src: *u128, model: i32) callconv(.C) u128 {
+    return atomic_load_N(u128, src, model);
+}
+
 inline fn atomic_store_N(comptime T: type, dst: *T, value: T, model: i32) void {
     _ = model;
     if (@sizeOf(T) > largest_atomic_size) {
@@ -219,6 +223,10 @@ fn __atomic_store_8(dst: *u64, value: u64, model: i32) callconv(.C) void {
     return atomic_store_N(u64, dst, value, model);
 }
 
+fn __atomic_store_16(dst: *u128, value: u128, model: i32) callconv(.C) void {
+    return atomic_store_N(u128, dst, value, model);
+}
+
 fn wideUpdate(comptime T: type, ptr: *T, val: T, update: anytype) T {
     const WideAtomic = std.meta.Int(.unsigned, smallest_atomic_fetch_exch_size * 8);
 
@@ -282,6 +290,10 @@ fn __atomic_exchange_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
     return atomic_exchange_N(u64, ptr, val, model);
 }
 
+fn __atomic_exchange_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+    return atomic_exchange_N(u128, ptr, val, model);
+}
+
 inline fn atomic_compare_exchange_N(
     comptime T: type,
     ptr: *T,
@@ -327,6 +339,10 @@ fn __atomic_compare_exchange_8(ptr: *u64, expected: *u64, desired: u64, success:
     return atomic_compare_exchange_N(u64, ptr, expected, desired, success, failure);
 }
 
+fn __atomic_compare_exchange_16(ptr: *u128, expected: *u128, desired: u128, success: i32, failure: i32) callconv(.C) i32 {
+    return atomic_compare_exchange_N(u128, ptr, expected, desired, success, failure);
+}
+
 inline fn fetch_op_N(comptime T: type, comptime op: std.builtin.AtomicRmwOp, ptr: *T, val: T, model: i32) T {
     _ = model;
     const Updater = struct {
@@ -338,6 +354,8 @@ inline fn fetch_op_N(comptime T: type, comptime op: std.builtin.AtomicRmwOp, ptr
                 .Nand => ~(old & new),
                 .Or => old | new,
                 .Xor => old ^ new,
+                .Max => @max(old, new),
+                .Min => @min(old, new),
                 else => @compileError("unsupported atomic op"),
             };
         }
@@ -374,6 +392,10 @@ fn __atomic_fetch_add_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
     return fetch_op_N(u64, .Add, ptr, val, model);
 }
 
+fn __atomic_fetch_add_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+    return fetch_op_N(u128, .Add, ptr, val, model);
+}
+
 fn __atomic_fetch_sub_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
     return fetch_op_N(u8, .Sub, ptr, val, model);
 }
@@ -390,6 +412,10 @@ fn __atomic_fetch_sub_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
     return fetch_op_N(u64, .Sub, ptr, val, model);
 }
 
+fn __atomic_fetch_sub_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+    return fetch_op_N(u128, .Sub, ptr, val, model);
+}
+
 fn __atomic_fetch_and_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
     return fetch_op_N(u8, .And, ptr, val, model);
 }
@@ -406,6 +432,10 @@ fn __atomic_fetch_and_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
     return fetch_op_N(u64, .And, ptr, val, model);
 }
 
+fn __atomic_fetch_and_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+    return fetch_op_N(u128, .And, ptr, val, model);
+}
+
 fn __atomic_fetch_or_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
     return fetch_op_N(u8, .Or, ptr, val, model);
 }
@@ -422,6 +452,10 @@ fn __atomic_fetch_or_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
     return fetch_op_N(u64, .Or, ptr, val, model);
 }
 
+fn __atomic_fetch_or_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+    return fetch_op_N(u128, .Or, ptr, val, model);
+}
+
 fn __atomic_fetch_xor_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
     return fetch_op_N(u8, .Xor, ptr, val, model);
 }
@@ -438,6 +472,10 @@ fn __atomic_fetch_xor_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
     return fetch_op_N(u64, .Xor, ptr, val, model);
 }
 
+fn __atomic_fetch_xor_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+    return fetch_op_N(u128, .Xor, ptr, val, model);
+}
+
 fn __atomic_fetch_nand_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
     return fetch_op_N(u8, .Nand, ptr, val, model);
 }
@@ -454,6 +492,50 @@ fn __atomic_fetch_nand_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
     return fetch_op_N(u64, .Nand, ptr, val, model);
 }
 
+fn __atomic_fetch_nand_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+    return fetch_op_N(u128, .Nand, ptr, val, model);
+}
+
+fn __atomic_fetch_umax_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+    return fetch_op_N(u8, .Max, ptr, val, model);
+}
+
+fn __atomic_fetch_umax_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+    return fetch_op_N(u16, .Max, ptr, val, model);
+}
+
+fn __atomic_fetch_umax_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+    return fetch_op_N(u32, .Max, ptr, val, model);
+}
+
+fn __atomic_fetch_umax_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+    return fetch_op_N(u64, .Max, ptr, val, model);
+}
+
+fn __atomic_fetch_umax_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+    return fetch_op_N(u128, .Max, ptr, val, model);
+}
+
+fn __atomic_fetch_umin_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
+    return fetch_op_N(u8, .Min, ptr, val, model);
+}
+
+fn __atomic_fetch_umin_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
+    return fetch_op_N(u16, .Min, ptr, val, model);
+}
+
+fn __atomic_fetch_umin_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
+    return fetch_op_N(u32, .Min, ptr, val, model);
+}
+
+fn __atomic_fetch_umin_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
+    return fetch_op_N(u64, .Min, ptr, val, model);
+}
+
+fn __atomic_fetch_umin_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
+    return fetch_op_N(u128, .Min, ptr, val, model);
+}
+
 comptime {
     if (supports_atomic_ops and builtin.object_format != .c) {
         @export(__atomic_load, .{ .name = "__atomic_load", .linkage = linkage, .visibility = visibility });
@@ -465,50 +547,72 @@ comptime {
         @export(__atomic_fetch_add_2, .{ .name = "__atomic_fetch_add_2", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_add_4, .{ .name = "__atomic_fetch_add_4", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_add_8, .{ .name = "__atomic_fetch_add_8", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_fetch_add_16, .{ .name = "__atomic_fetch_add_16", .linkage = linkage, .visibility = visibility });
 
         @export(__atomic_fetch_sub_1, .{ .name = "__atomic_fetch_sub_1", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_sub_2, .{ .name = "__atomic_fetch_sub_2", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_sub_4, .{ .name = "__atomic_fetch_sub_4", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_sub_8, .{ .name = "__atomic_fetch_sub_8", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_fetch_sub_16, .{ .name = "__atomic_fetch_sub_16", .linkage = linkage, .visibility = visibility });
 
         @export(__atomic_fetch_and_1, .{ .name = "__atomic_fetch_and_1", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_and_2, .{ .name = "__atomic_fetch_and_2", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_and_4, .{ .name = "__atomic_fetch_and_4", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_and_8, .{ .name = "__atomic_fetch_and_8", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_fetch_and_16, .{ .name = "__atomic_fetch_and_16", .linkage = linkage, .visibility = visibility });
 
         @export(__atomic_fetch_or_1, .{ .name = "__atomic_fetch_or_1", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_or_2, .{ .name = "__atomic_fetch_or_2", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_or_4, .{ .name = "__atomic_fetch_or_4", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_or_8, .{ .name = "__atomic_fetch_or_8", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_fetch_or_16, .{ .name = "__atomic_fetch_or_16", .linkage = linkage, .visibility = visibility });
 
         @export(__atomic_fetch_xor_1, .{ .name = "__atomic_fetch_xor_1", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_xor_2, .{ .name = "__atomic_fetch_xor_2", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_xor_4, .{ .name = "__atomic_fetch_xor_4", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_xor_8, .{ .name = "__atomic_fetch_xor_8", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_fetch_xor_16, .{ .name = "__atomic_fetch_xor_16", .linkage = linkage, .visibility = visibility });
 
         @export(__atomic_fetch_nand_1, .{ .name = "__atomic_fetch_nand_1", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_nand_2, .{ .name = "__atomic_fetch_nand_2", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_nand_4, .{ .name = "__atomic_fetch_nand_4", .linkage = linkage, .visibility = visibility });
         @export(__atomic_fetch_nand_8, .{ .name = "__atomic_fetch_nand_8", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_fetch_nand_16, .{ .name = "__atomic_fetch_nand_16", .linkage = linkage, .visibility = visibility });
+
+        @export(__atomic_fetch_umax_1, .{ .name = "__atomic_fetch_umax_1", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_fetch_umax_2, .{ .name = "__atomic_fetch_umax_2", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_fetch_umax_4, .{ .name = "__atomic_fetch_umax_4", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_fetch_umax_8, .{ .name = "__atomic_fetch_umax_8", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_fetch_umax_16, .{ .name = "__atomic_fetch_umax_16", .linkage = linkage, .visibility = visibility });
+
+        @export(__atomic_fetch_umin_1, .{ .name = "__atomic_fetch_umin_1", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_fetch_umin_2, .{ .name = "__atomic_fetch_umin_2", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_fetch_umin_4, .{ .name = "__atomic_fetch_umin_4", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_fetch_umin_8, .{ .name = "__atomic_fetch_umin_8", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_fetch_umin_16, .{ .name = "__atomic_fetch_umin_16", .linkage = linkage, .visibility = visibility });
 
         @export(__atomic_load_1, .{ .name = "__atomic_load_1", .linkage = linkage, .visibility = visibility });
         @export(__atomic_load_2, .{ .name = "__atomic_load_2", .linkage = linkage, .visibility = visibility });
         @export(__atomic_load_4, .{ .name = "__atomic_load_4", .linkage = linkage, .visibility = visibility });
         @export(__atomic_load_8, .{ .name = "__atomic_load_8", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_load_16, .{ .name = "__atomic_load_16", .linkage = linkage, .visibility = visibility });
 
         @export(__atomic_store_1, .{ .name = "__atomic_store_1", .linkage = linkage, .visibility = visibility });
         @export(__atomic_store_2, .{ .name = "__atomic_store_2", .linkage = linkage, .visibility = visibility });
         @export(__atomic_store_4, .{ .name = "__atomic_store_4", .linkage = linkage, .visibility = visibility });
         @export(__atomic_store_8, .{ .name = "__atomic_store_8", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_store_16, .{ .name = "__atomic_store_16", .linkage = linkage, .visibility = visibility });
 
         @export(__atomic_exchange_1, .{ .name = "__atomic_exchange_1", .linkage = linkage, .visibility = visibility });
         @export(__atomic_exchange_2, .{ .name = "__atomic_exchange_2", .linkage = linkage, .visibility = visibility });
         @export(__atomic_exchange_4, .{ .name = "__atomic_exchange_4", .linkage = linkage, .visibility = visibility });
         @export(__atomic_exchange_8, .{ .name = "__atomic_exchange_8", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_exchange_16, .{ .name = "__atomic_exchange_16", .linkage = linkage, .visibility = visibility });
 
         @export(__atomic_compare_exchange_1, .{ .name = "__atomic_compare_exchange_1", .linkage = linkage, .visibility = visibility });
         @export(__atomic_compare_exchange_2, .{ .name = "__atomic_compare_exchange_2", .linkage = linkage, .visibility = visibility });
         @export(__atomic_compare_exchange_4, .{ .name = "__atomic_compare_exchange_4", .linkage = linkage, .visibility = visibility });
         @export(__atomic_compare_exchange_8, .{ .name = "__atomic_compare_exchange_8", .linkage = linkage, .visibility = visibility });
+        @export(__atomic_compare_exchange_16, .{ .name = "__atomic_compare_exchange_16", .linkage = linkage, .visibility = visibility });
     }
 }
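
A note on how these land: the new __atomic_*_16 entry points cover 128-bit operations, and the umax/umin family backs .Max/.Min across all sizes. A minimal Zig sketch of code that can route through them (not from this commit; assumes a target such as x86_64 that accepts 128-bit atomics):

    const std = @import("std");

    var shared: u128 = 0;

    test "128-bit atomic rmw" {
        // Depending on the target and backend, these builtins may be lowered
        // to calls such as __atomic_fetch_add_16, __atomic_fetch_umax_16 and
        // __atomic_load_16 provided by the compiler_rt additions above.
        _ = @atomicRmw(u128, &shared, .Add, 5, .SeqCst);
        _ = @atomicRmw(u128, &shared, .Max, 3, .SeqCst);
        try std.testing.expectEqual(@as(u128, 5), @atomicLoad(u128, &shared, .SeqCst));
    }
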
lib/std/start_windows_tls.zig
@@ -7,12 +7,14 @@ export var _tls_end: u8 linksection(".tls$ZZZ") = 0;
 export var __xl_a: std.os.windows.PIMAGE_TLS_CALLBACK linksection(".CRT$XLA") = null;
 export var __xl_z: std.os.windows.PIMAGE_TLS_CALLBACK linksection(".CRT$XLZ") = null;
 
-const tls_array: u32 = 0x2c;
 comptime {
-    if (builtin.target.cpu.arch == .x86) {
+    if (builtin.target.cpu.arch == .x86 and builtin.zig_backend != .stage2_c) {
         // The __tls_array is the offset of the ThreadLocalStoragePointer field
         // in the TEB block whose base address held in the %fs segment.
-        @export(tls_array, .{ .name = "_tls_array" });
+        asm (
+            \\ .global __tls_array
+            \\ __tls_array = 0x2C
+        );
     }
 }
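
For reference on the 0x2C constant: in the 32-bit TEB, ThreadLocalStoragePointer sits after the seven-pointer NT_TIB, EnvironmentPointer, the two-pointer ClientId, and ActiveRpcHandle, i.e. eleven 4-byte slots in. A throwaway comptime check (illustrative, not part of this change) spells out the arithmetic:

    const std = @import("std");

    comptime {
        // 7 (NT_TIB) + 1 (EnvironmentPointer) + 2 (ClientId) + 1 (ActiveRpcHandle)
        // pointer-sized slots on x86 Windows put ThreadLocalStoragePointer at 0x2C.
        std.debug.assert((7 + 1 + 2 + 1) * 4 == 0x2C);
    }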
 
lib/zig.h
@@ -93,6 +93,14 @@ typedef char bool;
 #define zig_align zig_align_unavailable
 #endif
 
+#if zig_has_attribute(aligned)
+#define zig_under_align(alignment) __attribute__((aligned(alignment)))
+#elif _MSC_VER
+#define zig_under_align(alignment) zig_align(alignment)
+#else
+#define zig_align zig_align_unavailable
+#endif
+
 #if zig_has_attribute(aligned)
 #define zig_align_fn(alignment) __attribute__((aligned(alignment)))
 #elif _MSC_VER
@@ -101,6 +109,22 @@ typedef char bool;
 #define zig_align_fn zig_align_fn_unavailable
 #endif
 
+#if zig_has_attribute(packed)
+#define zig_packed(definition) __attribute__((packed)) definition
+#elif _MSC_VER
+#define zig_packed(definition) __pragma(pack(1)) definition __pragma(pack())
+#else
+#define zig_packed(definition) zig_packed_unavailable
+#endif
+
+#if zig_has_attribute(section)
+#define zig_linksection(name, def, ...) def __attribute__((section(name)))
+#elif _MSC_VER
+#define zig_linksection(name, def, ...) __pragma(section(name, __VA_ARGS__)) __declspec(allocate(name)) def
+#else
+#define zig_linksection(name, def, ...) zig_linksection_unavailable
+#endif
+
 #if zig_has_builtin(unreachable) || defined(zig_gnuc)
 #define zig_unreachable() __builtin_unreachable()
 #else
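
These helpers are consumed by the C backend changes below. For orientation, a hypothetical Zig struct with a field aligned below its natural alignment is the kind of input whose emitted C needs zig_under_align and zig_packed:

    const std = @import("std");

    // Illustrative only: `value` is aligned below u64's ABI alignment, so the
    // C emitted for this layout has to use the packed/under-align attributes
    // rather than plain field declarations.
    const Record = extern struct {
        tag: u8,
        value: u64 align(1),
    };

    test "under-aligned field" {
        var r = Record{ .tag = 1, .value = 0x1122334455667788 };
        r.value += 1;
        try std.testing.expect(@alignOf(Record) < @alignOf(u64));
        try std.testing.expectEqual(@as(u64, 0x1122334455667789), r.value);
    }
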
src/codegen/c.zig
@@ -1663,6 +1663,22 @@ pub const DeclGen = struct {
         defer buffer.deinit();
 
         try buffer.appendSlice("struct ");
+
+        var needs_pack_attr = false;
+        {
+            var it = t.structFields().iterator();
+            while (it.next()) |field| {
+                const field_ty = field.value_ptr.ty;
+                if (!field_ty.hasRuntimeBits()) continue;
+                const alignment = field.value_ptr.abi_align;
+                if (alignment != 0 and alignment < field_ty.abiAlignment(dg.module.getTarget())) {
+                    needs_pack_attr = true;
+                    try buffer.appendSlice("zig_packed(");
+                    break;
+                }
+            }
+        }
+
         try buffer.appendSlice(name);
         try buffer.appendSlice(" {\n");
         {
@@ -1672,7 +1688,7 @@ pub const DeclGen = struct {
                 const field_ty = field.value_ptr.ty;
                 if (!field_ty.hasRuntimeBits()) continue;
 
-                const alignment = field.value_ptr.abi_align;
+                const alignment = field.value_ptr.alignment(dg.module.getTarget(), t.containerLayout());
                 const field_name = CValue{ .identifier = field.key_ptr.* };
                 try buffer.append(' ');
                 try dg.renderTypeAndName(buffer.writer(), field_ty, field_name, .Mut, alignment, .Complete);
@@ -1682,7 +1698,7 @@ pub const DeclGen = struct {
             }
             if (empty) try buffer.appendSlice(" char empty_struct;\n");
         }
-        try buffer.appendSlice("};\n");
+        if (needs_pack_attr) try buffer.appendSlice("});\n") else try buffer.appendSlice("};\n");
 
         const rendered = try buffer.toOwnedSlice();
         errdefer dg.typedefs.allocator.free(rendered);
@@ -2367,8 +2383,13 @@ pub const DeclGen = struct {
             depth += 1;
         }
 
-        if (alignment != 0 and alignment > ty.abiAlignment(target)) {
-            try w.print("zig_align({}) ", .{alignment});
+        if (alignment != 0) {
+            const abi_alignment = ty.abiAlignment(target);
+            if (alignment < abi_alignment) {
+                try w.print("zig_under_align({}) ", .{alignment});
+            } else if (alignment > abi_alignment) {
+                try w.print("zig_align({}) ", .{alignment});
+            }
         }
         try dg.renderType(w, render_ty, kind);
 
@@ -2860,27 +2881,30 @@ pub fn genDecl(o: *Object) !void {
         const w = o.writer();
         if (!is_global) try w.writeAll("static ");
         if (variable.is_threadlocal) try w.writeAll("zig_threadlocal ");
+        if (o.dg.decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section});
         try o.dg.renderTypeAndName(w, o.dg.decl.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete);
+        if (o.dg.decl.@"linksection" != null) try w.writeAll(", read, write)");
         try w.writeAll(" = ");
         try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer);
         try w.writeByte(';');
         try o.indent_writer.insertNewline();
     } else {
+        const is_global = o.dg.module.decl_exports.contains(o.dg.decl_index);
+        const fwd_decl_writer = o.dg.fwd_decl.writer();
         const decl_c_value: CValue = .{ .decl = o.dg.decl_index };
 
-        const fwd_decl_writer = o.dg.fwd_decl.writer();
-        try fwd_decl_writer.writeAll("static ");
-        try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete);
+        try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static ");
+        try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, .Const, o.dg.decl.@"align", .Complete);
         try fwd_decl_writer.writeAll(";\n");
 
-        const writer = o.writer();
-        try writer.writeAll("static ");
-        // TODO ask the Decl if it is const
-        // https://github.com/ziglang/zig/issues/7582
-        try o.dg.renderTypeAndName(writer, tv.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete);
-        try writer.writeAll(" = ");
-        try o.dg.renderValue(writer, tv.ty, tv.val, .StaticInitializer);
-        try writer.writeAll(";\n");
+        const w = o.writer();
+        if (!is_global) try w.writeAll("static ");
+        if (o.dg.decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section});
+        try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .Const, o.dg.decl.@"align", .Complete);
+        if (o.dg.decl.@"linksection" != null) try w.writeAll(", read)");
+        try w.writeAll(" = ");
+        try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer);
+        try w.writeAll(";\n");
     }
 }
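
The two branches above differ only in mutability: per this code, a global var with a link section should come out wrapped as zig_linksection("<section>", <type and name>, read, write), and a const as zig_linksection("<section>", <type and name>, read). A hedged sketch of Zig source that exercises both paths (section names are made up):

    var counters: [4]u32 linksection(".counters") = [_]u32{0} ** 4;
    const magic: u32 linksection(".rodata.magic") = 0xC0FFEE;

    pub fn main() void {
        // Taking the address keeps `magic` from being folded away at comptime,
        // so it is emitted as a read-only global in its section.
        const magic_ptr = &magic;
        counters[0] += magic_ptr.*;
    }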
 
stage1/zig1.wasm
Binary file
test/behavior/math.zig
@@ -1332,7 +1332,6 @@ test "float remainder division using @rem" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/12602
 
     comptime try frem(f16);
     comptime try frem(f32);
@@ -1375,7 +1374,6 @@ test "float modulo division using @mod" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/12602
 
     comptime try fmod(f16);
     comptime try fmod(f32);
@@ -1438,7 +1436,6 @@ test "@round f80" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/12602
 
     try testRound(f80, 12.0);
     comptime try testRound(f80, 12.0);
test/behavior/muladd.zig
@@ -50,7 +50,6 @@ test "@mulAdd f80" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/12602
 
     comptime try testMulAdd80();
     try testMulAdd80();
@@ -178,7 +177,6 @@ test "vector f80" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/12602
 
     comptime try vector80();
     try vector80();
test/behavior/threadlocal.zig
@@ -7,8 +7,10 @@ test "thread local variable" {
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch != .x86_64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
+        .x86_64, .x86 => {},
+        else => return error.SkipZigTest,
+    }; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     const S = struct {
@@ -23,8 +25,10 @@ test "pointer to thread local array" {
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch != .x86_64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
+        .x86_64, .x86 => {},
+        else => return error.SkipZigTest,
+    }; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     const s = "Hello world";
@@ -39,8 +43,10 @@ test "reference a global threadlocal variable" {
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch != .x86_64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
+        .x86_64, .x86 => {},
+        else => return error.SkipZigTest,
+    }; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     _ = nrfx_uart_rx(&g_uart0);