Commit bc28454b43

kcbanner <kcbanner@gmail.com>
2024-07-13 23:46:24
zig.h: replace `_InterlockedExchangeAdd` with a plain volatile load
This was causing zig2.exe to crash during bootstrap, because there was an atomic load of read-only memory, and the attempt to write to it as part of the (idempotent) atomic exchange was invalid. Aligned reads (of u32 / u64) are atomic on x86 / x64, so this is replaced with an optimization-proof load (the `__iso_volatile_load8/16/32/64` intrinsic family) and a compiler reordering barrier (`_ReadWriteBarrier`).
1 parent 11534aa
Changed files (2)
lib
stage1
lib/zig.h
@@ -3670,7 +3670,7 @@ typedef int zig_memory_order;
 
 /* TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64 */
 
-#define zig_msvc_atomics(ZigType, Type, SigType, suffix) \
+#define zig_msvc_atomics(ZigType, Type, SigType, suffix, iso_suffix) \
     static inline bool zig_msvc_cmpxchg_##ZigType(Type volatile* obj, Type* expected, Type desired) { \
         Type comparand = *expected; \
         Type initial = _InterlockedCompareExchange##suffix((SigType volatile*)obj, (SigType)desired, (SigType)comparand); \
@@ -3741,21 +3741,23 @@ typedef int zig_memory_order;
     } \
     static inline void zig_msvc_atomic_store_##ZigType(Type volatile* obj, Type value) { \
         (void)_InterlockedExchange##suffix((SigType volatile*)obj, (SigType)value); \
-    } \
+    }                                                                   \
     static inline Type zig_msvc_atomic_load_##ZigType(Type volatile* obj) { \
-        return _InterlockedExchangeAdd##suffix((SigType volatile*)obj, (SigType)0); \
+        Type val = __iso_volatile_load##iso_suffix((SigType volatile*)obj); \
+        _ReadWriteBarrier(); \
+        return val; \
     }
 
-zig_msvc_atomics( u8,  uint8_t,    char,  8)
-zig_msvc_atomics( i8,   int8_t,    char,  8)
-zig_msvc_atomics(u16, uint16_t,   short, 16)
-zig_msvc_atomics(i16,  int16_t,   short, 16)
-zig_msvc_atomics(u32, uint32_t,    long, )
-zig_msvc_atomics(i32,  int32_t,    long, )
+zig_msvc_atomics( u8,  uint8_t,    char,  8, 8)
+zig_msvc_atomics( i8,   int8_t,    char,  8, 8)
+zig_msvc_atomics(u16, uint16_t,   short, 16, 16)
+zig_msvc_atomics(i16,  int16_t,   short, 16, 16)
+zig_msvc_atomics(u32, uint32_t,    long,   , 32)
+zig_msvc_atomics(i32,  int32_t,    long,   , 32)
 
 #if _M_X64
-zig_msvc_atomics(u64, uint64_t, __int64, 64)
-zig_msvc_atomics(i64,  int64_t, __int64, 64)
+zig_msvc_atomics(u64, uint64_t, __int64, 64, 64)
+zig_msvc_atomics(i64,  int64_t, __int64, 64, 64)
 #endif
 
 #define zig_msvc_flt_atomics(Type, SigType, suffix) \
stage1/zig.h
@@ -207,16 +207,16 @@ typedef char bool;
     __asm(zig_mangle_c(name) " = " zig_mangle_c(symbol))
 #endif
 
+#define zig_mangled_tentative zig_mangled
+#define zig_mangled_final zig_mangled
 #if _MSC_VER
-#define zig_mangled_tentative(mangled, unmangled)
-#define zig_mangled_final(mangled, unmangled) ; \
+#define zig_mangled(mangled, unmangled) ; \
     zig_export(#mangled, unmangled)
 #define zig_mangled_export(mangled, unmangled, symbol) \
     zig_export(unmangled, #mangled) \
     zig_export(symbol, unmangled)
 #else /* _MSC_VER */
-#define zig_mangled_tentative(mangled, unmangled) __asm(zig_mangle_c(unmangled))
-#define zig_mangled_final(mangled, unmangled) zig_mangled_tentative(mangled, unmangled)
+#define zig_mangled(mangled, unmangled) __asm(zig_mangle_c(unmangled))
 #define zig_mangled_export(mangled, unmangled, symbol) \
     zig_mangled_final(mangled, unmangled) \
     zig_export(symbol, unmangled)
@@ -3670,7 +3670,7 @@ typedef int zig_memory_order;
 
 /* TODO: zig_msvc_atomic_load should load 32 bit without interlocked on x86, and load 64 bit without interlocked on x64 */
 
-#define zig_msvc_atomics(ZigType, Type, SigType, suffix) \
+#define zig_msvc_atomics(ZigType, Type, SigType, suffix, iso_suffix) \
     static inline bool zig_msvc_cmpxchg_##ZigType(Type volatile* obj, Type* expected, Type desired) { \
         Type comparand = *expected; \
         Type initial = _InterlockedCompareExchange##suffix((SigType volatile*)obj, (SigType)desired, (SigType)comparand); \
@@ -3741,21 +3741,23 @@ typedef int zig_memory_order;
     } \
     static inline void zig_msvc_atomic_store_##ZigType(Type volatile* obj, Type value) { \
         (void)_InterlockedExchange##suffix((SigType volatile*)obj, (SigType)value); \
-    } \
+    }                                                                   \
     static inline Type zig_msvc_atomic_load_##ZigType(Type volatile* obj) { \
-        return _InterlockedExchangeAdd##suffix((SigType volatile*)obj, (SigType)0); \
+        Type val = __iso_volatile_load##iso_suffix((SigType volatile*)obj); \
+        _ReadWriteBarrier(); \
+        return val; \
     }
 
-zig_msvc_atomics( u8,  uint8_t,    char,  8)
-zig_msvc_atomics( i8,   int8_t,    char,  8)
-zig_msvc_atomics(u16, uint16_t,   short, 16)
-zig_msvc_atomics(i16,  int16_t,   short, 16)
-zig_msvc_atomics(u32, uint32_t,    long, )
-zig_msvc_atomics(i32,  int32_t,    long, )
+zig_msvc_atomics( u8,  uint8_t,    char,  8, 8)
+zig_msvc_atomics( i8,   int8_t,    char,  8, 8)
+zig_msvc_atomics(u16, uint16_t,   short, 16, 16)
+zig_msvc_atomics(i16,  int16_t,   short, 16, 16)
+zig_msvc_atomics(u32, uint32_t,    long,   , 32)
+zig_msvc_atomics(i32,  int32_t,    long,   , 32)
 
 #if _M_X64
-zig_msvc_atomics(u64, uint64_t, __int64, 64)
-zig_msvc_atomics(i64,  int64_t, __int64, 64)
+zig_msvc_atomics(u64, uint64_t, __int64, 64, 64)
+zig_msvc_atomics(i64,  int64_t, __int64, 64, 64)
 #endif
 
 #define zig_msvc_flt_atomics(Type, SigType, suffix) \