//! Behavior tests for the atomic builtins: @cmpxchgWeak, @cmpxchgStrong,
//! @atomicRmw, @atomicLoad, and @atomicStore.
1const std = @import("std");
2const builtin = @import("builtin");
3const expect = std.testing.expect;
4
/// Whether the target architecture is expected to support 128-bit atomic
/// operations; the 128-bit tests below skip when this is false.
const supports_128_bit_atomics = switch (builtin.cpu.arch) {
    // TODO: Ideally this could be sync'd with the logic in Sema.
    .aarch64 => true,
    .aarch64_be => false, // Fails due to LLVM issues.
    // x86_64 needs the CMPXCHG16B feature (cx16) for 16-byte atomics.
    .x86_64 => builtin.cpu.has(.x86, .cx16),
    else => false,
};
12
test "cmpxchg" {
    // Backends that do not yet implement cmpxchg are skipped.
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

    // Exercise both runtime and compile-time evaluation.
    try testCmpxchg();
    try comptime testCmpxchg();
}
22
/// Exercises @cmpxchgWeak and @cmpxchgStrong on an i32: an exchange that
/// must fail (wrong expected value), a weak retry loop that must
/// eventually succeed, and a strong exchange that succeeds immediately.
fn testCmpxchg() !void {
    var value: i32 = 1234;

    // The expected operand (99) does not match, so the exchange must
    // fail and report the current contents.
    const observed = @cmpxchgWeak(i32, &value, 99, 5678, .seq_cst, .seq_cst) orelse
        @panic("cmpxchg should have failed");
    try expect(observed == 1234);

    // A weak exchange may fail spuriously even when the comparison
    // holds, so retry until it lands.
    while (@cmpxchgWeak(i32, &value, 1234, 5678, .seq_cst, .seq_cst)) |current| {
        try expect(current == 1234);
    }
    try expect(value == 5678);

    // A strong exchange with the matching expected value returns null.
    try expect(@cmpxchgStrong(i32, &value, 5678, 42, .seq_cst, .seq_cst) == null);
    try expect(value == 42);
}
39
test "atomicrmw and atomicload" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

    // testAtomicRmw exchanges 42 into the byte; verify the exchange took
    // effect before reading it back through @atomicLoad.
    var data: u8 = 200;
    try testAtomicRmw(&data);
    try expect(data == 42);
    try testAtomicLoad(&data);
}
52
/// Atomically exchanges 42 into the byte at `ptr` (expected to hold 200)
/// and additionally checks @atomicLoad at compile time on both a mutable
/// and an immutable comptime location.
fn testAtomicRmw(ptr: *u8) !void {
    const previous = @atomicRmw(u8, ptr, .Xchg, 42, .seq_cst);
    try expect(previous == 200);
    comptime {
        // @atomicLoad must also work during comptime evaluation.
        var mutable_cell: i32 = 1234;
        const const_cell: i32 = 12345;
        try expect(@atomicLoad(i32, &mutable_cell, .seq_cst) == 1234);
        try expect(@atomicLoad(i32, &const_cell, .seq_cst) == 12345);
    }
}
63
/// Reads the byte at `ptr` with sequentially-consistent ordering and
/// verifies it holds 42 (the value left behind by testAtomicRmw).
fn testAtomicLoad(ptr: *u8) !void {
    const loaded = @atomicLoad(u8, ptr, .seq_cst);
    try expect(loaded == 42);
}
68
test "cmpxchg with ptr" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

    // Same fail/retry/strong sequence as testCmpxchg, but the atomic
    // object is a pointer value rather than an integer.
    var data1: i32 = 1234;
    var data2: i32 = 5678;
    var data3: i32 = 9101;
    var x: *i32 = &data1;
    // Expected operand (&data2) does not match the current pointer, so
    // the exchange must fail and report the current contents.
    if (@cmpxchgWeak(*i32, &x, &data2, &data3, .seq_cst, .seq_cst)) |x1| {
        try expect(x1 == &data1);
    } else {
        @panic("cmpxchg should have failed");
    }

    // Weak cmpxchg may fail spuriously even when the comparison holds;
    // retry until one succeeds.
    while (@cmpxchgWeak(*i32, &x, &data1, &data3, .seq_cst, .seq_cst)) |x1| {
        try expect(x1 == &data1);
    }
    try expect(x == &data3);

    // A strong exchange with the matching expected pointer returns null.
    try expect(@cmpxchgStrong(*i32, &x, &data3, &data2, .seq_cst, .seq_cst) == null);
    try expect(x == &data2);
}
94
test "cmpxchg with ignored result" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

    var x: i32 = 1234;

    // Discarding the optional result must not discard the side effect:
    // the exchange itself still has to happen.
    _ = @cmpxchgStrong(i32, &x, 1234, 5678, .monotonic, .monotonic);

    try expect(5678 == x);
}
107
test "128-bit cmpxchg" {
    // Skip targets without 128-bit atomic support.
    // TODO: this must appear first
    if (!supports_128_bit_atomics) return error.SkipZigTest;

    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

    // Exercise both runtime and compile-time evaluation.
    try test_u128_cmpxchg();
    try comptime test_u128_cmpxchg();
}
120
/// Same shape as testCmpxchg, but on a 16-byte-aligned u128 so that the
/// 128-bit compare-and-exchange path is exercised.
fn test_u128_cmpxchg() !void {
    var value: u128 align(16) = 1234;

    // Wrong expected value: the exchange must fail and report the
    // current contents.
    const observed = @cmpxchgWeak(u128, &value, 99, 5678, .seq_cst, .seq_cst) orelse
        @panic("cmpxchg should have failed");
    try expect(observed == 1234);

    // Weak exchanges may fail spuriously; loop until one succeeds.
    while (@cmpxchgWeak(u128, &value, 1234, 5678, .seq_cst, .seq_cst)) |current| {
        try expect(current == 1234);
    }
    try expect(value == 5678);

    // A strong exchange with the correct expected value returns null.
    try expect(@cmpxchgStrong(u128, &value, 5678, 42, .seq_cst, .seq_cst) == null);
    try expect(value == 42);
}
137
// Global operand for the "cmpxchg on a global variable" test below.
var a_global_variable = @as(u32, 1234);
139
test "cmpxchg on a global variable" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

    // The operand lives in static memory rather than on the stack.
    // NOTE(review): a weak cmpxchg is allowed to fail spuriously; this
    // test relies on it succeeding on the first attempt here.
    _ = @cmpxchgWeak(u32, &a_global_variable, 1234, 42, .acquire, .monotonic);
    try expect(a_global_variable == 42);
}
149
test "atomic load and rmw with enum" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO

    // Atomics must work on enums with an explicit integer tag type.
    const Value = enum(u8) { a, b, c };
    var x = Value.a;

    try expect(@atomicLoad(Value, &x, .seq_cst) != .b);

    // Swap in .c, then confirm loads observe exactly that member.
    _ = @atomicRmw(Value, &x, .Xchg, .c, .seq_cst);
    try expect(@atomicLoad(Value, &x, .seq_cst) == .c);
    try expect(@atomicLoad(Value, &x, .seq_cst) != .a);
    try expect(@atomicLoad(Value, &x, .seq_cst) != .b);
}
167
test "atomic store" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

    // Exercise both compile-time and runtime evaluation.
    try comptime testAtomicStore();
    try testAtomicStore();
}
177
/// Stores two distinct values with @atomicStore and confirms each one is
/// visible through a subsequent @atomicLoad.
fn testAtomicStore() !void {
    var cell: u32 = 0;
    for ([_]u32{ 1, 12345678 }) |value| {
        @atomicStore(u32, &cell, value, .seq_cst);
        try expect(@atomicLoad(u32, &cell, .seq_cst) == value);
    }
}
185
test "atomicrmw with floats" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO

    // Exercise float @atomicRmw at runtime and at compile time.
    try testAtomicRmwFloat();
    try comptime testAtomicRmwFloat();
}
196
/// Drives every float-capable @atomicRmw operation (Xchg, Add, Sub, Max,
/// Min) on an f32, checking the stored value after each step.
fn testAtomicRmwFloat() !void {
    var accum: f32 = 0;
    try expect(accum == 0);
    _ = @atomicRmw(f32, &accum, .Xchg, 1, .seq_cst);
    try expect(accum == 1);
    _ = @atomicRmw(f32, &accum, .Add, 5, .seq_cst);
    try expect(accum == 6);
    _ = @atomicRmw(f32, &accum, .Sub, 2, .seq_cst);
    try expect(accum == 4);
    // Max against a larger operand raises the value; Min against a
    // larger operand leaves it unchanged.
    _ = @atomicRmw(f32, &accum, .Max, 13, .seq_cst);
    try expect(accum == 13);
    _ = @atomicRmw(f32, &accum, .Min, 42, .seq_cst);
    try expect(accum == 13);
}
211
test "atomicrmw with ints" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

    // Known LLVM-backend problem on MIPS; see the linked issue.
    if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) {
        // https://github.com/ziglang/zig/issues/16846
        return error.SkipZigTest;
    }

    try testAtomicRmwInts();
    try comptime testAtomicRmwInts();
}
227
/// Runs the integer @atomicRmw matrix for each operand size the target
/// is known to handle.
fn testAtomicRmwInts() !void {
    // TODO: Use the max atomic bit size for the target, maybe builtin?
    // Byte-sized atomics are exercised unconditionally.
    try testAtomicRmwInt(.unsigned, 8);

    // Wider sizes are only exercised on x86_64 for now.
    if (builtin.cpu.arch == .x86_64) {
        inline for ([_]u16{ 16, 32, 64 }) |bits| {
            try testAtomicRmwInt(.unsigned, bits);
        }
    }
}
238
/// Applies every integer @atomicRmw operation in sequence to a single
/// integer of the given signedness and bit width N. After each op the
/// builtin must have returned the pre-op value, and the memory must
/// equal the same op applied to a separately tracked shadow value.
fn testAtomicRmwInt(comptime signedness: std.builtin.Signedness, comptime N: usize) !void {
    const Int = std.meta.Int(signedness, N);

    var actual: Int = 1;
    var returned = @atomicRmw(Int, &actual, .Xchg, 3, .seq_cst);
    try expect(actual == 3 and returned == 1);

    returned = @atomicRmw(Int, &actual, .Add, 3, .seq_cst);
    var shadow: Int = 3;
    try expect(returned == shadow);
    shadow = shadow + 3;
    try expect(actual == shadow);

    returned = @atomicRmw(Int, &actual, .Sub, 1, .seq_cst);
    try expect(returned == shadow);
    shadow = shadow - 1;
    try expect(actual == shadow);

    returned = @atomicRmw(Int, &actual, .And, 4, .seq_cst);
    try expect(returned == shadow);
    shadow = shadow & 4;
    try expect(actual == shadow);

    returned = @atomicRmw(Int, &actual, .Nand, 4, .seq_cst);
    try expect(returned == shadow);
    shadow = ~(shadow & 4);
    try expect(actual == shadow);

    returned = @atomicRmw(Int, &actual, .Or, 6, .seq_cst);
    try expect(returned == shadow);
    shadow = shadow | 6;
    try expect(actual == shadow);

    returned = @atomicRmw(Int, &actual, .Xor, 2, .seq_cst);
    try expect(returned == shadow);
    shadow = shadow ^ 2;
    try expect(actual == shadow);

    returned = @atomicRmw(Int, &actual, .Max, 1, .seq_cst);
    try expect(returned == shadow);
    shadow = @max(shadow, 1);
    try expect(actual == shadow);

    returned = @atomicRmw(Int, &actual, .Min, 1, .seq_cst);
    try expect(returned == shadow);
    shadow = @min(shadow, 1);
    try expect(actual == shadow);
}
287
test "atomicrmw with 128-bit ints" {
    // Skip targets without 128-bit atomic support.
    // TODO: this must appear first
    if (!supports_128_bit_atomics) return error.SkipZigTest;

    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

    // Cover both signednesses, at runtime and at compile time.
    try testAtomicRmwInt128(.signed);
    try testAtomicRmwInt128(.unsigned);
    try comptime testAtomicRmwInt128(.signed);
    try comptime testAtomicRmwInt128(.unsigned);
}
299
/// 128-bit variant of testAtomicRmwInt: applies each integer @atomicRmw
/// operation to a 16-byte-aligned 128-bit integer. After every op, `res`
/// must be the pre-op value and `x` must equal the op applied to the
/// tracked expected value `y`.
fn testAtomicRmwInt128(comptime signedness: std.builtin.Signedness) !void {
    const uint = std.meta.Int(.unsigned, 128);
    const int = std.meta.Int(signedness, 128);

    // Bit-cast the initial pattern so the same bits exercise both the
    // signed and unsigned instantiations.
    const initial: int = @as(int, @bitCast(@as(uint, 0xaaaaaaaa_bbbbbbbb_cccccccc_dddddddd)));
    const replacement: int = 0x00000000_00000005_00000000_00000003;

    // 128-bit atomics require 16-byte alignment.
    var x: int align(16) = initial;
    var res = @atomicRmw(int, &x, .Xchg, replacement, .seq_cst);
    try expect(x == replacement and res == initial);

    var operator: int = 0x00000001_00000000_20000000_00000000;
    res = @atomicRmw(int, &x, .Add, operator, .seq_cst);
    var y: int = replacement;
    try expect(res == y);
    y = y + operator;
    try expect(x == y);

    operator = 0x00000000_10000000_00000000_20000000;
    res = @atomicRmw(int, &x, .Sub, operator, .seq_cst);
    try expect(res == y);
    y = y - operator;
    try expect(x == y);

    operator = 0x12345678_87654321_12345678_87654321;
    res = @atomicRmw(int, &x, .And, operator, .seq_cst);
    try expect(res == y);
    y = y & operator;
    try expect(x == y);

    operator = 0x00000000_10000000_00000000_20000000;
    res = @atomicRmw(int, &x, .Nand, operator, .seq_cst);
    try expect(res == y);
    y = ~(y & operator);
    try expect(x == y);

    operator = 0x12340000_56780000_67890000_98760000;
    res = @atomicRmw(int, &x, .Or, operator, .seq_cst);
    try expect(res == y);
    y = y | operator;
    try expect(x == y);

    operator = 0x0a0b0c0d_0e0f0102_03040506_0708090a;
    res = @atomicRmw(int, &x, .Xor, operator, .seq_cst);
    try expect(res == y);
    y = y ^ operator;
    try expect(x == y);

    operator = 0x00000000_10000000_00000000_20000000;
    res = @atomicRmw(int, &x, .Max, operator, .seq_cst);
    try expect(res == y);
    y = @max(y, operator);
    try expect(x == y);

    // Min reuses the previous operator value.
    res = @atomicRmw(int, &x, .Min, operator, .seq_cst);
    try expect(res == y);
    y = @min(y, operator);
    try expect(x == y);
}
359
test "atomics with different types" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
    if (builtin.target.cpu.arch.endian() == .big) return error.SkipZigTest; // #24282

    try testAtomicsWithType(bool, true, false);

    // Oddly-sized (non-power-of-two) integer widths.
    try testAtomicsWithType(u1, 0, 1);
    try testAtomicsWithType(i4, 2, 1);
    try testAtomicsWithType(u5, 2, 1);
    try testAtomicsWithType(i15, 2, 1);
    try testAtomicsWithType(u24, 2, 1);

    // Zero-bit types.
    try testAtomicsWithType(u0, 0, 0);
    try testAtomicsWithType(i0, 0, 0);

    // Enums with explicit tag types, including a non-byte-sized one.
    try testAtomicsWithType(enum(u32) { x = 1234, y = 5678 }, .x, .y);
    try testAtomicsWithType(enum(u19) { x = 1234, y = 5678 }, .x, .y);

    // Packed structs, compared via their backing integer.
    try testAtomicsWithPackedStruct(
        packed struct { x: u7, y: u24, z: bool },
        .{ .x = 1, .y = 2, .z = true },
        .{ .x = 3, .y = 4, .z = false },
    );
    try testAtomicsWithPackedStruct(
        packed struct { x: u19, y: bool },
        .{ .x = 1, .y = true },
        .{ .x = 3, .y = false },
    );
}
393
/// Runs @atomicStore/@atomicLoad/@atomicRmw/@cmpxchgStrong over any type
/// comparable with `==`, starting from `b` and swapping between `a` and `b`.
fn testAtomicsWithType(comptime T: type, a: T, b: T) !void {
    var cell: T = b;

    @atomicStore(T, &cell, a, .seq_cst);
    try expect(cell == a);
    try expect(@atomicLoad(T, &cell, .seq_cst) == a);

    // Xchg returns the previous contents (a) and leaves b behind.
    try expect(@atomicRmw(T, &cell, .Xchg, b, .seq_cst) == a);

    // The strong exchange b -> a succeeds, so it reports null...
    try expect(@cmpxchgStrong(T, &cell, b, a, .seq_cst, .seq_cst) == null);
    // ...and a second identical attempt must fail (the value is now a),
    // returning the current contents. Zero-bit types are skipped: every
    // value is identical, so the exchange can never fail.
    if (@sizeOf(T) != 0)
        try expect(@cmpxchgStrong(T, &cell, b, a, .seq_cst, .seq_cst).? == a);
}
404
/// Packed-struct variant of testAtomicsWithType: since packed structs do
/// not support `==` directly, equality is checked by @bitCast-ing values
/// to the struct's backing integer.
fn testAtomicsWithPackedStruct(comptime T: type, a: T, b: T) !void {
    const Backing = @typeInfo(T).@"struct".backing_integer.?;
    // Local helper: the packed value reinterpreted as its backing bits.
    const bits = struct {
        fn of(value: T) Backing {
            return @bitCast(value);
        }
    }.of;

    var cell: T = b;
    @atomicStore(T, &cell, a, .seq_cst);
    try expect(bits(cell) == bits(a));
    try expect(bits(@atomicLoad(T, &cell, .seq_cst)) == bits(a));
    // Xchg returns the previous contents (a) and leaves b behind.
    try expect(bits(@atomicRmw(T, &cell, .Xchg, b, .seq_cst)) == bits(a));
    // Strong exchange b -> a succeeds (null); the repeat attempt fails
    // and returns the current contents. Zero-bit structs are skipped.
    try expect(@cmpxchgStrong(T, &cell, b, a, .seq_cst, .seq_cst) == null);
    if (@sizeOf(T) != 0)
        try expect(bits(@cmpxchgStrong(T, &cell, b, a, .seq_cst, .seq_cst).?) == bits(a));
}
416
test "return @atomicStore, using it as a void value" {
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

    const S = struct {
        const A = struct {
            value: usize,

            // @atomicStore yields void, so it can be used directly as a
            // function's return expression.
            pub fn store(self: *A, value: usize) void {
                return @atomicStore(usize, &self.value, value, .unordered);
            }

            // Same, but as the result of a switch-expression arm.
            pub fn store2(self: *A, value: usize) void {
                return switch (value) {
                    else => @atomicStore(usize, &self.value, value, .unordered),
                };
            }
        };

        fn doTheTest() !void {
            var x: A = .{ .value = 5 };
            x.store(10);
            try expect(x.value == 10);
            x.store(100);
            try expect(x.value == 100);
        }
    };
    try S.doTheTest();
    try comptime S.doTheTest();
}