const std = @import("std");
const builtin = @import("builtin");
const common = @import("./common.zig");
const cpu = builtin.cpu;
const arch = cpu.arch;
const linkage = common.linkage;
const visibility = common.visibility;
pub const panic = common.panic;

// This flag is true iff the target architecture supports the bare minimum
// needed to implement the atomic load/store intrinsics.
// Some architectures support atomic loads/stores but no CAS; we ignore this
// distinction to keep the export logic simple, and because we need some kind
// of CAS anyway to implement the spinlocks.
const supports_atomic_ops = switch (arch) {
    .msp430, .avr, .bpfel, .bpfeb => false,
    .arm, .armeb, .thumb, .thumbeb =>
    // The ARM v6m ISA has no ldrex/strex and so it's impossible to do CAS
    // operations (unless we're targeting Linux, where the kernel provides a
    // way to perform CAS operations).
    // XXX: The Linux code path is not implemented yet.
    !builtin.cpu.has(.arm, .has_v6m),
    else => true,
};

// The size (in bytes) of the biggest object that the architecture can
// load/store atomically.
// Objects bigger than this threshold require the use of a lock.
const largest_atomic_size = switch (arch) {
    // On SPARC systems that lack CAS and/or swap instructions, the only
    // available atomic operation is a test-and-set (`ldstub`), so we force
    // every atomic memory access to go through the lock.
    .sparc => if (builtin.cpu.has(.sparc, .hasleoncasa)) @sizeOf(usize) else 0,

    // XXX: On x86/x86_64 we could check for the presence of cmpxchg8b/cmpxchg16b
    // and set this parameter accordingly.
    else => @sizeOf(usize),
};
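
// Note: a `largest_atomic_size` of zero (as for CAS-less SPARC above) sends
// every object, regardless of its size, down the spinlock path.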

// The size (in bytes) of the smallest object on which the architecture can
// perform an atomic fetch/exchange operation. Note that this does not cover
// plain loads and stores.
// Objects smaller than this threshold are implemented in terms of a
// compare-exchange on a larger value.
const smallest_atomic_fetch_exch_size = switch (arch) {
    // On AMDGCN, there are no instructions for atomic operations other than
    // load and store (as of LLVM 15), and so these need to be implemented in
    // terms of atomic CAS.
    .amdgcn => @sizeOf(u32),
    else => @sizeOf(u8),
};
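
// See `wideUpdate` below for how such sub-word operations are emulated on top
// of a wider compare-exchange.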

const cache_line_size = 64;

const SpinlockTable = struct {
    // Allocate ~4096 bytes of memory for the spinlock table (64 cache-line
    // sized entries).
    const max_spinlocks = 64;

    const Spinlock = struct {
        // The SPARC ldstub instruction writes 255 into the memory location;
        // we use that as the sign that the lock is currently held.
        // See also: Section B.7 in SPARCv8 spec & A.29 in SPARCv9 spec.
        const sparc_lock: type = enum(u8) { Unlocked = 0, Locked = 255 };
        const other_lock: type = enum(usize) { Unlocked = 0, Locked };

        // Prevent false sharing by providing enough padding between two
        // consecutive spinlock elements.
        v: if (arch.isSPARC()) sparc_lock else other_lock align(cache_line_size) = .Unlocked,

        fn acquire(self: *@This()) void {
            while (true) {
                const flag = if (comptime arch.isSPARC()) flag: {
                    break :flag asm volatile ("ldstub [%[addr]], %[flag]"
                        : [flag] "=r" (-> @TypeOf(self.v)),
                        : [addr] "r" (&self.v),
                        : .{ .memory = true });
                } else flag: {
                    break :flag @atomicRmw(@TypeOf(self.v), &self.v, .Xchg, .Locked, .acquire);
                };

                switch (flag) {
                    .Unlocked => break,
                    .Locked => {},
                }
            }
        }
        fn release(self: *@This()) void {
            if (comptime arch.isSPARC()) {
                _ = asm volatile ("clrb [%[addr]]"
                    :
                    : [addr] "r" (&self.v),
                    : .{ .memory = true });
            } else {
                @atomicStore(@TypeOf(self.v), &self.v, .Unlocked, .release);
            }
        }
    };

    list: [max_spinlocks]Spinlock = [_]Spinlock{.{}} ** max_spinlocks,

    // The spinlock table behaves as a very simple hash table, mapping
    // addresses to spinlocks. The mapping is not unique, but that is only a
    // performance problem: colliding addresses simply share a lock and contend
    // with each other.
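    // Shifting the address right by three bits means objects living in the
    // same 8-byte-aligned region share a slot, while neighbouring regions are
    // spread across different locks.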
    fn get(self: *@This(), address: usize) *Spinlock {
        const sl = &self.list[(address >> 3) % max_spinlocks];
        sl.acquire();
        return sl;
    }
};

var spinlocks: SpinlockTable = SpinlockTable{};

// The following builtins do not respect the specified memory model and instead
// use seq_cst, the strongest one, for simplicity's sake.

// Generic versions of the GCC atomic builtin functions.
// These work on objects of any size and any pointer alignment.
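// Every generic entry point goes through the spinlock table, so these are
// always lock-based.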

fn __atomic_load(size: u32, src: [*]u8, dest: [*]u8, model: i32) callconv(.c) void {
    _ = model;
    const sl = spinlocks.get(@intFromPtr(src));
    defer sl.release();
    @memcpy(dest[0..size], src);
}

fn __atomic_store(size: u32, dest: [*]u8, src: [*]u8, model: i32) callconv(.c) void {
    _ = model;
    const sl = spinlocks.get(@intFromPtr(dest));
    defer sl.release();
    @memcpy(dest[0..size], src);
}

fn __atomic_exchange(size: u32, ptr: [*]u8, val: [*]u8, old: [*]u8, model: i32) callconv(.c) void {
    _ = model;
    const sl = spinlocks.get(@intFromPtr(ptr));
    defer sl.release();
    @memcpy(old[0..size], ptr);
    @memcpy(ptr[0..size], val);
}

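// Returns 1 (true) when the exchange happened and 0 otherwise; on failure the
// value currently stored at `ptr` is written back into `expected`, matching
// the GCC __atomic_compare_exchange semantics.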
fn __atomic_compare_exchange(
    size: u32,
    ptr: [*]u8,
    expected: [*]u8,
    desired: [*]u8,
    success: i32,
    failure: i32,
) callconv(.c) i32 {
    _ = success;
    _ = failure;
    const sl = spinlocks.get(@intFromPtr(ptr));
    defer sl.release();
    for (ptr[0..size], 0..) |b, i| {
        if (expected[i] != b) break;
    } else {
        // The two objects, ptr and expected, are equal
        @memcpy(ptr[0..size], desired);
        return 1;
    }
    @memcpy(expected[0..size], ptr);
    return 0;
}

// Specialized versions of the GCC atomic builtin functions.
// LLVM emits calls to these iff the object size is known and the pointers are
// correctly aligned.
inline fn atomic_load_N(comptime T: type, src: *T, model: i32) T {
    _ = model;
    if (@sizeOf(T) > largest_atomic_size) {
        const sl = spinlocks.get(@intFromPtr(src));
        defer sl.release();
        return src.*;
    } else {
        return @atomicLoad(T, src, .seq_cst);
    }
}

fn __atomic_load_1(src: *u8, model: i32) callconv(.c) u8 {
    return atomic_load_N(u8, src, model);
}

fn __atomic_load_2(src: *u16, model: i32) callconv(.c) u16 {
    return atomic_load_N(u16, src, model);
}

fn __atomic_load_4(src: *u32, model: i32) callconv(.c) u32 {
    return atomic_load_N(u32, src, model);
}

fn __atomic_load_8(src: *u64, model: i32) callconv(.c) u64 {
    return atomic_load_N(u64, src, model);
}

fn __atomic_load_16(src: *u128, model: i32) callconv(.c) u128 {
    return atomic_load_N(u128, src, model);
}

inline fn atomic_store_N(comptime T: type, dst: *T, value: T, model: i32) void {
    _ = model;
    if (@sizeOf(T) > largest_atomic_size) {
        const sl = spinlocks.get(@intFromPtr(dst));
        defer sl.release();
        dst.* = value;
    } else {
        @atomicStore(T, dst, value, .seq_cst);
    }
}

fn __atomic_store_1(dst: *u8, value: u8, model: i32) callconv(.c) void {
    return atomic_store_N(u8, dst, value, model);
}

fn __atomic_store_2(dst: *u16, value: u16, model: i32) callconv(.c) void {
    return atomic_store_N(u16, dst, value, model);
}

fn __atomic_store_4(dst: *u32, value: u32, model: i32) callconv(.c) void {
    return atomic_store_N(u32, dst, value, model);
}

fn __atomic_store_8(dst: *u64, value: u64, model: i32) callconv(.c) void {
    return atomic_store_N(u64, dst, value, model);
}

fn __atomic_store_16(dst: *u128, value: u128, model: i32) callconv(.c) void {
    return atomic_store_N(u128, dst, value, model);
}

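// Perform an atomic read-modify-write on an object narrower than the smallest
// width the target can fetch/exchange natively. The naturally-aligned wide
// word containing it is updated with a CAS loop; only the bytes belonging to
// `ptr` change, the surrounding bytes are preserved.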
fn wideUpdate(comptime T: type, ptr: *T, val: T, update: anytype) T {
    const WideAtomic = std.meta.Int(.unsigned, smallest_atomic_fetch_exch_size * 8);

    const addr = @intFromPtr(ptr);
    const wide_addr = addr & ~(@as(usize, smallest_atomic_fetch_exch_size) - 1);
    const wide_ptr: *align(smallest_atomic_fetch_exch_size) WideAtomic = @alignCast(@as(*WideAtomic, @ptrFromInt(wide_addr)));

    const inner_offset = addr & (@as(usize, smallest_atomic_fetch_exch_size) - 1);
    const inner_shift = @as(std.math.Log2Int(WideAtomic), @intCast(inner_offset * 8));

    const mask = @as(WideAtomic, std.math.maxInt(T)) << inner_shift;

    var wide_old = @atomicLoad(WideAtomic, wide_ptr, .seq_cst);
    while (true) {
        const old = @as(T, @truncate((wide_old & mask) >> inner_shift));
        const new = update(val, old);
        const wide_new = wide_old & ~mask | (@as(WideAtomic, new) << inner_shift);
        if (@cmpxchgWeak(WideAtomic, wide_ptr, wide_old, wide_new, .seq_cst, .seq_cst)) |new_wide_old| {
            wide_old = new_wide_old;
        } else {
            return old;
        }
    }
}

inline fn atomic_exchange_N(comptime T: type, ptr: *T, val: T, model: i32) T {
    _ = model;
    if (@sizeOf(T) > largest_atomic_size) {
        const sl = spinlocks.get(@intFromPtr(ptr));
        defer sl.release();
        const value = ptr.*;
        ptr.* = val;
        return value;
    } else if (@sizeOf(T) < smallest_atomic_fetch_exch_size) {
        // Machine does not support this type, but it does support a larger type.
        const Updater = struct {
            fn update(new: T, old: T) T {
                _ = old;
                return new;
            }
        };
        return wideUpdate(T, ptr, val, Updater.update);
    } else {
        return @atomicRmw(T, ptr, .Xchg, val, .seq_cst);
    }
}

fn __atomic_exchange_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return atomic_exchange_N(u8, ptr, val, model);
}

fn __atomic_exchange_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return atomic_exchange_N(u16, ptr, val, model);
}

fn __atomic_exchange_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return atomic_exchange_N(u32, ptr, val, model);
}

fn __atomic_exchange_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
    return atomic_exchange_N(u64, ptr, val, model);
}

fn __atomic_exchange_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
    return atomic_exchange_N(u128, ptr, val, model);
}

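// Note that @cmpxchgStrong returns null on success and the observed value on
// failure, which maps directly onto the 1/0 return value and the write-back
// into `expected` below.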
inline fn atomic_compare_exchange_N(
    comptime T: type,
    ptr: *T,
    expected: *T,
    desired: T,
    success: i32,
    failure: i32,
) i32 {
    _ = success;
    _ = failure;
    if (@sizeOf(T) > largest_atomic_size) {
        const sl = spinlocks.get(@intFromPtr(ptr));
        defer sl.release();
        const value = ptr.*;
        if (value == expected.*) {
            ptr.* = desired;
            return 1;
        }
        expected.* = value;
        return 0;
    } else {
        if (@cmpxchgStrong(T, ptr, expected.*, desired, .seq_cst, .seq_cst)) |old_value| {
            expected.* = old_value;
            return 0;
        }
        return 1;
    }
}

fn __atomic_compare_exchange_1(ptr: *u8, expected: *u8, desired: u8, success: i32, failure: i32) callconv(.c) i32 {
    return atomic_compare_exchange_N(u8, ptr, expected, desired, success, failure);
}

fn __atomic_compare_exchange_2(ptr: *u16, expected: *u16, desired: u16, success: i32, failure: i32) callconv(.c) i32 {
    return atomic_compare_exchange_N(u16, ptr, expected, desired, success, failure);
}

fn __atomic_compare_exchange_4(ptr: *u32, expected: *u32, desired: u32, success: i32, failure: i32) callconv(.c) i32 {
    return atomic_compare_exchange_N(u32, ptr, expected, desired, success, failure);
}

fn __atomic_compare_exchange_8(ptr: *u64, expected: *u64, desired: u64, success: i32, failure: i32) callconv(.c) i32 {
    return atomic_compare_exchange_N(u64, ptr, expected, desired, success, failure);
}

fn __atomic_compare_exchange_16(ptr: *u128, expected: *u128, desired: u128, success: i32, failure: i32) callconv(.c) i32 {
    return atomic_compare_exchange_N(u128, ptr, expected, desired, success, failure);
}

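// Implements the __atomic_fetch_<op>_N family: objects too large for native
// atomics take the spinlock, objects below the native RMW width are widened
// through wideUpdate, and everything else maps to a single @atomicRmw.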
inline fn fetch_op_N(comptime T: type, comptime op: std.builtin.AtomicRmwOp, ptr: *T, val: T, model: i32) T {
    _ = model;
    const Updater = struct {
        fn update(new: T, old: T) T {
            return switch (op) {
                .Add => old +% new,
                .Sub => old -% new,
                .And => old & new,
                .Nand => ~(old & new),
                .Or => old | new,
                .Xor => old ^ new,
                .Max => @max(old, new),
                .Min => @min(old, new),
                else => @compileError("unsupported atomic op"),
            };
        }
    };

    if (@sizeOf(T) > largest_atomic_size) {
        const sl = spinlocks.get(@intFromPtr(ptr));
        defer sl.release();

        const value = ptr.*;
        ptr.* = Updater.update(val, value);
        return value;
    } else if (@sizeOf(T) < smallest_atomic_fetch_exch_size) {
        // Machine does not support this type, but it does support a larger type.
        return wideUpdate(T, ptr, val, Updater.update);
    }

    return @atomicRmw(T, ptr, op, val, .seq_cst);
}

fn __atomic_fetch_add_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return fetch_op_N(u8, .Add, ptr, val, model);
}

fn __atomic_fetch_add_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return fetch_op_N(u16, .Add, ptr, val, model);
}

fn __atomic_fetch_add_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return fetch_op_N(u32, .Add, ptr, val, model);
}

fn __atomic_fetch_add_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
    return fetch_op_N(u64, .Add, ptr, val, model);
}

fn __atomic_fetch_add_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
    return fetch_op_N(u128, .Add, ptr, val, model);
}

fn __atomic_fetch_sub_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return fetch_op_N(u8, .Sub, ptr, val, model);
}

fn __atomic_fetch_sub_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return fetch_op_N(u16, .Sub, ptr, val, model);
}

fn __atomic_fetch_sub_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return fetch_op_N(u32, .Sub, ptr, val, model);
}

fn __atomic_fetch_sub_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
    return fetch_op_N(u64, .Sub, ptr, val, model);
}

fn __atomic_fetch_sub_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
    return fetch_op_N(u128, .Sub, ptr, val, model);
}

fn __atomic_fetch_and_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return fetch_op_N(u8, .And, ptr, val, model);
}

fn __atomic_fetch_and_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return fetch_op_N(u16, .And, ptr, val, model);
}

fn __atomic_fetch_and_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return fetch_op_N(u32, .And, ptr, val, model);
}

fn __atomic_fetch_and_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
    return fetch_op_N(u64, .And, ptr, val, model);
}

fn __atomic_fetch_and_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
    return fetch_op_N(u128, .And, ptr, val, model);
}

fn __atomic_fetch_or_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return fetch_op_N(u8, .Or, ptr, val, model);
}

fn __atomic_fetch_or_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return fetch_op_N(u16, .Or, ptr, val, model);
}

fn __atomic_fetch_or_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return fetch_op_N(u32, .Or, ptr, val, model);
}

fn __atomic_fetch_or_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
    return fetch_op_N(u64, .Or, ptr, val, model);
}

fn __atomic_fetch_or_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
    return fetch_op_N(u128, .Or, ptr, val, model);
}

fn __atomic_fetch_xor_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return fetch_op_N(u8, .Xor, ptr, val, model);
}

fn __atomic_fetch_xor_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return fetch_op_N(u16, .Xor, ptr, val, model);
}

fn __atomic_fetch_xor_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return fetch_op_N(u32, .Xor, ptr, val, model);
}

fn __atomic_fetch_xor_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
    return fetch_op_N(u64, .Xor, ptr, val, model);
}

fn __atomic_fetch_xor_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
    return fetch_op_N(u128, .Xor, ptr, val, model);
}

fn __atomic_fetch_nand_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return fetch_op_N(u8, .Nand, ptr, val, model);
}

fn __atomic_fetch_nand_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return fetch_op_N(u16, .Nand, ptr, val, model);
}

fn __atomic_fetch_nand_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return fetch_op_N(u32, .Nand, ptr, val, model);
}

fn __atomic_fetch_nand_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
    return fetch_op_N(u64, .Nand, ptr, val, model);
}

fn __atomic_fetch_nand_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
    return fetch_op_N(u128, .Nand, ptr, val, model);
}

fn __atomic_fetch_umax_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return fetch_op_N(u8, .Max, ptr, val, model);
}

fn __atomic_fetch_umax_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return fetch_op_N(u16, .Max, ptr, val, model);
}

fn __atomic_fetch_umax_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return fetch_op_N(u32, .Max, ptr, val, model);
}

fn __atomic_fetch_umax_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
    return fetch_op_N(u64, .Max, ptr, val, model);
}

fn __atomic_fetch_umax_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
    return fetch_op_N(u128, .Max, ptr, val, model);
}

fn __atomic_fetch_umin_1(ptr: *u8, val: u8, model: i32) callconv(.c) u8 {
    return fetch_op_N(u8, .Min, ptr, val, model);
}

fn __atomic_fetch_umin_2(ptr: *u16, val: u16, model: i32) callconv(.c) u16 {
    return fetch_op_N(u16, .Min, ptr, val, model);
}

fn __atomic_fetch_umin_4(ptr: *u32, val: u32, model: i32) callconv(.c) u32 {
    return fetch_op_N(u32, .Min, ptr, val, model);
}

fn __atomic_fetch_umin_8(ptr: *u64, val: u64, model: i32) callconv(.c) u64 {
    return fetch_op_N(u64, .Min, ptr, val, model);
}

fn __atomic_fetch_umin_16(ptr: *u128, val: u128, model: i32) callconv(.c) u128 {
    return fetch_op_N(u128, .Min, ptr, val, model);
}

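// Only export the builtins on targets that can actually implement them, and
// never when emitting C source code.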
comptime {
    if (supports_atomic_ops and builtin.object_format != .c) {
        @export(&__atomic_load, .{ .name = "__atomic_load", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_store, .{ .name = "__atomic_store", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_exchange, .{ .name = "__atomic_exchange", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_compare_exchange, .{ .name = "__atomic_compare_exchange", .linkage = linkage, .visibility = visibility });

        @export(&__atomic_fetch_add_1, .{ .name = "__atomic_fetch_add_1", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_add_2, .{ .name = "__atomic_fetch_add_2", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_add_4, .{ .name = "__atomic_fetch_add_4", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_add_8, .{ .name = "__atomic_fetch_add_8", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_add_16, .{ .name = "__atomic_fetch_add_16", .linkage = linkage, .visibility = visibility });

        @export(&__atomic_fetch_sub_1, .{ .name = "__atomic_fetch_sub_1", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_sub_2, .{ .name = "__atomic_fetch_sub_2", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_sub_4, .{ .name = "__atomic_fetch_sub_4", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_sub_8, .{ .name = "__atomic_fetch_sub_8", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_sub_16, .{ .name = "__atomic_fetch_sub_16", .linkage = linkage, .visibility = visibility });

        @export(&__atomic_fetch_and_1, .{ .name = "__atomic_fetch_and_1", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_and_2, .{ .name = "__atomic_fetch_and_2", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_and_4, .{ .name = "__atomic_fetch_and_4", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_and_8, .{ .name = "__atomic_fetch_and_8", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_and_16, .{ .name = "__atomic_fetch_and_16", .linkage = linkage, .visibility = visibility });

        @export(&__atomic_fetch_or_1, .{ .name = "__atomic_fetch_or_1", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_or_2, .{ .name = "__atomic_fetch_or_2", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_or_4, .{ .name = "__atomic_fetch_or_4", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_or_8, .{ .name = "__atomic_fetch_or_8", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_or_16, .{ .name = "__atomic_fetch_or_16", .linkage = linkage, .visibility = visibility });

        @export(&__atomic_fetch_xor_1, .{ .name = "__atomic_fetch_xor_1", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_xor_2, .{ .name = "__atomic_fetch_xor_2", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_xor_4, .{ .name = "__atomic_fetch_xor_4", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_xor_8, .{ .name = "__atomic_fetch_xor_8", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_xor_16, .{ .name = "__atomic_fetch_xor_16", .linkage = linkage, .visibility = visibility });

        @export(&__atomic_fetch_nand_1, .{ .name = "__atomic_fetch_nand_1", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_nand_2, .{ .name = "__atomic_fetch_nand_2", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_nand_4, .{ .name = "__atomic_fetch_nand_4", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_nand_8, .{ .name = "__atomic_fetch_nand_8", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_nand_16, .{ .name = "__atomic_fetch_nand_16", .linkage = linkage, .visibility = visibility });

        @export(&__atomic_fetch_umax_1, .{ .name = "__atomic_fetch_umax_1", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_umax_2, .{ .name = "__atomic_fetch_umax_2", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_umax_4, .{ .name = "__atomic_fetch_umax_4", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_umax_8, .{ .name = "__atomic_fetch_umax_8", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_umax_16, .{ .name = "__atomic_fetch_umax_16", .linkage = linkage, .visibility = visibility });

        @export(&__atomic_fetch_umin_1, .{ .name = "__atomic_fetch_umin_1", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_umin_2, .{ .name = "__atomic_fetch_umin_2", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_umin_4, .{ .name = "__atomic_fetch_umin_4", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_umin_8, .{ .name = "__atomic_fetch_umin_8", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_fetch_umin_16, .{ .name = "__atomic_fetch_umin_16", .linkage = linkage, .visibility = visibility });

        @export(&__atomic_load_1, .{ .name = "__atomic_load_1", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_load_2, .{ .name = "__atomic_load_2", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_load_4, .{ .name = "__atomic_load_4", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_load_8, .{ .name = "__atomic_load_8", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_load_16, .{ .name = "__atomic_load_16", .linkage = linkage, .visibility = visibility });

        @export(&__atomic_store_1, .{ .name = "__atomic_store_1", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_store_2, .{ .name = "__atomic_store_2", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_store_4, .{ .name = "__atomic_store_4", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_store_8, .{ .name = "__atomic_store_8", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_store_16, .{ .name = "__atomic_store_16", .linkage = linkage, .visibility = visibility });

        @export(&__atomic_exchange_1, .{ .name = "__atomic_exchange_1", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_exchange_2, .{ .name = "__atomic_exchange_2", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_exchange_4, .{ .name = "__atomic_exchange_4", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_exchange_8, .{ .name = "__atomic_exchange_8", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_exchange_16, .{ .name = "__atomic_exchange_16", .linkage = linkage, .visibility = visibility });

        @export(&__atomic_compare_exchange_1, .{ .name = "__atomic_compare_exchange_1", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_compare_exchange_2, .{ .name = "__atomic_compare_exchange_2", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_compare_exchange_4, .{ .name = "__atomic_compare_exchange_4", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_compare_exchange_8, .{ .name = "__atomic_compare_exchange_8", .linkage = linkage, .visibility = visibility });
        @export(&__atomic_compare_exchange_16, .{ .name = "__atomic_compare_exchange_16", .linkage = linkage, .visibility = visibility });
    }
}
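
// A small in-file sanity check (not part of the upstream test suite); it
// exercises the helpers above through whichever code path the host target
// selects. The expected values follow directly from the operations performed.
test "atomic helpers round-trip" {
    var x: u32 = 5;
    try std.testing.expectEqual(@as(u32, 5), fetch_op_N(u32, .Add, &x, 3, 0));
    try std.testing.expectEqual(@as(u32, 8), atomic_load_N(u32, &x, 0));
    try std.testing.expectEqual(@as(u32, 8), atomic_exchange_N(u32, &x, 42, 0));
    try std.testing.expectEqual(@as(u32, 42), x);
}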